author     Linus Torvalds <torvalds@linux-foundation.org>   2016-08-01 21:44:08 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2016-08-01 21:44:08 -0400
commit     731c7d3a205ba89b475b2aa71b5f13dd6ae3de56 (patch)
tree       d2b9c3e0a98b94dfc3e4e60e35622c0143ef4ed4 /drivers
parent     77a87824ed676ca8ff8482e4157d3adb284fd381 (diff)
parent     753e7c8cbd8c503b962294303c7b5e9ea8513443 (diff)
Merge tag 'drm-for-v4.8' of git://people.freedesktop.org/~airlied/linux
Merge drm updates from Dave Airlie:
 "This is the main drm pull request for 4.8. I'm down with a cold at the
  moment so hopefully this isn't in too bad a state, I finished pulling
  stuff last week mostly (nouveau fixes just went in today), so only this
  message should be influenced by illness. Apologies to anyone who's
  major feature I missed :-)

  Core:
     Lockless GEM BO freeing
     Non-blocking atomic work
     Documentation changes (rst/sphinx)
     Prep for new fencing changes
     Simple display helpers
     Master/auth changes
     Register/unregister rework
     Loads of trivial patches/fixes.

  New stuff:
     ARM Mali display driver (not the 3D chip)
     sii902x RGB->HDMI bridge

  Panel:
     Support for new panels
     Improved backlight support

  Bridge:
     Convert ADV7511 to bridge driver
     ADV7533 support
     TC358767 (DSI/DPI to eDP) encoder chip support

  i915:
     BXT support enabled by default
     GVT-g infrastructure
     GuC command submission and fixes
     BXT workarounds
     SKL/BKL workarounds
     Demidlayering device registration
     Thundering herd fixes
     Missing pci ids
     Atomic updates

  amdgpu/radeon:
     ATPX improvements for better dGPU power control on PX systems
     New power features for CZ/BR/ST
     Pipelined BO moves and evictions in TTM
     GPU scheduler improvements
     GPU reset improvements
     Overclocking on dGPUs with amdgpu
     Polaris powermanagement enabled

  nouveau:
     GK20A/GM20B volt and clock improvements.
     Initial support for GP100/GP104 GPUs, GP104 will not yet support
     acceleration due to NVIDIA having not released firmware for them
     as of yet.

  exynos:
     Exynos5433 SoC with IOMMU support.

  vc4:
     Shader validation for branching

  imx-drm:
     Atomic mode setting conversion
     Reworked DMFC FIFO allocation
     External bridge support

  analogix-dp:
     RK3399 eDP support
     Lots of fixes.

  rockchip:
     Lots of small fixes.

  msm:
     DT bindings cleanups
     Shrinker and madvise support
     ASoC HDMI codec support

  tegra:
     Host1x driver cleanups
     SOR reworking for DP support
     Runtime PM support

  omapdrm:
     PLL enhancements
     Header refactoring
     Gamma table support

  arcgpu:
     Simulator support

  virtio-gpu:
     Atomic modesetting fixes.

  rcar-du:
     Misc fixes.

  mediatek:
     MT8173 HDMI support

  sti:
     ASOC HDMI codec support
     Minor fixes

  fsl-dcu:
     Suspend/resume support
     Bridge support

  amdkfd:
     Minor fixes.

  etnaviv:
     Enable GPU clock gating

  hisilicon:
     Vblank and other fixes"

* tag 'drm-for-v4.8' of git://people.freedesktop.org/~airlied/linux: (1575 commits)
  drm/nouveau/gr/nv3x: fix instobj write offsets in gr setup
  drm/nouveau/acpi: fix lockup with PCIe runtime PM
  drm/nouveau/acpi: check for function 0x1B before using it
  drm/nouveau/acpi: return supported DSM functions
  drm/nouveau/acpi: ensure matching ACPI handle and supported functions
  drm/nouveau/fbcon: fix font width not divisible by 8
  drm/amd/powerplay: remove enable_clock_power_gatings_tasks from initialize and resume events
  drm/amd/powerplay: move clockgating to after ungating power in pp for uvd/vce
  drm/amdgpu: add query device id and revision id into system info entry at CGS
  drm/amdgpu: add new definition in bif header
  drm/amd/powerplay: rename smum header guards
  drm/amdgpu: enable UVD context buffer for older HW
  drm/amdgpu: fix default UVD context size
  drm/amdgpu: fix incorrect type of info_id
  drm/amdgpu: make amdgpu_cgs_call_acpi_method as static
  drm/amdgpu: comment out unused defaults_staturn_pro static const structure to fix the build
  drm/amdgpu: enable UVD VM only on polaris
  drm/amdgpu: increase timeout of IB test
  drm/amdgpu: add destroy session when generate VCE destroy msg.
  drm/amd: fix deadlock of job_list_lock V2
  ...
Diffstat (limited to 'drivers')
-rw-r--r--drivers/char/agp/intel-gtt.c8
-rw-r--r--drivers/dma-buf/Kconfig15
-rw-r--r--drivers/dma-buf/Makefile2
-rw-r--r--drivers/dma-buf/dma-buf.c59
-rw-r--r--drivers/dma-buf/fence-array.c144
-rw-r--r--drivers/dma-buf/fence.c8
-rw-r--r--drivers/dma-buf/sync_file.c2
-rw-r--r--drivers/gpu/drm/Makefile5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h112
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c68
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c57
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c287
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c44
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c80
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c264
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c172
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c230
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c90
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h115
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c98
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c170
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c334
-rw-r--r--drivers/gpu/drm/amd/amdgpu/atombios_encoders.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ci_dpm.c216
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ci_dpm.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik.c25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_sdma.c50
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_dpm.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c83
-rw-r--r--drivers/gpu/drm/amd/amdgpu/fiji_smc.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c137
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c468
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c41
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c46
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/iceland_dpm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/iceland_smc.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/iceland_smum.h (renamed from drivers/gpu/drm/amd/amdgpu/iceland_smumgr.h)4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/kv_dpm.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ppsmc.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c77
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c48
-rw-r--r--drivers/gpu/drm/amd/amdgpu/tonga_smc.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c77
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c76
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c182
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v2_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c160
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vi.c57
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c4
-rw-r--r--drivers/gpu/drm/amd/include/amd_pcie.h14
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h11
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h1
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_d.h3
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_sh_mask.h108
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h2
-rw-r--r--drivers/gpu/drm/amd/include/cgs_common.h19
-rw-r--r--drivers/gpu/drm/amd/powerplay/amd_powerplay.c153
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c3
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c17
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c23
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c404
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h3
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c74
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h15
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c9
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c26
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c10
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c20
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c464
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h6
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c590
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h26
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c303
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h165
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c33
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.h17
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c272
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h3
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c27
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h7
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h1
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/hwmgr.h10
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/smumgr.h29
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c5
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c3
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h4
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_scheduler.c190
-rw-r--r--drivers/gpu/drm/amd/scheduler/gpu_scheduler.h60
-rw-r--r--drivers/gpu/drm/amd/scheduler/sched_fence.c81
-rw-r--r--drivers/gpu/drm/arc/Kconfig1
-rw-r--r--drivers/gpu/drm/arc/Makefile2
-rw-r--r--drivers/gpu/drm/arc/arcpgu.h2
-rw-r--r--drivers/gpu/drm/arc/arcpgu_crtc.c16
-rw-r--r--drivers/gpu/drm/arc/arcpgu_drv.c64
-rw-r--r--drivers/gpu/drm/arc/arcpgu_hdmi.c18
-rw-r--r--drivers/gpu/drm/arc/arcpgu_sim.c128
-rw-r--r--drivers/gpu/drm/arm/Kconfig17
-rw-r--r--drivers/gpu/drm/arm/Makefile2
-rw-r--r--drivers/gpu/drm/arm/hdlcd_crtc.c19
-rw-r--r--drivers/gpu/drm/arm/hdlcd_drv.c13
-rw-r--r--drivers/gpu/drm/arm/malidp_crtc.c216
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.c519
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.h54
-rw-r--r--drivers/gpu/drm/arm/malidp_hw.c691
-rw-r--r--drivers/gpu/drm/arm/malidp_hw.h241
-rw-r--r--drivers/gpu/drm/arm/malidp_planes.c298
-rw-r--r--drivers/gpu/drm/arm/malidp_regs.h172
-rw-r--r--drivers/gpu/drm/armada/Kconfig4
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.c16
-rw-r--r--drivers/gpu/drm/armada/armada_drv.c3
-rw-r--r--drivers/gpu/drm/armada/armada_overlay.c1
-rw-r--r--drivers/gpu/drm/ast/Kconfig4
-rw-r--r--drivers/gpu/drm/ast/ast_drv.c2
-rw-r--r--drivers/gpu/drm/ast/ast_fb.c3
-rw-r--r--drivers/gpu/drm/ast/ast_main.c3
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c10
-rw-r--r--drivers/gpu/drm/ast/ast_ttm.c13
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/Kconfig1
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c6
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c19
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c12
-rw-r--r--drivers/gpu/drm/bochs/Kconfig4
-rw-r--r--drivers/gpu/drm/bochs/bochs_drv.c2
-rw-r--r--drivers/gpu/drm/bochs/bochs_mm.c15
-rw-r--r--drivers/gpu/drm/bridge/Kconfig19
-rw-r--r--drivers/gpu/drm/bridge/Makefile3
-rw-r--r--drivers/gpu/drm/bridge/adv7511/Kconfig15
-rw-r--r--drivers/gpu/drm/bridge/adv7511/Makefile3
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511.h (renamed from drivers/gpu/drm/i2c/adv7511.h)103
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_drv.c (renamed from drivers/gpu/drm/i2c/adv7511.c)324
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7533.c265
-rw-r--r--drivers/gpu/drm/bridge/analogix-anx78xx.c8
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.c3
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_core.h8
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c12
-rw-r--r--drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h5
-rw-r--r--drivers/gpu/drm/bridge/dw-hdmi.c30
-rw-r--r--drivers/gpu/drm/bridge/nxp-ptn3460.c8
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8622.c14
-rw-r--r--drivers/gpu/drm/bridge/sii902x.c467
-rw-r--r--drivers/gpu/drm/bridge/tc358767.c1413
-rw-r--r--drivers/gpu/drm/cirrus/Kconfig4
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.c2
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_main.c4
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_mode.c11
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_ttm.c13
-rw-r--r--drivers/gpu/drm/drm_atomic.c105
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c566
-rw-r--r--drivers/gpu/drm/drm_auth.c285
-rw-r--r--drivers/gpu/drm/drm_bridge.c2
-rw-r--r--drivers/gpu/drm/drm_bufs.c8
-rw-r--r--drivers/gpu/drm/drm_cache.c1
-rw-r--r--drivers/gpu/drm/drm_crtc.c717
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c36
-rw-r--r--drivers/gpu/drm/drm_crtc_internal.h88
-rw-r--r--drivers/gpu/drm/drm_debugfs.c3
-rw-r--r--drivers/gpu/drm/drm_dp_aux_dev.c3
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c58
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c14
-rw-r--r--drivers/gpu/drm/drm_drv.c245
-rw-r--r--drivers/gpu/drm/drm_edid_load.c2
-rw-r--r--drivers/gpu/drm/drm_fb_cma_helper.c43
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c56
-rw-r--r--drivers/gpu/drm/drm_fops.c149
-rw-r--r--drivers/gpu/drm/drm_fourcc.c320
-rw-r--r--drivers/gpu/drm/drm_gem.c2
-rw-r--r--drivers/gpu/drm/drm_info.c117
-rw-r--r--drivers/gpu/drm/drm_internal.h21
-rw-r--r--drivers/gpu/drm/drm_ioctl.c176
-rw-r--r--drivers/gpu/drm/drm_irq.c243
-rw-r--r--drivers/gpu/drm/drm_legacy.h8
-rw-r--r--drivers/gpu/drm/drm_lock.c240
-rw-r--r--drivers/gpu/drm/drm_memory.c2
-rw-r--r--drivers/gpu/drm/drm_mipi_dsi.c38
-rw-r--r--drivers/gpu/drm/drm_mm.c4
-rw-r--r--drivers/gpu/drm/drm_modes.c4
-rw-r--r--drivers/gpu/drm/drm_modeset_lock.c13
-rw-r--r--drivers/gpu/drm/drm_pci.c51
-rw-r--r--drivers/gpu/drm/drm_plane_helper.c38
-rw-r--r--drivers/gpu/drm/drm_platform.c18
-rw-r--r--drivers/gpu/drm/drm_prime.c10
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c21
-rw-r--r--drivers/gpu/drm/drm_scatter.c2
-rw-r--r--drivers/gpu/drm/drm_simple_kms_helper.c206
-rw-r--r--drivers/gpu/drm/drm_sysfs.c71
-rw-r--r--drivers/gpu/drm/drm_vm.c58
-rw-r--r--drivers/gpu/drm/drm_vma_manager.c3
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.c5
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem.c16
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c54
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.h2
-rw-r--r--drivers/gpu/drm/etnaviv/state_hi.xml.h7
-rw-r--r--drivers/gpu/drm/exynos/Kconfig6
-rw-r--r--drivers/gpu/drm/exynos/exynos_dp.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dpi.c9
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c18
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dsi.c9
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c3
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_iommu.c77
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_iommu.h91
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_vidi.c8
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c10
-rw-r--r--drivers/gpu/drm/fsl-dcu/Kconfig5
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c44
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c49
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h2
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c15
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_output.h3
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c16
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.h1
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c87
-rw-r--r--drivers/gpu/drm/fsl-dcu/fsl_tcon.c1
-rw-r--r--drivers/gpu/drm/gma500/Kconfig4
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_hdmi.c3
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_lvds.c9
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c9
-rw-r--r--drivers/gpu/drm/gma500/gma_display.c11
-rw-r--r--drivers/gpu/drm/gma500/gma_display.h4
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c6
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_display.c7
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_lvds.c9
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/Kconfig1
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c34
-rw-r--r--drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c11
-rw-r--r--drivers/gpu/drm/i2c/Kconfig6
-rw-r--r--drivers/gpu/drm/i2c/Makefile2
-rw-r--r--drivers/gpu/drm/i2c/ch7006_drv.c9
-rw-r--r--drivers/gpu/drm/i915/Kconfig22
-rw-r--r--drivers/gpu/drm/i915/Kconfig.debug3
-rw-r--r--drivers/gpu/drm/i915/Makefile12
-rw-r--r--drivers/gpu/drm/i915/gvt/Makefile5
-rw-r--r--drivers/gpu/drm/i915/gvt/debug.h34
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.c145
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h69
-rw-r--r--drivers/gpu/drm/i915/gvt/hypercall.h38
-rw-r--r--drivers/gpu/drm/i915/gvt/mpt.h49
-rw-r--r--drivers/gpu/drm/i915/i915_cmd_parser.c53
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c570
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c1587
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c1956
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h786
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c1262
-rw-r--r--drivers/gpu/drm/i915/i915_gem_batch_pool.c6
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c515
-rw-r--r--drivers/gpu/drm/i915/i915_gem_dmabuf.h45
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c55
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c44
-rw-r--r--drivers/gpu/drm/i915/i915_gem_fence.c38
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c390
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.h84
-rw-r--r--drivers/gpu/drm/i915/i915_gem_render_state.c45
-rw-r--r--drivers/gpu/drm/i915/i915_gem_shrinker.c58
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c10
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c8
-rw-r--r--drivers/gpu/drm/i915/i915_gem_userptr.c12
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c203
-rw-r--r--drivers/gpu/drm/i915/i915_guc_reg.h6
-rw-r--r--drivers/gpu/drm/i915/i915_guc_submission.c538
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c807
-rw-r--r--drivers/gpu/drm/i915/i915_params.c23
-rw-r--r--drivers/gpu/drm/i915/i915_params.h5
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c503
-rw-r--r--drivers/gpu/drm/i915/i915_pvinfo.h113
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h65
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c8
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c40
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h54
-rw-r--r--drivers/gpu/drm/i915/i915_vgpu.c44
-rw-r--r--drivers/gpu/drm/i915/i915_vgpu.h92
-rw-r--r--drivers/gpu/drm/i915/intel_atomic.c5
-rw-r--r--drivers/gpu/drm/i915/intel_audio.c50
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c23
-rw-r--r--drivers/gpu/drm/i915/intel_bios.h16
-rw-r--r--drivers/gpu/drm/i915/intel_breadcrumbs.c595
-rw-r--r--drivers/gpu/drm/i915/intel_color.c23
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c78
-rw-r--r--drivers/gpu/drm/i915/intel_csr.c25
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c274
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.c388
-rw-r--r--drivers/gpu/drm/i915/intel_display.c2645
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c1087
-rw-r--r--drivers/gpu/drm/i915/intel_dp_aux_backlight.c172
-rw-r--r--drivers/gpu/drm/i915/intel_dp_mst.c25
-rw-r--r--drivers/gpu/drm/i915/intel_dpio_phy.c470
-rw-r--r--drivers/gpu/drm/i915/intel_dpll_mgr.c63
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h353
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.c135
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.h4
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c179
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_panel_vbt.c90
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_pll.c42
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c41
-rw-r--r--drivers/gpu/drm/i915/intel_fbc.c116
-rw-r--r--drivers/gpu/drm/i915/intel_fbdev.c122
-rw-r--r--drivers/gpu/drm/i915/intel_fifo_underrun.c30
-rw-r--r--drivers/gpu/drm/i915/intel_guc.h50
-rw-r--r--drivers/gpu/drm/i915/intel_guc_fwif.h3
-rw-r--r--drivers/gpu/drm/i915/intel_guc_loader.c222
-rw-r--r--drivers/gpu/drm/i915/intel_gvt.c104
-rw-r--r--drivers/gpu/drm/i915/intel_gvt.h45
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c429
-rw-r--r--drivers/gpu/drm/i915/intel_hotplug.c134
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c22
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c964
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.h24
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c58
-rw-r--r--drivers/gpu/drm/i915/intel_mocs.c100
-rw-r--r--drivers/gpu/drm/i915/intel_modes.c4
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c210
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c151
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c48
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c1466
-rw-r--r--drivers/gpu/drm/i915/intel_psr.c118
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c1323
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h176
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c282
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c87
-rw-r--r--drivers/gpu/drm/i915/intel_sideband.c32
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c75
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c19
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c328
-rw-r--r--drivers/gpu/drm/i915/intel_vbt_defs.h6
-rw-r--r--drivers/gpu/drm/imx/Kconfig1
-rw-r--r--drivers/gpu/drm/imx/dw_hdmi-imx.c32
-rw-r--r--drivers/gpu/drm/imx/imx-drm-core.c121
-rw-r--r--drivers/gpu/drm/imx/imx-drm.h21
-rw-r--r--drivers/gpu/drm/imx/imx-ldb.c189
-rw-r--r--drivers/gpu/drm/imx/imx-tve.c97
-rw-r--r--drivers/gpu/drm/imx/ipuv3-crtc.c400
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.c548
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.h16
-rw-r--r--drivers/gpu/drm/imx/parallel-display.c149
-rw-r--r--drivers/gpu/drm/mediatek/Kconfig9
-rw-r--r--drivers/gpu/drm/mediatek/Makefile7
-rw-r--r--drivers/gpu/drm/mediatek/mtk_cec.c265
-rw-r--r--drivers/gpu/drm/mediatek/mtk_cec.h26
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_drv.c13
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_plane.c1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c9
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c1828
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.h23
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c358
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi_regs.h238
-rw-r--r--drivers/gpu/drm/mediatek/mtk_mipi_tx.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c515
-rw-r--r--drivers/gpu/drm/mgag200/Kconfig4
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c2
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_main.c4
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c9
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_ttm.c13
-rw-r--r--drivers/gpu/drm/msm/Kconfig1
-rw-r--r--drivers/gpu/drm/msm/Makefile2
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c17
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.c2
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.c8
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_cfg.h2
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c69
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.c32
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.h2
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c4
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c4
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c2
-rw-r--r--drivers/gpu/drm/msm/edp/edp_connector.c10
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c117
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.h14
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_connector.c8
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c2
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c31
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c44
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h2
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c9
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h203
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c113
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c14
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c16
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c26
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c10
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c125
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c339
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h16
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c235
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c22
-rw-r--r--drivers/gpu/drm/msm/msm_atomic.c39
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c270
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h24
-rw-r--r--drivers/gpu/drm/msm/msm_fb.c12
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c17
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c139
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h23
-rw-r--r--drivers/gpu/drm/msm/msm_gem_prime.c4
-rw-r--r--drivers/gpu/drm/msm/msm_gem_shrinker.c168
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c26
-rw-r--r--drivers/gpu/drm/msm/msm_iommu.c6
-rw-r--r--drivers/gpu/drm/msm/msm_kms.h8
-rw-r--r--drivers/gpu/drm/msm/msm_perf.c7
-rw-r--r--drivers/gpu/drm/msm/msm_rd.c71
-rw-r--r--drivers/gpu/drm/msm/msm_ringbuffer.c6
-rw-r--r--drivers/gpu/drm/nouveau/Kconfig6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c12
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.c10
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/tvnv17.c9
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/cl0080.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvif/class.h10
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/device.h18
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h2
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h10
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h4
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h14
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h1
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h3
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h9
-rw-r--r--drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c105
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c75
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_chan.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c27
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c36
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hwmon.c36
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_usif.c1
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fbcon.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c11
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fbcon.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fbcon.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/core/subdev.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/gp100.c102
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/gp104.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c72
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/user.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp104.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp100.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp104.c78
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c66
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h10
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c55
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gp104.c81
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c136
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp104.c38
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c58
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c58
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c67
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogp100.c34
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c179
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c171
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c59
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/image.c7
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c20
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c394
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h96
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c896
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c19
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c69
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp104.c43
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h5
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c146
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c75
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c118
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk104.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c103
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/gt215.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv11.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv17.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h12
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c44
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c88
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c54
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h18
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c28
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c39
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c27
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.h11
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c40
-rw-r--r--drivers/gpu/drm/omapdrm/Kconfig5
-rw-r--r--drivers/gpu/drm/omapdrm/displays/Kconfig28
-rw-r--r--drivers/gpu/drm/omapdrm/displays/Makefile28
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c11
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-dvi.c5
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-hdmi.c4
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-opa362.c3
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c3
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c3
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-dpi.c26
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c7
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c22
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c2
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c4
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c3
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c3
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/core.c5
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc.c471
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc.h5
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dispc_coefs.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/display.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dpi.c136
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c57
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss-of.c10
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.c255
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.h45
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss_features.c46
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss_features.h1
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi.h6
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4.c11
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5.c11
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi_common.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi_phy.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi_pll.c78
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi_wp.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/omapdss.h871
-rw-r--r--drivers/gpu/drm/omapdrm/dss/output.c3
-rw-r--r--drivers/gpu/drm/omapdrm/dss/pll.c129
-rw-r--r--drivers/gpu/drm/omapdrm/dss/rfbi.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/sdi.c2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/venc.c3
-rw-r--r--drivers/gpu/drm/omapdrm/dss/video-pll.c9
-rw-r--r--drivers/gpu/drm/omapdrm/omap_connector.c10
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c56
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c16
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.h14
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fb.c20
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.c8
-rw-r--r--drivers/gpu/drm/omapdrm/omap_gem.c15
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c166
-rw-r--r--drivers/gpu/drm/qxl/Kconfig5
-rw-r--r--drivers/gpu/drm/qxl/qxl_cmd.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c11
-rw-r--r--drivers/gpu/drm/qxl/qxl_draw.c5
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h1
-rw-r--r--drivers/gpu/drm/qxl/qxl_fb.c4
-rw-r--r--drivers/gpu/drm/qxl/qxl_kms.c10
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c10
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c4
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c1
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c5
-rw-r--r--drivers/gpu/drm/radeon/cik.c17
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c7
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_acpi.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_atpx_handler.c67
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c15
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c24
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c26
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c13
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c10
-rw-r--r--drivers/gpu/drm/radeon/si.c45
-rw-r--r--drivers/gpu/drm/rcar-du/Kconfig1
-rw-r--r--drivers/gpu/drm/rcar-du/Makefile4
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.c2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.c17
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.c15
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.h10
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c117
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_hdmicon.h31
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c68
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c10
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c1
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_plane.c20
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_regs.h5
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_vgacon.c3
-rw-r--r--drivers/gpu/drm/rockchip/Kconfig5
-rw-r--r--drivers/gpu/drm/rockchip/analogix_dp-rockchip.c189
-rw-r--r--drivers/gpu/drm/rockchip/dw-mipi-dsi.c9
-rw-r--r--drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c1
-rw-r--r--drivers/gpu/drm/rockchip/inno_hdmi.c9
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c210
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.h12
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fb.c81
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c5
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c90
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.c7
-rw-r--r--drivers/gpu/drm/shmobile/Kconfig1
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_crtc.c4
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_drv.c3
-rw-r--r--drivers/gpu/drm/sis/sis_mm.c2
-rw-r--r--drivers/gpu/drm/sti/Kconfig1
-rw-r--r--drivers/gpu/drm/sti/sti_awg_utils.c4
-rw-r--r--drivers/gpu/drm/sti/sti_compositor.c26
-rw-r--r--drivers/gpu/drm/sti/sti_compositor.h3
-rw-r--r--drivers/gpu/drm/sti/sti_crtc.c71
-rw-r--r--drivers/gpu/drm/sti/sti_cursor.c39
-rw-r--r--drivers/gpu/drm/sti/sti_drv.c148
-rw-r--r--drivers/gpu/drm/sti/sti_drv.h1
-rw-r--r--drivers/gpu/drm/sti/sti_dvo.c43
-rw-r--r--drivers/gpu/drm/sti/sti_gdp.c46
-rw-r--r--drivers/gpu/drm/sti/sti_hda.c43
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c350
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.h13
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp.c40
-rw-r--r--drivers/gpu/drm/sti/sti_mixer.c12
-rw-r--r--drivers/gpu/drm/sti/sti_mixer.h2
-rw-r--r--drivers/gpu/drm/sti/sti_plane.c40
-rw-r--r--drivers/gpu/drm/sti/sti_plane.h9
-rw-r--r--drivers/gpu/drm/sti/sti_tvout.c43
-rw-r--r--drivers/gpu/drm/sti/sti_vid.c12
-rw-r--r--drivers/gpu/drm/sti/sti_vid.h2
-rw-r--r--drivers/gpu/drm/sti/sti_vtg.c3
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_crtc.c12
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_drv.c13
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_framebuffer.c3
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_rgb.c10
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tv.c9
-rw-r--r--drivers/gpu/drm/tegra/dc.c176
-rw-r--r--drivers/gpu/drm/tegra/dpaux.c245
-rw-r--r--drivers/gpu/drm/tegra/drm.c4
-rw-r--r--drivers/gpu/drm/tegra/drm.h2
-rw-r--r--drivers/gpu/drm/tegra/dsi.c248
-rw-r--r--drivers/gpu/drm/tegra/fb.c2
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c508
-rw-r--r--drivers/gpu/drm/tegra/hdmi.h21
-rw-r--r--drivers/gpu/drm/tegra/output.c9
-rw-r--r--drivers/gpu/drm/tegra/rgb.c1
-rw-r--r--drivers/gpu/drm/tegra/sor.c717
-rw-r--r--drivers/gpu/drm/tegra/sor.h3
-rw-r--r--drivers/gpu/drm/tilcdc/Kconfig1
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_crtc.c2
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c165
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c111
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c19
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c6
-rw-r--r--drivers/gpu/drm/udl/Kconfig5
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c1
-rw-r--r--drivers/gpu/drm/udl/udl_modeset.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_bo.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c183
-rw-r--r--drivers/gpu/drm/vc4/vc4_dpi.c23
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c70
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h12
-rw-r--r--drivers/gpu/drm/vc4/vc4_gem.c11
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c22
-rw-r--r--drivers/gpu/drm/vc4/vc4_kms.c15
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c13
-rw-r--r--drivers/gpu/drm/vc4/vc4_qpu_defines.h17
-rw-r--r--drivers/gpu/drm/vc4/vc4_regs.h22
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate.c13
-rw-r--r--drivers/gpu/drm/vc4/vc4_validate_shaders.c449
-rw-r--r--drivers/gpu/drm/vgem/Makefile2
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.c291
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.h20
-rw-r--r--drivers/gpu/drm/vgem/vgem_fence.c283
-rw-r--r--drivers/gpu/drm/via/via_mm.c2
-rw-r--r--drivers/gpu/drm/virtio/Kconfig4
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c187
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drm_bus.c10
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c3
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h3
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_plane.c150
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ttm.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c7
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h4
-rw-r--r--drivers/gpu/host1x/cdma.c42
-rw-r--r--drivers/gpu/host1x/channel.c5
-rw-r--r--drivers/gpu/host1x/debug.c38
-rw-r--r--drivers/gpu/host1x/dev.c16
-rw-r--r--drivers/gpu/host1x/dev.h38
-rw-r--r--drivers/gpu/host1x/hw/cdma_hw.c23
-rw-r--r--drivers/gpu/host1x/hw/channel_hw.c5
-rw-r--r--drivers/gpu/host1x/hw/debug_hw.c36
-rw-r--r--drivers/gpu/host1x/hw/intr_hw.c30
-rw-r--r--drivers/gpu/host1x/hw/syncpt_hw.c10
-rw-r--r--drivers/gpu/host1x/intr.c16
-rw-r--r--drivers/gpu/host1x/intr.h4
-rw-r--r--drivers/gpu/host1x/job.c8
-rw-r--r--drivers/gpu/host1x/syncpt.c58
-rw-r--r--drivers/gpu/host1x/syncpt.h8
-rw-r--r--drivers/gpu/ipu-v3/ipu-dc.c9
-rw-r--r--drivers/gpu/ipu-v3/ipu-di.c3
-rw-r--r--drivers/gpu/ipu-v3/ipu-dmfc.c213
-rw-r--r--drivers/gpu/vga/vga_switcheroo.c62
-rw-r--r--drivers/media/platform/omap/omap_vout.c2
-rw-r--r--drivers/media/platform/omap/omap_voutdef.h2
-rw-r--r--drivers/media/platform/omap/omap_voutlib.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/pd.c4
-rw-r--r--drivers/platform/x86/apple-gmux.c55
-rw-r--r--drivers/staging/android/sync_debug.h3
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c10
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c60
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/connector-hdmi.c44
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/encoder-opa362.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/encoder-tfp410.c46
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c3
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c54
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c58
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c47
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c83
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c45
-rw-r--r--drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c46
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/apply.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/core.c4
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dispc.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dispc_coefs.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/display.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dpi.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dsi.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dss-of.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dss.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dss.h11
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/dss_features.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi.h3
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/hdmi_wp.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/manager.c3
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/output.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/overlay.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/pll.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/rfbi.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/sdi.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/venc.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/dss/video-pll.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-main.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c2
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb.h2
836 files changed, 42810 insertions, 20788 deletions
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index aef87fdbd187..44311296ec02 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -840,6 +840,14 @@ static bool i830_check_flags(unsigned int flags)
 	return false;
 }
 
+void intel_gtt_insert_page(dma_addr_t addr,
+			   unsigned int pg,
+			   unsigned int flags)
+{
+	intel_private.driver->write_entry(addr, pg, flags);
+}
+EXPORT_SYMBOL(intel_gtt_insert_page);
+
 void intel_gtt_insert_sg_entries(struct sg_table *st,
 				 unsigned int pg_start,
 				 unsigned int flags)
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index 9824bc4addf8..25bcfa0b474f 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -1,11 +1,20 @@
1menu "DMABUF options" 1menu "DMABUF options"
2 2
3config SYNC_FILE 3config SYNC_FILE
4 bool "sync_file support for fences" 4 bool "Explicit Synchronization Framework"
5 default n 5 default n
6 select ANON_INODES 6 select ANON_INODES
7 select DMA_SHARED_BUFFER 7 select DMA_SHARED_BUFFER
8 ---help--- 8 ---help---
9 This option enables the fence framework synchronization to export 9 The Sync File Framework adds explicit syncronization via
10 sync_files to userspace that can represent one or more fences. 10 userspace. It enables send/receive 'struct fence' objects to/from
11 userspace via Sync File fds for synchronization between drivers via
12 userspace components. It has been ported from Android.
13
14 The first and main user for this is graphics in which a fence is
15 associated with a buffer. When a job is submitted to the GPU a fence
16 is attached to the buffer and is transferred via userspace, using Sync
17 Files fds, to the DRM driver for example. More details at
18 Documentation/sync_file.txt.
19
11endmenu 20endmenu
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index 4a424eca75ed..f353db213a81 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -1,2 +1,2 @@
-obj-y := dma-buf.o fence.o reservation.o seqno-fence.o
+obj-y := dma-buf.o fence.o reservation.o seqno-fence.o fence-array.o
 obj-$(CONFIG_SYNC_FILE) += sync_file.o
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 6355ab38d630..ddaee60ae52a 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -334,6 +334,7 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 	struct reservation_object *resv = exp_info->resv;
 	struct file *file;
 	size_t alloc_size = sizeof(struct dma_buf);
+	int ret;
 
 	if (!exp_info->resv)
 		alloc_size += sizeof(struct reservation_object);
@@ -357,8 +358,8 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 
 	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
 	if (!dmabuf) {
-		module_put(exp_info->owner);
-		return ERR_PTR(-ENOMEM);
+		ret = -ENOMEM;
+		goto err_module;
 	}
 
 	dmabuf->priv = exp_info->priv;
@@ -379,8 +380,8 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 	file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf,
 					exp_info->flags);
 	if (IS_ERR(file)) {
-		kfree(dmabuf);
-		return ERR_CAST(file);
+		ret = PTR_ERR(file);
+		goto err_dmabuf;
 	}
 
 	file->f_mode |= FMODE_LSEEK;
@@ -394,6 +395,12 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 	mutex_unlock(&db_list.lock);
 
 	return dmabuf;
+
+err_dmabuf:
+	kfree(dmabuf);
+err_module:
+	module_put(exp_info->owner);
+	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(dma_buf_export);
 
@@ -824,7 +831,7 @@ void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
 EXPORT_SYMBOL_GPL(dma_buf_vunmap);
 
 #ifdef CONFIG_DEBUG_FS
-static int dma_buf_describe(struct seq_file *s)
+static int dma_buf_debug_show(struct seq_file *s, void *unused)
 {
 	int ret;
 	struct dma_buf *buf_obj;
@@ -879,17 +886,9 @@ static int dma_buf_describe(struct seq_file *s)
 	return 0;
 }
 
-static int dma_buf_show(struct seq_file *s, void *unused)
-{
-	void (*func)(struct seq_file *) = s->private;
-
-	func(s);
-	return 0;
-}
-
 static int dma_buf_debug_open(struct inode *inode, struct file *file)
 {
-	return single_open(file, dma_buf_show, inode->i_private);
+	return single_open(file, dma_buf_debug_show, NULL);
 }
 
 static const struct file_operations dma_buf_debug_fops = {
@@ -903,20 +902,23 @@ static struct dentry *dma_buf_debugfs_dir;
 
 static int dma_buf_init_debugfs(void)
 {
+	struct dentry *d;
 	int err = 0;
 
-	dma_buf_debugfs_dir = debugfs_create_dir("dma_buf", NULL);
+	d = debugfs_create_dir("dma_buf", NULL);
+	if (IS_ERR(d))
+		return PTR_ERR(d);
 
-	if (IS_ERR(dma_buf_debugfs_dir)) {
-		err = PTR_ERR(dma_buf_debugfs_dir);
-		dma_buf_debugfs_dir = NULL;
-		return err;
-	}
-
-	err = dma_buf_debugfs_create_file("bufinfo", dma_buf_describe);
+	dma_buf_debugfs_dir = d;
 
-	if (err)
+	d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
+					NULL, &dma_buf_debug_fops);
+	if (IS_ERR(d)) {
 		pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
+		debugfs_remove_recursive(dma_buf_debugfs_dir);
+		dma_buf_debugfs_dir = NULL;
+		err = PTR_ERR(d);
+	}
 
 	return err;
 }
@@ -926,17 +928,6 @@ static void dma_buf_uninit_debugfs(void)
 	if (dma_buf_debugfs_dir)
 		debugfs_remove_recursive(dma_buf_debugfs_dir);
 }
-
-int dma_buf_debugfs_create_file(const char *name,
-			int (*write)(struct seq_file *))
-{
-	struct dentry *d;
-
-	d = debugfs_create_file(name, S_IRUGO, dma_buf_debugfs_dir,
-			write, &dma_buf_debug_fops);
-
-	return PTR_ERR_OR_ZERO(d);
-}
 #else
 static inline int dma_buf_init_debugfs(void)
 {
diff --git a/drivers/dma-buf/fence-array.c b/drivers/dma-buf/fence-array.c
new file mode 100644
index 000000000000..a8731c853da6
--- /dev/null
+++ b/drivers/dma-buf/fence-array.c
@@ -0,0 +1,144 @@
1/*
2 * fence-array: aggregate fences to be waited together
3 *
4 * Copyright (C) 2016 Collabora Ltd
5 * Copyright (C) 2016 Advanced Micro Devices, Inc.
6 * Authors:
7 * Gustavo Padovan <gustavo@padovan.org>
8 * Christian König <christian.koenig@amd.com>
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License version 2 as published by
12 * the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but WITHOUT
15 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * more details.
18 */
19
20#include <linux/export.h>
21#include <linux/slab.h>
22#include <linux/fence-array.h>
23
24static void fence_array_cb_func(struct fence *f, struct fence_cb *cb);
25
26static const char *fence_array_get_driver_name(struct fence *fence)
27{
28 return "fence_array";
29}
30
31static const char *fence_array_get_timeline_name(struct fence *fence)
32{
33 return "unbound";
34}
35
36static void fence_array_cb_func(struct fence *f, struct fence_cb *cb)
37{
38 struct fence_array_cb *array_cb =
39 container_of(cb, struct fence_array_cb, cb);
40 struct fence_array *array = array_cb->array;
41
42 if (atomic_dec_and_test(&array->num_pending))
43 fence_signal(&array->base);
44 fence_put(&array->base);
45}
46
47static bool fence_array_enable_signaling(struct fence *fence)
48{
49 struct fence_array *array = to_fence_array(fence);
50 struct fence_array_cb *cb = (void *)(&array[1]);
51 unsigned i;
52
53 for (i = 0; i < array->num_fences; ++i) {
54 cb[i].array = array;
55 /*
56 * As we may report that the fence is signaled before all
57 * callbacks are complete, we need to take an additional
58 * reference count on the array so that we do not free it too
59 * early. The core fence handling will only hold the reference
60 * until we signal the array as complete (but that is now
61 * insufficient).
62 */
63 fence_get(&array->base);
64 if (fence_add_callback(array->fences[i], &cb[i].cb,
65 fence_array_cb_func)) {
66 fence_put(&array->base);
67 if (atomic_dec_and_test(&array->num_pending))
68 return false;
69 }
70 }
71
72 return true;
73}
74
75static bool fence_array_signaled(struct fence *fence)
76{
77 struct fence_array *array = to_fence_array(fence);
78
79 return atomic_read(&array->num_pending) <= 0;
80}
81
82static void fence_array_release(struct fence *fence)
83{
84 struct fence_array *array = to_fence_array(fence);
85 unsigned i;
86
87 for (i = 0; i < array->num_fences; ++i)
88 fence_put(array->fences[i]);
89
90 kfree(array->fences);
91 fence_free(fence);
92}
93
94const struct fence_ops fence_array_ops = {
95 .get_driver_name = fence_array_get_driver_name,
96 .get_timeline_name = fence_array_get_timeline_name,
97 .enable_signaling = fence_array_enable_signaling,
98 .signaled = fence_array_signaled,
99 .wait = fence_default_wait,
100 .release = fence_array_release,
101};
102
103/**
104 * fence_array_create - Create a custom fence array
105 * @num_fences: [in] number of fences to add in the array
106 * @fences: [in] array containing the fences
107 * @context: [in] fence context to use
108 * @seqno: [in] sequence number to use
 109 * @signal_on_any: [in] signal on any fence in the array
110 *
111 * Allocate a fence_array object and initialize the base fence with fence_init().
112 * In case of error it returns NULL.
113 *
 114 * The caller should allocate the fences array with num_fences size
 115 * and fill it with the fences it wants to add to the object. Ownership of this
 116 * array is taken and fence_put() is used on each fence on release.
117 *
118 * If @signal_on_any is true the fence array signals if any fence in the array
119 * signals, otherwise it signals when all fences in the array signal.
120 */
121struct fence_array *fence_array_create(int num_fences, struct fence **fences,
122 u64 context, unsigned seqno,
123 bool signal_on_any)
124{
125 struct fence_array *array;
126 size_t size = sizeof(*array);
127
128 /* Allocate the callback structures behind the array. */
129 size += num_fences * sizeof(struct fence_array_cb);
130 array = kzalloc(size, GFP_KERNEL);
131 if (!array)
132 return NULL;
133
134 spin_lock_init(&array->lock);
135 fence_init(&array->base, &fence_array_ops, &array->lock,
136 context, seqno);
137
138 array->num_fences = num_fences;
139 atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences);
140 array->fences = fences;
141
142 return array;
143}
144EXPORT_SYMBOL(fence_array_create);
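Per the kernel-doc above, fence_array_create() takes ownership of a heap-allocated fences[] array whose entries must already hold a reference. An illustrative caller that merges a set of dependency fences into one aggregate fence; the deps/count inputs and the helper name are hypothetical.

#include <linux/fence-array.h>
#include <linux/slab.h>

static struct fence *example_merge_fences(struct fence **deps, int count)
{
        struct fence_array *array;
        struct fence **fences;
        int i;

        fences = kcalloc(count, sizeof(*fences), GFP_KERNEL);
        if (!fences)
                return NULL;

        /* the array takes over both these references and the allocation */
        for (i = 0; i < count; i++)
                fences[i] = fence_get(deps[i]);

        /* fresh context, seqno 1, signal only once all fences have signaled */
        array = fence_array_create(count, fences, fence_context_alloc(1),
                                   1, false);
        if (!array) {
                for (i = 0; i < count; i++)
                        fence_put(fences[i]);
                kfree(fences);
                return NULL;
        }

        return &array->base;
}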
diff --git a/drivers/dma-buf/fence.c b/drivers/dma-buf/fence.c
index 7b05dbe9b296..4d51f9e83fa8 100644
--- a/drivers/dma-buf/fence.c
+++ b/drivers/dma-buf/fence.c
@@ -35,7 +35,7 @@ EXPORT_TRACEPOINT_SYMBOL(fence_emit);
35 * context or not. One device can have multiple separate contexts, 35 * context or not. One device can have multiple separate contexts,
36 * and they're used if some engine can run independently of another. 36 * and they're used if some engine can run independently of another.
37 */ 37 */
38static atomic_t fence_context_counter = ATOMIC_INIT(0); 38static atomic64_t fence_context_counter = ATOMIC64_INIT(0);
39 39
40/** 40/**
41 * fence_context_alloc - allocate an array of fence contexts 41 * fence_context_alloc - allocate an array of fence contexts
@@ -44,10 +44,10 @@ static atomic_t fence_context_counter = ATOMIC_INIT(0);
44 * This function will return the first index of the number of fences allocated. 44 * This function will return the first index of the number of fences allocated.
45 * The fence context is used for setting fence->context to a unique number. 45 * The fence context is used for setting fence->context to a unique number.
46 */ 46 */
47unsigned fence_context_alloc(unsigned num) 47u64 fence_context_alloc(unsigned num)
48{ 48{
49 BUG_ON(!num); 49 BUG_ON(!num);
50 return atomic_add_return(num, &fence_context_counter) - num; 50 return atomic64_add_return(num, &fence_context_counter) - num;
51} 51}
52EXPORT_SYMBOL(fence_context_alloc); 52EXPORT_SYMBOL(fence_context_alloc);
53 53
@@ -513,7 +513,7 @@ EXPORT_SYMBOL(fence_wait_any_timeout);
513 */ 513 */
514void 514void
515fence_init(struct fence *fence, const struct fence_ops *ops, 515fence_init(struct fence *fence, const struct fence_ops *ops,
516 spinlock_t *lock, unsigned context, unsigned seqno) 516 spinlock_t *lock, u64 context, unsigned seqno)
517{ 517{
518 BUG_ON(!lock); 518 BUG_ON(!lock);
519 BUG_ON(!ops || !ops->wait || !ops->enable_signaling || 519 BUG_ON(!ops || !ops->wait || !ops->enable_signaling ||
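With fence_context_alloc() now returning a u64 and fence_init() accepting a u64 context, a driver can reserve one context per ring without the counter ever wrapping in practice. A hypothetical driver-side sketch; the example_device layout, the ring count, and the ops/lock passed to fence_init() are placeholders.

#include <linux/fence.h>
#include <linux/spinlock.h>

#define EXAMPLE_NUM_RINGS 4

struct example_device {
        u64 fence_context;                 /* was "unsigned" before this change */
        unsigned seqno[EXAMPLE_NUM_RINGS]; /* assumed zero-initialised */
        spinlock_t fence_lock;
};

static void example_fence_setup(struct example_device *edev)
{
        /* reserve one 64-bit context per ring, once at init time */
        edev->fence_context = fence_context_alloc(EXAMPLE_NUM_RINGS);
        spin_lock_init(&edev->fence_lock);
}

static void example_fence_emit(struct example_device *edev, struct fence *f,
                               const struct fence_ops *ops, unsigned ring)
{
        fence_init(f, ops, &edev->fence_lock,
                   edev->fence_context + ring, ++edev->seqno[ring]);
}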
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index f08cf2d8309e..9aaa608dfe01 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -82,7 +82,7 @@ struct sync_file *sync_file_create(struct fence *fence)
82 82
83 sync_file->num_fences = 1; 83 sync_file->num_fences = 1;
84 atomic_set(&sync_file->status, 1); 84 atomic_set(&sync_file->status, 1);
85 snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%d-%d", 85 snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%llu-%d",
86 fence->ops->get_driver_name(fence), 86 fence->ops->get_driver_name(fence),
87 fence->ops->get_timeline_name(fence), fence->context, 87 fence->ops->get_timeline_name(fence), fence->context,
88 fence->seqno); 88 fence->seqno);
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index be43afb08c69..e3dba6f44a79 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -8,7 +8,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
8 drm_lock.o drm_memory.o drm_drv.o drm_vm.o \ 8 drm_lock.o drm_memory.o drm_drv.o drm_vm.o \
9 drm_scatter.o drm_pci.o \ 9 drm_scatter.o drm_pci.o \
10 drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \ 10 drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
11 drm_crtc.o drm_modes.o drm_edid.o \ 11 drm_crtc.o drm_fourcc.o drm_modes.o drm_edid.o \
12 drm_info.o drm_debugfs.o drm_encoder_slave.o \ 12 drm_info.o drm_debugfs.o drm_encoder_slave.o \
13 drm_trace_points.o drm_global.o drm_prime.o \ 13 drm_trace_points.o drm_global.o drm_prime.o \
14 drm_rect.o drm_vma_manager.o drm_flip_work.o \ 14 drm_rect.o drm_vma_manager.o drm_flip_work.o \
@@ -23,7 +23,8 @@ drm-$(CONFIG_AGP) += drm_agpsupport.o
23 23
24drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \ 24drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
25 drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \ 25 drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
26 drm_kms_helper_common.o drm_dp_dual_mode_helper.o 26 drm_kms_helper_common.o drm_dp_dual_mode_helper.o \
27 drm_simple_kms_helper.o
27 28
28drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o 29drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
29drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o 30drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index e055d5be1c3c..8ebc5f1eb4c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -85,8 +85,12 @@ extern int amdgpu_vm_debug;
85extern int amdgpu_sched_jobs; 85extern int amdgpu_sched_jobs;
86extern int amdgpu_sched_hw_submission; 86extern int amdgpu_sched_hw_submission;
87extern int amdgpu_powerplay; 87extern int amdgpu_powerplay;
88extern int amdgpu_powercontainment;
88extern unsigned amdgpu_pcie_gen_cap; 89extern unsigned amdgpu_pcie_gen_cap;
89extern unsigned amdgpu_pcie_lane_cap; 90extern unsigned amdgpu_pcie_lane_cap;
91extern unsigned amdgpu_cg_mask;
92extern unsigned amdgpu_pg_mask;
93extern char *amdgpu_disable_cu;
90 94
91#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000 95#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
92#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */ 96#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
@@ -183,6 +187,10 @@ int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
183int amdgpu_set_powergating_state(struct amdgpu_device *adev, 187int amdgpu_set_powergating_state(struct amdgpu_device *adev,
184 enum amd_ip_block_type block_type, 188 enum amd_ip_block_type block_type,
185 enum amd_powergating_state state); 189 enum amd_powergating_state state);
190int amdgpu_wait_for_idle(struct amdgpu_device *adev,
191 enum amd_ip_block_type block_type);
192bool amdgpu_is_idle(struct amdgpu_device *adev,
193 enum amd_ip_block_type block_type);
186 194
187struct amdgpu_ip_block_version { 195struct amdgpu_ip_block_version {
188 enum amd_ip_block_type type; 196 enum amd_ip_block_type type;
@@ -298,13 +306,16 @@ struct amdgpu_ring_funcs {
298 uint32_t oa_base, uint32_t oa_size); 306 uint32_t oa_base, uint32_t oa_size);
299 /* testing functions */ 307 /* testing functions */
300 int (*test_ring)(struct amdgpu_ring *ring); 308 int (*test_ring)(struct amdgpu_ring *ring);
301 int (*test_ib)(struct amdgpu_ring *ring); 309 int (*test_ib)(struct amdgpu_ring *ring, long timeout);
302 /* insert NOP packets */ 310 /* insert NOP packets */
303 void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count); 311 void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
304 /* pad the indirect buffer to the necessary number of dw */ 312 /* pad the indirect buffer to the necessary number of dw */
305 void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib); 313 void (*pad_ib)(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
306 unsigned (*init_cond_exec)(struct amdgpu_ring *ring); 314 unsigned (*init_cond_exec)(struct amdgpu_ring *ring);
307 void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset); 315 void (*patch_cond_exec)(struct amdgpu_ring *ring, unsigned offset);
316 /* note usage for clock and power gating */
317 void (*begin_use)(struct amdgpu_ring *ring);
318 void (*end_use)(struct amdgpu_ring *ring);
308}; 319};
309 320
310/* 321/*
@@ -594,11 +605,9 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
594 struct amdgpu_sync *sync, 605 struct amdgpu_sync *sync,
595 struct reservation_object *resv, 606 struct reservation_object *resv,
596 void *owner); 607 void *owner);
597bool amdgpu_sync_is_idle(struct amdgpu_sync *sync); 608struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
598int amdgpu_sync_cycle_fences(struct amdgpu_sync *dst, struct amdgpu_sync *src, 609 struct amdgpu_ring *ring);
599 struct fence *fence);
600struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync); 610struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
601int amdgpu_sync_wait(struct amdgpu_sync *sync);
602void amdgpu_sync_free(struct amdgpu_sync *sync); 611void amdgpu_sync_free(struct amdgpu_sync *sync);
603int amdgpu_sync_init(void); 612int amdgpu_sync_init(void);
604void amdgpu_sync_fini(void); 613void amdgpu_sync_fini(void);
@@ -754,12 +763,11 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
754int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size, 763int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
755 struct amdgpu_job **job); 764 struct amdgpu_job **job);
756 765
766void amdgpu_job_free_resources(struct amdgpu_job *job);
757void amdgpu_job_free(struct amdgpu_job *job); 767void amdgpu_job_free(struct amdgpu_job *job);
758void amdgpu_job_free_func(struct kref *refcount);
759int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring, 768int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
760 struct amd_sched_entity *entity, void *owner, 769 struct amd_sched_entity *entity, void *owner,
761 struct fence **f); 770 struct fence **f);
762void amdgpu_job_timeout_func(struct work_struct *work);
763 771
764struct amdgpu_ring { 772struct amdgpu_ring {
765 struct amdgpu_device *adev; 773 struct amdgpu_device *adev;
@@ -767,12 +775,9 @@ struct amdgpu_ring {
767 struct amdgpu_fence_driver fence_drv; 775 struct amdgpu_fence_driver fence_drv;
768 struct amd_gpu_scheduler sched; 776 struct amd_gpu_scheduler sched;
769 777
770 spinlock_t fence_lock;
771 struct amdgpu_bo *ring_obj; 778 struct amdgpu_bo *ring_obj;
772 volatile uint32_t *ring; 779 volatile uint32_t *ring;
773 unsigned rptr_offs; 780 unsigned rptr_offs;
774 u64 next_rptr_gpu_addr;
775 volatile u32 *next_rptr_cpu_addr;
776 unsigned wptr; 781 unsigned wptr;
777 unsigned wptr_old; 782 unsigned wptr_old;
778 unsigned ring_size; 783 unsigned ring_size;
@@ -791,14 +796,16 @@ struct amdgpu_ring {
791 u32 doorbell_index; 796 u32 doorbell_index;
792 bool use_doorbell; 797 bool use_doorbell;
793 unsigned wptr_offs; 798 unsigned wptr_offs;
794 unsigned next_rptr_offs;
795 unsigned fence_offs; 799 unsigned fence_offs;
796 uint64_t current_ctx; 800 uint64_t current_ctx;
797 enum amdgpu_ring_type type; 801 enum amdgpu_ring_type type;
798 char name[16]; 802 char name[16];
799 unsigned cond_exe_offs; 803 unsigned cond_exe_offs;
800 u64 cond_exe_gpu_addr; 804 u64 cond_exe_gpu_addr;
801 volatile u32 *cond_exe_cpu_addr; 805 volatile u32 *cond_exe_cpu_addr;
806#if defined(CONFIG_DEBUG_FS)
807 struct dentry *ent;
808#endif
802}; 809};
803 810
804/* 811/*
@@ -861,6 +868,7 @@ struct amdgpu_vm {
861 struct amdgpu_bo *page_directory; 868 struct amdgpu_bo *page_directory;
862 unsigned max_pde_used; 869 unsigned max_pde_used;
863 struct fence *page_directory_fence; 870 struct fence *page_directory_fence;
871 uint64_t last_eviction_counter;
864 872
865 /* array of page tables, one for each page directory entry */ 873 /* array of page tables, one for each page directory entry */
866 struct amdgpu_vm_pt *page_tables; 874 struct amdgpu_vm_pt *page_tables;
@@ -883,13 +891,14 @@ struct amdgpu_vm_id {
883 struct fence *first; 891 struct fence *first;
884 struct amdgpu_sync active; 892 struct amdgpu_sync active;
885 struct fence *last_flush; 893 struct fence *last_flush;
886 struct amdgpu_ring *last_user;
887 atomic64_t owner; 894 atomic64_t owner;
888 895
889 uint64_t pd_gpu_addr; 896 uint64_t pd_gpu_addr;
890 /* last flushed PD/PT update */ 897 /* last flushed PD/PT update */
891 struct fence *flushed_updates; 898 struct fence *flushed_updates;
892 899
900 uint32_t current_gpu_reset_count;
901
893 uint32_t gds_base; 902 uint32_t gds_base;
894 uint32_t gds_size; 903 uint32_t gds_size;
895 uint32_t gws_base; 904 uint32_t gws_base;
@@ -905,6 +914,10 @@ struct amdgpu_vm_manager {
905 struct list_head ids_lru; 914 struct list_head ids_lru;
906 struct amdgpu_vm_id ids[AMDGPU_NUM_VM]; 915 struct amdgpu_vm_id ids[AMDGPU_NUM_VM];
907 916
917 /* Handling of VM fences */
918 u64 fence_context;
919 unsigned seqno[AMDGPU_MAX_RINGS];
920
908 uint32_t max_pfn; 921 uint32_t max_pfn;
909 /* vram base address for page table entry */ 922 /* vram base address for page table entry */
910 u64 vram_base_offset; 923 u64 vram_base_offset;
@@ -926,17 +939,14 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
926void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, 939void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
927 struct list_head *validated, 940 struct list_head *validated,
928 struct amdgpu_bo_list_entry *entry); 941 struct amdgpu_bo_list_entry *entry);
929void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates); 942void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
943 struct list_head *duplicates);
930void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev, 944void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
931 struct amdgpu_vm *vm); 945 struct amdgpu_vm *vm);
932int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, 946int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
933 struct amdgpu_sync *sync, struct fence *fence, 947 struct amdgpu_sync *sync, struct fence *fence,
934 unsigned *vm_id, uint64_t *vm_pd_addr); 948 struct amdgpu_job *job);
935int amdgpu_vm_flush(struct amdgpu_ring *ring, 949int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
936 unsigned vm_id, uint64_t pd_addr,
937 uint32_t gds_base, uint32_t gds_size,
938 uint32_t gws_base, uint32_t gws_size,
939 uint32_t oa_base, uint32_t oa_size);
940void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id); 950void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
941uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr); 951uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
942int amdgpu_vm_update_page_directory(struct amdgpu_device *adev, 952int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
@@ -1142,6 +1152,12 @@ struct amdgpu_cu_info {
1142 uint32_t bitmap[4][4]; 1152 uint32_t bitmap[4][4];
1143}; 1153};
1144 1154
1155struct amdgpu_gfx_funcs {
1156 /* get the gpu clock counter */
1157 uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
1158 void (*select_se_sh)(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
1159};
1160
1145struct amdgpu_gfx { 1161struct amdgpu_gfx {
1146 struct mutex gpu_clock_mutex; 1162 struct mutex gpu_clock_mutex;
1147 struct amdgpu_gca_config config; 1163 struct amdgpu_gca_config config;
@@ -1178,6 +1194,7 @@ struct amdgpu_gfx {
1178 /* ce ram size*/ 1194 /* ce ram size*/
1179 unsigned ce_ram_size; 1195 unsigned ce_ram_size;
1180 struct amdgpu_cu_info cu_info; 1196 struct amdgpu_cu_info cu_info;
1197 const struct amdgpu_gfx_funcs *funcs;
1181}; 1198};
1182 1199
1183int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm, 1200int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
@@ -1195,10 +1212,6 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
1195void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib); 1212void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
1196void amdgpu_ring_commit(struct amdgpu_ring *ring); 1213void amdgpu_ring_commit(struct amdgpu_ring *ring);
1197void amdgpu_ring_undo(struct amdgpu_ring *ring); 1214void amdgpu_ring_undo(struct amdgpu_ring *ring);
1198unsigned amdgpu_ring_backup(struct amdgpu_ring *ring,
1199 uint32_t **data);
1200int amdgpu_ring_restore(struct amdgpu_ring *ring,
1201 unsigned size, uint32_t *data);
1202int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, 1215int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
1203 unsigned ring_size, u32 nop, u32 align_mask, 1216 unsigned ring_size, u32 nop, u32 align_mask,
1204 struct amdgpu_irq_src *irq_src, unsigned irq_type, 1217 struct amdgpu_irq_src *irq_src, unsigned irq_type,
@@ -1250,6 +1263,7 @@ struct amdgpu_job {
1250 uint32_t num_ibs; 1263 uint32_t num_ibs;
1251 void *owner; 1264 void *owner;
1252 uint64_t ctx; 1265 uint64_t ctx;
1266 bool vm_needs_flush;
1253 unsigned vm_id; 1267 unsigned vm_id;
1254 uint64_t vm_pd_addr; 1268 uint64_t vm_pd_addr;
1255 uint32_t gds_base, gds_size; 1269 uint32_t gds_base, gds_size;
@@ -1257,8 +1271,7 @@ struct amdgpu_job {
1257 uint32_t oa_base, oa_size; 1271 uint32_t oa_base, oa_size;
1258 1272
1259 /* user fence handling */ 1273 /* user fence handling */
1260 struct amdgpu_bo *uf_bo; 1274 uint64_t uf_addr;
1261 uint32_t uf_offset;
1262 uint64_t uf_sequence; 1275 uint64_t uf_sequence;
1263 1276
1264}; 1277};
@@ -1560,6 +1573,12 @@ struct amdgpu_dpm_funcs {
1560 u32 (*get_fan_control_mode)(struct amdgpu_device *adev); 1573 u32 (*get_fan_control_mode)(struct amdgpu_device *adev);
1561 int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed); 1574 int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed);
1562 int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed); 1575 int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed);
1576 int (*force_clock_level)(struct amdgpu_device *adev, enum pp_clock_type type, uint32_t mask);
1577 int (*print_clock_levels)(struct amdgpu_device *adev, enum pp_clock_type type, char *buf);
1578 int (*get_sclk_od)(struct amdgpu_device *adev);
1579 int (*set_sclk_od)(struct amdgpu_device *adev, uint32_t value);
1580 int (*get_mclk_od)(struct amdgpu_device *adev);
1581 int (*set_mclk_od)(struct amdgpu_device *adev, uint32_t value);
1563}; 1582};
1564 1583
1565struct amdgpu_dpm { 1584struct amdgpu_dpm {
@@ -1662,6 +1681,7 @@ struct amdgpu_uvd {
1662 struct amdgpu_ring ring; 1681 struct amdgpu_ring ring;
1663 struct amdgpu_irq_src irq; 1682 struct amdgpu_irq_src irq;
1664 bool address_64_bit; 1683 bool address_64_bit;
1684 bool use_ctx_buf;
1665 struct amd_sched_entity entity; 1685 struct amd_sched_entity entity;
1666}; 1686};
1667 1687
@@ -1683,6 +1703,7 @@ struct amdgpu_vce {
1683 struct drm_file *filp[AMDGPU_MAX_VCE_HANDLES]; 1703 struct drm_file *filp[AMDGPU_MAX_VCE_HANDLES];
1684 uint32_t img_size[AMDGPU_MAX_VCE_HANDLES]; 1704 uint32_t img_size[AMDGPU_MAX_VCE_HANDLES];
1685 struct delayed_work idle_work; 1705 struct delayed_work idle_work;
1706 struct mutex idle_mutex;
1686 const struct firmware *fw; /* VCE firmware */ 1707 const struct firmware *fw; /* VCE firmware */
1687 struct amdgpu_ring ring[AMDGPU_MAX_VCE_RINGS]; 1708 struct amdgpu_ring ring[AMDGPU_MAX_VCE_RINGS];
1688 struct amdgpu_irq_src irq; 1709 struct amdgpu_irq_src irq;
@@ -1767,6 +1788,8 @@ int amdgpu_debugfs_init(struct drm_minor *minor);
1767void amdgpu_debugfs_cleanup(struct drm_minor *minor); 1788void amdgpu_debugfs_cleanup(struct drm_minor *minor);
1768#endif 1789#endif
1769 1790
1791int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev);
1792
1770/* 1793/*
1771 * amdgpu smumgr functions 1794 * amdgpu smumgr functions
1772 */ 1795 */
@@ -1811,12 +1834,8 @@ struct amdgpu_asic_funcs {
1811 u32 sh_num, u32 reg_offset, u32 *value); 1834 u32 sh_num, u32 reg_offset, u32 *value);
1812 void (*set_vga_state)(struct amdgpu_device *adev, bool state); 1835 void (*set_vga_state)(struct amdgpu_device *adev, bool state);
1813 int (*reset)(struct amdgpu_device *adev); 1836 int (*reset)(struct amdgpu_device *adev);
1814 /* wait for mc_idle */
1815 int (*wait_for_mc_idle)(struct amdgpu_device *adev);
1816 /* get the reference clock */ 1837 /* get the reference clock */
1817 u32 (*get_xclk)(struct amdgpu_device *adev); 1838 u32 (*get_xclk)(struct amdgpu_device *adev);
1818 /* get the gpu clock counter */
1819 uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
1820 /* MM block clocks */ 1839 /* MM block clocks */
1821 int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk); 1840 int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
1822 int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk); 1841 int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
@@ -2003,6 +2022,10 @@ struct amdgpu_device {
2003 spinlock_t didt_idx_lock; 2022 spinlock_t didt_idx_lock;
2004 amdgpu_rreg_t didt_rreg; 2023 amdgpu_rreg_t didt_rreg;
2005 amdgpu_wreg_t didt_wreg; 2024 amdgpu_wreg_t didt_wreg;
2025 /* protects concurrent gc_cac register access */
2026 spinlock_t gc_cac_idx_lock;
2027 amdgpu_rreg_t gc_cac_rreg;
2028 amdgpu_wreg_t gc_cac_wreg;
2006 /* protects concurrent ENDPOINT (audio) register access */ 2029 /* protects concurrent ENDPOINT (audio) register access */
2007 spinlock_t audio_endpt_idx_lock; 2030 spinlock_t audio_endpt_idx_lock;
2008 amdgpu_block_rreg_t audio_endpt_rreg; 2031 amdgpu_block_rreg_t audio_endpt_rreg;
@@ -2028,6 +2051,7 @@ struct amdgpu_device {
2028 atomic64_t vram_vis_usage; 2051 atomic64_t vram_vis_usage;
2029 atomic64_t gtt_usage; 2052 atomic64_t gtt_usage;
2030 atomic64_t num_bytes_moved; 2053 atomic64_t num_bytes_moved;
2054 atomic64_t num_evictions;
2031 atomic_t gpu_reset_counter; 2055 atomic_t gpu_reset_counter;
2032 2056
2033 /* display */ 2057 /* display */
@@ -2038,7 +2062,7 @@ struct amdgpu_device {
2038 struct amdgpu_irq_src hpd_irq; 2062 struct amdgpu_irq_src hpd_irq;
2039 2063
2040 /* rings */ 2064 /* rings */
2041 unsigned fence_context; 2065 u64 fence_context;
2042 unsigned num_rings; 2066 unsigned num_rings;
2043 struct amdgpu_ring *rings[AMDGPU_MAX_RINGS]; 2067 struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
2044 bool ib_pool_ready; 2068 bool ib_pool_ready;
@@ -2131,6 +2155,8 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
2131#define WREG32_UVD_CTX(reg, v) adev->uvd_ctx_wreg(adev, (reg), (v)) 2155#define WREG32_UVD_CTX(reg, v) adev->uvd_ctx_wreg(adev, (reg), (v))
2132#define RREG32_DIDT(reg) adev->didt_rreg(adev, (reg)) 2156#define RREG32_DIDT(reg) adev->didt_rreg(adev, (reg))
2133#define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v)) 2157#define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v))
2158#define RREG32_GC_CAC(reg) adev->gc_cac_rreg(adev, (reg))
2159#define WREG32_GC_CAC(reg, v) adev->gc_cac_wreg(adev, (reg), (v))
2134#define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg)) 2160#define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg))
2135#define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v)) 2161#define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v))
2136#define WREG32_P(reg, val, mask) \ 2162#define WREG32_P(reg, val, mask) \
@@ -2206,12 +2232,10 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
2206 */ 2232 */
2207#define amdgpu_asic_set_vga_state(adev, state) (adev)->asic_funcs->set_vga_state((adev), (state)) 2233#define amdgpu_asic_set_vga_state(adev, state) (adev)->asic_funcs->set_vga_state((adev), (state))
2208#define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev)) 2234#define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev))
2209#define amdgpu_asic_wait_for_mc_idle(adev) (adev)->asic_funcs->wait_for_mc_idle((adev))
2210#define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev)) 2235#define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
2211#define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d)) 2236#define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
2212#define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec)) 2237#define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
2213#define amdgpu_asic_get_virtual_caps(adev) ((adev)->asic_funcs->get_virtual_caps((adev))) 2238#define amdgpu_asic_get_virtual_caps(adev) ((adev)->asic_funcs->get_virtual_caps((adev)))
2214#define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
2215#define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev)) 2239#define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
2216#define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l)) 2240#define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
2217#define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v))) 2241#define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
@@ -2222,7 +2246,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
2222#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags))) 2246#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
2223#define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib))) 2247#define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
2224#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r)) 2248#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
2225#define amdgpu_ring_test_ib(r) (r)->funcs->test_ib((r)) 2249#define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
2226#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r)) 2250#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
2227#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r)) 2251#define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
2228#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r)) 2252#define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))
@@ -2264,6 +2288,8 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
2264#define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps)) 2288#define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps))
2265#define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev)) 2289#define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev))
2266#define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e)) 2290#define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))
2291#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
2292#define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance))
2267 2293
2268#define amdgpu_dpm_get_temperature(adev) \ 2294#define amdgpu_dpm_get_temperature(adev) \
2269 ((adev)->pp_enabled ? \ 2295 ((adev)->pp_enabled ? \
@@ -2342,6 +2368,18 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
2342#define amdgpu_dpm_force_clock_level(adev, type, level) \ 2368#define amdgpu_dpm_force_clock_level(adev, type, level) \
2343 (adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level) 2369 (adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level)
2344 2370
2371#define amdgpu_dpm_get_sclk_od(adev) \
2372 (adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle)
2373
2374#define amdgpu_dpm_set_sclk_od(adev, value) \
2375 (adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value)
2376
2377#define amdgpu_dpm_get_mclk_od(adev) \
2378 ((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle))
2379
2380#define amdgpu_dpm_set_mclk_od(adev, value) \
2381 ((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value))
2382
2345#define amdgpu_dpm_dispatch_task(adev, event_id, input, output) \ 2383#define amdgpu_dpm_dispatch_task(adev, event_id, input, output) \
2346 (adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output)) 2384 (adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output))
2347 2385
@@ -2383,9 +2421,13 @@ bool amdgpu_device_is_px(struct drm_device *dev);
2383#if defined(CONFIG_VGA_SWITCHEROO) 2421#if defined(CONFIG_VGA_SWITCHEROO)
2384void amdgpu_register_atpx_handler(void); 2422void amdgpu_register_atpx_handler(void);
2385void amdgpu_unregister_atpx_handler(void); 2423void amdgpu_unregister_atpx_handler(void);
2424bool amdgpu_has_atpx_dgpu_power_cntl(void);
2425bool amdgpu_is_atpx_hybrid(void);
2386#else 2426#else
2387static inline void amdgpu_register_atpx_handler(void) {} 2427static inline void amdgpu_register_atpx_handler(void) {}
2388static inline void amdgpu_unregister_atpx_handler(void) {} 2428static inline void amdgpu_unregister_atpx_handler(void) {}
2429static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
2430static inline bool amdgpu_is_atpx_hybrid(void) { return false; }
2389#endif 2431#endif
2390 2432
2391/* 2433/*
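The new amdgpu_dpm_get_sclk_od()/amdgpu_dpm_set_sclk_od() wrappers (and their mclk counterparts) route overdrive requests through powerplay. A hypothetical read-modify-write helper built on those wrappers; it assumes powerplay is enabled so the pp_funcs callbacks are populated.

#include "amdgpu.h"

static int example_bump_sclk_od(struct amdgpu_device *adev, uint32_t percent)
{
        int od;

        /* current engine-clock overdrive, in percent */
        od = amdgpu_dpm_get_sclk_od(adev);
        if (od < 0)
                return od;

        return amdgpu_dpm_set_sclk_od(adev, od + percent);
}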
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 32809f749903..d080d0807a5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -240,8 +240,8 @@ uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
240{ 240{
241 struct amdgpu_device *rdev = (struct amdgpu_device *)kgd; 241 struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;
242 242
243 if (rdev->asic_funcs->get_gpu_clock_counter) 243 if (rdev->gfx.funcs->get_gpu_clock_counter)
244 return rdev->asic_funcs->get_gpu_clock_counter(rdev); 244 return rdev->gfx.funcs->get_gpu_clock_counter(rdev);
245 return 0; 245 return 0;
246} 246}
247 247
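get_gpu_clock_counter() and select_se_sh() now live in a per-ASIC amdgpu_gfx_funcs table reached through adev->gfx.funcs, and callers such as amdkfd above go through that pointer (or the amdgpu_gfx_get_gpu_clock_counter() wrapper macro). A sketch of how a GFX IP block might install such a table; the example_* callbacks and the early-init hook are placeholders.

#include "amdgpu.h"

static uint64_t example_get_gpu_clock_counter(struct amdgpu_device *adev)
{
        /* a real implementation samples the GPU clock counter registers here */
        return 0;
}

static void example_select_se_sh(struct amdgpu_device *adev,
                                 u32 se_num, u32 sh_num, u32 instance)
{
        /* a real implementation programs the SE/SH selection registers here */
}

static const struct amdgpu_gfx_funcs example_gfx_funcs = {
        .get_gpu_clock_counter = example_get_gpu_clock_counter,
        .select_se_sh = example_select_se_sh,
};

static int example_gfx_early_init(struct amdgpu_device *adev)
{
        adev->gfx.funcs = &example_gfx_funcs;
        return 0;
}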
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 9df1bcb35bf0..983175363b06 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -551,28 +551,19 @@ int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)
551 le16_to_cpu(firmware_info->info.usReferenceClock); 551 le16_to_cpu(firmware_info->info.usReferenceClock);
552 ppll->reference_div = 0; 552 ppll->reference_div = 0;
553 553
554 if (crev < 2) 554 ppll->pll_out_min =
555 ppll->pll_out_min = 555 le32_to_cpu(firmware_info->info_12.ulMinPixelClockPLL_Output);
556 le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output);
557 else
558 ppll->pll_out_min =
559 le32_to_cpu(firmware_info->info_12.ulMinPixelClockPLL_Output);
560 ppll->pll_out_max = 556 ppll->pll_out_max =
561 le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output); 557 le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output);
562 558
563 if (crev >= 4) { 559 ppll->lcd_pll_out_min =
564 ppll->lcd_pll_out_min = 560 le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100;
565 le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100; 561 if (ppll->lcd_pll_out_min == 0)
566 if (ppll->lcd_pll_out_min == 0)
567 ppll->lcd_pll_out_min = ppll->pll_out_min;
568 ppll->lcd_pll_out_max =
569 le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100;
570 if (ppll->lcd_pll_out_max == 0)
571 ppll->lcd_pll_out_max = ppll->pll_out_max;
572 } else {
573 ppll->lcd_pll_out_min = ppll->pll_out_min; 562 ppll->lcd_pll_out_min = ppll->pll_out_min;
563 ppll->lcd_pll_out_max =
564 le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100;
565 if (ppll->lcd_pll_out_max == 0)
574 ppll->lcd_pll_out_max = ppll->pll_out_max; 566 ppll->lcd_pll_out_max = ppll->pll_out_max;
575 }
576 567
577 if (ppll->pll_out_min == 0) 568 if (ppll->pll_out_min == 0)
578 ppll->pll_out_min = 64800; 569 ppll->pll_out_min = 64800;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 35a1248aaa77..49de92600074 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -10,6 +10,7 @@
10#include <linux/slab.h> 10#include <linux/slab.h>
11#include <linux/acpi.h> 11#include <linux/acpi.h>
12#include <linux/pci.h> 12#include <linux/pci.h>
13#include <linux/delay.h>
13 14
14#include "amd_acpi.h" 15#include "amd_acpi.h"
15 16
@@ -27,6 +28,7 @@ struct amdgpu_atpx_functions {
27struct amdgpu_atpx { 28struct amdgpu_atpx {
28 acpi_handle handle; 29 acpi_handle handle;
29 struct amdgpu_atpx_functions functions; 30 struct amdgpu_atpx_functions functions;
31 bool is_hybrid;
30}; 32};
31 33
32static struct amdgpu_atpx_priv { 34static struct amdgpu_atpx_priv {
@@ -63,6 +65,14 @@ bool amdgpu_has_atpx(void) {
63 return amdgpu_atpx_priv.atpx_detected; 65 return amdgpu_atpx_priv.atpx_detected;
64} 66}
65 67
68bool amdgpu_has_atpx_dgpu_power_cntl(void) {
69 return amdgpu_atpx_priv.atpx.functions.power_cntl;
70}
71
72bool amdgpu_is_atpx_hybrid(void) {
73 return amdgpu_atpx_priv.atpx.is_hybrid;
74}
75
66/** 76/**
67 * amdgpu_atpx_call - call an ATPX method 77 * amdgpu_atpx_call - call an ATPX method
68 * 78 *
@@ -142,18 +152,12 @@ static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mas
142 */ 152 */
143static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx) 153static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
144{ 154{
145 /* make sure required functions are enabled */ 155 u32 valid_bits = 0;
146 /* dGPU power control is required */
147 if (atpx->functions.power_cntl == false) {
148 printk("ATPX dGPU power cntl not present, forcing\n");
149 atpx->functions.power_cntl = true;
150 }
151 156
152 if (atpx->functions.px_params) { 157 if (atpx->functions.px_params) {
153 union acpi_object *info; 158 union acpi_object *info;
154 struct atpx_px_params output; 159 struct atpx_px_params output;
155 size_t size; 160 size_t size;
156 u32 valid_bits;
157 161
158 info = amdgpu_atpx_call(atpx->handle, ATPX_FUNCTION_GET_PX_PARAMETERS, NULL); 162 info = amdgpu_atpx_call(atpx->handle, ATPX_FUNCTION_GET_PX_PARAMETERS, NULL);
159 if (!info) 163 if (!info)
@@ -172,19 +176,43 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
172 memcpy(&output, info->buffer.pointer, size); 176 memcpy(&output, info->buffer.pointer, size);
173 177
174 valid_bits = output.flags & output.valid_flags; 178 valid_bits = output.flags & output.valid_flags;
175 /* if separate mux flag is set, mux controls are required */
176 if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) {
177 atpx->functions.i2c_mux_cntl = true;
178 atpx->functions.disp_mux_cntl = true;
179 }
180 /* if any outputs are muxed, mux controls are required */
181 if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED |
182 ATPX_TV_SIGNAL_MUXED |
183 ATPX_DFP_SIGNAL_MUXED))
184 atpx->functions.disp_mux_cntl = true;
185 179
186 kfree(info); 180 kfree(info);
187 } 181 }
182
183 /* if separate mux flag is set, mux controls are required */
184 if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) {
185 atpx->functions.i2c_mux_cntl = true;
186 atpx->functions.disp_mux_cntl = true;
187 }
188 /* if any outputs are muxed, mux controls are required */
189 if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED |
190 ATPX_TV_SIGNAL_MUXED |
191 ATPX_DFP_SIGNAL_MUXED))
192 atpx->functions.disp_mux_cntl = true;
193
194
195 /* some bioses set these bits rather than flagging power_cntl as supported */
196 if (valid_bits & (ATPX_DYNAMIC_PX_SUPPORTED |
197 ATPX_DYNAMIC_DGPU_POWER_OFF_SUPPORTED))
198 atpx->functions.power_cntl = true;
199
200 atpx->is_hybrid = false;
201 if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
202 printk("ATPX Hybrid Graphics\n");
203#if 1
204 /* This is a temporary hack until the D3 cold support
205 * makes it upstream. The ATPX power_control method seems
 206 * to still work even if the system should be using
207 * the new standardized hybrid D3 cold ACPI interface.
208 */
209 atpx->functions.power_cntl = true;
210#else
211 atpx->functions.power_cntl = false;
212#endif
213 atpx->is_hybrid = true;
214 }
215
188 return 0; 216 return 0;
189} 217}
190 218
@@ -259,6 +287,10 @@ static int amdgpu_atpx_set_discrete_state(struct amdgpu_atpx *atpx, u8 state)
259 if (!info) 287 if (!info)
260 return -EIO; 288 return -EIO;
261 kfree(info); 289 kfree(info);
290
291 /* 200ms delay is required after off */
292 if (state == 0)
293 msleep(200);
262 } 294 }
263 return 0; 295 return 0;
264} 296}
@@ -507,7 +539,6 @@ static int amdgpu_atpx_get_client_id(struct pci_dev *pdev)
507static const struct vga_switcheroo_handler amdgpu_atpx_handler = { 539static const struct vga_switcheroo_handler amdgpu_atpx_handler = {
508 .switchto = amdgpu_atpx_switchto, 540 .switchto = amdgpu_atpx_switchto,
509 .power_state = amdgpu_atpx_power_state, 541 .power_state = amdgpu_atpx_power_state,
510 .init = amdgpu_atpx_init,
511 .get_client_id = amdgpu_atpx_get_client_id, 542 .get_client_id = amdgpu_atpx_get_client_id,
512}; 543};
513 544
@@ -542,6 +573,7 @@ static bool amdgpu_atpx_detect(void)
542 printk(KERN_INFO "vga_switcheroo: detected switching method %s handle\n", 573 printk(KERN_INFO "vga_switcheroo: detected switching method %s handle\n",
543 acpi_method_name); 574 acpi_method_name);
544 amdgpu_atpx_priv.atpx_detected = true; 575 amdgpu_atpx_priv.atpx_detected = true;
576 amdgpu_atpx_init();
545 return true; 577 return true;
546 } 578 }
547 return false; 579 return false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
index 99ca75baa47d..2b6afe123f3d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
@@ -352,22 +352,22 @@ bool amdgpu_get_bios(struct amdgpu_device *adev)
352 uint16_t tmp, bios_header_start; 352 uint16_t tmp, bios_header_start;
353 353
354 r = amdgpu_atrm_get_bios(adev); 354 r = amdgpu_atrm_get_bios(adev);
355 if (r == false) 355 if (!r)
356 r = amdgpu_acpi_vfct_bios(adev); 356 r = amdgpu_acpi_vfct_bios(adev);
357 if (r == false) 357 if (!r)
358 r = igp_read_bios_from_vram(adev); 358 r = igp_read_bios_from_vram(adev);
359 if (r == false) 359 if (!r)
360 r = amdgpu_read_bios(adev); 360 r = amdgpu_read_bios(adev);
361 if (r == false) { 361 if (!r) {
362 r = amdgpu_read_bios_from_rom(adev); 362 r = amdgpu_read_bios_from_rom(adev);
363 } 363 }
364 if (r == false) { 364 if (!r) {
365 r = amdgpu_read_disabled_bios(adev); 365 r = amdgpu_read_disabled_bios(adev);
366 } 366 }
367 if (r == false) { 367 if (!r) {
368 r = amdgpu_read_platform_bios(adev); 368 r = amdgpu_read_platform_bios(adev);
369 } 369 }
370 if (r == false || adev->bios == NULL) { 370 if (!r || adev->bios == NULL) {
371 DRM_ERROR("Unable to locate a BIOS ROM\n"); 371 DRM_ERROR("Unable to locate a BIOS ROM\n");
372 adev->bios = NULL; 372 adev->bios = NULL;
373 return false; 373 return false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 823bf5e0b0c8..651115dcce12 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -94,6 +94,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
94 unsigned last_entry = 0, first_userptr = num_entries; 94 unsigned last_entry = 0, first_userptr = num_entries;
95 unsigned i; 95 unsigned i;
96 int r; 96 int r;
97 unsigned long total_size = 0;
97 98
98 array = drm_malloc_ab(num_entries, sizeof(struct amdgpu_bo_list_entry)); 99 array = drm_malloc_ab(num_entries, sizeof(struct amdgpu_bo_list_entry));
99 if (!array) 100 if (!array)
@@ -140,6 +141,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
140 if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_OA) 141 if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_OA)
141 oa_obj = entry->robj; 142 oa_obj = entry->robj;
142 143
144 total_size += amdgpu_bo_size(entry->robj);
143 trace_amdgpu_bo_list_set(list, entry->robj); 145 trace_amdgpu_bo_list_set(list, entry->robj);
144 } 146 }
145 147
@@ -155,6 +157,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
155 list->array = array; 157 list->array = array;
156 list->num_entries = num_entries; 158 list->num_entries = num_entries;
157 159
160 trace_amdgpu_cs_bo_status(list->num_entries, total_size);
158 return 0; 161 return 0;
159 162
160error_free: 163error_free:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index cf6f49fc1c75..bc0440f7a31d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -312,6 +312,8 @@ static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device,
312 return RREG32_UVD_CTX(index); 312 return RREG32_UVD_CTX(index);
313 case CGS_IND_REG__DIDT: 313 case CGS_IND_REG__DIDT:
314 return RREG32_DIDT(index); 314 return RREG32_DIDT(index);
315 case CGS_IND_REG_GC_CAC:
316 return RREG32_GC_CAC(index);
315 case CGS_IND_REG__AUDIO_ENDPT: 317 case CGS_IND_REG__AUDIO_ENDPT:
316 DRM_ERROR("audio endpt register access not implemented.\n"); 318 DRM_ERROR("audio endpt register access not implemented.\n");
317 return 0; 319 return 0;
@@ -336,6 +338,8 @@ static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device,
336 return WREG32_UVD_CTX(index, value); 338 return WREG32_UVD_CTX(index, value);
337 case CGS_IND_REG__DIDT: 339 case CGS_IND_REG__DIDT:
338 return WREG32_DIDT(index, value); 340 return WREG32_DIDT(index, value);
341 case CGS_IND_REG_GC_CAC:
342 return WREG32_GC_CAC(index, value);
339 case CGS_IND_REG__AUDIO_ENDPT: 343 case CGS_IND_REG__AUDIO_ENDPT:
340 DRM_ERROR("audio endpt register access not implemented.\n"); 344 DRM_ERROR("audio endpt register access not implemented.\n");
341 return; 345 return;
@@ -748,6 +752,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
748 752
749 if (!adev->pm.fw) { 753 if (!adev->pm.fw) {
750 switch (adev->asic_type) { 754 switch (adev->asic_type) {
755 case CHIP_TOPAZ:
756 strcpy(fw_name, "amdgpu/topaz_smc.bin");
757 break;
751 case CHIP_TONGA: 758 case CHIP_TONGA:
752 strcpy(fw_name, "amdgpu/tonga_smc.bin"); 759 strcpy(fw_name, "amdgpu/tonga_smc.bin");
753 break; 760 break;
@@ -787,6 +794,7 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
787 } 794 }
788 795
789 hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data; 796 hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
797 amdgpu_ucode_print_smc_hdr(&hdr->header);
790 adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version); 798 adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
791 ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes); 799 ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
792 ucode_start_address = le32_to_cpu(hdr->ucode_start_addr); 800 ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
@@ -795,13 +803,14 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
795 803
796 info->version = adev->pm.fw_version; 804 info->version = adev->pm.fw_version;
797 info->image_size = ucode_size; 805 info->image_size = ucode_size;
806 info->ucode_start_address = ucode_start_address;
798 info->kptr = (void *)src; 807 info->kptr = (void *)src;
799 } 808 }
800 return 0; 809 return 0;
801} 810}
802 811
803static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device, 812static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
804 struct cgs_system_info *sys_info) 813 struct cgs_system_info *sys_info)
805{ 814{
806 CGS_FUNC_ADEV; 815 CGS_FUNC_ADEV;
807 816
@@ -821,6 +830,12 @@ static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
821 case CGS_SYSTEM_INFO_PCIE_MLW: 830 case CGS_SYSTEM_INFO_PCIE_MLW:
822 sys_info->value = adev->pm.pcie_mlw_mask; 831 sys_info->value = adev->pm.pcie_mlw_mask;
823 break; 832 break;
833 case CGS_SYSTEM_INFO_PCIE_DEV:
834 sys_info->value = adev->pdev->device;
835 break;
836 case CGS_SYSTEM_INFO_PCIE_REV:
837 sys_info->value = adev->pdev->revision;
838 break;
824 case CGS_SYSTEM_INFO_CG_FLAGS: 839 case CGS_SYSTEM_INFO_CG_FLAGS:
825 sys_info->value = adev->cg_flags; 840 sys_info->value = adev->cg_flags;
826 break; 841 break;
@@ -830,6 +845,9 @@ static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
830 case CGS_SYSTEM_INFO_GFX_CU_INFO: 845 case CGS_SYSTEM_INFO_GFX_CU_INFO:
831 sys_info->value = adev->gfx.cu_info.number; 846 sys_info->value = adev->gfx.cu_info.number;
832 break; 847 break;
848 case CGS_SYSTEM_INFO_GFX_SE_INFO:
849 sys_info->value = adev->gfx.config.max_shader_engines;
850 break;
833 default: 851 default:
834 return -ENODEV; 852 return -ENODEV;
835 } 853 }
@@ -903,14 +921,12 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
903 acpi_handle handle; 921 acpi_handle handle;
904 struct acpi_object_list input; 922 struct acpi_object_list input;
905 struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; 923 struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
906 union acpi_object *params = NULL; 924 union acpi_object *params, *obj;
907 union acpi_object *obj = NULL;
908 uint8_t name[5] = {'\0'}; 925 uint8_t name[5] = {'\0'};
909 struct cgs_acpi_method_argument *argument = NULL; 926 struct cgs_acpi_method_argument *argument;
910 uint32_t i, count; 927 uint32_t i, count;
911 acpi_status status; 928 acpi_status status;
912 int result = 0; 929 int result;
913 uint32_t func_no = 0xFFFFFFFF;
914 930
915 handle = ACPI_HANDLE(&adev->pdev->dev); 931 handle = ACPI_HANDLE(&adev->pdev->dev);
916 if (!handle) 932 if (!handle)
@@ -927,7 +943,6 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
927 if (info->pinput_argument == NULL) 943 if (info->pinput_argument == NULL)
928 return -EINVAL; 944 return -EINVAL;
929 argument = info->pinput_argument; 945 argument = info->pinput_argument;
930 func_no = argument->value;
931 for (i = 0; i < info->input_count; i++) { 946 for (i = 0; i < info->input_count; i++) {
932 if (((argument->type == ACPI_TYPE_STRING) || 947 if (((argument->type == ACPI_TYPE_STRING) ||
933 (argument->type == ACPI_TYPE_BUFFER)) && 948 (argument->type == ACPI_TYPE_BUFFER)) &&
@@ -972,11 +987,11 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
972 params->integer.value = argument->value; 987 params->integer.value = argument->value;
973 break; 988 break;
974 case ACPI_TYPE_STRING: 989 case ACPI_TYPE_STRING:
975 params->string.length = argument->method_length; 990 params->string.length = argument->data_length;
976 params->string.pointer = argument->pointer; 991 params->string.pointer = argument->pointer;
977 break; 992 break;
978 case ACPI_TYPE_BUFFER: 993 case ACPI_TYPE_BUFFER:
979 params->buffer.length = argument->method_length; 994 params->buffer.length = argument->data_length;
980 params->buffer.pointer = argument->pointer; 995 params->buffer.pointer = argument->pointer;
981 break; 996 break;
982 default: 997 default:
@@ -996,7 +1011,7 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
996 1011
997 if (ACPI_FAILURE(status)) { 1012 if (ACPI_FAILURE(status)) {
998 result = -EIO; 1013 result = -EIO;
999 goto error; 1014 goto free_input;
1000 } 1015 }
1001 1016
1002 /* return the output info */ 1017 /* return the output info */
@@ -1006,7 +1021,7 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
1006 if ((obj->type != ACPI_TYPE_PACKAGE) || 1021 if ((obj->type != ACPI_TYPE_PACKAGE) ||
1007 (obj->package.count != count)) { 1022 (obj->package.count != count)) {
1008 result = -EIO; 1023 result = -EIO;
1009 goto error; 1024 goto free_obj;
1010 } 1025 }
1011 params = obj->package.elements; 1026 params = obj->package.elements;
1012 } else 1027 } else
@@ -1014,13 +1029,13 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
1014 1029
1015 if (params == NULL) { 1030 if (params == NULL) {
1016 result = -EIO; 1031 result = -EIO;
1017 goto error; 1032 goto free_obj;
1018 } 1033 }
1019 1034
1020 for (i = 0; i < count; i++) { 1035 for (i = 0; i < count; i++) {
1021 if (argument->type != params->type) { 1036 if (argument->type != params->type) {
1022 result = -EIO; 1037 result = -EIO;
1023 goto error; 1038 goto free_obj;
1024 } 1039 }
1025 switch (params->type) { 1040 switch (params->type) {
1026 case ACPI_TYPE_INTEGER: 1041 case ACPI_TYPE_INTEGER:
@@ -1030,7 +1045,7 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
1030 if ((params->string.length != argument->data_length) || 1045 if ((params->string.length != argument->data_length) ||
1031 (params->string.pointer == NULL)) { 1046 (params->string.pointer == NULL)) {
1032 result = -EIO; 1047 result = -EIO;
1033 goto error; 1048 goto free_obj;
1034 } 1049 }
1035 strncpy(argument->pointer, 1050 strncpy(argument->pointer,
1036 params->string.pointer, 1051 params->string.pointer,
@@ -1039,7 +1054,7 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
1039 case ACPI_TYPE_BUFFER: 1054 case ACPI_TYPE_BUFFER:
1040 if (params->buffer.pointer == NULL) { 1055 if (params->buffer.pointer == NULL) {
1041 result = -EIO; 1056 result = -EIO;
1042 goto error; 1057 goto free_obj;
1043 } 1058 }
1044 memcpy(argument->pointer, 1059 memcpy(argument->pointer,
1045 params->buffer.pointer, 1060 params->buffer.pointer,
@@ -1052,9 +1067,10 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
1052 params++; 1067 params++;
1053 } 1068 }
1054 1069
1055error: 1070 result = 0;
1056 if (obj != NULL) 1071free_obj:
1057 kfree(obj); 1072 kfree(obj);
1073free_input:
1058 kfree((void *)input.pointer); 1074 kfree((void *)input.pointer);
1059 return result; 1075 return result;
1060} 1076}
@@ -1066,7 +1082,7 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
1066} 1082}
1067#endif 1083#endif
1068 1084
1069int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device, 1085static int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device,
1070 uint32_t acpi_method, 1086 uint32_t acpi_method,
1071 uint32_t acpi_function, 1087 uint32_t acpi_function,
1072 void *pinput, void *poutput, 1088 void *pinput, void *poutput,
@@ -1079,17 +1095,14 @@ int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device,
1079 struct cgs_acpi_method_info info = {0}; 1095 struct cgs_acpi_method_info info = {0};
1080 1096
1081 acpi_input[0].type = CGS_ACPI_TYPE_INTEGER; 1097 acpi_input[0].type = CGS_ACPI_TYPE_INTEGER;
1082 acpi_input[0].method_length = sizeof(uint32_t);
1083 acpi_input[0].data_length = sizeof(uint32_t); 1098 acpi_input[0].data_length = sizeof(uint32_t);
1084 acpi_input[0].value = acpi_function; 1099 acpi_input[0].value = acpi_function;
1085 1100
1086 acpi_input[1].type = CGS_ACPI_TYPE_BUFFER; 1101 acpi_input[1].type = CGS_ACPI_TYPE_BUFFER;
1087 acpi_input[1].method_length = CGS_ACPI_MAX_BUFFER_SIZE;
1088 acpi_input[1].data_length = input_size; 1102 acpi_input[1].data_length = input_size;
1089 acpi_input[1].pointer = pinput; 1103 acpi_input[1].pointer = pinput;
1090 1104
1091 acpi_output.type = CGS_ACPI_TYPE_BUFFER; 1105 acpi_output.type = CGS_ACPI_TYPE_BUFFER;
1092 acpi_output.method_length = CGS_ACPI_MAX_BUFFER_SIZE;
1093 acpi_output.data_length = output_size; 1106 acpi_output.data_length = output_size;
1094 acpi_output.pointer = poutput; 1107 acpi_output.pointer = poutput;
1095 1108
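The ACPI evaluation path above replaces the single error: label with separate free_obj/free_input labels so every exit frees exactly what has been allocated so far. A generic sketch of that two-label unwind; the buffer size and the stand-in evaluation step are illustrative.

#include <linux/slab.h>
#include <linux/string.h>

static int example_eval(void)
{
        void *input, *obj;
        int result;

        input = kzalloc(64, GFP_KERNEL);
        if (!input)
                return -ENOMEM;

        obj = kzalloc(64, GFP_KERNEL);
        if (!obj) {
                result = -ENOMEM;
                goto free_input;
        }

        /* stand-in for the real ACPI evaluation and result copy-out */
        result = memcmp(input, obj, 64) ? -EIO : 0;
        if (result)
                goto free_obj;

        result = 0;
free_obj:
        kfree(obj);
free_input:
        kfree(input);
        return result;
}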
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index cb07da41152b..ff0b55a65ca3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -1690,7 +1690,6 @@ amdgpu_connector_add(struct amdgpu_device *adev,
1690 DRM_MODE_SCALE_NONE); 1690 DRM_MODE_SCALE_NONE);
1691 /* no HPD on analog connectors */ 1691 /* no HPD on analog connectors */
1692 amdgpu_connector->hpd.hpd = AMDGPU_HPD_NONE; 1692 amdgpu_connector->hpd.hpd = AMDGPU_HPD_NONE;
1693 connector->polled = DRM_CONNECTOR_POLL_CONNECT;
1694 connector->interlace_allowed = true; 1693 connector->interlace_allowed = true;
1695 connector->doublescan_allowed = true; 1694 connector->doublescan_allowed = true;
1696 break; 1695 break;
@@ -1893,8 +1892,10 @@ amdgpu_connector_add(struct amdgpu_device *adev,
1893 } 1892 }
1894 1893
1895 if (amdgpu_connector->hpd.hpd == AMDGPU_HPD_NONE) { 1894 if (amdgpu_connector->hpd.hpd == AMDGPU_HPD_NONE) {
1896 if (i2c_bus->valid) 1895 if (i2c_bus->valid) {
1897 connector->polled = DRM_CONNECTOR_POLL_CONNECT; 1896 connector->polled = DRM_CONNECTOR_POLL_CONNECT |
1897 DRM_CONNECTOR_POLL_DISCONNECT;
1898 }
1898 } else 1899 } else
1899 connector->polled = DRM_CONNECTOR_POLL_HPD; 1900 connector->polled = DRM_CONNECTOR_POLL_HPD;
1900 1901
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 9bc8f1d99733..0307ff5887c5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -216,11 +216,8 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
216 if (ret) 216 if (ret)
217 goto free_all_kdata; 217 goto free_all_kdata;
218 218
219 if (p->uf_entry.robj) { 219 if (p->uf_entry.robj)
220 p->job->uf_bo = amdgpu_bo_ref(p->uf_entry.robj); 220 p->job->uf_addr = uf_offset;
221 p->job->uf_offset = uf_offset;
222 }
223
224 kfree(chunk_array); 221 kfree(chunk_array);
225 return 0; 222 return 0;
226 223
@@ -459,7 +456,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
459 list_splice(&need_pages, &p->validated); 456 list_splice(&need_pages, &p->validated);
460 } 457 }
461 458
462 amdgpu_vm_get_pt_bos(&fpriv->vm, &duplicates); 459 amdgpu_vm_get_pt_bos(p->adev, &fpriv->vm, &duplicates);
463 460
464 p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev); 461 p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev);
465 p->bytes_moved = 0; 462 p->bytes_moved = 0;
@@ -472,6 +469,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
472 if (r) 469 if (r)
473 goto error_validate; 470 goto error_validate;
474 471
472 fpriv->vm.last_eviction_counter =
473 atomic64_read(&p->adev->num_evictions);
474
475 if (p->bo_list) { 475 if (p->bo_list) {
476 struct amdgpu_bo *gds = p->bo_list->gds_obj; 476 struct amdgpu_bo *gds = p->bo_list->gds_obj;
477 struct amdgpu_bo *gws = p->bo_list->gws_obj; 477 struct amdgpu_bo *gws = p->bo_list->gws_obj;
@@ -499,6 +499,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
499 } 499 }
500 } 500 }
501 501
502 if (p->uf_entry.robj)
503 p->job->uf_addr += amdgpu_bo_gpu_offset(p->uf_entry.robj);
504
502error_validate: 505error_validate:
503 if (r) { 506 if (r) {
504 amdgpu_vm_move_pt_bos_in_lru(p->adev, &fpriv->vm); 507 amdgpu_vm_move_pt_bos_in_lru(p->adev, &fpriv->vm);
@@ -653,18 +656,21 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
653 656
654 /* Only for UVD/VCE VM emulation */ 657 /* Only for UVD/VCE VM emulation */
655 if (ring->funcs->parse_cs) { 658 if (ring->funcs->parse_cs) {
659 p->job->vm = NULL;
656 for (i = 0; i < p->job->num_ibs; i++) { 660 for (i = 0; i < p->job->num_ibs; i++) {
657 r = amdgpu_ring_parse_cs(ring, p, i); 661 r = amdgpu_ring_parse_cs(ring, p, i);
658 if (r) 662 if (r)
659 return r; 663 return r;
660 } 664 }
661 } 665 } else {
666 p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
662 667
663 r = amdgpu_bo_vm_update_pte(p, vm); 668 r = amdgpu_bo_vm_update_pte(p, vm);
664 if (!r) 669 if (r)
665 amdgpu_cs_sync_rings(p); 670 return r;
671 }
666 672
667 return r; 673 return amdgpu_cs_sync_rings(p);
668} 674}
669 675
670static int amdgpu_cs_handle_lockup(struct amdgpu_device *adev, int r) 676static int amdgpu_cs_handle_lockup(struct amdgpu_device *adev, int r)
@@ -761,7 +767,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
761 } 767 }
762 768
763 /* UVD & VCE fw doesn't support user fences */ 769 /* UVD & VCE fw doesn't support user fences */
764 if (parser->job->uf_bo && ( 770 if (parser->job->uf_addr && (
765 parser->job->ring->type == AMDGPU_RING_TYPE_UVD || 771 parser->job->ring->type == AMDGPU_RING_TYPE_UVD ||
766 parser->job->ring->type == AMDGPU_RING_TYPE_VCE)) 772 parser->job->ring->type == AMDGPU_RING_TYPE_VCE))
767 return -EINVAL; 773 return -EINVAL;
@@ -830,17 +836,13 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
830{ 836{
831 struct amdgpu_ring *ring = p->job->ring; 837 struct amdgpu_ring *ring = p->job->ring;
832 struct amd_sched_entity *entity = &p->ctx->rings[ring->idx].entity; 838 struct amd_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
833 struct fence *fence;
834 struct amdgpu_job *job; 839 struct amdgpu_job *job;
835 int r; 840 int r;
836 841
837 job = p->job; 842 job = p->job;
838 p->job = NULL; 843 p->job = NULL;
839 844
840 r = amd_sched_job_init(&job->base, &ring->sched, 845 r = amd_sched_job_init(&job->base, &ring->sched, entity, p->filp);
841 entity, amdgpu_job_timeout_func,
842 amdgpu_job_free_func,
843 p->filp, &fence);
844 if (r) { 846 if (r) {
845 amdgpu_job_free(job); 847 amdgpu_job_free(job);
846 return r; 848 return r;
@@ -848,9 +850,10 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
848 850
849 job->owner = p->filp; 851 job->owner = p->filp;
850 job->ctx = entity->fence_context; 852 job->ctx = entity->fence_context;
851 p->fence = fence_get(fence); 853 p->fence = fence_get(&job->base.s_fence->finished);
852 cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, fence); 854 cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence);
853 job->uf_sequence = cs->out.handle; 855 job->uf_sequence = cs->out.handle;
856 amdgpu_job_free_resources(job);
854 857
855 trace_amdgpu_cs_ioctl(job); 858 trace_amdgpu_cs_ioctl(job);
856 amd_sched_entity_push_job(&job->base); 859 amd_sched_entity_push_job(&job->base);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 6e920086af46..df7ab2458e50 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -25,6 +25,7 @@
25 * Alex Deucher 25 * Alex Deucher
26 * Jerome Glisse 26 * Jerome Glisse
27 */ 27 */
28#include <linux/kthread.h>
28#include <linux/console.h> 29#include <linux/console.h>
29#include <linux/slab.h> 30#include <linux/slab.h>
30#include <linux/debugfs.h> 31#include <linux/debugfs.h>
@@ -35,6 +36,7 @@
35#include <linux/vga_switcheroo.h> 36#include <linux/vga_switcheroo.h>
36#include <linux/efi.h> 37#include <linux/efi.h>
37#include "amdgpu.h" 38#include "amdgpu.h"
39#include "amdgpu_trace.h"
38#include "amdgpu_i2c.h" 40#include "amdgpu_i2c.h"
39#include "atom.h" 41#include "atom.h"
40#include "amdgpu_atombios.h" 42#include "amdgpu_atombios.h"
@@ -79,24 +81,27 @@ bool amdgpu_device_is_px(struct drm_device *dev)
79uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg, 81uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
80 bool always_indirect) 82 bool always_indirect)
81{ 83{
84 uint32_t ret;
85
82 if ((reg * 4) < adev->rmmio_size && !always_indirect) 86 if ((reg * 4) < adev->rmmio_size && !always_indirect)
83 return readl(((void __iomem *)adev->rmmio) + (reg * 4)); 87 ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
84 else { 88 else {
85 unsigned long flags; 89 unsigned long flags;
86 uint32_t ret;
87 90
88 spin_lock_irqsave(&adev->mmio_idx_lock, flags); 91 spin_lock_irqsave(&adev->mmio_idx_lock, flags);
89 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4)); 92 writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
90 ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4)); 93 ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
91 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); 94 spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
92
93 return ret;
94 } 95 }
96 trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
97 return ret;
95} 98}
96 99
97void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, 100void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
98 bool always_indirect) 101 bool always_indirect)
99{ 102{
103 trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
104
100 if ((reg * 4) < adev->rmmio_size && !always_indirect) 105 if ((reg * 4) < adev->rmmio_size && !always_indirect)
101 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4)); 106 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
102 else { 107 else {
@@ -1070,11 +1075,14 @@ int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
1070 int i, r = 0; 1075 int i, r = 0;
1071 1076
1072 for (i = 0; i < adev->num_ip_blocks; i++) { 1077 for (i = 0; i < adev->num_ip_blocks; i++) {
1078 if (!adev->ip_block_status[i].valid)
1079 continue;
1073 if (adev->ip_blocks[i].type == block_type) { 1080 if (adev->ip_blocks[i].type == block_type) {
1074 r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, 1081 r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
1075 state); 1082 state);
1076 if (r) 1083 if (r)
1077 return r; 1084 return r;
1085 break;
1078 } 1086 }
1079 } 1087 }
1080 return r; 1088 return r;
@@ -1087,16 +1095,53 @@ int amdgpu_set_powergating_state(struct amdgpu_device *adev,
1087 int i, r = 0; 1095 int i, r = 0;
1088 1096
1089 for (i = 0; i < adev->num_ip_blocks; i++) { 1097 for (i = 0; i < adev->num_ip_blocks; i++) {
1098 if (!adev->ip_block_status[i].valid)
1099 continue;
1090 if (adev->ip_blocks[i].type == block_type) { 1100 if (adev->ip_blocks[i].type == block_type) {
1091 r = adev->ip_blocks[i].funcs->set_powergating_state((void *)adev, 1101 r = adev->ip_blocks[i].funcs->set_powergating_state((void *)adev,
1092 state); 1102 state);
1093 if (r) 1103 if (r)
1094 return r; 1104 return r;
1105 break;
1095 } 1106 }
1096 } 1107 }
1097 return r; 1108 return r;
1098} 1109}
1099 1110
1111int amdgpu_wait_for_idle(struct amdgpu_device *adev,
1112 enum amd_ip_block_type block_type)
1113{
1114 int i, r;
1115
1116 for (i = 0; i < adev->num_ip_blocks; i++) {
1117 if (!adev->ip_block_status[i].valid)
1118 continue;
1119 if (adev->ip_blocks[i].type == block_type) {
1120 r = adev->ip_blocks[i].funcs->wait_for_idle((void *)adev);
1121 if (r)
1122 return r;
1123 break;
1124 }
1125 }
1126 return 0;
1127
1128}
1129
1130bool amdgpu_is_idle(struct amdgpu_device *adev,
1131 enum amd_ip_block_type block_type)
1132{
1133 int i;
1134
1135 for (i = 0; i < adev->num_ip_blocks; i++) {
1136 if (!adev->ip_block_status[i].valid)
1137 continue;
1138 if (adev->ip_blocks[i].type == block_type)
1139 return adev->ip_blocks[i].funcs->is_idle((void *)adev);
1140 }
1141 return true;
1142
1143}
1144
1100const struct amdgpu_ip_block_version * amdgpu_get_ip_block( 1145const struct amdgpu_ip_block_version * amdgpu_get_ip_block(
1101 struct amdgpu_device *adev, 1146 struct amdgpu_device *adev,
1102 enum amd_ip_block_type type) 1147 enum amd_ip_block_type type)
@@ -1209,6 +1254,9 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
1209 } 1254 }
1210 } 1255 }
1211 1256
1257 adev->cg_flags &= amdgpu_cg_mask;
1258 adev->pg_flags &= amdgpu_pg_mask;
1259
1212 return 0; 1260 return 0;
1213} 1261}
1214 1262
@@ -1440,9 +1488,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
1440 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg; 1488 adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
1441 adev->didt_rreg = &amdgpu_invalid_rreg; 1489 adev->didt_rreg = &amdgpu_invalid_rreg;
1442 adev->didt_wreg = &amdgpu_invalid_wreg; 1490 adev->didt_wreg = &amdgpu_invalid_wreg;
1491 adev->gc_cac_rreg = &amdgpu_invalid_rreg;
1492 adev->gc_cac_wreg = &amdgpu_invalid_wreg;
1443 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg; 1493 adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
1444 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg; 1494 adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
1445 1495
1496
1446 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n", 1497 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
1447 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device, 1498 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
1448 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision); 1499 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
@@ -1467,6 +1518,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
1467 spin_lock_init(&adev->pcie_idx_lock); 1518 spin_lock_init(&adev->pcie_idx_lock);
1468 spin_lock_init(&adev->uvd_ctx_idx_lock); 1519 spin_lock_init(&adev->uvd_ctx_idx_lock);
1469 spin_lock_init(&adev->didt_idx_lock); 1520 spin_lock_init(&adev->didt_idx_lock);
1521 spin_lock_init(&adev->gc_cac_idx_lock);
1470 spin_lock_init(&adev->audio_endpt_idx_lock); 1522 spin_lock_init(&adev->audio_endpt_idx_lock);
1471 1523
1472 adev->rmmio_base = pci_resource_start(adev->pdev, 5); 1524 adev->rmmio_base = pci_resource_start(adev->pdev, 5);
@@ -1511,17 +1563,20 @@ int amdgpu_device_init(struct amdgpu_device *adev,
1511 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain); 1563 vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
1512 1564
1513 /* Read BIOS */ 1565 /* Read BIOS */
1514 if (!amdgpu_get_bios(adev)) 1566 if (!amdgpu_get_bios(adev)) {
1515 return -EINVAL; 1567 r = -EINVAL;
1568 goto failed;
1569 }
1516 /* Must be an ATOMBIOS */ 1570 /* Must be an ATOMBIOS */
1517 if (!adev->is_atom_bios) { 1571 if (!adev->is_atom_bios) {
1518 dev_err(adev->dev, "Expecting atombios for GPU\n"); 1572 dev_err(adev->dev, "Expecting atombios for GPU\n");
1519 return -EINVAL; 1573 r = -EINVAL;
1574 goto failed;
1520 } 1575 }
1521 r = amdgpu_atombios_init(adev); 1576 r = amdgpu_atombios_init(adev);
1522 if (r) { 1577 if (r) {
1523 dev_err(adev->dev, "amdgpu_atombios_init failed\n"); 1578 dev_err(adev->dev, "amdgpu_atombios_init failed\n");
1524 return r; 1579 goto failed;
1525 } 1580 }
1526 1581
1527 /* See if the asic supports SR-IOV */ 1582 /* See if the asic supports SR-IOV */
@@ -1538,7 +1593,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
1538 !(adev->virtualization.caps & AMDGPU_VIRT_CAPS_SRIOV_EN))) { 1593 !(adev->virtualization.caps & AMDGPU_VIRT_CAPS_SRIOV_EN))) {
1539 if (!adev->bios) { 1594 if (!adev->bios) {
1540 dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n"); 1595 dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n");
1541 return -EINVAL; 1596 r = -EINVAL;
1597 goto failed;
1542 } 1598 }
1543 DRM_INFO("GPU not posted. posting now...\n"); 1599 DRM_INFO("GPU not posted. posting now...\n");
1544 amdgpu_atom_asic_init(adev->mode_info.atom_context); 1600 amdgpu_atom_asic_init(adev->mode_info.atom_context);
@@ -1548,7 +1604,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
1548 r = amdgpu_atombios_get_clock_info(adev); 1604 r = amdgpu_atombios_get_clock_info(adev);
1549 if (r) { 1605 if (r) {
1550 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n"); 1606 dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
1551 return r; 1607 goto failed;
1552 } 1608 }
1553 /* init i2c buses */ 1609 /* init i2c buses */
1554 amdgpu_atombios_i2c_init(adev); 1610 amdgpu_atombios_i2c_init(adev);
@@ -1557,7 +1613,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
1557 r = amdgpu_fence_driver_init(adev); 1613 r = amdgpu_fence_driver_init(adev);
1558 if (r) { 1614 if (r) {
1559 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n"); 1615 dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
1560 return r; 1616 goto failed;
1561 } 1617 }
1562 1618
1563 /* init the mode config */ 1619 /* init the mode config */
@@ -1567,7 +1623,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
1567 if (r) { 1623 if (r) {
1568 dev_err(adev->dev, "amdgpu_init failed\n"); 1624 dev_err(adev->dev, "amdgpu_init failed\n");
1569 amdgpu_fini(adev); 1625 amdgpu_fini(adev);
1570 return r; 1626 goto failed;
1571 } 1627 }
1572 1628
1573 adev->accel_working = true; 1629 adev->accel_working = true;
@@ -1577,7 +1633,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
1577 r = amdgpu_ib_pool_init(adev); 1633 r = amdgpu_ib_pool_init(adev);
1578 if (r) { 1634 if (r) {
1579 dev_err(adev->dev, "IB initialization failed (%d).\n", r); 1635 dev_err(adev->dev, "IB initialization failed (%d).\n", r);
1580 return r; 1636 goto failed;
1581 } 1637 }
1582 1638
1583 r = amdgpu_ib_ring_tests(adev); 1639 r = amdgpu_ib_ring_tests(adev);
@@ -1594,6 +1650,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
1594 DRM_ERROR("registering register debugfs failed (%d).\n", r); 1650 DRM_ERROR("registering register debugfs failed (%d).\n", r);
1595 } 1651 }
1596 1652
1653 r = amdgpu_debugfs_firmware_init(adev);
1654 if (r) {
1655 DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
1656 return r;
1657 }
1658
1597 if ((amdgpu_testing & 1)) { 1659 if ((amdgpu_testing & 1)) {
1598 if (adev->accel_working) 1660 if (adev->accel_working)
1599 amdgpu_test_moves(adev); 1661 amdgpu_test_moves(adev);
@@ -1619,10 +1681,15 @@ int amdgpu_device_init(struct amdgpu_device *adev,
1619 r = amdgpu_late_init(adev); 1681 r = amdgpu_late_init(adev);
1620 if (r) { 1682 if (r) {
1621 dev_err(adev->dev, "amdgpu_late_init failed\n"); 1683 dev_err(adev->dev, "amdgpu_late_init failed\n");
1622 return r; 1684 goto failed;
1623 } 1685 }
1624 1686
1625 return 0; 1687 return 0;
1688
1689failed:
1690 if (runtime)
1691 vga_switcheroo_fini_domain_pm_ops(adev->dev);
1692 return r;
1626} 1693}
1627 1694
1628static void amdgpu_debugfs_remove_files(struct amdgpu_device *adev); 1695static void amdgpu_debugfs_remove_files(struct amdgpu_device *adev);
@@ -1645,6 +1712,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
1645 amdgpu_bo_evict_vram(adev); 1712 amdgpu_bo_evict_vram(adev);
1646 amdgpu_ib_pool_fini(adev); 1713 amdgpu_ib_pool_fini(adev);
1647 amdgpu_fence_driver_fini(adev); 1714 amdgpu_fence_driver_fini(adev);
1715 drm_crtc_force_disable_all(adev->ddev);
1648 amdgpu_fbdev_fini(adev); 1716 amdgpu_fbdev_fini(adev);
1649 r = amdgpu_fini(adev); 1717 r = amdgpu_fini(adev);
1650 kfree(adev->ip_block_status); 1718 kfree(adev->ip_block_status);
@@ -1656,6 +1724,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
1656 kfree(adev->bios); 1724 kfree(adev->bios);
1657 adev->bios = NULL; 1725 adev->bios = NULL;
1658 vga_switcheroo_unregister_client(adev->pdev); 1726 vga_switcheroo_unregister_client(adev->pdev);
1727 if (adev->flags & AMD_IS_PX)
1728 vga_switcheroo_fini_domain_pm_ops(adev->dev);
1659 vga_client_register(adev->pdev, NULL, NULL, NULL); 1729 vga_client_register(adev->pdev, NULL, NULL, NULL);
1660 if (adev->rio_mem) 1730 if (adev->rio_mem)
1661 pci_iounmap(adev->pdev, adev->rio_mem); 1731 pci_iounmap(adev->pdev, adev->rio_mem);
@@ -1841,7 +1911,23 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
1841 } 1911 }
1842 1912
1843 drm_kms_helper_poll_enable(dev); 1913 drm_kms_helper_poll_enable(dev);
1914
1915 /*
1916 * Most of the connector probing functions try to acquire runtime pm
1917 * refs to ensure that the GPU is powered on when connector polling is
1918 * performed. Since we're calling this from a runtime PM callback,
1919 * trying to acquire rpm refs will cause us to deadlock.
1920 *
1921 * Since we're guaranteed to be holding the rpm lock, it's safe to
1922 * temporarily disable the rpm helpers so this doesn't deadlock us.
1923 */
1924#ifdef CONFIG_PM
1925 dev->dev->power.disable_depth++;
1926#endif
1844 drm_helper_hpd_irq_event(dev); 1927 drm_helper_hpd_irq_event(dev);
1928#ifdef CONFIG_PM
1929 dev->dev->power.disable_depth--;
1930#endif
1845 1931
1846 if (fbcon) { 1932 if (fbcon) {
1847 amdgpu_fbdev_set_suspend(adev, 0); 1933 amdgpu_fbdev_set_suspend(adev, 0);
@@ -1861,11 +1947,6 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
1861 */ 1947 */
1862int amdgpu_gpu_reset(struct amdgpu_device *adev) 1948int amdgpu_gpu_reset(struct amdgpu_device *adev)
1863{ 1949{
1864 unsigned ring_sizes[AMDGPU_MAX_RINGS];
1865 uint32_t *ring_data[AMDGPU_MAX_RINGS];
1866
1867 bool saved = false;
1868
1869 int i, r; 1950 int i, r;
1870 int resched; 1951 int resched;
1871 1952
@@ -1874,22 +1955,30 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
1874 /* block TTM */ 1955 /* block TTM */
1875 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev); 1956 resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
1876 1957
1877 r = amdgpu_suspend(adev); 1958 /* block scheduler */
1878
1879 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 1959 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
1880 struct amdgpu_ring *ring = adev->rings[i]; 1960 struct amdgpu_ring *ring = adev->rings[i];
1961
1881 if (!ring) 1962 if (!ring)
1882 continue; 1963 continue;
1883 1964 kthread_park(ring->sched.thread);
1884 ring_sizes[i] = amdgpu_ring_backup(ring, &ring_data[i]); 1965 amd_sched_hw_job_reset(&ring->sched);
1885 if (ring_sizes[i]) {
1886 saved = true;
1887 dev_info(adev->dev, "Saved %d dwords of commands "
1888 "on ring %d.\n", ring_sizes[i], i);
1889 }
1890 } 1966 }
1967 /* after all hw jobs are reset, hw fence is meaningless, so force_completion */
1968 amdgpu_fence_driver_force_completion(adev);
1969
1970 /* save scratch */
1971 amdgpu_atombios_scratch_regs_save(adev);
1972 r = amdgpu_suspend(adev);
1891 1973
1892retry: 1974retry:
1975 /* Disable fb access */
1976 if (adev->mode_info.num_crtc) {
1977 struct amdgpu_mode_mc_save save;
1978 amdgpu_display_stop_mc_access(adev, &save);
1979 amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
1980 }
1981
1893 r = amdgpu_asic_reset(adev); 1982 r = amdgpu_asic_reset(adev);
1894 /* post card */ 1983 /* post card */
1895 amdgpu_atom_asic_init(adev->mode_info.atom_context); 1984 amdgpu_atom_asic_init(adev->mode_info.atom_context);
@@ -1898,32 +1987,29 @@ retry:
1898 dev_info(adev->dev, "GPU reset succeeded, trying to resume\n"); 1987 dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
1899 r = amdgpu_resume(adev); 1988 r = amdgpu_resume(adev);
1900 } 1989 }
1901 1990 /* restore scratch */
1991 amdgpu_atombios_scratch_regs_restore(adev);
1902 if (!r) { 1992 if (!r) {
1993 r = amdgpu_ib_ring_tests(adev);
1994 if (r) {
1995 dev_err(adev->dev, "ib ring test failed (%d).\n", r);
1996 r = amdgpu_suspend(adev);
1997 goto retry;
1998 }
1999
1903 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 2000 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
1904 struct amdgpu_ring *ring = adev->rings[i]; 2001 struct amdgpu_ring *ring = adev->rings[i];
1905 if (!ring) 2002 if (!ring)
1906 continue; 2003 continue;
1907 2004 amd_sched_job_recovery(&ring->sched);
1908 amdgpu_ring_restore(ring, ring_sizes[i], ring_data[i]); 2005 kthread_unpark(ring->sched.thread);
1909 ring_sizes[i] = 0;
1910 ring_data[i] = NULL;
1911 }
1912
1913 r = amdgpu_ib_ring_tests(adev);
1914 if (r) {
1915 dev_err(adev->dev, "ib ring test failed (%d).\n", r);
1916 if (saved) {
1917 saved = false;
1918 r = amdgpu_suspend(adev);
1919 goto retry;
1920 }
1921 } 2006 }
1922 } else { 2007 } else {
1923 amdgpu_fence_driver_force_completion(adev); 2008 dev_err(adev->dev, "asic resume failed (%d).\n", r);
1924 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { 2009 for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
1925 if (adev->rings[i]) 2010 if (adev->rings[i]) {
1926 kfree(ring_data[i]); 2011 kthread_unpark(adev->rings[i]->sched.thread);
2012 }
1927 } 2013 }
1928 } 2014 }
1929 2015
@@ -1934,13 +2020,11 @@ retry:
1934 /* bad news, how to tell it to userspace ? */ 2020 /* bad news, how to tell it to userspace ? */
1935 dev_info(adev->dev, "GPU reset failed\n"); 2021 dev_info(adev->dev, "GPU reset failed\n");
1936 } 2022 }
2023 amdgpu_irq_gpu_reset_resume_helper(adev);
1937 2024
1938 return r; 2025 return r;
1939} 2026}
1940 2027
1941#define AMDGPU_DEFAULT_PCIE_GEN_MASK 0x30007 /* gen: chipset 1/2, asic 1/2/3 */
1942#define AMDGPU_DEFAULT_PCIE_MLW_MASK 0x2f0000 /* 1/2/4/8/16 lanes */
1943
1944void amdgpu_get_pcie_info(struct amdgpu_device *adev) 2028void amdgpu_get_pcie_info(struct amdgpu_device *adev)
1945{ 2029{
1946 u32 mask; 2030 u32 mask;
@@ -2094,20 +2178,43 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
2094 struct amdgpu_device *adev = f->f_inode->i_private; 2178 struct amdgpu_device *adev = f->f_inode->i_private;
2095 ssize_t result = 0; 2179 ssize_t result = 0;
2096 int r; 2180 int r;
2181 bool use_bank;
2182 unsigned instance_bank, sh_bank, se_bank;
2097 2183
2098 if (size & 0x3 || *pos & 0x3) 2184 if (size & 0x3 || *pos & 0x3)
2099 return -EINVAL; 2185 return -EINVAL;
2100 2186
2187 if (*pos & (1ULL << 62)) {
2188 se_bank = (*pos >> 24) & 0x3FF;
2189 sh_bank = (*pos >> 34) & 0x3FF;
2190 instance_bank = (*pos >> 44) & 0x3FF;
2191 use_bank = 1;
2192 *pos &= 0xFFFFFF;
2193 } else {
2194 use_bank = 0;
2195 }
2196
2197 if (use_bank) {
2198 if (sh_bank >= adev->gfx.config.max_sh_per_se ||
2199 se_bank >= adev->gfx.config.max_shader_engines)
2200 return -EINVAL;
2201 mutex_lock(&adev->grbm_idx_mutex);
2202 amdgpu_gfx_select_se_sh(adev, se_bank,
2203 sh_bank, instance_bank);
2204 }
2205
2101 while (size) { 2206 while (size) {
2102 uint32_t value; 2207 uint32_t value;
2103 2208
2104 if (*pos > adev->rmmio_size) 2209 if (*pos > adev->rmmio_size)
2105 return result; 2210 goto end;
2106 2211
2107 value = RREG32(*pos >> 2); 2212 value = RREG32(*pos >> 2);
2108 r = put_user(value, (uint32_t *)buf); 2213 r = put_user(value, (uint32_t *)buf);
2109 if (r) 2214 if (r) {
2110 return r; 2215 result = r;
2216 goto end;
2217 }
2111 2218
2112 result += 4; 2219 result += 4;
2113 buf += 4; 2220 buf += 4;
@@ -2115,6 +2222,12 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
2115 size -= 4; 2222 size -= 4;
2116 } 2223 }
2117 2224
2225end:
2226 if (use_bank) {
2227 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
2228 mutex_unlock(&adev->grbm_idx_mutex);
2229 }
2230
2118 return result; 2231 return result;
2119} 2232}
2120 2233
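
[Illustrative note, not part of the commit] The banked read path added in the hunk above overloads the 64-bit file offset of the amdgpu_regs debugfs file to carry a GRBM bank selection: bit 62 enables banking, SE/SH/instance sit in bits 24-33, 34-43 and 44-53, and the low 24 bits are the 4-byte-aligned MMIO offset. A minimal userspace sketch that composes such an offset could look like the following; the debugfs path and the register offset are assumptions, and on 32-bit userspace it needs -D_FILE_OFFSET_BITS=64 so the full offset survives the pread() call.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Compose a banked offset as decoded by amdgpu_debugfs_regs_read() above:
 * bit 62 enables bank selection, SE/SH/instance sit in bits 24-33, 34-43
 * and 44-53, and the low 24 bits carry the (4-byte aligned) MMIO offset. */
static uint64_t banked_offset(uint32_t mmio_off, uint32_t se, uint32_t sh,
                              uint32_t instance)
{
        return (1ULL << 62) | ((uint64_t)se << 24) | ((uint64_t)sh << 34) |
               ((uint64_t)instance << 44) | (mmio_off & 0xFFFFFCULL);
}

int main(void)
{
        /* debugfs path is an assumption; it depends on the DRM minor */
        int fd = open("/sys/kernel/debug/dri/0/amdgpu_regs", O_RDONLY);
        uint32_t value;

        if (fd < 0)
                return 1;
        /* 0x9834 is a placeholder register offset; SE 1, SH 0, instance 0 */
        if (pread(fd, &value, sizeof(value),
                  (off_t)banked_offset(0x9834, 1, 0, 0)) == sizeof(value))
                printf("0x%08x\n", value);
        close(fd);
        return 0;
}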
@@ -2314,6 +2427,68 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
2314 return result; 2427 return result;
2315} 2428}
2316 2429
2430static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
2431 size_t size, loff_t *pos)
2432{
2433 struct amdgpu_device *adev = f->f_inode->i_private;
2434 ssize_t result = 0;
2435 int r;
2436 uint32_t *config, no_regs = 0;
2437
2438 if (size & 0x3 || *pos & 0x3)
2439 return -EINVAL;
2440
2441 config = kmalloc(256 * sizeof(*config), GFP_KERNEL);
2442 if (!config)
2443 return -ENOMEM;
2444
2445 /* version, increment each time something is added */
2446 config[no_regs++] = 0;
2447 config[no_regs++] = adev->gfx.config.max_shader_engines;
2448 config[no_regs++] = adev->gfx.config.max_tile_pipes;
2449 config[no_regs++] = adev->gfx.config.max_cu_per_sh;
2450 config[no_regs++] = adev->gfx.config.max_sh_per_se;
2451 config[no_regs++] = adev->gfx.config.max_backends_per_se;
2452 config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
2453 config[no_regs++] = adev->gfx.config.max_gprs;
2454 config[no_regs++] = adev->gfx.config.max_gs_threads;
2455 config[no_regs++] = adev->gfx.config.max_hw_contexts;
2456 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
2457 config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
2458 config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
2459 config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
2460 config[no_regs++] = adev->gfx.config.num_tile_pipes;
2461 config[no_regs++] = adev->gfx.config.backend_enable_mask;
2462 config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
2463 config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
2464 config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
2465 config[no_regs++] = adev->gfx.config.num_gpus;
2466 config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
2467 config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
2468 config[no_regs++] = adev->gfx.config.gb_addr_config;
2469 config[no_regs++] = adev->gfx.config.num_rbs;
2470
2471 while (size && (*pos < no_regs * 4)) {
2472 uint32_t value;
2473
2474 value = config[*pos >> 2];
2475 r = put_user(value, (uint32_t *)buf);
2476 if (r) {
2477 kfree(config);
2478 return r;
2479 }
2480
2481 result += 4;
2482 buf += 4;
2483 *pos += 4;
2484 size -= 4;
2485 }
2486
2487 kfree(config);
2488 return result;
2489}
2490
2491
2317static const struct file_operations amdgpu_debugfs_regs_fops = { 2492static const struct file_operations amdgpu_debugfs_regs_fops = {
2318 .owner = THIS_MODULE, 2493 .owner = THIS_MODULE,
2319 .read = amdgpu_debugfs_regs_read, 2494 .read = amdgpu_debugfs_regs_read,
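
[Illustrative note, not part of the commit] The new amdgpu_gca_config file added above returns the GFX configuration as a flat array of 32-bit words, with word 0 acting as a layout version so fields can be appended later without breaking older readers. A hypothetical reader, again with an assumed debugfs path, might be:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        uint32_t words[256];
        /* path is an assumption; it depends on the DRM minor */
        int fd = open("/sys/kernel/debug/dri/0/amdgpu_gca_config", O_RDONLY);
        ssize_t n;

        if (fd < 0)
                return 1;
        n = read(fd, words, sizeof(words));
        close(fd);
        if (n < (ssize_t)(4 * sizeof(uint32_t)))
                return 1;

        /* field order matches the version-0 layout written above */
        printf("version:            %u\n", words[0]);
        printf("max_shader_engines: %u\n", words[1]);
        printf("max_tile_pipes:     %u\n", words[2]);
        printf("max_cu_per_sh:      %u\n", words[3]);
        return 0;
}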
@@ -2339,11 +2514,18 @@ static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
2339 .llseek = default_llseek 2514 .llseek = default_llseek
2340}; 2515};
2341 2516
2517static const struct file_operations amdgpu_debugfs_gca_config_fops = {
2518 .owner = THIS_MODULE,
2519 .read = amdgpu_debugfs_gca_config_read,
2520 .llseek = default_llseek
2521};
2522
2342static const struct file_operations *debugfs_regs[] = { 2523static const struct file_operations *debugfs_regs[] = {
2343 &amdgpu_debugfs_regs_fops, 2524 &amdgpu_debugfs_regs_fops,
2344 &amdgpu_debugfs_regs_didt_fops, 2525 &amdgpu_debugfs_regs_didt_fops,
2345 &amdgpu_debugfs_regs_pcie_fops, 2526 &amdgpu_debugfs_regs_pcie_fops,
2346 &amdgpu_debugfs_regs_smc_fops, 2527 &amdgpu_debugfs_regs_smc_fops,
2528 &amdgpu_debugfs_gca_config_fops,
2347}; 2529};
2348 2530
2349static const char *debugfs_regs_names[] = { 2531static const char *debugfs_regs_names[] = {
@@ -2351,6 +2533,7 @@ static const char *debugfs_regs_names[] = {
2351 "amdgpu_regs_didt", 2533 "amdgpu_regs_didt",
2352 "amdgpu_regs_pcie", 2534 "amdgpu_regs_pcie",
2353 "amdgpu_regs_smc", 2535 "amdgpu_regs_smc",
2536 "amdgpu_gca_config",
2354}; 2537};
2355 2538
2356static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev) 2539static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index b0832da2ef7e..76f96028313d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -122,7 +122,7 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
122 spin_unlock_irqrestore(&crtc->dev->event_lock, flags); 122 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
123 usleep_range(min_udelay, 2 * min_udelay); 123 usleep_range(min_udelay, 2 * min_udelay);
124 spin_lock_irqsave(&crtc->dev->event_lock, flags); 124 spin_lock_irqsave(&crtc->dev->event_lock, flags);
125 }; 125 }
126 126
127 if (!repcnt) 127 if (!repcnt)
128 DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, " 128 DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, "
@@ -220,19 +220,17 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
220 220
221 r = amdgpu_bo_pin_restricted(new_rbo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, &base); 221 r = amdgpu_bo_pin_restricted(new_rbo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, &base);
222 if (unlikely(r != 0)) { 222 if (unlikely(r != 0)) {
223 amdgpu_bo_unreserve(new_rbo);
224 r = -EINVAL; 223 r = -EINVAL;
225 DRM_ERROR("failed to pin new rbo buffer before flip\n"); 224 DRM_ERROR("failed to pin new rbo buffer before flip\n");
226 goto cleanup; 225 goto unreserve;
227 } 226 }
228 227
229 r = reservation_object_get_fences_rcu(new_rbo->tbo.resv, &work->excl, 228 r = reservation_object_get_fences_rcu(new_rbo->tbo.resv, &work->excl,
230 &work->shared_count, 229 &work->shared_count,
231 &work->shared); 230 &work->shared);
232 if (unlikely(r != 0)) { 231 if (unlikely(r != 0)) {
233 amdgpu_bo_unreserve(new_rbo);
234 DRM_ERROR("failed to get fences for buffer\n"); 232 DRM_ERROR("failed to get fences for buffer\n");
235 goto cleanup; 233 goto unpin;
236 } 234 }
237 235
238 amdgpu_bo_get_tiling_flags(new_rbo, &tiling_flags); 236 amdgpu_bo_get_tiling_flags(new_rbo, &tiling_flags);
@@ -240,7 +238,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
240 238
241 work->base = base; 239 work->base = base;
242 240
243 r = drm_vblank_get(crtc->dev, amdgpu_crtc->crtc_id); 241 r = drm_crtc_vblank_get(crtc);
244 if (r) { 242 if (r) {
245 DRM_ERROR("failed to get vblank before flip\n"); 243 DRM_ERROR("failed to get vblank before flip\n");
246 goto pflip_cleanup; 244 goto pflip_cleanup;
@@ -268,16 +266,18 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
268 return 0; 266 return 0;
269 267
270vblank_cleanup: 268vblank_cleanup:
271 drm_vblank_put(crtc->dev, amdgpu_crtc->crtc_id); 269 drm_crtc_vblank_put(crtc);
272 270
273pflip_cleanup: 271pflip_cleanup:
274 if (unlikely(amdgpu_bo_reserve(new_rbo, false) != 0)) { 272 if (unlikely(amdgpu_bo_reserve(new_rbo, false) != 0)) {
275 DRM_ERROR("failed to reserve new rbo in error path\n"); 273 DRM_ERROR("failed to reserve new rbo in error path\n");
276 goto cleanup; 274 goto cleanup;
277 } 275 }
276unpin:
278 if (unlikely(amdgpu_bo_unpin(new_rbo) != 0)) { 277 if (unlikely(amdgpu_bo_unpin(new_rbo) != 0)) {
279 DRM_ERROR("failed to unpin new rbo in error path\n"); 278 DRM_ERROR("failed to unpin new rbo in error path\n");
280 } 279 }
280unreserve:
281 amdgpu_bo_unreserve(new_rbo); 281 amdgpu_bo_unreserve(new_rbo);
282 282
283cleanup: 283cleanup:
@@ -516,9 +516,7 @@ static void amdgpu_user_framebuffer_destroy(struct drm_framebuffer *fb)
516{ 516{
517 struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb); 517 struct amdgpu_framebuffer *amdgpu_fb = to_amdgpu_framebuffer(fb);
518 518
519 if (amdgpu_fb->obj) { 519 drm_gem_object_unreference_unlocked(amdgpu_fb->obj);
520 drm_gem_object_unreference_unlocked(amdgpu_fb->obj);
521 }
522 drm_framebuffer_cleanup(fb); 520 drm_framebuffer_cleanup(fb);
523 kfree(amdgpu_fb); 521 kfree(amdgpu_fb);
524} 522}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index f888c015f76c..9aa533cf4ad1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -52,9 +52,10 @@
52 * - 3.1.0 - allow reading more status registers (GRBM, SRBM, SDMA, CP) 52 * - 3.1.0 - allow reading more status registers (GRBM, SRBM, SDMA, CP)
53 * - 3.2.0 - GFX8: Uses EOP_TC_WB_ACTION_EN, so UMDs don't have to do the same 53 * - 3.2.0 - GFX8: Uses EOP_TC_WB_ACTION_EN, so UMDs don't have to do the same
54 * at the end of IBs. 54 * at the end of IBs.
55 * - 3.3.0 - Add VM support for UVD on supported hardware.
55 */ 56 */
56#define KMS_DRIVER_MAJOR 3 57#define KMS_DRIVER_MAJOR 3
57#define KMS_DRIVER_MINOR 2 58#define KMS_DRIVER_MINOR 3
58#define KMS_DRIVER_PATCHLEVEL 0 59#define KMS_DRIVER_PATCHLEVEL 0
59 60
60int amdgpu_vram_limit = 0; 61int amdgpu_vram_limit = 0;
@@ -82,8 +83,12 @@ int amdgpu_exp_hw_support = 0;
82int amdgpu_sched_jobs = 32; 83int amdgpu_sched_jobs = 32;
83int amdgpu_sched_hw_submission = 2; 84int amdgpu_sched_hw_submission = 2;
84int amdgpu_powerplay = -1; 85int amdgpu_powerplay = -1;
86int amdgpu_powercontainment = 1;
85unsigned amdgpu_pcie_gen_cap = 0; 87unsigned amdgpu_pcie_gen_cap = 0;
86unsigned amdgpu_pcie_lane_cap = 0; 88unsigned amdgpu_pcie_lane_cap = 0;
89unsigned amdgpu_cg_mask = 0xffffffff;
90unsigned amdgpu_pg_mask = 0xffffffff;
91char *amdgpu_disable_cu = NULL;
87 92
88MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes"); 93MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
89module_param_named(vramlimit, amdgpu_vram_limit, int, 0600); 94module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -160,6 +165,9 @@ module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
160#ifdef CONFIG_DRM_AMD_POWERPLAY 165#ifdef CONFIG_DRM_AMD_POWERPLAY
161MODULE_PARM_DESC(powerplay, "Powerplay component (1 = enable, 0 = disable, -1 = auto (default))"); 166MODULE_PARM_DESC(powerplay, "Powerplay component (1 = enable, 0 = disable, -1 = auto (default))");
162module_param_named(powerplay, amdgpu_powerplay, int, 0444); 167module_param_named(powerplay, amdgpu_powerplay, int, 0444);
168
169MODULE_PARM_DESC(powercontainment, "Power Containment (1 = enable (default), 0 = disable)");
170module_param_named(powercontainment, amdgpu_powercontainment, int, 0444);
163#endif 171#endif
164 172
165MODULE_PARM_DESC(pcie_gen_cap, "PCIE Gen Caps (0: autodetect (default))"); 173MODULE_PARM_DESC(pcie_gen_cap, "PCIE Gen Caps (0: autodetect (default))");
@@ -168,6 +176,15 @@ module_param_named(pcie_gen_cap, amdgpu_pcie_gen_cap, uint, 0444);
168MODULE_PARM_DESC(pcie_lane_cap, "PCIE Lane Caps (0: autodetect (default))"); 176MODULE_PARM_DESC(pcie_lane_cap, "PCIE Lane Caps (0: autodetect (default))");
169module_param_named(pcie_lane_cap, amdgpu_pcie_lane_cap, uint, 0444); 177module_param_named(pcie_lane_cap, amdgpu_pcie_lane_cap, uint, 0444);
170 178
179MODULE_PARM_DESC(cg_mask, "Clockgating flags mask (0 = disable clock gating)");
180module_param_named(cg_mask, amdgpu_cg_mask, uint, 0444);
181
182MODULE_PARM_DESC(pg_mask, "Powergating flags mask (0 = disable power gating)");
183module_param_named(pg_mask, amdgpu_pg_mask, uint, 0444);
184
185MODULE_PARM_DESC(disable_cu, "Disable CUs (se.sh.cu,...)");
186module_param_named(disable_cu, amdgpu_disable_cu, charp, 0444);
187
171static const struct pci_device_id pciidlist[] = { 188static const struct pci_device_id pciidlist[] = {
172#ifdef CONFIG_DRM_AMDGPU_CIK 189#ifdef CONFIG_DRM_AMDGPU_CIK
173 /* Kaveri */ 190 /* Kaveri */
@@ -413,7 +430,10 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
413 pci_save_state(pdev); 430 pci_save_state(pdev);
414 pci_disable_device(pdev); 431 pci_disable_device(pdev);
415 pci_ignore_hotplug(pdev); 432 pci_ignore_hotplug(pdev);
416 pci_set_power_state(pdev, PCI_D3cold); 433 if (amdgpu_is_atpx_hybrid())
434 pci_set_power_state(pdev, PCI_D3cold);
435 else if (!amdgpu_has_atpx_dgpu_power_cntl())
436 pci_set_power_state(pdev, PCI_D3hot);
417 drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF; 437 drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
418 438
419 return 0; 439 return 0;
@@ -430,7 +450,9 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
430 450
431 drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 451 drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
432 452
433 pci_set_power_state(pdev, PCI_D0); 453 if (amdgpu_is_atpx_hybrid() ||
454 !amdgpu_has_atpx_dgpu_power_cntl())
455 pci_set_power_state(pdev, PCI_D0);
434 pci_restore_state(pdev); 456 pci_restore_state(pdev);
435 ret = pci_enable_device(pdev); 457 ret = pci_enable_device(pdev);
436 if (ret) 458 if (ret)
@@ -515,7 +537,7 @@ static struct drm_driver kms_driver = {
515 .driver_features = 537 .driver_features =
516 DRIVER_USE_AGP | 538 DRIVER_USE_AGP |
517 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | 539 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
518 DRIVER_PRIME | DRIVER_RENDER, 540 DRIVER_PRIME | DRIVER_RENDER | DRIVER_MODESET,
519 .dev_priv_size = 0, 541 .dev_priv_size = 0,
520 .load = amdgpu_driver_load_kms, 542 .load = amdgpu_driver_load_kms,
521 .open = amdgpu_driver_open_kms, 543 .open = amdgpu_driver_open_kms,
@@ -590,7 +612,6 @@ static int __init amdgpu_init(void)
590 DRM_INFO("amdgpu kernel modesetting enabled.\n"); 612 DRM_INFO("amdgpu kernel modesetting enabled.\n");
591 driver = &kms_driver; 613 driver = &kms_driver;
592 pdriver = &amdgpu_kms_pci_driver; 614 pdriver = &amdgpu_kms_pci_driver;
593 driver->driver_features |= DRIVER_MODESET;
594 driver->num_ioctls = amdgpu_max_kms_ioctl; 615 driver->num_ioctls = amdgpu_max_kms_ioctl;
595 amdgpu_register_atpx_handler(); 616 amdgpu_register_atpx_handler();
596 /* let modprobe override vga console setting */ 617 /* let modprobe override vga console setting */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index d1558768cfb7..0b109aebfec6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -204,16 +204,25 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
204 if (seq != ring->fence_drv.sync_seq) 204 if (seq != ring->fence_drv.sync_seq)
205 amdgpu_fence_schedule_fallback(ring); 205 amdgpu_fence_schedule_fallback(ring);
206 206
207 while (last_seq != seq) { 207 if (unlikely(seq == last_seq))
208 return;
209
210 last_seq &= drv->num_fences_mask;
211 seq &= drv->num_fences_mask;
212
213 do {
208 struct fence *fence, **ptr; 214 struct fence *fence, **ptr;
209 215
210 ptr = &drv->fences[++last_seq & drv->num_fences_mask]; 216 ++last_seq;
217 last_seq &= drv->num_fences_mask;
218 ptr = &drv->fences[last_seq];
211 219
212 /* There is always exactly one thread signaling this fence slot */ 220 /* There is always exactly one thread signaling this fence slot */
213 fence = rcu_dereference_protected(*ptr, 1); 221 fence = rcu_dereference_protected(*ptr, 1);
214 RCU_INIT_POINTER(*ptr, NULL); 222 RCU_INIT_POINTER(*ptr, NULL);
215 223
216 BUG_ON(!fence); 224 if (!fence)
225 continue;
217 226
218 r = fence_signal(fence); 227 r = fence_signal(fence);
219 if (!r) 228 if (!r)
@@ -222,7 +231,7 @@ void amdgpu_fence_process(struct amdgpu_ring *ring)
222 BUG(); 231 BUG();
223 232
224 fence_put(fence); 233 fence_put(fence);
225 } 234 } while (last_seq != seq);
226} 235}
227 236
228/** 237/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 8fab6486064f..88fbed2389c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -503,7 +503,7 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
503 if (r) 503 if (r)
504 goto error_print; 504 goto error_print;
505 505
506 amdgpu_vm_get_pt_bos(bo_va->vm, &duplicates); 506 amdgpu_vm_get_pt_bos(adev, bo_va->vm, &duplicates);
507 list_for_each_entry(entry, &list, head) { 507 list_for_each_entry(entry, &list, head) {
508 domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type); 508 domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
509 /* if anything is swapped out don't swap it in here, 509 /* if anything is swapped out don't swap it in here,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 9f95da4f0536..a074edd95c70 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -70,3 +70,47 @@ void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg)
70 } 70 }
71 } 71 }
72} 72}
73
74/**
75 * amdgpu_gfx_parse_disable_cu - Parse the disable_cu module parameter
76 *
77 * @mask: array in which the per-shader array disable masks will be stored
78 * @max_se: number of SEs
79 * @max_sh: number of SHs
80 *
81 * The bitmask of CUs to be disabled in the shader array determined by se and
82 * sh is stored in mask[se * max_sh + sh].
83 */
84void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh)
85{
86 unsigned se, sh, cu;
87 const char *p;
88
89 memset(mask, 0, sizeof(*mask) * max_se * max_sh);
90
91 if (!amdgpu_disable_cu || !*amdgpu_disable_cu)
92 return;
93
94 p = amdgpu_disable_cu;
95 for (;;) {
96 char *next;
97 int ret = sscanf(p, "%u.%u.%u", &se, &sh, &cu);
98 if (ret < 3) {
99 DRM_ERROR("amdgpu: could not parse disable_cu\n");
100 return;
101 }
102
103 if (se < max_se && sh < max_sh && cu < 16) {
104 DRM_INFO("amdgpu: disabling CU %u.%u.%u\n", se, sh, cu);
105 mask[se * max_sh + sh] |= 1u << cu;
106 } else {
107 DRM_ERROR("amdgpu: disable_cu %u.%u.%u is out of range\n",
108 se, sh, cu);
109 }
110
111 next = strchr(p, ',');
112 if (!next)
113 break;
114 p = next + 1;
115 }
116}
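
[Illustrative note, not part of the commit] The disable_cu string parsed above is a comma-separated list of se.sh.cu triples (for example amdgpu.disable_cu=0.0.3,0.1.7 on the kernel command line), and each triple sets bit cu in mask[se * max_sh + sh]. A tiny standalone sketch of that indexing, using made-up SE/SH counts, is:

#include <stdio.h>

int main(void)
{
        /* hypothetical topology: 2 shader engines, 2 shader arrays each */
        enum { MAX_SE = 2, MAX_SH = 2 };
        unsigned mask[MAX_SE * MAX_SH] = { 0 };
        /* same triples as a disable_cu=0.0.3,0.1.7 parameter */
        struct { unsigned se, sh, cu; } req[] = { { 0, 0, 3 }, { 0, 1, 7 } };
        unsigned i;

        for (i = 0; i < sizeof(req) / sizeof(req[0]); i++)
                mask[req[i].se * MAX_SH + req[i].sh] |= 1u << req[i].cu;

        for (i = 0; i < MAX_SE * MAX_SH; i++)
                printf("mask[%u] = 0x%08x\n", i, mask[i]);
        return 0;
}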
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index dc06cbda7be6..51321e154c09 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -27,4 +27,6 @@
27int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg); 27int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg);
28void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg); 28void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg);
29 29
30unsigned amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh);
31
30#endif 32#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 34e35423b78e..a31d7ef3032c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -33,6 +33,8 @@
33#include "amdgpu.h" 33#include "amdgpu.h"
34#include "atom.h" 34#include "atom.h"
35 35
36#define AMDGPU_IB_TEST_TIMEOUT msecs_to_jiffies(1000)
37
36/* 38/*
37 * IB 39 * IB
38 * IBs (Indirect Buffers) and areas of GPU accessible memory where 40 * IBs (Indirect Buffers) and areas of GPU accessible memory where
@@ -122,7 +124,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
122 bool skip_preamble, need_ctx_switch; 124 bool skip_preamble, need_ctx_switch;
123 unsigned patch_offset = ~0; 125 unsigned patch_offset = ~0;
124 struct amdgpu_vm *vm; 126 struct amdgpu_vm *vm;
125 struct fence *hwf;
126 uint64_t ctx; 127 uint64_t ctx;
127 128
128 unsigned i; 129 unsigned i;
@@ -160,10 +161,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
160 patch_offset = amdgpu_ring_init_cond_exec(ring); 161 patch_offset = amdgpu_ring_init_cond_exec(ring);
161 162
162 if (vm) { 163 if (vm) {
163 r = amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr, 164 r = amdgpu_vm_flush(ring, job);
164 job->gds_base, job->gds_size,
165 job->gws_base, job->gws_size,
166 job->oa_base, job->oa_size);
167 if (r) { 165 if (r) {
168 amdgpu_ring_undo(ring); 166 amdgpu_ring_undo(ring);
169 return r; 167 return r;
@@ -193,7 +191,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
193 if (ring->funcs->emit_hdp_invalidate) 191 if (ring->funcs->emit_hdp_invalidate)
194 amdgpu_ring_emit_hdp_invalidate(ring); 192 amdgpu_ring_emit_hdp_invalidate(ring);
195 193
196 r = amdgpu_fence_emit(ring, &hwf); 194 r = amdgpu_fence_emit(ring, f);
197 if (r) { 195 if (r) {
198 dev_err(adev->dev, "failed to emit fence (%d)\n", r); 196 dev_err(adev->dev, "failed to emit fence (%d)\n", r);
199 if (job && job->vm_id) 197 if (job && job->vm_id)
@@ -203,17 +201,11 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
203 } 201 }
204 202
205 /* wrap the last IB with fence */ 203 /* wrap the last IB with fence */
206 if (job && job->uf_bo) { 204 if (job && job->uf_addr) {
207 uint64_t addr = amdgpu_bo_gpu_offset(job->uf_bo); 205 amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
208
209 addr += job->uf_offset;
210 amdgpu_ring_emit_fence(ring, addr, job->uf_sequence,
211 AMDGPU_FENCE_FLAG_64BIT); 206 AMDGPU_FENCE_FLAG_64BIT);
212 } 207 }
213 208
214 if (f)
215 *f = fence_get(hwf);
216
217 if (patch_offset != ~0 && ring->funcs->patch_cond_exec) 209 if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
218 amdgpu_ring_patch_cond_exec(ring, patch_offset); 210 amdgpu_ring_patch_cond_exec(ring, patch_offset);
219 211
@@ -296,7 +288,7 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
296 if (!ring || !ring->ready) 288 if (!ring || !ring->ready)
297 continue; 289 continue;
298 290
299 r = amdgpu_ring_test_ib(ring); 291 r = amdgpu_ring_test_ib(ring, AMDGPU_IB_TEST_TIMEOUT);
300 if (r) { 292 if (r) {
301 ring->ready = false; 293 ring->ready = false;
302 294
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 835a3fa8d8df..278708f5a744 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -383,6 +383,18 @@ int amdgpu_irq_update(struct amdgpu_device *adev,
383 return r; 383 return r;
384} 384}
385 385
386void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
387{
388 int i, j;
389 for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; i++) {
390 struct amdgpu_irq_src *src = adev->irq.sources[i];
391 if (!src)
392 continue;
393 for (j = 0; j < src->num_types; j++)
394 amdgpu_irq_update(adev, src, j);
395 }
396}
397
386/** 398/**
387 * amdgpu_irq_get - enable interrupt 399 * amdgpu_irq_get - enable interrupt
388 * 400 *
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
index e124b59f39c1..7ef09352e534 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
@@ -94,6 +94,7 @@ int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
94 unsigned type); 94 unsigned type);
95bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src, 95bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
96 unsigned type); 96 unsigned type);
97void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev);
97 98
98int amdgpu_irq_add_domain(struct amdgpu_device *adev); 99int amdgpu_irq_add_domain(struct amdgpu_device *adev);
99void amdgpu_irq_remove_domain(struct amdgpu_device *adev); 100void amdgpu_irq_remove_domain(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index f0dafa514fe4..6674d40eb3ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -28,21 +28,15 @@
28#include "amdgpu.h" 28#include "amdgpu.h"
29#include "amdgpu_trace.h" 29#include "amdgpu_trace.h"
30 30
31static void amdgpu_job_free_handler(struct work_struct *ws) 31static void amdgpu_job_timedout(struct amd_sched_job *s_job)
32{ 32{
33 struct amdgpu_job *job = container_of(ws, struct amdgpu_job, base.work_free_job); 33 struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
34 amd_sched_job_put(&job->base);
35}
36 34
37void amdgpu_job_timeout_func(struct work_struct *work)
38{
39 struct amdgpu_job *job = container_of(work, struct amdgpu_job, base.work_tdr.work);
40 DRM_ERROR("ring %s timeout, last signaled seq=%u, last emitted seq=%u\n", 35 DRM_ERROR("ring %s timeout, last signaled seq=%u, last emitted seq=%u\n",
41 job->base.sched->name, 36 job->base.sched->name,
42 (uint32_t)atomic_read(&job->ring->fence_drv.last_seq), 37 atomic_read(&job->ring->fence_drv.last_seq),
43 job->ring->fence_drv.sync_seq); 38 job->ring->fence_drv.sync_seq);
44 39 amdgpu_gpu_reset(job->adev);
45 amd_sched_job_put(&job->base);
46} 40}
47 41
48int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, 42int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
@@ -63,7 +57,6 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
63 (*job)->vm = vm; 57 (*job)->vm = vm;
64 (*job)->ibs = (void *)&(*job)[1]; 58 (*job)->ibs = (void *)&(*job)[1];
65 (*job)->num_ibs = num_ibs; 59 (*job)->num_ibs = num_ibs;
66 INIT_WORK(&(*job)->base.work_free_job, amdgpu_job_free_handler);
67 60
68 amdgpu_sync_create(&(*job)->sync); 61 amdgpu_sync_create(&(*job)->sync);
69 62
@@ -86,27 +79,33 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
86 return r; 79 return r;
87} 80}
88 81
89void amdgpu_job_free(struct amdgpu_job *job) 82void amdgpu_job_free_resources(struct amdgpu_job *job)
90{ 83{
91 unsigned i;
92 struct fence *f; 84 struct fence *f;
85 unsigned i;
86
93 /* use sched fence if available */ 87 /* use sched fence if available */
94 f = (job->base.s_fence)? &job->base.s_fence->base : job->fence; 88 f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;
95 89
96 for (i = 0; i < job->num_ibs; ++i) 90 for (i = 0; i < job->num_ibs; ++i)
97 amdgpu_sa_bo_free(job->adev, &job->ibs[i].sa_bo, f); 91 amdgpu_ib_free(job->adev, &job->ibs[i], f);
98 fence_put(job->fence); 92}
99 93
100 amdgpu_bo_unref(&job->uf_bo); 94void amdgpu_job_free_cb(struct amd_sched_job *s_job)
101 amdgpu_sync_free(&job->sync); 95{
96 struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
102 97
103 if (!job->base.use_sched) 98 fence_put(job->fence);
104 kfree(job); 99 amdgpu_sync_free(&job->sync);
100 kfree(job);
105} 101}
106 102
107void amdgpu_job_free_func(struct kref *refcount) 103void amdgpu_job_free(struct amdgpu_job *job)
108{ 104{
109 struct amdgpu_job *job = container_of(refcount, struct amdgpu_job, base.refcount); 105 amdgpu_job_free_resources(job);
106
107 fence_put(job->fence);
108 amdgpu_sync_free(&job->sync);
110 kfree(job); 109 kfree(job);
111} 110}
112 111
@@ -114,22 +113,20 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
114 struct amd_sched_entity *entity, void *owner, 113 struct amd_sched_entity *entity, void *owner,
115 struct fence **f) 114 struct fence **f)
116{ 115{
117 struct fence *fence;
118 int r; 116 int r;
119 job->ring = ring; 117 job->ring = ring;
120 118
121 if (!f) 119 if (!f)
122 return -EINVAL; 120 return -EINVAL;
123 121
124 r = amd_sched_job_init(&job->base, &ring->sched, 122 r = amd_sched_job_init(&job->base, &ring->sched, entity, owner);
125 entity, amdgpu_job_timeout_func,
126 amdgpu_job_free_func, owner, &fence);
127 if (r) 123 if (r)
128 return r; 124 return r;
129 125
130 job->owner = owner; 126 job->owner = owner;
131 job->ctx = entity->fence_context; 127 job->ctx = entity->fence_context;
132 *f = fence_get(fence); 128 *f = fence_get(&job->base.s_fence->finished);
129 amdgpu_job_free_resources(job);
133 amd_sched_entity_push_job(&job->base); 130 amd_sched_entity_push_job(&job->base);
134 131
135 return 0; 132 return 0;
@@ -147,8 +144,8 @@ static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
147 int r; 144 int r;
148 145
149 r = amdgpu_vm_grab_id(vm, ring, &job->sync, 146 r = amdgpu_vm_grab_id(vm, ring, &job->sync,
150 &job->base.s_fence->base, 147 &job->base.s_fence->finished,
151 &job->vm_id, &job->vm_pd_addr); 148 job);
152 if (r) 149 if (r)
153 DRM_ERROR("Error getting VM ID (%d)\n", r); 150 DRM_ERROR("Error getting VM ID (%d)\n", r);
154 151
@@ -170,29 +167,24 @@ static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
170 } 167 }
171 job = to_amdgpu_job(sched_job); 168 job = to_amdgpu_job(sched_job);
172 169
173 r = amdgpu_sync_wait(&job->sync); 170 BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));
174 if (r) {
175 DRM_ERROR("failed to sync wait (%d)\n", r);
176 return NULL;
177 }
178 171
179 trace_amdgpu_sched_run_job(job); 172 trace_amdgpu_sched_run_job(job);
180 r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, 173 r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs,
181 job->sync.last_vm_update, job, &fence); 174 job->sync.last_vm_update, job, &fence);
182 if (r) { 175 if (r)
183 DRM_ERROR("Error scheduling IBs (%d)\n", r); 176 DRM_ERROR("Error scheduling IBs (%d)\n", r);
184 goto err;
185 }
186 177
187err: 178 /* if gpu reset, hw fence will be replaced here */
188 job->fence = fence; 179 fence_put(job->fence);
189 amdgpu_job_free(job); 180 job->fence = fence_get(fence);
181 amdgpu_job_free_resources(job);
190 return fence; 182 return fence;
191} 183}
192 184
193const struct amd_sched_backend_ops amdgpu_sched_ops = { 185const struct amd_sched_backend_ops amdgpu_sched_ops = {
194 .dependency = amdgpu_job_dependency, 186 .dependency = amdgpu_job_dependency,
195 .run_job = amdgpu_job_run, 187 .run_job = amdgpu_job_run,
196 .begin_job = amd_sched_job_begin, 188 .timedout_job = amdgpu_job_timedout,
197 .finish_job = amd_sched_job_finish, 189 .free_job = amdgpu_job_free_cb
198}; 190};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index d851ea15059f..d942654a1de0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -60,7 +60,10 @@ int amdgpu_driver_unload_kms(struct drm_device *dev)
60 if (adev->rmmio == NULL) 60 if (adev->rmmio == NULL)
61 goto done_free; 61 goto done_free;
62 62
63 pm_runtime_get_sync(dev->dev); 63 if (amdgpu_device_is_px(dev)) {
64 pm_runtime_get_sync(dev->dev);
65 pm_runtime_forbid(dev->dev);
66 }
64 67
65 amdgpu_amdkfd_device_fini(adev); 68 amdgpu_amdkfd_device_fini(adev);
66 69
@@ -135,13 +138,75 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
135 } 138 }
136 139
137out: 140out:
138 if (r) 141 if (r) {
142 /* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
143 if (adev->rmmio && amdgpu_device_is_px(dev))
144 pm_runtime_put_noidle(dev->dev);
139 amdgpu_driver_unload_kms(dev); 145 amdgpu_driver_unload_kms(dev);
140 146 }
141 147
142 return r; 148 return r;
143} 149}
144 150
151static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
152 struct drm_amdgpu_query_fw *query_fw,
153 struct amdgpu_device *adev)
154{
155 switch (query_fw->fw_type) {
156 case AMDGPU_INFO_FW_VCE:
157 fw_info->ver = adev->vce.fw_version;
158 fw_info->feature = adev->vce.fb_version;
159 break;
160 case AMDGPU_INFO_FW_UVD:
161 fw_info->ver = adev->uvd.fw_version;
162 fw_info->feature = 0;
163 break;
164 case AMDGPU_INFO_FW_GMC:
165 fw_info->ver = adev->mc.fw_version;
166 fw_info->feature = 0;
167 break;
168 case AMDGPU_INFO_FW_GFX_ME:
169 fw_info->ver = adev->gfx.me_fw_version;
170 fw_info->feature = adev->gfx.me_feature_version;
171 break;
172 case AMDGPU_INFO_FW_GFX_PFP:
173 fw_info->ver = adev->gfx.pfp_fw_version;
174 fw_info->feature = adev->gfx.pfp_feature_version;
175 break;
176 case AMDGPU_INFO_FW_GFX_CE:
177 fw_info->ver = adev->gfx.ce_fw_version;
178 fw_info->feature = adev->gfx.ce_feature_version;
179 break;
180 case AMDGPU_INFO_FW_GFX_RLC:
181 fw_info->ver = adev->gfx.rlc_fw_version;
182 fw_info->feature = adev->gfx.rlc_feature_version;
183 break;
184 case AMDGPU_INFO_FW_GFX_MEC:
185 if (query_fw->index == 0) {
186 fw_info->ver = adev->gfx.mec_fw_version;
187 fw_info->feature = adev->gfx.mec_feature_version;
188 } else if (query_fw->index == 1) {
189 fw_info->ver = adev->gfx.mec2_fw_version;
190 fw_info->feature = adev->gfx.mec2_feature_version;
191 } else
192 return -EINVAL;
193 break;
194 case AMDGPU_INFO_FW_SMC:
195 fw_info->ver = adev->pm.fw_version;
196 fw_info->feature = 0;
197 break;
198 case AMDGPU_INFO_FW_SDMA:
199 if (query_fw->index >= adev->sdma.num_instances)
200 return -EINVAL;
201 fw_info->ver = adev->sdma.instance[query_fw->index].fw_version;
202 fw_info->feature = adev->sdma.instance[query_fw->index].feature_version;
203 break;
204 default:
205 return -EINVAL;
206 }
207 return 0;
208}
209
145/* 210/*
146 * Userspace get information ioctl 211 * Userspace get information ioctl
147 */ 212 */
@@ -288,67 +353,20 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
288 return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0; 353 return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0;
289 } 354 }
290 case AMDGPU_INFO_TIMESTAMP: 355 case AMDGPU_INFO_TIMESTAMP:
291 ui64 = amdgpu_asic_get_gpu_clock_counter(adev); 356 ui64 = amdgpu_gfx_get_gpu_clock_counter(adev);
292 return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0; 357 return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
293 case AMDGPU_INFO_FW_VERSION: { 358 case AMDGPU_INFO_FW_VERSION: {
294 struct drm_amdgpu_info_firmware fw_info; 359 struct drm_amdgpu_info_firmware fw_info;
360 int ret;
295 361
296 /* We only support one instance of each IP block right now. */ 362 /* We only support one instance of each IP block right now. */
297 if (info->query_fw.ip_instance != 0) 363 if (info->query_fw.ip_instance != 0)
298 return -EINVAL; 364 return -EINVAL;
299 365
300 switch (info->query_fw.fw_type) { 366 ret = amdgpu_firmware_info(&fw_info, &info->query_fw, adev);
301 case AMDGPU_INFO_FW_VCE: 367 if (ret)
302 fw_info.ver = adev->vce.fw_version; 368 return ret;
303 fw_info.feature = adev->vce.fb_version; 369
304 break;
305 case AMDGPU_INFO_FW_UVD:
306 fw_info.ver = adev->uvd.fw_version;
307 fw_info.feature = 0;
308 break;
309 case AMDGPU_INFO_FW_GMC:
310 fw_info.ver = adev->mc.fw_version;
311 fw_info.feature = 0;
312 break;
313 case AMDGPU_INFO_FW_GFX_ME:
314 fw_info.ver = adev->gfx.me_fw_version;
315 fw_info.feature = adev->gfx.me_feature_version;
316 break;
317 case AMDGPU_INFO_FW_GFX_PFP:
318 fw_info.ver = adev->gfx.pfp_fw_version;
319 fw_info.feature = adev->gfx.pfp_feature_version;
320 break;
321 case AMDGPU_INFO_FW_GFX_CE:
322 fw_info.ver = adev->gfx.ce_fw_version;
323 fw_info.feature = adev->gfx.ce_feature_version;
324 break;
325 case AMDGPU_INFO_FW_GFX_RLC:
326 fw_info.ver = adev->gfx.rlc_fw_version;
327 fw_info.feature = adev->gfx.rlc_feature_version;
328 break;
329 case AMDGPU_INFO_FW_GFX_MEC:
330 if (info->query_fw.index == 0) {
331 fw_info.ver = adev->gfx.mec_fw_version;
332 fw_info.feature = adev->gfx.mec_feature_version;
333 } else if (info->query_fw.index == 1) {
334 fw_info.ver = adev->gfx.mec2_fw_version;
335 fw_info.feature = adev->gfx.mec2_feature_version;
336 } else
337 return -EINVAL;
338 break;
339 case AMDGPU_INFO_FW_SMC:
340 fw_info.ver = adev->pm.fw_version;
341 fw_info.feature = 0;
342 break;
343 case AMDGPU_INFO_FW_SDMA:
344 if (info->query_fw.index >= adev->sdma.num_instances)
345 return -EINVAL;
346 fw_info.ver = adev->sdma.instance[info->query_fw.index].fw_version;
347 fw_info.feature = adev->sdma.instance[info->query_fw.index].feature_version;
348 break;
349 default:
350 return -EINVAL;
351 }
352 return copy_to_user(out, &fw_info, 370 return copy_to_user(out, &fw_info,
353 min((size_t)size, sizeof(fw_info))) ? -EFAULT : 0; 371 min((size_t)size, sizeof(fw_info))) ? -EFAULT : 0;
354 } 372 }
@@ -566,6 +584,9 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
566 584
567 amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr); 585 amdgpu_ctx_mgr_fini(&fpriv->ctx_mgr);
568 586
587 amdgpu_uvd_free_handles(adev, file_priv);
588 amdgpu_vce_free_handles(adev, file_priv);
589
569 amdgpu_vm_fini(adev, &fpriv->vm); 590 amdgpu_vm_fini(adev, &fpriv->vm);
570 591
571 idr_for_each_entry(&fpriv->bo_list_handles, list, handle) 592 idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
@@ -590,10 +611,6 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
590void amdgpu_driver_preclose_kms(struct drm_device *dev, 611void amdgpu_driver_preclose_kms(struct drm_device *dev,
591 struct drm_file *file_priv) 612 struct drm_file *file_priv)
592{ 613{
593 struct amdgpu_device *adev = dev->dev_private;
594
595 amdgpu_uvd_free_handles(adev, file_priv);
596 amdgpu_vce_free_handles(adev, file_priv);
597} 614}
598 615
599/* 616/*
@@ -756,3 +773,130 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
756 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 773 DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
757}; 774};
758const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms); 775const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);
776
777/*
778 * Debugfs info
779 */
780#if defined(CONFIG_DEBUG_FS)
781
782static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
783{
784 struct drm_info_node *node = (struct drm_info_node *) m->private;
785 struct drm_device *dev = node->minor->dev;
786 struct amdgpu_device *adev = dev->dev_private;
787 struct drm_amdgpu_info_firmware fw_info;
788 struct drm_amdgpu_query_fw query_fw;
789 int ret, i;
790
791 /* VCE */
792 query_fw.fw_type = AMDGPU_INFO_FW_VCE;
793 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
794 if (ret)
795 return ret;
796 seq_printf(m, "VCE feature version: %u, firmware version: 0x%08x\n",
797 fw_info.feature, fw_info.ver);
798
799 /* UVD */
800 query_fw.fw_type = AMDGPU_INFO_FW_UVD;
801 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
802 if (ret)
803 return ret;
804 seq_printf(m, "UVD feature version: %u, firmware version: 0x%08x\n",
805 fw_info.feature, fw_info.ver);
806
807 /* GMC */
808 query_fw.fw_type = AMDGPU_INFO_FW_GMC;
809 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
810 if (ret)
811 return ret;
812 seq_printf(m, "MC feature version: %u, firmware version: 0x%08x\n",
813 fw_info.feature, fw_info.ver);
814
815 /* ME */
816 query_fw.fw_type = AMDGPU_INFO_FW_GFX_ME;
817 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
818 if (ret)
819 return ret;
820 seq_printf(m, "ME feature version: %u, firmware version: 0x%08x\n",
821 fw_info.feature, fw_info.ver);
822
823 /* PFP */
824 query_fw.fw_type = AMDGPU_INFO_FW_GFX_PFP;
825 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
826 if (ret)
827 return ret;
828 seq_printf(m, "PFP feature version: %u, firmware version: 0x%08x\n",
829 fw_info.feature, fw_info.ver);
830
831 /* CE */
832 query_fw.fw_type = AMDGPU_INFO_FW_GFX_CE;
833 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
834 if (ret)
835 return ret;
836 seq_printf(m, "CE feature version: %u, firmware version: 0x%08x\n",
837 fw_info.feature, fw_info.ver);
838
839 /* RLC */
840 query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC;
841 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
842 if (ret)
843 return ret;
844 seq_printf(m, "RLC feature version: %u, firmware version: 0x%08x\n",
845 fw_info.feature, fw_info.ver);
846
847 /* MEC */
848 query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC;
849 query_fw.index = 0;
850 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
851 if (ret)
852 return ret;
853 seq_printf(m, "MEC feature version: %u, firmware version: 0x%08x\n",
854 fw_info.feature, fw_info.ver);
855
856 /* MEC2 */
857 if (adev->asic_type == CHIP_KAVERI ||
858 (adev->asic_type > CHIP_TOPAZ && adev->asic_type != CHIP_STONEY)) {
859 query_fw.index = 1;
860 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
861 if (ret)
862 return ret;
863 seq_printf(m, "MEC2 feature version: %u, firmware version: 0x%08x\n",
864 fw_info.feature, fw_info.ver);
865 }
866
867 /* SMC */
868 query_fw.fw_type = AMDGPU_INFO_FW_SMC;
869 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
870 if (ret)
871 return ret;
872 seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x\n",
873 fw_info.feature, fw_info.ver);
874
875 /* SDMA */
876 query_fw.fw_type = AMDGPU_INFO_FW_SDMA;
877 for (i = 0; i < adev->sdma.num_instances; i++) {
878 query_fw.index = i;
879 ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
880 if (ret)
881 return ret;
882 seq_printf(m, "SDMA%d feature version: %u, firmware version: 0x%08x\n",
883 i, fw_info.feature, fw_info.ver);
884 }
885
886 return 0;
887}
888
889static const struct drm_info_list amdgpu_firmware_info_list[] = {
890 {"amdgpu_firmware_info", amdgpu_debugfs_firmware_info, 0, NULL},
891};
892#endif
893
894int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev)
895{
896#if defined(CONFIG_DEBUG_FS)
897 return amdgpu_debugfs_add_files(adev, amdgpu_firmware_info_list,
898 ARRAY_SIZE(amdgpu_firmware_info_list));
899#else
900 return 0;
901#endif
902}
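
The firmware query helper factored out above also backs the new amdgpu_firmware_info debugfs node, which prints the same version and feature numbers that the AMDGPU_INFO_FW_VERSION ioctl reports. A minimal user-space sketch for dumping that node follows; the debugfs mount point and the DRM minor number (0) are assumptions about the local setup, not part of the patch:

/* sketch: dump the new amdgpu_firmware_info debugfs node (paths assumed) */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* assumed location: <debugfs>/dri/<minor>/amdgpu_firmware_info */
	FILE *f = fopen("/sys/kernel/debug/dri/0/amdgpu_firmware_info", "r");
	char line[256];

	if (!f) {
		perror("open amdgpu_firmware_info");
		return EXIT_FAILURE;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* e.g. "VCE feature version: 0, firmware version: 0x..." */
	fclose(f);
	return 0;
}
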
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 7ecea83ce453..6f0873c75a25 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -589,6 +589,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
589 struct ttm_mem_reg *new_mem) 589 struct ttm_mem_reg *new_mem)
590{ 590{
591 struct amdgpu_bo *rbo; 591 struct amdgpu_bo *rbo;
592 struct ttm_mem_reg *old_mem = &bo->mem;
592 593
593 if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) 594 if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
594 return; 595 return;
@@ -602,6 +603,8 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
602 603
603 /* move_notify is called before move happens */ 604 /* move_notify is called before move happens */
604 amdgpu_update_memory_usage(rbo->adev, &bo->mem, new_mem); 605 amdgpu_update_memory_usage(rbo->adev, &bo->mem, new_mem);
606
607 trace_amdgpu_ttm_bo_move(rbo, new_mem->mem_type, old_mem->mem_type);
605} 608}
606 609
607int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) 610int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 0e13d80d2a95..ff63b88b0ffa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -347,6 +347,8 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
347 347
348 if (adev->pp_enabled) 348 if (adev->pp_enabled)
349 size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf); 349 size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
350 else if (adev->pm.funcs->print_clock_levels)
351 size = adev->pm.funcs->print_clock_levels(adev, PP_SCLK, buf);
350 352
351 return size; 353 return size;
352} 354}
@@ -363,7 +365,9 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
363 uint32_t i, mask = 0; 365 uint32_t i, mask = 0;
364 char sub_str[2]; 366 char sub_str[2];
365 367
366 for (i = 0; i < strlen(buf) - 1; i++) { 368 for (i = 0; i < strlen(buf); i++) {
369 if (*(buf + i) == '\n')
370 continue;
367 sub_str[0] = *(buf + i); 371 sub_str[0] = *(buf + i);
368 sub_str[1] = '\0'; 372 sub_str[1] = '\0';
369 ret = kstrtol(sub_str, 0, &level); 373 ret = kstrtol(sub_str, 0, &level);
@@ -377,6 +381,8 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
377 381
378 if (adev->pp_enabled) 382 if (adev->pp_enabled)
379 amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask); 383 amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
384 else if (adev->pm.funcs->force_clock_level)
385 adev->pm.funcs->force_clock_level(adev, PP_SCLK, mask);
380fail: 386fail:
381 return count; 387 return count;
382} 388}
@@ -391,6 +397,8 @@ static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
391 397
392 if (adev->pp_enabled) 398 if (adev->pp_enabled)
393 size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf); 399 size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
400 else if (adev->pm.funcs->print_clock_levels)
401 size = adev->pm.funcs->print_clock_levels(adev, PP_MCLK, buf);
394 402
395 return size; 403 return size;
396} 404}
@@ -407,7 +415,9 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
407 uint32_t i, mask = 0; 415 uint32_t i, mask = 0;
408 char sub_str[2]; 416 char sub_str[2];
409 417
410 for (i = 0; i < strlen(buf) - 1; i++) { 418 for (i = 0; i < strlen(buf); i++) {
419 if (*(buf + i) == '\n')
420 continue;
411 sub_str[0] = *(buf + i); 421 sub_str[0] = *(buf + i);
412 sub_str[1] = '\0'; 422 sub_str[1] = '\0';
413 ret = kstrtol(sub_str, 0, &level); 423 ret = kstrtol(sub_str, 0, &level);
@@ -421,6 +431,8 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
421 431
422 if (adev->pp_enabled) 432 if (adev->pp_enabled)
423 amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask); 433 amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
434 else if (adev->pm.funcs->force_clock_level)
435 adev->pm.funcs->force_clock_level(adev, PP_MCLK, mask);
424fail: 436fail:
425 return count; 437 return count;
426} 438}
@@ -435,6 +447,8 @@ static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
435 447
436 if (adev->pp_enabled) 448 if (adev->pp_enabled)
437 size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf); 449 size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
450 else if (adev->pm.funcs->print_clock_levels)
451 size = adev->pm.funcs->print_clock_levels(adev, PP_PCIE, buf);
438 452
439 return size; 453 return size;
440} 454}
@@ -451,7 +465,9 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
451 uint32_t i, mask = 0; 465 uint32_t i, mask = 0;
452 char sub_str[2]; 466 char sub_str[2];
453 467
454 for (i = 0; i < strlen(buf) - 1; i++) { 468 for (i = 0; i < strlen(buf); i++) {
469 if (*(buf + i) == '\n')
470 continue;
455 sub_str[0] = *(buf + i); 471 sub_str[0] = *(buf + i);
456 sub_str[1] = '\0'; 472 sub_str[1] = '\0';
457 ret = kstrtol(sub_str, 0, &level); 473 ret = kstrtol(sub_str, 0, &level);
@@ -465,6 +481,100 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
465 481
466 if (adev->pp_enabled) 482 if (adev->pp_enabled)
467 amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask); 483 amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
484 else if (adev->pm.funcs->force_clock_level)
485 adev->pm.funcs->force_clock_level(adev, PP_PCIE, mask);
486fail:
487 return count;
488}
489
490static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
491 struct device_attribute *attr,
492 char *buf)
493{
494 struct drm_device *ddev = dev_get_drvdata(dev);
495 struct amdgpu_device *adev = ddev->dev_private;
496 uint32_t value = 0;
497
498 if (adev->pp_enabled)
499 value = amdgpu_dpm_get_sclk_od(adev);
500 else if (adev->pm.funcs->get_sclk_od)
501 value = adev->pm.funcs->get_sclk_od(adev);
502
503 return snprintf(buf, PAGE_SIZE, "%d\n", value);
504}
505
506static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
507 struct device_attribute *attr,
508 const char *buf,
509 size_t count)
510{
511 struct drm_device *ddev = dev_get_drvdata(dev);
512 struct amdgpu_device *adev = ddev->dev_private;
513 int ret;
514 long int value;
515
516 ret = kstrtol(buf, 0, &value);
517
518 if (ret) {
519 count = -EINVAL;
520 goto fail;
521 }
522
523 if (adev->pp_enabled) {
524 amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
525 amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_READJUST_POWER_STATE, NULL, NULL);
526 } else if (adev->pm.funcs->set_sclk_od) {
527 adev->pm.funcs->set_sclk_od(adev, (uint32_t)value);
528 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
529 amdgpu_pm_compute_clocks(adev);
530 }
531
532fail:
533 return count;
534}
535
536static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
537 struct device_attribute *attr,
538 char *buf)
539{
540 struct drm_device *ddev = dev_get_drvdata(dev);
541 struct amdgpu_device *adev = ddev->dev_private;
542 uint32_t value = 0;
543
544 if (adev->pp_enabled)
545 value = amdgpu_dpm_get_mclk_od(adev);
546 else if (adev->pm.funcs->get_mclk_od)
547 value = adev->pm.funcs->get_mclk_od(adev);
548
549 return snprintf(buf, PAGE_SIZE, "%d\n", value);
550}
551
552static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
553 struct device_attribute *attr,
554 const char *buf,
555 size_t count)
556{
557 struct drm_device *ddev = dev_get_drvdata(dev);
558 struct amdgpu_device *adev = ddev->dev_private;
559 int ret;
560 long int value;
561
562 ret = kstrtol(buf, 0, &value);
563
564 if (ret) {
565 count = -EINVAL;
566 goto fail;
567 }
568
569 if (adev->pp_enabled) {
570 amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
571 amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_READJUST_POWER_STATE, NULL, NULL);
572 } else if (adev->pm.funcs->set_mclk_od) {
573 adev->pm.funcs->set_mclk_od(adev, (uint32_t)value);
574 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
575 amdgpu_pm_compute_clocks(adev);
576 }
577
468fail: 578fail:
469 return count; 579 return count;
470} 580}
@@ -490,6 +600,12 @@ static DEVICE_ATTR(pp_dpm_mclk, S_IRUGO | S_IWUSR,
490static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR, 600static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR,
491 amdgpu_get_pp_dpm_pcie, 601 amdgpu_get_pp_dpm_pcie,
492 amdgpu_set_pp_dpm_pcie); 602 amdgpu_set_pp_dpm_pcie);
603static DEVICE_ATTR(pp_sclk_od, S_IRUGO | S_IWUSR,
604 amdgpu_get_pp_sclk_od,
605 amdgpu_set_pp_sclk_od);
606static DEVICE_ATTR(pp_mclk_od, S_IRUGO | S_IWUSR,
607 amdgpu_get_pp_mclk_od,
608 amdgpu_set_pp_mclk_od);
493 609
494static ssize_t amdgpu_hwmon_show_temp(struct device *dev, 610static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
495 struct device_attribute *attr, 611 struct device_attribute *attr,
@@ -1108,22 +1224,34 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
1108 DRM_ERROR("failed to create device file pp_table\n"); 1224 DRM_ERROR("failed to create device file pp_table\n");
1109 return ret; 1225 return ret;
1110 } 1226 }
1111 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
1112 if (ret) {
1113 DRM_ERROR("failed to create device file pp_dpm_sclk\n");
1114 return ret;
1115 }
1116 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
1117 if (ret) {
1118 DRM_ERROR("failed to create device file pp_dpm_mclk\n");
1119 return ret;
1120 }
1121 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie);
1122 if (ret) {
1123 DRM_ERROR("failed to create device file pp_dpm_pcie\n");
1124 return ret;
1125 }
1126 } 1227 }
1228
1229 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk);
1230 if (ret) {
1231 DRM_ERROR("failed to create device file pp_dpm_sclk\n");
1232 return ret;
1233 }
1234 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk);
1235 if (ret) {
1236 DRM_ERROR("failed to create device file pp_dpm_mclk\n");
1237 return ret;
1238 }
1239 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie);
1240 if (ret) {
1241 DRM_ERROR("failed to create device file pp_dpm_pcie\n");
1242 return ret;
1243 }
1244 ret = device_create_file(adev->dev, &dev_attr_pp_sclk_od);
1245 if (ret) {
1246 DRM_ERROR("failed to create device file pp_sclk_od\n");
1247 return ret;
1248 }
1249 ret = device_create_file(adev->dev, &dev_attr_pp_mclk_od);
1250 if (ret) {
1251 DRM_ERROR("failed to create device file pp_mclk_od\n");
1252 return ret;
1253 }
1254
1127 ret = amdgpu_debugfs_pm_init(adev); 1255 ret = amdgpu_debugfs_pm_init(adev);
1128 if (ret) { 1256 if (ret) {
1129 DRM_ERROR("Failed to register debugfs file for dpm!\n"); 1257 DRM_ERROR("Failed to register debugfs file for dpm!\n");
@@ -1146,10 +1274,12 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev)
1146 device_remove_file(adev->dev, &dev_attr_pp_cur_state); 1274 device_remove_file(adev->dev, &dev_attr_pp_cur_state);
1147 device_remove_file(adev->dev, &dev_attr_pp_force_state); 1275 device_remove_file(adev->dev, &dev_attr_pp_force_state);
1148 device_remove_file(adev->dev, &dev_attr_pp_table); 1276 device_remove_file(adev->dev, &dev_attr_pp_table);
1149 device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
1150 device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
1151 device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
1152 } 1277 }
1278 device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk);
1279 device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk);
1280 device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie);
1281 device_remove_file(adev->dev, &dev_attr_pp_sclk_od);
1282 device_remove_file(adev->dev, &dev_attr_pp_mclk_od);
1153} 1283}
1154 1284
1155void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) 1285void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
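
The new pp_sclk_od and pp_mclk_od attributes accept a plain decimal overdrive value on write and report the current value on read, on both powerplay and legacy dpm paths. A small sketch of driving pp_sclk_od from user space; the card0 sysfs path and the interpretation of the value as an overdrive percentage are assumptions made for illustration:

/* sketch: exercise the new pp_sclk_od sysfs file (device path assumed) */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/class/drm/card0/device/pp_sclk_od"; /* assumed card0 */
	FILE *f;
	int od = 0;

	f = fopen(path, "w");
	if (!f) { perror("open for write"); return 1; }
	fprintf(f, "5\n");		/* request a 5 percent sclk overdrive (assumed unit) */
	fclose(f);

	f = fopen(path, "r");
	if (!f) { perror("open for read"); return 1; }
	if (fscanf(f, "%d", &od) == 1)
		printf("current sclk overdrive: %d\n", od);
	fclose(f);
	return 0;
}
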
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index 82256558e0f5..c5738a22b690 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -52,6 +52,7 @@ static int amdgpu_powerplay_init(struct amdgpu_device *adev)
52 pp_init->chip_family = adev->family; 52 pp_init->chip_family = adev->family;
53 pp_init->chip_id = adev->asic_type; 53 pp_init->chip_id = adev->asic_type;
54 pp_init->device = amdgpu_cgs_create_device(adev); 54 pp_init->device = amdgpu_cgs_create_device(adev);
55 pp_init->powercontainment_enabled = amdgpu_powercontainment;
55 56
56 ret = amd_powerplay_init(pp_init, amd_pp); 57 ret = amd_powerplay_init(pp_init, amd_pp);
57 kfree(pp_init); 58 kfree(pp_init);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 870f9494252c..85aeb0a804bb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -28,6 +28,7 @@
28 */ 28 */
29#include <linux/seq_file.h> 29#include <linux/seq_file.h>
30#include <linux/slab.h> 30#include <linux/slab.h>
31#include <linux/debugfs.h>
31#include <drm/drmP.h> 32#include <drm/drmP.h>
32#include <drm/amdgpu_drm.h> 33#include <drm/amdgpu_drm.h>
33#include "amdgpu.h" 34#include "amdgpu.h"
@@ -48,6 +49,7 @@
48 */ 49 */
49static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, 50static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
50 struct amdgpu_ring *ring); 51 struct amdgpu_ring *ring);
52static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring);
51 53
52/** 54/**
53 * amdgpu_ring_alloc - allocate space on the ring buffer 55 * amdgpu_ring_alloc - allocate space on the ring buffer
@@ -73,6 +75,10 @@ int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw)
73 75
74 ring->count_dw = ndw; 76 ring->count_dw = ndw;
75 ring->wptr_old = ring->wptr; 77 ring->wptr_old = ring->wptr;
78
79 if (ring->funcs->begin_use)
80 ring->funcs->begin_use(ring);
81
76 return 0; 82 return 0;
77} 83}
78 84
@@ -125,6 +131,9 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring)
125 131
126 mb(); 132 mb();
127 amdgpu_ring_set_wptr(ring); 133 amdgpu_ring_set_wptr(ring);
134
135 if (ring->funcs->end_use)
136 ring->funcs->end_use(ring);
128} 137}
129 138
130/** 139/**
@@ -137,78 +146,9 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring)
137void amdgpu_ring_undo(struct amdgpu_ring *ring) 146void amdgpu_ring_undo(struct amdgpu_ring *ring)
138{ 147{
139 ring->wptr = ring->wptr_old; 148 ring->wptr = ring->wptr_old;
140}
141
142/**
143 * amdgpu_ring_backup - Back up the content of a ring
144 *
145 * @ring: the ring we want to back up
146 *
147 * Saves all unprocessed commits from a ring, returns the number of dwords saved.
148 */
149unsigned amdgpu_ring_backup(struct amdgpu_ring *ring,
150 uint32_t **data)
151{
152 unsigned size, ptr, i;
153
154 *data = NULL;
155
156 if (ring->ring_obj == NULL)
157 return 0;
158
159 /* it doesn't make sense to save anything if all fences are signaled */
160 if (!amdgpu_fence_count_emitted(ring))
161 return 0;
162
163 ptr = le32_to_cpu(*ring->next_rptr_cpu_addr);
164
165 size = ring->wptr + (ring->ring_size / 4);
166 size -= ptr;
167 size &= ring->ptr_mask;
168 if (size == 0)
169 return 0;
170
171 /* and then save the content of the ring */
172 *data = kmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
173 if (!*data)
174 return 0;
175 for (i = 0; i < size; ++i) {
176 (*data)[i] = ring->ring[ptr++];
177 ptr &= ring->ptr_mask;
178 }
179
180 return size;
181}
182
183/**
184 * amdgpu_ring_restore - append saved commands to the ring again
185 *
186 * @ring: ring to append commands to
187 * @size: number of dwords we want to write
188 * @data: saved commands
189 *
190 * Allocates space on the ring and restore the previously saved commands.
191 */
192int amdgpu_ring_restore(struct amdgpu_ring *ring,
193 unsigned size, uint32_t *data)
194{
195 int i, r;
196
197 if (!size || !data)
198 return 0;
199
200 /* restore the saved ring content */
201 r = amdgpu_ring_alloc(ring, size);
202 if (r)
203 return r;
204
205 for (i = 0; i < size; ++i) {
206 amdgpu_ring_write(ring, data[i]);
207 }
208 149
209 amdgpu_ring_commit(ring); 150 if (ring->funcs->end_use)
210 kfree(data); 151 ring->funcs->end_use(ring);
211 return 0;
212} 152}
213 153
214/** 154/**
@@ -260,14 +200,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
260 return r; 200 return r;
261 } 201 }
262 202
263 r = amdgpu_wb_get(adev, &ring->next_rptr_offs);
264 if (r) {
265 dev_err(adev->dev, "(%d) ring next_rptr wb alloc failed\n", r);
266 return r;
267 }
268 ring->next_rptr_gpu_addr = adev->wb.gpu_addr + ring->next_rptr_offs * 4;
269 ring->next_rptr_cpu_addr = &adev->wb.wb[ring->next_rptr_offs];
270
271 r = amdgpu_wb_get(adev, &ring->cond_exe_offs); 203 r = amdgpu_wb_get(adev, &ring->cond_exe_offs);
272 if (r) { 204 if (r) {
273 dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r); 205 dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r);
@@ -276,7 +208,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
276 ring->cond_exe_gpu_addr = adev->wb.gpu_addr + (ring->cond_exe_offs * 4); 208 ring->cond_exe_gpu_addr = adev->wb.gpu_addr + (ring->cond_exe_offs * 4);
277 ring->cond_exe_cpu_addr = &adev->wb.wb[ring->cond_exe_offs]; 209 ring->cond_exe_cpu_addr = &adev->wb.wb[ring->cond_exe_offs];
278 210
279 spin_lock_init(&ring->fence_lock);
280 r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type); 211 r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
281 if (r) { 212 if (r) {
282 dev_err(adev->dev, "failed initializing fences (%d).\n", r); 213 dev_err(adev->dev, "failed initializing fences (%d).\n", r);
@@ -310,6 +241,9 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
310 } 241 }
311 r = amdgpu_bo_kmap(ring->ring_obj, 242 r = amdgpu_bo_kmap(ring->ring_obj,
312 (void **)&ring->ring); 243 (void **)&ring->ring);
244
245 memset((void *)ring->ring, 0, ring->ring_size);
246
313 amdgpu_bo_unreserve(ring->ring_obj); 247 amdgpu_bo_unreserve(ring->ring_obj);
314 if (r) { 248 if (r) {
315 dev_err(adev->dev, "(%d) ring map failed\n", r); 249 dev_err(adev->dev, "(%d) ring map failed\n", r);
@@ -347,7 +281,6 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
347 amdgpu_wb_free(ring->adev, ring->fence_offs); 281 amdgpu_wb_free(ring->adev, ring->fence_offs);
348 amdgpu_wb_free(ring->adev, ring->rptr_offs); 282 amdgpu_wb_free(ring->adev, ring->rptr_offs);
349 amdgpu_wb_free(ring->adev, ring->wptr_offs); 283 amdgpu_wb_free(ring->adev, ring->wptr_offs);
350 amdgpu_wb_free(ring->adev, ring->next_rptr_offs);
351 284
352 if (ring_obj) { 285 if (ring_obj) {
353 r = amdgpu_bo_reserve(ring_obj, false); 286 r = amdgpu_bo_reserve(ring_obj, false);
@@ -358,6 +291,7 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
358 } 291 }
359 amdgpu_bo_unref(&ring_obj); 292 amdgpu_bo_unref(&ring_obj);
360 } 293 }
294 amdgpu_debugfs_ring_fini(ring);
361} 295}
362 296
363/* 297/*
@@ -365,57 +299,62 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
365 */ 299 */
366#if defined(CONFIG_DEBUG_FS) 300#if defined(CONFIG_DEBUG_FS)
367 301
368static int amdgpu_debugfs_ring_info(struct seq_file *m, void *data) 302/* Layout of file is 12 bytes consisting of
303 * - rptr
304 * - wptr
305 * - driver's copy of wptr
306 *
307 * followed by n-words of ring data
308 */
309static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
310 size_t size, loff_t *pos)
369{ 311{
370 struct drm_info_node *node = (struct drm_info_node *) m->private; 312 struct amdgpu_ring *ring = (struct amdgpu_ring*)f->f_inode->i_private;
371 struct drm_device *dev = node->minor->dev; 313 int r, i;
372 struct amdgpu_device *adev = dev->dev_private; 314 uint32_t value, result, early[3];
373 int roffset = (unsigned long)node->info_ent->data; 315
374 struct amdgpu_ring *ring = (void *)(((uint8_t*)adev) + roffset); 316 if (*pos & 3 || size & 3)
375 uint32_t rptr, wptr, rptr_next; 317 return -EINVAL;
376 unsigned i; 318
377 319 result = 0;
378 wptr = amdgpu_ring_get_wptr(ring); 320
379 seq_printf(m, "wptr: 0x%08x [%5d]\n", wptr, wptr); 321 if (*pos < 12) {
380 322 early[0] = amdgpu_ring_get_rptr(ring);
381 rptr = amdgpu_ring_get_rptr(ring); 323 early[1] = amdgpu_ring_get_wptr(ring);
382 rptr_next = le32_to_cpu(*ring->next_rptr_cpu_addr); 324 early[2] = ring->wptr;
383 325 for (i = *pos / 4; i < 3 && size; i++) {
384 seq_printf(m, "rptr: 0x%08x [%5d]\n", rptr, rptr); 326 r = put_user(early[i], (uint32_t *)buf);
385 327 if (r)
386 seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n", 328 return r;
387 ring->wptr, ring->wptr); 329 buf += 4;
388 330 result += 4;
389 if (!ring->ready) 331 size -= 4;
390 return 0; 332 *pos += 4;
391 333 }
392 /* print 8 dw before current rptr as often it's the last executed
393 * packet that is the root issue
394 */
395 i = (rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask;
396 while (i != rptr) {
397 seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]);
398 if (i == rptr)
399 seq_puts(m, " *");
400 if (i == rptr_next)
401 seq_puts(m, " #");
402 seq_puts(m, "\n");
403 i = (i + 1) & ring->ptr_mask;
404 } 334 }
405 while (i != wptr) { 335
406 seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]); 336 while (size) {
407 if (i == rptr) 337 if (*pos >= (ring->ring_size + 12))
408 seq_puts(m, " *"); 338 return result;
409 if (i == rptr_next) 339
410 seq_puts(m, " #"); 340 value = ring->ring[(*pos - 12)/4];
411 seq_puts(m, "\n"); 341 r = put_user(value, (uint32_t*)buf);
412 i = (i + 1) & ring->ptr_mask; 342 if (r)
343 return r;
344 buf += 4;
345 result += 4;
346 size -= 4;
347 *pos += 4;
413 } 348 }
414 return 0; 349
350 return result;
415} 351}
416 352
417static struct drm_info_list amdgpu_debugfs_ring_info_list[AMDGPU_MAX_RINGS]; 353static const struct file_operations amdgpu_debugfs_ring_fops = {
418static char amdgpu_debugfs_ring_names[AMDGPU_MAX_RINGS][32]; 354 .owner = THIS_MODULE,
355 .read = amdgpu_debugfs_ring_read,
356 .llseek = default_llseek
357};
419 358
420#endif 359#endif
421 360
@@ -423,28 +362,27 @@ static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
423 struct amdgpu_ring *ring) 362 struct amdgpu_ring *ring)
424{ 363{
425#if defined(CONFIG_DEBUG_FS) 364#if defined(CONFIG_DEBUG_FS)
426 unsigned offset = (uint8_t*)ring - (uint8_t*)adev; 365 struct drm_minor *minor = adev->ddev->primary;
427 unsigned i; 366 struct dentry *ent, *root = minor->debugfs_root;
428 struct drm_info_list *info; 367 char name[32];
429 char *name;
430
431 for (i = 0; i < ARRAY_SIZE(amdgpu_debugfs_ring_info_list); ++i) {
432 info = &amdgpu_debugfs_ring_info_list[i];
433 if (!info->data)
434 break;
435 }
436 368
437 if (i == ARRAY_SIZE(amdgpu_debugfs_ring_info_list))
438 return -ENOSPC;
439
440 name = &amdgpu_debugfs_ring_names[i][0];
441 sprintf(name, "amdgpu_ring_%s", ring->name); 369 sprintf(name, "amdgpu_ring_%s", ring->name);
442 info->name = name;
443 info->show = amdgpu_debugfs_ring_info;
444 info->driver_features = 0;
445 info->data = (void*)(uintptr_t)offset;
446 370
447 return amdgpu_debugfs_add_files(adev, info, 1); 371 ent = debugfs_create_file(name,
372 S_IFREG | S_IRUGO, root,
373 ring, &amdgpu_debugfs_ring_fops);
374 if (IS_ERR(ent))
375 return PTR_ERR(ent);
376
377 i_size_write(ent->d_inode, ring->ring_size + 12);
378 ring->ent = ent;
448#endif 379#endif
449 return 0; 380 return 0;
450} 381}
382
383static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring)
384{
385#if defined(CONFIG_DEBUG_FS)
386 debugfs_remove(ring->ent);
387#endif
388}
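
The ring debugfs files replace the old seq_file dump with a binary read interface: a 12 byte header holding rptr, wptr and the driver's copy of wptr, followed by the raw ring contents, with all reads 4 byte aligned. A sketch that decodes just the header; the debugfs path and the ring name used here are assumptions:

/* sketch: decode the header of an amdgpu_ring_* debugfs file (path assumed) */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* assumed location: <debugfs>/dri/<minor>/amdgpu_ring_gfx */
	FILE *f = fopen("/sys/kernel/debug/dri/0/amdgpu_ring_gfx", "rb");
	uint32_t hdr[3];	/* rptr, wptr, driver's copy of wptr */

	if (!f) { perror("open ring file"); return 1; }
	if (fread(hdr, sizeof(uint32_t), 3, f) == 3)
		printf("rptr=0x%08x wptr=0x%08x driver wptr=0x%08x\n",
		       hdr[0], hdr[1], hdr[2]);
	/* the remaining bytes are the raw ring contents, one dword per entry */
	fclose(f);
	return 0;
}
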
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index 48618ee324eb..d8af37a845f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -428,7 +428,7 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
428 soffset, eoffset, eoffset - soffset); 428 soffset, eoffset, eoffset - soffset);
429 429
430 if (i->fence) 430 if (i->fence)
431 seq_printf(m, " protected by 0x%08x on context %d", 431 seq_printf(m, " protected by 0x%08x on context %llu",
432 i->fence->seqno, i->fence->context); 432 i->fence->seqno, i->fence->context);
433 433
434 seq_printf(m, "\n"); 434 seq_printf(m, "\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 34a92808bbd4..5c8d3022fb87 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -223,13 +223,16 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
223} 223}
224 224
225/** 225/**
226 * amdgpu_sync_is_idle - test if all fences are signaled 226 * amdgpu_sync_peek_fence - get the next fence not signaled yet
227 * 227 *
228 * @sync: the sync object 228 * @sync: the sync object
229 * @ring: optional ring to use for test
229 * 230 *
230 * Returns true if all fences in the sync object are signaled. 231 * Returns the next fence not signaled yet without removing it from the sync
232 * object.
231 */ 233 */
232bool amdgpu_sync_is_idle(struct amdgpu_sync *sync) 234struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
235 struct amdgpu_ring *ring)
233{ 236{
234 struct amdgpu_sync_entry *e; 237 struct amdgpu_sync_entry *e;
235 struct hlist_node *tmp; 238 struct hlist_node *tmp;
@@ -237,6 +240,19 @@ bool amdgpu_sync_is_idle(struct amdgpu_sync *sync)
237 240
238 hash_for_each_safe(sync->fences, i, tmp, e, node) { 241 hash_for_each_safe(sync->fences, i, tmp, e, node) {
239 struct fence *f = e->fence; 242 struct fence *f = e->fence;
243 struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
244
245 if (ring && s_fence) {
246 /* For fences from the same ring it is sufficient
247 * when they are scheduled.
248 */
249 if (s_fence->sched == &ring->sched) {
250 if (fence_is_signaled(&s_fence->scheduled))
251 continue;
252
253 return &s_fence->scheduled;
254 }
255 }
240 256
241 if (fence_is_signaled(f)) { 257 if (fence_is_signaled(f)) {
242 hash_del(&e->node); 258 hash_del(&e->node);
@@ -245,58 +261,19 @@ bool amdgpu_sync_is_idle(struct amdgpu_sync *sync)
245 continue; 261 continue;
246 } 262 }
247 263
248 return false; 264 return f;
249 } 265 }
250 266
251 return true; 267 return NULL;
252} 268}
253 269
254/** 270/**
255 * amdgpu_sync_cycle_fences - move fences from one sync object into another 271 * amdgpu_sync_get_fence - get the next fence from the sync object
256 * 272 *
257 * @dst: the destination sync object 273 * @sync: sync object to use
258 * @src: the source sync object
259 * @fence: fence to add to source
260 * 274 *
261 * Remove all fences from source and put them into destination and add 275 * Get and removes the next fence from the sync object not signaled yet.
262 * fence as new one into source.
263 */ 276 */
264int amdgpu_sync_cycle_fences(struct amdgpu_sync *dst, struct amdgpu_sync *src,
265 struct fence *fence)
266{
267 struct amdgpu_sync_entry *e, *newone;
268 struct hlist_node *tmp;
269 int i;
270
271 /* Allocate the new entry before moving the old ones */
272 newone = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL);
273 if (!newone)
274 return -ENOMEM;
275
276 hash_for_each_safe(src->fences, i, tmp, e, node) {
277 struct fence *f = e->fence;
278
279 hash_del(&e->node);
280 if (fence_is_signaled(f)) {
281 fence_put(f);
282 kmem_cache_free(amdgpu_sync_slab, e);
283 continue;
284 }
285
286 if (amdgpu_sync_add_later(dst, f)) {
287 kmem_cache_free(amdgpu_sync_slab, e);
288 continue;
289 }
290
291 hash_add(dst->fences, &e->node, f->context);
292 }
293
294 hash_add(src->fences, &newone->node, fence->context);
295 newone->fence = fence_get(fence);
296
297 return 0;
298}
299
300struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync) 277struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
301{ 278{
302 struct amdgpu_sync_entry *e; 279 struct amdgpu_sync_entry *e;
@@ -319,25 +296,6 @@ struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync)
319 return NULL; 296 return NULL;
320} 297}
321 298
322int amdgpu_sync_wait(struct amdgpu_sync *sync)
323{
324 struct amdgpu_sync_entry *e;
325 struct hlist_node *tmp;
326 int i, r;
327
328 hash_for_each_safe(sync->fences, i, tmp, e, node) {
329 r = fence_wait(e->fence, false);
330 if (r)
331 return r;
332
333 hash_del(&e->node);
334 fence_put(e->fence);
335 kmem_cache_free(amdgpu_sync_slab, e);
336 }
337
338 return 0;
339}
340
341/** 299/**
342 * amdgpu_sync_free - free the sync object 300 * amdgpu_sync_free - free the sync object
343 * 301 *
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 26a5f4acf584..0d8d65eb46cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -11,19 +11,68 @@
11#define TRACE_SYSTEM amdgpu 11#define TRACE_SYSTEM amdgpu
12#define TRACE_INCLUDE_FILE amdgpu_trace 12#define TRACE_INCLUDE_FILE amdgpu_trace
13 13
14TRACE_EVENT(amdgpu_mm_rreg,
15 TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
16 TP_ARGS(did, reg, value),
17 TP_STRUCT__entry(
18 __field(unsigned, did)
19 __field(uint32_t, reg)
20 __field(uint32_t, value)
21 ),
22 TP_fast_assign(
23 __entry->did = did;
24 __entry->reg = reg;
25 __entry->value = value;
26 ),
27 TP_printk("0x%04lx, 0x%04lx, 0x%08lx",
28 (unsigned long)__entry->did,
29 (unsigned long)__entry->reg,
30 (unsigned long)__entry->value)
31);
32
33TRACE_EVENT(amdgpu_mm_wreg,
34 TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
35 TP_ARGS(did, reg, value),
36 TP_STRUCT__entry(
37 __field(unsigned, did)
38 __field(uint32_t, reg)
39 __field(uint32_t, value)
40 ),
41 TP_fast_assign(
42 __entry->did = did;
43 __entry->reg = reg;
44 __entry->value = value;
45 ),
46 TP_printk("0x%04lx, 0x%04lx, 0x%08lx",
47 (unsigned long)__entry->did,
48 (unsigned long)__entry->reg,
49 (unsigned long)__entry->value)
50);
51
14TRACE_EVENT(amdgpu_bo_create, 52TRACE_EVENT(amdgpu_bo_create,
15 TP_PROTO(struct amdgpu_bo *bo), 53 TP_PROTO(struct amdgpu_bo *bo),
16 TP_ARGS(bo), 54 TP_ARGS(bo),
17 TP_STRUCT__entry( 55 TP_STRUCT__entry(
18 __field(struct amdgpu_bo *, bo) 56 __field(struct amdgpu_bo *, bo)
19 __field(u32, pages) 57 __field(u32, pages)
58 __field(u32, type)
59 __field(u32, prefer)
60 __field(u32, allow)
61 __field(u32, visible)
20 ), 62 ),
21 63
22 TP_fast_assign( 64 TP_fast_assign(
23 __entry->bo = bo; 65 __entry->bo = bo;
24 __entry->pages = bo->tbo.num_pages; 66 __entry->pages = bo->tbo.num_pages;
67 __entry->type = bo->tbo.mem.mem_type;
68 __entry->prefer = bo->prefered_domains;
69 __entry->allow = bo->allowed_domains;
70 __entry->visible = bo->flags;
25 ), 71 ),
26 TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages) 72
73 TP_printk("bo=%p,pages=%u,type=%d,prefered=%d,allowed=%d,visible=%d",
74 __entry->bo, __entry->pages, __entry->type,
75 __entry->prefer, __entry->allow, __entry->visible)
27); 76);
28 77
29TRACE_EVENT(amdgpu_cs, 78TRACE_EVENT(amdgpu_cs,
@@ -64,7 +113,7 @@ TRACE_EVENT(amdgpu_cs_ioctl,
64 __entry->adev = job->adev; 113 __entry->adev = job->adev;
65 __entry->sched_job = &job->base; 114 __entry->sched_job = &job->base;
66 __entry->ib = job->ibs; 115 __entry->ib = job->ibs;
67 __entry->fence = &job->base.s_fence->base; 116 __entry->fence = &job->base.s_fence->finished;
68 __entry->ring_name = job->ring->name; 117 __entry->ring_name = job->ring->name;
69 __entry->num_ibs = job->num_ibs; 118 __entry->num_ibs = job->num_ibs;
70 ), 119 ),
@@ -89,7 +138,7 @@ TRACE_EVENT(amdgpu_sched_run_job,
89 __entry->adev = job->adev; 138 __entry->adev = job->adev;
90 __entry->sched_job = &job->base; 139 __entry->sched_job = &job->base;
91 __entry->ib = job->ibs; 140 __entry->ib = job->ibs;
92 __entry->fence = &job->base.s_fence->base; 141 __entry->fence = &job->base.s_fence->finished;
93 __entry->ring_name = job->ring->name; 142 __entry->ring_name = job->ring->name;
94 __entry->num_ibs = job->num_ibs; 143 __entry->num_ibs = job->num_ibs;
95 ), 144 ),
@@ -100,24 +149,26 @@ TRACE_EVENT(amdgpu_sched_run_job,
100 149
101 150
102TRACE_EVENT(amdgpu_vm_grab_id, 151TRACE_EVENT(amdgpu_vm_grab_id,
103 TP_PROTO(struct amdgpu_vm *vm, int ring, unsigned vmid, 152 TP_PROTO(struct amdgpu_vm *vm, int ring, struct amdgpu_job *job),
104 uint64_t pd_addr), 153 TP_ARGS(vm, ring, job),
105 TP_ARGS(vm, ring, vmid, pd_addr),
106 TP_STRUCT__entry( 154 TP_STRUCT__entry(
107 __field(struct amdgpu_vm *, vm) 155 __field(struct amdgpu_vm *, vm)
108 __field(u32, ring) 156 __field(u32, ring)
109 __field(u32, vmid) 157 __field(u32, vmid)
110 __field(u64, pd_addr) 158 __field(u64, pd_addr)
159 __field(u32, needs_flush)
111 ), 160 ),
112 161
113 TP_fast_assign( 162 TP_fast_assign(
114 __entry->vm = vm; 163 __entry->vm = vm;
115 __entry->ring = ring; 164 __entry->ring = ring;
116 __entry->vmid = vmid; 165 __entry->vmid = job->vm_id;
117 __entry->pd_addr = pd_addr; 166 __entry->pd_addr = job->vm_pd_addr;
167 __entry->needs_flush = job->vm_needs_flush;
118 ), 168 ),
119 TP_printk("vm=%p, ring=%u, id=%u, pd_addr=%010Lx", __entry->vm, 169 TP_printk("vm=%p, ring=%u, id=%u, pd_addr=%010Lx needs_flush=%u",
120 __entry->ring, __entry->vmid, __entry->pd_addr) 170 __entry->vm, __entry->ring, __entry->vmid,
171 __entry->pd_addr, __entry->needs_flush)
121); 172);
122 173
123TRACE_EVENT(amdgpu_vm_bo_map, 174TRACE_EVENT(amdgpu_vm_bo_map,
@@ -244,13 +295,55 @@ TRACE_EVENT(amdgpu_bo_list_set,
244 TP_STRUCT__entry( 295 TP_STRUCT__entry(
245 __field(struct amdgpu_bo_list *, list) 296 __field(struct amdgpu_bo_list *, list)
246 __field(struct amdgpu_bo *, bo) 297 __field(struct amdgpu_bo *, bo)
298 __field(u64, bo_size)
247 ), 299 ),
248 300
249 TP_fast_assign( 301 TP_fast_assign(
250 __entry->list = list; 302 __entry->list = list;
251 __entry->bo = bo; 303 __entry->bo = bo;
304 __entry->bo_size = amdgpu_bo_size(bo);
252 ), 305 ),
253 TP_printk("list=%p, bo=%p", __entry->list, __entry->bo) 306 TP_printk("list=%p, bo=%p, bo_size = %Ld",
307 __entry->list,
308 __entry->bo,
309 __entry->bo_size)
310);
311
312TRACE_EVENT(amdgpu_cs_bo_status,
313 TP_PROTO(uint64_t total_bo, uint64_t total_size),
314 TP_ARGS(total_bo, total_size),
315 TP_STRUCT__entry(
316 __field(u64, total_bo)
317 __field(u64, total_size)
318 ),
319
320 TP_fast_assign(
321 __entry->total_bo = total_bo;
322 __entry->total_size = total_size;
323 ),
324 TP_printk("total bo size = %Ld, total bo count = %Ld",
325 __entry->total_bo, __entry->total_size)
326);
327
328TRACE_EVENT(amdgpu_ttm_bo_move,
329 TP_PROTO(struct amdgpu_bo* bo, uint32_t new_placement, uint32_t old_placement),
330 TP_ARGS(bo, new_placement, old_placement),
331 TP_STRUCT__entry(
332 __field(struct amdgpu_bo *, bo)
333 __field(u64, bo_size)
334 __field(u32, new_placement)
335 __field(u32, old_placement)
336 ),
337
338 TP_fast_assign(
339 __entry->bo = bo;
340 __entry->bo_size = amdgpu_bo_size(bo);
341 __entry->new_placement = new_placement;
342 __entry->old_placement = old_placement;
343 ),
344 TP_printk("bo=%p from:%d to %d with size = %Ld",
345 __entry->bo, __entry->old_placement,
346 __entry->new_placement, __entry->bo_size)
254); 347);
255 348
256#endif 349#endif
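
The new register access and buffer-move tracepoints are consumed through the regular ftrace event interface rather than any driver-specific file. A sketch that switches on amdgpu_ttm_bo_move; the tracefs mount point under /sys/kernel/debug/tracing is an assumption about the local setup:

/* sketch: enable the new amdgpu_ttm_bo_move tracepoint via tracefs (mount point assumed) */
#include <stdio.h>

static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	fclose(f);
	return 0;
}

int main(void)
{
	const char *base = "/sys/kernel/debug/tracing";	/* assumed tracefs location */
	char path[256];

	snprintf(path, sizeof(path),
		 "%s/events/amdgpu/amdgpu_ttm_bo_move/enable", base);
	if (write_str(path, "1"))
		perror("enable tracepoint");

	snprintf(path, sizeof(path), "%s/tracing_on", base);
	if (write_str(path, "1"))
		perror("tracing_on");
	/* buffer moves should then appear in <base>/trace in the
	 * "bo=... from:<old placement> to <new placement> with size = ..." format */
	return 0;
}
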
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 3b9053af4762..b7742e62972a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -286,9 +286,10 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
286 r = amdgpu_copy_buffer(ring, old_start, new_start, 286 r = amdgpu_copy_buffer(ring, old_start, new_start,
287 new_mem->num_pages * PAGE_SIZE, /* bytes */ 287 new_mem->num_pages * PAGE_SIZE, /* bytes */
288 bo->resv, &fence); 288 bo->resv, &fence);
289 /* FIXME: handle copy error */ 289 if (r)
290 r = ttm_bo_move_accel_cleanup(bo, fence, 290 return r;
291 evict, no_wait_gpu, new_mem); 291
292 r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
292 fence_put(fence); 293 fence_put(fence);
293 return r; 294 return r;
294} 295}
@@ -396,6 +397,11 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo,
396 return -EINVAL; 397 return -EINVAL;
397 398
398 adev = amdgpu_get_adev(bo->bdev); 399 adev = amdgpu_get_adev(bo->bdev);
400
401 /* remember the eviction */
402 if (evict)
403 atomic64_inc(&adev->num_evictions);
404
399 if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { 405 if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
400 amdgpu_move_null(bo, new_mem); 406 amdgpu_move_null(bo, new_mem);
401 return 0; 407 return 0;
@@ -429,7 +435,8 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo,
429 435
430 if (r) { 436 if (r) {
431memcpy: 437memcpy:
432 r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); 438 r = ttm_bo_move_memcpy(bo, evict, interruptible,
439 no_wait_gpu, new_mem);
433 if (r) { 440 if (r) {
434 return r; 441 return r;
435 } 442 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index d9c88d13f8db..b11f4e8868d7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -40,9 +40,16 @@
40#include "uvd/uvd_4_2_d.h" 40#include "uvd/uvd_4_2_d.h"
41 41
42/* 1 second timeout */ 42/* 1 second timeout */
43#define UVD_IDLE_TIMEOUT_MS 1000 43#define UVD_IDLE_TIMEOUT msecs_to_jiffies(1000)
44
45/* Firmware versions for VI */
46#define FW_1_65_10 ((1 << 24) | (65 << 16) | (10 << 8))
47#define FW_1_87_11 ((1 << 24) | (87 << 16) | (11 << 8))
48#define FW_1_87_12 ((1 << 24) | (87 << 16) | (12 << 8))
49#define FW_1_37_15 ((1 << 24) | (37 << 16) | (15 << 8))
50
44/* Polaris10/11 firmware version */ 51/* Polaris10/11 firmware version */
45#define FW_1_66_16 ((1 << 24) | (66 << 16) | (16 << 8)) 52#define FW_1_66_16 ((1 << 24) | (66 << 16) | (16 << 8))
46 53
47/* Firmware Names */ 54/* Firmware Names */
48#ifdef CONFIG_DRM_AMDGPU_CIK 55#ifdef CONFIG_DRM_AMDGPU_CIK
@@ -92,7 +99,6 @@ MODULE_FIRMWARE(FIRMWARE_STONEY);
92MODULE_FIRMWARE(FIRMWARE_POLARIS10); 99MODULE_FIRMWARE(FIRMWARE_POLARIS10);
93MODULE_FIRMWARE(FIRMWARE_POLARIS11); 100MODULE_FIRMWARE(FIRMWARE_POLARIS11);
94 101
95static void amdgpu_uvd_note_usage(struct amdgpu_device *adev);
96static void amdgpu_uvd_idle_work_handler(struct work_struct *work); 102static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
97 103
98int amdgpu_uvd_sw_init(struct amdgpu_device *adev) 104int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
@@ -246,6 +252,23 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
246 if (!amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0)) 252 if (!amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
247 adev->uvd.address_64_bit = true; 253 adev->uvd.address_64_bit = true;
248 254
255 switch (adev->asic_type) {
256 case CHIP_TONGA:
257 adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_65_10;
258 break;
259 case CHIP_CARRIZO:
260 adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_11;
261 break;
262 case CHIP_FIJI:
263 adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_87_12;
264 break;
265 case CHIP_STONEY:
266 adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_37_15;
267 break;
268 default:
269 adev->uvd.use_ctx_buf = adev->asic_type >= CHIP_POLARIS10;
270 }
271
249 return 0; 272 return 0;
250} 273}
251 274
@@ -346,8 +369,6 @@ void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
346 if (handle != 0 && adev->uvd.filp[i] == filp) { 369 if (handle != 0 && adev->uvd.filp[i] == filp) {
347 struct fence *fence; 370 struct fence *fence;
348 371
349 amdgpu_uvd_note_usage(adev);
350
351 r = amdgpu_uvd_get_destroy_msg(ring, handle, 372 r = amdgpu_uvd_get_destroy_msg(ring, handle,
352 false, &fence); 373 false, &fence);
353 if (r) { 374 if (r) {
@@ -438,7 +459,7 @@ static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg,
438 unsigned fs_in_mb = width_in_mb * height_in_mb; 459 unsigned fs_in_mb = width_in_mb * height_in_mb;
439 460
440 unsigned image_size, tmp, min_dpb_size, num_dpb_buffer; 461 unsigned image_size, tmp, min_dpb_size, num_dpb_buffer;
441 unsigned min_ctx_size = 0; 462 unsigned min_ctx_size = ~0;
442 463
443 image_size = width * height; 464 image_size = width * height;
444 image_size += image_size / 2; 465 image_size += image_size / 2;
@@ -557,7 +578,7 @@ static int amdgpu_uvd_cs_msg_decode(struct amdgpu_device *adev, uint32_t *msg,
557 /* reference picture buffer */ 578 /* reference picture buffer */
558 min_dpb_size = image_size * num_dpb_buffer; 579 min_dpb_size = image_size * num_dpb_buffer;
559 580
560 if (adev->asic_type < CHIP_POLARIS10){ 581 if (!adev->uvd.use_ctx_buf){
561 /* macroblock context buffer */ 582 /* macroblock context buffer */
562 min_dpb_size += 583 min_dpb_size +=
563 width_in_mb * height_in_mb * num_dpb_buffer * 192; 584 width_in_mb * height_in_mb * num_dpb_buffer * 192;
@@ -662,7 +683,7 @@ static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
662 } 683 }
663 684
664 DRM_ERROR("No more free UVD handles!\n"); 685 DRM_ERROR("No more free UVD handles!\n");
665 return -EINVAL; 686 return -ENOSPC;
666 687
667 case 1: 688 case 1:
668 /* it's a decode msg, calc buffer sizes */ 689 /* it's a decode msg, calc buffer sizes */
@@ -913,8 +934,6 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
913 return -EINVAL; 934 return -EINVAL;
914 } 935 }
915 936
916 amdgpu_uvd_note_usage(ctx.parser->adev);
917
918 return 0; 937 return 0;
919} 938}
920 939
@@ -968,7 +987,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
968 987
969 if (direct) { 988 if (direct) {
970 r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f); 989 r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
971 job->fence = f; 990 job->fence = fence_get(f);
972 if (r) 991 if (r)
973 goto err_free; 992 goto err_free;
974 993
@@ -1106,24 +1125,18 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
1106 if (fences == 0 && handles == 0) { 1125 if (fences == 0 && handles == 0) {
1107 if (adev->pm.dpm_enabled) { 1126 if (adev->pm.dpm_enabled) {
1108 amdgpu_dpm_enable_uvd(adev, false); 1127 amdgpu_dpm_enable_uvd(adev, false);
1109 /* just work around for uvd clock remain high even
1110 * when uvd dpm disabled on Polaris10 */
1111 if (adev->asic_type == CHIP_POLARIS10)
1112 amdgpu_asic_set_uvd_clocks(adev, 0, 0);
1113 } else { 1128 } else {
1114 amdgpu_asic_set_uvd_clocks(adev, 0, 0); 1129 amdgpu_asic_set_uvd_clocks(adev, 0, 0);
1115 } 1130 }
1116 } else { 1131 } else {
1117 schedule_delayed_work(&adev->uvd.idle_work, 1132 schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
1118 msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
1119 } 1133 }
1120} 1134}
1121 1135
1122static void amdgpu_uvd_note_usage(struct amdgpu_device *adev) 1136void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
1123{ 1137{
1138 struct amdgpu_device *adev = ring->adev;
1124 bool set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work); 1139 bool set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
1125 set_clocks &= schedule_delayed_work(&adev->uvd.idle_work,
1126 msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
1127 1140
1128 if (set_clocks) { 1141 if (set_clocks) {
1129 if (adev->pm.dpm_enabled) { 1142 if (adev->pm.dpm_enabled) {
@@ -1133,3 +1146,48 @@ static void amdgpu_uvd_note_usage(struct amdgpu_device *adev)
1133 } 1146 }
1134 } 1147 }
1135} 1148}
1149
1150void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
1151{
1152 schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
1153}
1154
1155/**
1156 * amdgpu_uvd_ring_test_ib - test ib execution
1157 *
1158 * @ring: amdgpu_ring pointer
1159 *
1160 * Test if we can successfully execute an IB
1161 */
1162int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
1163{
1164 struct fence *fence;
1165 long r;
1166
1167 r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
1168 if (r) {
1169 DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
1170 goto error;
1171 }
1172
1173 r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
1174 if (r) {
1175 DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
1176 goto error;
1177 }
1178
1179 r = fence_wait_timeout(fence, false, timeout);
1180 if (r == 0) {
1181 DRM_ERROR("amdgpu: IB test timed out.\n");
1182 r = -ETIMEDOUT;
1183 } else if (r < 0) {
1184 DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
1185 } else {
1186 DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
1187 r = 0;
1188 }
1189
1190error:
1191 fence_put(fence);
1192 return r;
1193}
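
The per-ASIC use_ctx_buf decision compares adev->uvd.fw_version against constants packed as (major << 24) | (minor << 16) | (rev << 8), matching the FW_1_65_10 style macros added above. A short sketch of that packing and the resulting comparison; the runtime version value used here is hypothetical:

/* sketch: the (major << 24) | (minor << 16) | (rev << 8) packing used by the
 * FW_x_y_z constants above, and how a runtime version check against it reads */
#include <stdio.h>
#include <stdint.h>

#define UVD_FW_VER(major, minor, rev) \
	(((major) << 24) | ((minor) << 16) | ((rev) << 8))

int main(void)
{
	uint32_t fw_version = UVD_FW_VER(1, 66, 16);	/* hypothetical runtime value */

	printf("FW_1_65_10 packs to 0x%08x\n", UVD_FW_VER(1, 65, 10));
	printf("use_ctx_buf on Tonga would be %d\n",
	       fw_version >= UVD_FW_VER(1, 65, 10));
	return 0;
}
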
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index 9a3b449081a7..c850009602d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -35,5 +35,8 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
35void amdgpu_uvd_free_handles(struct amdgpu_device *adev, 35void amdgpu_uvd_free_handles(struct amdgpu_device *adev,
36 struct drm_file *filp); 36 struct drm_file *filp);
37int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx); 37int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx);
38void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring);
39void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring);
40int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout);
38 41
39#endif 42#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 875626a2eccb..05865ce35351 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -36,7 +36,7 @@
36#include "cikd.h" 36#include "cikd.h"
37 37
38/* 1 second timeout */ 38/* 1 second timeout */
39#define VCE_IDLE_TIMEOUT_MS 1000 39#define VCE_IDLE_TIMEOUT msecs_to_jiffies(1000)
40 40
41/* Firmware Names */ 41/* Firmware Names */
42#ifdef CONFIG_DRM_AMDGPU_CIK 42#ifdef CONFIG_DRM_AMDGPU_CIK
@@ -85,8 +85,6 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
85 unsigned ucode_version, version_major, version_minor, binary_id; 85 unsigned ucode_version, version_major, version_minor, binary_id;
86 int i, r; 86 int i, r;
87 87
88 INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler);
89
90 switch (adev->asic_type) { 88 switch (adev->asic_type) {
91#ifdef CONFIG_DRM_AMDGPU_CIK 89#ifdef CONFIG_DRM_AMDGPU_CIK
92 case CHIP_BONAIRE: 90 case CHIP_BONAIRE:
@@ -197,6 +195,9 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
197 adev->vce.filp[i] = NULL; 195 adev->vce.filp[i] = NULL;
198 } 196 }
199 197
198 INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler);
199 mutex_init(&adev->vce.idle_mutex);
200
200 return 0; 201 return 0;
201} 202}
202 203
@@ -220,6 +221,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
220 amdgpu_ring_fini(&adev->vce.ring[1]); 221 amdgpu_ring_fini(&adev->vce.ring[1]);
221 222
222 release_firmware(adev->vce.fw); 223 release_firmware(adev->vce.fw);
224 mutex_destroy(&adev->vce.idle_mutex);
223 225
224 return 0; 226 return 0;
225} 227}
@@ -310,37 +312,44 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work)
310 amdgpu_asic_set_vce_clocks(adev, 0, 0); 312 amdgpu_asic_set_vce_clocks(adev, 0, 0);
311 } 313 }
312 } else { 314 } else {
313 schedule_delayed_work(&adev->vce.idle_work, 315 schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT);
314 msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));
315 } 316 }
316} 317}
317 318
318/** 319/**
319 * amdgpu_vce_note_usage - power up VCE 320 * amdgpu_vce_ring_begin_use - power up VCE
320 * 321 *
321 * @adev: amdgpu_device pointer 322 * @ring: amdgpu ring
322 * 323 *
323 * Make sure VCE is powerd up when we want to use it 324 * Make sure VCE is powerd up when we want to use it
324 */ 325 */
325static void amdgpu_vce_note_usage(struct amdgpu_device *adev) 326void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring)
326{ 327{
327 bool streams_changed = false; 328 struct amdgpu_device *adev = ring->adev;
328 bool set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work); 329 bool set_clocks;
329 set_clocks &= schedule_delayed_work(&adev->vce.idle_work,
330 msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));
331
332 if (adev->pm.dpm_enabled) {
333 /* XXX figure out if the streams changed */
334 streams_changed = false;
335 }
336 330
337 if (set_clocks || streams_changed) { 331 mutex_lock(&adev->vce.idle_mutex);
332 set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work);
333 if (set_clocks) {
338 if (adev->pm.dpm_enabled) { 334 if (adev->pm.dpm_enabled) {
339 amdgpu_dpm_enable_vce(adev, true); 335 amdgpu_dpm_enable_vce(adev, true);
340 } else { 336 } else {
341 amdgpu_asic_set_vce_clocks(adev, 53300, 40000); 337 amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
342 } 338 }
343 } 339 }
340 mutex_unlock(&adev->vce.idle_mutex);
341}
342
343/**
344 * amdgpu_vce_ring_end_use - power VCE down
345 *
346 * @ring: amdgpu ring
347 *
348 * Schedule work to power VCE down again
349 */
350void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring)
351{
352 schedule_delayed_work(&ring->adev->vce.idle_work, VCE_IDLE_TIMEOUT);
344} 353}
345 354
346/** 355/**
@@ -357,11 +366,10 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
357 int i, r; 366 int i, r;
358 for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) { 367 for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
359 uint32_t handle = atomic_read(&adev->vce.handles[i]); 368 uint32_t handle = atomic_read(&adev->vce.handles[i]);
369
360 if (!handle || adev->vce.filp[i] != filp) 370 if (!handle || adev->vce.filp[i] != filp)
361 continue; 371 continue;
362 372
363 amdgpu_vce_note_usage(adev);
364
365 r = amdgpu_vce_get_destroy_msg(ring, handle, false, NULL); 373 r = amdgpu_vce_get_destroy_msg(ring, handle, false, NULL);
366 if (r) 374 if (r)
367 DRM_ERROR("Error destroying VCE handle (%d)!\n", r); 375 DRM_ERROR("Error destroying VCE handle (%d)!\n", r);
@@ -437,7 +445,7 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
437 ib->ptr[i] = 0x0; 445 ib->ptr[i] = 0x0;
438 446
439 r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f); 447 r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
440 job->fence = f; 448 job->fence = fence_get(f);
441 if (r) 449 if (r)
442 goto err; 450 goto err;
443 451
@@ -469,7 +477,6 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
469 struct amdgpu_job *job; 477 struct amdgpu_job *job;
470 struct amdgpu_ib *ib; 478 struct amdgpu_ib *ib;
471 struct fence *f = NULL; 479 struct fence *f = NULL;
472 uint64_t dummy;
473 int i, r; 480 int i, r;
474 481
475 r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job); 482 r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, &job);
@@ -477,7 +484,6 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
477 return r; 484 return r;
478 485
479 ib = &job->ibs[0]; 486 ib = &job->ibs[0];
480 dummy = ib->gpu_addr + 1024;
481 487
482 /* stitch together an VCE destroy msg */ 488 /* stitch together an VCE destroy msg */
483 ib->length_dw = 0; 489 ib->length_dw = 0;
@@ -485,11 +491,14 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
485 ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */ 491 ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
486 ib->ptr[ib->length_dw++] = handle; 492 ib->ptr[ib->length_dw++] = handle;
487 493
488 ib->ptr[ib->length_dw++] = 0x00000014; /* len */ 494 ib->ptr[ib->length_dw++] = 0x00000020; /* len */
489 ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */ 495 ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
490 ib->ptr[ib->length_dw++] = upper_32_bits(dummy); 496 ib->ptr[ib->length_dw++] = 0xffffffff; /* next task info, set to 0xffffffff if none */
491 ib->ptr[ib->length_dw++] = dummy; 497 ib->ptr[ib->length_dw++] = 0x00000001; /* destroy session */
492 ib->ptr[ib->length_dw++] = 0x00000001; 498 ib->ptr[ib->length_dw++] = 0x00000000;
499 ib->ptr[ib->length_dw++] = 0x00000000;
500 ib->ptr[ib->length_dw++] = 0xffffffff; /* feedback is not needed, set to 0xffffffff and firmware will not output feedback */
501 ib->ptr[ib->length_dw++] = 0x00000000;
493 502
494 ib->ptr[ib->length_dw++] = 0x00000008; /* len */ 503 ib->ptr[ib->length_dw++] = 0x00000008; /* len */
495 ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */ 504 ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */
@@ -499,7 +508,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
499 508
500 if (direct) { 509 if (direct) {
501 r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f); 510 r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
502 job->fence = f; 511 job->fence = fence_get(f);
503 if (r) 512 if (r)
504 goto err; 513 goto err;
505 514
@@ -580,12 +589,10 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
580 * we don't have another free session index. 589 * we don't have another free session index.
581 */ 590 */
582static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p, 591static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
583 uint32_t handle, bool *allocated) 592 uint32_t handle, uint32_t *allocated)
584{ 593{
585 unsigned i; 594 unsigned i;
586 595
587 *allocated = false;
588
589 /* validate the handle */ 596 /* validate the handle */
590 for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) { 597 for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
591 if (atomic_read(&p->adev->vce.handles[i]) == handle) { 598 if (atomic_read(&p->adev->vce.handles[i]) == handle) {
@@ -602,7 +609,7 @@ static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
602 if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) { 609 if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
603 p->adev->vce.filp[i] = p->filp; 610 p->adev->vce.filp[i] = p->filp;
604 p->adev->vce.img_size[i] = 0; 611 p->adev->vce.img_size[i] = 0;
605 *allocated = true; 612 *allocated |= 1 << i;
606 return i; 613 return i;
607 } 614 }
608 } 615 }
@@ -622,15 +629,13 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
622 struct amdgpu_ib *ib = &p->job->ibs[ib_idx]; 629 struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
623 unsigned fb_idx = 0, bs_idx = 0; 630 unsigned fb_idx = 0, bs_idx = 0;
624 int session_idx = -1; 631 int session_idx = -1;
625 bool destroyed = false; 632 uint32_t destroyed = 0;
626 bool created = false; 633 uint32_t created = 0;
627 bool allocated = false; 634 uint32_t allocated = 0;
628 uint32_t tmp, handle = 0; 635 uint32_t tmp, handle = 0;
629 uint32_t *size = &tmp; 636 uint32_t *size = &tmp;
630 int i, r = 0, idx = 0; 637 int i, r = 0, idx = 0;
631 638
632 amdgpu_vce_note_usage(p->adev);
633
634 while (idx < ib->length_dw) { 639 while (idx < ib->length_dw) {
635 uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx); 640 uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
636 uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1); 641 uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);
@@ -641,30 +646,30 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
641 goto out; 646 goto out;
642 } 647 }
643 648
644 if (destroyed) {
645 DRM_ERROR("No other command allowed after destroy!\n");
646 r = -EINVAL;
647 goto out;
648 }
649
650 switch (cmd) { 649 switch (cmd) {
651 case 0x00000001: // session 650 case 0x00000001: /* session */
652 handle = amdgpu_get_ib_value(p, ib_idx, idx + 2); 651 handle = amdgpu_get_ib_value(p, ib_idx, idx + 2);
653 session_idx = amdgpu_vce_validate_handle(p, handle, 652 session_idx = amdgpu_vce_validate_handle(p, handle,
654 &allocated); 653 &allocated);
655 if (session_idx < 0) 654 if (session_idx < 0) {
656 return session_idx; 655 r = session_idx;
656 goto out;
657 }
657 size = &p->adev->vce.img_size[session_idx]; 658 size = &p->adev->vce.img_size[session_idx];
658 break; 659 break;
659 660
660 case 0x00000002: // task info 661 case 0x00000002: /* task info */
661 fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6); 662 fb_idx = amdgpu_get_ib_value(p, ib_idx, idx + 6);
662 bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7); 663 bs_idx = amdgpu_get_ib_value(p, ib_idx, idx + 7);
663 break; 664 break;
664 665
665 case 0x01000001: // create 666 case 0x01000001: /* create */
666 created = true; 667 created |= 1 << session_idx;
667 if (!allocated) { 668 if (destroyed & (1 << session_idx)) {
669 destroyed &= ~(1 << session_idx);
670 allocated |= 1 << session_idx;
671
672 } else if (!(allocated & (1 << session_idx))) {
668 DRM_ERROR("Handle already in use!\n"); 673 DRM_ERROR("Handle already in use!\n");
669 r = -EINVAL; 674 r = -EINVAL;
670 goto out; 675 goto out;
@@ -675,16 +680,16 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
675 8 * 3 / 2; 680 8 * 3 / 2;
676 break; 681 break;
677 682
678 case 0x04000001: // config extension 683 case 0x04000001: /* config extension */
679 case 0x04000002: // pic control 684 case 0x04000002: /* pic control */
680 case 0x04000005: // rate control 685 case 0x04000005: /* rate control */
681 case 0x04000007: // motion estimation 686 case 0x04000007: /* motion estimation */
682 case 0x04000008: // rdo 687 case 0x04000008: /* rdo */
683 case 0x04000009: // vui 688 case 0x04000009: /* vui */
684 case 0x05000002: // auxiliary buffer 689 case 0x05000002: /* auxiliary buffer */
685 break; 690 break;
686 691
687 case 0x03000001: // encode 692 case 0x03000001: /* encode */
688 r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9, 693 r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 10, idx + 9,
689 *size, 0); 694 *size, 0);
690 if (r) 695 if (r)
@@ -696,18 +701,18 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
696 goto out; 701 goto out;
697 break; 702 break;
698 703
699 case 0x02000001: // destroy 704 case 0x02000001: /* destroy */
700 destroyed = true; 705 destroyed |= 1 << session_idx;
701 break; 706 break;
702 707
703 case 0x05000001: // context buffer 708 case 0x05000001: /* context buffer */
704 r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2, 709 r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
705 *size * 2, 0); 710 *size * 2, 0);
706 if (r) 711 if (r)
707 goto out; 712 goto out;
708 break; 713 break;
709 714
710 case 0x05000004: // video bitstream buffer 715 case 0x05000004: /* video bitstream buffer */
711 tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4); 716 tmp = amdgpu_get_ib_value(p, ib_idx, idx + 4);
712 r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2, 717 r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
713 tmp, bs_idx); 718 tmp, bs_idx);
@@ -715,7 +720,7 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
715 goto out; 720 goto out;
716 break; 721 break;
717 722
718 case 0x05000005: // feedback buffer 723 case 0x05000005: /* feedback buffer */
719 r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2, 724 r = amdgpu_vce_cs_reloc(p, ib_idx, idx + 3, idx + 2,
720 4096, fb_idx); 725 4096, fb_idx);
721 if (r) 726 if (r)
@@ -737,21 +742,24 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
737 idx += len / 4; 742 idx += len / 4;
738 } 743 }
739 744
740 if (allocated && !created) { 745 if (allocated & ~created) {
741 DRM_ERROR("New session without create command!\n"); 746 DRM_ERROR("New session without create command!\n");
742 r = -ENOENT; 747 r = -ENOENT;
743 } 748 }
744 749
745out: 750out:
746 if ((!r && destroyed) || (r && allocated)) { 751 if (!r) {
747 /* 752 /* No error, free all destroyed handle slots */
748 * IB contains a destroy msg or we have allocated an 753 tmp = destroyed;
749 * handle and got an error, anyway free the handle 754 } else {
750 */ 755 /* Error during parsing, free all allocated handle slots */
751 for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) 756 tmp = allocated;
752 atomic_cmpxchg(&p->adev->vce.handles[i], handle, 0);
753 } 757 }
754 758
759 for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
760 if (tmp & (1 << i))
761 atomic_set(&p->adev->vce.handles[i], 0);
762
755 return r; 763 return r;
756} 764}
757 765
@@ -837,10 +845,10 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
837 * @ring: the engine to test on 845 * @ring: the engine to test on
838 * 846 *
839 */ 847 */
840int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring) 848int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
841{ 849{
842 struct fence *fence = NULL; 850 struct fence *fence = NULL;
843 int r; 851 long r;
844 852
845 /* skip vce ring1 ib test for now, since it's not reliable */ 853 /* skip vce ring1 ib test for now, since it's not reliable */
846 if (ring == &ring->adev->vce.ring[1]) 854 if (ring == &ring->adev->vce.ring[1])
@@ -848,21 +856,25 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring)
848 856
849 r = amdgpu_vce_get_create_msg(ring, 1, NULL); 857 r = amdgpu_vce_get_create_msg(ring, 1, NULL);
850 if (r) { 858 if (r) {
851 DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r); 859 DRM_ERROR("amdgpu: failed to get create msg (%ld).\n", r);
852 goto error; 860 goto error;
853 } 861 }
854 862
855 r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence); 863 r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence);
856 if (r) { 864 if (r) {
857 DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r); 865 DRM_ERROR("amdgpu: failed to get destroy ib (%ld).\n", r);
858 goto error; 866 goto error;
859 } 867 }
860 868
861 r = fence_wait(fence, false); 869 r = fence_wait_timeout(fence, false, timeout);
862 if (r) { 870 if (r == 0) {
863 DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); 871 DRM_ERROR("amdgpu: IB test timed out.\n");
872 r = -ETIMEDOUT;
873 } else if (r < 0) {
874 DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
864 } else { 875 } else {
865 DRM_INFO("ib test on ring %d succeeded\n", ring->idx); 876 DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
877 r = 0;
866 } 878 }
867error: 879error:
868 fence_put(fence); 880 fence_put(fence);
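
Note: the parse_cs rework above drops the single created/allocated/destroyed booleans in favour of one bit per session slot, so a single IB may destroy and recreate a handle, and the error path can free exactly the slots it touched. The stand-alone sketch below (plain C, not driver code; the handles array, the 0xcafe/0xbeef values and MAX_HANDLES are invented for illustration) walks through the cleanup rule the new out: label applies: on success only the destroyed slots are released, on error only the slots allocated during this parse.

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_HANDLES 16

    int main(void)
    {
        uint32_t handles[MAX_HANDLES] = { 0 };
        uint32_t allocated = 0, created = 0, destroyed = 0;
        uint32_t to_free;
        unsigned int i;
        int err = 0;

        /* slot 3: a brand new session created by this IB */
        allocated |= 1u << 3;
        created |= 1u << 3;
        handles[3] = 0xcafe;

        /* slot 5: an existing session destroyed by the same IB */
        handles[5] = 0xbeef;
        destroyed |= 1u << 5;

        /* a slot that was allocated but never created is an error */
        if (allocated & ~created)
            err = -ENOENT;

        /* success frees the destroyed slots, failure frees the allocated ones */
        to_free = err ? allocated : destroyed;
        for (i = 0; i < MAX_HANDLES; ++i)
            if (to_free & (1u << i))
                handles[i] = 0;

        for (i = 0; i < MAX_HANDLES; ++i)
            if (handles[i])
                printf("slot %u still holds handle 0x%x\n", i, handles[i]);
        return 0;
    }

Running it leaves only slot 3 occupied, mirroring the driver: the destroyed handle is dropped, the freshly created one survives a successful parse.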
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index f40cf761c66f..63f83d0d985c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -39,6 +39,8 @@ void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib,
39void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, 39void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
40 unsigned flags); 40 unsigned flags);
41int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring); 41int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring);
42int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring); 42int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout);
43void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring);
44void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring);
43 45
44#endif 46#endif
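
Note: the two new declarations above pair every stretch of ring activity with begin_use/end_use callbacks. The sketch below shows the generic idle-work pattern they implement, roughly as in the amdgpu_vce.c hunks earlier: begin_use tries to cancel the pending idle worker and only powers the block up if the cancel came too late, end_use simply re-arms the worker. struct example_block, example_power_up() and IDLE_TIMEOUT are placeholders rather than kernel or driver symbols; cancel_delayed_work_sync() and schedule_delayed_work() are the real workqueue helpers.

    /* Placeholder: struct example_block bundles the mutex, the delayed work
     * and whatever power interface the block has; not a kernel structure. */
    static void example_begin_use(struct example_block *blk)
    {
        mutex_lock(&blk->idle_mutex);
        /* If the idle worker was still queued we cancelled it in time and the
         * block never powered down; otherwise bring it back up first. */
        if (!cancel_delayed_work_sync(&blk->idle_work))
            example_power_up(blk);
        mutex_unlock(&blk->idle_mutex);
    }

    static void example_end_use(struct example_block *blk)
    {
        /* Re-arm the idle handler; it powers the block down again if no new
         * submission arrives within IDLE_TIMEOUT. */
        schedule_delayed_work(&blk->idle_work, IDLE_TIMEOUT);
    }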
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 9f36ed30ba11..8e642fc48df4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -25,6 +25,7 @@
25 * Alex Deucher 25 * Alex Deucher
26 * Jerome Glisse 26 * Jerome Glisse
27 */ 27 */
28#include <linux/fence-array.h>
28#include <drm/drmP.h> 29#include <drm/drmP.h>
29#include <drm/amdgpu_drm.h> 30#include <drm/amdgpu_drm.h>
30#include "amdgpu.h" 31#include "amdgpu.h"
@@ -114,16 +115,26 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
114/** 115/**
115 * amdgpu_vm_get_bos - add the vm BOs to a duplicates list 116 * amdgpu_vm_get_bos - add the vm BOs to a duplicates list
116 * 117 *
118 * @adev: amdgpu device pointer
117 * @vm: vm providing the BOs 119 * @vm: vm providing the BOs
118 * @duplicates: head of duplicates list 120 * @duplicates: head of duplicates list
119 * 121 *
120 * Add the page directory to the BO duplicates list 122 * Add the page directory to the BO duplicates list
121 * for command submission. 123 * for command submission.
122 */ 124 */
123void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates) 125void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
126 struct list_head *duplicates)
124{ 127{
128 uint64_t num_evictions;
125 unsigned i; 129 unsigned i;
126 130
131 /* We only need to validate the page tables
132 * if they aren't already valid.
133 */
134 num_evictions = atomic64_read(&adev->num_evictions);
135 if (num_evictions == vm->last_eviction_counter)
136 return;
137
127 /* add the vm page table to the list */ 138 /* add the vm page table to the list */
128 for (i = 0; i <= vm->max_pde_used; ++i) { 139 for (i = 0; i <= vm->max_pde_used; ++i) {
129 struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry; 140 struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry;
@@ -162,6 +173,13 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
162 spin_unlock(&glob->lru_lock); 173 spin_unlock(&glob->lru_lock);
163} 174}
164 175
176static bool amdgpu_vm_is_gpu_reset(struct amdgpu_device *adev,
177 struct amdgpu_vm_id *id)
178{
179 return id->current_gpu_reset_count !=
180 atomic_read(&adev->gpu_reset_counter) ? true : false;
181}
182
165/** 183/**
166 * amdgpu_vm_grab_id - allocate the next free VMID 184 * amdgpu_vm_grab_id - allocate the next free VMID
167 * 185 *
@@ -174,18 +192,67 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
174 */ 192 */
175int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, 193int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
176 struct amdgpu_sync *sync, struct fence *fence, 194 struct amdgpu_sync *sync, struct fence *fence,
177 unsigned *vm_id, uint64_t *vm_pd_addr) 195 struct amdgpu_job *job)
178{ 196{
179 uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
180 struct amdgpu_device *adev = ring->adev; 197 struct amdgpu_device *adev = ring->adev;
198 uint64_t fence_context = adev->fence_context + ring->idx;
181 struct fence *updates = sync->last_vm_update; 199 struct fence *updates = sync->last_vm_update;
182 struct amdgpu_vm_id *id; 200 struct amdgpu_vm_id *id, *idle;
183 unsigned i = ring->idx; 201 struct fence **fences;
184 int r; 202 unsigned i;
203 int r = 0;
204
205 fences = kmalloc_array(sizeof(void *), adev->vm_manager.num_ids,
206 GFP_KERNEL);
207 if (!fences)
208 return -ENOMEM;
185 209
186 mutex_lock(&adev->vm_manager.lock); 210 mutex_lock(&adev->vm_manager.lock);
187 211
212 /* Check if we have an idle VMID */
213 i = 0;
214 list_for_each_entry(idle, &adev->vm_manager.ids_lru, list) {
215 fences[i] = amdgpu_sync_peek_fence(&idle->active, ring);
216 if (!fences[i])
217 break;
218 ++i;
219 }
220
221 /* If we can't find an idle VMID to use, wait till one becomes available */
222 if (&idle->list == &adev->vm_manager.ids_lru) {
223 u64 fence_context = adev->vm_manager.fence_context + ring->idx;
224 unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
225 struct fence_array *array;
226 unsigned j;
227
228 for (j = 0; j < i; ++j)
229 fence_get(fences[j]);
230
231 array = fence_array_create(i, fences, fence_context,
232 seqno, true);
233 if (!array) {
234 for (j = 0; j < i; ++j)
235 fence_put(fences[j]);
236 kfree(fences);
237 r = -ENOMEM;
238 goto error;
239 }
240
241
242 r = amdgpu_sync_fence(ring->adev, sync, &array->base);
243 fence_put(&array->base);
244 if (r)
245 goto error;
246
247 mutex_unlock(&adev->vm_manager.lock);
248 return 0;
249
250 }
251 kfree(fences);
252
253 job->vm_needs_flush = true;
188 /* Check if we can use a VMID already assigned to this VM */ 254 /* Check if we can use a VMID already assigned to this VM */
255 i = ring->idx;
189 do { 256 do {
190 struct fence *flushed; 257 struct fence *flushed;
191 258
@@ -196,67 +263,52 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
196 /* Check all the prerequisites to using this VMID */ 263 /* Check all the prerequisites to using this VMID */
197 if (!id) 264 if (!id)
198 continue; 265 continue;
266 if (amdgpu_vm_is_gpu_reset(adev, id))
267 continue;
199 268
200 if (atomic64_read(&id->owner) != vm->client_id) 269 if (atomic64_read(&id->owner) != vm->client_id)
201 continue; 270 continue;
202 271
203 if (pd_addr != id->pd_gpu_addr) 272 if (job->vm_pd_addr != id->pd_gpu_addr)
204 continue; 273 continue;
205 274
206 if (id->last_user != ring && 275 if (!id->last_flush)
207 (!id->last_flush || !fence_is_signaled(id->last_flush)))
208 continue; 276 continue;
209 277
210 flushed = id->flushed_updates; 278 if (id->last_flush->context != fence_context &&
211 if (updates && (!flushed || fence_is_later(updates, flushed))) 279 !fence_is_signaled(id->last_flush))
212 continue; 280 continue;
213 281
214 /* Good we can use this VMID */ 282 flushed = id->flushed_updates;
215 if (id->last_user == ring) { 283 if (updates &&
216 r = amdgpu_sync_fence(ring->adev, sync, 284 (!flushed || fence_is_later(updates, flushed)))
217 id->first); 285 continue;
218 if (r)
219 goto error;
220 }
221 286
222 /* And remember this submission as user of the VMID */ 287 /* Good, we can use this VMID. Remember this submission as
288 * user of the VMID.
289 */
223 r = amdgpu_sync_fence(ring->adev, &id->active, fence); 290 r = amdgpu_sync_fence(ring->adev, &id->active, fence);
224 if (r) 291 if (r)
225 goto error; 292 goto error;
226 293
294 id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
227 list_move_tail(&id->list, &adev->vm_manager.ids_lru); 295 list_move_tail(&id->list, &adev->vm_manager.ids_lru);
228 vm->ids[ring->idx] = id; 296 vm->ids[ring->idx] = id;
229 297
230 *vm_id = id - adev->vm_manager.ids; 298 job->vm_id = id - adev->vm_manager.ids;
231 *vm_pd_addr = AMDGPU_VM_NO_FLUSH; 299 job->vm_needs_flush = false;
232 trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr); 300 trace_amdgpu_vm_grab_id(vm, ring->idx, job);
233 301
234 mutex_unlock(&adev->vm_manager.lock); 302 mutex_unlock(&adev->vm_manager.lock);
235 return 0; 303 return 0;
236 304
237 } while (i != ring->idx); 305 } while (i != ring->idx);
238 306
239 id = list_first_entry(&adev->vm_manager.ids_lru, 307 /* Still no ID to use? Then use the idle one found earlier */
240 struct amdgpu_vm_id, 308 id = idle;
241 list);
242
243 if (!amdgpu_sync_is_idle(&id->active)) {
244 struct list_head *head = &adev->vm_manager.ids_lru;
245 struct amdgpu_vm_id *tmp;
246 309
247 list_for_each_entry_safe(id, tmp, &adev->vm_manager.ids_lru, 310 /* Remember this submission as user of the VMID */
248 list) { 311 r = amdgpu_sync_fence(ring->adev, &id->active, fence);
249 if (amdgpu_sync_is_idle(&id->active)) {
250 list_move(&id->list, head);
251 head = &id->list;
252 }
253 }
254 id = list_first_entry(&adev->vm_manager.ids_lru,
255 struct amdgpu_vm_id,
256 list);
257 }
258
259 r = amdgpu_sync_cycle_fences(sync, &id->active, fence);
260 if (r) 312 if (r)
261 goto error; 313 goto error;
262 314
@@ -269,22 +321,46 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
269 fence_put(id->flushed_updates); 321 fence_put(id->flushed_updates);
270 id->flushed_updates = fence_get(updates); 322 id->flushed_updates = fence_get(updates);
271 323
272 id->pd_gpu_addr = pd_addr; 324 id->pd_gpu_addr = job->vm_pd_addr;
273 325 id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
274 list_move_tail(&id->list, &adev->vm_manager.ids_lru); 326 list_move_tail(&id->list, &adev->vm_manager.ids_lru);
275 id->last_user = ring;
276 atomic64_set(&id->owner, vm->client_id); 327 atomic64_set(&id->owner, vm->client_id);
277 vm->ids[ring->idx] = id; 328 vm->ids[ring->idx] = id;
278 329
279 *vm_id = id - adev->vm_manager.ids; 330 job->vm_id = id - adev->vm_manager.ids;
280 *vm_pd_addr = pd_addr; 331 trace_amdgpu_vm_grab_id(vm, ring->idx, job);
281 trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr);
282 332
283error: 333error:
284 mutex_unlock(&adev->vm_manager.lock); 334 mutex_unlock(&adev->vm_manager.lock);
285 return r; 335 return r;
286} 336}
287 337
338static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring)
339{
340 struct amdgpu_device *adev = ring->adev;
341 const struct amdgpu_ip_block_version *ip_block;
342
343 if (ring->type != AMDGPU_RING_TYPE_COMPUTE)
344 /* only compute rings */
345 return false;
346
347 ip_block = amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX);
348 if (!ip_block)
349 return false;
350
351 if (ip_block->major <= 7) {
352 /* gfx7 has no workaround */
353 return true;
354 } else if (ip_block->major == 8) {
355 if (adev->gfx.mec_fw_version >= 673)
356 /* gfx8 is fixed in MEC firmware 673 */
357 return false;
358 else
359 return true;
360 }
361 return false;
362}
363
288/** 364/**
289 * amdgpu_vm_flush - hardware flush the vm 365 * amdgpu_vm_flush - hardware flush the vm
290 * 366 *
@@ -294,59 +370,52 @@ error:
294 * 370 *
295 * Emit a VM flush when it is necessary. 371 * Emit a VM flush when it is necessary.
296 */ 372 */
297int amdgpu_vm_flush(struct amdgpu_ring *ring, 373int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job)
298 unsigned vm_id, uint64_t pd_addr,
299 uint32_t gds_base, uint32_t gds_size,
300 uint32_t gws_base, uint32_t gws_size,
301 uint32_t oa_base, uint32_t oa_size)
302{ 374{
303 struct amdgpu_device *adev = ring->adev; 375 struct amdgpu_device *adev = ring->adev;
304 struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id]; 376 struct amdgpu_vm_id *id = &adev->vm_manager.ids[job->vm_id];
305 bool gds_switch_needed = ring->funcs->emit_gds_switch && ( 377 bool gds_switch_needed = ring->funcs->emit_gds_switch && (
306 id->gds_base != gds_base || 378 id->gds_base != job->gds_base ||
307 id->gds_size != gds_size || 379 id->gds_size != job->gds_size ||
308 id->gws_base != gws_base || 380 id->gws_base != job->gws_base ||
309 id->gws_size != gws_size || 381 id->gws_size != job->gws_size ||
310 id->oa_base != oa_base || 382 id->oa_base != job->oa_base ||
311 id->oa_size != oa_size); 383 id->oa_size != job->oa_size);
312 int r; 384 int r;
313 385
314 if (ring->funcs->emit_pipeline_sync && ( 386 if (ring->funcs->emit_pipeline_sync && (
315 pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed || 387 job->vm_needs_flush || gds_switch_needed ||
316 ring->type == AMDGPU_RING_TYPE_COMPUTE)) 388 amdgpu_vm_ring_has_compute_vm_bug(ring)))
317 amdgpu_ring_emit_pipeline_sync(ring); 389 amdgpu_ring_emit_pipeline_sync(ring);
318 390
319 if (ring->funcs->emit_vm_flush && 391 if (ring->funcs->emit_vm_flush && (job->vm_needs_flush ||
320 pd_addr != AMDGPU_VM_NO_FLUSH) { 392 amdgpu_vm_is_gpu_reset(adev, id))) {
321 struct fence *fence; 393 struct fence *fence;
322 394
323 trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id); 395 trace_amdgpu_vm_flush(job->vm_pd_addr, ring->idx, job->vm_id);
324 amdgpu_ring_emit_vm_flush(ring, vm_id, pd_addr); 396 amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr);
397
398 r = amdgpu_fence_emit(ring, &fence);
399 if (r)
400 return r;
325 401
326 mutex_lock(&adev->vm_manager.lock); 402 mutex_lock(&adev->vm_manager.lock);
327 if ((id->pd_gpu_addr == pd_addr) && (id->last_user == ring)) { 403 fence_put(id->last_flush);
328 r = amdgpu_fence_emit(ring, &fence); 404 id->last_flush = fence;
329 if (r) {
330 mutex_unlock(&adev->vm_manager.lock);
331 return r;
332 }
333 fence_put(id->last_flush);
334 id->last_flush = fence;
335 }
336 mutex_unlock(&adev->vm_manager.lock); 405 mutex_unlock(&adev->vm_manager.lock);
337 } 406 }
338 407
339 if (gds_switch_needed) { 408 if (gds_switch_needed) {
340 id->gds_base = gds_base; 409 id->gds_base = job->gds_base;
341 id->gds_size = gds_size; 410 id->gds_size = job->gds_size;
342 id->gws_base = gws_base; 411 id->gws_base = job->gws_base;
343 id->gws_size = gws_size; 412 id->gws_size = job->gws_size;
344 id->oa_base = oa_base; 413 id->oa_base = job->oa_base;
345 id->oa_size = oa_size; 414 id->oa_size = job->oa_size;
346 amdgpu_ring_emit_gds_switch(ring, vm_id, 415 amdgpu_ring_emit_gds_switch(ring, job->vm_id,
347 gds_base, gds_size, 416 job->gds_base, job->gds_size,
348 gws_base, gws_size, 417 job->gws_base, job->gws_size,
349 oa_base, oa_size); 418 job->oa_base, job->oa_size);
350 } 419 }
351 420
352 return 0; 421 return 0;
@@ -723,7 +792,7 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
723 * @vm: requested vm 792 * @vm: requested vm
724 * @start: start of GPU address range 793 * @start: start of GPU address range
725 * @end: end of GPU address range 794 * @end: end of GPU address range
726 * @dst: destination address to map to 795 * @dst: destination address to map to; advanced to the next dst inside the function
727 * @flags: mapping flags 796 * @flags: mapping flags
728 * 797 *
729 * Update the page tables in the range @start - @end. 798 * Update the page tables in the range @start - @end.
@@ -737,49 +806,75 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
737{ 806{
738 const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1; 807 const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
739 808
740 uint64_t last_pe_start = ~0, last_pe_end = ~0, last_dst = ~0; 809 uint64_t cur_pe_start, cur_pe_end, cur_dst;
741 uint64_t addr; 810 uint64_t addr; /* next GPU address to be updated */
811 uint64_t pt_idx;
812 struct amdgpu_bo *pt;
813 unsigned nptes; /* next number of ptes to be updated */
814 uint64_t next_pe_start;
815
816 /* initialize the variables */
817 addr = start;
818 pt_idx = addr >> amdgpu_vm_block_size;
819 pt = vm->page_tables[pt_idx].entry.robj;
820
821 if ((addr & ~mask) == (end & ~mask))
822 nptes = end - addr;
823 else
824 nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
825
826 cur_pe_start = amdgpu_bo_gpu_offset(pt);
827 cur_pe_start += (addr & mask) * 8;
828 cur_pe_end = cur_pe_start + 8 * nptes;
829 cur_dst = dst;
830
831 /* for next ptb */
832 addr += nptes;
833 dst += nptes * AMDGPU_GPU_PAGE_SIZE;
742 834
743 /* walk over the address space and update the page tables */ 835 /* walk over the address space and update the page tables */
744 for (addr = start; addr < end; ) { 836 while (addr < end) {
745 uint64_t pt_idx = addr >> amdgpu_vm_block_size; 837 pt_idx = addr >> amdgpu_vm_block_size;
746 struct amdgpu_bo *pt = vm->page_tables[pt_idx].entry.robj; 838 pt = vm->page_tables[pt_idx].entry.robj;
747 unsigned nptes;
748 uint64_t pe_start;
749 839
750 if ((addr & ~mask) == (end & ~mask)) 840 if ((addr & ~mask) == (end & ~mask))
751 nptes = end - addr; 841 nptes = end - addr;
752 else 842 else
753 nptes = AMDGPU_VM_PTE_COUNT - (addr & mask); 843 nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);
754 844
755 pe_start = amdgpu_bo_gpu_offset(pt); 845 next_pe_start = amdgpu_bo_gpu_offset(pt);
756 pe_start += (addr & mask) * 8; 846 next_pe_start += (addr & mask) * 8;
757
758 if (last_pe_end != pe_start) {
759 847
848 if (cur_pe_end == next_pe_start) {
849 /* The next ptb is consecutive to current ptb.
850 * Don't call amdgpu_vm_frag_ptes now.
851 * Will update two ptbs together in future.
852 */
853 cur_pe_end += 8 * nptes;
854 } else {
760 amdgpu_vm_frag_ptes(adev, vm_update_params, 855 amdgpu_vm_frag_ptes(adev, vm_update_params,
761 last_pe_start, last_pe_end, 856 cur_pe_start, cur_pe_end,
762 last_dst, flags); 857 cur_dst, flags);
763 858
764 last_pe_start = pe_start; 859 cur_pe_start = next_pe_start;
765 last_pe_end = pe_start + 8 * nptes; 860 cur_pe_end = next_pe_start + 8 * nptes;
766 last_dst = dst; 861 cur_dst = dst;
767 } else {
768 last_pe_end += 8 * nptes;
769 } 862 }
770 863
864 /* for next ptb */
771 addr += nptes; 865 addr += nptes;
772 dst += nptes * AMDGPU_GPU_PAGE_SIZE; 866 dst += nptes * AMDGPU_GPU_PAGE_SIZE;
773 } 867 }
774 868
775 amdgpu_vm_frag_ptes(adev, vm_update_params, last_pe_start, 869 amdgpu_vm_frag_ptes(adev, vm_update_params, cur_pe_start,
776 last_pe_end, last_dst, flags); 870 cur_pe_end, cur_dst, flags);
777} 871}
778 872
779/** 873/**
780 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table 874 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
781 * 875 *
782 * @adev: amdgpu_device pointer 876 * @adev: amdgpu_device pointer
877 * @exclusive: fence we need to sync to
783 * @src: address where to copy page table entries from 878 * @src: address where to copy page table entries from
784 * @pages_addr: DMA addresses to use for mapping 879 * @pages_addr: DMA addresses to use for mapping
785 * @vm: requested vm 880 * @vm: requested vm
@@ -793,6 +888,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev,
793 * Returns 0 for success, -EINVAL for failure. 888 * Returns 0 for success, -EINVAL for failure.
794 */ 889 */
795static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, 890static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
891 struct fence *exclusive,
796 uint64_t src, 892 uint64_t src,
797 dma_addr_t *pages_addr, 893 dma_addr_t *pages_addr,
798 struct amdgpu_vm *vm, 894 struct amdgpu_vm *vm,
@@ -853,6 +949,10 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
853 949
854 vm_update_params.ib = &job->ibs[0]; 950 vm_update_params.ib = &job->ibs[0];
855 951
952 r = amdgpu_sync_fence(adev, &job->sync, exclusive);
953 if (r)
954 goto error_free;
955
856 r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv, 956 r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv,
857 owner); 957 owner);
858 if (r) 958 if (r)
@@ -889,6 +989,7 @@ error_free:
889 * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks 989 * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
890 * 990 *
891 * @adev: amdgpu_device pointer 991 * @adev: amdgpu_device pointer
992 * @exclusive: fence we need to sync to
892 * @gtt_flags: flags as they are used for GTT 993 * @gtt_flags: flags as they are used for GTT
893 * @pages_addr: DMA addresses to use for mapping 994 * @pages_addr: DMA addresses to use for mapping
894 * @vm: requested vm 995 * @vm: requested vm
@@ -902,6 +1003,7 @@ error_free:
902 * Returns 0 for success, -EINVAL for failure. 1003 * Returns 0 for success, -EINVAL for failure.
903 */ 1004 */
904static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, 1005static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
1006 struct fence *exclusive,
905 uint32_t gtt_flags, 1007 uint32_t gtt_flags,
906 dma_addr_t *pages_addr, 1008 dma_addr_t *pages_addr,
907 struct amdgpu_vm *vm, 1009 struct amdgpu_vm *vm,
@@ -932,7 +1034,8 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
932 addr += mapping->offset; 1034 addr += mapping->offset;
933 1035
934 if (!pages_addr || src) 1036 if (!pages_addr || src)
935 return amdgpu_vm_bo_update_mapping(adev, src, pages_addr, vm, 1037 return amdgpu_vm_bo_update_mapping(adev, exclusive,
1038 src, pages_addr, vm,
936 start, mapping->it.last, 1039 start, mapping->it.last,
937 flags, addr, fence); 1040 flags, addr, fence);
938 1041
@@ -940,7 +1043,8 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
940 uint64_t last; 1043 uint64_t last;
941 1044
942 last = min((uint64_t)mapping->it.last, start + max_size - 1); 1045 last = min((uint64_t)mapping->it.last, start + max_size - 1);
943 r = amdgpu_vm_bo_update_mapping(adev, src, pages_addr, vm, 1046 r = amdgpu_vm_bo_update_mapping(adev, exclusive,
1047 src, pages_addr, vm,
944 start, last, flags, addr, 1048 start, last, flags, addr,
945 fence); 1049 fence);
946 if (r) 1050 if (r)
@@ -973,6 +1077,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
973 struct amdgpu_bo_va_mapping *mapping; 1077 struct amdgpu_bo_va_mapping *mapping;
974 dma_addr_t *pages_addr = NULL; 1078 dma_addr_t *pages_addr = NULL;
975 uint32_t gtt_flags, flags; 1079 uint32_t gtt_flags, flags;
1080 struct fence *exclusive;
976 uint64_t addr; 1081 uint64_t addr;
977 int r; 1082 int r;
978 1083
@@ -994,8 +1099,11 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
994 default: 1099 default:
995 break; 1100 break;
996 } 1101 }
1102
1103 exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv);
997 } else { 1104 } else {
998 addr = 0; 1105 addr = 0;
1106 exclusive = NULL;
999 } 1107 }
1000 1108
1001 flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem); 1109 flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);
@@ -1007,7 +1115,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
1007 spin_unlock(&vm->status_lock); 1115 spin_unlock(&vm->status_lock);
1008 1116
1009 list_for_each_entry(mapping, &bo_va->invalids, list) { 1117 list_for_each_entry(mapping, &bo_va->invalids, list) {
1010 r = amdgpu_vm_bo_split_mapping(adev, gtt_flags, pages_addr, vm, 1118 r = amdgpu_vm_bo_split_mapping(adev, exclusive,
1119 gtt_flags, pages_addr, vm,
1011 mapping, flags, addr, 1120 mapping, flags, addr,
1012 &bo_va->last_pt_update); 1121 &bo_va->last_pt_update);
1013 if (r) 1122 if (r)
@@ -1054,7 +1163,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
1054 struct amdgpu_bo_va_mapping, list); 1163 struct amdgpu_bo_va_mapping, list);
1055 list_del(&mapping->list); 1164 list_del(&mapping->list);
1056 1165
1057 r = amdgpu_vm_bo_split_mapping(adev, 0, NULL, vm, mapping, 1166 r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, NULL, vm, mapping,
1058 0, 0, NULL); 1167 0, 0, NULL);
1059 kfree(mapping); 1168 kfree(mapping);
1060 if (r) 1169 if (r)
@@ -1445,6 +1554,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
1445 amdgpu_bo_unreserve(vm->page_directory); 1554 amdgpu_bo_unreserve(vm->page_directory);
1446 if (r) 1555 if (r)
1447 goto error_free_page_directory; 1556 goto error_free_page_directory;
1557 vm->last_eviction_counter = atomic64_read(&adev->num_evictions);
1448 1558
1449 return 0; 1559 return 0;
1450 1560
@@ -1516,6 +1626,10 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
1516 &adev->vm_manager.ids_lru); 1626 &adev->vm_manager.ids_lru);
1517 } 1627 }
1518 1628
1629 adev->vm_manager.fence_context = fence_context_alloc(AMDGPU_MAX_RINGS);
1630 for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
1631 adev->vm_manager.seqno[i] = 0;
1632
1519 atomic_set(&adev->vm_manager.vm_pte_next_ring, 0); 1633 atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
1520 atomic64_set(&adev->vm_manager.client_counter, 0); 1634 atomic64_set(&adev->vm_manager.client_counter, 0);
1521} 1635}
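
Note: the rewritten amdgpu_vm_update_ptes above keeps a current destination range (cur_pe_start/cur_pe_end) and only flushes it through amdgpu_vm_frag_ptes when the next page table is not physically contiguous with it, so runs of adjacent page tables are written in one burst. A self-contained illustration of the same coalescing idea (plain C; the chunk addresses and the flush() helper are invented, 8 bytes per PTE as in the driver):

    #include <stdint.h>
    #include <stdio.h>

    /* stands in for amdgpu_vm_frag_ptes: submit one contiguous PTE write */
    static void flush(uint64_t start, uint64_t end)
    {
        printf("write PTEs [0x%llx, 0x%llx)\n",
               (unsigned long long)start, (unsigned long long)end);
    }

    int main(void)
    {
        /* GPU addresses and byte lengths of successive PTE chunks */
        const uint64_t chunk_start[] = { 0x1000, 0x1200, 0x1400, 0x9000 };
        const uint64_t chunk_bytes[] = {  0x200,  0x200,  0x100,  0x300 };
        uint64_t cur_start = chunk_start[0];
        uint64_t cur_end = chunk_start[0] + chunk_bytes[0];
        unsigned int i;

        for (i = 1; i < 4; ++i) {
            if (cur_end == chunk_start[i]) {
                /* contiguous with the current range: just extend it */
                cur_end += chunk_bytes[i];
            } else {
                /* gap: flush what we have and start a new range */
                flush(cur_start, cur_end);
                cur_start = chunk_start[i];
                cur_end = chunk_start[i] + chunk_bytes[i];
            }
        }
        flush(cur_start, cur_end);  /* flush the final range */
        return 0;
    }

The first three chunks collapse into a single write of [0x1000, 0x1500), and only the non-adjacent fourth chunk causes a second submission.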
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
index 48b6bd671cda..c32eca26155c 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c
@@ -98,6 +98,7 @@ amdgpu_atombios_encoder_set_backlight_level(struct amdgpu_encoder *amdgpu_encode
98 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: 98 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
99 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 99 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
100 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 100 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
101 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
101 if (dig->backlight_level == 0) 102 if (dig->backlight_level == 0)
102 amdgpu_atombios_encoder_setup_dig_transmitter(encoder, 103 amdgpu_atombios_encoder_setup_dig_transmitter(encoder,
103 ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0); 104 ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index 5ec1f1e9c983..e2f0e5d58d5c 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -50,7 +50,9 @@
50#include "gmc/gmc_7_1_sh_mask.h" 50#include "gmc/gmc_7_1_sh_mask.h"
51 51
52MODULE_FIRMWARE("radeon/bonaire_smc.bin"); 52MODULE_FIRMWARE("radeon/bonaire_smc.bin");
53MODULE_FIRMWARE("radeon/bonaire_k_smc.bin");
53MODULE_FIRMWARE("radeon/hawaii_smc.bin"); 54MODULE_FIRMWARE("radeon/hawaii_smc.bin");
55MODULE_FIRMWARE("radeon/hawaii_k_smc.bin");
54 56
55#define MC_CG_ARB_FREQ_F0 0x0a 57#define MC_CG_ARB_FREQ_F0 0x0a
56#define MC_CG_ARB_FREQ_F1 0x0b 58#define MC_CG_ARB_FREQ_F1 0x0b
@@ -84,12 +86,14 @@ static const struct ci_pt_defaults defaults_bonaire_xt =
84 { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 } 86 { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
85}; 87};
86 88
89#if 0
87static const struct ci_pt_defaults defaults_bonaire_pro = 90static const struct ci_pt_defaults defaults_bonaire_pro =
88{ 91{
89 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062, 92 1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
90 { 0x8C, 0x23F, 0x244, 0xA6, 0x83, 0x85, 0x86, 0x86, 0x83, 0xDB, 0xDB, 0xDA, 0x67, 0x60, 0x5F }, 93 { 0x8C, 0x23F, 0x244, 0xA6, 0x83, 0x85, 0x86, 0x86, 0x83, 0xDB, 0xDB, 0xDA, 0x67, 0x60, 0x5F },
91 { 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB } 94 { 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
92}; 95};
96#endif
93 97
94static const struct ci_pt_defaults defaults_saturn_xt = 98static const struct ci_pt_defaults defaults_saturn_xt =
95{ 99{
@@ -98,12 +102,14 @@ static const struct ci_pt_defaults defaults_saturn_xt =
98 { 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 } 102 { 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
99}; 103};
100 104
105#if 0
101static const struct ci_pt_defaults defaults_saturn_pro = 106static const struct ci_pt_defaults defaults_saturn_pro =
102{ 107{
103 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000, 108 1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
104 { 0x96, 0x21D, 0x23B, 0xA1, 0x85, 0x87, 0x83, 0x84, 0x81, 0xE6, 0xE6, 0xE6, 0x71, 0x6A, 0x6A }, 109 { 0x96, 0x21D, 0x23B, 0xA1, 0x85, 0x87, 0x83, 0x84, 0x81, 0xE6, 0xE6, 0xE6, 0x71, 0x6A, 0x6A },
105 { 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 } 110 { 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
106}; 111};
112#endif
107 113
108static const struct ci_pt_config_reg didt_config_ci[] = 114static const struct ci_pt_config_reg didt_config_ci[] =
109{ 115{
@@ -736,19 +742,19 @@ static int ci_enable_didt(struct amdgpu_device *adev, bool enable)
736 742
737 if (pi->caps_sq_ramping || pi->caps_db_ramping || 743 if (pi->caps_sq_ramping || pi->caps_db_ramping ||
738 pi->caps_td_ramping || pi->caps_tcp_ramping) { 744 pi->caps_td_ramping || pi->caps_tcp_ramping) {
739 gfx_v7_0_enter_rlc_safe_mode(adev); 745 adev->gfx.rlc.funcs->enter_safe_mode(adev);
740 746
741 if (enable) { 747 if (enable) {
742 ret = ci_program_pt_config_registers(adev, didt_config_ci); 748 ret = ci_program_pt_config_registers(adev, didt_config_ci);
743 if (ret) { 749 if (ret) {
744 gfx_v7_0_exit_rlc_safe_mode(adev); 750 adev->gfx.rlc.funcs->exit_safe_mode(adev);
745 return ret; 751 return ret;
746 } 752 }
747 } 753 }
748 754
749 ci_do_enable_didt(adev, enable); 755 ci_do_enable_didt(adev, enable);
750 756
751 gfx_v7_0_exit_rlc_safe_mode(adev); 757 adev->gfx.rlc.funcs->exit_safe_mode(adev);
752 } 758 }
753 759
754 return 0; 760 return 0;
@@ -3030,7 +3036,7 @@ static int ci_populate_single_memory_level(struct amdgpu_device *adev,
3030 3036
3031 if (pi->mclk_stutter_mode_threshold && 3037 if (pi->mclk_stutter_mode_threshold &&
3032 (memory_clock <= pi->mclk_stutter_mode_threshold) && 3038 (memory_clock <= pi->mclk_stutter_mode_threshold) &&
3033 (pi->uvd_enabled == false) && 3039 (!pi->uvd_enabled) &&
3034 (RREG32(mmDPG_PIPE_STUTTER_CONTROL) & DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK) && 3040 (RREG32(mmDPG_PIPE_STUTTER_CONTROL) & DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK) &&
3035 (adev->pm.dpm.new_active_crtc_count <= 2)) 3041 (adev->pm.dpm.new_active_crtc_count <= 2))
3036 memory_level->StutterEnable = true; 3042 memory_level->StutterEnable = true;
@@ -3636,6 +3642,10 @@ static int ci_setup_default_dpm_tables(struct amdgpu_device *adev)
3636 3642
3637 ci_setup_default_pcie_tables(adev); 3643 ci_setup_default_pcie_tables(adev);
3638 3644
3645 /* save a copy of the default DPM table */
3646 memcpy(&(pi->golden_dpm_table), &(pi->dpm_table),
3647 sizeof(struct ci_dpm_table));
3648
3639 return 0; 3649 return 0;
3640} 3650}
3641 3651
@@ -5754,10 +5764,18 @@ static int ci_dpm_init_microcode(struct amdgpu_device *adev)
5754 5764
5755 switch (adev->asic_type) { 5765 switch (adev->asic_type) {
5756 case CHIP_BONAIRE: 5766 case CHIP_BONAIRE:
5757 chip_name = "bonaire"; 5767 if ((adev->pdev->revision == 0x80) ||
5768 (adev->pdev->revision == 0x81) ||
5769 (adev->pdev->device == 0x665f))
5770 chip_name = "bonaire_k";
5771 else
5772 chip_name = "bonaire";
5758 break; 5773 break;
5759 case CHIP_HAWAII: 5774 case CHIP_HAWAII:
5760 chip_name = "hawaii"; 5775 if (adev->pdev->revision == 0x80)
5776 chip_name = "hawaii_k";
5777 else
5778 chip_name = "hawaii";
5761 break; 5779 break;
5762 case CHIP_KAVERI: 5780 case CHIP_KAVERI:
5763 case CHIP_KABINI: 5781 case CHIP_KABINI:
@@ -6404,6 +6422,186 @@ static int ci_dpm_set_powergating_state(void *handle,
6404 return 0; 6422 return 0;
6405} 6423}
6406 6424
6425static int ci_dpm_print_clock_levels(struct amdgpu_device *adev,
6426 enum pp_clock_type type, char *buf)
6427{
6428 struct ci_power_info *pi = ci_get_pi(adev);
6429 struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
6430 struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
6431 struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
6432
6433 int i, now, size = 0;
6434 uint32_t clock, pcie_speed;
6435
6436 switch (type) {
6437 case PP_SCLK:
6438 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetSclkFrequency);
6439 clock = RREG32(mmSMC_MSG_ARG_0);
6440
6441 for (i = 0; i < sclk_table->count; i++) {
6442 if (clock > sclk_table->dpm_levels[i].value)
6443 continue;
6444 break;
6445 }
6446 now = i;
6447
6448 for (i = 0; i < sclk_table->count; i++)
6449 size += sprintf(buf + size, "%d: %uMhz %s\n",
6450 i, sclk_table->dpm_levels[i].value / 100,
6451 (i == now) ? "*" : "");
6452 break;
6453 case PP_MCLK:
6454 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetMclkFrequency);
6455 clock = RREG32(mmSMC_MSG_ARG_0);
6456
6457 for (i = 0; i < mclk_table->count; i++) {
6458 if (clock > mclk_table->dpm_levels[i].value)
6459 continue;
6460 break;
6461 }
6462 now = i;
6463
6464 for (i = 0; i < mclk_table->count; i++)
6465 size += sprintf(buf + size, "%d: %uMhz %s\n",
6466 i, mclk_table->dpm_levels[i].value / 100,
6467 (i == now) ? "*" : "");
6468 break;
6469 case PP_PCIE:
6470 pcie_speed = ci_get_current_pcie_speed(adev);
6471 for (i = 0; i < pcie_table->count; i++) {
6472 if (pcie_speed != pcie_table->dpm_levels[i].value)
6473 continue;
6474 break;
6475 }
6476 now = i;
6477
6478 for (i = 0; i < pcie_table->count; i++)
6479 size += sprintf(buf + size, "%d: %s %s\n", i,
6480 (pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x1" :
6481 (pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" :
6482 (pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "",
6483 (i == now) ? "*" : "");
6484 break;
6485 default:
6486 break;
6487 }
6488
6489 return size;
6490}
6491
6492static int ci_dpm_force_clock_level(struct amdgpu_device *adev,
6493 enum pp_clock_type type, uint32_t mask)
6494{
6495 struct ci_power_info *pi = ci_get_pi(adev);
6496
6497 if (adev->pm.dpm.forced_level
6498 != AMDGPU_DPM_FORCED_LEVEL_MANUAL)
6499 return -EINVAL;
6500
6501 switch (type) {
6502 case PP_SCLK:
6503 if (!pi->sclk_dpm_key_disabled)
6504 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
6505 PPSMC_MSG_SCLKDPM_SetEnabledMask,
6506 pi->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
6507 break;
6508
6509 case PP_MCLK:
6510 if (!pi->mclk_dpm_key_disabled)
6511 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
6512 PPSMC_MSG_MCLKDPM_SetEnabledMask,
6513 pi->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
6514 break;
6515
6516 case PP_PCIE:
6517 {
6518 uint32_t tmp = mask & pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
6519 uint32_t level = 0;
6520
6521 while (tmp >>= 1)
6522 level++;
6523
6524 if (!pi->pcie_dpm_key_disabled)
6525 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
6526 PPSMC_MSG_PCIeDPM_ForceLevel,
6527 level);
6528 break;
6529 }
6530 default:
6531 break;
6532 }
6533
6534 return 0;
6535}
6536
6537static int ci_dpm_get_sclk_od(struct amdgpu_device *adev)
6538{
6539 struct ci_power_info *pi = ci_get_pi(adev);
6540 struct ci_single_dpm_table *sclk_table = &(pi->dpm_table.sclk_table);
6541 struct ci_single_dpm_table *golden_sclk_table =
6542 &(pi->golden_dpm_table.sclk_table);
6543 int value;
6544
6545 value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
6546 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
6547 100 /
6548 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
6549
6550 return value;
6551}
6552
6553static int ci_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
6554{
6555 struct ci_power_info *pi = ci_get_pi(adev);
6556 struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
6557 struct ci_single_dpm_table *golden_sclk_table =
6558 &(pi->golden_dpm_table.sclk_table);
6559
6560 if (value > 20)
6561 value = 20;
6562
6563 ps->performance_levels[ps->performance_level_count - 1].sclk =
6564 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
6565 value / 100 +
6566 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
6567
6568 return 0;
6569}
6570
6571static int ci_dpm_get_mclk_od(struct amdgpu_device *adev)
6572{
6573 struct ci_power_info *pi = ci_get_pi(adev);
6574 struct ci_single_dpm_table *mclk_table = &(pi->dpm_table.mclk_table);
6575 struct ci_single_dpm_table *golden_mclk_table =
6576 &(pi->golden_dpm_table.mclk_table);
6577 int value;
6578
6579 value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
6580 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
6581 100 /
6582 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
6583
6584 return value;
6585}
6586
6587static int ci_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
6588{
6589 struct ci_power_info *pi = ci_get_pi(adev);
6590 struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
6591 struct ci_single_dpm_table *golden_mclk_table =
6592 &(pi->golden_dpm_table.mclk_table);
6593
6594 if (value > 20)
6595 value = 20;
6596
6597 ps->performance_levels[ps->performance_level_count - 1].mclk =
6598 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
6599 value / 100 +
6600 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
6601
6602 return 0;
6603}
6604
6407const struct amd_ip_funcs ci_dpm_ip_funcs = { 6605const struct amd_ip_funcs ci_dpm_ip_funcs = {
6408 .name = "ci_dpm", 6606 .name = "ci_dpm",
6409 .early_init = ci_dpm_early_init, 6607 .early_init = ci_dpm_early_init,
@@ -6438,6 +6636,12 @@ static const struct amdgpu_dpm_funcs ci_dpm_funcs = {
6438 .get_fan_control_mode = &ci_dpm_get_fan_control_mode, 6636 .get_fan_control_mode = &ci_dpm_get_fan_control_mode,
6439 .set_fan_speed_percent = &ci_dpm_set_fan_speed_percent, 6637 .set_fan_speed_percent = &ci_dpm_set_fan_speed_percent,
6440 .get_fan_speed_percent = &ci_dpm_get_fan_speed_percent, 6638 .get_fan_speed_percent = &ci_dpm_get_fan_speed_percent,
6639 .print_clock_levels = ci_dpm_print_clock_levels,
6640 .force_clock_level = ci_dpm_force_clock_level,
6641 .get_sclk_od = ci_dpm_get_sclk_od,
6642 .set_sclk_od = ci_dpm_set_sclk_od,
6643 .get_mclk_od = ci_dpm_get_mclk_od,
6644 .set_mclk_od = ci_dpm_set_mclk_od,
6441}; 6645};
6442 6646
6443static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev) 6647static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev)
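
Note: the new get/set_sclk_od and get/set_mclk_od callbacks express overclocking as a percentage over the saved golden DPM table: set scales the top level of the requested state to golden * od / 100 + golden, and get recovers the percentage from the difference to the golden top level. A small worked example (plain C, values invented; clocks are in the 10 kHz units the DPM tables use, so 100000 means 1000 MHz):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t golden_top = 100000; /* top level of the saved golden table */
        uint32_t od = 5;              /* request +5 %, clamped to 20 in the driver */

        /* set_sclk_od: scale the top level of the requested state */
        uint32_t new_top = golden_top * od / 100 + golden_top;

        /* get_sclk_od: recover the percentage from the current top level */
        uint32_t read_back = (new_top - golden_top) * 100 / golden_top;

        printf("top level %u -> %u (10 kHz units), od reported as %u%%\n",
               golden_top, new_top, read_back);
        return 0;
    }

With these numbers the top level moves from 100000 to 105000 and get_sclk_od reports 5 again, which is why the golden copy of the DPM table is saved in ci_setup_default_dpm_tables above.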
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.h b/drivers/gpu/drm/amd/amdgpu/ci_dpm.h
index faccc30c93bf..91be2996ae7c 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.h
@@ -193,6 +193,7 @@ struct ci_pt_defaults {
193 193
194struct ci_power_info { 194struct ci_power_info {
195 struct ci_dpm_table dpm_table; 195 struct ci_dpm_table dpm_table;
196 struct ci_dpm_table golden_dpm_table;
196 u32 voltage_control; 197 u32 voltage_control;
197 u32 mvdd_control; 198 u32 mvdd_control;
198 u32 vddci_control; 199 u32 vddci_control;
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 910431808542..4efc901f658c 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -879,7 +879,7 @@ static void cik_vga_set_state(struct amdgpu_device *adev, bool state)
879 uint32_t tmp; 879 uint32_t tmp;
880 880
881 tmp = RREG32(mmCONFIG_CNTL); 881 tmp = RREG32(mmCONFIG_CNTL);
882 if (state == false) 882 if (!state)
883 tmp |= CONFIG_CNTL__VGA_DIS_MASK; 883 tmp |= CONFIG_CNTL__VGA_DIS_MASK;
884 else 884 else
885 tmp &= ~CONFIG_CNTL__VGA_DIS_MASK; 885 tmp &= ~CONFIG_CNTL__VGA_DIS_MASK;
@@ -1035,12 +1035,12 @@ static uint32_t cik_read_indexed_register(struct amdgpu_device *adev,
1035 1035
1036 mutex_lock(&adev->grbm_idx_mutex); 1036 mutex_lock(&adev->grbm_idx_mutex);
1037 if (se_num != 0xffffffff || sh_num != 0xffffffff) 1037 if (se_num != 0xffffffff || sh_num != 0xffffffff)
1038 gfx_v7_0_select_se_sh(adev, se_num, sh_num); 1038 amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
1039 1039
1040 val = RREG32(reg_offset); 1040 val = RREG32(reg_offset);
1041 1041
1042 if (se_num != 0xffffffff || sh_num != 0xffffffff) 1042 if (se_num != 0xffffffff || sh_num != 0xffffffff)
1043 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); 1043 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1044 mutex_unlock(&adev->grbm_idx_mutex); 1044 mutex_unlock(&adev->grbm_idx_mutex);
1045 return val; 1045 return val;
1046} 1046}
@@ -1158,10 +1158,11 @@ static void kv_restore_regs_for_reset(struct amdgpu_device *adev,
1158 WREG32(mmGMCON_RENG_EXECUTE, save->gmcon_reng_execute); 1158 WREG32(mmGMCON_RENG_EXECUTE, save->gmcon_reng_execute);
1159} 1159}
1160 1160
1161static void cik_gpu_pci_config_reset(struct amdgpu_device *adev) 1161static int cik_gpu_pci_config_reset(struct amdgpu_device *adev)
1162{ 1162{
1163 struct kv_reset_save_regs kv_save = { 0 }; 1163 struct kv_reset_save_regs kv_save = { 0 };
1164 u32 i; 1164 u32 i;
1165 int r = -EINVAL;
1165 1166
1166 dev_info(adev->dev, "GPU pci config reset\n"); 1167 dev_info(adev->dev, "GPU pci config reset\n");
1167 1168
@@ -1177,14 +1178,20 @@ static void cik_gpu_pci_config_reset(struct amdgpu_device *adev)
1177 1178
1178 /* wait for asic to come out of reset */ 1179 /* wait for asic to come out of reset */
1179 for (i = 0; i < adev->usec_timeout; i++) { 1180 for (i = 0; i < adev->usec_timeout; i++) {
1180 if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) 1181 if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
1182 /* enable BM */
1183 pci_set_master(adev->pdev);
1184 r = 0;
1181 break; 1185 break;
1186 }
1182 udelay(1); 1187 udelay(1);
1183 } 1188 }
1184 1189
1185 /* does asic init need to be run first??? */ 1190 /* does asic init need to be run first??? */
1186 if (adev->flags & AMD_IS_APU) 1191 if (adev->flags & AMD_IS_APU)
1187 kv_restore_regs_for_reset(adev, &kv_save); 1192 kv_restore_regs_for_reset(adev, &kv_save);
1193
1194 return r;
1188} 1195}
1189 1196
1190static void cik_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung) 1197static void cik_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung)
@@ -1210,13 +1217,14 @@ static void cik_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hu
1210 */ 1217 */
1211static int cik_asic_reset(struct amdgpu_device *adev) 1218static int cik_asic_reset(struct amdgpu_device *adev)
1212{ 1219{
1220 int r;
1213 cik_set_bios_scratch_engine_hung(adev, true); 1221 cik_set_bios_scratch_engine_hung(adev, true);
1214 1222
1215 cik_gpu_pci_config_reset(adev); 1223 r = cik_gpu_pci_config_reset(adev);
1216 1224
1217 cik_set_bios_scratch_engine_hung(adev, false); 1225 cik_set_bios_scratch_engine_hung(adev, false);
1218 1226
1219 return 0; 1227 return r;
1220} 1228}
1221 1229
1222static int cik_set_uvd_clock(struct amdgpu_device *adev, u32 clock, 1230static int cik_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
@@ -2014,9 +2022,6 @@ static const struct amdgpu_asic_funcs cik_asic_funcs =
2014 .set_uvd_clocks = &cik_set_uvd_clocks, 2022 .set_uvd_clocks = &cik_set_uvd_clocks,
2015 .set_vce_clocks = &cik_set_vce_clocks, 2023 .set_vce_clocks = &cik_set_vce_clocks,
2016 .get_virtual_caps = &cik_get_virtual_caps, 2024 .get_virtual_caps = &cik_get_virtual_caps,
2017 /* these should be moved to their own ip modules */
2018 .get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
2019 .wait_for_mc_idle = &gmc_v7_0_mc_wait_for_idle,
2020}; 2025};
2021 2026
2022static int cik_common_early_init(void *handle) 2027static int cik_common_early_init(void *handle)
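
Note: cik_gpu_pci_config_reset and cik_asic_reset above now report whether the ASIC actually came back: the poll loop returns 0 and re-enables bus mastering (which a PCI config reset clears) as soon as mmCONFIG_MEMSIZE reads something other than 0xffffffff, and -EINVAL once adev->usec_timeout microseconds have elapsed. A condensed sketch of that loop as a single helper; example_wait_for_asic is an invented name, the register access and udelay calls follow the hunk above.

    static int example_wait_for_asic(struct amdgpu_device *adev)
    {
        unsigned int i;

        for (i = 0; i < adev->usec_timeout; i++) {
            if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
                /* bus mastering is lost across the reset, turn it back on */
                pci_set_master(adev->pdev);
                return 0;
            }
            udelay(1);
        }
        return -EINVAL; /* cik_asic_reset() hands this back to the caller */
    }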
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 9dc4e24e31e7..ee6466912497 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -224,17 +224,6 @@ static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
224 unsigned vm_id, bool ctx_switch) 224 unsigned vm_id, bool ctx_switch)
225{ 225{
226 u32 extra_bits = vm_id & 0xf; 226 u32 extra_bits = vm_id & 0xf;
227 u32 next_rptr = ring->wptr + 5;
228
229 while ((next_rptr & 7) != 4)
230 next_rptr++;
231
232 next_rptr += 4;
233 amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
234 amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
235 amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
236 amdgpu_ring_write(ring, 1); /* number of DWs to follow */
237 amdgpu_ring_write(ring, next_rptr);
238 227
239 /* IB packet must end on a 8 DW boundary */ 228 /* IB packet must end on a 8 DW boundary */
240 cik_sdma_ring_insert_nop(ring, (12 - (ring->wptr & 7)) % 8); 229 cik_sdma_ring_insert_nop(ring, (12 - (ring->wptr & 7)) % 8);
@@ -365,7 +354,7 @@ static void cik_sdma_enable(struct amdgpu_device *adev, bool enable)
365 u32 me_cntl; 354 u32 me_cntl;
366 int i; 355 int i;
367 356
368 if (enable == false) { 357 if (!enable) {
369 cik_sdma_gfx_stop(adev); 358 cik_sdma_gfx_stop(adev);
370 cik_sdma_rlc_stop(adev); 359 cik_sdma_rlc_stop(adev);
371 } 360 }
@@ -628,20 +617,19 @@ static int cik_sdma_ring_test_ring(struct amdgpu_ring *ring)
628 * Test a simple IB in the DMA ring (CIK). 617 * Test a simple IB in the DMA ring (CIK).
629 * Returns 0 on success, error on failure. 618 * Returns 0 on success, error on failure.
630 */ 619 */
631static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring) 620static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring, long timeout)
632{ 621{
633 struct amdgpu_device *adev = ring->adev; 622 struct amdgpu_device *adev = ring->adev;
634 struct amdgpu_ib ib; 623 struct amdgpu_ib ib;
635 struct fence *f = NULL; 624 struct fence *f = NULL;
636 unsigned i;
637 unsigned index; 625 unsigned index;
638 int r;
639 u32 tmp = 0; 626 u32 tmp = 0;
640 u64 gpu_addr; 627 u64 gpu_addr;
628 long r;
641 629
642 r = amdgpu_wb_get(adev, &index); 630 r = amdgpu_wb_get(adev, &index);
643 if (r) { 631 if (r) {
644 dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); 632 dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
645 return r; 633 return r;
646 } 634 }
647 635
@@ -651,11 +639,12 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
651 memset(&ib, 0, sizeof(ib)); 639 memset(&ib, 0, sizeof(ib));
652 r = amdgpu_ib_get(adev, NULL, 256, &ib); 640 r = amdgpu_ib_get(adev, NULL, 256, &ib);
653 if (r) { 641 if (r) {
654 DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); 642 DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
655 goto err0; 643 goto err0;
656 } 644 }
657 645
658 ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0); 646 ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE,
647 SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
659 ib.ptr[1] = lower_32_bits(gpu_addr); 648 ib.ptr[1] = lower_32_bits(gpu_addr);
660 ib.ptr[2] = upper_32_bits(gpu_addr); 649 ib.ptr[2] = upper_32_bits(gpu_addr);
661 ib.ptr[3] = 1; 650 ib.ptr[3] = 1;
@@ -665,28 +654,25 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
665 if (r) 654 if (r)
666 goto err1; 655 goto err1;
667 656
668 r = fence_wait(f, false); 657 r = fence_wait_timeout(f, false, timeout);
669 if (r) { 658 if (r == 0) {
670 DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); 659 DRM_ERROR("amdgpu: IB test timed out\n");
660 r = -ETIMEDOUT;
671 goto err1; 661 goto err1;
672 } 662 } else if (r < 0) {
673 for (i = 0; i < adev->usec_timeout; i++) { 663 DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
674 tmp = le32_to_cpu(adev->wb.wb[index]);
675 if (tmp == 0xDEADBEEF)
676 break;
677 DRM_UDELAY(1);
678 }
679 if (i < adev->usec_timeout) {
680 DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
681 ring->idx, i);
682 goto err1; 664 goto err1;
665 }
666 tmp = le32_to_cpu(adev->wb.wb[index]);
667 if (tmp == 0xDEADBEEF) {
668 DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
669 r = 0;
683 } else { 670 } else {
684 DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp); 671 DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
685 r = -EINVAL; 672 r = -EINVAL;
686 } 673 }
687 674
688err1: 675err1:
689 fence_put(f);
690 amdgpu_ib_free(adev, &ib, NULL); 676 amdgpu_ib_free(adev, &ib, NULL);
691 fence_put(f); 677 fence_put(f);
692err0: 678err0:
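Note on the cik_sdma.c hunk above: the IB test no longer busy-polls the writeback slot for up to adev->usec_timeout microseconds; it now waits on the fence itself with a bounded timeout. A minimal sketch of that error-handling pattern follows; the helper name run_ib_test_wait() is illustrative only, but the fence_wait_timeout() contract it relies on (0 means the wait expired, a negative value is a wait error, a positive value means the fence signalled in time) is exactly what the new driver code checks.

	/* Illustrative sketch of the timeout-based IB test wait; not part of the patch. */
	static long run_ib_test_wait(struct fence *f, long timeout)
	{
		long r;

		r = fence_wait_timeout(f, false, timeout);
		if (r == 0)		/* wait expired before the IB signalled */
			return -ETIMEDOUT;
		if (r < 0)		/* interrupted or other wait error */
			return r;
		return 0;		/* fence signalled within the timeout */
	}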
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index 933e425a8154..2a11413ed54a 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -425,7 +425,7 @@ static int cz_dpm_init(struct amdgpu_device *adev)
425 pi->mgcg_cgtt_local1 = 0x0; 425 pi->mgcg_cgtt_local1 = 0x0;
426 pi->clock_slow_down_step = 25000; 426 pi->clock_slow_down_step = 25000;
427 pi->skip_clock_slow_down = 1; 427 pi->skip_clock_slow_down = 1;
428 pi->enable_nb_ps_policy = 0; 428 pi->enable_nb_ps_policy = false;
429 pi->caps_power_containment = true; 429 pi->caps_power_containment = true;
430 pi->caps_cac = true; 430 pi->caps_cac = true;
431 pi->didt_enabled = false; 431 pi->didt_enabled = false;
@@ -2219,6 +2219,7 @@ static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
2219 } 2219 }
2220 } 2220 }
2221 } else { /*pi->caps_vce_pg*/ 2221 } else { /*pi->caps_vce_pg*/
2222 pi->vce_power_gated = gate;
2222 cz_update_vce_dpm(adev); 2223 cz_update_vce_dpm(adev);
2223 cz_enable_vce_dpm(adev, !gate); 2224 cz_enable_vce_dpm(adev, !gate);
2224 } 2225 }
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 8227344d2ff6..c1b04e9aab57 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2667,19 +2667,21 @@ static void dce_v10_0_cursor_reset(struct drm_crtc *crtc)
2667 } 2667 }
2668} 2668}
2669 2669
2670static void dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, 2670static int dce_v10_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
2671 u16 *blue, uint32_t start, uint32_t size) 2671 u16 *blue, uint32_t size)
2672{ 2672{
2673 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2673 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2674 int end = (start + size > 256) ? 256 : start + size, i; 2674 int i;
2675 2675
2676 /* userspace palettes are always correct as is */ 2676 /* userspace palettes are always correct as is */
2677 for (i = start; i < end; i++) { 2677 for (i = 0; i < size; i++) {
2678 amdgpu_crtc->lut_r[i] = red[i] >> 6; 2678 amdgpu_crtc->lut_r[i] = red[i] >> 6;
2679 amdgpu_crtc->lut_g[i] = green[i] >> 6; 2679 amdgpu_crtc->lut_g[i] = green[i] >> 6;
2680 amdgpu_crtc->lut_b[i] = blue[i] >> 6; 2680 amdgpu_crtc->lut_b[i] = blue[i] >> 6;
2681 } 2681 }
2682 dce_v10_0_crtc_load_lut(crtc); 2682 dce_v10_0_crtc_load_lut(crtc);
2683
2684 return 0;
2683} 2685}
2684 2686
2685static void dce_v10_0_crtc_destroy(struct drm_crtc *crtc) 2687static void dce_v10_0_crtc_destroy(struct drm_crtc *crtc)
@@ -2717,13 +2719,13 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2717 type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); 2719 type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
2718 amdgpu_irq_update(adev, &adev->crtc_irq, type); 2720 amdgpu_irq_update(adev, &adev->crtc_irq, type);
2719 amdgpu_irq_update(adev, &adev->pageflip_irq, type); 2721 amdgpu_irq_update(adev, &adev->pageflip_irq, type);
2720 drm_vblank_on(dev, amdgpu_crtc->crtc_id); 2722 drm_crtc_vblank_on(crtc);
2721 dce_v10_0_crtc_load_lut(crtc); 2723 dce_v10_0_crtc_load_lut(crtc);
2722 break; 2724 break;
2723 case DRM_MODE_DPMS_STANDBY: 2725 case DRM_MODE_DPMS_STANDBY:
2724 case DRM_MODE_DPMS_SUSPEND: 2726 case DRM_MODE_DPMS_SUSPEND:
2725 case DRM_MODE_DPMS_OFF: 2727 case DRM_MODE_DPMS_OFF:
2726 drm_vblank_off(dev, amdgpu_crtc->crtc_id); 2728 drm_crtc_vblank_off(crtc);
2727 if (amdgpu_crtc->enabled) { 2729 if (amdgpu_crtc->enabled) {
2728 dce_v10_0_vga_enable(crtc, true); 2730 dce_v10_0_vga_enable(crtc, true);
2729 amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE); 2731 amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
@@ -3372,7 +3374,7 @@ static int dce_v10_0_pageflip_irq(struct amdgpu_device *adev,
3372 3374
3373 spin_unlock_irqrestore(&adev->ddev->event_lock, flags); 3375 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
3374 3376
3375 drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); 3377 drm_crtc_vblank_put(&amdgpu_crtc->base);
3376 schedule_work(&works->unpin_work); 3378 schedule_work(&works->unpin_work);
3377 3379
3378 return 0; 3380 return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index af26ec0bc59d..d4bf133908b1 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -307,11 +307,10 @@ static void dce_v11_0_page_flip(struct amdgpu_device *adev,
307 struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; 307 struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id];
308 u32 tmp; 308 u32 tmp;
309 309
310 /* flip at hsync for async, default is vsync */ 310 /* flip immediate for async, default is vsync */
311 /* use UPDATE_IMMEDIATE_EN instead for async? */
312 tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset); 311 tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset);
313 tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL, 312 tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL,
314 GRPH_SURFACE_UPDATE_H_RETRACE_EN, async ? 1 : 0); 313 GRPH_SURFACE_UPDATE_IMMEDIATE_EN, async ? 1 : 0);
315 WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp); 314 WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp);
316 /* update the scanout addresses */ 315 /* update the scanout addresses */
317 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, 316 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset,
@@ -2678,19 +2677,21 @@ static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
2678 } 2677 }
2679} 2678}
2680 2679
2681static void dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, 2680static int dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
2682 u16 *blue, uint32_t start, uint32_t size) 2681 u16 *blue, uint32_t size)
2683{ 2682{
2684 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2683 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2685 int end = (start + size > 256) ? 256 : start + size, i; 2684 int i;
2686 2685
2687 /* userspace palettes are always correct as is */ 2686 /* userspace palettes are always correct as is */
2688 for (i = start; i < end; i++) { 2687 for (i = 0; i < size; i++) {
2689 amdgpu_crtc->lut_r[i] = red[i] >> 6; 2688 amdgpu_crtc->lut_r[i] = red[i] >> 6;
2690 amdgpu_crtc->lut_g[i] = green[i] >> 6; 2689 amdgpu_crtc->lut_g[i] = green[i] >> 6;
2691 amdgpu_crtc->lut_b[i] = blue[i] >> 6; 2690 amdgpu_crtc->lut_b[i] = blue[i] >> 6;
2692 } 2691 }
2693 dce_v11_0_crtc_load_lut(crtc); 2692 dce_v11_0_crtc_load_lut(crtc);
2693
2694 return 0;
2694} 2695}
2695 2696
2696static void dce_v11_0_crtc_destroy(struct drm_crtc *crtc) 2697static void dce_v11_0_crtc_destroy(struct drm_crtc *crtc)
@@ -2728,13 +2729,13 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2728 type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); 2729 type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
2729 amdgpu_irq_update(adev, &adev->crtc_irq, type); 2730 amdgpu_irq_update(adev, &adev->crtc_irq, type);
2730 amdgpu_irq_update(adev, &adev->pageflip_irq, type); 2731 amdgpu_irq_update(adev, &adev->pageflip_irq, type);
2731 drm_vblank_on(dev, amdgpu_crtc->crtc_id); 2732 drm_crtc_vblank_on(crtc);
2732 dce_v11_0_crtc_load_lut(crtc); 2733 dce_v11_0_crtc_load_lut(crtc);
2733 break; 2734 break;
2734 case DRM_MODE_DPMS_STANDBY: 2735 case DRM_MODE_DPMS_STANDBY:
2735 case DRM_MODE_DPMS_SUSPEND: 2736 case DRM_MODE_DPMS_SUSPEND:
2736 case DRM_MODE_DPMS_OFF: 2737 case DRM_MODE_DPMS_OFF:
2737 drm_vblank_off(dev, amdgpu_crtc->crtc_id); 2738 drm_crtc_vblank_off(crtc);
2738 if (amdgpu_crtc->enabled) { 2739 if (amdgpu_crtc->enabled) {
2739 dce_v11_0_vga_enable(crtc, true); 2740 dce_v11_0_vga_enable(crtc, true);
2740 amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE); 2741 amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
@@ -3433,7 +3434,7 @@ static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev,
3433 3434
3434 spin_unlock_irqrestore(&adev->ddev->event_lock, flags); 3435 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
3435 3436
3436 drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); 3437 drm_crtc_vblank_put(&amdgpu_crtc->base);
3437 schedule_work(&works->unpin_work); 3438 schedule_work(&works->unpin_work);
3438 3439
3439 return 0; 3440 return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 3fb65e41a6ef..4fdfab1e9200 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -526,36 +526,16 @@ static void dce_v8_0_stop_mc_access(struct amdgpu_device *adev,
526 crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]), 526 crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]),
527 CRTC_CONTROL, CRTC_MASTER_EN); 527 CRTC_CONTROL, CRTC_MASTER_EN);
528 if (crtc_enabled) { 528 if (crtc_enabled) {
529#if 0 529#if 1
530 u32 frame_count;
531 int j;
532
533 save->crtc_enabled[i] = true; 530 save->crtc_enabled[i] = true;
534 tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]); 531 tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
535 if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) { 532 if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) {
536 amdgpu_display_vblank_wait(adev, i); 533 /*it is correct only for RGB ; black is 0*/
537 WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); 534 WREG32(mmCRTC_BLANK_DATA_COLOR + crtc_offsets[i], 0);
538 tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1); 535 tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
539 WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp); 536 WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
540 WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
541 }
542 /* wait for the next frame */
543 frame_count = amdgpu_display_vblank_get_counter(adev, i);
544 for (j = 0; j < adev->usec_timeout; j++) {
545 if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
546 break;
547 udelay(1);
548 }
549 tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
550 if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK) == 0) {
551 tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1);
552 WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
553 }
554 tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
555 if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK) == 0) {
556 tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 1);
557 WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
558 } 537 }
538 mdelay(20);
559#else 539#else
560 /* XXX this is a hack to avoid strange behavior with EFI on certain systems */ 540 /* XXX this is a hack to avoid strange behavior with EFI on certain systems */
561 WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); 541 WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
@@ -575,55 +555,22 @@ static void dce_v8_0_stop_mc_access(struct amdgpu_device *adev,
575static void dce_v8_0_resume_mc_access(struct amdgpu_device *adev, 555static void dce_v8_0_resume_mc_access(struct amdgpu_device *adev,
576 struct amdgpu_mode_mc_save *save) 556 struct amdgpu_mode_mc_save *save)
577{ 557{
578 u32 tmp, frame_count; 558 u32 tmp;
579 int i, j; 559 int i;
580 560
581 /* update crtc base addresses */ 561 /* update crtc base addresses */
582 for (i = 0; i < adev->mode_info.num_crtc; i++) { 562 for (i = 0; i < adev->mode_info.num_crtc; i++) {
583 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], 563 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
584 upper_32_bits(adev->mc.vram_start)); 564 upper_32_bits(adev->mc.vram_start));
585 WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i],
586 upper_32_bits(adev->mc.vram_start));
587 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i], 565 WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i],
588 (u32)adev->mc.vram_start); 566 (u32)adev->mc.vram_start);
589 WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
590 (u32)adev->mc.vram_start);
591 567
592 if (save->crtc_enabled[i]) { 568 if (save->crtc_enabled[i]) {
593 tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]);
594 if (REG_GET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 3) {
595 tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 3);
596 WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp);
597 }
598 tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
599 if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK)) {
600 tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0);
601 WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp);
602 }
603 tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]);
604 if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK)) {
605 tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 0);
606 WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp);
607 }
608 for (j = 0; j < adev->usec_timeout; j++) {
609 tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]);
610 if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING) == 0)
611 break;
612 udelay(1);
613 }
614 tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]); 569 tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]);
615 tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0); 570 tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0);
616 WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1);
617 WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp); 571 WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
618 WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0);
619 /* wait for the next frame */
620 frame_count = amdgpu_display_vblank_get_counter(adev, i);
621 for (j = 0; j < adev->usec_timeout; j++) {
622 if (amdgpu_display_vblank_get_counter(adev, i) != frame_count)
623 break;
624 udelay(1);
625 }
626 } 572 }
573 mdelay(20);
627 } 574 }
628 575
629 WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start)); 576 WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start));
@@ -2574,19 +2521,21 @@ static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
2574 } 2521 }
2575} 2522}
2576 2523
2577static void dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, 2524static int dce_v8_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
2578 u16 *blue, uint32_t start, uint32_t size) 2525 u16 *blue, uint32_t size)
2579{ 2526{
2580 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2527 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2581 int end = (start + size > 256) ? 256 : start + size, i; 2528 int i;
2582 2529
2583 /* userspace palettes are always correct as is */ 2530 /* userspace palettes are always correct as is */
2584 for (i = start; i < end; i++) { 2531 for (i = 0; i < size; i++) {
2585 amdgpu_crtc->lut_r[i] = red[i] >> 6; 2532 amdgpu_crtc->lut_r[i] = red[i] >> 6;
2586 amdgpu_crtc->lut_g[i] = green[i] >> 6; 2533 amdgpu_crtc->lut_g[i] = green[i] >> 6;
2587 amdgpu_crtc->lut_b[i] = blue[i] >> 6; 2534 amdgpu_crtc->lut_b[i] = blue[i] >> 6;
2588 } 2535 }
2589 dce_v8_0_crtc_load_lut(crtc); 2536 dce_v8_0_crtc_load_lut(crtc);
2537
2538 return 0;
2590} 2539}
2591 2540
2592static void dce_v8_0_crtc_destroy(struct drm_crtc *crtc) 2541static void dce_v8_0_crtc_destroy(struct drm_crtc *crtc)
@@ -2624,13 +2573,13 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2624 type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); 2573 type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
2625 amdgpu_irq_update(adev, &adev->crtc_irq, type); 2574 amdgpu_irq_update(adev, &adev->crtc_irq, type);
2626 amdgpu_irq_update(adev, &adev->pageflip_irq, type); 2575 amdgpu_irq_update(adev, &adev->pageflip_irq, type);
2627 drm_vblank_on(dev, amdgpu_crtc->crtc_id); 2576 drm_crtc_vblank_on(crtc);
2628 dce_v8_0_crtc_load_lut(crtc); 2577 dce_v8_0_crtc_load_lut(crtc);
2629 break; 2578 break;
2630 case DRM_MODE_DPMS_STANDBY: 2579 case DRM_MODE_DPMS_STANDBY:
2631 case DRM_MODE_DPMS_SUSPEND: 2580 case DRM_MODE_DPMS_SUSPEND:
2632 case DRM_MODE_DPMS_OFF: 2581 case DRM_MODE_DPMS_OFF:
2633 drm_vblank_off(dev, amdgpu_crtc->crtc_id); 2582 drm_crtc_vblank_off(crtc);
2634 if (amdgpu_crtc->enabled) { 2583 if (amdgpu_crtc->enabled) {
2635 dce_v8_0_vga_enable(crtc, true); 2584 dce_v8_0_vga_enable(crtc, true);
2636 amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE); 2585 amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE);
@@ -3376,7 +3325,7 @@ static int dce_v8_0_pageflip_irq(struct amdgpu_device *adev,
3376 3325
3377 spin_unlock_irqrestore(&adev->ddev->event_lock, flags); 3326 spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
3378 3327
3379 drm_vblank_put(adev->ddev, amdgpu_crtc->crtc_id); 3328 drm_crtc_vblank_put(&amdgpu_crtc->base);
3380 schedule_work(&works->unpin_work); 3329 schedule_work(&works->unpin_work);
3381 3330
3382 return 0; 3331 return 0;
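The dce_v10_0.c, dce_v11_0.c and dce_v8_0.c hunks above all apply the same DRM core API update: the pipe-indexed drm_vblank_on()/drm_vblank_off()/drm_vblank_put() calls become their struct drm_crtc based counterparts, and the .gamma_set helper drops its start argument and returns int. A hedged sketch of the vblank half of that conversion, with example_crtc_dpms() standing in for the per-ASIC dce_v*_0_crtc_dpms() handlers:

	/* Sketch of the drm_crtc-based vblank calls used above; the function name is illustrative. */
	static void example_crtc_dpms(struct drm_crtc *crtc, int mode)
	{
		switch (mode) {
		case DRM_MODE_DPMS_ON:
			drm_crtc_vblank_on(crtc);	/* was drm_vblank_on(dev, pipe) */
			break;
		case DRM_MODE_DPMS_STANDBY:
		case DRM_MODE_DPMS_SUSPEND:
		case DRM_MODE_DPMS_OFF:
			drm_crtc_vblank_off(crtc);	/* was drm_vblank_off(dev, pipe) */
			break;
		}
	}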
diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
index b336c918d6a7..b3e19ba4c57f 100644
--- a/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/fiji_smc.c
@@ -173,7 +173,7 @@ static int fiji_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
173{ 173{
174 if (!fiji_is_smc_ram_running(adev)) 174 if (!fiji_is_smc_ram_running(adev))
175 { 175 {
176 return -EINVAL;; 176 return -EINVAL;
177 } 177 }
178 178
179 if (wait_smu_response(adev)) { 179 if (wait_smu_response(adev)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index fc8ff4d3ccf8..d869d058ef24 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -1583,9 +1583,15 @@ static void gfx_v7_0_tiling_mode_table_init(struct amdgpu_device *adev)
1583 * registers are instanced per SE or SH. 0xffffffff means 1583 * registers are instanced per SE or SH. 0xffffffff means
1584 * broadcast to all SEs or SHs (CIK). 1584 * broadcast to all SEs or SHs (CIK).
1585 */ 1585 */
1586void gfx_v7_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num) 1586static void gfx_v7_0_select_se_sh(struct amdgpu_device *adev,
1587 u32 se_num, u32 sh_num, u32 instance)
1587{ 1588{
1588 u32 data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK; 1589 u32 data;
1590
1591 if (instance == 0xffffffff)
1592 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
1593 else
1594 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
1589 1595
1590 if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) 1596 if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
1591 data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | 1597 data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
@@ -1659,13 +1665,13 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
1659 mutex_lock(&adev->grbm_idx_mutex); 1665 mutex_lock(&adev->grbm_idx_mutex);
1660 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { 1666 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
1661 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { 1667 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
1662 gfx_v7_0_select_se_sh(adev, i, j); 1668 gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
1663 data = gfx_v7_0_get_rb_active_bitmap(adev); 1669 data = gfx_v7_0_get_rb_active_bitmap(adev);
1664 active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) * 1670 active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
1665 rb_bitmap_width_per_sh); 1671 rb_bitmap_width_per_sh);
1666 } 1672 }
1667 } 1673 }
1668 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); 1674 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1669 mutex_unlock(&adev->grbm_idx_mutex); 1675 mutex_unlock(&adev->grbm_idx_mutex);
1670 1676
1671 adev->gfx.config.backend_enable_mask = active_rbs; 1677 adev->gfx.config.backend_enable_mask = active_rbs;
@@ -1746,7 +1752,7 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
1746 * making sure that the following register writes will be broadcasted 1752 * making sure that the following register writes will be broadcasted
1747 * to all the shaders 1753 * to all the shaders
1748 */ 1754 */
1749 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); 1755 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
1750 1756
1751 /* XXX SH_MEM regs */ 1757 /* XXX SH_MEM regs */
1752 /* where to put LDS, scratch, GPUVM in FSA64 space */ 1758 /* where to put LDS, scratch, GPUVM in FSA64 space */
@@ -2050,17 +2056,6 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
2050 unsigned vm_id, bool ctx_switch) 2056 unsigned vm_id, bool ctx_switch)
2051{ 2057{
2052 u32 header, control = 0; 2058 u32 header, control = 0;
2053 u32 next_rptr = ring->wptr + 5;
2054
2055 if (ctx_switch)
2056 next_rptr += 2;
2057
2058 next_rptr += 4;
2059 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2060 amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
2061 amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
2062 amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
2063 amdgpu_ring_write(ring, next_rptr);
2064 2059
2065 /* insert SWITCH_BUFFER packet before first IB in the ring frame */ 2060 /* insert SWITCH_BUFFER packet before first IB in the ring frame */
2066 if (ctx_switch) { 2061 if (ctx_switch) {
@@ -2089,22 +2084,9 @@ static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
2089 struct amdgpu_ib *ib, 2084 struct amdgpu_ib *ib,
2090 unsigned vm_id, bool ctx_switch) 2085 unsigned vm_id, bool ctx_switch)
2091{ 2086{
2092 u32 header, control = 0; 2087 u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24);
2093 u32 next_rptr = ring->wptr + 5;
2094
2095 control |= INDIRECT_BUFFER_VALID;
2096 next_rptr += 4;
2097 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2098 amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
2099 amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
2100 amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
2101 amdgpu_ring_write(ring, next_rptr);
2102
2103 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
2104 2088
2105 control |= ib->length_dw | (vm_id << 24); 2089 amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
2106
2107 amdgpu_ring_write(ring, header);
2108 amdgpu_ring_write(ring, 2090 amdgpu_ring_write(ring,
2109#ifdef __BIG_ENDIAN 2091#ifdef __BIG_ENDIAN
2110 (2 << 0) | 2092 (2 << 0) |
@@ -2123,26 +2105,25 @@ static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
2123 * Provides a basic gfx ring test to verify that IBs are working. 2105 * Provides a basic gfx ring test to verify that IBs are working.
2124 * Returns 0 on success, error on failure. 2106 * Returns 0 on success, error on failure.
2125 */ 2107 */
2126static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring) 2108static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
2127{ 2109{
2128 struct amdgpu_device *adev = ring->adev; 2110 struct amdgpu_device *adev = ring->adev;
2129 struct amdgpu_ib ib; 2111 struct amdgpu_ib ib;
2130 struct fence *f = NULL; 2112 struct fence *f = NULL;
2131 uint32_t scratch; 2113 uint32_t scratch;
2132 uint32_t tmp = 0; 2114 uint32_t tmp = 0;
2133 unsigned i; 2115 long r;
2134 int r;
2135 2116
2136 r = amdgpu_gfx_scratch_get(adev, &scratch); 2117 r = amdgpu_gfx_scratch_get(adev, &scratch);
2137 if (r) { 2118 if (r) {
2138 DRM_ERROR("amdgpu: failed to get scratch reg (%d).\n", r); 2119 DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
2139 return r; 2120 return r;
2140 } 2121 }
2141 WREG32(scratch, 0xCAFEDEAD); 2122 WREG32(scratch, 0xCAFEDEAD);
2142 memset(&ib, 0, sizeof(ib)); 2123 memset(&ib, 0, sizeof(ib));
2143 r = amdgpu_ib_get(adev, NULL, 256, &ib); 2124 r = amdgpu_ib_get(adev, NULL, 256, &ib);
2144 if (r) { 2125 if (r) {
2145 DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); 2126 DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
2146 goto err1; 2127 goto err1;
2147 } 2128 }
2148 ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1); 2129 ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
@@ -2154,21 +2135,19 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring)
2154 if (r) 2135 if (r)
2155 goto err2; 2136 goto err2;
2156 2137
2157 r = fence_wait(f, false); 2138 r = fence_wait_timeout(f, false, timeout);
2158 if (r) { 2139 if (r == 0) {
2159 DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); 2140 DRM_ERROR("amdgpu: IB test timed out\n");
2141 r = -ETIMEDOUT;
2160 goto err2; 2142 goto err2;
2161 } 2143 } else if (r < 0) {
2162 for (i = 0; i < adev->usec_timeout; i++) { 2144 DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
2163 tmp = RREG32(scratch);
2164 if (tmp == 0xDEADBEEF)
2165 break;
2166 DRM_UDELAY(1);
2167 }
2168 if (i < adev->usec_timeout) {
2169 DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
2170 ring->idx, i);
2171 goto err2; 2145 goto err2;
2146 }
2147 tmp = RREG32(scratch);
2148 if (tmp == 0xDEADBEEF) {
2149 DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
2150 r = 0;
2172 } else { 2151 } else {
2173 DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n", 2152 DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
2174 scratch, tmp); 2153 scratch, tmp);
@@ -2176,7 +2155,6 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring)
2176 } 2155 }
2177 2156
2178err2: 2157err2:
2179 fence_put(f);
2180 amdgpu_ib_free(adev, &ib, NULL); 2158 amdgpu_ib_free(adev, &ib, NULL);
2181 fence_put(f); 2159 fence_put(f);
2182err1: 2160err1:
@@ -3221,7 +3199,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
3221 } 3199 }
3222 } 3200 }
3223 adev->gfx.rlc.cs_data = ci_cs_data; 3201 adev->gfx.rlc.cs_data = ci_cs_data;
3224 adev->gfx.rlc.cp_table_size = CP_ME_TABLE_SIZE * 5 * 4; 3202 adev->gfx.rlc.cp_table_size = ALIGN(CP_ME_TABLE_SIZE * 5 * 4, 2048); /* CP JT */
3203 adev->gfx.rlc.cp_table_size += 64 * 1024; /* GDS */
3225 3204
3226 src_ptr = adev->gfx.rlc.reg_list; 3205 src_ptr = adev->gfx.rlc.reg_list;
3227 dws = adev->gfx.rlc.reg_list_size; 3206 dws = adev->gfx.rlc.reg_list_size;
@@ -3379,7 +3358,7 @@ static void gfx_v7_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
3379 mutex_lock(&adev->grbm_idx_mutex); 3358 mutex_lock(&adev->grbm_idx_mutex);
3380 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { 3359 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
3381 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { 3360 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
3382 gfx_v7_0_select_se_sh(adev, i, j); 3361 gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
3383 for (k = 0; k < adev->usec_timeout; k++) { 3362 for (k = 0; k < adev->usec_timeout; k++) {
3384 if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0) 3363 if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0)
3385 break; 3364 break;
@@ -3387,7 +3366,7 @@ static void gfx_v7_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
3387 } 3366 }
3388 } 3367 }
3389 } 3368 }
3390 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); 3369 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3391 mutex_unlock(&adev->grbm_idx_mutex); 3370 mutex_unlock(&adev->grbm_idx_mutex);
3392 3371
3393 mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK | 3372 mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
@@ -3434,7 +3413,7 @@ static u32 gfx_v7_0_halt_rlc(struct amdgpu_device *adev)
3434 return orig; 3413 return orig;
3435} 3414}
3436 3415
3437void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev) 3416static void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
3438{ 3417{
3439 u32 tmp, i, mask; 3418 u32 tmp, i, mask;
3440 3419
@@ -3456,7 +3435,7 @@ void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
3456 } 3435 }
3457} 3436}
3458 3437
3459void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev) 3438static void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
3460{ 3439{
3461 u32 tmp; 3440 u32 tmp;
3462 3441
@@ -3471,7 +3450,7 @@ void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
3471 * 3450 *
3472 * Halt the RLC ME (MicroEngine) (CIK). 3451 * Halt the RLC ME (MicroEngine) (CIK).
3473 */ 3452 */
3474void gfx_v7_0_rlc_stop(struct amdgpu_device *adev) 3453static void gfx_v7_0_rlc_stop(struct amdgpu_device *adev)
3475{ 3454{
3476 WREG32(mmRLC_CNTL, 0); 3455 WREG32(mmRLC_CNTL, 0);
3477 3456
@@ -3547,7 +3526,7 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
3547 WREG32(mmRLC_LB_CNTR_MAX, 0x00008000); 3526 WREG32(mmRLC_LB_CNTR_MAX, 0x00008000);
3548 3527
3549 mutex_lock(&adev->grbm_idx_mutex); 3528 mutex_lock(&adev->grbm_idx_mutex);
3550 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); 3529 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3551 WREG32(mmRLC_LB_INIT_CU_MASK, 0xffffffff); 3530 WREG32(mmRLC_LB_INIT_CU_MASK, 0xffffffff);
3552 WREG32(mmRLC_LB_PARAMS, 0x00600408); 3531 WREG32(mmRLC_LB_PARAMS, 0x00600408);
3553 WREG32(mmRLC_LB_CNTL, 0x80000004); 3532 WREG32(mmRLC_LB_CNTL, 0x80000004);
@@ -3587,7 +3566,7 @@ static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
3587 tmp = gfx_v7_0_halt_rlc(adev); 3566 tmp = gfx_v7_0_halt_rlc(adev);
3588 3567
3589 mutex_lock(&adev->grbm_idx_mutex); 3568 mutex_lock(&adev->grbm_idx_mutex);
3590 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); 3569 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3591 WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); 3570 WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
3592 WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); 3571 WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
3593 tmp2 = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | 3572 tmp2 = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK |
@@ -3638,7 +3617,7 @@ static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
3638 tmp = gfx_v7_0_halt_rlc(adev); 3617 tmp = gfx_v7_0_halt_rlc(adev);
3639 3618
3640 mutex_lock(&adev->grbm_idx_mutex); 3619 mutex_lock(&adev->grbm_idx_mutex);
3641 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); 3620 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3642 WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); 3621 WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
3643 WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); 3622 WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
3644 data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | 3623 data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK |
@@ -3689,7 +3668,7 @@ static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
3689 tmp = gfx_v7_0_halt_rlc(adev); 3668 tmp = gfx_v7_0_halt_rlc(adev);
3690 3669
3691 mutex_lock(&adev->grbm_idx_mutex); 3670 mutex_lock(&adev->grbm_idx_mutex);
3692 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); 3671 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3693 WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); 3672 WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
3694 WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); 3673 WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
3695 data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_1_MASK; 3674 data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_1_MASK;
@@ -3867,6 +3846,20 @@ static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev,
3867 } 3846 }
3868} 3847}
3869 3848
3849static void gfx_v7_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
3850 u32 bitmap)
3851{
3852 u32 data;
3853
3854 if (!bitmap)
3855 return;
3856
3857 data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
3858 data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
3859
3860 WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data);
3861}
3862
3870static u32 gfx_v7_0_get_cu_active_bitmap(struct amdgpu_device *adev) 3863static u32 gfx_v7_0_get_cu_active_bitmap(struct amdgpu_device *adev)
3871{ 3864{
3872 u32 data, mask; 3865 u32 data, mask;
@@ -4123,7 +4116,7 @@ static void gfx_v7_0_fini_pg(struct amdgpu_device *adev)
4123 * Fetches a GPU clock counter snapshot (SI). 4116 * Fetches a GPU clock counter snapshot (SI).
4124 * Returns the 64 bit clock counter snapshot. 4117 * Returns the 64 bit clock counter snapshot.
4125 */ 4118 */
4126uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev) 4119static uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev)
4127{ 4120{
4128 uint64_t clock; 4121 uint64_t clock;
4129 4122
@@ -4183,12 +4176,24 @@ static void gfx_v7_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
4183 amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base)); 4176 amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
4184} 4177}
4185 4178
4179static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
4180 .get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
4181 .select_se_sh = &gfx_v7_0_select_se_sh,
4182};
4183
4184static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
4185 .enter_safe_mode = gfx_v7_0_enter_rlc_safe_mode,
4186 .exit_safe_mode = gfx_v7_0_exit_rlc_safe_mode
4187};
4188
4186static int gfx_v7_0_early_init(void *handle) 4189static int gfx_v7_0_early_init(void *handle)
4187{ 4190{
4188 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 4191 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4189 4192
4190 adev->gfx.num_gfx_rings = GFX7_NUM_GFX_RINGS; 4193 adev->gfx.num_gfx_rings = GFX7_NUM_GFX_RINGS;
4191 adev->gfx.num_compute_rings = GFX7_NUM_COMPUTE_RINGS; 4194 adev->gfx.num_compute_rings = GFX7_NUM_COMPUTE_RINGS;
4195 adev->gfx.funcs = &gfx_v7_0_gfx_funcs;
4196 adev->gfx.rlc.funcs = &gfx_v7_0_rlc_funcs;
4192 gfx_v7_0_set_ring_funcs(adev); 4197 gfx_v7_0_set_ring_funcs(adev);
4193 gfx_v7_0_set_irq_funcs(adev); 4198 gfx_v7_0_set_irq_funcs(adev);
4194 gfx_v7_0_set_gds_init(adev); 4199 gfx_v7_0_set_gds_init(adev);
@@ -5032,16 +5037,22 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
5032 int i, j, k, counter, active_cu_number = 0; 5037 int i, j, k, counter, active_cu_number = 0;
5033 u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0; 5038 u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
5034 struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info; 5039 struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
5040 unsigned disable_masks[4 * 2];
5035 5041
5036 memset(cu_info, 0, sizeof(*cu_info)); 5042 memset(cu_info, 0, sizeof(*cu_info));
5037 5043
5044 amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);
5045
5038 mutex_lock(&adev->grbm_idx_mutex); 5046 mutex_lock(&adev->grbm_idx_mutex);
5039 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { 5047 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
5040 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { 5048 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
5041 mask = 1; 5049 mask = 1;
5042 ao_bitmap = 0; 5050 ao_bitmap = 0;
5043 counter = 0; 5051 counter = 0;
5044 gfx_v7_0_select_se_sh(adev, i, j); 5052 gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
5053 if (i < 4 && j < 2)
5054 gfx_v7_0_set_user_cu_inactive_bitmap(
5055 adev, disable_masks[i * 2 + j]);
5045 bitmap = gfx_v7_0_get_cu_active_bitmap(adev); 5056 bitmap = gfx_v7_0_get_cu_active_bitmap(adev);
5046 cu_info->bitmap[i][j] = bitmap; 5057 cu_info->bitmap[i][j] = bitmap;
5047 5058
@@ -5057,7 +5068,7 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
5057 ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8)); 5068 ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
5058 } 5069 }
5059 } 5070 }
5060 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); 5071 gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
5061 mutex_unlock(&adev->grbm_idx_mutex); 5072 mutex_unlock(&adev->grbm_idx_mutex);
5062 5073
5063 cu_info->number = active_cu_number; 5074 cu_info->number = active_cu_number;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h
index e747aa935c88..94e3ea147c26 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h
@@ -26,11 +26,4 @@
26 26
27extern const struct amd_ip_funcs gfx_v7_0_ip_funcs; 27extern const struct amd_ip_funcs gfx_v7_0_ip_funcs;
28 28
29/* XXX these shouldn't be exported */
30void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev);
31void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev);
32void gfx_v7_0_rlc_stop(struct amdgpu_device *adev);
33uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev);
34void gfx_v7_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num);
35
36#endif 29#endif
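With the gfx_v7_0.h prototypes removed, the helpers that used to be exported (select_se_sh, the RLC safe-mode entry/exit, the GPU clock counter) are now reached only through the amdgpu_gfx_funcs and amdgpu_rlc_funcs tables installed in gfx_v7_0_early_init(). A minimal sketch of a caller going through the table after this change; the wrapper name is illustrative, the indirection itself is what the header change forces.

	/* Illustrative caller using the function table instead of the removed export. */
	static void example_broadcast_grbm_index(struct amdgpu_device *adev)
	{
		/* was: gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); */
		adev->gfx.funcs->select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	}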
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index c2ef94511f70..bff8668e9e6d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -787,26 +787,25 @@ static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring)
787 return r; 787 return r;
788} 788}
789 789
790static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring) 790static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
791{ 791{
792 struct amdgpu_device *adev = ring->adev; 792 struct amdgpu_device *adev = ring->adev;
793 struct amdgpu_ib ib; 793 struct amdgpu_ib ib;
794 struct fence *f = NULL; 794 struct fence *f = NULL;
795 uint32_t scratch; 795 uint32_t scratch;
796 uint32_t tmp = 0; 796 uint32_t tmp = 0;
797 unsigned i; 797 long r;
798 int r;
799 798
800 r = amdgpu_gfx_scratch_get(adev, &scratch); 799 r = amdgpu_gfx_scratch_get(adev, &scratch);
801 if (r) { 800 if (r) {
802 DRM_ERROR("amdgpu: failed to get scratch reg (%d).\n", r); 801 DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
803 return r; 802 return r;
804 } 803 }
805 WREG32(scratch, 0xCAFEDEAD); 804 WREG32(scratch, 0xCAFEDEAD);
806 memset(&ib, 0, sizeof(ib)); 805 memset(&ib, 0, sizeof(ib));
807 r = amdgpu_ib_get(adev, NULL, 256, &ib); 806 r = amdgpu_ib_get(adev, NULL, 256, &ib);
808 if (r) { 807 if (r) {
809 DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); 808 DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
810 goto err1; 809 goto err1;
811 } 810 }
812 ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1); 811 ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
@@ -818,28 +817,25 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring)
818 if (r) 817 if (r)
819 goto err2; 818 goto err2;
820 819
821 r = fence_wait(f, false); 820 r = fence_wait_timeout(f, false, timeout);
822 if (r) { 821 if (r == 0) {
823 DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); 822 DRM_ERROR("amdgpu: IB test timed out.\n");
823 r = -ETIMEDOUT;
824 goto err2; 824 goto err2;
825 } 825 } else if (r < 0) {
826 for (i = 0; i < adev->usec_timeout; i++) { 826 DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
827 tmp = RREG32(scratch);
828 if (tmp == 0xDEADBEEF)
829 break;
830 DRM_UDELAY(1);
831 }
832 if (i < adev->usec_timeout) {
833 DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
834 ring->idx, i);
835 goto err2; 827 goto err2;
828 }
829 tmp = RREG32(scratch);
830 if (tmp == 0xDEADBEEF) {
831 DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
832 r = 0;
836 } else { 833 } else {
837 DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n", 834 DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
838 scratch, tmp); 835 scratch, tmp);
839 r = -EINVAL; 836 r = -EINVAL;
840 } 837 }
841err2: 838err2:
842 fence_put(f);
843 amdgpu_ib_free(adev, &ib, NULL); 839 amdgpu_ib_free(adev, &ib, NULL);
844 fence_put(f); 840 fence_put(f);
845err1: 841err1:
@@ -1160,6 +1156,71 @@ static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev,
1160 buffer[count++] = cpu_to_le32(0); 1156 buffer[count++] = cpu_to_le32(0);
1161} 1157}
1162 1158
1159static void cz_init_cp_jump_table(struct amdgpu_device *adev)
1160{
1161 const __le32 *fw_data;
1162 volatile u32 *dst_ptr;
1163 int me, i, max_me = 4;
1164 u32 bo_offset = 0;
1165 u32 table_offset, table_size;
1166
1167 if (adev->asic_type == CHIP_CARRIZO)
1168 max_me = 5;
1169
1170 /* write the cp table buffer */
1171 dst_ptr = adev->gfx.rlc.cp_table_ptr;
1172 for (me = 0; me < max_me; me++) {
1173 if (me == 0) {
1174 const struct gfx_firmware_header_v1_0 *hdr =
1175 (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
1176 fw_data = (const __le32 *)
1177 (adev->gfx.ce_fw->data +
1178 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1179 table_offset = le32_to_cpu(hdr->jt_offset);
1180 table_size = le32_to_cpu(hdr->jt_size);
1181 } else if (me == 1) {
1182 const struct gfx_firmware_header_v1_0 *hdr =
1183 (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
1184 fw_data = (const __le32 *)
1185 (adev->gfx.pfp_fw->data +
1186 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1187 table_offset = le32_to_cpu(hdr->jt_offset);
1188 table_size = le32_to_cpu(hdr->jt_size);
1189 } else if (me == 2) {
1190 const struct gfx_firmware_header_v1_0 *hdr =
1191 (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
1192 fw_data = (const __le32 *)
1193 (adev->gfx.me_fw->data +
1194 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1195 table_offset = le32_to_cpu(hdr->jt_offset);
1196 table_size = le32_to_cpu(hdr->jt_size);
1197 } else if (me == 3) {
1198 const struct gfx_firmware_header_v1_0 *hdr =
1199 (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
1200 fw_data = (const __le32 *)
1201 (adev->gfx.mec_fw->data +
1202 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1203 table_offset = le32_to_cpu(hdr->jt_offset);
1204 table_size = le32_to_cpu(hdr->jt_size);
1205 } else if (me == 4) {
1206 const struct gfx_firmware_header_v1_0 *hdr =
1207 (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
1208 fw_data = (const __le32 *)
1209 (adev->gfx.mec2_fw->data +
1210 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
1211 table_offset = le32_to_cpu(hdr->jt_offset);
1212 table_size = le32_to_cpu(hdr->jt_size);
1213 }
1214
1215 for (i = 0; i < table_size; i ++) {
1216 dst_ptr[bo_offset + i] =
1217 cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
1218 }
1219
1220 bo_offset += table_size;
1221 }
1222}
1223
1163static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev) 1224static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev)
1164{ 1225{
1165 int r; 1226 int r;
@@ -1175,6 +1236,18 @@ static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev)
1175 amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj); 1236 amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj);
1176 adev->gfx.rlc.clear_state_obj = NULL; 1237 adev->gfx.rlc.clear_state_obj = NULL;
1177 } 1238 }
1239
1240 /* jump table block */
1241 if (adev->gfx.rlc.cp_table_obj) {
1242 r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
1243 if (unlikely(r != 0))
1244 dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
1245 amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj);
1246 amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
1247
1248 amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj);
1249 adev->gfx.rlc.cp_table_obj = NULL;
1250 }
1178} 1251}
1179 1252
1180static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) 1253static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
@@ -1231,6 +1304,46 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
1231 amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); 1304 amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
1232 } 1305 }
1233 1306
1307 if ((adev->asic_type == CHIP_CARRIZO) ||
1308 (adev->asic_type == CHIP_STONEY)) {
1309 adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
1310 if (adev->gfx.rlc.cp_table_obj == NULL) {
1311 r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true,
1312 AMDGPU_GEM_DOMAIN_VRAM,
1313 AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
1314 NULL, NULL,
1315 &adev->gfx.rlc.cp_table_obj);
1316 if (r) {
1317 dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
1318 return r;
1319 }
1320 }
1321
1322 r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false);
1323 if (unlikely(r != 0)) {
1324 dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r);
1325 return r;
1326 }
1327 r = amdgpu_bo_pin(adev->gfx.rlc.cp_table_obj, AMDGPU_GEM_DOMAIN_VRAM,
1328 &adev->gfx.rlc.cp_table_gpu_addr);
1329 if (r) {
1330 amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
1331 dev_warn(adev->dev, "(%d) pin RLC cp_table bo failed\n", r);
1332 return r;
1333 }
1334 r = amdgpu_bo_kmap(adev->gfx.rlc.cp_table_obj, (void **)&adev->gfx.rlc.cp_table_ptr);
1335 if (r) {
1336 dev_warn(adev->dev, "(%d) map RLC cp table bo failed\n", r);
1337 return r;
1338 }
1339
1340 cz_init_cp_jump_table(adev);
1341
1342 amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
1343 amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
1344
1345 }
1346
1234 return 0; 1347 return 0;
1235} 1348}
1236 1349
@@ -1612,7 +1725,6 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
1612 RREG32(sec_ded_counter_registers[i]); 1725 RREG32(sec_ded_counter_registers[i]);
1613 1726
1614fail: 1727fail:
1615 fence_put(f);
1616 amdgpu_ib_free(adev, &ib, NULL); 1728 amdgpu_ib_free(adev, &ib, NULL);
1617 fence_put(f); 1729 fence_put(f);
1618 1730
@@ -3339,9 +3451,15 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
3339 } 3451 }
3340} 3452}
3341 3453
3342void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num) 3454static void gfx_v8_0_select_se_sh(struct amdgpu_device *adev,
3455 u32 se_num, u32 sh_num, u32 instance)
3343{ 3456{
3344 u32 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1); 3457 u32 data;
3458
3459 if (instance == 0xffffffff)
3460 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
3461 else
3462 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);
3345 3463
3346 if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) { 3464 if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) {
3347 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1); 3465 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1);
@@ -3391,13 +3509,13 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev)
3391 mutex_lock(&adev->grbm_idx_mutex); 3509 mutex_lock(&adev->grbm_idx_mutex);
3392 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { 3510 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
3393 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { 3511 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
3394 gfx_v8_0_select_se_sh(adev, i, j); 3512 gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
3395 data = gfx_v8_0_get_rb_active_bitmap(adev); 3513 data = gfx_v8_0_get_rb_active_bitmap(adev);
3396 active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) * 3514 active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
3397 rb_bitmap_width_per_sh); 3515 rb_bitmap_width_per_sh);
3398 } 3516 }
3399 } 3517 }
3400 gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); 3518 gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3401 mutex_unlock(&adev->grbm_idx_mutex); 3519 mutex_unlock(&adev->grbm_idx_mutex);
3402 3520
3403 adev->gfx.config.backend_enable_mask = active_rbs; 3521 adev->gfx.config.backend_enable_mask = active_rbs;
@@ -3501,7 +3619,7 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
3501 * making sure that the following register writes will be broadcasted 3619 * making sure that the following register writes will be broadcasted
3502 * to all the shaders 3620 * to all the shaders
3503 */ 3621 */
3504 gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); 3622 gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3505 3623
3506 WREG32(mmPA_SC_FIFO_SIZE, 3624 WREG32(mmPA_SC_FIFO_SIZE,
3507 (adev->gfx.config.sc_prim_fifo_size_frontend << 3625 (adev->gfx.config.sc_prim_fifo_size_frontend <<
@@ -3524,7 +3642,7 @@ static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
3524 mutex_lock(&adev->grbm_idx_mutex); 3642 mutex_lock(&adev->grbm_idx_mutex);
3525 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { 3643 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
3526 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { 3644 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
3527 gfx_v8_0_select_se_sh(adev, i, j); 3645 gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
3528 for (k = 0; k < adev->usec_timeout; k++) { 3646 for (k = 0; k < adev->usec_timeout; k++) {
3529 if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0) 3647 if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0)
3530 break; 3648 break;
@@ -3532,7 +3650,7 @@ static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
3532 } 3650 }
3533 } 3651 }
3534 } 3652 }
3535 gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); 3653 gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
3536 mutex_unlock(&adev->grbm_idx_mutex); 3654 mutex_unlock(&adev->grbm_idx_mutex);
3537 3655
3538 mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK | 3656 mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
@@ -3693,13 +3811,13 @@ static void gfx_v8_0_enable_save_restore_machine(struct amdgpu_device *adev)
3693 WREG32(mmRLC_SRM_CNTL, data); 3811 WREG32(mmRLC_SRM_CNTL, data);
3694} 3812}
3695 3813
3696static void polaris11_init_power_gating(struct amdgpu_device *adev) 3814static void gfx_v8_0_init_power_gating(struct amdgpu_device *adev)
3697{ 3815{
3698 uint32_t data; 3816 uint32_t data;
3699 3817
3700 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | 3818 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
3701 AMD_PG_SUPPORT_GFX_SMG | 3819 AMD_PG_SUPPORT_GFX_SMG |
3702 AMD_PG_SUPPORT_GFX_DMG)) { 3820 AMD_PG_SUPPORT_GFX_DMG)) {
3703 data = RREG32(mmCP_RB_WPTR_POLL_CNTL); 3821 data = RREG32(mmCP_RB_WPTR_POLL_CNTL);
3704 data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK; 3822 data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
3705 data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT); 3823 data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
@@ -3724,6 +3842,53 @@ static void polaris11_init_power_gating(struct amdgpu_device *adev)
3724 } 3842 }
3725} 3843}
3726 3844
3845static void cz_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev,
3846 bool enable)
3847{
3848 u32 data, orig;
3849
3850 orig = data = RREG32(mmRLC_PG_CNTL);
3851
3852 if (enable)
3853 data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
3854 else
3855 data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
3856
3857 if (orig != data)
3858 WREG32(mmRLC_PG_CNTL, data);
3859}
3860
3861static void cz_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev,
3862 bool enable)
3863{
3864 u32 data, orig;
3865
3866 orig = data = RREG32(mmRLC_PG_CNTL);
3867
3868 if (enable)
3869 data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
3870 else
3871 data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
3872
3873 if (orig != data)
3874 WREG32(mmRLC_PG_CNTL, data);
3875}
3876
3877static void cz_enable_cp_power_gating(struct amdgpu_device *adev, bool enable)
3878{
3879 u32 data, orig;
3880
3881 orig = data = RREG32(mmRLC_PG_CNTL);
3882
3883 if (enable)
3884 data &= ~RLC_PG_CNTL__CP_PG_DISABLE_MASK;
3885 else
3886 data |= RLC_PG_CNTL__CP_PG_DISABLE_MASK;
3887
3888 if (orig != data)
3889 WREG32(mmRLC_PG_CNTL, data);
3890}
3891
3727static void gfx_v8_0_init_pg(struct amdgpu_device *adev) 3892static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
3728{ 3893{
3729 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | 3894 if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
@@ -3736,8 +3901,25 @@ static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
3736 gfx_v8_0_init_save_restore_list(adev); 3901 gfx_v8_0_init_save_restore_list(adev);
3737 gfx_v8_0_enable_save_restore_machine(adev); 3902 gfx_v8_0_enable_save_restore_machine(adev);
3738 3903
3739 if (adev->asic_type == CHIP_POLARIS11) 3904 if ((adev->asic_type == CHIP_CARRIZO) ||
3740 polaris11_init_power_gating(adev); 3905 (adev->asic_type == CHIP_STONEY)) {
3906 WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8);
3907 gfx_v8_0_init_power_gating(adev);
3908 WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
3909 if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) {
3910 cz_enable_sck_slow_down_on_power_up(adev, true);
3911 cz_enable_sck_slow_down_on_power_down(adev, true);
3912 } else {
3913 cz_enable_sck_slow_down_on_power_up(adev, false);
3914 cz_enable_sck_slow_down_on_power_down(adev, false);
3915 }
3916 if (adev->pg_flags & AMD_PG_SUPPORT_CP)
3917 cz_enable_cp_power_gating(adev, true);
3918 else
3919 cz_enable_cp_power_gating(adev, false);
3920 } else if (adev->asic_type == CHIP_POLARIS11) {
3921 gfx_v8_0_init_power_gating(adev);
3922 }
3741 } 3923 }
3742} 3924}
3743 3925
@@ -4976,7 +5158,7 @@ static int gfx_v8_0_soft_reset(void *handle)
4976 * Fetches a GPU clock counter snapshot. 5158 * Fetches a GPU clock counter snapshot.
4977 * Returns the 64 bit clock counter snapshot. 5159 * Returns the 64 bit clock counter snapshot.
4978 */ 5160 */
4979uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev) 5161static uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev)
4980{ 5162{
4981 uint64_t clock; 5163 uint64_t clock;
4982 5164
@@ -5036,12 +5218,18 @@ static void gfx_v8_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
5036 amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base)); 5218 amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
5037} 5219}
5038 5220
5221static const struct amdgpu_gfx_funcs gfx_v8_0_gfx_funcs = {
5222 .get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
5223 .select_se_sh = &gfx_v8_0_select_se_sh,
5224};
5225
5039static int gfx_v8_0_early_init(void *handle) 5226static int gfx_v8_0_early_init(void *handle)
5040{ 5227{
5041 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 5228 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5042 5229
5043 adev->gfx.num_gfx_rings = GFX8_NUM_GFX_RINGS; 5230 adev->gfx.num_gfx_rings = GFX8_NUM_GFX_RINGS;
5044 adev->gfx.num_compute_rings = GFX8_NUM_COMPUTE_RINGS; 5231 adev->gfx.num_compute_rings = GFX8_NUM_COMPUTE_RINGS;
5232 adev->gfx.funcs = &gfx_v8_0_gfx_funcs;
5045 gfx_v8_0_set_ring_funcs(adev); 5233 gfx_v8_0_set_ring_funcs(adev);
5046 gfx_v8_0_set_irq_funcs(adev); 5234 gfx_v8_0_set_irq_funcs(adev);
5047 gfx_v8_0_set_gds_init(adev); 5235 gfx_v8_0_set_gds_init(adev);
@@ -5074,51 +5262,43 @@ static int gfx_v8_0_late_init(void *handle)
5074 return 0; 5262 return 0;
5075} 5263}
5076 5264
5077static void polaris11_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev, 5265static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
5078 bool enable) 5266 bool enable)
5079{ 5267{
5080 uint32_t data, temp; 5268 uint32_t data, temp;
5081 5269
5082 /* Send msg to SMU via Powerplay */ 5270 if (adev->asic_type == CHIP_POLARIS11)
5083 amdgpu_set_powergating_state(adev, 5271 /* Send msg to SMU via Powerplay */
5084 AMD_IP_BLOCK_TYPE_SMC, 5272 amdgpu_set_powergating_state(adev,
5085 enable ? AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE); 5273 AMD_IP_BLOCK_TYPE_SMC,
5274 enable ?
5275 AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE);
5086 5276
5087 if (enable) { 5277 temp = data = RREG32(mmRLC_PG_CNTL);
5088 /* Enable static MGPG */ 5278 /* Enable static MGPG */
5089 temp = data = RREG32(mmRLC_PG_CNTL); 5279 if (enable)
5090 data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK; 5280 data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
5091 5281 else
5092 if (temp != data)
5093 WREG32(mmRLC_PG_CNTL, data);
5094 } else {
5095 temp = data = RREG32(mmRLC_PG_CNTL);
5096 data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK; 5282 data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
5097 5283
5098 if (temp != data) 5284 if (temp != data)
5099 WREG32(mmRLC_PG_CNTL, data); 5285 WREG32(mmRLC_PG_CNTL, data);
5100 }
5101} 5286}
5102 5287
5103static void polaris11_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev, 5288static void gfx_v8_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev,
5104 bool enable) 5289 bool enable)
5105{ 5290{
5106 uint32_t data, temp; 5291 uint32_t data, temp;
5107 5292
5108 if (enable) { 5293 temp = data = RREG32(mmRLC_PG_CNTL);
5109 /* Enable dynamic MGPG */ 5294 /* Enable dynamic MGPG */
5110 temp = data = RREG32(mmRLC_PG_CNTL); 5295 if (enable)
5111 data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK; 5296 data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
5112 5297 else
5113 if (temp != data)
5114 WREG32(mmRLC_PG_CNTL, data);
5115 } else {
5116 temp = data = RREG32(mmRLC_PG_CNTL);
5117 data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK; 5298 data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
5118 5299
5119 if (temp != data) 5300 if (temp != data)
5120 WREG32(mmRLC_PG_CNTL, data); 5301 WREG32(mmRLC_PG_CNTL, data);
5121 }
5122} 5302}
5123 5303
5124static void polaris11_enable_gfx_quick_mg_power_gating(struct amdgpu_device *adev, 5304static void polaris11_enable_gfx_quick_mg_power_gating(struct amdgpu_device *adev,
@@ -5126,19 +5306,63 @@ static void polaris11_enable_gfx_quick_mg_power_gating(struct amdgpu_device *ade
5126{ 5306{
5127 uint32_t data, temp; 5307 uint32_t data, temp;
5128 5308
5129 if (enable) { 5309 temp = data = RREG32(mmRLC_PG_CNTL);
5130 /* Enable quick PG */ 5310 /* Enable quick PG */
5131 temp = data = RREG32(mmRLC_PG_CNTL); 5311 if (enable)
5132 data |= 0x100000; 5312 data |= RLC_PG_CNTL__QUICK_PG_ENABLE_MASK;
5313 else
5314 data &= ~RLC_PG_CNTL__QUICK_PG_ENABLE_MASK;
5133 5315
5134 if (temp != data) 5316 if (temp != data)
5135 WREG32(mmRLC_PG_CNTL, data); 5317 WREG32(mmRLC_PG_CNTL, data);
5136 } else { 5318}
5137 temp = data = RREG32(mmRLC_PG_CNTL);
5138 data &= ~0x100000;
5139 5319
5140 if (temp != data) 5320static void cz_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
5141 WREG32(mmRLC_PG_CNTL, data); 5321 bool enable)
5322{
5323 u32 data, orig;
5324
5325 orig = data = RREG32(mmRLC_PG_CNTL);
5326
5327 if (enable)
5328 data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
5329 else
5330 data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
5331
5332 if (orig != data)
5333 WREG32(mmRLC_PG_CNTL, data);
5334}
5335
5336static void cz_enable_gfx_pipeline_power_gating(struct amdgpu_device *adev,
5337 bool enable)
5338{
5339 u32 data, orig;
5340
5341 orig = data = RREG32(mmRLC_PG_CNTL);
5342
5343 if (enable)
5344 data |= RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK;
5345 else
5346 data &= ~RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK;
5347
5348 if (orig != data)
5349 WREG32(mmRLC_PG_CNTL, data);
5350
5351 /* Read any GFX register to wake up GFX. */
5352 if (!enable)
5353 data = RREG32(mmDB_RENDER_CONTROL);
5354}
5355
5356static void cz_update_gfx_cg_power_gating(struct amdgpu_device *adev,
5357 bool enable)
5358{
5359 if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) {
5360 cz_enable_gfx_cg_power_gating(adev, true);
5361 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE)
5362 cz_enable_gfx_pipeline_power_gating(adev, true);
5363 } else {
5364 cz_enable_gfx_cg_power_gating(adev, false);
5365 cz_enable_gfx_pipeline_power_gating(adev, false);
5142 } 5366 }
5143} 5367}
5144 5368
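
The power-gating helpers reworked in the hunks above all converge on one read-modify-write idiom: read RLC_PG_CNTL once, set or clear the feature bit according to the enable flag, and write the register back only when the value actually changed. A minimal standalone sketch of that idiom follows; reg_read()/reg_write(), fake_reg and FEATURE_ENABLE_MASK are stand-ins for the kernel's RREG32()/WREG32(), mmRLC_PG_CNTL and the RLC_PG_CNTL field masks, not the driver's real symbols.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FEATURE_ENABLE_MASK 0x00000004u   /* placeholder bit, not a real RLC_PG_CNTL field */

    static uint32_t fake_reg;                 /* stands in for mmRLC_PG_CNTL */

    static uint32_t reg_read(void) { return fake_reg; }

    static void reg_write(uint32_t v)
    {
            fake_reg = v;
            printf("write 0x%08" PRIx32 "\n", v);
    }

    static void toggle_feature(int enable)
    {
            uint32_t data, orig;

            orig = data = reg_read();
            if (enable)
                    data |= FEATURE_ENABLE_MASK;
            else
                    data &= ~FEATURE_ENABLE_MASK;

            if (orig != data)         /* only touch the register when the value changes */
                    reg_write(data);
    }

    int main(void)
    {
            toggle_feature(1);        /* one write */
            toggle_feature(1);        /* no write: already enabled */
            toggle_feature(0);        /* one write */
            return 0;
    }

Folding the enable and disable branches into a single read-modify-write also keeps the "skip the write if nothing changed" check in one place instead of duplicating it per branch.
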
@@ -5146,21 +5370,42 @@ static int gfx_v8_0_set_powergating_state(void *handle,
5146 enum amd_powergating_state state) 5370 enum amd_powergating_state state)
5147{ 5371{
5148 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 5372 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
5373 bool enable = (state == AMD_PG_STATE_GATE) ? true : false;
5149 5374
5150 if (!(adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) 5375 if (!(adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
5151 return 0; 5376 return 0;
5152 5377
5153 switch (adev->asic_type) { 5378 switch (adev->asic_type) {
5379 case CHIP_CARRIZO:
5380 case CHIP_STONEY:
5381 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)
5382 cz_update_gfx_cg_power_gating(adev, enable);
5383
5384 if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
5385 gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
5386 else
5387 gfx_v8_0_enable_gfx_static_mg_power_gating(adev, false);
5388
5389 if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
5390 gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, true);
5391 else
5392 gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, false);
5393 break;
5154 case CHIP_POLARIS11: 5394 case CHIP_POLARIS11:
5155 if (adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) 5395 if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
5156 polaris11_enable_gfx_static_mg_power_gating(adev, 5396 gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
5157 state == AMD_PG_STATE_GATE ? true : false); 5397 else
5158 else if (adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) 5398 gfx_v8_0_enable_gfx_static_mg_power_gating(adev, false);
5159 polaris11_enable_gfx_dynamic_mg_power_gating(adev, 5399
5160 state == AMD_PG_STATE_GATE ? true : false); 5400 if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable)
5401 gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, true);
5402 else
5403 gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, false);
5404
5405 if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_QUICK_MG) && enable)
5406 polaris11_enable_gfx_quick_mg_power_gating(adev, true);
5161 else 5407 else
5162 polaris11_enable_gfx_quick_mg_power_gating(adev, 5408 polaris11_enable_gfx_quick_mg_power_gating(adev, false);
5163 state == AMD_PG_STATE_GATE ? true : false);
5164 break; 5409 break;
5165 default: 5410 default:
5166 break; 5411 break;
@@ -5174,7 +5419,7 @@ static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev,
5174{ 5419{
5175 uint32_t data; 5420 uint32_t data;
5176 5421
5177 gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); 5422 gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
5178 5423
5179 WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); 5424 WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
5180 WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); 5425 WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
@@ -5562,6 +5807,8 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
5562 WREG32(mmRLC_CGCG_CGLS_CTRL, data); 5807 WREG32(mmRLC_CGCG_CGLS_CTRL, data);
5563 } 5808 }
5564 5809
5810 gfx_v8_0_wait_for_rlc_serdes(adev);
5811
5565 adev->gfx.rlc.funcs->exit_safe_mode(adev); 5812 adev->gfx.rlc.funcs->exit_safe_mode(adev);
5566} 5813}
5567static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev, 5814static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev,
@@ -5687,17 +5934,6 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
5687 unsigned vm_id, bool ctx_switch) 5934 unsigned vm_id, bool ctx_switch)
5688{ 5935{
5689 u32 header, control = 0; 5936 u32 header, control = 0;
5690 u32 next_rptr = ring->wptr + 5;
5691
5692 if (ctx_switch)
5693 next_rptr += 2;
5694
5695 next_rptr += 4;
5696 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5697 amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
5698 amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
5699 amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
5700 amdgpu_ring_write(ring, next_rptr);
5701 5937
5702 /* insert SWITCH_BUFFER packet before first IB in the ring frame */ 5938 /* insert SWITCH_BUFFER packet before first IB in the ring frame */
5703 if (ctx_switch) { 5939 if (ctx_switch) {
@@ -5726,23 +5962,9 @@ static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
5726 struct amdgpu_ib *ib, 5962 struct amdgpu_ib *ib,
5727 unsigned vm_id, bool ctx_switch) 5963 unsigned vm_id, bool ctx_switch)
5728{ 5964{
5729 u32 header, control = 0; 5965 u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24);
5730 u32 next_rptr = ring->wptr + 5;
5731
5732 control |= INDIRECT_BUFFER_VALID;
5733
5734 next_rptr += 4;
5735 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5736 amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
5737 amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
5738 amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
5739 amdgpu_ring_write(ring, next_rptr);
5740 5966
5741 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); 5967 amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
5742
5743 control |= ib->length_dw | (vm_id << 24);
5744
5745 amdgpu_ring_write(ring, header);
5746 amdgpu_ring_write(ring, 5968 amdgpu_ring_write(ring,
5747#ifdef __BIG_ENDIAN 5969#ifdef __BIG_ENDIAN
5748 (2 << 0) | 5970 (2 << 0) |
@@ -6195,9 +6417,9 @@ static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev)
6195{ 6417{
6196 switch (adev->asic_type) { 6418 switch (adev->asic_type) {
6197 case CHIP_TOPAZ: 6419 case CHIP_TOPAZ:
6198 case CHIP_STONEY:
6199 adev->gfx.rlc.funcs = &iceland_rlc_funcs; 6420 adev->gfx.rlc.funcs = &iceland_rlc_funcs;
6200 break; 6421 break;
6422 case CHIP_STONEY:
6201 case CHIP_CARRIZO: 6423 case CHIP_CARRIZO:
6202 adev->gfx.rlc.funcs = &cz_rlc_funcs; 6424 adev->gfx.rlc.funcs = &cz_rlc_funcs;
6203 break; 6425 break;
@@ -6235,6 +6457,20 @@ static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev)
6235 } 6457 }
6236} 6458}
6237 6459
6460static void gfx_v8_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
6461 u32 bitmap)
6462{
6463 u32 data;
6464
6465 if (!bitmap)
6466 return;
6467
6468 data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
6469 data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
6470
6471 WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data);
6472}
6473
6238static u32 gfx_v8_0_get_cu_active_bitmap(struct amdgpu_device *adev) 6474static u32 gfx_v8_0_get_cu_active_bitmap(struct amdgpu_device *adev)
6239{ 6475{
6240 u32 data, mask; 6476 u32 data, mask;
@@ -6255,16 +6491,22 @@ static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev)
6255 int i, j, k, counter, active_cu_number = 0; 6491 int i, j, k, counter, active_cu_number = 0;
6256 u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0; 6492 u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
6257 struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info; 6493 struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
6494 unsigned disable_masks[4 * 2];
6258 6495
6259 memset(cu_info, 0, sizeof(*cu_info)); 6496 memset(cu_info, 0, sizeof(*cu_info));
6260 6497
6498 amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);
6499
6261 mutex_lock(&adev->grbm_idx_mutex); 6500 mutex_lock(&adev->grbm_idx_mutex);
6262 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { 6501 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
6263 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { 6502 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
6264 mask = 1; 6503 mask = 1;
6265 ao_bitmap = 0; 6504 ao_bitmap = 0;
6266 counter = 0; 6505 counter = 0;
6267 gfx_v8_0_select_se_sh(adev, i, j); 6506 gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff);
6507 if (i < 4 && j < 2)
6508 gfx_v8_0_set_user_cu_inactive_bitmap(
6509 adev, disable_masks[i * 2 + j]);
6268 bitmap = gfx_v8_0_get_cu_active_bitmap(adev); 6510 bitmap = gfx_v8_0_get_cu_active_bitmap(adev);
6269 cu_info->bitmap[i][j] = bitmap; 6511 cu_info->bitmap[i][j] = bitmap;
6270 6512
@@ -6280,7 +6522,7 @@ static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev)
6280 ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8)); 6522 ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
6281 } 6523 }
6282 } 6524 }
6283 gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); 6525 gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
6284 mutex_unlock(&adev->grbm_idx_mutex); 6526 mutex_unlock(&adev->grbm_idx_mutex);
6285 6527
6286 cu_info->number = active_cu_number; 6528 cu_info->number = active_cu_number;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
index 16a49f53a2fa..bc82c794312c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h
@@ -26,7 +26,6 @@
26 26
27extern const struct amd_ip_funcs gfx_v8_0_ip_funcs; 27extern const struct amd_ip_funcs gfx_v8_0_ip_funcs;
28 28
29uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev);
30void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num); 29void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num);
31 30
32#endif 31#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 1feb6439cb0b..d24a82bd0c7a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -39,6 +39,7 @@
39 39
40static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev); 40static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev);
41static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev); 41static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
42static int gmc_v7_0_wait_for_idle(void *handle);
42 43
43MODULE_FIRMWARE("radeon/bonaire_mc.bin"); 44MODULE_FIRMWARE("radeon/bonaire_mc.bin");
44MODULE_FIRMWARE("radeon/hawaii_mc.bin"); 45MODULE_FIRMWARE("radeon/hawaii_mc.bin");
@@ -73,39 +74,15 @@ static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev)
73 } 74 }
74} 75}
75 76
76/** 77static void gmc_v7_0_mc_stop(struct amdgpu_device *adev,
77 * gmc7_mc_wait_for_idle - wait for MC idle callback. 78 struct amdgpu_mode_mc_save *save)
78 *
79 * @adev: amdgpu_device pointer
80 *
81 * Wait for the MC (memory controller) to be idle.
82 * (evergreen+).
83 * Returns 0 if the MC is idle, -1 if not.
84 */
85int gmc_v7_0_mc_wait_for_idle(struct amdgpu_device *adev)
86{
87 unsigned i;
88 u32 tmp;
89
90 for (i = 0; i < adev->usec_timeout; i++) {
91 /* read MC_STATUS */
92 tmp = RREG32(mmSRBM_STATUS) & 0x1F00;
93 if (!tmp)
94 return 0;
95 udelay(1);
96 }
97 return -1;
98}
99
100void gmc_v7_0_mc_stop(struct amdgpu_device *adev,
101 struct amdgpu_mode_mc_save *save)
102{ 79{
103 u32 blackout; 80 u32 blackout;
104 81
105 if (adev->mode_info.num_crtc) 82 if (adev->mode_info.num_crtc)
106 amdgpu_display_stop_mc_access(adev, save); 83 amdgpu_display_stop_mc_access(adev, save);
107 84
108 amdgpu_asic_wait_for_mc_idle(adev); 85 gmc_v7_0_wait_for_idle((void *)adev);
109 86
110 blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL); 87 blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
111 if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) { 88 if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
@@ -120,8 +97,8 @@ void gmc_v7_0_mc_stop(struct amdgpu_device *adev,
120 udelay(100); 97 udelay(100);
121} 98}
122 99
123void gmc_v7_0_mc_resume(struct amdgpu_device *adev, 100static void gmc_v7_0_mc_resume(struct amdgpu_device *adev,
124 struct amdgpu_mode_mc_save *save) 101 struct amdgpu_mode_mc_save *save)
125{ 102{
126 u32 tmp; 103 u32 tmp;
127 104
@@ -311,7 +288,7 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
311 amdgpu_display_set_vga_render_state(adev, false); 288 amdgpu_display_set_vga_render_state(adev, false);
312 289
313 gmc_v7_0_mc_stop(adev, &save); 290 gmc_v7_0_mc_stop(adev, &save);
314 if (amdgpu_asic_wait_for_mc_idle(adev)) { 291 if (gmc_v7_0_wait_for_idle((void *)adev)) {
315 dev_warn(adev->dev, "Wait for MC idle timedout !\n"); 292 dev_warn(adev->dev, "Wait for MC idle timedout !\n");
316 } 293 }
317 /* Update configuration */ 294 /* Update configuration */
@@ -331,7 +308,7 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
331 WREG32(mmMC_VM_AGP_BASE, 0); 308 WREG32(mmMC_VM_AGP_BASE, 0);
332 WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF); 309 WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
333 WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF); 310 WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
334 if (amdgpu_asic_wait_for_mc_idle(adev)) { 311 if (gmc_v7_0_wait_for_idle((void *)adev)) {
335 dev_warn(adev->dev, "Wait for MC idle timedout !\n"); 312 dev_warn(adev->dev, "Wait for MC idle timedout !\n");
336 } 313 }
337 gmc_v7_0_mc_resume(adev, &save); 314 gmc_v7_0_mc_resume(adev, &save);
@@ -1137,7 +1114,7 @@ static int gmc_v7_0_soft_reset(void *handle)
1137 1114
1138 if (srbm_soft_reset) { 1115 if (srbm_soft_reset) {
1139 gmc_v7_0_mc_stop(adev, &save); 1116 gmc_v7_0_mc_stop(adev, &save);
1140 if (gmc_v7_0_wait_for_idle(adev)) { 1117 if (gmc_v7_0_wait_for_idle((void *)adev)) {
1141 dev_warn(adev->dev, "Wait for GMC idle timed out !\n"); 1118 dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
1142 } 1119 }
1143 1120
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h
index 36fcbbc46ada..0b386b5d2f7a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h
@@ -26,11 +26,4 @@
26 26
27extern const struct amd_ip_funcs gmc_v7_0_ip_funcs; 27extern const struct amd_ip_funcs gmc_v7_0_ip_funcs;
28 28
29/* XXX these shouldn't be exported */
30void gmc_v7_0_mc_stop(struct amdgpu_device *adev,
31 struct amdgpu_mode_mc_save *save);
32void gmc_v7_0_mc_resume(struct amdgpu_device *adev,
33 struct amdgpu_mode_mc_save *save);
34int gmc_v7_0_mc_wait_for_idle(struct amdgpu_device *adev);
35
36#endif 29#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 9945d5bbf1fe..717359d3ba8c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -41,6 +41,7 @@
41 41
42static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev); 42static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
43static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev); 43static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);
44static int gmc_v8_0_wait_for_idle(void *handle);
44 45
45MODULE_FIRMWARE("amdgpu/tonga_mc.bin"); 46MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
46MODULE_FIRMWARE("amdgpu/polaris11_mc.bin"); 47MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
@@ -147,44 +148,15 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
147 } 148 }
148} 149}
149 150
150/** 151static void gmc_v8_0_mc_stop(struct amdgpu_device *adev,
151 * gmc8_mc_wait_for_idle - wait for MC idle callback. 152 struct amdgpu_mode_mc_save *save)
152 *
153 * @adev: amdgpu_device pointer
154 *
155 * Wait for the MC (memory controller) to be idle.
156 * (evergreen+).
157 * Returns 0 if the MC is idle, -1 if not.
158 */
159int gmc_v8_0_mc_wait_for_idle(struct amdgpu_device *adev)
160{
161 unsigned i;
162 u32 tmp;
163
164 for (i = 0; i < adev->usec_timeout; i++) {
165 /* read MC_STATUS */
166 tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__VMC_BUSY_MASK |
167 SRBM_STATUS__MCB_BUSY_MASK |
168 SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
169 SRBM_STATUS__MCC_BUSY_MASK |
170 SRBM_STATUS__MCD_BUSY_MASK |
171 SRBM_STATUS__VMC1_BUSY_MASK);
172 if (!tmp)
173 return 0;
174 udelay(1);
175 }
176 return -1;
177}
178
179void gmc_v8_0_mc_stop(struct amdgpu_device *adev,
180 struct amdgpu_mode_mc_save *save)
181{ 153{
182 u32 blackout; 154 u32 blackout;
183 155
184 if (adev->mode_info.num_crtc) 156 if (adev->mode_info.num_crtc)
185 amdgpu_display_stop_mc_access(adev, save); 157 amdgpu_display_stop_mc_access(adev, save);
186 158
187 amdgpu_asic_wait_for_mc_idle(adev); 159 gmc_v8_0_wait_for_idle(adev);
188 160
189 blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL); 161 blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
190 if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) { 162 if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
@@ -199,8 +171,8 @@ void gmc_v8_0_mc_stop(struct amdgpu_device *adev,
199 udelay(100); 171 udelay(100);
200} 172}
201 173
202void gmc_v8_0_mc_resume(struct amdgpu_device *adev, 174static void gmc_v8_0_mc_resume(struct amdgpu_device *adev,
203 struct amdgpu_mode_mc_save *save) 175 struct amdgpu_mode_mc_save *save)
204{ 176{
205 u32 tmp; 177 u32 tmp;
206 178
@@ -393,7 +365,7 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
393 amdgpu_display_set_vga_render_state(adev, false); 365 amdgpu_display_set_vga_render_state(adev, false);
394 366
395 gmc_v8_0_mc_stop(adev, &save); 367 gmc_v8_0_mc_stop(adev, &save);
396 if (amdgpu_asic_wait_for_mc_idle(adev)) { 368 if (gmc_v8_0_wait_for_idle((void *)adev)) {
397 dev_warn(adev->dev, "Wait for MC idle timedout !\n"); 369 dev_warn(adev->dev, "Wait for MC idle timedout !\n");
398 } 370 }
399 /* Update configuration */ 371 /* Update configuration */
@@ -413,7 +385,7 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
413 WREG32(mmMC_VM_AGP_BASE, 0); 385 WREG32(mmMC_VM_AGP_BASE, 0);
414 WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF); 386 WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
415 WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF); 387 WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
416 if (amdgpu_asic_wait_for_mc_idle(adev)) { 388 if (gmc_v8_0_wait_for_idle((void *)adev)) {
417 dev_warn(adev->dev, "Wait for MC idle timedout !\n"); 389 dev_warn(adev->dev, "Wait for MC idle timedout !\n");
418 } 390 }
419 gmc_v8_0_mc_resume(adev, &save); 391 gmc_v8_0_mc_resume(adev, &save);
@@ -1140,7 +1112,7 @@ static int gmc_v8_0_soft_reset(void *handle)
1140 1112
1141 if (srbm_soft_reset) { 1113 if (srbm_soft_reset) {
1142 gmc_v8_0_mc_stop(adev, &save); 1114 gmc_v8_0_mc_stop(adev, &save);
1143 if (gmc_v8_0_wait_for_idle(adev)) { 1115 if (gmc_v8_0_wait_for_idle((void *)adev)) {
1144 dev_warn(adev->dev, "Wait for GMC idle timed out !\n"); 1116 dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
1145 } 1117 }
1146 1118
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h
index 973436086b38..fc5001a8119d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h
@@ -26,11 +26,4 @@
26 26
27extern const struct amd_ip_funcs gmc_v8_0_ip_funcs; 27extern const struct amd_ip_funcs gmc_v8_0_ip_funcs;
28 28
29/* XXX these shouldn't be exported */
30void gmc_v8_0_mc_stop(struct amdgpu_device *adev,
31 struct amdgpu_mode_mc_save *save);
32void gmc_v8_0_mc_resume(struct amdgpu_device *adev,
33 struct amdgpu_mode_mc_save *save);
34int gmc_v8_0_mc_wait_for_idle(struct amdgpu_device *adev);
35
36#endif 29#endif
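
Both gmc_v7_0 and gmc_v8_0 stop exporting their MC helpers here: mc_stop/mc_resume become static and the idle wait goes through the file-local wait_for_idle() callback, which is forward-declared near the top of the file so earlier functions can still call it. A minimal sketch of that file-local forward-declaration pattern, using illustrative names rather than the driver's:

    #include <stdio.h>

    static int wait_for_idle(void *handle);     /* forward declaration, as with gmc_v8_0_wait_for_idle() */

    static void mc_stop(void *handle)
    {
            if (wait_for_idle(handle))
                    fprintf(stderr, "Wait for MC idle timed out\n");
            /* ... blackout programming would follow here ... */
    }

    static int wait_for_idle(void *handle)
    {
            (void)handle;
            return 0;                           /* pretend the memory controller went idle */
    }

    int main(void)
    {
            mc_stop(NULL);
            return 0;
    }
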
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c
index 825ccd63f2dc..2f078ad6095c 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c
@@ -24,7 +24,7 @@
24#include <linux/firmware.h> 24#include <linux/firmware.h>
25#include "drmP.h" 25#include "drmP.h"
26#include "amdgpu.h" 26#include "amdgpu.h"
27#include "iceland_smumgr.h" 27#include "iceland_smum.h"
28 28
29MODULE_FIRMWARE("amdgpu/topaz_smc.bin"); 29MODULE_FIRMWARE("amdgpu/topaz_smc.bin");
30 30
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
index 52ee08193295..211839913728 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_smc.c
@@ -25,7 +25,7 @@
25#include "drmP.h" 25#include "drmP.h"
26#include "amdgpu.h" 26#include "amdgpu.h"
27#include "ppsmc.h" 27#include "ppsmc.h"
28#include "iceland_smumgr.h" 28#include "iceland_smum.h"
29#include "smu_ucode_xfer_vi.h" 29#include "smu_ucode_xfer_vi.h"
30#include "amdgpu_ucode.h" 30#include "amdgpu_ucode.h"
31 31
@@ -211,7 +211,7 @@ static int iceland_send_msg_to_smc_without_waiting(struct amdgpu_device *adev,
211 PPSMC_Msg msg) 211 PPSMC_Msg msg)
212{ 212{
213 if (!iceland_is_smc_ram_running(adev)) 213 if (!iceland_is_smc_ram_running(adev))
214 return -EINVAL;; 214 return -EINVAL;
215 215
216 if (wait_smu_response(adev)) { 216 if (wait_smu_response(adev)) {
217 DRM_ERROR("Failed to send previous message\n"); 217 DRM_ERROR("Failed to send previous message\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_smumgr.h b/drivers/gpu/drm/amd/amdgpu/iceland_smum.h
index 1e0769e110fa..5983e3150cc5 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_smumgr.h
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_smum.h
@@ -21,8 +21,8 @@
21 * 21 *
22 */ 22 */
23 23
24#ifndef ICELAND_SMUMGR_H 24#ifndef ICELAND_SMUM_H
25#define ICELAND_SMUMGR_H 25#define ICELAND_SMUM_H
26 26
27#include "ppsmc.h" 27#include "ppsmc.h"
28 28
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index a789a863d677..a845e883f5fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -191,6 +191,7 @@ static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev,
191 vid_mapping_table->num_entries = i; 191 vid_mapping_table->num_entries = i;
192} 192}
193 193
194#if 0
194static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] = 195static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] =
195{ 196{
196 { 0, 4, 1 }, 197 { 0, 4, 1 },
@@ -289,6 +290,7 @@ static const struct kv_lcac_config_reg cpl_cac_config_reg[] =
289{ 290{
290 { 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } 291 { 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
291}; 292};
293#endif
292 294
293static const struct kv_pt_config_reg didt_config_kv[] = 295static const struct kv_pt_config_reg didt_config_kv[] =
294{ 296{
@@ -507,19 +509,19 @@ static int kv_enable_didt(struct amdgpu_device *adev, bool enable)
507 pi->caps_db_ramping || 509 pi->caps_db_ramping ||
508 pi->caps_td_ramping || 510 pi->caps_td_ramping ||
509 pi->caps_tcp_ramping) { 511 pi->caps_tcp_ramping) {
510 gfx_v7_0_enter_rlc_safe_mode(adev); 512 adev->gfx.rlc.funcs->enter_safe_mode(adev);
511 513
512 if (enable) { 514 if (enable) {
513 ret = kv_program_pt_config_registers(adev, didt_config_kv); 515 ret = kv_program_pt_config_registers(adev, didt_config_kv);
514 if (ret) { 516 if (ret) {
515 gfx_v7_0_exit_rlc_safe_mode(adev); 517 adev->gfx.rlc.funcs->exit_safe_mode(adev);
516 return ret; 518 return ret;
517 } 519 }
518 } 520 }
519 521
520 kv_do_enable_didt(adev, enable); 522 kv_do_enable_didt(adev, enable);
521 523
522 gfx_v7_0_exit_rlc_safe_mode(adev); 524 adev->gfx.rlc.funcs->exit_safe_mode(adev);
523 } 525 }
524 526
525 return 0; 527 return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/ppsmc.h b/drivers/gpu/drm/amd/amdgpu/ppsmc.h
index 7837f2ecc357..8463245f424f 100644
--- a/drivers/gpu/drm/amd/amdgpu/ppsmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/ppsmc.h
@@ -90,7 +90,9 @@ typedef uint8_t PPSMC_Result;
90#define PPSMC_StartFanControl ((uint8_t)0x5B) 90#define PPSMC_StartFanControl ((uint8_t)0x5B)
91#define PPSMC_StopFanControl ((uint8_t)0x5C) 91#define PPSMC_StopFanControl ((uint8_t)0x5C)
92#define PPSMC_MSG_NoDisplay ((uint8_t)0x5D) 92#define PPSMC_MSG_NoDisplay ((uint8_t)0x5D)
93#define PPSMC_NoDisplay ((uint8_t)0x5D)
93#define PPSMC_MSG_HasDisplay ((uint8_t)0x5E) 94#define PPSMC_MSG_HasDisplay ((uint8_t)0x5E)
95#define PPSMC_HasDisplay ((uint8_t)0x5E)
94#define PPSMC_MSG_UVDPowerOFF ((uint8_t)0x60) 96#define PPSMC_MSG_UVDPowerOFF ((uint8_t)0x60)
95#define PPSMC_MSG_UVDPowerON ((uint8_t)0x61) 97#define PPSMC_MSG_UVDPowerON ((uint8_t)0x61)
96#define PPSMC_MSG_EnableULV ((uint8_t)0x62) 98#define PPSMC_MSG_EnableULV ((uint8_t)0x62)
@@ -108,6 +110,7 @@ typedef uint8_t PPSMC_Result;
108#define PPSMC_MSG_DisableDTE ((uint8_t)0x88) 110#define PPSMC_MSG_DisableDTE ((uint8_t)0x88)
109#define PPSMC_MSG_ThrottleOVRDSCLKDS ((uint8_t)0x96) 111#define PPSMC_MSG_ThrottleOVRDSCLKDS ((uint8_t)0x96)
110#define PPSMC_MSG_CancelThrottleOVRDSCLKDS ((uint8_t)0x97) 112#define PPSMC_MSG_CancelThrottleOVRDSCLKDS ((uint8_t)0x97)
113#define PPSMC_MSG_EnableACDCGPIOInterrupt ((uint16_t) 0x149)
111 114
112/* CI/KV/KB */ 115/* CI/KV/KB */
113#define PPSMC_MSG_UVDDPM_SetEnabledMask ((uint16_t) 0x12D) 116#define PPSMC_MSG_UVDDPM_SetEnabledMask ((uint16_t) 0x12D)
@@ -161,6 +164,7 @@ typedef uint8_t PPSMC_Result;
161#define PPSMC_MSG_MASTER_DeepSleep_OFF ((uint16_t) 0x190) 164#define PPSMC_MSG_MASTER_DeepSleep_OFF ((uint16_t) 0x190)
162#define PPSMC_MSG_Remove_DC_Clamp ((uint16_t) 0x191) 165#define PPSMC_MSG_Remove_DC_Clamp ((uint16_t) 0x191)
163#define PPSMC_MSG_SetFanPwmMax ((uint16_t) 0x19A) 166#define PPSMC_MSG_SetFanPwmMax ((uint16_t) 0x19A)
167#define PPSMC_MSG_SetFanRpmMax ((uint16_t) 0x205)
164 168
165#define PPSMC_MSG_ENABLE_THERMAL_DPM ((uint16_t) 0x19C) 169#define PPSMC_MSG_ENABLE_THERMAL_DPM ((uint16_t) 0x19C)
166#define PPSMC_MSG_DISABLE_THERMAL_DPM ((uint16_t) 0x19D) 170#define PPSMC_MSG_DISABLE_THERMAL_DPM ((uint16_t) 0x19D)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index b556bd0a8797..1351c7e834a2 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -255,19 +255,6 @@ static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring,
255 unsigned vm_id, bool ctx_switch) 255 unsigned vm_id, bool ctx_switch)
256{ 256{
257 u32 vmid = vm_id & 0xf; 257 u32 vmid = vm_id & 0xf;
258 u32 next_rptr = ring->wptr + 5;
259
260 while ((next_rptr & 7) != 2)
261 next_rptr++;
262
263 next_rptr += 6;
264
265 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
266 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
267 amdgpu_ring_write(ring, lower_32_bits(ring->next_rptr_gpu_addr) & 0xfffffffc);
268 amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
269 amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
270 amdgpu_ring_write(ring, next_rptr);
271 258
272 /* IB packet must end on a 8 DW boundary */ 259 /* IB packet must end on a 8 DW boundary */
273 sdma_v2_4_ring_insert_nop(ring, (10 - (ring->wptr & 7)) % 8); 260 sdma_v2_4_ring_insert_nop(ring, (10 - (ring->wptr & 7)) % 8);
@@ -406,7 +393,7 @@ static void sdma_v2_4_enable(struct amdgpu_device *adev, bool enable)
406 u32 f32_cntl; 393 u32 f32_cntl;
407 int i; 394 int i;
408 395
409 if (enable == false) { 396 if (!enable) {
410 sdma_v2_4_gfx_stop(adev); 397 sdma_v2_4_gfx_stop(adev);
411 sdma_v2_4_rlc_stop(adev); 398 sdma_v2_4_rlc_stop(adev);
412 } 399 }
@@ -580,19 +567,21 @@ static int sdma_v2_4_start(struct amdgpu_device *adev)
580{ 567{
581 int r; 568 int r;
582 569
583 if (!adev->firmware.smu_load) { 570 if (!adev->pp_enabled) {
584 r = sdma_v2_4_load_microcode(adev); 571 if (!adev->firmware.smu_load) {
585 if (r) 572 r = sdma_v2_4_load_microcode(adev);
586 return r; 573 if (r)
587 } else { 574 return r;
588 r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, 575 } else {
589 AMDGPU_UCODE_ID_SDMA0); 576 r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
590 if (r) 577 AMDGPU_UCODE_ID_SDMA0);
591 return -EINVAL; 578 if (r)
592 r = adev->smu.smumgr_funcs->check_fw_load_finish(adev, 579 return -EINVAL;
593 AMDGPU_UCODE_ID_SDMA1); 580 r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
594 if (r) 581 AMDGPU_UCODE_ID_SDMA1);
595 return -EINVAL; 582 if (r)
583 return -EINVAL;
584 }
596 } 585 }
597 586
598 /* halt the engine before programing */ 587 /* halt the engine before programing */
@@ -679,20 +668,19 @@ static int sdma_v2_4_ring_test_ring(struct amdgpu_ring *ring)
679 * Test a simple IB in the DMA ring (VI). 668 * Test a simple IB in the DMA ring (VI).
680 * Returns 0 on success, error on failure. 669 * Returns 0 on success, error on failure.
681 */ 670 */
682static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring) 671static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
683{ 672{
684 struct amdgpu_device *adev = ring->adev; 673 struct amdgpu_device *adev = ring->adev;
685 struct amdgpu_ib ib; 674 struct amdgpu_ib ib;
686 struct fence *f = NULL; 675 struct fence *f = NULL;
687 unsigned i;
688 unsigned index; 676 unsigned index;
689 int r;
690 u32 tmp = 0; 677 u32 tmp = 0;
691 u64 gpu_addr; 678 u64 gpu_addr;
679 long r;
692 680
693 r = amdgpu_wb_get(adev, &index); 681 r = amdgpu_wb_get(adev, &index);
694 if (r) { 682 if (r) {
695 dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); 683 dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
696 return r; 684 return r;
697 } 685 }
698 686
@@ -702,7 +690,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring)
702 memset(&ib, 0, sizeof(ib)); 690 memset(&ib, 0, sizeof(ib));
703 r = amdgpu_ib_get(adev, NULL, 256, &ib); 691 r = amdgpu_ib_get(adev, NULL, 256, &ib);
704 if (r) { 692 if (r) {
705 DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); 693 DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
706 goto err0; 694 goto err0;
707 } 695 }
708 696
@@ -721,28 +709,25 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring)
721 if (r) 709 if (r)
722 goto err1; 710 goto err1;
723 711
724 r = fence_wait(f, false); 712 r = fence_wait_timeout(f, false, timeout);
725 if (r) { 713 if (r == 0) {
726 DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); 714 DRM_ERROR("amdgpu: IB test timed out\n");
715 r = -ETIMEDOUT;
727 goto err1; 716 goto err1;
728 } 717 } else if (r) {
729 for (i = 0; i < adev->usec_timeout; i++) { 718 DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
730 tmp = le32_to_cpu(adev->wb.wb[index]);
731 if (tmp == 0xDEADBEEF)
732 break;
733 DRM_UDELAY(1);
734 }
735 if (i < adev->usec_timeout) {
736 DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
737 ring->idx, i);
738 goto err1; 719 goto err1;
720 }
721 tmp = le32_to_cpu(adev->wb.wb[index]);
722 if (tmp == 0xDEADBEEF) {
723 DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
724 r = 0;
739 } else { 725 } else {
740 DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp); 726 DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
741 r = -EINVAL; 727 r = -EINVAL;
742 } 728 }
743 729
744err1: 730err1:
745 fence_put(f);
746 amdgpu_ib_free(adev, &ib, NULL); 731 amdgpu_ib_free(adev, &ib, NULL);
747 fence_put(f); 732 fence_put(f);
748err0: 733err0:
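
The SDMA IB test above drops the DEADBEEF polling loop in favour of a fence wait with an explicit timeout, so the return value alone distinguishes timeout, wait error and success. A small sketch of that return-value handling, assuming a fence_wait_timeout()-style contract (0 means timeout, negative is an error, positive means the fence signalled); wait_with_timeout() below is a stand-in, not the kernel API:

    #include <errno.h>
    #include <stdio.h>

    /* Stand-in for fence_wait_timeout(): returns a caller-chosen outcome. */
    static long wait_with_timeout(long timeout, long simulated_result)
    {
            (void)timeout;
            return simulated_result;
    }

    static long run_ib_test(long timeout, long simulated_result)
    {
            long r = wait_with_timeout(timeout, simulated_result);

            if (r == 0)               /* timed out: report -ETIMEDOUT */
                    return -ETIMEDOUT;
            if (r < 0)                /* wait itself failed: pass the error through */
                    return r;
            return 0;                 /* fence signalled before the timeout */
    }

    int main(void)
    {
            printf("%ld\n", run_ib_test(100, 1));   /* 0: success */
            printf("%ld\n", run_ib_test(100, 0));   /* -ETIMEDOUT */
            printf("%ld\n", run_ib_test(100, -5));  /* negative error passed through */
            return 0;
    }

The sdma_v3_0 hunk below applies the same conversion, which is also why the test_ib callbacks now take a long timeout and return long instead of int.
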
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 532ea88da66a..653ce5ed55ae 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -415,18 +415,6 @@ static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
415 unsigned vm_id, bool ctx_switch) 415 unsigned vm_id, bool ctx_switch)
416{ 416{
417 u32 vmid = vm_id & 0xf; 417 u32 vmid = vm_id & 0xf;
418 u32 next_rptr = ring->wptr + 5;
419
420 while ((next_rptr & 7) != 2)
421 next_rptr++;
422 next_rptr += 6;
423
424 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
425 SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
426 amdgpu_ring_write(ring, lower_32_bits(ring->next_rptr_gpu_addr) & 0xfffffffc);
427 amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
428 amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
429 amdgpu_ring_write(ring, next_rptr);
430 418
431 /* IB packet must end on a 8 DW boundary */ 419 /* IB packet must end on a 8 DW boundary */
432 sdma_v3_0_ring_insert_nop(ring, (10 - (ring->wptr & 7)) % 8); 420 sdma_v3_0_ring_insert_nop(ring, (10 - (ring->wptr & 7)) % 8);
@@ -616,7 +604,7 @@ static void sdma_v3_0_enable(struct amdgpu_device *adev, bool enable)
616 u32 f32_cntl; 604 u32 f32_cntl;
617 int i; 605 int i;
618 606
619 if (enable == false) { 607 if (!enable) {
620 sdma_v3_0_gfx_stop(adev); 608 sdma_v3_0_gfx_stop(adev);
621 sdma_v3_0_rlc_stop(adev); 609 sdma_v3_0_rlc_stop(adev);
622 } 610 }
@@ -908,20 +896,19 @@ static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring)
908 * Test a simple IB in the DMA ring (VI). 896 * Test a simple IB in the DMA ring (VI).
909 * Returns 0 on success, error on failure. 897 * Returns 0 on success, error on failure.
910 */ 898 */
911static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring) 899static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
912{ 900{
913 struct amdgpu_device *adev = ring->adev; 901 struct amdgpu_device *adev = ring->adev;
914 struct amdgpu_ib ib; 902 struct amdgpu_ib ib;
915 struct fence *f = NULL; 903 struct fence *f = NULL;
916 unsigned i;
917 unsigned index; 904 unsigned index;
918 int r;
919 u32 tmp = 0; 905 u32 tmp = 0;
920 u64 gpu_addr; 906 u64 gpu_addr;
907 long r;
921 908
922 r = amdgpu_wb_get(adev, &index); 909 r = amdgpu_wb_get(adev, &index);
923 if (r) { 910 if (r) {
924 dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r); 911 dev_err(adev->dev, "(%ld) failed to allocate wb slot\n", r);
925 return r; 912 return r;
926 } 913 }
927 914
@@ -931,7 +918,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring)
931 memset(&ib, 0, sizeof(ib)); 918 memset(&ib, 0, sizeof(ib));
932 r = amdgpu_ib_get(adev, NULL, 256, &ib); 919 r = amdgpu_ib_get(adev, NULL, 256, &ib);
933 if (r) { 920 if (r) {
934 DRM_ERROR("amdgpu: failed to get ib (%d).\n", r); 921 DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
935 goto err0; 922 goto err0;
936 } 923 }
937 924
@@ -950,27 +937,24 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring)
950 if (r) 937 if (r)
951 goto err1; 938 goto err1;
952 939
953 r = fence_wait(f, false); 940 r = fence_wait_timeout(f, false, timeout);
954 if (r) { 941 if (r == 0) {
955 DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); 942 DRM_ERROR("amdgpu: IB test timed out\n");
943 r = -ETIMEDOUT;
956 goto err1; 944 goto err1;
957 } 945 } else if (r < 0) {
958 for (i = 0; i < adev->usec_timeout; i++) { 946 DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
959 tmp = le32_to_cpu(adev->wb.wb[index]);
960 if (tmp == 0xDEADBEEF)
961 break;
962 DRM_UDELAY(1);
963 }
964 if (i < adev->usec_timeout) {
965 DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
966 ring->idx, i);
967 goto err1; 947 goto err1;
948 }
949 tmp = le32_to_cpu(adev->wb.wb[index]);
950 if (tmp == 0xDEADBEEF) {
951 DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
952 r = 0;
968 } else { 953 } else {
969 DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp); 954 DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
970 r = -EINVAL; 955 r = -EINVAL;
971 } 956 }
972err1: 957err1:
973 fence_put(f);
974 amdgpu_ib_free(adev, &ib, NULL); 958 amdgpu_ib_free(adev, &ib, NULL);
975 fence_put(f); 959 fence_put(f);
976err0: 960err0:
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
index 083893dd68c0..940de1836f8f 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_smc.c
@@ -173,7 +173,7 @@ static int tonga_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg)
173{ 173{
174 if (!tonga_is_smc_ram_running(adev)) 174 if (!tonga_is_smc_ram_running(adev))
175 { 175 {
176 return -EINVAL;; 176 return -EINVAL;
177 } 177 }
178 178
179 if (wait_smu_response(adev)) { 179 if (wait_smu_response(adev)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index f07551476a70..132e613ed674 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -34,6 +34,8 @@
34#include "oss/oss_2_0_d.h" 34#include "oss/oss_2_0_d.h"
35#include "oss/oss_2_0_sh_mask.h" 35#include "oss/oss_2_0_sh_mask.h"
36 36
37#include "bif/bif_4_1_d.h"
38
37static void uvd_v4_2_mc_resume(struct amdgpu_device *adev); 39static void uvd_v4_2_mc_resume(struct amdgpu_device *adev);
38static void uvd_v4_2_init_cg(struct amdgpu_device *adev); 40static void uvd_v4_2_init_cg(struct amdgpu_device *adev);
39static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev); 41static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev);
@@ -439,6 +441,32 @@ static void uvd_v4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
439} 441}
440 442
441/** 443/**
444 * uvd_v4_2_ring_emit_hdp_flush - emit an hdp flush
445 *
446 * @ring: amdgpu_ring pointer
447 *
448 * Emits an hdp flush.
449 */
450static void uvd_v4_2_ring_emit_hdp_flush(struct amdgpu_ring *ring)
451{
452 amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
453 amdgpu_ring_write(ring, 0);
454}
455
456/**
457 * uvd_v4_2_ring_hdp_invalidate - emit an hdp invalidate
458 *
459 * @ring: amdgpu_ring pointer
460 *
461 * Emits an hdp invalidate.
462 */
463static void uvd_v4_2_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
464{
465 amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
466 amdgpu_ring_write(ring, 1);
467}
468
469/**
442 * uvd_v4_2_ring_test_ring - register write test 470 * uvd_v4_2_ring_test_ring - register write test
443 * 471 *
444 * @ring: amdgpu_ring pointer 472 * @ring: amdgpu_ring pointer
@@ -499,49 +527,6 @@ static void uvd_v4_2_ring_emit_ib(struct amdgpu_ring *ring,
499} 527}
500 528
501/** 529/**
502 * uvd_v4_2_ring_test_ib - test ib execution
503 *
504 * @ring: amdgpu_ring pointer
505 *
506 * Test if we can successfully execute an IB
507 */
508static int uvd_v4_2_ring_test_ib(struct amdgpu_ring *ring)
509{
510 struct amdgpu_device *adev = ring->adev;
511 struct fence *fence = NULL;
512 int r;
513
514 r = amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
515 if (r) {
516 DRM_ERROR("amdgpu: failed to raise UVD clocks (%d).\n", r);
517 return r;
518 }
519
520 r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
521 if (r) {
522 DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r);
523 goto error;
524 }
525
526 r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
527 if (r) {
528 DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
529 goto error;
530 }
531
532 r = fence_wait(fence, false);
533 if (r) {
534 DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
535 goto error;
536 }
537 DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
538error:
539 fence_put(fence);
540 amdgpu_asic_set_uvd_clocks(adev, 0, 0);
541 return r;
542}
543
544/**
545 * uvd_v4_2_mc_resume - memory controller programming 530 * uvd_v4_2_mc_resume - memory controller programming
546 * 531 *
547 * @adev: amdgpu_device pointer 532 * @adev: amdgpu_device pointer
@@ -763,10 +748,14 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
763 .parse_cs = amdgpu_uvd_ring_parse_cs, 748 .parse_cs = amdgpu_uvd_ring_parse_cs,
764 .emit_ib = uvd_v4_2_ring_emit_ib, 749 .emit_ib = uvd_v4_2_ring_emit_ib,
765 .emit_fence = uvd_v4_2_ring_emit_fence, 750 .emit_fence = uvd_v4_2_ring_emit_fence,
751 .emit_hdp_flush = uvd_v4_2_ring_emit_hdp_flush,
752 .emit_hdp_invalidate = uvd_v4_2_ring_emit_hdp_invalidate,
766 .test_ring = uvd_v4_2_ring_test_ring, 753 .test_ring = uvd_v4_2_ring_test_ring,
767 .test_ib = uvd_v4_2_ring_test_ib, 754 .test_ib = amdgpu_uvd_ring_test_ib,
768 .insert_nop = amdgpu_ring_insert_nop, 755 .insert_nop = amdgpu_ring_insert_nop,
769 .pad_ib = amdgpu_ring_generic_pad_ib, 756 .pad_ib = amdgpu_ring_generic_pad_ib,
757 .begin_use = amdgpu_uvd_ring_begin_use,
758 .end_use = amdgpu_uvd_ring_end_use,
770}; 759};
771 760
772static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev) 761static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index e0a76a883d46..101de136ba63 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -31,6 +31,7 @@
31#include "uvd/uvd_5_0_sh_mask.h" 31#include "uvd/uvd_5_0_sh_mask.h"
32#include "oss/oss_2_0_d.h" 32#include "oss/oss_2_0_d.h"
33#include "oss/oss_2_0_sh_mask.h" 33#include "oss/oss_2_0_sh_mask.h"
34#include "bif/bif_5_0_d.h"
34#include "vi.h" 35#include "vi.h"
35 36
36static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev); 37static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
@@ -489,6 +490,32 @@ static void uvd_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
489} 490}
490 491
491/** 492/**
493 * uvd_v5_0_ring_emit_hdp_flush - emit an hdp flush
494 *
495 * @ring: amdgpu_ring pointer
496 *
497 * Emits an hdp flush.
498 */
499static void uvd_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
500{
501 amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
502 amdgpu_ring_write(ring, 0);
503}
504
505/**
506 * uvd_v5_0_ring_hdp_invalidate - emit an hdp invalidate
507 *
508 * @ring: amdgpu_ring pointer
509 *
510 * Emits an hdp invalidate.
511 */
512static void uvd_v5_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
513{
514 amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
515 amdgpu_ring_write(ring, 1);
516}
517
518/**
492 * uvd_v5_0_ring_test_ring - register write test 519 * uvd_v5_0_ring_test_ring - register write test
493 * 520 *
494 * @ring: amdgpu_ring pointer 521 * @ring: amdgpu_ring pointer
@@ -550,49 +577,6 @@ static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
550 amdgpu_ring_write(ring, ib->length_dw); 577 amdgpu_ring_write(ring, ib->length_dw);
551} 578}
552 579
553/**
554 * uvd_v5_0_ring_test_ib - test ib execution
555 *
556 * @ring: amdgpu_ring pointer
557 *
558 * Test if we can successfully execute an IB
559 */
560static int uvd_v5_0_ring_test_ib(struct amdgpu_ring *ring)
561{
562 struct amdgpu_device *adev = ring->adev;
563 struct fence *fence = NULL;
564 int r;
565
566 r = amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
567 if (r) {
568 DRM_ERROR("amdgpu: failed to raise UVD clocks (%d).\n", r);
569 return r;
570 }
571
572 r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
573 if (r) {
574 DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r);
575 goto error;
576 }
577
578 r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
579 if (r) {
580 DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
581 goto error;
582 }
583
584 r = fence_wait(fence, false);
585 if (r) {
586 DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
587 goto error;
588 }
589 DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
590error:
591 fence_put(fence);
592 amdgpu_asic_set_uvd_clocks(adev, 0, 0);
593 return r;
594}
595
596static bool uvd_v5_0_is_idle(void *handle) 580static bool uvd_v5_0_is_idle(void *handle)
597{ 581{
598 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 582 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -815,10 +799,14 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
815 .parse_cs = amdgpu_uvd_ring_parse_cs, 799 .parse_cs = amdgpu_uvd_ring_parse_cs,
816 .emit_ib = uvd_v5_0_ring_emit_ib, 800 .emit_ib = uvd_v5_0_ring_emit_ib,
817 .emit_fence = uvd_v5_0_ring_emit_fence, 801 .emit_fence = uvd_v5_0_ring_emit_fence,
802 .emit_hdp_flush = uvd_v5_0_ring_emit_hdp_flush,
803 .emit_hdp_invalidate = uvd_v5_0_ring_emit_hdp_invalidate,
818 .test_ring = uvd_v5_0_ring_test_ring, 804 .test_ring = uvd_v5_0_ring_test_ring,
819 .test_ib = uvd_v5_0_ring_test_ib, 805 .test_ib = amdgpu_uvd_ring_test_ib,
820 .insert_nop = amdgpu_ring_insert_nop, 806 .insert_nop = amdgpu_ring_insert_nop,
821 .pad_ib = amdgpu_ring_generic_pad_ib, 807 .pad_ib = amdgpu_ring_generic_pad_ib,
808 .begin_use = amdgpu_uvd_ring_begin_use,
809 .end_use = amdgpu_uvd_ring_end_use,
822}; 810};
823 811
824static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev) 812static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index c9929d665c01..7f21102bfb99 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -33,6 +33,8 @@
33#include "oss/oss_2_0_sh_mask.h" 33#include "oss/oss_2_0_sh_mask.h"
34#include "smu/smu_7_1_3_d.h" 34#include "smu/smu_7_1_3_d.h"
35#include "smu/smu_7_1_3_sh_mask.h" 35#include "smu/smu_7_1_3_sh_mask.h"
36#include "bif/bif_5_1_d.h"
37#include "gmc/gmc_8_1_d.h"
36#include "vi.h" 38#include "vi.h"
37 39
38static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev); 40static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
@@ -385,8 +387,8 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
385 uint32_t mp_swap_cntl; 387 uint32_t mp_swap_cntl;
386 int i, j, r; 388 int i, j, r;
387 389
388 /*disable DPG */ 390 /* disable DPG */
389 WREG32_P(mmUVD_POWER_STATUS, 0, ~(1 << 2)); 391 WREG32_P(mmUVD_POWER_STATUS, 0, ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
390 392
391 /* disable byte swapping */ 393 /* disable byte swapping */
392 lmi_swap_cntl = 0; 394 lmi_swap_cntl = 0;
@@ -405,17 +407,21 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
405 } 407 }
406 408
407 /* disable interupt */ 409 /* disable interupt */
408 WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1)); 410 WREG32_P(mmUVD_MASTINT_EN, 0, ~UVD_MASTINT_EN__VCPU_EN_MASK);
409 411
410 /* stall UMC and register bus before resetting VCPU */ 412 /* stall UMC and register bus before resetting VCPU */
411 WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8)); 413 WREG32_P(mmUVD_LMI_CTRL2, UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
412 mdelay(1); 414 mdelay(1);
413 415
414 /* put LMI, VCPU, RBC etc... into reset */ 416 /* put LMI, VCPU, RBC etc... into reset */
415 WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK | 417 WREG32(mmUVD_SOFT_RESET,
416 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK | 418 UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
417 UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK | 419 UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
418 UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK | 420 UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
421 UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
422 UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
423 UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
424 UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
419 UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK); 425 UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
420 mdelay(5); 426 mdelay(5);
421 427
@@ -424,8 +430,13 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
424 mdelay(5); 430 mdelay(5);
425 431
426 /* initialize UVD memory controller */ 432 /* initialize UVD memory controller */
427 WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) | 433 WREG32(mmUVD_LMI_CTRL,
428 (1 << 21) | (1 << 9) | (1 << 20)); 434 (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
435 UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
436 UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK |
437 UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK |
438 UVD_LMI_CTRL__REQ_MODE_MASK |
439 UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL_MASK);
429 440
430#ifdef __BIG_ENDIAN 441#ifdef __BIG_ENDIAN
431 /* swap (8 in 32) RB and IB */ 442 /* swap (8 in 32) RB and IB */
@@ -447,10 +458,10 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
447 mdelay(5); 458 mdelay(5);
448 459
449 /* enable VCPU clock */ 460 /* enable VCPU clock */
450 WREG32(mmUVD_VCPU_CNTL, 1 << 9); 461 WREG32(mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK);
451 462
452 /* enable UMC */ 463 /* enable UMC */
453 WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8)); 464 WREG32_P(mmUVD_LMI_CTRL2, 0, ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK);
454 465
455 /* boot up the VCPU */ 466 /* boot up the VCPU */
456 WREG32(mmUVD_SOFT_RESET, 0); 467 WREG32(mmUVD_SOFT_RESET, 0);
@@ -484,10 +495,12 @@ static int uvd_v6_0_start(struct amdgpu_device *adev)
484 return r; 495 return r;
485 } 496 }
486 /* enable master interrupt */ 497 /* enable master interrupt */
487 WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1)); 498 WREG32_P(mmUVD_MASTINT_EN,
499 (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK),
500 ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK));
488 501
489 /* clear the bit 4 of UVD_STATUS */ 502 /* clear the bit 4 of UVD_STATUS */
490 WREG32_P(mmUVD_STATUS, 0, ~(2 << 1)); 503 WREG32_P(mmUVD_STATUS, 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT));
491 504
492 rb_bufsz = order_base_2(ring->ring_size); 505 rb_bufsz = order_base_2(ring->ring_size);
493 tmp = 0; 506 tmp = 0;
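
The uvd_v6_0_start() hunks above replace raw bit shifts with the named UVD register field macros, so the programmed value reads directly against the register spec. A short sketch of assembling such a value from named shift/mask macros; the macro names and bit positions below are placeholders, not the real UVD_LMI_CTRL layout:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder field definitions -- not the real UVD_LMI_CTRL layout. */
    #define LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT   0
    #define LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK  (1u << 8)
    #define LMI_CTRL__DATA_COHERENCY_EN_MASK     (1u << 9)
    #define LMI_CTRL__REQ_MODE_MASK              (1u << 13)

    int main(void)
    {
            uint32_t val = (0x40u << LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) |
                           LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK |
                           LMI_CTRL__DATA_COHERENCY_EN_MASK |
                           LMI_CTRL__REQ_MODE_MASK;

            /* Same value as a literal form like 0x40 | (1 << 8) | (1 << 9) | (1 << 13),
             * but each bit is named after the field it controls. */
            printf("UVD_LMI_CTRL = 0x%08" PRIx32 "\n", val);
            return 0;
    }
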
@@ -581,6 +594,32 @@ static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq
581} 594}
582 595
583/** 596/**
597 * uvd_v6_0_ring_emit_hdp_flush - emit an hdp flush
598 *
599 * @ring: amdgpu_ring pointer
600 *
601 * Emits an hdp flush.
602 */
603static void uvd_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
604{
605 amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
606 amdgpu_ring_write(ring, 0);
607}
608
609/**
610 * uvd_v6_0_ring_hdp_invalidate - emit an hdp invalidate
611 *
612 * @ring: amdgpu_ring pointer
613 *
614 * Emits an hdp invalidate.
615 */
616static void uvd_v6_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
617{
618 amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
619 amdgpu_ring_write(ring, 1);
620}
621
622/**
584 * uvd_v6_0_ring_test_ring - register write test 623 * uvd_v6_0_ring_test_ring - register write test
585 * 624 *
586 * @ring: amdgpu_ring pointer 625 * @ring: amdgpu_ring pointer
@@ -634,6 +673,9 @@ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
634 struct amdgpu_ib *ib, 673 struct amdgpu_ib *ib,
635 unsigned vm_id, bool ctx_switch) 674 unsigned vm_id, bool ctx_switch)
636{ 675{
676 amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_VMID, 0));
677 amdgpu_ring_write(ring, vm_id);
678
637 amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0)); 679 amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
638 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr)); 680 amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
639 amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0)); 681 amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
@@ -642,39 +684,55 @@ static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
642 amdgpu_ring_write(ring, ib->length_dw); 684 amdgpu_ring_write(ring, ib->length_dw);
643} 685}
644 686
645/** 687static void uvd_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
646 * uvd_v6_0_ring_test_ib - test ib execution 688 unsigned vm_id, uint64_t pd_addr)
647 *
648 * @ring: amdgpu_ring pointer
649 *
650 * Test if we can successfully execute an IB
651 */
652static int uvd_v6_0_ring_test_ib(struct amdgpu_ring *ring)
653{ 689{
654 struct fence *fence = NULL; 690 uint32_t reg;
655 int r;
656 691
657 r = amdgpu_uvd_get_create_msg(ring, 1, NULL); 692 if (vm_id < 8)
658 if (r) { 693 reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id;
659 DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r); 694 else
660 goto error; 695 reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8;
661 }
662 696
663 r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence); 697 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
664 if (r) { 698 amdgpu_ring_write(ring, reg << 2);
665 DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r); 699 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
666 goto error; 700 amdgpu_ring_write(ring, pd_addr >> 12);
667 } 701 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
702 amdgpu_ring_write(ring, 0x8);
668 703
669 r = fence_wait(fence, false); 704 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
670 if (r) { 705 amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
671 DRM_ERROR("amdgpu: fence wait failed (%d).\n", r); 706 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
672 goto error; 707 amdgpu_ring_write(ring, 1 << vm_id);
673 } 708 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
674 DRM_INFO("ib test on ring %d succeeded\n", ring->idx); 709 amdgpu_ring_write(ring, 0x8);
675error: 710
676 fence_put(fence); 711 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
677 return r; 712 amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
713 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
714 amdgpu_ring_write(ring, 0);
715 amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
716 amdgpu_ring_write(ring, 1 << vm_id); /* mask */
717 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
718 amdgpu_ring_write(ring, 0xC);
719}
720
721static void uvd_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
722{
723 uint32_t seq = ring->fence_drv.sync_seq;
724 uint64_t addr = ring->fence_drv.gpu_addr;
725
726 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
727 amdgpu_ring_write(ring, lower_32_bits(addr));
728 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
729 amdgpu_ring_write(ring, upper_32_bits(addr));
730 amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH8, 0));
731 amdgpu_ring_write(ring, 0xffffffff); /* mask */
732 amdgpu_ring_write(ring, PACKET0(mmUVD_GP_SCRATCH9, 0));
733 amdgpu_ring_write(ring, seq);
734 amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
735 amdgpu_ring_write(ring, 0xE);
678} 736}
679 737
680static bool uvd_v6_0_is_idle(void *handle) 738static bool uvd_v6_0_is_idle(void *handle)
@@ -847,7 +905,8 @@ static int uvd_v6_0_set_clockgating_state(void *handle,
847 bool enable = (state == AMD_CG_STATE_GATE) ? true : false; 905 bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
848 static int curstate = -1; 906 static int curstate = -1;
849 907
850 if (adev->asic_type == CHIP_FIJI) 908 if (adev->asic_type == CHIP_FIJI ||
909 adev->asic_type == CHIP_POLARIS10)
851 uvd_v6_set_bypass_mode(adev, enable); 910 uvd_v6_set_bypass_mode(adev, enable);
852 911
853 if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) 912 if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
@@ -912,22 +971,51 @@ const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
912 .set_powergating_state = uvd_v6_0_set_powergating_state, 971 .set_powergating_state = uvd_v6_0_set_powergating_state,
913}; 972};
914 973
915static const struct amdgpu_ring_funcs uvd_v6_0_ring_funcs = { 974static const struct amdgpu_ring_funcs uvd_v6_0_ring_phys_funcs = {
916 .get_rptr = uvd_v6_0_ring_get_rptr, 975 .get_rptr = uvd_v6_0_ring_get_rptr,
917 .get_wptr = uvd_v6_0_ring_get_wptr, 976 .get_wptr = uvd_v6_0_ring_get_wptr,
918 .set_wptr = uvd_v6_0_ring_set_wptr, 977 .set_wptr = uvd_v6_0_ring_set_wptr,
919 .parse_cs = amdgpu_uvd_ring_parse_cs, 978 .parse_cs = amdgpu_uvd_ring_parse_cs,
920 .emit_ib = uvd_v6_0_ring_emit_ib, 979 .emit_ib = uvd_v6_0_ring_emit_ib,
921 .emit_fence = uvd_v6_0_ring_emit_fence, 980 .emit_fence = uvd_v6_0_ring_emit_fence,
981 .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
982 .emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate,
983 .test_ring = uvd_v6_0_ring_test_ring,
984 .test_ib = amdgpu_uvd_ring_test_ib,
985 .insert_nop = amdgpu_ring_insert_nop,
986 .pad_ib = amdgpu_ring_generic_pad_ib,
987 .begin_use = amdgpu_uvd_ring_begin_use,
988 .end_use = amdgpu_uvd_ring_end_use,
989};
990
991static const struct amdgpu_ring_funcs uvd_v6_0_ring_vm_funcs = {
992 .get_rptr = uvd_v6_0_ring_get_rptr,
993 .get_wptr = uvd_v6_0_ring_get_wptr,
994 .set_wptr = uvd_v6_0_ring_set_wptr,
995 .parse_cs = NULL,
996 .emit_ib = uvd_v6_0_ring_emit_ib,
997 .emit_fence = uvd_v6_0_ring_emit_fence,
998 .emit_vm_flush = uvd_v6_0_ring_emit_vm_flush,
999 .emit_pipeline_sync = uvd_v6_0_ring_emit_pipeline_sync,
1000 .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush,
1001 .emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate,
922 .test_ring = uvd_v6_0_ring_test_ring, 1002 .test_ring = uvd_v6_0_ring_test_ring,
923 .test_ib = uvd_v6_0_ring_test_ib, 1003 .test_ib = amdgpu_uvd_ring_test_ib,
924 .insert_nop = amdgpu_ring_insert_nop, 1004 .insert_nop = amdgpu_ring_insert_nop,
925 .pad_ib = amdgpu_ring_generic_pad_ib, 1005 .pad_ib = amdgpu_ring_generic_pad_ib,
1006 .begin_use = amdgpu_uvd_ring_begin_use,
1007 .end_use = amdgpu_uvd_ring_end_use,
926}; 1008};
927 1009
928static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev) 1010static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
929{ 1011{
930 adev->uvd.ring.funcs = &uvd_v6_0_ring_funcs; 1012 if (adev->asic_type >= CHIP_POLARIS10) {
1013 adev->uvd.ring.funcs = &uvd_v6_0_ring_vm_funcs;
1014 DRM_INFO("UVD is enabled in VM mode\n");
1015 } else {
1016 adev->uvd.ring.funcs = &uvd_v6_0_ring_phys_funcs;
1017 DRM_INFO("UVD is enabled in physical mode\n");
1018 }
931} 1019}
932 1020
933static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = { 1021static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
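A note on the pattern used by the new VM-mode emit functions above: instead of CPU MMIO writes, register programming is funneled through the UVD firmware by loading UVD_GPCOM_VCPU_DATA0 (register byte offset) and UVD_GPCOM_VCPU_DATA1 (value or compare value) and then kicking UVD_GPCOM_VCPU_CMD; judging from the hunk, 0x8 appears to request a register write while 0xC and 0xE request the masked register waits used for the TLB invalidate and the fence-based pipeline sync. A minimal sketch of the write half, assuming the surrounding amdgpu ring helpers; the helper name below is hypothetical and not part of this series:

/* Illustrative only: hypothetical helper restating the DATA0/DATA1/CMD
 * sequence from uvd_v6_0_ring_emit_vm_flush() above. */
static void uvd_v6_0_emit_wreg_sketch(struct amdgpu_ring *ring,
				      uint32_t reg, uint32_t val)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, reg << 2);	/* dword offset -> byte offset */
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, val);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0x8);		/* 0x8: write request, per the hunk */
}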
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index 45d92aceb485..80a37a602181 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -594,6 +594,8 @@ static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
594 .test_ib = amdgpu_vce_ring_test_ib, 594 .test_ib = amdgpu_vce_ring_test_ib,
595 .insert_nop = amdgpu_ring_insert_nop, 595 .insert_nop = amdgpu_ring_insert_nop,
596 .pad_ib = amdgpu_ring_generic_pad_ib, 596 .pad_ib = amdgpu_ring_generic_pad_ib,
597 .begin_use = amdgpu_vce_ring_begin_use,
598 .end_use = amdgpu_vce_ring_end_use,
597}; 599};
598 600
599static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev) 601static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 30e8099e94c5..c271abffd8dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -43,6 +43,7 @@
43#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0 0x8616 43#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0 0x8616
44#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1 0x8617 44#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1 0x8617
45#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2 0x8618 45#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2 0x8618
46#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02
46 47
47#define VCE_V3_0_FW_SIZE (384 * 1024) 48#define VCE_V3_0_FW_SIZE (384 * 1024)
48#define VCE_V3_0_STACK_SIZE (64 * 1024) 49#define VCE_V3_0_STACK_SIZE (64 * 1024)
@@ -51,6 +52,7 @@
51static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx); 52static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
52static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev); 53static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
53static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev); 54static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
55static int vce_v3_0_wait_for_idle(void *handle);
54 56
55/** 57/**
56 * vce_v3_0_ring_get_rptr - get read pointer 58 * vce_v3_0_ring_get_rptr - get read pointer
@@ -205,6 +207,32 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
205 vce_v3_0_override_vce_clock_gating(adev, false); 207 vce_v3_0_override_vce_clock_gating(adev, false);
206} 208}
207 209
210static int vce_v3_0_firmware_loaded(struct amdgpu_device *adev)
211{
212 int i, j;
213
214 for (i = 0; i < 10; ++i) {
215 for (j = 0; j < 100; ++j) {
216 uint32_t status = RREG32(mmVCE_STATUS);
217
218 if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
219 return 0;
220 mdelay(10);
221 }
222
223 DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
224 WREG32_P(mmVCE_SOFT_RESET,
225 VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
226 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
227 mdelay(10);
228 WREG32_P(mmVCE_SOFT_RESET, 0,
229 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
230 mdelay(10);
231 }
232
233 return -ETIMEDOUT;
234}
235
208/** 236/**
209 * vce_v3_0_start - start VCE block 237 * vce_v3_0_start - start VCE block
210 * 238 *
@@ -215,11 +243,24 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
215static int vce_v3_0_start(struct amdgpu_device *adev) 243static int vce_v3_0_start(struct amdgpu_device *adev)
216{ 244{
217 struct amdgpu_ring *ring; 245 struct amdgpu_ring *ring;
218 int idx, i, j, r; 246 int idx, r;
247
248 ring = &adev->vce.ring[0];
249 WREG32(mmVCE_RB_RPTR, ring->wptr);
250 WREG32(mmVCE_RB_WPTR, ring->wptr);
251 WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
252 WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
253 WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
254
255 ring = &adev->vce.ring[1];
256 WREG32(mmVCE_RB_RPTR2, ring->wptr);
257 WREG32(mmVCE_RB_WPTR2, ring->wptr);
258 WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
259 WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
260 WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
219 261
220 mutex_lock(&adev->grbm_idx_mutex); 262 mutex_lock(&adev->grbm_idx_mutex);
221 for (idx = 0; idx < 2; ++idx) { 263 for (idx = 0; idx < 2; ++idx) {
222
223 if (adev->vce.harvest_config & (1 << idx)) 264 if (adev->vce.harvest_config & (1 << idx))
224 continue; 265 continue;
225 266
@@ -233,48 +274,24 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
233 274
234 vce_v3_0_mc_resume(adev, idx); 275 vce_v3_0_mc_resume(adev, idx);
235 276
236 /* set BUSY flag */
237 WREG32_P(mmVCE_STATUS, 1, ~1);
277 WREG32_P(mmVCE_STATUS, VCE_STATUS__JOB_BUSY_MASK,
278 ~VCE_STATUS__JOB_BUSY_MASK);
279
238 if (adev->asic_type >= CHIP_STONEY) 280 if (adev->asic_type >= CHIP_STONEY)
239 WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001); 281 WREG32_P(mmVCE_VCPU_CNTL, 1, ~0x200001);
240 else 282 else
241 WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK, 283 WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK,
242 ~VCE_VCPU_CNTL__CLK_EN_MASK); 284 ~VCE_VCPU_CNTL__CLK_EN_MASK);
243 285
244 WREG32_P(mmVCE_SOFT_RESET,
245 VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
246 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
247
248 mdelay(100);
249
250 WREG32_P(mmVCE_SOFT_RESET, 0, 286 WREG32_P(mmVCE_SOFT_RESET, 0,
251 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK); 287 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
252 288
289 mdelay(100);
290
291 r = vce_v3_0_firmware_loaded(adev);
253 for (i = 0; i < 10; ++i) {
254 uint32_t status;
255 for (j = 0; j < 100; ++j) {
256 status = RREG32(mmVCE_STATUS);
257 if (status & 2)
258 break;
259 mdelay(10);
260 }
261 r = 0;
262 if (status & 2)
263 break;
264
265 DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
266 WREG32_P(mmVCE_SOFT_RESET,
267 VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
268 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
269 mdelay(10);
270 WREG32_P(mmVCE_SOFT_RESET, 0,
271 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
272 mdelay(10);
273 r = -1;
274 }
275 292
276 /* clear BUSY flag */ 293 /* clear BUSY flag */
277 WREG32_P(mmVCE_STATUS, 0, ~1); 294 WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK);
278 295
279 /* Set Clock-Gating off */ 296 /* Set Clock-Gating off */
280 if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG) 297 if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
@@ -290,19 +307,46 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
290 WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); 307 WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
291 mutex_unlock(&adev->grbm_idx_mutex); 308 mutex_unlock(&adev->grbm_idx_mutex);
292 309
293 ring = &adev->vce.ring[0];
294 WREG32(mmVCE_RB_RPTR, ring->wptr);
295 WREG32(mmVCE_RB_WPTR, ring->wptr);
296 WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
297 WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
298 WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);
299
300 ring = &adev->vce.ring[1];
301 WREG32(mmVCE_RB_RPTR2, ring->wptr);
302 WREG32(mmVCE_RB_WPTR2, ring->wptr);
303 WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
304 WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
305 WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);
310 return 0;
311}
312
313static int vce_v3_0_stop(struct amdgpu_device *adev)
314{
315 int idx;
316
317 mutex_lock(&adev->grbm_idx_mutex);
318 for (idx = 0; idx < 2; ++idx) {
319 if (adev->vce.harvest_config & (1 << idx))
320 continue;
321
322 if (idx == 0)
323 WREG32_P(mmGRBM_GFX_INDEX, 0,
324 ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
325 else
326 WREG32_P(mmGRBM_GFX_INDEX,
327 GRBM_GFX_INDEX__VCE_INSTANCE_MASK,
328 ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
329
330 if (adev->asic_type >= CHIP_STONEY)
331 WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001);
332 else
333 WREG32_P(mmVCE_VCPU_CNTL, 0,
334 ~VCE_VCPU_CNTL__CLK_EN_MASK);
335 /* hold on ECPU */
336 WREG32_P(mmVCE_SOFT_RESET,
337 VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
338 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
339
340 /* clear BUSY flag */
341 WREG32_P(mmVCE_STATUS, 0, ~VCE_STATUS__JOB_BUSY_MASK);
342
343 /* Set Clock-Gating off */
344 if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
345 vce_v3_0_set_vce_sw_clock_gating(adev, false);
346 }
347
348 WREG32_P(mmGRBM_GFX_INDEX, 0, ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
349 mutex_unlock(&adev->grbm_idx_mutex);
306 350
307 return 0; 351 return 0;
308} 352}
@@ -441,7 +485,14 @@ static int vce_v3_0_hw_init(void *handle)
441 485
442static int vce_v3_0_hw_fini(void *handle) 486static int vce_v3_0_hw_fini(void *handle)
443{ 487{
444 return 0;
488 int r;
489 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
490
491 r = vce_v3_0_wait_for_idle(handle);
492 if (r)
493 return r;
494
495 return vce_v3_0_stop(adev);
445} 496}
446 497
447static int vce_v3_0_suspend(void *handle) 498static int vce_v3_0_suspend(void *handle)
@@ -604,6 +655,18 @@ static int vce_v3_0_process_interrupt(struct amdgpu_device *adev,
604 return 0; 655 return 0;
605} 656}
606 657
658static void vce_v3_set_bypass_mode(struct amdgpu_device *adev, bool enable)
659{
660 u32 tmp = RREG32_SMC(ixGCK_DFS_BYPASS_CNTL);
661
662 if (enable)
663 tmp |= GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
664 else
665 tmp &= ~GCK_DFS_BYPASS_CNTL__BYPASSECLK_MASK;
666
667 WREG32_SMC(ixGCK_DFS_BYPASS_CNTL, tmp);
668}
669
607static int vce_v3_0_set_clockgating_state(void *handle, 670static int vce_v3_0_set_clockgating_state(void *handle,
608 enum amd_clockgating_state state) 671 enum amd_clockgating_state state)
609{ 672{
@@ -611,6 +674,9 @@ static int vce_v3_0_set_clockgating_state(void *handle,
611 bool enable = (state == AMD_CG_STATE_GATE) ? true : false; 674 bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
612 int i; 675 int i;
613 676
677 if (adev->asic_type == CHIP_POLARIS10)
678 vce_v3_set_bypass_mode(adev, enable);
679
614 if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) 680 if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
615 return 0; 681 return 0;
616 682
@@ -701,6 +767,8 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_funcs = {
701 .test_ib = amdgpu_vce_ring_test_ib, 767 .test_ib = amdgpu_vce_ring_test_ib,
702 .insert_nop = amdgpu_ring_insert_nop, 768 .insert_nop = amdgpu_ring_insert_nop,
703 .pad_ib = amdgpu_ring_generic_pad_ib, 769 .pad_ib = amdgpu_ring_generic_pad_ib,
770 .begin_use = amdgpu_vce_ring_begin_use,
771 .end_use = amdgpu_vce_ring_end_use,
704}; 772};
705 773
706static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev) 774static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index a65c96029476..03a31c53aec3 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -203,6 +203,29 @@ static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
203 spin_unlock_irqrestore(&adev->didt_idx_lock, flags); 203 spin_unlock_irqrestore(&adev->didt_idx_lock, flags);
204} 204}
205 205
206static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg)
207{
208 unsigned long flags;
209 u32 r;
210
211 spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
212 WREG32(mmGC_CAC_IND_INDEX, (reg));
213 r = RREG32(mmGC_CAC_IND_DATA);
214 spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
215 return r;
216}
217
218static void vi_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v)
219{
220 unsigned long flags;
221
222 spin_lock_irqsave(&adev->gc_cac_idx_lock, flags);
223 WREG32(mmGC_CAC_IND_INDEX, (reg));
224 WREG32(mmGC_CAC_IND_DATA, (v));
225 spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags);
226}
227
228
206static const u32 tonga_mgcg_cgcg_init[] = 229static const u32 tonga_mgcg_cgcg_init[] =
207{ 230{
208 mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100, 231 mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
@@ -533,12 +556,12 @@ static uint32_t vi_read_indexed_register(struct amdgpu_device *adev, u32 se_num,
533 556
534 mutex_lock(&adev->grbm_idx_mutex); 557 mutex_lock(&adev->grbm_idx_mutex);
535 if (se_num != 0xffffffff || sh_num != 0xffffffff) 558 if (se_num != 0xffffffff || sh_num != 0xffffffff)
536 gfx_v8_0_select_se_sh(adev, se_num, sh_num); 559 amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
537 560
538 val = RREG32(reg_offset); 561 val = RREG32(reg_offset);
539 562
540 if (se_num != 0xffffffff || sh_num != 0xffffffff) 563 if (se_num != 0xffffffff || sh_num != 0xffffffff)
541 gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); 564 amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
542 mutex_unlock(&adev->grbm_idx_mutex); 565 mutex_unlock(&adev->grbm_idx_mutex);
543 return val; 566 return val;
544} 567}
@@ -597,7 +620,7 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
597 return -EINVAL; 620 return -EINVAL;
598} 621}
599 622
600static void vi_gpu_pci_config_reset(struct amdgpu_device *adev) 623static int vi_gpu_pci_config_reset(struct amdgpu_device *adev)
601{ 624{
602 u32 i; 625 u32 i;
603 626
@@ -612,11 +635,14 @@ static void vi_gpu_pci_config_reset(struct amdgpu_device *adev)
612 635
613 /* wait for asic to come out of reset */ 636 /* wait for asic to come out of reset */
614 for (i = 0; i < adev->usec_timeout; i++) { 637 for (i = 0; i < adev->usec_timeout; i++) {
615 if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) 638 if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
616 break;
639 /* enable BM */
640 pci_set_master(adev->pdev);
641 return 0;
642 }
617 udelay(1); 643 udelay(1);
618 } 644 }
619
645 return -EINVAL;
620} 646}
621 647
622static void vi_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung) 648static void vi_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung)
@@ -642,13 +668,15 @@ static void vi_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hun
642 */ 668 */
643static int vi_asic_reset(struct amdgpu_device *adev) 669static int vi_asic_reset(struct amdgpu_device *adev)
644{ 670{
671 int r;
672
645 vi_set_bios_scratch_engine_hung(adev, true); 673 vi_set_bios_scratch_engine_hung(adev, true);
646 674
647 vi_gpu_pci_config_reset(adev); 675 r = vi_gpu_pci_config_reset(adev);
648 676
649 vi_set_bios_scratch_engine_hung(adev, false); 677 vi_set_bios_scratch_engine_hung(adev, false);
650 678
651 return 0; 679 return r;
652} 680}
653 681
654static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock, 682static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock,
@@ -1133,9 +1161,6 @@ static const struct amdgpu_asic_funcs vi_asic_funcs =
1133 .set_uvd_clocks = &vi_set_uvd_clocks, 1161 .set_uvd_clocks = &vi_set_uvd_clocks,
1134 .set_vce_clocks = &vi_set_vce_clocks, 1162 .set_vce_clocks = &vi_set_vce_clocks,
1135 .get_virtual_caps = &vi_get_virtual_caps, 1163 .get_virtual_caps = &vi_get_virtual_caps,
1136 /* these should be moved to their own ip modules */
1137 .get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter,
1138 .wait_for_mc_idle = &gmc_v8_0_mc_wait_for_idle,
1139}; 1164};
1140 1165
1141static int vi_common_early_init(void *handle) 1166static int vi_common_early_init(void *handle)
@@ -1156,6 +1181,8 @@ static int vi_common_early_init(void *handle)
1156 adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg; 1181 adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg;
1157 adev->didt_rreg = &vi_didt_rreg; 1182 adev->didt_rreg = &vi_didt_rreg;
1158 adev->didt_wreg = &vi_didt_wreg; 1183 adev->didt_wreg = &vi_didt_wreg;
1184 adev->gc_cac_rreg = &vi_gc_cac_rreg;
1185 adev->gc_cac_wreg = &vi_gc_cac_wreg;
1159 1186
1160 adev->asic_funcs = &vi_asic_funcs; 1187 adev->asic_funcs = &vi_asic_funcs;
1161 1188
@@ -1229,12 +1256,18 @@ static int vi_common_early_init(void *handle)
1229 adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG | 1256 adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
1230 AMD_CG_SUPPORT_GFX_MGCG | 1257 AMD_CG_SUPPORT_GFX_MGCG |
1231 AMD_CG_SUPPORT_GFX_MGLS | 1258 AMD_CG_SUPPORT_GFX_MGLS |
1259 AMD_CG_SUPPORT_GFX_RLC_LS |
1260 AMD_CG_SUPPORT_GFX_CP_LS |
1261 AMD_CG_SUPPORT_GFX_CGTS |
1262 AMD_CG_SUPPORT_GFX_MGLS |
1263 AMD_CG_SUPPORT_GFX_CGTS_LS |
1264 AMD_CG_SUPPORT_GFX_CGCG |
1265 AMD_CG_SUPPORT_GFX_CGLS |
1232 AMD_CG_SUPPORT_BIF_LS | 1266 AMD_CG_SUPPORT_BIF_LS |
1233 AMD_CG_SUPPORT_HDP_MGCG | 1267 AMD_CG_SUPPORT_HDP_MGCG |
1234 AMD_CG_SUPPORT_HDP_LS | 1268 AMD_CG_SUPPORT_HDP_LS |
1235 AMD_CG_SUPPORT_SDMA_MGCG | 1269 AMD_CG_SUPPORT_SDMA_MGCG |
1236 AMD_CG_SUPPORT_SDMA_LS; 1270 AMD_CG_SUPPORT_SDMA_LS;
1237 adev->pg_flags = 0;
1238 adev->external_rev_id = adev->rev_id + 0x1; 1271 adev->external_rev_id = adev->rev_id + 0x1;
1239 break; 1272 break;
1240 default: 1273 default:
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index ec4036a09f3e..a625b9137da2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -187,12 +187,12 @@ int init_pipelines(struct device_queue_manager *dqm,
187unsigned int get_first_pipe(struct device_queue_manager *dqm); 187unsigned int get_first_pipe(struct device_queue_manager *dqm);
188unsigned int get_pipes_num(struct device_queue_manager *dqm); 188unsigned int get_pipes_num(struct device_queue_manager *dqm);
189 189
190extern inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd) 190static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
191{ 191{
192 return (pdd->lds_base >> 16) & 0xFF; 192 return (pdd->lds_base >> 16) & 0xFF;
193} 193}
194 194
195extern inline unsigned int 195static inline unsigned int
196get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd) 196get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
197{ 197{
198 return (pdd->lds_base >> 60) & 0x0E; 198 return (pdd->lds_base >> 60) & 0x0E;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index d0d5f4baf72d..80113c335966 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -617,10 +617,7 @@ int kgd2kfd_resume(struct kfd_dev *kfd);
617int kfd_init_apertures(struct kfd_process *process); 617int kfd_init_apertures(struct kfd_process *process);
618 618
619/* Queue Context Management */ 619/* Queue Context Management */
620inline uint32_t lower_32(uint64_t x);
621inline uint32_t upper_32(uint64_t x);
622struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd); 620struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd);
623inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m);
624 621
625int init_queue(struct queue **q, struct queue_properties properties); 622int init_queue(struct queue **q, struct queue_properties properties);
626void uninit_queue(struct queue *q); 623void uninit_queue(struct queue *q);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 7708d90b9da9..4f3849ac8c07 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -63,13 +63,12 @@ static struct kfd_process *create_process(const struct task_struct *thread);
63void kfd_process_create_wq(void) 63void kfd_process_create_wq(void)
64{ 64{
65 if (!kfd_process_wq) 65 if (!kfd_process_wq)
66 kfd_process_wq = create_workqueue("kfd_process_wq"); 66 kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0);
67} 67}
68 68
69void kfd_process_destroy_wq(void) 69void kfd_process_destroy_wq(void)
70{ 70{
71 if (kfd_process_wq) { 71 if (kfd_process_wq) {
72 flush_workqueue(kfd_process_wq);
73 destroy_workqueue(kfd_process_wq); 72 destroy_workqueue(kfd_process_wq);
74 kfd_process_wq = NULL; 73 kfd_process_wq = NULL;
75 } 74 }
@@ -330,6 +329,7 @@ err_process_pqm_init:
330 synchronize_rcu(); 329 synchronize_rcu();
331 mmu_notifier_unregister_no_release(&process->mmu_notifier, process->mm); 330 mmu_notifier_unregister_no_release(&process->mmu_notifier, process->mm);
332err_mmu_notifier: 331err_mmu_notifier:
332 mutex_destroy(&process->mutex);
333 kfd_pasid_free(process->pasid); 333 kfd_pasid_free(process->pasid);
334err_alloc_pasid: 334err_alloc_pasid:
335 kfree(process->queues); 335 kfree(process->queues);
diff --git a/drivers/gpu/drm/amd/include/amd_pcie.h b/drivers/gpu/drm/amd/include/amd_pcie.h
index 7c2a916c1e63..5eb895fd98bf 100644
--- a/drivers/gpu/drm/amd/include/amd_pcie.h
+++ b/drivers/gpu/drm/amd/include/amd_pcie.h
@@ -37,6 +37,13 @@
37#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_MASK 0x0000FFFF 37#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_MASK 0x0000FFFF
38#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_SHIFT 0 38#define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_SHIFT 0
39 39
40/* gen: chipset 1/2, asic 1/2/3 */
41#define AMDGPU_DEFAULT_PCIE_GEN_MASK (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 \
42 | CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 \
43 | CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 \
44 | CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 \
45 | CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3)
46
40/* Following flags shows PCIe lane width switch supported in driver which are decided by chipset and ASIC */ 47/* Following flags shows PCIe lane width switch supported in driver which are decided by chipset and ASIC */
41#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X1 0x00010000 48#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X1 0x00010000
42#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 0x00020000 49#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 0x00020000
@@ -47,4 +54,11 @@
47#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 0x00400000 54#define CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 0x00400000
48#define CAIL_PCIE_LINK_WIDTH_SUPPORT_SHIFT 16 55#define CAIL_PCIE_LINK_WIDTH_SUPPORT_SHIFT 16
49 56
57/* 1/2/4/8/16 lanes */
58#define AMDGPU_DEFAULT_PCIE_MLW_MASK (CAIL_PCIE_LINK_WIDTH_SUPPORT_X1 \
59 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 \
60 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 \
61 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 \
62 | CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
63
50#endif 64#endif
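The two new defaults above simply OR together every link-speed and link-width flag already defined in this header, giving callers an "assume everything" value to fall back on when a platform query is unavailable. A hedged sketch of that fallback, with a made-up helper name and a plain integer standing in for whatever query the caller actually performs; only the AMDGPU_DEFAULT_* masks come from this patch:

/* Hypothetical fallback: the helper and its argument are illustrative. */
static uint32_t pcie_gen_caps_or_default(uint64_t queried_caps)
{
	if (queried_caps == 0)	/* nothing reported: assume all gens */
		return AMDGPU_DEFAULT_PCIE_GEN_MASK;

	return (uint32_t)queried_caps;
}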
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index afce1edbe250..a74a0d2ff1ca 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -26,15 +26,6 @@
26#define AMD_MAX_USEC_TIMEOUT 100000 /* 100 ms */ 26#define AMD_MAX_USEC_TIMEOUT 100000 /* 100 ms */
27 27
28/* 28/*
29* Supported GPU families (aligned with amdgpu_drm.h)
30*/
31#define AMD_FAMILY_UNKNOWN 0
32#define AMD_FAMILY_CI 120 /* Bonaire, Hawaii */
33#define AMD_FAMILY_KV 125 /* Kaveri, Kabini, Mullins */
34#define AMD_FAMILY_VI 130 /* Iceland, Tonga */
35#define AMD_FAMILY_CZ 135 /* Carrizo */
36
37/*
38 * Supported ASIC types 29 * Supported ASIC types
39 */ 30 */
40enum amd_asic_type { 31enum amd_asic_type {
@@ -120,6 +111,8 @@ enum amd_powergating_state {
120#define AMD_PG_SUPPORT_SDMA (1 << 8) 111#define AMD_PG_SUPPORT_SDMA (1 << 8)
121#define AMD_PG_SUPPORT_ACP (1 << 9) 112#define AMD_PG_SUPPORT_ACP (1 << 9)
122#define AMD_PG_SUPPORT_SAMU (1 << 10) 113#define AMD_PG_SUPPORT_SAMU (1 << 10)
114#define AMD_PG_SUPPORT_GFX_QUICK_MG (1 << 11)
115#define AMD_PG_SUPPORT_GFX_PIPELINE (1 << 12)
123 116
124enum amd_pm_state_type { 117enum amd_pm_state_type {
125 /* not used for dpm */ 118 /* not used for dpm */
diff --git a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h
index 293329719bba..809759f7bb81 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h
@@ -27,6 +27,7 @@
27#define mmMM_INDEX 0x0 27#define mmMM_INDEX 0x0
28#define mmMM_INDEX_HI 0x6 28#define mmMM_INDEX_HI 0x6
29#define mmMM_DATA 0x1 29#define mmMM_DATA 0x1
30#define mmCC_BIF_BX_STRAP2 0x152A
30#define mmBIF_MM_INDACCESS_CNTL 0x1500 31#define mmBIF_MM_INDACCESS_CNTL 0x1500
31#define mmBIF_DOORBELL_APER_EN 0x1501 32#define mmBIF_DOORBELL_APER_EN 0x1501
32#define mmBUS_CNTL 0x1508 33#define mmBUS_CNTL 0x1508
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_d.h
index ebaf67bb1589..90ff7c8a6011 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_d.h
@@ -2823,4 +2823,7 @@
2823#define mmDC_EDC_CSINVOC_CNT 0x3192 2823#define mmDC_EDC_CSINVOC_CNT 0x3192
2824#define mmDC_EDC_RESTORE_CNT 0x3193 2824#define mmDC_EDC_RESTORE_CNT 0x3193
2825 2825
2826#define mmGC_CAC_IND_INDEX 0x129a
2827#define mmGC_CAC_IND_DATA 0x129b
2828
2826#endif /* GFX_8_0_D_H */ 2829#endif /* GFX_8_0_D_H */
diff --git a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_sh_mask.h
index 7d722458d9f5..4070ca3a68eb 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_sh_mask.h
@@ -8730,8 +8730,6 @@
8730#define RLC_GPM_STAT__DYN_CU_POWERING_DOWN__SHIFT 0x10 8730#define RLC_GPM_STAT__DYN_CU_POWERING_DOWN__SHIFT 0x10
8731#define RLC_GPM_STAT__ABORTED_PD_SEQUENCE_MASK 0x20000 8731#define RLC_GPM_STAT__ABORTED_PD_SEQUENCE_MASK 0x20000
8732#define RLC_GPM_STAT__ABORTED_PD_SEQUENCE__SHIFT 0x11 8732#define RLC_GPM_STAT__ABORTED_PD_SEQUENCE__SHIFT 0x11
8733#define RLC_GPM_STAT__RESERVED_MASK 0xfc0000
8734#define RLC_GPM_STAT__RESERVED__SHIFT 0x12
8735#define RLC_GPM_STAT__PG_ERROR_STATUS_MASK 0xff000000 8733#define RLC_GPM_STAT__PG_ERROR_STATUS_MASK 0xff000000
8736#define RLC_GPM_STAT__PG_ERROR_STATUS__SHIFT 0x18 8734#define RLC_GPM_STAT__PG_ERROR_STATUS__SHIFT 0x18
8737#define RLC_GPU_CLOCK_32_RES_SEL__RES_SEL_MASK 0x3f 8735#define RLC_GPU_CLOCK_32_RES_SEL__RES_SEL_MASK 0x3f
@@ -8764,8 +8762,10 @@
8764#define RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE__SHIFT 0x12 8762#define RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE__SHIFT 0x12
8765#define RLC_PG_CNTL__SMU_HANDSHAKE_ENABLE_MASK 0x80000 8763#define RLC_PG_CNTL__SMU_HANDSHAKE_ENABLE_MASK 0x80000
8766#define RLC_PG_CNTL__SMU_HANDSHAKE_ENABLE__SHIFT 0x13 8764#define RLC_PG_CNTL__SMU_HANDSHAKE_ENABLE__SHIFT 0x13
8767#define RLC_PG_CNTL__RESERVED1_MASK 0xf00000 8765#define RLC_PG_CNTL__QUICK_PG_ENABLE_MASK 0x100000
8768#define RLC_PG_CNTL__RESERVED1__SHIFT 0x14 8766#define RLC_PG_CNTL__QUICK_PG_ENABLE__SHIFT 0x14
8767#define RLC_PG_CNTL__RESERVED1_MASK 0xe00000
8768#define RLC_PG_CNTL__RESERVED1__SHIFT 0x15
8769#define RLC_GPM_THREAD_PRIORITY__THREAD0_PRIORITY_MASK 0xff 8769#define RLC_GPM_THREAD_PRIORITY__THREAD0_PRIORITY_MASK 0xff
8770#define RLC_GPM_THREAD_PRIORITY__THREAD0_PRIORITY__SHIFT 0x0 8770#define RLC_GPM_THREAD_PRIORITY__THREAD0_PRIORITY__SHIFT 0x0
8771#define RLC_GPM_THREAD_PRIORITY__THREAD1_PRIORITY_MASK 0xff00 8771#define RLC_GPM_THREAD_PRIORITY__THREAD1_PRIORITY_MASK 0xff00
@@ -9102,8 +9102,6 @@
9102#define RLC_GPM_LOG_CONT__CONT__SHIFT 0x0 9102#define RLC_GPM_LOG_CONT__CONT__SHIFT 0x0
9103#define RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK 0xff 9103#define RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK 0xff
9104#define RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG__SHIFT 0x0 9104#define RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG__SHIFT 0x0
9105#define RLC_PG_DELAY_3__RESERVED_MASK 0xffffff00
9106#define RLC_PG_DELAY_3__RESERVED__SHIFT 0x8
9107#define RLC_GPM_INT_DISABLE_TH0__DISABLE_MASK 0xffffffff 9105#define RLC_GPM_INT_DISABLE_TH0__DISABLE_MASK 0xffffffff
9108#define RLC_GPM_INT_DISABLE_TH0__DISABLE__SHIFT 0x0 9106#define RLC_GPM_INT_DISABLE_TH0__DISABLE__SHIFT 0x0
9109#define RLC_GPM_INT_DISABLE_TH1__DISABLE_MASK 0xffffffff 9107#define RLC_GPM_INT_DISABLE_TH1__DISABLE_MASK 0xffffffff
@@ -9124,14 +9122,8 @@
9124#define RLC_SRM_DEBUG_SELECT__RESERVED__SHIFT 0x8 9122#define RLC_SRM_DEBUG_SELECT__RESERVED__SHIFT 0x8
9125#define RLC_SRM_DEBUG__DATA_MASK 0xffffffff 9123#define RLC_SRM_DEBUG__DATA_MASK 0xffffffff
9126#define RLC_SRM_DEBUG__DATA__SHIFT 0x0 9124#define RLC_SRM_DEBUG__DATA__SHIFT 0x0
9127#define RLC_SRM_ARAM_ADDR__ADDR_MASK 0x3ff
9128#define RLC_SRM_ARAM_ADDR__ADDR__SHIFT 0x0
9129#define RLC_SRM_ARAM_ADDR__RESERVED_MASK 0xfffffc00
9130#define RLC_SRM_ARAM_ADDR__RESERVED__SHIFT 0xa
9131#define RLC_SRM_ARAM_DATA__DATA_MASK 0xffffffff 9125#define RLC_SRM_ARAM_DATA__DATA_MASK 0xffffffff
9132#define RLC_SRM_ARAM_DATA__DATA__SHIFT 0x0 9126#define RLC_SRM_ARAM_DATA__DATA__SHIFT 0x0
9133#define RLC_SRM_DRAM_ADDR__ADDR_MASK 0x3ff
9134#define RLC_SRM_DRAM_ADDR__ADDR__SHIFT 0x0
9135#define RLC_SRM_DRAM_ADDR__RESERVED_MASK 0xfffffc00 9127#define RLC_SRM_DRAM_ADDR__RESERVED_MASK 0xfffffc00
9136#define RLC_SRM_DRAM_ADDR__RESERVED__SHIFT 0xa 9128#define RLC_SRM_DRAM_ADDR__RESERVED__SHIFT 0xa
9137#define RLC_SRM_DRAM_DATA__DATA_MASK 0xffffffff 9129#define RLC_SRM_DRAM_DATA__DATA_MASK 0xffffffff
@@ -17946,8 +17938,6 @@
17946#define VGT_TESS_DISTRIBUTION__ACCUM_TRI__SHIFT 0x8 17938#define VGT_TESS_DISTRIBUTION__ACCUM_TRI__SHIFT 0x8
17947#define VGT_TESS_DISTRIBUTION__ACCUM_QUAD_MASK 0xff0000 17939#define VGT_TESS_DISTRIBUTION__ACCUM_QUAD_MASK 0xff0000
17948#define VGT_TESS_DISTRIBUTION__ACCUM_QUAD__SHIFT 0x10 17940#define VGT_TESS_DISTRIBUTION__ACCUM_QUAD__SHIFT 0x10
17949#define VGT_TESS_DISTRIBUTION__DONUT_SPLIT_MASK 0xff000000
17950#define VGT_TESS_DISTRIBUTION__DONUT_SPLIT__SHIFT 0x18
17951#define VGT_TF_RING_SIZE__SIZE_MASK 0xffff 17941#define VGT_TF_RING_SIZE__SIZE_MASK 0xffff
17952#define VGT_TF_RING_SIZE__SIZE__SHIFT 0x0 17942#define VGT_TF_RING_SIZE__SIZE__SHIFT 0x0
17953#define VGT_SYS_CONFIG__DUAL_CORE_EN_MASK 0x1 17943#define VGT_SYS_CONFIG__DUAL_CORE_EN_MASK 0x1
@@ -20502,8 +20492,6 @@
20502#define DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT 0x4 20492#define DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT 0x4
20503#define DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20 20493#define DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20
20504#define DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5 20494#define DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5
20505#define DIDT_SQ_CTRL0__UNUSED_0_MASK 0xffffffc0
20506#define DIDT_SQ_CTRL0__UNUSED_0__SHIFT 0x6
20507#define DIDT_SQ_CTRL1__MIN_POWER_MASK 0xffff 20495#define DIDT_SQ_CTRL1__MIN_POWER_MASK 0xffff
20508#define DIDT_SQ_CTRL1__MIN_POWER__SHIFT 0x0 20496#define DIDT_SQ_CTRL1__MIN_POWER__SHIFT 0x0
20509#define DIDT_SQ_CTRL1__MAX_POWER_MASK 0xffff0000 20497#define DIDT_SQ_CTRL1__MAX_POWER_MASK 0xffff0000
@@ -20558,8 +20546,6 @@
20558#define DIDT_DB_CTRL0__DIDT_CTRL_RST__SHIFT 0x4 20546#define DIDT_DB_CTRL0__DIDT_CTRL_RST__SHIFT 0x4
20559#define DIDT_DB_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20 20547#define DIDT_DB_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20
20560#define DIDT_DB_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5 20548#define DIDT_DB_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5
20561#define DIDT_DB_CTRL0__UNUSED_0_MASK 0xffffffc0
20562#define DIDT_DB_CTRL0__UNUSED_0__SHIFT 0x6
20563#define DIDT_DB_CTRL1__MIN_POWER_MASK 0xffff 20549#define DIDT_DB_CTRL1__MIN_POWER_MASK 0xffff
20564#define DIDT_DB_CTRL1__MIN_POWER__SHIFT 0x0 20550#define DIDT_DB_CTRL1__MIN_POWER__SHIFT 0x0
20565#define DIDT_DB_CTRL1__MAX_POWER_MASK 0xffff0000 20551#define DIDT_DB_CTRL1__MAX_POWER_MASK 0xffff0000
@@ -20614,8 +20600,6 @@
20614#define DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT 0x4 20600#define DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT 0x4
20615#define DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20 20601#define DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20
20616#define DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5 20602#define DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5
20617#define DIDT_TD_CTRL0__UNUSED_0_MASK 0xffffffc0
20618#define DIDT_TD_CTRL0__UNUSED_0__SHIFT 0x6
20619#define DIDT_TD_CTRL1__MIN_POWER_MASK 0xffff 20603#define DIDT_TD_CTRL1__MIN_POWER_MASK 0xffff
20620#define DIDT_TD_CTRL1__MIN_POWER__SHIFT 0x0 20604#define DIDT_TD_CTRL1__MIN_POWER__SHIFT 0x0
20621#define DIDT_TD_CTRL1__MAX_POWER_MASK 0xffff0000 20605#define DIDT_TD_CTRL1__MAX_POWER_MASK 0xffff0000
@@ -20670,8 +20654,6 @@
20670#define DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT 0x4 20654#define DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT 0x4
20671#define DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20 20655#define DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20
20672#define DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5 20656#define DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5
20673#define DIDT_TCP_CTRL0__UNUSED_0_MASK 0xffffffc0
20674#define DIDT_TCP_CTRL0__UNUSED_0__SHIFT 0x6
20675#define DIDT_TCP_CTRL1__MIN_POWER_MASK 0xffff 20657#define DIDT_TCP_CTRL1__MIN_POWER_MASK 0xffff
20676#define DIDT_TCP_CTRL1__MIN_POWER__SHIFT 0x0 20658#define DIDT_TCP_CTRL1__MIN_POWER__SHIFT 0x0
20677#define DIDT_TCP_CTRL1__MAX_POWER_MASK 0xffff0000 20659#define DIDT_TCP_CTRL1__MAX_POWER_MASK 0xffff0000
@@ -20726,8 +20708,6 @@
20726#define DIDT_DBR_CTRL0__DIDT_CTRL_RST__SHIFT 0x4 20708#define DIDT_DBR_CTRL0__DIDT_CTRL_RST__SHIFT 0x4
20727#define DIDT_DBR_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20 20709#define DIDT_DBR_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20
20728#define DIDT_DBR_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5 20710#define DIDT_DBR_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5
20729#define DIDT_DBR_CTRL0__UNUSED_0_MASK 0xffffffc0
20730#define DIDT_DBR_CTRL0__UNUSED_0__SHIFT 0x6
20731#define DIDT_DBR_CTRL1__MIN_POWER_MASK 0xffff 20711#define DIDT_DBR_CTRL1__MIN_POWER_MASK 0xffff
20732#define DIDT_DBR_CTRL1__MIN_POWER__SHIFT 0x0 20712#define DIDT_DBR_CTRL1__MIN_POWER__SHIFT 0x0
20733#define DIDT_DBR_CTRL1__MAX_POWER_MASK 0xffff0000 20713#define DIDT_DBR_CTRL1__MAX_POWER_MASK 0xffff0000
@@ -20773,4 +20753,84 @@
20773#define DIDT_DBR_WEIGHT8_11__WEIGHT11_MASK 0xff000000 20753#define DIDT_DBR_WEIGHT8_11__WEIGHT11_MASK 0xff000000
20774#define DIDT_DBR_WEIGHT8_11__WEIGHT11__SHIFT 0x18 20754#define DIDT_DBR_WEIGHT8_11__WEIGHT11__SHIFT 0x18
20775 20755
20756#define DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK 0x00000001
20757#define DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT 0x00000000
20758
20759#define DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK 0x0000007e
20760#define DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK 0x00001f80L
20761#define DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT 0x00000001
20762#define DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT 0x00000007
20763
20764#define DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK 0x1fffe000L
20765#define DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT 0x0000000d
20766
20767#define DIDT_SQ_STALL_CTRL__UNUSED_0_MASK 0xe0000000L
20768#define DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT 0x0000001d
20769
20770#define DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK 0x00000001L
20771#define DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT 0x00000000
20772
20773#define DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK 0x00007ffeL
20774#define DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT 0x00000001
20775#define DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK 0x1fff8000L
20776#define DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT 0x0000000f
20777
20778#define DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK 0x00000001L
20779#define DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT 0x00000000
20780
20781#define DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK 0x0000007eL
20782#define DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK 0x00001f80L
20783#define DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT 0x00000001
20784#define DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT 0x00000007
20785
20786#define DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK 0x1fffe000L
20787#define DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT 0x0000000d
20788
20789#define DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK 0x00000fc0L
20790#define DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK 0x0003f000L
20791#define DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT 0x00000006
20792#define DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT 0x0000000c
20793
20794#define DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK 0x00000001L
20795#define DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK 0x00007ffeL
20796#define DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK 0x1fff8000L
20797
20798#define DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT 0x00000000
20799#define DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT 0x00000001
20800#define DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT 0x0000000f
20801
20802#define DIDT_TD_STALL_CTRL__UNUSED_0_MASK 0xe0000000L
20803#define DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT 0x0000001d
20804
20805#define DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK 0x00000fc0L
20806#define DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK 0x0003f000L
20807#define DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT 0x00000006
20808#define DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT 0x0000000c
20809
20810#define DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK 0x00000001L
20811#define DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT 0x00000000
20812
20813#define DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK 0x0000007eL
20814#define DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK 0x00001f80L
20815#define DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT 0x00000001
20816#define DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT 0x00000007
20817
20818#define DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK 0x1fffe000L
20819#define DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT 0x0000000d
20820
20821#define DIDT_TCP_STALL_CTRL__UNUSED_0_MASK 0xe0000000L
20822#define DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT 0x0000001d
20823
20824#define DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK 0x00000001L
20825#define DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK 0x00007ffeL
20826#define DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK 0x1fff8000L
20827#define DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT 0x00000000
20828#define DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT 0x00000001
20829#define DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT 0x0000000f
20830
20831#define DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK 0x00000fc0L
20832#define DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK 0x0003f000L
20833#define DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT 0x00000006
20834#define DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT 0x0000000c
20835
20776#endif /* GFX_8_0_SH_MASK_H */ 20836#endif /* GFX_8_0_SH_MASK_H */
diff --git a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h
index 6f6fb34742d2..ec69869c55ff 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/uvd/uvd_6_0_d.h
@@ -111,6 +111,8 @@
111#define mmUVD_MIF_RECON1_ADDR_CONFIG 0x39c5 111#define mmUVD_MIF_RECON1_ADDR_CONFIG 0x39c5
112#define ixUVD_MIF_SCLR_ADDR_CONFIG 0x4 112#define ixUVD_MIF_SCLR_ADDR_CONFIG 0x4
113#define mmUVD_JPEG_ADDR_CONFIG 0x3a1f 113#define mmUVD_JPEG_ADDR_CONFIG 0x3a1f
114#define mmUVD_GP_SCRATCH8 0x3c0a
115#define mmUVD_GP_SCRATCH9 0x3c0b
114#define mmUVD_GP_SCRATCH4 0x3d38 116#define mmUVD_GP_SCRATCH4 0x3d38
115 117
116#endif /* UVD_6_0_D_H */ 118#endif /* UVD_6_0_D_H */
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
index 7464daf89ca1..b86aba9d019f 100644
--- a/drivers/gpu/drm/amd/include/cgs_common.h
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -49,6 +49,7 @@ enum cgs_ind_reg {
49 CGS_IND_REG__SMC, 49 CGS_IND_REG__SMC,
50 CGS_IND_REG__UVD_CTX, 50 CGS_IND_REG__UVD_CTX,
51 CGS_IND_REG__DIDT, 51 CGS_IND_REG__DIDT,
52 CGS_IND_REG_GC_CAC,
52 CGS_IND_REG__AUDIO_ENDPT 53 CGS_IND_REG__AUDIO_ENDPT
53}; 54};
54 55
@@ -112,20 +113,23 @@ enum cgs_system_info_id {
112 CGS_SYSTEM_INFO_ADAPTER_BDF_ID = 1, 113 CGS_SYSTEM_INFO_ADAPTER_BDF_ID = 1,
113 CGS_SYSTEM_INFO_PCIE_GEN_INFO, 114 CGS_SYSTEM_INFO_PCIE_GEN_INFO,
114 CGS_SYSTEM_INFO_PCIE_MLW, 115 CGS_SYSTEM_INFO_PCIE_MLW,
116 CGS_SYSTEM_INFO_PCIE_DEV,
117 CGS_SYSTEM_INFO_PCIE_REV,
115 CGS_SYSTEM_INFO_CG_FLAGS, 118 CGS_SYSTEM_INFO_CG_FLAGS,
116 CGS_SYSTEM_INFO_PG_FLAGS, 119 CGS_SYSTEM_INFO_PG_FLAGS,
117 CGS_SYSTEM_INFO_GFX_CU_INFO, 120 CGS_SYSTEM_INFO_GFX_CU_INFO,
121 CGS_SYSTEM_INFO_GFX_SE_INFO,
118 CGS_SYSTEM_INFO_ID_MAXIMUM, 122 CGS_SYSTEM_INFO_ID_MAXIMUM,
119}; 123};
120 124
121struct cgs_system_info { 125struct cgs_system_info {
122 uint64_t size; 126 uint64_t size;
123 uint64_t info_id; 127 enum cgs_system_info_id info_id;
124 union { 128 union {
125 void *ptr; 129 void *ptr;
126 uint64_t value; 130 uint64_t value;
127 }; 131 };
128 uint64_t padding[13]; 132 uint64_t padding[13];
129}; 133};
130 134
131/* 135/*
@@ -158,6 +162,10 @@ struct cgs_firmware_info {
158 uint16_t feature_version; 162 uint16_t feature_version;
159 uint32_t image_size; 163 uint32_t image_size;
160 uint64_t mc_addr; 164 uint64_t mc_addr;
165
166 /* only for smc firmware */
167 uint32_t ucode_start_address;
168
161 void *kptr; 169 void *kptr;
162}; 170};
163 171
@@ -189,7 +197,6 @@ typedef unsigned long cgs_handle_t;
189 197
190struct cgs_acpi_method_argument { 198struct cgs_acpi_method_argument {
191 uint32_t type; 199 uint32_t type;
192 uint32_t method_length;
193 uint32_t data_length; 200 uint32_t data_length;
194 union{ 201 union{
195 uint32_t value; 202 uint32_t value;
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index e629f8a9fe93..abbb658bdc1e 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -176,7 +176,7 @@ static int pp_hw_fini(void *handle)
176 176
177static bool pp_is_idle(void *handle) 177static bool pp_is_idle(void *handle)
178{ 178{
179 return 0; 179 return false;
180} 180}
181 181
182static int pp_wait_for_idle(void *handle) 182static int pp_wait_for_idle(void *handle)
@@ -536,6 +536,10 @@ int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, void *input,
536 case AMD_PP_EVENT_COMPLETE_INIT: 536 case AMD_PP_EVENT_COMPLETE_INIT:
537 ret = pem_handle_event(pp_handle->eventmgr, event_id, &data); 537 ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
538 break; 538 break;
539 case AMD_PP_EVENT_READJUST_POWER_STATE:
540 pp_handle->hwmgr->current_ps = pp_handle->hwmgr->boot_ps;
541 ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
542 break;
539 default: 543 default:
540 break; 544 break;
541 } 545 }
@@ -740,12 +744,12 @@ static int pp_dpm_get_pp_table(void *handle, char **table)
740 744
741 PP_CHECK_HW(hwmgr); 745 PP_CHECK_HW(hwmgr);
742 746
743 if (hwmgr->hwmgr_func->get_pp_table == NULL) { 747 if (!hwmgr->soft_pp_table)
744 printk(KERN_INFO "%s was not implemented.\n", __func__); 748 return -EINVAL;
745 return 0;
746 }
747 749
748 return hwmgr->hwmgr_func->get_pp_table(hwmgr, table); 750 *table = (char *)hwmgr->soft_pp_table;
751
752 return hwmgr->soft_pp_table_size;
749} 753}
750 754
751static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size) 755static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
@@ -759,12 +763,23 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
759 763
760 PP_CHECK_HW(hwmgr); 764 PP_CHECK_HW(hwmgr);
761 765
762 if (hwmgr->hwmgr_func->set_pp_table == NULL) { 766 if (!hwmgr->hardcode_pp_table) {
763 printk(KERN_INFO "%s was not implemented.\n", __func__); 767 hwmgr->hardcode_pp_table =
764 return 0; 768 kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL);
769
770 if (!hwmgr->hardcode_pp_table)
771 return -ENOMEM;
772
773 /* to avoid powerplay crash when hardcode pptable is empty */
774 memcpy(hwmgr->hardcode_pp_table, hwmgr->soft_pp_table,
775 hwmgr->soft_pp_table_size);
765 } 776 }
766 777
767 return hwmgr->hwmgr_func->set_pp_table(hwmgr, buf, size); 778 memcpy(hwmgr->hardcode_pp_table, buf, size);
779
780 hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;
781
782 return amd_powerplay_reset(handle);
768} 783}
769 784
770static int pp_dpm_force_clock_level(void *handle, 785static int pp_dpm_force_clock_level(void *handle,
@@ -806,6 +821,82 @@ static int pp_dpm_print_clock_levels(void *handle,
806 return hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf); 821 return hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
807} 822}
808 823
824static int pp_dpm_get_sclk_od(void *handle)
825{
826 struct pp_hwmgr *hwmgr;
827
828 if (!handle)
829 return -EINVAL;
830
831 hwmgr = ((struct pp_instance *)handle)->hwmgr;
832
833 PP_CHECK_HW(hwmgr);
834
835 if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
836 printk(KERN_INFO "%s was not implemented.\n", __func__);
837 return 0;
838 }
839
840 return hwmgr->hwmgr_func->get_sclk_od(hwmgr);
841}
842
843static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
844{
845 struct pp_hwmgr *hwmgr;
846
847 if (!handle)
848 return -EINVAL;
849
850 hwmgr = ((struct pp_instance *)handle)->hwmgr;
851
852 PP_CHECK_HW(hwmgr);
853
854 if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
855 printk(KERN_INFO "%s was not implemented.\n", __func__);
856 return 0;
857 }
858
859 return hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
860}
861
862static int pp_dpm_get_mclk_od(void *handle)
863{
864 struct pp_hwmgr *hwmgr;
865
866 if (!handle)
867 return -EINVAL;
868
869 hwmgr = ((struct pp_instance *)handle)->hwmgr;
870
871 PP_CHECK_HW(hwmgr);
872
873 if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
874 printk(KERN_INFO "%s was not implemented.\n", __func__);
875 return 0;
876 }
877
878 return hwmgr->hwmgr_func->get_mclk_od(hwmgr);
879}
880
881static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
882{
883 struct pp_hwmgr *hwmgr;
884
885 if (!handle)
886 return -EINVAL;
887
888 hwmgr = ((struct pp_instance *)handle)->hwmgr;
889
890 PP_CHECK_HW(hwmgr);
891
892 if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
893 printk(KERN_INFO "%s was not implemented.\n", __func__);
894 return 0;
895 }
896
897 return hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
898}
899
809const struct amd_powerplay_funcs pp_dpm_funcs = { 900const struct amd_powerplay_funcs pp_dpm_funcs = {
810 .get_temperature = pp_dpm_get_temperature, 901 .get_temperature = pp_dpm_get_temperature,
811 .load_firmware = pp_dpm_load_fw, 902 .load_firmware = pp_dpm_load_fw,
@@ -828,6 +919,10 @@ const struct amd_powerplay_funcs pp_dpm_funcs = {
828 .set_pp_table = pp_dpm_set_pp_table, 919 .set_pp_table = pp_dpm_set_pp_table,
829 .force_clock_level = pp_dpm_force_clock_level, 920 .force_clock_level = pp_dpm_force_clock_level,
830 .print_clock_levels = pp_dpm_print_clock_levels, 921 .print_clock_levels = pp_dpm_print_clock_levels,
922 .get_sclk_od = pp_dpm_get_sclk_od,
923 .set_sclk_od = pp_dpm_set_sclk_od,
924 .get_mclk_od = pp_dpm_get_mclk_od,
925 .set_mclk_od = pp_dpm_set_mclk_od,
831}; 926};
832 927
833static int amd_pp_instance_init(struct amd_pp_init *pp_init, 928static int amd_pp_instance_init(struct amd_pp_init *pp_init,
@@ -909,6 +1004,44 @@ int amd_powerplay_fini(void *handle)
909 return 0; 1004 return 0;
910} 1005}
911 1006
1007int amd_powerplay_reset(void *handle)
1008{
1009 struct pp_instance *instance = (struct pp_instance *)handle;
1010 struct pp_eventmgr *eventmgr;
1011 struct pem_event_data event_data = { {0} };
1012 int ret;
1013
1014 if (instance == NULL)
1015 return -EINVAL;
1016
1017 eventmgr = instance->eventmgr;
1018 if (!eventmgr || !eventmgr->pp_eventmgr_fini)
1019 return -EINVAL;
1020
1021 eventmgr->pp_eventmgr_fini(eventmgr);
1022
1023 ret = pp_sw_fini(handle);
1024 if (ret)
1025 return ret;
1026
1027 kfree(instance->hwmgr->ps);
1028
1029 ret = pp_sw_init(handle);
1030 if (ret)
1031 return ret;
1032
1033 hw_init_power_state_table(instance->hwmgr);
1034
1035 if (eventmgr == NULL || eventmgr->pp_eventmgr_init == NULL)
1036 return -EINVAL;
1037
1038 ret = eventmgr->pp_eventmgr_init(eventmgr);
1039 if (ret)
1040 return ret;
1041
1042 return pem_handle_event(eventmgr, AMD_PP_EVENT_COMPLETE_INIT, &event_data);
1043}
1044
912/* export this function to DAL */ 1045/* export this function to DAL */
913 1046
914int amd_powerplay_display_configuration_change(void *handle, 1047int amd_powerplay_display_configuration_change(void *handle,
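The rewritten pp table callbacks above change the flow: get_pp_table now hands back the cached soft_pp_table and its size, while set_pp_table copies the caller's buffer into a lazily allocated hardcode_pp_table, repoints soft_pp_table at it, and re-initializes powerplay through the new amd_powerplay_reset(). A rough caller-side sketch under those assumptions; the wrapper below is hypothetical and only strings together the pp_dpm_funcs entries shown above:

/* Hypothetical wrapper, not part of the patch. */
static int replace_pp_table_sketch(void *pp_handle,
				   const char *new_table, size_t size)
{
	char *cur = NULL;
	int len = pp_dpm_funcs.get_pp_table(pp_handle, &cur);

	if (len < 0)
		return len;	/* no soft_pp_table cached yet (-EINVAL) */

	/* copies into hardcode_pp_table and triggers a powerplay reset */
	return pp_dpm_funcs.set_pp_table(pp_handle, new_table, size);
}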
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
index d6635cc4b0fc..635fc4b48184 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
@@ -30,7 +30,6 @@ static const pem_event_action * const initialize_event[] = {
30 system_config_tasks, 30 system_config_tasks,
31 setup_asic_tasks, 31 setup_asic_tasks,
32 enable_dynamic_state_management_tasks, 32 enable_dynamic_state_management_tasks,
33 enable_clock_power_gatings_tasks,
34 get_2d_performance_state_tasks, 33 get_2d_performance_state_tasks,
35 set_performance_state_tasks, 34 set_performance_state_tasks,
36 initialize_thermal_controller_tasks, 35 initialize_thermal_controller_tasks,
@@ -140,7 +139,6 @@ static const pem_event_action * const resume_event[] = {
140 setup_asic_tasks, 139 setup_asic_tasks,
141 enable_stutter_mode_tasks, /*must do this in boot state and before SMC is started */ 140 enable_stutter_mode_tasks, /*must do this in boot state and before SMC is started */
142 enable_dynamic_state_management_tasks, 141 enable_dynamic_state_management_tasks,
143 enable_clock_power_gatings_tasks,
144 enable_disable_bapm_tasks, 142 enable_disable_bapm_tasks,
145 initialize_thermal_controller_tasks, 143 initialize_thermal_controller_tasks,
146 get_2d_performance_state_tasks, 144 get_2d_performance_state_tasks,
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c
index 5cd123472db4..b6f45fd01fa6 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c
@@ -132,8 +132,7 @@ int pem_task_enable_dynamic_state_management(struct pp_eventmgr *eventmgr, struc
132 132
133int pem_task_disable_dynamic_state_management(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) 133int pem_task_disable_dynamic_state_management(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
134{ 134{
135 /* TODO */ 135 return phm_disable_dynamic_state_management(eventmgr->hwmgr);
136 return 0;
137} 136}
138 137
139int pem_task_enable_clock_power_gatings_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) 138int pem_task_enable_clock_power_gatings_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
index 436fc16dabb6..2028980f1ed4 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
@@ -177,12 +177,12 @@ int cz_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
177 cz_dpm_powerdown_uvd(hwmgr); 177 cz_dpm_powerdown_uvd(hwmgr);
178 } else { 178 } else {
179 cz_dpm_powerup_uvd(hwmgr); 179 cz_dpm_powerup_uvd(hwmgr);
180 cgs_set_clockgating_state(hwmgr->device,
181 AMD_IP_BLOCK_TYPE_UVD,
182 AMD_PG_STATE_GATE);
183 cgs_set_powergating_state(hwmgr->device, 180 cgs_set_powergating_state(hwmgr->device,
184 AMD_IP_BLOCK_TYPE_UVD, 181 AMD_IP_BLOCK_TYPE_UVD,
185 AMD_CG_STATE_UNGATE); 182 AMD_CG_STATE_UNGATE);
183 cgs_set_clockgating_state(hwmgr->device,
184 AMD_IP_BLOCK_TYPE_UVD,
185 AMD_PG_STATE_GATE);
186 cz_dpm_update_uvd_dpm(hwmgr, false); 186 cz_dpm_update_uvd_dpm(hwmgr, false);
187 } 187 }
188 188
@@ -206,25 +206,26 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
206 AMD_IP_BLOCK_TYPE_VCE, 206 AMD_IP_BLOCK_TYPE_VCE,
207 AMD_PG_STATE_GATE); 207 AMD_PG_STATE_GATE);
208 cz_enable_disable_vce_dpm(hwmgr, false); 208 cz_enable_disable_vce_dpm(hwmgr, false);
209 /* TODO: to figure out why vce can't be poweroff*/ 209 cz_dpm_powerdown_vce(hwmgr);
210 cz_hwmgr->vce_power_gated = true; 210 cz_hwmgr->vce_power_gated = true;
211 } else { 211 } else {
212 cz_dpm_powerup_vce(hwmgr); 212 cz_dpm_powerup_vce(hwmgr);
213 cz_hwmgr->vce_power_gated = false; 213 cz_hwmgr->vce_power_gated = false;
214 cgs_set_clockgating_state(
215 hwmgr->device,
216 AMD_IP_BLOCK_TYPE_VCE,
217 AMD_PG_STATE_GATE);
218 cgs_set_powergating_state( 214 cgs_set_powergating_state(
219 hwmgr->device, 215 hwmgr->device,
220 AMD_IP_BLOCK_TYPE_VCE, 216 AMD_IP_BLOCK_TYPE_VCE,
221 AMD_CG_STATE_UNGATE); 217 AMD_CG_STATE_UNGATE);
218 cgs_set_clockgating_state(
219 hwmgr->device,
220 AMD_IP_BLOCK_TYPE_VCE,
221 AMD_PG_STATE_GATE);
222 cz_dpm_update_vce_dpm(hwmgr); 222 cz_dpm_update_vce_dpm(hwmgr);
223 cz_enable_disable_vce_dpm(hwmgr, true); 223 cz_enable_disable_vce_dpm(hwmgr, true);
224 return 0; 224 return 0;
225 } 225 }
226 } 226 }
227 } else { 227 } else {
228 cz_hwmgr->vce_power_gated = bgate;
228 cz_dpm_update_vce_dpm(hwmgr); 229 cz_dpm_update_vce_dpm(hwmgr);
229 cz_enable_disable_vce_dpm(hwmgr, !bgate); 230 cz_enable_disable_vce_dpm(hwmgr, !bgate);
230 return 0; 231 return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index 1f14c477d15d..8cc0df9b534a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -1167,9 +1167,9 @@ static int cz_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
1167 1167
1168 cz_ps->action = cz_current_ps->action; 1168 cz_ps->action = cz_current_ps->action;
1169 1169
1170 if ((force_high == false) && (cz_ps->action == FORCE_HIGH)) 1170 if (!force_high && (cz_ps->action == FORCE_HIGH))
1171 cz_ps->action = CANCEL_FORCE_HIGH; 1171 cz_ps->action = CANCEL_FORCE_HIGH;
1172 else if ((force_high == true) && (cz_ps->action != FORCE_HIGH)) 1172 else if (force_high && (cz_ps->action != FORCE_HIGH))
1173 cz_ps->action = FORCE_HIGH; 1173 cz_ps->action = FORCE_HIGH;
1174 else 1174 else
1175 cz_ps->action = DO_NOTHING; 1175 cz_ps->action = DO_NOTHING;
@@ -1180,6 +1180,13 @@ static int cz_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
1180static int cz_hwmgr_backend_init(struct pp_hwmgr *hwmgr) 1180static int cz_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
1181{ 1181{
1182 int result = 0; 1182 int result = 0;
1183 struct cz_hwmgr *data;
1184
1185 data = kzalloc(sizeof(struct cz_hwmgr), GFP_KERNEL);
1186 if (data == NULL)
1187 return -ENOMEM;
1188
1189 hwmgr->backend = data;
1183 1190
1184 result = cz_initialize_dpm_defaults(hwmgr); 1191 result = cz_initialize_dpm_defaults(hwmgr);
1185 if (result != 0) { 1192 if (result != 0) {
@@ -1649,7 +1656,7 @@ static void cz_hw_print_display_cfg(
 	struct cz_hwmgr *hw_data = (struct cz_hwmgr *)(hwmgr->backend);
 	uint32_t data = 0;
 
-	if (hw_data->cc6_settings.cc6_setting_changed == true) {
+	if (hw_data->cc6_settings.cc6_setting_changed) {
 
 		hw_data->cc6_settings.cc6_setting_changed = false;
 
@@ -1909,15 +1916,7 @@ static const struct pp_hwmgr_func cz_hwmgr_funcs = {
 
 int cz_hwmgr_init(struct pp_hwmgr *hwmgr)
 {
-	struct cz_hwmgr *cz_hwmgr;
-	int ret = 0;
-
-	cz_hwmgr = kzalloc(sizeof(struct cz_hwmgr), GFP_KERNEL);
-	if (cz_hwmgr == NULL)
-		return -ENOMEM;
-
-	hwmgr->backend = cz_hwmgr;
 	hwmgr->hwmgr_func = &cz_hwmgr_funcs;
 	hwmgr->pptable_func = &pptable_funcs;
-	return ret;
+	return 0;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c
index e1b649bd5344..5afe82068b29 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_clockpowergating.c
@@ -56,7 +56,7 @@ int fiji_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
 		fiji_update_uvd_dpm(hwmgr, false);
 		cgs_set_clockgating_state(hwmgr->device,
 				AMD_IP_BLOCK_TYPE_UVD,
-				AMD_PG_STATE_UNGATE);
+				AMD_CG_STATE_UNGATE);
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
index 92912ab20944..120a9e2c3152 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c
@@ -581,25 +581,24 @@ static int fiji_patch_boot_state(struct pp_hwmgr *hwmgr,
 
 static int fiji_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
 {
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
-
-	if (data->soft_pp_table) {
-		kfree(data->soft_pp_table);
-		data->soft_pp_table = NULL;
-	}
-
 	return phm_hwmgr_backend_fini(hwmgr);
 }
 
 static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
 {
-	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
+	struct fiji_hwmgr *data;
 	uint32_t i;
 	struct phm_ppt_v1_information *table_info =
 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
 	bool stay_in_boot;
 	int result;
 
+	data = kzalloc(sizeof(struct fiji_hwmgr), GFP_KERNEL);
+	if (data == NULL)
+		return -ENOMEM;
+
+	hwmgr->backend = data;
+
 	data->dll_default_on = false;
 	data->sram_end = SMC_RAM_END;
 
@@ -699,7 +698,7 @@ static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
 	if (0 == result) {
 		struct cgs_system_info sys_info = {0};
 
-		data->is_tlu_enabled = 0;
+		data->is_tlu_enabled = false;
 		hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
 			FIJI_MAX_HARDWARE_POWERLEVELS;
 		hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
@@ -734,7 +733,7 @@ static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
 		sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
 		result = cgs_query_system_info(hwmgr->device, &sys_info);
 		if (result)
-			data->pcie_gen_cap = 0x30007;
+			data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK;
 		else
 			data->pcie_gen_cap = (uint32_t)sys_info.value;
 		if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
@@ -743,7 +742,7 @@ static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
 		sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
 		result = cgs_query_system_info(hwmgr->device, &sys_info);
 		if (result)
-			data->pcie_lane_cap = 0x2f0000;
+			data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK;
 		else
 			data->pcie_lane_cap = (uint32_t)sys_info.value;
 	} else {
@@ -1236,6 +1235,34 @@ static int fiji_program_voting_clients(struct pp_hwmgr *hwmgr)
1236 return 0; 1235 return 0;
1237} 1236}
1238 1237
1238static int fiji_clear_voting_clients(struct pp_hwmgr *hwmgr)
1239{
1240 /* Reset voting clients before disabling DPM */
1241 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1242 SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
1243 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
1244 SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);
1245
1246 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1247 ixCG_FREQ_TRAN_VOTING_0, 0);
1248 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1249 ixCG_FREQ_TRAN_VOTING_1, 0);
1250 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1251 ixCG_FREQ_TRAN_VOTING_2, 0);
1252 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1253 ixCG_FREQ_TRAN_VOTING_3, 0);
1254 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1255 ixCG_FREQ_TRAN_VOTING_4, 0);
1256 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1257 ixCG_FREQ_TRAN_VOTING_5, 0);
1258 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1259 ixCG_FREQ_TRAN_VOTING_6, 0);
1260 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
1261 ixCG_FREQ_TRAN_VOTING_7, 0);
1262
1263 return 0;
1264}
1265
1239/** 1266/**
1240* Get the location of various tables inside the FW image. 1267* Get the location of various tables inside the FW image.
1241* 1268*
@@ -1363,6 +1390,17 @@ static int fiji_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
1363} 1390}
1364 1391
1365/** 1392/**
1393* Call SMC to reset S0/S1 to S1 and Reset SMIO to initial value
1394*
1395* @param hwmgr the address of the powerplay hardware manager.
1396* @return if success then 0;
1397*/
1398static int fiji_reset_to_default(struct pp_hwmgr *hwmgr)
1399{
1400 return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults);
1401}
1402
1403/**
1366* Initial switch from ARB F0->F1 1404* Initial switch from ARB F0->F1
1367* 1405*
1368* @param hwmgr the address of the powerplay hardware manager. 1406* @param hwmgr the address of the powerplay hardware manager.
@@ -1375,6 +1413,21 @@ static int fiji_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
1375 MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1); 1413 MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
1376} 1414}
1377 1415
1416static int fiji_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
1417{
1418 uint32_t tmp;
1419
1420 tmp = (cgs_read_ind_register(hwmgr->device,
1421 CGS_IND_REG__SMC, ixSMC_SCRATCH9) &
1422 0x0000ff00) >> 8;
1423
1424 if (tmp == MC_CG_ARB_FREQ_F0)
1425 return 0;
1426
1427 return fiji_copy_and_switch_arb_sets(hwmgr,
1428 tmp, MC_CG_ARB_FREQ_F0);
1429}
1430
1378static int fiji_reset_single_dpm_table(struct pp_hwmgr *hwmgr, 1431static int fiji_reset_single_dpm_table(struct pp_hwmgr *hwmgr,
1379 struct fiji_single_dpm_table *dpm_table, uint32_t count) 1432 struct fiji_single_dpm_table *dpm_table, uint32_t count)
1380{ 1433{
@@ -1397,7 +1450,7 @@ static void fiji_setup_pcie_table_entry(
 {
 	dpm_table->dpm_levels[index].value = pcie_gen;
 	dpm_table->dpm_levels[index].param1 = pcie_lanes;
-	dpm_table->dpm_levels[index].enabled = 1;
+	dpm_table->dpm_levels[index].enabled = true;
 }
 
 static int fiji_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
@@ -1609,7 +1662,6 @@ static int fiji_populate_cac_table(struct pp_hwmgr *hwmgr,
 {
 	uint32_t count;
 	uint8_t index;
-	int result = 0;
 	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
 	struct phm_ppt_v1_information *table_info =
 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
@@ -1631,7 +1683,7 @@ static int fiji_populate_cac_table(struct pp_hwmgr *hwmgr,
 				VOLTAGE_SCALE)) / 25);
 	}
 
-	return result;
+	return 0;
 }
 
 /**
@@ -3177,6 +3229,17 @@ static int fiji_enable_ulv(struct pp_hwmgr *hwmgr)
3177 return 0; 3229 return 0;
3178} 3230}
3179 3231
3232static int fiji_disable_ulv(struct pp_hwmgr *hwmgr)
3233{
3234 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
3235 struct fiji_ulv_parm *ulv = &(data->ulv);
3236
3237 if (ulv->ulv_supported)
3238 return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableULV);
3239
3240 return 0;
3241}
3242
3180static int fiji_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) 3243static int fiji_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
3181{ 3244{
3182 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 3245 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -3197,6 +3260,21 @@ static int fiji_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
3197 return 0; 3260 return 0;
3198} 3261}
3199 3262
3263static int fiji_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
3264{
3265 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3266 PHM_PlatformCaps_SclkDeepSleep)) {
3267 if (smum_send_msg_to_smc(hwmgr->smumgr,
3268 PPSMC_MSG_MASTER_DeepSleep_OFF)) {
3269 PP_ASSERT_WITH_CODE(false,
3270 "Attempt to disable Master Deep Sleep switch failed!",
3271 return -1);
3272 }
3273 }
3274
3275 return 0;
3276}
3277
3200static int fiji_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) 3278static int fiji_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
3201{ 3279{
3202 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); 3280 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
@@ -3357,6 +3435,70 @@ static int fiji_start_dpm(struct pp_hwmgr *hwmgr)
3357 return 0; 3435 return 0;
3358} 3436}
3359 3437
3438static int fiji_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
3439{
3440 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
3441
3442 /* disable SCLK dpm */
3443 if (!data->sclk_dpm_key_disabled)
3444 PP_ASSERT_WITH_CODE(
3445 (smum_send_msg_to_smc(hwmgr->smumgr,
3446 PPSMC_MSG_DPM_Disable) == 0),
3447 "Failed to disable SCLK DPM!",
3448 return -1);
3449
3450 /* disable MCLK dpm */
3451 if (!data->mclk_dpm_key_disabled) {
3452 PP_ASSERT_WITH_CODE(
3453 (smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3454 PPSMC_MSG_MCLKDPM_SetEnabledMask, 1) == 0),
3455 "Failed to force MCLK DPM0!",
3456 return -1);
3457
3458 PP_ASSERT_WITH_CODE(
3459 (smum_send_msg_to_smc(hwmgr->smumgr,
3460 PPSMC_MSG_MCLKDPM_Disable) == 0),
3461 "Failed to disable MCLK DPM!",
3462 return -1);
3463 }
3464
3465 return 0;
3466}
3467
3468static int fiji_stop_dpm(struct pp_hwmgr *hwmgr)
3469{
3470 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
3471
3472 /* disable general power management */
3473 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
3474 GLOBAL_PWRMGT_EN, 0);
3475 /* disable sclk deep sleep */
3476 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
3477 DYNAMIC_PM_EN, 0);
3478
3479 /* disable PCIE dpm */
3480 if (!data->pcie_dpm_key_disabled) {
3481 PP_ASSERT_WITH_CODE(
3482 (smum_send_msg_to_smc(hwmgr->smumgr,
3483 PPSMC_MSG_PCIeDPM_Disable) == 0),
3484 "Failed to disable pcie DPM during DPM Stop Function!",
3485 return -1);
3486 }
3487
3488 if (fiji_disable_sclk_mclk_dpm(hwmgr)) {
3489 printk(KERN_ERR "Failed to disable Sclk DPM and Mclk DPM!");
3490 return -1;
3491 }
3492
3493 PP_ASSERT_WITH_CODE(
3494 (smum_send_msg_to_smc(hwmgr->smumgr,
3495 PPSMC_MSG_Voltage_Cntl_Disable) == 0),
3496 "Failed to disable voltage DPM during DPM Stop Function!",
3497 return -1);
3498
3499 return 0;
3500}
3501
3360static void fiji_set_dpm_event_sources(struct pp_hwmgr *hwmgr, 3502static void fiji_set_dpm_event_sources(struct pp_hwmgr *hwmgr,
3361 uint32_t sources) 3503 uint32_t sources)
3362{ 3504{
@@ -3415,6 +3557,23 @@ static int fiji_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
3415 return fiji_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal); 3557 return fiji_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
3416} 3558}
3417 3559
3560static int fiji_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
3561 PHM_AutoThrottleSource source)
3562{
3563 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
3564
3565 if (data->active_auto_throttle_sources & (1 << source)) {
3566 data->active_auto_throttle_sources &= ~(1 << source);
3567 fiji_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
3568 }
3569 return 0;
3570}
3571
3572static int fiji_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
3573{
3574 return fiji_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
3575}
3576
3418static int fiji_enable_dpm_tasks(struct pp_hwmgr *hwmgr) 3577static int fiji_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
3419{ 3578{
3420 int tmp_result, result = 0; 3579 int tmp_result, result = 0;
@@ -3529,6 +3688,64 @@ static int fiji_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
3529 return result; 3688 return result;
3530} 3689}
3531 3690
3691static int fiji_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
3692{
3693 int tmp_result, result = 0;
3694
3695 tmp_result = (fiji_is_dpm_running(hwmgr)) ? 0 : -1;
3696 PP_ASSERT_WITH_CODE(tmp_result == 0,
3697 "DPM is not running right now, no need to disable DPM!",
3698 return 0);
3699
3700 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3701 PHM_PlatformCaps_ThermalController))
3702 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
3703 GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1);
3704
3705 tmp_result = fiji_disable_power_containment(hwmgr);
3706 PP_ASSERT_WITH_CODE((tmp_result == 0),
3707 "Failed to disable power containment!", result = tmp_result);
3708
3709 tmp_result = fiji_disable_smc_cac(hwmgr);
3710 PP_ASSERT_WITH_CODE((tmp_result == 0),
3711 "Failed to disable SMC CAC!", result = tmp_result);
3712
3713 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
3714 CG_SPLL_SPREAD_SPECTRUM, SSEN, 0);
3715 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
3716 GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0);
3717
3718 tmp_result = fiji_disable_thermal_auto_throttle(hwmgr);
3719 PP_ASSERT_WITH_CODE((tmp_result == 0),
3720 "Failed to disable thermal auto throttle!", result = tmp_result);
3721
3722 tmp_result = fiji_stop_dpm(hwmgr);
3723 PP_ASSERT_WITH_CODE((tmp_result == 0),
3724 "Failed to stop DPM!", result = tmp_result);
3725
3726 tmp_result = fiji_disable_deep_sleep_master_switch(hwmgr);
3727 PP_ASSERT_WITH_CODE((tmp_result == 0),
3728 "Failed to disable deep sleep master switch!", result = tmp_result);
3729
3730 tmp_result = fiji_disable_ulv(hwmgr);
3731 PP_ASSERT_WITH_CODE((tmp_result == 0),
3732 "Failed to disable ULV!", result = tmp_result);
3733
3734 tmp_result = fiji_clear_voting_clients(hwmgr);
3735 PP_ASSERT_WITH_CODE((tmp_result == 0),
3736 "Failed to clear voting clients!", result = tmp_result);
3737
3738 tmp_result = fiji_reset_to_default(hwmgr);
3739 PP_ASSERT_WITH_CODE((tmp_result == 0),
3740 "Failed to reset to default!", result = tmp_result);
3741
3742 tmp_result = fiji_force_switch_to_arbf0(hwmgr);
3743 PP_ASSERT_WITH_CODE((tmp_result == 0),
3744 "Failed to force to switch arbf0!", result = tmp_result);
3745
3746 return result;
3747}
3748
3532static int fiji_force_dpm_highest(struct pp_hwmgr *hwmgr) 3749static int fiji_force_dpm_highest(struct pp_hwmgr *hwmgr)
3533{ 3750{
3534 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); 3751 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
@@ -4171,8 +4388,9 @@ static int fiji_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
 	if ((0 == data->sclk_dpm_key_disabled) &&
 		(data->need_update_smu7_dpm_table &
 		(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
-		PP_ASSERT_WITH_CODE(true == fiji_is_dpm_running(hwmgr),
-				"Trying to freeze SCLK DPM when DPM is disabled",);
+		PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr),
+				"Trying to freeze SCLK DPM when DPM is disabled",
+				);
 		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
 				PPSMC_MSG_SCLKDPM_FreezeLevel),
 				"Failed to freeze SCLK DPM during FreezeSclkMclkDPM Function!",
@@ -4182,8 +4400,9 @@ static int fiji_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
 	if ((0 == data->mclk_dpm_key_disabled) &&
 		(data->need_update_smu7_dpm_table &
 		DPMTABLE_OD_UPDATE_MCLK)) {
-		PP_ASSERT_WITH_CODE(true == fiji_is_dpm_running(hwmgr),
-				"Trying to freeze MCLK DPM when DPM is disabled",);
+		PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr),
+				"Trying to freeze MCLK DPM when DPM is disabled",
+				);
 		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
 				PPSMC_MSG_MCLKDPM_FreezeLevel),
 				"Failed to freeze MCLK DPM during FreezeSclkMclkDPM Function!",
@@ -4353,7 +4572,6 @@ static int fiji_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
 static int fiji_trim_dpm_states(struct pp_hwmgr *hwmgr,
 		const struct fiji_power_state *fiji_ps)
 {
-	int result = 0;
 	struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
 	uint32_t high_limit_count;
 
@@ -4373,7 +4591,7 @@ static int fiji_trim_dpm_states(struct pp_hwmgr *hwmgr,
 			fiji_ps->performance_levels[0].memory_clock,
 			fiji_ps->performance_levels[high_limit_count].memory_clock);
 
-	return result;
+	return 0;
 }
 
 static int fiji_generate_dpm_level_enable_mask(
@@ -4632,8 +4850,9 @@ static int fiji_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
 		(data->need_update_smu7_dpm_table &
 		(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
 
-		PP_ASSERT_WITH_CODE(true == fiji_is_dpm_running(hwmgr),
-				"Trying to Unfreeze SCLK DPM when DPM is disabled",);
+		PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr),
+				"Trying to Unfreeze SCLK DPM when DPM is disabled",
+				);
 		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
 				PPSMC_MSG_SCLKDPM_UnfreezeLevel),
 				"Failed to unfreeze SCLK DPM during UnFreezeSclkMclkDPM Function!",
@@ -4643,8 +4862,9 @@ static int fiji_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
 	if ((0 == data->mclk_dpm_key_disabled) &&
 		(data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
 
-		PP_ASSERT_WITH_CODE(true == fiji_is_dpm_running(hwmgr),
-				"Trying to Unfreeze MCLK DPM when DPM is disabled",);
+		PP_ASSERT_WITH_CODE(fiji_is_dpm_running(hwmgr),
+				"Trying to Unfreeze MCLK DPM when DPM is disabled",
+				);
 		PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
 				PPSMC_MSG_SCLKDPM_UnfreezeLevel),
 				"Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
@@ -5071,42 +5291,6 @@ static int fiji_get_fan_control_mode(struct pp_hwmgr *hwmgr)
5071 CG_FDO_CTRL2, FDO_PWM_MODE); 5291 CG_FDO_CTRL2, FDO_PWM_MODE);
5072} 5292}
5073 5293
5074static int fiji_get_pp_table(struct pp_hwmgr *hwmgr, char **table)
5075{
5076 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
5077
5078 if (!data->soft_pp_table) {
5079 data->soft_pp_table = kmemdup(hwmgr->soft_pp_table,
5080 hwmgr->soft_pp_table_size,
5081 GFP_KERNEL);
5082 if (!data->soft_pp_table)
5083 return -ENOMEM;
5084 }
5085
5086 *table = (char *)&data->soft_pp_table;
5087
5088 return hwmgr->soft_pp_table_size;
5089}
5090
5091static int fiji_set_pp_table(struct pp_hwmgr *hwmgr, const char *buf, size_t size)
5092{
5093 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
5094
5095 if (!data->soft_pp_table) {
5096 data->soft_pp_table = kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL);
5097 if (!data->soft_pp_table)
5098 return -ENOMEM;
5099 }
5100
5101 memcpy(data->soft_pp_table, buf, size);
5102
5103 hwmgr->soft_pp_table = data->soft_pp_table;
5104
5105 /* TODO: re-init powerplay to implement modified pptable */
5106
5107 return 0;
5108}
5109
5110static int fiji_force_clock_level(struct pp_hwmgr *hwmgr, 5294static int fiji_force_clock_level(struct pp_hwmgr *hwmgr,
5111 enum pp_clock_type type, uint32_t mask) 5295 enum pp_clock_type type, uint32_t mask)
5112{ 5296{
@@ -5276,12 +5460,96 @@ bool fiji_check_smc_update_required_for_display_configuration(struct pp_hwmgr *h
5276 return is_update_required; 5460 return is_update_required;
5277} 5461}
5278 5462
5463static int fiji_get_sclk_od(struct pp_hwmgr *hwmgr)
5464{
5465 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
5466 struct fiji_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
5467 struct fiji_single_dpm_table *golden_sclk_table =
5468 &(data->golden_dpm_table.sclk_table);
5469 int value;
5470
5471 value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
5472 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
5473 100 /
5474 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
5475
5476 return value;
5477}
5478
5479static int fiji_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
5480{
5481 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
5482 struct fiji_single_dpm_table *golden_sclk_table =
5483 &(data->golden_dpm_table.sclk_table);
5484 struct pp_power_state *ps;
5485 struct fiji_power_state *fiji_ps;
5486
5487 if (value > 20)
5488 value = 20;
5489
5490 ps = hwmgr->request_ps;
5491
5492 if (ps == NULL)
5493 return -EINVAL;
5494
5495 fiji_ps = cast_phw_fiji_power_state(&ps->hardware);
5496
5497 fiji_ps->performance_levels[fiji_ps->performance_level_count - 1].engine_clock =
5498 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
5499 value / 100 +
5500 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
5501
5502 return 0;
5503}
5504
5505static int fiji_get_mclk_od(struct pp_hwmgr *hwmgr)
5506{
5507 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
5508 struct fiji_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
5509 struct fiji_single_dpm_table *golden_mclk_table =
5510 &(data->golden_dpm_table.mclk_table);
5511 int value;
5512
5513 value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
5514 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
5515 100 /
5516 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
5517
5518 return value;
5519}
5520
5521static int fiji_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
5522{
5523 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
5524 struct fiji_single_dpm_table *golden_mclk_table =
5525 &(data->golden_dpm_table.mclk_table);
5526 struct pp_power_state *ps;
5527 struct fiji_power_state *fiji_ps;
5528
5529 if (value > 20)
5530 value = 20;
5531
5532 ps = hwmgr->request_ps;
5533
5534 if (ps == NULL)
5535 return -EINVAL;
5536
5537 fiji_ps = cast_phw_fiji_power_state(&ps->hardware);
5538
5539 fiji_ps->performance_levels[fiji_ps->performance_level_count - 1].memory_clock =
5540 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
5541 value / 100 +
5542 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
5543
5544 return 0;
5545}
5279 5546
5280static const struct pp_hwmgr_func fiji_hwmgr_funcs = { 5547static const struct pp_hwmgr_func fiji_hwmgr_funcs = {
5281 .backend_init = &fiji_hwmgr_backend_init, 5548 .backend_init = &fiji_hwmgr_backend_init,
5282 .backend_fini = &fiji_hwmgr_backend_fini, 5549 .backend_fini = &fiji_hwmgr_backend_fini,
5283 .asic_setup = &fiji_setup_asic_task, 5550 .asic_setup = &fiji_setup_asic_task,
5284 .dynamic_state_management_enable = &fiji_enable_dpm_tasks, 5551 .dynamic_state_management_enable = &fiji_enable_dpm_tasks,
5552 .dynamic_state_management_disable = &fiji_disable_dpm_tasks,
5285 .force_dpm_level = &fiji_dpm_force_dpm_level, 5553 .force_dpm_level = &fiji_dpm_force_dpm_level,
5286 .get_num_of_pp_table_entries = &tonga_get_number_of_powerplay_table_entries, 5554 .get_num_of_pp_table_entries = &tonga_get_number_of_powerplay_table_entries,
5287 .get_power_state_size = &fiji_get_power_state_size, 5555 .get_power_state_size = &fiji_get_power_state_size,
@@ -5314,24 +5582,18 @@ static const struct pp_hwmgr_func fiji_hwmgr_funcs = {
 	.get_fan_control_mode = fiji_get_fan_control_mode,
 	.check_states_equal = fiji_check_states_equal,
 	.check_smc_update_required_for_display_configuration = fiji_check_smc_update_required_for_display_configuration,
-	.get_pp_table = fiji_get_pp_table,
-	.set_pp_table = fiji_set_pp_table,
 	.force_clock_level = fiji_force_clock_level,
 	.print_clock_levels = fiji_print_clock_levels,
+	.get_sclk_od = fiji_get_sclk_od,
+	.set_sclk_od = fiji_set_sclk_od,
+	.get_mclk_od = fiji_get_mclk_od,
+	.set_mclk_od = fiji_set_mclk_od,
 };
 
 int fiji_hwmgr_init(struct pp_hwmgr *hwmgr)
 {
-	struct fiji_hwmgr *data;
-	int ret = 0;
-
-	data = kzalloc(sizeof(struct fiji_hwmgr), GFP_KERNEL);
-	if (data == NULL)
-		return -ENOMEM;
-
-	hwmgr->backend = data;
 	hwmgr->hwmgr_func = &fiji_hwmgr_funcs;
 	hwmgr->pptable_func = &tonga_pptable_funcs;
 	pp_fiji_thermal_initialize(hwmgr);
-	return ret;
+	return 0;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h
index 170edf5a772d..bf67c2a92c68 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h
@@ -302,9 +302,6 @@ struct fiji_hwmgr {
 	bool pg_acp_init;
 	bool frtc_enabled;
 	bool frtc_status_changed;
-
-	/* soft pptable for re-uploading into smu */
-	void *soft_pp_table;
 };
 
 /* To convert to Q8.8 format for firmware */
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c
index db23a4068baf..44658451a8d2 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c
@@ -73,17 +73,18 @@ void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr)
 
 	if (!tmp) {
 		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
-				PHM_PlatformCaps_PowerContainment);
-
-		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
 				PHM_PlatformCaps_CAC);
 
 		fiji_hwmgr->fast_watermark_threshold = 100;
 
-		tmp = 1;
-		fiji_hwmgr->enable_dte_feature = tmp ? false : true;
-		fiji_hwmgr->enable_tdc_limit_feature = tmp ? true : false;
-		fiji_hwmgr->enable_pkg_pwr_tracking_feature = tmp ? true : false;
+		if (hwmgr->powercontainment_enabled) {
+			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+					PHM_PlatformCaps_PowerContainment);
+			tmp = 1;
+			fiji_hwmgr->enable_dte_feature = tmp ? false : true;
+			fiji_hwmgr->enable_tdc_limit_feature = tmp ? true : false;
+			fiji_hwmgr->enable_pkg_pwr_tracking_feature = tmp ? true : false;
+		}
 	}
 }
89 90
@@ -459,6 +460,23 @@ int fiji_enable_smc_cac(struct pp_hwmgr *hwmgr)
459 return result; 460 return result;
460} 461}
461 462
463int fiji_disable_smc_cac(struct pp_hwmgr *hwmgr)
464{
465 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
466 int result = 0;
467
468 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
469 PHM_PlatformCaps_CAC) && data->cac_enabled) {
470 int smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
471 (uint16_t)(PPSMC_MSG_DisableCac));
472 PP_ASSERT_WITH_CODE((smc_result == 0),
473 "Failed to disable CAC in SMC.", result = -1);
474
475 data->cac_enabled = false;
476 }
477 return result;
478}
479
462int fiji_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n) 480int fiji_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
463{ 481{
464 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); 482 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
@@ -528,6 +546,48 @@ int fiji_enable_power_containment(struct pp_hwmgr *hwmgr)
528 return result; 546 return result;
529} 547}
530 548
549int fiji_disable_power_containment(struct pp_hwmgr *hwmgr)
550{
551 struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend);
552 int result = 0;
553
554 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
555 PHM_PlatformCaps_PowerContainment) &&
556 data->power_containment_features) {
557 int smc_result;
558
559 if (data->power_containment_features &
560 POWERCONTAINMENT_FEATURE_TDCLimit) {
561 smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
562 (uint16_t)(PPSMC_MSG_TDCLimitDisable));
563 PP_ASSERT_WITH_CODE((smc_result == 0),
564 "Failed to disable TDCLimit in SMC.",
565 result = smc_result);
566 }
567
568 if (data->power_containment_features &
569 POWERCONTAINMENT_FEATURE_DTE) {
570 smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
571 (uint16_t)(PPSMC_MSG_DisableDTE));
572 PP_ASSERT_WITH_CODE((smc_result == 0),
573 "Failed to disable DTE in SMC.",
574 result = smc_result);
575 }
576
577 if (data->power_containment_features &
578 POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
579 smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
580 (uint16_t)(PPSMC_MSG_PkgPwrLimitDisable));
581 PP_ASSERT_WITH_CODE((smc_result == 0),
582 "Failed to disable PkgPwrTracking in SMC.",
583 result = smc_result);
584 }
585 data->power_containment_features = 0;
586 }
587
588 return result;
589}
590
531int fiji_power_control_set_level(struct pp_hwmgr *hwmgr) 591int fiji_power_control_set_level(struct pp_hwmgr *hwmgr)
532{ 592{
533 struct phm_ppt_v1_information *table_info = 593 struct phm_ppt_v1_information *table_info =
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h
index 55e58200f33a..fec772421733 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h
@@ -36,6 +36,19 @@ enum fiji_pt_config_reg_type {
36#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002 36#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002
37#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004 37#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004
38 38
39#define DIDT_SQ_CTRL0__UNUSED_0_MASK 0xffffffc0
40#define DIDT_SQ_CTRL0__UNUSED_0__SHIFT 0x6
41#define DIDT_TD_CTRL0__UNUSED_0_MASK 0xffffffc0
42#define DIDT_TD_CTRL0__UNUSED_0__SHIFT 0x6
43#define DIDT_TCP_CTRL0__UNUSED_0_MASK 0xffffffc0
44#define DIDT_TCP_CTRL0__UNUSED_0__SHIFT 0x6
45#define DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK 0xe0000000
46#define DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001d
47#define DIDT_TD_TUNING_CTRL__UNUSED_0_MASK 0xe0000000
48#define DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001d
49#define DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK 0xe0000000
50#define DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001d
51
39struct fiji_pt_config_reg { 52struct fiji_pt_config_reg {
40 uint32_t offset; 53 uint32_t offset;
41 uint32_t mask; 54 uint32_t mask;
@@ -58,7 +71,9 @@ void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr);
58int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr); 71int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr);
59int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr); 72int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr);
60int fiji_enable_smc_cac(struct pp_hwmgr *hwmgr); 73int fiji_enable_smc_cac(struct pp_hwmgr *hwmgr);
74int fiji_disable_smc_cac(struct pp_hwmgr *hwmgr);
61int fiji_enable_power_containment(struct pp_hwmgr *hwmgr); 75int fiji_enable_power_containment(struct pp_hwmgr *hwmgr);
76int fiji_disable_power_containment(struct pp_hwmgr *hwmgr);
62int fiji_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n); 77int fiji_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n);
63int fiji_power_control_set_level(struct pp_hwmgr *hwmgr); 78int fiji_power_control_set_level(struct pp_hwmgr *hwmgr);
64 79
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c
index 7a705cee0cc2..a6abe81bc843 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/functiontables.c
@@ -59,8 +59,8 @@ int phm_dispatch_table(struct pp_hwmgr *hwmgr,
 		struct phm_runtime_table_header *rt_table,
 		void *input, void *output)
 {
-	int result = 0;
-	void *temp_storage = NULL;
+	int result;
+	void *temp_storage;
 
 	if (hwmgr == NULL || rt_table == NULL) {
 		printk(KERN_ERR "[ powerplay ] Invalid Parameter!\n");
@@ -73,12 +73,13 @@ int phm_dispatch_table(struct pp_hwmgr *hwmgr,
 			printk(KERN_ERR "[ powerplay ] Could not allocate table temporary storage\n");
 			return -ENOMEM;
 		}
+	} else {
+		temp_storage = NULL;
 	}
 
 	result = phm_run_table(hwmgr, rt_table, input, output, temp_storage);
 
-	if (NULL != temp_storage)
-		kfree(temp_storage);
+	kfree(temp_storage);
 
 	return result;
 }
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index efb77eda7508..789f98ad2615 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -154,6 +154,30 @@ int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
154 return ret; 154 return ret;
155} 155}
156 156
157int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr)
158{
159 int ret = -1;
160 bool enabled;
161
162 PHM_FUNC_CHECK(hwmgr);
163
164 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
165 PHM_PlatformCaps_TablelessHardwareInterface)) {
166 if (hwmgr->hwmgr_func->dynamic_state_management_disable)
167 ret = hwmgr->hwmgr_func->dynamic_state_management_disable(hwmgr);
168 } else {
169 ret = phm_dispatch_table(hwmgr,
170 &(hwmgr->disable_dynamic_state_management),
171 NULL, NULL);
172 }
173
174 enabled = ret == 0 ? false : true;
175
176 cgs_notify_dpm_enabled(hwmgr->device, enabled);
177
178 return ret;
179}
180
157int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level) 181int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level)
158{ 182{
159 PHM_FUNC_CHECK(hwmgr); 183 PHM_FUNC_CHECK(hwmgr);
@@ -314,7 +338,7 @@ int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr,
 	if (hwmgr->hwmgr_func->store_cc6_data == NULL)
 		return -EINVAL;
 
-	/* to do pass other display configuration in furture */
+	/* TODO: pass other display configuration in the future */
 
 	if (hwmgr->hwmgr_func->store_cc6_data)
 		hwmgr->hwmgr_func->store_cc6_data(hwmgr,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index 20f20e075588..27e07624ac28 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -24,6 +24,7 @@
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
+#include <drm/amdgpu_drm.h>
 #include "cgs_common.h"
 #include "power_state.h"
 #include "hwmgr.h"
@@ -58,12 +59,13 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
 	hwmgr->hw_revision = pp_init->rev_id;
 	hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT;
 	hwmgr->power_source = PP_PowerSource_AC;
+	hwmgr->powercontainment_enabled = pp_init->powercontainment_enabled;
 
 	switch (hwmgr->chip_family) {
-	case AMD_FAMILY_CZ:
+	case AMDGPU_FAMILY_CZ:
 		cz_hwmgr_init(hwmgr);
 		break;
-	case AMD_FAMILY_VI:
+	case AMDGPU_FAMILY_VI:
 		switch (hwmgr->chip_id) {
 		case CHIP_TONGA:
 			tonga_hwmgr_init(hwmgr);
@@ -94,6 +96,8 @@ int hwmgr_fini(struct pp_hwmgr *hwmgr)
 		return -EINVAL;
 
 	/* do hwmgr finish*/
+	kfree(hwmgr->hardcode_pp_table);
+
 	kfree(hwmgr->backend);
 
 	kfree(hwmgr->start_thermal_controller.function_list);
@@ -530,7 +534,7 @@ int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr
 
 	/* initialize vddc_dep_on_dal_pwrl table */
 	table_size = sizeof(uint32_t) + 4 * sizeof(struct phm_clock_voltage_dependency_record);
-	table_clk_vlt = (struct phm_clock_voltage_dependency_table *)kzalloc(table_size, GFP_KERNEL);
+	table_clk_vlt = kzalloc(table_size, GFP_KERNEL);
 
 	if (NULL == table_clk_vlt) {
 		printk(KERN_ERR "[ powerplay ] Can not allocate space for vddc_dep_on_dal_pwrl! \n");
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c
index 8f142a74ad08..b5edb5105986 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c
@@ -106,11 +106,17 @@ int polaris10_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
 	data->uvd_power_gated = bgate;
 
 	if (bgate) {
+		cgs_set_clockgating_state(hwmgr->device,
+				AMD_IP_BLOCK_TYPE_UVD,
+				AMD_CG_STATE_GATE);
 		polaris10_update_uvd_dpm(hwmgr, true);
 		polaris10_phm_powerdown_uvd(hwmgr);
 	} else {
 		polaris10_phm_powerup_uvd(hwmgr);
 		polaris10_update_uvd_dpm(hwmgr, false);
+		cgs_set_clockgating_state(hwmgr->device,
+				AMD_IP_BLOCK_TYPE_UVD,
+				AMD_CG_STATE_UNGATE);
 	}
 
 	return 0;
@@ -125,11 +131,19 @@ int polaris10_phm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
 
 	data->vce_power_gated = bgate;
 
-	if (bgate)
+	if (bgate) {
+		cgs_set_clockgating_state(hwmgr->device,
+				AMD_IP_BLOCK_TYPE_VCE,
+				AMD_CG_STATE_GATE);
+		polaris10_update_vce_dpm(hwmgr, true);
 		polaris10_phm_powerdown_vce(hwmgr);
-	else
+	} else {
 		polaris10_phm_powerup_vce(hwmgr);
-
+		polaris10_update_vce_dpm(hwmgr, false);
+		cgs_set_clockgating_state(hwmgr->device,
+				AMD_IP_BLOCK_TYPE_VCE,
+				AMD_CG_STATE_UNGATE);
+	}
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
index 91e25f942d90..769636a0c5b5 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c
@@ -389,6 +389,34 @@ static int polaris10_program_voting_clients(struct pp_hwmgr *hwmgr)
389 return 0; 389 return 0;
390} 390}
391 391
392static int polaris10_clear_voting_clients(struct pp_hwmgr *hwmgr)
393{
394 /* Reset voting clients before disabling DPM */
395 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
396 SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1);
397 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
398 SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1);
399
400 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
401 ixCG_FREQ_TRAN_VOTING_0, 0);
402 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
403 ixCG_FREQ_TRAN_VOTING_1, 0);
404 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
405 ixCG_FREQ_TRAN_VOTING_2, 0);
406 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
407 ixCG_FREQ_TRAN_VOTING_3, 0);
408 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
409 ixCG_FREQ_TRAN_VOTING_4, 0);
410 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
411 ixCG_FREQ_TRAN_VOTING_5, 0);
412 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
413 ixCG_FREQ_TRAN_VOTING_6, 0);
414 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
415 ixCG_FREQ_TRAN_VOTING_7, 0);
416
417 return 0;
418}
419
392/** 420/**
393* Get the location of various tables inside the FW image. 421* Get the location of various tables inside the FW image.
394* 422*
@@ -515,6 +543,11 @@ static int polaris10_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr,
515 return 0; 543 return 0;
516} 544}
517 545
546static int polaris10_reset_to_default(struct pp_hwmgr *hwmgr)
547{
548 return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults);
549}
550
518/** 551/**
519* Initial switch from ARB F0->F1 552* Initial switch from ARB F0->F1
520* 553*
@@ -528,6 +561,21 @@ static int polaris10_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr)
528 MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1); 561 MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
529} 562}
530 563
564static int polaris10_force_switch_to_arbf0(struct pp_hwmgr *hwmgr)
565{
566 uint32_t tmp;
567
568 tmp = (cgs_read_ind_register(hwmgr->device,
569 CGS_IND_REG__SMC, ixSMC_SCRATCH9) &
570 0x0000ff00) >> 8;
571
572 if (tmp == MC_CG_ARB_FREQ_F0)
573 return 0;
574
575 return polaris10_copy_and_switch_arb_sets(hwmgr,
576 tmp, MC_CG_ARB_FREQ_F0);
577}
578
531static int polaris10_setup_default_pcie_table(struct pp_hwmgr *hwmgr) 579static int polaris10_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
532{ 580{
533 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); 581 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
@@ -1356,9 +1404,9 @@ static int polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
 		return result;
 	}
 
-	/* in order to prevent MC activity from stutter mode to push DPM up.
+	/* In order to prevent MC activity from stutter mode to push DPM up,
 	 * the UVD change complements this by putting the MCLK in
-	 * a higher state by default such that we are not effected by
+	 * a higher state by default such that we are not affected by
 	 * up threshold or and MCLK DPM latency.
 	 */
 	levels[0].ActivityLevel = 0x1f;
@@ -1425,7 +1473,7 @@ static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
1425 1473
1426 /* Get MinVoltage and Frequency from DPM0, 1474 /* Get MinVoltage and Frequency from DPM0,
1427 * already converted to SMC_UL */ 1475 * already converted to SMC_UL */
1428 sclk_frequency = data->dpm_table.sclk_table.dpm_levels[0].value; 1476 sclk_frequency = data->vbios_boot_state.sclk_bootup_value;
1429 result = polaris10_get_dependency_volt_by_clk(hwmgr, 1477 result = polaris10_get_dependency_volt_by_clk(hwmgr,
1430 table_info->vdd_dep_on_sclk, 1478 table_info->vdd_dep_on_sclk,
1431 sclk_frequency, 1479 sclk_frequency,
@@ -1461,8 +1509,7 @@ static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
 
 
 	/* Get MinVoltage and Frequency from DPM0, already converted to SMC_UL */
-	table->MemoryACPILevel.MclkFrequency =
-			data->dpm_table.mclk_table.dpm_levels[0].value;
+	table->MemoryACPILevel.MclkFrequency = data->vbios_boot_state.mclk_bootup_value;
 	result = polaris10_get_dependency_volt_by_clk(hwmgr,
 			table_info->vdd_dep_on_mclk,
 			table->MemoryACPILevel.MclkFrequency,
@@ -1780,7 +1827,7 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
 {
 	uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min;
 	struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
-	uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0;
+	uint8_t i, stretch_amount, volt_offset = 0;
 	struct phm_ppt_v1_information *table_info =
 			(struct phm_ppt_v1_information *)(hwmgr->pptable);
 	struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
@@ -1831,11 +1878,8 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
 
 	data->smc_state_table.LdoRefSel = (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 6;
 	/* Populate CKS Lookup Table */
-	if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
-		stretch_amount2 = 0;
-	else if (stretch_amount == 3 || stretch_amount == 4)
-		stretch_amount2 = 1;
-	else {
+	if (stretch_amount != 1 && stretch_amount != 2 && stretch_amount != 3 &&
+			stretch_amount != 4 && stretch_amount != 5) {
 		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
 				PHM_PlatformCaps_ClockStretcher);
 		PP_ASSERT_WITH_CODE(false,
@@ -1890,9 +1934,8 @@ static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr,
 	if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) {
 		config = VR_SVI2_PLANE_2;
 		table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
-	} else if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) {
-		config = VR_SMIO_PATTERN_2;
-		table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
+		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start +
+			offsetof(SMU74_SoftRegisters, AllowMvddSwitch), 0x1);
 	} else {
 		config = VR_STATIC_VOLTAGE;
 		table->VRConfig |= (config << VRCONF_MVDD_SHIFT);
@@ -2262,6 +2305,17 @@ static int polaris10_enable_ulv(struct pp_hwmgr *hwmgr)
2262 return 0; 2305 return 0;
2263} 2306}
2264 2307
2308static int polaris10_disable_ulv(struct pp_hwmgr *hwmgr)
2309{
2310 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
2311 struct polaris10_ulv_parm *ulv = &(data->ulv);
2312
2313 if (ulv->ulv_supported)
2314 return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableULV);
2315
2316 return 0;
2317}
2318
2265static int polaris10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) 2319static int polaris10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
2266{ 2320{
2267 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, 2321 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -2282,6 +2336,21 @@ static int polaris10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
2282 return 0; 2336 return 0;
2283} 2337}
2284 2338
2339static int polaris10_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
2340{
2341 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2342 PHM_PlatformCaps_SclkDeepSleep)) {
2343 if (smum_send_msg_to_smc(hwmgr->smumgr,
2344 PPSMC_MSG_MASTER_DeepSleep_OFF)) {
2345 PP_ASSERT_WITH_CODE(false,
2346 "Attempt to disable Master Deep Sleep switch failed!",
2347 return -1);
2348 }
2349 }
2350
2351 return 0;
2352}
2353
2285static int polaris10_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) 2354static int polaris10_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
2286{ 2355{
2287 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); 2356 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
@@ -2379,6 +2448,58 @@ static int polaris10_start_dpm(struct pp_hwmgr *hwmgr)
2379 return 0; 2448 return 0;
2380} 2449}
2381 2450
2451static int polaris10_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
2452{
2453 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
2454
2455 /* disable SCLK dpm */
2456 if (!data->sclk_dpm_key_disabled)
2457 PP_ASSERT_WITH_CODE(
2458 (smum_send_msg_to_smc(hwmgr->smumgr,
2459 PPSMC_MSG_DPM_Disable) == 0),
2460 "Failed to disable SCLK DPM!",
2461 return -1);
2462
2463 /* disable MCLK dpm */
2464 if (!data->mclk_dpm_key_disabled) {
2465 PP_ASSERT_WITH_CODE(
2466 (smum_send_msg_to_smc(hwmgr->smumgr,
2467 PPSMC_MSG_MCLKDPM_Disable) == 0),
2468 "Failed to disable MCLK DPM!",
2469 return -1);
2470 }
2471
2472 return 0;
2473}
2474
2475static int polaris10_stop_dpm(struct pp_hwmgr *hwmgr)
2476{
2477 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
2478
2479 /* disable general power management */
2480 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT,
2481 GLOBAL_PWRMGT_EN, 0);
2482 /* disable sclk deep sleep */
2483 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL,
2484 DYNAMIC_PM_EN, 0);
2485
2486 /* disable PCIE dpm */
2487 if (!data->pcie_dpm_key_disabled) {
2488 PP_ASSERT_WITH_CODE(
2489 (smum_send_msg_to_smc(hwmgr->smumgr,
2490 PPSMC_MSG_PCIeDPM_Disable) == 0),
2491 "Failed to disable pcie DPM during DPM Stop Function!",
2492 return -1);
2493 }
2494
2495 if (polaris10_disable_sclk_mclk_dpm(hwmgr)) {
2496 printk(KERN_ERR "Failed to disable Sclk DPM and Mclk DPM!");
2497 return -1;
2498 }
2499
2500 return 0;
2501}
2502
2382static void polaris10_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources) 2503static void polaris10_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
2383{ 2504{
2384 bool protection; 2505 bool protection;
@@ -2436,6 +2557,23 @@ static int polaris10_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
2436 return polaris10_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal); 2557 return polaris10_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
2437} 2558}
2438 2559
2560static int polaris10_disable_auto_throttle_source(struct pp_hwmgr *hwmgr,
2561 PHM_AutoThrottleSource source)
2562{
2563 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
2564
2565 if (data->active_auto_throttle_sources & (1 << source)) {
2566 data->active_auto_throttle_sources &= ~(1 << source);
2567 polaris10_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources);
2568 }
2569 return 0;
2570}
2571
2572static int polaris10_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr)
2573{
2574 return polaris10_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal);
2575}
2576
2439int polaris10_pcie_performance_request(struct pp_hwmgr *hwmgr) 2577int polaris10_pcie_performance_request(struct pp_hwmgr *hwmgr)
2440{ 2578{
2441 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); 2579 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
@@ -2530,6 +2668,10 @@ int polaris10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
2530 PP_ASSERT_WITH_CODE((0 == tmp_result), 2668 PP_ASSERT_WITH_CODE((0 == tmp_result),
2531 "Failed to enable deep sleep master switch!", result = tmp_result); 2669 "Failed to enable deep sleep master switch!", result = tmp_result);
2532 2670
2671 tmp_result = polaris10_enable_didt_config(hwmgr);
2672 PP_ASSERT_WITH_CODE((tmp_result == 0),
2673 "Failed to enable deep sleep master switch!", result = tmp_result);
2674
2533 tmp_result = polaris10_start_dpm(hwmgr); 2675 tmp_result = polaris10_start_dpm(hwmgr);
2534 PP_ASSERT_WITH_CODE((0 == tmp_result), 2676 PP_ASSERT_WITH_CODE((0 == tmp_result),
2535 "Failed to start DPM!", result = tmp_result); 2677 "Failed to start DPM!", result = tmp_result);
@@ -2559,8 +2701,60 @@ int polaris10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
2559 2701
2560int polaris10_disable_dpm_tasks(struct pp_hwmgr *hwmgr) 2702int polaris10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
2561{ 2703{
2704 int tmp_result, result = 0;
2562 2705
2563 return 0; 2706 tmp_result = (polaris10_is_dpm_running(hwmgr)) ? 0 : -1;
2707 PP_ASSERT_WITH_CODE(tmp_result == 0,
2708 "DPM is not running right now, no need to disable DPM!",
2709 return 0);
2710
2711 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2712 PHM_PlatformCaps_ThermalController))
2713 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
2714 GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1);
2715
2716 tmp_result = polaris10_disable_power_containment(hwmgr);
2717 PP_ASSERT_WITH_CODE((tmp_result == 0),
2718 "Failed to disable power containment!", result = tmp_result);
2719
2720 tmp_result = polaris10_disable_smc_cac(hwmgr);
2721 PP_ASSERT_WITH_CODE((tmp_result == 0),
2722 "Failed to disable SMC CAC!", result = tmp_result);
2723
2724 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
2725 CG_SPLL_SPREAD_SPECTRUM, SSEN, 0);
2726 PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC,
2727 GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0);
2728
2729 tmp_result = polaris10_disable_thermal_auto_throttle(hwmgr);
2730 PP_ASSERT_WITH_CODE((tmp_result == 0),
2731 "Failed to disable thermal auto throttle!", result = tmp_result);
2732
2733 tmp_result = polaris10_stop_dpm(hwmgr);
2734 PP_ASSERT_WITH_CODE((tmp_result == 0),
2735 "Failed to stop DPM!", result = tmp_result);
2736
2737 tmp_result = polaris10_disable_deep_sleep_master_switch(hwmgr);
2738 PP_ASSERT_WITH_CODE((tmp_result == 0),
2739 "Failed to disable deep sleep master switch!", result = tmp_result);
2740
2741 tmp_result = polaris10_disable_ulv(hwmgr);
2742 PP_ASSERT_WITH_CODE((tmp_result == 0),
2743 "Failed to disable ULV!", result = tmp_result);
2744
2745 tmp_result = polaris10_clear_voting_clients(hwmgr);
2746 PP_ASSERT_WITH_CODE((tmp_result == 0),
2747 "Failed to clear voting clients!", result = tmp_result);
2748
2749 tmp_result = polaris10_reset_to_default(hwmgr);
2750 PP_ASSERT_WITH_CODE((tmp_result == 0),
2751 "Failed to reset to default!", result = tmp_result);
2752
2753 tmp_result = polaris10_force_switch_to_arbf0(hwmgr);
2754 PP_ASSERT_WITH_CODE((tmp_result == 0),
2755 			"Failed to force switch to arbf0!", result = tmp_result);
2756
2757 return result;
2564} 2758}
2565 2759
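The new disable path is essentially the enable path run in reverse: thermal protection, power containment, SMC CAC, spread spectrum, thermal auto throttle, DPM stop, deep sleep master switch, ULV, voting clients, reset to defaults, and finally the forced switch to ARB F0. Each step records its error into `result` through PP_ASSERT_WITH_CODE but the sequence keeps going, so a partial teardown still runs as far as it can. A hedged sketch of that "record the failure, continue" shape, using an invented step-runner rather than the real functions:

/* Sketch: hypothetical teardown runner illustrating the error-accumulation
 * pattern of polaris10_disable_dpm_tasks(); the step table is invented. */
typedef int (*pp_step_fn)(void *ctx);

static int run_teardown(void *ctx, const pp_step_fn *steps, int count)
{
        int i, ret, result = 0;

        for (i = 0; i < count; i++) {
                ret = steps[i](ctx);
                if (ret)
                        result = ret;   /* remember a failure ... */
                /* ... but keep tearing down the remaining blocks anyway */
        }
        return result;
}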
2566int polaris10_reset_asic_tasks(struct pp_hwmgr *hwmgr) 2760int polaris10_reset_asic_tasks(struct pp_hwmgr *hwmgr)
@@ -2571,13 +2765,6 @@ int polaris10_reset_asic_tasks(struct pp_hwmgr *hwmgr)
2571 2765
2572int polaris10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) 2766int polaris10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
2573{ 2767{
2574 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
2575
2576 if (data->soft_pp_table) {
2577 kfree(data->soft_pp_table);
2578 data->soft_pp_table = NULL;
2579 }
2580
2581 return phm_hwmgr_backend_fini(hwmgr); 2768 return phm_hwmgr_backend_fini(hwmgr);
2582} 2769}
2583 2770
@@ -2624,17 +2811,22 @@ int polaris10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
2624 PHM_PlatformCaps_DynamicUVDState); 2811 PHM_PlatformCaps_DynamicUVDState);
2625 2812
2626 /* power tune caps Assume disabled */ 2813 /* power tune caps Assume disabled */
2627 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 2814 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2628 PHM_PlatformCaps_SQRamping); 2815 PHM_PlatformCaps_SQRamping);
2629 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 2816 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2630 PHM_PlatformCaps_DBRamping); 2817 PHM_PlatformCaps_DBRamping);
2631 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 2818 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2632 PHM_PlatformCaps_TDRamping); 2819 PHM_PlatformCaps_TDRamping);
2633 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 2820 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2634 PHM_PlatformCaps_TCPRamping); 2821 PHM_PlatformCaps_TCPRamping);
2635 2822
2636 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 2823 if (hwmgr->powercontainment_enabled)
2637 PHM_PlatformCaps_PowerContainment); 2824 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2825 PHM_PlatformCaps_PowerContainment);
2826 else
2827 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
2828 PHM_PlatformCaps_PowerContainment);
2829
2638 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 2830 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
2639 PHM_PlatformCaps_CAC); 2831 PHM_PlatformCaps_CAC);
2640 2832
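Power containment is now gated on hwmgr->powercontainment_enabled instead of being set unconditionally, while the SQ/DB/TD/TCP ramping caps flip from unset to set so the new DIDT code has something to act on. A minimal sketch of the "set or clear a capability from a boolean knob" helper that this hunk open-codes; phm_cap_set()/phm_cap_unset() are the existing helpers, the wrapper name is hypothetical:

/* Sketch: hypothetical wrapper around phm_cap_set()/phm_cap_unset(),
 * mirroring the powercontainment_enabled check above. */
static void phm_cap_update(uint32_t *caps, enum phm_platform_caps cap, bool enable)
{
        if (enable)
                phm_cap_set(caps, cap);
        else
                phm_cap_unset(caps, cap);
}

/* usage, matching the hunk:
 *   phm_cap_update(hwmgr->platform_descriptor.platformCaps,
 *                  PHM_PlatformCaps_PowerContainment,
 *                  hwmgr->powercontainment_enabled);
 */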
@@ -2706,12 +2898,12 @@ static int polaris10_get_evv_voltages(struct pp_hwmgr *hwmgr)
2706 } 2898 }
2707 } 2899 }
2708 2900
2709 2901 if (atomctrl_get_voltage_evv_on_sclk_ai(hwmgr,
2710 PP_ASSERT_WITH_CODE(0 == atomctrl_get_voltage_evv_on_sclk_ai(hwmgr, 2902 VOLTAGE_TYPE_VDDC,
2711 VOLTAGE_TYPE_VDDC, sclk, vv_id, &vddc), 2903 sclk, vv_id, &vddc) != 0) {
2712 			"Error retrieving EVV voltage value!", 2904 			printk(KERN_WARNING "failed to retrieve EVV voltage!\n");
2713 continue); 2905 continue;
2714 2906 }
2715 2907
2716 /* need to make sure vddc is less than 2v or else, it could burn the ASIC. 2908 /* need to make sure vddc is less than 2v or else, it could burn the ASIC.
2717 * real voltage level in unit of 0.01mv */ 2909 * real voltage level in unit of 0.01mv */
@@ -2968,13 +3160,19 @@ int polaris10_patch_voltage_workaround(struct pp_hwmgr *hwmgr)
2968 3160
2969int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr) 3161int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
2970{ 3162{
2971 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); 3163 struct polaris10_hwmgr *data;
2972 struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment; 3164 struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
2973 uint32_t temp_reg; 3165 uint32_t temp_reg;
2974 int result; 3166 int result;
2975 struct phm_ppt_v1_information *table_info = 3167 struct phm_ppt_v1_information *table_info =
2976 (struct phm_ppt_v1_information *)(hwmgr->pptable); 3168 (struct phm_ppt_v1_information *)(hwmgr->pptable);
2977 3169
3170 data = kzalloc(sizeof(struct polaris10_hwmgr), GFP_KERNEL);
3171 if (data == NULL)
3172 return -ENOMEM;
3173
3174 hwmgr->backend = data;
3175
2978 data->dll_default_on = false; 3176 data->dll_default_on = false;
2979 data->sram_end = SMC_RAM_END; 3177 data->sram_end = SMC_RAM_END;
2980 data->mclk_dpm0_activity_target = 0xa; 3178 data->mclk_dpm0_activity_target = 0xa;
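The backend private data is now allocated inside polaris10_hwmgr_backend_init() itself (the kzalloc/-ENOMEM block above) instead of in polaris10_hwmgr_init(); the hunk further down removes the old allocation from that init function. How the allocation is released is not visible here, since backend_fini above simply calls phm_hwmgr_backend_fini(). A short sketch of the alloc-and-attach idiom, assuming the usual <linux/slab.h> conventions:

/* Sketch of the alloc-and-attach step.  kzalloc(sizeof(*data), ...) is the
 * common kernel idiom; the patch spells out sizeof(struct polaris10_hwmgr),
 * which is equivalent. */
int example_backend_init(struct pp_hwmgr *hwmgr)
{
        struct polaris10_hwmgr *data;

        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;

        hwmgr->backend = data;   /* released on the fini/teardown path */
        /* ... fill in defaults as the real function does above ... */
        return 0;
}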
@@ -3063,7 +3261,7 @@ int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
3063 if (0 == result) { 3261 if (0 == result) {
3064 struct cgs_system_info sys_info = {0}; 3262 struct cgs_system_info sys_info = {0};
3065 3263
3066 data->is_tlu_enabled = 0; 3264 data->is_tlu_enabled = false;
3067 3265
3068 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = 3266 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
3069 POLARIS10_MAX_HARDWARE_POWERLEVELS; 3267 POLARIS10_MAX_HARDWARE_POWERLEVELS;
@@ -3148,7 +3346,7 @@ int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
3148 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO; 3346 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
3149 result = cgs_query_system_info(hwmgr->device, &sys_info); 3347 result = cgs_query_system_info(hwmgr->device, &sys_info);
3150 if (result) 3348 if (result)
3151 data->pcie_gen_cap = 0x30007; 3349 data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK;
3152 else 3350 else
3153 data->pcie_gen_cap = (uint32_t)sys_info.value; 3351 data->pcie_gen_cap = (uint32_t)sys_info.value;
3154 if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) 3352 if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
@@ -3157,7 +3355,7 @@ int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
3157 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW; 3355 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
3158 result = cgs_query_system_info(hwmgr->device, &sys_info); 3356 result = cgs_query_system_info(hwmgr->device, &sys_info);
3159 if (result) 3357 if (result)
3160 data->pcie_lane_cap = 0x2f0000; 3358 data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK;
3161 else 3359 else
3162 data->pcie_lane_cap = (uint32_t)sys_info.value; 3360 data->pcie_lane_cap = (uint32_t)sys_info.value;
3163 3361
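Both PCIe queries now fall back to named default masks (AMDGPU_DEFAULT_PCIE_GEN_MASK and AMDGPU_DEFAULT_PCIE_MLW_MASK) instead of the bare magic numbers 0x30007 and 0x2f0000 when cgs_query_system_info() fails. A sketch of that query-or-default pattern, with a hypothetical helper name and an assumed size field:

/* Sketch: hypothetical helper wrapping the query-or-default pattern used for
 * CGS_SYSTEM_INFO_PCIE_GEN_INFO and CGS_SYSTEM_INFO_PCIE_MLW above. */
static uint32_t cgs_query_u32_or(void *device, enum cgs_system_info_id id,
                                 uint32_t fallback)
{
        struct cgs_system_info sys_info = {0};

        sys_info.size = sizeof(sys_info);   /* assumption: struct carries its size */
        sys_info.info_id = id;
        if (cgs_query_system_info(device, &sys_info))
                return fallback;            /* query failed: use the default mask */

        return (uint32_t)sys_info.value;
}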
@@ -3446,6 +3644,7 @@ static int polaris10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
3446 hwmgr->platform_descriptor.platformCaps, 3644 hwmgr->platform_descriptor.platformCaps,
3447 PHM_PlatformCaps_DisableMclkSwitchingForFrameLock); 3645 PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
3448 3646
3647
3449 disable_mclk_switching = (1 < info.display_count) || 3648 disable_mclk_switching = (1 < info.display_count) ||
3450 disable_mclk_switching_for_frame_lock; 3649 disable_mclk_switching_for_frame_lock;
3451 3650
@@ -3950,8 +4149,8 @@ static int polaris10_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
3950 if ((0 == data->sclk_dpm_key_disabled) && 4149 if ((0 == data->sclk_dpm_key_disabled) &&
3951 (data->need_update_smu7_dpm_table & 4150 (data->need_update_smu7_dpm_table &
3952 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { 4151 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
3953 PP_ASSERT_WITH_CODE(true == polaris10_is_dpm_running(hwmgr), 4152 PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr),
3954 "Trying to freeze SCLK DPM when DPM is disabled", 4153 "Trying to freeze SCLK DPM when DPM is disabled",
3955 ); 4154 );
3956 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, 4155 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
3957 PPSMC_MSG_SCLKDPM_FreezeLevel), 4156 PPSMC_MSG_SCLKDPM_FreezeLevel),
@@ -3962,8 +4161,8 @@ static int polaris10_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
3962 if ((0 == data->mclk_dpm_key_disabled) && 4161 if ((0 == data->mclk_dpm_key_disabled) &&
3963 (data->need_update_smu7_dpm_table & 4162 (data->need_update_smu7_dpm_table &
3964 DPMTABLE_OD_UPDATE_MCLK)) { 4163 DPMTABLE_OD_UPDATE_MCLK)) {
3965 PP_ASSERT_WITH_CODE(true == polaris10_is_dpm_running(hwmgr), 4164 PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr),
3966 "Trying to freeze MCLK DPM when DPM is disabled", 4165 "Trying to freeze MCLK DPM when DPM is disabled",
3967 ); 4166 );
3968 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, 4167 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
3969 PPSMC_MSG_MCLKDPM_FreezeLevel), 4168 PPSMC_MSG_MCLKDPM_FreezeLevel),
@@ -4123,7 +4322,6 @@ static int polaris10_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
4123static int polaris10_trim_dpm_states(struct pp_hwmgr *hwmgr, 4322static int polaris10_trim_dpm_states(struct pp_hwmgr *hwmgr,
4124 const struct polaris10_power_state *polaris10_ps) 4323 const struct polaris10_power_state *polaris10_ps)
4125{ 4324{
4126 int result = 0;
4127 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); 4325 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
4128 uint32_t high_limit_count; 4326 uint32_t high_limit_count;
4129 4327
@@ -4143,7 +4341,7 @@ static int polaris10_trim_dpm_states(struct pp_hwmgr *hwmgr,
4143 polaris10_ps->performance_levels[0].memory_clock, 4341 polaris10_ps->performance_levels[0].memory_clock,
4144 polaris10_ps->performance_levels[high_limit_count].memory_clock); 4342 polaris10_ps->performance_levels[high_limit_count].memory_clock);
4145 4343
4146 return result; 4344 return 0;
4147} 4345}
4148 4346
4149static int polaris10_generate_dpm_level_enable_mask( 4347static int polaris10_generate_dpm_level_enable_mask(
@@ -4226,25 +4424,20 @@ int polaris10_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate)
4226 return polaris10_enable_disable_uvd_dpm(hwmgr, !bgate); 4424 return polaris10_enable_disable_uvd_dpm(hwmgr, !bgate);
4227} 4425}
4228 4426
4229static int polaris10_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input) 4427int polaris10_update_vce_dpm(struct pp_hwmgr *hwmgr, bool bgate)
4230{ 4428{
4231 const struct phm_set_power_state_input *states =
4232 (const struct phm_set_power_state_input *)input;
4233 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); 4429 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
4234 const struct polaris10_power_state *polaris10_nps =
4235 cast_const_phw_polaris10_power_state(states->pnew_state);
4236 const struct polaris10_power_state *polaris10_cps =
4237 cast_const_phw_polaris10_power_state(states->pcurrent_state);
4238
4239 uint32_t mm_boot_level_offset, mm_boot_level_value; 4430 uint32_t mm_boot_level_offset, mm_boot_level_value;
4240 struct phm_ppt_v1_information *table_info = 4431 struct phm_ppt_v1_information *table_info =
4241 (struct phm_ppt_v1_information *)(hwmgr->pptable); 4432 (struct phm_ppt_v1_information *)(hwmgr->pptable);
4242 4433
4243 if (polaris10_nps->vce_clks.evclk > 0 && 4434 if (!bgate) {
4244 (polaris10_cps == NULL || polaris10_cps->vce_clks.evclk == 0)) { 4435 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4245 4436 PHM_PlatformCaps_StablePState))
4246 data->smc_state_table.VceBootLevel = 4437 data->smc_state_table.VceBootLevel =
4247 (uint8_t) (table_info->mm_dep_table->count - 1); 4438 (uint8_t) (table_info->mm_dep_table->count - 1);
4439 else
4440 data->smc_state_table.VceBootLevel = 0;
4248 4441
4249 mm_boot_level_offset = data->dpm_table_start + 4442 mm_boot_level_offset = data->dpm_table_start +
4250 offsetof(SMU74_Discrete_DpmTable, VceBootLevel); 4443 offsetof(SMU74_Discrete_DpmTable, VceBootLevel);
@@ -4257,18 +4450,14 @@ static int polaris10_update_vce_dpm(struct pp_hwmgr *hwmgr, const void *input)
4257 cgs_write_ind_register(hwmgr->device, 4450 cgs_write_ind_register(hwmgr->device,
4258 CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value); 4451 CGS_IND_REG__SMC, mm_boot_level_offset, mm_boot_level_value);
4259 4452
4260 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState)) { 4453 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
4261 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, 4454 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
4262 PPSMC_MSG_VCEDPM_SetEnabledMask, 4455 PPSMC_MSG_VCEDPM_SetEnabledMask,
4263 (uint32_t)1 << data->smc_state_table.VceBootLevel); 4456 (uint32_t)1 << data->smc_state_table.VceBootLevel);
4264
4265 polaris10_enable_disable_vce_dpm(hwmgr, true);
4266 } else if (polaris10_nps->vce_clks.evclk == 0 &&
4267 polaris10_cps != NULL &&
4268 polaris10_cps->vce_clks.evclk > 0)
4269 polaris10_enable_disable_vce_dpm(hwmgr, false);
4270 } 4457 }
4271 4458
4459 polaris10_enable_disable_vce_dpm(hwmgr, !bgate);
4460
4272 return 0; 4461 return 0;
4273} 4462}
4274 4463
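polaris10_update_vce_dpm() now keys off a simple bgate flag instead of comparing VCE clocks between the current and requested power states: when un-gating, it picks a boot level (top of the mm dependency table under StablePState, level 0 otherwise), writes it into the SMU DPM table, optionally restricts the enabled-level mask, and in both directions finishes by toggling VCE DPM. A condensed sketch of that control flow; the helper names stand in for the cgs/smum calls and are not real functions from this patch:

/* Sketch: condensed control flow of the reworked update_vce_dpm(); the
 * helpers below are placeholders for the SMC register writes and messages. */
int example_update_vce_dpm(struct pp_hwmgr *hwmgr, bool bgate)
{
        if (!bgate) {
                uint8_t boot_level = stable_pstate_enabled(hwmgr)
                        ? highest_mm_dep_level(hwmgr)   /* mm_dep_table->count - 1 */
                        : 0;

                write_vce_boot_level_to_smu(hwmgr, boot_level);
                if (stable_pstate_enabled(hwmgr))
                        set_vce_enabled_mask(hwmgr, 1u << boot_level);
        }

        /* gate => disable VCE DPM, un-gate => enable it */
        return polaris10_enable_disable_vce_dpm(hwmgr, !bgate);
}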
@@ -4353,8 +4542,8 @@ static int polaris10_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
4353 (data->need_update_smu7_dpm_table & 4542 (data->need_update_smu7_dpm_table &
4354 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { 4543 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
4355 4544
4356 PP_ASSERT_WITH_CODE(true == polaris10_is_dpm_running(hwmgr), 4545 PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr),
4357 "Trying to Unfreeze SCLK DPM when DPM is disabled", 4546 "Trying to Unfreeze SCLK DPM when DPM is disabled",
4358 ); 4547 );
4359 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, 4548 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
4360 PPSMC_MSG_SCLKDPM_UnfreezeLevel), 4549 PPSMC_MSG_SCLKDPM_UnfreezeLevel),
@@ -4365,8 +4554,8 @@ static int polaris10_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
4365 if ((0 == data->mclk_dpm_key_disabled) && 4554 if ((0 == data->mclk_dpm_key_disabled) &&
4366 (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { 4555 (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
4367 4556
4368 PP_ASSERT_WITH_CODE(true == polaris10_is_dpm_running(hwmgr), 4557 PP_ASSERT_WITH_CODE(polaris10_is_dpm_running(hwmgr),
4369 "Trying to Unfreeze MCLK DPM when DPM is disabled", 4558 "Trying to Unfreeze MCLK DPM when DPM is disabled",
4370 ); 4559 );
4371 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr, 4560 PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr->smumgr,
4372 PPSMC_MSG_SCLKDPM_UnfreezeLevel), 4561 PPSMC_MSG_SCLKDPM_UnfreezeLevel),
@@ -4422,6 +4611,8 @@ static int polaris10_notify_smc_display(struct pp_hwmgr *hwmgr)
4422 return (smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL; 4611 return (smum_send_msg_to_smc(hwmgr->smumgr, (PPSMC_Msg)PPSMC_HasDisplay) == 0) ? 0 : -EINVAL;
4423} 4612}
4424 4613
4614
4615
4425static int polaris10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input) 4616static int polaris10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
4426{ 4617{
4427 int tmp_result, result = 0; 4618 int tmp_result, result = 0;
@@ -4455,11 +4646,6 @@ static int polaris10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *i
4455 "Failed to generate DPM level enabled mask!", 4646 "Failed to generate DPM level enabled mask!",
4456 result = tmp_result); 4647 result = tmp_result);
4457 4648
4458 tmp_result = polaris10_update_vce_dpm(hwmgr, input);
4459 PP_ASSERT_WITH_CODE((0 == tmp_result),
4460 "Failed to update VCE DPM!",
4461 result = tmp_result);
4462
4463 tmp_result = polaris10_update_sclk_threshold(hwmgr); 4649 tmp_result = polaris10_update_sclk_threshold(hwmgr);
4464 PP_ASSERT_WITH_CODE((0 == tmp_result), 4650 PP_ASSERT_WITH_CODE((0 == tmp_result),
4465 "Failed to update SCLK threshold!", 4651 "Failed to update SCLK threshold!",
@@ -4530,6 +4716,7 @@ int polaris10_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwm
4530 if (num_active_displays > 1) /* to do && (pHwMgr->pPECI->displayConfiguration.bMultiMonitorInSync != TRUE)) */ 4716 if (num_active_displays > 1) /* to do && (pHwMgr->pPECI->displayConfiguration.bMultiMonitorInSync != TRUE)) */
4531 polaris10_notify_smc_display_change(hwmgr, false); 4717 polaris10_notify_smc_display_change(hwmgr, false);
4532 4718
4719
4533 return 0; 4720 return 0;
4534} 4721}
4535 4722
@@ -4579,6 +4766,7 @@ int polaris10_program_display_gap(struct pp_hwmgr *hwmgr)
4579 4766
4580 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU74_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us)); 4767 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + offsetof(SMU74_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));
4581 4768
4769
4582 return 0; 4770 return 0;
4583} 4771}
4584 4772
@@ -4820,42 +5008,6 @@ int polaris10_setup_asic_task(struct pp_hwmgr *hwmgr)
4820 return result; 5008 return result;
4821} 5009}
4822 5010
4823static int polaris10_get_pp_table(struct pp_hwmgr *hwmgr, char **table)
4824{
4825 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
4826
4827 if (!data->soft_pp_table) {
4828 data->soft_pp_table = kmemdup(hwmgr->soft_pp_table,
4829 hwmgr->soft_pp_table_size,
4830 GFP_KERNEL);
4831 if (!data->soft_pp_table)
4832 return -ENOMEM;
4833 }
4834
4835 *table = (char *)&data->soft_pp_table;
4836
4837 return hwmgr->soft_pp_table_size;
4838}
4839
4840static int polaris10_set_pp_table(struct pp_hwmgr *hwmgr, const char *buf, size_t size)
4841{
4842 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
4843
4844 if (!data->soft_pp_table) {
4845 data->soft_pp_table = kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL);
4846 if (!data->soft_pp_table)
4847 return -ENOMEM;
4848 }
4849
4850 memcpy(data->soft_pp_table, buf, size);
4851
4852 hwmgr->soft_pp_table = data->soft_pp_table;
4853
4854 /* TODO: re-init powerplay to implement modified pptable */
4855
4856 return 0;
4857}
4858
4859static int polaris10_force_clock_level(struct pp_hwmgr *hwmgr, 5011static int polaris10_force_clock_level(struct pp_hwmgr *hwmgr,
4860 enum pp_clock_type type, uint32_t mask) 5012 enum pp_clock_type type, uint32_t mask)
4861{ 5013{
@@ -4998,6 +5150,89 @@ static int polaris10_get_fan_control_mode(struct pp_hwmgr *hwmgr)
4998 CG_FDO_CTRL2, FDO_PWM_MODE); 5150 CG_FDO_CTRL2, FDO_PWM_MODE);
4999} 5151}
5000 5152
5153static int polaris10_get_sclk_od(struct pp_hwmgr *hwmgr)
5154{
5155 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
5156 struct polaris10_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
5157 struct polaris10_single_dpm_table *golden_sclk_table =
5158 &(data->golden_dpm_table.sclk_table);
5159 int value;
5160
5161 value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
5162 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
5163 100 /
5164 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
5165
5166 return value;
5167}
5168
5169static int polaris10_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
5170{
5171 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
5172 struct polaris10_single_dpm_table *golden_sclk_table =
5173 &(data->golden_dpm_table.sclk_table);
5174 struct pp_power_state *ps;
5175 struct polaris10_power_state *polaris10_ps;
5176
5177 if (value > 20)
5178 value = 20;
5179
5180 ps = hwmgr->request_ps;
5181
5182 if (ps == NULL)
5183 return -EINVAL;
5184
5185 polaris10_ps = cast_phw_polaris10_power_state(&ps->hardware);
5186
5187 polaris10_ps->performance_levels[polaris10_ps->performance_level_count - 1].engine_clock =
5188 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
5189 value / 100 +
5190 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
5191
5192 return 0;
5193}
5194
5195static int polaris10_get_mclk_od(struct pp_hwmgr *hwmgr)
5196{
5197 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
5198 struct polaris10_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
5199 struct polaris10_single_dpm_table *golden_mclk_table =
5200 &(data->golden_dpm_table.mclk_table);
5201 int value;
5202
5203 value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
5204 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
5205 100 /
5206 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
5207
5208 return value;
5209}
5210
5211static int polaris10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
5212{
5213 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
5214 struct polaris10_single_dpm_table *golden_mclk_table =
5215 &(data->golden_dpm_table.mclk_table);
5216 struct pp_power_state *ps;
5217 struct polaris10_power_state *polaris10_ps;
5218
5219 if (value > 20)
5220 value = 20;
5221
5222 ps = hwmgr->request_ps;
5223
5224 if (ps == NULL)
5225 return -EINVAL;
5226
5227 polaris10_ps = cast_phw_polaris10_power_state(&ps->hardware);
5228
5229 polaris10_ps->performance_levels[polaris10_ps->performance_level_count - 1].memory_clock =
5230 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
5231 value / 100 +
5232 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
5233
5234 return 0;
5235}
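The new sclk/mclk overdrive callbacks express overclock as a percentage over the highest "golden" DPM level: get_*_od returns (current_top - golden_top) * 100 / golden_top, and set_*_od (clamped to 20%) rewrites the requested state's top level to golden_top + golden_top * value / 100. A worked example with an assumed golden top engine clock of 126600 (in the usual 10 kHz units, roughly 1266 MHz); the figure is an assumption, not a value from the patch:

#include <stdint.h>
#include <stdio.h>

/* Worked example of the overdrive percentage math in
 * polaris10_get/set_sclk_od(); 126600 is an assumed golden value. */
int main(void)
{
        uint32_t golden_top = 126600;
        uint32_t od_percent = 5;                 /* e.g. set_sclk_od(hwmgr, 5) */

        /* set path: new top-level clock */
        uint32_t new_top = golden_top * od_percent / 100 + golden_top;   /* 132930 */

        /* get path: recover the percentage from the programmed clock */
        uint32_t readback = (new_top - golden_top) * 100 / golden_top;   /* 5 */

        printf("new top %u (10 kHz), od %u%%\n", new_top, readback);
        return 0;
}

Because the arithmetic is all integer, a requested percentage can read back one lower after truncation when golden_top is not a multiple of 100.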
5001static const struct pp_hwmgr_func polaris10_hwmgr_funcs = { 5236static const struct pp_hwmgr_func polaris10_hwmgr_funcs = {
5002 .backend_init = &polaris10_hwmgr_backend_init, 5237 .backend_init = &polaris10_hwmgr_backend_init,
5003 .backend_fini = &polaris10_hwmgr_backend_fini, 5238 .backend_fini = &polaris10_hwmgr_backend_fini,
@@ -5036,22 +5271,17 @@ static const struct pp_hwmgr_func polaris10_hwmgr_funcs = {
5036 .check_states_equal = polaris10_check_states_equal, 5271 .check_states_equal = polaris10_check_states_equal,
5037 .set_fan_control_mode = polaris10_set_fan_control_mode, 5272 .set_fan_control_mode = polaris10_set_fan_control_mode,
5038 .get_fan_control_mode = polaris10_get_fan_control_mode, 5273 .get_fan_control_mode = polaris10_get_fan_control_mode,
5039 .get_pp_table = polaris10_get_pp_table,
5040 .set_pp_table = polaris10_set_pp_table,
5041 .force_clock_level = polaris10_force_clock_level, 5274 .force_clock_level = polaris10_force_clock_level,
5042 .print_clock_levels = polaris10_print_clock_levels, 5275 .print_clock_levels = polaris10_print_clock_levels,
5043 .enable_per_cu_power_gating = polaris10_phm_enable_per_cu_power_gating, 5276 .enable_per_cu_power_gating = polaris10_phm_enable_per_cu_power_gating,
5277 .get_sclk_od = polaris10_get_sclk_od,
5278 .set_sclk_od = polaris10_set_sclk_od,
5279 .get_mclk_od = polaris10_get_mclk_od,
5280 .set_mclk_od = polaris10_set_mclk_od,
5044}; 5281};
5045 5282
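The four new callbacks are only reachable through the pp_hwmgr_func table, so a caller in the powerplay layer would dispatch through hwmgr->hwmgr_func rather than calling the polaris10_* symbols directly. A hypothetical caller-side sketch; the wrapper name and NULL checks are assumptions, not code from this patch:

/* Sketch: hypothetical dispatch through the hwmgr function table; only the
 * ->get_sclk_od member added above is assumed to exist. */
static int pp_get_sclk_od_example(struct pp_hwmgr *hwmgr)
{
        if (!hwmgr || !hwmgr->hwmgr_func || !hwmgr->hwmgr_func->get_sclk_od)
                return -EINVAL;

        return hwmgr->hwmgr_func->get_sclk_od(hwmgr);
}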
5046int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr) 5283int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr)
5047{ 5284{
5048 struct polaris10_hwmgr *data;
5049
5050 data = kzalloc (sizeof(struct polaris10_hwmgr), GFP_KERNEL);
5051 if (data == NULL)
5052 return -ENOMEM;
5053
5054 hwmgr->backend = data;
5055 hwmgr->hwmgr_func = &polaris10_hwmgr_funcs; 5285 hwmgr->hwmgr_func = &polaris10_hwmgr_funcs;
5056 hwmgr->pptable_func = &tonga_pptable_funcs; 5286 hwmgr->pptable_func = &tonga_pptable_funcs;
5057 pp_polaris10_thermal_initialize(hwmgr); 5287 pp_polaris10_thermal_initialize(hwmgr);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h
index afc3434822d1..33c33947e827 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h
@@ -309,10 +309,6 @@ struct polaris10_hwmgr {
309 uint32_t up_hyst; 309 uint32_t up_hyst;
310 uint32_t disable_dpm_mask; 310 uint32_t disable_dpm_mask;
311 bool apply_optimized_settings; 311 bool apply_optimized_settings;
312
313 /* soft pptable for re-uploading into smu */
314 void *soft_pp_table;
315
316 uint32_t avfs_vdroop_override_setting; 312 uint32_t avfs_vdroop_override_setting;
317 bool apply_avfs_cks_off_voltage; 313 bool apply_avfs_cks_off_voltage;
318 uint32_t frame_time_x2; 314 uint32_t frame_time_x2;
@@ -356,6 +352,6 @@ int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr);
356int polaris10_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate); 352int polaris10_update_uvd_dpm(struct pp_hwmgr *hwmgr, bool bgate);
357int polaris10_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate); 353int polaris10_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate);
358int polaris10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable); 354int polaris10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable);
359 355int polaris10_update_vce_dpm(struct pp_hwmgr *hwmgr, bool bgate);
360#endif 356#endif
361 357
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c
index ae96f14b827c..b9cb240a135d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c
@@ -28,10 +28,360 @@
28#include "polaris10_smumgr.h" 28#include "polaris10_smumgr.h"
29#include "smu74_discrete.h" 29#include "smu74_discrete.h"
30#include "pp_debug.h" 30#include "pp_debug.h"
31#include "gca/gfx_8_0_d.h"
32#include "gca/gfx_8_0_sh_mask.h"
33#include "oss/oss_3_0_sh_mask.h"
31 34
32#define VOLTAGE_SCALE 4 35#define VOLTAGE_SCALE 4
33#define POWERTUNE_DEFAULT_SET_MAX 1 36#define POWERTUNE_DEFAULT_SET_MAX 1
34 37
38uint32_t DIDTBlock_Info = SQ_IR_MASK | TCP_IR_MASK | TD_PCC_MASK;
39
40struct polaris10_pt_config_reg GCCACConfig_Polaris10[] = {
41/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
42 * Offset Mask Shift Value Type
43 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
44 */
45 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00060013, POLARIS10_CONFIGREG_GC_CAC_IND },
46 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00860013, POLARIS10_CONFIGREG_GC_CAC_IND },
47 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01060013, POLARIS10_CONFIGREG_GC_CAC_IND },
48 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01860013, POLARIS10_CONFIGREG_GC_CAC_IND },
49 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02060013, POLARIS10_CONFIGREG_GC_CAC_IND },
50 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02860013, POLARIS10_CONFIGREG_GC_CAC_IND },
51 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03060013, POLARIS10_CONFIGREG_GC_CAC_IND },
52 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03860013, POLARIS10_CONFIGREG_GC_CAC_IND },
53 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x04060013, POLARIS10_CONFIGREG_GC_CAC_IND },
54
55 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x000E0013, POLARIS10_CONFIGREG_GC_CAC_IND },
56 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x008E0013, POLARIS10_CONFIGREG_GC_CAC_IND },
57 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x010E0013, POLARIS10_CONFIGREG_GC_CAC_IND },
58 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x018E0013, POLARIS10_CONFIGREG_GC_CAC_IND },
59 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x020E0013, POLARIS10_CONFIGREG_GC_CAC_IND },
60
61 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00100013, POLARIS10_CONFIGREG_GC_CAC_IND },
62 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00900013, POLARIS10_CONFIGREG_GC_CAC_IND },
63 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01100013, POLARIS10_CONFIGREG_GC_CAC_IND },
64 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01900013, POLARIS10_CONFIGREG_GC_CAC_IND },
65 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02100013, POLARIS10_CONFIGREG_GC_CAC_IND },
66 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02900013, POLARIS10_CONFIGREG_GC_CAC_IND },
67
68 { 0xFFFFFFFF }
69};
70
71struct polaris10_pt_config_reg GCCACConfig_Polaris11[] = {
72/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
73 * Offset Mask Shift Value Type
74 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
75 */
76 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00060011, POLARIS10_CONFIGREG_GC_CAC_IND },
77 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00860011, POLARIS10_CONFIGREG_GC_CAC_IND },
78 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01060011, POLARIS10_CONFIGREG_GC_CAC_IND },
79 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01860011, POLARIS10_CONFIGREG_GC_CAC_IND },
80 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02060011, POLARIS10_CONFIGREG_GC_CAC_IND },
81 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02860011, POLARIS10_CONFIGREG_GC_CAC_IND },
82 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03060011, POLARIS10_CONFIGREG_GC_CAC_IND },
83 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x03860011, POLARIS10_CONFIGREG_GC_CAC_IND },
84 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x04060011, POLARIS10_CONFIGREG_GC_CAC_IND },
85
86 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x000E0011, POLARIS10_CONFIGREG_GC_CAC_IND },
87 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x008E0011, POLARIS10_CONFIGREG_GC_CAC_IND },
88 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x010E0011, POLARIS10_CONFIGREG_GC_CAC_IND },
89 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x018E0011, POLARIS10_CONFIGREG_GC_CAC_IND },
90 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x020E0011, POLARIS10_CONFIGREG_GC_CAC_IND },
91
92 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00100011, POLARIS10_CONFIGREG_GC_CAC_IND },
93 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x00900011, POLARIS10_CONFIGREG_GC_CAC_IND },
94 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01100011, POLARIS10_CONFIGREG_GC_CAC_IND },
95 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x01900011, POLARIS10_CONFIGREG_GC_CAC_IND },
96 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02100011, POLARIS10_CONFIGREG_GC_CAC_IND },
97 { ixGC_CAC_CNTL, 0xFFFFFFFF, 0, 0x02900011, POLARIS10_CONFIGREG_GC_CAC_IND },
98
99 { 0xFFFFFFFF }
100};
101
102struct polaris10_pt_config_reg DIDTConfig_Polaris10[] = {
103/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
104 * Offset Mask Shift Value Type
105 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
106 */
107 { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT, 0x0073, POLARIS10_CONFIGREG_DIDT_IND },
108 { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT, 0x00ab, POLARIS10_CONFIGREG_DIDT_IND },
109 { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT, 0x0084, POLARIS10_CONFIGREG_DIDT_IND },
110 { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT, 0x005a, POLARIS10_CONFIGREG_DIDT_IND },
111
112 { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT, 0x0067, POLARIS10_CONFIGREG_DIDT_IND },
113 { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT, 0x0084, POLARIS10_CONFIGREG_DIDT_IND },
114 { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT, 0x0027, POLARIS10_CONFIGREG_DIDT_IND },
115 { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT, 0x0046, POLARIS10_CONFIGREG_DIDT_IND },
116
117 { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT, 0x00aa, POLARIS10_CONFIGREG_DIDT_IND },
118 { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
119 { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
120 { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
121
122 { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
123 { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
124
125 { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
126 { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
127
128 { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, POLARIS10_CONFIGREG_DIDT_IND },
129 { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
130 { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, POLARIS10_CONFIGREG_DIDT_IND },
131 { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
132 { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
133 { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
134
135 { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
136 { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
137 { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
138 { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, POLARIS10_CONFIGREG_DIDT_IND },
139 { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
140
141 { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
142 { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, POLARIS10_CONFIGREG_DIDT_IND },
143 { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, POLARIS10_CONFIGREG_DIDT_IND },
144 { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
145
146 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
147 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
148 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
149 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
150 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
151 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
152 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
153 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
154
155 { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, POLARIS10_CONFIGREG_DIDT_IND },
156 { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
157 { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, POLARIS10_CONFIGREG_DIDT_IND },
158 { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, POLARIS10_CONFIGREG_DIDT_IND },
159
160 { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, POLARIS10_CONFIGREG_DIDT_IND },
161 { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, POLARIS10_CONFIGREG_DIDT_IND },
162 { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
163 { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
164
165 { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
166 { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
167
168 { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
169 { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, POLARIS10_CONFIGREG_DIDT_IND },
170
171 { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, POLARIS10_CONFIGREG_DIDT_IND },
172 { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_0_MASK, DIDT_TD_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
173 { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, POLARIS10_CONFIGREG_DIDT_IND },
174 { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_1_MASK, DIDT_TD_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
175 { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
176 { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_2_MASK, DIDT_TD_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
177
178 { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
179 { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
180 { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
181 { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, POLARIS10_CONFIGREG_DIDT_IND },
182 { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__UNUSED_0_MASK, DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
183
184 { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
185 { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, POLARIS10_CONFIGREG_DIDT_IND },
186 { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, POLARIS10_CONFIGREG_DIDT_IND },
187 { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__UNUSED_0_MASK, DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
188
189 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
190 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__USE_REF_CLOCK_MASK, DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
191 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
192 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
193 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
194 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0009, POLARIS10_CONFIGREG_DIDT_IND },
195 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0009, POLARIS10_CONFIGREG_DIDT_IND },
196 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__UNUSED_0_MASK, DIDT_TD_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
197
198 { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT, 0x0004, POLARIS10_CONFIGREG_DIDT_IND },
199 { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT, 0x0037, POLARIS10_CONFIGREG_DIDT_IND },
200 { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
201 { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT, 0x00ff, POLARIS10_CONFIGREG_DIDT_IND },
202
203 { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT, 0x0054, POLARIS10_CONFIGREG_DIDT_IND },
204 { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
205 { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
206 { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
207
208 { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
209 { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
210
211 { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__UNUSED_0_MASK, DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
212 { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
213
214 { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND },
215 { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_0_MASK, DIDT_TCP_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
216 { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x0032, POLARIS10_CONFIGREG_DIDT_IND },
217 { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_1_MASK, DIDT_TCP_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
218 { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
219 { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_2_MASK, DIDT_TCP_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
220
221 { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
222 { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
223 { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
224 { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, POLARIS10_CONFIGREG_DIDT_IND },
225 { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__UNUSED_0_MASK, DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
226
227 { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
228 { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND },
229 { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND },
230 { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK, DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
231
232 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
233 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK, DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
234 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
235 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
236 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
237 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
238 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
239 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__UNUSED_0_MASK, DIDT_TCP_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
240
241 { 0xFFFFFFFF }
242};
243
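Each polaris10_pt_config_reg table is terminated by an entry whose offset is 0xFFFFFFFF, and every row carries the mask, shift, value and register-space type needed to program one field. A hedged sketch of how such a table is typically walked with a read-modify-write per row; the field names follow the Offset/Mask/Shift/Value/Type columns in the header comment, and reg_read()/reg_write() are placeholders for the per-type (DIDT_IND, GC_CAC_IND, ...) indirect-register accessors rather than functions from this patch:

/* Sketch: walk a 0xFFFFFFFF-terminated polaris10_pt_config_reg table and
 * program each field with a read-modify-write. */
static void program_pt_config_table(struct pp_hwmgr *hwmgr,
                                    const struct polaris10_pt_config_reg *cfg)
{
        for (; cfg->offset != 0xFFFFFFFF; cfg++) {
                uint32_t reg = reg_read(hwmgr, cfg->type, cfg->offset);

                reg &= ~cfg->mask;                               /* clear the field */
                reg |= (cfg->value << cfg->shift) & cfg->mask;   /* insert new value */
                reg_write(hwmgr, cfg->type, cfg->offset, reg);
        }
}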
244struct polaris10_pt_config_reg DIDTConfig_Polaris11[] = {
245/* ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
246 * Offset Mask Shift Value Type
247 * ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
248 */
249 { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT0_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT0__SHIFT, 0x0073, POLARIS10_CONFIGREG_DIDT_IND },
250 { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT1_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT1__SHIFT, 0x00ab, POLARIS10_CONFIGREG_DIDT_IND },
251 { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT2_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT2__SHIFT, 0x0084, POLARIS10_CONFIGREG_DIDT_IND },
252 { ixDIDT_SQ_WEIGHT0_3, DIDT_SQ_WEIGHT0_3__WEIGHT3_MASK, DIDT_SQ_WEIGHT0_3__WEIGHT3__SHIFT, 0x005a, POLARIS10_CONFIGREG_DIDT_IND },
253
254 { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT4_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT4__SHIFT, 0x0067, POLARIS10_CONFIGREG_DIDT_IND },
255 { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT5_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT5__SHIFT, 0x0084, POLARIS10_CONFIGREG_DIDT_IND },
256 { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT6_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT6__SHIFT, 0x0027, POLARIS10_CONFIGREG_DIDT_IND },
257 { ixDIDT_SQ_WEIGHT4_7, DIDT_SQ_WEIGHT4_7__WEIGHT7_MASK, DIDT_SQ_WEIGHT4_7__WEIGHT7__SHIFT, 0x0046, POLARIS10_CONFIGREG_DIDT_IND },
258
259 { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT8_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT8__SHIFT, 0x00aa, POLARIS10_CONFIGREG_DIDT_IND },
260 { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT9_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT9__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
261 { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT10_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT10__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
262 { ixDIDT_SQ_WEIGHT8_11, DIDT_SQ_WEIGHT8_11__WEIGHT11_MASK, DIDT_SQ_WEIGHT8_11__WEIGHT11__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
263
264 { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MIN_POWER_MASK, DIDT_SQ_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
265 { ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
266
267 { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
268 { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
269
270 { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, POLARIS10_CONFIGREG_DIDT_IND },
271 { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
272 { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, POLARIS10_CONFIGREG_DIDT_IND },
273 { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
274 { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
275 { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
276
277 { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
278 { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
279 { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
280 { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, POLARIS10_CONFIGREG_DIDT_IND },
281 { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
282
283 { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
284 { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, POLARIS10_CONFIGREG_DIDT_IND },
285 { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, POLARIS10_CONFIGREG_DIDT_IND },
286 { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
287
288 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
289 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
290 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
291 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
292 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
293 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
294 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
295 { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
296
297 { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, POLARIS10_CONFIGREG_DIDT_IND },
298 { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
299 { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, POLARIS10_CONFIGREG_DIDT_IND },
300 { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, POLARIS10_CONFIGREG_DIDT_IND },
301
302 { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, POLARIS10_CONFIGREG_DIDT_IND },
303 { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, POLARIS10_CONFIGREG_DIDT_IND },
304 { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
305 { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
306
307 { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
308 { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
309
310 { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
311 { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, POLARIS10_CONFIGREG_DIDT_IND },
312
313 { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, POLARIS10_CONFIGREG_DIDT_IND },
314 { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_0_MASK, DIDT_TD_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
315 { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TD_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x000f, POLARIS10_CONFIGREG_DIDT_IND },
316 { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_1_MASK, DIDT_TD_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
317 { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TD_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
318 { ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__UNUSED_2_MASK, DIDT_TD_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
319
320 { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
321 { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
322 { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
323 { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, POLARIS10_CONFIGREG_DIDT_IND },
324 { ixDIDT_TD_STALL_CTRL, DIDT_TD_STALL_CTRL__UNUSED_0_MASK, DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
325
326 { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
327 { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x0dde, POLARIS10_CONFIGREG_DIDT_IND },
328 { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x0dde, POLARIS10_CONFIGREG_DIDT_IND },
329 { ixDIDT_TD_TUNING_CTRL, DIDT_TD_TUNING_CTRL__UNUSED_0_MASK, DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
330
331 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
332 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__USE_REF_CLOCK_MASK, DIDT_TD_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
333 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__PHASE_OFFSET_MASK, DIDT_TD_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
334 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
335 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
336 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0008, POLARIS10_CONFIGREG_DIDT_IND },
337 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0008, POLARIS10_CONFIGREG_DIDT_IND },
338 { ixDIDT_TD_CTRL0, DIDT_TD_CTRL0__UNUSED_0_MASK, DIDT_TD_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
339
340 { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT0_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT0__SHIFT, 0x0004, POLARIS10_CONFIGREG_DIDT_IND },
341 { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT1_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT1__SHIFT, 0x0037, POLARIS10_CONFIGREG_DIDT_IND },
342 { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT2_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT2__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
343 { ixDIDT_TCP_WEIGHT0_3, DIDT_TCP_WEIGHT0_3__WEIGHT3_MASK, DIDT_TCP_WEIGHT0_3__WEIGHT3__SHIFT, 0x00ff, POLARIS10_CONFIGREG_DIDT_IND },
344
345 { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT4_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT4__SHIFT, 0x0054, POLARIS10_CONFIGREG_DIDT_IND },
346 { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT5_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT5__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
347 { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT6_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
348 { ixDIDT_TCP_WEIGHT4_7, DIDT_TCP_WEIGHT4_7__WEIGHT7_MASK, DIDT_TCP_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
349
350 { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MIN_POWER_MASK, DIDT_TCP_CTRL1__MIN_POWER__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
351 { ixDIDT_TCP_CTRL1, DIDT_TCP_CTRL1__MAX_POWER_MASK, DIDT_TCP_CTRL1__MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
352
353 { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__UNUSED_0_MASK, DIDT_TCP_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
354 { ixDIDT_TCP_CTRL_OCP, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TCP_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, POLARIS10_CONFIGREG_DIDT_IND },
355
356 { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TCP_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND },
357 { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_0_MASK, DIDT_TCP_CTRL2__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
358 { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_TCP_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x0032, POLARIS10_CONFIGREG_DIDT_IND },
359 { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_1_MASK, DIDT_TCP_CTRL2__UNUSED_1__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
360 { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_TCP_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
361 { ixDIDT_TCP_CTRL2, DIDT_TCP_CTRL2__UNUSED_2_MASK, DIDT_TCP_CTRL2__UNUSED_2__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
362
363 { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
364 { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
365 { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
366 { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x01aa, POLARIS10_CONFIGREG_DIDT_IND },
367 { ixDIDT_TCP_STALL_CTRL, DIDT_TCP_STALL_CTRL__UNUSED_0_MASK, DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
368
369 { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
370 { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND },
371 { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3dde, POLARIS10_CONFIGREG_DIDT_IND },
372 { ixDIDT_TCP_TUNING_CTRL, DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK, DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
373
374 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, POLARIS10_CONFIGREG_DIDT_IND },
375 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__USE_REF_CLOCK_MASK, DIDT_TCP_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
376 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__PHASE_OFFSET_MASK, DIDT_TCP_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
377 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CTRL_RST_MASK, DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
378 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
379 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
380 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, POLARIS10_CONFIGREG_DIDT_IND },
381 { ixDIDT_TCP_CTRL0, DIDT_TCP_CTRL0__UNUSED_0_MASK, DIDT_TCP_CTRL0__UNUSED_0__SHIFT, 0x0000, POLARIS10_CONFIGREG_DIDT_IND },
382 { 0xFFFFFFFF }
383};
384
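A note on the tables above: each DIDTConfig/GCCACConfig entry pairs a register index with a mask/shift/value triple, and the list is closed by the { 0xFFFFFFFF } sentinel. A minimal sketch of how such a sentinel-terminated table is walked with a masked read-modify-write — the struct and helper names here are simplified, hypothetical stand-ins, not the driver's own; the real polaris10_program_pt_config_registers() added further down does the equivalent through the indirect SMC/DIDT/GC_CAC register interfaces:

	#include <stdint.h>

	struct cfg_reg {                    /* simplified stand-in for polaris10_pt_config_reg */
		uint32_t offset;            /* register index; 0xFFFFFFFF terminates the table */
		uint32_t mask;              /* bits belonging to this field                     */
		uint32_t shift;             /* position of the field's least significant bit    */
		uint32_t value;             /* field value to program                           */
	};

	/* Apply every entry to a flat register image until the sentinel is reached. */
	static void program_table(uint32_t *regs, const struct cfg_reg *t)
	{
		for (; t->offset != 0xFFFFFFFF; t++) {
			uint32_t v = regs[t->offset];

			v &= ~t->mask;                          /* clear the field       */
			v |= (t->value << t->shift) & t->mask;  /* insert the new value  */
			regs[t->offset] = v;
		}
	}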
385static const struct polaris10_pt_defaults polaris10_power_tune_data_set_array[POWERTUNE_DEFAULT_SET_MAX] = {
386 /* sviLoadLIneEn, SviLoadLineVddC, TDC_VDDC_ThrottleReleaseLimitPerc, TDC_MAWt,
387 * TdcWaterfallCtl, DTEAmbientTempBase, DisplayCac, BAPM_TEMP_GRADIENT */
@@ -209,6 +559,187 @@ static int polaris10_min_max_vgnb_lpml_id_from_bapm_vddc(struct pp_hwmgr *hwmgr)
559 return 0;
560}
561
562static int polaris10_enable_didt(struct pp_hwmgr *hwmgr, const bool enable)
563{
564
565 uint32_t en = enable ? 1 : 0;
566 int32_t result = 0;
567 uint32_t data;
568
569 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping)) {
570 data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0);
571 data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
572 data |= ((en << DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK);
573 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_SQ_CTRL0, data);
574 DIDTBlock_Info &= ~SQ_Enable_MASK;
575 DIDTBlock_Info |= en << SQ_Enable_SHIFT;
576 }
577
578 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping)) {
579 data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0);
580 data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
581 data |= ((en << DIDT_DB_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK);
582 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_DB_CTRL0, data);
583 DIDTBlock_Info &= ~DB_Enable_MASK;
584 DIDTBlock_Info |= en << DB_Enable_SHIFT;
585 }
586
587 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping)) {
588 data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0);
589 data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
590 data |= ((en << DIDT_TD_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK);
591 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TD_CTRL0, data);
592 DIDTBlock_Info &= ~TD_Enable_MASK;
593 DIDTBlock_Info |= en << TD_Enable_SHIFT;
594 }
595
596 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) {
597 data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0);
598 data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
599 data |= ((en << DIDT_TCP_CTRL0__DIDT_CTRL_EN__SHIFT) & DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK);
600 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, ixDIDT_TCP_CTRL0, data);
601 DIDTBlock_Info &= ~TCP_Enable_MASK;
602 DIDTBlock_Info |= en << TCP_Enable_SHIFT;
603 }
604
605 if (enable)
606 result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_Didt_Block_Function, DIDTBlock_Info);
607
608 return result;
609}
610
611static int polaris10_program_pt_config_registers(struct pp_hwmgr *hwmgr,
612 struct polaris10_pt_config_reg *cac_config_regs)
613{
614 struct polaris10_pt_config_reg *config_regs = cac_config_regs;
615 uint32_t cache = 0;
616 uint32_t data = 0;
617
618 PP_ASSERT_WITH_CODE((config_regs != NULL), "Invalid config register table.", return -EINVAL);
619
620 while (config_regs->offset != 0xFFFFFFFF) {
621 if (config_regs->type == POLARIS10_CONFIGREG_CACHE)
622 cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
623 else {
624 switch (config_regs->type) {
625 case POLARIS10_CONFIGREG_SMC_IND:
626 data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, config_regs->offset);
627 break;
628
629 case POLARIS10_CONFIGREG_DIDT_IND:
630 data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__DIDT, config_regs->offset);
631 break;
632
633 case POLARIS10_CONFIGREG_GC_CAC_IND:
634 data = cgs_read_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC, config_regs->offset);
635 break;
636
637 default:
638 data = cgs_read_register(hwmgr->device, config_regs->offset);
639 break;
640 }
641
642 data &= ~config_regs->mask;
643 data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
644 data |= cache;
645
646 switch (config_regs->type) {
647 case POLARIS10_CONFIGREG_SMC_IND:
648 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, config_regs->offset, data);
649 break;
650
651 case POLARIS10_CONFIGREG_DIDT_IND:
652 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__DIDT, config_regs->offset, data);
653 break;
654
655 case POLARIS10_CONFIGREG_GC_CAC_IND:
656 cgs_write_ind_register(hwmgr->device, CGS_IND_REG_GC_CAC, config_regs->offset, data);
657 break;
658
659 default:
660 cgs_write_register(hwmgr->device, config_regs->offset, data);
661 break;
662 }
663 cache = 0;
664 }
665
666 config_regs++;
667 }
668
669 return 0;
670}
671
672int polaris10_enable_didt_config(struct pp_hwmgr *hwmgr)
673{
674 int result;
675 uint32_t num_se = 0;
676 uint32_t count, value, value2;
677 struct cgs_system_info sys_info = {0};
678
679 sys_info.size = sizeof(struct cgs_system_info);
680 sys_info.info_id = CGS_SYSTEM_INFO_GFX_SE_INFO;
681 result = cgs_query_system_info(hwmgr->device, &sys_info);
682
683
684 if (result == 0)
685 num_se = sys_info.value;
686
687 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping) ||
688 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping) ||
689 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping) ||
690 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) {
691
692 /* TO DO Pre DIDT disable clock gating */
693 value = 0;
694 value2 = cgs_read_register(hwmgr->device, mmGRBM_GFX_INDEX);
695 for (count = 0; count < num_se; count++) {
696 value = SYS_GRBM_GFX_INDEX_DATA__INSTANCE_BROADCAST_WRITES_MASK
697 | SYS_GRBM_GFX_INDEX_DATA__SH_BROADCAST_WRITES_MASK
698 | (count << SYS_GRBM_GFX_INDEX_DATA__SE_INDEX__SHIFT);
699 cgs_write_register(hwmgr->device, mmGRBM_GFX_INDEX, value);
700
701 if (hwmgr->chip_id == CHIP_POLARIS10) {
702 result = polaris10_program_pt_config_registers(hwmgr, GCCACConfig_Polaris10);
703 PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
704 result = polaris10_program_pt_config_registers(hwmgr, DIDTConfig_Polaris10);
705 PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
706 } else if (hwmgr->chip_id == CHIP_POLARIS11) {
707 result = polaris10_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11);
708 PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
709 result = polaris10_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11);
710 PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
711 }
712 }
713 cgs_write_register(hwmgr->device, mmGRBM_GFX_INDEX, value2);
714
715 result = polaris10_enable_didt(hwmgr, true);
716 PP_ASSERT_WITH_CODE((result == 0), "EnableDiDt failed.", return result);
717
718 /* TO DO Post DIDT enable clock gating */
719 }
720
721 return 0;
722}
723
724int polaris10_disable_didt_config(struct pp_hwmgr *hwmgr)
725{
726 int result;
727
728 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping) ||
729 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping) ||
730 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping) ||
731 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping)) {
732 /* TO DO Pre DIDT disable clock gating */
733
734 result = polaris10_enable_didt(hwmgr, false);
735 PP_ASSERT_WITH_CODE((result == 0), "Post DIDT enable clock gating failed.", return result);
736 /* TO DO Post DIDT enable clock gating */
737 }
738
739 return 0;
740}
741
742
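The error-handling pattern used throughout the new functions above — PP_ASSERT_WITH_CODE(condition, message, action) — logs the message and runs the recovery statement when the condition fails. A rough, hypothetical userspace equivalent of that macro shape (not the kernel's definition, shown only to clarify how the three arguments are used):

	#include <stdio.h>

	/* If cond is false, print msg and execute the supplied statement (e.g. "return result"). */
	#define ASSERT_WITH_CODE_EXAMPLE(cond, msg, code)	\
		do {						\
			if (!(cond)) {				\
				fprintf(stderr, "%s\n", msg);	\
				code;				\
			}					\
		} while (0)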
743static int polaris10_populate_bapm_vddc_base_leakage_sidd(struct pp_hwmgr *hwmgr)
744{
745 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
@@ -312,6 +843,23 @@ int polaris10_enable_smc_cac(struct pp_hwmgr *hwmgr)
843 return result;
844}
845
846int polaris10_disable_smc_cac(struct pp_hwmgr *hwmgr)
847{
848 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
849 int result = 0;
850
851 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
852 PHM_PlatformCaps_CAC) && data->cac_enabled) {
853 int smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
854 (uint16_t)(PPSMC_MSG_DisableCac));
855 PP_ASSERT_WITH_CODE((smc_result == 0),
856 "Failed to disable CAC in SMC.", result = -1);
857
858 data->cac_enabled = false;
859 }
860 return result;
861}
862
863int polaris10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n)
864{
865 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
@@ -373,6 +921,48 @@ int polaris10_enable_power_containment(struct pp_hwmgr *hwmgr)
921 return result;
922}
923
924int polaris10_disable_power_containment(struct pp_hwmgr *hwmgr)
925{
926 struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend);
927 int result = 0;
928
929 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
930 PHM_PlatformCaps_PowerContainment) &&
931 data->power_containment_features) {
932 int smc_result;
933
934 if (data->power_containment_features &
935 POWERCONTAINMENT_FEATURE_TDCLimit) {
936 smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
937 (uint16_t)(PPSMC_MSG_TDCLimitDisable));
938 PP_ASSERT_WITH_CODE((smc_result == 0),
939 "Failed to disable TDCLimit in SMC.",
940 result = smc_result);
941 }
942
943 if (data->power_containment_features &
944 POWERCONTAINMENT_FEATURE_DTE) {
945 smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
946 (uint16_t)(PPSMC_MSG_DisableDTE));
947 PP_ASSERT_WITH_CODE((smc_result == 0),
948 "Failed to disable DTE in SMC.",
949 result = smc_result);
950 }
951
952 if (data->power_containment_features &
953 POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
954 smc_result = smum_send_msg_to_smc(hwmgr->smumgr,
955 (uint16_t)(PPSMC_MSG_PkgPwrLimitDisable));
956 PP_ASSERT_WITH_CODE((smc_result == 0),
957 "Failed to disable PkgPwrTracking in SMC.",
958 result = smc_result);
959 }
960 data->power_containment_features = 0;
961 }
962
963 return result;
964}
965
966int polaris10_power_control_set_level(struct pp_hwmgr *hwmgr)
967{
968 struct phm_ppt_v1_information *table_info =
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h
index 68bc1cb6d40c..bc78e28f010d 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h
@@ -27,15 +27,37 @@ enum polaris10_pt_config_reg_type {
27 POLARIS10_CONFIGREG_MMR = 0,
28 POLARIS10_CONFIGREG_SMC_IND,
29 POLARIS10_CONFIGREG_DIDT_IND,
30 POLARIS10_CONFIGREG_GC_CAC_IND,
31 POLARIS10_CONFIGREG_CACHE,
32 POLARIS10_CONFIGREG_MAX
33};
34
35#define DIDT_SQ_CTRL0__UNUSED_0_MASK 0xfffc0000
36#define DIDT_SQ_CTRL0__UNUSED_0__SHIFT 0x12
37#define DIDT_TD_CTRL0__UNUSED_0_MASK 0xfffc0000
38#define DIDT_TD_CTRL0__UNUSED_0__SHIFT 0x12
39#define DIDT_TCP_CTRL0__UNUSED_0_MASK 0xfffc0000
40#define DIDT_TCP_CTRL0__UNUSED_0__SHIFT 0x12
41#define DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK 0xc0000000
42#define DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001e
43#define DIDT_TD_TUNING_CTRL__UNUSED_0_MASK 0xc0000000
44#define DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001e
45#define DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK 0xc0000000
46#define DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001e
47
48/* PowerContainment Features */
49#define POWERCONTAINMENT_FEATURE_DTE 0x00000001
50#define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002
51#define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004
52
53#define ixGC_CAC_CNTL 0x0000
54#define ixDIDT_SQ_STALL_CTRL 0x0004
55#define ixDIDT_SQ_TUNING_CTRL 0x0005
56#define ixDIDT_TD_STALL_CTRL 0x0044
57#define ixDIDT_TD_TUNING_CTRL 0x0045
58#define ixDIDT_TCP_STALL_CTRL 0x0064
59#define ixDIDT_TCP_TUNING_CTRL 0x0065
60
61struct polaris10_pt_config_reg {
62 uint32_t offset;
63 uint32_t mask;
@@ -62,9 +84,11 @@ void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr);
84int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr);
85int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr);
86int polaris10_enable_smc_cac(struct pp_hwmgr *hwmgr);
87int polaris10_disable_smc_cac(struct pp_hwmgr *hwmgr);
88int polaris10_enable_power_containment(struct pp_hwmgr *hwmgr);
89int polaris10_disable_power_containment(struct pp_hwmgr *hwmgr);
90int polaris10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n);
91int polaris10_power_control_set_level(struct pp_hwmgr *hwmgr);
68 92int polaris10_enable_didt_config(struct pp_hwmgr *hwmgr);
93#endif /* POLARIS10_POWERTUNE_H */
94
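Each of the new *_MASK/*__SHIFT pairs above describes one bit-field of an indirectly addressed DIDT register; for instance DIDT_SQ_CTRL0__UNUSED_0_MASK (0xfffc0000) together with shift 0x12 selects bits 18..31. A small self-contained check of that reading, compiled outside the kernel with a hypothetical helper name:

	#include <assert.h>
	#include <stdint.h>

	#define DIDT_SQ_CTRL0__UNUSED_0_MASK   0xfffc0000
	#define DIDT_SQ_CTRL0__UNUSED_0__SHIFT 0x12

	/* Extract the field selected by a mask/shift pair (hypothetical helper). */
	static uint32_t field_get(uint32_t reg, uint32_t mask, uint32_t shift)
	{
		return (reg & mask) >> shift;
	}

	int main(void)
	{
		/* With all register bits set, the UNUSED_0 field reads back as
		 * fourteen ones (0x3fff), i.e. the pair spans bits 18..31. */
		assert(field_get(0xffffffffu, DIDT_SQ_CTRL0__UNUSED_0_MASK,
				 DIDT_SQ_CTRL0__UNUSED_0__SHIFT) == 0x3fff);
		return 0;
	}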
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
index a3c38bbd1e94..1944d289f846 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c
@@ -66,7 +66,7 @@ int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise)
66 int result;
67 struct cgs_system_info info = {0};
68
69 if( 0 != acpi_atcs_notify_pcie_device_ready(device)) 69 if (acpi_atcs_notify_pcie_device_ready(device))
70 return -EINVAL;
71
72 info.size = sizeof(struct cgs_system_info);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
index 90b35c5c10a4..26f3e30d0fef 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c
@@ -179,13 +179,12 @@ int atomctrl_set_engine_dram_timings_rv770(
179 179
180 /* They are both in 10KHz Units. */ 180 /* They are both in 10KHz Units. */
181 engine_clock_parameters.ulTargetEngineClock = 181 engine_clock_parameters.ulTargetEngineClock =
182 (uint32_t) engine_clock & SET_CLOCK_FREQ_MASK; 182 cpu_to_le32((engine_clock & SET_CLOCK_FREQ_MASK) |
183 engine_clock_parameters.ulTargetEngineClock |= 183 ((COMPUTE_ENGINE_PLL_PARAM << 24)));
184 (COMPUTE_ENGINE_PLL_PARAM << 24);
185 184
186 /* in 10 khz units.*/ 185 /* in 10 khz units.*/
187 engine_clock_parameters.sReserved.ulClock = 186 engine_clock_parameters.sReserved.ulClock =
188 (uint32_t) memory_clock & SET_CLOCK_FREQ_MASK; 187 cpu_to_le32(memory_clock & SET_CLOCK_FREQ_MASK);
189 return cgs_atom_exec_cmd_table(hwmgr->device, 188 return cgs_atom_exec_cmd_table(hwmgr->device,
190 GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings), 189 GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings),
191 &engine_clock_parameters); 190 &engine_clock_parameters);
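This hunk and most of the ppatomctrl.c changes that follow wrap every value exchanged with the ATOM BIOS command tables in cpu_to_le16()/cpu_to_le32() on the way in and le16_to_cpu()/le32_to_cpu() on the way out: the ATOM parameter structures are defined as little-endian, so the conversions are no-ops on x86 but necessary on big-endian hosts. A standalone illustration of the underlying idea, written with plain userspace stand-ins rather than the kernel's byteorder helpers:

	#include <stdint.h>

	/* Write v to buf in little-endian byte order, independent of host endianness. */
	static void put_le32(uint8_t buf[4], uint32_t v)
	{
		buf[0] = (uint8_t)(v & 0xff);
		buf[1] = (uint8_t)((v >> 8) & 0xff);
		buf[2] = (uint8_t)((v >> 16) & 0xff);
		buf[3] = (uint8_t)((v >> 24) & 0xff);
	}

	/* Read a little-endian 32-bit value from buf back into host byte order. */
	static uint32_t get_le32(const uint8_t buf[4])
	{
		return (uint32_t)buf[0] |
		       ((uint32_t)buf[1] << 8) |
		       ((uint32_t)buf[2] << 16) |
		       ((uint32_t)buf[3] << 24);
	}

The kernel's cpu_to_le32()/le32_to_cpu() achieve the same effect in place on the integer fields of the ATOM parameter structures.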
@@ -252,7 +251,7 @@ int atomctrl_get_memory_pll_dividers_si(
252 COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1 mpll_parameters; 251 COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1 mpll_parameters;
253 int result; 252 int result;
254 253
255 mpll_parameters.ulClock = (uint32_t) clock_value; 254 mpll_parameters.ulClock = cpu_to_le32(clock_value);
256 mpll_parameters.ucInputFlag = (uint8_t)((strobe_mode) ? 1 : 0); 255 mpll_parameters.ucInputFlag = (uint8_t)((strobe_mode) ? 1 : 0);
257 256
258 result = cgs_atom_exec_cmd_table 257 result = cgs_atom_exec_cmd_table
@@ -262,9 +261,9 @@ int atomctrl_get_memory_pll_dividers_si(
262 261
263 if (0 == result) { 262 if (0 == result) {
264 mpll_param->mpll_fb_divider.clk_frac = 263 mpll_param->mpll_fb_divider.clk_frac =
265 mpll_parameters.ulFbDiv.usFbDivFrac; 264 le16_to_cpu(mpll_parameters.ulFbDiv.usFbDivFrac);
266 mpll_param->mpll_fb_divider.cl_kf = 265 mpll_param->mpll_fb_divider.cl_kf =
267 mpll_parameters.ulFbDiv.usFbDiv; 266 le16_to_cpu(mpll_parameters.ulFbDiv.usFbDiv);
268 mpll_param->mpll_post_divider = 267 mpll_param->mpll_post_divider =
269 (uint32_t)mpll_parameters.ucPostDiv; 268 (uint32_t)mpll_parameters.ucPostDiv;
270 mpll_param->vco_mode = 269 mpll_param->vco_mode =
@@ -300,7 +299,7 @@ int atomctrl_get_memory_pll_dividers_vi(struct pp_hwmgr *hwmgr,
300 COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_2 mpll_parameters; 299 COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_2 mpll_parameters;
301 int result; 300 int result;
302 301
303 mpll_parameters.ulClock.ulClock = (uint32_t)clock_value; 302 mpll_parameters.ulClock.ulClock = cpu_to_le32(clock_value);
304 303
305 result = cgs_atom_exec_cmd_table(hwmgr->device, 304 result = cgs_atom_exec_cmd_table(hwmgr->device,
306 GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam), 305 GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam),
@@ -320,7 +319,7 @@ int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr,
320 COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 pll_parameters; 319 COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 pll_parameters;
321 int result; 320 int result;
322 321
323 pll_parameters.ulClock = clock_value; 322 pll_parameters.ulClock = cpu_to_le32(clock_value);
324 323
325 result = cgs_atom_exec_cmd_table 324 result = cgs_atom_exec_cmd_table
326 (hwmgr->device, 325 (hwmgr->device,
@@ -329,7 +328,7 @@ int atomctrl_get_engine_pll_dividers_kong(struct pp_hwmgr *hwmgr,
329 328
330 if (0 == result) { 329 if (0 == result) {
331 dividers->pll_post_divider = pll_parameters.ucPostDiv; 330 dividers->pll_post_divider = pll_parameters.ucPostDiv;
332 dividers->real_clock = pll_parameters.ulClock; 331 dividers->real_clock = le32_to_cpu(pll_parameters.ulClock);
333 } 332 }
334 333
335 return result; 334 return result;
@@ -343,7 +342,7 @@ int atomctrl_get_engine_pll_dividers_vi(
343 COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 pll_patameters; 342 COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 pll_patameters;
344 int result; 343 int result;
345 344
346 pll_patameters.ulClock.ulClock = clock_value; 345 pll_patameters.ulClock.ulClock = cpu_to_le32(clock_value);
347 pll_patameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK; 346 pll_patameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK;
348 347
349 result = cgs_atom_exec_cmd_table 348 result = cgs_atom_exec_cmd_table
@@ -355,12 +354,12 @@ int atomctrl_get_engine_pll_dividers_vi(
355 dividers->pll_post_divider = 354 dividers->pll_post_divider =
356 pll_patameters.ulClock.ucPostDiv; 355 pll_patameters.ulClock.ucPostDiv;
357 dividers->real_clock = 356 dividers->real_clock =
358 pll_patameters.ulClock.ulClock; 357 le32_to_cpu(pll_patameters.ulClock.ulClock);
359 358
360 dividers->ul_fb_div.ul_fb_div_frac = 359 dividers->ul_fb_div.ul_fb_div_frac =
361 pll_patameters.ulFbDiv.usFbDivFrac; 360 le16_to_cpu(pll_patameters.ulFbDiv.usFbDivFrac);
362 dividers->ul_fb_div.ul_fb_div = 361 dividers->ul_fb_div.ul_fb_div =
363 pll_patameters.ulFbDiv.usFbDiv; 362 le16_to_cpu(pll_patameters.ulFbDiv.usFbDiv);
364 363
365 dividers->uc_pll_ref_div = 364 dividers->uc_pll_ref_div =
366 pll_patameters.ucPllRefDiv; 365 pll_patameters.ucPllRefDiv;
@@ -380,7 +379,7 @@ int atomctrl_get_engine_pll_dividers_ai(struct pp_hwmgr *hwmgr,
380 COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_7 pll_patameters; 379 COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_7 pll_patameters;
381 int result; 380 int result;
382 381
383 pll_patameters.ulClock.ulClock = clock_value; 382 pll_patameters.ulClock.ulClock = cpu_to_le32(clock_value);
384 pll_patameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK; 383 pll_patameters.ulClock.ucPostDiv = COMPUTE_GPUCLK_INPUT_FLAG_SCLK;
385 384
386 result = cgs_atom_exec_cmd_table 385 result = cgs_atom_exec_cmd_table
@@ -412,7 +411,7 @@ int atomctrl_get_dfs_pll_dividers_vi(
412 COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 pll_patameters; 411 COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 pll_patameters;
413 int result; 412 int result;
414 413
415 pll_patameters.ulClock.ulClock = clock_value; 414 pll_patameters.ulClock.ulClock = cpu_to_le32(clock_value);
416 pll_patameters.ulClock.ucPostDiv = 415 pll_patameters.ulClock.ucPostDiv =
417 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK; 416 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK;
418 417
@@ -425,12 +424,12 @@ int atomctrl_get_dfs_pll_dividers_vi(
425 dividers->pll_post_divider = 424 dividers->pll_post_divider =
426 pll_patameters.ulClock.ucPostDiv; 425 pll_patameters.ulClock.ucPostDiv;
427 dividers->real_clock = 426 dividers->real_clock =
428 pll_patameters.ulClock.ulClock; 427 le32_to_cpu(pll_patameters.ulClock.ulClock);
429 428
430 dividers->ul_fb_div.ul_fb_div_frac = 429 dividers->ul_fb_div.ul_fb_div_frac =
431 pll_patameters.ulFbDiv.usFbDivFrac; 430 le16_to_cpu(pll_patameters.ulFbDiv.usFbDivFrac);
432 dividers->ul_fb_div.ul_fb_div = 431 dividers->ul_fb_div.ul_fb_div =
433 pll_patameters.ulFbDiv.usFbDiv; 432 le16_to_cpu(pll_patameters.ulFbDiv.usFbDiv);
434 433
435 dividers->uc_pll_ref_div = 434 dividers->uc_pll_ref_div =
436 pll_patameters.ucPllRefDiv; 435 pll_patameters.ucPllRefDiv;
@@ -519,13 +518,13 @@ int atomctrl_get_voltage_table_v3(
519 518
520 for (i = 0; i < voltage_object->asGpioVoltageObj.ucGpioEntryNum; i++) { 519 for (i = 0; i < voltage_object->asGpioVoltageObj.ucGpioEntryNum; i++) {
521 voltage_table->entries[i].value = 520 voltage_table->entries[i].value =
522 voltage_object->asGpioVoltageObj.asVolGpioLut[i].usVoltageValue; 521 le16_to_cpu(voltage_object->asGpioVoltageObj.asVolGpioLut[i].usVoltageValue);
523 voltage_table->entries[i].smio_low = 522 voltage_table->entries[i].smio_low =
524 voltage_object->asGpioVoltageObj.asVolGpioLut[i].ulVoltageId; 523 le32_to_cpu(voltage_object->asGpioVoltageObj.asVolGpioLut[i].ulVoltageId);
525 } 524 }
526 525
527 voltage_table->mask_low = 526 voltage_table->mask_low =
528 voltage_object->asGpioVoltageObj.ulGpioMaskVal; 527 le32_to_cpu(voltage_object->asGpioVoltageObj.ulGpioMaskVal);
529 voltage_table->count = 528 voltage_table->count =
530 voltage_object->asGpioVoltageObj.ucGpioEntryNum; 529 voltage_object->asGpioVoltageObj.ucGpioEntryNum;
531 voltage_table->phase_delay = 530 voltage_table->phase_delay =
@@ -552,13 +551,13 @@ static bool atomctrl_lookup_gpio_pin(
552 pin_assignment->ucGpioPinBitShift; 551 pin_assignment->ucGpioPinBitShift;
553 gpio_pin_assignment->us_gpio_pin_aindex = 552 gpio_pin_assignment->us_gpio_pin_aindex =
554 le16_to_cpu(pin_assignment->usGpioPin_AIndex); 553 le16_to_cpu(pin_assignment->usGpioPin_AIndex);
555 return false; 554 return true;
556 } 555 }
557 556
558 offset += offsetof(ATOM_GPIO_PIN_ASSIGNMENT, ucGPIO_ID) + 1; 557 offset += offsetof(ATOM_GPIO_PIN_ASSIGNMENT, ucGPIO_ID) + 1;
559 } 558 }
560 559
561 return true; 560 return false;
562} 561}
563 562
564/** 563/**
@@ -592,12 +591,12 @@ bool atomctrl_get_pp_assign_pin(
592 const uint32_t pinId, 591 const uint32_t pinId,
593 pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment) 592 pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment)
594{ 593{
595 bool bRet = 0; 594 bool bRet = false;
596 ATOM_GPIO_PIN_LUT *gpio_lookup_table = 595 ATOM_GPIO_PIN_LUT *gpio_lookup_table =
597 get_gpio_lookup_table(hwmgr->device); 596 get_gpio_lookup_table(hwmgr->device);
598 597
599 PP_ASSERT_WITH_CODE((NULL != gpio_lookup_table), 598 PP_ASSERT_WITH_CODE((NULL != gpio_lookup_table),
600 "Could not find GPIO lookup Table in BIOS.", return -1); 599 "Could not find GPIO lookup Table in BIOS.", return false);
601 600
602 bRet = atomctrl_lookup_gpio_pin(gpio_lookup_table, pinId, 601 bRet = atomctrl_lookup_gpio_pin(gpio_lookup_table, pinId,
603 gpio_pin_assignment); 602 gpio_pin_assignment);
@@ -650,8 +649,8 @@ int atomctrl_calculate_voltage_evv_on_sclk(
650 return -1; 649 return -1;
651 650
652 if (getASICProfilingInfo->asHeader.ucTableFormatRevision < 3 || 651 if (getASICProfilingInfo->asHeader.ucTableFormatRevision < 3 ||
653 (getASICProfilingInfo->asHeader.ucTableFormatRevision == 3 && 652 (getASICProfilingInfo->asHeader.ucTableFormatRevision == 3 &&
654 getASICProfilingInfo->asHeader.ucTableContentRevision < 4)) 653 getASICProfilingInfo->asHeader.ucTableContentRevision < 4))
655 return -1; 654 return -1;
656 655
657 /*----------------------------------------------------------- 656 /*-----------------------------------------------------------
@@ -662,37 +661,37 @@ int atomctrl_calculate_voltage_evv_on_sclk(
662 661
663 switch (dpm_level) { 662 switch (dpm_level) {
664 case 1: 663 case 1:
665 fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm1); 664 fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm1));
666 fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM1, 1000); 665 fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM1), 1000);
667 break; 666 break;
668 case 2: 667 case 2:
669 fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm2); 668 fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm2));
670 fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM2, 1000); 669 fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM2), 1000);
671 break; 670 break;
672 case 3: 671 case 3:
673 fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm3); 672 fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm3));
674 fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM3, 1000); 673 fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM3), 1000);
675 break; 674 break;
676 case 4: 675 case 4:
677 fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm4); 676 fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm4));
678 fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM4, 1000); 677 fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM4), 1000);
679 break; 678 break;
680 case 5: 679 case 5:
681 fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm5); 680 fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm5));
682 fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM5, 1000); 681 fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM5), 1000);
683 break; 682 break;
684 case 6: 683 case 6:
685 fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm6); 684 fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm6));
686 fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM6, 1000); 685 fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM6), 1000);
687 break; 686 break;
688 case 7: 687 case 7:
689 fPowerDPMx = Convert_ULONG_ToFraction(getASICProfilingInfo->usPowerDpm7); 688 fPowerDPMx = Convert_ULONG_ToFraction(le16_to_cpu(getASICProfilingInfo->usPowerDpm7));
690 fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM7, 1000); 689 fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM7), 1000);
691 break; 690 break;
692 default: 691 default:
693 printk(KERN_ERR "DPM Level not supported\n"); 692 printk(KERN_ERR "DPM Level not supported\n");
694 fPowerDPMx = Convert_ULONG_ToFraction(1); 693 fPowerDPMx = Convert_ULONG_ToFraction(1);
695 fDerateTDP = GetScaledFraction(getASICProfilingInfo->ulTdpDerateDPM0, 1000); 694 fDerateTDP = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulTdpDerateDPM0), 1000);
696 } 695 }
697 696
698 /*------------------------- 697 /*-------------------------
@@ -716,9 +715,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
716 return result; 715 return result;
717 716
718 /* Finally, the actual fuse value */ 717 /* Finally, the actual fuse value */
719 ul_RO_fused = sOutput_FuseValues.ulEfuseValue; 718 ul_RO_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
720 fMin = GetScaledFraction(sRO_fuse.ulEfuseMin, 1); 719 fMin = GetScaledFraction(le32_to_cpu(sRO_fuse.ulEfuseMin), 1);
721 fRange = GetScaledFraction(sRO_fuse.ulEfuseEncodeRange, 1); 720 fRange = GetScaledFraction(le32_to_cpu(sRO_fuse.ulEfuseEncodeRange), 1);
722 fRO_fused = fDecodeLinearFuse(ul_RO_fused, fMin, fRange, sRO_fuse.ucEfuseLength); 721 fRO_fused = fDecodeLinearFuse(ul_RO_fused, fMin, fRange, sRO_fuse.ucEfuseLength);
723 722
724 sCACm_fuse = getASICProfilingInfo->sCACm; 723 sCACm_fuse = getASICProfilingInfo->sCACm;
@@ -736,9 +735,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
736 if (result) 735 if (result)
737 return result; 736 return result;
738 737
739 ul_CACm_fused = sOutput_FuseValues.ulEfuseValue; 738 ul_CACm_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
740 fMin = GetScaledFraction(sCACm_fuse.ulEfuseMin, 1000); 739 fMin = GetScaledFraction(le32_to_cpu(sCACm_fuse.ulEfuseMin), 1000);
741 fRange = GetScaledFraction(sCACm_fuse.ulEfuseEncodeRange, 1000); 740 fRange = GetScaledFraction(le32_to_cpu(sCACm_fuse.ulEfuseEncodeRange), 1000);
742 741
743 fCACm_fused = fDecodeLinearFuse(ul_CACm_fused, fMin, fRange, sCACm_fuse.ucEfuseLength); 742 fCACm_fused = fDecodeLinearFuse(ul_CACm_fused, fMin, fRange, sCACm_fuse.ucEfuseLength);
744 743
@@ -756,9 +755,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
756 if (result) 755 if (result)
757 return result; 756 return result;
758 757
759 ul_CACb_fused = sOutput_FuseValues.ulEfuseValue; 758 ul_CACb_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
760 fMin = GetScaledFraction(sCACb_fuse.ulEfuseMin, 1000); 759 fMin = GetScaledFraction(le32_to_cpu(sCACb_fuse.ulEfuseMin), 1000);
761 fRange = GetScaledFraction(sCACb_fuse.ulEfuseEncodeRange, 1000); 760 fRange = GetScaledFraction(le32_to_cpu(sCACb_fuse.ulEfuseEncodeRange), 1000);
762 761
763 fCACb_fused = fDecodeLinearFuse(ul_CACb_fused, fMin, fRange, sCACb_fuse.ucEfuseLength); 762 fCACb_fused = fDecodeLinearFuse(ul_CACb_fused, fMin, fRange, sCACb_fuse.ucEfuseLength);
764 763
@@ -777,9 +776,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
777 if (result) 776 if (result)
778 return result; 777 return result;
779 778
780 ul_Kt_Beta_fused = sOutput_FuseValues.ulEfuseValue; 779 ul_Kt_Beta_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
781 fAverage = GetScaledFraction(sKt_Beta_fuse.ulEfuseEncodeAverage, 1000); 780 fAverage = GetScaledFraction(le32_to_cpu(sKt_Beta_fuse.ulEfuseEncodeAverage), 1000);
782 fRange = GetScaledFraction(sKt_Beta_fuse.ulEfuseEncodeRange, 1000); 781 fRange = GetScaledFraction(le32_to_cpu(sKt_Beta_fuse.ulEfuseEncodeRange), 1000);
783 782
784 fKt_Beta_fused = fDecodeLogisticFuse(ul_Kt_Beta_fused, 783 fKt_Beta_fused = fDecodeLogisticFuse(ul_Kt_Beta_fused,
785 fAverage, fRange, sKt_Beta_fuse.ucEfuseLength); 784 fAverage, fRange, sKt_Beta_fuse.ucEfuseLength);
@@ -798,9 +797,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
798 if (result) 797 if (result)
799 return result; 798 return result;
800 799
801 ul_Kv_m_fused = sOutput_FuseValues.ulEfuseValue; 800 ul_Kv_m_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
802 fAverage = GetScaledFraction(sKv_m_fuse.ulEfuseEncodeAverage, 1000); 801 fAverage = GetScaledFraction(le32_to_cpu(sKv_m_fuse.ulEfuseEncodeAverage), 1000);
803 fRange = GetScaledFraction((sKv_m_fuse.ulEfuseEncodeRange & 0x7fffffff), 1000); 802 fRange = GetScaledFraction((le32_to_cpu(sKv_m_fuse.ulEfuseEncodeRange) & 0x7fffffff), 1000);
804 fRange = fMultiply(fRange, ConvertToFraction(-1)); 803 fRange = fMultiply(fRange, ConvertToFraction(-1));
805 804
806 fKv_m_fused = fDecodeLogisticFuse(ul_Kv_m_fused, 805 fKv_m_fused = fDecodeLogisticFuse(ul_Kv_m_fused,
@@ -820,9 +819,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
820 if (result) 819 if (result)
821 return result; 820 return result;
822 821
823 ul_Kv_b_fused = sOutput_FuseValues.ulEfuseValue; 822 ul_Kv_b_fused = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
824 fAverage = GetScaledFraction(sKv_b_fuse.ulEfuseEncodeAverage, 1000); 823 fAverage = GetScaledFraction(le32_to_cpu(sKv_b_fuse.ulEfuseEncodeAverage), 1000);
825 fRange = GetScaledFraction(sKv_b_fuse.ulEfuseEncodeRange, 1000); 824 fRange = GetScaledFraction(le32_to_cpu(sKv_b_fuse.ulEfuseEncodeRange), 1000);
826 825
827 fKv_b_fused = fDecodeLogisticFuse(ul_Kv_b_fused, 826 fKv_b_fused = fDecodeLogisticFuse(ul_Kv_b_fused,
828 fAverage, fRange, sKv_b_fuse.ucEfuseLength); 827 fAverage, fRange, sKv_b_fuse.ucEfuseLength);
@@ -851,9 +850,9 @@ int atomctrl_calculate_voltage_evv_on_sclk(
851 if (result) 850 if (result)
852 return result; 851 return result;
853 852
854 ul_FT_Lkg_V0NORM = sOutput_FuseValues.ulEfuseValue; 853 ul_FT_Lkg_V0NORM = le32_to_cpu(sOutput_FuseValues.ulEfuseValue);
855 fLn_MaxDivMin = GetScaledFraction(getASICProfilingInfo->ulLkgEncodeLn_MaxDivMin, 10000); 854 fLn_MaxDivMin = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLkgEncodeLn_MaxDivMin), 10000);
856 fMin = GetScaledFraction(getASICProfilingInfo->ulLkgEncodeMin, 10000); 855 fMin = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLkgEncodeMin), 10000);
857 856
858 fFT_Lkg_V0NORM = fDecodeLeakageID(ul_FT_Lkg_V0NORM, 857 fFT_Lkg_V0NORM = fDecodeLeakageID(ul_FT_Lkg_V0NORM,
859 fLn_MaxDivMin, fMin, getASICProfilingInfo->ucLkgEfuseLength); 858 fLn_MaxDivMin, fMin, getASICProfilingInfo->ucLkgEfuseLength);
@@ -863,40 +862,40 @@ int atomctrl_calculate_voltage_evv_on_sclk(
863 * PART 2 - Grabbing all required values 862 * PART 2 - Grabbing all required values
864 *------------------------------------------- 863 *-------------------------------------------
865 */ 864 */
866 fSM_A0 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A0, 1000000), 865 fSM_A0 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A0), 1000000),
867 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A0_sign))); 866 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A0_sign)));
868 fSM_A1 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A1, 1000000), 867 fSM_A1 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A1), 1000000),
869 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A1_sign))); 868 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A1_sign)));
870 fSM_A2 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A2, 100000), 869 fSM_A2 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A2), 100000),
871 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A2_sign))); 870 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A2_sign)));
872 fSM_A3 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A3, 1000000), 871 fSM_A3 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A3), 1000000),
873 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A3_sign))); 872 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A3_sign)));
874 fSM_A4 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A4, 1000000), 873 fSM_A4 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A4), 1000000),
875 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A4_sign))); 874 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A4_sign)));
876 fSM_A5 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A5, 1000), 875 fSM_A5 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A5), 1000),
877 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A5_sign))); 876 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A5_sign)));
878 fSM_A6 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A6, 1000), 877 fSM_A6 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A6), 1000),
879 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A6_sign))); 878 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A6_sign)));
880 fSM_A7 = fMultiply(GetScaledFraction(getASICProfilingInfo->ulSM_A7, 1000), 879 fSM_A7 = fMultiply(GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulSM_A7), 1000),
881 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A7_sign))); 880 ConvertToFraction(uPow(-1, getASICProfilingInfo->ucSM_A7_sign)));
882 881
883 fMargin_RO_a = ConvertToFraction(getASICProfilingInfo->ulMargin_RO_a); 882 fMargin_RO_a = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_a));
884 fMargin_RO_b = ConvertToFraction(getASICProfilingInfo->ulMargin_RO_b); 883 fMargin_RO_b = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_b));
885 fMargin_RO_c = ConvertToFraction(getASICProfilingInfo->ulMargin_RO_c); 884 fMargin_RO_c = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_RO_c));
886 885
887 fMargin_fixed = ConvertToFraction(getASICProfilingInfo->ulMargin_fixed); 886 fMargin_fixed = ConvertToFraction(le32_to_cpu(getASICProfilingInfo->ulMargin_fixed));
888 887
889 fMargin_FMAX_mean = GetScaledFraction( 888 fMargin_FMAX_mean = GetScaledFraction(
890 getASICProfilingInfo->ulMargin_Fmax_mean, 10000); 889 le32_to_cpu(getASICProfilingInfo->ulMargin_Fmax_mean), 10000);
891 fMargin_Plat_mean = GetScaledFraction( 890 fMargin_Plat_mean = GetScaledFraction(
892 getASICProfilingInfo->ulMargin_plat_mean, 10000); 891 le32_to_cpu(getASICProfilingInfo->ulMargin_plat_mean), 10000);
893 fMargin_FMAX_sigma = GetScaledFraction( 892 fMargin_FMAX_sigma = GetScaledFraction(
894 getASICProfilingInfo->ulMargin_Fmax_sigma, 10000); 893 le32_to_cpu(getASICProfilingInfo->ulMargin_Fmax_sigma), 10000);
895 fMargin_Plat_sigma = GetScaledFraction( 894 fMargin_Plat_sigma = GetScaledFraction(
896 getASICProfilingInfo->ulMargin_plat_sigma, 10000); 895 le32_to_cpu(getASICProfilingInfo->ulMargin_plat_sigma), 10000);
897 896
898 fMargin_DC_sigma = GetScaledFraction( 897 fMargin_DC_sigma = GetScaledFraction(
899 getASICProfilingInfo->ulMargin_DC_sigma, 100); 898 le32_to_cpu(getASICProfilingInfo->ulMargin_DC_sigma), 100);
900 fMargin_DC_sigma = fDivide(fMargin_DC_sigma, ConvertToFraction(1000)); 899 fMargin_DC_sigma = fDivide(fMargin_DC_sigma, ConvertToFraction(1000));
901 900
902 fCACm_fused = fDivide(fCACm_fused, ConvertToFraction(100)); 901 fCACm_fused = fDivide(fCACm_fused, ConvertToFraction(100));
@@ -908,14 +907,14 @@ int atomctrl_calculate_voltage_evv_on_sclk(
908 fSclk = GetScaledFraction(sclk, 100); 907 fSclk = GetScaledFraction(sclk, 100);
909 908
910 fV_max = fDivide(GetScaledFraction( 909 fV_max = fDivide(GetScaledFraction(
911 getASICProfilingInfo->ulMaxVddc, 1000), ConvertToFraction(4)); 910 le32_to_cpu(getASICProfilingInfo->ulMaxVddc), 1000), ConvertToFraction(4));
912 fT_prod = GetScaledFraction(getASICProfilingInfo->ulBoardCoreTemp, 10); 911 fT_prod = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulBoardCoreTemp), 10);
913 fLKG_Factor = GetScaledFraction(getASICProfilingInfo->ulEvvLkgFactor, 100); 912 fLKG_Factor = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulEvvLkgFactor), 100);
914 fT_FT = GetScaledFraction(getASICProfilingInfo->ulLeakageTemp, 10); 913 fT_FT = GetScaledFraction(le32_to_cpu(getASICProfilingInfo->ulLeakageTemp), 10);
915 fV_FT = fDivide(GetScaledFraction( 914 fV_FT = fDivide(GetScaledFraction(
916 getASICProfilingInfo->ulLeakageVoltage, 1000), ConvertToFraction(4)); 915 le32_to_cpu(getASICProfilingInfo->ulLeakageVoltage), 1000), ConvertToFraction(4));
917 fV_min = fDivide(GetScaledFraction( 916 fV_min = fDivide(GetScaledFraction(
918 getASICProfilingInfo->ulMinVddc, 1000), ConvertToFraction(4)); 917 le32_to_cpu(getASICProfilingInfo->ulMinVddc), 1000), ConvertToFraction(4));
919 918
920 /*----------------------- 919 /*-----------------------
921 * PART 3 920 * PART 3
@@ -925,7 +924,7 @@ int atomctrl_calculate_voltage_evv_on_sclk(
925 fA_Term = fAdd(fMargin_RO_a, fAdd(fMultiply(fSM_A4, fSclk), fSM_A5)); 924 fA_Term = fAdd(fMargin_RO_a, fAdd(fMultiply(fSM_A4, fSclk), fSM_A5));
926 fB_Term = fAdd(fAdd(fMultiply(fSM_A2, fSclk), fSM_A6), fMargin_RO_b); 925 fB_Term = fAdd(fAdd(fMultiply(fSM_A2, fSclk), fSM_A6), fMargin_RO_b);
927 fC_Term = fAdd(fMargin_RO_c, 926 fC_Term = fAdd(fMargin_RO_c,
928 fAdd(fMultiply(fSM_A0,fLkg_FT), 927 fAdd(fMultiply(fSM_A0, fLkg_FT),
929 fAdd(fMultiply(fSM_A1, fMultiply(fLkg_FT, fSclk)), 928 fAdd(fMultiply(fSM_A1, fMultiply(fLkg_FT, fSclk)),
930 fAdd(fMultiply(fSM_A3, fSclk), 929 fAdd(fMultiply(fSM_A3, fSclk),
931 fSubtract(fSM_A7, fRO_fused))))); 930 fSubtract(fSM_A7, fRO_fused)))));
@@ -1063,9 +1062,55 @@ int atomctrl_get_voltage_evv_on_sclk(
1063 get_voltage_info_param_space.ucVoltageMode = 1062 get_voltage_info_param_space.ucVoltageMode =
1064 ATOM_GET_VOLTAGE_EVV_VOLTAGE; 1063 ATOM_GET_VOLTAGE_EVV_VOLTAGE;
1065 get_voltage_info_param_space.usVoltageLevel = 1064 get_voltage_info_param_space.usVoltageLevel =
1066 virtual_voltage_Id; 1065 cpu_to_le16(virtual_voltage_Id);
1067 get_voltage_info_param_space.ulSCLKFreq = 1066 get_voltage_info_param_space.ulSCLKFreq =
1068 sclk; 1067 cpu_to_le32(sclk);
1068
1069 result = cgs_atom_exec_cmd_table(hwmgr->device,
1070 GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
1071 &get_voltage_info_param_space);
1072
1073 if (0 != result)
1074 return result;
1075
1076 *voltage = le16_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *)
1077 (&get_voltage_info_param_space))->usVoltageLevel);
1078
1079 return result;
1080}
1081
1082/**
1083 * atomctrl_get_voltage_evv gets voltage via call to ATOM COMMAND table.
1084 * @param hwmgr input: pointer to hwManager
1085 * @param virtual_voltage_id input: voltage id which match per voltage DPM state: 0xff01, 0xff02.. 0xff08
1086 * @param voltage output: real voltage level in unit of mv
1087 */
1088int atomctrl_get_voltage_evv(struct pp_hwmgr *hwmgr,
1089 uint16_t virtual_voltage_id,
1090 uint16_t *voltage)
1091{
1092 int result;
1093 int entry_id;
1094 GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 get_voltage_info_param_space;
1095
1096 /* search for leakage voltage ID 0xff01 ~ 0xff08 and sckl */
1097 for (entry_id = 0; entry_id < hwmgr->dyn_state.vddc_dependency_on_sclk->count; entry_id++) {
1098 if (hwmgr->dyn_state.vddc_dependency_on_sclk->entries[entry_id].v == virtual_voltage_id) {
1099 /* found */
1100 break;
1101 }
1102 }
1103
1104 PP_ASSERT_WITH_CODE(entry_id < hwmgr->dyn_state.vddc_dependency_on_sclk->count,
1105 "Can't find requested voltage id in vddc_dependency_on_sclk table!",
1106 return -EINVAL;
1107 );
1108
1109 get_voltage_info_param_space.ucVoltageType = VOLTAGE_TYPE_VDDC;
1110 get_voltage_info_param_space.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
1111 get_voltage_info_param_space.usVoltageLevel = virtual_voltage_id;
1112 get_voltage_info_param_space.ulSCLKFreq =
1113 cpu_to_le32(hwmgr->dyn_state.vddc_dependency_on_sclk->entries[entry_id].clk);
1069 1114
1070 result = cgs_atom_exec_cmd_table(hwmgr->device, 1115 result = cgs_atom_exec_cmd_table(hwmgr->device,
1071 GetIndexIntoMasterTable(COMMAND, GetVoltageInfo), 1116 GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
@@ -1074,8 +1119,8 @@ int atomctrl_get_voltage_evv_on_sclk(
1074 if (0 != result) 1119 if (0 != result)
1075 return result; 1120 return result;
1076 1121
1077 *voltage = ((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *) 1122 *voltage = le16_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 *)
1078 (&get_voltage_info_param_space))->usVoltageLevel; 1123 (&get_voltage_info_param_space))->usVoltageLevel);
1079 1124
1080 return result; 1125 return result;
1081} 1126}
@@ -1165,8 +1210,8 @@ static int asic_internal_ss_get_ss_asignment(struct pp_hwmgr *hwmgr,
1165 1210
1166 if (entry_found) { 1211 if (entry_found) {
1167 ssEntry->speed_spectrum_percentage = 1212 ssEntry->speed_spectrum_percentage =
1168 ssInfo->usSpreadSpectrumPercentage; 1213 le16_to_cpu(ssInfo->usSpreadSpectrumPercentage);
1169 ssEntry->speed_spectrum_rate = ssInfo->usSpreadRateInKhz; 1214 ssEntry->speed_spectrum_rate = le16_to_cpu(ssInfo->usSpreadRateInKhz);
1170 1215
1171 if (((GET_DATA_TABLE_MAJOR_REVISION(table) == 2) && 1216 if (((GET_DATA_TABLE_MAJOR_REVISION(table) == 2) &&
1172 (GET_DATA_TABLE_MINOR_REVISION(table) >= 2)) || 1217 (GET_DATA_TABLE_MINOR_REVISION(table) >= 2)) ||
@@ -1222,7 +1267,7 @@ int atomctrl_read_efuse(void *device, uint16_t start_index,
1222 int result; 1267 int result;
1223 READ_EFUSE_VALUE_PARAMETER efuse_param; 1268 READ_EFUSE_VALUE_PARAMETER efuse_param;
1224 1269
1225 efuse_param.sEfuse.usEfuseIndex = (start_index / 32) * 4; 1270 efuse_param.sEfuse.usEfuseIndex = cpu_to_le16((start_index / 32) * 4);
1226 efuse_param.sEfuse.ucBitShift = (uint8_t) 1271 efuse_param.sEfuse.ucBitShift = (uint8_t)
1227 (start_index - ((start_index / 32) * 32)); 1272 (start_index - ((start_index / 32) * 32));
1228 efuse_param.sEfuse.ucBitLength = (uint8_t) 1273 efuse_param.sEfuse.ucBitLength = (uint8_t)
@@ -1232,19 +1277,21 @@ int atomctrl_read_efuse(void *device, uint16_t start_index,
1232 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue), 1277 GetIndexIntoMasterTable(COMMAND, ReadEfuseValue),
1233 &efuse_param); 1278 &efuse_param);
1234 if (!result) 1279 if (!result)
1235 *efuse = efuse_param.ulEfuseValue & mask; 1280 *efuse = le32_to_cpu(efuse_param.ulEfuseValue) & mask;
1236 1281
1237 return result; 1282 return result;
1238} 1283}
1239 1284
1240int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock, 1285int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clock,
1241 uint8_t level) 1286 uint8_t level)
1242{ 1287{
1243 DYNAMICE_MEMORY_SETTINGS_PARAMETER_V2_1 memory_clock_parameters; 1288 DYNAMICE_MEMORY_SETTINGS_PARAMETER_V2_1 memory_clock_parameters;
1244 int result; 1289 int result;
1245 1290
1246 memory_clock_parameters.asDPMMCReg.ulClock.ulClockFreq = memory_clock & SET_CLOCK_FREQ_MASK; 1291 memory_clock_parameters.asDPMMCReg.ulClock.ulClockFreq =
1247 memory_clock_parameters.asDPMMCReg.ulClock.ulComputeClockFlag = ADJUST_MC_SETTING_PARAM; 1292 memory_clock & SET_CLOCK_FREQ_MASK;
1293 memory_clock_parameters.asDPMMCReg.ulClock.ulComputeClockFlag =
1294 ADJUST_MC_SETTING_PARAM;
1248 memory_clock_parameters.asDPMMCReg.ucMclkDPMState = level; 1295 memory_clock_parameters.asDPMMCReg.ucMclkDPMState = level;
1249 1296
1250 result = cgs_atom_exec_cmd_table 1297 result = cgs_atom_exec_cmd_table
@@ -1264,8 +1311,8 @@ int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_
1264 1311
1265 get_voltage_info_param_space.ucVoltageType = voltage_type; 1312 get_voltage_info_param_space.ucVoltageType = voltage_type;
1266 get_voltage_info_param_space.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE; 1313 get_voltage_info_param_space.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
1267 get_voltage_info_param_space.usVoltageLevel = virtual_voltage_Id; 1314 get_voltage_info_param_space.usVoltageLevel = cpu_to_le16(virtual_voltage_Id);
1268 get_voltage_info_param_space.ulSCLKFreq = sclk; 1315 get_voltage_info_param_space.ulSCLKFreq = cpu_to_le32(sclk);
1269 1316
1270 result = cgs_atom_exec_cmd_table(hwmgr->device, 1317 result = cgs_atom_exec_cmd_table(hwmgr->device,
1271 GetIndexIntoMasterTable(COMMAND, GetVoltageInfo), 1318 GetIndexIntoMasterTable(COMMAND, GetVoltageInfo),
@@ -1274,7 +1321,7 @@ int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_
1274 if (0 != result) 1321 if (0 != result)
1275 return result; 1322 return result;
1276 1323
1277 *voltage = ((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)(&get_voltage_info_param_space))->ulVoltageLevel; 1324 *voltage = le32_to_cpu(((GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_3 *)(&get_voltage_info_param_space))->ulVoltageLevel);
1278 1325
1279 return result; 1326 return result;
1280} 1327}
@@ -1295,15 +1342,19 @@ int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctr
1295 for (i = 0; i < psmu_info->ucSclkEntryNum; i++) { 1342 for (i = 0; i < psmu_info->ucSclkEntryNum; i++) {
1296 table->entry[i].ucVco_setting = psmu_info->asSclkFcwRangeEntry[i].ucVco_setting; 1343 table->entry[i].ucVco_setting = psmu_info->asSclkFcwRangeEntry[i].ucVco_setting;
1297 table->entry[i].ucPostdiv = psmu_info->asSclkFcwRangeEntry[i].ucPostdiv; 1344 table->entry[i].ucPostdiv = psmu_info->asSclkFcwRangeEntry[i].ucPostdiv;
1298 table->entry[i].usFcw_pcc = psmu_info->asSclkFcwRangeEntry[i].ucFcw_pcc; 1345 table->entry[i].usFcw_pcc =
1299 table->entry[i].usFcw_trans_upper = psmu_info->asSclkFcwRangeEntry[i].ucFcw_trans_upper; 1346 le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucFcw_pcc);
1300 table->entry[i].usRcw_trans_lower = psmu_info->asSclkFcwRangeEntry[i].ucRcw_trans_lower; 1347 table->entry[i].usFcw_trans_upper =
1348 le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucFcw_trans_upper);
1349 table->entry[i].usRcw_trans_lower =
1350 le16_to_cpu(psmu_info->asSclkFcwRangeEntry[i].ucRcw_trans_lower);
1301 } 1351 }
1302 1352
1303 return 0; 1353 return 0;
1304} 1354}
1305 1355
1306int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl__avfs_parameters *param) 1356int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr,
1357 struct pp_atom_ctrl__avfs_parameters *param)
1307{ 1358{
1308 ATOM_ASIC_PROFILING_INFO_V3_6 *profile = NULL; 1359 ATOM_ASIC_PROFILING_INFO_V3_6 *profile = NULL;
1309 1360
@@ -1317,30 +1368,30 @@ int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl__a
1317 if (!profile) 1368 if (!profile)
1318 return -1; 1369 return -1;
1319 1370
1320 param->ulAVFS_meanNsigma_Acontant0 = profile->ulAVFS_meanNsigma_Acontant0; 1371 param->ulAVFS_meanNsigma_Acontant0 = le32_to_cpu(profile->ulAVFS_meanNsigma_Acontant0);
1321 param->ulAVFS_meanNsigma_Acontant1 = profile->ulAVFS_meanNsigma_Acontant1; 1372 param->ulAVFS_meanNsigma_Acontant1 = le32_to_cpu(profile->ulAVFS_meanNsigma_Acontant1);
1322 param->ulAVFS_meanNsigma_Acontant2 = profile->ulAVFS_meanNsigma_Acontant2; 1373 param->ulAVFS_meanNsigma_Acontant2 = le32_to_cpu(profile->ulAVFS_meanNsigma_Acontant2);
1323 param->usAVFS_meanNsigma_DC_tol_sigma = profile->usAVFS_meanNsigma_DC_tol_sigma; 1374 param->usAVFS_meanNsigma_DC_tol_sigma = le16_to_cpu(profile->usAVFS_meanNsigma_DC_tol_sigma);
1324 param->usAVFS_meanNsigma_Platform_mean = profile->usAVFS_meanNsigma_Platform_mean; 1375 param->usAVFS_meanNsigma_Platform_mean = le16_to_cpu(profile->usAVFS_meanNsigma_Platform_mean);
1325 param->usAVFS_meanNsigma_Platform_sigma = profile->usAVFS_meanNsigma_Platform_sigma; 1376 param->usAVFS_meanNsigma_Platform_sigma = le16_to_cpu(profile->usAVFS_meanNsigma_Platform_sigma);
1326 param->ulGB_VDROOP_TABLE_CKSOFF_a0 = profile->ulGB_VDROOP_TABLE_CKSOFF_a0; 1377 param->ulGB_VDROOP_TABLE_CKSOFF_a0 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSOFF_a0);
1327 param->ulGB_VDROOP_TABLE_CKSOFF_a1 = profile->ulGB_VDROOP_TABLE_CKSOFF_a1; 1378 param->ulGB_VDROOP_TABLE_CKSOFF_a1 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSOFF_a1);
1328 param->ulGB_VDROOP_TABLE_CKSOFF_a2 = profile->ulGB_VDROOP_TABLE_CKSOFF_a2; 1379 param->ulGB_VDROOP_TABLE_CKSOFF_a2 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSOFF_a2);
1329 param->ulGB_VDROOP_TABLE_CKSON_a0 = profile->ulGB_VDROOP_TABLE_CKSON_a0; 1380 param->ulGB_VDROOP_TABLE_CKSON_a0 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSON_a0);
1330 param->ulGB_VDROOP_TABLE_CKSON_a1 = profile->ulGB_VDROOP_TABLE_CKSON_a1; 1381 param->ulGB_VDROOP_TABLE_CKSON_a1 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSON_a1);
1331 param->ulGB_VDROOP_TABLE_CKSON_a2 = profile->ulGB_VDROOP_TABLE_CKSON_a2; 1382 param->ulGB_VDROOP_TABLE_CKSON_a2 = le32_to_cpu(profile->ulGB_VDROOP_TABLE_CKSON_a2);
1332 param->ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = profile->ulAVFSGB_FUSE_TABLE_CKSOFF_m1; 1383 param->ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSOFF_m1);
1333 param->usAVFSGB_FUSE_TABLE_CKSOFF_m2 = profile->usAVFSGB_FUSE_TABLE_CKSOFF_m2; 1384 param->usAVFSGB_FUSE_TABLE_CKSOFF_m2 = le16_to_cpu(profile->usAVFSGB_FUSE_TABLE_CKSOFF_m2);
1334 param->ulAVFSGB_FUSE_TABLE_CKSOFF_b = profile->ulAVFSGB_FUSE_TABLE_CKSOFF_b; 1385 param->ulAVFSGB_FUSE_TABLE_CKSOFF_b = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSOFF_b);
1335 param->ulAVFSGB_FUSE_TABLE_CKSON_m1 = profile->ulAVFSGB_FUSE_TABLE_CKSON_m1; 1386 param->ulAVFSGB_FUSE_TABLE_CKSON_m1 = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSON_m1);
1336 param->usAVFSGB_FUSE_TABLE_CKSON_m2 = profile->usAVFSGB_FUSE_TABLE_CKSON_m2; 1387 param->usAVFSGB_FUSE_TABLE_CKSON_m2 = le16_to_cpu(profile->usAVFSGB_FUSE_TABLE_CKSON_m2);
1337 param->ulAVFSGB_FUSE_TABLE_CKSON_b = profile->ulAVFSGB_FUSE_TABLE_CKSON_b; 1388 param->ulAVFSGB_FUSE_TABLE_CKSON_b = le32_to_cpu(profile->ulAVFSGB_FUSE_TABLE_CKSON_b);
1338 param->usMaxVoltage_0_25mv = profile->usMaxVoltage_0_25mv; 1389 param->usMaxVoltage_0_25mv = le16_to_cpu(profile->usMaxVoltage_0_25mv);
1339 param->ucEnableGB_VDROOP_TABLE_CKSOFF = profile->ucEnableGB_VDROOP_TABLE_CKSOFF; 1390 param->ucEnableGB_VDROOP_TABLE_CKSOFF = profile->ucEnableGB_VDROOP_TABLE_CKSOFF;
1340 param->ucEnableGB_VDROOP_TABLE_CKSON = profile->ucEnableGB_VDROOP_TABLE_CKSON; 1391 param->ucEnableGB_VDROOP_TABLE_CKSON = profile->ucEnableGB_VDROOP_TABLE_CKSON;
1341 param->ucEnableGB_FUSE_TABLE_CKSOFF = profile->ucEnableGB_FUSE_TABLE_CKSOFF; 1392 param->ucEnableGB_FUSE_TABLE_CKSOFF = profile->ucEnableGB_FUSE_TABLE_CKSOFF;
1342 param->ucEnableGB_FUSE_TABLE_CKSON = profile->ucEnableGB_FUSE_TABLE_CKSON; 1393 param->ucEnableGB_FUSE_TABLE_CKSON = profile->ucEnableGB_FUSE_TABLE_CKSON;
1343 param->usPSM_Age_ComFactor = profile->usPSM_Age_ComFactor; 1394 param->usPSM_Age_ComFactor = le16_to_cpu(profile->usPSM_Age_ComFactor);
1344 param->ucEnableApplyAVFS_CKS_OFF_Voltage = profile->ucEnableApplyAVFS_CKS_OFF_Voltage; 1395 param->ucEnableApplyAVFS_CKS_OFF_Voltage = profile->ucEnableApplyAVFS_CKS_OFF_Voltage;
1345 1396
1346 return 0; 1397 return 0;
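The ppatomctrl.c hunks above add explicit byte-order handling: multi-byte fields read from AtomBIOS tables now go through le16_to_cpu()/le32_to_cpu(), and values written into command-table parameters through cpu_to_le16()/cpu_to_le32(), because the VBIOS stores its tables little-endian regardless of the host CPU. A minimal sketch of that pattern, using a hypothetical table layout rather than the real ATOM structures:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical table as laid out by the video BIOS (fields are little-endian;
 * the real ATOM structures are additionally packed). */
struct example_bios_table {
	__le16 usVoltageLevel;
	__le32 ulClockFreq;
};

/* Copy the fields into native-endian variables; on little-endian hosts the
 * helpers compile to plain loads, on big-endian hosts they byte-swap. */
static void example_read_table(const struct example_bios_table *src,
			       u16 *voltage, u32 *clock)
{
	*voltage = le16_to_cpu(src->usVoltageLevel);
	*clock = le32_to_cpu(src->ulClockFreq);
}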
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
index 1e35a9625baf..fc898afce002 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h
@@ -281,6 +281,7 @@ struct pp_atom_ctrl__avfs_parameters {
281 281
282extern bool atomctrl_get_pp_assign_pin(struct pp_hwmgr *hwmgr, const uint32_t pinId, pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment); 282extern bool atomctrl_get_pp_assign_pin(struct pp_hwmgr *hwmgr, const uint32_t pinId, pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment);
283extern int atomctrl_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage); 283extern int atomctrl_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage);
284extern int atomctrl_get_voltage_evv(struct pp_hwmgr *hwmgr, uint16_t virtual_voltage_id, uint16_t *voltage);
284extern uint32_t atomctrl_get_mpll_reference_clock(struct pp_hwmgr *hwmgr); 285extern uint32_t atomctrl_get_mpll_reference_clock(struct pp_hwmgr *hwmgr);
285extern int atomctrl_get_memory_clock_spread_spectrum(struct pp_hwmgr *hwmgr, const uint32_t memory_clock, pp_atomctrl_internal_ss_info *ssInfo); 286extern int atomctrl_get_memory_clock_spread_spectrum(struct pp_hwmgr *hwmgr, const uint32_t memory_clock, pp_atomctrl_internal_ss_info *ssInfo);
286extern int atomctrl_get_engine_clock_spread_spectrum(struct pp_hwmgr *hwmgr, const uint32_t engine_clock, pp_atomctrl_internal_ss_info *ssInfo); 287extern int atomctrl_get_engine_clock_spread_spectrum(struct pp_hwmgr *hwmgr, const uint32_t engine_clock, pp_atomctrl_internal_ss_info *ssInfo);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h
index 009bd5963ed8..8f50a038396c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppevvmath.h
@@ -50,55 +50,45 @@ typedef union _fInt {
50 * Function Declarations 50 * Function Declarations
51 * ------------------------------------------------------------------------------- 51 * -------------------------------------------------------------------------------
52 */ 52 */
53fInt ConvertToFraction(int); /* Use this to convert an INT to a FINT */ 53static fInt ConvertToFraction(int); /* Use this to convert an INT to a FINT */
54fInt Convert_ULONG_ToFraction(uint32_t); /* Use this to convert an uint32_t to a FINT */ 54static fInt Convert_ULONG_ToFraction(uint32_t); /* Use this to convert an uint32_t to a FINT */
55fInt GetScaledFraction(int, int); /* Use this to convert an INT to a FINT after scaling it by a factor */ 55static fInt GetScaledFraction(int, int); /* Use this to convert an INT to a FINT after scaling it by a factor */
56int ConvertBackToInteger(fInt); /* Convert a FINT back to an INT that is scaled by 1000 (i.e. last 3 digits are the decimal digits) */ 56static int ConvertBackToInteger(fInt); /* Convert a FINT back to an INT that is scaled by 1000 (i.e. last 3 digits are the decimal digits) */
57 57
58fInt fNegate(fInt); /* Returns -1 * input fInt value */ 58static fInt fNegate(fInt); /* Returns -1 * input fInt value */
59fInt fAdd (fInt, fInt); /* Returns the sum of two fInt numbers */ 59static fInt fAdd (fInt, fInt); /* Returns the sum of two fInt numbers */
60fInt fSubtract (fInt A, fInt B); /* Returns A-B - Sometimes easier than Adding negative numbers */ 60static fInt fSubtract (fInt A, fInt B); /* Returns A-B - Sometimes easier than Adding negative numbers */
61fInt fMultiply (fInt, fInt); /* Returns the product of two fInt numbers */ 61static fInt fMultiply (fInt, fInt); /* Returns the product of two fInt numbers */
62fInt fDivide (fInt A, fInt B); /* Returns A/B */ 62static fInt fDivide (fInt A, fInt B); /* Returns A/B */
63fInt fGetSquare(fInt); /* Returns the square of a fInt number */ 63static fInt fGetSquare(fInt); /* Returns the square of a fInt number */
64fInt fSqrt(fInt); /* Returns the Square Root of a fInt number */ 64static fInt fSqrt(fInt); /* Returns the Square Root of a fInt number */
65 65
66int uAbs(int); /* Returns the Absolute value of the Int */ 66static int uAbs(int); /* Returns the Absolute value of the Int */
67fInt fAbs(fInt); /* Returns the Absolute value of the fInt */ 67static int uPow(int base, int exponent); /* Returns base^exponent an INT */
68int uPow(int base, int exponent); /* Returns base^exponent an INT */ 68
69 69static void SolveQuadracticEqn(fInt, fInt, fInt, fInt[]); /* Returns the 2 roots via the array */
70void SolveQuadracticEqn(fInt, fInt, fInt, fInt[]); /* Returns the 2 roots via the array */ 70static bool Equal(fInt, fInt); /* Returns true if two fInts are equal to each other */
71bool Equal(fInt, fInt); /* Returns true if two fInts are equal to each other */ 71static bool GreaterThan(fInt A, fInt B); /* Returns true if A > B */
72bool GreaterThan(fInt A, fInt B); /* Returns true if A > B */ 72
73 73static fInt fExponential(fInt exponent); /* Can be used to calculate e^exponent */
74fInt fExponential(fInt exponent); /* Can be used to calculate e^exponent */ 74static fInt fNaturalLog(fInt value); /* Can be used to calculate ln(value) */
75fInt fNaturalLog(fInt value); /* Can be used to calculate ln(value) */
76 75
77/* Fuse decoding functions 76/* Fuse decoding functions
78 * ------------------------------------------------------------------------------------- 77 * -------------------------------------------------------------------------------------
79 */ 78 */
80fInt fDecodeLinearFuse(uint32_t fuse_value, fInt f_min, fInt f_range, uint32_t bitlength); 79static fInt fDecodeLinearFuse(uint32_t fuse_value, fInt f_min, fInt f_range, uint32_t bitlength);
81fInt fDecodeLogisticFuse(uint32_t fuse_value, fInt f_average, fInt f_range, uint32_t bitlength); 80static fInt fDecodeLogisticFuse(uint32_t fuse_value, fInt f_average, fInt f_range, uint32_t bitlength);
82fInt fDecodeLeakageID (uint32_t leakageID_fuse, fInt ln_max_div_min, fInt f_min, uint32_t bitlength); 81static fInt fDecodeLeakageID (uint32_t leakageID_fuse, fInt ln_max_div_min, fInt f_min, uint32_t bitlength);
83 82
84/* Internal Support Functions - Use these ONLY for testing or adding to internal functions 83/* Internal Support Functions - Use these ONLY for testing or adding to internal functions
85 * ------------------------------------------------------------------------------------- 84 * -------------------------------------------------------------------------------------
86 * Some of the following functions take two INTs as their input - This is unsafe for a variety of reasons. 85 * Some of the following functions take two INTs as their input - This is unsafe for a variety of reasons.
87 */ 86 */
88fInt Add (int, int); /* Add two INTs and return Sum as FINT */ 87static fInt Divide (int, int); /* Divide two INTs and return result as FINT */
89fInt Multiply (int, int); /* Multiply two INTs and return Product as FINT */ 88static fInt fNegate(fInt);
90fInt Divide (int, int); /* You get the idea... */
91fInt fNegate(fInt);
92 89
93int uGetScaledDecimal (fInt); /* Internal function */ 90static int uGetScaledDecimal (fInt); /* Internal function */
94int GetReal (fInt A); /* Internal function */ 91static int GetReal (fInt A); /* Internal function */
95
96/* Future Additions and Incomplete Functions
97 * -------------------------------------------------------------------------------------
98 */
99int GetRoundedValue(fInt); /* Incomplete function - Useful only when Precision is lacking */
100 /* Let us say we have 2.126 but can only handle 2 decimal points. We could */
101 /* either chop of 6 and keep 2.12 or use this function to get 2.13, which is more accurate */
102 92
103/* ------------------------------------------------------------------------------------- 93/* -------------------------------------------------------------------------------------
104 * TROUBLESHOOTING INFORMATION 94 * TROUBLESHOOTING INFORMATION
@@ -115,7 +105,7 @@ int GetRoundedValue(fInt); /* Incomplete function - Usef
115 * START OF CODE 105 * START OF CODE
116 * ------------------------------------------------------------------------------------- 106 * -------------------------------------------------------------------------------------
117 */ 107 */
118fInt fExponential(fInt exponent) /*Can be used to calculate e^exponent*/ 108static fInt fExponential(fInt exponent) /*Can be used to calculate e^exponent*/
119{ 109{
120 uint32_t i; 110 uint32_t i;
121 bool bNegated = false; 111 bool bNegated = false;
@@ -154,7 +144,7 @@ fInt fExponential(fInt exponent) /*Can be used to calculate e^exponent*/
154 return solution; 144 return solution;
155} 145}
156 146
157fInt fNaturalLog(fInt value) 147static fInt fNaturalLog(fInt value)
158{ 148{
159 uint32_t i; 149 uint32_t i;
160 fInt upper_bound = Divide(8, 1000); 150 fInt upper_bound = Divide(8, 1000);
@@ -179,7 +169,7 @@ fInt fNaturalLog(fInt value)
179 return (fAdd(solution, error_term)); 169 return (fAdd(solution, error_term));
180} 170}
181 171
182fInt fDecodeLinearFuse(uint32_t fuse_value, fInt f_min, fInt f_range, uint32_t bitlength) 172static fInt fDecodeLinearFuse(uint32_t fuse_value, fInt f_min, fInt f_range, uint32_t bitlength)
183{ 173{
184 fInt f_fuse_value = Convert_ULONG_ToFraction(fuse_value); 174 fInt f_fuse_value = Convert_ULONG_ToFraction(fuse_value);
185 fInt f_bit_max_value = Convert_ULONG_ToFraction((uPow(2, bitlength)) - 1); 175 fInt f_bit_max_value = Convert_ULONG_ToFraction((uPow(2, bitlength)) - 1);
@@ -194,7 +184,7 @@ fInt fDecodeLinearFuse(uint32_t fuse_value, fInt f_min, fInt f_range, uint32_t b
194} 184}
195 185
196 186
197fInt fDecodeLogisticFuse(uint32_t fuse_value, fInt f_average, fInt f_range, uint32_t bitlength) 187static fInt fDecodeLogisticFuse(uint32_t fuse_value, fInt f_average, fInt f_range, uint32_t bitlength)
198{ 188{
199 fInt f_fuse_value = Convert_ULONG_ToFraction(fuse_value); 189 fInt f_fuse_value = Convert_ULONG_ToFraction(fuse_value);
200 fInt f_bit_max_value = Convert_ULONG_ToFraction((uPow(2, bitlength)) - 1); 190 fInt f_bit_max_value = Convert_ULONG_ToFraction((uPow(2, bitlength)) - 1);
@@ -212,7 +202,7 @@ fInt fDecodeLogisticFuse(uint32_t fuse_value, fInt f_average, fInt f_range, uint
212 return f_decoded_value; 202 return f_decoded_value;
213} 203}
214 204
215fInt fDecodeLeakageID (uint32_t leakageID_fuse, fInt ln_max_div_min, fInt f_min, uint32_t bitlength) 205static fInt fDecodeLeakageID (uint32_t leakageID_fuse, fInt ln_max_div_min, fInt f_min, uint32_t bitlength)
216{ 206{
217 fInt fLeakage; 207 fInt fLeakage;
218 fInt f_bit_max_value = Convert_ULONG_ToFraction((uPow(2, bitlength)) - 1); 208 fInt f_bit_max_value = Convert_ULONG_ToFraction((uPow(2, bitlength)) - 1);
@@ -225,7 +215,7 @@ fInt fDecodeLeakageID (uint32_t leakageID_fuse, fInt ln_max_div_min, fInt f_min,
225 return fLeakage; 215 return fLeakage;
226} 216}
227 217
228fInt ConvertToFraction(int X) /*Add all range checking here. Is it possible to make fInt a private declaration? */ 218static fInt ConvertToFraction(int X) /*Add all range checking here. Is it possible to make fInt a private declaration? */
229{ 219{
230 fInt temp; 220 fInt temp;
231 221
@@ -237,13 +227,13 @@ fInt ConvertToFraction(int X) /*Add all range checking here. Is it possible to m
237 return temp; 227 return temp;
238} 228}
239 229
240fInt fNegate(fInt X) 230static fInt fNegate(fInt X)
241{ 231{
242 fInt CONSTANT_NEGONE = ConvertToFraction(-1); 232 fInt CONSTANT_NEGONE = ConvertToFraction(-1);
243 return (fMultiply(X, CONSTANT_NEGONE)); 233 return (fMultiply(X, CONSTANT_NEGONE));
244} 234}
245 235
246fInt Convert_ULONG_ToFraction(uint32_t X) 236static fInt Convert_ULONG_ToFraction(uint32_t X)
247{ 237{
248 fInt temp; 238 fInt temp;
249 239
@@ -255,7 +245,7 @@ fInt Convert_ULONG_ToFraction(uint32_t X)
255 return temp; 245 return temp;
256} 246}
257 247
258fInt GetScaledFraction(int X, int factor) 248static fInt GetScaledFraction(int X, int factor)
259{ 249{
260 int times_shifted, factor_shifted; 250 int times_shifted, factor_shifted;
261 bool bNEGATED; 251 bool bNEGATED;
@@ -304,7 +294,7 @@ fInt GetScaledFraction(int X, int factor)
304} 294}
305 295
306/* Addition using two fInts */ 296/* Addition using two fInts */
307fInt fAdd (fInt X, fInt Y) 297static fInt fAdd (fInt X, fInt Y)
308{ 298{
309 fInt Sum; 299 fInt Sum;
310 300
@@ -314,7 +304,7 @@ fInt fAdd (fInt X, fInt Y)
314} 304}
315 305
316/* Addition using two fInts */ 306/* Addition using two fInts */
317fInt fSubtract (fInt X, fInt Y) 307static fInt fSubtract (fInt X, fInt Y)
318{ 308{
319 fInt Difference; 309 fInt Difference;
320 310
@@ -323,7 +313,7 @@ fInt fSubtract (fInt X, fInt Y)
323 return Difference; 313 return Difference;
324} 314}
325 315
326bool Equal(fInt A, fInt B) 316static bool Equal(fInt A, fInt B)
327{ 317{
328 if (A.full == B.full) 318 if (A.full == B.full)
329 return true; 319 return true;
@@ -331,7 +321,7 @@ bool Equal(fInt A, fInt B)
331 return false; 321 return false;
332} 322}
333 323
334bool GreaterThan(fInt A, fInt B) 324static bool GreaterThan(fInt A, fInt B)
335{ 325{
336 if (A.full > B.full) 326 if (A.full > B.full)
337 return true; 327 return true;
@@ -339,7 +329,7 @@ bool GreaterThan(fInt A, fInt B)
339 return false; 329 return false;
340} 330}
341 331
342fInt fMultiply (fInt X, fInt Y) /* Uses 64-bit integers (int64_t) */ 332static fInt fMultiply (fInt X, fInt Y) /* Uses 64-bit integers (int64_t) */
343{ 333{
344 fInt Product; 334 fInt Product;
345 int64_t tempProduct; 335 int64_t tempProduct;
@@ -363,7 +353,7 @@ fInt fMultiply (fInt X, fInt Y) /* Uses 64-bit integers (int64_t) */
363 return Product; 353 return Product;
364} 354}
365 355
366fInt fDivide (fInt X, fInt Y) 356static fInt fDivide (fInt X, fInt Y)
367{ 357{
368 fInt fZERO, fQuotient; 358 fInt fZERO, fQuotient;
369 int64_t longlongX, longlongY; 359 int64_t longlongX, longlongY;
@@ -384,7 +374,7 @@ fInt fDivide (fInt X, fInt Y)
384 return fQuotient; 374 return fQuotient;
385} 375}
386 376
387int ConvertBackToInteger (fInt A) /*THIS is the function that will be used to check with the Golden settings table*/ 377static int ConvertBackToInteger (fInt A) /*THIS is the function that will be used to check with the Golden settings table*/
388{ 378{
389 fInt fullNumber, scaledDecimal, scaledReal; 379 fInt fullNumber, scaledDecimal, scaledReal;
390 380
@@ -397,13 +387,13 @@ int ConvertBackToInteger (fInt A) /*THIS is the function that will be used to ch
397 return fullNumber.full; 387 return fullNumber.full;
398} 388}
399 389
400fInt fGetSquare(fInt A) 390static fInt fGetSquare(fInt A)
401{ 391{
402 return fMultiply(A,A); 392 return fMultiply(A,A);
403} 393}
404 394
405/* x_new = x_old - (x_old^2 - C) / (2 * x_old) */ 395/* x_new = x_old - (x_old^2 - C) / (2 * x_old) */
406fInt fSqrt(fInt num) 396static fInt fSqrt(fInt num)
407{ 397{
408 fInt F_divide_Fprime, Fprime; 398 fInt F_divide_Fprime, Fprime;
409 fInt test; 399 fInt test;
@@ -460,7 +450,7 @@ fInt fSqrt(fInt num)
460 return (x_new); 450 return (x_new);
461} 451}
462 452
463void SolveQuadracticEqn(fInt A, fInt B, fInt C, fInt Roots[]) 453static void SolveQuadracticEqn(fInt A, fInt B, fInt C, fInt Roots[])
464{ 454{
465 fInt *pRoots = &Roots[0]; 455 fInt *pRoots = &Roots[0];
466 fInt temp, root_first, root_second; 456 fInt temp, root_first, root_second;
@@ -498,52 +488,13 @@ void SolveQuadracticEqn(fInt A, fInt B, fInt C, fInt Roots[])
498 * ----------------------------------------------------------------------------- 488 * -----------------------------------------------------------------------------
499 */ 489 */
500 490
501/* Addition using two normal ints - Temporary - Use only for testing purposes?. */
502fInt Add (int X, int Y)
503{
504 fInt A, B, Sum;
505
506 A.full = (X << SHIFT_AMOUNT);
507 B.full = (Y << SHIFT_AMOUNT);
508
509 Sum.full = A.full + B.full;
510
511 return Sum;
512}
513
514/* Conversion Functions */ 491/* Conversion Functions */
515int GetReal (fInt A) 492static int GetReal (fInt A)
516{ 493{
517 return (A.full >> SHIFT_AMOUNT); 494 return (A.full >> SHIFT_AMOUNT);
518} 495}
519 496
520/* Temporarily Disabled */ 497static fInt Divide (int X, int Y)
521int GetRoundedValue(fInt A) /*For now, round the 3rd decimal place */
522{
523 /* ROUNDING TEMPORARLY DISABLED
524 int temp = A.full;
525 int decimal_cutoff, decimal_mask = 0x000001FF;
526 decimal_cutoff = temp & decimal_mask;
527 if (decimal_cutoff > 0x147) {
528 temp += 673;
529 }*/
530
531 return ConvertBackToInteger(A)/10000; /*Temporary - in case this was used somewhere else */
532}
533
534fInt Multiply (int X, int Y)
535{
536 fInt A, B, Product;
537
538 A.full = X << SHIFT_AMOUNT;
539 B.full = Y << SHIFT_AMOUNT;
540
541 Product = fMultiply(A, B);
542
543 return Product;
544}
545
546fInt Divide (int X, int Y)
547{ 498{
548 fInt A, B, Quotient; 499 fInt A, B, Quotient;
549 500
@@ -555,7 +506,7 @@ fInt Divide (int X, int Y)
555 return Quotient; 506 return Quotient;
556} 507}
557 508
558int uGetScaledDecimal (fInt A) /*Converts the fractional portion to whole integers - Costly function */ 509static int uGetScaledDecimal (fInt A) /*Converts the fractional portion to whole integers - Costly function */
559{ 510{
560 int dec[PRECISION]; 511 int dec[PRECISION];
561 int i, scaledDecimal = 0, tmp = A.partial.decimal; 512 int i, scaledDecimal = 0, tmp = A.partial.decimal;
@@ -570,7 +521,7 @@ int uGetScaledDecimal (fInt A) /*Converts the fractional portion to whole intege
570 return scaledDecimal; 521 return scaledDecimal;
571} 522}
572 523
573int uPow(int base, int power) 524static int uPow(int base, int power)
574{ 525{
575 if (power == 0) 526 if (power == 0)
576 return 1; 527 return 1;
@@ -578,15 +529,7 @@ int uPow(int base, int power)
578 return (base)*uPow(base, power - 1); 529 return (base)*uPow(base, power - 1);
579} 530}
580 531
581fInt fAbs(fInt A) 532static int uAbs(int X)
582{
583 if (A.partial.real < 0)
584 return (fMultiply(A, ConvertToFraction(-1)));
585 else
586 return A;
587}
588
589int uAbs(int X)
590{ 533{
591 if (X < 0) 534 if (X < 0)
592 return (X * -1); 535 return (X * -1);
@@ -594,7 +537,7 @@ int uAbs(int X)
594 return X; 537 return X;
595} 538}
596 539
597fInt fRoundUpByStepSize(fInt A, fInt fStepSize, bool error_term) 540static fInt fRoundUpByStepSize(fInt A, fInt fStepSize, bool error_term)
598{ 541{
599 fInt solution; 542 fInt solution;
600 543
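The fSqrt() helper kept (and made static) above implements the Newton-Raphson update noted in its comment, x_new = x_old - (x_old^2 - C) / (2 * x_old), on the driver's fInt fixed-point type. A standalone userspace sketch of the same iteration on plain unsigned integers, only to illustrate the convergence, not the driver's fixed-point arithmetic:

#include <stdio.h>

/* Integer Newton-Raphson square root: x = (x + c/x) / 2 is the same update as
 * x - (x*x - c) / (2*x); starting from x = c the estimate only moves down,
 * so we stop once it no longer shrinks. */
static unsigned int isqrt_newton(unsigned int c)
{
	unsigned int x, next;

	if (c < 2)
		return c;

	x = c;				/* initial guess: c itself */
	next = (x + c / x) / 2;
	while (next < x) {
		x = next;
		next = (x + c / x) / 2;
	}
	return x;			/* floor(sqrt(c)) */
}

int main(void)
{
	printf("%u\n", isqrt_newton(2500));	/* prints 50 */
	return 0;
}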
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
index 2f1a14fe05b1..6c321b0d8a1e 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c
@@ -794,19 +794,35 @@ static const ATOM_PPLIB_STATE_V2 *get_state_entry_v2(
794static const ATOM_PPLIB_POWERPLAYTABLE *get_powerplay_table( 794static const ATOM_PPLIB_POWERPLAYTABLE *get_powerplay_table(
795 struct pp_hwmgr *hwmgr) 795 struct pp_hwmgr *hwmgr)
796{ 796{
797 const void *table_addr = NULL; 797 const void *table_addr = hwmgr->soft_pp_table;
798 uint8_t frev, crev; 798 uint8_t frev, crev;
799 uint16_t size; 799 uint16_t size;
800 800
801 table_addr = cgs_atom_get_data_table(hwmgr->device, 801 if (!table_addr) {
802 GetIndexIntoMasterTable(DATA, PowerPlayInfo), 802 table_addr = cgs_atom_get_data_table(hwmgr->device,
803 &size, &frev, &crev); 803 GetIndexIntoMasterTable(DATA, PowerPlayInfo),
804 &size, &frev, &crev);
804 805
805 hwmgr->soft_pp_table = table_addr; 806 hwmgr->soft_pp_table = table_addr;
807 hwmgr->soft_pp_table_size = size;
808 }
806 809
807 return (const ATOM_PPLIB_POWERPLAYTABLE *)table_addr; 810 return (const ATOM_PPLIB_POWERPLAYTABLE *)table_addr;
808} 811}
809 812
813int pp_tables_get_response_times(struct pp_hwmgr *hwmgr,
814 uint32_t *vol_rep_time, uint32_t *bb_rep_time)
815{
816 const ATOM_PPLIB_POWERPLAYTABLE *powerplay_tab = get_powerplay_table(hwmgr);
817
818 PP_ASSERT_WITH_CODE(NULL != powerplay_tab,
819 "Missing PowerPlay Table!", return -EINVAL);
820
821 *vol_rep_time = (uint32_t)le16_to_cpu(powerplay_tab->usVoltageTime);
822 *bb_rep_time = (uint32_t)le16_to_cpu(powerplay_tab->usBackbiasTime);
823
824 return 0;
825}
810 826
811int pp_tables_get_num_of_entries(struct pp_hwmgr *hwmgr, 827int pp_tables_get_num_of_entries(struct pp_hwmgr *hwmgr,
812 unsigned long *num_of_entries) 828 unsigned long *num_of_entries)
@@ -1499,7 +1515,7 @@ int get_number_of_vce_state_table_entries(
1499 const ATOM_PPLIB_VCE_State_Table *vce_table = 1515 const ATOM_PPLIB_VCE_State_Table *vce_table =
1500 get_vce_state_table(hwmgr, table); 1516 get_vce_state_table(hwmgr, table);
1501 1517
1502 if (vce_table > 0) 1518 if (vce_table)
1503 return vce_table->numEntries; 1519 return vce_table->numEntries;
1504 1520
1505 return 0; 1521 return 0;
@@ -1589,11 +1605,6 @@ static int pp_tables_initialize(struct pp_hwmgr *hwmgr)
1589 1605
1590static int pp_tables_uninitialize(struct pp_hwmgr *hwmgr) 1606static int pp_tables_uninitialize(struct pp_hwmgr *hwmgr)
1591{ 1607{
1592 if (NULL != hwmgr->soft_pp_table) {
1593 kfree(hwmgr->soft_pp_table);
1594 hwmgr->soft_pp_table = NULL;
1595 }
1596
1597 if (NULL != hwmgr->dyn_state.vddc_dependency_on_sclk) { 1608 if (NULL != hwmgr->dyn_state.vddc_dependency_on_sclk) {
1598 kfree(hwmgr->dyn_state.vddc_dependency_on_sclk); 1609 kfree(hwmgr->dyn_state.vddc_dependency_on_sclk);
1599 hwmgr->dyn_state.vddc_dependency_on_sclk = NULL; 1610 hwmgr->dyn_state.vddc_dependency_on_sclk = NULL;
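The get_powerplay_table() change above turns the lookup into a lazy cache: cgs_atom_get_data_table() is called only the first time, the pointer and size are kept in hwmgr->soft_pp_table / soft_pp_table_size, and later callers (including the new pp_tables_get_response_times()) reuse them; consistently, pp_tables_uninitialize() stops kfree()ing the cached pointer, which is not a private copy. A generic sketch of that lazy-init pattern, with made-up names standing in for the CGS call:

#include <stddef.h>

struct table_cache {
	const void *table;	/* NULL until first use */
	size_t size;
};

/* Stand-in for the expensive BIOS lookup (cgs_atom_get_data_table() in the driver). */
static const void *slow_firmware_lookup(size_t *size)
{
	static const unsigned char blob[16];	/* pretend firmware data */

	*size = sizeof(blob);
	return blob;
}

static const void *get_cached_table(struct table_cache *cache)
{
	if (!cache->table)		/* fetch once, reuse on every later call */
		cache->table = slow_firmware_lookup(&cache->size);

	return cache->table;		/* borrowed pointer: callers must not free it */
}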
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.h b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.h
index 30434802417e..baddaa75693b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.h
@@ -32,16 +32,19 @@ struct pp_hw_power_state;
32extern const struct pp_table_func pptable_funcs; 32extern const struct pp_table_func pptable_funcs;
33 33
34typedef int (*pp_tables_hw_clock_info_callback)(struct pp_hwmgr *hwmgr, 34typedef int (*pp_tables_hw_clock_info_callback)(struct pp_hwmgr *hwmgr,
35 struct pp_hw_power_state *hw_ps, 35 struct pp_hw_power_state *hw_ps,
36 unsigned int index, 36 unsigned int index,
37 const void *clock_info); 37 const void *clock_info);
38 38
39int pp_tables_get_num_of_entries(struct pp_hwmgr *hwmgr, 39int pp_tables_get_num_of_entries(struct pp_hwmgr *hwmgr,
40 unsigned long *num_of_entries); 40 unsigned long *num_of_entries);
41 41
42int pp_tables_get_entry(struct pp_hwmgr *hwmgr, 42int pp_tables_get_entry(struct pp_hwmgr *hwmgr,
43 unsigned long entry_index, 43 unsigned long entry_index,
44 struct pp_power_state *ps, 44 struct pp_power_state *ps,
45 pp_tables_hw_clock_info_callback func); 45 pp_tables_hw_clock_info_callback func);
46
47int pp_tables_get_response_times(struct pp_hwmgr *hwmgr,
48 uint32_t *vol_rep_time, uint32_t *bb_rep_time);
46 49
47#endif 50#endif
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
index 5d0f655bf160..c7dc111221c2 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
@@ -571,7 +571,7 @@ int tonga_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
571 if (0 == data->sclk_dpm_key_disabled) { 571 if (0 == data->sclk_dpm_key_disabled) {
572 /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/ 572 /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/
573 PP_ASSERT_WITH_CODE( 573 PP_ASSERT_WITH_CODE(
574 (0 == tonga_is_dpm_running(hwmgr)), 574 !tonga_is_dpm_running(hwmgr),
575 "Trying to Disable SCLK DPM when DPM is disabled", 575 "Trying to Disable SCLK DPM when DPM is disabled",
576 return -1 576 return -1
577 ); 577 );
@@ -587,7 +587,7 @@ int tonga_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
587 if (0 == data->mclk_dpm_key_disabled) { 587 if (0 == data->mclk_dpm_key_disabled) {
588 /* Checking if DPM is running. If we discover hang because of this, we should skip this message. */ 588 /* Checking if DPM is running. If we discover hang because of this, we should skip this message. */
589 PP_ASSERT_WITH_CODE( 589 PP_ASSERT_WITH_CODE(
590 (0 == tonga_is_dpm_running(hwmgr)), 590 !tonga_is_dpm_running(hwmgr),
591 "Trying to Disable MCLK DPM when DPM is disabled", 591 "Trying to Disable MCLK DPM when DPM is disabled",
592 return -1 592 return -1
593 ); 593 );
@@ -614,7 +614,7 @@ int tonga_stop_dpm(struct pp_hwmgr *hwmgr)
614 if (0 == data->pcie_dpm_key_disabled) { 614 if (0 == data->pcie_dpm_key_disabled) {
615 /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/ 615 /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/
616 PP_ASSERT_WITH_CODE( 616 PP_ASSERT_WITH_CODE(
617 (0 == tonga_is_dpm_running(hwmgr)), 617 !tonga_is_dpm_running(hwmgr),
618 "Trying to Disable PCIE DPM when DPM is disabled", 618 "Trying to Disable PCIE DPM when DPM is disabled",
619 return -1 619 return -1
620 ); 620 );
@@ -630,7 +630,7 @@ int tonga_stop_dpm(struct pp_hwmgr *hwmgr)
630 630
631 /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/ 631 /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/
632 PP_ASSERT_WITH_CODE( 632 PP_ASSERT_WITH_CODE(
633 (0 == tonga_is_dpm_running(hwmgr)), 633 !tonga_is_dpm_running(hwmgr),
634 "Trying to Disable Voltage CNTL when DPM is disabled", 634 "Trying to Disable Voltage CNTL when DPM is disabled",
635 return -1 635 return -1
636 ); 636 );
@@ -688,8 +688,9 @@ int tonga_dpm_force_state(struct pp_hwmgr *hwmgr, uint32_t n)
688 uint32_t level_mask = 1 << n; 688 uint32_t level_mask = 1 << n;
689 689
690 /* Checking if DPM is running. If we discover hang because of this, we should skip this message. */ 690 /* Checking if DPM is running. If we discover hang because of this, we should skip this message. */
691 PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr), 691 PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
692 "Trying to force SCLK when DPM is disabled", return -1;); 692 "Trying to force SCLK when DPM is disabled",
693 return -1;);
693 if (0 == data->sclk_dpm_key_disabled) 694 if (0 == data->sclk_dpm_key_disabled)
694 return (0 == smum_send_msg_to_smc_with_parameter( 695 return (0 == smum_send_msg_to_smc_with_parameter(
695 hwmgr->smumgr, 696 hwmgr->smumgr,
@@ -712,8 +713,9 @@ int tonga_dpm_force_state_mclk(struct pp_hwmgr *hwmgr, uint32_t n)
712 uint32_t level_mask = 1 << n; 713 uint32_t level_mask = 1 << n;
713 714
714 /* Checking if DPM is running. If we discover hang because of this, we should skip this message. */ 715 /* Checking if DPM is running. If we discover hang because of this, we should skip this message. */
715 PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr), 716 PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
716 "Trying to Force MCLK when DPM is disabled", return -1;); 717 "Trying to Force MCLK when DPM is disabled",
718 return -1;);
717 if (0 == data->mclk_dpm_key_disabled) 719 if (0 == data->mclk_dpm_key_disabled)
718 return (0 == smum_send_msg_to_smc_with_parameter( 720 return (0 == smum_send_msg_to_smc_with_parameter(
719 hwmgr->smumgr, 721 hwmgr->smumgr,
@@ -735,8 +737,9 @@ int tonga_dpm_force_state_pcie(struct pp_hwmgr *hwmgr, uint32_t n)
735 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); 737 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
736 738
737 /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/ 739 /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/
738 PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr), 740 PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
739 "Trying to Force PCIE level when DPM is disabled", return -1;); 741 "Trying to Force PCIE level when DPM is disabled",
742 return -1;);
740 if (0 == data->pcie_dpm_key_disabled) 743 if (0 == data->pcie_dpm_key_disabled)
741 return (0 == smum_send_msg_to_smc_with_parameter( 744 return (0 == smum_send_msg_to_smc_with_parameter(
742 hwmgr->smumgr, 745 hwmgr->smumgr,
@@ -774,7 +777,7 @@ int tonga_process_firmware_header(struct pp_hwmgr *hwmgr)
774 777
775 uint32_t tmp; 778 uint32_t tmp;
776 int result; 779 int result;
777 bool error = 0; 780 bool error = false;
778 781
779 result = tonga_read_smc_sram_dword(hwmgr->smumgr, 782 result = tonga_read_smc_sram_dword(hwmgr->smumgr,
780 SMU72_FIRMWARE_HEADER_LOCATION + 783 SMU72_FIRMWARE_HEADER_LOCATION +
@@ -933,11 +936,11 @@ int tonga_init_power_gate_state(struct pp_hwmgr *hwmgr)
933{ 936{
934 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); 937 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
935 938
936 data->uvd_power_gated = 0; 939 data->uvd_power_gated = false;
937 data->vce_power_gated = 0; 940 data->vce_power_gated = false;
938 data->samu_power_gated = 0; 941 data->samu_power_gated = false;
939 data->acp_power_gated = 0; 942 data->acp_power_gated = false;
940 data->pg_acp_init = 1; 943 data->pg_acp_init = true;
941 944
942 return 0; 945 return 0;
943} 946}
@@ -955,7 +958,7 @@ int tonga_check_for_dpm_running(struct pp_hwmgr *hwmgr)
955 * because we may have test scenarios that need us intentionly disable SCLK/MCLK DPM, 958 * because we may have test scenarios that need us intentionly disable SCLK/MCLK DPM,
956 * whereas voltage control is a fundemental change that will not be disabled 959 * whereas voltage control is a fundemental change that will not be disabled
957 */ 960 */
958 return (0 == tonga_is_dpm_running(hwmgr) ? 0 : 1); 961 return (!tonga_is_dpm_running(hwmgr) ? 0 : 1);
959} 962}
960 963
961/** 964/**
@@ -968,7 +971,7 @@ int tonga_check_for_dpm_stopped(struct pp_hwmgr *hwmgr)
968{ 971{
969 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); 972 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
970 973
971 if (0 != tonga_is_dpm_running(hwmgr)) { 974 if (tonga_is_dpm_running(hwmgr)) {
972 /* If HW Virtualization is enabled, dpm_table_start will not have a valid value */ 975 /* If HW Virtualization is enabled, dpm_table_start will not have a valid value */
973 if (!data->dpm_table_start) { 976 if (!data->dpm_table_start) {
974 return 1; 977 return 1;
@@ -991,7 +994,7 @@ static int tonga_trim_voltage_table(struct pp_hwmgr *hwmgr,
991{ 994{
992 uint32_t table_size, i, j; 995 uint32_t table_size, i, j;
993 uint16_t vvalue; 996 uint16_t vvalue;
994 bool bVoltageFound = 0; 997 bool bVoltageFound = false;
995 pp_atomctrl_voltage_table *table; 998 pp_atomctrl_voltage_table *table;
996 999
997 PP_ASSERT_WITH_CODE((NULL != voltage_table), "Voltage Table empty.", return -1;); 1000 PP_ASSERT_WITH_CODE((NULL != voltage_table), "Voltage Table empty.", return -1;);
@@ -1007,11 +1010,11 @@ static int tonga_trim_voltage_table(struct pp_hwmgr *hwmgr,
1007 1010
1008 for (i = 0; i < voltage_table->count; i++) { 1011 for (i = 0; i < voltage_table->count; i++) {
1009 vvalue = voltage_table->entries[i].value; 1012 vvalue = voltage_table->entries[i].value;
1010 bVoltageFound = 0; 1013 bVoltageFound = false;
1011 1014
1012 for (j = 0; j < table->count; j++) { 1015 for (j = 0; j < table->count; j++) {
1013 if (vvalue == table->entries[j].value) { 1016 if (vvalue == table->entries[j].value) {
1014 bVoltageFound = 1; 1017 bVoltageFound = true;
1015 break; 1018 break;
1016 } 1019 }
1017 } 1020 }
@@ -1331,7 +1334,6 @@ static int tonga_populate_cac_tables(struct pp_hwmgr *hwmgr,
1331{ 1334{
1332 uint32_t count; 1335 uint32_t count;
1333 uint8_t index; 1336 uint8_t index;
1334 int result = 0;
1335 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); 1337 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
1336 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); 1338 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
1337 struct phm_ppt_v1_voltage_lookup_table *vddgfx_lookup_table = pptable_info->vddgfx_lookup_table; 1339 struct phm_ppt_v1_voltage_lookup_table *vddgfx_lookup_table = pptable_info->vddgfx_lookup_table;
@@ -1378,7 +1380,7 @@ static int tonga_populate_cac_tables(struct pp_hwmgr *hwmgr,
1378 } 1380 }
1379 } 1381 }
1380 1382
1381 return result; 1383 return 0;
1382} 1384}
1383 1385
1384 1386
@@ -2042,7 +2044,7 @@ static int tonga_populate_single_memory_level(
2042 2044
2043 if ((data->mclk_stutter_mode_threshold != 0) && 2045 if ((data->mclk_stutter_mode_threshold != 0) &&
2044 (memory_clock <= data->mclk_stutter_mode_threshold) && 2046 (memory_clock <= data->mclk_stutter_mode_threshold) &&
2045 (data->is_uvd_enabled == 0) 2047 (!data->is_uvd_enabled)
2046 && (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE) & 0x1) 2048 && (PHM_READ_FIELD(hwmgr->device, DPG_PIPE_STUTTER_CONTROL, STUTTER_ENABLE) & 0x1)
2047 && (data->display_timing.num_existing_displays <= 2) 2049 && (data->display_timing.num_existing_displays <= 2)
2048 && (data->display_timing.num_existing_displays != 0)) 2050 && (data->display_timing.num_existing_displays != 0))
@@ -2705,7 +2707,7 @@ static int tonga_reset_single_dpm_table(
2705 2707
2706 dpm_table->count = count; 2708 dpm_table->count = count;
2707 for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++) { 2709 for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++) {
2708 dpm_table->dpm_levels[i].enabled = 0; 2710 dpm_table->dpm_levels[i].enabled = false;
2709 } 2711 }
2710 2712
2711 return 0; 2713 return 0;
@@ -2718,7 +2720,7 @@ static void tonga_setup_pcie_table_entry(
2718{ 2720{
2719 dpm_table->dpm_levels[index].value = pcie_gen; 2721 dpm_table->dpm_levels[index].value = pcie_gen;
2720 dpm_table->dpm_levels[index].param1 = pcie_lanes; 2722 dpm_table->dpm_levels[index].param1 = pcie_lanes;
2721 dpm_table->dpm_levels[index].enabled = 1; 2723 dpm_table->dpm_levels[index].enabled = true;
2722} 2724}
2723 2725
2724static int tonga_setup_default_pcie_tables(struct pp_hwmgr *hwmgr) 2726static int tonga_setup_default_pcie_tables(struct pp_hwmgr *hwmgr)
@@ -2828,7 +2830,7 @@ static int tonga_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
2828 allowed_vdd_sclk_table->entries[i].clk) { 2830 allowed_vdd_sclk_table->entries[i].clk) {
2829 data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value = 2831 data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].value =
2830 allowed_vdd_sclk_table->entries[i].clk; 2832 allowed_vdd_sclk_table->entries[i].clk;
2831 data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; to do */ 2833 data->dpm_table.sclk_table.dpm_levels[data->dpm_table.sclk_table.count].enabled = true; /*(i==0) ? 1 : 0; to do */
2832 data->dpm_table.sclk_table.count++; 2834 data->dpm_table.sclk_table.count++;
2833 } 2835 }
2834 } 2836 }
@@ -2842,7 +2844,7 @@ static int tonga_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
2842 allowed_vdd_mclk_table->entries[i].clk) { 2844 allowed_vdd_mclk_table->entries[i].clk) {
2843 data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value = 2845 data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].value =
2844 allowed_vdd_mclk_table->entries[i].clk; 2846 allowed_vdd_mclk_table->entries[i].clk;
2845 data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = 1; /*(i==0) ? 1 : 0; */ 2847 data->dpm_table.mclk_table.dpm_levels[data->dpm_table.mclk_table.count].enabled = true; /*(i==0) ? 1 : 0; */
2846 data->dpm_table.mclk_table.count++; 2848 data->dpm_table.mclk_table.count++;
2847 } 2849 }
2848 } 2850 }
@@ -3026,8 +3028,8 @@ int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
3026 3028
3027 reg_value = 0; 3029 reg_value = 0;
3028 if ((0 == reg_value) && 3030 if ((0 == reg_value) &&
3029 (0 == atomctrl_get_pp_assign_pin(hwmgr, 3031 (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID,
3030 VDDC_VRHOT_GPIO_PINID, &gpio_pin_assignment))) { 3032 &gpio_pin_assignment))) {
3031 table->VRHotGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift; 3033 table->VRHotGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
3032 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 3034 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3033 PHM_PlatformCaps_RegulatorHot); 3035 PHM_PlatformCaps_RegulatorHot);
@@ -3040,8 +3042,8 @@ int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
3040 /* ACDC Switch GPIO */ 3042 /* ACDC Switch GPIO */
3041 reg_value = 0; 3043 reg_value = 0;
3042 if ((0 == reg_value) && 3044 if ((0 == reg_value) &&
3043 (0 == atomctrl_get_pp_assign_pin(hwmgr, 3045 (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID,
3044 PP_AC_DC_SWITCH_GPIO_PINID, &gpio_pin_assignment))) { 3046 &gpio_pin_assignment))) {
3045 table->AcDcGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift; 3047 table->AcDcGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift;
3046 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 3048 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3047 PHM_PlatformCaps_AutomaticDCTransition); 3049 PHM_PlatformCaps_AutomaticDCTransition);
@@ -3063,8 +3065,7 @@ int tonga_init_smc_table(struct pp_hwmgr *hwmgr)
3063 } 3065 }
3064 3066
3065 reg_value = 0; 3067 reg_value = 0;
3066 if ((0 == reg_value) && 3068 if ((0 == reg_value) && (atomctrl_get_pp_assign_pin(hwmgr,
3067 (0 == atomctrl_get_pp_assign_pin(hwmgr,
3068 THERMAL_INT_OUTPUT_GPIO_PINID, &gpio_pin_assignment))) { 3069 THERMAL_INT_OUTPUT_GPIO_PINID, &gpio_pin_assignment))) {
3069 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 3070 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
3070 PHM_PlatformCaps_ThermalOutGPIO); 3071 PHM_PlatformCaps_ThermalOutGPIO);
@@ -3135,7 +3136,7 @@ int tonga_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
3135 3136
3136 if (0 == data->sclk_dpm_key_disabled) { 3137 if (0 == data->sclk_dpm_key_disabled) {
3137 /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/ 3138 /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/
3138 if (0 != tonga_is_dpm_running(hwmgr)) 3139 if (tonga_is_dpm_running(hwmgr))
3139 printk(KERN_ERR "[ powerplay ] Trying to set Enable Mask when DPM is disabled \n"); 3140 printk(KERN_ERR "[ powerplay ] Trying to set Enable Mask when DPM is disabled \n");
3140 3141
3141 if (0 != data->dpm_level_enable_mask.sclk_dpm_enable_mask) { 3142 if (0 != data->dpm_level_enable_mask.sclk_dpm_enable_mask) {
@@ -3150,7 +3151,7 @@ int tonga_upload_dpm_level_enable_mask(struct pp_hwmgr *hwmgr)
3150 3151
3151 if (0 == data->mclk_dpm_key_disabled) { 3152 if (0 == data->mclk_dpm_key_disabled) {
3152 /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/ 3153 /* Checking if DPM is running. If we discover hang because of this, we should skip this message.*/
3153 if (0 != tonga_is_dpm_running(hwmgr)) 3154 if (tonga_is_dpm_running(hwmgr))
3154 printk(KERN_ERR "[ powerplay ] Trying to set Enable Mask when DPM is disabled \n"); 3155 printk(KERN_ERR "[ powerplay ] Trying to set Enable Mask when DPM is disabled \n");
3155 3156
3156 if (0 != data->dpm_level_enable_mask.mclk_dpm_enable_mask) { 3157 if (0 != data->dpm_level_enable_mask.mclk_dpm_enable_mask) {
@@ -3261,7 +3262,7 @@ int tonga_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwm
3261 3262
3262 /* initialize vddc_dep_on_dal_pwrl table */ 3263 /* initialize vddc_dep_on_dal_pwrl table */
3263 table_size = sizeof(uint32_t) + 4 * sizeof(struct phm_clock_voltage_dependency_record); 3264 table_size = sizeof(uint32_t) + 4 * sizeof(struct phm_clock_voltage_dependency_record);
3264 table_clk_vlt = (struct phm_clock_voltage_dependency_table *)kzalloc(table_size, GFP_KERNEL); 3265 table_clk_vlt = kzalloc(table_size, GFP_KERNEL);
3265 3266
3266 if (NULL == table_clk_vlt) { 3267 if (NULL == table_clk_vlt) {
3267 printk(KERN_ERR "[ powerplay ] Can not allocate space for vddc_dep_on_dal_pwrl! \n"); 3268 printk(KERN_ERR "[ powerplay ] Can not allocate space for vddc_dep_on_dal_pwrl! \n");
@@ -3336,9 +3337,9 @@ int tonga_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
3336 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend); 3337 tonga_hwmgr *data = (tonga_hwmgr *)(hwmgr->backend);
3337 int result = 1; 3338 int result = 1;
3338 3339
3339 PP_ASSERT_WITH_CODE (0 == tonga_is_dpm_running(hwmgr), 3340 PP_ASSERT_WITH_CODE (!tonga_is_dpm_running(hwmgr),
3340 "Trying to Unforce DPM when DPM is disabled. Returning without sending SMC message.", 3341 "Trying to Unforce DPM when DPM is disabled. Returning without sending SMC message.",
3341 return result); 3342 return result);
3342 3343
3343 if (0 == data->pcie_dpm_key_disabled) { 3344 if (0 == data->pcie_dpm_key_disabled) {
3344 PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc( 3345 PP_ASSERT_WITH_CODE((0 == smum_send_msg_to_smc(
@@ -3742,7 +3743,7 @@ uint8_t tonga_get_memory_modile_index(struct pp_hwmgr *hwmgr)
3742 3743
3743bool tonga_check_s0_mc_reg_index(uint16_t inReg, uint16_t *outReg) 3744bool tonga_check_s0_mc_reg_index(uint16_t inReg, uint16_t *outReg)
3744{ 3745{
3745 bool result = 1; 3746 bool result = true;
3746 3747
3747 switch (inReg) { 3748 switch (inReg) {
3748 case mmMC_SEQ_RAS_TIMING: 3749 case mmMC_SEQ_RAS_TIMING:
@@ -3826,7 +3827,7 @@ bool tonga_check_s0_mc_reg_index(uint16_t inReg, uint16_t *outReg)
3826 break; 3827 break;
3827 3828
3828 default: 3829 default:
3829 result = 0; 3830 result = false;
3830 break; 3831 break;
3831 } 3832 }
3832 3833
@@ -4422,13 +4423,6 @@ int tonga_reset_asic_tasks(struct pp_hwmgr *hwmgr)
4422 4423
4423int tonga_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) 4424int tonga_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
4424{ 4425{
4425 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
4426
4427 if (data->soft_pp_table) {
4428 kfree(data->soft_pp_table);
4429 data->soft_pp_table = NULL;
4430 }
4431
4432 return phm_hwmgr_backend_fini(hwmgr); 4426 return phm_hwmgr_backend_fini(hwmgr);
4433} 4427}
4434 4428
@@ -4442,7 +4436,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
4442{ 4436{
4443 int result = 0; 4437 int result = 0;
4444 SMU72_Discrete_DpmTable *table = NULL; 4438 SMU72_Discrete_DpmTable *table = NULL;
4445 tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); 4439 tonga_hwmgr *data;
4446 pp_atomctrl_gpio_pin_assignment gpio_pin_assignment; 4440 pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
4447 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); 4441 struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
4448 phw_tonga_ulv_parm *ulv; 4442 phw_tonga_ulv_parm *ulv;
@@ -4451,7 +4445,13 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
4451 PP_ASSERT_WITH_CODE((NULL != hwmgr), 4445 PP_ASSERT_WITH_CODE((NULL != hwmgr),
4452 "Invalid Parameter!", return -1;); 4446 "Invalid Parameter!", return -1;);
4453 4447
4454 data->dll_defaule_on = 0; 4448 data = kzalloc(sizeof(struct tonga_hwmgr), GFP_KERNEL);
4449 if (data == NULL)
4450 return -ENOMEM;
4451
4452 hwmgr->backend = data;
4453
4454 data->dll_defaule_on = false;
4455 data->sram_end = SMC_RAM_END; 4455 data->sram_end = SMC_RAM_END;
4456 4456
4457 data->activity_target[0] = PPTONGA_TARGETACTIVITY_DFLT; 4457 data->activity_target[0] = PPTONGA_TARGETACTIVITY_DFLT;
@@ -4557,13 +4557,13 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
4557 4557
4558 /* ULV Support*/ 4558 /* ULV Support*/
4559 ulv = &(data->ulv); 4559 ulv = &(data->ulv);
4560 ulv->ulv_supported = 0; 4560 ulv->ulv_supported = false;
4561 4561
4562 /* Initalize Dynamic State Adjustment Rule Settings*/ 4562 /* Initalize Dynamic State Adjustment Rule Settings*/
4563 result = tonga_initializa_dynamic_state_adjustment_rule_settings(hwmgr); 4563 result = tonga_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
4564 if (result) 4564 if (result)
4565 printk(KERN_ERR "[ powerplay ] tonga_initializa_dynamic_state_adjustment_rule_settings failed!\n"); 4565 printk(KERN_ERR "[ powerplay ] tonga_initializa_dynamic_state_adjustment_rule_settings failed!\n");
4566 data->uvd_enabled = 0; 4566 data->uvd_enabled = false;
4567 4567
4568 table = &(data->smc_state_table); 4568 table = &(data->smc_state_table);
4569 4569
@@ -4571,7 +4571,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
4571 * if ucGPIO_ID=VDDC_PCC_GPIO_PINID in GPIO_LUTable, 4571 * if ucGPIO_ID=VDDC_PCC_GPIO_PINID in GPIO_LUTable,
4572 * Peak Current Control feature is enabled and we should program PCC HW register 4572 * Peak Current Control feature is enabled and we should program PCC HW register
4573 */ 4573 */
4574 if (0 == atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) { 4574 if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) {
4575 uint32_t temp_reg = cgs_read_ind_register(hwmgr->device, 4575 uint32_t temp_reg = cgs_read_ind_register(hwmgr->device,
4576 CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL); 4576 CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL);
4577 4577
@@ -4610,7 +4610,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
4610 phm_cap_set(hwmgr->platform_descriptor.platformCaps, 4610 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
4611 PHM_PlatformCaps_SMU7); 4611 PHM_PlatformCaps_SMU7);
4612 4612
4613 data->vddc_phase_shed_control = 0; 4613 data->vddc_phase_shed_control = false;
4614 4614
4615 phm_cap_unset(hwmgr->platform_descriptor.platformCaps, 4615 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
4616 PHM_PlatformCaps_UVDPowerGating); 4616 PHM_PlatformCaps_UVDPowerGating);
@@ -4629,7 +4629,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
4629 } 4629 }
4630 4630
4631 if (0 == result) { 4631 if (0 == result) {
4632 data->is_tlu_enabled = 0; 4632 data->is_tlu_enabled = false;
4633 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = 4633 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
4634 TONGA_MAX_HARDWARE_POWERLEVELS; 4634 TONGA_MAX_HARDWARE_POWERLEVELS;
4635 hwmgr->platform_descriptor.hardwarePerformanceLevels = 2; 4635 hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
@@ -4639,7 +4639,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
4639 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO; 4639 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO;
4640 result = cgs_query_system_info(hwmgr->device, &sys_info); 4640 result = cgs_query_system_info(hwmgr->device, &sys_info);
4641 if (result) 4641 if (result)
4642 data->pcie_gen_cap = 0x30007; 4642 data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK;
4643 else 4643 else
4644 data->pcie_gen_cap = (uint32_t)sys_info.value; 4644 data->pcie_gen_cap = (uint32_t)sys_info.value;
4645 if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) 4645 if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
@@ -4648,7 +4648,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
4648 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW; 4648 sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW;
4649 result = cgs_query_system_info(hwmgr->device, &sys_info); 4649 result = cgs_query_system_info(hwmgr->device, &sys_info);
4650 if (result) 4650 if (result)
4651 data->pcie_lane_cap = 0x2f0000; 4651 data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK;
4652 else 4652 else
4653 data->pcie_lane_cap = (uint32_t)sys_info.value; 4653 data->pcie_lane_cap = (uint32_t)sys_info.value;
4654 } else { 4654 } else {
@@ -5310,9 +5310,8 @@ static int tonga_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
5310 if ((0 == data->sclk_dpm_key_disabled) && 5310 if ((0 == data->sclk_dpm_key_disabled) &&
5311 (data->need_update_smu7_dpm_table & 5311 (data->need_update_smu7_dpm_table &
5312 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { 5312 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
5313 PP_ASSERT_WITH_CODE( 5313 PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
5314 0 == tonga_is_dpm_running(hwmgr), 5314 "Trying to freeze SCLK DPM when DPM is disabled",
5315 "Trying to freeze SCLK DPM when DPM is disabled",
5316 ); 5315 );
5317 PP_ASSERT_WITH_CODE( 5316 PP_ASSERT_WITH_CODE(
5318 0 == smum_send_msg_to_smc(hwmgr->smumgr, 5317 0 == smum_send_msg_to_smc(hwmgr->smumgr,
@@ -5324,8 +5323,8 @@ static int tonga_freeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
5324 if ((0 == data->mclk_dpm_key_disabled) && 5323 if ((0 == data->mclk_dpm_key_disabled) &&
5325 (data->need_update_smu7_dpm_table & 5324 (data->need_update_smu7_dpm_table &
5326 DPMTABLE_OD_UPDATE_MCLK)) { 5325 DPMTABLE_OD_UPDATE_MCLK)) {
5327 PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr), 5326 PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
5328 "Trying to freeze MCLK DPM when DPM is disabled", 5327 "Trying to freeze MCLK DPM when DPM is disabled",
5329 ); 5328 );
5330 PP_ASSERT_WITH_CODE( 5329 PP_ASSERT_WITH_CODE(
5331 0 == smum_send_msg_to_smc(hwmgr->smumgr, 5330 0 == smum_send_msg_to_smc(hwmgr->smumgr,
@@ -5460,7 +5459,6 @@ static int tonga_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
5460 5459
5461static int tonga_trim_dpm_states(struct pp_hwmgr *hwmgr, const struct tonga_power_state *hw_state) 5460static int tonga_trim_dpm_states(struct pp_hwmgr *hwmgr, const struct tonga_power_state *hw_state)
5462{ 5461{
5463 int result = 0;
5464 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); 5462 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
5465 uint32_t high_limit_count; 5463 uint32_t high_limit_count;
5466 5464
@@ -5480,7 +5478,7 @@ static int tonga_trim_dpm_states(struct pp_hwmgr *hwmgr, const struct tonga_powe
5480 hw_state->performance_levels[0].memory_clock, 5478 hw_state->performance_levels[0].memory_clock,
5481 hw_state->performance_levels[high_limit_count].memory_clock); 5479 hw_state->performance_levels[high_limit_count].memory_clock);
5482 5480
5483 return result; 5481 return 0;
5484} 5482}
5485 5483
5486static int tonga_generate_dpm_level_enable_mask(struct pp_hwmgr *hwmgr, const void *input) 5484static int tonga_generate_dpm_level_enable_mask(struct pp_hwmgr *hwmgr, const void *input)
@@ -5627,8 +5625,8 @@ static int tonga_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
5627 (data->need_update_smu7_dpm_table & 5625 (data->need_update_smu7_dpm_table &
5628 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) { 5626 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK))) {
5629 5627
5630 PP_ASSERT_WITH_CODE(0 == tonga_is_dpm_running(hwmgr), 5628 PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
5631 "Trying to Unfreeze SCLK DPM when DPM is disabled", 5629 "Trying to Unfreeze SCLK DPM when DPM is disabled",
5632 ); 5630 );
5633 PP_ASSERT_WITH_CODE( 5631 PP_ASSERT_WITH_CODE(
5634 0 == smum_send_msg_to_smc(hwmgr->smumgr, 5632 0 == smum_send_msg_to_smc(hwmgr->smumgr,
@@ -5640,9 +5638,8 @@ static int tonga_unfreeze_sclk_mclk_dpm(struct pp_hwmgr *hwmgr)
5640 if ((0 == data->mclk_dpm_key_disabled) && 5638 if ((0 == data->mclk_dpm_key_disabled) &&
5641 (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) { 5639 (data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
5642 5640
5643 PP_ASSERT_WITH_CODE( 5641 PP_ASSERT_WITH_CODE(!tonga_is_dpm_running(hwmgr),
5644 0 == tonga_is_dpm_running(hwmgr), 5642 "Trying to Unfreeze MCLK DPM when DPM is disabled",
5645 "Trying to Unfreeze MCLK DPM when DPM is disabled",
5646 ); 5643 );
5647 PP_ASSERT_WITH_CODE( 5644 PP_ASSERT_WITH_CODE(
5648 0 == smum_send_msg_to_smc(hwmgr->smumgr, 5645 0 == smum_send_msg_to_smc(hwmgr->smumgr,
@@ -6031,42 +6028,6 @@ static int tonga_get_fan_control_mode(struct pp_hwmgr *hwmgr)
6031 CG_FDO_CTRL2, FDO_PWM_MODE); 6028 CG_FDO_CTRL2, FDO_PWM_MODE);
6032} 6029}
6033 6030
6034static int tonga_get_pp_table(struct pp_hwmgr *hwmgr, char **table)
6035{
6036 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
6037
6038 if (!data->soft_pp_table) {
6039 data->soft_pp_table = kmemdup(hwmgr->soft_pp_table,
6040 hwmgr->soft_pp_table_size,
6041 GFP_KERNEL);
6042 if (!data->soft_pp_table)
6043 return -ENOMEM;
6044 }
6045
6046 *table = (char *)&data->soft_pp_table;
6047
6048 return hwmgr->soft_pp_table_size;
6049}
6050
6051static int tonga_set_pp_table(struct pp_hwmgr *hwmgr, const char *buf, size_t size)
6052{
6053 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
6054
6055 if (!data->soft_pp_table) {
6056 data->soft_pp_table = kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL);
6057 if (!data->soft_pp_table)
6058 return -ENOMEM;
6059 }
6060
6061 memcpy(data->soft_pp_table, buf, size);
6062
6063 hwmgr->soft_pp_table = data->soft_pp_table;
6064
6065 /* TODO: re-init powerplay to implement modified pptable */
6066
6067 return 0;
6068}
6069
6070static int tonga_force_clock_level(struct pp_hwmgr *hwmgr, 6031static int tonga_force_clock_level(struct pp_hwmgr *hwmgr,
6071 enum pp_clock_type type, uint32_t mask) 6032 enum pp_clock_type type, uint32_t mask)
6072{ 6033{
@@ -6174,11 +6135,96 @@ static int tonga_print_clock_levels(struct pp_hwmgr *hwmgr,
6174 return size; 6135 return size;
6175} 6136}
6176 6137
6138static int tonga_get_sclk_od(struct pp_hwmgr *hwmgr)
6139{
6140 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
6141 struct tonga_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table);
6142 struct tonga_single_dpm_table *golden_sclk_table =
6143 &(data->golden_dpm_table.sclk_table);
6144 int value;
6145
6146 value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
6147 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
6148 100 /
6149 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
6150
6151 return value;
6152}
6153
6154static int tonga_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
6155{
6156 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
6157 struct tonga_single_dpm_table *golden_sclk_table =
6158 &(data->golden_dpm_table.sclk_table);
6159 struct pp_power_state *ps;
6160 struct tonga_power_state *tonga_ps;
6161
6162 if (value > 20)
6163 value = 20;
6164
6165 ps = hwmgr->request_ps;
6166
6167 if (ps == NULL)
6168 return -EINVAL;
6169
6170 tonga_ps = cast_phw_tonga_power_state(&ps->hardware);
6171
6172 tonga_ps->performance_levels[tonga_ps->performance_level_count - 1].engine_clock =
6173 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
6174 value / 100 +
6175 golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;
6176
6177 return 0;
6178}
6179
6180static int tonga_get_mclk_od(struct pp_hwmgr *hwmgr)
6181{
6182 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
6183 struct tonga_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table);
6184 struct tonga_single_dpm_table *golden_mclk_table =
6185 &(data->golden_dpm_table.mclk_table);
6186 int value;
6187
6188 value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
6189 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
6190 100 /
6191 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
6192
6193 return value;
6194}
6195
6196static int tonga_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
6197{
6198 struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend);
6199 struct tonga_single_dpm_table *golden_mclk_table =
6200 &(data->golden_dpm_table.mclk_table);
6201 struct pp_power_state *ps;
6202 struct tonga_power_state *tonga_ps;
6203
6204 if (value > 20)
6205 value = 20;
6206
6207 ps = hwmgr->request_ps;
6208
6209 if (ps == NULL)
6210 return -EINVAL;
6211
6212 tonga_ps = cast_phw_tonga_power_state(&ps->hardware);
6213
6214 tonga_ps->performance_levels[tonga_ps->performance_level_count - 1].memory_clock =
6215 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
6216 value / 100 +
6217 golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;
6218
6219 return 0;
6220}
6221
6177static const struct pp_hwmgr_func tonga_hwmgr_funcs = { 6222static const struct pp_hwmgr_func tonga_hwmgr_funcs = {
6178 .backend_init = &tonga_hwmgr_backend_init, 6223 .backend_init = &tonga_hwmgr_backend_init,
6179 .backend_fini = &tonga_hwmgr_backend_fini, 6224 .backend_fini = &tonga_hwmgr_backend_fini,
6180 .asic_setup = &tonga_setup_asic_task, 6225 .asic_setup = &tonga_setup_asic_task,
6181 .dynamic_state_management_enable = &tonga_enable_dpm_tasks, 6226 .dynamic_state_management_enable = &tonga_enable_dpm_tasks,
6227 .dynamic_state_management_disable = &tonga_disable_dpm_tasks,
6182 .apply_state_adjust_rules = tonga_apply_state_adjust_rules, 6228 .apply_state_adjust_rules = tonga_apply_state_adjust_rules,
6183 .force_dpm_level = &tonga_force_dpm_level, 6229 .force_dpm_level = &tonga_force_dpm_level,
6184 .power_state_set = tonga_set_power_state_tasks, 6230 .power_state_set = tonga_set_power_state_tasks,
@@ -6212,22 +6258,16 @@ static const struct pp_hwmgr_func tonga_hwmgr_funcs = {
6212 .check_states_equal = tonga_check_states_equal, 6258 .check_states_equal = tonga_check_states_equal,
6213 .set_fan_control_mode = tonga_set_fan_control_mode, 6259 .set_fan_control_mode = tonga_set_fan_control_mode,
6214 .get_fan_control_mode = tonga_get_fan_control_mode, 6260 .get_fan_control_mode = tonga_get_fan_control_mode,
6215 .get_pp_table = tonga_get_pp_table,
6216 .set_pp_table = tonga_set_pp_table,
6217 .force_clock_level = tonga_force_clock_level, 6261 .force_clock_level = tonga_force_clock_level,
6218 .print_clock_levels = tonga_print_clock_levels, 6262 .print_clock_levels = tonga_print_clock_levels,
6263 .get_sclk_od = tonga_get_sclk_od,
6264 .set_sclk_od = tonga_set_sclk_od,
6265 .get_mclk_od = tonga_get_mclk_od,
6266 .set_mclk_od = tonga_set_mclk_od,
6219}; 6267};
6220 6268
6221int tonga_hwmgr_init(struct pp_hwmgr *hwmgr) 6269int tonga_hwmgr_init(struct pp_hwmgr *hwmgr)
6222{ 6270{
6223 tonga_hwmgr *data;
6224
6225 data = kzalloc (sizeof(tonga_hwmgr), GFP_KERNEL);
6226 if (data == NULL)
6227 return -ENOMEM;
6228 memset(data, 0x00, sizeof(tonga_hwmgr));
6229
6230 hwmgr->backend = data;
6231 hwmgr->hwmgr_func = &tonga_hwmgr_funcs; 6271 hwmgr->hwmgr_func = &tonga_hwmgr_funcs;
6232 hwmgr->pptable_func = &tonga_pptable_funcs; 6272 hwmgr->pptable_func = &tonga_pptable_funcs;
6233 pp_tonga_thermal_initialize(hwmgr); 6273 pp_tonga_thermal_initialize(hwmgr);
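
The new get/set_sclk_od and get/set_mclk_od callbacks added above express overdrive as a percentage above the stock ("golden") top DPM level: the getter returns (current_top - golden_top) * 100 / golden_top, and the setter clamps the request to 20 and programs golden_top + golden_top * value / 100. A minimal user-space sketch of the same arithmetic (illustrative only, not driver code; the clock values are made up and the units are simply whatever the DPM table stores):

    #include <stdio.h>
    #include <stdint.h>

    /* Overdrive percentage over the stock top clock, mirroring tonga_get_sclk_od(). */
    static int od_get_percent(uint32_t current_clk, uint32_t golden_clk)
    {
        return (int)((current_clk - golden_clk) * 100 / golden_clk);
    }

    /* New top clock for a requested percentage, mirroring tonga_set_sclk_od();
     * requests above 20% are clamped, as in the driver. */
    static uint32_t od_apply_percent(uint32_t golden_clk, uint32_t percent)
    {
        if (percent > 20)
            percent = 20;
        return golden_clk * percent / 100 + golden_clk;
    }

    int main(void)
    {
        uint32_t golden = 1050000;                      /* stock top SCLK, made-up value */
        uint32_t boosted = od_apply_percent(golden, 7); /* -> 1123500 */

        printf("+%d%% => %u\n", od_get_percent(boosted, golden), boosted);
        return 0;
    }
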
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h
index 573cd39fe78d..3961884bfa9b 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h
@@ -352,9 +352,6 @@ struct tonga_hwmgr {
352 bool samu_power_gated; /* 1: gated, 0:not gated */ 352 bool samu_power_gated; /* 1: gated, 0:not gated */
353 bool acp_power_gated; /* 1: gated, 0:not gated */ 353 bool acp_power_gated; /* 1: gated, 0:not gated */
354 bool pg_acp_init; 354 bool pg_acp_init;
355
356 /* soft pptable for re-uploading into smu */
357 void *soft_pp_table;
358}; 355};
359 356
360typedef struct tonga_hwmgr tonga_hwmgr; 357typedef struct tonga_hwmgr tonga_hwmgr;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
index dccc859f638c..cfb647f76cbe 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c
@@ -167,8 +167,7 @@ static int get_vddc_lookup_table(
167 table_size = sizeof(uint32_t) + 167 table_size = sizeof(uint32_t) +
168 sizeof(phm_ppt_v1_voltage_lookup_record) * max_levels; 168 sizeof(phm_ppt_v1_voltage_lookup_record) * max_levels;
169 169
170 table = (phm_ppt_v1_voltage_lookup_table *) 170 table = kzalloc(table_size, GFP_KERNEL);
171 kzalloc(table_size, GFP_KERNEL);
172 171
173 if (NULL == table) 172 if (NULL == table)
174 return -ENOMEM; 173 return -ENOMEM;
@@ -327,7 +326,7 @@ static int get_valid_clk(
327 table_size = sizeof(uint32_t) + 326 table_size = sizeof(uint32_t) +
328 sizeof(uint32_t) * clk_volt_pp_table->count; 327 sizeof(uint32_t) * clk_volt_pp_table->count;
329 328
330 table = (struct phm_clock_array *)kzalloc(table_size, GFP_KERNEL); 329 table = kzalloc(table_size, GFP_KERNEL);
331 330
332 if (NULL == table) 331 if (NULL == table)
333 return -ENOMEM; 332 return -ENOMEM;
@@ -377,8 +376,7 @@ static int get_mclk_voltage_dependency_table(
377 table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record) 376 table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record)
378 * mclk_dep_table->ucNumEntries; 377 * mclk_dep_table->ucNumEntries;
379 378
380 mclk_table = (phm_ppt_v1_clock_voltage_dependency_table *) 379 mclk_table = kzalloc(table_size, GFP_KERNEL);
381 kzalloc(table_size, GFP_KERNEL);
382 380
383 if (NULL == mclk_table) 381 if (NULL == mclk_table)
384 return -ENOMEM; 382 return -ENOMEM;
@@ -424,8 +422,7 @@ static int get_sclk_voltage_dependency_table(
424 table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record) 422 table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record)
425 * tonga_table->ucNumEntries; 423 * tonga_table->ucNumEntries;
426 424
427 sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *) 425 sclk_table = kzalloc(table_size, GFP_KERNEL);
428 kzalloc(table_size, GFP_KERNEL);
429 426
430 if (NULL == sclk_table) 427 if (NULL == sclk_table)
431 return -ENOMEM; 428 return -ENOMEM;
@@ -456,8 +453,7 @@ static int get_sclk_voltage_dependency_table(
456 table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record) 453 table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record)
457 * polaris_table->ucNumEntries; 454 * polaris_table->ucNumEntries;
458 455
459 sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *) 456 sclk_table = kzalloc(table_size, GFP_KERNEL);
460 kzalloc(table_size, GFP_KERNEL);
461 457
462 if (NULL == sclk_table) 458 if (NULL == sclk_table)
463 return -ENOMEM; 459 return -ENOMEM;
@@ -504,7 +500,7 @@ static int get_pcie_table(
504 table_size = sizeof(uint32_t) + 500 table_size = sizeof(uint32_t) +
505 sizeof(phm_ppt_v1_pcie_record) * atom_pcie_table->ucNumEntries; 501 sizeof(phm_ppt_v1_pcie_record) * atom_pcie_table->ucNumEntries;
506 502
507 pcie_table = (phm_ppt_v1_pcie_table *)kzalloc(table_size, GFP_KERNEL); 503 pcie_table = kzalloc(table_size, GFP_KERNEL);
508 504
509 if (pcie_table == NULL) 505 if (pcie_table == NULL)
510 return -ENOMEM; 506 return -ENOMEM;
@@ -541,7 +537,7 @@ static int get_pcie_table(
541 table_size = sizeof(uint32_t) + 537 table_size = sizeof(uint32_t) +
542 sizeof(phm_ppt_v1_pcie_record) * atom_pcie_table->ucNumEntries; 538 sizeof(phm_ppt_v1_pcie_record) * atom_pcie_table->ucNumEntries;
543 539
544 pcie_table = (phm_ppt_v1_pcie_table *)kzalloc(table_size, GFP_KERNEL); 540 pcie_table = kzalloc(table_size, GFP_KERNEL);
545 541
546 if (pcie_table == NULL) 542 if (pcie_table == NULL)
547 return -ENOMEM; 543 return -ENOMEM;
@@ -695,8 +691,7 @@ static int get_mm_clock_voltage_table(
695 table_size = sizeof(uint32_t) + 691 table_size = sizeof(uint32_t) +
696 sizeof(phm_ppt_v1_mm_clock_voltage_dependency_record) 692 sizeof(phm_ppt_v1_mm_clock_voltage_dependency_record)
697 * mm_dependency_table->ucNumEntries; 693 * mm_dependency_table->ucNumEntries;
698 mm_table = (phm_ppt_v1_mm_clock_voltage_dependency_table *) 694 mm_table = kzalloc(table_size, GFP_KERNEL);
699 kzalloc(table_size, GFP_KERNEL);
700 695
701 if (NULL == mm_table) 696 if (NULL == mm_table)
702 return -ENOMEM; 697 return -ENOMEM;
@@ -1073,13 +1068,9 @@ int tonga_pp_tables_initialize(struct pp_hwmgr *hwmgr)
1073 1068
1074int tonga_pp_tables_uninitialize(struct pp_hwmgr *hwmgr) 1069int tonga_pp_tables_uninitialize(struct pp_hwmgr *hwmgr)
1075{ 1070{
1076 int result = 0;
1077 struct phm_ppt_v1_information *pp_table_information = 1071 struct phm_ppt_v1_information *pp_table_information =
1078 (struct phm_ppt_v1_information *)(hwmgr->pptable); 1072 (struct phm_ppt_v1_information *)(hwmgr->pptable);
1079 1073
1080 if (NULL != hwmgr->soft_pp_table)
1081 hwmgr->soft_pp_table = NULL;
1082
1083 kfree(pp_table_information->vdd_dep_on_sclk); 1074 kfree(pp_table_information->vdd_dep_on_sclk);
1084 pp_table_information->vdd_dep_on_sclk = NULL; 1075 pp_table_information->vdd_dep_on_sclk = NULL;
1085 1076
@@ -1116,7 +1107,7 @@ int tonga_pp_tables_uninitialize(struct pp_hwmgr *hwmgr)
1116 kfree(hwmgr->pptable); 1107 kfree(hwmgr->pptable);
1117 hwmgr->pptable = NULL; 1108 hwmgr->pptable = NULL;
1118 1109
1119 return result; 1110 return 0;
1120} 1111}
1121 1112
1122const struct pp_table_func tonga_pptable_funcs = { 1113const struct pp_table_func tonga_pptable_funcs = {
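
All of the tonga_processpptables.c hunks above are the same cleanup: kzalloc() returns void *, which converts implicitly to any object pointer type in C, so casting its result adds nothing. A short sketch of the resulting pattern, using a hypothetical table type purely for illustration:

    #include <linux/slab.h>
    #include <linux/types.h>

    /* Hypothetical table layout, mirroring the count-plus-records tables above. */
    struct my_clock_table {
        u32 count;
        u32 values[];
    };

    static struct my_clock_table *my_clock_table_alloc(u32 count)
    {
        /* No cast needed: the void * from kzalloc() converts implicitly. */
        return kzalloc(sizeof(struct my_clock_table) + count * sizeof(u32),
                       GFP_KERNEL);
    }
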
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
index 50b367d44307..b764c8c05ec8 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h
@@ -132,6 +132,7 @@ struct amd_pp_init {
132 uint32_t chip_family; 132 uint32_t chip_family;
133 uint32_t chip_id; 133 uint32_t chip_id;
134 uint32_t rev_id; 134 uint32_t rev_id;
135 bool powercontainment_enabled;
135}; 136};
136enum amd_pp_display_config_type{ 137enum amd_pp_display_config_type{
137 AMD_PP_DisplayConfigType_None = 0, 138 AMD_PP_DisplayConfigType_None = 0,
@@ -342,6 +343,10 @@ struct amd_powerplay_funcs {
342 int (*set_pp_table)(void *handle, const char *buf, size_t size); 343 int (*set_pp_table)(void *handle, const char *buf, size_t size);
343 int (*force_clock_level)(void *handle, enum pp_clock_type type, uint32_t mask); 344 int (*force_clock_level)(void *handle, enum pp_clock_type type, uint32_t mask);
344 int (*print_clock_levels)(void *handle, enum pp_clock_type type, char *buf); 345 int (*print_clock_levels)(void *handle, enum pp_clock_type type, char *buf);
346 int (*get_sclk_od)(void *handle);
347 int (*set_sclk_od)(void *handle, uint32_t value);
348 int (*get_mclk_od)(void *handle);
349 int (*set_mclk_od)(void *handle, uint32_t value);
345}; 350};
346 351
347struct amd_powerplay { 352struct amd_powerplay {
@@ -355,6 +360,8 @@ int amd_powerplay_init(struct amd_pp_init *pp_init,
355 360
356int amd_powerplay_fini(void *handle); 361int amd_powerplay_fini(void *handle);
357 362
363int amd_powerplay_reset(void *handle);
364
358int amd_powerplay_display_configuration_change(void *handle, 365int amd_powerplay_display_configuration_change(void *handle,
359 const struct amd_pp_display_configuration *input); 366 const struct amd_pp_display_configuration *input);
360 367
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
index 56f712c7d07a..962cb5385951 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h
@@ -340,6 +340,7 @@ extern int phm_powergate_vce(struct pp_hwmgr *hwmgr, bool gate);
340extern int phm_powerdown_uvd(struct pp_hwmgr *hwmgr); 340extern int phm_powerdown_uvd(struct pp_hwmgr *hwmgr);
341extern int phm_setup_asic(struct pp_hwmgr *hwmgr); 341extern int phm_setup_asic(struct pp_hwmgr *hwmgr);
342extern int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr); 342extern int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr);
343extern int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr);
343extern void phm_init_dynamic_caps(struct pp_hwmgr *hwmgr); 344extern void phm_init_dynamic_caps(struct pp_hwmgr *hwmgr);
344extern bool phm_is_hw_access_blocked(struct pp_hwmgr *hwmgr); 345extern bool phm_is_hw_access_blocked(struct pp_hwmgr *hwmgr);
345extern int phm_block_hw_access(struct pp_hwmgr *hwmgr, bool block); 346extern int phm_block_hw_access(struct pp_hwmgr *hwmgr, bool block);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
index 77e8e33d5870..bf0d2accf7bf 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h
@@ -278,6 +278,8 @@ struct pp_hwmgr_func {
278 278
279 int (*dynamic_state_management_enable)( 279 int (*dynamic_state_management_enable)(
280 struct pp_hwmgr *hw_mgr); 280 struct pp_hwmgr *hw_mgr);
281 int (*dynamic_state_management_disable)(
282 struct pp_hwmgr *hw_mgr);
281 283
282 int (*patch_boot_state)(struct pp_hwmgr *hwmgr, 284 int (*patch_boot_state)(struct pp_hwmgr *hwmgr,
283 struct pp_hw_power_state *hw_ps); 285 struct pp_hw_power_state *hw_ps);
@@ -333,11 +335,13 @@ struct pp_hwmgr_func {
333 int (*get_clock_by_type)(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks); 335 int (*get_clock_by_type)(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks);
334 int (*get_max_high_clocks)(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks); 336 int (*get_max_high_clocks)(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks);
335 int (*power_off_asic)(struct pp_hwmgr *hwmgr); 337 int (*power_off_asic)(struct pp_hwmgr *hwmgr);
336 int (*get_pp_table)(struct pp_hwmgr *hwmgr, char **table);
337 int (*set_pp_table)(struct pp_hwmgr *hwmgr, const char *buf, size_t size);
338 int (*force_clock_level)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, uint32_t mask); 338 int (*force_clock_level)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, uint32_t mask);
339 int (*print_clock_levels)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, char *buf); 339 int (*print_clock_levels)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, char *buf);
340 int (*enable_per_cu_power_gating)(struct pp_hwmgr *hwmgr, bool enable); 340 int (*enable_per_cu_power_gating)(struct pp_hwmgr *hwmgr, bool enable);
341 int (*get_sclk_od)(struct pp_hwmgr *hwmgr);
342 int (*set_sclk_od)(struct pp_hwmgr *hwmgr, uint32_t value);
343 int (*get_mclk_od)(struct pp_hwmgr *hwmgr);
344 int (*set_mclk_od)(struct pp_hwmgr *hwmgr, uint32_t value);
341}; 345};
342 346
343struct pp_table_func { 347struct pp_table_func {
@@ -580,6 +584,7 @@ struct pp_hwmgr {
580 struct pp_smumgr *smumgr; 584 struct pp_smumgr *smumgr;
581 const void *soft_pp_table; 585 const void *soft_pp_table;
582 uint32_t soft_pp_table_size; 586 uint32_t soft_pp_table_size;
587 void *hardcode_pp_table;
583 bool need_pp_table_upload; 588 bool need_pp_table_upload;
584 enum amd_dpm_forced_level dpm_level; 589 enum amd_dpm_forced_level dpm_level;
585 bool block_hw_access; 590 bool block_hw_access;
@@ -609,6 +614,7 @@ struct pp_hwmgr {
609 uint32_t num_ps; 614 uint32_t num_ps;
610 struct pp_thermal_controller_info thermal_controller; 615 struct pp_thermal_controller_info thermal_controller;
611 bool fan_ctrl_is_in_default_mode; 616 bool fan_ctrl_is_in_default_mode;
617 bool powercontainment_enabled;
612 uint32_t fan_ctrl_default_mode; 618 uint32_t fan_ctrl_default_mode;
613 uint32_t tmin; 619 uint32_t tmin;
614 struct phm_microcode_version_info microcode_version_info; 620 struct phm_microcode_version_info microcode_version_info;
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
index fc9e3d1dd409..3c235f0177cd 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smumgr.h
@@ -131,6 +131,12 @@ extern int smu_free_memory(void *device, void *handle);
131 smum_wait_on_indirect_register(smumgr, \ 131 smum_wait_on_indirect_register(smumgr, \
132 mm##port##_INDEX, index, value, mask) 132 mm##port##_INDEX, index, value, mask)
133 133
134#define SMUM_WAIT_INDIRECT_REGISTER(smumgr, port, reg, value, mask) \
135 SMUM_WAIT_INDIRECT_REGISTER_GIVEN_INDEX(smumgr, port, ix##reg, value, mask)
136
137#define SMUM_WAIT_INDIRECT_FIELD(smumgr, port, reg, field, fieldval) \
138 SMUM_WAIT_INDIRECT_REGISTER(smumgr, port, reg, (fieldval) << SMUM_FIELD_SHIFT(reg, field), \
139 SMUM_FIELD_MASK(reg, field) )
134 140
135#define SMUM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, \ 141#define SMUM_WAIT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, \
136 index, value, mask) \ 142 index, value, mask) \
@@ -158,6 +164,10 @@ extern int smu_free_memory(void *device, void *handle);
158 (SMUM_FIELD_MASK(reg, field) & ((field_val) << \ 164 (SMUM_FIELD_MASK(reg, field) & ((field_val) << \
159 SMUM_FIELD_SHIFT(reg, field)))) 165 SMUM_FIELD_SHIFT(reg, field))))
160 166
167#define SMUM_READ_INDIRECT_FIELD(device, port, reg, field) \
168 SMUM_GET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
169 reg, field)
170
161#define SMUM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(smumgr, \ 171#define SMUM_WAIT_VFPF_INDIRECT_REGISTER_GIVEN_INDEX(smumgr, \
162 port, index, value, mask) \ 172 port, index, value, mask) \
163 smum_wait_on_indirect_register(smumgr, \ 173 smum_wait_on_indirect_register(smumgr, \
@@ -191,6 +201,13 @@ extern int smu_free_memory(void *device, void *handle);
191 SMUM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \ 201 SMUM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
192 reg, field, fieldval)) 202 reg, field, fieldval))
193 203
204
205#define SMUM_WRITE_INDIRECT_FIELD(device, port, reg, field, fieldval) \
206 cgs_write_ind_register(device, port, ix##reg, \
207 SMUM_SET_FIELD(cgs_read_ind_register(device, port, ix##reg), \
208 reg, field, fieldval))
209
210
194#define SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, port, reg, field, fieldval) \ 211#define SMUM_WAIT_VFPF_INDIRECT_FIELD(smumgr, port, reg, field, fieldval) \
195 SMUM_WAIT_VFPF_INDIRECT_REGISTER(smumgr, port, reg, \ 212 SMUM_WAIT_VFPF_INDIRECT_REGISTER(smumgr, port, reg, \
196 (fieldval) << SMUM_FIELD_SHIFT(reg, field), \ 213 (fieldval) << SMUM_FIELD_SHIFT(reg, field), \
@@ -200,4 +217,16 @@ extern int smu_free_memory(void *device, void *handle);
200 SMUM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg, \ 217 SMUM_WAIT_VFPF_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg, \
201 (fieldval) << SMUM_FIELD_SHIFT(reg, field), \ 218 (fieldval) << SMUM_FIELD_SHIFT(reg, field), \
202 SMUM_FIELD_MASK(reg, field)) 219 SMUM_FIELD_MASK(reg, field))
220
221#define SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, port, index, value, mask) \
222 smum_wait_for_indirect_register_unequal(smumgr, \
223 mm##port##_INDEX, index, value, mask)
224
225#define SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg, value, mask) \
226 SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL_GIVEN_INDEX(smumgr, port, ix##reg, value, mask)
227
228#define SMUM_WAIT_INDIRECT_FIELD_UNEQUAL(smumgr, port, reg, field, fieldval) \
229 SMUM_WAIT_INDIRECT_REGISTER_UNEQUAL(smumgr, port, reg, (fieldval) << SMUM_FIELD_SHIFT(reg, field), \
230 SMUM_FIELD_MASK(reg, field) )
231
203#endif 232#endif
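
The new SMUM_READ_INDIRECT_FIELD and SMUM_WRITE_INDIRECT_FIELD helpers follow the same shift-and-mask convention as the existing direct-register variants: a field update is a read-modify-write of the indirect register through the CGS accessors, and the WAIT variants poll the same masked comparison. In generic, self-contained C the field access boils down to the sketch below (reg_read/reg_write are stand-ins for cgs_read_ind_register()/cgs_write_ind_register(), and the mask/shift values would come from the register headers):

    #include <stdio.h>
    #include <stdint.h>

    /* Fake indirect register file standing in for the CGS accessors. */
    static uint32_t regs[256];
    static uint32_t reg_read(uint32_t index)              { return regs[index]; }
    static void     reg_write(uint32_t index, uint32_t v) { regs[index] = v; }

    /* What the *_INDIRECT_FIELD macros expand to: shift-and-mask read-modify-write. */
    static uint32_t field_get(uint32_t index, uint32_t mask, unsigned shift)
    {
        return (reg_read(index) & mask) >> shift;
    }

    static void field_set(uint32_t index, uint32_t mask, unsigned shift, uint32_t val)
    {
        uint32_t v = reg_read(index);

        reg_write(index, (v & ~mask) | ((val << shift) & mask));
    }

    int main(void)
    {
        field_set(0x10, 0x0000ff00, 8, 0x2a);               /* write field bits [15:8] */
        printf("0x%02x\n", field_get(0x10, 0x0000ff00, 8)); /* prints 0x2a */
        return 0;
    }
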
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
index 0728c1e3d97a..7723473e51a0 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -23,6 +23,7 @@
23#include <linux/types.h> 23#include <linux/types.h>
24#include <linux/kernel.h> 24#include <linux/kernel.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <drm/amdgpu_drm.h>
26#include "pp_instance.h" 27#include "pp_instance.h"
27#include "smumgr.h" 28#include "smumgr.h"
28#include "cgs_common.h" 29#include "cgs_common.h"
@@ -52,10 +53,10 @@ int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
52 handle->smu_mgr = smumgr; 53 handle->smu_mgr = smumgr;
53 54
54 switch (smumgr->chip_family) { 55 switch (smumgr->chip_family) {
55 case AMD_FAMILY_CZ: 56 case AMDGPU_FAMILY_CZ:
56 cz_smum_init(smumgr); 57 cz_smum_init(smumgr);
57 break; 58 break;
58 case AMD_FAMILY_VI: 59 case AMDGPU_FAMILY_VI:
59 switch (smumgr->chip_id) { 60 switch (smumgr->chip_id) {
60 case CHIP_TONGA: 61 case CHIP_TONGA:
61 tonga_smum_init(smumgr); 62 tonga_smum_init(smumgr);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
index b22722eabafc..f42c536b3af1 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c
@@ -479,7 +479,6 @@ static int tonga_request_smu_reload_fw(struct pp_smumgr *smumgr)
479 struct tonga_smumgr *tonga_smu = 479 struct tonga_smumgr *tonga_smu =
480 (struct tonga_smumgr *)(smumgr->backend); 480 (struct tonga_smumgr *)(smumgr->backend);
481 uint16_t fw_to_load; 481 uint16_t fw_to_load;
482 int result = 0;
483 struct SMU_DRAMData_TOC *toc; 482 struct SMU_DRAMData_TOC *toc;
484 /** 483 /**
485 * First time this gets called during SmuMgr init, 484 * First time this gets called during SmuMgr init,
@@ -563,7 +562,7 @@ static int tonga_request_smu_reload_fw(struct pp_smumgr *smumgr)
563 smumgr, PPSMC_MSG_LoadUcodes, fw_to_load), 562 smumgr, PPSMC_MSG_LoadUcodes, fw_to_load),
564 "Fail to Request SMU Load uCode", return 0); 563 "Fail to Request SMU Load uCode", return 0);
565 564
566 return result; 565 return 0;
567} 566}
568 567
569static int tonga_request_smu_load_specific_fw(struct pp_smumgr *smumgr, 568static int tonga_request_smu_load_specific_fw(struct pp_smumgr *smumgr,
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
index c89dc777768f..b961a1c6caf3 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
@@ -26,7 +26,7 @@ TRACE_EVENT(amd_sched_job,
26 TP_fast_assign( 26 TP_fast_assign(
27 __entry->entity = sched_job->s_entity; 27 __entry->entity = sched_job->s_entity;
28 __entry->sched_job = sched_job; 28 __entry->sched_job = sched_job;
29 __entry->fence = &sched_job->s_fence->base; 29 __entry->fence = &sched_job->s_fence->finished;
30 __entry->name = sched_job->sched->name; 30 __entry->name = sched_job->sched->name;
31 __entry->job_count = kfifo_len( 31 __entry->job_count = kfifo_len(
32 &sched_job->s_entity->job_queue) / sizeof(sched_job); 32 &sched_job->s_entity->job_queue) / sizeof(sched_job);
@@ -46,7 +46,7 @@ TRACE_EVENT(amd_sched_process_job,
46 ), 46 ),
47 47
48 TP_fast_assign( 48 TP_fast_assign(
49 __entry->fence = &fence->base; 49 __entry->fence = &fence->finished;
50 ), 50 ),
51 TP_printk("fence=%p signaled", __entry->fence) 51 TP_printk("fence=%p signaled", __entry->fence)
52); 52);
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index c16248cee779..ef312bb75fda 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -32,6 +32,7 @@
32 32
33static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity); 33static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
34static void amd_sched_wakeup(struct amd_gpu_scheduler *sched); 34static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
35static void amd_sched_process_job(struct fence *f, struct fence_cb *cb);
35 36
36struct kmem_cache *sched_fence_slab; 37struct kmem_cache *sched_fence_slab;
37atomic_t sched_fence_slab_ref = ATOMIC_INIT(0); 38atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);
@@ -140,7 +141,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
140 return r; 141 return r;
141 142
142 atomic_set(&entity->fence_seq, 0); 143 atomic_set(&entity->fence_seq, 0);
143 entity->fence_context = fence_context_alloc(1); 144 entity->fence_context = fence_context_alloc(2);
144 145
145 return 0; 146 return 0;
146} 147}
@@ -251,17 +252,21 @@ static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
251 252
252 s_fence = to_amd_sched_fence(fence); 253 s_fence = to_amd_sched_fence(fence);
253 if (s_fence && s_fence->sched == sched) { 254 if (s_fence && s_fence->sched == sched) {
254 /* Fence is from the same scheduler */
255 if (test_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &fence->flags)) {
256 /* Ignore it when it is already scheduled */
257 fence_put(entity->dependency);
258 return false;
259 }
260 255
261 /* Wait for fence to be scheduled */ 256 /*
262 entity->cb.func = amd_sched_entity_clear_dep; 257 * Fence is from the same scheduler, only need to wait for
263 list_add_tail(&entity->cb.node, &s_fence->scheduled_cb); 258 * it to be scheduled
264 return true; 259 */
260 fence = fence_get(&s_fence->scheduled);
261 fence_put(entity->dependency);
262 entity->dependency = fence;
263 if (!fence_add_callback(fence, &entity->cb,
264 amd_sched_entity_clear_dep))
265 return true;
266
267 /* Ignore it when it is already scheduled */
268 fence_put(fence);
269 return false;
265 } 270 }
266 271
267 if (!fence_add_callback(entity->dependency, &entity->cb, 272 if (!fence_add_callback(entity->dependency, &entity->cb,
@@ -319,46 +324,114 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
319 return added; 324 return added;
320} 325}
321 326
322static void amd_sched_free_job(struct fence *f, struct fence_cb *cb) {
323 struct amd_sched_job *job = container_of(cb, struct amd_sched_job, cb_free_job);
324 schedule_work(&job->work_free_job);
325}
326
327/* job_finish is called after hw fence signaled, and 327/* job_finish is called after hw fence signaled, and
328 * the job had already been deleted from ring_mirror_list 328 * the job had already been deleted from ring_mirror_list
329 */ 329 */
330void amd_sched_job_finish(struct amd_sched_job *s_job) 330static void amd_sched_job_finish(struct work_struct *work)
331{ 331{
332 struct amd_sched_job *next; 332 struct amd_sched_job *s_job = container_of(work, struct amd_sched_job,
333 finish_work);
333 struct amd_gpu_scheduler *sched = s_job->sched; 334 struct amd_gpu_scheduler *sched = s_job->sched;
334 335
336 /* remove job from ring_mirror_list */
337 spin_lock(&sched->job_list_lock);
338 list_del_init(&s_job->node);
335 if (sched->timeout != MAX_SCHEDULE_TIMEOUT) { 339 if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
336 if (cancel_delayed_work(&s_job->work_tdr)) 340 struct amd_sched_job *next;
337 amd_sched_job_put(s_job); 341
342 spin_unlock(&sched->job_list_lock);
343 cancel_delayed_work_sync(&s_job->work_tdr);
344 spin_lock(&sched->job_list_lock);
338 345
339 /* queue TDR for next job */ 346 /* queue TDR for next job */
340 next = list_first_entry_or_null(&sched->ring_mirror_list, 347 next = list_first_entry_or_null(&sched->ring_mirror_list,
341 struct amd_sched_job, node); 348 struct amd_sched_job, node);
342 349
343 if (next) { 350 if (next)
344 INIT_DELAYED_WORK(&next->work_tdr, s_job->timeout_callback);
345 amd_sched_job_get(next);
346 schedule_delayed_work(&next->work_tdr, sched->timeout); 351 schedule_delayed_work(&next->work_tdr, sched->timeout);
347 }
348 } 352 }
353 spin_unlock(&sched->job_list_lock);
354 sched->ops->free_job(s_job);
349} 355}
350 356
351void amd_sched_job_begin(struct amd_sched_job *s_job) 357static void amd_sched_job_finish_cb(struct fence *f, struct fence_cb *cb)
358{
359 struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
360 finish_cb);
361 schedule_work(&job->finish_work);
362}
363
364static void amd_sched_job_begin(struct amd_sched_job *s_job)
352{ 365{
353 struct amd_gpu_scheduler *sched = s_job->sched; 366 struct amd_gpu_scheduler *sched = s_job->sched;
354 367
368 spin_lock(&sched->job_list_lock);
369 list_add_tail(&s_job->node, &sched->ring_mirror_list);
355 if (sched->timeout != MAX_SCHEDULE_TIMEOUT && 370 if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
356 list_first_entry_or_null(&sched->ring_mirror_list, struct amd_sched_job, node) == s_job) 371 list_first_entry_or_null(&sched->ring_mirror_list,
357 { 372 struct amd_sched_job, node) == s_job)
358 INIT_DELAYED_WORK(&s_job->work_tdr, s_job->timeout_callback); 373 schedule_delayed_work(&s_job->work_tdr, sched->timeout);
359 amd_sched_job_get(s_job); 374 spin_unlock(&sched->job_list_lock);
375}
376
377static void amd_sched_job_timedout(struct work_struct *work)
378{
379 struct amd_sched_job *job = container_of(work, struct amd_sched_job,
380 work_tdr.work);
381
382 job->sched->ops->timedout_job(job);
383}
384
385void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched)
386{
387 struct amd_sched_job *s_job;
388
389 spin_lock(&sched->job_list_lock);
390 list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) {
391 if (fence_remove_callback(s_job->s_fence->parent, &s_job->s_fence->cb)) {
392 fence_put(s_job->s_fence->parent);
393 s_job->s_fence->parent = NULL;
394 }
395 }
396 atomic_set(&sched->hw_rq_count, 0);
397 spin_unlock(&sched->job_list_lock);
398}
399
400void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
401{
402 struct amd_sched_job *s_job, *tmp;
403 int r;
404
405 spin_lock(&sched->job_list_lock);
406 s_job = list_first_entry_or_null(&sched->ring_mirror_list,
407 struct amd_sched_job, node);
408 if (s_job)
360 schedule_delayed_work(&s_job->work_tdr, sched->timeout); 409 schedule_delayed_work(&s_job->work_tdr, sched->timeout);
410
411 list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
412 struct amd_sched_fence *s_fence = s_job->s_fence;
413 struct fence *fence;
414
415 spin_unlock(&sched->job_list_lock);
416 fence = sched->ops->run_job(s_job);
417 atomic_inc(&sched->hw_rq_count);
418 if (fence) {
419 s_fence->parent = fence_get(fence);
420 r = fence_add_callback(fence, &s_fence->cb,
421 amd_sched_process_job);
422 if (r == -ENOENT)
423 amd_sched_process_job(fence, &s_fence->cb);
424 else if (r)
425 DRM_ERROR("fence add callback failed (%d)\n",
426 r);
427 fence_put(fence);
428 } else {
429 DRM_ERROR("Failed to run job!\n");
430 amd_sched_process_job(NULL, &s_fence->cb);
431 }
432 spin_lock(&sched->job_list_lock);
361 } 433 }
434 spin_unlock(&sched->job_list_lock);
362} 435}
363 436
364/** 437/**
@@ -372,36 +445,29 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
372{ 445{
373 struct amd_sched_entity *entity = sched_job->s_entity; 446 struct amd_sched_entity *entity = sched_job->s_entity;
374 447
375 sched_job->use_sched = 1;
376 fence_add_callback(&sched_job->s_fence->base,
377 &sched_job->cb_free_job, amd_sched_free_job);
378 trace_amd_sched_job(sched_job); 448 trace_amd_sched_job(sched_job);
449 fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb,
450 amd_sched_job_finish_cb);
379 wait_event(entity->sched->job_scheduled, 451 wait_event(entity->sched->job_scheduled,
380 amd_sched_entity_in(sched_job)); 452 amd_sched_entity_in(sched_job));
381} 453}
382 454
383/* init a sched_job with basic field */ 455/* init a sched_job with basic field */
384int amd_sched_job_init(struct amd_sched_job *job, 456int amd_sched_job_init(struct amd_sched_job *job,
385 struct amd_gpu_scheduler *sched, 457 struct amd_gpu_scheduler *sched,
386 struct amd_sched_entity *entity, 458 struct amd_sched_entity *entity,
387 void (*timeout_cb)(struct work_struct *work), 459 void *owner)
388 void (*free_cb)(struct kref *refcount),
389 void *owner, struct fence **fence)
390{ 460{
391 INIT_LIST_HEAD(&job->node);
392 kref_init(&job->refcount);
393 job->sched = sched; 461 job->sched = sched;
394 job->s_entity = entity; 462 job->s_entity = entity;
395 job->s_fence = amd_sched_fence_create(entity, owner); 463 job->s_fence = amd_sched_fence_create(entity, owner);
396 if (!job->s_fence) 464 if (!job->s_fence)
397 return -ENOMEM; 465 return -ENOMEM;
398 466
399 job->s_fence->s_job = job; 467 INIT_WORK(&job->finish_work, amd_sched_job_finish);
400 job->timeout_callback = timeout_cb; 468 INIT_LIST_HEAD(&job->node);
401 job->free_callback = free_cb; 469 INIT_DELAYED_WORK(&job->work_tdr, amd_sched_job_timedout);
402 470
403 if (fence)
404 *fence = &job->s_fence->base;
405 return 0; 471 return 0;
406} 472}
407 473
@@ -450,23 +516,25 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
450 struct amd_sched_fence *s_fence = 516 struct amd_sched_fence *s_fence =
451 container_of(cb, struct amd_sched_fence, cb); 517 container_of(cb, struct amd_sched_fence, cb);
452 struct amd_gpu_scheduler *sched = s_fence->sched; 518 struct amd_gpu_scheduler *sched = s_fence->sched;
453 unsigned long flags;
454 519
455 atomic_dec(&sched->hw_rq_count); 520 atomic_dec(&sched->hw_rq_count);
456 521 amd_sched_fence_finished(s_fence);
457 /* remove job from ring_mirror_list */
458 spin_lock_irqsave(&sched->job_list_lock, flags);
459 list_del_init(&s_fence->s_job->node);
460 sched->ops->finish_job(s_fence->s_job);
461 spin_unlock_irqrestore(&sched->job_list_lock, flags);
462
463 amd_sched_fence_signal(s_fence);
464 522
465 trace_amd_sched_process_job(s_fence); 523 trace_amd_sched_process_job(s_fence);
466 fence_put(&s_fence->base); 524 fence_put(&s_fence->finished);
467 wake_up_interruptible(&sched->wake_up_worker); 525 wake_up_interruptible(&sched->wake_up_worker);
468} 526}
469 527
528static bool amd_sched_blocked(struct amd_gpu_scheduler *sched)
529{
530 if (kthread_should_park()) {
531 kthread_parkme();
532 return true;
533 }
534
535 return false;
536}
537
470static int amd_sched_main(void *param) 538static int amd_sched_main(void *param)
471{ 539{
472 struct sched_param sparam = {.sched_priority = 1}; 540 struct sched_param sparam = {.sched_priority = 1};
@@ -476,14 +544,15 @@ static int amd_sched_main(void *param)
476 sched_setscheduler(current, SCHED_FIFO, &sparam); 544 sched_setscheduler(current, SCHED_FIFO, &sparam);
477 545
478 while (!kthread_should_stop()) { 546 while (!kthread_should_stop()) {
479 struct amd_sched_entity *entity; 547 struct amd_sched_entity *entity = NULL;
480 struct amd_sched_fence *s_fence; 548 struct amd_sched_fence *s_fence;
481 struct amd_sched_job *sched_job; 549 struct amd_sched_job *sched_job;
482 struct fence *fence; 550 struct fence *fence;
483 551
484 wait_event_interruptible(sched->wake_up_worker, 552 wait_event_interruptible(sched->wake_up_worker,
485 (entity = amd_sched_select_entity(sched)) || 553 (!amd_sched_blocked(sched) &&
486 kthread_should_stop()); 554 (entity = amd_sched_select_entity(sched))) ||
555 kthread_should_stop());
487 556
488 if (!entity) 557 if (!entity)
489 continue; 558 continue;
@@ -495,16 +564,19 @@ static int amd_sched_main(void *param)
495 s_fence = sched_job->s_fence; 564 s_fence = sched_job->s_fence;
496 565
497 atomic_inc(&sched->hw_rq_count); 566 atomic_inc(&sched->hw_rq_count);
498 amd_sched_job_pre_schedule(sched, sched_job); 567 amd_sched_job_begin(sched_job);
568
499 fence = sched->ops->run_job(sched_job); 569 fence = sched->ops->run_job(sched_job);
500 amd_sched_fence_scheduled(s_fence); 570 amd_sched_fence_scheduled(s_fence);
501 if (fence) { 571 if (fence) {
572 s_fence->parent = fence_get(fence);
502 r = fence_add_callback(fence, &s_fence->cb, 573 r = fence_add_callback(fence, &s_fence->cb,
503 amd_sched_process_job); 574 amd_sched_process_job);
504 if (r == -ENOENT) 575 if (r == -ENOENT)
505 amd_sched_process_job(fence, &s_fence->cb); 576 amd_sched_process_job(fence, &s_fence->cb);
506 else if (r) 577 else if (r)
507 DRM_ERROR("fence add callback failed (%d)\n", r); 578 DRM_ERROR("fence add callback failed (%d)\n",
579 r);
508 fence_put(fence); 580 fence_put(fence);
509 } else { 581 } else {
510 DRM_ERROR("Failed to run job!\n"); 582 DRM_ERROR("Failed to run job!\n");
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index 070095a9433c..7cbbbfb502ef 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -27,8 +27,6 @@
27#include <linux/kfifo.h> 27#include <linux/kfifo.h>
28#include <linux/fence.h> 28#include <linux/fence.h>
29 29
30#define AMD_SCHED_FENCE_SCHEDULED_BIT FENCE_FLAG_USER_BITS
31
32struct amd_gpu_scheduler; 30struct amd_gpu_scheduler;
33struct amd_sched_rq; 31struct amd_sched_rq;
34 32
@@ -68,36 +66,34 @@ struct amd_sched_rq {
68}; 66};
69 67
70struct amd_sched_fence { 68struct amd_sched_fence {
71 struct fence base; 69 struct fence scheduled;
70 struct fence finished;
72 struct fence_cb cb; 71 struct fence_cb cb;
73 struct list_head scheduled_cb; 72 struct fence *parent;
74 struct amd_gpu_scheduler *sched; 73 struct amd_gpu_scheduler *sched;
75 spinlock_t lock; 74 spinlock_t lock;
76 void *owner; 75 void *owner;
77 struct amd_sched_job *s_job;
78}; 76};
79 77
80struct amd_sched_job { 78struct amd_sched_job {
81 struct kref refcount;
82 struct amd_gpu_scheduler *sched; 79 struct amd_gpu_scheduler *sched;
83 struct amd_sched_entity *s_entity; 80 struct amd_sched_entity *s_entity;
84 struct amd_sched_fence *s_fence; 81 struct amd_sched_fence *s_fence;
85 bool use_sched; /* true if the job goes to scheduler */ 82 struct fence_cb finish_cb;
86 struct fence_cb cb_free_job; 83 struct work_struct finish_work;
87 struct work_struct work_free_job; 84 struct list_head node;
88 struct list_head node; 85 struct delayed_work work_tdr;
89 struct delayed_work work_tdr;
90 void (*timeout_callback) (struct work_struct *work);
91 void (*free_callback)(struct kref *refcount);
92}; 86};
93 87
94extern const struct fence_ops amd_sched_fence_ops; 88extern const struct fence_ops amd_sched_fence_ops_scheduled;
89extern const struct fence_ops amd_sched_fence_ops_finished;
95static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f) 90static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
96{ 91{
97 struct amd_sched_fence *__f = container_of(f, struct amd_sched_fence, base); 92 if (f->ops == &amd_sched_fence_ops_scheduled)
93 return container_of(f, struct amd_sched_fence, scheduled);
98 94
99 if (__f->base.ops == &amd_sched_fence_ops) 95 if (f->ops == &amd_sched_fence_ops_finished)
100 return __f; 96 return container_of(f, struct amd_sched_fence, finished);
101 97
102 return NULL; 98 return NULL;
103} 99}
@@ -109,8 +105,8 @@ static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
109struct amd_sched_backend_ops { 105struct amd_sched_backend_ops {
110 struct fence *(*dependency)(struct amd_sched_job *sched_job); 106 struct fence *(*dependency)(struct amd_sched_job *sched_job);
111 struct fence *(*run_job)(struct amd_sched_job *sched_job); 107 struct fence *(*run_job)(struct amd_sched_job *sched_job);
112 void (*begin_job)(struct amd_sched_job *sched_job); 108 void (*timedout_job)(struct amd_sched_job *sched_job);
113 void (*finish_job)(struct amd_sched_job *sched_job); 109 void (*free_job)(struct amd_sched_job *sched_job);
114}; 110};
115 111
116enum amd_sched_priority { 112enum amd_sched_priority {
@@ -152,25 +148,11 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job);
152struct amd_sched_fence *amd_sched_fence_create( 148struct amd_sched_fence *amd_sched_fence_create(
153 struct amd_sched_entity *s_entity, void *owner); 149 struct amd_sched_entity *s_entity, void *owner);
154void amd_sched_fence_scheduled(struct amd_sched_fence *fence); 150void amd_sched_fence_scheduled(struct amd_sched_fence *fence);
155void amd_sched_fence_signal(struct amd_sched_fence *fence); 151void amd_sched_fence_finished(struct amd_sched_fence *fence);
156int amd_sched_job_init(struct amd_sched_job *job, 152int amd_sched_job_init(struct amd_sched_job *job,
157 struct amd_gpu_scheduler *sched, 153 struct amd_gpu_scheduler *sched,
158 struct amd_sched_entity *entity, 154 struct amd_sched_entity *entity,
159 void (*timeout_cb)(struct work_struct *work), 155 void *owner);
160 void (*free_cb)(struct kref* refcount), 156void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched);
161 void *owner, struct fence **fence); 157void amd_sched_job_recovery(struct amd_gpu_scheduler *sched);
162void amd_sched_job_pre_schedule(struct amd_gpu_scheduler *sched ,
163 struct amd_sched_job *s_job);
164void amd_sched_job_finish(struct amd_sched_job *s_job);
165void amd_sched_job_begin(struct amd_sched_job *s_job);
166static inline void amd_sched_job_get(struct amd_sched_job *job) {
167 if (job)
168 kref_get(&job->refcount);
169}
170
171static inline void amd_sched_job_put(struct amd_sched_job *job) {
172 if (job)
173 kref_put(&job->refcount, job->free_callback);
174}
175
176#endif 158#endif
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
index 2a732c490375..6b63beaf7574 100644
--- a/drivers/gpu/drm/amd/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
@@ -27,7 +27,8 @@
27#include <drm/drmP.h> 27#include <drm/drmP.h>
28#include "gpu_scheduler.h" 28#include "gpu_scheduler.h"
29 29
30struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity, void *owner) 30struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity,
31 void *owner)
31{ 32{
32 struct amd_sched_fence *fence = NULL; 33 struct amd_sched_fence *fence = NULL;
33 unsigned seq; 34 unsigned seq;
@@ -36,46 +37,37 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity
36 if (fence == NULL) 37 if (fence == NULL)
37 return NULL; 38 return NULL;
38 39
39 INIT_LIST_HEAD(&fence->scheduled_cb);
40 fence->owner = owner; 40 fence->owner = owner;
41 fence->sched = s_entity->sched; 41 fence->sched = entity->sched;
42 spin_lock_init(&fence->lock); 42 spin_lock_init(&fence->lock);
43 43
44 seq = atomic_inc_return(&s_entity->fence_seq); 44 seq = atomic_inc_return(&entity->fence_seq);
45 fence_init(&fence->base, &amd_sched_fence_ops, &fence->lock, 45 fence_init(&fence->scheduled, &amd_sched_fence_ops_scheduled,
46 s_entity->fence_context, seq); 46 &fence->lock, entity->fence_context, seq);
47 fence_init(&fence->finished, &amd_sched_fence_ops_finished,
48 &fence->lock, entity->fence_context + 1, seq);
47 49
48 return fence; 50 return fence;
49} 51}
50 52
51void amd_sched_fence_signal(struct amd_sched_fence *fence) 53void amd_sched_fence_scheduled(struct amd_sched_fence *fence)
52{ 54{
53 int ret = fence_signal(&fence->base); 55 int ret = fence_signal(&fence->scheduled);
56
54 if (!ret) 57 if (!ret)
55 FENCE_TRACE(&fence->base, "signaled from irq context\n"); 58 FENCE_TRACE(&fence->scheduled, "signaled from irq context\n");
56 else 59 else
57 FENCE_TRACE(&fence->base, "was already signaled\n"); 60 FENCE_TRACE(&fence->scheduled, "was already signaled\n");
58}
59
60void amd_sched_job_pre_schedule(struct amd_gpu_scheduler *sched ,
61 struct amd_sched_job *s_job)
62{
63 unsigned long flags;
64 spin_lock_irqsave(&sched->job_list_lock, flags);
65 list_add_tail(&s_job->node, &sched->ring_mirror_list);
66 sched->ops->begin_job(s_job);
67 spin_unlock_irqrestore(&sched->job_list_lock, flags);
68} 61}
69 62
70void amd_sched_fence_scheduled(struct amd_sched_fence *s_fence) 63void amd_sched_fence_finished(struct amd_sched_fence *fence)
71{ 64{
72 struct fence_cb *cur, *tmp; 65 int ret = fence_signal(&fence->finished);
73 66
74 set_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &s_fence->base.flags); 67 if (!ret)
75 list_for_each_entry_safe(cur, tmp, &s_fence->scheduled_cb, node) { 68 FENCE_TRACE(&fence->finished, "signaled from irq context\n");
76 list_del_init(&cur->node); 69 else
77 cur->func(&s_fence->base, cur); 70 FENCE_TRACE(&fence->finished, "was already signaled\n");
78 }
79} 71}
80 72
81static const char *amd_sched_fence_get_driver_name(struct fence *fence) 73static const char *amd_sched_fence_get_driver_name(struct fence *fence)
@@ -105,6 +97,8 @@ static void amd_sched_fence_free(struct rcu_head *rcu)
105{ 97{
106 struct fence *f = container_of(rcu, struct fence, rcu); 98 struct fence *f = container_of(rcu, struct fence, rcu);
107 struct amd_sched_fence *fence = to_amd_sched_fence(f); 99 struct amd_sched_fence *fence = to_amd_sched_fence(f);
100
101 fence_put(fence->parent);
108 kmem_cache_free(sched_fence_slab, fence); 102 kmem_cache_free(sched_fence_slab, fence);
109} 103}
110 104
@@ -116,16 +110,41 @@ static void amd_sched_fence_free(struct rcu_head *rcu)
116 * This function is called when the reference count becomes zero. 110 * This function is called when the reference count becomes zero.
117 * It just RCU schedules freeing up the fence. 111 * It just RCU schedules freeing up the fence.
118 */ 112 */
119static void amd_sched_fence_release(struct fence *f) 113static void amd_sched_fence_release_scheduled(struct fence *f)
120{ 114{
121 call_rcu(&f->rcu, amd_sched_fence_free); 115 struct amd_sched_fence *fence = to_amd_sched_fence(f);
116
117 call_rcu(&fence->finished.rcu, amd_sched_fence_free);
122} 118}
123 119
124const struct fence_ops amd_sched_fence_ops = { 120/**
 121 * amd_sched_fence_release_finished - drop extra reference
 122 *
 123 * @f: fence
 124 *
 125 * Drop the reference that the finished fence holds on the scheduled fence.
126 */
127static void amd_sched_fence_release_finished(struct fence *f)
128{
129 struct amd_sched_fence *fence = to_amd_sched_fence(f);
130
131 fence_put(&fence->scheduled);
132}
133
134const struct fence_ops amd_sched_fence_ops_scheduled = {
135 .get_driver_name = amd_sched_fence_get_driver_name,
136 .get_timeline_name = amd_sched_fence_get_timeline_name,
137 .enable_signaling = amd_sched_fence_enable_signaling,
138 .signaled = NULL,
139 .wait = fence_default_wait,
140 .release = amd_sched_fence_release_scheduled,
141};
142
143const struct fence_ops amd_sched_fence_ops_finished = {
125 .get_driver_name = amd_sched_fence_get_driver_name, 144 .get_driver_name = amd_sched_fence_get_driver_name,
126 .get_timeline_name = amd_sched_fence_get_timeline_name, 145 .get_timeline_name = amd_sched_fence_get_timeline_name,
127 .enable_signaling = amd_sched_fence_enable_signaling, 146 .enable_signaling = amd_sched_fence_enable_signaling,
128 .signaled = NULL, 147 .signaled = NULL,
129 .wait = fence_default_wait, 148 .wait = fence_default_wait,
130 .release = amd_sched_fence_release, 149 .release = amd_sched_fence_release_finished,
131}; 150};
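
The fence split above ties the two fence_ops together with a chained release: each entity now reserves two fence contexts (fence_context_alloc(2) in amd_sched_entity_init), amd_sched_fence_create() gives the "scheduled" fence the first context and the "finished" fence the second with the same seqno, same-scheduler dependencies wait only on the scheduled half, and dropping the last reference on the finished fence puts the scheduled fence, whose release RCU-frees the whole amd_sched_fence and drops the saved parent hardware fence. A toy user-space model of that reference chain, illustrative only:

    #include <stdio.h>
    #include <stdlib.h>

    /* One allocation embeds two independently refcounted fences; the finished
     * fence holds the last reference on the scheduled fence, and the scheduled
     * release is what actually frees the container (via RCU in the real code). */
    struct pair {
        int scheduled_refs;   /* stands in for fence->scheduled refcount */
        int finished_refs;    /* stands in for fence->finished refcount  */
    };

    static void put_scheduled(struct pair *p)
    {
        if (--p->scheduled_refs == 0) {
            printf("container freed\n");    /* amd_sched_fence_free() */
            free(p);
        }
    }

    static void put_finished(struct pair *p)
    {
        if (--p->finished_refs == 0)
            put_scheduled(p);               /* amd_sched_fence_release_finished() */
    }

    int main(void)
    {
        struct pair *p = calloc(1, sizeof(*p));

        if (!p)
            return 1;
        p->scheduled_refs = 1;              /* implicitly owned by the finished fence */
        p->finished_refs = 1;               /* owned by the job and external waiters */

        put_finished(p);                    /* last ref gone: cascades and frees */
        return 0;
    }
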
diff --git a/drivers/gpu/drm/arc/Kconfig b/drivers/gpu/drm/arc/Kconfig
index f9a13b658fea..f47d88ba4fa5 100644
--- a/drivers/gpu/drm/arc/Kconfig
+++ b/drivers/gpu/drm/arc/Kconfig
@@ -2,7 +2,6 @@ config DRM_ARCPGU
2 tristate "ARC PGU" 2 tristate "ARC PGU"
3 depends on DRM && OF 3 depends on DRM && OF
4 select DRM_KMS_CMA_HELPER 4 select DRM_KMS_CMA_HELPER
5 select DRM_KMS_FB_HELPER
6 select DRM_KMS_HELPER 5 select DRM_KMS_HELPER
7 help 6 help
8 Choose this option if you have an ARC PGU controller. 7 Choose this option if you have an ARC PGU controller.
diff --git a/drivers/gpu/drm/arc/Makefile b/drivers/gpu/drm/arc/Makefile
index d48fda70f857..73de56a0139a 100644
--- a/drivers/gpu/drm/arc/Makefile
+++ b/drivers/gpu/drm/arc/Makefile
@@ -1,2 +1,2 @@
1arcpgu-y := arcpgu_crtc.o arcpgu_hdmi.o arcpgu_drv.o 1arcpgu-y := arcpgu_crtc.o arcpgu_hdmi.o arcpgu_sim.o arcpgu_drv.o
2obj-$(CONFIG_DRM_ARCPGU) += arcpgu.o 2obj-$(CONFIG_DRM_ARCPGU) += arcpgu.o
diff --git a/drivers/gpu/drm/arc/arcpgu.h b/drivers/gpu/drm/arc/arcpgu.h
index 86574b698a78..e8fcf3ab1d9a 100644
--- a/drivers/gpu/drm/arc/arcpgu.h
+++ b/drivers/gpu/drm/arc/arcpgu.h
@@ -22,7 +22,6 @@ struct arcpgu_drm_private {
22 struct clk *clk; 22 struct clk *clk;
23 struct drm_fbdev_cma *fbdev; 23 struct drm_fbdev_cma *fbdev;
24 struct drm_framebuffer *fb; 24 struct drm_framebuffer *fb;
25 struct list_head event_list;
26 struct drm_crtc crtc; 25 struct drm_crtc crtc;
27 struct drm_plane *plane; 26 struct drm_plane *plane;
28}; 27};
@@ -43,6 +42,7 @@ static inline u32 arc_pgu_read(struct arcpgu_drm_private *arcpgu,
43 42
44int arc_pgu_setup_crtc(struct drm_device *dev); 43int arc_pgu_setup_crtc(struct drm_device *dev);
45int arcpgu_drm_hdmi_init(struct drm_device *drm, struct device_node *np); 44int arcpgu_drm_hdmi_init(struct drm_device *drm, struct device_node *np);
45int arcpgu_drm_sim_init(struct drm_device *drm, struct device_node *np);
46struct drm_fbdev_cma *arcpgu_fbdev_cma_init(struct drm_device *dev, 46struct drm_fbdev_cma *arcpgu_fbdev_cma_init(struct drm_device *dev,
47 unsigned int preferred_bpp, unsigned int num_crtc, 47 unsigned int preferred_bpp, unsigned int num_crtc,
48 unsigned int max_conn_count); 48 unsigned int max_conn_count);
diff --git a/drivers/gpu/drm/arc/arcpgu_crtc.c b/drivers/gpu/drm/arc/arcpgu_crtc.c
index 92f8beff8e60..ee0a61c2861b 100644
--- a/drivers/gpu/drm/arc/arcpgu_crtc.c
+++ b/drivers/gpu/drm/arc/arcpgu_crtc.c
@@ -145,20 +145,14 @@ static int arc_pgu_crtc_atomic_check(struct drm_crtc *crtc,
145static void arc_pgu_crtc_atomic_begin(struct drm_crtc *crtc, 145static void arc_pgu_crtc_atomic_begin(struct drm_crtc *crtc,
146 struct drm_crtc_state *state) 146 struct drm_crtc_state *state)
147{ 147{
148 struct arcpgu_drm_private *arcpgu = crtc_to_arcpgu_priv(crtc); 148 struct drm_pending_vblank_event *event = crtc->state->event;
149 unsigned long flags;
150
151 if (crtc->state->event) {
152 struct drm_pending_vblank_event *event = crtc->state->event;
153 149
150 if (event) {
154 crtc->state->event = NULL; 151 crtc->state->event = NULL;
155 event->pipe = drm_crtc_index(crtc);
156
157 WARN_ON(drm_crtc_vblank_get(crtc) != 0);
158 152
159 spin_lock_irqsave(&crtc->dev->event_lock, flags); 153 spin_lock_irq(&crtc->dev->event_lock);
160 list_add_tail(&event->base.link, &arcpgu->event_list); 154 drm_crtc_send_vblank_event(crtc, event);
161 spin_unlock_irqrestore(&crtc->dev->event_lock, flags); 155 spin_unlock_irq(&crtc->dev->event_lock);
162 } 156 }
163} 157}
164 158
diff --git a/drivers/gpu/drm/arc/arcpgu_drv.c b/drivers/gpu/drm/arc/arcpgu_drv.c
index 76e187a5bde0..6d4ff34737cb 100644
--- a/drivers/gpu/drm/arc/arcpgu_drv.c
+++ b/drivers/gpu/drm/arc/arcpgu_drv.c
@@ -28,21 +28,14 @@ static void arcpgu_fb_output_poll_changed(struct drm_device *dev)
28{ 28{
29 struct arcpgu_drm_private *arcpgu = dev->dev_private; 29 struct arcpgu_drm_private *arcpgu = dev->dev_private;
30 30
31 if (arcpgu->fbdev) 31 drm_fbdev_cma_hotplug_event(arcpgu->fbdev);
32 drm_fbdev_cma_hotplug_event(arcpgu->fbdev);
33}
34
35static int arcpgu_atomic_commit(struct drm_device *dev,
36 struct drm_atomic_state *state, bool async)
37{
38 return drm_atomic_helper_commit(dev, state, false);
39} 32}
40 33
41static struct drm_mode_config_funcs arcpgu_drm_modecfg_funcs = { 34static struct drm_mode_config_funcs arcpgu_drm_modecfg_funcs = {
42 .fb_create = drm_fb_cma_create, 35 .fb_create = drm_fb_cma_create,
43 .output_poll_changed = arcpgu_fb_output_poll_changed, 36 .output_poll_changed = arcpgu_fb_output_poll_changed,
44 .atomic_check = drm_atomic_helper_check, 37 .atomic_check = drm_atomic_helper_check,
45 .atomic_commit = arcpgu_atomic_commit, 38 .atomic_commit = drm_atomic_helper_commit,
46}; 39};
47 40
48static void arcpgu_setup_mode_config(struct drm_device *drm) 41static void arcpgu_setup_mode_config(struct drm_device *drm)
@@ -55,7 +48,7 @@ static void arcpgu_setup_mode_config(struct drm_device *drm)
55 drm->mode_config.funcs = &arcpgu_drm_modecfg_funcs; 48 drm->mode_config.funcs = &arcpgu_drm_modecfg_funcs;
56} 49}
57 50
58int arcpgu_gem_mmap(struct file *filp, struct vm_area_struct *vma) 51static int arcpgu_gem_mmap(struct file *filp, struct vm_area_struct *vma)
59{ 52{
60 int ret; 53 int ret;
61 54
@@ -81,22 +74,6 @@ static const struct file_operations arcpgu_drm_ops = {
81 .mmap = arcpgu_gem_mmap, 74 .mmap = arcpgu_gem_mmap,
82}; 75};
83 76
84static void arcpgu_preclose(struct drm_device *drm, struct drm_file *file)
85{
86 struct arcpgu_drm_private *arcpgu = drm->dev_private;
87 struct drm_pending_vblank_event *e, *t;
88 unsigned long flags;
89
90 spin_lock_irqsave(&drm->event_lock, flags);
91 list_for_each_entry_safe(e, t, &arcpgu->event_list, base.link) {
92 if (e->base.file_priv != file)
93 continue;
94 list_del(&e->base.link);
95 e->base.destroy(&e->base);
96 }
97 spin_unlock_irqrestore(&drm->event_lock, flags);
98}
99
100static void arcpgu_lastclose(struct drm_device *drm) 77static void arcpgu_lastclose(struct drm_device *drm)
101{ 78{
102 struct arcpgu_drm_private *arcpgu = drm->dev_private; 79 struct arcpgu_drm_private *arcpgu = drm->dev_private;
@@ -122,16 +99,12 @@ static int arcpgu_load(struct drm_device *drm)
122 if (IS_ERR(arcpgu->clk)) 99 if (IS_ERR(arcpgu->clk))
123 return PTR_ERR(arcpgu->clk); 100 return PTR_ERR(arcpgu->clk);
124 101
125 INIT_LIST_HEAD(&arcpgu->event_list);
126
127 arcpgu_setup_mode_config(drm); 102 arcpgu_setup_mode_config(drm);
128 103
129 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 104 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
130 arcpgu->regs = devm_ioremap_resource(&pdev->dev, res); 105 arcpgu->regs = devm_ioremap_resource(&pdev->dev, res);
131 if (IS_ERR(arcpgu->regs)) { 106 if (IS_ERR(arcpgu->regs))
132 dev_err(drm->dev, "Could not remap IO mem\n");
133 return PTR_ERR(arcpgu->regs); 107 return PTR_ERR(arcpgu->regs);
134 }
135 108
136 dev_info(drm->dev, "arc_pgu ID: 0x%x\n", 109 dev_info(drm->dev, "arc_pgu ID: 0x%x\n",
137 arc_pgu_read(arcpgu, ARCPGU_REG_ID)); 110 arc_pgu_read(arcpgu, ARCPGU_REG_ID));
@@ -149,15 +122,17 @@ static int arcpgu_load(struct drm_device *drm)
 
 	/* find the encoder node and initialize it */
 	encoder_node = of_parse_phandle(drm->dev->of_node, "encoder-slave", 0);
-	if (!encoder_node) {
-		dev_err(drm->dev, "failed to get an encoder slave node\n");
-		return -ENODEV;
+	if (encoder_node) {
+		ret = arcpgu_drm_hdmi_init(drm, encoder_node);
+		of_node_put(encoder_node);
+		if (ret < 0)
+			return ret;
+	} else {
+		ret = arcpgu_drm_sim_init(drm, NULL);
+		if (ret < 0)
+			return ret;
 	}
 
-	ret = arcpgu_drm_hdmi_init(drm, encoder_node);
-	if (ret < 0)
-		return ret;
-
 	drm_mode_config_reset(drm);
 	drm_kms_helper_poll_init(drm);
 
@@ -174,7 +149,7 @@ static int arcpgu_load(struct drm_device *drm)
 	return 0;
 }
 
-int arcpgu_unload(struct drm_device *drm)
+static int arcpgu_unload(struct drm_device *drm)
 {
 	struct arcpgu_drm_private *arcpgu = drm->dev_private;
 
@@ -192,7 +167,6 @@ int arcpgu_unload(struct drm_device *drm)
 static struct drm_driver arcpgu_drm_driver = {
 	.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
 			   DRIVER_ATOMIC,
-	.preclose = arcpgu_preclose,
 	.lastclose = arcpgu_lastclose,
 	.name = "drm-arcpgu",
 	.desc = "ARC PGU Controller",
@@ -207,7 +181,7 @@ static struct drm_driver arcpgu_drm_driver = {
 	.get_vblank_counter = drm_vblank_no_hw_counter,
 	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
-	.gem_free_object = drm_gem_cma_free_object,
+	.gem_free_object_unlocked = drm_gem_cma_free_object,
 	.gem_vm_ops = &drm_gem_cma_vm_ops,
 	.gem_prime_export = drm_gem_prime_export,
 	.gem_prime_import = drm_gem_prime_import,
@@ -235,15 +209,8 @@ static int arcpgu_probe(struct platform_device *pdev)
 	if (ret)
 		goto err_unload;
 
-	ret = drm_connector_register_all(drm);
-	if (ret)
-		goto err_unregister;
-
 	return 0;
 
-err_unregister:
-	drm_dev_unregister(drm);
-
 err_unload:
 	arcpgu_unload(drm);
 
@@ -257,7 +224,6 @@ static int arcpgu_remove(struct platform_device *pdev)
 {
 	struct drm_device *drm = platform_get_drvdata(pdev);
 
-	drm_connector_unregister_all(drm);
 	drm_dev_unregister(drm);
 	arcpgu_unload(drm);
 	drm_dev_unref(drm);
diff --git a/drivers/gpu/drm/arc/arcpgu_hdmi.c b/drivers/gpu/drm/arc/arcpgu_hdmi.c
index 08b6baeb320d..b7a8b2ac4055 100644
--- a/drivers/gpu/drm/arc/arcpgu_hdmi.c
+++ b/drivers/gpu/drm/arc/arcpgu_hdmi.c
@@ -46,23 +46,6 @@ static int arcpgu_drm_connector_get_modes(struct drm_connector *connector)
 	return sfuncs->get_modes(&slave->base, connector);
 }
 
-struct drm_encoder *
-arcpgu_drm_connector_best_encoder(struct drm_connector *connector)
-{
-	struct drm_encoder_slave *slave;
-	struct arcpgu_drm_connector *con =
-		container_of(connector, struct arcpgu_drm_connector, connector);
-
-	slave = con->encoder_slave;
-	if (slave == NULL) {
-		dev_err(connector->dev->dev,
-			"connector_best_encoder: cannot find slave encoder for connector\n");
-		return NULL;
-	}
-
-	return &slave->base;
-}
-
 static enum drm_connector_status
 arcpgu_drm_connector_detect(struct drm_connector *connector, bool force)
 {
@@ -97,7 +80,6 @@ static void arcpgu_drm_connector_destroy(struct drm_connector *connector)
 static const struct drm_connector_helper_funcs
 arcpgu_drm_connector_helper_funcs = {
 	.get_modes = arcpgu_drm_connector_get_modes,
-	.best_encoder = arcpgu_drm_connector_best_encoder,
 };
 
 static const struct drm_connector_funcs arcpgu_drm_connector_funcs = {
diff --git a/drivers/gpu/drm/arc/arcpgu_sim.c b/drivers/gpu/drm/arc/arcpgu_sim.c
new file mode 100644
index 000000000000..2bf06d71556a
--- /dev/null
+++ b/drivers/gpu/drm/arc/arcpgu_sim.c
@@ -0,0 +1,128 @@
1/*
2 * ARC PGU DRM driver.
3 *
4 * Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com)
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 */
16
17#include <drm/drm_crtc_helper.h>
18#include <drm/drm_encoder_slave.h>
19#include <drm/drm_atomic_helper.h>
20
21#include "arcpgu.h"
22
23#define XRES_DEF 640
24#define YRES_DEF 480
25
26#define XRES_MAX 8192
27#define YRES_MAX 8192
28
29
30struct arcpgu_drm_connector {
31 struct drm_connector connector;
32 struct drm_encoder_slave *encoder_slave;
33};
34
35static int arcpgu_drm_connector_get_modes(struct drm_connector *connector)
36{
37 int count;
38
39 count = drm_add_modes_noedid(connector, XRES_MAX, YRES_MAX);
40 drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF);
41 return count;
42}
43
44static enum drm_connector_status
45arcpgu_drm_connector_detect(struct drm_connector *connector, bool force)
46{
47 return connector_status_connected;
48}
49
50static void arcpgu_drm_connector_destroy(struct drm_connector *connector)
51{
52 drm_connector_unregister(connector);
53 drm_connector_cleanup(connector);
54}
55
56static const struct drm_connector_helper_funcs
57arcpgu_drm_connector_helper_funcs = {
58 .get_modes = arcpgu_drm_connector_get_modes,
59};
60
61static const struct drm_connector_funcs arcpgu_drm_connector_funcs = {
62 .dpms = drm_helper_connector_dpms,
63 .reset = drm_atomic_helper_connector_reset,
64 .detect = arcpgu_drm_connector_detect,
65 .fill_modes = drm_helper_probe_single_connector_modes,
66 .destroy = arcpgu_drm_connector_destroy,
67 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
68 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
69};
70
71static struct drm_encoder_funcs arcpgu_drm_encoder_funcs = {
72 .destroy = drm_encoder_cleanup,
73};
74
75int arcpgu_drm_sim_init(struct drm_device *drm, struct device_node *np)
76{
77 struct arcpgu_drm_connector *arcpgu_connector;
78 struct drm_encoder_slave *encoder;
79 struct drm_connector *connector;
80 int ret;
81
82 encoder = devm_kzalloc(drm->dev, sizeof(*encoder), GFP_KERNEL);
83 if (encoder == NULL)
84 return -ENOMEM;
85
86 encoder->base.possible_crtcs = 1;
87 encoder->base.possible_clones = 0;
88
89 ret = drm_encoder_init(drm, &encoder->base, &arcpgu_drm_encoder_funcs,
90 DRM_MODE_ENCODER_VIRTUAL, NULL);
91 if (ret)
92 return ret;
93
94 arcpgu_connector = devm_kzalloc(drm->dev, sizeof(*arcpgu_connector),
95 GFP_KERNEL);
96 if (!arcpgu_connector) {
97 ret = -ENOMEM;
98 goto error_encoder_cleanup;
99 }
100
101 connector = &arcpgu_connector->connector;
102 drm_connector_helper_add(connector, &arcpgu_drm_connector_helper_funcs);
103
104 ret = drm_connector_init(drm, connector, &arcpgu_drm_connector_funcs,
105 DRM_MODE_CONNECTOR_VIRTUAL);
106 if (ret < 0) {
107 dev_err(drm->dev, "failed to initialize drm connector\n");
108 goto error_encoder_cleanup;
109 }
110
111 ret = drm_mode_connector_attach_encoder(connector, &encoder->base);
112 if (ret < 0) {
113 dev_err(drm->dev, "could not attach connector to encoder\n");
114 drm_connector_unregister(connector);
115 goto error_connector_cleanup;
116 }
117
118 arcpgu_connector->encoder_slave = encoder;
119
120 return 0;
121
122error_connector_cleanup:
123 drm_connector_cleanup(connector);
124
125error_encoder_cleanup:
126 drm_encoder_cleanup(&encoder->base);
127 return ret;
128}
diff --git a/drivers/gpu/drm/arm/Kconfig b/drivers/gpu/drm/arm/Kconfig
index eaed454e043c..9a18e1bd57b4 100644
--- a/drivers/gpu/drm/arm/Kconfig
+++ b/drivers/gpu/drm/arm/Kconfig
@@ -9,7 +9,6 @@ config DRM_HDLCD
 	depends on COMMON_CLK
 	select DRM_ARM
 	select DRM_KMS_HELPER
-	select DRM_KMS_FB_HELPER
 	select DRM_KMS_CMA_HELPER
 	help
 	  Choose this option if you have an ARM High Definition Colour LCD
@@ -25,3 +24,19 @@ config DRM_HDLCD_SHOW_UNDERRUN
 	  Enable this option to show in red colour the pixels that the
 	  HDLCD device did not fetch from framebuffer due to underrun
 	  conditions.
+
+config DRM_MALI_DISPLAY
+	tristate "ARM Mali Display Processor"
+	depends on DRM && OF && (ARM || ARM64)
+	depends on COMMON_CLK
+	select DRM_ARM
+	select DRM_KMS_HELPER
+	select DRM_KMS_CMA_HELPER
+	select DRM_GEM_CMA_HELPER
+	select VIDEOMODE_HELPERS
+	help
+	  Choose this option if you want to compile the ARM Mali Display
+	  Processor driver. It supports the DP500, DP550 and DP650 variants
+	  of the hardware.
+
+	  If compiled as a module it will be called mali-dp.
diff --git a/drivers/gpu/drm/arm/Makefile b/drivers/gpu/drm/arm/Makefile
index 89dcb7bab93a..bb8b158ff90d 100644
--- a/drivers/gpu/drm/arm/Makefile
+++ b/drivers/gpu/drm/arm/Makefile
@@ -1,2 +1,4 @@
 hdlcd-y := hdlcd_drv.o hdlcd_crtc.o
 obj-$(CONFIG_DRM_HDLCD) += hdlcd.o
+mali-dp-y := malidp_drv.o malidp_hw.o malidp_planes.o malidp_crtc.o
+obj-$(CONFIG_DRM_MALI_DISPLAY) += mali-dp.o
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index 0813c2f06931..48019ae22ddb 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -196,30 +196,11 @@ static void hdlcd_crtc_atomic_begin(struct drm_crtc *crtc,
 	}
 }
 
-static void hdlcd_crtc_atomic_flush(struct drm_crtc *crtc,
-				    struct drm_crtc_state *state)
-{
-}
-
-static bool hdlcd_crtc_mode_fixup(struct drm_crtc *crtc,
-				  const struct drm_display_mode *mode,
-				  struct drm_display_mode *adjusted_mode)
-{
-	return true;
-}
-
 static const struct drm_crtc_helper_funcs hdlcd_crtc_helper_funcs = {
-	.mode_fixup = hdlcd_crtc_mode_fixup,
-	.mode_set = drm_helper_crtc_mode_set,
-	.mode_set_base = drm_helper_crtc_mode_set_base,
-	.mode_set_nofb = hdlcd_crtc_mode_set_nofb,
 	.enable = hdlcd_crtc_enable,
 	.disable = hdlcd_crtc_disable,
-	.prepare = hdlcd_crtc_disable,
-	.commit = hdlcd_crtc_enable,
 	.atomic_check = hdlcd_crtc_atomic_check,
 	.atomic_begin = hdlcd_crtc_atomic_begin,
-	.atomic_flush = hdlcd_crtc_atomic_flush,
 };
 
 static int hdlcd_plane_atomic_check(struct drm_plane *plane,
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index a6ca36f0096f..d83b46a30327 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -102,21 +102,14 @@ static void hdlcd_fb_output_poll_changed(struct drm_device *drm)
 {
 	struct hdlcd_drm_private *hdlcd = drm->dev_private;
 
-	if (hdlcd->fbdev)
-		drm_fbdev_cma_hotplug_event(hdlcd->fbdev);
-}
-
-static int hdlcd_atomic_commit(struct drm_device *dev,
-			       struct drm_atomic_state *state, bool nonblock)
-{
-	return drm_atomic_helper_commit(dev, state, false);
+	drm_fbdev_cma_hotplug_event(hdlcd->fbdev);
 }
 
 static const struct drm_mode_config_funcs hdlcd_mode_config_funcs = {
 	.fb_create = drm_fb_cma_create,
 	.output_poll_changed = hdlcd_fb_output_poll_changed,
 	.atomic_check = drm_atomic_helper_check,
-	.atomic_commit = hdlcd_atomic_commit,
+	.atomic_commit = drm_atomic_helper_commit,
 };
 
 static void hdlcd_setup_mode_config(struct drm_device *drm)
@@ -296,7 +289,7 @@ static struct drm_driver hdlcd_driver = {
 	.get_vblank_counter = drm_vblank_no_hw_counter,
 	.enable_vblank = hdlcd_enable_vblank,
 	.disable_vblank = hdlcd_disable_vblank,
-	.gem_free_object = drm_gem_cma_free_object,
+	.gem_free_object_unlocked = drm_gem_cma_free_object,
 	.gem_vm_ops = &drm_gem_cma_vm_ops,
 	.dumb_create = drm_gem_cma_dumb_create,
 	.dumb_map_offset = drm_gem_cma_dumb_map_offset,
diff --git a/drivers/gpu/drm/arm/malidp_crtc.c b/drivers/gpu/drm/arm/malidp_crtc.c
new file mode 100644
index 000000000000..08e6a71f5d05
--- /dev/null
+++ b/drivers/gpu/drm/arm/malidp_crtc.c
@@ -0,0 +1,216 @@
1/*
2 * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
3 * Author: Liviu Dudau <Liviu.Dudau@arm.com>
4 *
5 * This program is free software and is provided to you under the terms of the
6 * GNU General Public License version 2 as published by the Free Software
7 * Foundation, and any use by you of this program is subject to the terms
8 * of such GNU licence.
9 *
10 * ARM Mali DP500/DP550/DP650 driver (crtc operations)
11 */
12
13#include <drm/drmP.h>
14#include <drm/drm_atomic.h>
15#include <drm/drm_atomic_helper.h>
16#include <drm/drm_crtc.h>
17#include <drm/drm_crtc_helper.h>
18#include <linux/clk.h>
19#include <video/videomode.h>
20
21#include "malidp_drv.h"
22#include "malidp_hw.h"
23
24static bool malidp_crtc_mode_fixup(struct drm_crtc *crtc,
25 const struct drm_display_mode *mode,
26 struct drm_display_mode *adjusted_mode)
27{
28 struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
29 struct malidp_hw_device *hwdev = malidp->dev;
30
31 /*
32 * check that the hardware can drive the required clock rate,
33 * but skip the check if the clock is meant to be disabled (req_rate = 0)
34 */
35 long rate, req_rate = mode->crtc_clock * 1000;
36
37 if (req_rate) {
38 rate = clk_round_rate(hwdev->mclk, req_rate);
39 if (rate < req_rate) {
40 DRM_DEBUG_DRIVER("mclk clock unable to reach %d kHz\n",
41 mode->crtc_clock);
42 return false;
43 }
44
45 rate = clk_round_rate(hwdev->pxlclk, req_rate);
46 if (rate != req_rate) {
47 DRM_DEBUG_DRIVER("pxlclk doesn't support %ld Hz\n",
48 req_rate);
49 return false;
50 }
51 }
52
53 return true;
54}
55
56static void malidp_crtc_enable(struct drm_crtc *crtc)
57{
58 struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
59 struct malidp_hw_device *hwdev = malidp->dev;
60 struct videomode vm;
61
62 drm_display_mode_to_videomode(&crtc->state->adjusted_mode, &vm);
63
64 clk_prepare_enable(hwdev->pxlclk);
65
66 /* mclk needs to be set to the same or higher rate than pxlclk */
67 clk_set_rate(hwdev->mclk, crtc->state->adjusted_mode.crtc_clock * 1000);
68 clk_set_rate(hwdev->pxlclk, crtc->state->adjusted_mode.crtc_clock * 1000);
69
70 hwdev->modeset(hwdev, &vm);
71 hwdev->leave_config_mode(hwdev);
72 drm_crtc_vblank_on(crtc);
73}
74
75static void malidp_crtc_disable(struct drm_crtc *crtc)
76{
77 struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
78 struct malidp_hw_device *hwdev = malidp->dev;
79
80 drm_crtc_vblank_off(crtc);
81 hwdev->enter_config_mode(hwdev);
82 clk_disable_unprepare(hwdev->pxlclk);
83}
84
85static int malidp_crtc_atomic_check(struct drm_crtc *crtc,
86 struct drm_crtc_state *state)
87{
88 struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
89 struct malidp_hw_device *hwdev = malidp->dev;
90 struct drm_plane *plane;
91 const struct drm_plane_state *pstate;
92 u32 rot_mem_free, rot_mem_usable;
93 int rotated_planes = 0;
94
95 /*
96 * check if there is enough rotation memory available for planes
97 * that need 90° and 270° rotation. Each plane has set its required
98 * memory size in the ->plane_check() callback, here we only make
99 * sure that the sums are less than the total usable memory.
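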
100 *
101 * The rotation memory allocation algorithm (for each plane):
102 * a. If no more rotated planes exist, all remaining rotate
103 * memory in the bank is available for use by the plane.
104 * b. If other rotated planes exist, and plane's layer ID is
105 * DE_VIDEO1, it can use all the memory from first bank if
106 * secondary rotation memory bank is available, otherwise it can
107 * use up to half the bank's memory.
108 * c. If other rotated planes exist, and plane's layer ID is not
109 * DE_VIDEO1, it can use half of the available memory
110 *
111 * Note: this algorithm assumes that the order in which the planes are
112 * checked always has DE_VIDEO1 plane first in the list if it is
113 * rotated. Because that is how we create the planes in the first
114 * place, under current DRM version things work, but if ever the order
115 * in which drm_atomic_crtc_state_for_each_plane() iterates over planes
116 * changes, we need to pre-sort the planes before validation.
117 */
118
119 /* first count the number of rotated planes */
120 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
121 if (pstate->rotation & MALIDP_ROTATED_MASK)
122 rotated_planes++;
123 }
124
125 rot_mem_free = hwdev->rotation_memory[0];
126 /*
127 * if we have more than 1 plane using rotation memory, use the second
128 * block of rotation memory as well
129 */
130 if (rotated_planes > 1)
131 rot_mem_free += hwdev->rotation_memory[1];
132
133 /* now validate the rotation memory requirements */
134 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
135 struct malidp_plane *mp = to_malidp_plane(plane);
136 struct malidp_plane_state *ms = to_malidp_plane_state(pstate);
137
138 if (pstate->rotation & MALIDP_ROTATED_MASK) {
139 /* process current plane */
140 rotated_planes--;
141
142 if (!rotated_planes) {
143 /* no more rotated planes, we can use what's left */
144 rot_mem_usable = rot_mem_free;
145 } else {
146 if ((mp->layer->id != DE_VIDEO1) ||
147 (hwdev->rotation_memory[1] == 0))
148 rot_mem_usable = rot_mem_free / 2;
149 else
150 rot_mem_usable = hwdev->rotation_memory[0];
151 }
152
153 rot_mem_free -= rot_mem_usable;
154
155 if (ms->rotmem_size > rot_mem_usable)
156 return -EINVAL;
157 }
158 }
159
160 return 0;
161}
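
The comment block in malidp_crtc_atomic_check() above describes the rotation memory budgeting policy in prose. The following is a minimal standalone sketch of that policy (not part of the patch), using hypothetical bank sizes and plane demands; plane 0 stands in for DE_VIDEO1:

/*
 * Illustrative sketch of the rotation memory budgeting policy described
 * above. Bank sizes and rotmem_size values are assumptions for the example.
 */
#include <stdio.h>
#include <stdbool.h>

#define BANK0	(64 * 1024)	/* assumed size of the first rotation memory bank */
#define BANK1	(64 * 1024)	/* assumed size of the second rotation memory bank */

int main(void)
{
	/* two rotated planes, with DE_VIDEO1 (index 0) checked first */
	unsigned int rotmem_size[] = { 60 * 1024, 30 * 1024 };
	int rotated_planes = 2;
	unsigned int rot_mem_free = BANK0 + (rotated_planes > 1 ? BANK1 : 0);

	for (int i = 0; i < 2; i++) {
		unsigned int usable;
		bool is_video1 = (i == 0);

		rotated_planes--;
		if (!rotated_planes)
			usable = rot_mem_free;		/* last rotated plane takes the rest */
		else if (is_video1 && BANK1)
			usable = BANK0;			/* DE_VIDEO1 may claim the whole first bank */
		else
			usable = rot_mem_free / 2;	/* otherwise split what is left */
		rot_mem_free -= usable;

		printf("plane %d: usable %u, needs %u -> %s\n", i, usable,
		       rotmem_size[i], rotmem_size[i] > usable ? "-EINVAL" : "ok");
	}
	return 0;
}
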
162
163static const struct drm_crtc_helper_funcs malidp_crtc_helper_funcs = {
164 .mode_fixup = malidp_crtc_mode_fixup,
165 .enable = malidp_crtc_enable,
166 .disable = malidp_crtc_disable,
167 .atomic_check = malidp_crtc_atomic_check,
168};
169
170static const struct drm_crtc_funcs malidp_crtc_funcs = {
171 .destroy = drm_crtc_cleanup,
172 .set_config = drm_atomic_helper_set_config,
173 .page_flip = drm_atomic_helper_page_flip,
174 .reset = drm_atomic_helper_crtc_reset,
175 .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
176 .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
177};
178
179int malidp_crtc_init(struct drm_device *drm)
180{
181 struct malidp_drm *malidp = drm->dev_private;
182 struct drm_plane *primary = NULL, *plane;
183 int ret;
184
185 ret = malidp_de_planes_init(drm);
186 if (ret < 0) {
187 DRM_ERROR("Failed to initialise planes\n");
188 return ret;
189 }
190
191 drm_for_each_plane(plane, drm) {
192 if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
193 primary = plane;
194 break;
195 }
196 }
197
198 if (!primary) {
199 DRM_ERROR("no primary plane found\n");
200 ret = -EINVAL;
201 goto crtc_cleanup_planes;
202 }
203
204 ret = drm_crtc_init_with_planes(drm, &malidp->crtc, primary, NULL,
205 &malidp_crtc_funcs, NULL);
206
207 if (!ret) {
208 drm_crtc_helper_add(&malidp->crtc, &malidp_crtc_helper_funcs);
209 return 0;
210 }
211
212crtc_cleanup_planes:
213 malidp_de_planes_destroy(drm);
214
215 return ret;
216}
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
new file mode 100644
index 000000000000..82171d223f2d
--- /dev/null
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -0,0 +1,519 @@
1/*
2 * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
3 * Author: Liviu Dudau <Liviu.Dudau@arm.com>
4 *
5 * This program is free software and is provided to you under the terms of the
6 * GNU General Public License version 2 as published by the Free Software
7 * Foundation, and any use by you of this program is subject to the terms
8 * of such GNU licence.
9 *
10 * ARM Mali DP500/DP550/DP650 KMS/DRM driver
11 */
12
13#include <linux/module.h>
14#include <linux/clk.h>
15#include <linux/component.h>
16#include <linux/of_device.h>
17#include <linux/of_graph.h>
18#include <linux/of_reserved_mem.h>
19
20#include <drm/drmP.h>
21#include <drm/drm_atomic.h>
22#include <drm/drm_atomic_helper.h>
23#include <drm/drm_crtc.h>
24#include <drm/drm_crtc_helper.h>
25#include <drm/drm_fb_helper.h>
26#include <drm/drm_fb_cma_helper.h>
27#include <drm/drm_gem_cma_helper.h>
28#include <drm/drm_of.h>
29
30#include "malidp_drv.h"
31#include "malidp_regs.h"
32#include "malidp_hw.h"
33
34#define MALIDP_CONF_VALID_TIMEOUT 250
35
36/*
37 * set the "config valid" bit and wait until the hardware acts on it
38 */
39static int malidp_set_and_wait_config_valid(struct drm_device *drm)
40{
41 struct malidp_drm *malidp = drm->dev_private;
42 struct malidp_hw_device *hwdev = malidp->dev;
43 int ret;
44
45 hwdev->set_config_valid(hwdev);
46 /* don't wait for config_valid flag if we are in config mode */
47 if (hwdev->in_config_mode(hwdev))
48 return 0;
49
50 ret = wait_event_interruptible_timeout(malidp->wq,
51 atomic_read(&malidp->config_valid) == 1,
52 msecs_to_jiffies(MALIDP_CONF_VALID_TIMEOUT));
53
54 return (ret > 0) ? 0 : -ETIMEDOUT;
55}
56
57static void malidp_output_poll_changed(struct drm_device *drm)
58{
59 struct malidp_drm *malidp = drm->dev_private;
60
61 drm_fbdev_cma_hotplug_event(malidp->fbdev);
62}
63
64static void malidp_atomic_commit_hw_done(struct drm_atomic_state *state)
65{
66 struct drm_pending_vblank_event *event;
67 struct drm_device *drm = state->dev;
68 struct malidp_drm *malidp = drm->dev_private;
69 int ret = malidp_set_and_wait_config_valid(drm);
70
71 if (ret)
72 DRM_DEBUG_DRIVER("timed out waiting for updated configuration\n");
73
74 event = malidp->crtc.state->event;
75 if (event) {
76 malidp->crtc.state->event = NULL;
77
78 spin_lock_irq(&drm->event_lock);
79 if (drm_crtc_vblank_get(&malidp->crtc) == 0)
80 drm_crtc_arm_vblank_event(&malidp->crtc, event);
81 else
82 drm_crtc_send_vblank_event(&malidp->crtc, event);
83 spin_unlock_irq(&drm->event_lock);
84 }
85 drm_atomic_helper_commit_hw_done(state);
86}
87
88static void malidp_atomic_commit_tail(struct drm_atomic_state *state)
89{
90 struct drm_device *drm = state->dev;
91
92 drm_atomic_helper_commit_modeset_disables(drm, state);
93 drm_atomic_helper_commit_modeset_enables(drm, state);
94 drm_atomic_helper_commit_planes(drm, state, true);
95
96 malidp_atomic_commit_hw_done(state);
97
98 drm_atomic_helper_wait_for_vblanks(drm, state);
99
100 drm_atomic_helper_cleanup_planes(drm, state);
101}
102
103static struct drm_mode_config_helper_funcs malidp_mode_config_helpers = {
104 .atomic_commit_tail = malidp_atomic_commit_tail,
105};
106
107static const struct drm_mode_config_funcs malidp_mode_config_funcs = {
108 .fb_create = drm_fb_cma_create,
109 .output_poll_changed = malidp_output_poll_changed,
110 .atomic_check = drm_atomic_helper_check,
111 .atomic_commit = drm_atomic_helper_commit,
112};
113
114static int malidp_enable_vblank(struct drm_device *drm, unsigned int crtc)
115{
116 struct malidp_drm *malidp = drm->dev_private;
117 struct malidp_hw_device *hwdev = malidp->dev;
118
119 malidp_hw_enable_irq(hwdev, MALIDP_DE_BLOCK,
120 hwdev->map.de_irq_map.vsync_irq);
121 return 0;
122}
123
124static void malidp_disable_vblank(struct drm_device *drm, unsigned int pipe)
125{
126 struct malidp_drm *malidp = drm->dev_private;
127 struct malidp_hw_device *hwdev = malidp->dev;
128
129 malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK,
130 hwdev->map.de_irq_map.vsync_irq);
131}
132
133static int malidp_init(struct drm_device *drm)
134{
135 int ret;
136 struct malidp_drm *malidp = drm->dev_private;
137 struct malidp_hw_device *hwdev = malidp->dev;
138
139 drm_mode_config_init(drm);
140
141 drm->mode_config.min_width = hwdev->min_line_size;
142 drm->mode_config.min_height = hwdev->min_line_size;
143 drm->mode_config.max_width = hwdev->max_line_size;
144 drm->mode_config.max_height = hwdev->max_line_size;
145 drm->mode_config.funcs = &malidp_mode_config_funcs;
146 drm->mode_config.helper_private = &malidp_mode_config_helpers;
147
148 ret = malidp_crtc_init(drm);
149 if (ret) {
150 drm_mode_config_cleanup(drm);
151 return ret;
152 }
153
154 return 0;
155}
156
157static int malidp_irq_init(struct platform_device *pdev)
158{
159 int irq_de, irq_se, ret = 0;
160 struct drm_device *drm = dev_get_drvdata(&pdev->dev);
161
162 /* fetch the interrupts from DT */
163 irq_de = platform_get_irq_byname(pdev, "DE");
164 if (irq_de < 0) {
165 DRM_ERROR("no 'DE' IRQ specified!\n");
166 return irq_de;
167 }
168 irq_se = platform_get_irq_byname(pdev, "SE");
169 if (irq_se < 0) {
170 DRM_ERROR("no 'SE' IRQ specified!\n");
171 return irq_se;
172 }
173
174 ret = malidp_de_irq_init(drm, irq_de);
175 if (ret)
176 return ret;
177
178 ret = malidp_se_irq_init(drm, irq_se);
179 if (ret) {
180 malidp_de_irq_fini(drm);
181 return ret;
182 }
183
184 return 0;
185}
186
187static void malidp_lastclose(struct drm_device *drm)
188{
189 struct malidp_drm *malidp = drm->dev_private;
190
191 drm_fbdev_cma_restore_mode(malidp->fbdev);
192}
193
194static const struct file_operations fops = {
195 .owner = THIS_MODULE,
196 .open = drm_open,
197 .release = drm_release,
198 .unlocked_ioctl = drm_ioctl,
199#ifdef CONFIG_COMPAT
200 .compat_ioctl = drm_compat_ioctl,
201#endif
202 .poll = drm_poll,
203 .read = drm_read,
204 .llseek = noop_llseek,
205 .mmap = drm_gem_cma_mmap,
206};
207
208static struct drm_driver malidp_driver = {
209 .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC |
210 DRIVER_PRIME,
211 .lastclose = malidp_lastclose,
212 .get_vblank_counter = drm_vblank_no_hw_counter,
213 .enable_vblank = malidp_enable_vblank,
214 .disable_vblank = malidp_disable_vblank,
215 .gem_free_object_unlocked = drm_gem_cma_free_object,
216 .gem_vm_ops = &drm_gem_cma_vm_ops,
217 .dumb_create = drm_gem_cma_dumb_create,
218 .dumb_map_offset = drm_gem_cma_dumb_map_offset,
219 .dumb_destroy = drm_gem_dumb_destroy,
220 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
221 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
222 .gem_prime_export = drm_gem_prime_export,
223 .gem_prime_import = drm_gem_prime_import,
224 .gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
225 .gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
226 .gem_prime_vmap = drm_gem_cma_prime_vmap,
227 .gem_prime_vunmap = drm_gem_cma_prime_vunmap,
228 .gem_prime_mmap = drm_gem_cma_prime_mmap,
229 .fops = &fops,
230 .name = "mali-dp",
231 .desc = "ARM Mali Display Processor driver",
232 .date = "20160106",
233 .major = 1,
234 .minor = 0,
235};
236
237static const struct of_device_id malidp_drm_of_match[] = {
238 {
239 .compatible = "arm,mali-dp500",
240 .data = &malidp_device[MALIDP_500]
241 },
242 {
243 .compatible = "arm,mali-dp550",
244 .data = &malidp_device[MALIDP_550]
245 },
246 {
247 .compatible = "arm,mali-dp650",
248 .data = &malidp_device[MALIDP_650]
249 },
250 {},
251};
252MODULE_DEVICE_TABLE(of, malidp_drm_of_match);
253
254#define MAX_OUTPUT_CHANNELS 3
255
256static int malidp_bind(struct device *dev)
257{
258 struct resource *res;
259 struct drm_device *drm;
260 struct device_node *ep;
261 struct malidp_drm *malidp;
262 struct malidp_hw_device *hwdev;
263 struct platform_device *pdev = to_platform_device(dev);
264 /* number of lines for the R, G and B output */
265 u8 output_width[MAX_OUTPUT_CHANNELS];
266 int ret = 0, i;
267 u32 version, out_depth = 0;
268
269 malidp = devm_kzalloc(dev, sizeof(*malidp), GFP_KERNEL);
270 if (!malidp)
271 return -ENOMEM;
272
273 hwdev = devm_kzalloc(dev, sizeof(*hwdev), GFP_KERNEL);
274 if (!hwdev)
275 return -ENOMEM;
276
277 /*
278 * copy the associated data from malidp_drm_of_match to avoid
279 * having to keep a reference to the OF node after binding
280 */
281 memcpy(hwdev, of_device_get_match_data(dev), sizeof(*hwdev));
282 malidp->dev = hwdev;
283
284 INIT_LIST_HEAD(&malidp->event_list);
285
286 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
287 hwdev->regs = devm_ioremap_resource(dev, res);
288 if (IS_ERR(hwdev->regs))
289 return PTR_ERR(hwdev->regs);
290
291 hwdev->pclk = devm_clk_get(dev, "pclk");
292 if (IS_ERR(hwdev->pclk))
293 return PTR_ERR(hwdev->pclk);
294
295 hwdev->aclk = devm_clk_get(dev, "aclk");
296 if (IS_ERR(hwdev->aclk))
297 return PTR_ERR(hwdev->aclk);
298
299 hwdev->mclk = devm_clk_get(dev, "mclk");
300 if (IS_ERR(hwdev->mclk))
301 return PTR_ERR(hwdev->mclk);
302
303 hwdev->pxlclk = devm_clk_get(dev, "pxlclk");
304 if (IS_ERR(hwdev->pxlclk))
305 return PTR_ERR(hwdev->pxlclk);
306
307 /* Get the optional framebuffer memory resource */
308 ret = of_reserved_mem_device_init(dev);
309 if (ret && ret != -ENODEV)
310 return ret;
311
312 drm = drm_dev_alloc(&malidp_driver, dev);
313 if (!drm) {
314 ret = -ENOMEM;
315 goto alloc_fail;
316 }
317
318 /* Enable APB clock in order to get access to the registers */
319 clk_prepare_enable(hwdev->pclk);
320 /*
321 * Enable AXI clock and main clock so that prefetch can start once
322 * the registers are set
323 */
324 clk_prepare_enable(hwdev->aclk);
325 clk_prepare_enable(hwdev->mclk);
326
327 ret = hwdev->query_hw(hwdev);
328 if (ret) {
329 DRM_ERROR("Invalid HW configuration\n");
330 goto query_hw_fail;
331 }
332
333 version = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_DE_CORE_ID);
334 DRM_INFO("found ARM Mali-DP%3x version r%dp%d\n", version >> 16,
335 (version >> 12) & 0xf, (version >> 8) & 0xf);
336
337 /* set the number of lines used for output of RGB data */
338 ret = of_property_read_u8_array(dev->of_node,
339 "arm,malidp-output-port-lines",
340 output_width, MAX_OUTPUT_CHANNELS);
341 if (ret)
342 goto query_hw_fail;
343
344 for (i = 0; i < MAX_OUTPUT_CHANNELS; i++)
345 out_depth = (out_depth << 8) | (output_width[i] & 0xf);
346 malidp_hw_write(hwdev, out_depth, hwdev->map.out_depth_base);
347
348 drm->dev_private = malidp;
349 dev_set_drvdata(dev, drm);
350 atomic_set(&malidp->config_valid, 0);
351 init_waitqueue_head(&malidp->wq);
352
353 ret = malidp_init(drm);
354 if (ret < 0)
355 goto init_fail;
356
357 ret = drm_dev_register(drm, 0);
358 if (ret)
359 goto register_fail;
360
361 /* Set the CRTC's port so that the encoder component can find it */
362 ep = of_graph_get_next_endpoint(dev->of_node, NULL);
363 if (!ep) {
364 ret = -EINVAL;
365 goto port_fail;
366 }
367 malidp->crtc.port = of_get_next_parent(ep);
368
369 ret = component_bind_all(dev, drm);
370 if (ret) {
371 DRM_ERROR("Failed to bind all components\n");
372 goto bind_fail;
373 }
374
375 ret = malidp_irq_init(pdev);
376 if (ret < 0)
377 goto irq_init_fail;
378
379 ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
380 if (ret < 0) {
381 DRM_ERROR("failed to initialise vblank\n");
382 goto vblank_fail;
383 }
384
385 drm_mode_config_reset(drm);
386
387 malidp->fbdev = drm_fbdev_cma_init(drm, 32, drm->mode_config.num_crtc,
388 drm->mode_config.num_connector);
389
390 if (IS_ERR(malidp->fbdev)) {
391 ret = PTR_ERR(malidp->fbdev);
392 malidp->fbdev = NULL;
393 goto fbdev_fail;
394 }
395
396 drm_kms_helper_poll_init(drm);
397 return 0;
398
399fbdev_fail:
400 drm_vblank_cleanup(drm);
401vblank_fail:
402 malidp_se_irq_fini(drm);
403 malidp_de_irq_fini(drm);
404irq_init_fail:
405 component_unbind_all(dev, drm);
406bind_fail:
407 of_node_put(malidp->crtc.port);
408 malidp->crtc.port = NULL;
409port_fail:
410 drm_dev_unregister(drm);
411register_fail:
412 malidp_de_planes_destroy(drm);
413 drm_mode_config_cleanup(drm);
414init_fail:
415 drm->dev_private = NULL;
416 dev_set_drvdata(dev, NULL);
417query_hw_fail:
418 clk_disable_unprepare(hwdev->mclk);
419 clk_disable_unprepare(hwdev->aclk);
420 clk_disable_unprepare(hwdev->pclk);
421 drm_dev_unref(drm);
422alloc_fail:
423 of_reserved_mem_device_release(dev);
424
425 return ret;
426}
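
As an illustrative aside (not part of the patch): the out_depth packing loop in malidp_bind() above shifts each per-channel line count into one byte of the OUTPUT_DEPTH register. A small sketch with assumed device-tree values of 8 lines per channel:

/* Hypothetical run of the out_depth packing loop from malidp_bind(). */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t output_width[3] = { 8, 8, 8 };	/* R, G, B output lines (assumed DT values) */
	uint32_t out_depth = 0;

	for (int i = 0; i < 3; i++)
		out_depth = (out_depth << 8) | (output_width[i] & 0xf);

	printf("out_depth = 0x%06x\n", out_depth);	/* 0x080808 */
	return 0;
}
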
427
428static void malidp_unbind(struct device *dev)
429{
430 struct drm_device *drm = dev_get_drvdata(dev);
431 struct malidp_drm *malidp = drm->dev_private;
432 struct malidp_hw_device *hwdev = malidp->dev;
433
434 if (malidp->fbdev) {
435 drm_fbdev_cma_fini(malidp->fbdev);
436 malidp->fbdev = NULL;
437 }
438 drm_kms_helper_poll_fini(drm);
439 malidp_se_irq_fini(drm);
440 malidp_de_irq_fini(drm);
441 drm_vblank_cleanup(drm);
442 component_unbind_all(dev, drm);
443 of_node_put(malidp->crtc.port);
444 malidp->crtc.port = NULL;
445 drm_dev_unregister(drm);
446 malidp_de_planes_destroy(drm);
447 drm_mode_config_cleanup(drm);
448 drm->dev_private = NULL;
449 dev_set_drvdata(dev, NULL);
450 clk_disable_unprepare(hwdev->mclk);
451 clk_disable_unprepare(hwdev->aclk);
452 clk_disable_unprepare(hwdev->pclk);
453 drm_dev_unref(drm);
454 of_reserved_mem_device_release(dev);
455}
456
457static const struct component_master_ops malidp_master_ops = {
458 .bind = malidp_bind,
459 .unbind = malidp_unbind,
460};
461
462static int malidp_compare_dev(struct device *dev, void *data)
463{
464 struct device_node *np = data;
465
466 return dev->of_node == np;
467}
468
469static int malidp_platform_probe(struct platform_device *pdev)
470{
471 struct device_node *port, *ep;
472 struct component_match *match = NULL;
473
474 if (!pdev->dev.of_node)
475 return -ENODEV;
476
477 /* there is only one output port inside each device, find it */
478 ep = of_graph_get_next_endpoint(pdev->dev.of_node, NULL);
479 if (!ep)
480 return -ENODEV;
481
482 if (!of_device_is_available(ep)) {
483 of_node_put(ep);
484 return -ENODEV;
485 }
486
487 /* add the remote encoder port as component */
488 port = of_graph_get_remote_port_parent(ep);
489 of_node_put(ep);
490 if (!port || !of_device_is_available(port)) {
491 of_node_put(port);
492 return -EAGAIN;
493 }
494
495 component_match_add(&pdev->dev, &match, malidp_compare_dev, port);
496 return component_master_add_with_match(&pdev->dev, &malidp_master_ops,
497 match);
498}
499
500static int malidp_platform_remove(struct platform_device *pdev)
501{
502 component_master_del(&pdev->dev, &malidp_master_ops);
503 return 0;
504}
505
506static struct platform_driver malidp_platform_driver = {
507 .probe = malidp_platform_probe,
508 .remove = malidp_platform_remove,
509 .driver = {
510 .name = "mali-dp",
511 .of_match_table = malidp_drm_of_match,
512 },
513};
514
515module_platform_driver(malidp_platform_driver);
516
517MODULE_AUTHOR("Liviu Dudau <Liviu.Dudau@arm.com>");
518MODULE_DESCRIPTION("ARM Mali DP DRM driver");
519MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/arm/malidp_drv.h b/drivers/gpu/drm/arm/malidp_drv.h
new file mode 100644
index 000000000000..95558fde214b
--- /dev/null
+++ b/drivers/gpu/drm/arm/malidp_drv.h
@@ -0,0 +1,54 @@
1/*
2 * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
3 * Author: Liviu Dudau <Liviu.Dudau@arm.com>
4 *
5 * This program is free software and is provided to you under the terms of the
6 * GNU General Public License version 2 as published by the Free Software
7 * Foundation, and any use by you of this program is subject to the terms
8 * of such GNU licence.
9 *
10 * ARM Mali DP500/DP550/DP650 KMS/DRM driver structures
11 */
12
13#ifndef __MALIDP_DRV_H__
14#define __MALIDP_DRV_H__
15
16#include <linux/mutex.h>
17#include <linux/wait.h>
18#include "malidp_hw.h"
19
20struct malidp_drm {
21 struct malidp_hw_device *dev;
22 struct drm_fbdev_cma *fbdev;
23 struct list_head event_list;
24 struct drm_crtc crtc;
25 wait_queue_head_t wq;
26 atomic_t config_valid;
27};
28
29#define crtc_to_malidp_device(x) container_of(x, struct malidp_drm, crtc)
30
31struct malidp_plane {
32 struct drm_plane base;
33 struct malidp_hw_device *hwdev;
34 const struct malidp_layer *layer;
35};
36
37struct malidp_plane_state {
38 struct drm_plane_state base;
39
40 /* size of the required rotation memory if plane is rotated */
41 u32 rotmem_size;
42};
43
44#define to_malidp_plane(x) container_of(x, struct malidp_plane, base)
45#define to_malidp_plane_state(x) container_of(x, struct malidp_plane_state, base)
46
47int malidp_de_planes_init(struct drm_device *drm);
48void malidp_de_planes_destroy(struct drm_device *drm);
49int malidp_crtc_init(struct drm_device *drm);
50
51/* often used combination of rotational bits */
52#define MALIDP_ROTATED_MASK (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))
53
54#endif /* __MALIDP_DRV_H__ */
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
new file mode 100644
index 000000000000..a6132f1d58c1
--- /dev/null
+++ b/drivers/gpu/drm/arm/malidp_hw.c
@@ -0,0 +1,691 @@
1/*
2 * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
3 * Author: Liviu Dudau <Liviu.Dudau@arm.com>
4 *
5 * This program is free software and is provided to you under the terms of the
6 * GNU General Public License version 2 as published by the Free Software
7 * Foundation, and any use by you of this program is subject to the terms
8 * of such GNU licence.
9 *
10 * ARM Mali DP500/DP550/DP650 hardware manipulation routines. This is where
11 * the difference between various versions of the hardware is being dealt with
12 * in an attempt to provide to the rest of the driver code a unified view
13 */
14
15#include <linux/types.h>
16#include <linux/io.h>
17#include <drm/drmP.h>
18#include <video/videomode.h>
19#include <video/display_timing.h>
20
21#include "malidp_drv.h"
22#include "malidp_hw.h"
23
24static const struct malidp_input_format malidp500_de_formats[] = {
25 /* fourcc, layers supporting the format, internal id */
26 { DRM_FORMAT_ARGB2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 0 },
27 { DRM_FORMAT_ABGR2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 1 },
28 { DRM_FORMAT_ARGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 2 },
29 { DRM_FORMAT_ABGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 3 },
30 { DRM_FORMAT_XRGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 4 },
31 { DRM_FORMAT_XBGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 5 },
32 { DRM_FORMAT_RGB888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 6 },
33 { DRM_FORMAT_BGR888, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 7 },
34 { DRM_FORMAT_RGBA5551, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 8 },
35 { DRM_FORMAT_ABGR1555, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 9 },
36 { DRM_FORMAT_RGB565, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 10 },
37 { DRM_FORMAT_BGR565, DE_VIDEO1 | DE_GRAPHICS1 | DE_GRAPHICS2, 11 },
38 { DRM_FORMAT_UYVY, DE_VIDEO1, 12 },
39 { DRM_FORMAT_YUYV, DE_VIDEO1, 13 },
40 { DRM_FORMAT_NV12, DE_VIDEO1, 14 },
41 { DRM_FORMAT_YUV420, DE_VIDEO1, 15 },
42};
43
44#define MALIDP_ID(__group, __format) \
45 ((((__group) & 0x7) << 3) | ((__format) & 0x7))
46
47#define MALIDP_COMMON_FORMATS \
48 /* fourcc, layers supporting the format, internal id */ \
49 { DRM_FORMAT_ARGB2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(0, 0) }, \
50 { DRM_FORMAT_ABGR2101010, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(0, 1) }, \
51 { DRM_FORMAT_RGBA1010102, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(0, 2) }, \
52 { DRM_FORMAT_BGRA1010102, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(0, 3) }, \
53 { DRM_FORMAT_ARGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(1, 0) }, \
54 { DRM_FORMAT_ABGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(1, 1) }, \
55 { DRM_FORMAT_RGBA8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(1, 2) }, \
56 { DRM_FORMAT_BGRA8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(1, 3) }, \
57 { DRM_FORMAT_XRGB8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(2, 0) }, \
58 { DRM_FORMAT_XBGR8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(2, 1) }, \
59 { DRM_FORMAT_RGBX8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(2, 2) }, \
60 { DRM_FORMAT_BGRX8888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2 | DE_SMART, MALIDP_ID(2, 3) }, \
61 { DRM_FORMAT_RGB888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(3, 0) }, \
62 { DRM_FORMAT_BGR888, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(3, 1) }, \
63 { DRM_FORMAT_RGBA5551, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 0) }, \
64 { DRM_FORMAT_ABGR1555, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 1) }, \
65 { DRM_FORMAT_RGB565, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 2) }, \
66 { DRM_FORMAT_BGR565, DE_VIDEO1 | DE_GRAPHICS1 | DE_VIDEO2, MALIDP_ID(4, 3) }, \
67 { DRM_FORMAT_YUYV, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 2) }, \
68 { DRM_FORMAT_UYVY, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 3) }, \
69 { DRM_FORMAT_NV12, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 6) }, \
70 { DRM_FORMAT_YUV420, DE_VIDEO1 | DE_VIDEO2, MALIDP_ID(5, 7) }
71
72static const struct malidp_input_format malidp550_de_formats[] = {
73 MALIDP_COMMON_FORMATS,
74};
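
As an illustrative aside (not part of the patch): MALIDP_ID() above packs a 3-bit format group and a 3-bit index within that group into the 6-bit hardware format id used by the DP550/DP650 format table. A minimal sketch of two of the values from the table:

/* Worked example of the MALIDP_ID() packing used in the format table above. */
#include <stdio.h>

#define MALIDP_ID(__group, __format) \
	((((__group) & 0x7) << 3) | ((__format) & 0x7))

int main(void)
{
	printf("MALIDP_ID(5, 6) = 0x%02x\n", MALIDP_ID(5, 6));	/* NV12 entry: 0x2e */
	printf("MALIDP_ID(4, 2) = 0x%02x\n", MALIDP_ID(4, 2));	/* RGB565 entry: 0x22 */
	return 0;
}
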
75
76static const struct malidp_layer malidp500_layers[] = {
77 { DE_VIDEO1, MALIDP500_DE_LV_BASE, MALIDP500_DE_LV_PTR_BASE },
78 { DE_GRAPHICS1, MALIDP500_DE_LG1_BASE, MALIDP500_DE_LG1_PTR_BASE },
79 { DE_GRAPHICS2, MALIDP500_DE_LG2_BASE, MALIDP500_DE_LG2_PTR_BASE },
80};
81
82static const struct malidp_layer malidp550_layers[] = {
83 { DE_VIDEO1, MALIDP550_DE_LV1_BASE, MALIDP550_DE_LV1_PTR_BASE },
84 { DE_GRAPHICS1, MALIDP550_DE_LG_BASE, MALIDP550_DE_LG_PTR_BASE },
85 { DE_VIDEO2, MALIDP550_DE_LV2_BASE, MALIDP550_DE_LV2_PTR_BASE },
86 { DE_SMART, MALIDP550_DE_LS_BASE, MALIDP550_DE_LS_PTR_BASE },
87};
88
89#define MALIDP_DE_DEFAULT_PREFETCH_START 5
90
91static int malidp500_query_hw(struct malidp_hw_device *hwdev)
92{
93 u32 conf = malidp_hw_read(hwdev, MALIDP500_CONFIG_ID);
94 /* bit 4 of the CONFIG_ID register holds the line size multiplier */
95 u8 ln_size_mult = conf & 0x10 ? 2 : 1;
96
97 hwdev->min_line_size = 2;
98 hwdev->max_line_size = SZ_2K * ln_size_mult;
99 hwdev->rotation_memory[0] = SZ_1K * 64 * ln_size_mult;
100 hwdev->rotation_memory[1] = 0; /* no second rotation memory bank */
101
102 return 0;
103}
104
105static void malidp500_enter_config_mode(struct malidp_hw_device *hwdev)
106{
107 u32 status, count = 100;
108
109 malidp_hw_setbits(hwdev, MALIDP500_DC_CONFIG_REQ, MALIDP500_DC_CONTROL);
110 while (count) {
111 status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
112 if ((status & MALIDP500_DC_CONFIG_REQ) == MALIDP500_DC_CONFIG_REQ)
113 break;
114 /*
115 * entering config mode can take as long as the rendering
116 * of a full frame, hence the long sleep here
117 */
118 usleep_range(1000, 10000);
119 count--;
120 }
121 WARN(count == 0, "timeout while entering config mode");
122}
123
124static void malidp500_leave_config_mode(struct malidp_hw_device *hwdev)
125{
126 u32 status, count = 100;
127
128 malidp_hw_clearbits(hwdev, MALIDP500_DC_CONFIG_REQ, MALIDP500_DC_CONTROL);
129 while (count) {
130 status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
131 if ((status & MALIDP500_DC_CONFIG_REQ) == 0)
132 break;
133 usleep_range(100, 1000);
134 count--;
135 }
136 WARN(count == 0, "timeout while leaving config mode");
137}
138
139static bool malidp500_in_config_mode(struct malidp_hw_device *hwdev)
140{
141 u32 status;
142
143 status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
144 if ((status & MALIDP500_DC_CONFIG_REQ) == MALIDP500_DC_CONFIG_REQ)
145 return true;
146
147 return false;
148}
149
150static void malidp500_set_config_valid(struct malidp_hw_device *hwdev)
151{
152 malidp_hw_setbits(hwdev, MALIDP_CFG_VALID, MALIDP500_CONFIG_VALID);
153}
154
155static void malidp500_modeset(struct malidp_hw_device *hwdev, struct videomode *mode)
156{
157 u32 val = 0;
158
159 malidp_hw_clearbits(hwdev, MALIDP500_DC_CLEAR_MASK, MALIDP500_DC_CONTROL);
160 if (mode->flags & DISPLAY_FLAGS_HSYNC_HIGH)
161 val |= MALIDP500_HSYNCPOL;
162 if (mode->flags & DISPLAY_FLAGS_VSYNC_HIGH)
163 val |= MALIDP500_VSYNCPOL;
164 val |= MALIDP_DE_DEFAULT_PREFETCH_START;
165 malidp_hw_setbits(hwdev, val, MALIDP500_DC_CONTROL);
166
167 /*
168 * Mali-DP500 encodes the background color like this:
169 * - red @ MALIDP500_BGND_COLOR[12:0]
170 * - green @ MALIDP500_BGND_COLOR[27:16]
171 * - blue @ (MALIDP500_BGND_COLOR + 4)[12:0]
172 */
173 val = ((MALIDP_BGND_COLOR_G & 0xfff) << 16) |
174 (MALIDP_BGND_COLOR_R & 0xfff);
175 malidp_hw_write(hwdev, val, MALIDP500_BGND_COLOR);
176 malidp_hw_write(hwdev, MALIDP_BGND_COLOR_B, MALIDP500_BGND_COLOR + 4);
177
178 val = MALIDP_DE_H_FRONTPORCH(mode->hfront_porch) |
179 MALIDP_DE_H_BACKPORCH(mode->hback_porch);
180 malidp_hw_write(hwdev, val, MALIDP500_TIMINGS_BASE + MALIDP_DE_H_TIMINGS);
181
182 val = MALIDP500_DE_V_FRONTPORCH(mode->vfront_porch) |
183 MALIDP_DE_V_BACKPORCH(mode->vback_porch);
184 malidp_hw_write(hwdev, val, MALIDP500_TIMINGS_BASE + MALIDP_DE_V_TIMINGS);
185
186 val = MALIDP_DE_H_SYNCWIDTH(mode->hsync_len) |
187 MALIDP_DE_V_SYNCWIDTH(mode->vsync_len);
188 malidp_hw_write(hwdev, val, MALIDP500_TIMINGS_BASE + MALIDP_DE_SYNC_WIDTH);
189
190 val = MALIDP_DE_H_ACTIVE(mode->hactive) | MALIDP_DE_V_ACTIVE(mode->vactive);
191 malidp_hw_write(hwdev, val, MALIDP500_TIMINGS_BASE + MALIDP_DE_HV_ACTIVE);
192
193 if (mode->flags & DISPLAY_FLAGS_INTERLACED)
194 malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC);
195 else
196 malidp_hw_clearbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC);
197}
198
199static int malidp500_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16 h, u32 fmt)
200{
201 unsigned int depth;
202 int bpp;
203
204 /* RGB888 or BGR888 can't be rotated */
205 if ((fmt == DRM_FORMAT_RGB888) || (fmt == DRM_FORMAT_BGR888))
206 return -EINVAL;
207
208 /*
209 * Each layer needs enough rotation memory to fit 8 lines
210 * worth of pixel data. Required size is then:
211 * size = rotated_width * (bpp / 8) * 8;
212 */
213 drm_fb_get_bpp_depth(fmt, &depth, &bpp);
214
215 return w * bpp;
216}
217
218static int malidp550_query_hw(struct malidp_hw_device *hwdev)
219{
220 u32 conf = malidp_hw_read(hwdev, MALIDP550_CONFIG_ID);
221 u8 ln_size = (conf >> 4) & 0x3, rsize;
222
223 hwdev->min_line_size = 2;
224
225 switch (ln_size) {
226 case 0:
227 hwdev->max_line_size = SZ_2K;
228 /* two banks of 64KB for rotation memory */
229 rsize = 64;
230 break;
231 case 1:
232 hwdev->max_line_size = SZ_4K;
233 /* two banks of 128KB for rotation memory */
234 rsize = 128;
235 break;
236 case 2:
237 hwdev->max_line_size = 1280;
238 /* two banks of 40KB for rotation memory */
239 rsize = 40;
240 break;
241 case 3:
242 /* reserved value */
243 hwdev->max_line_size = 0;
244 return -EINVAL;
245 }
246
247 hwdev->rotation_memory[0] = hwdev->rotation_memory[1] = rsize * SZ_1K;
248 return 0;
249}
250
251static void malidp550_enter_config_mode(struct malidp_hw_device *hwdev)
252{
253 u32 status, count = 100;
254
255 malidp_hw_setbits(hwdev, MALIDP550_DC_CONFIG_REQ, MALIDP550_DC_CONTROL);
256 while (count) {
257 status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
258 if ((status & MALIDP550_DC_CONFIG_REQ) == MALIDP550_DC_CONFIG_REQ)
259 break;
260 /*
261 * entering config mode can take as long as the rendering
262 * of a full frame, hence the long sleep here
263 */
264 usleep_range(1000, 10000);
265 count--;
266 }
267 WARN(count == 0, "timeout while entering config mode");
268}
269
270static void malidp550_leave_config_mode(struct malidp_hw_device *hwdev)
271{
272 u32 status, count = 100;
273
274 malidp_hw_clearbits(hwdev, MALIDP550_DC_CONFIG_REQ, MALIDP550_DC_CONTROL);
275 while (count) {
276 status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
277 if ((status & MALIDP550_DC_CONFIG_REQ) == 0)
278 break;
279 usleep_range(100, 1000);
280 count--;
281 }
282 WARN(count == 0, "timeout while leaving config mode");
283}
284
285static bool malidp550_in_config_mode(struct malidp_hw_device *hwdev)
286{
287 u32 status;
288
289 status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
290 if ((status & MALIDP550_DC_CONFIG_REQ) == MALIDP550_DC_CONFIG_REQ)
291 return true;
292
293 return false;
294}
295
296static void malidp550_set_config_valid(struct malidp_hw_device *hwdev)
297{
298 malidp_hw_setbits(hwdev, MALIDP_CFG_VALID, MALIDP550_CONFIG_VALID);
299}
300
301static void malidp550_modeset(struct malidp_hw_device *hwdev, struct videomode *mode)
302{
303 u32 val = MALIDP_DE_DEFAULT_PREFETCH_START;
304
305 malidp_hw_write(hwdev, val, MALIDP550_DE_CONTROL);
306 /*
307 * Mali-DP550 and Mali-DP650 encode the background color like this:
308 * - red @ MALIDP550_DE_BGND_COLOR[23:16]
309 * - green @ MALIDP550_DE_BGND_COLOR[15:8]
310 * - blue @ MALIDP550_DE_BGND_COLOR[7:0]
311 *
312 * We need to truncate the least significant 4 bits from the default
313 * MALIDP_BGND_COLOR_x values
314 */
315 val = (((MALIDP_BGND_COLOR_R >> 4) & 0xff) << 16) |
316 (((MALIDP_BGND_COLOR_G >> 4) & 0xff) << 8) |
317 ((MALIDP_BGND_COLOR_B >> 4) & 0xff);
318 malidp_hw_write(hwdev, val, MALIDP550_DE_BGND_COLOR);
319
320 val = MALIDP_DE_H_FRONTPORCH(mode->hfront_porch) |
321 MALIDP_DE_H_BACKPORCH(mode->hback_porch);
322 malidp_hw_write(hwdev, val, MALIDP550_TIMINGS_BASE + MALIDP_DE_H_TIMINGS);
323
324 val = MALIDP550_DE_V_FRONTPORCH(mode->vfront_porch) |
325 MALIDP_DE_V_BACKPORCH(mode->vback_porch);
326 malidp_hw_write(hwdev, val, MALIDP550_TIMINGS_BASE + MALIDP_DE_V_TIMINGS);
327
328 val = MALIDP_DE_H_SYNCWIDTH(mode->hsync_len) |
329 MALIDP_DE_V_SYNCWIDTH(mode->vsync_len);
330 if (mode->flags & DISPLAY_FLAGS_HSYNC_HIGH)
331 val |= MALIDP550_HSYNCPOL;
332 if (mode->flags & DISPLAY_FLAGS_VSYNC_HIGH)
333 val |= MALIDP550_VSYNCPOL;
334 malidp_hw_write(hwdev, val, MALIDP550_TIMINGS_BASE + MALIDP_DE_SYNC_WIDTH);
335
336 val = MALIDP_DE_H_ACTIVE(mode->hactive) | MALIDP_DE_V_ACTIVE(mode->vactive);
337 malidp_hw_write(hwdev, val, MALIDP550_TIMINGS_BASE + MALIDP_DE_HV_ACTIVE);
338
339 if (mode->flags & DISPLAY_FLAGS_INTERLACED)
340 malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC);
341 else
342 malidp_hw_clearbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC);
343}
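
As an illustrative aside (not part of the patch): the comment in malidp550_modeset() above notes that the default 12-bit background colour components must drop their 4 least significant bits before being packed into the 8-bit-per-channel DP550/DP650 register. A minimal sketch with assumed component values:

/* Hypothetical packing of 12-bit background colour defaults for DP550/DP650. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t r12 = 0x000, g12 = 0x000, b12 = 0x3ff;	/* assumed 12-bit defaults */
	uint32_t val;

	/* truncate each component to its top 8 bits, then pack R:G:B high to low */
	val = (((r12 >> 4) & 0xff) << 16) |
	      (((g12 >> 4) & 0xff) << 8) |
	       ((b12 >> 4) & 0xff);

	printf("MALIDP550_DE_BGND_COLOR = 0x%06x\n", val);	/* 0x00003f */
	return 0;
}
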
344
345static int malidp550_rotmem_required(struct malidp_hw_device *hwdev, u16 w, u16 h, u32 fmt)
346{
347 u32 bytes_per_col;
348
349 /* raw RGB888 or BGR888 can't be rotated */
350 if ((fmt == DRM_FORMAT_RGB888) || (fmt == DRM_FORMAT_BGR888))
351 return -EINVAL;
352
353 switch (fmt) {
354 /* 8 lines at 4 bytes per pixel */
355 case DRM_FORMAT_ARGB2101010:
356 case DRM_FORMAT_ABGR2101010:
357 case DRM_FORMAT_RGBA1010102:
358 case DRM_FORMAT_BGRA1010102:
359 case DRM_FORMAT_ARGB8888:
360 case DRM_FORMAT_ABGR8888:
361 case DRM_FORMAT_RGBA8888:
362 case DRM_FORMAT_BGRA8888:
363 case DRM_FORMAT_XRGB8888:
364 case DRM_FORMAT_XBGR8888:
365 case DRM_FORMAT_RGBX8888:
366 case DRM_FORMAT_BGRX8888:
367 case DRM_FORMAT_RGB888:
368 case DRM_FORMAT_BGR888:
369 /* 16 lines at 2 bytes per pixel */
370 case DRM_FORMAT_RGBA5551:
371 case DRM_FORMAT_ABGR1555:
372 case DRM_FORMAT_RGB565:
373 case DRM_FORMAT_BGR565:
374 case DRM_FORMAT_UYVY:
375 case DRM_FORMAT_YUYV:
376 bytes_per_col = 32;
377 break;
378 /* 16 lines at 1.5 bytes per pixel */
379 case DRM_FORMAT_NV12:
380 case DRM_FORMAT_YUV420:
381 bytes_per_col = 24;
382 break;
383 default:
384 return -EINVAL;
385 }
386
387 return w * bytes_per_col;
388}
389
390static int malidp650_query_hw(struct malidp_hw_device *hwdev)
391{
392 u32 conf = malidp_hw_read(hwdev, MALIDP550_CONFIG_ID);
393 u8 ln_size = (conf >> 4) & 0x3, rsize;
394
395 hwdev->min_line_size = 4;
396
397 switch (ln_size) {
398 case 0:
399 case 2:
400 /* reserved values */
401 hwdev->max_line_size = 0;
402 return -EINVAL;
403 case 1:
404 hwdev->max_line_size = SZ_4K;
405 /* two banks of 128KB for rotation memory */
406 rsize = 128;
407 break;
408 case 3:
409 hwdev->max_line_size = 2560;
410 /* two banks of 80KB for rotation memory */
411 rsize = 80;
412 }
413
414 hwdev->rotation_memory[0] = hwdev->rotation_memory[1] = rsize * SZ_1K;
415 return 0;
416}
417
418const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = {
419 [MALIDP_500] = {
420 .map = {
421 .se_base = MALIDP500_SE_BASE,
422 .dc_base = MALIDP500_DC_BASE,
423 .out_depth_base = MALIDP500_OUTPUT_DEPTH,
424 .features = 0, /* no CLEARIRQ register */
425 .n_layers = ARRAY_SIZE(malidp500_layers),
426 .layers = malidp500_layers,
427 .de_irq_map = {
428 .irq_mask = MALIDP_DE_IRQ_UNDERRUN |
429 MALIDP500_DE_IRQ_AXI_ERR |
430 MALIDP500_DE_IRQ_VSYNC |
431 MALIDP500_DE_IRQ_GLOBAL,
432 .vsync_irq = MALIDP500_DE_IRQ_VSYNC,
433 },
434 .se_irq_map = {
435 .irq_mask = MALIDP500_SE_IRQ_CONF_MODE,
436 .vsync_irq = 0,
437 },
438 .dc_irq_map = {
439 .irq_mask = MALIDP500_DE_IRQ_CONF_VALID,
440 .vsync_irq = MALIDP500_DE_IRQ_CONF_VALID,
441 },
442 .input_formats = malidp500_de_formats,
443 .n_input_formats = ARRAY_SIZE(malidp500_de_formats),
444 },
445 .query_hw = malidp500_query_hw,
446 .enter_config_mode = malidp500_enter_config_mode,
447 .leave_config_mode = malidp500_leave_config_mode,
448 .in_config_mode = malidp500_in_config_mode,
449 .set_config_valid = malidp500_set_config_valid,
450 .modeset = malidp500_modeset,
451 .rotmem_required = malidp500_rotmem_required,
452 },
453 [MALIDP_550] = {
454 .map = {
455 .se_base = MALIDP550_SE_BASE,
456 .dc_base = MALIDP550_DC_BASE,
457 .out_depth_base = MALIDP550_DE_OUTPUT_DEPTH,
458 .features = MALIDP_REGMAP_HAS_CLEARIRQ,
459 .n_layers = ARRAY_SIZE(malidp550_layers),
460 .layers = malidp550_layers,
461 .de_irq_map = {
462 .irq_mask = MALIDP_DE_IRQ_UNDERRUN |
463 MALIDP550_DE_IRQ_VSYNC,
464 .vsync_irq = MALIDP550_DE_IRQ_VSYNC,
465 },
466 .se_irq_map = {
467 .irq_mask = MALIDP550_SE_IRQ_EOW |
468 MALIDP550_SE_IRQ_AXI_ERR,
469 },
470 .dc_irq_map = {
471 .irq_mask = MALIDP550_DC_IRQ_CONF_VALID,
472 .vsync_irq = MALIDP550_DC_IRQ_CONF_VALID,
473 },
474 .input_formats = malidp550_de_formats,
475 .n_input_formats = ARRAY_SIZE(malidp550_de_formats),
476 },
477 .query_hw = malidp550_query_hw,
478 .enter_config_mode = malidp550_enter_config_mode,
479 .leave_config_mode = malidp550_leave_config_mode,
480 .in_config_mode = malidp550_in_config_mode,
481 .set_config_valid = malidp550_set_config_valid,
482 .modeset = malidp550_modeset,
483 .rotmem_required = malidp550_rotmem_required,
484 },
485 [MALIDP_650] = {
486 .map = {
487 .se_base = MALIDP550_SE_BASE,
488 .dc_base = MALIDP550_DC_BASE,
489 .out_depth_base = MALIDP550_DE_OUTPUT_DEPTH,
490 .features = MALIDP_REGMAP_HAS_CLEARIRQ,
491 .n_layers = ARRAY_SIZE(malidp550_layers),
492 .layers = malidp550_layers,
493 .de_irq_map = {
494 .irq_mask = MALIDP_DE_IRQ_UNDERRUN |
495 MALIDP650_DE_IRQ_DRIFT |
496 MALIDP550_DE_IRQ_VSYNC,
497 .vsync_irq = MALIDP550_DE_IRQ_VSYNC,
498 },
499 .se_irq_map = {
500 .irq_mask = MALIDP550_SE_IRQ_EOW |
501 MALIDP550_SE_IRQ_AXI_ERR,
502 },
503 .dc_irq_map = {
504 .irq_mask = MALIDP550_DC_IRQ_CONF_VALID,
505 .vsync_irq = MALIDP550_DC_IRQ_CONF_VALID,
506 },
507 .input_formats = malidp550_de_formats,
508 .n_input_formats = ARRAY_SIZE(malidp550_de_formats),
509 },
510 .query_hw = malidp650_query_hw,
511 .enter_config_mode = malidp550_enter_config_mode,
512 .leave_config_mode = malidp550_leave_config_mode,
513 .in_config_mode = malidp550_in_config_mode,
514 .set_config_valid = malidp550_set_config_valid,
515 .modeset = malidp550_modeset,
516 .rotmem_required = malidp550_rotmem_required,
517 },
518};
519
520u8 malidp_hw_get_format_id(const struct malidp_hw_regmap *map,
521 u8 layer_id, u32 format)
522{
523 unsigned int i;
524
525 for (i = 0; i < map->n_input_formats; i++) {
526 if (((map->input_formats[i].layer & layer_id) == layer_id) &&
527 (map->input_formats[i].format == format))
528 return map->input_formats[i].id;
529 }
530
531 return MALIDP_INVALID_FORMAT_ID;
532}
533
534static void malidp_hw_clear_irq(struct malidp_hw_device *hwdev, u8 block, u32 irq)
535{
536 u32 base = malidp_get_block_base(hwdev, block);
537
538 if (hwdev->map.features & MALIDP_REGMAP_HAS_CLEARIRQ)
539 malidp_hw_write(hwdev, irq, base + MALIDP_REG_CLEARIRQ);
540 else
541 malidp_hw_write(hwdev, irq, base + MALIDP_REG_STATUS);
542}
543
544static irqreturn_t malidp_de_irq(int irq, void *arg)
545{
546 struct drm_device *drm = arg;
547 struct malidp_drm *malidp = drm->dev_private;
548 struct malidp_hw_device *hwdev;
549 const struct malidp_irq_map *de;
550 u32 status, mask, dc_status;
551 irqreturn_t ret = IRQ_NONE;
552
553 if (!drm->dev_private)
554 return IRQ_HANDLED;
555
556 hwdev = malidp->dev;
557 de = &hwdev->map.de_irq_map;
558
559 /* first handle the config valid IRQ */
560 dc_status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
561 if (dc_status & hwdev->map.dc_irq_map.vsync_irq) {
562 /* we have a page flip event */
563 atomic_set(&malidp->config_valid, 1);
564 malidp_hw_clear_irq(hwdev, MALIDP_DC_BLOCK, dc_status);
565 ret = IRQ_WAKE_THREAD;
566 }
567
568 status = malidp_hw_read(hwdev, MALIDP_REG_STATUS);
569 if (!(status & de->irq_mask))
570 return ret;
571
572 mask = malidp_hw_read(hwdev, MALIDP_REG_MASKIRQ);
573 status &= mask;
574 if (status & de->vsync_irq)
575 drm_crtc_handle_vblank(&malidp->crtc);
576
577 malidp_hw_clear_irq(hwdev, MALIDP_DE_BLOCK, status);
578
579 return (ret == IRQ_NONE) ? IRQ_HANDLED : ret;
580}
581
582static irqreturn_t malidp_de_irq_thread_handler(int irq, void *arg)
583{
584 struct drm_device *drm = arg;
585 struct malidp_drm *malidp = drm->dev_private;
586
587 wake_up(&malidp->wq);
588
589 return IRQ_HANDLED;
590}
591
592int malidp_de_irq_init(struct drm_device *drm, int irq)
593{
594 struct malidp_drm *malidp = drm->dev_private;
595 struct malidp_hw_device *hwdev = malidp->dev;
596 int ret;
597
598 /* ensure interrupts are disabled */
599 malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK, 0xffffffff);
600 malidp_hw_clear_irq(hwdev, MALIDP_DE_BLOCK, 0xffffffff);
601 malidp_hw_disable_irq(hwdev, MALIDP_DC_BLOCK, 0xffffffff);
602 malidp_hw_clear_irq(hwdev, MALIDP_DC_BLOCK, 0xffffffff);
603
604 ret = devm_request_threaded_irq(drm->dev, irq, malidp_de_irq,
605 malidp_de_irq_thread_handler,
606 IRQF_SHARED, "malidp-de", drm);
607 if (ret < 0) {
608 DRM_ERROR("failed to install DE IRQ handler\n");
609 return ret;
610 }
611
612 /* first enable the DC block IRQs */
613 malidp_hw_enable_irq(hwdev, MALIDP_DC_BLOCK,
614 hwdev->map.dc_irq_map.irq_mask);
615
616 /* now enable the DE block IRQs */
617 malidp_hw_enable_irq(hwdev, MALIDP_DE_BLOCK,
618 hwdev->map.de_irq_map.irq_mask);
619
620 return 0;
621}
622
623void malidp_de_irq_fini(struct drm_device *drm)
624{
625 struct malidp_drm *malidp = drm->dev_private;
626 struct malidp_hw_device *hwdev = malidp->dev;
627
628 malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK,
629 hwdev->map.de_irq_map.irq_mask);
630 malidp_hw_disable_irq(hwdev, MALIDP_DC_BLOCK,
631 hwdev->map.dc_irq_map.irq_mask);
632}
633
634static irqreturn_t malidp_se_irq(int irq, void *arg)
635{
636 struct drm_device *drm = arg;
637 struct malidp_drm *malidp = drm->dev_private;
638 struct malidp_hw_device *hwdev = malidp->dev;
639 u32 status, mask;
640
641 status = malidp_hw_read(hwdev, hwdev->map.se_base + MALIDP_REG_STATUS);
642 if (!(status & hwdev->map.se_irq_map.irq_mask))
643 return IRQ_NONE;
644
645 mask = malidp_hw_read(hwdev, hwdev->map.se_base + MALIDP_REG_MASKIRQ);
646 status = malidp_hw_read(hwdev, hwdev->map.se_base + MALIDP_REG_STATUS);
647 status &= mask;
648 /* ToDo: status decoding and firing up of VSYNC and page flip events */
649
650 malidp_hw_clear_irq(hwdev, MALIDP_SE_BLOCK, status);
651
652 return IRQ_HANDLED;
653}
654
655static irqreturn_t malidp_se_irq_thread_handler(int irq, void *arg)
656{
657 return IRQ_HANDLED;
658}
659
660int malidp_se_irq_init(struct drm_device *drm, int irq)
661{
662 struct malidp_drm *malidp = drm->dev_private;
663 struct malidp_hw_device *hwdev = malidp->dev;
664 int ret;
665
666 /* ensure interrupts are disabled */
667 malidp_hw_disable_irq(hwdev, MALIDP_SE_BLOCK, 0xffffffff);
668 malidp_hw_clear_irq(hwdev, MALIDP_SE_BLOCK, 0xffffffff);
669
670 ret = devm_request_threaded_irq(drm->dev, irq, malidp_se_irq,
671 malidp_se_irq_thread_handler,
672 IRQF_SHARED, "malidp-se", drm);
673 if (ret < 0) {
674 DRM_ERROR("failed to install SE IRQ handler\n");
675 return ret;
676 }
677
678 malidp_hw_enable_irq(hwdev, MALIDP_SE_BLOCK,
679 hwdev->map.se_irq_map.irq_mask);
680
681 return 0;
682}
683
684void malidp_se_irq_fini(struct drm_device *drm)
685{
686 struct malidp_drm *malidp = drm->dev_private;
687 struct malidp_hw_device *hwdev = malidp->dev;
688
689 malidp_hw_disable_irq(hwdev, MALIDP_SE_BLOCK,
690 hwdev->map.se_irq_map.irq_mask);
691}
diff --git a/drivers/gpu/drm/arm/malidp_hw.h b/drivers/gpu/drm/arm/malidp_hw.h
new file mode 100644
index 000000000000..141743e9f3a6
--- /dev/null
+++ b/drivers/gpu/drm/arm/malidp_hw.h
@@ -0,0 +1,241 @@
1/*
2 *
3 * (C) COPYRIGHT 2013-2016 ARM Limited. All rights reserved.
4 *
5 * This program is free software and is provided to you under the terms of the
6 * GNU General Public License version 2 as published by the Free Software
7 * Foundation, and any use by you of this program is subject to the terms
8 * of such GNU licence.
9 *
10 * ARM Mali DP hardware manipulation routines.
11 */
12
13#ifndef __MALIDP_HW_H__
14#define __MALIDP_HW_H__
15
16#include <linux/bitops.h>
17#include "malidp_regs.h"
18
19struct videomode;
20struct clk;
21
22/* Mali DP IP blocks */
23enum {
24 MALIDP_DE_BLOCK = 0,
25 MALIDP_SE_BLOCK,
26 MALIDP_DC_BLOCK
27};
28
29/* Mali DP layer IDs */
30enum {
31 DE_VIDEO1 = BIT(0),
32 DE_GRAPHICS1 = BIT(1),
33 DE_GRAPHICS2 = BIT(2), /* used only in DP500 */
34 DE_VIDEO2 = BIT(3),
35 DE_SMART = BIT(4),
36};
37
38struct malidp_input_format {
39 u32 format; /* DRM fourcc */
40 u8 layer; /* bitmask of layers supporting it */
41 u8 id; /* used internally */
42};
43
44#define MALIDP_INVALID_FORMAT_ID 0xff
45
46/*
47 * hide the differences between register maps
48 * by using a common structure to hold the
49 * base register offsets
50 */
51
52struct malidp_irq_map {
53 u32 irq_mask; /* mask of IRQs that can be enabled in the block */
54 u32 vsync_irq; /* IRQ bit used for signaling during VSYNC */
55};
56
57struct malidp_layer {
58 u16 id; /* layer ID */
59 u16 base; /* address offset for the register bank */
60 u16 ptr; /* address offset for the pointer register */
61};
62
63/* regmap features */
64#define MALIDP_REGMAP_HAS_CLEARIRQ (1 << 0)
65
66struct malidp_hw_regmap {
67 /* address offset of the DE register bank */
68 /* is always 0x0000 */
69 /* address offset of the SE registers bank */
70 const u16 se_base;
71 /* address offset of the DC registers bank */
72 const u16 dc_base;
73
74 /* address offset for the output depth register */
75 const u16 out_depth_base;
76
77 /* bitmap with register map features */
78 const u8 features;
79
80 /* list of supported layers */
81 const u8 n_layers;
82 const struct malidp_layer *layers;
83
84 const struct malidp_irq_map de_irq_map;
85 const struct malidp_irq_map se_irq_map;
86 const struct malidp_irq_map dc_irq_map;
87
88 /* list of supported input formats for each layer */
89 const struct malidp_input_format *input_formats;
90 const u8 n_input_formats;
91};
92
93struct malidp_hw_device {
94 const struct malidp_hw_regmap map;
95 void __iomem *regs;
96
97 /* APB clock */
98 struct clk *pclk;
99 /* AXI clock */
100 struct clk *aclk;
101 /* main clock for display core */
102 struct clk *mclk;
103 /* pixel clock for display core */
104 struct clk *pxlclk;
105
106 /*
107 * Validate the driver instance against the hardware bits
108 */
109 int (*query_hw)(struct malidp_hw_device *hwdev);
110
111 /*
112 * Set the hardware into config mode, ready to accept mode changes
113 */
114 void (*enter_config_mode)(struct malidp_hw_device *hwdev);
115
116 /*
117 * Tell hardware to exit configuration mode
118 */
119 void (*leave_config_mode)(struct malidp_hw_device *hwdev);
120
121 /*
122 * Query if hardware is in configuration mode
123 */
124 bool (*in_config_mode)(struct malidp_hw_device *hwdev);
125
126 /*
127 * Set configuration valid flag for hardware parameters that can
128 * be changed outside the configuration mode. Hardware will use
129 * the new settings when config valid is set after the end of the
130 * current buffer scanout
131 */
132 void (*set_config_valid)(struct malidp_hw_device *hwdev);
133
134 /*
135 * Set a new mode in hardware. Requires the hardware to be in
136 * configuration mode before this function is called.
137 */
138 void (*modeset)(struct malidp_hw_device *hwdev, struct videomode *m);
139
140 /*
141 * Calculate the required rotation memory given the active area
142 * and the buffer format.
143 */
144 int (*rotmem_required)(struct malidp_hw_device *hwdev, u16 w, u16 h, u32 fmt);
145
146 u8 features;
147
148 u8 min_line_size;
149 u16 max_line_size;
150
151 /* size of memory used for rotating layers, up to two banks available */
152 u32 rotation_memory[2];
153};
154
155/* Supported variants of the hardware */
156enum {
157 MALIDP_500 = 0,
158 MALIDP_550,
159 MALIDP_650,
160 /* keep the next entry last */
161 MALIDP_MAX_DEVICES
162};
163
164extern const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES];
165
166static inline u32 malidp_hw_read(struct malidp_hw_device *hwdev, u32 reg)
167{
168 return readl(hwdev->regs + reg);
169}
170
171static inline void malidp_hw_write(struct malidp_hw_device *hwdev,
172 u32 value, u32 reg)
173{
174 writel(value, hwdev->regs + reg);
175}
176
177static inline void malidp_hw_setbits(struct malidp_hw_device *hwdev,
178 u32 mask, u32 reg)
179{
180 u32 data = malidp_hw_read(hwdev, reg);
181
182 data |= mask;
183 malidp_hw_write(hwdev, data, reg);
184}
185
186static inline void malidp_hw_clearbits(struct malidp_hw_device *hwdev,
187 u32 mask, u32 reg)
188{
189 u32 data = malidp_hw_read(hwdev, reg);
190
191 data &= ~mask;
192 malidp_hw_write(hwdev, data, reg);
193}
194
195static inline u32 malidp_get_block_base(struct malidp_hw_device *hwdev,
196 u8 block)
197{
198 switch (block) {
199 case MALIDP_SE_BLOCK:
200 return hwdev->map.se_base;
201 case MALIDP_DC_BLOCK:
202 return hwdev->map.dc_base;
203 }
204
205 return 0;
206}
207
208static inline void malidp_hw_disable_irq(struct malidp_hw_device *hwdev,
209 u8 block, u32 irq)
210{
211 u32 base = malidp_get_block_base(hwdev, block);
212
213 malidp_hw_clearbits(hwdev, irq, base + MALIDP_REG_MASKIRQ);
214}
215
216static inline void malidp_hw_enable_irq(struct malidp_hw_device *hwdev,
217 u8 block, u32 irq)
218{
219 u32 base = malidp_get_block_base(hwdev, block);
220
221 malidp_hw_setbits(hwdev, irq, base + MALIDP_REG_MASKIRQ);
222}
223
224int malidp_de_irq_init(struct drm_device *drm, int irq);
225void malidp_de_irq_fini(struct drm_device *drm);
226int malidp_se_irq_init(struct drm_device *drm, int irq);
227void malidp_se_irq_fini(struct drm_device *drm);
228
229u8 malidp_hw_get_format_id(const struct malidp_hw_regmap *map,
230 u8 layer_id, u32 format);
231
232/*
233 * background color components are defined as 12bits values,
234 * they will be shifted right when stored on hardware that
235 * supports only 8bits per channel
236 */
237#define MALIDP_BGND_COLOR_R 0x000
238#define MALIDP_BGND_COLOR_G 0x000
239#define MALIDP_BGND_COLOR_B 0x000
240
241#endif /* __MALIDP_HW_H__ */
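A minimal sketch of how a driver might combine the helpers declared above — hypothetical caller, not part of this patch; it only relies on the fields and inline functions from malidp_hw.h plus the standard DRM_FORMAT_ARGB8888 fourcc:

static int example_enable_de_vsync(struct malidp_hw_device *hwdev)
{
	u8 fmt_id;

	/* the per-variant IRQ map supplies the vsync bit for the DE block */
	malidp_hw_enable_irq(hwdev, MALIDP_DE_BLOCK,
			     hwdev->map.de_irq_map.vsync_irq);

	/* resolve the hardware format ID for ARGB8888 on the first video layer */
	fmt_id = malidp_hw_get_format_id(&hwdev->map, DE_VIDEO1,
					 DRM_FORMAT_ARGB8888);
	if (fmt_id == MALIDP_INVALID_FORMAT_ID)
		return -EINVAL;

	return 0;
}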
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
new file mode 100644
index 000000000000..725098d6179a
--- /dev/null
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -0,0 +1,298 @@
1/*
2 * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
3 * Author: Liviu Dudau <Liviu.Dudau@arm.com>
4 *
5 * This program is free software and is provided to you under the terms of the
6 * GNU General Public License version 2 as published by the Free Software
7 * Foundation, and any use by you of this program is subject to the terms
8 * of such GNU licence.
9 *
10 * ARM Mali DP plane manipulation routines.
11 */
12
13#include <drm/drmP.h>
14#include <drm/drm_atomic_helper.h>
15#include <drm/drm_fb_cma_helper.h>
16#include <drm/drm_gem_cma_helper.h>
17#include <drm/drm_plane_helper.h>
18
19#include "malidp_hw.h"
20#include "malidp_drv.h"
21
22/* Layer specific register offsets */
23#define MALIDP_LAYER_FORMAT 0x000
24#define MALIDP_LAYER_CONTROL 0x004
25#define LAYER_ENABLE (1 << 0)
26#define LAYER_ROT_OFFSET 8
27#define LAYER_H_FLIP (1 << 10)
28#define LAYER_V_FLIP (1 << 11)
29#define LAYER_ROT_MASK (0xf << 8)
30#define MALIDP_LAYER_SIZE 0x00c
31#define LAYER_H_VAL(x) (((x) & 0x1fff) << 0)
32#define LAYER_V_VAL(x) (((x) & 0x1fff) << 16)
33#define MALIDP_LAYER_COMP_SIZE 0x010
34#define MALIDP_LAYER_OFFSET 0x014
35#define MALIDP_LAYER_STRIDE 0x018
36
37static void malidp_de_plane_destroy(struct drm_plane *plane)
38{
39 struct malidp_plane *mp = to_malidp_plane(plane);
40
41 if (mp->base.fb)
42 drm_framebuffer_unreference(mp->base.fb);
43
44 drm_plane_helper_disable(plane);
45 drm_plane_cleanup(plane);
46 devm_kfree(plane->dev->dev, mp);
47}
48
49struct drm_plane_state *malidp_duplicate_plane_state(struct drm_plane *plane)
50{
51 struct malidp_plane_state *state, *m_state;
52
53 if (!plane->state)
54 return NULL;
55
56 state = kmalloc(sizeof(*state), GFP_KERNEL);
57 if (state) {
58 m_state = to_malidp_plane_state(plane->state);
59 __drm_atomic_helper_plane_duplicate_state(plane, &state->base);
60 state->rotmem_size = m_state->rotmem_size;
61 }
62
63 return &state->base;
64}
65
66void malidp_destroy_plane_state(struct drm_plane *plane,
67 struct drm_plane_state *state)
68{
69 struct malidp_plane_state *m_state = to_malidp_plane_state(state);
70
71 __drm_atomic_helper_plane_destroy_state(state);
72 kfree(m_state);
73}
74
75static const struct drm_plane_funcs malidp_de_plane_funcs = {
76 .update_plane = drm_atomic_helper_update_plane,
77 .disable_plane = drm_atomic_helper_disable_plane,
78 .destroy = malidp_de_plane_destroy,
79 .reset = drm_atomic_helper_plane_reset,
80 .atomic_duplicate_state = malidp_duplicate_plane_state,
81 .atomic_destroy_state = malidp_destroy_plane_state,
82};
83
84static int malidp_de_plane_check(struct drm_plane *plane,
85 struct drm_plane_state *state)
86{
87 struct malidp_plane *mp = to_malidp_plane(plane);
88 struct malidp_plane_state *ms = to_malidp_plane_state(state);
89 u8 format_id;
90 u32 src_w, src_h;
91
92 if (!state->crtc || !state->fb)
93 return 0;
94
95 format_id = malidp_hw_get_format_id(&mp->hwdev->map, mp->layer->id,
96 state->fb->pixel_format);
97 if (format_id == MALIDP_INVALID_FORMAT_ID)
98 return -EINVAL;
99
100 src_w = state->src_w >> 16;
101 src_h = state->src_h >> 16;
102
103 if ((state->crtc_w > mp->hwdev->max_line_size) ||
104 (state->crtc_h > mp->hwdev->max_line_size) ||
105 (state->crtc_w < mp->hwdev->min_line_size) ||
106 (state->crtc_h < mp->hwdev->min_line_size) ||
107 (state->crtc_w != src_w) || (state->crtc_h != src_h))
108 return -EINVAL;
109
110 /* packed RGB888 / BGR888 can't be rotated or flipped */
111 if (state->rotation != BIT(DRM_ROTATE_0) &&
112 (state->fb->pixel_format == DRM_FORMAT_RGB888 ||
113 state->fb->pixel_format == DRM_FORMAT_BGR888))
114 return -EINVAL;
115
116 ms->rotmem_size = 0;
117 if (state->rotation & MALIDP_ROTATED_MASK) {
118 int val;
119
120 val = mp->hwdev->rotmem_required(mp->hwdev, state->crtc_h,
121 state->crtc_w,
122 state->fb->pixel_format);
123 if (val < 0)
124 return val;
125
126 ms->rotmem_size = val;
127 }
128
129 return 0;
130}
131
132static void malidp_de_plane_update(struct drm_plane *plane,
133 struct drm_plane_state *old_state)
134{
135 struct drm_gem_cma_object *obj;
136 struct malidp_plane *mp;
137 const struct malidp_hw_regmap *map;
138 u8 format_id;
139 u16 ptr;
140 u32 format, src_w, src_h, dest_w, dest_h, val = 0;
141 int num_planes, i;
142
143 mp = to_malidp_plane(plane);
144
145 map = &mp->hwdev->map;
146 format = plane->state->fb->pixel_format;
147 format_id = malidp_hw_get_format_id(map, mp->layer->id, format);
148 num_planes = drm_format_num_planes(format);
149
150 /* convert src values from Q16 fixed point to integer */
151 src_w = plane->state->src_w >> 16;
152 src_h = plane->state->src_h >> 16;
153 if (plane->state->rotation & MALIDP_ROTATED_MASK) {
154 dest_w = plane->state->crtc_h;
155 dest_h = plane->state->crtc_w;
156 } else {
157 dest_w = plane->state->crtc_w;
158 dest_h = plane->state->crtc_h;
159 }
160
161 malidp_hw_write(mp->hwdev, format_id, mp->layer->base);
162
163 for (i = 0; i < num_planes; i++) {
164 /* calculate the offset for the layer's plane registers */
165 ptr = mp->layer->ptr + (i << 4);
166
167 obj = drm_fb_cma_get_gem_obj(plane->state->fb, i);
168 malidp_hw_write(mp->hwdev, lower_32_bits(obj->paddr), ptr);
169 malidp_hw_write(mp->hwdev, upper_32_bits(obj->paddr), ptr + 4);
170 malidp_hw_write(mp->hwdev, plane->state->fb->pitches[i],
171 mp->layer->base + MALIDP_LAYER_STRIDE);
172 }
173
174 malidp_hw_write(mp->hwdev, LAYER_H_VAL(src_w) | LAYER_V_VAL(src_h),
175 mp->layer->base + MALIDP_LAYER_SIZE);
176
177 malidp_hw_write(mp->hwdev, LAYER_H_VAL(dest_w) | LAYER_V_VAL(dest_h),
178 mp->layer->base + MALIDP_LAYER_COMP_SIZE);
179
180 malidp_hw_write(mp->hwdev, LAYER_H_VAL(plane->state->crtc_x) |
181 LAYER_V_VAL(plane->state->crtc_y),
182 mp->layer->base + MALIDP_LAYER_OFFSET);
183
184 /* first clear the rotation bits in the register */
185 malidp_hw_clearbits(mp->hwdev, LAYER_ROT_MASK,
186 mp->layer->base + MALIDP_LAYER_CONTROL);
187
188 /* setup the rotation and axis flip bits */
189 if (plane->state->rotation & DRM_ROTATE_MASK)
190 val = ilog2(plane->state->rotation & DRM_ROTATE_MASK) << LAYER_ROT_OFFSET;
191 if (plane->state->rotation & BIT(DRM_REFLECT_X))
192 val |= LAYER_V_FLIP;
193 if (plane->state->rotation & BIT(DRM_REFLECT_Y))
194 val |= LAYER_H_FLIP;
195
196 /* set the 'enable layer' bit */
197 val |= LAYER_ENABLE;
198
199 malidp_hw_setbits(mp->hwdev, val,
200 mp->layer->base + MALIDP_LAYER_CONTROL);
201}
202
203static void malidp_de_plane_disable(struct drm_plane *plane,
204 struct drm_plane_state *state)
205{
206 struct malidp_plane *mp = to_malidp_plane(plane);
207
208 malidp_hw_clearbits(mp->hwdev, LAYER_ENABLE,
209 mp->layer->base + MALIDP_LAYER_CONTROL);
210}
211
212static const struct drm_plane_helper_funcs malidp_de_plane_helper_funcs = {
213 .atomic_check = malidp_de_plane_check,
214 .atomic_update = malidp_de_plane_update,
215 .atomic_disable = malidp_de_plane_disable,
216};
217
218int malidp_de_planes_init(struct drm_device *drm)
219{
220 struct malidp_drm *malidp = drm->dev_private;
221 const struct malidp_hw_regmap *map = &malidp->dev->map;
222 struct malidp_plane *plane = NULL;
223 enum drm_plane_type plane_type;
224 unsigned long crtcs = 1 << drm->mode_config.num_crtc;
225 u32 *formats;
226 int ret, i, j, n;
227
228 formats = kcalloc(map->n_input_formats, sizeof(*formats), GFP_KERNEL);
229 if (!formats) {
230 ret = -ENOMEM;
231 goto cleanup;
232 }
233
234 for (i = 0; i < map->n_layers; i++) {
235 u8 id = map->layers[i].id;
236
237 plane = kzalloc(sizeof(*plane), GFP_KERNEL);
238 if (!plane) {
239 ret = -ENOMEM;
240 goto cleanup;
241 }
242
243 /* build the list of DRM supported formats based on the map */
244 for (n = 0, j = 0; j < map->n_input_formats; j++) {
245 if ((map->input_formats[j].layer & id) == id)
246 formats[n++] = map->input_formats[j].format;
247 }
248
249 plane_type = (i == 0) ? DRM_PLANE_TYPE_PRIMARY :
250 DRM_PLANE_TYPE_OVERLAY;
251 ret = drm_universal_plane_init(drm, &plane->base, crtcs,
252 &malidp_de_plane_funcs, formats,
253 n, plane_type, NULL);
254 if (ret < 0)
255 goto cleanup;
256
257 if (!drm->mode_config.rotation_property) {
258 unsigned long flags = BIT(DRM_ROTATE_0) |
259 BIT(DRM_ROTATE_90) |
260 BIT(DRM_ROTATE_180) |
261 BIT(DRM_ROTATE_270) |
262 BIT(DRM_REFLECT_X) |
263 BIT(DRM_REFLECT_Y);
264 drm->mode_config.rotation_property =
265 drm_mode_create_rotation_property(drm, flags);
266 }
267 /* SMART layer can't be rotated */
268 if (drm->mode_config.rotation_property && (id != DE_SMART))
269 drm_object_attach_property(&plane->base.base,
270 drm->mode_config.rotation_property,
271 BIT(DRM_ROTATE_0));
272
273 drm_plane_helper_add(&plane->base,
274 &malidp_de_plane_helper_funcs);
275 plane->hwdev = malidp->dev;
276 plane->layer = &map->layers[i];
277 }
278
279 kfree(formats);
280
281 return 0;
282
283cleanup:
284 malidp_de_planes_destroy(drm);
285 kfree(formats);
286
287 return ret;
288}
289
290void malidp_de_planes_destroy(struct drm_device *drm)
291{
292 struct drm_plane *p, *pt;
293
294 list_for_each_entry_safe(p, pt, &drm->mode_config.plane_list, head) {
295 drm_plane_cleanup(p);
296 kfree(p);
297 }
298}
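The rotation encoding used by malidp_de_plane_update() above can be read off directly (illustrative restatement of the code, not part of the patch):

/* BIT(DRM_ROTATE_90) is bit index 1, so ilog2() gives 1 and the layer
 * CONTROL register receives 1 << LAYER_ROT_OFFSET for a 90-degree rotation;
 * reflections additionally set LAYER_H_FLIP / LAYER_V_FLIP. */
u32 rot90 = ilog2(BIT(DRM_ROTATE_90)) << LAYER_ROT_OFFSET;	/* == 0x100 */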
diff --git a/drivers/gpu/drm/arm/malidp_regs.h b/drivers/gpu/drm/arm/malidp_regs.h
new file mode 100644
index 000000000000..73fecb38f955
--- /dev/null
+++ b/drivers/gpu/drm/arm/malidp_regs.h
@@ -0,0 +1,172 @@
1/*
2 * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
3 * Author: Liviu Dudau <Liviu.Dudau@arm.com>
4 *
5 * This program is free software and is provided to you under the terms of the
6 * GNU General Public License version 2 as published by the Free Software
7 * Foundation, and any use by you of this program is subject to the terms
8 * of such GNU licence.
9 *
10 * ARM Mali DP500/DP550/DP650 registers definition.
11 */
12
13#ifndef __MALIDP_REGS_H__
14#define __MALIDP_REGS_H__
15
16/*
17 * abbreviations used:
18 * - DC - display core (general settings)
19 * - DE - display engine
20 * - SE - scaling engine
21 */
22
23/* interrupt bit masks */
24#define MALIDP_DE_IRQ_UNDERRUN (1 << 0)
25
26#define MALIDP500_DE_IRQ_AXI_ERR (1 << 4)
27#define MALIDP500_DE_IRQ_VSYNC (1 << 5)
28#define MALIDP500_DE_IRQ_PROG_LINE (1 << 6)
29#define MALIDP500_DE_IRQ_SATURATION (1 << 7)
30#define MALIDP500_DE_IRQ_CONF_VALID (1 << 8)
31#define MALIDP500_DE_IRQ_CONF_MODE (1 << 11)
32#define MALIDP500_DE_IRQ_CONF_ACTIVE (1 << 17)
33#define MALIDP500_DE_IRQ_PM_ACTIVE (1 << 18)
34#define MALIDP500_DE_IRQ_TESTMODE_ACTIVE (1 << 19)
35#define MALIDP500_DE_IRQ_FORCE_BLNK_ACTIVE (1 << 24)
36#define MALIDP500_DE_IRQ_AXI_BUSY (1 << 28)
37#define MALIDP500_DE_IRQ_GLOBAL (1 << 31)
38#define MALIDP500_SE_IRQ_CONF_MODE (1 << 0)
39#define MALIDP500_SE_IRQ_CONF_VALID (1 << 4)
40#define MALIDP500_SE_IRQ_INIT_BUSY (1 << 5)
41#define MALIDP500_SE_IRQ_AXI_ERROR (1 << 8)
42#define MALIDP500_SE_IRQ_OVERRUN (1 << 9)
43#define MALIDP500_SE_IRQ_PROG_LINE1 (1 << 12)
44#define MALIDP500_SE_IRQ_PROG_LINE2 (1 << 13)
45#define MALIDP500_SE_IRQ_CONF_ACTIVE (1 << 17)
46#define MALIDP500_SE_IRQ_PM_ACTIVE (1 << 18)
47#define MALIDP500_SE_IRQ_AXI_BUSY (1 << 28)
48#define MALIDP500_SE_IRQ_GLOBAL (1 << 31)
49
50#define MALIDP550_DE_IRQ_SATURATION (1 << 8)
51#define MALIDP550_DE_IRQ_VSYNC (1 << 12)
52#define MALIDP550_DE_IRQ_PROG_LINE (1 << 13)
53#define MALIDP550_DE_IRQ_AXI_ERR (1 << 16)
54#define MALIDP550_SE_IRQ_EOW (1 << 0)
55#define MALIDP550_SE_IRQ_AXI_ERR (1 << 16)
56#define MALIDP550_DC_IRQ_CONF_VALID (1 << 0)
57#define MALIDP550_DC_IRQ_CONF_MODE (1 << 4)
58#define MALIDP550_DC_IRQ_CONF_ACTIVE (1 << 16)
59#define MALIDP550_DC_IRQ_DE (1 << 20)
60#define MALIDP550_DC_IRQ_SE (1 << 24)
61
62#define MALIDP650_DE_IRQ_DRIFT (1 << 4)
63
64/* bit masks that are common between products */
65#define MALIDP_CFG_VALID (1 << 0)
66#define MALIDP_DISP_FUNC_ILACED (1 << 8)
67
68/* register offsets for IRQ management */
69#define MALIDP_REG_STATUS 0x00000
70#define MALIDP_REG_SETIRQ 0x00004
71#define MALIDP_REG_MASKIRQ 0x00008
72#define MALIDP_REG_CLEARIRQ 0x0000c
73
74/* register offsets */
75#define MALIDP_DE_CORE_ID 0x00018
76#define MALIDP_DE_DISPLAY_FUNC 0x00020
77
78/* these offsets are relative to MALIDP5x0_TIMINGS_BASE */
79#define MALIDP_DE_H_TIMINGS 0x0
80#define MALIDP_DE_V_TIMINGS 0x4
81#define MALIDP_DE_SYNC_WIDTH 0x8
82#define MALIDP_DE_HV_ACTIVE 0xc
83
84/* macros to set values into registers */
85#define MALIDP_DE_H_FRONTPORCH(x) (((x) & 0xfff) << 0)
86#define MALIDP_DE_H_BACKPORCH(x) (((x) & 0x3ff) << 16)
87#define MALIDP500_DE_V_FRONTPORCH(x) (((x) & 0xff) << 0)
88#define MALIDP550_DE_V_FRONTPORCH(x) (((x) & 0xfff) << 0)
89#define MALIDP_DE_V_BACKPORCH(x) (((x) & 0xff) << 16)
90#define MALIDP_DE_H_SYNCWIDTH(x) (((x) & 0x3ff) << 0)
91#define MALIDP_DE_V_SYNCWIDTH(x) (((x) & 0xff) << 16)
92#define MALIDP_DE_H_ACTIVE(x) (((x) & 0x1fff) << 0)
93#define MALIDP_DE_V_ACTIVE(x) (((x) & 0x1fff) << 16)
94
95/* register offsets and bits specific to DP500 */
96#define MALIDP500_DC_BASE 0x00000
97#define MALIDP500_DC_CONTROL 0x0000c
98#define MALIDP500_DC_CONFIG_REQ (1 << 17)
99#define MALIDP500_HSYNCPOL (1 << 20)
100#define MALIDP500_VSYNCPOL (1 << 21)
101#define MALIDP500_DC_CLEAR_MASK 0x300fff
102#define MALIDP500_DE_LINE_COUNTER 0x00010
103#define MALIDP500_DE_AXI_CONTROL 0x00014
104#define MALIDP500_DE_SECURE_CTRL 0x0001c
105#define MALIDP500_DE_CHROMA_KEY 0x00024
106#define MALIDP500_TIMINGS_BASE 0x00028
107
108#define MALIDP500_CONFIG_3D 0x00038
109#define MALIDP500_BGND_COLOR 0x0003c
110#define MALIDP500_OUTPUT_DEPTH 0x00044
111#define MALIDP500_YUV_RGB_COEF 0x00048
112#define MALIDP500_COLOR_ADJ_COEF 0x00078
113#define MALIDP500_COEF_TABLE_ADDR 0x000a8
114#define MALIDP500_COEF_TABLE_DATA 0x000ac
115#define MALIDP500_DE_LV_BASE 0x00100
116#define MALIDP500_DE_LV_PTR_BASE 0x00124
117#define MALIDP500_DE_LG1_BASE 0x00200
118#define MALIDP500_DE_LG1_PTR_BASE 0x0021c
119#define MALIDP500_DE_LG2_BASE 0x00300
120#define MALIDP500_DE_LG2_PTR_BASE 0x0031c
121#define MALIDP500_SE_BASE 0x00c00
122#define MALIDP500_SE_PTR_BASE 0x00e0c
123#define MALIDP500_DC_IRQ_BASE 0x00f00
124#define MALIDP500_CONFIG_VALID 0x00f00
125#define MALIDP500_CONFIG_ID 0x00fd4
126
127/* register offsets and bits specific to DP550/DP650 */
128#define MALIDP550_DE_CONTROL 0x00010
129#define MALIDP550_DE_LINE_COUNTER 0x00014
130#define MALIDP550_DE_AXI_CONTROL 0x00018
131#define MALIDP550_DE_QOS 0x0001c
132#define MALIDP550_TIMINGS_BASE 0x00030
133#define MALIDP550_HSYNCPOL (1 << 12)
134#define MALIDP550_VSYNCPOL (1 << 28)
135
136#define MALIDP550_DE_DISP_SIDEBAND 0x00040
137#define MALIDP550_DE_BGND_COLOR 0x00044
138#define MALIDP550_DE_OUTPUT_DEPTH 0x0004c
139#define MALIDP550_DE_COLOR_COEF 0x00050
140#define MALIDP550_DE_COEF_TABLE_ADDR 0x00080
141#define MALIDP550_DE_COEF_TABLE_DATA 0x00084
142#define MALIDP550_DE_LV1_BASE 0x00100
143#define MALIDP550_DE_LV1_PTR_BASE 0x00124
144#define MALIDP550_DE_LV2_BASE 0x00200
145#define MALIDP550_DE_LV2_PTR_BASE 0x00224
146#define MALIDP550_DE_LG_BASE 0x00300
147#define MALIDP550_DE_LG_PTR_BASE 0x0031c
148#define MALIDP550_DE_LS_BASE 0x00400
149#define MALIDP550_DE_LS_PTR_BASE 0x0042c
150#define MALIDP550_DE_PERF_BASE 0x00500
151#define MALIDP550_SE_BASE 0x08000
152#define MALIDP550_DC_BASE 0x0c000
153#define MALIDP550_DC_CONTROL 0x0c010
154#define MALIDP550_DC_CONFIG_REQ (1 << 16)
155#define MALIDP550_CONFIG_VALID 0x0c014
156#define MALIDP550_CONFIG_ID 0x0ffd4
157
158/*
159 * Starting with DP550 the register map blocks have been standardised to the
160 * following layout:
161 *
162 * Offset Block registers
163 * 0x00000 Display Engine
164 * 0x08000 Scaling Engine
165 * 0x0c000 Display Core
166 * 0x10000 Secure control
167 *
168 * The old DP500 IP mixes some DC with the DE registers, hence the need
169 * for a mapping structure.
170 */
171
172#endif /* __MALIDP_REGS_H__ */
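A worked reading of the offsets above (illustrative, not part of the patch): on DP550/DP650 a display-core register address is its block base plus the common IRQ-management offset, while on DP500 the DC base of 0x00000 means the same arithmetic lands inside the display-engine block — which is why the driver carries the base offsets in struct malidp_hw_regmap instead of hard-coding them.

/* illustrative only, values taken from the definitions above */
u32 dp550_dc_status = MALIDP550_DC_BASE + MALIDP_REG_STATUS;	/* 0x0c000 */
u32 dp550_se_status = MALIDP550_SE_BASE + MALIDP_REG_STATUS;	/* 0x08000 */
u32 dp500_dc_status = MALIDP500_DC_BASE + MALIDP_REG_STATUS;	/* 0x00000, overlaps the DE block */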
diff --git a/drivers/gpu/drm/armada/Kconfig b/drivers/gpu/drm/armada/Kconfig
index eb773e9af313..15f3ecfb16f1 100644
--- a/drivers/gpu/drm/armada/Kconfig
+++ b/drivers/gpu/drm/armada/Kconfig
@@ -1,11 +1,7 @@
1config DRM_ARMADA 1config DRM_ARMADA
2 tristate "DRM support for Marvell Armada SoCs" 2 tristate "DRM support for Marvell Armada SoCs"
3 depends on DRM && HAVE_CLK && ARM 3 depends on DRM && HAVE_CLK && ARM
4 select FB_CFB_FILLRECT
5 select FB_CFB_COPYAREA
6 select FB_CFB_IMAGEBLIT
7 select DRM_KMS_HELPER 4 select DRM_KMS_HELPER
8 select DRM_KMS_FB_HELPER
9 help 5 help
10 Support the "LCD" controllers found on the Marvell Armada 510 6 Support the "LCD" controllers found on the Marvell Armada 510
11 devices. There are two controllers on the device, each controller 7 devices. There are two controllers on the device, each controller
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 3130aa8bcdd0..2f58e9e2a59c 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -199,7 +199,7 @@ static void armada_drm_plane_work_run(struct armada_crtc *dcrtc,
199 /* Handle any pending frame work. */ 199 /* Handle any pending frame work. */
200 if (work) { 200 if (work) {
201 work->fn(dcrtc, plane, work); 201 work->fn(dcrtc, plane, work);
202 drm_vblank_put(dcrtc->crtc.dev, dcrtc->num); 202 drm_crtc_vblank_put(&dcrtc->crtc);
203 } 203 }
204 204
205 wake_up(&plane->frame_wait); 205 wake_up(&plane->frame_wait);
@@ -210,7 +210,7 @@ int armada_drm_plane_work_queue(struct armada_crtc *dcrtc,
210{ 210{
211 int ret; 211 int ret;
212 212
213 ret = drm_vblank_get(dcrtc->crtc.dev, dcrtc->num); 213 ret = drm_crtc_vblank_get(&dcrtc->crtc);
214 if (ret) { 214 if (ret) {
215 DRM_ERROR("failed to acquire vblank counter\n"); 215 DRM_ERROR("failed to acquire vblank counter\n");
216 return ret; 216 return ret;
@@ -218,7 +218,7 @@ int armada_drm_plane_work_queue(struct armada_crtc *dcrtc,
218 218
219 ret = cmpxchg(&plane->work, NULL, work) ? -EBUSY : 0; 219 ret = cmpxchg(&plane->work, NULL, work) ? -EBUSY : 0;
220 if (ret) 220 if (ret)
221 drm_vblank_put(dcrtc->crtc.dev, dcrtc->num); 221 drm_crtc_vblank_put(&dcrtc->crtc);
222 222
223 return ret; 223 return ret;
224} 224}
@@ -234,7 +234,7 @@ struct armada_plane_work *armada_drm_plane_work_cancel(
234 struct armada_plane_work *work = xchg(&plane->work, NULL); 234 struct armada_plane_work *work = xchg(&plane->work, NULL);
235 235
236 if (work) 236 if (work)
237 drm_vblank_put(dcrtc->crtc.dev, dcrtc->num); 237 drm_crtc_vblank_put(&dcrtc->crtc);
238 238
239 return work; 239 return work;
240} 240}
@@ -260,7 +260,7 @@ static void armada_drm_crtc_complete_frame_work(struct armada_crtc *dcrtc,
260 260
261 if (fwork->event) { 261 if (fwork->event) {
262 spin_lock_irqsave(&dev->event_lock, flags); 262 spin_lock_irqsave(&dev->event_lock, flags);
263 drm_send_vblank_event(dev, dcrtc->num, fwork->event); 263 drm_crtc_send_vblank_event(&dcrtc->crtc, fwork->event);
264 spin_unlock_irqrestore(&dev->event_lock, flags); 264 spin_unlock_irqrestore(&dev->event_lock, flags);
265 } 265 }
266 266
@@ -410,7 +410,7 @@ static void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
410 DRM_ERROR("graphics underflow on crtc %u\n", dcrtc->num); 410 DRM_ERROR("graphics underflow on crtc %u\n", dcrtc->num);
411 411
412 if (stat & VSYNC_IRQ) 412 if (stat & VSYNC_IRQ)
413 drm_handle_vblank(dcrtc->crtc.dev, dcrtc->num); 413 drm_crtc_handle_vblank(&dcrtc->crtc);
414 414
415 spin_lock(&dcrtc->irq_lock); 415 spin_lock(&dcrtc->irq_lock);
416 ovl_plane = dcrtc->plane; 416 ovl_plane = dcrtc->plane;
@@ -592,9 +592,9 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
592 592
593 if (interlaced ^ dcrtc->interlaced) { 593 if (interlaced ^ dcrtc->interlaced) {
594 if (adj->flags & DRM_MODE_FLAG_INTERLACE) 594 if (adj->flags & DRM_MODE_FLAG_INTERLACE)
595 drm_vblank_get(dcrtc->crtc.dev, dcrtc->num); 595 drm_crtc_vblank_get(&dcrtc->crtc);
596 else 596 else
597 drm_vblank_put(dcrtc->crtc.dev, dcrtc->num); 597 drm_crtc_vblank_put(&dcrtc->crtc);
598 dcrtc->interlaced = interlaced; 598 dcrtc->interlaced = interlaced;
599 } 599 }
600 600
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 439824a61aa5..f5ebdd681445 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -189,7 +189,6 @@ static struct drm_driver armada_drm_driver = {
189 .load = armada_drm_load, 189 .load = armada_drm_load,
190 .lastclose = armada_drm_lastclose, 190 .lastclose = armada_drm_lastclose,
191 .unload = armada_drm_unload, 191 .unload = armada_drm_unload,
192 .set_busid = drm_platform_set_busid,
193 .get_vblank_counter = drm_vblank_no_hw_counter, 192 .get_vblank_counter = drm_vblank_no_hw_counter,
194 .enable_vblank = armada_drm_enable_vblank, 193 .enable_vblank = armada_drm_enable_vblank,
195 .disable_vblank = armada_drm_disable_vblank, 194 .disable_vblank = armada_drm_disable_vblank,
@@ -197,7 +196,7 @@ static struct drm_driver armada_drm_driver = {
197 .debugfs_init = armada_drm_debugfs_init, 196 .debugfs_init = armada_drm_debugfs_init,
198 .debugfs_cleanup = armada_drm_debugfs_cleanup, 197 .debugfs_cleanup = armada_drm_debugfs_cleanup,
199#endif 198#endif
200 .gem_free_object = armada_gem_free_object, 199 .gem_free_object_unlocked = armada_gem_free_object,
201 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 200 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
202 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 201 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
203 .gem_prime_export = armada_gem_prime_export, 202 .gem_prime_export = armada_gem_prime_export,
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index 148e8a42b2c6..1ee707ef6b8d 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -121,6 +121,7 @@ armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
121 int ret; 121 int ret;
122 122
123 ret = drm_plane_helper_check_update(plane, crtc, fb, &src, &dest, &clip, 123 ret = drm_plane_helper_check_update(plane, crtc, fb, &src, &dest, &clip,
124 BIT(DRM_ROTATE_0),
124 0, INT_MAX, true, false, &visible); 125 0, INT_MAX, true, false, &visible);
125 if (ret) 126 if (ret)
126 return ret; 127 return ret;
diff --git a/drivers/gpu/drm/ast/Kconfig b/drivers/gpu/drm/ast/Kconfig
index 8a784c460c89..15f6ce7acb2a 100644
--- a/drivers/gpu/drm/ast/Kconfig
+++ b/drivers/gpu/drm/ast/Kconfig
@@ -2,11 +2,7 @@ config DRM_AST
2 tristate "AST server chips" 2 tristate "AST server chips"
3 depends on DRM && PCI 3 depends on DRM && PCI
4 select DRM_TTM 4 select DRM_TTM
5 select FB_SYS_COPYAREA
6 select FB_SYS_FILLRECT
7 select FB_SYS_IMAGEBLIT
8 select DRM_KMS_HELPER 5 select DRM_KMS_HELPER
9 select DRM_KMS_FB_HELPER
10 select DRM_TTM 6 select DRM_TTM
11 help 7 help
12 Say yes for experimental AST GPU driver. Do not enable 8 Say yes for experimental AST GPU driver. Do not enable
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index fcd9c0714836..f54afd2113a9 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -209,7 +209,7 @@ static struct drm_driver driver = {
209 .minor = DRIVER_MINOR, 209 .minor = DRIVER_MINOR,
210 .patchlevel = DRIVER_PATCHLEVEL, 210 .patchlevel = DRIVER_PATCHLEVEL,
211 211
212 .gem_free_object = ast_gem_free_object, 212 .gem_free_object_unlocked = ast_gem_free_object,
213 .dumb_create = ast_dumb_create, 213 .dumb_create = ast_dumb_create,
214 .dumb_map_offset = ast_dumb_mmap_offset, 214 .dumb_map_offset = ast_dumb_mmap_offset,
215 .dumb_destroy = drm_gem_dumb_destroy, 215 .dumb_destroy = drm_gem_dumb_destroy,
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index 5320f8c57884..c017a9330a18 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -167,12 +167,9 @@ static int astfb_create_object(struct ast_fbdev *afbdev,
167 struct drm_gem_object **gobj_p) 167 struct drm_gem_object **gobj_p)
168{ 168{
169 struct drm_device *dev = afbdev->helper.dev; 169 struct drm_device *dev = afbdev->helper.dev;
170 u32 bpp, depth;
171 u32 size; 170 u32 size;
172 struct drm_gem_object *gobj; 171 struct drm_gem_object *gobj;
173
174 int ret = 0; 172 int ret = 0;
175 drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
176 173
177 size = mode_cmd->pitches[0] * mode_cmd->height; 174 size = mode_cmd->pitches[0] * mode_cmd->height;
178 ret = ast_gem_create(dev, size, true, &gobj); 175 ret = ast_gem_create(dev, size, true, &gobj);
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 7bc3aa6dda8c..904beaa932d0 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -295,9 +295,8 @@ static int ast_get_dram_info(struct drm_device *dev)
295static void ast_user_framebuffer_destroy(struct drm_framebuffer *fb) 295static void ast_user_framebuffer_destroy(struct drm_framebuffer *fb)
296{ 296{
297 struct ast_framebuffer *ast_fb = to_ast_framebuffer(fb); 297 struct ast_framebuffer *ast_fb = to_ast_framebuffer(fb);
298 if (ast_fb->obj)
299 drm_gem_object_unreference_unlocked(ast_fb->obj);
300 298
299 drm_gem_object_unreference_unlocked(ast_fb->obj);
301 drm_framebuffer_cleanup(fb); 300 drm_framebuffer_cleanup(fb);
302 kfree(fb); 301 kfree(fb);
303} 302}
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index c337922606e3..5957c3e659fe 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -624,19 +624,21 @@ static void ast_crtc_reset(struct drm_crtc *crtc)
624 624
625} 625}
626 626
627static void ast_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, 627static int ast_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
628 u16 *blue, uint32_t start, uint32_t size) 628 u16 *blue, uint32_t size)
629{ 629{
630 struct ast_crtc *ast_crtc = to_ast_crtc(crtc); 630 struct ast_crtc *ast_crtc = to_ast_crtc(crtc);
631 int end = (start + size > 256) ? 256 : start + size, i; 631 int i;
632 632
633 /* userspace palettes are always correct as is */ 633 /* userspace palettes are always correct as is */
634 for (i = start; i < end; i++) { 634 for (i = 0; i < size; i++) {
635 ast_crtc->lut_r[i] = red[i] >> 8; 635 ast_crtc->lut_r[i] = red[i] >> 8;
636 ast_crtc->lut_g[i] = green[i] >> 8; 636 ast_crtc->lut_g[i] = green[i] >> 8;
637 ast_crtc->lut_b[i] = blue[i] >> 8; 637 ast_crtc->lut_b[i] = blue[i] >> 8;
638 } 638 }
639 ast_crtc_load_lut(crtc); 639 ast_crtc_load_lut(crtc);
640
641 return 0;
640} 642}
641 643
642 644
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 59f2f93b6f84..b29a41218fc9 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -186,17 +186,6 @@ static void ast_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *
186{ 186{
187} 187}
188 188
189static int ast_bo_move(struct ttm_buffer_object *bo,
190 bool evict, bool interruptible,
191 bool no_wait_gpu,
192 struct ttm_mem_reg *new_mem)
193{
194 int r;
195 r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
196 return r;
197}
198
199
200static void ast_ttm_backend_destroy(struct ttm_tt *tt) 189static void ast_ttm_backend_destroy(struct ttm_tt *tt)
201{ 190{
202 ttm_tt_fini(tt); 191 ttm_tt_fini(tt);
@@ -241,7 +230,7 @@ struct ttm_bo_driver ast_bo_driver = {
241 .ttm_tt_unpopulate = ast_ttm_tt_unpopulate, 230 .ttm_tt_unpopulate = ast_ttm_tt_unpopulate,
242 .init_mem_type = ast_bo_init_mem_type, 231 .init_mem_type = ast_bo_init_mem_type,
243 .evict_flags = ast_bo_evict_flags, 232 .evict_flags = ast_bo_evict_flags,
244 .move = ast_bo_move, 233 .move = NULL,
245 .verify_access = ast_bo_verify_access, 234 .verify_access = ast_bo_verify_access,
246 .io_mem_reserve = &ast_ttm_io_mem_reserve, 235 .io_mem_reserve = &ast_ttm_io_mem_reserve,
247 .io_mem_free = &ast_ttm_io_mem_free, 236 .io_mem_free = &ast_ttm_io_mem_free,
diff --git a/drivers/gpu/drm/atmel-hlcdc/Kconfig b/drivers/gpu/drm/atmel-hlcdc/Kconfig
index 99b4f0698a30..32bcc4bad06a 100644
--- a/drivers/gpu/drm/atmel-hlcdc/Kconfig
+++ b/drivers/gpu/drm/atmel-hlcdc/Kconfig
@@ -3,7 +3,6 @@ config DRM_ATMEL_HLCDC
3 depends on DRM && OF && COMMON_CLK && MFD_ATMEL_HLCDC && ARM 3 depends on DRM && OF && COMMON_CLK && MFD_ATMEL_HLCDC && ARM
4 select DRM_GEM_CMA_HELPER 4 select DRM_GEM_CMA_HELPER
5 select DRM_KMS_HELPER 5 select DRM_KMS_HELPER
6 select DRM_KMS_FB_HELPER
7 select DRM_KMS_CMA_HELPER 6 select DRM_KMS_CMA_HELPER
8 select DRM_PANEL 7 select DRM_PANEL
9 help 8 help
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index bd12231ab0cd..a978381ef95b 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -374,8 +374,8 @@ static void atmel_hlcdc_crtc_finish_page_flip(struct atmel_hlcdc_crtc *crtc)
374 374
375 spin_lock_irqsave(&dev->event_lock, flags); 375 spin_lock_irqsave(&dev->event_lock, flags);
376 if (crtc->event) { 376 if (crtc->event) {
377 drm_send_vblank_event(dev, crtc->id, crtc->event); 377 drm_crtc_send_vblank_event(&crtc->base, crtc->event);
378 drm_vblank_put(dev, crtc->id); 378 drm_crtc_vblank_put(&crtc->base);
379 crtc->event = NULL; 379 crtc->event = NULL;
380 } 380 }
381 spin_unlock_irqrestore(&dev->event_lock, flags); 381 spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -383,7 +383,7 @@ static void atmel_hlcdc_crtc_finish_page_flip(struct atmel_hlcdc_crtc *crtc)
383 383
384void atmel_hlcdc_crtc_irq(struct drm_crtc *c) 384void atmel_hlcdc_crtc_irq(struct drm_crtc *c)
385{ 385{
386 drm_handle_vblank(c->dev, 0); 386 drm_crtc_handle_vblank(c);
387 atmel_hlcdc_crtc_finish_page_flip(drm_crtc_to_atmel_hlcdc_crtc(c)); 387 atmel_hlcdc_crtc_finish_page_flip(drm_crtc_to_atmel_hlcdc_crtc(c));
388} 388}
389 389
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 8ded7645747e..d4a3d61b7b06 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -519,7 +519,7 @@ static int atmel_hlcdc_dc_atomic_commit(struct drm_device *dev,
519 } 519 }
520 520
521 /* Swap the state, this is the point of no return. */ 521 /* Swap the state, this is the point of no return. */
522 drm_atomic_helper_swap_state(dev, state); 522 drm_atomic_helper_swap_state(state, true);
523 523
524 if (async) 524 if (async)
525 queue_work(dc->wq, &commit->work); 525 queue_work(dc->wq, &commit->work);
@@ -691,13 +691,6 @@ static void atmel_hlcdc_dc_unload(struct drm_device *dev)
691 destroy_workqueue(dc->wq); 691 destroy_workqueue(dc->wq);
692} 692}
693 693
694static void atmel_hlcdc_dc_connector_unplug_all(struct drm_device *dev)
695{
696 mutex_lock(&dev->mode_config.mutex);
697 drm_connector_unregister_all(dev);
698 mutex_unlock(&dev->mode_config.mutex);
699}
700
701static void atmel_hlcdc_dc_lastclose(struct drm_device *dev) 694static void atmel_hlcdc_dc_lastclose(struct drm_device *dev)
702{ 695{
703 struct atmel_hlcdc_dc *dc = dev->dev_private; 696 struct atmel_hlcdc_dc *dc = dev->dev_private;
@@ -776,7 +769,7 @@ static struct drm_driver atmel_hlcdc_dc_driver = {
776 .get_vblank_counter = drm_vblank_no_hw_counter, 769 .get_vblank_counter = drm_vblank_no_hw_counter,
777 .enable_vblank = atmel_hlcdc_dc_enable_vblank, 770 .enable_vblank = atmel_hlcdc_dc_enable_vblank,
778 .disable_vblank = atmel_hlcdc_dc_disable_vblank, 771 .disable_vblank = atmel_hlcdc_dc_disable_vblank,
779 .gem_free_object = drm_gem_cma_free_object, 772 .gem_free_object_unlocked = drm_gem_cma_free_object,
780 .gem_vm_ops = &drm_gem_cma_vm_ops, 773 .gem_vm_ops = &drm_gem_cma_vm_ops,
781 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 774 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
782 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 775 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
@@ -815,15 +808,8 @@ static int atmel_hlcdc_dc_drm_probe(struct platform_device *pdev)
815 if (ret) 808 if (ret)
816 goto err_unload; 809 goto err_unload;
817 810
818 ret = drm_connector_register_all(ddev);
819 if (ret)
820 goto err_unregister;
821
822 return 0; 811 return 0;
823 812
824err_unregister:
825 drm_dev_unregister(ddev);
826
827err_unload: 813err_unload:
828 atmel_hlcdc_dc_unload(ddev); 814 atmel_hlcdc_dc_unload(ddev);
829 815
@@ -837,7 +823,6 @@ static int atmel_hlcdc_dc_drm_remove(struct platform_device *pdev)
837{ 823{
838 struct drm_device *ddev = platform_get_drvdata(pdev); 824 struct drm_device *ddev = platform_get_drvdata(pdev);
839 825
840 atmel_hlcdc_dc_connector_unplug_all(ddev);
841 drm_dev_unregister(ddev); 826 drm_dev_unregister(ddev);
842 atmel_hlcdc_dc_unload(ddev); 827 atmel_hlcdc_dc_unload(ddev);
843 drm_dev_unref(ddev); 828 drm_dev_unref(ddev);
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
index 3d34fc4ca826..6119b5085501 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
@@ -113,21 +113,9 @@ static int atmel_hlcdc_rgb_mode_valid(struct drm_connector *connector,
113 return atmel_hlcdc_dc_mode_valid(rgb->dc, mode); 113 return atmel_hlcdc_dc_mode_valid(rgb->dc, mode);
114} 114}
115 115
116
117
118static struct drm_encoder *
119atmel_hlcdc_rgb_best_encoder(struct drm_connector *connector)
120{
121 struct atmel_hlcdc_rgb_output *rgb =
122 drm_connector_to_atmel_hlcdc_rgb_output(connector);
123
124 return &rgb->encoder;
125}
126
127static const struct drm_connector_helper_funcs atmel_hlcdc_panel_connector_helper_funcs = { 116static const struct drm_connector_helper_funcs atmel_hlcdc_panel_connector_helper_funcs = {
128 .get_modes = atmel_hlcdc_panel_get_modes, 117 .get_modes = atmel_hlcdc_panel_get_modes,
129 .mode_valid = atmel_hlcdc_rgb_mode_valid, 118 .mode_valid = atmel_hlcdc_rgb_mode_valid,
130 .best_encoder = atmel_hlcdc_rgb_best_encoder,
131}; 119};
132 120
133static enum drm_connector_status 121static enum drm_connector_status
diff --git a/drivers/gpu/drm/bochs/Kconfig b/drivers/gpu/drm/bochs/Kconfig
index 5f8b0c2b9a44..f739763f47ce 100644
--- a/drivers/gpu/drm/bochs/Kconfig
+++ b/drivers/gpu/drm/bochs/Kconfig
@@ -2,10 +2,6 @@ config DRM_BOCHS
2 tristate "DRM Support for bochs dispi vga interface (qemu stdvga)" 2 tristate "DRM Support for bochs dispi vga interface (qemu stdvga)"
3 depends on DRM && PCI 3 depends on DRM && PCI
4 select DRM_KMS_HELPER 4 select DRM_KMS_HELPER
5 select DRM_KMS_FB_HELPER
6 select FB_SYS_FILLRECT
7 select FB_SYS_COPYAREA
8 select FB_SYS_IMAGEBLIT
9 select DRM_TTM 5 select DRM_TTM
10 help 6 help
11 Choose this option for qemu. 7 Choose this option for qemu.
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index b332b4d3b0e2..abace82de6ea 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -89,7 +89,7 @@ static struct drm_driver bochs_driver = {
89 .date = "20130925", 89 .date = "20130925",
90 .major = 1, 90 .major = 1,
91 .minor = 0, 91 .minor = 0,
92 .gem_free_object = bochs_gem_free_object, 92 .gem_free_object_unlocked = bochs_gem_free_object,
93 .dumb_create = bochs_dumb_create, 93 .dumb_create = bochs_dumb_create,
94 .dumb_map_offset = bochs_dumb_mmap_offset, 94 .dumb_map_offset = bochs_dumb_mmap_offset,
95 .dumb_destroy = drm_gem_dumb_destroy, 95 .dumb_destroy = drm_gem_dumb_destroy,
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index 6cf912c45e48..5c5638a777a1 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -165,15 +165,6 @@ static void bochs_ttm_io_mem_free(struct ttm_bo_device *bdev,
165{ 165{
166} 166}
167 167
168static int bochs_bo_move(struct ttm_buffer_object *bo,
169 bool evict, bool interruptible,
170 bool no_wait_gpu,
171 struct ttm_mem_reg *new_mem)
172{
173 return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
174}
175
176
177static void bochs_ttm_backend_destroy(struct ttm_tt *tt) 168static void bochs_ttm_backend_destroy(struct ttm_tt *tt)
178{ 169{
179 ttm_tt_fini(tt); 170 ttm_tt_fini(tt);
@@ -208,7 +199,7 @@ struct ttm_bo_driver bochs_bo_driver = {
208 .ttm_tt_unpopulate = ttm_pool_unpopulate, 199 .ttm_tt_unpopulate = ttm_pool_unpopulate,
209 .init_mem_type = bochs_bo_init_mem_type, 200 .init_mem_type = bochs_bo_init_mem_type,
210 .evict_flags = bochs_bo_evict_flags, 201 .evict_flags = bochs_bo_evict_flags,
211 .move = bochs_bo_move, 202 .move = NULL,
212 .verify_access = bochs_bo_verify_access, 203 .verify_access = bochs_bo_verify_access,
213 .io_mem_reserve = &bochs_ttm_io_mem_reserve, 204 .io_mem_reserve = &bochs_ttm_io_mem_reserve,
214 .io_mem_free = &bochs_ttm_io_mem_free, 205 .io_mem_free = &bochs_ttm_io_mem_free,
@@ -474,8 +465,8 @@ int bochs_dumb_mmap_offset(struct drm_file *file, struct drm_device *dev,
474static void bochs_user_framebuffer_destroy(struct drm_framebuffer *fb) 465static void bochs_user_framebuffer_destroy(struct drm_framebuffer *fb)
475{ 466{
476 struct bochs_framebuffer *bochs_fb = to_bochs_framebuffer(fb); 467 struct bochs_framebuffer *bochs_fb = to_bochs_framebuffer(fb);
477 if (bochs_fb->obj) 468
478 drm_gem_object_unreference_unlocked(bochs_fb->obj); 469 drm_gem_object_unreference_unlocked(bochs_fb->obj);
479 drm_framebuffer_cleanup(fb); 470 drm_framebuffer_cleanup(fb);
480 kfree(fb); 471 kfree(fb);
481} 472}
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 8f7423f18da5..b590e678052d 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -50,6 +50,25 @@ config DRM_PARADE_PS8622
50 ---help--- 50 ---help---
51 Parade eDP-LVDS bridge chip driver. 51 Parade eDP-LVDS bridge chip driver.
52 52
53config DRM_SII902X
54 tristate "Silicon Image sii902x RGB/HDMI bridge"
55 depends on OF
56 select DRM_KMS_HELPER
57 select REGMAP_I2C
58 ---help---
59 Silicon Image sii902x bridge chip driver.
60
61config DRM_TOSHIBA_TC358767
62 tristate "Toshiba TC358767 eDP bridge"
63 depends on OF
64 select DRM_KMS_HELPER
65 select REGMAP_I2C
66 select DRM_PANEL
67 ---help---
68 Toshiba TC358767 eDP bridge chip driver.
69
53source "drivers/gpu/drm/bridge/analogix/Kconfig" 70source "drivers/gpu/drm/bridge/analogix/Kconfig"
54 71
72source "drivers/gpu/drm/bridge/adv7511/Kconfig"
73
55endmenu 74endmenu
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index 96b13b30e6ab..efdb07e878f5 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -5,4 +5,7 @@ obj-$(CONFIG_DRM_DW_HDMI) += dw-hdmi.o
5obj-$(CONFIG_DRM_DW_HDMI_AHB_AUDIO) += dw-hdmi-ahb-audio.o 5obj-$(CONFIG_DRM_DW_HDMI_AHB_AUDIO) += dw-hdmi-ahb-audio.o
6obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o 6obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o
7obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o 7obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o
8obj-$(CONFIG_DRM_SII902X) += sii902x.o
9obj-$(CONFIG_DRM_TOSHIBA_TC358767) += tc358767.o
8obj-$(CONFIG_DRM_ANALOGIX_DP) += analogix/ 10obj-$(CONFIG_DRM_ANALOGIX_DP) += analogix/
11obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511/
diff --git a/drivers/gpu/drm/bridge/adv7511/Kconfig b/drivers/gpu/drm/bridge/adv7511/Kconfig
new file mode 100644
index 000000000000..d2b0499ab7d7
--- /dev/null
+++ b/drivers/gpu/drm/bridge/adv7511/Kconfig
@@ -0,0 +1,15 @@
1config DRM_I2C_ADV7511
2	tristate "ADV7511 encoder"
3 depends on OF
4 select DRM_KMS_HELPER
5 select REGMAP_I2C
6 help
7	  Support for the Analog Devices ADV7511(W) and ADV7513 HDMI encoders.
8
9config DRM_I2C_ADV7533
10 bool "ADV7533 encoder"
11 depends on DRM_I2C_ADV7511
12 select DRM_MIPI_DSI
13 default y
14 help
15 Support for the Analog Devices ADV7533 DSI to HDMI encoder.
diff --git a/drivers/gpu/drm/bridge/adv7511/Makefile b/drivers/gpu/drm/bridge/adv7511/Makefile
new file mode 100644
index 000000000000..9019327fff4c
--- /dev/null
+++ b/drivers/gpu/drm/bridge/adv7511/Makefile
@@ -0,0 +1,3 @@
1adv7511-y := adv7511_drv.o
2adv7511-$(CONFIG_DRM_I2C_ADV7533) += adv7533.o
3obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511.o
diff --git a/drivers/gpu/drm/i2c/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h
index 38515b30cedf..161c923d6162 100644
--- a/drivers/gpu/drm/i2c/adv7511.h
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h
@@ -10,6 +10,11 @@
10#define __DRM_I2C_ADV7511_H__ 10#define __DRM_I2C_ADV7511_H__
11 11
12#include <linux/hdmi.h> 12#include <linux/hdmi.h>
13#include <linux/i2c.h>
14#include <linux/regmap.h>
15
16#include <drm/drm_crtc_helper.h>
17#include <drm/drm_mipi_dsi.h>
13 18
14#define ADV7511_REG_CHIP_REVISION 0x00 19#define ADV7511_REG_CHIP_REVISION 0x00
15#define ADV7511_REG_N0 0x01 20#define ADV7511_REG_N0 0x01
@@ -286,4 +291,102 @@ struct adv7511_video_config {
286 struct hdmi_avi_infoframe avi_infoframe; 291 struct hdmi_avi_infoframe avi_infoframe;
287}; 292};
288 293
294enum adv7511_type {
295 ADV7511,
296 ADV7533,
297};
298
299struct adv7511 {
300 struct i2c_client *i2c_main;
301 struct i2c_client *i2c_edid;
302 struct i2c_client *i2c_cec;
303
304 struct regmap *regmap;
305 struct regmap *regmap_cec;
306 enum drm_connector_status status;
307 bool powered;
308
309 struct drm_display_mode curr_mode;
310
311 unsigned int f_tmds;
312
313 unsigned int current_edid_segment;
314 uint8_t edid_buf[256];
315 bool edid_read;
316
317 wait_queue_head_t wq;
318 struct drm_bridge bridge;
319 struct drm_connector connector;
320
321 bool embedded_sync;
322 enum adv7511_sync_polarity vsync_polarity;
323 enum adv7511_sync_polarity hsync_polarity;
324 bool rgb;
325
326 struct edid *edid;
327
328 struct gpio_desc *gpio_pd;
329
330 /* ADV7533 DSI RX related params */
331 struct device_node *host_node;
332 struct mipi_dsi_device *dsi;
333 u8 num_dsi_lanes;
334 bool use_timing_gen;
335
336 enum adv7511_type type;
337};
338
339#ifdef CONFIG_DRM_I2C_ADV7533
340void adv7533_dsi_power_on(struct adv7511 *adv);
341void adv7533_dsi_power_off(struct adv7511 *adv);
342void adv7533_mode_set(struct adv7511 *adv, struct drm_display_mode *mode);
343int adv7533_patch_registers(struct adv7511 *adv);
344void adv7533_uninit_cec(struct adv7511 *adv);
345int adv7533_init_cec(struct adv7511 *adv);
346int adv7533_attach_dsi(struct adv7511 *adv);
347void adv7533_detach_dsi(struct adv7511 *adv);
348int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv);
349#else
350static inline void adv7533_dsi_power_on(struct adv7511 *adv)
351{
352}
353
354static inline void adv7533_dsi_power_off(struct adv7511 *adv)
355{
356}
357
358static inline void adv7533_mode_set(struct adv7511 *adv,
359 struct drm_display_mode *mode)
360{
361}
362
363static inline int adv7533_patch_registers(struct adv7511 *adv)
364{
365 return -ENODEV;
366}
367
368static inline void adv7533_uninit_cec(struct adv7511 *adv)
369{
370}
371
372static inline int adv7533_init_cec(struct adv7511 *adv)
373{
374 return -ENODEV;
375}
376
377static inline int adv7533_attach_dsi(struct adv7511 *adv)
378{
379 return -ENODEV;
380}
381
382static inline void adv7533_detach_dsi(struct adv7511 *adv)
383{
384}
385
386static inline int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv)
387{
388 return -ENODEV;
389}
390#endif
391
289#endif /* __DRM_I2C_ADV7511_H__ */ 392#endif /* __DRM_I2C_ADV7511_H__ */
diff --git a/drivers/gpu/drm/i2c/adv7511.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index a02112ba1c3d..ec8fb2ed3275 100644
--- a/drivers/gpu/drm/i2c/adv7511.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -8,51 +8,17 @@
8 8
9#include <linux/device.h> 9#include <linux/device.h>
10#include <linux/gpio/consumer.h> 10#include <linux/gpio/consumer.h>
11#include <linux/i2c.h>
12#include <linux/module.h> 11#include <linux/module.h>
13#include <linux/regmap.h> 12#include <linux/of_device.h>
14#include <linux/slab.h> 13#include <linux/slab.h>
15 14
16#include <drm/drmP.h> 15#include <drm/drmP.h>
17#include <drm/drm_crtc_helper.h> 16#include <drm/drm_atomic.h>
17#include <drm/drm_atomic_helper.h>
18#include <drm/drm_edid.h> 18#include <drm/drm_edid.h>
19#include <drm/drm_encoder_slave.h>
20 19
21#include "adv7511.h" 20#include "adv7511.h"
22 21
23struct adv7511 {
24 struct i2c_client *i2c_main;
25 struct i2c_client *i2c_edid;
26
27 struct regmap *regmap;
28 struct regmap *packet_memory_regmap;
29 enum drm_connector_status status;
30 bool powered;
31
32 unsigned int f_tmds;
33
34 unsigned int current_edid_segment;
35 uint8_t edid_buf[256];
36 bool edid_read;
37
38 wait_queue_head_t wq;
39 struct drm_encoder *encoder;
40
41 bool embedded_sync;
42 enum adv7511_sync_polarity vsync_polarity;
43 enum adv7511_sync_polarity hsync_polarity;
44 bool rgb;
45
46 struct edid *edid;
47
48 struct gpio_desc *gpio_pd;
49};
50
51static struct adv7511 *encoder_to_adv7511(struct drm_encoder *encoder)
52{
53 return to_encoder_slave(encoder)->slave_priv;
54}
55
56/* ADI recommended values for proper operation. */ 22/* ADI recommended values for proper operation. */
57static const struct reg_sequence adv7511_fixed_registers[] = { 23static const struct reg_sequence adv7511_fixed_registers[] = {
58 { 0x98, 0x03 }, 24 { 0x98, 0x03 },
@@ -394,6 +360,9 @@ static void adv7511_power_on(struct adv7511 *adv7511)
394 */ 360 */
395 regcache_sync(adv7511->regmap); 361 regcache_sync(adv7511->regmap);
396 362
363 if (adv7511->type == ADV7533)
364 adv7533_dsi_power_on(adv7511);
365
397 adv7511->powered = true; 366 adv7511->powered = true;
398} 367}
399 368
@@ -405,6 +374,9 @@ static void adv7511_power_off(struct adv7511 *adv7511)
405 ADV7511_POWER_POWER_DOWN); 374 ADV7511_POWER_POWER_DOWN);
406 regcache_mark_dirty(adv7511->regmap); 375 regcache_mark_dirty(adv7511->regmap);
407 376
377 if (adv7511->type == ADV7533)
378 adv7533_dsi_power_off(adv7511);
379
408 adv7511->powered = false; 380 adv7511->powered = false;
409} 381}
410 382
@@ -430,7 +402,7 @@ static bool adv7511_hpd(struct adv7511 *adv7511)
430 return false; 402 return false;
431} 403}
432 404
433static int adv7511_irq_process(struct adv7511 *adv7511) 405static int adv7511_irq_process(struct adv7511 *adv7511, bool process_hpd)
434{ 406{
435 unsigned int irq0, irq1; 407 unsigned int irq0, irq1;
436 int ret; 408 int ret;
@@ -446,8 +418,8 @@ static int adv7511_irq_process(struct adv7511 *adv7511)
446 regmap_write(adv7511->regmap, ADV7511_REG_INT(0), irq0); 418 regmap_write(adv7511->regmap, ADV7511_REG_INT(0), irq0);
447 regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1); 419 regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1);
448 420
449 if (irq0 & ADV7511_INT0_HPD && adv7511->encoder) 421 if (process_hpd && irq0 & ADV7511_INT0_HPD && adv7511->bridge.encoder)
450 drm_helper_hpd_irq_event(adv7511->encoder->dev); 422 drm_helper_hpd_irq_event(adv7511->connector.dev);
451 423
452 if (irq0 & ADV7511_INT0_EDID_READY || irq1 & ADV7511_INT1_DDC_ERROR) { 424 if (irq0 & ADV7511_INT0_EDID_READY || irq1 & ADV7511_INT1_DDC_ERROR) {
453 adv7511->edid_read = true; 425 adv7511->edid_read = true;
@@ -464,7 +436,7 @@ static irqreturn_t adv7511_irq_handler(int irq, void *devid)
464 struct adv7511 *adv7511 = devid; 436 struct adv7511 *adv7511 = devid;
465 int ret; 437 int ret;
466 438
467 ret = adv7511_irq_process(adv7511); 439 ret = adv7511_irq_process(adv7511, true);
468 return ret < 0 ? IRQ_NONE : IRQ_HANDLED; 440 return ret < 0 ? IRQ_NONE : IRQ_HANDLED;
469} 441}
470 442
@@ -481,7 +453,7 @@ static int adv7511_wait_for_edid(struct adv7511 *adv7511, int timeout)
481 adv7511->edid_read, msecs_to_jiffies(timeout)); 453 adv7511->edid_read, msecs_to_jiffies(timeout));
482 } else { 454 } else {
483 for (; timeout > 0; timeout -= 25) { 455 for (; timeout > 0; timeout -= 25) {
484 ret = adv7511_irq_process(adv7511); 456 ret = adv7511_irq_process(adv7511, false);
485 if (ret < 0) 457 if (ret < 0)
486 break; 458 break;
487 459
@@ -563,13 +535,12 @@ static int adv7511_get_edid_block(void *data, u8 *buf, unsigned int block,
563} 535}
564 536
565/* ----------------------------------------------------------------------------- 537/* -----------------------------------------------------------------------------
566 * Encoder operations 538 * ADV75xx helpers
567 */ 539 */
568 540
569static int adv7511_get_modes(struct drm_encoder *encoder, 541static int adv7511_get_modes(struct adv7511 *adv7511,
570 struct drm_connector *connector) 542 struct drm_connector *connector)
571{ 543{
572 struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
573 struct edid *edid; 544 struct edid *edid;
574 unsigned int count; 545 unsigned int count;
575 546
@@ -606,21 +577,9 @@ static int adv7511_get_modes(struct drm_encoder *encoder,
606 return count; 577 return count;
607} 578}
608 579
609static void adv7511_encoder_dpms(struct drm_encoder *encoder, int mode)
610{
611 struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
612
613 if (mode == DRM_MODE_DPMS_ON)
614 adv7511_power_on(adv7511);
615 else
616 adv7511_power_off(adv7511);
617}
618
619static enum drm_connector_status 580static enum drm_connector_status
620adv7511_encoder_detect(struct drm_encoder *encoder, 581adv7511_detect(struct adv7511 *adv7511, struct drm_connector *connector)
621 struct drm_connector *connector)
622{ 582{
623 struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
624 enum drm_connector_status status; 583 enum drm_connector_status status;
625 unsigned int val; 584 unsigned int val;
626 bool hpd; 585 bool hpd;
@@ -644,7 +603,7 @@ adv7511_encoder_detect(struct drm_encoder *encoder,
644 if (status == connector_status_connected && hpd && adv7511->powered) { 603 if (status == connector_status_connected && hpd && adv7511->powered) {
645 regcache_mark_dirty(adv7511->regmap); 604 regcache_mark_dirty(adv7511->regmap);
646 adv7511_power_on(adv7511); 605 adv7511_power_on(adv7511);
647 adv7511_get_modes(encoder, connector); 606 adv7511_get_modes(adv7511, connector);
648 if (adv7511->status == connector_status_connected) 607 if (adv7511->status == connector_status_connected)
649 status = connector_status_disconnected; 608 status = connector_status_disconnected;
650 } else { 609 } else {
@@ -658,8 +617,8 @@ adv7511_encoder_detect(struct drm_encoder *encoder,
658 return status; 617 return status;
659} 618}
660 619
661static int adv7511_encoder_mode_valid(struct drm_encoder *encoder, 620static int adv7511_mode_valid(struct adv7511 *adv7511,
662 struct drm_display_mode *mode) 621 struct drm_display_mode *mode)
663{ 622{
664 if (mode->clock > 165000) 623 if (mode->clock > 165000)
665 return MODE_CLOCK_HIGH; 624 return MODE_CLOCK_HIGH;
@@ -667,11 +626,10 @@ static int adv7511_encoder_mode_valid(struct drm_encoder *encoder,
667 return MODE_OK; 626 return MODE_OK;
668} 627}
669 628
670static void adv7511_encoder_mode_set(struct drm_encoder *encoder, 629static void adv7511_mode_set(struct adv7511 *adv7511,
671 struct drm_display_mode *mode, 630 struct drm_display_mode *mode,
672 struct drm_display_mode *adj_mode) 631 struct drm_display_mode *adj_mode)
673{ 632{
674 struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
675 unsigned int low_refresh_rate; 633 unsigned int low_refresh_rate;
676 unsigned int hsync_polarity = 0; 634 unsigned int hsync_polarity = 0;
677 unsigned int vsync_polarity = 0; 635 unsigned int vsync_polarity = 0;
@@ -754,6 +712,11 @@ static void adv7511_encoder_mode_set(struct drm_encoder *encoder,
754 regmap_update_bits(adv7511->regmap, 0x17, 712 regmap_update_bits(adv7511->regmap, 0x17,
755 0x60, (vsync_polarity << 6) | (hsync_polarity << 5)); 713 0x60, (vsync_polarity << 6) | (hsync_polarity << 5));
756 714
715 if (adv7511->type == ADV7533)
716 adv7533_mode_set(adv7511, adj_mode);
717
718 drm_mode_copy(&adv7511->curr_mode, adj_mode);
719
757 /* 720 /*
758 * TODO Test first order 4:2:2 to 4:4:4 up conversion method, which is 721 * TODO Test first order 4:2:2 to 4:4:4 up conversion method, which is
759 * supposed to give better results. 722 * supposed to give better results.
@@ -762,12 +725,114 @@ static void adv7511_encoder_mode_set(struct drm_encoder *encoder,
762 adv7511->f_tmds = mode->clock; 725 adv7511->f_tmds = mode->clock;
763} 726}
764 727
765static const struct drm_encoder_slave_funcs adv7511_encoder_funcs = { 728/* Connector funcs */
766 .dpms = adv7511_encoder_dpms, 729static struct adv7511 *connector_to_adv7511(struct drm_connector *connector)
767 .mode_valid = adv7511_encoder_mode_valid, 730{
768 .mode_set = adv7511_encoder_mode_set, 731 return container_of(connector, struct adv7511, connector);
769 .detect = adv7511_encoder_detect, 732}
770 .get_modes = adv7511_get_modes, 733
734static int adv7511_connector_get_modes(struct drm_connector *connector)
735{
736 struct adv7511 *adv = connector_to_adv7511(connector);
737
738 return adv7511_get_modes(adv, connector);
739}
740
741static enum drm_mode_status
742adv7511_connector_mode_valid(struct drm_connector *connector,
743 struct drm_display_mode *mode)
744{
745 struct adv7511 *adv = connector_to_adv7511(connector);
746
747 return adv7511_mode_valid(adv, mode);
748}
749
750static struct drm_connector_helper_funcs adv7511_connector_helper_funcs = {
751 .get_modes = adv7511_connector_get_modes,
752 .mode_valid = adv7511_connector_mode_valid,
753};
754
755static enum drm_connector_status
756adv7511_connector_detect(struct drm_connector *connector, bool force)
757{
758 struct adv7511 *adv = connector_to_adv7511(connector);
759
760 return adv7511_detect(adv, connector);
761}
762
763static struct drm_connector_funcs adv7511_connector_funcs = {
764 .dpms = drm_atomic_helper_connector_dpms,
765 .fill_modes = drm_helper_probe_single_connector_modes,
766 .detect = adv7511_connector_detect,
767 .destroy = drm_connector_cleanup,
768 .reset = drm_atomic_helper_connector_reset,
769 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
770 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
771};
772
773/* Bridge funcs */
774static struct adv7511 *bridge_to_adv7511(struct drm_bridge *bridge)
775{
776 return container_of(bridge, struct adv7511, bridge);
777}
778
779static void adv7511_bridge_enable(struct drm_bridge *bridge)
780{
781 struct adv7511 *adv = bridge_to_adv7511(bridge);
782
783 adv7511_power_on(adv);
784}
785
786static void adv7511_bridge_disable(struct drm_bridge *bridge)
787{
788 struct adv7511 *adv = bridge_to_adv7511(bridge);
789
790 adv7511_power_off(adv);
791}
792
793static void adv7511_bridge_mode_set(struct drm_bridge *bridge,
794 struct drm_display_mode *mode,
795 struct drm_display_mode *adj_mode)
796{
797 struct adv7511 *adv = bridge_to_adv7511(bridge);
798
799 adv7511_mode_set(adv, mode, adj_mode);
800}
801
802static int adv7511_bridge_attach(struct drm_bridge *bridge)
803{
804 struct adv7511 *adv = bridge_to_adv7511(bridge);
805 int ret;
806
807 if (!bridge->encoder) {
808 DRM_ERROR("Parent encoder object not found");
809 return -ENODEV;
810 }
811
812 adv->connector.polled = DRM_CONNECTOR_POLL_HPD;
813
814 ret = drm_connector_init(bridge->dev, &adv->connector,
815 &adv7511_connector_funcs,
816 DRM_MODE_CONNECTOR_HDMIA);
817 if (ret) {
818 DRM_ERROR("Failed to initialize connector with drm\n");
819 return ret;
820 }
821 drm_connector_helper_add(&adv->connector,
822 &adv7511_connector_helper_funcs);
823 drm_mode_connector_attach_encoder(&adv->connector, bridge->encoder);
824
825 if (adv->type == ADV7533)
826 ret = adv7533_attach_dsi(adv);
827
828 return ret;
829}
830
831static struct drm_bridge_funcs adv7511_bridge_funcs = {
832 .enable = adv7511_bridge_enable,
833 .disable = adv7511_bridge_disable,
834 .mode_set = adv7511_bridge_mode_set,
835 .attach = adv7511_bridge_attach,
771}; 836};
772 837
773/* ----------------------------------------------------------------------------- 838/* -----------------------------------------------------------------------------
@@ -780,8 +845,6 @@ static int adv7511_parse_dt(struct device_node *np,
780 const char *str; 845 const char *str;
781 int ret; 846 int ret;
782 847
783 memset(config, 0, sizeof(*config));
784
785 of_property_read_u32(np, "adi,input-depth", &config->input_color_depth); 848 of_property_read_u32(np, "adi,input-depth", &config->input_color_depth);
786 if (config->input_color_depth != 8 && config->input_color_depth != 10 && 849 if (config->input_color_depth != 8 && config->input_color_depth != 10 &&
787 config->input_color_depth != 12) 850 config->input_color_depth != 12)
@@ -881,7 +944,17 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
881 adv7511->powered = false; 944 adv7511->powered = false;
882 adv7511->status = connector_status_disconnected; 945 adv7511->status = connector_status_disconnected;
883 946
884 ret = adv7511_parse_dt(dev->of_node, &link_config); 947 if (dev->of_node)
948 adv7511->type = (enum adv7511_type)of_device_get_match_data(dev);
949 else
950 adv7511->type = id->driver_data;
951
952 memset(&link_config, 0, sizeof(link_config));
953
954 if (adv7511->type == ADV7511)
955 ret = adv7511_parse_dt(dev->of_node, &link_config);
956 else
957 ret = adv7533_parse_dt(dev->of_node, adv7511);
885 if (ret) 958 if (ret)
886 return ret; 959 return ret;
887 960
@@ -907,8 +980,12 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
907 return ret; 980 return ret;
908 dev_dbg(dev, "Rev. %d\n", val); 981 dev_dbg(dev, "Rev. %d\n", val);
909 982
910 ret = regmap_register_patch(adv7511->regmap, adv7511_fixed_registers, 983 if (adv7511->type == ADV7511)
911 ARRAY_SIZE(adv7511_fixed_registers)); 984 ret = regmap_register_patch(adv7511->regmap,
985 adv7511_fixed_registers,
986 ARRAY_SIZE(adv7511_fixed_registers));
987 else
988 ret = adv7533_patch_registers(adv7511);
912 if (ret) 989 if (ret)
913 return ret; 990 return ret;
914 991
@@ -923,6 +1000,12 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
923 if (!adv7511->i2c_edid) 1000 if (!adv7511->i2c_edid)
924 return -ENOMEM; 1001 return -ENOMEM;
925 1002
1003 if (adv7511->type == ADV7533) {
1004 ret = adv7533_init_cec(adv7511);
1005 if (ret)
1006 goto err_i2c_unregister_edid;
1007 }
1008
926 if (i2c->irq) { 1009 if (i2c->irq) {
927 init_waitqueue_head(&adv7511->wq); 1010 init_waitqueue_head(&adv7511->wq);
928 1011
@@ -931,7 +1014,7 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
931 IRQF_ONESHOT, dev_name(dev), 1014 IRQF_ONESHOT, dev_name(dev),
932 adv7511); 1015 adv7511);
933 if (ret) 1016 if (ret)
934 goto err_i2c_unregister_device; 1017 goto err_unregister_cec;
935 } 1018 }
936 1019
937 /* CEC is unused for now */ 1020 /* CEC is unused for now */
@@ -942,11 +1025,23 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
942 1025
943 i2c_set_clientdata(i2c, adv7511); 1026 i2c_set_clientdata(i2c, adv7511);
944 1027
945 adv7511_set_link_config(adv7511, &link_config); 1028 if (adv7511->type == ADV7511)
1029 adv7511_set_link_config(adv7511, &link_config);
1030
1031 adv7511->bridge.funcs = &adv7511_bridge_funcs;
1032 adv7511->bridge.of_node = dev->of_node;
1033
1034 ret = drm_bridge_add(&adv7511->bridge);
1035 if (ret) {
1036 dev_err(dev, "failed to add adv7511 bridge\n");
1037 goto err_unregister_cec;
1038 }
946 1039
947 return 0; 1040 return 0;
948 1041
949err_i2c_unregister_device: 1042err_unregister_cec:
1043 adv7533_uninit_cec(adv7511);
1044err_i2c_unregister_edid:
950 i2c_unregister_device(adv7511->i2c_edid); 1045 i2c_unregister_device(adv7511->i2c_edid);
951 1046
952 return ret; 1047 return ret;
@@ -956,66 +1051,71 @@ static int adv7511_remove(struct i2c_client *i2c)
956{ 1051{
957 struct adv7511 *adv7511 = i2c_get_clientdata(i2c); 1052 struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
958 1053
959 i2c_unregister_device(adv7511->i2c_edid); 1054 if (adv7511->type == ADV7533) {
960 1055 adv7533_detach_dsi(adv7511);
961 kfree(adv7511->edid); 1056 adv7533_uninit_cec(adv7511);
962 1057 }
963 return 0;
964}
965
966static int adv7511_encoder_init(struct i2c_client *i2c, struct drm_device *dev,
967 struct drm_encoder_slave *encoder)
968{
969 1058
970 struct adv7511 *adv7511 = i2c_get_clientdata(i2c); 1059 drm_bridge_remove(&adv7511->bridge);
971 1060
972 encoder->slave_priv = adv7511; 1061 i2c_unregister_device(adv7511->i2c_edid);
973 encoder->slave_funcs = &adv7511_encoder_funcs;
974 1062
975 adv7511->encoder = &encoder->base; 1063 kfree(adv7511->edid);
976 1064
977 return 0; 1065 return 0;
978} 1066}
979 1067
980static const struct i2c_device_id adv7511_i2c_ids[] = { 1068static const struct i2c_device_id adv7511_i2c_ids[] = {
981 { "adv7511", 0 }, 1069 { "adv7511", ADV7511 },
982 { "adv7511w", 0 }, 1070 { "adv7511w", ADV7511 },
983 { "adv7513", 0 }, 1071 { "adv7513", ADV7511 },
1072#ifdef CONFIG_DRM_I2C_ADV7533
1073 { "adv7533", ADV7533 },
1074#endif
984 { } 1075 { }
985}; 1076};
986MODULE_DEVICE_TABLE(i2c, adv7511_i2c_ids); 1077MODULE_DEVICE_TABLE(i2c, adv7511_i2c_ids);
987 1078
988static const struct of_device_id adv7511_of_ids[] = { 1079static const struct of_device_id adv7511_of_ids[] = {
989 { .compatible = "adi,adv7511", }, 1080 { .compatible = "adi,adv7511", .data = (void *)ADV7511 },
990 { .compatible = "adi,adv7511w", }, 1081 { .compatible = "adi,adv7511w", .data = (void *)ADV7511 },
991 { .compatible = "adi,adv7513", }, 1082 { .compatible = "adi,adv7513", .data = (void *)ADV7511 },
1083#ifdef CONFIG_DRM_I2C_ADV7533
1084 { .compatible = "adi,adv7533", .data = (void *)ADV7533 },
1085#endif
992 { } 1086 { }
993}; 1087};
994MODULE_DEVICE_TABLE(of, adv7511_of_ids); 1088MODULE_DEVICE_TABLE(of, adv7511_of_ids);
995 1089
996static struct drm_i2c_encoder_driver adv7511_driver = { 1090static struct mipi_dsi_driver adv7533_dsi_driver = {
997 .i2c_driver = { 1091 .driver.name = "adv7533",
998 .driver = { 1092};
999 .name = "adv7511",
1000 .of_match_table = adv7511_of_ids,
1001 },
1002 .id_table = adv7511_i2c_ids,
1003 .probe = adv7511_probe,
1004 .remove = adv7511_remove,
1005 },
1006 1093
1007 .encoder_init = adv7511_encoder_init, 1094static struct i2c_driver adv7511_driver = {
1095 .driver = {
1096 .name = "adv7511",
1097 .of_match_table = adv7511_of_ids,
1098 },
1099 .id_table = adv7511_i2c_ids,
1100 .probe = adv7511_probe,
1101 .remove = adv7511_remove,
1008}; 1102};
1009 1103
1010static int __init adv7511_init(void) 1104static int __init adv7511_init(void)
1011{ 1105{
1012 return drm_i2c_encoder_register(THIS_MODULE, &adv7511_driver); 1106 if (IS_ENABLED(CONFIG_DRM_MIPI_DSI))
1107 mipi_dsi_driver_register(&adv7533_dsi_driver);
1108
1109 return i2c_add_driver(&adv7511_driver);
1013} 1110}
1014module_init(adv7511_init); 1111module_init(adv7511_init);
1015 1112
1016static void __exit adv7511_exit(void) 1113static void __exit adv7511_exit(void)
1017{ 1114{
1018 drm_i2c_encoder_unregister(&adv7511_driver); 1115 i2c_del_driver(&adv7511_driver);
1116
1117 if (IS_ENABLED(CONFIG_DRM_MIPI_DSI))
1118 mipi_dsi_driver_unregister(&adv7533_dsi_driver);
1019} 1119}
1020module_exit(adv7511_exit); 1120module_exit(adv7511_exit);
1021 1121
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7533.c b/drivers/gpu/drm/bridge/adv7511/adv7533.c
new file mode 100644
index 000000000000..5eebd15899b1
--- /dev/null
+++ b/drivers/gpu/drm/bridge/adv7511/adv7533.c
@@ -0,0 +1,265 @@
1/*
2 * Copyright (c) 2016, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include <linux/of_graph.h>
15
16#include "adv7511.h"
17
18static const struct reg_sequence adv7533_fixed_registers[] = {
19 { 0x16, 0x20 },
20 { 0x9a, 0xe0 },
21 { 0xba, 0x70 },
22 { 0xde, 0x82 },
23 { 0xe4, 0x40 },
24 { 0xe5, 0x80 },
25};
26
27static const struct reg_sequence adv7533_cec_fixed_registers[] = {
28 { 0x15, 0xd0 },
29 { 0x17, 0xd0 },
30 { 0x24, 0x20 },
31 { 0x57, 0x11 },
32};
33
34static const struct regmap_config adv7533_cec_regmap_config = {
35 .reg_bits = 8,
36 .val_bits = 8,
37
38 .max_register = 0xff,
39 .cache_type = REGCACHE_RBTREE,
40};
41
42static void adv7511_dsi_config_timing_gen(struct adv7511 *adv)
43{
44 struct mipi_dsi_device *dsi = adv->dsi;
45 struct drm_display_mode *mode = &adv->curr_mode;
46 unsigned int hsw, hfp, hbp, vsw, vfp, vbp;
47 u8 clock_div_by_lanes[] = { 6, 4, 3 }; /* 2, 3, 4 lanes */
48
49 hsw = mode->hsync_end - mode->hsync_start;
50 hfp = mode->hsync_start - mode->hdisplay;
51 hbp = mode->htotal - mode->hsync_end;
52 vsw = mode->vsync_end - mode->vsync_start;
53 vfp = mode->vsync_start - mode->vdisplay;
54 vbp = mode->vtotal - mode->vsync_end;
55
56 /* set pixel clock divider mode */
57 regmap_write(adv->regmap_cec, 0x16,
58 clock_div_by_lanes[dsi->lanes - 2] << 3);
59
60 /* horizontal porch params */
61 regmap_write(adv->regmap_cec, 0x28, mode->htotal >> 4);
62 regmap_write(adv->regmap_cec, 0x29, (mode->htotal << 4) & 0xff);
63 regmap_write(adv->regmap_cec, 0x2a, hsw >> 4);
64 regmap_write(adv->regmap_cec, 0x2b, (hsw << 4) & 0xff);
65 regmap_write(adv->regmap_cec, 0x2c, hfp >> 4);
66 regmap_write(adv->regmap_cec, 0x2d, (hfp << 4) & 0xff);
67 regmap_write(adv->regmap_cec, 0x2e, hbp >> 4);
68 regmap_write(adv->regmap_cec, 0x2f, (hbp << 4) & 0xff);
69
70 /* vertical porch params */
71 regmap_write(adv->regmap_cec, 0x30, mode->vtotal >> 4);
72 regmap_write(adv->regmap_cec, 0x31, (mode->vtotal << 4) & 0xff);
73 regmap_write(adv->regmap_cec, 0x32, vsw >> 4);
74 regmap_write(adv->regmap_cec, 0x33, (vsw << 4) & 0xff);
75 regmap_write(adv->regmap_cec, 0x34, vfp >> 4);
76 regmap_write(adv->regmap_cec, 0x35, (vfp << 4) & 0xff);
77 regmap_write(adv->regmap_cec, 0x36, vbp >> 4);
78 regmap_write(adv->regmap_cec, 0x37, (vbp << 4) & 0xff);
79}
80
81void adv7533_dsi_power_on(struct adv7511 *adv)
82{
83 struct mipi_dsi_device *dsi = adv->dsi;
84
85 if (adv->use_timing_gen)
86 adv7511_dsi_config_timing_gen(adv);
87
88 /* set number of dsi lanes */
89 regmap_write(adv->regmap_cec, 0x1c, dsi->lanes << 4);
90
91 if (adv->use_timing_gen) {
92 /* reset internal timing generator */
93 regmap_write(adv->regmap_cec, 0x27, 0xcb);
94 regmap_write(adv->regmap_cec, 0x27, 0x8b);
95 regmap_write(adv->regmap_cec, 0x27, 0xcb);
96 } else {
97 /* disable internal timing generator */
98 regmap_write(adv->regmap_cec, 0x27, 0x0b);
99 }
100
101 /* enable hdmi */
102 regmap_write(adv->regmap_cec, 0x03, 0x89);
103 /* disable test mode */
104 regmap_write(adv->regmap_cec, 0x55, 0x00);
105
106 regmap_register_patch(adv->regmap_cec, adv7533_cec_fixed_registers,
107 ARRAY_SIZE(adv7533_cec_fixed_registers));
108}
109
110void adv7533_dsi_power_off(struct adv7511 *adv)
111{
112 /* disable hdmi */
113 regmap_write(adv->regmap_cec, 0x03, 0x0b);
114 /* disable internal timing generator */
115 regmap_write(adv->regmap_cec, 0x27, 0x0b);
116}
117
118void adv7533_mode_set(struct adv7511 *adv, struct drm_display_mode *mode)
119{
120 struct mipi_dsi_device *dsi = adv->dsi;
121 int lanes, ret;
122
123 if (adv->num_dsi_lanes != 4)
124 return;
125
126 if (mode->clock > 80000)
127 lanes = 4;
128 else
129 lanes = 3;
130
131 if (lanes != dsi->lanes) {
132 mipi_dsi_detach(dsi);
133 dsi->lanes = lanes;
134 ret = mipi_dsi_attach(dsi);
135 if (ret)
136 dev_err(&dsi->dev, "failed to change host lanes\n");
137 }
138}
139
140int adv7533_patch_registers(struct adv7511 *adv)
141{
142 return regmap_register_patch(adv->regmap,
143 adv7533_fixed_registers,
144 ARRAY_SIZE(adv7533_fixed_registers));
145}
146
147void adv7533_uninit_cec(struct adv7511 *adv)
148{
149 i2c_unregister_device(adv->i2c_cec);
150}
151
152static const int cec_i2c_addr = 0x78;
153
154int adv7533_init_cec(struct adv7511 *adv)
155{
156 int ret;
157
158 adv->i2c_cec = i2c_new_dummy(adv->i2c_main->adapter, cec_i2c_addr >> 1);
159 if (!adv->i2c_cec)
160 return -ENOMEM;
161
162 adv->regmap_cec = devm_regmap_init_i2c(adv->i2c_cec,
163 &adv7533_cec_regmap_config);
164 if (IS_ERR(adv->regmap_cec)) {
165 ret = PTR_ERR(adv->regmap_cec);
166 goto err;
167 }
168
169 ret = regmap_register_patch(adv->regmap_cec,
170 adv7533_cec_fixed_registers,
171 ARRAY_SIZE(adv7533_cec_fixed_registers));
172 if (ret)
173 goto err;
174
175 return 0;
176err:
177 adv7533_uninit_cec(adv);
178 return ret;
179}
180
181int adv7533_attach_dsi(struct adv7511 *adv)
182{
183 struct device *dev = &adv->i2c_main->dev;
184 struct mipi_dsi_host *host;
185 struct mipi_dsi_device *dsi;
186 int ret = 0;
187 const struct mipi_dsi_device_info info = { .type = "adv7533",
188 .channel = 0,
189 .node = NULL,
190 };
191
192 host = of_find_mipi_dsi_host_by_node(adv->host_node);
193 if (!host) {
194 dev_err(dev, "failed to find dsi host\n");
195 return -EPROBE_DEFER;
196 }
197
198 dsi = mipi_dsi_device_register_full(host, &info);
199 if (IS_ERR(dsi)) {
200 dev_err(dev, "failed to create dsi device\n");
201 ret = PTR_ERR(dsi);
202 goto err_dsi_device;
203 }
204
205 adv->dsi = dsi;
206
207 dsi->lanes = adv->num_dsi_lanes;
208 dsi->format = MIPI_DSI_FMT_RGB888;
209 dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
210 MIPI_DSI_MODE_EOT_PACKET | MIPI_DSI_MODE_VIDEO_HSE;
211
212 ret = mipi_dsi_attach(dsi);
213 if (ret < 0) {
214 dev_err(dev, "failed to attach dsi to host\n");
215 goto err_dsi_attach;
216 }
217
218 return 0;
219
220err_dsi_attach:
221 mipi_dsi_device_unregister(dsi);
222err_dsi_device:
223 return ret;
224}
225
226void adv7533_detach_dsi(struct adv7511 *adv)
227{
228 mipi_dsi_detach(adv->dsi);
229 mipi_dsi_device_unregister(adv->dsi);
230}
231
232int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv)
233{
234 u32 num_lanes;
235 struct device_node *endpoint;
236
237 of_property_read_u32(np, "adi,dsi-lanes", &num_lanes);
238
239 if (num_lanes < 1 || num_lanes > 4)
240 return -EINVAL;
241
242 adv->num_dsi_lanes = num_lanes;
243
244 endpoint = of_graph_get_next_endpoint(np, NULL);
245 if (!endpoint)
246 return -ENODEV;
247
248 adv->host_node = of_graph_get_remote_port_parent(endpoint);
249 if (!adv->host_node) {
250 of_node_put(endpoint);
251 return -ENODEV;
252 }
253
254 of_node_put(endpoint);
255 of_node_put(adv->host_node);
256
257 adv->use_timing_gen = !of_property_read_bool(np,
258 "adi,disable-timing-generator");
259
260 /* TODO: Check if these need to be parsed by DT or not */
261 adv->rgb = true;
262 adv->embedded_sync = false;
263
264 return 0;
265}
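
[Editor's note, not part of the patch] In adv7511_dsi_config_timing_gen() above, each horizontal/vertical timing value is written to a pair of 8-bit CEC-map registers as "value >> 4" followed by "(value << 4) & 0xff", i.e. a 12-bit quantity split into its upper 8 bits and its low nibble (placed in the high nibble of the second register). A standalone sketch of that packing, assuming the value fits in 12 bits as these timings do; pack12()/unpack12() are illustrative helpers, not driver API.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    static void pack12(uint16_t val, uint8_t *hi, uint8_t *lo)
    {
            *hi = val >> 4;             /* bits 11..4 */
            *lo = (val << 4) & 0xff;    /* bits 3..0, left-aligned in the low register */
    }

    static uint16_t unpack12(uint8_t hi, uint8_t lo)
    {
            return ((uint16_t)hi << 4) | (lo >> 4);
    }

    int main(void)
    {
            uint16_t htotal = 2200;     /* e.g. a 1080p CEA htotal */
            uint8_t hi, lo;

            pack12(htotal, &hi, &lo);
            printf("reg 0x28 <- 0x%02x, reg 0x29 <- 0x%02x\n", hi, lo);
            assert(unpack12(hi, lo) == htotal);
            return 0;
    }
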
diff --git a/drivers/gpu/drm/bridge/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix-anx78xx.c
index d087b054c360..f9f03bcba0af 100644
--- a/drivers/gpu/drm/bridge/analogix-anx78xx.c
+++ b/drivers/gpu/drm/bridge/analogix-anx78xx.c
@@ -986,16 +986,8 @@ unlock:
986 return num_modes; 986 return num_modes;
987} 987}
988 988
989static struct drm_encoder *anx78xx_best_encoder(struct drm_connector *connector)
990{
991 struct anx78xx *anx78xx = connector_to_anx78xx(connector);
992
993 return anx78xx->bridge.encoder;
994}
995
996static const struct drm_connector_helper_funcs anx78xx_connector_helper_funcs = { 989static const struct drm_connector_helper_funcs anx78xx_connector_helper_funcs = {
997 .get_modes = anx78xx_get_modes, 990 .get_modes = anx78xx_get_modes,
998 .best_encoder = anx78xx_best_encoder,
999}; 991};
1000 992
1001static enum drm_connector_status anx78xx_detect(struct drm_connector *connector, 993static enum drm_connector_status anx78xx_detect(struct drm_connector *connector,
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 7699597070a1..32715daf73cb 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -938,7 +938,7 @@ int analogix_dp_get_modes(struct drm_connector *connector)
938 num_modes += drm_panel_get_modes(dp->plat_data->panel); 938 num_modes += drm_panel_get_modes(dp->plat_data->panel);
939 939
940 if (dp->plat_data->get_modes) 940 if (dp->plat_data->get_modes)
941 num_modes += dp->plat_data->get_modes(dp->plat_data); 941 num_modes += dp->plat_data->get_modes(dp->plat_data, connector);
942 942
943 return num_modes; 943 return num_modes;
944} 944}
@@ -1208,6 +1208,7 @@ static int analogix_dp_dt_parse_pdata(struct analogix_dp_device *dp)
1208 1208
1209 switch (dp->plat_data->dev_type) { 1209 switch (dp->plat_data->dev_type) {
1210 case RK3288_DP: 1210 case RK3288_DP:
1211 case RK3399_EDP:
1211 /* 1212 /*
1212 * Like Rk3288 DisplayPort TRM indicate that "Main link 1213 * Like Rk3288 DisplayPort TRM indicate that "Main link
1213 * containing 4 physical lanes of 2.7/1.62 Gbps/lane". 1214 * containing 4 physical lanes of 2.7/1.62 Gbps/lane".
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
index f09275d40f70..b45638043ec4 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.h
@@ -127,10 +127,10 @@ enum analog_power_block {
127}; 127};
128 128
129enum dp_irq_type { 129enum dp_irq_type {
130 DP_IRQ_TYPE_HP_CABLE_IN, 130 DP_IRQ_TYPE_HP_CABLE_IN = BIT(0),
131 DP_IRQ_TYPE_HP_CABLE_OUT, 131 DP_IRQ_TYPE_HP_CABLE_OUT = BIT(1),
132 DP_IRQ_TYPE_HP_CHANGE, 132 DP_IRQ_TYPE_HP_CHANGE = BIT(2),
133 DP_IRQ_TYPE_UNKNOWN, 133 DP_IRQ_TYPE_UNKNOWN = BIT(3),
134}; 134};
135 135
136struct video_info { 136struct video_info {
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
index 49205ef02be3..48030f0cf497 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
@@ -74,8 +74,12 @@ void analogix_dp_init_analog_param(struct analogix_dp_device *dp)
74 reg = SEL_24M | TX_DVDD_BIT_1_0625V; 74 reg = SEL_24M | TX_DVDD_BIT_1_0625V;
75 writel(reg, dp->reg_base + ANALOGIX_DP_ANALOG_CTL_2); 75 writel(reg, dp->reg_base + ANALOGIX_DP_ANALOG_CTL_2);
76 76
77 if (dp->plat_data && (dp->plat_data->dev_type == RK3288_DP)) { 77 if (dp->plat_data && is_rockchip(dp->plat_data->dev_type)) {
78 writel(REF_CLK_24M, dp->reg_base + ANALOGIX_DP_PLL_REG_1); 78 reg = REF_CLK_24M;
79 if (dp->plat_data->dev_type == RK3288_DP)
80 reg ^= REF_CLK_MASK;
81
82 writel(reg, dp->reg_base + ANALOGIX_DP_PLL_REG_1);
79 writel(0x95, dp->reg_base + ANALOGIX_DP_PLL_REG_2); 83 writel(0x95, dp->reg_base + ANALOGIX_DP_PLL_REG_2);
80 writel(0x40, dp->reg_base + ANALOGIX_DP_PLL_REG_3); 84 writel(0x40, dp->reg_base + ANALOGIX_DP_PLL_REG_3);
81 writel(0x58, dp->reg_base + ANALOGIX_DP_PLL_REG_4); 85 writel(0x58, dp->reg_base + ANALOGIX_DP_PLL_REG_4);
@@ -244,7 +248,7 @@ void analogix_dp_set_analog_power_down(struct analogix_dp_device *dp,
244 u32 reg; 248 u32 reg;
245 u32 phy_pd_addr = ANALOGIX_DP_PHY_PD; 249 u32 phy_pd_addr = ANALOGIX_DP_PHY_PD;
246 250
247 if (dp->plat_data && (dp->plat_data->dev_type == RK3288_DP)) 251 if (dp->plat_data && is_rockchip(dp->plat_data->dev_type))
248 phy_pd_addr = ANALOGIX_DP_PD; 252 phy_pd_addr = ANALOGIX_DP_PD;
249 253
250 switch (block) { 254 switch (block) {
@@ -448,7 +452,7 @@ void analogix_dp_init_aux(struct analogix_dp_device *dp)
448 analogix_dp_reset_aux(dp); 452 analogix_dp_reset_aux(dp);
449 453
450 /* Disable AUX transaction H/W retry */ 454 /* Disable AUX transaction H/W retry */
451 if (dp->plat_data && (dp->plat_data->dev_type == RK3288_DP)) 455 if (dp->plat_data && is_rockchip(dp->plat_data->dev_type))
452 reg = AUX_BIT_PERIOD_EXPECTED_DELAY(0) | 456 reg = AUX_BIT_PERIOD_EXPECTED_DELAY(0) |
453 AUX_HW_RETRY_COUNT_SEL(3) | 457 AUX_HW_RETRY_COUNT_SEL(3) |
454 AUX_HW_RETRY_INTERVAL_600_MICROSECONDS; 458 AUX_HW_RETRY_INTERVAL_600_MICROSECONDS;
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h
index 337912b0aeab..cdcc6c5add5e 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.h
@@ -163,8 +163,9 @@
163#define HSYNC_POLARITY_CFG (0x1 << 0) 163#define HSYNC_POLARITY_CFG (0x1 << 0)
164 164
165/* ANALOGIX_DP_PLL_REG_1 */ 165/* ANALOGIX_DP_PLL_REG_1 */
166#define REF_CLK_24M (0x1 << 1) 166#define REF_CLK_24M (0x1 << 0)
167#define REF_CLK_27M (0x0 << 1) 167#define REF_CLK_27M (0x0 << 0)
168#define REF_CLK_MASK (0x1 << 0)
168 169
169/* ANALOGIX_DP_LANE_MAP */ 170/* ANALOGIX_DP_LANE_MAP */
170#define LANE3_MAP_LOGIC_LANE_0 (0x0 << 6) 171#define LANE3_MAP_LOGIC_LANE_0 (0x0 << 6)
diff --git a/drivers/gpu/drm/bridge/dw-hdmi.c b/drivers/gpu/drm/bridge/dw-hdmi.c
index c9d941283d30..77ab47341658 100644
--- a/drivers/gpu/drm/bridge/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/dw-hdmi.c
@@ -1476,15 +1476,6 @@ dw_hdmi_connector_mode_valid(struct drm_connector *connector,
1476 return mode_status; 1476 return mode_status;
1477} 1477}
1478 1478
1479static struct drm_encoder *dw_hdmi_connector_best_encoder(struct drm_connector
1480 *connector)
1481{
1482 struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi,
1483 connector);
1484
1485 return hdmi->encoder;
1486}
1487
1488static void dw_hdmi_connector_destroy(struct drm_connector *connector) 1479static void dw_hdmi_connector_destroy(struct drm_connector *connector)
1489{ 1480{
1490 drm_connector_unregister(connector); 1481 drm_connector_unregister(connector);
@@ -1504,14 +1495,6 @@ static void dw_hdmi_connector_force(struct drm_connector *connector)
1504} 1495}
1505 1496
1506static const struct drm_connector_funcs dw_hdmi_connector_funcs = { 1497static const struct drm_connector_funcs dw_hdmi_connector_funcs = {
1507 .dpms = drm_helper_connector_dpms,
1508 .fill_modes = drm_helper_probe_single_connector_modes,
1509 .detect = dw_hdmi_connector_detect,
1510 .destroy = dw_hdmi_connector_destroy,
1511 .force = dw_hdmi_connector_force,
1512};
1513
1514static const struct drm_connector_funcs dw_hdmi_atomic_connector_funcs = {
1515 .dpms = drm_atomic_helper_connector_dpms, 1498 .dpms = drm_atomic_helper_connector_dpms,
1516 .fill_modes = drm_helper_probe_single_connector_modes, 1499 .fill_modes = drm_helper_probe_single_connector_modes,
1517 .detect = dw_hdmi_connector_detect, 1500 .detect = dw_hdmi_connector_detect,
@@ -1525,7 +1508,7 @@ static const struct drm_connector_funcs dw_hdmi_atomic_connector_funcs = {
1525static const struct drm_connector_helper_funcs dw_hdmi_connector_helper_funcs = { 1508static const struct drm_connector_helper_funcs dw_hdmi_connector_helper_funcs = {
1526 .get_modes = dw_hdmi_connector_get_modes, 1509 .get_modes = dw_hdmi_connector_get_modes,
1527 .mode_valid = dw_hdmi_connector_mode_valid, 1510 .mode_valid = dw_hdmi_connector_mode_valid,
1528 .best_encoder = dw_hdmi_connector_best_encoder, 1511 .best_encoder = drm_atomic_helper_best_encoder,
1529}; 1512};
1530 1513
1531static const struct drm_bridge_funcs dw_hdmi_bridge_funcs = { 1514static const struct drm_bridge_funcs dw_hdmi_bridge_funcs = {
@@ -1643,14 +1626,9 @@ static int dw_hdmi_register(struct drm_device *drm, struct dw_hdmi *hdmi)
1643 drm_connector_helper_add(&hdmi->connector, 1626 drm_connector_helper_add(&hdmi->connector,
1644 &dw_hdmi_connector_helper_funcs); 1627 &dw_hdmi_connector_helper_funcs);
1645 1628
1646 if (drm_core_check_feature(drm, DRIVER_ATOMIC)) 1629 drm_connector_init(drm, &hdmi->connector,
1647 drm_connector_init(drm, &hdmi->connector, 1630 &dw_hdmi_connector_funcs,
1648 &dw_hdmi_atomic_connector_funcs, 1631 DRM_MODE_CONNECTOR_HDMIA);
1649 DRM_MODE_CONNECTOR_HDMIA);
1650 else
1651 drm_connector_init(drm, &hdmi->connector,
1652 &dw_hdmi_connector_funcs,
1653 DRM_MODE_CONNECTOR_HDMIA);
1654 1632
1655 drm_mode_connector_attach_encoder(&hdmi->connector, encoder); 1633 drm_mode_connector_attach_encoder(&hdmi->connector, encoder);
1656 1634
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index 7ecd59f70b8e..93f3dacf9e27 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -235,16 +235,8 @@ out:
235 return num_modes; 235 return num_modes;
236} 236}
237 237
238static struct drm_encoder *ptn3460_best_encoder(struct drm_connector *connector)
239{
240 struct ptn3460_bridge *ptn_bridge = connector_to_ptn3460(connector);
241
242 return ptn_bridge->bridge.encoder;
243}
244
245static const struct drm_connector_helper_funcs ptn3460_connector_helper_funcs = { 238static const struct drm_connector_helper_funcs ptn3460_connector_helper_funcs = {
246 .get_modes = ptn3460_get_modes, 239 .get_modes = ptn3460_get_modes,
247 .best_encoder = ptn3460_best_encoder,
248}; 240};
249 241
250static enum drm_connector_status ptn3460_detect(struct drm_connector *connector, 242static enum drm_connector_status ptn3460_detect(struct drm_connector *connector,
diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index be881e9fef8f..583b8ce614e3 100644
--- a/drivers/gpu/drm/bridge/parade-ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
@@ -474,18 +474,8 @@ static int ps8622_get_modes(struct drm_connector *connector)
474 return drm_panel_get_modes(ps8622->panel); 474 return drm_panel_get_modes(ps8622->panel);
475} 475}
476 476
477static struct drm_encoder *ps8622_best_encoder(struct drm_connector *connector)
478{
479 struct ps8622_bridge *ps8622;
480
481 ps8622 = connector_to_ps8622(connector);
482
483 return ps8622->bridge.encoder;
484}
485
486static const struct drm_connector_helper_funcs ps8622_connector_helper_funcs = { 477static const struct drm_connector_helper_funcs ps8622_connector_helper_funcs = {
487 .get_modes = ps8622_get_modes, 478 .get_modes = ps8622_get_modes,
488 .best_encoder = ps8622_best_encoder,
489}; 479};
490 480
491static enum drm_connector_status ps8622_detect(struct drm_connector *connector, 481static enum drm_connector_status ps8622_detect(struct drm_connector *connector,
@@ -646,9 +636,7 @@ static int ps8622_remove(struct i2c_client *client)
646{ 636{
647 struct ps8622_bridge *ps8622 = i2c_get_clientdata(client); 637 struct ps8622_bridge *ps8622 = i2c_get_clientdata(client);
648 638
649 if (ps8622->bl) 639 backlight_device_unregister(ps8622->bl);
650 backlight_device_unregister(ps8622->bl);
651
652 drm_bridge_remove(&ps8622->bridge); 640 drm_bridge_remove(&ps8622->bridge);
653 641
654 return 0; 642 return 0;
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
new file mode 100644
index 000000000000..9126d0306ab5
--- /dev/null
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -0,0 +1,467 @@
1/*
2 * Copyright (C) 2016 Atmel
3 * Bo Shen <voice.shen@atmel.com>
4 *
5 * Authors: Bo Shen <voice.shen@atmel.com>
6 * Boris Brezillon <boris.brezillon@free-electrons.com>
7 * Wu, Songjun <Songjun.Wu@atmel.com>
8 *
9 *
10 * Copyright (C) 2010-2011 Freescale Semiconductor, Inc. All Rights Reserved.
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 */
22
23#include <linux/gpio/consumer.h>
24#include <linux/i2c.h>
25#include <linux/module.h>
26#include <linux/regmap.h>
27
28#include <drm/drmP.h>
29#include <drm/drm_atomic_helper.h>
30#include <drm/drm_crtc_helper.h>
31#include <drm/drm_edid.h>
32
33#define SII902X_TPI_VIDEO_DATA 0x0
34
35#define SII902X_TPI_PIXEL_REPETITION 0x8
36#define SII902X_TPI_AVI_PIXEL_REP_BUS_24BIT BIT(5)
37#define SII902X_TPI_AVI_PIXEL_REP_RISING_EDGE BIT(4)
38#define SII902X_TPI_AVI_PIXEL_REP_4X 3
39#define SII902X_TPI_AVI_PIXEL_REP_2X 1
40#define SII902X_TPI_AVI_PIXEL_REP_NONE 0
41#define SII902X_TPI_CLK_RATIO_HALF (0 << 6)
42#define SII902X_TPI_CLK_RATIO_1X (1 << 6)
43#define SII902X_TPI_CLK_RATIO_2X (2 << 6)
44#define SII902X_TPI_CLK_RATIO_4X (3 << 6)
45
46#define SII902X_TPI_AVI_IN_FORMAT 0x9
47#define SII902X_TPI_AVI_INPUT_BITMODE_12BIT BIT(7)
48#define SII902X_TPI_AVI_INPUT_DITHER BIT(6)
49#define SII902X_TPI_AVI_INPUT_RANGE_LIMITED (2 << 2)
50#define SII902X_TPI_AVI_INPUT_RANGE_FULL (1 << 2)
51#define SII902X_TPI_AVI_INPUT_RANGE_AUTO (0 << 2)
52#define SII902X_TPI_AVI_INPUT_COLORSPACE_BLACK (3 << 0)
53#define SII902X_TPI_AVI_INPUT_COLORSPACE_YUV422 (2 << 0)
54#define SII902X_TPI_AVI_INPUT_COLORSPACE_YUV444 (1 << 0)
55#define SII902X_TPI_AVI_INPUT_COLORSPACE_RGB (0 << 0)
56
57#define SII902X_TPI_AVI_INFOFRAME 0x0c
58
59#define SII902X_SYS_CTRL_DATA 0x1a
60#define SII902X_SYS_CTRL_PWR_DWN BIT(4)
61#define SII902X_SYS_CTRL_AV_MUTE BIT(3)
62#define SII902X_SYS_CTRL_DDC_BUS_REQ BIT(2)
63#define SII902X_SYS_CTRL_DDC_BUS_GRTD BIT(1)
64#define SII902X_SYS_CTRL_OUTPUT_MODE BIT(0)
65#define SII902X_SYS_CTRL_OUTPUT_HDMI 1
66#define SII902X_SYS_CTRL_OUTPUT_DVI 0
67
68#define SII902X_REG_CHIPID(n) (0x1b + (n))
69
70#define SII902X_PWR_STATE_CTRL 0x1e
71#define SII902X_AVI_POWER_STATE_MSK GENMASK(1, 0)
72#define SII902X_AVI_POWER_STATE_D(l) ((l) & SII902X_AVI_POWER_STATE_MSK)
73
74#define SII902X_INT_ENABLE 0x3c
75#define SII902X_INT_STATUS 0x3d
76#define SII902X_HOTPLUG_EVENT BIT(0)
77#define SII902X_PLUGGED_STATUS BIT(2)
78
79#define SII902X_REG_TPI_RQB 0xc7
80
81#define SII902X_I2C_BUS_ACQUISITION_TIMEOUT_MS 500
82
83struct sii902x {
84 struct i2c_client *i2c;
85 struct regmap *regmap;
86 struct drm_bridge bridge;
87 struct drm_connector connector;
88 struct gpio_desc *reset_gpio;
89};
90
91static inline struct sii902x *bridge_to_sii902x(struct drm_bridge *bridge)
92{
93 return container_of(bridge, struct sii902x, bridge);
94}
95
96static inline struct sii902x *connector_to_sii902x(struct drm_connector *con)
97{
98 return container_of(con, struct sii902x, connector);
99}
100
101static void sii902x_reset(struct sii902x *sii902x)
102{
103 if (!sii902x->reset_gpio)
104 return;
105
106 gpiod_set_value(sii902x->reset_gpio, 1);
107
108 /* The datasheet says treset-min = 100us. Make it 150us to be sure. */
109 usleep_range(150, 200);
110
111 gpiod_set_value(sii902x->reset_gpio, 0);
112}
113
114static enum drm_connector_status
115sii902x_connector_detect(struct drm_connector *connector, bool force)
116{
117 struct sii902x *sii902x = connector_to_sii902x(connector);
118 unsigned int status;
119
120 regmap_read(sii902x->regmap, SII902X_INT_STATUS, &status);
121
122 return (status & SII902X_PLUGGED_STATUS) ?
123 connector_status_connected : connector_status_disconnected;
124}
125
126static const struct drm_connector_funcs sii902x_connector_funcs = {
127 .dpms = drm_atomic_helper_connector_dpms,
128 .detect = sii902x_connector_detect,
129 .fill_modes = drm_helper_probe_single_connector_modes,
130 .destroy = drm_connector_cleanup,
131 .reset = drm_atomic_helper_connector_reset,
132 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
133 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
134};
135
136static int sii902x_get_modes(struct drm_connector *connector)
137{
138 struct sii902x *sii902x = connector_to_sii902x(connector);
139 struct regmap *regmap = sii902x->regmap;
140 u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
141 unsigned long timeout;
142 unsigned int status;
143 struct edid *edid;
144 int num = 0;
145 int ret;
146
147 ret = regmap_update_bits(regmap, SII902X_SYS_CTRL_DATA,
148 SII902X_SYS_CTRL_DDC_BUS_REQ,
149 SII902X_SYS_CTRL_DDC_BUS_REQ);
150 if (ret)
151 return ret;
152
153 timeout = jiffies +
154 msecs_to_jiffies(SII902X_I2C_BUS_ACQUISITION_TIMEOUT_MS);
155 do {
156 ret = regmap_read(regmap, SII902X_SYS_CTRL_DATA, &status);
157 if (ret)
158 return ret;
159 } while (!(status & SII902X_SYS_CTRL_DDC_BUS_GRTD) &&
160 time_before(jiffies, timeout));
161
162 if (!(status & SII902X_SYS_CTRL_DDC_BUS_GRTD)) {
163 dev_err(&sii902x->i2c->dev, "failed to acquire the i2c bus");
164 return -ETIMEDOUT;
165 }
166
167 ret = regmap_write(regmap, SII902X_SYS_CTRL_DATA, status);
168 if (ret)
169 return ret;
170
171 edid = drm_get_edid(connector, sii902x->i2c->adapter);
172 drm_mode_connector_update_edid_property(connector, edid);
173 if (edid) {
174 num = drm_add_edid_modes(connector, edid);
175 kfree(edid);
176 }
177
178 ret = drm_display_info_set_bus_formats(&connector->display_info,
179 &bus_format, 1);
180 if (ret)
181 return ret;
182
183 ret = regmap_read(regmap, SII902X_SYS_CTRL_DATA, &status);
184 if (ret)
185 return ret;
186
187 ret = regmap_update_bits(regmap, SII902X_SYS_CTRL_DATA,
188 SII902X_SYS_CTRL_DDC_BUS_REQ |
189 SII902X_SYS_CTRL_DDC_BUS_GRTD, 0);
190 if (ret)
191 return ret;
192
193 timeout = jiffies +
194 msecs_to_jiffies(SII902X_I2C_BUS_ACQUISITION_TIMEOUT_MS);
195 do {
196 ret = regmap_read(regmap, SII902X_SYS_CTRL_DATA, &status);
197 if (ret)
198 return ret;
199 } while (status & (SII902X_SYS_CTRL_DDC_BUS_REQ |
200 SII902X_SYS_CTRL_DDC_BUS_GRTD) &&
201 time_before(jiffies, timeout));
202
203 if (status & (SII902X_SYS_CTRL_DDC_BUS_REQ |
204 SII902X_SYS_CTRL_DDC_BUS_GRTD)) {
205 dev_err(&sii902x->i2c->dev, "failed to release the i2c bus");
206 return -ETIMEDOUT;
207 }
208
209 return num;
210}
211
212static enum drm_mode_status sii902x_mode_valid(struct drm_connector *connector,
213 struct drm_display_mode *mode)
214{
215 /* TODO: check mode */
216
217 return MODE_OK;
218}
219
220static const struct drm_connector_helper_funcs sii902x_connector_helper_funcs = {
221 .get_modes = sii902x_get_modes,
222 .mode_valid = sii902x_mode_valid,
223};
224
225static void sii902x_bridge_disable(struct drm_bridge *bridge)
226{
227 struct sii902x *sii902x = bridge_to_sii902x(bridge);
228
229 regmap_update_bits(sii902x->regmap, SII902X_SYS_CTRL_DATA,
230 SII902X_SYS_CTRL_PWR_DWN,
231 SII902X_SYS_CTRL_PWR_DWN);
232}
233
234static void sii902x_bridge_enable(struct drm_bridge *bridge)
235{
236 struct sii902x *sii902x = bridge_to_sii902x(bridge);
237
238 regmap_update_bits(sii902x->regmap, SII902X_PWR_STATE_CTRL,
239 SII902X_AVI_POWER_STATE_MSK,
240 SII902X_AVI_POWER_STATE_D(0));
241 regmap_update_bits(sii902x->regmap, SII902X_SYS_CTRL_DATA,
242 SII902X_SYS_CTRL_PWR_DWN, 0);
243}
244
245static void sii902x_bridge_mode_set(struct drm_bridge *bridge,
246 struct drm_display_mode *mode,
247 struct drm_display_mode *adj)
248{
249 struct sii902x *sii902x = bridge_to_sii902x(bridge);
250 struct regmap *regmap = sii902x->regmap;
251 u8 buf[HDMI_INFOFRAME_SIZE(AVI)];
252 struct hdmi_avi_infoframe frame;
253 int ret;
254
255 buf[0] = adj->clock;
256 buf[1] = adj->clock >> 8;
257 buf[2] = adj->vrefresh;
258 buf[3] = 0x00;
259 buf[4] = adj->hdisplay;
260 buf[5] = adj->hdisplay >> 8;
261 buf[6] = adj->vdisplay;
262 buf[7] = adj->vdisplay >> 8;
263 buf[8] = SII902X_TPI_CLK_RATIO_1X | SII902X_TPI_AVI_PIXEL_REP_NONE |
264 SII902X_TPI_AVI_PIXEL_REP_BUS_24BIT;
265 buf[9] = SII902X_TPI_AVI_INPUT_RANGE_AUTO |
266 SII902X_TPI_AVI_INPUT_COLORSPACE_RGB;
267
268 ret = regmap_bulk_write(regmap, SII902X_TPI_VIDEO_DATA, buf, 10);
269 if (ret)
270 return;
271
272 ret = drm_hdmi_avi_infoframe_from_display_mode(&frame, adj);
273 if (ret < 0) {
274 DRM_ERROR("couldn't fill AVI infoframe\n");
275 return;
276 }
277
278 ret = hdmi_avi_infoframe_pack(&frame, buf, sizeof(buf));
279 if (ret < 0) {
280 DRM_ERROR("failed to pack AVI infoframe: %d\n", ret);
281 return;
282 }
283
284 /* Do not send the infoframe header, but keep the CRC field. */
285 regmap_bulk_write(regmap, SII902X_TPI_AVI_INFOFRAME,
286 buf + HDMI_INFOFRAME_HEADER_SIZE - 1,
287 HDMI_AVI_INFOFRAME_SIZE + 1);
288}
289
290static int sii902x_bridge_attach(struct drm_bridge *bridge)
291{
292 struct sii902x *sii902x = bridge_to_sii902x(bridge);
293 struct drm_device *drm = bridge->dev;
294 int ret;
295
296 drm_connector_helper_add(&sii902x->connector,
297 &sii902x_connector_helper_funcs);
298
299 if (!drm_core_check_feature(drm, DRIVER_ATOMIC)) {
300 dev_err(&sii902x->i2c->dev,
301 "sii902x driver is only compatible with DRM devices supporting atomic updates");
302 return -ENOTSUPP;
303 }
304
305 ret = drm_connector_init(drm, &sii902x->connector,
306 &sii902x_connector_funcs,
307 DRM_MODE_CONNECTOR_HDMIA);
308 if (ret)
309 return ret;
310
311 if (sii902x->i2c->irq > 0)
312 sii902x->connector.polled = DRM_CONNECTOR_POLL_HPD;
313 else
314 sii902x->connector.polled = DRM_CONNECTOR_POLL_CONNECT;
315
316 drm_mode_connector_attach_encoder(&sii902x->connector, bridge->encoder);
317
318 return 0;
319}
320
321static const struct drm_bridge_funcs sii902x_bridge_funcs = {
322 .attach = sii902x_bridge_attach,
323 .mode_set = sii902x_bridge_mode_set,
324 .disable = sii902x_bridge_disable,
325 .enable = sii902x_bridge_enable,
326};
327
328static const struct regmap_range sii902x_volatile_ranges[] = {
329 { .range_min = 0, .range_max = 0xff },
330};
331
332static const struct regmap_access_table sii902x_volatile_table = {
333 .yes_ranges = sii902x_volatile_ranges,
334 .n_yes_ranges = ARRAY_SIZE(sii902x_volatile_ranges),
335};
336
337static const struct regmap_config sii902x_regmap_config = {
338 .reg_bits = 8,
339 .val_bits = 8,
340 .volatile_table = &sii902x_volatile_table,
341 .cache_type = REGCACHE_NONE,
342};
343
344static irqreturn_t sii902x_interrupt(int irq, void *data)
345{
346 struct sii902x *sii902x = data;
347 unsigned int status = 0;
348
349 regmap_read(sii902x->regmap, SII902X_INT_STATUS, &status);
350 regmap_write(sii902x->regmap, SII902X_INT_STATUS, status);
351
352 if ((status & SII902X_HOTPLUG_EVENT) && sii902x->bridge.dev)
353 drm_helper_hpd_irq_event(sii902x->bridge.dev);
354
355 return IRQ_HANDLED;
356}
357
358static int sii902x_probe(struct i2c_client *client,
359 const struct i2c_device_id *id)
360{
361 struct device *dev = &client->dev;
362 unsigned int status = 0;
363 struct sii902x *sii902x;
364 u8 chipid[4];
365 int ret;
366
367 sii902x = devm_kzalloc(dev, sizeof(*sii902x), GFP_KERNEL);
368 if (!sii902x)
369 return -ENOMEM;
370
371 sii902x->i2c = client;
372 sii902x->regmap = devm_regmap_init_i2c(client, &sii902x_regmap_config);
373 if (IS_ERR(sii902x->regmap))
374 return PTR_ERR(sii902x->regmap);
375
376 sii902x->reset_gpio = devm_gpiod_get_optional(dev, "reset",
377 GPIOD_OUT_LOW);
378 if (IS_ERR(sii902x->reset_gpio)) {
379 dev_err(dev, "Failed to retrieve/request reset gpio: %ld\n",
380 PTR_ERR(sii902x->reset_gpio));
381 return PTR_ERR(sii902x->reset_gpio);
382 }
383
384 sii902x_reset(sii902x);
385
386 ret = regmap_write(sii902x->regmap, SII902X_REG_TPI_RQB, 0x0);
387 if (ret)
388 return ret;
389
390 ret = regmap_bulk_read(sii902x->regmap, SII902X_REG_CHIPID(0),
391 &chipid, 4);
392 if (ret) {
393 dev_err(dev, "regmap_read failed %d\n", ret);
394 return ret;
395 }
396
397 if (chipid[0] != 0xb0) {
398 dev_err(dev, "Invalid chipid: %02x (expecting 0xb0)\n",
399 chipid[0]);
400 return -EINVAL;
401 }
402
403 /* Clear all pending interrupts */
404 regmap_read(sii902x->regmap, SII902X_INT_STATUS, &status);
405 regmap_write(sii902x->regmap, SII902X_INT_STATUS, status);
406
407 if (client->irq > 0) {
408 regmap_write(sii902x->regmap, SII902X_INT_ENABLE,
409 SII902X_HOTPLUG_EVENT);
410
411 ret = devm_request_threaded_irq(dev, client->irq, NULL,
412 sii902x_interrupt,
413 IRQF_ONESHOT, dev_name(dev),
414 sii902x);
415 if (ret)
416 return ret;
417 }
418
419 sii902x->bridge.funcs = &sii902x_bridge_funcs;
420 sii902x->bridge.of_node = dev->of_node;
421 ret = drm_bridge_add(&sii902x->bridge);
422 if (ret) {
423 dev_err(dev, "Failed to add drm_bridge\n");
424 return ret;
425 }
426
427 i2c_set_clientdata(client, sii902x);
428
429 return 0;
430}
431
432static int sii902x_remove(struct i2c_client *client)
433
434{
435 struct sii902x *sii902x = i2c_get_clientdata(client);
436
437 drm_bridge_remove(&sii902x->bridge);
438
439 return 0;
440}
441
442static const struct of_device_id sii902x_dt_ids[] = {
443 { .compatible = "sil,sii9022", },
444 { }
445};
446MODULE_DEVICE_TABLE(of, sii902x_dt_ids);
447
448static const struct i2c_device_id sii902x_i2c_ids[] = {
449 { "sii9022", 0 },
450 { },
451};
452MODULE_DEVICE_TABLE(i2c, sii902x_i2c_ids);
453
454static struct i2c_driver sii902x_driver = {
455 .probe = sii902x_probe,
456 .remove = sii902x_remove,
457 .driver = {
458 .name = "sii902x",
459 .of_match_table = sii902x_dt_ids,
460 },
461 .id_table = sii902x_i2c_ids,
462};
463module_i2c_driver(sii902x_driver);
464
465MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
466MODULE_DESCRIPTION("SII902x RGB -> HDMI bridges");
467MODULE_LICENSE("GPL");
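
[Editor's note, not part of the patch] sii902x_get_modes() above performs a request/grant/release handshake on SII902X_SYS_CTRL_DATA: it sets the DDC bus request bit, polls until the grant bit appears (with a timeout), reads the EDID, then clears both bits and polls until the device drops them. The following standalone sketch shows that handshake shape with a simulated register; the bit positions match the driver's defines, while the fake device and poll loop are illustrative only.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DDC_BUS_REQ  (1u << 2)     /* SII902X_SYS_CTRL_DDC_BUS_REQ */
    #define DDC_BUS_GRTD (1u << 1)     /* SII902X_SYS_CTRL_DDC_BUS_GRTD */

    struct fake_dev { uint8_t sys_ctrl; };

    static uint8_t read_reg(struct fake_dev *d)
    {
            /* A real device grants the bus some time after it is requested;
             * here the grant simply tracks the request bit. */
            if (d->sys_ctrl & DDC_BUS_REQ)
                    d->sys_ctrl |= DDC_BUS_GRTD;
            else
                    d->sys_ctrl &= ~DDC_BUS_GRTD;
            return d->sys_ctrl;
    }

    /* Poll until (reg & mask) == want, giving up after max_tries reads. */
    static bool poll_reg(struct fake_dev *d, uint8_t mask, uint8_t want, int max_tries)
    {
            while (max_tries--) {
                    if ((read_reg(d) & mask) == want)
                            return true;
            }
            return false;
    }

    int main(void)
    {
            struct fake_dev dev = { .sys_ctrl = 0 };

            dev.sys_ctrl |= DDC_BUS_REQ;                       /* request the DDC bus */
            if (!poll_reg(&dev, DDC_BUS_GRTD, DDC_BUS_GRTD, 100))
                    return 1;
            printf("bus granted, EDID would be read here\n");

            dev.sys_ctrl &= ~DDC_BUS_REQ;                      /* release the bus */
            if (!poll_reg(&dev, DDC_BUS_REQ | DDC_BUS_GRTD, 0, 100))
                    return 1;
            printf("bus released\n");
            return 0;
    }
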
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
new file mode 100644
index 000000000000..a09825d8c94a
--- /dev/null
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -0,0 +1,1413 @@
1/*
2 * tc358767 eDP bridge driver
3 *
4 * Copyright (C) 2016 CogentEmbedded Inc
5 * Author: Andrey Gusakov <andrey.gusakov@cogentembedded.com>
6 *
7 * Copyright (C) 2016 Pengutronix, Philipp Zabel <p.zabel@pengutronix.de>
8 *
9 * Initially based on: drivers/gpu/drm/i2c/tda998x_drv.c
10 *
11 * Copyright (C) 2012 Texas Instruments
12 * Author: Rob Clark <robdclark@gmail.com>
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 */
24
25#include <linux/clk.h>
26#include <linux/device.h>
27#include <linux/gpio/consumer.h>
28#include <linux/i2c.h>
29#include <linux/kernel.h>
30#include <linux/module.h>
31#include <linux/regmap.h>
32#include <linux/slab.h>
33
34#include <drm/drm_atomic_helper.h>
35#include <drm/drm_crtc_helper.h>
36#include <drm/drm_dp_helper.h>
37#include <drm/drm_edid.h>
38#include <drm/drm_of.h>
39#include <drm/drm_panel.h>
40
41/* Registers */
42
43/* Display Parallel Interface */
44#define DPIPXLFMT 0x0440
45#define VS_POL_ACTIVE_LOW (1 << 10)
46#define HS_POL_ACTIVE_LOW (1 << 9)
47#define DE_POL_ACTIVE_HIGH (0 << 8)
48#define SUB_CFG_TYPE_CONFIG1 (0 << 2) /* LSB aligned */
49#define SUB_CFG_TYPE_CONFIG2 (1 << 2) /* Loosely Packed */
50#define SUB_CFG_TYPE_CONFIG3 (2 << 2) /* LSB aligned 8-bit */
51#define DPI_BPP_RGB888 (0 << 0)
52#define DPI_BPP_RGB666 (1 << 0)
53#define DPI_BPP_RGB565 (2 << 0)
54
55/* Video Path */
56#define VPCTRL0 0x0450
57#define OPXLFMT_RGB666 (0 << 8)
58#define OPXLFMT_RGB888 (1 << 8)
59#define FRMSYNC_DISABLED (0 << 4) /* Video Timing Gen Disabled */
60#define FRMSYNC_ENABLED (1 << 4) /* Video Timing Gen Enabled */
61#define MSF_DISABLED (0 << 0) /* Magic Square FRC disabled */
62#define MSF_ENABLED (1 << 0) /* Magic Square FRC enabled */
63#define HTIM01 0x0454
64#define HTIM02 0x0458
65#define VTIM01 0x045c
66#define VTIM02 0x0460
67#define VFUEN0 0x0464
68#define VFUEN BIT(0) /* Video Frame Timing Upload */
69
70/* System */
71#define TC_IDREG 0x0500
72#define SYSCTRL 0x0510
73#define DP0_AUDSRC_NO_INPUT (0 << 3)
74#define DP0_AUDSRC_I2S_RX (1 << 3)
75#define DP0_VIDSRC_NO_INPUT (0 << 0)
76#define DP0_VIDSRC_DSI_RX (1 << 0)
77#define DP0_VIDSRC_DPI_RX (2 << 0)
78#define DP0_VIDSRC_COLOR_BAR (3 << 0)
79
80/* Control */
81#define DP0CTL 0x0600
82#define VID_MN_GEN BIT(6) /* Auto-generate M/N values */
83#define EF_EN BIT(5) /* Enable Enhanced Framing */
84#define VID_EN BIT(1) /* Video transmission enable */
85#define DP_EN BIT(0) /* Enable DPTX function */
86
87/* Clocks */
88#define DP0_VIDMNGEN0 0x0610
89#define DP0_VIDMNGEN1 0x0614
90#define DP0_VMNGENSTATUS 0x0618
91
92/* Main Channel */
93#define DP0_SECSAMPLE 0x0640
94#define DP0_VIDSYNCDELAY 0x0644
95#define DP0_TOTALVAL 0x0648
96#define DP0_STARTVAL 0x064c
97#define DP0_ACTIVEVAL 0x0650
98#define DP0_SYNCVAL 0x0654
99#define DP0_MISC 0x0658
100#define TU_SIZE_RECOMMENDED (0x3f << 16) /* LSCLK cycles per TU */
101#define BPC_6 (0 << 5)
102#define BPC_8 (1 << 5)
103
104/* AUX channel */
105#define DP0_AUXCFG0 0x0660
106#define DP0_AUXCFG1 0x0664
107#define AUX_RX_FILTER_EN BIT(16)
108
109#define DP0_AUXADDR 0x0668
110#define DP0_AUXWDATA(i) (0x066c + (i) * 4)
111#define DP0_AUXRDATA(i) (0x067c + (i) * 4)
112#define DP0_AUXSTATUS 0x068c
113#define AUX_STATUS_MASK 0xf0
114#define AUX_STATUS_SHIFT 4
115#define AUX_TIMEOUT BIT(1)
116#define AUX_BUSY BIT(0)
117#define DP0_AUXI2CADR 0x0698
118
119/* Link Training */
120#define DP0_SRCCTRL 0x06a0
121#define DP0_SRCCTRL_SCRMBLDIS BIT(13)
122#define DP0_SRCCTRL_EN810B BIT(12)
123#define DP0_SRCCTRL_NOTP (0 << 8)
124#define DP0_SRCCTRL_TP1 (1 << 8)
125#define DP0_SRCCTRL_TP2 (2 << 8)
126#define DP0_SRCCTRL_LANESKEW BIT(7)
127#define DP0_SRCCTRL_SSCG BIT(3)
128#define DP0_SRCCTRL_LANES_1 (0 << 2)
129#define DP0_SRCCTRL_LANES_2 (1 << 2)
130#define DP0_SRCCTRL_BW27 (1 << 1)
131#define DP0_SRCCTRL_BW162 (0 << 1)
132#define DP0_SRCCTRL_AUTOCORRECT BIT(0)
133#define DP0_LTSTAT 0x06d0
134#define LT_LOOPDONE BIT(13)
135#define LT_STATUS_MASK (0x1f << 8)
136#define LT_CHANNEL1_EQ_BITS (DP_CHANNEL_EQ_BITS << 4)
137#define LT_INTERLANE_ALIGN_DONE BIT(3)
138#define LT_CHANNEL0_EQ_BITS (DP_CHANNEL_EQ_BITS)
139#define DP0_SNKLTCHGREQ 0x06d4
140#define DP0_LTLOOPCTRL 0x06d8
141#define DP0_SNKLTCTRL 0x06e4
142
143/* PHY */
144#define DP_PHY_CTRL 0x0800
145#define DP_PHY_RST BIT(28) /* DP PHY Global Soft Reset */
146#define BGREN BIT(25) /* AUX PHY BGR Enable */
147#define PWR_SW_EN BIT(24) /* PHY Power Switch Enable */
148#define PHY_M1_RST BIT(12) /* Reset PHY1 Main Channel */
149#define PHY_RDY BIT(16) /* PHY Main Channels Ready */
150#define PHY_M0_RST BIT(8) /* Reset PHY0 Main Channel */
151#define PHY_A0_EN BIT(1) /* PHY Aux Channel0 Enable */
152#define PHY_M0_EN BIT(0) /* PHY Main Channel0 Enable */
153
154/* PLL */
155#define DP0_PLLCTRL 0x0900
156#define DP1_PLLCTRL 0x0904 /* not defined in DS */
157#define PXL_PLLCTRL 0x0908
158#define PLLUPDATE BIT(2)
159#define PLLBYP BIT(1)
160#define PLLEN BIT(0)
161#define PXL_PLLPARAM 0x0914
162#define IN_SEL_REFCLK (0 << 14)
163#define SYS_PLLPARAM 0x0918
164#define REF_FREQ_38M4 (0 << 8) /* 38.4 MHz */
165#define REF_FREQ_19M2 (1 << 8) /* 19.2 MHz */
166#define REF_FREQ_26M (2 << 8) /* 26 MHz */
167#define REF_FREQ_13M (3 << 8) /* 13 MHz */
168#define SYSCLK_SEL_LSCLK (0 << 4)
169#define LSCLK_DIV_1 (0 << 0)
170#define LSCLK_DIV_2 (1 << 0)
171
172/* Test & Debug */
173#define TSTCTL 0x0a00
174#define PLL_DBG 0x0a04
175
176static bool tc_test_pattern;
177module_param_named(test, tc_test_pattern, bool, 0644);
178
179struct tc_edp_link {
180 struct drm_dp_link base;
181 u8 assr;
182 int scrambler_dis;
183 int spread;
184 int coding8b10b;
185 u8 swing;
186 u8 preemp;
187};
188
189struct tc_data {
190 struct device *dev;
191 struct regmap *regmap;
192 struct drm_dp_aux aux;
193
194 struct drm_bridge bridge;
195 struct drm_connector connector;
196 struct drm_panel *panel;
197
198 /* link settings */
199 struct tc_edp_link link;
200
201 /* display edid */
202 struct edid *edid;
203 /* current mode */
204 struct drm_display_mode *mode;
205
206 u32 rev;
207 u8 assr;
208
209 struct gpio_desc *sd_gpio;
210 struct gpio_desc *reset_gpio;
211 struct clk *refclk;
212};
213
214static inline struct tc_data *aux_to_tc(struct drm_dp_aux *a)
215{
216 return container_of(a, struct tc_data, aux);
217}
218
219static inline struct tc_data *bridge_to_tc(struct drm_bridge *b)
220{
221 return container_of(b, struct tc_data, bridge);
222}
223
224static inline struct tc_data *connector_to_tc(struct drm_connector *c)
225{
226 return container_of(c, struct tc_data, connector);
227}
228
229/* Simple macros to avoid repeated error checks */
230#define tc_write(reg, var) \
231 do { \
232 ret = regmap_write(tc->regmap, reg, var); \
233 if (ret) \
234 goto err; \
235 } while (0)
236#define tc_read(reg, var) \
237 do { \
238 ret = regmap_read(tc->regmap, reg, var); \
239 if (ret) \
240 goto err; \
241 } while (0)
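/*
 * Usage sketch (illustrative only): both macros rely on the calling
 * function providing a local "struct tc_data *tc", an "int ret" and an
 * "err:" label, e.g.:
 *
 *	int ret;
 *	u32 val;
 *
 *	tc_write(DP0_AUXADDR, addr);	- regmap_write(), goto err on failure
 *	tc_read(DP0_AUXSTATUS, &val);	- regmap_read(), goto err on failure
 *	return 0;
 * err:
 *	return ret;
 */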
242
243static inline int tc_poll_timeout(struct regmap *map, unsigned int addr,
244 unsigned int cond_mask,
245 unsigned int cond_value,
246 unsigned long sleep_us, u64 timeout_us)
247{
248 ktime_t timeout = ktime_add_us(ktime_get(), timeout_us);
249 unsigned int val;
250 int ret;
251
252 for (;;) {
253 ret = regmap_read(map, addr, &val);
254 if (ret)
255 break;
256 if ((val & cond_mask) == cond_value)
257 break;
258 if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) {
259 ret = regmap_read(map, addr, &val);
260 break;
261 }
262 if (sleep_us)
263 usleep_range((sleep_us >> 2) + 1, sleep_us);
264 }
265 return ret ?: (((val & cond_mask) == cond_value) ? 0 : -ETIMEDOUT);
266}
267
268static int tc_aux_wait_busy(struct tc_data *tc, unsigned int timeout_ms)
269{
270 return tc_poll_timeout(tc->regmap, DP0_AUXSTATUS, AUX_BUSY, 0,
271 1000, 1000 * timeout_ms);
272}
273
274static int tc_aux_get_status(struct tc_data *tc, u8 *reply)
275{
276 int ret;
277 u32 value;
278
279 ret = regmap_read(tc->regmap, DP0_AUXSTATUS, &value);
280 if (ret < 0)
281 return ret;
282 if (value & AUX_BUSY) {
283 if (value & AUX_TIMEOUT) {
284 dev_err(tc->dev, "i2c access timeout!\n");
285 return -ETIMEDOUT;
286 }
287 return -EBUSY;
288 }
289
290 *reply = (value & AUX_STATUS_MASK) >> AUX_STATUS_SHIFT;
291 return 0;
292}
293
294static ssize_t tc_aux_transfer(struct drm_dp_aux *aux,
295 struct drm_dp_aux_msg *msg)
296{
297 struct tc_data *tc = aux_to_tc(aux);
298 size_t size = min_t(size_t, 8, msg->size);
299 u8 request = msg->request & ~DP_AUX_I2C_MOT;
300 u8 *buf = msg->buffer;
301 u32 tmp = 0;
302 int i = 0;
303 int ret;
304
305 if (size == 0)
306 return 0;
307
308 ret = tc_aux_wait_busy(tc, 100);
309 if (ret)
310 goto err;
311
312 if (request == DP_AUX_I2C_WRITE || request == DP_AUX_NATIVE_WRITE) {
313 /* Store data */
314 while (i < size) {
315 if (request == DP_AUX_NATIVE_WRITE)
316 tmp = tmp | (buf[i] << (8 * (i & 0x3)));
317 else
318 tmp = (tmp << 8) | buf[i];
319 i++;
320 if (((i % 4) == 0) || (i == size)) {
321 tc_write(DP0_AUXWDATA(i >> 2), tmp);
322 tmp = 0;
323 }
324 }
325 } else if (request != DP_AUX_I2C_READ &&
326 request != DP_AUX_NATIVE_READ) {
327 return -EINVAL;
328 }
329
330 /* Store address */
331 tc_write(DP0_AUXADDR, msg->address);
332 /* Start transfer */
333 tc_write(DP0_AUXCFG0, ((size - 1) << 8) | request);
334
335 ret = tc_aux_wait_busy(tc, 100);
336 if (ret)
337 goto err;
338
339 ret = tc_aux_get_status(tc, &msg->reply);
340 if (ret)
341 goto err;
342
343 if (request == DP_AUX_I2C_READ || request == DP_AUX_NATIVE_READ) {
344 /* Read data */
345 while (i < size) {
346 if ((i % 4) == 0)
347 tc_read(DP0_AUXRDATA(i >> 2), &tmp);
348 buf[i] = tmp & 0xff;
349 tmp = tmp >> 8;
350 i++;
351 }
352 }
353
354 return size;
355err:
356 return ret;
357}
358
359static const char * const training_pattern1_errors[] = {
360 "No errors",
361 "Aux write error",
362 "Aux read error",
363 "Max voltage reached error",
364 "Loop counter expired error",
365 "res", "res", "res"
366};
367
368static const char * const training_pattern2_errors[] = {
369 "No errors",
370 "Aux write error",
371 "Aux read error",
372 "Clock recovery failed error",
373 "Loop counter expired error",
374 "res", "res", "res"
375};
376
377static u32 tc_srcctrl(struct tc_data *tc)
378{
379 /*
380 * No training pattern, skew lane 1 data by two LSCLK cycles with
381 * respect to lane 0 data, AutoCorrect Mode = 0
382 */
383 u32 reg = DP0_SRCCTRL_NOTP | DP0_SRCCTRL_LANESKEW;
384
385 if (tc->link.scrambler_dis)
386 reg |= DP0_SRCCTRL_SCRMBLDIS; /* Scrambler Disabled */
387 if (tc->link.coding8b10b)
388 /* Enable 8/10B Encoder (TxData[19:16] not used) */
389 reg |= DP0_SRCCTRL_EN810B;
390 if (tc->link.spread)
391 reg |= DP0_SRCCTRL_SSCG; /* Spread Spectrum Enable */
392 if (tc->link.base.num_lanes == 2)
393 reg |= DP0_SRCCTRL_LANES_2; /* Two Main Channel Lanes */
394 if (tc->link.base.rate != 162000)
395 reg |= DP0_SRCCTRL_BW27; /* 2.7 Gbps link */
396 return reg;
397}
398
399static void tc_wait_pll_lock(struct tc_data *tc)
400{
401 /* Wait for PLL to lock: up to 2.09 ms, depending on refclk */
402 usleep_range(3000, 6000);
403}
404
405static int tc_pxl_pll_en(struct tc_data *tc, u32 refclk, u32 pixelclock)
406{
407 int ret;
408 int i_pre, best_pre = 1;
409 int i_post, best_post = 1;
410 int div, best_div = 1;
411 int mul, best_mul = 1;
412 int delta, best_delta;
413 int ext_div[] = {1, 2, 3, 5, 7};
414 int best_pixelclock = 0;
415 int vco_hi = 0;
416
417 dev_dbg(tc->dev, "PLL: requested %d pixelclock, ref %d\n", pixelclock,
418 refclk);
419 best_delta = pixelclock;
420 /* Loop over all possible ext_divs, skipping invalid configurations */
421 for (i_pre = 0; i_pre < ARRAY_SIZE(ext_div); i_pre++) {
422 /*
423 * refclk / ext_pre_div should be in the 1 to 200 MHz range.
424 * All supported refclk rates are below 200 MHz, so only the lower bound is checked.
425 */
426 if (refclk / ext_div[i_pre] < 1000000)
427 continue;
428 for (i_post = 0; i_post < ARRAY_SIZE(ext_div); i_post++) {
429 for (div = 1; div <= 16; div++) {
430 u32 clk;
431 u64 tmp;
432
433 tmp = pixelclock * ext_div[i_pre] *
434 ext_div[i_post] * div;
435 do_div(tmp, refclk);
436 mul = tmp;
437
438 /* Check limits */
439 if ((mul < 1) || (mul > 128))
440 continue;
441
442 clk = (refclk / ext_div[i_pre] / div) * mul;
443 /*
444 * refclk * mul / (ext_pre_div * pre_div)
445 * should be in the 150 to 650 MHz range
446 */
447 if ((clk > 650000000) || (clk < 150000000))
448 continue;
449
450 clk = clk / ext_div[i_post];
451 delta = clk - pixelclock;
452
453 if (abs(delta) < abs(best_delta)) {
454 best_pre = i_pre;
455 best_post = i_post;
456 best_div = div;
457 best_mul = mul;
458 best_delta = delta;
459 best_pixelclock = clk;
460 }
461 }
462 }
463 }
464 if (best_pixelclock == 0) {
465 dev_err(tc->dev, "Failed to calc clock for %d pixelclock\n",
466 pixelclock);
467 return -EINVAL;
468 }
469
470 dev_dbg(tc->dev, "PLL: got %d, delta %d\n", best_pixelclock,
471 best_delta);
472 dev_dbg(tc->dev, "PLL: %d / %d / %d * %d / %d\n", refclk,
473 ext_div[best_pre], best_div, best_mul, ext_div[best_post]);
474
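	/*
	 * Worked example with hypothetical numbers: for refclk = 38.4 MHz
	 * and a 148.5 MHz pixel clock, one valid combination is
	 * ext_pre = 1, div = 5, mul = 58, ext_post = 3:
	 * VCO = 38.4 MHz / 1 / 5 * 58 = 445.44 MHz (within 150..650 MHz),
	 * and 445.44 MHz / 3 = 148.48 MHz, only 20 kHz low.
	 */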
475 /* if VCO >= 300 MHz */
476 if (refclk / ext_div[best_pre] / best_div * best_mul >= 300000000)
477 vco_hi = 1;
478 /* see DS */
479 if (best_div == 16)
480 best_div = 0;
481 if (best_mul == 128)
482 best_mul = 0;
483
484 /* Power up PLL and switch to bypass */
485 tc_write(PXL_PLLCTRL, PLLBYP | PLLEN);
486
487 tc_write(PXL_PLLPARAM,
488 (vco_hi << 24) | /* For PLL VCO >= 300 MHz = 1 */
489 (ext_div[best_pre] << 20) | /* External Pre-divider */
490 (ext_div[best_post] << 16) | /* External Post-divider */
491 IN_SEL_REFCLK | /* Use RefClk as PLL input */
492 (best_div << 8) | /* Divider for PLL RefClk */
493 (best_mul << 0)); /* Multiplier for PLL */
494
495 /* Force PLL parameter update and disable bypass */
496 tc_write(PXL_PLLCTRL, PLLUPDATE | PLLEN);
497
498 tc_wait_pll_lock(tc);
499
500 return 0;
501err:
502 return ret;
503}
504
505static int tc_pxl_pll_dis(struct tc_data *tc)
506{
507 /* Enable PLL bypass, power down PLL */
508 return regmap_write(tc->regmap, PXL_PLLCTRL, PLLBYP);
509}
510
511static int tc_stream_clock_calc(struct tc_data *tc)
512{
513 int ret;
514 /*
515 * If the Stream clock and Link Symbol clock are
516 * asynchronous with each other, the value of M changes over
517 * time. This way of generating link clock and stream
518 * clock is called Asynchronous Clock mode. The value M
519 * must change while the value N stays constant. The
520 * value of N in this Asynchronous Clock mode must be set
521 * to 2^15 or 32,768.
522 *
523 * LSCLK = 1/10 of high speed link clock
524 *
525 * f_STRMCLK = M/N * f_LSCLK
526 * M/N = f_STRMCLK / f_LSCLK
527 *
528 */
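	/*
	 * Worked example with hypothetical numbers: with N = 32768 and a
	 * 1.62 Gbps link (f_LSCLK = 162 MHz), a 78 MHz stream clock needs
	 * M ~ 32768 * 78 / 162 ~ 15777; VID_MN_GEN in DP0CTL makes the
	 * hardware generate and track M automatically.
	 */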
529 tc_write(DP0_VIDMNGEN1, 32768);
530
531 return 0;
532err:
533 return ret;
534}
535
536static int tc_aux_link_setup(struct tc_data *tc)
537{
538 unsigned long rate;
539 u32 value;
540 int ret;
541
542 rate = clk_get_rate(tc->refclk);
543 switch (rate) {
544 case 38400000:
545 value = REF_FREQ_38M4;
546 break;
547 case 26000000:
548 value = REF_FREQ_26M;
549 break;
550 case 19200000:
551 value = REF_FREQ_19M2;
552 break;
553 case 13000000:
554 value = REF_FREQ_13M;
555 break;
556 default:
557 dev_err(tc->dev, "Invalid refclk rate: %lu Hz\n", rate);
558 return -EINVAL;
559 }
560
561 /* Setup DP-PHY / PLL */
562 value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2;
563 tc_write(SYS_PLLPARAM, value);
564
565 tc_write(DP_PHY_CTRL, BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN);
566
567 /*
568 * Initially PLLs are in bypass. Force PLL parameter update,
569 * disable PLL bypass, enable PLL
570 */
571 tc_write(DP0_PLLCTRL, PLLUPDATE | PLLEN);
572 tc_wait_pll_lock(tc);
573
574 tc_write(DP1_PLLCTRL, PLLUPDATE | PLLEN);
575 tc_wait_pll_lock(tc);
576
577 ret = tc_poll_timeout(tc->regmap, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 1,
578 1000);
579 if (ret == -ETIMEDOUT) {
580 dev_err(tc->dev, "Timeout waiting for PHY to become ready\n");
581 return ret;
582 } else if (ret)
583 goto err;
584
585 /* Setup AUX link */
586 tc_write(DP0_AUXCFG1, AUX_RX_FILTER_EN |
587 (0x06 << 8) | /* Aux Bit Period Calculator Threshold */
588 (0x3f << 0)); /* Aux Response Timeout Timer */
589
590 return 0;
591err:
592 dev_err(tc->dev, "tc_aux_link_setup failed: %d\n", ret);
593 return ret;
594}
595
596static int tc_get_display_props(struct tc_data *tc)
597{
598 int ret;
599 /* temp buffer */
600 u8 tmp[8];
601
602 /* Read DP Rx Link Capability */
603 ret = drm_dp_link_probe(&tc->aux, &tc->link.base);
604 if (ret < 0)
605 goto err_dpcd_read;
606 if ((tc->link.base.rate != 162000) && (tc->link.base.rate != 270000))
607 goto err_dpcd_inval;
608
609 ret = drm_dp_dpcd_readb(&tc->aux, DP_MAX_DOWNSPREAD, tmp);
610 if (ret < 0)
611 goto err_dpcd_read;
612 tc->link.spread = tmp[0] & BIT(0); /* 0.5% down spread */
613
614 ret = drm_dp_dpcd_readb(&tc->aux, DP_MAIN_LINK_CHANNEL_CODING, tmp);
615 if (ret < 0)
616 goto err_dpcd_read;
617 tc->link.coding8b10b = tmp[0] & BIT(0);
618 tc->link.scrambler_dis = 0;
619 /* read assr */
620 ret = drm_dp_dpcd_readb(&tc->aux, DP_EDP_CONFIGURATION_SET, tmp);
621 if (ret < 0)
622 goto err_dpcd_read;
623 tc->link.assr = tmp[0] & DP_ALTERNATE_SCRAMBLER_RESET_ENABLE;
624
625 dev_dbg(tc->dev, "DPCD rev: %d.%d, rate: %s, lanes: %d, framing: %s\n",
626 tc->link.base.revision >> 4, tc->link.base.revision & 0x0f,
627 (tc->link.base.rate == 162000) ? "1.62Gbps" : "2.7Gbps",
628 tc->link.base.num_lanes,
629 (tc->link.base.capabilities & DP_LINK_CAP_ENHANCED_FRAMING) ?
630 "enhanced" : "non-enhanced");
631 dev_dbg(tc->dev, "ANSI 8B/10B: %d\n", tc->link.coding8b10b);
632 dev_dbg(tc->dev, "Display ASSR: %d, TC358767 ASSR: %d\n",
633 tc->link.assr, tc->assr);
634
635 return 0;
636
637err_dpcd_read:
638 dev_err(tc->dev, "failed to read DPCD: %d\n", ret);
639 return ret;
640err_dpcd_inval:
641 dev_err(tc->dev, "invalid DPCD\n");
642 return -EINVAL;
643}
644
645static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
646{
647 int ret;
648 int vid_sync_dly;
649 int max_tu_symbol;
650
651 int left_margin = mode->htotal - mode->hsync_end;
652 int right_margin = mode->hsync_start - mode->hdisplay;
653 int hsync_len = mode->hsync_end - mode->hsync_start;
654 int upper_margin = mode->vtotal - mode->vsync_end;
655 int lower_margin = mode->vsync_start - mode->vdisplay;
656 int vsync_len = mode->vsync_end - mode->vsync_start;
657
658 dev_dbg(tc->dev, "set mode %dx%d\n",
659 mode->hdisplay, mode->vdisplay);
660 dev_dbg(tc->dev, "H margin %d,%d sync %d\n",
661 left_margin, right_margin, hsync_len);
662 dev_dbg(tc->dev, "V margin %d,%d sync %d\n",
663 upper_margin, lower_margin, vsync_len);
664 dev_dbg(tc->dev, "total: %dx%d\n", mode->htotal, mode->vtotal);
665
666
667 /* LCD Ctl Frame Size */
668 tc_write(VPCTRL0, (0x40 << 20) /* VSDELAY */ |
669 OPXLFMT_RGB888 | FRMSYNC_DISABLED | MSF_DISABLED);
670 tc_write(HTIM01, (left_margin << 16) | /* H back porch */
671 (hsync_len << 0)); /* Hsync */
672 tc_write(HTIM02, (right_margin << 16) | /* H front porch */
673 (mode->hdisplay << 0)); /* width */
674 tc_write(VTIM01, (upper_margin << 16) | /* V back porch */
675 (vsync_len << 0)); /* Vsync */
676 tc_write(VTIM02, (lower_margin << 16) | /* V front porch */
677 (mode->vdisplay << 0)); /* height */
678 tc_write(VFUEN0, VFUEN); /* update settings */
679
680 /* Test pattern settings */
681 tc_write(TSTCTL,
682 (120 << 24) | /* Red Color component value */
683 (20 << 16) | /* Green Color component value */
684 (99 << 8) | /* Blue Color component value */
685 (1 << 4) | /* Enable I2C Filter */
686 (2 << 0) | /* Color bar Mode */
687 0);
688
689 /* DP Main Stream Attributes */
690 vid_sync_dly = hsync_len + left_margin + mode->hdisplay;
691 tc_write(DP0_VIDSYNCDELAY,
692 (0x003e << 16) | /* thresh_dly */
693 (vid_sync_dly << 0));
694
695 tc_write(DP0_TOTALVAL, (mode->vtotal << 16) | (mode->htotal));
696
697 tc_write(DP0_STARTVAL,
698 ((upper_margin + vsync_len) << 16) |
699 ((left_margin + hsync_len) << 0));
700
701 tc_write(DP0_ACTIVEVAL, (mode->vdisplay << 16) | (mode->hdisplay));
702
703 tc_write(DP0_SYNCVAL, (vsync_len << 16) | (hsync_len << 0));
704
705 tc_write(DPIPXLFMT, VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW |
706 DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888);
707
708 /*
709 * Recommended maximum number of symbols transferred in a transfer unit:
710 * DIV_ROUND_UP((input active video bandwidth in bytes) * tu_size,
711 * (output active video bandwidth in bytes))
712 * Must be less than tu_size.
713 */
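	/*
	 * Worked example with hypothetical numbers: a 154 MHz, 24 bpp DPI
	 * input carries 154 * 3 = 462 Mbyte/s, a 2-lane 2.7 Gbps link
	 * carries 2 * 270 = 540 Mbyte/s after 8b/10b coding, so the
	 * recommended value would be DIV_ROUND_UP(462 * 63, 540) = 54.
	 * The driver currently just programs the maximum, tu_size - 1 = 62.
	 */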
714 max_tu_symbol = 62; /* tu_size - 1; TU_SIZE_RECOMMENDED already encodes the 63-symbol TU at bits [21:16] */
715 tc_write(DP0_MISC, (max_tu_symbol << 23) | TU_SIZE_RECOMMENDED | BPC_8);
716
717 return 0;
718err:
719 return ret;
720}
721
722static int tc_link_training(struct tc_data *tc, int pattern)
723{
724 const char * const *errors;
725 u32 srcctrl = tc_srcctrl(tc) | DP0_SRCCTRL_SCRMBLDIS |
726 DP0_SRCCTRL_AUTOCORRECT;
727 int timeout;
728 int retry;
729 u32 value;
730 int ret;
731
732 if (pattern == DP_TRAINING_PATTERN_1) {
733 srcctrl |= DP0_SRCCTRL_TP1;
734 errors = training_pattern1_errors;
735 } else {
736 srcctrl |= DP0_SRCCTRL_TP2;
737 errors = training_pattern2_errors;
738 }
739
740 /* Set DPCD 0x102 for Training Part 1 or 2 */
741 tc_write(DP0_SNKLTCTRL, DP_LINK_SCRAMBLING_DISABLE | pattern);
742
743 tc_write(DP0_LTLOOPCTRL,
744 (0x0f << 28) | /* Defer Iteration Count */
745 (0x0f << 24) | /* Loop Iteration Count */
746 (0x0d << 0)); /* Loop Timer Delay */
747
748 retry = 5;
749 do {
750 /* Set DP0 Training Pattern */
751 tc_write(DP0_SRCCTRL, srcctrl);
752
753 /* Enable DP0 to start Link Training */
754 tc_write(DP0CTL, DP_EN);
755
756 /* wait */
757 timeout = 1000;
758 do {
759 tc_read(DP0_LTSTAT, &value);
760 udelay(1);
761 } while ((!(value & LT_LOOPDONE)) && (--timeout));
762 if (timeout == 0) {
763 dev_err(tc->dev, "Link training timeout!\n");
764 } else {
765 int pattern = (value >> 11) & 0x3;
766 int error = (value >> 8) & 0x7;
767
768 dev_dbg(tc->dev,
769 "Link training phase %d done after %d uS: %s\n",
770 pattern, 1000 - timeout, errors[error]);
771 if (pattern == DP_TRAINING_PATTERN_1 && error == 0)
772 break;
773 if (pattern == DP_TRAINING_PATTERN_2) {
774 value &= LT_CHANNEL1_EQ_BITS |
775 LT_INTERLANE_ALIGN_DONE |
776 LT_CHANNEL0_EQ_BITS;
777 /* in case of two lanes */
778 if ((tc->link.base.num_lanes == 2) &&
779 (value == (LT_CHANNEL1_EQ_BITS |
780 LT_INTERLANE_ALIGN_DONE |
781 LT_CHANNEL0_EQ_BITS)))
782 break;
783 /* in case of one lane */
784 if ((tc->link.base.num_lanes == 1) &&
785 (value == (LT_INTERLANE_ALIGN_DONE |
786 LT_CHANNEL0_EQ_BITS)))
787 break;
788 }
789 }
790 /* restart */
791 tc_write(DP0CTL, 0);
792 usleep_range(10, 20);
793 } while (--retry);
794 if (retry == 0) {
795 dev_err(tc->dev, "Failed to finish training phase %d\n",
796 pattern);
797 }
798
799 return 0;
800err:
801 return ret;
802}
803
804static int tc_main_link_setup(struct tc_data *tc)
805{
806 struct drm_dp_aux *aux = &tc->aux;
807 struct device *dev = tc->dev;
808 unsigned int rate;
809 u32 dp_phy_ctrl;
810 int timeout;
811 bool aligned;
812 bool ready;
813 u32 value;
814 int ret;
815 u8 tmp[8];
816
817 /* display mode should be set at this point */
818 if (!tc->mode)
819 return -EINVAL;
820
821 /* from excel file - DP0_SrcCtrl */
822 tc_write(DP0_SRCCTRL, DP0_SRCCTRL_SCRMBLDIS | DP0_SRCCTRL_EN810B |
823 DP0_SRCCTRL_LANESKEW | DP0_SRCCTRL_LANES_2 |
824 DP0_SRCCTRL_BW27 | DP0_SRCCTRL_AUTOCORRECT);
825 /* from excel file - DP1_SrcCtrl */
826 tc_write(0x07a0, 0x00003083);
827
828 rate = clk_get_rate(tc->refclk);
829 switch (rate) {
830 case 38400000:
831 value = REF_FREQ_38M4;
832 break;
833 case 26000000:
834 value = REF_FREQ_26M;
835 break;
836 case 19200000:
837 value = REF_FREQ_19M2;
838 break;
839 case 13000000:
840 value = REF_FREQ_13M;
841 break;
842 default:
843 return -EINVAL;
844 }
845 value |= SYSCLK_SEL_LSCLK | LSCLK_DIV_2;
846 tc_write(SYS_PLLPARAM, value);
847 /* Setup Main Link */
848 dp_phy_ctrl = BGREN | PWR_SW_EN | BIT(2) | PHY_A0_EN | PHY_M0_EN;
849 tc_write(DP_PHY_CTRL, dp_phy_ctrl);
850 msleep(100);
851
852 /* PLL setup */
853 tc_write(DP0_PLLCTRL, PLLUPDATE | PLLEN);
854 tc_wait_pll_lock(tc);
855
856 tc_write(DP1_PLLCTRL, PLLUPDATE | PLLEN);
857 tc_wait_pll_lock(tc);
858
859 /* PXL PLL setup */
860 if (tc_test_pattern) {
861 ret = tc_pxl_pll_en(tc, clk_get_rate(tc->refclk),
862 1000 * tc->mode->clock);
863 if (ret)
864 goto err;
865 }
866
867 /* Reset/Enable Main Links */
868 dp_phy_ctrl |= DP_PHY_RST | PHY_M1_RST | PHY_M0_RST;
869 tc_write(DP_PHY_CTRL, dp_phy_ctrl);
870 usleep_range(100, 200);
871 dp_phy_ctrl &= ~(DP_PHY_RST | PHY_M1_RST | PHY_M0_RST);
872 tc_write(DP_PHY_CTRL, dp_phy_ctrl);
873
874 timeout = 1000;
875 do {
876 tc_read(DP_PHY_CTRL, &value);
877 udelay(1);
878 } while ((!(value & PHY_RDY)) && (--timeout));
879
880 if (timeout == 0) {
881 dev_err(dev, "timeout waiting for phy become ready");
882 return -ETIMEDOUT;
883 }
884
885 /* Set misc: 8 bits per color */
886 ret = regmap_update_bits(tc->regmap, DP0_MISC, BPC_8, BPC_8);
887 if (ret)
888 goto err;
889
890 /*
891 * ASSR mode
892 * On the TC358767 side, ASSR is configured through a strap pin;
893 * there seems to be no way to change this setting from software.
894 *
895 * Check whether the display is configured for the same mode.
896 */
897 if (tc->assr != tc->link.assr) {
898 dev_dbg(dev, "Trying to set display to ASSR: %d\n",
899 tc->assr);
900 /* try to set ASSR on display side */
901 tmp[0] = tc->assr;
902 ret = drm_dp_dpcd_writeb(aux, DP_EDP_CONFIGURATION_SET, tmp[0]);
903 if (ret < 0)
904 goto err_dpcd_read;
905 /* read back */
906 ret = drm_dp_dpcd_readb(aux, DP_EDP_CONFIGURATION_SET, tmp);
907 if (ret < 0)
908 goto err_dpcd_read;
909
910 if (tmp[0] != tc->assr) {
911 dev_warn(dev, "Failed to switch display ASSR to %d, falling back to unscrambled mode\n",
912 tc->assr);
913 /* fall back to unscrambled mode */
914 tc->link.scrambler_dis = 1;
915 }
916 }
917
918 /* Setup Link & DPRx Config for Training */
919 ret = drm_dp_link_configure(aux, &tc->link.base);
920 if (ret < 0)
921 goto err_dpcd_write;
922
923 /* DOWNSPREAD_CTRL */
924 tmp[0] = tc->link.spread ? DP_SPREAD_AMP_0_5 : 0x00;
925 /* MAIN_LINK_CHANNEL_CODING_SET */
926 tmp[1] = tc->link.coding8b10b ? DP_SET_ANSI_8B10B : 0x00;
927 ret = drm_dp_dpcd_write(aux, DP_DOWNSPREAD_CTRL, tmp, 2);
928 if (ret < 0)
929 goto err_dpcd_write;
930
931 ret = tc_link_training(tc, DP_TRAINING_PATTERN_1);
932 if (ret)
933 goto err;
934
935 ret = tc_link_training(tc, DP_TRAINING_PATTERN_2);
936 if (ret)
937 goto err;
938
939 /* Clear DPCD 0x102 */
940 /* Note: cannot use the DP0_SNKLTCTRL (0x06E4) shortcut */
941 tmp[0] = tc->link.scrambler_dis ? DP_LINK_SCRAMBLING_DISABLE : 0x00;
942 ret = drm_dp_dpcd_writeb(aux, DP_TRAINING_PATTERN_SET, tmp[0]);
943 if (ret < 0)
944 goto err_dpcd_write;
945
946 /* Clear Training Pattern, set AutoCorrect Mode = 1 */
947 tc_write(DP0_SRCCTRL, tc_srcctrl(tc) | DP0_SRCCTRL_AUTOCORRECT);
948
949 /* Wait */
950 timeout = 100;
951 do {
952 udelay(1);
953 /* Read DPCD 0x202-0x207 */
954 ret = drm_dp_dpcd_read_link_status(aux, tmp + 2);
955 if (ret < 0)
956 goto err_dpcd_read;
957 ready = (tmp[2] == ((DP_CHANNEL_EQ_BITS << 4) | /* Lane1 */
958 DP_CHANNEL_EQ_BITS)); /* Lane0 */
959 aligned = tmp[4] & DP_INTERLANE_ALIGN_DONE;
960 } while ((--timeout) && !(ready && aligned));
961
962 if (timeout == 0) {
963 /* Read DPCD 0x200-0x201 */
964 ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT, tmp, 2);
965 if (ret < 0)
966 goto err_dpcd_read;
967 dev_info(dev, "0x0200 SINK_COUNT: 0x%02x\n", tmp[0]);
968 dev_info(dev, "0x0201 DEVICE_SERVICE_IRQ_VECTOR: 0x%02x\n",
969 tmp[1]);
970 dev_info(dev, "0x0202 LANE0_1_STATUS: 0x%02x\n", tmp[2]);
971 dev_info(dev, "0x0204 LANE_ALIGN_STATUS_UPDATED: 0x%02x\n",
972 tmp[4]);
973 dev_info(dev, "0x0205 SINK_STATUS: 0x%02x\n", tmp[5]);
974 dev_info(dev, "0x0206 ADJUST_REQUEST_LANE0_1: 0x%02x\n",
975 tmp[6]);
976
977 if (!ready)
978 dev_err(dev, "Lane0/1 not ready\n");
979 if (!aligned)
980 dev_err(dev, "Lane0/1 not aligned\n");
981 return -EAGAIN;
982 }
983
984 ret = tc_set_video_mode(tc, tc->mode);
985 if (ret)
986 goto err;
987
988 /* Set M/N */
989 ret = tc_stream_clock_calc(tc);
990 if (ret)
991 goto err;
992
993 return 0;
994err_dpcd_read:
995 dev_err(tc->dev, "Failed to read DPCD: %d\n", ret);
996 return ret;
997err_dpcd_write:
998 dev_err(tc->dev, "Failed to write DPCD: %d\n", ret);
999err:
1000 return ret;
1001}
1002
1003static int tc_main_link_stream(struct tc_data *tc, int state)
1004{
1005 int ret;
1006 u32 value;
1007
1008 dev_dbg(tc->dev, "stream: %d\n", state);
1009
1010 if (state) {
1011 value = VID_MN_GEN | DP_EN;
1012 if (tc->link.base.capabilities & DP_LINK_CAP_ENHANCED_FRAMING)
1013 value |= EF_EN;
1014 tc_write(DP0CTL, value);
1015 /*
1016 * VID_EN assertion should be delayed by at least N * LSCLK
1017 * cycles from the time VID_MN_GEN is enabled in order to
1018 * generate stable values for VID_M. LSCLK is 270 MHz or
1019 * 162 MHz, VID_N is set to 32768 in tc_stream_clock_calc(),
1020 * so a delay of at least 203 us should suffice.
1021 */
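		/* e.g. 32768 / 162 MHz ~ 202.3 us, 32768 / 270 MHz ~ 121.4 us */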
1022 usleep_range(500, 1000);
1023 value |= VID_EN;
1024 tc_write(DP0CTL, value);
1025 /* Set input interface */
1026 value = DP0_AUDSRC_NO_INPUT;
1027 if (tc_test_pattern)
1028 value |= DP0_VIDSRC_COLOR_BAR;
1029 else
1030 value |= DP0_VIDSRC_DPI_RX;
1031 tc_write(SYSCTRL, value);
1032 } else {
1033 tc_write(DP0CTL, 0);
1034 }
1035
1036 return 0;
1037err:
1038 return ret;
1039}
1040
1041static enum drm_connector_status
1042tc_connector_detect(struct drm_connector *connector, bool force)
1043{
1044 return connector_status_connected;
1045}
1046
1047static void tc_bridge_pre_enable(struct drm_bridge *bridge)
1048{
1049 struct tc_data *tc = bridge_to_tc(bridge);
1050
1051 drm_panel_prepare(tc->panel);
1052}
1053
1054static void tc_bridge_enable(struct drm_bridge *bridge)
1055{
1056 struct tc_data *tc = bridge_to_tc(bridge);
1057 int ret;
1058
1059 ret = tc_main_link_setup(tc);
1060 if (ret < 0) {
1061 dev_err(tc->dev, "main link setup error: %d\n", ret);
1062 return;
1063 }
1064
1065 ret = tc_main_link_stream(tc, 1);
1066 if (ret < 0) {
1067 dev_err(tc->dev, "main link stream start error: %d\n", ret);
1068 return;
1069 }
1070
1071 drm_panel_enable(tc->panel);
1072}
1073
1074static void tc_bridge_disable(struct drm_bridge *bridge)
1075{
1076 struct tc_data *tc = bridge_to_tc(bridge);
1077 int ret;
1078
1079 drm_panel_disable(tc->panel);
1080
1081 ret = tc_main_link_stream(tc, 0);
1082 if (ret < 0)
1083 dev_err(tc->dev, "main link stream stop error: %d\n", ret);
1084}
1085
1086static void tc_bridge_post_disable(struct drm_bridge *bridge)
1087{
1088 struct tc_data *tc = bridge_to_tc(bridge);
1089
1090 drm_panel_unprepare(tc->panel);
1091}
1092
1093static bool tc_bridge_mode_fixup(struct drm_bridge *bridge,
1094 const struct drm_display_mode *mode,
1095 struct drm_display_mode *adj)
1096{
1097 /* Fixup sync polarities, both hsync and vsync are active low */
1098 adj->flags = mode->flags;
1099 adj->flags |= (DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC);
1100 adj->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
1101
1102 return true;
1103}
1104
1105static int tc_connector_mode_valid(struct drm_connector *connector,
1106 struct drm_display_mode *mode)
1107{
1108 /* Accept any mode */
1109 return MODE_OK;
1110}
1111
1112static void tc_bridge_mode_set(struct drm_bridge *bridge,
1113 struct drm_display_mode *mode,
1114 struct drm_display_mode *adj)
1115{
1116 struct tc_data *tc = bridge_to_tc(bridge);
1117
1118 tc->mode = mode;
1119}
1120
1121static int tc_connector_get_modes(struct drm_connector *connector)
1122{
1123 struct tc_data *tc = connector_to_tc(connector);
1124 struct edid *edid;
1125 unsigned int count;
1126
1127 if (tc->panel && tc->panel->funcs && tc->panel->funcs->get_modes) {
1128 count = tc->panel->funcs->get_modes(tc->panel);
1129 if (count > 0)
1130 return count;
1131 }
1132
1133 edid = drm_get_edid(connector, &tc->aux.ddc);
1134
1135 kfree(tc->edid);
1136 tc->edid = edid;
1137 if (!edid)
1138 return 0;
1139
1140 drm_mode_connector_update_edid_property(connector, edid);
1141 count = drm_add_edid_modes(connector, edid);
1142
1143 return count;
1144}
1145
1146static void tc_connector_set_polling(struct tc_data *tc,
1147 struct drm_connector *connector)
1148{
1149 /* TODO: add support for HPD */
1150 connector->polled = DRM_CONNECTOR_POLL_CONNECT |
1151 DRM_CONNECTOR_POLL_DISCONNECT;
1152}
1153
1154static struct drm_encoder *
1155tc_connector_best_encoder(struct drm_connector *connector)
1156{
1157 struct tc_data *tc = connector_to_tc(connector);
1158
1159 return tc->bridge.encoder;
1160}
1161
1162static const struct drm_connector_helper_funcs tc_connector_helper_funcs = {
1163 .get_modes = tc_connector_get_modes,
1164 .mode_valid = tc_connector_mode_valid,
1165 .best_encoder = tc_connector_best_encoder,
1166};
1167
1168static void tc_connector_destroy(struct drm_connector *connector)
1169{
1170 drm_connector_unregister(connector);
1171 drm_connector_cleanup(connector);
1172}
1173
1174static const struct drm_connector_funcs tc_connector_funcs = {
1175 .dpms = drm_atomic_helper_connector_dpms,
1176 .fill_modes = drm_helper_probe_single_connector_modes,
1177 .detect = tc_connector_detect,
1178 .destroy = tc_connector_destroy,
1179 .reset = drm_atomic_helper_connector_reset,
1180 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
1181 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
1182};
1183
1184static int tc_bridge_attach(struct drm_bridge *bridge)
1185{
1186 u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
1187 struct tc_data *tc = bridge_to_tc(bridge);
1188 struct drm_device *drm = bridge->dev;
1189 int ret;
1190
1191 /* Create eDP connector */
1192 drm_connector_helper_add(&tc->connector, &tc_connector_helper_funcs);
1193 ret = drm_connector_init(drm, &tc->connector, &tc_connector_funcs,
1194 DRM_MODE_CONNECTOR_eDP);
1195 if (ret)
1196 return ret;
1197
1198 if (tc->panel)
1199 drm_panel_attach(tc->panel, &tc->connector);
1200
1201 drm_display_info_set_bus_formats(&tc->connector.display_info,
1202 &bus_format, 1);
1203 drm_mode_connector_attach_encoder(&tc->connector, tc->bridge.encoder);
1204
1205 return 0;
1206}
1207
1208static const struct drm_bridge_funcs tc_bridge_funcs = {
1209 .attach = tc_bridge_attach,
1210 .mode_set = tc_bridge_mode_set,
1211 .pre_enable = tc_bridge_pre_enable,
1212 .enable = tc_bridge_enable,
1213 .disable = tc_bridge_disable,
1214 .post_disable = tc_bridge_post_disable,
1215 .mode_fixup = tc_bridge_mode_fixup,
1216};
1217
1218static bool tc_readable_reg(struct device *dev, unsigned int reg)
1219{
1220 return reg != SYSCTRL;
1221}
1222
1223static const struct regmap_range tc_volatile_ranges[] = {
1224 regmap_reg_range(DP0_AUXWDATA(0), DP0_AUXSTATUS),
1225 regmap_reg_range(DP0_LTSTAT, DP0_SNKLTCHGREQ),
1226 regmap_reg_range(DP_PHY_CTRL, DP_PHY_CTRL),
1227 regmap_reg_range(DP0_PLLCTRL, PXL_PLLCTRL),
1228 regmap_reg_range(VFUEN0, VFUEN0),
1229};
1230
1231static const struct regmap_access_table tc_volatile_table = {
1232 .yes_ranges = tc_volatile_ranges,
1233 .n_yes_ranges = ARRAY_SIZE(tc_volatile_ranges),
1234};
1235
1236static bool tc_writeable_reg(struct device *dev, unsigned int reg)
1237{
1238 return (reg != TC_IDREG) &&
1239 (reg != DP0_LTSTAT) &&
1240 (reg != DP0_SNKLTCHGREQ);
1241}
1242
1243static const struct regmap_config tc_regmap_config = {
1244 .name = "tc358767",
1245 .reg_bits = 16,
1246 .val_bits = 32,
1247 .reg_stride = 4,
1248 .max_register = PLL_DBG,
1249 .cache_type = REGCACHE_RBTREE,
1250 .readable_reg = tc_readable_reg,
1251 .volatile_table = &tc_volatile_table,
1252 .writeable_reg = tc_writeable_reg,
1253 .reg_format_endian = REGMAP_ENDIAN_BIG,
1254 .val_format_endian = REGMAP_ENDIAN_LITTLE,
1255};
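/*
 * Wire format sketch (illustrative, based on the regmap config above):
 * a write of 0x00000001 to DP0CTL (0x0600) is sent over I2C as the
 * big-endian register address 0x06 0x00 followed by the little-endian
 * value 0x01 0x00 0x00 0x00.
 */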
1256
1257static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
1258{
1259 struct device *dev = &client->dev;
1260 struct device_node *ep;
1261 struct tc_data *tc;
1262 int ret;
1263
1264 tc = devm_kzalloc(dev, sizeof(*tc), GFP_KERNEL);
1265 if (!tc)
1266 return -ENOMEM;
1267
1268 tc->dev = dev;
1269
1270 /* port@2 is the output port */
1271 ep = of_graph_get_endpoint_by_regs(dev->of_node, 2, -1);
1272 if (ep) {
1273 struct device_node *remote;
1274
1275 remote = of_graph_get_remote_port_parent(ep);
1276 if (!remote) {
1277 dev_warn(dev, "endpoint %s not connected\n",
1278 ep->full_name);
1279 of_node_put(ep);
1280 return -ENODEV;
1281 }
1282 of_node_put(ep);
1283 tc->panel = of_drm_find_panel(remote);
1284 if (tc->panel) {
1285 dev_dbg(dev, "found panel %s\n", remote->full_name);
1286 } else {
1287 dev_dbg(dev, "waiting for panel %s\n",
1288 remote->full_name);
1289 of_node_put(remote);
1290 return -EPROBE_DEFER;
1291 }
1292 of_node_put(remote);
1293 }
1294
1295 /* Shutdown GPIO is optional */
1296 tc->sd_gpio = devm_gpiod_get_optional(dev, "shutdown", GPIOD_OUT_HIGH);
1297 if (IS_ERR(tc->sd_gpio))
1298 return PTR_ERR(tc->sd_gpio);
1299
1300 if (tc->sd_gpio) {
1301 gpiod_set_value_cansleep(tc->sd_gpio, 0);
1302 usleep_range(5000, 10000);
1303 }
1304
1305 /* Reset GPIO is optional */
1306 tc->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
1307 if (IS_ERR(tc->reset_gpio))
1308 return PTR_ERR(tc->reset_gpio);
1309
1310 if (tc->reset_gpio) {
1311 gpiod_set_value_cansleep(tc->reset_gpio, 1);
1312 usleep_range(5000, 10000);
1313 }
1314
1315 tc->refclk = devm_clk_get(dev, "ref");
1316 if (IS_ERR(tc->refclk)) {
1317 ret = PTR_ERR(tc->refclk);
1318 dev_err(dev, "Failed to get refclk: %d\n", ret);
1319 return ret;
1320 }
1321
1322 tc->regmap = devm_regmap_init_i2c(client, &tc_regmap_config);
1323 if (IS_ERR(tc->regmap)) {
1324 ret = PTR_ERR(tc->regmap);
1325 dev_err(dev, "Failed to initialize regmap: %d\n", ret);
1326 return ret;
1327 }
1328
1329 ret = regmap_read(tc->regmap, TC_IDREG, &tc->rev);
1330 if (ret) {
1331 dev_err(tc->dev, "can not read device ID: %d\n", ret);
1332 return ret;
1333 }
1334
1335 if ((tc->rev != 0x6601) && (tc->rev != 0x6603)) {
1336 dev_err(tc->dev, "invalid device ID: 0x%08x\n", tc->rev);
1337 return -EINVAL;
1338 }
1339
1340 tc->assr = (tc->rev == 0x6601); /* Enable ASSR for eDP panels */
1341
1342 ret = tc_aux_link_setup(tc);
1343 if (ret)
1344 return ret;
1345
1346 /* Register DP AUX channel */
1347 tc->aux.name = "TC358767 AUX i2c adapter";
1348 tc->aux.dev = tc->dev;
1349 tc->aux.transfer = tc_aux_transfer;
1350 ret = drm_dp_aux_register(&tc->aux);
1351 if (ret)
1352 return ret;
1353
1354 ret = tc_get_display_props(tc);
1355 if (ret)
1356 goto err_unregister_aux;
1357
1358 tc_connector_set_polling(tc, &tc->connector);
1359
1360 tc->bridge.funcs = &tc_bridge_funcs;
1361 tc->bridge.of_node = dev->of_node;
1362 ret = drm_bridge_add(&tc->bridge);
1363 if (ret) {
1364 dev_err(dev, "Failed to add drm_bridge: %d\n", ret);
1365 goto err_unregister_aux;
1366 }
1367
1368 i2c_set_clientdata(client, tc);
1369
1370 return 0;
1371err_unregister_aux:
1372 drm_dp_aux_unregister(&tc->aux);
1373 return ret;
1374}
1375
1376static int tc_remove(struct i2c_client *client)
1377{
1378 struct tc_data *tc = i2c_get_clientdata(client);
1379
1380 drm_bridge_remove(&tc->bridge);
1381 drm_dp_aux_unregister(&tc->aux);
1382
1383 tc_pxl_pll_dis(tc);
1384
1385 return 0;
1386}
1387
1388static const struct i2c_device_id tc358767_i2c_ids[] = {
1389 { "tc358767", 0 },
1390 { }
1391};
1392MODULE_DEVICE_TABLE(i2c, tc358767_i2c_ids);
1393
1394static const struct of_device_id tc358767_of_ids[] = {
1395 { .compatible = "toshiba,tc358767", },
1396 { }
1397};
1398MODULE_DEVICE_TABLE(of, tc358767_of_ids);
1399
1400static struct i2c_driver tc358767_driver = {
1401 .driver = {
1402 .name = "tc358767",
1403 .of_match_table = tc358767_of_ids,
1404 },
1405 .id_table = tc358767_i2c_ids,
1406 .probe = tc_probe,
1407 .remove = tc_remove,
1408};
1409module_i2c_driver(tc358767_driver);
1410
1411MODULE_AUTHOR("Andrey Gusakov <andrey.gusakov@cogentembedded.com>");
1412MODULE_DESCRIPTION("tc358767 eDP encoder driver");
1413MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/cirrus/Kconfig b/drivers/gpu/drm/cirrus/Kconfig
index 9864559e5fb9..04b3c161dfae 100644
--- a/drivers/gpu/drm/cirrus/Kconfig
+++ b/drivers/gpu/drm/cirrus/Kconfig
@@ -1,11 +1,7 @@
1config DRM_CIRRUS_QEMU 1config DRM_CIRRUS_QEMU
2 tristate "Cirrus driver for QEMU emulated device" 2 tristate "Cirrus driver for QEMU emulated device"
3 depends on DRM && PCI 3 depends on DRM && PCI
4 select FB_SYS_FILLRECT
5 select FB_SYS_COPYAREA
6 select FB_SYS_IMAGEBLIT
7 select DRM_KMS_HELPER 4 select DRM_KMS_HELPER
8 select DRM_KMS_FB_HELPER
9 select DRM_TTM 5 select DRM_TTM
10 help 6 help
11 This is a KMS driver for emulated cirrus device in qemu. 7 This is a KMS driver for emulated cirrus device in qemu.
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index dc83f69da6f1..b05f7eae32ce 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -142,7 +142,7 @@ static struct drm_driver driver = {
142 .major = DRIVER_MAJOR, 142 .major = DRIVER_MAJOR,
143 .minor = DRIVER_MINOR, 143 .minor = DRIVER_MINOR,
144 .patchlevel = DRIVER_PATCHLEVEL, 144 .patchlevel = DRIVER_PATCHLEVEL,
145 .gem_free_object = cirrus_gem_free_object, 145 .gem_free_object_unlocked = cirrus_gem_free_object,
146 .dumb_create = cirrus_dumb_create, 146 .dumb_create = cirrus_dumb_create,
147 .dumb_map_offset = cirrus_dumb_mmap_offset, 147 .dumb_map_offset = cirrus_dumb_mmap_offset,
148 .dumb_destroy = drm_gem_dumb_destroy, 148 .dumb_destroy = drm_gem_dumb_destroy,
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index 32d32c5b7b17..80446e2d3ab6 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -17,8 +17,8 @@
17static void cirrus_user_framebuffer_destroy(struct drm_framebuffer *fb) 17static void cirrus_user_framebuffer_destroy(struct drm_framebuffer *fb)
18{ 18{
19 struct cirrus_framebuffer *cirrus_fb = to_cirrus_framebuffer(fb); 19 struct cirrus_framebuffer *cirrus_fb = to_cirrus_framebuffer(fb);
20 if (cirrus_fb->obj) 20
21 drm_gem_object_unreference_unlocked(cirrus_fb->obj); 21 drm_gem_object_unreference_unlocked(cirrus_fb->obj);
22 drm_framebuffer_cleanup(fb); 22 drm_framebuffer_cleanup(fb);
23 kfree(fb); 23 kfree(fb);
24} 24}
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index d3d8d7bfcc57..17c915d9a03e 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -325,21 +325,20 @@ static void cirrus_crtc_commit(struct drm_crtc *crtc)
325 * use this for 8-bit mode so can't perform smooth fades on deeper modes, 325 * use this for 8-bit mode so can't perform smooth fades on deeper modes,
326 * but it's a requirement that we provide the function 326 * but it's a requirement that we provide the function
327 */ 327 */
328static void cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, 328static int cirrus_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
329 u16 *blue, uint32_t start, uint32_t size) 329 u16 *blue, uint32_t size)
330{ 330{
331 struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc); 331 struct cirrus_crtc *cirrus_crtc = to_cirrus_crtc(crtc);
332 int i; 332 int i;
333 333
334 if (size != CIRRUS_LUT_SIZE) 334 for (i = 0; i < size; i++) {
335 return;
336
337 for (i = 0; i < CIRRUS_LUT_SIZE; i++) {
338 cirrus_crtc->lut_r[i] = red[i]; 335 cirrus_crtc->lut_r[i] = red[i];
339 cirrus_crtc->lut_g[i] = green[i]; 336 cirrus_crtc->lut_g[i] = green[i];
340 cirrus_crtc->lut_b[i] = blue[i]; 337 cirrus_crtc->lut_b[i] = blue[i];
341 } 338 }
342 cirrus_crtc_load_lut(crtc); 339 cirrus_crtc_load_lut(crtc);
340
341 return 0;
343} 342}
344 343
345/* Simple cleanup function */ 344/* Simple cleanup function */
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 6768b7b1af32..1cc9ee607128 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -186,17 +186,6 @@ static void cirrus_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re
186{ 186{
187} 187}
188 188
189static int cirrus_bo_move(struct ttm_buffer_object *bo,
190 bool evict, bool interruptible,
191 bool no_wait_gpu,
192 struct ttm_mem_reg *new_mem)
193{
194 int r;
195 r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
196 return r;
197}
198
199
200static void cirrus_ttm_backend_destroy(struct ttm_tt *tt) 189static void cirrus_ttm_backend_destroy(struct ttm_tt *tt)
201{ 190{
202 ttm_tt_fini(tt); 191 ttm_tt_fini(tt);
@@ -241,7 +230,7 @@ struct ttm_bo_driver cirrus_bo_driver = {
241 .ttm_tt_unpopulate = cirrus_ttm_tt_unpopulate, 230 .ttm_tt_unpopulate = cirrus_ttm_tt_unpopulate,
242 .init_mem_type = cirrus_bo_init_mem_type, 231 .init_mem_type = cirrus_bo_init_mem_type,
243 .evict_flags = cirrus_bo_evict_flags, 232 .evict_flags = cirrus_bo_evict_flags,
244 .move = cirrus_bo_move, 233 .move = NULL,
245 .verify_access = cirrus_bo_verify_access, 234 .verify_access = cirrus_bo_verify_access,
246 .io_mem_reserve = &cirrus_ttm_io_mem_reserve, 235 .io_mem_reserve = &cirrus_ttm_io_mem_reserve,
247 .io_mem_free = &cirrus_ttm_io_mem_free, 236 .io_mem_free = &cirrus_ttm_io_mem_free,
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 9bb99e274d23..8d2f111fa113 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -33,6 +33,20 @@
33 33
34#include "drm_crtc_internal.h" 34#include "drm_crtc_internal.h"
35 35
36static void crtc_commit_free(struct kref *kref)
37{
38 struct drm_crtc_commit *commit =
39 container_of(kref, struct drm_crtc_commit, ref);
40
41 kfree(commit);
42}
43
44void drm_crtc_commit_put(struct drm_crtc_commit *commit)
45{
46 kref_put(&commit->ref, crtc_commit_free);
47}
48EXPORT_SYMBOL(drm_crtc_commit_put);
49
36/** 50/**
37 * drm_atomic_state_default_release - 51 * drm_atomic_state_default_release -
38 * release memory initialized by drm_atomic_state_init 52 * release memory initialized by drm_atomic_state_init
@@ -44,11 +58,8 @@
44void drm_atomic_state_default_release(struct drm_atomic_state *state) 58void drm_atomic_state_default_release(struct drm_atomic_state *state)
45{ 59{
46 kfree(state->connectors); 60 kfree(state->connectors);
47 kfree(state->connector_states);
48 kfree(state->crtcs); 61 kfree(state->crtcs);
49 kfree(state->crtc_states);
50 kfree(state->planes); 62 kfree(state->planes);
51 kfree(state->plane_states);
52} 63}
53EXPORT_SYMBOL(drm_atomic_state_default_release); 64EXPORT_SYMBOL(drm_atomic_state_default_release);
54 65
@@ -72,18 +83,10 @@ drm_atomic_state_init(struct drm_device *dev, struct drm_atomic_state *state)
72 sizeof(*state->crtcs), GFP_KERNEL); 83 sizeof(*state->crtcs), GFP_KERNEL);
73 if (!state->crtcs) 84 if (!state->crtcs)
74 goto fail; 85 goto fail;
75 state->crtc_states = kcalloc(dev->mode_config.num_crtc,
76 sizeof(*state->crtc_states), GFP_KERNEL);
77 if (!state->crtc_states)
78 goto fail;
79 state->planes = kcalloc(dev->mode_config.num_total_plane, 86 state->planes = kcalloc(dev->mode_config.num_total_plane,
80 sizeof(*state->planes), GFP_KERNEL); 87 sizeof(*state->planes), GFP_KERNEL);
81 if (!state->planes) 88 if (!state->planes)
82 goto fail; 89 goto fail;
83 state->plane_states = kcalloc(dev->mode_config.num_total_plane,
84 sizeof(*state->plane_states), GFP_KERNEL);
85 if (!state->plane_states)
86 goto fail;
87 90
88 state->dev = dev; 91 state->dev = dev;
89 92
@@ -139,40 +142,48 @@ void drm_atomic_state_default_clear(struct drm_atomic_state *state)
139 DRM_DEBUG_ATOMIC("Clearing atomic state %p\n", state); 142 DRM_DEBUG_ATOMIC("Clearing atomic state %p\n", state);
140 143
141 for (i = 0; i < state->num_connector; i++) { 144 for (i = 0; i < state->num_connector; i++) {
142 struct drm_connector *connector = state->connectors[i]; 145 struct drm_connector *connector = state->connectors[i].ptr;
143 146
144 if (!connector) 147 if (!connector)
145 continue; 148 continue;
146 149
147 connector->funcs->atomic_destroy_state(connector, 150 connector->funcs->atomic_destroy_state(connector,
148 state->connector_states[i]); 151 state->connectors[i].state);
149 state->connectors[i] = NULL; 152 state->connectors[i].ptr = NULL;
150 state->connector_states[i] = NULL; 153 state->connectors[i].state = NULL;
151 drm_connector_unreference(connector); 154 drm_connector_unreference(connector);
152 } 155 }
153 156
154 for (i = 0; i < config->num_crtc; i++) { 157 for (i = 0; i < config->num_crtc; i++) {
155 struct drm_crtc *crtc = state->crtcs[i]; 158 struct drm_crtc *crtc = state->crtcs[i].ptr;
156 159
157 if (!crtc) 160 if (!crtc)
158 continue; 161 continue;
159 162
160 crtc->funcs->atomic_destroy_state(crtc, 163 crtc->funcs->atomic_destroy_state(crtc,
161 state->crtc_states[i]); 164 state->crtcs[i].state);
162 state->crtcs[i] = NULL; 165
163 state->crtc_states[i] = NULL; 166 if (state->crtcs[i].commit) {
167 kfree(state->crtcs[i].commit->event);
168 state->crtcs[i].commit->event = NULL;
169 drm_crtc_commit_put(state->crtcs[i].commit);
170 }
171
172 state->crtcs[i].commit = NULL;
173 state->crtcs[i].ptr = NULL;
174 state->crtcs[i].state = NULL;
164 } 175 }
165 176
166 for (i = 0; i < config->num_total_plane; i++) { 177 for (i = 0; i < config->num_total_plane; i++) {
167 struct drm_plane *plane = state->planes[i]; 178 struct drm_plane *plane = state->planes[i].ptr;
168 179
169 if (!plane) 180 if (!plane)
170 continue; 181 continue;
171 182
172 plane->funcs->atomic_destroy_state(plane, 183 plane->funcs->atomic_destroy_state(plane,
173 state->plane_states[i]); 184 state->planes[i].state);
174 state->planes[i] = NULL; 185 state->planes[i].ptr = NULL;
175 state->plane_states[i] = NULL; 186 state->planes[i].state = NULL;
176 } 187 }
177} 188}
178EXPORT_SYMBOL(drm_atomic_state_default_clear); 189EXPORT_SYMBOL(drm_atomic_state_default_clear);
@@ -270,8 +281,8 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
270 if (!crtc_state) 281 if (!crtc_state)
271 return ERR_PTR(-ENOMEM); 282 return ERR_PTR(-ENOMEM);
272 283
273 state->crtc_states[index] = crtc_state; 284 state->crtcs[index].state = crtc_state;
274 state->crtcs[index] = crtc; 285 state->crtcs[index].ptr = crtc;
275 crtc_state->state = state; 286 crtc_state->state = state;
276 287
277 DRM_DEBUG_ATOMIC("Added [CRTC:%d:%s] %p state to %p\n", 288 DRM_DEBUG_ATOMIC("Added [CRTC:%d:%s] %p state to %p\n",
@@ -393,8 +404,7 @@ drm_atomic_replace_property_blob(struct drm_property_blob **blob,
393 if (old_blob == new_blob) 404 if (old_blob == new_blob)
394 return; 405 return;
395 406
396 if (old_blob) 407 drm_property_unreference_blob(old_blob);
397 drm_property_unreference_blob(old_blob);
398 if (new_blob) 408 if (new_blob)
399 drm_property_reference_blob(new_blob); 409 drm_property_reference_blob(new_blob);
400 *blob = new_blob; 410 *blob = new_blob;
@@ -632,8 +642,8 @@ drm_atomic_get_plane_state(struct drm_atomic_state *state,
632 if (!plane_state) 642 if (!plane_state)
633 return ERR_PTR(-ENOMEM); 643 return ERR_PTR(-ENOMEM);
634 644
635 state->plane_states[index] = plane_state; 645 state->planes[index].state = plane_state;
636 state->planes[index] = plane; 646 state->planes[index].ptr = plane;
637 plane_state->state = state; 647 plane_state->state = state;
638 648
639 DRM_DEBUG_ATOMIC("Added [PLANE:%d:%s] %p state to %p\n", 649 DRM_DEBUG_ATOMIC("Added [PLANE:%d:%s] %p state to %p\n",
@@ -897,8 +907,7 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state,
897 index = drm_connector_index(connector); 907 index = drm_connector_index(connector);
898 908
899 if (index >= state->num_connector) { 909 if (index >= state->num_connector) {
900 struct drm_connector **c; 910 struct __drm_connnectors_state *c;
901 struct drm_connector_state **cs;
902 int alloc = max(index + 1, config->num_connector); 911 int alloc = max(index + 1, config->num_connector);
903 912
904 c = krealloc(state->connectors, alloc * sizeof(*state->connectors), GFP_KERNEL); 913 c = krealloc(state->connectors, alloc * sizeof(*state->connectors), GFP_KERNEL);
@@ -909,26 +918,19 @@ drm_atomic_get_connector_state(struct drm_atomic_state *state,
909 memset(&state->connectors[state->num_connector], 0, 918 memset(&state->connectors[state->num_connector], 0,
910 sizeof(*state->connectors) * (alloc - state->num_connector)); 919 sizeof(*state->connectors) * (alloc - state->num_connector));
911 920
912 cs = krealloc(state->connector_states, alloc * sizeof(*state->connector_states), GFP_KERNEL);
913 if (!cs)
914 return ERR_PTR(-ENOMEM);
915
916 state->connector_states = cs;
917 memset(&state->connector_states[state->num_connector], 0,
918 sizeof(*state->connector_states) * (alloc - state->num_connector));
919 state->num_connector = alloc; 921 state->num_connector = alloc;
920 } 922 }
921 923
922 if (state->connector_states[index]) 924 if (state->connectors[index].state)
923 return state->connector_states[index]; 925 return state->connectors[index].state;
924 926
925 connector_state = connector->funcs->atomic_duplicate_state(connector); 927 connector_state = connector->funcs->atomic_duplicate_state(connector);
926 if (!connector_state) 928 if (!connector_state)
927 return ERR_PTR(-ENOMEM); 929 return ERR_PTR(-ENOMEM);
928 930
929 drm_connector_reference(connector); 931 drm_connector_reference(connector);
930 state->connector_states[index] = connector_state; 932 state->connectors[index].state = connector_state;
931 state->connectors[index] = connector; 933 state->connectors[index].ptr = connector;
932 connector_state->state = state; 934 connector_state->state = state;
933 935
934 DRM_DEBUG_ATOMIC("Added [CONNECTOR:%d] %p state to %p\n", 936 DRM_DEBUG_ATOMIC("Added [CONNECTOR:%d] %p state to %p\n",
@@ -1457,7 +1459,8 @@ EXPORT_SYMBOL(drm_atomic_nonblocking_commit);
1457 */ 1459 */
1458 1460
1459static struct drm_pending_vblank_event *create_vblank_event( 1461static struct drm_pending_vblank_event *create_vblank_event(
1460 struct drm_device *dev, struct drm_file *file_priv, uint64_t user_data) 1462 struct drm_device *dev, struct drm_file *file_priv,
1463 struct fence *fence, uint64_t user_data)
1461{ 1464{
1462 struct drm_pending_vblank_event *e = NULL; 1465 struct drm_pending_vblank_event *e = NULL;
1463 int ret; 1466 int ret;
@@ -1470,12 +1473,17 @@ static struct drm_pending_vblank_event *create_vblank_event(
1470 e->event.base.length = sizeof(e->event); 1473 e->event.base.length = sizeof(e->event);
1471 e->event.user_data = user_data; 1474 e->event.user_data = user_data;
1472 1475
1473 ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base); 1476 if (file_priv) {
1474 if (ret) { 1477 ret = drm_event_reserve_init(dev, file_priv, &e->base,
1475 kfree(e); 1478 &e->event.base);
1476 return NULL; 1479 if (ret) {
1480 kfree(e);
1481 return NULL;
1482 }
1477 } 1483 }
1478 1484
1485 e->base.fence = fence;
1486
1479 return e; 1487 return e;
1480} 1488}
1481 1489
@@ -1715,7 +1723,8 @@ retry:
1715 for_each_crtc_in_state(state, crtc, crtc_state, i) { 1723 for_each_crtc_in_state(state, crtc, crtc_state, i) {
1716 struct drm_pending_vblank_event *e; 1724 struct drm_pending_vblank_event *e;
1717 1725
1718 e = create_vblank_event(dev, file_priv, arg->user_data); 1726 e = create_vblank_event(dev, file_priv, NULL,
1727 arg->user_data);
1719 if (!e) { 1728 if (!e) {
1720 ret = -ENOMEM; 1729 ret = -ENOMEM;
1721 goto out; 1730 goto out;
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index ddfa0d120e39..de7fddce3cef 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -110,8 +110,10 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state,
110 110
111 if (funcs->atomic_best_encoder) 111 if (funcs->atomic_best_encoder)
112 new_encoder = funcs->atomic_best_encoder(connector, conn_state); 112 new_encoder = funcs->atomic_best_encoder(connector, conn_state);
113 else 113 else if (funcs->best_encoder)
114 new_encoder = funcs->best_encoder(connector); 114 new_encoder = funcs->best_encoder(connector);
115 else
116 new_encoder = drm_atomic_helper_best_encoder(connector);
115 117
116 if (new_encoder) { 118 if (new_encoder) {
117 if (encoder_mask & (1 << drm_encoder_index(new_encoder))) { 119 if (encoder_mask & (1 << drm_encoder_index(new_encoder))) {
@@ -298,8 +300,10 @@ update_connector_routing(struct drm_atomic_state *state,
298 if (funcs->atomic_best_encoder) 300 if (funcs->atomic_best_encoder)
299 new_encoder = funcs->atomic_best_encoder(connector, 301 new_encoder = funcs->atomic_best_encoder(connector,
300 connector_state); 302 connector_state);
301 else 303 else if (funcs->best_encoder)
302 new_encoder = funcs->best_encoder(connector); 304 new_encoder = funcs->best_encoder(connector);
305 else
306 new_encoder = drm_atomic_helper_best_encoder(connector);
303 307
304 if (!new_encoder) { 308 if (!new_encoder) {
305 DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n", 309 DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n",
@@ -414,6 +418,9 @@ mode_fixup(struct drm_atomic_state *state)
414 for_each_crtc_in_state(state, crtc, crtc_state, i) { 418 for_each_crtc_in_state(state, crtc, crtc_state, i) {
415 const struct drm_crtc_helper_funcs *funcs; 419 const struct drm_crtc_helper_funcs *funcs;
416 420
421 if (!crtc_state->enable)
422 continue;
423
417 if (!crtc_state->mode_changed && 424 if (!crtc_state->mode_changed &&
418 !crtc_state->connectors_changed) 425 !crtc_state->connectors_changed)
419 continue; 426 continue;
@@ -458,7 +465,7 @@ mode_fixup(struct drm_atomic_state *state)
458 * times for the same update, e.g. when the ->atomic_check functions depend upon 465 * times for the same update, e.g. when the ->atomic_check functions depend upon
459 * the adjusted dotclock for fifo space allocation and watermark computation. 466 * the adjusted dotclock for fifo space allocation and watermark computation.
460 * 467 *
461 * RETURNS 468 * RETURNS:
462 * Zero for success or -errno 469 * Zero for success or -errno
463 */ 470 */
464int 471int
@@ -572,7 +579,7 @@ EXPORT_SYMBOL(drm_atomic_helper_check_modeset);
572 * It also sets crtc_state->planes_changed to indicate that a crtc has 579 * It also sets crtc_state->planes_changed to indicate that a crtc has
573 * updated planes. 580 * updated planes.
574 * 581 *
575 * RETURNS 582 * RETURNS:
576 * Zero for success or -errno 583 * Zero for success or -errno
577 */ 584 */
578int 585int
@@ -611,7 +618,7 @@ drm_atomic_helper_check_planes(struct drm_device *dev,
611 if (!funcs || !funcs->atomic_check) 618 if (!funcs || !funcs->atomic_check)
612 continue; 619 continue;
613 620
614 ret = funcs->atomic_check(crtc, state->crtc_states[i]); 621 ret = funcs->atomic_check(crtc, crtc_state);
615 if (ret) { 622 if (ret) {
616 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n", 623 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n",
617 crtc->base.id, crtc->name); 624 crtc->base.id, crtc->name);
@@ -640,7 +647,7 @@ EXPORT_SYMBOL(drm_atomic_helper_check_planes);
640 * ->atomic_check functions depend upon an updated adjusted_mode.clock to 647 * ->atomic_check functions depend upon an updated adjusted_mode.clock to
641 * e.g. properly compute watermarks. 648 * e.g. properly compute watermarks.
642 * 649 *
643 * RETURNS 650 * RETURNS:
644 * Zero for success or -errno 651 * Zero for success or -errno
645 */ 652 */
646int drm_atomic_helper_check(struct drm_device *dev, 653int drm_atomic_helper_check(struct drm_device *dev,
@@ -1113,22 +1120,17 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
1113EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks); 1120EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
1114 1121
1115/** 1122/**
1116 * drm_atomic_helper_commit - commit validated state object 1123 * drm_atomic_helper_commit_tail - commit atomic update to hardware
1117 * @dev: DRM device 1124 * @state: new modeset state to be committed
1118 * @state: the driver state object
1119 * @nonblocking: whether nonblocking behavior is requested.
1120 * 1125 *
1121 * This function commits a with drm_atomic_helper_check() pre-validated state 1126 * This is the default implemenation for the ->atomic_commit_tail() hook of the
1122 * object. This can still fail when e.g. the framebuffer reservation fails. For 1127 * &drm_mode_config_helper_funcs vtable.
1123 * now this doesn't implement nonblocking commits.
1124 * 1128 *
1125 * Note that right now this function does not support nonblocking commits, hence 1129 * Note that the default ordering of how the various stages are called is to
1126 * driver writers must implement their own version for now. Also note that the 1130 * match the legacy modeset helper library closest. One peculiarity of that is
1127 * default ordering of how the various stages are called is to match the legacy 1131 * that it doesn't mesh well with runtime PM at all.
1128 * modeset helper library closest. One peculiarity of that is that it doesn't
1129 * mesh well with runtime PM at all.
1130 * 1132 *
1131 * For drivers supporting runtime PM the recommended sequence is 1133 * For drivers supporting runtime PM the recommended sequence is instead ::
1132 * 1134 *
1133 * drm_atomic_helper_commit_modeset_disables(dev, state); 1135 * drm_atomic_helper_commit_modeset_disables(dev, state);
1134 * 1136 *
@@ -1136,9 +1138,75 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
1136 * 1138 *
1137 * drm_atomic_helper_commit_planes(dev, state, true); 1139 * drm_atomic_helper_commit_planes(dev, state, true);
1138 * 1140 *
1139 * See the kerneldoc entries for these three functions for more details. 1141 * for committing the atomic update to hardware. See the kerneldoc entries for
1142 * these three functions for more details.
1143 */
1144void drm_atomic_helper_commit_tail(struct drm_atomic_state *state)
1145{
1146 struct drm_device *dev = state->dev;
1147
1148 drm_atomic_helper_commit_modeset_disables(dev, state);
1149
1150 drm_atomic_helper_commit_planes(dev, state, false);
1151
1152 drm_atomic_helper_commit_modeset_enables(dev, state);
1153
1154 drm_atomic_helper_commit_hw_done(state);
1155
1156 drm_atomic_helper_wait_for_vblanks(dev, state);
1157
1158 drm_atomic_helper_cleanup_planes(dev, state);
1159}
1160EXPORT_SYMBOL(drm_atomic_helper_commit_tail);
1161
1162static void commit_tail(struct drm_atomic_state *state)
1163{
1164 struct drm_device *dev = state->dev;
1165 struct drm_mode_config_helper_funcs *funcs;
1166
1167 funcs = dev->mode_config.helper_private;
1168
1169 drm_atomic_helper_wait_for_fences(dev, state);
1170
1171 drm_atomic_helper_wait_for_dependencies(state);
1172
1173 if (funcs && funcs->atomic_commit_tail)
1174 funcs->atomic_commit_tail(state);
1175 else
1176 drm_atomic_helper_commit_tail(state);
1177
1178 drm_atomic_helper_commit_cleanup_done(state);
1179
1180 drm_atomic_state_free(state);
1181}
1182
1183static void commit_work(struct work_struct *work)
1184{
1185 struct drm_atomic_state *state = container_of(work,
1186 struct drm_atomic_state,
1187 commit_work);
1188 commit_tail(state);
1189}
1190
1191/**
1192 * drm_atomic_helper_commit - commit validated state object
1193 * @dev: DRM device
1194 * @state: the driver state object
1195 * @nonblock: whether nonblocking behavior is requested.
1196 *
 1197 * This function commits a state object that has been pre-validated with
 1198 * drm_atomic_helper_check(). This can still fail when e.g. the framebuffer
 1199 * reservation fails. This function implements nonblocking commits, using
 1200 * drm_atomic_helper_setup_commit() and related functions.
 1201 *
 1202 * When @nonblock is set the actual commit work is queued to a worker and runs
 1203 * asynchronously; otherwise it is executed synchronously before returning.
1204 *
1205 * Committing the actual hardware state is done through the
1206 * ->atomic_commit_tail() callback of the &drm_mode_config_helper_funcs vtable,
 1207 * or its default implementation drm_atomic_helper_commit_tail().
1140 * 1208 *
1141 * RETURNS 1209 * RETURNS:
1142 * Zero for success or -errno. 1210 * Zero for success or -errno.
1143 */ 1211 */
1144int drm_atomic_helper_commit(struct drm_device *dev, 1212int drm_atomic_helper_commit(struct drm_device *dev,
@@ -1147,8 +1215,11 @@ int drm_atomic_helper_commit(struct drm_device *dev,
1147{ 1215{
1148 int ret; 1216 int ret;
1149 1217
1150 if (nonblock) 1218 ret = drm_atomic_helper_setup_commit(state, nonblock);
1151 return -EBUSY; 1219 if (ret)
1220 return ret;
1221
1222 INIT_WORK(&state->commit_work, commit_work);
1152 1223
1153 ret = drm_atomic_helper_prepare_planes(dev, state); 1224 ret = drm_atomic_helper_prepare_planes(dev, state);
1154 if (ret) 1225 if (ret)
@@ -1160,7 +1231,7 @@ int drm_atomic_helper_commit(struct drm_device *dev,
1160 * the software side now. 1231 * the software side now.
1161 */ 1232 */
1162 1233
1163 drm_atomic_helper_swap_state(dev, state); 1234 drm_atomic_helper_swap_state(state, true);
1164 1235
1165 /* 1236 /*
1166 * Everything below can be run asynchronously without the need to grab 1237 * Everything below can be run asynchronously without the need to grab
@@ -1176,21 +1247,16 @@ int drm_atomic_helper_commit(struct drm_device *dev,
1176 * update. Which is important since compositors need to figure out the 1247 * update. Which is important since compositors need to figure out the
1177 * composition of the next frame right after having submitted the 1248 * composition of the next frame right after having submitted the
1178 * current layout. 1249 * current layout.
1250 *
1251 * NOTE: Commit work has multiple phases, first hardware commit, then
 1252 * cleanup. We want them to overlap, hence we need system_unbound_wq to
 1253 * make sure work items don't artificially stall on each other.
1179 */ 1254 */
1180 1255
1181 drm_atomic_helper_wait_for_fences(dev, state); 1256 if (nonblock)
1182 1257 queue_work(system_unbound_wq, &state->commit_work);
1183 drm_atomic_helper_commit_modeset_disables(dev, state); 1258 else
1184 1259 commit_tail(state);
1185 drm_atomic_helper_commit_planes(dev, state, false);
1186
1187 drm_atomic_helper_commit_modeset_enables(dev, state);
1188
1189 drm_atomic_helper_wait_for_vblanks(dev, state);
1190
1191 drm_atomic_helper_cleanup_planes(dev, state);
1192
1193 drm_atomic_state_free(state);
1194 1260
1195 return 0; 1261 return 0;
1196} 1262}
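With nonblocking support handled by the helper itself, a driver that is happy with the defaults can simply point its mode config hooks at the helpers. A minimal hypothetical hookup (foo_fb_create stands in for the driver's framebuffer creation callback):

	static const struct drm_mode_config_funcs foo_mode_config_funcs = {
		.fb_create = foo_fb_create,
		.atomic_check = drm_atomic_helper_check,
		.atomic_commit = drm_atomic_helper_commit,
	};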
@@ -1199,12 +1265,7 @@ EXPORT_SYMBOL(drm_atomic_helper_commit);
1199/** 1265/**
1200 * DOC: implementing nonblocking commit 1266 * DOC: implementing nonblocking commit
1201 * 1267 *
1202 * For now the atomic helpers don't support nonblocking commit directly. If 1268 * Nonblocking atomic commits have to be implemented in the following sequence:
1203 * there is real need it could be added though, using the dma-buf fence
1204 * infrastructure for generic synchronization with outstanding rendering.
1205 *
1206 * For now drivers have to implement nonblocking commit themselves, with the
1207 * following sequence being the recommended one:
1208 * 1269 *
1209 * 1. Run drm_atomic_helper_prepare_planes() first. This is the only function 1270 * 1. Run drm_atomic_helper_prepare_planes() first. This is the only function
1210 * which commit needs to call which can fail, so we want to run it first and 1271 * which commit needs to call which can fail, so we want to run it first and
@@ -1216,10 +1277,14 @@ EXPORT_SYMBOL(drm_atomic_helper_commit);
1216 * cancelled updates. Note that it is important to ensure that the framebuffer 1277 * cancelled updates. Note that it is important to ensure that the framebuffer
1217 * cleanup is still done when cancelling. 1278 * cleanup is still done when cancelling.
1218 * 1279 *
1219 * For sufficient parallelism it is recommended to have a work item per crtc 1280 * Asynchronous workers need to have sufficient parallelism to be able to run
1220 * (for updates which don't touch global state) and a global one. Then we only 1281 * different atomic commits on different CRTCs in parallel. The simplest way to
 1221 * need to synchronize with the crtc work items for changed crtcs and the global 1282 * achieve this is by running them on the &system_unbound_wq work queue. Note
1222 * work item, which allows nice concurrent updates on disjoint sets of crtcs. 1283 * that drivers are not required to split up atomic commits and run an
1284 * individual commit in parallel - userspace is supposed to do that if it cares.
1285 * But it might be beneficial to do that for modesets, since those necessarily
1286 * must be done as one global operation, and enabling or disabling a CRTC can
1287 * take a long time. But even that is not required.
1223 * 1288 *
1224 * 3. The software state is updated synchronously with 1289 * 3. The software state is updated synchronously with
1225 * drm_atomic_helper_swap_state(). Doing this under the protection of all modeset 1290 * drm_atomic_helper_swap_state(). Doing this under the protection of all modeset
@@ -1232,8 +1297,310 @@ EXPORT_SYMBOL(drm_atomic_helper_commit);
1232 * commit helpers: a) pre-plane commit b) plane commit c) post-plane commit and 1297 * commit helpers: a) pre-plane commit b) plane commit c) post-plane commit and
1233 * then cleaning up the framebuffers after the old framebuffer is no longer 1298 * then cleaning up the framebuffers after the old framebuffer is no longer
1234 * being displayed. 1299 * being displayed.
1300 *
1301 * The above scheme is implemented in the atomic helper libraries in
1302 * drm_atomic_helper_commit() using a bunch of helper functions. See
1303 * drm_atomic_helper_setup_commit() for a starting point.
1235 */ 1304 */
1236 1305
1306static int stall_checks(struct drm_crtc *crtc, bool nonblock)
1307{
1308 struct drm_crtc_commit *commit, *stall_commit = NULL;
1309 bool completed = true;
1310 int i;
1311 long ret = 0;
1312
1313 spin_lock(&crtc->commit_lock);
1314 i = 0;
1315 list_for_each_entry(commit, &crtc->commit_list, commit_entry) {
1316 if (i == 0) {
1317 completed = try_wait_for_completion(&commit->flip_done);
1318 /* Userspace is not allowed to get ahead of the previous
1319 * commit with nonblocking ones. */
1320 if (!completed && nonblock) {
1321 spin_unlock(&crtc->commit_lock);
1322 return -EBUSY;
1323 }
1324 } else if (i == 1) {
1325 stall_commit = commit;
1326 drm_crtc_commit_get(stall_commit);
1327 break;
1328 }
1329
1330 i++;
1331 }
1332 spin_unlock(&crtc->commit_lock);
1333
1334 if (!stall_commit)
1335 return 0;
1336
1337 /* We don't want to let commits get ahead of cleanup work too much,
1338 * stalling on 2nd previous commit means triple-buffer won't ever stall.
1339 */
1340 ret = wait_for_completion_interruptible_timeout(&stall_commit->cleanup_done,
1341 10*HZ);
1342 if (ret == 0)
1343 DRM_ERROR("[CRTC:%d:%s] cleanup_done timed out\n",
1344 crtc->base.id, crtc->name);
1345
1346 drm_crtc_commit_put(stall_commit);
1347
1348 return ret < 0 ? ret : 0;
1349}
1350
1351/**
1352 * drm_atomic_helper_setup_commit - setup possibly nonblocking commit
1353 * @state: new modeset state to be committed
1354 * @nonblock: whether nonblocking behavior is requested.
1355 *
1356 * This function prepares @state to be used by the atomic helper's support for
1357 * nonblocking commits. Drivers using the nonblocking commit infrastructure
1358 * should always call this function from their ->atomic_commit hook.
1359 *
1360 * To be able to use this support drivers need to use a few more helper
1361 * functions. drm_atomic_helper_wait_for_dependencies() must be called before
1362 * actually committing the hardware state, and for nonblocking commits this call
1363 * must be placed in the async worker. See also drm_atomic_helper_swap_state()
 1364 * and its stall parameter, for when a driver's commit hooks look at the
1365 * ->state pointers of struct &drm_crtc, &drm_plane or &drm_connector directly.
1366 *
1367 * Completion of the hardware commit step must be signalled using
1368 * drm_atomic_helper_commit_hw_done(). After this step the driver is not allowed
1369 * to read or change any permanent software or hardware modeset state. The only
1370 * exception is state protected by other means than &drm_modeset_lock locks.
1371 * Only the free standing @state with pointers to the old state structures can
1372 * be inspected, e.g. to clean up old buffers using
1373 * drm_atomic_helper_cleanup_planes().
1374 *
1375 * At the very end, before cleaning up @state drivers must call
1376 * drm_atomic_helper_commit_cleanup_done().
1377 *
 1378 * This is all implemented in drm_atomic_helper_commit(), giving drivers a
 1379 * complete and easy-to-use default implementation of the atomic_commit() hook.
1380 *
1381 * The tracking of asynchronously executed and still pending commits is done
1382 * using the core structure &drm_crtc_commit.
1383 *
1384 * By default there's no need to clean up resources allocated by this function
1385 * explicitly: drm_atomic_state_default_clear() will take care of that
1386 * automatically.
1387 *
1388 * Returns:
1389 *
1390 * 0 on success. -EBUSY when userspace schedules nonblocking commits too fast,
1391 * -ENOMEM on allocation failures and -EINTR when a signal is pending.
1392 */
1393int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
1394 bool nonblock)
1395{
1396 struct drm_crtc *crtc;
1397 struct drm_crtc_state *crtc_state;
1398 struct drm_crtc_commit *commit;
1399 int i, ret;
1400
1401 for_each_crtc_in_state(state, crtc, crtc_state, i) {
1402 commit = kzalloc(sizeof(*commit), GFP_KERNEL);
1403 if (!commit)
1404 return -ENOMEM;
1405
1406 init_completion(&commit->flip_done);
1407 init_completion(&commit->hw_done);
1408 init_completion(&commit->cleanup_done);
1409 INIT_LIST_HEAD(&commit->commit_entry);
1410 kref_init(&commit->ref);
1411 commit->crtc = crtc;
1412
1413 state->crtcs[i].commit = commit;
1414
1415 ret = stall_checks(crtc, nonblock);
1416 if (ret)
1417 return ret;
1418
1419 /* Drivers only send out events when at least either current or
1420 * new CRTC state is active. Complete right away if everything
1421 * stays off. */
1422 if (!crtc->state->active && !crtc_state->active) {
1423 complete_all(&commit->flip_done);
1424 continue;
1425 }
1426
1427 /* Legacy cursor updates are fully unsynced. */
1428 if (state->legacy_cursor_update) {
1429 complete_all(&commit->flip_done);
1430 continue;
1431 }
1432
1433 if (!crtc_state->event) {
1434 commit->event = kzalloc(sizeof(*commit->event),
1435 GFP_KERNEL);
1436 if (!commit->event)
1437 return -ENOMEM;
1438
1439 crtc_state->event = commit->event;
1440 }
1441
1442 crtc_state->event->base.completion = &commit->flip_done;
1443 }
1444
1445 return 0;
1446}
1447EXPORT_SYMBOL(drm_atomic_helper_setup_commit);
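Putting the pieces together, the commit-tail side of a driver that uses this tracking but rolls its own ordering could look roughly like the following sketch (the foo_ prefix is hypothetical; drm_atomic_helper_commit() above is the reference implementation of the setup and queueing side):

	static void foo_commit_tail(struct drm_atomic_state *state)
	{
		struct drm_device *dev = state->dev;

		/* stall for earlier commits touching the same CRTCs */
		drm_atomic_helper_wait_for_dependencies(state);

		drm_atomic_helper_commit_modeset_disables(dev, state);
		drm_atomic_helper_commit_planes(dev, state, false);
		drm_atomic_helper_commit_modeset_enables(dev, state);

		/* no touching of permanent sw/hw modeset state past this point */
		drm_atomic_helper_commit_hw_done(state);

		drm_atomic_helper_wait_for_vblanks(dev, state);
		drm_atomic_helper_cleanup_planes(dev, state);

		drm_atomic_helper_commit_cleanup_done(state);
		drm_atomic_state_free(state);
	}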
1448
1449
1450static struct drm_crtc_commit *preceeding_commit(struct drm_crtc *crtc)
1451{
1452 struct drm_crtc_commit *commit;
1453 int i = 0;
1454
1455 list_for_each_entry(commit, &crtc->commit_list, commit_entry) {
1456 /* skip the first entry, that's the current commit */
1457 if (i == 1)
1458 return commit;
1459 i++;
1460 }
1461
1462 return NULL;
1463}
1464
1465/**
 1466 * drm_atomic_helper_wait_for_dependencies - wait for required preceding commits
1467 * @state: new modeset state to be committed
1468 *
 1469 * This function waits for all preceding commits that touch the same CRTC as
1470 * @state to both be committed to the hardware (as signalled by
1471 * drm_atomic_helper_commit_hw_done) and executed by the hardware (as signalled
 1472 * by calling drm_crtc_send_vblank_event() on the event member of
1473 * &drm_crtc_state).
1474 *
1475 * This is part of the atomic helper support for nonblocking commits, see
1476 * drm_atomic_helper_setup_commit() for an overview.
1477 */
1478void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *state)
1479{
1480 struct drm_crtc *crtc;
1481 struct drm_crtc_state *crtc_state;
1482 struct drm_crtc_commit *commit;
1483 int i;
1484 long ret;
1485
1486 for_each_crtc_in_state(state, crtc, crtc_state, i) {
1487 spin_lock(&crtc->commit_lock);
1488 commit = preceeding_commit(crtc);
1489 if (commit)
1490 drm_crtc_commit_get(commit);
1491 spin_unlock(&crtc->commit_lock);
1492
1493 if (!commit)
1494 continue;
1495
1496 ret = wait_for_completion_timeout(&commit->hw_done,
1497 10*HZ);
1498 if (ret == 0)
1499 DRM_ERROR("[CRTC:%d:%s] hw_done timed out\n",
1500 crtc->base.id, crtc->name);
1501
1502 /* Currently no support for overwriting flips, hence
1503 * stall for previous one to execute completely. */
1504 ret = wait_for_completion_timeout(&commit->flip_done,
1505 10*HZ);
1506 if (ret == 0)
1507 DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n",
1508 crtc->base.id, crtc->name);
1509
1510 drm_crtc_commit_put(commit);
1511 }
1512}
1513EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);
1514
1515/**
 1516 * drm_atomic_helper_commit_hw_done - signal completion of the hardware commit step
1517 * @state: new modeset state to be committed
1518 *
1519 * This function is used to signal completion of the hardware commit step. After
1520 * this step the driver is not allowed to read or change any permanent software
1521 * or hardware modeset state. The only exception is state protected by other
1522 * means than &drm_modeset_lock locks.
1523 *
 1524 * Drivers should try to postpone any expensive or delayed cleanup work until
 1525 * after this function is called.
1526 *
1527 * This is part of the atomic helper support for nonblocking commits, see
1528 * drm_atomic_helper_setup_commit() for an overview.
1529 */
1530void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *state)
1531{
1532 struct drm_crtc *crtc;
1533 struct drm_crtc_state *crtc_state;
1534 struct drm_crtc_commit *commit;
1535 int i;
1536
1537 for_each_crtc_in_state(state, crtc, crtc_state, i) {
1538 commit = state->crtcs[i].commit;
1539 if (!commit)
1540 continue;
1541
1542 /* backend must have consumed any event by now */
1543 WARN_ON(crtc->state->event);
1544 spin_lock(&crtc->commit_lock);
1545 complete_all(&commit->hw_done);
1546 spin_unlock(&crtc->commit_lock);
1547 }
1548}
1549EXPORT_SYMBOL(drm_atomic_helper_commit_hw_done);
1550
1551/**
1552 * drm_atomic_helper_commit_cleanup_done - signal completion of commit
1553 * @state: new modeset state to be committed
1554 *
1555 * This signals completion of the atomic update @state, including any cleanup
1556 * work. If used, it must be called right before calling
1557 * drm_atomic_state_free().
1558 *
1559 * This is part of the atomic helper support for nonblocking commits, see
1560 * drm_atomic_helper_setup_commit() for an overview.
1561 */
1562void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *state)
1563{
1564 struct drm_crtc *crtc;
1565 struct drm_crtc_state *crtc_state;
1566 struct drm_crtc_commit *commit;
1567 int i;
1568 long ret;
1569
1570 for_each_crtc_in_state(state, crtc, crtc_state, i) {
1571 commit = state->crtcs[i].commit;
1572 if (WARN_ON(!commit))
1573 continue;
1574
1575 spin_lock(&crtc->commit_lock);
1576 complete_all(&commit->cleanup_done);
1577 WARN_ON(!try_wait_for_completion(&commit->hw_done));
1578
1579 /* commit_list borrows our reference, need to remove before we
1580 * clean up our drm_atomic_state. But only after it actually
1581 * completed, otherwise subsequent commits won't stall properly. */
1582 if (try_wait_for_completion(&commit->flip_done))
1583 goto del_commit;
1584
1585 spin_unlock(&crtc->commit_lock);
1586
1587 /* We must wait for the vblank event to signal our completion
1588 * before releasing our reference, since the vblank work does
1589 * not hold a reference of its own. */
1590 ret = wait_for_completion_timeout(&commit->flip_done,
1591 10*HZ);
1592 if (ret == 0)
1593 DRM_ERROR("[CRTC:%d:%s] flip_done timed out\n",
1594 crtc->base.id, crtc->name);
1595
1596 spin_lock(&crtc->commit_lock);
1597del_commit:
1598 list_del(&commit->commit_entry);
1599 spin_unlock(&crtc->commit_lock);
1600 }
1601}
1602EXPORT_SYMBOL(drm_atomic_helper_commit_cleanup_done);
1603
1237/** 1604/**
1238 * drm_atomic_helper_prepare_planes - prepare plane resources before commit 1605 * drm_atomic_helper_prepare_planes - prepare plane resources before commit
1239 * @dev: DRM device 1606 * @dev: DRM device
@@ -1249,16 +1616,12 @@ EXPORT_SYMBOL(drm_atomic_helper_commit);
1249int drm_atomic_helper_prepare_planes(struct drm_device *dev, 1616int drm_atomic_helper_prepare_planes(struct drm_device *dev,
1250 struct drm_atomic_state *state) 1617 struct drm_atomic_state *state)
1251{ 1618{
1252 int nplanes = dev->mode_config.num_total_plane; 1619 struct drm_plane *plane;
1253 int ret, i; 1620 struct drm_plane_state *plane_state;
1621 int ret, i, j;
1254 1622
1255 for (i = 0; i < nplanes; i++) { 1623 for_each_plane_in_state(state, plane, plane_state, i) {
1256 const struct drm_plane_helper_funcs *funcs; 1624 const struct drm_plane_helper_funcs *funcs;
1257 struct drm_plane *plane = state->planes[i];
1258 struct drm_plane_state *plane_state = state->plane_states[i];
1259
1260 if (!plane)
1261 continue;
1262 1625
1263 funcs = plane->helper_private; 1626 funcs = plane->helper_private;
1264 1627
@@ -1272,12 +1635,10 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev,
1272 return 0; 1635 return 0;
1273 1636
1274fail: 1637fail:
1275 for (i--; i >= 0; i--) { 1638 for_each_plane_in_state(state, plane, plane_state, j) {
1276 const struct drm_plane_helper_funcs *funcs; 1639 const struct drm_plane_helper_funcs *funcs;
1277 struct drm_plane *plane = state->planes[i];
1278 struct drm_plane_state *plane_state = state->plane_states[i];
1279 1640
1280 if (!plane) 1641 if (j >= i)
1281 continue; 1642 continue;
1282 1643
1283 funcs = plane->helper_private; 1644 funcs = plane->helper_private;
@@ -1537,8 +1898,8 @@ EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
1537 1898
1538/** 1899/**
1539 * drm_atomic_helper_swap_state - store atomic state into current sw state 1900 * drm_atomic_helper_swap_state - store atomic state into current sw state
1540 * @dev: DRM device
1541 * @state: atomic state 1901 * @state: atomic state
 1902 * @stall: stall for preceding commits
1542 * 1903 *
1543 * This function stores the atomic state into the current state pointers in all 1904 * This function stores the atomic state into the current state pointers in all
1544 * driver objects. It should be called after all failing steps have been done 1905 * driver objects. It should be called after all failing steps have been done
@@ -1559,42 +1920,70 @@ EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
1559 * 1920 *
1560 * 5. Call drm_atomic_helper_cleanup_planes() with @state, which since step 3 1921 * 5. Call drm_atomic_helper_cleanup_planes() with @state, which since step 3
1561 * contains the old state. Also do any other cleanup required with that state. 1922 * contains the old state. Also do any other cleanup required with that state.
1923 *
1924 * @stall must be set when nonblocking commits for this driver directly access
1925 * the ->state pointer of &drm_plane, &drm_crtc or &drm_connector. With the
1926 * current atomic helpers this is almost always the case, since the helpers
1927 * don't pass the right state structures to the callbacks.
1562 */ 1928 */
1563void drm_atomic_helper_swap_state(struct drm_device *dev, 1929void drm_atomic_helper_swap_state(struct drm_atomic_state *state,
1564 struct drm_atomic_state *state) 1930 bool stall)
1565{ 1931{
1566 int i; 1932 int i;
1933 long ret;
1934 struct drm_connector *connector;
1935 struct drm_connector_state *conn_state;
1936 struct drm_crtc *crtc;
1937 struct drm_crtc_state *crtc_state;
1938 struct drm_plane *plane;
1939 struct drm_plane_state *plane_state;
1940 struct drm_crtc_commit *commit;
1941
1942 if (stall) {
1943 for_each_crtc_in_state(state, crtc, crtc_state, i) {
1944 spin_lock(&crtc->commit_lock);
1945 commit = list_first_entry_or_null(&crtc->commit_list,
1946 struct drm_crtc_commit, commit_entry);
1947 if (commit)
1948 drm_crtc_commit_get(commit);
1949 spin_unlock(&crtc->commit_lock);
1950
1951 if (!commit)
1952 continue;
1567 1953
1568 for (i = 0; i < state->num_connector; i++) { 1954 ret = wait_for_completion_timeout(&commit->hw_done,
1569 struct drm_connector *connector = state->connectors[i]; 1955 10*HZ);
1570 1956 if (ret == 0)
1571 if (!connector) 1957 DRM_ERROR("[CRTC:%d:%s] hw_done timed out\n",
1572 continue; 1958 crtc->base.id, crtc->name);
1959 drm_crtc_commit_put(commit);
1960 }
1961 }
1573 1962
1963 for_each_connector_in_state(state, connector, conn_state, i) {
1574 connector->state->state = state; 1964 connector->state->state = state;
1575 swap(state->connector_states[i], connector->state); 1965 swap(state->connectors[i].state, connector->state);
1576 connector->state->state = NULL; 1966 connector->state->state = NULL;
1577 } 1967 }
1578 1968
1579 for (i = 0; i < dev->mode_config.num_crtc; i++) { 1969 for_each_crtc_in_state(state, crtc, crtc_state, i) {
1580 struct drm_crtc *crtc = state->crtcs[i];
1581
1582 if (!crtc)
1583 continue;
1584
1585 crtc->state->state = state; 1970 crtc->state->state = state;
1586 swap(state->crtc_states[i], crtc->state); 1971 swap(state->crtcs[i].state, crtc->state);
1587 crtc->state->state = NULL; 1972 crtc->state->state = NULL;
1588 }
1589 1973
1590 for (i = 0; i < dev->mode_config.num_total_plane; i++) { 1974 if (state->crtcs[i].commit) {
1591 struct drm_plane *plane = state->planes[i]; 1975 spin_lock(&crtc->commit_lock);
1976 list_add(&state->crtcs[i].commit->commit_entry,
1977 &crtc->commit_list);
1978 spin_unlock(&crtc->commit_lock);
1592 1979
1593 if (!plane) 1980 state->crtcs[i].commit->event = NULL;
1594 continue; 1981 }
1982 }
1595 1983
1984 for_each_plane_in_state(state, plane, plane_state, i) {
1596 plane->state->state = state; 1985 plane->state->state = state;
1597 swap(state->plane_states[i], plane->state); 1986 swap(state->planes[i].state, plane->state);
1598 plane->state->state = NULL; 1987 plane->state->state = NULL;
1599 } 1988 }
1600} 1989}
@@ -2409,7 +2798,7 @@ EXPORT_SYMBOL(drm_atomic_helper_page_flip);
2409 * This is the main helper function provided by the atomic helper framework for 2798 * This is the main helper function provided by the atomic helper framework for
2410 * implementing the legacy DPMS connector interface. It computes the new desired 2799 * implementing the legacy DPMS connector interface. It computes the new desired
2411 * ->active state for the corresponding CRTC (if the connector is enabled) and 2800 * ->active state for the corresponding CRTC (if the connector is enabled) and
2412 * updates it. 2801 * updates it.
2413 * 2802 *
2414 * Returns: 2803 * Returns:
2415 * Returns 0 on success, negative errno numbers on failure. 2804 * Returns 0 on success, negative errno numbers on failure.
@@ -2930,16 +3319,15 @@ EXPORT_SYMBOL(drm_atomic_helper_connector_destroy_state);
2930 * @red: red correction table 3319 * @red: red correction table
2931 * @green: green correction table 3320 * @green: green correction table
2932 * @blue: green correction table 3321 * @blue: green correction table
2933 * @start:
2934 * @size: size of the tables 3322 * @size: size of the tables
2935 * 3323 *
2936 * Implements support for legacy gamma correction table for drivers 3324 * Implements support for legacy gamma correction table for drivers
2937 * that support color management through the DEGAMMA_LUT/GAMMA_LUT 3325 * that support color management through the DEGAMMA_LUT/GAMMA_LUT
2938 * properties. 3326 * properties.
2939 */ 3327 */
2940void drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc, 3328int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
2941 u16 *red, u16 *green, u16 *blue, 3329 u16 *red, u16 *green, u16 *blue,
2942 uint32_t start, uint32_t size) 3330 uint32_t size)
2943{ 3331{
2944 struct drm_device *dev = crtc->dev; 3332 struct drm_device *dev = crtc->dev;
2945 struct drm_mode_config *config = &dev->mode_config; 3333 struct drm_mode_config *config = &dev->mode_config;
@@ -2951,7 +3339,7 @@ void drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
2951 3339
2952 state = drm_atomic_state_alloc(crtc->dev); 3340 state = drm_atomic_state_alloc(crtc->dev);
2953 if (!state) 3341 if (!state)
2954 return; 3342 return -ENOMEM;
2955 3343
2956 blob = drm_property_create_blob(dev, 3344 blob = drm_property_create_blob(dev,
2957 sizeof(struct drm_color_lut) * size, 3345 sizeof(struct drm_color_lut) * size,
@@ -3002,7 +3390,7 @@ retry:
3002 3390
3003 drm_property_unreference_blob(blob); 3391 drm_property_unreference_blob(blob);
3004 3392
3005 return; 3393 return 0;
3006fail: 3394fail:
3007 if (ret == -EDEADLK) 3395 if (ret == -EDEADLK)
3008 goto backoff; 3396 goto backoff;
@@ -3010,7 +3398,7 @@ fail:
3010 drm_atomic_state_free(state); 3398 drm_atomic_state_free(state);
3011 drm_property_unreference_blob(blob); 3399 drm_property_unreference_blob(blob);
3012 3400
3013 return; 3401 return ret;
3014backoff: 3402backoff:
3015 drm_atomic_state_clear(state); 3403 drm_atomic_state_clear(state);
3016 drm_atomic_legacy_backoff(state); 3404 drm_atomic_legacy_backoff(state);
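Since the helper now returns an int, atomic drivers can wire it straight into their CRTC funcs and have gamma errors propagated to userspace. A hypothetical hookup (the non-gamma hooks shown are just the usual atomic helpers):

	static const struct drm_crtc_funcs foo_crtc_funcs = {
		.gamma_set = drm_atomic_helper_legacy_gamma_set,
		.set_config = drm_atomic_helper_set_config,
		.page_flip = drm_atomic_helper_page_flip,
		.destroy = drm_crtc_cleanup,
		.reset = drm_atomic_helper_crtc_reset,
		.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
		.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
	};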
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index 50d0baa06db0..4153e8a193af 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -30,25 +30,36 @@
30 30
31#include <drm/drmP.h> 31#include <drm/drmP.h>
32#include "drm_internal.h" 32#include "drm_internal.h"
33#include "drm_legacy.h"
33 34
34/** 35/**
35 * drm_getmagic - Get unique magic of a client 36 * DOC: master and authentication
36 * @dev: DRM device to operate on
37 * @data: ioctl data containing the drm_auth object
38 * @file_priv: DRM file that performs the operation
39 * 37 *
40 * This looks up the unique magic of the passed client and returns it. If the 38 * struct &drm_master is used to track groups of clients with open
 41 * client did not have a magic assigned, yet, a new one is registered. The magic 39 * primary/legacy device nodes. For every struct &drm_file which has at
 42 * is stored in the passed drm_auth object. 40 * least once successfully become the device master (either through the
 41 * SET_MASTER IOCTL, or implicitly through opening the primary device node when
 42 * no one else is the current master at that time) there exists one &drm_master.
43 * This is noted in the is_master member of &drm_file. All other clients have
44 * just a pointer to the &drm_master they are associated with.
43 * 45 *
44 * Returns: 0 on success, negative error code on failure. 46 * In addition only one &drm_master can be the current master for a &drm_device.
47 * It can be switched through the DROP_MASTER and SET_MASTER IOCTL, or
 48 * implicitly through closing/opening the primary device node. See also
49 * drm_is_current_master().
50 *
51 * Clients can authenticate against the current master (if it matches their own)
52 * using the GETMAGIC and AUTHMAGIC IOCTLs. Together with exchanging masters,
53 * this allows controlled access to the device for an entire group of mutually
54 * trusted clients.
45 */ 55 */
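From userspace this handshake is normally driven through libdrm. A rough sketch, assuming the magic is handed from client to master over some driver- or compositor-specific IPC channel (error handling omitted):

	#include <xf86drm.h>

	/* client side: fetch a magic token for the fd it wants authenticated */
	static int client_get_magic(int client_fd, drm_magic_t *magic)
	{
		return drmGetMagic(client_fd, magic);
	}

	/* master side: authenticate the magic received from the client */
	static int master_auth_client(int master_fd, drm_magic_t magic)
	{
		return drmAuthMagic(master_fd, magic);
	}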
56
46int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv) 57int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
47{ 58{
48 struct drm_auth *auth = data; 59 struct drm_auth *auth = data;
49 int ret = 0; 60 int ret = 0;
50 61
51 mutex_lock(&dev->struct_mutex); 62 mutex_lock(&dev->master_mutex);
52 if (!file_priv->magic) { 63 if (!file_priv->magic) {
53 ret = idr_alloc(&file_priv->master->magic_map, file_priv, 64 ret = idr_alloc(&file_priv->master->magic_map, file_priv,
54 1, 0, GFP_KERNEL); 65 1, 0, GFP_KERNEL);
@@ -56,23 +67,13 @@ int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
56 file_priv->magic = ret; 67 file_priv->magic = ret;
57 } 68 }
58 auth->magic = file_priv->magic; 69 auth->magic = file_priv->magic;
59 mutex_unlock(&dev->struct_mutex); 70 mutex_unlock(&dev->master_mutex);
60 71
61 DRM_DEBUG("%u\n", auth->magic); 72 DRM_DEBUG("%u\n", auth->magic);
62 73
63 return ret < 0 ? ret : 0; 74 return ret < 0 ? ret : 0;
64} 75}
65 76
66/**
67 * drm_authmagic - Authenticate client with a magic
68 * @dev: DRM device to operate on
69 * @data: ioctl data containing the drm_auth object
70 * @file_priv: DRM file that performs the operation
71 *
72 * This looks up a DRM client by the passed magic and authenticates it.
73 *
74 * Returns: 0 on success, negative error code on failure.
75 */
76int drm_authmagic(struct drm_device *dev, void *data, 77int drm_authmagic(struct drm_device *dev, void *data,
77 struct drm_file *file_priv) 78 struct drm_file *file_priv)
78{ 79{
@@ -81,13 +82,253 @@ int drm_authmagic(struct drm_device *dev, void *data,
81 82
82 DRM_DEBUG("%u\n", auth->magic); 83 DRM_DEBUG("%u\n", auth->magic);
83 84
84 mutex_lock(&dev->struct_mutex); 85 mutex_lock(&dev->master_mutex);
85 file = idr_find(&file_priv->master->magic_map, auth->magic); 86 file = idr_find(&file_priv->master->magic_map, auth->magic);
86 if (file) { 87 if (file) {
87 file->authenticated = 1; 88 file->authenticated = 1;
88 idr_replace(&file_priv->master->magic_map, NULL, auth->magic); 89 idr_replace(&file_priv->master->magic_map, NULL, auth->magic);
89 } 90 }
90 mutex_unlock(&dev->struct_mutex); 91 mutex_unlock(&dev->master_mutex);
91 92
92 return file ? 0 : -EINVAL; 93 return file ? 0 : -EINVAL;
93} 94}
95
96static struct drm_master *drm_master_create(struct drm_device *dev)
97{
98 struct drm_master *master;
99
100 master = kzalloc(sizeof(*master), GFP_KERNEL);
101 if (!master)
102 return NULL;
103
104 kref_init(&master->refcount);
105 spin_lock_init(&master->lock.spinlock);
106 init_waitqueue_head(&master->lock.lock_queue);
107 idr_init(&master->magic_map);
108 master->dev = dev;
109
110 return master;
111}
112
113static int drm_set_master(struct drm_device *dev, struct drm_file *fpriv,
114 bool new_master)
115{
116 int ret = 0;
117
118 dev->master = drm_master_get(fpriv->master);
119 if (dev->driver->master_set) {
120 ret = dev->driver->master_set(dev, fpriv, new_master);
121 if (unlikely(ret != 0)) {
122 drm_master_put(&dev->master);
123 }
124 }
125
126 return ret;
127}
128
129static int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv)
130{
131 struct drm_master *old_master;
132 int ret;
133
134 lockdep_assert_held_once(&dev->master_mutex);
135
136 old_master = fpriv->master;
137 fpriv->master = drm_master_create(dev);
138 if (!fpriv->master) {
139 fpriv->master = old_master;
140 return -ENOMEM;
141 }
142
143 if (dev->driver->master_create) {
144 ret = dev->driver->master_create(dev, fpriv->master);
145 if (ret)
146 goto out_err;
147 }
148 fpriv->is_master = 1;
149 fpriv->authenticated = 1;
150
151 ret = drm_set_master(dev, fpriv, true);
152 if (ret)
153 goto out_err;
154
155 if (old_master)
156 drm_master_put(&old_master);
157
158 return 0;
159
160out_err:
161 /* drop references and restore old master on failure */
162 drm_master_put(&fpriv->master);
163 fpriv->master = old_master;
164
165 return ret;
166}
167
168int drm_setmaster_ioctl(struct drm_device *dev, void *data,
169 struct drm_file *file_priv)
170{
171 int ret = 0;
172
173 mutex_lock(&dev->master_mutex);
174 if (drm_is_current_master(file_priv))
175 goto out_unlock;
176
177 if (dev->master) {
178 ret = -EINVAL;
179 goto out_unlock;
180 }
181
182 if (!file_priv->master) {
183 ret = -EINVAL;
184 goto out_unlock;
185 }
186
187 if (!file_priv->is_master) {
188 ret = drm_new_set_master(dev, file_priv);
189 goto out_unlock;
190 }
191
192 ret = drm_set_master(dev, file_priv, false);
193out_unlock:
194 mutex_unlock(&dev->master_mutex);
195 return ret;
196}
197
198static void drm_drop_master(struct drm_device *dev,
199 struct drm_file *fpriv)
200{
201 if (dev->driver->master_drop)
202 dev->driver->master_drop(dev, fpriv);
203 drm_master_put(&dev->master);
204}
205
206int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
207 struct drm_file *file_priv)
208{
209 int ret = -EINVAL;
210
211 mutex_lock(&dev->master_mutex);
212 if (!drm_is_current_master(file_priv))
213 goto out_unlock;
214
215 if (!dev->master)
216 goto out_unlock;
217
218 ret = 0;
219 drm_drop_master(dev, file_priv);
220out_unlock:
221 mutex_unlock(&dev->master_mutex);
222 return ret;
223}
224
225int drm_master_open(struct drm_file *file_priv)
226{
227 struct drm_device *dev = file_priv->minor->dev;
228 int ret = 0;
229
230 /* if there is no current master make this fd it, but do not create
231 * any master object for render clients */
232 mutex_lock(&dev->master_mutex);
233 if (!dev->master)
234 ret = drm_new_set_master(dev, file_priv);
235 else
236 file_priv->master = drm_master_get(dev->master);
237 mutex_unlock(&dev->master_mutex);
238
239 return ret;
240}
241
242void drm_master_release(struct drm_file *file_priv)
243{
244 struct drm_device *dev = file_priv->minor->dev;
245 struct drm_master *master = file_priv->master;
246
247 mutex_lock(&dev->master_mutex);
248 if (file_priv->magic)
249 idr_remove(&file_priv->master->magic_map, file_priv->magic);
250
251 if (!drm_is_current_master(file_priv))
252 goto out;
253
254 if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
255 /*
256 * Since the master is disappearing, so is the
257 * possibility to lock.
258 */
259 mutex_lock(&dev->struct_mutex);
260 if (master->lock.hw_lock) {
261 if (dev->sigdata.lock == master->lock.hw_lock)
262 dev->sigdata.lock = NULL;
263 master->lock.hw_lock = NULL;
264 master->lock.file_priv = NULL;
265 wake_up_interruptible_all(&master->lock.lock_queue);
266 }
267 mutex_unlock(&dev->struct_mutex);
268 }
269
270 if (dev->master == file_priv->master)
271 drm_drop_master(dev, file_priv);
272out:
273 /* drop the master reference held by the file priv */
274 if (file_priv->master)
275 drm_master_put(&file_priv->master);
276 mutex_unlock(&dev->master_mutex);
277}
278
279/**
280 * drm_is_current_master - checks whether @priv is the current master
281 * @fpriv: DRM file private
282 *
283 * Checks whether @fpriv is current master on its device. This decides whether a
284 * client is allowed to run DRM_MASTER IOCTLs.
285 *
 286 * Most of the modern IOCTLs which require DRM_MASTER are for kernel modesetting
287 * - the current master is assumed to own the non-shareable display hardware.
288 */
289bool drm_is_current_master(struct drm_file *fpriv)
290{
291 return fpriv->is_master && fpriv->master == fpriv->minor->dev->master;
292}
293EXPORT_SYMBOL(drm_is_current_master);
294
295/**
296 * drm_master_get - reference a master pointer
297 * @master: struct &drm_master
298 *
299 * Increments the reference count of @master and returns a pointer to @master.
300 */
301struct drm_master *drm_master_get(struct drm_master *master)
302{
303 kref_get(&master->refcount);
304 return master;
305}
306EXPORT_SYMBOL(drm_master_get);
307
308static void drm_master_destroy(struct kref *kref)
309{
310 struct drm_master *master = container_of(kref, struct drm_master, refcount);
311 struct drm_device *dev = master->dev;
312
313 if (dev->driver->master_destroy)
314 dev->driver->master_destroy(dev, master);
315
316 drm_legacy_master_rmmaps(dev, master);
317
318 idr_destroy(&master->magic_map);
319 kfree(master->unique);
320 kfree(master);
321}
322
323/**
324 * drm_master_put - unreference and clear a master pointer
325 * @master: pointer to a pointer of struct &drm_master
326 *
327 * This decrements the &drm_master behind @master and sets it to NULL.
328 */
329void drm_master_put(struct drm_master **master)
330{
331 kref_put(&(*master)->refcount, drm_master_destroy);
332 *master = NULL;
333}
334EXPORT_SYMBOL(drm_master_put);
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index b3654404abd0..255543086590 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -36,7 +36,7 @@
36 * encoder chain. 36 * encoder chain.
37 * 37 *
38 * A bridge is always attached to a single &drm_encoder at a time, but can be 38 * A bridge is always attached to a single &drm_encoder at a time, but can be
39 * either connected to it directly, or through an intermediate bridge: 39 * either connected to it directly, or through an intermediate bridge::
40 * 40 *
41 * encoder ---> bridge B ---> bridge A 41 * encoder ---> bridge B ---> bridge A
42 * 42 *
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 9b34158c0f77..c3a12cd8bd0d 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -51,7 +51,7 @@ static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
51 */ 51 */
52 if (!entry->map || 52 if (!entry->map ||
53 map->type != entry->map->type || 53 map->type != entry->map->type ||
54 entry->master != dev->primary->master) 54 entry->master != dev->master)
55 continue; 55 continue;
56 switch (map->type) { 56 switch (map->type) {
57 case _DRM_SHM: 57 case _DRM_SHM:
@@ -245,12 +245,12 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
245 map->offset = (unsigned long)map->handle; 245 map->offset = (unsigned long)map->handle;
246 if (map->flags & _DRM_CONTAINS_LOCK) { 246 if (map->flags & _DRM_CONTAINS_LOCK) {
247 /* Prevent a 2nd X Server from creating a 2nd lock */ 247 /* Prevent a 2nd X Server from creating a 2nd lock */
248 if (dev->primary->master->lock.hw_lock != NULL) { 248 if (dev->master->lock.hw_lock != NULL) {
249 vfree(map->handle); 249 vfree(map->handle);
250 kfree(map); 250 kfree(map);
251 return -EBUSY; 251 return -EBUSY;
252 } 252 }
253 dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle; /* Pointer to lock */ 253 dev->sigdata.lock = dev->master->lock.hw_lock = map->handle; /* Pointer to lock */
254 } 254 }
255 break; 255 break;
256 case _DRM_AGP: { 256 case _DRM_AGP: {
@@ -356,7 +356,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
356 mutex_unlock(&dev->struct_mutex); 356 mutex_unlock(&dev->struct_mutex);
357 357
358 if (!(map->flags & _DRM_DRIVER)) 358 if (!(map->flags & _DRM_DRIVER))
359 list->master = dev->primary->master; 359 list->master = dev->master;
360 *maplist = list; 360 *maplist = list;
361 return 0; 361 return 0;
362} 362}
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 059f7c39c582..a7916e5f8864 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -136,6 +136,7 @@ drm_clflush_virt_range(void *addr, unsigned long length)
136 mb(); 136 mb();
137 for (; addr < end; addr += size) 137 for (; addr < end; addr += size)
138 clflushopt(addr); 138 clflushopt(addr);
139 clflushopt(end - 1); /* force serialisation */
139 mb(); 140 mb();
140 return; 141 return;
141 } 142 }
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 0e3cc66aa8b7..f1d9f0569d7f 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -39,6 +39,7 @@
39#include <drm/drm_fourcc.h> 39#include <drm/drm_fourcc.h>
40#include <drm/drm_modeset_lock.h> 40#include <drm/drm_modeset_lock.h>
41#include <drm/drm_atomic.h> 41#include <drm/drm_atomic.h>
42#include <drm/drm_auth.h>
42 43
43#include "drm_crtc_internal.h" 44#include "drm_crtc_internal.h"
44#include "drm_internal.h" 45#include "drm_internal.h"
@@ -239,37 +240,6 @@ const char *drm_get_subpixel_order_name(enum subpixel_order order)
239} 240}
240EXPORT_SYMBOL(drm_get_subpixel_order_name); 241EXPORT_SYMBOL(drm_get_subpixel_order_name);
241 242
242static char printable_char(int c)
243{
244 return isascii(c) && isprint(c) ? c : '?';
245}
246
247/**
248 * drm_get_format_name - return a string for drm fourcc format
249 * @format: format to compute name of
250 *
251 * Note that the buffer used by this function is globally shared and owned by
252 * the function itself.
253 *
254 * FIXME: This isn't really multithreading safe.
255 */
256const char *drm_get_format_name(uint32_t format)
257{
258 static char buf[32];
259
260 snprintf(buf, sizeof(buf),
261 "%c%c%c%c %s-endian (0x%08x)",
262 printable_char(format & 0xff),
263 printable_char((format >> 8) & 0xff),
264 printable_char((format >> 16) & 0xff),
265 printable_char((format >> 24) & 0x7f),
266 format & DRM_FORMAT_BIG_ENDIAN ? "big" : "little",
267 format);
268
269 return buf;
270}
271EXPORT_SYMBOL(drm_get_format_name);
272
273/* 243/*
274 * Internal function to assign a slot in the object idr and optionally 244 * Internal function to assign a slot in the object idr and optionally
275 * register the object into the idr. 245 * register the object into the idr.
@@ -426,6 +396,51 @@ void drm_mode_object_reference(struct drm_mode_object *obj)
426} 396}
427EXPORT_SYMBOL(drm_mode_object_reference); 397EXPORT_SYMBOL(drm_mode_object_reference);
428 398
399/**
400 * drm_crtc_force_disable - Forcibly turn off a CRTC
401 * @crtc: CRTC to turn off
402 *
403 * Returns:
404 * Zero on success, error code on failure.
405 */
406int drm_crtc_force_disable(struct drm_crtc *crtc)
407{
408 struct drm_mode_set set = {
409 .crtc = crtc,
410 };
411
412 return drm_mode_set_config_internal(&set);
413}
414EXPORT_SYMBOL(drm_crtc_force_disable);
415
416/**
417 * drm_crtc_force_disable_all - Forcibly turn off all enabled CRTCs
418 * @dev: DRM device whose CRTCs to turn off
419 *
420 * Drivers may want to call this on unload to ensure that all displays are
421 * unlit and the GPU is in a consistent, low power state. Takes modeset locks.
422 *
423 * Returns:
424 * Zero on success, error code on failure.
425 */
426int drm_crtc_force_disable_all(struct drm_device *dev)
427{
428 struct drm_crtc *crtc;
429 int ret = 0;
430
431 drm_modeset_lock_all(dev);
432 drm_for_each_crtc(crtc, dev)
433 if (crtc->enabled) {
434 ret = drm_crtc_force_disable(crtc);
435 if (ret)
436 goto out;
437 }
438out:
439 drm_modeset_unlock_all(dev);
440 return ret;
441}
442EXPORT_SYMBOL(drm_crtc_force_disable_all);
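A typical use, as the kerneldoc hints, is a driver's unload or shutdown path. A hypothetical sketch (foo_hw_fini stands in for driver-specific teardown):

	static void foo_unload(struct drm_device *dev)
	{
		/* make sure every CRTC is off before tearing the rest down */
		drm_crtc_force_disable_all(dev);

		foo_hw_fini(dev);
	}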
443
429static void drm_framebuffer_free(struct kref *kref) 444static void drm_framebuffer_free(struct kref *kref)
430{ 445{
431 struct drm_framebuffer *fb = 446 struct drm_framebuffer *fb =
@@ -535,7 +550,7 @@ EXPORT_SYMBOL(drm_framebuffer_unregister_private);
535 * 550 *
536 * Cleanup framebuffer. This function is intended to be used from the drivers 551 * Cleanup framebuffer. This function is intended to be used from the drivers
537 * ->destroy callback. It can also be used to clean up driver private 552 * ->destroy callback. It can also be used to clean up driver private
538 * framebuffers embedded into a larger structure. 553 * framebuffers embedded into a larger structure.
539 * 554 *
540 * Note that this function does not remove the fb from active usuage - if it is 555 * Note that this function does not remove the fb from active usuage - if it is
541 * still used anywhere, hilarity can ensue since userspace could call getfb on 556 * still used anywhere, hilarity can ensue since userspace could call getfb on
@@ -574,8 +589,6 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb)
574 struct drm_device *dev; 589 struct drm_device *dev;
575 struct drm_crtc *crtc; 590 struct drm_crtc *crtc;
576 struct drm_plane *plane; 591 struct drm_plane *plane;
577 struct drm_mode_set set;
578 int ret;
579 592
580 if (!fb) 593 if (!fb)
581 return; 594 return;
@@ -605,11 +618,7 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb)
605 drm_for_each_crtc(crtc, dev) { 618 drm_for_each_crtc(crtc, dev) {
606 if (crtc->primary->fb == fb) { 619 if (crtc->primary->fb == fb) {
607 /* should turn off the crtc */ 620 /* should turn off the crtc */
608 memset(&set, 0, sizeof(struct drm_mode_set)); 621 if (drm_crtc_force_disable(crtc))
609 set.crtc = crtc;
610 set.fb = NULL;
611 ret = drm_mode_set_config_internal(&set);
612 if (ret)
613 DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc); 622 DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc);
614 } 623 }
615 } 624 }
@@ -639,6 +648,31 @@ static unsigned int drm_num_crtcs(struct drm_device *dev)
639 return num; 648 return num;
640} 649}
641 650
651static int drm_crtc_register_all(struct drm_device *dev)
652{
653 struct drm_crtc *crtc;
654 int ret = 0;
655
656 drm_for_each_crtc(crtc, dev) {
657 if (crtc->funcs->late_register)
658 ret = crtc->funcs->late_register(crtc);
659 if (ret)
660 return ret;
661 }
662
663 return 0;
664}
665
666static void drm_crtc_unregister_all(struct drm_device *dev)
667{
668 struct drm_crtc *crtc;
669
670 drm_for_each_crtc(crtc, dev) {
671 if (crtc->funcs->early_unregister)
672 crtc->funcs->early_unregister(crtc);
673 }
674}
675
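These hooks let a driver expose per-object interfaces (typically debugfs files) only once the object is actually visible to userspace, and tear them down before it disappears. A hypothetical sketch, where foo_crtc_debugfs_init/_fini are made-up driver helpers and the functions are then plugged into the driver's struct drm_crtc_funcs as .late_register / .early_unregister:

	static int foo_crtc_late_register(struct drm_crtc *crtc)
	{
		return foo_crtc_debugfs_init(crtc);
	}

	static void foo_crtc_early_unregister(struct drm_crtc *crtc)
	{
		foo_crtc_debugfs_fini(crtc);
	}

The same pattern applies to the connector, encoder and plane variants added below.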
642/** 676/**
643 * drm_crtc_init_with_planes - Initialise a new CRTC object with 677 * drm_crtc_init_with_planes - Initialise a new CRTC object with
644 * specified primary and cursor planes. 678 * specified primary and cursor planes.
@@ -669,6 +703,9 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
669 crtc->dev = dev; 703 crtc->dev = dev;
670 crtc->funcs = funcs; 704 crtc->funcs = funcs;
671 705
706 INIT_LIST_HEAD(&crtc->commit_list);
707 spin_lock_init(&crtc->commit_lock);
708
672 drm_modeset_lock_init(&crtc->mutex); 709 drm_modeset_lock_init(&crtc->mutex);
673 ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC); 710 ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
674 if (ret) 711 if (ret)
@@ -692,7 +729,7 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
692 crtc->base.properties = &crtc->properties; 729 crtc->base.properties = &crtc->properties;
693 730
694 list_add_tail(&crtc->head, &config->crtc_list); 731 list_add_tail(&crtc->head, &config->crtc_list);
695 config->num_crtc++; 732 crtc->index = config->num_crtc++;
696 733
697 crtc->primary = primary; 734 crtc->primary = primary;
698 crtc->cursor = cursor; 735 crtc->cursor = cursor;
@@ -722,6 +759,11 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
722{ 759{
723 struct drm_device *dev = crtc->dev; 760 struct drm_device *dev = crtc->dev;
724 761
762 /* Note that the crtc_list is considered to be static; should we
763 * remove the drm_crtc at runtime we would have to decrement all
764 * the indices on the drm_crtc after us in the crtc_list.
765 */
766
725 kfree(crtc->gamma_store); 767 kfree(crtc->gamma_store);
726 crtc->gamma_store = NULL; 768 crtc->gamma_store = NULL;
727 769
@@ -741,29 +783,6 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
741} 783}
742EXPORT_SYMBOL(drm_crtc_cleanup); 784EXPORT_SYMBOL(drm_crtc_cleanup);
743 785
744/**
745 * drm_crtc_index - find the index of a registered CRTC
746 * @crtc: CRTC to find index for
747 *
748 * Given a registered CRTC, return the index of that CRTC within a DRM
749 * device's list of CRTCs.
750 */
751unsigned int drm_crtc_index(struct drm_crtc *crtc)
752{
753 unsigned int index = 0;
754 struct drm_crtc *tmp;
755
756 drm_for_each_crtc(tmp, crtc->dev) {
757 if (tmp == crtc)
758 return index;
759
760 index++;
761 }
762
763 BUG();
764}
765EXPORT_SYMBOL(drm_crtc_index);
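With the index now assigned at init time (crtc->index = config->num_crtc++ above), this lookup presumably collapses into a trivial inline in the drm_crtc.h header, along these lines:

	static inline unsigned int drm_crtc_index(struct drm_crtc *crtc)
	{
		return crtc->index;
	}

The list-walking drm_encoder_index() and drm_plane_index() removed further down follow the same pattern.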
766
767/* 786/*
768 * drm_mode_remove - remove and free a mode 787 * drm_mode_remove - remove and free a mode
769 * @connector: connector list to modify 788 * @connector: connector list to modify
@@ -909,11 +928,11 @@ int drm_connector_init(struct drm_device *dev,
909 connector->dev = dev; 928 connector->dev = dev;
910 connector->funcs = funcs; 929 connector->funcs = funcs;
911 930
912 connector->connector_id = ida_simple_get(&config->connector_ida, 0, 0, GFP_KERNEL); 931 ret = ida_simple_get(&config->connector_ida, 0, 0, GFP_KERNEL);
913 if (connector->connector_id < 0) { 932 if (ret < 0)
914 ret = connector->connector_id;
915 goto out_put; 933 goto out_put;
916 } 934 connector->index = ret;
935 ret = 0;
917 936
918 connector->connector_type = connector_type; 937 connector->connector_type = connector_type;
919 connector->connector_type_id = 938 connector->connector_type_id =
@@ -961,7 +980,7 @@ out_put_type_id:
961 ida_remove(connector_ida, connector->connector_type_id); 980 ida_remove(connector_ida, connector->connector_type_id);
962out_put_id: 981out_put_id:
963 if (ret) 982 if (ret)
964 ida_remove(&config->connector_ida, connector->connector_id); 983 ida_remove(&config->connector_ida, connector->index);
965out_put: 984out_put:
966 if (ret) 985 if (ret)
967 drm_mode_object_unregister(dev, &connector->base); 986 drm_mode_object_unregister(dev, &connector->base);
@@ -984,6 +1003,12 @@ void drm_connector_cleanup(struct drm_connector *connector)
984 struct drm_device *dev = connector->dev; 1003 struct drm_device *dev = connector->dev;
985 struct drm_display_mode *mode, *t; 1004 struct drm_display_mode *mode, *t;
986 1005
1006 /* The connector should have been removed from userspace long before
1007 * it is finally destroyed.
1008 */
1009 if (WARN_ON(connector->registered))
1010 drm_connector_unregister(connector);
1011
987 if (connector->tile_group) { 1012 if (connector->tile_group) {
988 drm_mode_put_tile_group(dev, connector->tile_group); 1013 drm_mode_put_tile_group(dev, connector->tile_group);
989 connector->tile_group = NULL; 1014 connector->tile_group = NULL;
@@ -999,7 +1024,7 @@ void drm_connector_cleanup(struct drm_connector *connector)
999 connector->connector_type_id); 1024 connector->connector_type_id);
1000 1025
1001 ida_remove(&dev->mode_config.connector_ida, 1026 ida_remove(&dev->mode_config.connector_ida,
1002 connector->connector_id); 1027 connector->index);
1003 1028
1004 kfree(connector->display_info.bus_formats); 1029 kfree(connector->display_info.bus_formats);
1005 drm_mode_object_unregister(dev, &connector->base); 1030 drm_mode_object_unregister(dev, &connector->base);
@@ -1030,19 +1055,34 @@ int drm_connector_register(struct drm_connector *connector)
1030{ 1055{
1031 int ret; 1056 int ret;
1032 1057
1058 if (connector->registered)
1059 return 0;
1060
1033 ret = drm_sysfs_connector_add(connector); 1061 ret = drm_sysfs_connector_add(connector);
1034 if (ret) 1062 if (ret)
1035 return ret; 1063 return ret;
1036 1064
1037 ret = drm_debugfs_connector_add(connector); 1065 ret = drm_debugfs_connector_add(connector);
1038 if (ret) { 1066 if (ret) {
1039 drm_sysfs_connector_remove(connector); 1067 goto err_sysfs;
1040 return ret; 1068 }
1069
1070 if (connector->funcs->late_register) {
1071 ret = connector->funcs->late_register(connector);
1072 if (ret)
1073 goto err_debugfs;
1041 } 1074 }
1042 1075
1043 drm_mode_object_register(connector->dev, &connector->base); 1076 drm_mode_object_register(connector->dev, &connector->base);
1044 1077
1078 connector->registered = true;
1045 return 0; 1079 return 0;
1080
1081err_debugfs:
1082 drm_debugfs_connector_remove(connector);
1083err_sysfs:
1084 drm_sysfs_connector_remove(connector);
1085 return ret;
1046} 1086}
1047EXPORT_SYMBOL(drm_connector_register); 1087EXPORT_SYMBOL(drm_connector_register);
1048 1088
@@ -1054,28 +1094,29 @@ EXPORT_SYMBOL(drm_connector_register);
1054 */ 1094 */
1055void drm_connector_unregister(struct drm_connector *connector) 1095void drm_connector_unregister(struct drm_connector *connector)
1056{ 1096{
1097 if (!connector->registered)
1098 return;
1099
1100 if (connector->funcs->early_unregister)
1101 connector->funcs->early_unregister(connector);
1102
1057 drm_sysfs_connector_remove(connector); 1103 drm_sysfs_connector_remove(connector);
1058 drm_debugfs_connector_remove(connector); 1104 drm_debugfs_connector_remove(connector);
1105
1106 connector->registered = false;
1059} 1107}
1060EXPORT_SYMBOL(drm_connector_unregister); 1108EXPORT_SYMBOL(drm_connector_unregister);
1061 1109
1062/** 1110static void drm_connector_unregister_all(struct drm_device *dev)
1063 * drm_connector_register_all - register all connectors 1111{
1064 * @dev: drm device 1112 struct drm_connector *connector;
1065 * 1113
1066 * This function registers all connectors in sysfs and other places so that 1114 /* FIXME: taking the mode config mutex ends up in a clash with sysfs */
1067 * userspace can start to access them. Drivers can call it after calling 1115 list_for_each_entry(connector, &dev->mode_config.connector_list, head)
1068 * drm_dev_register() to complete the device registration, if they don't call 1116 drm_connector_unregister(connector);
1069 * drm_connector_register() on each connector individually. 1117}
1070 * 1118
1071 * When a device is unplugged and should be removed from userspace access, 1119static int drm_connector_register_all(struct drm_device *dev)
1072 * call drm_connector_unregister_all(), which is the inverse of this
1073 * function.
1074 *
1075 * Returns:
1076 * Zero on success, error code on failure.
1077 */
1078int drm_connector_register_all(struct drm_device *dev)
1079{ 1120{
1080 struct drm_connector *connector; 1121 struct drm_connector *connector;
1081 int ret; 1122 int ret;
@@ -1097,27 +1138,31 @@ err:
1097 drm_connector_unregister_all(dev); 1138 drm_connector_unregister_all(dev);
1098 return ret; 1139 return ret;
1099} 1140}
1100EXPORT_SYMBOL(drm_connector_register_all);
1101 1141
1102/** 1142static int drm_encoder_register_all(struct drm_device *dev)
1103 * drm_connector_unregister_all - unregister connector userspace interfaces
1104 * @dev: drm device
1105 *
1106 * This functions unregisters all connectors from sysfs and other places so
1107 * that userspace can no longer access them. Drivers should call this as the
1108 * first step tearing down the device instace, or when the underlying
1109 * physical device disappeared (e.g. USB unplug), right before calling
1110 * drm_dev_unregister().
1111 */
1112void drm_connector_unregister_all(struct drm_device *dev)
1113{ 1143{
1114 struct drm_connector *connector; 1144 struct drm_encoder *encoder;
1145 int ret = 0;
1115 1146
1116 /* FIXME: taking the mode config mutex ends up in a clash with sysfs */ 1147 drm_for_each_encoder(encoder, dev) {
1117 list_for_each_entry(connector, &dev->mode_config.connector_list, head) 1148 if (encoder->funcs->late_register)
1118 drm_connector_unregister(connector); 1149 ret = encoder->funcs->late_register(encoder);
1150 if (ret)
1151 return ret;
1152 }
1153
1154 return 0;
1155}
1156
1157static void drm_encoder_unregister_all(struct drm_device *dev)
1158{
1159 struct drm_encoder *encoder;
1160
1161 drm_for_each_encoder(encoder, dev) {
1162 if (encoder->funcs->early_unregister)
1163 encoder->funcs->early_unregister(encoder);
1164 }
1119} 1165}
1120EXPORT_SYMBOL(drm_connector_unregister_all);
1121 1166
1122/** 1167/**
1123 * drm_encoder_init - Init a preallocated encoder 1168 * drm_encoder_init - Init a preallocated encoder
@@ -1166,7 +1211,7 @@ int drm_encoder_init(struct drm_device *dev,
1166 } 1211 }
1167 1212
1168 list_add_tail(&encoder->head, &dev->mode_config.encoder_list); 1213 list_add_tail(&encoder->head, &dev->mode_config.encoder_list);
1169 dev->mode_config.num_encoder++; 1214 encoder->index = dev->mode_config.num_encoder++;
1170 1215
1171out_put: 1216out_put:
1172 if (ret) 1217 if (ret)
@@ -1180,29 +1225,6 @@ out_unlock:
1180EXPORT_SYMBOL(drm_encoder_init); 1225EXPORT_SYMBOL(drm_encoder_init);
1181 1226
1182/** 1227/**
1183 * drm_encoder_index - find the index of a registered encoder
1184 * @encoder: encoder to find index for
1185 *
1186 * Given a registered encoder, return the index of that encoder within a DRM
1187 * device's list of encoders.
1188 */
1189unsigned int drm_encoder_index(struct drm_encoder *encoder)
1190{
1191 unsigned int index = 0;
1192 struct drm_encoder *tmp;
1193
1194 drm_for_each_encoder(tmp, encoder->dev) {
1195 if (tmp == encoder)
1196 return index;
1197
1198 index++;
1199 }
1200
1201 BUG();
1202}
1203EXPORT_SYMBOL(drm_encoder_index);
1204
1205/**
1206 * drm_encoder_cleanup - cleans up an initialised encoder 1228 * drm_encoder_cleanup - cleans up an initialised encoder
1207 * @encoder: encoder to cleanup 1229 * @encoder: encoder to cleanup
1208 * 1230 *
@@ -1212,6 +1234,11 @@ void drm_encoder_cleanup(struct drm_encoder *encoder)
1212{ 1234{
1213 struct drm_device *dev = encoder->dev; 1235 struct drm_device *dev = encoder->dev;
1214 1236
1237 /* Note that the encoder_list is considered to be static; should we
1238 * remove the drm_encoder at runtime we would have to decrement all
1239 * the indices on the drm_encoder after us in the encoder_list.
1240 */
1241
1215 drm_modeset_lock_all(dev); 1242 drm_modeset_lock_all(dev);
1216 drm_mode_object_unregister(dev, &encoder->base); 1243 drm_mode_object_unregister(dev, &encoder->base);
1217 kfree(encoder->name); 1244 kfree(encoder->name);
@@ -1300,7 +1327,7 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
1300 plane->type = type; 1327 plane->type = type;
1301 1328
1302 list_add_tail(&plane->head, &config->plane_list); 1329 list_add_tail(&plane->head, &config->plane_list);
1303 config->num_total_plane++; 1330 plane->index = config->num_total_plane++;
1304 if (plane->type == DRM_PLANE_TYPE_OVERLAY) 1331 if (plane->type == DRM_PLANE_TYPE_OVERLAY)
1305 config->num_overlay_plane++; 1332 config->num_overlay_plane++;
1306 1333
@@ -1325,6 +1352,31 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
1325} 1352}
1326EXPORT_SYMBOL(drm_universal_plane_init); 1353EXPORT_SYMBOL(drm_universal_plane_init);
1327 1354
1355static int drm_plane_register_all(struct drm_device *dev)
1356{
1357 struct drm_plane *plane;
1358 int ret = 0;
1359
1360 drm_for_each_plane(plane, dev) {
1361 if (plane->funcs->late_register)
1362 ret = plane->funcs->late_register(plane);
1363 if (ret)
1364 return ret;
1365 }
1366
1367 return 0;
1368}
1369
1370static void drm_plane_unregister_all(struct drm_device *dev)
1371{
1372 struct drm_plane *plane;
1373
1374 drm_for_each_plane(plane, dev) {
1375 if (plane->funcs->early_unregister)
1376 plane->funcs->early_unregister(plane);
1377 }
1378}
1379
1328/** 1380/**
1329 * drm_plane_init - Initialize a legacy plane 1381 * drm_plane_init - Initialize a legacy plane
1330 * @dev: DRM device 1382 * @dev: DRM device
@@ -1374,6 +1426,11 @@ void drm_plane_cleanup(struct drm_plane *plane)
1374 1426
1375 BUG_ON(list_empty(&plane->head)); 1427 BUG_ON(list_empty(&plane->head));
1376 1428
1429 /* Note that the plane_list is considered to be static; should we
1430 * remove the drm_plane at runtime we would have to decrement all
1431 * the indices on the drm_plane after us in the plane_list.
1432 */
1433
1377 list_del(&plane->head); 1434 list_del(&plane->head);
1378 dev->mode_config.num_total_plane--; 1435 dev->mode_config.num_total_plane--;
1379 if (plane->type == DRM_PLANE_TYPE_OVERLAY) 1436 if (plane->type == DRM_PLANE_TYPE_OVERLAY)
@@ -1391,29 +1448,6 @@ void drm_plane_cleanup(struct drm_plane *plane)
1391EXPORT_SYMBOL(drm_plane_cleanup); 1448EXPORT_SYMBOL(drm_plane_cleanup);
1392 1449
1393/** 1450/**
1394 * drm_plane_index - find the index of a registered plane
1395 * @plane: plane to find index for
1396 *
1397 * Given a registered plane, return the index of that plane within a DRM
1398 * device's list of planes.
1399 */
1400unsigned int drm_plane_index(struct drm_plane *plane)
1401{
1402 unsigned int index = 0;
1403 struct drm_plane *tmp;
1404
1405 drm_for_each_plane(tmp, plane->dev) {
1406 if (tmp == plane)
1407 return index;
1408
1409 index++;
1410 }
1411
1412 BUG();
1413}
1414EXPORT_SYMBOL(drm_plane_index);
1415
1416/**
1417 * drm_plane_from_index - find the registered plane at an index 1451 * drm_plane_from_index - find the registered plane at an index
1418 * @dev: DRM device 1452 * @dev: DRM device
1419 * @idx: index of registered plane to find for 1453 * @idx: index of registered plane to find for
@@ -1425,13 +1459,11 @@ struct drm_plane *
1425drm_plane_from_index(struct drm_device *dev, int idx) 1459drm_plane_from_index(struct drm_device *dev, int idx)
1426{ 1460{
1427 struct drm_plane *plane; 1461 struct drm_plane *plane;
1428 unsigned int i = 0;
1429 1462
1430 drm_for_each_plane(plane, dev) { 1463 drm_for_each_plane(plane, dev)
1431 if (i == idx) 1464 if (idx == plane->index)
1432 return plane; 1465 return plane;
1433 i++; 1466
1434 }
1435 return NULL; 1467 return NULL;
1436} 1468}
1437EXPORT_SYMBOL(drm_plane_from_index); 1469EXPORT_SYMBOL(drm_plane_from_index);
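
Editor's note: the hunk above replaces the linear list walk in drm_plane_from_index() with a read of the new plane->index field that drm_universal_plane_init() assigns. A minimal sketch of what that buys a driver; the foo_* name is hypothetical and not part of this patch:

	#include <drm/drm_crtc.h>

	/* Constant-time round trip between a plane and its position in the
	 * mode_config list: plane->index is assigned once at init time, so
	 * no list walk is needed in either direction. The "next plane"
	 * pairing policy here is purely illustrative. */
	static struct drm_plane *foo_next_plane(struct drm_device *dev,
						struct drm_plane *plane)
	{
		return drm_plane_from_index(dev, plane->index + 1);
	}
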
@@ -1467,6 +1499,46 @@ void drm_plane_force_disable(struct drm_plane *plane)
1467} 1499}
1468EXPORT_SYMBOL(drm_plane_force_disable); 1500EXPORT_SYMBOL(drm_plane_force_disable);
1469 1501
1502int drm_modeset_register_all(struct drm_device *dev)
1503{
1504 int ret;
1505
1506 ret = drm_plane_register_all(dev);
1507 if (ret)
1508 goto err_plane;
1509
1510 ret = drm_crtc_register_all(dev);
1511 if (ret)
1512 goto err_crtc;
1513
1514 ret = drm_encoder_register_all(dev);
1515 if (ret)
1516 goto err_encoder;
1517
1518 ret = drm_connector_register_all(dev);
1519 if (ret)
1520 goto err_connector;
1521
1522 return 0;
1523
1524err_connector:
1525 drm_encoder_unregister_all(dev);
1526err_encoder:
1527 drm_crtc_unregister_all(dev);
1528err_crtc:
1529 drm_plane_unregister_all(dev);
1530err_plane:
1531 return ret;
1532}
1533
1534void drm_modeset_unregister_all(struct drm_device *dev)
1535{
1536 drm_connector_unregister_all(dev);
1537 drm_encoder_unregister_all(dev);
1538 drm_crtc_unregister_all(dev);
1539 drm_plane_unregister_all(dev);
1540}
1541
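
Editor's note: drm_modeset_register_all() and drm_modeset_unregister_all() walk the new ->late_register / ->early_unregister hooks on planes, CRTCs, encoders and connectors from drm_dev_register() / drm_dev_unregister(). A hedged sketch of how a driver might hook into this; the foo_* names are hypothetical and the bodies only stand in for "userspace-visible setup":

	#include <drm/drm_crtc.h>
	#include <drm/drm_atomic_helper.h>

	static int foo_plane_late_register(struct drm_plane *plane)
	{
		/* Runs from drm_dev_register() -> drm_modeset_register_all():
		 * create debugfs/sysfs entries or other userspace-visible
		 * state here rather than at plane init time. */
		return 0;
	}

	static void foo_plane_early_unregister(struct drm_plane *plane)
	{
		/* Mirror image: tear those interfaces down before the rest
		 * of the device disappears in drm_dev_unregister(). */
	}

	static const struct drm_plane_funcs foo_plane_funcs = {
		.update_plane		= drm_atomic_helper_update_plane,
		.disable_plane		= drm_atomic_helper_disable_plane,
		.destroy		= drm_plane_cleanup,
		.late_register		= foo_plane_late_register,
		.early_unregister	= foo_plane_early_unregister,
	};
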
1470static int drm_mode_create_standard_properties(struct drm_device *dev) 1542static int drm_mode_create_standard_properties(struct drm_device *dev)
1471{ 1543{
1472 struct drm_property *prop; 1544 struct drm_property *prop;
@@ -2975,6 +3047,8 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc,
2975 DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n"); 3047 DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n");
2976 return PTR_ERR(fb); 3048 return PTR_ERR(fb);
2977 } 3049 }
3050 fb->hot_x = req->hot_x;
3051 fb->hot_y = req->hot_y;
2978 } else { 3052 } else {
2979 fb = NULL; 3053 fb = NULL;
2980 } 3054 }
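
Editor's note: the hot_x/hot_y values now stashed in the wrapping framebuffer come straight from the userspace cursor request. A userspace-side sketch (not part of this patch) of where those values originate, using the stock libdrm ioctl wrapper; the sizes and hotspot are example values:

	#include <stdint.h>
	#include <xf86drm.h>

	/* Point the CRTC cursor at a GEM buffer and tell the kernel where
	 * the hot spot sits inside the 64x64 cursor image. */
	static int set_cursor_with_hotspot(int fd, uint32_t crtc_id,
					   uint32_t handle)
	{
		struct drm_mode_cursor2 req = {
			.flags   = DRM_MODE_CURSOR_BO,
			.crtc_id = crtc_id,
			.handle  = handle,
			.width   = 64,
			.height  = 64,
			.hot_x   = 8,
			.hot_y   = 8,
		};

		return drmIoctl(fd, DRM_IOCTL_MODE_CURSOR2, &req);
	}
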
@@ -3581,7 +3655,7 @@ int drm_mode_getfb(struct drm_device *dev,
3581 r->bpp = fb->bits_per_pixel; 3655 r->bpp = fb->bits_per_pixel;
3582 r->pitch = fb->pitches[0]; 3656 r->pitch = fb->pitches[0];
3583 if (fb->funcs->create_handle) { 3657 if (fb->funcs->create_handle) {
3584 if (file_priv->is_master || capable(CAP_SYS_ADMIN) || 3658 if (drm_is_current_master(file_priv) || capable(CAP_SYS_ADMIN) ||
3585 drm_is_control_client(file_priv)) { 3659 drm_is_control_client(file_priv)) {
3586 ret = fb->funcs->create_handle(fb, file_priv, 3660 ret = fb->funcs->create_handle(fb, file_priv,
3587 &r->handle); 3661 &r->handle);
@@ -3738,6 +3812,13 @@ void drm_fb_release(struct drm_file *priv)
3738 } 3812 }
3739} 3813}
3740 3814
3815static bool drm_property_type_valid(struct drm_property *property)
3816{
3817 if (property->flags & DRM_MODE_PROP_EXTENDED_TYPE)
3818 return !(property->flags & DRM_MODE_PROP_LEGACY_TYPE);
3819 return !!(property->flags & DRM_MODE_PROP_LEGACY_TYPE);
3820}
3821
3741/** 3822/**
3742 * drm_property_create - create a new property type 3823 * drm_property_create - create a new property type
3743 * @dev: drm device 3824 * @dev: drm device
@@ -5138,6 +5219,9 @@ EXPORT_SYMBOL(drm_mode_connector_attach_encoder);
5138int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc, 5219int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
5139 int gamma_size) 5220 int gamma_size)
5140{ 5221{
5222 uint16_t *r_base, *g_base, *b_base;
5223 int i;
5224
5141 crtc->gamma_size = gamma_size; 5225 crtc->gamma_size = gamma_size;
5142 5226
5143 crtc->gamma_store = kcalloc(gamma_size, sizeof(uint16_t) * 3, 5227 crtc->gamma_store = kcalloc(gamma_size, sizeof(uint16_t) * 3,
@@ -5147,6 +5231,16 @@ int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
5147 return -ENOMEM; 5231 return -ENOMEM;
5148 } 5232 }
5149 5233
5234 r_base = crtc->gamma_store;
5235 g_base = r_base + gamma_size;
5236 b_base = g_base + gamma_size;
5237 for (i = 0; i < gamma_size; i++) {
5238 r_base[i] = i << 8;
5239 g_base[i] = i << 8;
5240 b_base[i] = i << 8;
5241 }
5242
5243
5150 return 0; 5244 return 0;
5151} 5245}
5152EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size); 5246EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size);
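
Editor's note: with this change drm_mode_crtc_set_gamma_size() seeds the table with a linear ramp (entry i becomes i << 8, so indices 0..255 map onto 0..0xff00) instead of leaving it zeroed. A hedged sketch of the usual call site in a driver's CRTC setup; the foo_* names are hypothetical:

	#include <drm/drm_crtc.h>

	static int foo_crtc_setup(struct drm_device *dev, struct drm_crtc *crtc,
				  struct drm_plane *primary,
				  const struct drm_crtc_funcs *funcs)
	{
		int ret;

		ret = drm_crtc_init_with_planes(dev, crtc, primary, NULL,
						funcs, NULL);
		if (ret)
			return ret;

		/* 256-entry legacy gamma table; it now starts out as an
		 * identity ramp even if userspace never calls GAMMA_SET. */
		return drm_mode_crtc_set_gamma_size(crtc, 256);
	}
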
@@ -5214,7 +5308,7 @@ int drm_mode_gamma_set_ioctl(struct drm_device *dev,
5214 goto out; 5308 goto out;
5215 } 5309 }
5216 5310
5217 crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size); 5311 ret = crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, crtc->gamma_size);
5218 5312
5219out: 5313out:
5220 drm_modeset_unlock_all(dev); 5314 drm_modeset_unlock_all(dev);
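
Editor's note: the ioctl now forwards the driver's return value, which goes with the ->gamma_set hook losing its start offset and gaining an int return this cycle. A sketch of the shape a driver callback takes after this change; the body is hypothetical:

	#include <drm/drm_crtc.h>

	static int foo_crtc_gamma_set(struct drm_crtc *crtc, u16 *red,
				      u16 *green, u16 *blue, uint32_t size)
	{
		/* Program the full hardware LUT from red[]/green[]/blue[];
		 * any error reported here is now passed back to userspace. */
		return 0;
	}
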
@@ -5544,264 +5638,6 @@ int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
5544} 5638}
5545 5639
5546/** 5640/**
5547 * drm_fb_get_bpp_depth - get the bpp/depth values for format
5548 * @format: pixel format (DRM_FORMAT_*)
5549 * @depth: storage for the depth value
5550 * @bpp: storage for the bpp value
5551 *
5552 * This only supports RGB formats here for compat with code that doesn't use
5553 * pixel formats directly yet.
5554 */
5555void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
5556 int *bpp)
5557{
5558 switch (format) {
5559 case DRM_FORMAT_C8:
5560 case DRM_FORMAT_RGB332:
5561 case DRM_FORMAT_BGR233:
5562 *depth = 8;
5563 *bpp = 8;
5564 break;
5565 case DRM_FORMAT_XRGB1555:
5566 case DRM_FORMAT_XBGR1555:
5567 case DRM_FORMAT_RGBX5551:
5568 case DRM_FORMAT_BGRX5551:
5569 case DRM_FORMAT_ARGB1555:
5570 case DRM_FORMAT_ABGR1555:
5571 case DRM_FORMAT_RGBA5551:
5572 case DRM_FORMAT_BGRA5551:
5573 *depth = 15;
5574 *bpp = 16;
5575 break;
5576 case DRM_FORMAT_RGB565:
5577 case DRM_FORMAT_BGR565:
5578 *depth = 16;
5579 *bpp = 16;
5580 break;
5581 case DRM_FORMAT_RGB888:
5582 case DRM_FORMAT_BGR888:
5583 *depth = 24;
5584 *bpp = 24;
5585 break;
5586 case DRM_FORMAT_XRGB8888:
5587 case DRM_FORMAT_XBGR8888:
5588 case DRM_FORMAT_RGBX8888:
5589 case DRM_FORMAT_BGRX8888:
5590 *depth = 24;
5591 *bpp = 32;
5592 break;
5593 case DRM_FORMAT_XRGB2101010:
5594 case DRM_FORMAT_XBGR2101010:
5595 case DRM_FORMAT_RGBX1010102:
5596 case DRM_FORMAT_BGRX1010102:
5597 case DRM_FORMAT_ARGB2101010:
5598 case DRM_FORMAT_ABGR2101010:
5599 case DRM_FORMAT_RGBA1010102:
5600 case DRM_FORMAT_BGRA1010102:
5601 *depth = 30;
5602 *bpp = 32;
5603 break;
5604 case DRM_FORMAT_ARGB8888:
5605 case DRM_FORMAT_ABGR8888:
5606 case DRM_FORMAT_RGBA8888:
5607 case DRM_FORMAT_BGRA8888:
5608 *depth = 32;
5609 *bpp = 32;
5610 break;
5611 default:
5612 DRM_DEBUG_KMS("unsupported pixel format %s\n",
5613 drm_get_format_name(format));
5614 *depth = 0;
5615 *bpp = 0;
5616 break;
5617 }
5618}
5619EXPORT_SYMBOL(drm_fb_get_bpp_depth);
5620
5621/**
5622 * drm_format_num_planes - get the number of planes for format
5623 * @format: pixel format (DRM_FORMAT_*)
5624 *
5625 * Returns:
5626 * The number of planes used by the specified pixel format.
5627 */
5628int drm_format_num_planes(uint32_t format)
5629{
5630 switch (format) {
5631 case DRM_FORMAT_YUV410:
5632 case DRM_FORMAT_YVU410:
5633 case DRM_FORMAT_YUV411:
5634 case DRM_FORMAT_YVU411:
5635 case DRM_FORMAT_YUV420:
5636 case DRM_FORMAT_YVU420:
5637 case DRM_FORMAT_YUV422:
5638 case DRM_FORMAT_YVU422:
5639 case DRM_FORMAT_YUV444:
5640 case DRM_FORMAT_YVU444:
5641 return 3;
5642 case DRM_FORMAT_NV12:
5643 case DRM_FORMAT_NV21:
5644 case DRM_FORMAT_NV16:
5645 case DRM_FORMAT_NV61:
5646 case DRM_FORMAT_NV24:
5647 case DRM_FORMAT_NV42:
5648 return 2;
5649 default:
5650 return 1;
5651 }
5652}
5653EXPORT_SYMBOL(drm_format_num_planes);
5654
5655/**
5656 * drm_format_plane_cpp - determine the bytes per pixel value
5657 * @format: pixel format (DRM_FORMAT_*)
5658 * @plane: plane index
5659 *
5660 * Returns:
5661 * The bytes per pixel value for the specified plane.
5662 */
5663int drm_format_plane_cpp(uint32_t format, int plane)
5664{
5665 unsigned int depth;
5666 int bpp;
5667
5668 if (plane >= drm_format_num_planes(format))
5669 return 0;
5670
5671 switch (format) {
5672 case DRM_FORMAT_YUYV:
5673 case DRM_FORMAT_YVYU:
5674 case DRM_FORMAT_UYVY:
5675 case DRM_FORMAT_VYUY:
5676 return 2;
5677 case DRM_FORMAT_NV12:
5678 case DRM_FORMAT_NV21:
5679 case DRM_FORMAT_NV16:
5680 case DRM_FORMAT_NV61:
5681 case DRM_FORMAT_NV24:
5682 case DRM_FORMAT_NV42:
5683 return plane ? 2 : 1;
5684 case DRM_FORMAT_YUV410:
5685 case DRM_FORMAT_YVU410:
5686 case DRM_FORMAT_YUV411:
5687 case DRM_FORMAT_YVU411:
5688 case DRM_FORMAT_YUV420:
5689 case DRM_FORMAT_YVU420:
5690 case DRM_FORMAT_YUV422:
5691 case DRM_FORMAT_YVU422:
5692 case DRM_FORMAT_YUV444:
5693 case DRM_FORMAT_YVU444:
5694 return 1;
5695 default:
5696 drm_fb_get_bpp_depth(format, &depth, &bpp);
5697 return bpp >> 3;
5698 }
5699}
5700EXPORT_SYMBOL(drm_format_plane_cpp);
5701
5702/**
5703 * drm_format_horz_chroma_subsampling - get the horizontal chroma subsampling factor
5704 * @format: pixel format (DRM_FORMAT_*)
5705 *
5706 * Returns:
5707 * The horizontal chroma subsampling factor for the
5708 * specified pixel format.
5709 */
5710int drm_format_horz_chroma_subsampling(uint32_t format)
5711{
5712 switch (format) {
5713 case DRM_FORMAT_YUV411:
5714 case DRM_FORMAT_YVU411:
5715 case DRM_FORMAT_YUV410:
5716 case DRM_FORMAT_YVU410:
5717 return 4;
5718 case DRM_FORMAT_YUYV:
5719 case DRM_FORMAT_YVYU:
5720 case DRM_FORMAT_UYVY:
5721 case DRM_FORMAT_VYUY:
5722 case DRM_FORMAT_NV12:
5723 case DRM_FORMAT_NV21:
5724 case DRM_FORMAT_NV16:
5725 case DRM_FORMAT_NV61:
5726 case DRM_FORMAT_YUV422:
5727 case DRM_FORMAT_YVU422:
5728 case DRM_FORMAT_YUV420:
5729 case DRM_FORMAT_YVU420:
5730 return 2;
5731 default:
5732 return 1;
5733 }
5734}
5735EXPORT_SYMBOL(drm_format_horz_chroma_subsampling);
5736
5737/**
5738 * drm_format_vert_chroma_subsampling - get the vertical chroma subsampling factor
5739 * @format: pixel format (DRM_FORMAT_*)
5740 *
5741 * Returns:
5742 * The vertical chroma subsampling factor for the
5743 * specified pixel format.
5744 */
5745int drm_format_vert_chroma_subsampling(uint32_t format)
5746{
5747 switch (format) {
5748 case DRM_FORMAT_YUV410:
5749 case DRM_FORMAT_YVU410:
5750 return 4;
5751 case DRM_FORMAT_YUV420:
5752 case DRM_FORMAT_YVU420:
5753 case DRM_FORMAT_NV12:
5754 case DRM_FORMAT_NV21:
5755 return 2;
5756 default:
5757 return 1;
5758 }
5759}
5760EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
5761
5762/**
5763 * drm_format_plane_width - width of the plane given the first plane
5764 * @width: width of the first plane
5765 * @format: pixel format
5766 * @plane: plane index
5767 *
5768 * Returns:
5769 * The width of @plane, given that the width of the first plane is @width.
5770 */
5771int drm_format_plane_width(int width, uint32_t format, int plane)
5772{
5773 if (plane >= drm_format_num_planes(format))
5774 return 0;
5775
5776 if (plane == 0)
5777 return width;
5778
5779 return width / drm_format_horz_chroma_subsampling(format);
5780}
5781EXPORT_SYMBOL(drm_format_plane_width);
5782
5783/**
5784 * drm_format_plane_height - height of the plane given the first plane
5785 * @height: height of the first plane
5786 * @format: pixel format
5787 * @plane: plane index
5788 *
5789 * Returns:
5790 * The height of @plane, given that the height of the first plane is @height.
5791 */
5792int drm_format_plane_height(int height, uint32_t format, int plane)
5793{
5794 if (plane >= drm_format_num_planes(format))
5795 return 0;
5796
5797 if (plane == 0)
5798 return height;
5799
5800 return height / drm_format_vert_chroma_subsampling(format);
5801}
5802EXPORT_SYMBOL(drm_format_plane_height);
5803
5804/**
5805 * drm_rotation_simplify() - Try to simplify the rotation 5641 * drm_rotation_simplify() - Try to simplify the rotation
5806 * @rotation: Rotation to be simplified 5642 * @rotation: Rotation to be simplified
5807 * @supported_rotations: Supported rotations 5643 * @supported_rotations: Supported rotations
@@ -6064,3 +5900,48 @@ struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
6064 return tg; 5900 return tg;
6065} 5901}
6066EXPORT_SYMBOL(drm_mode_create_tile_group); 5902EXPORT_SYMBOL(drm_mode_create_tile_group);
5903
5904/**
5905 * drm_crtc_enable_color_mgmt - enable color management properties
5906 * @crtc: DRM CRTC
5907 * @degamma_lut_size: the size of the degamma lut (before CSC)
5908 * @has_ctm: whether to attach ctm_property for CSC matrix
5909 * @gamma_lut_size: the size of the gamma lut (after CSC)
5910 *
5911 * This function lets the driver enable the color correction
5912 * properties on a CRTC. This includes 3 degamma, csc and gamma
5913 * properties that userspace can set and 2 size properties to inform
5914 * the userspace of the lut sizes. Each of the properties is
5915 * optional. The gamma and degamma properties are only attached if
5916 * their size is not 0 and ctm_property is only attached if has_ctm is
5917 * true.
5918 */
5919void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc,
5920 uint degamma_lut_size,
5921 bool has_ctm,
5922 uint gamma_lut_size)
5923{
5924 struct drm_device *dev = crtc->dev;
5925 struct drm_mode_config *config = &dev->mode_config;
5926
5927 if (degamma_lut_size) {
5928 drm_object_attach_property(&crtc->base,
5929 config->degamma_lut_property, 0);
5930 drm_object_attach_property(&crtc->base,
5931 config->degamma_lut_size_property,
5932 degamma_lut_size);
5933 }
5934
5935 if (has_ctm)
5936 drm_object_attach_property(&crtc->base,
5937 config->ctm_property, 0);
5938
5939 if (gamma_lut_size) {
5940 drm_object_attach_property(&crtc->base,
5941 config->gamma_lut_property, 0);
5942 drm_object_attach_property(&crtc->base,
5943 config->gamma_lut_size_property,
5944 gamma_lut_size);
5945 }
5946}
5947EXPORT_SYMBOL(drm_crtc_enable_color_mgmt);
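
Editor's note: a hedged usage sketch for the new helper. A driver with, say, a 65-entry degamma LUT, a CSC block and a 1024-entry gamma LUT would attach the properties from its CRTC init path; the sizes and the foo_* name are illustrative, not from this patch:

	#include <drm/drm_crtc.h>

	static void foo_crtc_attach_color_props(struct drm_crtc *crtc)
	{
		/* Attaches DEGAMMA_LUT(+size), CTM and GAMMA_LUT(+size);
		 * pass 0 or false to leave any of them off for this CRTC. */
		drm_crtc_enable_color_mgmt(crtc, 65, true, 1024);
	}

An atomic driver can then route the legacy gamma ioctl through the GAMMA_LUT property, for example via drm_atomic_helper_legacy_gamma_set(), but that wiring is outside this hunk.
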
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 26feb2f8453f..604d3ef72ffa 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -232,6 +232,9 @@ static void __drm_helper_disable_unused_functions(struct drm_device *dev)
232 */ 232 */
233void drm_helper_disable_unused_functions(struct drm_device *dev) 233void drm_helper_disable_unused_functions(struct drm_device *dev)
234{ 234{
235 if (drm_core_check_feature(dev, DRIVER_ATOMIC))
236 DRM_ERROR("Called for atomic driver, this is not what you want.\n");
237
235 drm_modeset_lock_all(dev); 238 drm_modeset_lock_all(dev);
236 __drm_helper_disable_unused_functions(dev); 239 __drm_helper_disable_unused_functions(dev);
237 drm_modeset_unlock_all(dev); 240 drm_modeset_unlock_all(dev);
@@ -1123,36 +1126,3 @@ int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
1123 return drm_plane_helper_commit(plane, plane_state, old_fb); 1126 return drm_plane_helper_commit(plane, plane_state, old_fb);
1124} 1127}
1125EXPORT_SYMBOL(drm_helper_crtc_mode_set_base); 1128EXPORT_SYMBOL(drm_helper_crtc_mode_set_base);
1126
1127/**
1128 * drm_helper_crtc_enable_color_mgmt - enable color management properties
1129 * @crtc: DRM CRTC
1130 * @degamma_lut_size: the size of the degamma lut (before CSC)
1131 * @gamma_lut_size: the size of the gamma lut (after CSC)
1132 *
1133 * This function lets the driver enable the color correction properties on a
1134 * CRTC. This includes 3 degamma, csc and gamma properties that userspace can
1135 * set and 2 size properties to inform the userspace of the lut sizes.
1136 */
1137void drm_helper_crtc_enable_color_mgmt(struct drm_crtc *crtc,
1138 int degamma_lut_size,
1139 int gamma_lut_size)
1140{
1141 struct drm_device *dev = crtc->dev;
1142 struct drm_mode_config *config = &dev->mode_config;
1143
1144 drm_object_attach_property(&crtc->base,
1145 config->degamma_lut_property, 0);
1146 drm_object_attach_property(&crtc->base,
1147 config->ctm_property, 0);
1148 drm_object_attach_property(&crtc->base,
1149 config->gamma_lut_property, 0);
1150
1151 drm_object_attach_property(&crtc->base,
1152 config->degamma_lut_size_property,
1153 degamma_lut_size);
1154 drm_object_attach_property(&crtc->base,
1155 config->gamma_lut_size_property,
1156 gamma_lut_size);
1157}
1158EXPORT_SYMBOL(drm_helper_crtc_enable_color_mgmt);
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index a78c138282ea..47a500b90fd7 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -31,14 +31,100 @@
31 * and are not exported to drivers. 31 * and are not exported to drivers.
32 */ 32 */
33 33
34
35/* drm_crtc.c */
36void drm_connector_ida_init(void);
37void drm_connector_ida_destroy(void);
34int drm_mode_object_get(struct drm_device *dev, 38int drm_mode_object_get(struct drm_device *dev,
35 struct drm_mode_object *obj, uint32_t obj_type); 39 struct drm_mode_object *obj, uint32_t obj_type);
36void drm_mode_object_unregister(struct drm_device *dev, 40void drm_mode_object_unregister(struct drm_device *dev,
37 struct drm_mode_object *object); 41 struct drm_mode_object *object);
42bool drm_property_change_valid_get(struct drm_property *property,
43 uint64_t value,
44 struct drm_mode_object **ref);
45void drm_property_change_valid_put(struct drm_property *property,
46 struct drm_mode_object *ref);
47
48int drm_plane_check_pixel_format(const struct drm_plane *plane,
49 u32 format);
50int drm_crtc_check_viewport(const struct drm_crtc *crtc,
51 int x, int y,
52 const struct drm_display_mode *mode,
53 const struct drm_framebuffer *fb);
54
55void drm_fb_release(struct drm_file *file_priv);
56void drm_property_destroy_user_blobs(struct drm_device *dev,
57 struct drm_file *file_priv);
58
59/* dumb buffer support IOCTLs */
60int drm_mode_create_dumb_ioctl(struct drm_device *dev,
61 void *data, struct drm_file *file_priv);
62int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
63 void *data, struct drm_file *file_priv);
64int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
65 void *data, struct drm_file *file_priv);
66
67/* framebuffer IOCTLs */
68extern int drm_mode_addfb(struct drm_device *dev,
69 void *data, struct drm_file *file_priv);
70extern int drm_mode_addfb2(struct drm_device *dev,
71 void *data, struct drm_file *file_priv);
72int drm_mode_rmfb(struct drm_device *dev,
73 void *data, struct drm_file *file_priv);
74int drm_mode_getfb(struct drm_device *dev,
75 void *data, struct drm_file *file_priv);
76int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
77 void *data, struct drm_file *file_priv);
78
79/* IOCTLs */
80int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
81 struct drm_file *file_priv);
82int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
83 struct drm_file *file_priv);
84
85int drm_mode_getresources(struct drm_device *dev,
86 void *data, struct drm_file *file_priv);
87int drm_mode_getplane_res(struct drm_device *dev, void *data,
88 struct drm_file *file_priv);
89int drm_mode_getcrtc(struct drm_device *dev,
90 void *data, struct drm_file *file_priv);
91int drm_mode_getconnector(struct drm_device *dev,
92 void *data, struct drm_file *file_priv);
93int drm_mode_setcrtc(struct drm_device *dev,
94 void *data, struct drm_file *file_priv);
95int drm_mode_getplane(struct drm_device *dev,
96 void *data, struct drm_file *file_priv);
97int drm_mode_setplane(struct drm_device *dev,
98 void *data, struct drm_file *file_priv);
99int drm_mode_cursor_ioctl(struct drm_device *dev,
100 void *data, struct drm_file *file_priv);
101int drm_mode_cursor2_ioctl(struct drm_device *dev,
102 void *data, struct drm_file *file_priv);
103int drm_mode_getproperty_ioctl(struct drm_device *dev,
104 void *data, struct drm_file *file_priv);
105int drm_mode_getblob_ioctl(struct drm_device *dev,
106 void *data, struct drm_file *file_priv);
107int drm_mode_createblob_ioctl(struct drm_device *dev,
108 void *data, struct drm_file *file_priv);
109int drm_mode_destroyblob_ioctl(struct drm_device *dev,
110 void *data, struct drm_file *file_priv);
111int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
112 void *data, struct drm_file *file_priv);
113int drm_mode_getencoder(struct drm_device *dev,
114 void *data, struct drm_file *file_priv);
115int drm_mode_gamma_get_ioctl(struct drm_device *dev,
116 void *data, struct drm_file *file_priv);
117int drm_mode_gamma_set_ioctl(struct drm_device *dev,
118 void *data, struct drm_file *file_priv);
119
120int drm_mode_page_flip_ioctl(struct drm_device *dev,
121 void *data, struct drm_file *file_priv);
38 122
39/* drm_atomic.c */ 123/* drm_atomic.c */
40int drm_atomic_get_property(struct drm_mode_object *obj, 124int drm_atomic_get_property(struct drm_mode_object *obj,
41 struct drm_property *property, uint64_t *val); 125 struct drm_property *property, uint64_t *val);
42int drm_mode_atomic_ioctl(struct drm_device *dev, 126int drm_mode_atomic_ioctl(struct drm_device *dev,
43 void *data, struct drm_file *file_priv); 127 void *data, struct drm_file *file_priv);
44 128
129int drm_modeset_register_all(struct drm_device *dev);
130void drm_modeset_unregister_all(struct drm_device *dev);
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index 3bcf8e6a85b3..fa10cef2ba37 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -46,11 +46,8 @@
46 46
47static const struct drm_info_list drm_debugfs_list[] = { 47static const struct drm_info_list drm_debugfs_list[] = {
48 {"name", drm_name_info, 0}, 48 {"name", drm_name_info, 0},
49 {"vm", drm_vm_info, 0},
50 {"clients", drm_clients_info, 0}, 49 {"clients", drm_clients_info, 0},
51 {"bufs", drm_bufs_info, 0},
52 {"gem_names", drm_gem_name_info, DRIVER_GEM}, 50 {"gem_names", drm_gem_name_info, DRIVER_GEM},
53 {"vma", drm_vma_info, 0},
54}; 51};
55#define DRM_DEBUGFS_ENTRIES ARRAY_SIZE(drm_debugfs_list) 52#define DRM_DEBUGFS_ENTRIES ARRAY_SIZE(drm_debugfs_list)
56 53
diff --git a/drivers/gpu/drm/drm_dp_aux_dev.c b/drivers/gpu/drm/drm_dp_aux_dev.c
index 3334baacf43d..734f86a345f6 100644
--- a/drivers/gpu/drm/drm_dp_aux_dev.c
+++ b/drivers/gpu/drm/drm_dp_aux_dev.c
@@ -355,8 +355,7 @@ int drm_dp_aux_dev_init(void)
355 355
356 drm_dp_aux_dev_class = class_create(THIS_MODULE, "drm_dp_aux_dev"); 356 drm_dp_aux_dev_class = class_create(THIS_MODULE, "drm_dp_aux_dev");
357 if (IS_ERR(drm_dp_aux_dev_class)) { 357 if (IS_ERR(drm_dp_aux_dev_class)) {
358 res = PTR_ERR(drm_dp_aux_dev_class); 358 return PTR_ERR(drm_dp_aux_dev_class);
359 goto out;
360 } 359 }
361 drm_dp_aux_dev_class->dev_groups = drm_dp_aux_groups; 360 drm_dp_aux_dev_class->dev_groups = drm_dp_aux_groups;
362 361
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index eeaf5a7c3aa7..8f11b8741e42 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -203,7 +203,7 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
203 203
204 ret = aux->transfer(aux, &msg); 204 ret = aux->transfer(aux, &msg);
205 205
206 if (ret > 0) { 206 if (ret >= 0) {
207 native_reply = msg.reply & DP_AUX_NATIVE_REPLY_MASK; 207 native_reply = msg.reply & DP_AUX_NATIVE_REPLY_MASK;
208 if (native_reply == DP_AUX_NATIVE_REPLY_ACK) { 208 if (native_reply == DP_AUX_NATIVE_REPLY_ACK) {
209 if (ret == size) 209 if (ret == size)
@@ -708,8 +708,6 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
708 708
709 memset(&msg, 0, sizeof(msg)); 709 memset(&msg, 0, sizeof(msg));
710 710
711 mutex_lock(&aux->hw_mutex);
712
713 for (i = 0; i < num; i++) { 711 for (i = 0; i < num; i++) {
714 msg.address = msgs[i].addr; 712 msg.address = msgs[i].addr;
715 drm_dp_i2c_msg_set_request(&msg, &msgs[i]); 713 drm_dp_i2c_msg_set_request(&msg, &msgs[i]);
@@ -764,8 +762,6 @@ static int drm_dp_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
764 msg.size = 0; 762 msg.size = 0;
765 (void)drm_dp_i2c_do_msg(aux, &msg); 763 (void)drm_dp_i2c_do_msg(aux, &msg);
766 764
767 mutex_unlock(&aux->hw_mutex);
768
769 return err; 765 return err;
770} 766}
771 767
@@ -774,22 +770,64 @@ static const struct i2c_algorithm drm_dp_i2c_algo = {
774 .master_xfer = drm_dp_i2c_xfer, 770 .master_xfer = drm_dp_i2c_xfer,
775}; 771};
776 772
773static struct drm_dp_aux *i2c_to_aux(struct i2c_adapter *i2c)
774{
775 return container_of(i2c, struct drm_dp_aux, ddc);
776}
777
778static void lock_bus(struct i2c_adapter *i2c, unsigned int flags)
779{
780 mutex_lock(&i2c_to_aux(i2c)->hw_mutex);
781}
782
783static int trylock_bus(struct i2c_adapter *i2c, unsigned int flags)
784{
785 return mutex_trylock(&i2c_to_aux(i2c)->hw_mutex);
786}
787
788static void unlock_bus(struct i2c_adapter *i2c, unsigned int flags)
789{
790 mutex_unlock(&i2c_to_aux(i2c)->hw_mutex);
791}
792
777/** 793/**
778 * drm_dp_aux_register() - initialise and register aux channel 794 * drm_dp_aux_init() - minimally initialise an aux channel
779 * @aux: DisplayPort AUX channel 795 * @aux: DisplayPort AUX channel
780 * 796 *
781 * Returns 0 on success or a negative error code on failure. 797 * If you need to use the drm_dp_aux's i2c adapter prior to registering it
798 * with the outside world, call drm_dp_aux_init() first. You must still
799 * call drm_dp_aux_register() once the connector has been registered to
800 * allow userspace access to the auxiliary DP channel.
782 */ 801 */
783int drm_dp_aux_register(struct drm_dp_aux *aux) 802void drm_dp_aux_init(struct drm_dp_aux *aux)
784{ 803{
785 int ret;
786
787 mutex_init(&aux->hw_mutex); 804 mutex_init(&aux->hw_mutex);
788 805
789 aux->ddc.algo = &drm_dp_i2c_algo; 806 aux->ddc.algo = &drm_dp_i2c_algo;
790 aux->ddc.algo_data = aux; 807 aux->ddc.algo_data = aux;
791 aux->ddc.retries = 3; 808 aux->ddc.retries = 3;
792 809
810 aux->ddc.lock_bus = lock_bus;
811 aux->ddc.trylock_bus = trylock_bus;
812 aux->ddc.unlock_bus = unlock_bus;
813}
814EXPORT_SYMBOL(drm_dp_aux_init);
815
816/**
817 * drm_dp_aux_register() - initialise and register aux channel
818 * @aux: DisplayPort AUX channel
819 *
820 * Automatically calls drm_dp_aux_init() if this hasn't been done yet.
821 *
822 * Returns 0 on success or a negative error code on failure.
823 */
824int drm_dp_aux_register(struct drm_dp_aux *aux)
825{
826 int ret;
827
828 if (!aux->ddc.algo)
829 drm_dp_aux_init(aux);
830
793 aux->ddc.class = I2C_CLASS_DDC; 831 aux->ddc.class = I2C_CLASS_DDC;
794 aux->ddc.owner = THIS_MODULE; 832 aux->ddc.owner = THIS_MODULE;
795 aux->ddc.dev.parent = aux->dev; 833 aux->ddc.dev.parent = aux->dev;
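
Editor's note: the drm_dp_aux_init()/drm_dp_aux_register() split exists so a driver can use DPCD and i2c-over-AUX before the connector, and therefore the aux i2c adapter and chardev, is visible to userspace. A minimal sketch of the intended two-step pattern; the foo_* structure and the transfer body are hypothetical:

	#include <drm/drm_dp_helper.h>

	struct foo_dp {
		struct drm_dp_aux aux;
		/* hardware state ... */
	};

	static ssize_t foo_aux_transfer(struct drm_dp_aux *aux,
					struct drm_dp_aux_msg *msg)
	{
		/* drive the hardware AUX channel; return bytes transferred */
		return msg->size;
	}

	static void foo_dp_probe(struct foo_dp *dp, struct device *dev)
	{
		dp->aux.name = "foo-dp-aux";
		dp->aux.dev = dev;
		dp->aux.transfer = foo_aux_transfer;

		/* DPCD reads and EDID-over-i2c work from here on, before
		 * drm_dev_register()/drm_connector_register() have run. */
		drm_dp_aux_init(&dp->aux);
	}

	static int foo_dp_bind(struct foo_dp *dp)
	{
		/* Called once the connector is registered: exposes the i2c
		 * adapter and the /dev/drm_dp_auxN node to userspace. */
		return drm_dp_aux_register(&dp->aux);
	}
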
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 6537908050d7..04e457117980 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1493,11 +1493,8 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
1493 WARN_ON(!mutex_is_locked(&mgr->qlock)); 1493 WARN_ON(!mutex_is_locked(&mgr->qlock));
1494 1494
1495 /* construct a chunk from the first msg in the tx_msg queue */ 1495 /* construct a chunk from the first msg in the tx_msg queue */
1496 if (list_empty(&mgr->tx_msg_downq)) { 1496 if (list_empty(&mgr->tx_msg_downq))
1497 mgr->tx_down_in_progress = false;
1498 return; 1497 return;
1499 }
1500 mgr->tx_down_in_progress = true;
1501 1498
1502 txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next); 1499 txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next);
1503 ret = process_single_tx_qlock(mgr, txmsg, false); 1500 ret = process_single_tx_qlock(mgr, txmsg, false);
@@ -1512,10 +1509,6 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
1512 txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT; 1509 txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
1513 wake_up(&mgr->tx_waitq); 1510 wake_up(&mgr->tx_waitq);
1514 } 1511 }
1515 if (list_empty(&mgr->tx_msg_downq)) {
1516 mgr->tx_down_in_progress = false;
1517 return;
1518 }
1519} 1512}
1520 1513
1521/* called holding qlock */ 1514/* called holding qlock */
@@ -1538,7 +1531,7 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
1538{ 1531{
1539 mutex_lock(&mgr->qlock); 1532 mutex_lock(&mgr->qlock);
1540 list_add_tail(&txmsg->next, &mgr->tx_msg_downq); 1533 list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
1541 if (!mgr->tx_down_in_progress) 1534 if (list_is_singular(&mgr->tx_msg_downq))
1542 process_single_down_tx_qlock(mgr); 1535 process_single_down_tx_qlock(mgr);
1543 mutex_unlock(&mgr->qlock); 1536 mutex_unlock(&mgr->qlock);
1544} 1537}
@@ -2372,6 +2365,7 @@ EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
2372 2365
2373/** 2366/**
2374 * drm_dp_mst_detect_port() - get connection status for an MST port 2367 * drm_dp_mst_detect_port() - get connection status for an MST port
2368 * @connector: DRM connector for this port
2375 * @mgr: manager for this port 2369 * @mgr: manager for this port
2376 * @port: unverified pointer to a port 2370 * @port: unverified pointer to a port
2377 * 2371 *
@@ -2887,7 +2881,7 @@ static void drm_dp_tx_work(struct work_struct *work)
2887 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work); 2881 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);
2888 2882
2889 mutex_lock(&mgr->qlock); 2883 mutex_lock(&mgr->qlock);
2890 if (mgr->tx_down_in_progress) 2884 if (!list_empty(&mgr->tx_msg_downq))
2891 process_single_down_tx_qlock(mgr); 2885 process_single_down_tx_qlock(mgr);
2892 mutex_unlock(&mgr->qlock); 2886 mutex_unlock(&mgr->qlock);
2893} 2887}
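
Editor's note: the tx_down_in_progress flag goes away because the queue already carries the same information: processing is kicked only when the freshly queued message is the sole entry, and the worker checks list emptiness directly. A tiny illustration of that invariant with plain list primitives; foo_* is hypothetical and not the MST code itself:

	#include <linux/list.h>

	static void foo_queue_and_maybe_kick(struct list_head *queue,
					     struct list_head *item,
					     void (*process)(struct list_head *))
	{
		list_add_tail(item, queue);
		if (list_is_singular(queue))
			/* queue was empty, so nobody is draining it yet */
			process(queue);
	}
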
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index bff89226a344..be27ed36f56e 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -34,8 +34,10 @@
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <drm/drmP.h> 35#include <drm/drmP.h>
36#include <drm/drm_core.h> 36#include <drm/drm_core.h>
37#include "drm_crtc_internal.h"
37#include "drm_legacy.h" 38#include "drm_legacy.h"
38#include "drm_internal.h" 39#include "drm_internal.h"
40#include "drm_crtc_internal.h"
39 41
40/* 42/*
41 * drm_debug: Enable debug output. 43 * drm_debug: Enable debug output.
@@ -93,114 +95,6 @@ void drm_ut_debug_printk(const char *function_name, const char *format, ...)
93} 95}
94EXPORT_SYMBOL(drm_ut_debug_printk); 96EXPORT_SYMBOL(drm_ut_debug_printk);
95 97
96struct drm_master *drm_master_create(struct drm_minor *minor)
97{
98 struct drm_master *master;
99
100 master = kzalloc(sizeof(*master), GFP_KERNEL);
101 if (!master)
102 return NULL;
103
104 kref_init(&master->refcount);
105 spin_lock_init(&master->lock.spinlock);
106 init_waitqueue_head(&master->lock.lock_queue);
107 idr_init(&master->magic_map);
108 master->minor = minor;
109
110 return master;
111}
112
113struct drm_master *drm_master_get(struct drm_master *master)
114{
115 kref_get(&master->refcount);
116 return master;
117}
118EXPORT_SYMBOL(drm_master_get);
119
120static void drm_master_destroy(struct kref *kref)
121{
122 struct drm_master *master = container_of(kref, struct drm_master, refcount);
123 struct drm_device *dev = master->minor->dev;
124
125 if (dev->driver->master_destroy)
126 dev->driver->master_destroy(dev, master);
127
128 drm_legacy_master_rmmaps(dev, master);
129
130 idr_destroy(&master->magic_map);
131 kfree(master->unique);
132 kfree(master);
133}
134
135void drm_master_put(struct drm_master **master)
136{
137 kref_put(&(*master)->refcount, drm_master_destroy);
138 *master = NULL;
139}
140EXPORT_SYMBOL(drm_master_put);
141
142int drm_setmaster_ioctl(struct drm_device *dev, void *data,
143 struct drm_file *file_priv)
144{
145 int ret = 0;
146
147 mutex_lock(&dev->master_mutex);
148 if (file_priv->is_master)
149 goto out_unlock;
150
151 if (file_priv->minor->master) {
152 ret = -EINVAL;
153 goto out_unlock;
154 }
155
156 if (!file_priv->master) {
157 ret = -EINVAL;
158 goto out_unlock;
159 }
160
161 if (!file_priv->allowed_master) {
162 ret = drm_new_set_master(dev, file_priv);
163 goto out_unlock;
164 }
165
166 file_priv->minor->master = drm_master_get(file_priv->master);
167 file_priv->is_master = 1;
168 if (dev->driver->master_set) {
169 ret = dev->driver->master_set(dev, file_priv, false);
170 if (unlikely(ret != 0)) {
171 file_priv->is_master = 0;
172 drm_master_put(&file_priv->minor->master);
173 }
174 }
175
176out_unlock:
177 mutex_unlock(&dev->master_mutex);
178 return ret;
179}
180
181int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
182 struct drm_file *file_priv)
183{
184 int ret = -EINVAL;
185
186 mutex_lock(&dev->master_mutex);
187 if (!file_priv->is_master)
188 goto out_unlock;
189
190 if (!file_priv->minor->master)
191 goto out_unlock;
192
193 ret = 0;
194 if (dev->driver->master_drop)
195 dev->driver->master_drop(dev, file_priv, false);
196 drm_master_put(&file_priv->minor->master);
197 file_priv->is_master = 0;
198
199out_unlock:
200 mutex_unlock(&dev->master_mutex);
201 return ret;
202}
203
204/* 98/*
205 * DRM Minors 99 * DRM Minors
206 * A DRM device can provide several char-dev interfaces on the DRM-Major. Each 100 * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
@@ -405,10 +299,9 @@ void drm_minor_release(struct drm_minor *minor)
405 * callbacks implemented by the driver. The driver then needs to initialize all 299 * callbacks implemented by the driver. The driver then needs to initialize all
406 * the various subsystems for the drm device like memory management, vblank 300 * the various subsystems for the drm device like memory management, vblank
407 * handling, modesetting support and intial output configuration plus obviously 301 * handling, modesetting support and intial output configuration plus obviously
408 * initialize all the corresponding hardware bits. An important part of this is 302 * initialize all the corresponding hardware bits. Finally when everything is up
409 * also calling drm_dev_set_unique() to set the userspace-visible unique name of 303 * and running and ready for userspace the device instance can be published
410 * this device instance. Finally when everything is up and running and ready for 304 * using drm_dev_register().
411 * userspace the device instance can be published using drm_dev_register().
412 * 305 *
413 * There is also deprecated support for initalizing device instances using 306 * There is also deprecated support for initalizing device instances using
414 * bus-specific helpers and the ->load() callback. But due to 307 * bus-specific helpers and the ->load() callback. But due to
@@ -430,6 +323,14 @@ void drm_minor_release(struct drm_minor *minor)
430 * dev_priv field of &drm_device. 323 * dev_priv field of &drm_device.
431 */ 324 */
432 325
326static int drm_dev_set_unique(struct drm_device *dev, const char *name)
327{
328 kfree(dev->unique);
329 dev->unique = kstrdup(name, GFP_KERNEL);
330
331 return dev->unique ? 0 : -ENOMEM;
332}
333
433/** 334/**
434 * drm_put_dev - Unregister and release a DRM device 335 * drm_put_dev - Unregister and release a DRM device
435 * @dev: DRM device 336 * @dev: DRM device
@@ -461,9 +362,7 @@ EXPORT_SYMBOL(drm_put_dev);
461void drm_unplug_dev(struct drm_device *dev) 362void drm_unplug_dev(struct drm_device *dev)
462{ 363{
463 /* for a USB device */ 364 /* for a USB device */
464 drm_minor_unregister(dev, DRM_MINOR_LEGACY); 365 drm_dev_unregister(dev);
465 drm_minor_unregister(dev, DRM_MINOR_RENDER);
466 drm_minor_unregister(dev, DRM_MINOR_CONTROL);
467 366
468 mutex_lock(&drm_global_mutex); 367 mutex_lock(&drm_global_mutex);
469 368
@@ -549,11 +448,12 @@ static void drm_fs_inode_free(struct inode *inode)
549} 448}
550 449
551/** 450/**
552 * drm_dev_alloc - Allocate new DRM device 451 * drm_dev_init - Initialise new DRM device
553 * @driver: DRM driver to allocate device for 452 * @dev: DRM device
453 * @driver: DRM driver
554 * @parent: Parent device object 454 * @parent: Parent device object
555 * 455 *
556 * Allocate and initialize a new DRM device. No device registration is done. 456 * Initialize a new DRM device. No device registration is done.
557 * Call drm_dev_register() to advertice the device to user space and register it 457 * Call drm_dev_register() to advertice the device to user space and register it
558 * with other core subsystems. This should be done last in the device 458 * with other core subsystems. This should be done last in the device
559 * initialization sequence to make sure userspace can't access an inconsistent 459 * initialization sequence to make sure userspace can't access an inconsistent
@@ -564,19 +464,18 @@ static void drm_fs_inode_free(struct inode *inode)
564 * 464 *
565 * Note that for purely virtual devices @parent can be NULL. 465 * Note that for purely virtual devices @parent can be NULL.
566 * 466 *
467 * Drivers that do not want to allocate their own device struct
468 * embedding struct &drm_device can call drm_dev_alloc() instead.
469 *
567 * RETURNS: 470 * RETURNS:
568 * Pointer to new DRM device, or NULL if out of memory. 471 * 0 on success, or error code on failure.
569 */ 472 */
570struct drm_device *drm_dev_alloc(struct drm_driver *driver, 473int drm_dev_init(struct drm_device *dev,
571 struct device *parent) 474 struct drm_driver *driver,
475 struct device *parent)
572{ 476{
573 struct drm_device *dev;
574 int ret; 477 int ret;
575 478
576 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
577 if (!dev)
578 return NULL;
579
580 kref_init(&dev->ref); 479 kref_init(&dev->ref);
581 dev->dev = parent; 480 dev->dev = parent;
582 dev->driver = driver; 481 dev->driver = driver;
@@ -605,8 +504,6 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
605 ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL); 504 ret = drm_minor_alloc(dev, DRM_MINOR_CONTROL);
606 if (ret) 505 if (ret)
607 goto err_minors; 506 goto err_minors;
608
609 WARN_ON(driver->suspend || driver->resume);
610 } 507 }
611 508
612 if (drm_core_check_feature(dev, DRIVER_RENDER)) { 509 if (drm_core_check_feature(dev, DRIVER_RENDER)) {
@@ -619,7 +516,8 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
619 if (ret) 516 if (ret)
620 goto err_minors; 517 goto err_minors;
621 518
622 if (drm_ht_create(&dev->map_hash, 12)) 519 ret = drm_ht_create(&dev->map_hash, 12);
520 if (ret)
623 goto err_minors; 521 goto err_minors;
624 522
625 drm_legacy_ctxbitmap_init(dev); 523 drm_legacy_ctxbitmap_init(dev);
@@ -632,13 +530,13 @@ struct drm_device *drm_dev_alloc(struct drm_driver *driver,
632 } 530 }
633 } 531 }
634 532
635 if (parent) { 533 /* Use the parent device name as DRM device unique identifier, but fall
636 ret = drm_dev_set_unique(dev, dev_name(parent)); 534 * back to the driver name for virtual devices like vgem. */
637 if (ret) 535 ret = drm_dev_set_unique(dev, parent ? dev_name(parent) : driver->name);
638 goto err_setunique; 536 if (ret)
639 } 537 goto err_setunique;
640 538
641 return dev; 539 return 0;
642 540
643err_setunique: 541err_setunique:
644 if (drm_core_check_feature(dev, DRIVER_GEM)) 542 if (drm_core_check_feature(dev, DRIVER_GEM))
@@ -653,8 +551,49 @@ err_minors:
653 drm_fs_inode_free(dev->anon_inode); 551 drm_fs_inode_free(dev->anon_inode);
654err_free: 552err_free:
655 mutex_destroy(&dev->master_mutex); 553 mutex_destroy(&dev->master_mutex);
656 kfree(dev); 554 return ret;
657 return NULL; 555}
556EXPORT_SYMBOL(drm_dev_init);
557
558/**
559 * drm_dev_alloc - Allocate new DRM device
560 * @driver: DRM driver to allocate device for
561 * @parent: Parent device object
562 *
563 * Allocate and initialize a new DRM device. No device registration is done.
564 * Call drm_dev_register() to advertise the device to user space and register it
565 * with other core subsystems. This should be done last in the device
566 * initialization sequence to make sure userspace can't access an inconsistent
567 * state.
568 *
569 * The initial ref-count of the object is 1. Use drm_dev_ref() and
570 * drm_dev_unref() to take and drop further ref-counts.
571 *
572 * Note that for purely virtual devices @parent can be NULL.
573 *
574 * Drivers that wish to subclass or embed struct &drm_device into their
575 * own struct should look at using drm_dev_init() instead.
576 *
577 * RETURNS:
578 * Pointer to new DRM device, or NULL if out of memory.
579 */
580struct drm_device *drm_dev_alloc(struct drm_driver *driver,
581 struct device *parent)
582{
583 struct drm_device *dev;
584 int ret;
585
586 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
587 if (!dev)
588 return NULL;
589
590 ret = drm_dev_init(dev, driver, parent);
591 if (ret) {
592 kfree(dev);
593 return NULL;
594 }
595
596 return dev;
658} 597}
659EXPORT_SYMBOL(drm_dev_alloc); 598EXPORT_SYMBOL(drm_dev_alloc);
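
Editor's note: drm_dev_init() is the piece that lets a driver embed struct drm_device in its own device structure instead of holding the pointer drm_dev_alloc() hands back. A hedged sketch of that subclassing pattern; the foo_* names are hypothetical:

	#include <linux/slab.h>
	#include <linux/err.h>
	#include <drm/drmP.h>

	struct foo_device {
		struct drm_device drm;
		/* driver-private state ... */
	};

	static struct foo_device *foo_device_create(struct drm_driver *driver,
						    struct device *parent)
	{
		struct foo_device *foo;
		int ret;

		foo = kzalloc(sizeof(*foo), GFP_KERNEL);
		if (!foo)
			return ERR_PTR(-ENOMEM);

		ret = drm_dev_init(&foo->drm, driver, parent);
		if (ret) {
			kfree(foo);
			return ERR_PTR(ret);
		}

		return foo;
	}
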
660 599
@@ -718,11 +657,7 @@ EXPORT_SYMBOL(drm_dev_unref);
718 * 657 *
719 * Register the DRM device @dev with the system, advertise device to user-space 658 * Register the DRM device @dev with the system, advertise device to user-space
720 * and start normal device operation. @dev must be allocated via drm_dev_alloc() 659 * and start normal device operation. @dev must be allocated via drm_dev_alloc()
721 * previously. Right after drm_dev_register() the driver should call 660 * previously.
722 * drm_connector_register_all() to register all connectors in sysfs. This is
723 * a separate call for backward compatibility with drivers still using
724 * the deprecated ->load() callback, where connectors are registered from within
725 * the ->load() callback.
726 * 661 *
727 * Never call this twice on any device! 662 * Never call this twice on any device!
728 * 663 *
@@ -759,6 +694,9 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
759 goto err_minors; 694 goto err_minors;
760 } 695 }
761 696
697 if (drm_core_check_feature(dev, DRIVER_MODESET))
698 drm_modeset_register_all(dev);
699
762 ret = 0; 700 ret = 0;
763 goto out_unlock; 701 goto out_unlock;
764 702
@@ -789,6 +727,9 @@ void drm_dev_unregister(struct drm_device *dev)
789 727
790 drm_lastclose(dev); 728 drm_lastclose(dev);
791 729
730 if (drm_core_check_feature(dev, DRIVER_MODESET))
731 drm_modeset_unregister_all(dev);
732
792 if (dev->driver->unload) 733 if (dev->driver->unload)
793 dev->driver->unload(dev); 734 dev->driver->unload(dev);
794 735
@@ -806,26 +747,6 @@ void drm_dev_unregister(struct drm_device *dev)
806} 747}
807EXPORT_SYMBOL(drm_dev_unregister); 748EXPORT_SYMBOL(drm_dev_unregister);
808 749
809/**
810 * drm_dev_set_unique - Set the unique name of a DRM device
811 * @dev: device of which to set the unique name
812 * @name: unique name
813 *
814 * Sets the unique name of a DRM device using the specified string. Drivers
815 * can use this at driver probe time if the unique name of the devices they
816 * drive is static.
817 *
818 * Return: 0 on success or a negative error code on failure.
819 */
820int drm_dev_set_unique(struct drm_device *dev, const char *name)
821{
822 kfree(dev->unique);
823 dev->unique = kstrdup(name, GFP_KERNEL);
824
825 return dev->unique ? 0 : -ENOMEM;
826}
827EXPORT_SYMBOL(drm_dev_set_unique);
828
829/* 750/*
830 * DRM Core 751 * DRM Core
831 * The DRM core module initializes all global DRM objects and makes them 752 * The DRM core module initializes all global DRM objects and makes them
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index 9a401aed98e0..622f788bff46 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -271,7 +271,7 @@ int drm_load_edid_firmware(struct drm_connector *connector)
271 * by commas, search through the list looking for one that 271 * by commas, search through the list looking for one that
272 * matches the connector. 272 * matches the connector.
273 * 273 *
274 * If there's one or more that don't't specify a connector, keep 274 * If there's one or more that doesn't specify a connector, keep
275 * the last one found one as a fallback. 275 * the last one found one as a fallback.
276 */ 276 */
277 fwstr = kstrdup(edid_firmware, GFP_KERNEL); 277 fwstr = kstrdup(edid_firmware, GFP_KERNEL);
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index 5075fae3c4e2..1fd6eac1400c 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -23,6 +23,7 @@
23#include <drm/drm_crtc_helper.h> 23#include <drm/drm_crtc_helper.h>
24#include <drm/drm_gem_cma_helper.h> 24#include <drm/drm_gem_cma_helper.h>
25#include <drm/drm_fb_cma_helper.h> 25#include <drm/drm_fb_cma_helper.h>
26#include <linux/dma-mapping.h>
26#include <linux/module.h> 27#include <linux/module.h>
27 28
28#define DEFAULT_FBDEFIO_DELAY_MS 50 29#define DEFAULT_FBDEFIO_DELAY_MS 50
@@ -52,7 +53,7 @@ struct drm_fbdev_cma {
52 * will be set up automatically. dirty() is called by 53 * will be set up automatically. dirty() is called by
53 * drm_fb_helper_deferred_io() in process context (struct delayed_work). 54 * drm_fb_helper_deferred_io() in process context (struct delayed_work).
54 * 55 *
55 * Example fbdev deferred io code: 56 * Example fbdev deferred io code::
56 * 57 *
57 * static int driver_fbdev_fb_dirty(struct drm_framebuffer *fb, 58 * static int driver_fbdev_fb_dirty(struct drm_framebuffer *fb,
58 * struct drm_file *file_priv, 59 * struct drm_file *file_priv,
@@ -162,6 +163,10 @@ static struct drm_fb_cma *drm_fb_cma_alloc(struct drm_device *dev,
162 * drm_fb_cma_create_with_funcs() - helper function for the 163 * drm_fb_cma_create_with_funcs() - helper function for the
163 * &drm_mode_config_funcs ->fb_create 164 * &drm_mode_config_funcs ->fb_create
164 * callback function 165 * callback function
166 * @dev: DRM device
167 * @file_priv: drm file for the ioctl call
168 * @mode_cmd: metadata from the userspace fb creation request
169 * @funcs: vtable to be used for the new framebuffer object
165 * 170 *
166 * This can be used to set &drm_framebuffer_funcs for drivers that need the 171 * This can be used to set &drm_framebuffer_funcs for drivers that need the
167 * dirty() callback. Use drm_fb_cma_create() if you don't need to change 172 * dirty() callback. Use drm_fb_cma_create() if you don't need to change
@@ -223,6 +228,9 @@ EXPORT_SYMBOL_GPL(drm_fb_cma_create_with_funcs);
223 228
224/** 229/**
225 * drm_fb_cma_create() - &drm_mode_config_funcs ->fb_create callback function 230 * drm_fb_cma_create() - &drm_mode_config_funcs ->fb_create callback function
231 * @dev: DRM device
232 * @file_priv: drm file for the ioctl call
233 * @mode_cmd: metadata from the userspace fb creation request
226 * 234 *
227 * If your hardware has special alignment or pitch requirements these should be 235 * If your hardware has special alignment or pitch requirements these should be
228 * checked before calling this function. Use drm_fb_cma_create_with_funcs() if 236 * checked before calling this function. Use drm_fb_cma_create_with_funcs() if
@@ -246,7 +254,7 @@ EXPORT_SYMBOL_GPL(drm_fb_cma_create);
246 * This function will usually be called from the CRTC callback functions. 254 * This function will usually be called from the CRTC callback functions.
247 */ 255 */
248struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb, 256struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
249 unsigned int plane) 257 unsigned int plane)
250{ 258{
251 struct drm_fb_cma *fb_cma = to_fb_cma(fb); 259 struct drm_fb_cma *fb_cma = to_fb_cma(fb);
252 260
@@ -258,10 +266,6 @@ struct drm_gem_cma_object *drm_fb_cma_get_gem_obj(struct drm_framebuffer *fb,
258EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj); 266EXPORT_SYMBOL_GPL(drm_fb_cma_get_gem_obj);
259 267
260#ifdef CONFIG_DEBUG_FS 268#ifdef CONFIG_DEBUG_FS
261/*
262 * drm_fb_cma_describe() - Helper to dump information about a single
263 * CMA framebuffer object
264 */
265static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m) 269static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
266{ 270{
267 struct drm_fb_cma *fb_cma = to_fb_cma(fb); 271 struct drm_fb_cma *fb_cma = to_fb_cma(fb);
@@ -279,7 +283,9 @@ static void drm_fb_cma_describe(struct drm_framebuffer *fb, struct seq_file *m)
279 283
280/** 284/**
281 * drm_fb_cma_debugfs_show() - Helper to list CMA framebuffer objects 285 * drm_fb_cma_debugfs_show() - Helper to list CMA framebuffer objects
282 * in debugfs. 286 * in debugfs.
287 * @m: output file
288 * @arg: private data for the callback
283 */ 289 */
284int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg) 290int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg)
285{ 291{
@@ -297,6 +303,12 @@ int drm_fb_cma_debugfs_show(struct seq_file *m, void *arg)
297EXPORT_SYMBOL_GPL(drm_fb_cma_debugfs_show); 303EXPORT_SYMBOL_GPL(drm_fb_cma_debugfs_show);
298#endif 304#endif
299 305
306static int drm_fb_cma_mmap(struct fb_info *info, struct vm_area_struct *vma)
307{
308 return dma_mmap_writecombine(info->device, vma, info->screen_base,
309 info->fix.smem_start, info->fix.smem_len);
310}
311
300static struct fb_ops drm_fbdev_cma_ops = { 312static struct fb_ops drm_fbdev_cma_ops = {
301 .owner = THIS_MODULE, 313 .owner = THIS_MODULE,
302 .fb_fillrect = drm_fb_helper_sys_fillrect, 314 .fb_fillrect = drm_fb_helper_sys_fillrect,
@@ -307,6 +319,7 @@ static struct fb_ops drm_fbdev_cma_ops = {
307 .fb_blank = drm_fb_helper_blank, 319 .fb_blank = drm_fb_helper_blank,
308 .fb_pan_display = drm_fb_helper_pan_display, 320 .fb_pan_display = drm_fb_helper_pan_display,
309 .fb_setcmap = drm_fb_helper_setcmap, 321 .fb_setcmap = drm_fb_helper_setcmap,
322 .fb_mmap = drm_fb_cma_mmap,
310}; 323};
311 324
312static int drm_fbdev_cma_deferred_io_mmap(struct fb_info *info, 325static int drm_fbdev_cma_deferred_io_mmap(struct fb_info *info,
@@ -333,6 +346,7 @@ static int drm_fbdev_cma_defio_init(struct fb_info *fbi,
333 fbops = kzalloc(sizeof(*fbops), GFP_KERNEL); 346 fbops = kzalloc(sizeof(*fbops), GFP_KERNEL);
334 if (!fbdefio || !fbops) { 347 if (!fbdefio || !fbops) {
335 kfree(fbdefio); 348 kfree(fbdefio);
349 kfree(fbops);
336 return -ENOMEM; 350 return -ENOMEM;
337 } 351 }
338 352
@@ -582,3 +596,18 @@ void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma)
582 drm_fb_helper_hotplug_event(&fbdev_cma->fb_helper); 596 drm_fb_helper_hotplug_event(&fbdev_cma->fb_helper);
583} 597}
584EXPORT_SYMBOL_GPL(drm_fbdev_cma_hotplug_event); 598EXPORT_SYMBOL_GPL(drm_fbdev_cma_hotplug_event);
599
600/**
601 * drm_fbdev_cma_set_suspend - wrapper around drm_fb_helper_set_suspend
602 * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
603 * @state: desired state, zero to resume, non-zero to suspend
604 *
605 * Calls drm_fb_helper_set_suspend, which is a wrapper around
606 * fb_set_suspend implemented by fbdev core.
607 */
608void drm_fbdev_cma_set_suspend(struct drm_fbdev_cma *fbdev_cma, int state)
609{
610 if (fbdev_cma)
611 drm_fb_helper_set_suspend(&fbdev_cma->fb_helper, state);
612}
613EXPORT_SYMBOL(drm_fbdev_cma_set_suspend);
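
Editor's note: a hedged sketch of where the new wrapper is meant to be called from, namely the driver's system sleep hooks, so the fbdev emulation stops touching scanout while the pipeline is down. The foo_* names are hypothetical:

	#include <linux/pm.h>
	#include <linux/device.h>
	#include <drm/drm_fb_cma_helper.h>

	struct foo_drm {
		struct drm_fbdev_cma *fbdev;
		/* ... */
	};

	static int foo_drm_suspend(struct device *dev)
	{
		struct foo_drm *priv = dev_get_drvdata(dev);

		drm_fbdev_cma_set_suspend(priv->fbdev, 1);
		/* disable CRTCs / save hardware state here */
		return 0;
	}

	static int foo_drm_resume(struct device *dev)
	{
		struct foo_drm *priv = dev_get_drvdata(dev);

		/* restore hardware state / re-enable CRTCs here */
		drm_fbdev_cma_set_suspend(priv->fbdev, 0);
		return 0;
	}

	static SIMPLE_DEV_PM_OPS(foo_drm_pm_ops, foo_drm_suspend, foo_drm_resume);
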
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 7c2eb75db60f..ce54e985d91b 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -227,7 +227,7 @@ static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc)
227 g_base = r_base + crtc->gamma_size; 227 g_base = r_base + crtc->gamma_size;
228 b_base = g_base + crtc->gamma_size; 228 b_base = g_base + crtc->gamma_size;
229 229
230 crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size); 230 crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, crtc->gamma_size);
231} 231}
232 232
233/** 233/**
@@ -385,7 +385,7 @@ static int restore_fbdev_mode(struct drm_fb_helper *fb_helper)
385 385
386 drm_warn_on_modeset_not_all_locked(dev); 386 drm_warn_on_modeset_not_all_locked(dev);
387 387
388 if (fb_helper->atomic) 388 if (dev->mode_config.funcs->atomic_commit)
389 return restore_fbdev_mode_atomic(fb_helper); 389 return restore_fbdev_mode_atomic(fb_helper);
390 390
391 drm_for_each_plane(plane, dev) { 391 drm_for_each_plane(plane, dev) {
@@ -464,7 +464,7 @@ static bool drm_fb_helper_is_bound(struct drm_fb_helper *fb_helper)
464 464
465 /* Sometimes user space wants everything disabled, so don't steal the 465 /* Sometimes user space wants everything disabled, so don't steal the
466 * display if there's a master. */ 466 * display if there's a master. */
467 if (dev->primary->master) 467 if (lockless_dereference(dev->master))
468 return false; 468 return false;
469 469
470 drm_for_each_crtc(crtc, dev) { 470 drm_for_each_crtc(crtc, dev) {
@@ -716,8 +716,6 @@ int drm_fb_helper_init(struct drm_device *dev,
716 i++; 716 i++;
717 } 717 }
718 718
719 fb_helper->atomic = !!drm_core_check_feature(dev, DRIVER_ATOMIC);
720
721 return 0; 719 return 0;
722out_free: 720out_free:
723 drm_fb_helper_crtc_free(fb_helper); 721 drm_fb_helper_crtc_free(fb_helper);
@@ -1042,7 +1040,6 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
1042{ 1040{
1043 struct drm_fb_helper *fb_helper = info->par; 1041 struct drm_fb_helper *fb_helper = info->par;
1044 struct drm_framebuffer *fb = fb_helper->fb; 1042 struct drm_framebuffer *fb = fb_helper->fb;
1045 int pindex;
1046 1043
1047 if (info->fix.visual == FB_VISUAL_TRUECOLOR) { 1044 if (info->fix.visual == FB_VISUAL_TRUECOLOR) {
1048 u32 *palette; 1045 u32 *palette;
@@ -1074,38 +1071,10 @@ static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
1074 !fb_helper->funcs->gamma_get)) 1071 !fb_helper->funcs->gamma_get))
1075 return -EINVAL; 1072 return -EINVAL;
1076 1073
1077 pindex = regno; 1074 WARN_ON(fb->bits_per_pixel != 8);
1078
1079 if (fb->bits_per_pixel == 16) {
1080 pindex = regno << 3;
1081
1082 if (fb->depth == 16 && regno > 63)
1083 return -EINVAL;
1084 if (fb->depth == 15 && regno > 31)
1085 return -EINVAL;
1086
1087 if (fb->depth == 16) {
1088 u16 r, g, b;
1089 int i;
1090 if (regno < 32) {
1091 for (i = 0; i < 8; i++)
1092 fb_helper->funcs->gamma_set(crtc, red,
1093 green, blue, pindex + i);
1094 }
1095 1075
1096 fb_helper->funcs->gamma_get(crtc, &r, 1076 fb_helper->funcs->gamma_set(crtc, red, green, blue, regno);
1097 &g, &b,
1098 pindex >> 1);
1099 1077
1100 for (i = 0; i < 4; i++)
1101 fb_helper->funcs->gamma_set(crtc, r,
1102 green, b,
1103 (pindex >> 1) + i);
1104 }
1105 }
1106
1107 if (fb->depth != 16)
1108 fb_helper->funcs->gamma_set(crtc, red, green, blue, pindex);
1109 return 0; 1078 return 0;
1110} 1079}
1111 1080
@@ -1373,7 +1342,7 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
1373 return -EBUSY; 1342 return -EBUSY;
1374 } 1343 }
1375 1344
1376 if (fb_helper->atomic) { 1345 if (dev->mode_config.funcs->atomic_commit) {
1377 ret = pan_display_atomic(var, info); 1346 ret = pan_display_atomic(var, info);
1378 goto unlock; 1347 goto unlock;
1379 } 1348 }
@@ -2000,7 +1969,18 @@ static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
2000 my_score++; 1969 my_score++;
2001 1970
2002 connector_funcs = connector->helper_private; 1971 connector_funcs = connector->helper_private;
2003 encoder = connector_funcs->best_encoder(connector); 1972
1973 /*
1974 * If the DRM device implements atomic hooks and ->best_encoder() is
 1975 * NULL we fall back to the default drm_atomic_helper_best_encoder()
1976 * helper.
1977 */
1978 if (fb_helper->dev->mode_config.funcs->atomic_commit &&
1979 !connector_funcs->best_encoder)
1980 encoder = drm_atomic_helper_best_encoder(connector);
1981 else
1982 encoder = connector_funcs->best_encoder(connector);
1983
2004 if (!encoder) 1984 if (!encoder)
2005 goto out; 1985 goto out;
2006 1986
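
Both drm_fb_helper.c changes follow the same idea: whether a device is atomic is now derived from the mode_config function table instead of a flag cached in drm_fb_helper_init(), and connectors without a ->best_encoder() hook fall back to drm_atomic_helper_best_encoder(). A minimal sketch of that presence test, assuming checking the hook is all a caller needs; the helper name is illustrative only:

    static bool foo_device_is_atomic(struct drm_device *dev)
    {
            /* Atomic drivers always wire up an ->atomic_commit hook. */
            return dev->mode_config.funcs->atomic_commit != NULL;
    }
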
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 7af7f8bcb355..323c238fcac7 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -40,6 +40,7 @@
40#include <linux/module.h> 40#include <linux/module.h>
41#include "drm_legacy.h" 41#include "drm_legacy.h"
42#include "drm_internal.h" 42#include "drm_internal.h"
43#include "drm_crtc_internal.h"
43 44
44/* from BKL pushdown */ 45/* from BKL pushdown */
45DEFINE_MUTEX(drm_global_mutex); 46DEFINE_MUTEX(drm_global_mutex);
@@ -67,7 +68,7 @@ DEFINE_MUTEX(drm_global_mutex);
67 * specific implementations. For GEM-based drivers this is drm_gem_mmap(). 68 * specific implementations. For GEM-based drivers this is drm_gem_mmap().
68 * 69 *
69 * No other file operations are supported by the DRM userspace API. Overall the 70 * No other file operations are supported by the DRM userspace API. Overall the
70 * following is an example #file_operations structure: 71 * following is an example #file_operations structure::
71 * 72 *
72 * static const example_drm_fops = { 73 * static const example_drm_fops = {
73 * .owner = THIS_MODULE, 74 * .owner = THIS_MODULE,
@@ -168,60 +169,6 @@ static int drm_cpu_valid(void)
168} 169}
169 170
170/* 171/*
171 * drm_new_set_master - Allocate a new master object and become master for the
172 * associated master realm.
173 *
174 * @dev: The associated device.
175 * @fpriv: File private identifying the client.
176 *
177 * This function must be called with dev::struct_mutex held.
178 * Returns negative error code on failure. Zero on success.
179 */
180int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv)
181{
182 struct drm_master *old_master;
183 int ret;
184
185 lockdep_assert_held_once(&dev->master_mutex);
186
187 /* create a new master */
188 fpriv->minor->master = drm_master_create(fpriv->minor);
189 if (!fpriv->minor->master)
190 return -ENOMEM;
191
192 /* take another reference for the copy in the local file priv */
193 old_master = fpriv->master;
194 fpriv->master = drm_master_get(fpriv->minor->master);
195
196 if (dev->driver->master_create) {
197 ret = dev->driver->master_create(dev, fpriv->master);
198 if (ret)
199 goto out_err;
200 }
201 if (dev->driver->master_set) {
202 ret = dev->driver->master_set(dev, fpriv, true);
203 if (ret)
204 goto out_err;
205 }
206
207 fpriv->is_master = 1;
208 fpriv->allowed_master = 1;
209 fpriv->authenticated = 1;
210 if (old_master)
211 drm_master_put(&old_master);
212
213 return 0;
214
215out_err:
216 /* drop both references and restore old master on failure */
217 drm_master_put(&fpriv->minor->master);
218 drm_master_put(&fpriv->master);
219 fpriv->master = old_master;
220
221 return ret;
222}
223
224/*
225 * Called whenever a process opens /dev/drm. 172 * Called whenever a process opens /dev/drm.
226 * 173 *
227 * \param filp file pointer. 174 * \param filp file pointer.
@@ -283,19 +230,11 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
283 goto out_prime_destroy; 230 goto out_prime_destroy;
284 } 231 }
285 232
286 /* if there is no current master make this fd it, but do not create 233 if (drm_is_primary_client(priv)) {
287 * any master object for render clients */ 234 ret = drm_master_open(priv);
288 mutex_lock(&dev->master_mutex);
289 if (drm_is_primary_client(priv) && !priv->minor->master) {
290 /* create a new master */
291 ret = drm_new_set_master(dev, priv);
292 if (ret) 235 if (ret)
293 goto out_close; 236 goto out_close;
294 } else if (drm_is_primary_client(priv)) {
295 /* get a reference to the master */
296 priv->master = drm_master_get(priv->minor->master);
297 } 237 }
298 mutex_unlock(&dev->master_mutex);
299 238
300 mutex_lock(&dev->filelist_mutex); 239 mutex_lock(&dev->filelist_mutex);
301 list_add(&priv->lhead, &dev->filelist); 240 list_add(&priv->lhead, &dev->filelist);
@@ -324,7 +263,6 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
324 return 0; 263 return 0;
325 264
326out_close: 265out_close:
327 mutex_unlock(&dev->master_mutex);
328 if (dev->driver->postclose) 266 if (dev->driver->postclose)
329 dev->driver->postclose(dev, priv); 267 dev->driver->postclose(dev, priv);
330out_prime_destroy: 268out_prime_destroy:
@@ -338,18 +276,6 @@ out_prime_destroy:
338 return ret; 276 return ret;
339} 277}
340 278
341static void drm_master_release(struct drm_device *dev, struct file *filp)
342{
343 struct drm_file *file_priv = filp->private_data;
344
345 if (drm_legacy_i_have_hw_lock(dev, file_priv)) {
346 DRM_DEBUG("File %p released, freeing lock for context %d\n",
347 filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
348 drm_legacy_lock_free(&file_priv->master->lock,
349 _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
350 }
351}
352
353static void drm_events_release(struct drm_file *file_priv) 279static void drm_events_release(struct drm_file *file_priv)
354{ 280{
355 struct drm_device *dev = file_priv->minor->dev; 281 struct drm_device *dev = file_priv->minor->dev;
@@ -368,7 +294,7 @@ static void drm_events_release(struct drm_file *file_priv)
368 /* Remove unconsumed events */ 294 /* Remove unconsumed events */
369 list_for_each_entry_safe(e, et, &file_priv->event_list, link) { 295 list_for_each_entry_safe(e, et, &file_priv->event_list, link) {
370 list_del(&e->link); 296 list_del(&e->link);
371 e->destroy(e); 297 kfree(e);
372 } 298 }
373 299
374 spin_unlock_irqrestore(&dev->event_lock, flags); 300 spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -451,11 +377,6 @@ int drm_release(struct inode *inode, struct file *filp)
451 list_del(&file_priv->lhead); 377 list_del(&file_priv->lhead);
452 mutex_unlock(&dev->filelist_mutex); 378 mutex_unlock(&dev->filelist_mutex);
453 379
454 mutex_lock(&dev->struct_mutex);
455 if (file_priv->magic)
456 idr_remove(&file_priv->master->magic_map, file_priv->magic);
457 mutex_unlock(&dev->struct_mutex);
458
459 if (dev->driver->preclose) 380 if (dev->driver->preclose)
460 dev->driver->preclose(dev, file_priv); 381 dev->driver->preclose(dev, file_priv);
461 382
@@ -468,9 +389,8 @@ int drm_release(struct inode *inode, struct file *filp)
468 (long)old_encode_dev(file_priv->minor->kdev->devt), 389 (long)old_encode_dev(file_priv->minor->kdev->devt),
469 dev->open_count); 390 dev->open_count);
470 391
471 /* if the master has gone away we can't do anything with the lock */ 392 if (!drm_core_check_feature(dev, DRIVER_MODESET))
472 if (file_priv->minor->master) 393 drm_legacy_lock_release(dev, filp);
473 drm_master_release(dev, filp);
474 394
475 if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) 395 if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
476 drm_legacy_reclaim_buffers(dev, file_priv); 396 drm_legacy_reclaim_buffers(dev, file_priv);
@@ -487,43 +407,12 @@ int drm_release(struct inode *inode, struct file *filp)
487 407
488 drm_legacy_ctxbitmap_flush(dev, file_priv); 408 drm_legacy_ctxbitmap_flush(dev, file_priv);
489 409
490 mutex_lock(&dev->master_mutex); 410 if (drm_is_primary_client(file_priv))
491 411 drm_master_release(file_priv);
492 if (file_priv->is_master) {
493 struct drm_master *master = file_priv->master;
494
495 /*
496 * Since the master is disappearing, so is the
497 * possibility to lock.
498 */
499 mutex_lock(&dev->struct_mutex);
500 if (master->lock.hw_lock) {
501 if (dev->sigdata.lock == master->lock.hw_lock)
502 dev->sigdata.lock = NULL;
503 master->lock.hw_lock = NULL;
504 master->lock.file_priv = NULL;
505 wake_up_interruptible_all(&master->lock.lock_queue);
506 }
507 mutex_unlock(&dev->struct_mutex);
508
509 if (file_priv->minor->master == file_priv->master) {
510 /* drop the reference held my the minor */
511 if (dev->driver->master_drop)
512 dev->driver->master_drop(dev, file_priv, true);
513 drm_master_put(&file_priv->minor->master);
514 }
515 }
516
517 /* drop the master reference held by the file priv */
518 if (file_priv->master)
519 drm_master_put(&file_priv->master);
520 file_priv->is_master = 0;
521 mutex_unlock(&dev->master_mutex);
522 412
523 if (dev->driver->postclose) 413 if (dev->driver->postclose)
524 dev->driver->postclose(dev, file_priv); 414 dev->driver->postclose(dev, file_priv);
525 415
526
527 if (drm_core_check_feature(dev, DRIVER_PRIME)) 416 if (drm_core_check_feature(dev, DRIVER_PRIME))
528 drm_prime_destroy_file_private(&file_priv->prime); 417 drm_prime_destroy_file_private(&file_priv->prime);
529 418
@@ -636,7 +525,7 @@ put_back_event:
636 } 525 }
637 526
638 ret += length; 527 ret += length;
639 e->destroy(e); 528 kfree(e);
640 } 529 }
641 } 530 }
642 mutex_unlock(&file_priv->event_read_lock); 531 mutex_unlock(&file_priv->event_read_lock);
@@ -713,9 +602,6 @@ int drm_event_reserve_init_locked(struct drm_device *dev,
713 list_add(&p->pending_link, &file_priv->pending_event_list); 602 list_add(&p->pending_link, &file_priv->pending_event_list);
714 p->file_priv = file_priv; 603 p->file_priv = file_priv;
715 604
716 /* we *could* pass this in as arg, but everyone uses kfree: */
717 p->destroy = (void (*) (struct drm_pending_event *)) kfree;
718
719 return 0; 605 return 0;
720} 606}
721EXPORT_SYMBOL(drm_event_reserve_init_locked); 607EXPORT_SYMBOL(drm_event_reserve_init_locked);
@@ -778,7 +664,7 @@ void drm_event_cancel_free(struct drm_device *dev,
778 list_del(&p->pending_link); 664 list_del(&p->pending_link);
779 } 665 }
780 spin_unlock_irqrestore(&dev->event_lock, flags); 666 spin_unlock_irqrestore(&dev->event_lock, flags);
781 p->destroy(p); 667 kfree(p);
782} 668}
783EXPORT_SYMBOL(drm_event_cancel_free); 669EXPORT_SYMBOL(drm_event_cancel_free);
784 670
@@ -800,8 +686,19 @@ void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e)
800{ 686{
801 assert_spin_locked(&dev->event_lock); 687 assert_spin_locked(&dev->event_lock);
802 688
689 if (e->completion) {
690 /* ->completion might disappear as soon as it signalled. */
691 complete_all(e->completion);
692 e->completion = NULL;
693 }
694
695 if (e->fence) {
696 fence_signal(e->fence);
697 fence_put(e->fence);
698 }
699
803 if (!e->file_priv) { 700 if (!e->file_priv) {
804 e->destroy(e); 701 kfree(e);
805 return; 702 return;
806 } 703 }
807 704
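
With the ->destroy() callback gone from struct drm_pending_event, the core now signals the optional completion and fence and then frees every event with plain kfree(). That only works when the pending event is the first member of the driver's allocation. A minimal sketch of the resulting pattern, using hypothetical "foo" names:

    struct foo_event {
            struct drm_pending_event base;  /* must stay first: the core kfree()s &base */
            struct drm_event_vblank event;
    };

    static int foo_queue_event(struct drm_device *dev, struct drm_file *file_priv)
    {
            struct foo_event *e;
            int ret;

            e = kzalloc(sizeof(*e), GFP_KERNEL);
            if (!e)
                    return -ENOMEM;

            e->event.base.type = DRM_EVENT_VBLANK;
            e->event.base.length = sizeof(e->event);

            ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base);
            if (ret) {
                    kfree(e);
                    return ret;
            }

            /* Later, typically from the interrupt path under dev->event_lock: */
            /* drm_send_event_locked(dev, &e->base); */
            return 0;
    }
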
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
new file mode 100644
index 000000000000..0645c85d5f95
--- /dev/null
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -0,0 +1,320 @@
1/*
2 * Copyright (c) 2016 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
3 *
4 * DRM core format related functions
5 *
6 * Permission to use, copy, modify, distribute, and sell this software and its
7 * documentation for any purpose is hereby granted without fee, provided that
8 * the above copyright notice appear in all copies and that both that copyright
9 * notice and this permission notice appear in supporting documentation, and
10 * that the name of the copyright holders not be used in advertising or
11 * publicity pertaining to distribution of the software without specific,
12 * written prior permission. The copyright holders make no representations
13 * about the suitability of this software for any purpose. It is provided "as
14 * is" without express or implied warranty.
15 *
16 * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
17 * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
18 * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
19 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
20 * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
21 * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
22 * OF THIS SOFTWARE.
23 */
24
25#include <linux/bug.h>
26#include <linux/ctype.h>
27#include <linux/export.h>
28#include <linux/kernel.h>
29
30#include <drm/drmP.h>
31#include <drm/drm_fourcc.h>
32
33static char printable_char(int c)
34{
35 return isascii(c) && isprint(c) ? c : '?';
36}
37
38/**
39 * drm_get_format_name - return a string for drm fourcc format
40 * @format: format to compute name of
41 *
42 * Note that the buffer used by this function is globally shared and owned by
43 * the function itself.
44 *
45 * FIXME: This isn't really multithreading safe.
46 */
47const char *drm_get_format_name(uint32_t format)
48{
49 static char buf[32];
50
51 snprintf(buf, sizeof(buf),
52 "%c%c%c%c %s-endian (0x%08x)",
53 printable_char(format & 0xff),
54 printable_char((format >> 8) & 0xff),
55 printable_char((format >> 16) & 0xff),
56 printable_char((format >> 24) & 0x7f),
57 format & DRM_FORMAT_BIG_ENDIAN ? "big" : "little",
58 format);
59
60 return buf;
61}
62EXPORT_SYMBOL(drm_get_format_name);
63
64/**
65 * drm_fb_get_bpp_depth - get the bpp/depth values for format
66 * @format: pixel format (DRM_FORMAT_*)
67 * @depth: storage for the depth value
68 * @bpp: storage for the bpp value
69 *
70 * This only supports RGB formats here for compat with code that doesn't use
71 * pixel formats directly yet.
72 */
73void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
74 int *bpp)
75{
76 switch (format) {
77 case DRM_FORMAT_C8:
78 case DRM_FORMAT_RGB332:
79 case DRM_FORMAT_BGR233:
80 *depth = 8;
81 *bpp = 8;
82 break;
83 case DRM_FORMAT_XRGB1555:
84 case DRM_FORMAT_XBGR1555:
85 case DRM_FORMAT_RGBX5551:
86 case DRM_FORMAT_BGRX5551:
87 case DRM_FORMAT_ARGB1555:
88 case DRM_FORMAT_ABGR1555:
89 case DRM_FORMAT_RGBA5551:
90 case DRM_FORMAT_BGRA5551:
91 *depth = 15;
92 *bpp = 16;
93 break;
94 case DRM_FORMAT_RGB565:
95 case DRM_FORMAT_BGR565:
96 *depth = 16;
97 *bpp = 16;
98 break;
99 case DRM_FORMAT_RGB888:
100 case DRM_FORMAT_BGR888:
101 *depth = 24;
102 *bpp = 24;
103 break;
104 case DRM_FORMAT_XRGB8888:
105 case DRM_FORMAT_XBGR8888:
106 case DRM_FORMAT_RGBX8888:
107 case DRM_FORMAT_BGRX8888:
108 *depth = 24;
109 *bpp = 32;
110 break;
111 case DRM_FORMAT_XRGB2101010:
112 case DRM_FORMAT_XBGR2101010:
113 case DRM_FORMAT_RGBX1010102:
114 case DRM_FORMAT_BGRX1010102:
115 case DRM_FORMAT_ARGB2101010:
116 case DRM_FORMAT_ABGR2101010:
117 case DRM_FORMAT_RGBA1010102:
118 case DRM_FORMAT_BGRA1010102:
119 *depth = 30;
120 *bpp = 32;
121 break;
122 case DRM_FORMAT_ARGB8888:
123 case DRM_FORMAT_ABGR8888:
124 case DRM_FORMAT_RGBA8888:
125 case DRM_FORMAT_BGRA8888:
126 *depth = 32;
127 *bpp = 32;
128 break;
129 default:
130 DRM_DEBUG_KMS("unsupported pixel format %s\n",
131 drm_get_format_name(format));
132 *depth = 0;
133 *bpp = 0;
134 break;
135 }
136}
137EXPORT_SYMBOL(drm_fb_get_bpp_depth);
138
139/**
140 * drm_format_num_planes - get the number of planes for format
141 * @format: pixel format (DRM_FORMAT_*)
142 *
143 * Returns:
144 * The number of planes used by the specified pixel format.
145 */
146int drm_format_num_planes(uint32_t format)
147{
148 switch (format) {
149 case DRM_FORMAT_YUV410:
150 case DRM_FORMAT_YVU410:
151 case DRM_FORMAT_YUV411:
152 case DRM_FORMAT_YVU411:
153 case DRM_FORMAT_YUV420:
154 case DRM_FORMAT_YVU420:
155 case DRM_FORMAT_YUV422:
156 case DRM_FORMAT_YVU422:
157 case DRM_FORMAT_YUV444:
158 case DRM_FORMAT_YVU444:
159 return 3;
160 case DRM_FORMAT_NV12:
161 case DRM_FORMAT_NV21:
162 case DRM_FORMAT_NV16:
163 case DRM_FORMAT_NV61:
164 case DRM_FORMAT_NV24:
165 case DRM_FORMAT_NV42:
166 return 2;
167 default:
168 return 1;
169 }
170}
171EXPORT_SYMBOL(drm_format_num_planes);
172
173/**
174 * drm_format_plane_cpp - determine the bytes per pixel value
175 * @format: pixel format (DRM_FORMAT_*)
176 * @plane: plane index
177 *
178 * Returns:
179 * The bytes per pixel value for the specified plane.
180 */
181int drm_format_plane_cpp(uint32_t format, int plane)
182{
183 unsigned int depth;
184 int bpp;
185
186 if (plane >= drm_format_num_planes(format))
187 return 0;
188
189 switch (format) {
190 case DRM_FORMAT_YUYV:
191 case DRM_FORMAT_YVYU:
192 case DRM_FORMAT_UYVY:
193 case DRM_FORMAT_VYUY:
194 return 2;
195 case DRM_FORMAT_NV12:
196 case DRM_FORMAT_NV21:
197 case DRM_FORMAT_NV16:
198 case DRM_FORMAT_NV61:
199 case DRM_FORMAT_NV24:
200 case DRM_FORMAT_NV42:
201 return plane ? 2 : 1;
202 case DRM_FORMAT_YUV410:
203 case DRM_FORMAT_YVU410:
204 case DRM_FORMAT_YUV411:
205 case DRM_FORMAT_YVU411:
206 case DRM_FORMAT_YUV420:
207 case DRM_FORMAT_YVU420:
208 case DRM_FORMAT_YUV422:
209 case DRM_FORMAT_YVU422:
210 case DRM_FORMAT_YUV444:
211 case DRM_FORMAT_YVU444:
212 return 1;
213 default:
214 drm_fb_get_bpp_depth(format, &depth, &bpp);
215 return bpp >> 3;
216 }
217}
218EXPORT_SYMBOL(drm_format_plane_cpp);
219
220/**
221 * drm_format_horz_chroma_subsampling - get the horizontal chroma subsampling factor
222 * @format: pixel format (DRM_FORMAT_*)
223 *
224 * Returns:
225 * The horizontal chroma subsampling factor for the
226 * specified pixel format.
227 */
228int drm_format_horz_chroma_subsampling(uint32_t format)
229{
230 switch (format) {
231 case DRM_FORMAT_YUV411:
232 case DRM_FORMAT_YVU411:
233 case DRM_FORMAT_YUV410:
234 case DRM_FORMAT_YVU410:
235 return 4;
236 case DRM_FORMAT_YUYV:
237 case DRM_FORMAT_YVYU:
238 case DRM_FORMAT_UYVY:
239 case DRM_FORMAT_VYUY:
240 case DRM_FORMAT_NV12:
241 case DRM_FORMAT_NV21:
242 case DRM_FORMAT_NV16:
243 case DRM_FORMAT_NV61:
244 case DRM_FORMAT_YUV422:
245 case DRM_FORMAT_YVU422:
246 case DRM_FORMAT_YUV420:
247 case DRM_FORMAT_YVU420:
248 return 2;
249 default:
250 return 1;
251 }
252}
253EXPORT_SYMBOL(drm_format_horz_chroma_subsampling);
254
255/**
256 * drm_format_vert_chroma_subsampling - get the vertical chroma subsampling factor
257 * @format: pixel format (DRM_FORMAT_*)
258 *
259 * Returns:
260 * The vertical chroma subsampling factor for the
261 * specified pixel format.
262 */
263int drm_format_vert_chroma_subsampling(uint32_t format)
264{
265 switch (format) {
266 case DRM_FORMAT_YUV410:
267 case DRM_FORMAT_YVU410:
268 return 4;
269 case DRM_FORMAT_YUV420:
270 case DRM_FORMAT_YVU420:
271 case DRM_FORMAT_NV12:
272 case DRM_FORMAT_NV21:
273 return 2;
274 default:
275 return 1;
276 }
277}
278EXPORT_SYMBOL(drm_format_vert_chroma_subsampling);
279
280/**
281 * drm_format_plane_width - width of the plane given the first plane
282 * @width: width of the first plane
283 * @format: pixel format
284 * @plane: plane index
285 *
286 * Returns:
287 * The width of @plane, given that the width of the first plane is @width.
288 */
289int drm_format_plane_width(int width, uint32_t format, int plane)
290{
291 if (plane >= drm_format_num_planes(format))
292 return 0;
293
294 if (plane == 0)
295 return width;
296
297 return width / drm_format_horz_chroma_subsampling(format);
298}
299EXPORT_SYMBOL(drm_format_plane_width);
300
301/**
302 * drm_format_plane_height - height of the plane given the first plane
303 * @height: height of the first plane
304 * @format: pixel format
305 * @plane: plane index
306 *
307 * Returns:
308 * The height of @plane, given that the height of the first plane is @height.
309 */
310int drm_format_plane_height(int height, uint32_t format, int plane)
311{
312 if (plane >= drm_format_num_planes(format))
313 return 0;
314
315 if (plane == 0)
316 return height;
317
318 return height / drm_format_vert_chroma_subsampling(format);
319}
320EXPORT_SYMBOL(drm_format_plane_height);
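
The new file gathers the format lookup helpers in one place. A short usage sketch for a planar format such as NV12, showing how the per-plane helpers compose; for NV12 this reports a full-size one-byte-per-pixel Y plane and a half-size two-byte-per-pixel CbCr plane (the function name is hypothetical):

    static void foo_print_format_layout(uint32_t format, int width, int height)
    {
            int i, planes = drm_format_num_planes(format);

            for (i = 0; i < planes; i++) {
                    DRM_DEBUG_KMS("plane %d: %dx%d, %d byte(s) per pixel\n", i,
                                  drm_format_plane_width(width, format, i),
                                  drm_format_plane_height(height, format, i),
                                  drm_format_plane_cpp(format, i));
            }
    }
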
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index ad89db36ca25..9134ae134667 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -787,7 +787,7 @@ EXPORT_SYMBOL(drm_gem_object_release);
787 * @kref: kref of the object to free 787 * @kref: kref of the object to free
788 * 788 *
789 * Called after the last reference to the object has been lost. 789 * Called after the last reference to the object has been lost.
790 * Must be called holding struct_ mutex 790 * Must be called holding &drm_device->struct_mutex.
791 * 791 *
792 * Frees the object 792 * Frees the object
793 */ 793 */
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 5d469b2f26f4..9ae353f4dd06 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -50,106 +50,24 @@ int drm_name_info(struct seq_file *m, void *data)
50 struct drm_info_node *node = (struct drm_info_node *) m->private; 50 struct drm_info_node *node = (struct drm_info_node *) m->private;
51 struct drm_minor *minor = node->minor; 51 struct drm_minor *minor = node->minor;
52 struct drm_device *dev = minor->dev; 52 struct drm_device *dev = minor->dev;
53 struct drm_master *master = minor->master; 53 struct drm_master *master;
54 if (!master)
55 return 0;
56
57 if (master->unique) {
58 seq_printf(m, "%s %s %s\n",
59 dev->driver->name,
60 dev_name(dev->dev), master->unique);
61 } else {
62 seq_printf(m, "%s %s\n",
63 dev->driver->name, dev_name(dev->dev));
64 }
65 return 0;
66}
67
68/**
69 * Called when "/proc/dri/.../vm" is read.
70 *
71 * Prints information about all mappings in drm_device::maplist.
72 */
73int drm_vm_info(struct seq_file *m, void *data)
74{
75 struct drm_info_node *node = (struct drm_info_node *) m->private;
76 struct drm_device *dev = node->minor->dev;
77 struct drm_local_map *map;
78 struct drm_map_list *r_list;
79
80 /* Hardcoded from _DRM_FRAME_BUFFER,
81 _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
82 _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
83 const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
84 const char *type;
85 int i;
86
87 mutex_lock(&dev->struct_mutex);
88 seq_printf(m, "slot offset size type flags address mtrr\n\n");
89 i = 0;
90 list_for_each_entry(r_list, &dev->maplist, head) {
91 map = r_list->map;
92 if (!map)
93 continue;
94 if (map->type < 0 || map->type > 5)
95 type = "??";
96 else
97 type = types[map->type];
98
99 seq_printf(m, "%4d 0x%016llx 0x%08lx %4.4s 0x%02x 0x%08lx ",
100 i,
101 (unsigned long long)map->offset,
102 map->size, type, map->flags,
103 (unsigned long) r_list->user_token);
104 if (map->mtrr < 0)
105 seq_printf(m, "none\n");
106 else
107 seq_printf(m, "%4d\n", map->mtrr);
108 i++;
109 }
110 mutex_unlock(&dev->struct_mutex);
111 return 0;
112}
113 54
114/** 55 mutex_lock(&dev->master_mutex);
115 * Called when "/proc/dri/.../bufs" is read. 56 master = dev->master;
116 */ 57 if (!master)
117int drm_bufs_info(struct seq_file *m, void *data) 58 goto out_unlock;
118{ 59
119 struct drm_info_node *node = (struct drm_info_node *) m->private; 60 seq_printf(m, "%s", dev->driver->name);
120 struct drm_device *dev = node->minor->dev; 61 if (dev->dev)
121 struct drm_device_dma *dma; 62 seq_printf(m, " dev=%s", dev_name(dev->dev));
122 int i, seg_pages; 63 if (master && master->unique)
123 64 seq_printf(m, " master=%s", master->unique);
124 mutex_lock(&dev->struct_mutex); 65 if (dev->unique)
125 dma = dev->dma; 66 seq_printf(m, " unique=%s", dev->unique);
126 if (!dma) {
127 mutex_unlock(&dev->struct_mutex);
128 return 0;
129 }
130
131 seq_printf(m, " o size count free segs pages kB\n\n");
132 for (i = 0; i <= DRM_MAX_ORDER; i++) {
133 if (dma->bufs[i].buf_count) {
134 seg_pages = dma->bufs[i].seg_count * (1 << dma->bufs[i].page_order);
135 seq_printf(m, "%2d %8d %5d %5d %5d %5d %5ld\n",
136 i,
137 dma->bufs[i].buf_size,
138 dma->bufs[i].buf_count,
139 0,
140 dma->bufs[i].seg_count,
141 seg_pages,
142 seg_pages * PAGE_SIZE / 1024);
143 }
144 }
145 seq_printf(m, "\n");
146 for (i = 0; i < dma->buf_count; i++) {
147 if (i && !(i % 32))
148 seq_printf(m, "\n");
149 seq_printf(m, " %d", dma->buflist[i]->list);
150 }
151 seq_printf(m, "\n"); 67 seq_printf(m, "\n");
152 mutex_unlock(&dev->struct_mutex); 68out_unlock:
69 mutex_unlock(&dev->master_mutex);
70
153 return 0; 71 return 0;
154} 72}
155 73
@@ -184,7 +102,7 @@ int drm_clients_info(struct seq_file *m, void *data)
184 task ? task->comm : "<unknown>", 102 task ? task->comm : "<unknown>",
185 pid_vnr(priv->pid), 103 pid_vnr(priv->pid),
186 priv->minor->index, 104 priv->minor->index,
187 priv->is_master ? 'y' : 'n', 105 drm_is_current_master(priv) ? 'y' : 'n',
188 priv->authenticated ? 'y' : 'n', 106 priv->authenticated ? 'y' : 'n',
189 from_kuid_munged(seq_user_ns(m), priv->uid), 107 from_kuid_munged(seq_user_ns(m), priv->uid),
190 priv->magic); 108 priv->magic);
@@ -194,7 +112,6 @@ int drm_clients_info(struct seq_file *m, void *data)
194 return 0; 112 return 0;
195} 113}
196 114
197
198static int drm_gem_one_name_info(int id, void *ptr, void *data) 115static int drm_gem_one_name_info(int id, void *ptr, void *data)
199{ 116{
200 struct drm_gem_object *obj = ptr; 117 struct drm_gem_object *obj = ptr;
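
The per-file is_master flag is gone here as well; master status is now always computed against the device via drm_is_current_master(). A one-line sketch of the replacement check, in a hypothetical master-only ioctl:

    static int foo_master_only_ioctl(struct drm_device *dev, void *data,
                                     struct drm_file *file_priv)
    {
            if (!drm_is_current_master(file_priv))
                    return -EACCES;

            /* ... privileged work ... */
            return 0;
    }
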
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 902cf6a15212..b86dc9b921a5 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -29,15 +29,9 @@ extern struct mutex drm_global_mutex;
29void drm_lastclose(struct drm_device *dev); 29void drm_lastclose(struct drm_device *dev);
30 30
31/* drm_pci.c */ 31/* drm_pci.c */
32int drm_pci_set_unique(struct drm_device *dev,
33 struct drm_master *master,
34 struct drm_unique *u);
35int drm_irq_by_busid(struct drm_device *dev, void *data, 32int drm_irq_by_busid(struct drm_device *dev, void *data,
36 struct drm_file *file_priv); 33 struct drm_file *file_priv);
37 34
38/* drm_vm.c */
39int drm_vma_info(struct seq_file *m, void *data);
40
41/* drm_prime.c */ 35/* drm_prime.c */
42int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data, 36int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data,
43 struct drm_file *file_priv); 37 struct drm_file *file_priv);
@@ -51,8 +45,6 @@ void drm_prime_remove_buf_handle_locked(struct drm_prime_file_private *prime_fpr
51 45
52/* drm_info.c */ 46/* drm_info.c */
53int drm_name_info(struct seq_file *m, void *data); 47int drm_name_info(struct seq_file *m, void *data);
54int drm_vm_info(struct seq_file *m, void *data);
55int drm_bufs_info(struct seq_file *m, void *data);
56int drm_clients_info(struct seq_file *m, void* data); 48int drm_clients_info(struct seq_file *m, void* data);
57int drm_gem_name_info(struct seq_file *m, void *data); 49int drm_gem_name_info(struct seq_file *m, void *data);
58 50
@@ -67,6 +59,12 @@ int drm_getmagic(struct drm_device *dev, void *data,
67 struct drm_file *file_priv); 59 struct drm_file *file_priv);
68int drm_authmagic(struct drm_device *dev, void *data, 60int drm_authmagic(struct drm_device *dev, void *data,
69 struct drm_file *file_priv); 61 struct drm_file *file_priv);
62int drm_setmaster_ioctl(struct drm_device *dev, void *data,
63 struct drm_file *file_priv);
64int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
65 struct drm_file *file_priv);
66int drm_master_open(struct drm_file *file_priv);
67void drm_master_release(struct drm_file *file_priv);
70 68
71/* drm_sysfs.c */ 69/* drm_sysfs.c */
72extern struct class *drm_class; 70extern struct class *drm_class;
@@ -92,13 +90,6 @@ int drm_gem_open_ioctl(struct drm_device *dev, void *data,
92void drm_gem_open(struct drm_device *dev, struct drm_file *file_private); 90void drm_gem_open(struct drm_device *dev, struct drm_file *file_private);
93void drm_gem_release(struct drm_device *dev, struct drm_file *file_private); 91void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
94 92
95/* drm_drv.c */
96int drm_setmaster_ioctl(struct drm_device *dev, void *data,
97 struct drm_file *file_priv);
98int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
99 struct drm_file *file_priv);
100struct drm_master *drm_master_create(struct drm_minor *minor);
101
102/* drm_debugfs.c */ 93/* drm_debugfs.c */
103#if defined(CONFIG_DEBUG_FS) 94#if defined(CONFIG_DEBUG_FS)
104int drm_debugfs_init(struct drm_minor *minor, int minor_id, 95int drm_debugfs_init(struct drm_minor *minor, int minor_id,
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index b7a39771c152..33af4a5ddca1 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -30,6 +30,7 @@
30 30
31#include <drm/drmP.h> 31#include <drm/drmP.h>
32#include <drm/drm_core.h> 32#include <drm/drm_core.h>
33#include <drm/drm_auth.h>
33#include "drm_legacy.h" 34#include "drm_legacy.h"
34#include "drm_internal.h" 35#include "drm_internal.h"
35#include "drm_crtc_internal.h" 36#include "drm_crtc_internal.h"
@@ -37,6 +38,64 @@
37#include <linux/pci.h> 38#include <linux/pci.h>
38#include <linux/export.h> 39#include <linux/export.h>
39 40
41/**
42 * DOC: getunique and setversion story
43 *
44 * BEWARE THE DRAGONS! MIND THE TRAPDOORS!
45 *
46 * In an attempt to warn anyone else who's trying to figure out what's going
47 * on here, I'll try to summarize the story. First things first, let's clear up
48 * the names, because the kernel internals, libdrm and the ioctls are all named
49 * differently:
50 *
51 * - GET_UNIQUE ioctl, implemented by drm_getunique is wrapped up in libdrm
52 * through the drmGetBusid function.
53 * - The libdrm drmSetBusid function is backed by the SET_UNIQUE ioctl. All
54 * that code is nerved in the kernel with drm_invalid_op().
55 * - The internal set_busid kernel functions and driver callbacks are
 56 * exclusively used by the SET_VERSION ioctl, because only drm 1.0 (which is
57 * nerved) allowed userspace to set the busid through the above ioctl.
58 * - Other ioctls and functions involved are named consistently.
59 *
60 * For anyone wondering what's the difference between drm 1.1 and 1.4: Correctly
61 * handling pci domains in the busid on ppc. Doing this correctly was only
62 * implemented in libdrm in 2010, hence can't be nerved yet. No one knows what's
63 * special with drm 1.2 and 1.3.
64 *
65 * Now the actual horror story of how device lookup in drm works. At large,
66 * there's 2 different ways, either by busid, or by device driver name.
67 *
68 * Opening by busid is fairly simple:
69 *
70 * 1. First call SET_VERSION to make sure pci domains are handled properly. As a
71 * side-effect this fills out the unique name in the master structure.
72 * 2. Call GET_UNIQUE to read out the unique name from the master structure,
73 * which matches the busid thanks to step 1. If it doesn't, proceed to try
74 * the next device node.
75 *
76 * Opening by name is slightly different:
77 *
78 * 1. Directly call VERSION to get the version and to match against the driver
79 * name returned by that ioctl. Note that SET_VERSION is not called, which
 80 * means the unique name for the master node just opening is _not_ filled
 81 * out, despite the fact that current drm device nodes are always bound to
82 * one device, and can't be runtime assigned like with drm 1.0.
83 * 2. Match driver name. If it mismatches, proceed to the next device node.
84 * 3. Call GET_UNIQUE, and check whether the unique name has length zero (by
85 * checking that the first byte in the string is 0). If that's not the case
86 * libdrm skips and proceeds to the next device node. Probably this is just
87 * copypasta from drm 1.0 times where a set unique name meant that the driver
88 * was in use already, but that's just conjecture.
89 *
90 * Long story short: To keep the open by name logic working, GET_UNIQUE must
91 * _not_ return a unique string when SET_VERSION hasn't been called yet,
92 * otherwise libdrm breaks. Even when that unique string can't ever change, and
93 * is totally irrelevant for actually opening the device because runtime
 94 * assignable device instances were only supported in drm 1.0, which is long dead.
95 * But the libdrm code in drmOpenByName somehow survived, hence this can't be
96 * broken.
97 */
98
40static int drm_version(struct drm_device *dev, void *data, 99static int drm_version(struct drm_device *dev, void *data,
41 struct drm_file *file_priv); 100 struct drm_file *file_priv);
42 101
@@ -75,51 +134,6 @@ drm_unset_busid(struct drm_device *dev,
75 master->unique_len = 0; 134 master->unique_len = 0;
76} 135}
77 136
78/*
79 * Set the bus id.
80 *
81 * \param inode device inode.
82 * \param file_priv DRM file private.
83 * \param cmd command.
84 * \param arg user argument, pointing to a drm_unique structure.
85 * \return zero on success or a negative number on failure.
86 *
87 * Copies the bus id from userspace into drm_device::unique, and verifies that
88 * it matches the device this DRM is attached to (EINVAL otherwise). Deprecated
89 * in interface version 1.1 and will return EBUSY when setversion has requested
90 * version 1.1 or greater. Also note that KMS is all version 1.1 and later and
91 * UMS was only ever supported on pci devices.
92 */
93static int drm_setunique(struct drm_device *dev, void *data,
94 struct drm_file *file_priv)
95{
96 struct drm_unique *u = data;
97 struct drm_master *master = file_priv->master;
98 int ret;
99
100 if (master->unique_len || master->unique)
101 return -EBUSY;
102
103 if (!u->unique_len || u->unique_len > 1024)
104 return -EINVAL;
105
106 if (drm_core_check_feature(dev, DRIVER_MODESET))
107 return 0;
108
109 if (WARN_ON(!dev->pdev))
110 return -EINVAL;
111
112 ret = drm_pci_set_unique(dev, master, u);
113 if (ret)
114 goto err;
115
116 return 0;
117
118err:
119 drm_unset_busid(dev, master);
120 return ret;
121}
122
123static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv) 137static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
124{ 138{
125 struct drm_master *master = file_priv->master; 139 struct drm_master *master = file_priv->master;
@@ -135,12 +149,7 @@ static int drm_set_busid(struct drm_device *dev, struct drm_file *file_priv)
135 return ret; 149 return ret;
136 } 150 }
137 } else { 151 } else {
138 if (WARN(dev->unique == NULL, 152 WARN_ON(!dev->unique);
139 "No drm_driver.set_busid() implementation provided by "
140 "%ps. Use drm_dev_set_unique() to set the unique "
141 "name explicitly.", dev->driver))
142 return -EINVAL;
143
144 master->unique = kstrdup(dev->unique, GFP_KERNEL); 153 master->unique = kstrdup(dev->unique, GFP_KERNEL);
145 if (master->unique) 154 if (master->unique)
146 master->unique_len = strlen(dev->unique); 155 master->unique_len = strlen(dev->unique);
@@ -473,7 +482,8 @@ int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
473 return -EACCES; 482 return -EACCES;
474 483
475 /* MASTER is only for master or control clients */ 484 /* MASTER is only for master or control clients */
476 if (unlikely((flags & DRM_MASTER) && !file_priv->is_master && 485 if (unlikely((flags & DRM_MASTER) &&
486 !drm_is_current_master(file_priv) &&
477 !drm_is_control_client(file_priv))) 487 !drm_is_control_client(file_priv)))
478 return -EACCES; 488 return -EACCES;
479 489
@@ -504,7 +514,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
504 DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 514 DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version,
505 DRM_UNLOCKED|DRM_RENDER_ALLOW|DRM_CONTROL_ALLOW), 515 DRM_UNLOCKED|DRM_RENDER_ALLOW|DRM_CONTROL_ALLOW),
506 DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0), 516 DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
507 DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0), 517 DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, DRM_UNLOCKED),
508 DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY), 518 DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
509 DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_legacy_getmap_ioctl, DRM_UNLOCKED), 519 DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_legacy_getmap_ioctl, DRM_UNLOCKED),
510 DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED), 520 DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
@@ -513,10 +523,10 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
513 DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, 0), 523 DRM_IOCTL_DEF(DRM_IOCTL_SET_CLIENT_CAP, drm_setclientcap, 0),
514 DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER), 524 DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
515 525
516 DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 526 DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_invalid_op, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
517 DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 527 DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
518 DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 528 DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
519 DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER), 529 DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_UNLOCKED|DRM_MASTER),
520 530
521 DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_legacy_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 531 DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_legacy_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
522 DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_legacy_rmmap_ioctl, DRM_AUTH), 532 DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_legacy_rmmap_ioctl, DRM_AUTH),
@@ -524,8 +534,8 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
524 DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_legacy_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 534 DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_legacy_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
525 DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_legacy_getsareactx, DRM_AUTH), 535 DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_legacy_getsareactx, DRM_AUTH),
526 536
527 DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_ROOT_ONLY), 537 DRM_IOCTL_DEF(DRM_IOCTL_SET_MASTER, drm_setmaster_ioctl, DRM_UNLOCKED|DRM_ROOT_ONLY),
528 DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_ROOT_ONLY), 538 DRM_IOCTL_DEF(DRM_IOCTL_DROP_MASTER, drm_dropmaster_ioctl, DRM_UNLOCKED|DRM_ROOT_ONLY),
529 539
530 DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_legacy_addctx, DRM_AUTH|DRM_ROOT_ONLY), 540 DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_legacy_addctx, DRM_AUTH|DRM_ROOT_ONLY),
531 DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_legacy_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 541 DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_legacy_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -638,7 +648,7 @@ long drm_ioctl(struct file *filp,
638 int retcode = -EINVAL; 648 int retcode = -EINVAL;
639 char stack_kdata[128]; 649 char stack_kdata[128];
640 char *kdata = NULL; 650 char *kdata = NULL;
641 unsigned int usize, asize, drv_size; 651 unsigned int in_size, out_size, drv_size, ksize;
642 bool is_driver_ioctl; 652 bool is_driver_ioctl;
643 653
644 dev = file_priv->minor->dev; 654 dev = file_priv->minor->dev;
@@ -661,9 +671,12 @@ long drm_ioctl(struct file *filp,
661 } 671 }
662 672
663 drv_size = _IOC_SIZE(ioctl->cmd); 673 drv_size = _IOC_SIZE(ioctl->cmd);
664 usize = _IOC_SIZE(cmd); 674 out_size = in_size = _IOC_SIZE(cmd);
665 asize = max(usize, drv_size); 675 if ((cmd & ioctl->cmd & IOC_IN) == 0)
666 cmd = ioctl->cmd; 676 in_size = 0;
677 if ((cmd & ioctl->cmd & IOC_OUT) == 0)
678 out_size = 0;
679 ksize = max(max(in_size, out_size), drv_size);
667 680
668 DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n", 681 DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n",
669 task_pid_nr(current), 682 task_pid_nr(current),
@@ -683,30 +696,24 @@ long drm_ioctl(struct file *filp,
683 if (unlikely(retcode)) 696 if (unlikely(retcode))
684 goto err_i1; 697 goto err_i1;
685 698
686 if (cmd & (IOC_IN | IOC_OUT)) { 699 if (ksize <= sizeof(stack_kdata)) {
687 if (asize <= sizeof(stack_kdata)) { 700 kdata = stack_kdata;
688 kdata = stack_kdata; 701 } else {
689 } else { 702 kdata = kmalloc(ksize, GFP_KERNEL);
690 kdata = kmalloc(asize, GFP_KERNEL); 703 if (!kdata) {
691 if (!kdata) { 704 retcode = -ENOMEM;
692 retcode = -ENOMEM; 705 goto err_i1;
693 goto err_i1;
694 }
695 } 706 }
696 if (asize > usize)
697 memset(kdata + usize, 0, asize - usize);
698 } 707 }
699 708
700 if (cmd & IOC_IN) { 709 if (copy_from_user(kdata, (void __user *)arg, in_size) != 0) {
701 if (copy_from_user(kdata, (void __user *)arg, 710 retcode = -EFAULT;
702 usize) != 0) { 711 goto err_i1;
703 retcode = -EFAULT;
704 goto err_i1;
705 }
706 } else if (cmd & IOC_OUT) {
707 memset(kdata, 0, usize);
708 } 712 }
709 713
714 if (ksize > in_size)
715 memset(kdata + in_size, 0, ksize - in_size);
716
710 /* Enforce sane locking for kms driver ioctls. Core ioctls are 717 /* Enforce sane locking for kms driver ioctls. Core ioctls are
711 * too messy still. */ 718 * too messy still. */
712 if ((drm_core_check_feature(dev, DRIVER_MODESET) && is_driver_ioctl) || 719 if ((drm_core_check_feature(dev, DRIVER_MODESET) && is_driver_ioctl) ||
@@ -718,11 +725,8 @@ long drm_ioctl(struct file *filp,
718 mutex_unlock(&drm_global_mutex); 725 mutex_unlock(&drm_global_mutex);
719 } 726 }
720 727
721 if (cmd & IOC_OUT) { 728 if (copy_to_user((void __user *)arg, kdata, out_size) != 0)
722 if (copy_to_user((void __user *)arg, kdata, 729 retcode = -EFAULT;
723 usize) != 0)
724 retcode = -EFAULT;
725 }
726 730
727 err_i1: 731 err_i1:
728 if (!ioctl) 732 if (!ioctl)
@@ -749,7 +753,7 @@ EXPORT_SYMBOL(drm_ioctl);
749 * shouldn't be used by any drivers. 753 * shouldn't be used by any drivers.
750 * 754 *
751 * Returns: 755 * Returns:
752 * True if the @nr corresponds to a DRM core ioctl numer, false otherwise. 756 * True if the @nr corresponds to a DRM core ioctl number, false otherwise.
753 */ 757 */
754bool drm_ioctl_flags(unsigned int nr, unsigned int *flags) 758bool drm_ioctl_flags(unsigned int nr, unsigned int *flags)
755{ 759{
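
The "getunique and setversion story" comment above describes the userspace side only in prose. A rough libdrm-level sketch of the open-by-busid probe it refers to, assuming the standard xf86drm.h entry points; the helper itself is invented for illustration:

    #include <string.h>
    #include <xf86drm.h>

    static int foo_fd_matches_busid(int fd, const char *busid)
    {
            drmSetVersion sv = {
                    .drm_di_major = 1,
                    .drm_di_minor = 4,      /* 1.4: busid includes the PCI domain */
                    .drm_dd_major = -1,
                    .drm_dd_minor = -1,
            };
            char *unique;
            int match;

            /* SET_VERSION fills out the unique name as a side effect. */
            if (drmSetInterfaceVersion(fd, &sv))
                    return 0;

            /* GET_UNIQUE reads it back for comparison against the busid. */
            unique = drmGetBusid(fd);
            if (!unique)
                    return 0;

            match = strcmp(unique, busid) == 0;
            drmFreeBusid(unique);
            return match;
    }
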
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 0fac801c18fe..77f357b2c386 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -42,10 +42,6 @@
42#include <linux/vgaarb.h> 42#include <linux/vgaarb.h>
43#include <linux/export.h> 43#include <linux/export.h>
44 44
45/* Access macro for slots in vblank timestamp ringbuffer. */
46#define vblanktimestamp(dev, pipe, count) \
47 ((dev)->vblank[pipe].time[(count) % DRM_VBLANKTIME_RBSIZE])
48
49/* Retry timestamp calculation up to 3 times to satisfy 45/* Retry timestamp calculation up to 3 times to satisfy
50 * drm_timestamp_precision before giving up. 46 * drm_timestamp_precision before giving up.
51 */ 47 */
@@ -82,36 +78,18 @@ static void store_vblank(struct drm_device *dev, unsigned int pipe,
82 struct timeval *t_vblank, u32 last) 78 struct timeval *t_vblank, u32 last)
83{ 79{
84 struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; 80 struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
85 u32 tslot;
86 81
87 assert_spin_locked(&dev->vblank_time_lock); 82 assert_spin_locked(&dev->vblank_time_lock);
88 83
89 vblank->last = last; 84 vblank->last = last;
90 85
91 /* All writers hold the spinlock, but readers are serialized by 86 write_seqlock(&vblank->seqlock);
92 * the latching of vblank->count below. 87 vblank->time = *t_vblank;
93 */
94 tslot = vblank->count + vblank_count_inc;
95 vblanktimestamp(dev, pipe, tslot) = *t_vblank;
96
97 /*
98 * vblank timestamp updates are protected on the write side with
99 * vblank_time_lock, but on the read side done locklessly using a
100 * sequence-lock on the vblank counter. Ensure correct ordering using
101 * memory barrriers. We need the barrier both before and also after the
102 * counter update to synchronize with the next timestamp write.
103 * The read-side barriers for this are in drm_vblank_count_and_time.
104 */
105 smp_wmb();
106 vblank->count += vblank_count_inc; 88 vblank->count += vblank_count_inc;
107 smp_wmb(); 89 write_sequnlock(&vblank->seqlock);
108} 90}
109 91
110/** 92/*
111 * drm_reset_vblank_timestamp - reset the last timestamp to the last vblank
112 * @dev: DRM device
113 * @pipe: index of CRTC for which to reset the timestamp
114 *
115 * Reset the stored timestamp for the current vblank count to correspond 93 * Reset the stored timestamp for the current vblank count to correspond
116 * to the last vblank occurred. 94 * to the last vblank occurred.
117 * 95 *
@@ -155,11 +133,7 @@ static void drm_reset_vblank_timestamp(struct drm_device *dev, unsigned int pipe
155 spin_unlock(&dev->vblank_time_lock); 133 spin_unlock(&dev->vblank_time_lock);
156} 134}
157 135
158/** 136/*
159 * drm_update_vblank_count - update the master vblank counter
160 * @dev: DRM device
161 * @pipe: counter to update
162 *
163 * Call back into the driver to update the appropriate vblank counter 137 * Call back into the driver to update the appropriate vblank counter
164 * (specified by @pipe). Deal with wraparound, if it occurred, and 138 * (specified by @pipe). Deal with wraparound, if it occurred, and
165 * update the last read value so we can deal with wraparound on the next 139 * update the last read value so we can deal with wraparound on the next
@@ -205,7 +179,7 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
205 const struct timeval *t_old; 179 const struct timeval *t_old;
206 u64 diff_ns; 180 u64 diff_ns;
207 181
208 t_old = &vblanktimestamp(dev, pipe, vblank->count); 182 t_old = &vblank->time;
209 diff_ns = timeval_to_ns(&t_vblank) - timeval_to_ns(t_old); 183 diff_ns = timeval_to_ns(&t_vblank) - timeval_to_ns(t_old);
210 184
211 /* 185 /*
@@ -239,49 +213,6 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
239 diff = 1; 213 diff = 1;
240 } 214 }
241 215
242 /*
243 * FIMXE: Need to replace this hack with proper seqlocks.
244 *
245 * Restrict the bump of the software vblank counter to a safe maximum
246 * value of +1 whenever there is the possibility that concurrent readers
247 * of vblank timestamps could be active at the moment, as the current
248 * implementation of the timestamp caching and updating is not safe
249 * against concurrent readers for calls to store_vblank() with a bump
250 * of anything but +1. A bump != 1 would very likely return corrupted
251 * timestamps to userspace, because the same slot in the cache could
252 * be concurrently written by store_vblank() and read by one of those
253 * readers without the read-retry logic detecting the collision.
254 *
255 * Concurrent readers can exist when we are called from the
256 * drm_vblank_off() or drm_vblank_on() functions and other non-vblank-
257 * irq callers. However, all those calls to us are happening with the
258 * vbl_lock locked to prevent drm_vblank_get(), so the vblank refcount
259 * can't increase while we are executing. Therefore a zero refcount at
260 * this point is safe for arbitrary counter bumps if we are called
261 * outside vblank irq, a non-zero count is not 100% safe. Unfortunately
262 * we must also accept a refcount of 1, as whenever we are called from
263 * drm_vblank_get() -> drm_vblank_enable() the refcount will be 1 and
264 * we must let that one pass through in order to not lose vblank counts
265 * during vblank irq off - which would completely defeat the whole
266 * point of this routine.
267 *
268 * Whenever we are called from vblank irq, we have to assume concurrent
269 * readers exist or can show up any time during our execution, even if
270 * the refcount is currently zero, as vblank irqs are usually only
271 * enabled due to the presence of readers, and because when we are called
272 * from vblank irq we can't hold the vbl_lock to protect us from sudden
273 * bumps in vblank refcount. Therefore also restrict bumps to +1 when
274 * called from vblank irq.
275 */
276 if ((diff > 1) && (atomic_read(&vblank->refcount) > 1 ||
277 (flags & DRM_CALLED_FROM_VBLIRQ))) {
278 DRM_DEBUG_VBL("clamping vblank bump to 1 on crtc %u: diffr=%u "
279 "refcount %u, vblirq %u\n", pipe, diff,
280 atomic_read(&vblank->refcount),
281 (flags & DRM_CALLED_FROM_VBLIRQ) != 0);
282 diff = 1;
283 }
284
285 DRM_DEBUG_VBL("updating vblank count on crtc %u:" 216 DRM_DEBUG_VBL("updating vblank count on crtc %u:"
286 " current=%u, diff=%u, hw=%u hw_last=%u\n", 217 " current=%u, diff=%u, hw=%u hw_last=%u\n",
287 pipe, vblank->count, diff, cur_vblank, vblank->last); 218 pipe, vblank->count, diff, cur_vblank, vblank->last);
@@ -303,6 +234,37 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe,
303 store_vblank(dev, pipe, diff, &t_vblank, cur_vblank); 234 store_vblank(dev, pipe, diff, &t_vblank, cur_vblank);
304} 235}
305 236
237/**
238 * drm_accurate_vblank_count - retrieve the master vblank counter
239 * @crtc: which counter to retrieve
240 *
241 * This function is similar to @drm_crtc_vblank_count but this
242 * function interpolates to handle a race with vblank irq's.
243 *
244 * This is mostly useful for hardware that can obtain the scanout
245 * position, but doesn't have a frame counter.
246 */
247u32 drm_accurate_vblank_count(struct drm_crtc *crtc)
248{
249 struct drm_device *dev = crtc->dev;
250 unsigned int pipe = drm_crtc_index(crtc);
251 u32 vblank;
252 unsigned long flags;
253
254 WARN(!dev->driver->get_vblank_timestamp,
255 "This function requires support for accurate vblank timestamps.");
256
257 spin_lock_irqsave(&dev->vblank_time_lock, flags);
258
259 drm_update_vblank_count(dev, pipe, 0);
260 vblank = drm_vblank_count(dev, pipe);
261
262 spin_unlock_irqrestore(&dev->vblank_time_lock, flags);
263
264 return vblank;
265}
266EXPORT_SYMBOL(drm_accurate_vblank_count);
267
306/* 268/*
307 * Disable vblank irq's on crtc, make sure that last vblank count 269 * Disable vblank irq's on crtc, make sure that last vblank count
308 * of hardware and corresponding consistent software vblank counter 270 * of hardware and corresponding consistent software vblank counter
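
The drm_accurate_vblank_count() helper added above is aimed at hardware that can report a scanout position but has no frame counter. A minimal sketch of the intended call site, in a hypothetical flip-completion handler:

    static void foo_finish_page_flip(struct drm_crtc *crtc)
    {
            /* Re-sample the counter so a flip racing with the vblank irq
             * still reports the frame it actually completed on. */
            u32 seq = drm_accurate_vblank_count(crtc);

            DRM_DEBUG_VBL("page flip finished on vblank %u\n", seq);
    }
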
@@ -417,6 +379,7 @@ int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs)
417 init_waitqueue_head(&vblank->queue); 379 init_waitqueue_head(&vblank->queue);
418 setup_timer(&vblank->disable_timer, vblank_disable_fn, 380 setup_timer(&vblank->disable_timer, vblank_disable_fn,
419 (unsigned long)vblank); 381 (unsigned long)vblank);
382 seqlock_init(&vblank->seqlock);
420 } 383 }
421 384
422 DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n"); 385 DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n");
@@ -569,7 +532,7 @@ int drm_irq_uninstall(struct drm_device *dev)
569 532
570 /* 533 /*
571 * Wake up any waiters so they don't hang. This is just to paper over 534 * Wake up any waiters so they don't hang. This is just to paper over
572 * isssues for UMS drivers which aren't in full control of their 535 * issues for UMS drivers which aren't in full control of their
573 * vblank/irq handling. KMS drivers must ensure that vblanks are all 536 * vblank/irq handling. KMS drivers must ensure that vblanks are all
574 * disabled when uninstalling the irq handler. 537 * disabled when uninstalling the irq handler.
575 */ 538 */
@@ -631,7 +594,7 @@ int drm_control(struct drm_device *dev, void *data,
631 return 0; 594 return 0;
632 if (drm_core_check_feature(dev, DRIVER_MODESET)) 595 if (drm_core_check_feature(dev, DRIVER_MODESET))
633 return 0; 596 return 0;
634 /* UMS was only ever support on pci devices. */ 597 /* UMS was only ever supported on pci devices. */
635 if (WARN_ON(!dev->pdev)) 598 if (WARN_ON(!dev->pdev))
636 return -EINVAL; 599 return -EINVAL;
637 600
@@ -982,31 +945,24 @@ EXPORT_SYMBOL(drm_crtc_vblank_count);
982 * 945 *
983 * This is the legacy version of drm_crtc_vblank_count_and_time(). 946 * This is the legacy version of drm_crtc_vblank_count_and_time().
984 */ 947 */
985u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe, 948static u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe,
986 struct timeval *vblanktime) 949 struct timeval *vblanktime)
987{ 950{
988 struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; 951 struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
989 int count = DRM_TIMESTAMP_MAXRETRIES; 952 u32 vblank_count;
990 u32 cur_vblank; 953 unsigned int seq;
991 954
992 if (WARN_ON(pipe >= dev->num_crtcs)) 955 if (WARN_ON(pipe >= dev->num_crtcs))
993 return 0; 956 return 0;
994 957
995 /*
996 * Vblank timestamps are read lockless. To ensure consistency the vblank
997 * counter is rechecked and ordering is ensured using memory barriers.
998 * This works like a seqlock. The write-side barriers are in store_vblank.
999 */
1000 do { 958 do {
1001 cur_vblank = vblank->count; 959 seq = read_seqbegin(&vblank->seqlock);
1002 smp_rmb(); 960 vblank_count = vblank->count;
1003 *vblanktime = vblanktimestamp(dev, pipe, cur_vblank); 961 *vblanktime = vblank->time;
1004 smp_rmb(); 962 } while (read_seqretry(&vblank->seqlock, seq));
1005 } while (cur_vblank != vblank->count && --count > 0);
1006 963
1007 return cur_vblank; 964 return vblank_count;
1008} 965}
1009EXPORT_SYMBOL(drm_vblank_count_and_time);
1010 966
1011/** 967/**
1012 * drm_crtc_vblank_count_and_time - retrieve "cooked" vblank counter value 968 * drm_crtc_vblank_count_and_time - retrieve "cooked" vblank counter value
@@ -1018,8 +974,6 @@ EXPORT_SYMBOL(drm_vblank_count_and_time);
1018 * vblank events since the system was booted, including lost events due to 974 * vblank events since the system was booted, including lost events due to
1019 * modesetting activity. Returns corresponding system timestamp of the time 975 * modesetting activity. Returns corresponding system timestamp of the time
1020 * of the vblank interval that corresponds to the current vblank counter value. 976 * of the vblank interval that corresponds to the current vblank counter value.
1021 *
1022 * This is the native KMS version of drm_vblank_count_and_time().
1023 */ 977 */
1024u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc, 978u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
1025 struct timeval *vblanktime) 979 struct timeval *vblanktime)
@@ -1037,39 +991,11 @@ static void send_vblank_event(struct drm_device *dev,
1037 e->event.tv_sec = now->tv_sec; 991 e->event.tv_sec = now->tv_sec;
1038 e->event.tv_usec = now->tv_usec; 992 e->event.tv_usec = now->tv_usec;
1039 993
1040 drm_send_event_locked(dev, &e->base);
1041
1042 trace_drm_vblank_event_delivered(e->base.pid, e->pipe, 994 trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
1043 e->event.sequence); 995 e->event.sequence);
1044}
1045
1046/**
1047 * drm_arm_vblank_event - arm vblank event after pageflip
1048 * @dev: DRM device
1049 * @pipe: CRTC index
1050 * @e: the event to prepare to send
1051 *
1052 * A lot of drivers need to generate vblank events for the very next vblank
1053 * interrupt. For example when the page flip interrupt happens when the page
1054 * flip gets armed, but not when it actually executes within the next vblank
1055 * period. This helper function implements exactly the required vblank arming
1056 * behaviour.
1057 *
1058 * Caller must hold event lock. Caller must also hold a vblank reference for
1059 * the event @e, which will be dropped when the next vblank arrives.
1060 *
1061 * This is the legacy version of drm_crtc_arm_vblank_event().
1062 */
1063void drm_arm_vblank_event(struct drm_device *dev, unsigned int pipe,
1064 struct drm_pending_vblank_event *e)
1065{
1066 assert_spin_locked(&dev->event_lock);
1067 996
1068 e->pipe = pipe; 997 drm_send_event_locked(dev, &e->base);
1069 e->event.sequence = drm_vblank_count(dev, pipe);
1070 list_add_tail(&e->base.link, &dev->vblank_event_list);
1071} 998}
1072EXPORT_SYMBOL(drm_arm_vblank_event);
1073 999
1074/** 1000/**
1075 * drm_crtc_arm_vblank_event - arm vblank event after pageflip 1001 * drm_crtc_arm_vblank_event - arm vblank event after pageflip
@@ -1084,32 +1010,35 @@ EXPORT_SYMBOL(drm_arm_vblank_event);
1084 * 1010 *
1085 * Caller must hold event lock. Caller must also hold a vblank reference for 1011 * Caller must hold event lock. Caller must also hold a vblank reference for
1086 * the event @e, which will be dropped when the next vblank arrives. 1012 * the event @e, which will be dropped when the next vblank arrives.
1087 *
1088 * This is the native KMS version of drm_arm_vblank_event().
1089 */ 1013 */
1090void drm_crtc_arm_vblank_event(struct drm_crtc *crtc, 1014void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
1091 struct drm_pending_vblank_event *e) 1015 struct drm_pending_vblank_event *e)
1092{ 1016{
1093 drm_arm_vblank_event(crtc->dev, drm_crtc_index(crtc), e); 1017 struct drm_device *dev = crtc->dev;
1018 unsigned int pipe = drm_crtc_index(crtc);
1019
1020 assert_spin_locked(&dev->event_lock);
1021
1022 e->pipe = pipe;
1023 e->event.sequence = drm_vblank_count(dev, pipe);
1024 list_add_tail(&e->base.link, &dev->vblank_event_list);
1094} 1025}
1095EXPORT_SYMBOL(drm_crtc_arm_vblank_event); 1026EXPORT_SYMBOL(drm_crtc_arm_vblank_event);
1096 1027
1097/** 1028/**
1098 * drm_send_vblank_event - helper to send vblank event after pageflip 1029 * drm_crtc_send_vblank_event - helper to send vblank event after pageflip
1099 * @dev: DRM device 1030 * @crtc: the source CRTC of the vblank event
1100 * @pipe: CRTC index
1101 * @e: the event to send 1031 * @e: the event to send
1102 * 1032 *
1103 * Updates sequence # and timestamp on event, and sends it to userspace. 1033 * Updates sequence # and timestamp on event, and sends it to userspace.
1104 * Caller must hold event lock. 1034 * Caller must hold event lock.
1105 *
1106 * This is the legacy version of drm_crtc_send_vblank_event().
1107 */ 1035 */
1108void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe, 1036void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
1109 struct drm_pending_vblank_event *e) 1037 struct drm_pending_vblank_event *e)
1110{ 1038{
1039 struct drm_device *dev = crtc->dev;
1040 unsigned int seq, pipe = drm_crtc_index(crtc);
1111 struct timeval now; 1041 struct timeval now;
1112 unsigned int seq;
1113 1042
1114 if (dev->num_crtcs > 0) { 1043 if (dev->num_crtcs > 0) {
1115 seq = drm_vblank_count_and_time(dev, pipe, &now); 1044 seq = drm_vblank_count_and_time(dev, pipe, &now);
@@ -1121,23 +1050,6 @@ void drm_send_vblank_event(struct drm_device *dev, unsigned int pipe,
1121 e->pipe = pipe; 1050 e->pipe = pipe;
1122 send_vblank_event(dev, e, seq, &now); 1051 send_vblank_event(dev, e, seq, &now);
1123} 1052}
1124EXPORT_SYMBOL(drm_send_vblank_event);
1125
1126/**
1127 * drm_crtc_send_vblank_event - helper to send vblank event after pageflip
1128 * @crtc: the source CRTC of the vblank event
1129 * @e: the event to send
1130 *
1131 * Updates sequence # and timestamp on event, and sends it to userspace.
1132 * Caller must hold event lock.
1133 *
1134 * This is the native KMS version of drm_send_vblank_event().
1135 */
1136void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
1137 struct drm_pending_vblank_event *e)
1138{
1139 drm_send_vblank_event(crtc->dev, drm_crtc_index(crtc), e);
1140}
1141EXPORT_SYMBOL(drm_crtc_send_vblank_event); 1053EXPORT_SYMBOL(drm_crtc_send_vblank_event);
1142 1054
1143/** 1055/**
@@ -1193,7 +1105,7 @@ static int drm_vblank_enable(struct drm_device *dev, unsigned int pipe)
1193 * Returns: 1105 * Returns:
1194 * Zero on success or a negative error code on failure. 1106 * Zero on success or a negative error code on failure.
1195 */ 1107 */
1196int drm_vblank_get(struct drm_device *dev, unsigned int pipe) 1108static int drm_vblank_get(struct drm_device *dev, unsigned int pipe)
1197{ 1109{
1198 struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; 1110 struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
1199 unsigned long irqflags; 1111 unsigned long irqflags;
@@ -1219,7 +1131,6 @@ int drm_vblank_get(struct drm_device *dev, unsigned int pipe)
1219 1131
1220 return ret; 1132 return ret;
1221} 1133}
1222EXPORT_SYMBOL(drm_vblank_get);
1223 1134
1224/** 1135/**
1225 * drm_crtc_vblank_get - get a reference count on vblank events 1136 * drm_crtc_vblank_get - get a reference count on vblank events
@@ -1228,8 +1139,6 @@ EXPORT_SYMBOL(drm_vblank_get);
1228 * Acquire a reference count on vblank events to avoid having them disabled 1139 * Acquire a reference count on vblank events to avoid having them disabled
1229 * while in use. 1140 * while in use.
1230 * 1141 *
1231 * This is the native kms version of drm_vblank_get().
1232 *
1233 * Returns: 1142 * Returns:
1234 * Zero on success or a negative error code on failure. 1143 * Zero on success or a negative error code on failure.
1235 */ 1144 */
@@ -1249,7 +1158,7 @@ EXPORT_SYMBOL(drm_crtc_vblank_get);
1249 * 1158 *
1250 * This is the legacy version of drm_crtc_vblank_put(). 1159 * This is the legacy version of drm_crtc_vblank_put().
1251 */ 1160 */
1252void drm_vblank_put(struct drm_device *dev, unsigned int pipe) 1161static void drm_vblank_put(struct drm_device *dev, unsigned int pipe)
1253{ 1162{
1254 struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; 1163 struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
1255 1164
@@ -1270,7 +1179,6 @@ void drm_vblank_put(struct drm_device *dev, unsigned int pipe)
1270 jiffies + ((drm_vblank_offdelay * HZ)/1000)); 1179 jiffies + ((drm_vblank_offdelay * HZ)/1000));
1271 } 1180 }
1272} 1181}
1273EXPORT_SYMBOL(drm_vblank_put);
1274 1182
1275/** 1183/**
1276 * drm_crtc_vblank_put - give up ownership of vblank events 1184 * drm_crtc_vblank_put - give up ownership of vblank events
@@ -1278,8 +1186,6 @@ EXPORT_SYMBOL(drm_vblank_put);
1278 * 1186 *
1279 * Release ownership of a given vblank counter, turning off interrupts 1187 * Release ownership of a given vblank counter, turning off interrupts
1280 * if possible. Disable interrupts after drm_vblank_offdelay milliseconds. 1188 * if possible. Disable interrupts after drm_vblank_offdelay milliseconds.
1281 *
1282 * This is the native kms version of drm_vblank_put().
1283 */ 1189 */
1284void drm_crtc_vblank_put(struct drm_crtc *crtc) 1190void drm_crtc_vblank_put(struct drm_crtc *crtc)
1285{ 1191{
@@ -1679,12 +1585,6 @@ static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe,
1679 1585
1680 seq = drm_vblank_count_and_time(dev, pipe, &now); 1586 seq = drm_vblank_count_and_time(dev, pipe, &now);
1681 1587
1682 if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) &&
1683 (seq - vblwait->request.sequence) <= (1 << 23)) {
1684 vblwait->request.sequence = seq + 1;
1685 vblwait->reply.sequence = vblwait->request.sequence;
1686 }
1687
1688 DRM_DEBUG("event on vblank count %d, current %d, crtc %u\n", 1588 DRM_DEBUG("event on vblank count %d, current %d, crtc %u\n",
1689 vblwait->request.sequence, seq, pipe); 1589 vblwait->request.sequence, seq, pipe);
1690 1590
@@ -1781,6 +1681,11 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
1781 goto done; 1681 goto done;
1782 } 1682 }
1783 1683
1684 if ((flags & _DRM_VBLANK_NEXTONMISS) &&
1685 (seq - vblwait->request.sequence) <= (1 << 23)) {
1686 vblwait->request.sequence = seq + 1;
1687 }
1688
1784 if (flags & _DRM_VBLANK_EVENT) { 1689 if (flags & _DRM_VBLANK_EVENT) {
1785 /* must hold on to the vblank ref until the event fires 1690 /* must hold on to the vblank ref until the event fires
1786 * drm_vblank_put will be called asynchronously 1691 * drm_vblank_put will be called asynchronously
@@ -1788,14 +1693,8 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
1788 return drm_queue_vblank_event(dev, pipe, vblwait, file_priv); 1693 return drm_queue_vblank_event(dev, pipe, vblwait, file_priv);
1789 } 1694 }
1790 1695
1791 if ((flags & _DRM_VBLANK_NEXTONMISS) &&
1792 (seq - vblwait->request.sequence) <= (1<<23)) {
1793 vblwait->request.sequence = seq + 1;
1794 }
1795
1796 DRM_DEBUG("waiting on vblank count %d, crtc %u\n", 1696 DRM_DEBUG("waiting on vblank count %d, crtc %u\n",
1797 vblwait->request.sequence, pipe); 1697 vblwait->request.sequence, pipe);
1798 vblank->last_wait = vblwait->request.sequence;
1799 DRM_WAIT_ON(ret, vblank->queue, 3 * HZ, 1698 DRM_WAIT_ON(ret, vblank->queue, 3 * HZ,
1800 (((drm_vblank_count(dev, pipe) - 1699 (((drm_vblank_count(dev, pipe) -
1801 vblwait->request.sequence) <= (1 << 23)) || 1700 vblwait->request.sequence) <= (1 << 23)) ||
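For context on the arm/send helpers consolidated above, a sketch of how a driver's page-flip path would typically use them now that only the drm_crtc_* variants remain; foo_crtc_queue_flip_event and the surrounding calling convention are illustrative assumptions, not taken from this patch:

static void foo_crtc_queue_flip_event(struct drm_crtc *crtc,
                                      struct drm_pending_vblank_event *event)
{
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	if (drm_crtc_vblank_get(crtc) == 0)
		/* Reference is dropped by the core when the event fires. */
		drm_crtc_arm_vblank_event(crtc, event);
	else
		/* No vblank reference available: complete the event now. */
		drm_crtc_send_vblank_event(crtc, event);
	spin_unlock_irqrestore(&dev->event_lock, flags);
}
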
diff --git a/drivers/gpu/drm/drm_legacy.h b/drivers/gpu/drm/drm_legacy.h
index d3b6ee357a2b..c6f422e879dd 100644
--- a/drivers/gpu/drm/drm_legacy.h
+++ b/drivers/gpu/drm/drm_legacy.h
@@ -88,14 +88,10 @@ struct drm_agp_mem {
88 struct list_head head; 88 struct list_head head;
89}; 89};
90 90
91/* 91/* drm_lock.c */
92 * Generic Userspace Locking-API
93 */
94
95int drm_legacy_i_have_hw_lock(struct drm_device *d, struct drm_file *f);
96int drm_legacy_lock(struct drm_device *d, void *v, struct drm_file *f); 92int drm_legacy_lock(struct drm_device *d, void *v, struct drm_file *f);
97int drm_legacy_unlock(struct drm_device *d, void *v, struct drm_file *f); 93int drm_legacy_unlock(struct drm_device *d, void *v, struct drm_file *f);
98int drm_legacy_lock_free(struct drm_lock_data *lock, unsigned int ctx); 94void drm_legacy_lock_release(struct drm_device *dev, struct file *filp);
99 95
100/* DMA support */ 96/* DMA support */
101int drm_legacy_dma_setup(struct drm_device *dev); 97int drm_legacy_dma_setup(struct drm_device *dev);
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index daa2ff12101b..48ac0ebbd663 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -41,6 +41,110 @@
41static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context); 41static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
42 42
43/** 43/**
44 * Take the heavyweight lock.
45 *
46 * \param lock lock pointer.
47 * \param context locking context.
48 * \return one if the lock is held, or zero otherwise.
49 *
50 * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
51 */
52static
53int drm_lock_take(struct drm_lock_data *lock_data,
54 unsigned int context)
55{
56 unsigned int old, new, prev;
57 volatile unsigned int *lock = &lock_data->hw_lock->lock;
58
59 spin_lock_bh(&lock_data->spinlock);
60 do {
61 old = *lock;
62 if (old & _DRM_LOCK_HELD)
63 new = old | _DRM_LOCK_CONT;
64 else {
65 new = context | _DRM_LOCK_HELD |
66 ((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
67 _DRM_LOCK_CONT : 0);
68 }
69 prev = cmpxchg(lock, old, new);
70 } while (prev != old);
71 spin_unlock_bh(&lock_data->spinlock);
72
73 if (_DRM_LOCKING_CONTEXT(old) == context) {
74 if (old & _DRM_LOCK_HELD) {
75 if (context != DRM_KERNEL_CONTEXT) {
76 DRM_ERROR("%d holds heavyweight lock\n",
77 context);
78 }
79 return 0;
80 }
81 }
82
83 if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) {
84 /* Have lock */
85 return 1;
86 }
87 return 0;
88}
89
90/**
91 * This takes a lock forcibly and hands it to context. Should ONLY be used
92 * inside *_unlock to give lock to kernel before calling *_dma_schedule.
93 *
94 * \param dev DRM device.
95 * \param lock lock pointer.
96 * \param context locking context.
97 * \return always one.
98 *
99 * Resets the lock file pointer.
100 * Marks the lock as held by the given context, via the \p cmpxchg instruction.
101 */
102static int drm_lock_transfer(struct drm_lock_data *lock_data,
103 unsigned int context)
104{
105 unsigned int old, new, prev;
106 volatile unsigned int *lock = &lock_data->hw_lock->lock;
107
108 lock_data->file_priv = NULL;
109 do {
110 old = *lock;
111 new = context | _DRM_LOCK_HELD;
112 prev = cmpxchg(lock, old, new);
113 } while (prev != old);
114 return 1;
115}
116
117static int drm_legacy_lock_free(struct drm_lock_data *lock_data,
118 unsigned int context)
119{
120 unsigned int old, new, prev;
121 volatile unsigned int *lock = &lock_data->hw_lock->lock;
122
123 spin_lock_bh(&lock_data->spinlock);
124 if (lock_data->kernel_waiters != 0) {
125 drm_lock_transfer(lock_data, 0);
126 lock_data->idle_has_lock = 1;
127 spin_unlock_bh(&lock_data->spinlock);
128 return 1;
129 }
130 spin_unlock_bh(&lock_data->spinlock);
131
132 do {
133 old = *lock;
134 new = _DRM_LOCKING_CONTEXT(old);
135 prev = cmpxchg(lock, old, new);
136 } while (prev != old);
137
138 if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
139 DRM_ERROR("%d freed heavyweight lock held by %d\n",
140 context, _DRM_LOCKING_CONTEXT(old));
141 return 1;
142 }
143 wake_up_interruptible(&lock_data->lock_queue);
144 return 0;
145}
146
147/**
44 * Lock ioctl. 148 * Lock ioctl.
45 * 149 *
46 * \param inode device inode. 150 * \param inode device inode.
@@ -115,7 +219,7 @@ int drm_legacy_lock(struct drm_device *dev, void *data,
115 /* don't set the block all signals on the master process for now 219 /* don't set the block all signals on the master process for now
116 * really probably not the correct answer but lets us debug xkb 220 * really probably not the correct answer but lets us debug xkb
117 * xserver for now */ 221 * xserver for now */
118 if (!file_priv->is_master) { 222 if (!drm_is_current_master(file_priv)) {
119 dev->sigdata.context = lock->context; 223 dev->sigdata.context = lock->context;
120 dev->sigdata.lock = master->lock.hw_lock; 224 dev->sigdata.lock = master->lock.hw_lock;
121 } 225 }
@@ -165,120 +269,6 @@ int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_
165} 269}
166 270
167/** 271/**
168 * Take the heavyweight lock.
169 *
170 * \param lock lock pointer.
171 * \param context locking context.
172 * \return one if the lock is held, or zero otherwise.
173 *
174 * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
175 */
176static
177int drm_lock_take(struct drm_lock_data *lock_data,
178 unsigned int context)
179{
180 unsigned int old, new, prev;
181 volatile unsigned int *lock = &lock_data->hw_lock->lock;
182
183 spin_lock_bh(&lock_data->spinlock);
184 do {
185 old = *lock;
186 if (old & _DRM_LOCK_HELD)
187 new = old | _DRM_LOCK_CONT;
188 else {
189 new = context | _DRM_LOCK_HELD |
190 ((lock_data->user_waiters + lock_data->kernel_waiters > 1) ?
191 _DRM_LOCK_CONT : 0);
192 }
193 prev = cmpxchg(lock, old, new);
194 } while (prev != old);
195 spin_unlock_bh(&lock_data->spinlock);
196
197 if (_DRM_LOCKING_CONTEXT(old) == context) {
198 if (old & _DRM_LOCK_HELD) {
199 if (context != DRM_KERNEL_CONTEXT) {
200 DRM_ERROR("%d holds heavyweight lock\n",
201 context);
202 }
203 return 0;
204 }
205 }
206
207 if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) {
208 /* Have lock */
209 return 1;
210 }
211 return 0;
212}
213
214/**
215 * This takes a lock forcibly and hands it to context. Should ONLY be used
216 * inside *_unlock to give lock to kernel before calling *_dma_schedule.
217 *
218 * \param dev DRM device.
219 * \param lock lock pointer.
220 * \param context locking context.
221 * \return always one.
222 *
223 * Resets the lock file pointer.
224 * Marks the lock as held by the given context, via the \p cmpxchg instruction.
225 */
226static int drm_lock_transfer(struct drm_lock_data *lock_data,
227 unsigned int context)
228{
229 unsigned int old, new, prev;
230 volatile unsigned int *lock = &lock_data->hw_lock->lock;
231
232 lock_data->file_priv = NULL;
233 do {
234 old = *lock;
235 new = context | _DRM_LOCK_HELD;
236 prev = cmpxchg(lock, old, new);
237 } while (prev != old);
238 return 1;
239}
240
241/**
242 * Free lock.
243 *
244 * \param dev DRM device.
245 * \param lock lock.
246 * \param context context.
247 *
248 * Resets the lock file pointer.
249 * Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task
250 * waiting on the lock queue.
251 */
252int drm_legacy_lock_free(struct drm_lock_data *lock_data, unsigned int context)
253{
254 unsigned int old, new, prev;
255 volatile unsigned int *lock = &lock_data->hw_lock->lock;
256
257 spin_lock_bh(&lock_data->spinlock);
258 if (lock_data->kernel_waiters != 0) {
259 drm_lock_transfer(lock_data, 0);
260 lock_data->idle_has_lock = 1;
261 spin_unlock_bh(&lock_data->spinlock);
262 return 1;
263 }
264 spin_unlock_bh(&lock_data->spinlock);
265
266 do {
267 old = *lock;
268 new = _DRM_LOCKING_CONTEXT(old);
269 prev = cmpxchg(lock, old, new);
270 } while (prev != old);
271
272 if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) {
273 DRM_ERROR("%d freed heavyweight lock held by %d\n",
274 context, _DRM_LOCKING_CONTEXT(old));
275 return 1;
276 }
277 wake_up_interruptible(&lock_data->lock_queue);
278 return 0;
279}
280
281/**
282 * This function returns immediately and takes the hw lock 272 * This function returns immediately and takes the hw lock
283 * with the kernel context if it is free, otherwise it gets the highest priority when and if 273 * with the kernel context if it is free, otherwise it gets the highest priority when and if
284 * it is eventually released. 274 * it is eventually released.
@@ -330,11 +320,27 @@ void drm_legacy_idlelock_release(struct drm_lock_data *lock_data)
330} 320}
331EXPORT_SYMBOL(drm_legacy_idlelock_release); 321EXPORT_SYMBOL(drm_legacy_idlelock_release);
332 322
333int drm_legacy_i_have_hw_lock(struct drm_device *dev, 323static int drm_legacy_i_have_hw_lock(struct drm_device *dev,
334 struct drm_file *file_priv) 324 struct drm_file *file_priv)
335{ 325{
336 struct drm_master *master = file_priv->master; 326 struct drm_master *master = file_priv->master;
337 return (file_priv->lock_count && master->lock.hw_lock && 327 return (file_priv->lock_count && master->lock.hw_lock &&
338 _DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) && 328 _DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) &&
339 master->lock.file_priv == file_priv); 329 master->lock.file_priv == file_priv);
340} 330}
331
332void drm_legacy_lock_release(struct drm_device *dev, struct file *filp)
333{
334 struct drm_file *file_priv = filp->private_data;
335
336 /* if the master has gone away we can't do anything with the lock */
337 if (!dev->master)
338 return;
339
340 if (drm_legacy_i_have_hw_lock(dev, file_priv)) {
341 DRM_DEBUG("File %p released, freeing lock for context %d\n",
342 filp, _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
343 drm_legacy_lock_free(&file_priv->master->lock,
344 _DRM_LOCKING_CONTEXT(file_priv->master->lock.hw_lock->lock));
345 }
346}
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index 87a8cb73366f..fc0ebd273ef8 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -44,7 +44,7 @@
44# include <asm/agp.h> 44# include <asm/agp.h>
45#else 45#else
46# ifdef __powerpc__ 46# ifdef __powerpc__
47# define PAGE_AGP __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE) 47# define PAGE_AGP pgprot_noncached_wc(PAGE_KERNEL)
48# else 48# else
49# define PAGE_AGP PAGE_KERNEL 49# define PAGE_AGP PAGE_KERNEL
50# endif 50# endif
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index f5d80839a90c..af0d471ee246 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -60,6 +60,21 @@ static int mipi_dsi_device_match(struct device *dev, struct device_driver *drv)
60 return 0; 60 return 0;
61} 61}
62 62
63static int mipi_dsi_uevent(struct device *dev, struct kobj_uevent_env *env)
64{
65 struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
66 int err;
67
68 err = of_device_uevent_modalias(dev, env);
69 if (err != -ENODEV)
70 return err;
71
72 add_uevent_var(env, "MODALIAS=%s%s", MIPI_DSI_MODULE_PREFIX,
73 dsi->name);
74
75 return 0;
76}
77
63static const struct dev_pm_ops mipi_dsi_device_pm_ops = { 78static const struct dev_pm_ops mipi_dsi_device_pm_ops = {
64 .runtime_suspend = pm_generic_runtime_suspend, 79 .runtime_suspend = pm_generic_runtime_suspend,
65 .runtime_resume = pm_generic_runtime_resume, 80 .runtime_resume = pm_generic_runtime_resume,
@@ -74,6 +89,7 @@ static const struct dev_pm_ops mipi_dsi_device_pm_ops = {
74static struct bus_type mipi_dsi_bus_type = { 89static struct bus_type mipi_dsi_bus_type = {
75 .name = "mipi-dsi", 90 .name = "mipi-dsi",
76 .match = mipi_dsi_device_match, 91 .match = mipi_dsi_device_match,
92 .uevent = mipi_dsi_uevent,
77 .pm = &mipi_dsi_device_pm_ops, 93 .pm = &mipi_dsi_device_pm_ops,
78}; 94};
79 95
@@ -983,6 +999,28 @@ int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi,
983EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_on); 999EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_on);
984 1000
985/** 1001/**
1002 * mipi_dsi_dcs_set_tear_scanline() - set the scanline to use as trigger for
1003 * the Tearing Effect output signal of the display module
1004 * @dsi: DSI peripheral device
1005 * @scanline: scanline to use as trigger
1006 *
1007 * Return: 0 on success or a negative error code on failure
1008 */
1009int mipi_dsi_dcs_set_tear_scanline(struct mipi_dsi_device *dsi, u16 scanline)
1010{
1011 u8 payload[3] = { MIPI_DCS_SET_TEAR_SCANLINE, scanline >> 8,
1012 scanline & 0xff };
1013 ssize_t err;
1014
1015 err = mipi_dsi_generic_write(dsi, payload, sizeof(payload));
1016 if (err < 0)
1017 return err;
1018
1019 return 0;
1020}
1021EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_scanline);
1022
1023/**
986 * mipi_dsi_dcs_set_pixel_format() - sets the pixel format for the RGB image 1024 * mipi_dsi_dcs_set_pixel_format() - sets the pixel format for the RGB image
987 * data used by the interface 1025 * data used by the interface
988 * @dsi: DSI peripheral device 1026 * @dsi: DSI peripheral device
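To show where the new mipi_dsi_dcs_set_tear_scanline() helper fits, a sketch of a panel driver enabling the tearing-effect line and then picking a trigger scanline; foo_panel_setup_te and the 0x300 scanline are hypothetical, panel-specific choices:

#include <drm/drm_mipi_dsi.h>

static int foo_panel_setup_te(struct mipi_dsi_device *dsi)
{
	int ret;

	/* Enable the TE output, pulsing on vblank only. */
	ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
	if (ret < 0)
		return ret;

	/* Move the TE trigger to a panel-specific scanline. */
	return mipi_dsi_dcs_set_tear_scanline(dsi, 0x300);
}
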
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 04de6fd88f8c..cb39f45d6a16 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -179,12 +179,14 @@ static void drm_mm_insert_helper(struct drm_mm_node *hole_node,
179int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node) 179int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
180{ 180{
181 struct drm_mm_node *hole; 181 struct drm_mm_node *hole;
182 u64 end = node->start + node->size; 182 u64 end;
183 u64 hole_start; 183 u64 hole_start;
184 u64 hole_end; 184 u64 hole_end;
185 185
186 BUG_ON(node == NULL); 186 BUG_ON(node == NULL);
187 187
188 end = node->start + node->size;
189
188 /* Find the relevant hole to add our node to */ 190 /* Find the relevant hole to add our node to */
189 drm_mm_for_each_hole(hole, mm, hole_start, hole_end) { 191 drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
190 if (hole_start > node->start || hole_end < end) 192 if (hole_start > node->start || hole_end < end)
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index e5e6f504d8cc..fc5040ae5f25 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -544,6 +544,7 @@ EXPORT_SYMBOL(drm_gtf_mode_complex);
544 * 544 *
545 * This function is to create the modeline based on the GTF algorithm. 545 * This function is to create the modeline based on the GTF algorithm.
546 * Generalized Timing Formula is derived from: 546 * Generalized Timing Formula is derived from:
547 *
547 * GTF Spreadsheet by Andy Morrish (1/5/97) 548 * GTF Spreadsheet by Andy Morrish (1/5/97)
548 * available at http://www.vesa.org 549 * available at http://www.vesa.org
549 * 550 *
@@ -552,7 +553,8 @@ EXPORT_SYMBOL(drm_gtf_mode_complex);
552 * I also refer to the function of fb_get_mode in the file of 553 * I also refer to the function of fb_get_mode in the file of
553 * drivers/video/fbmon.c 554 * drivers/video/fbmon.c
554 * 555 *
555 * Standard GTF parameters: 556 * Standard GTF parameters::
557 *
556 * M = 600 558 * M = 600
557 * C = 40 559 * C = 40
558 * K = 128 560 * K = 128
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index e3a4adf03e7b..61146f5b4f56 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -30,14 +30,14 @@
30 * 30 *
31 * As KMS moves toward more fine grained locking, and atomic ioctl where 31 * As KMS moves toward more fine grained locking, and atomic ioctl where
32 * userspace can indirectly control locking order, it becomes necessary 32 * userspace can indirectly control locking order, it becomes necessary
33 * to use ww_mutex and acquire-contexts to avoid deadlocks. But because 33 * to use &ww_mutex and acquire-contexts to avoid deadlocks. But because
34 * the locking is more distributed around the driver code, we want a bit 34 * the locking is more distributed around the driver code, we want a bit
35 * of extra utility/tracking out of our acquire-ctx. This is provided 35 * of extra utility/tracking out of our acquire-ctx. This is provided
36 * by drm_modeset_lock / drm_modeset_acquire_ctx. 36 * by drm_modeset_lock / drm_modeset_acquire_ctx.
37 * 37 *
38 * For basic principles of ww_mutex, see: Documentation/locking/ww-mutex-design.txt 38 * For basic principles of &ww_mutex, see: Documentation/locking/ww-mutex-design.txt
39 * 39 *
40 * The basic usage pattern is to: 40 * The basic usage pattern is to::
41 * 41 *
42 * drm_modeset_acquire_init(&ctx) 42 * drm_modeset_acquire_init(&ctx)
43 * retry: 43 * retry:
@@ -51,6 +51,13 @@
51 * ... do stuff ... 51 * ... do stuff ...
52 * drm_modeset_drop_locks(&ctx); 52 * drm_modeset_drop_locks(&ctx);
53 * drm_modeset_acquire_fini(&ctx); 53 * drm_modeset_acquire_fini(&ctx);
54 *
 55 * On top of these per-object locks using &ww_mutex there's also an overall
56 * dev->mode_config.lock, for protecting everything else. Mostly this means
57 * probe state of connectors, and preventing hotplug add/removal of connectors.
58 *
59 * Finally there's a bunch of dedicated locks to protect drm core internal
60 * lists and lookup data structures.
54 */ 61 */
55 62
56/** 63/**
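The usage pattern quoted in the comment above maps onto real calls roughly as follows; a hedged sketch for a single per-CRTC lock, with error handling trimmed and foo_touch_crtc_locked being a hypothetical caller:

static int foo_touch_crtc_locked(struct drm_crtc *crtc)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock(&crtc->mutex, &ctx);
	if (ret == -EDEADLK) {
		/* Another context won the ww_mutex race: drop and retry. */
		drm_modeset_backoff(&ctx);
		goto retry;
	}
	if (!ret) {
		/* ... inspect or update state protected by crtc->mutex ... */
		;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	return ret;
}
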
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 29d5a548d07a..b2f8f1062d5f 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -144,50 +144,6 @@ int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master)
144} 144}
145EXPORT_SYMBOL(drm_pci_set_busid); 145EXPORT_SYMBOL(drm_pci_set_busid);
146 146
147int drm_pci_set_unique(struct drm_device *dev,
148 struct drm_master *master,
149 struct drm_unique *u)
150{
151 int domain, bus, slot, func, ret;
152
153 master->unique_len = u->unique_len;
154 master->unique = kmalloc(master->unique_len + 1, GFP_KERNEL);
155 if (!master->unique) {
156 ret = -ENOMEM;
157 goto err;
158 }
159
160 if (copy_from_user(master->unique, u->unique, master->unique_len)) {
161 ret = -EFAULT;
162 goto err;
163 }
164
165 master->unique[master->unique_len] = '\0';
166
167 /* Return error if the busid submitted doesn't match the device's actual
168 * busid.
169 */
170 ret = sscanf(master->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
171 if (ret != 3) {
172 ret = -EINVAL;
173 goto err;
174 }
175
176 domain = bus >> 8;
177 bus &= 0xff;
178
179 if ((domain != drm_get_pci_domain(dev)) ||
180 (bus != dev->pdev->bus->number) ||
181 (slot != PCI_SLOT(dev->pdev->devfn)) ||
182 (func != PCI_FUNC(dev->pdev->devfn))) {
183 ret = -EINVAL;
184 goto err;
185 }
186 return 0;
187err:
188 return ret;
189}
190
191static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p) 147static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
192{ 148{
193 if ((p->busnum >> 8) != drm_get_pci_domain(dev) || 149 if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
@@ -444,13 +400,6 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
444{ 400{
445 return -EINVAL; 401 return -EINVAL;
446} 402}
447
448int drm_pci_set_unique(struct drm_device *dev,
449 struct drm_master *master,
450 struct drm_unique *u)
451{
452 return -EINVAL;
453}
454#endif 403#endif
455 404
456EXPORT_SYMBOL(drm_pci_init); 405EXPORT_SYMBOL(drm_pci_init);
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 369d2898ff9e..16c4a7bd7465 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -115,6 +115,7 @@ static int get_connectors_for_crtc(struct drm_crtc *crtc,
115 * @src: source coordinates in 16.16 fixed point 115 * @src: source coordinates in 16.16 fixed point
116 * @dest: integer destination coordinates 116 * @dest: integer destination coordinates
117 * @clip: integer clipping coordinates 117 * @clip: integer clipping coordinates
118 * @rotation: plane rotation
118 * @min_scale: minimum @src:@dest scaling factor in 16.16 fixed point 119 * @min_scale: minimum @src:@dest scaling factor in 16.16 fixed point
119 * @max_scale: maximum @src:@dest scaling factor in 16.16 fixed point 120 * @max_scale: maximum @src:@dest scaling factor in 16.16 fixed point
120 * @can_position: is it legal to position the plane such that it 121 * @can_position: is it legal to position the plane such that it
@@ -134,16 +135,17 @@ static int get_connectors_for_crtc(struct drm_crtc *crtc,
134 * Zero if update appears valid, error code on failure 135 * Zero if update appears valid, error code on failure
135 */ 136 */
136int drm_plane_helper_check_update(struct drm_plane *plane, 137int drm_plane_helper_check_update(struct drm_plane *plane,
137 struct drm_crtc *crtc, 138 struct drm_crtc *crtc,
138 struct drm_framebuffer *fb, 139 struct drm_framebuffer *fb,
139 struct drm_rect *src, 140 struct drm_rect *src,
140 struct drm_rect *dest, 141 struct drm_rect *dest,
141 const struct drm_rect *clip, 142 const struct drm_rect *clip,
142 int min_scale, 143 unsigned int rotation,
143 int max_scale, 144 int min_scale,
144 bool can_position, 145 int max_scale,
145 bool can_update_disabled, 146 bool can_position,
146 bool *visible) 147 bool can_update_disabled,
148 bool *visible)
147{ 149{
148 int hscale, vscale; 150 int hscale, vscale;
149 151
@@ -163,6 +165,8 @@ int drm_plane_helper_check_update(struct drm_plane *plane,
163 return -EINVAL; 165 return -EINVAL;
164 } 166 }
165 167
168 drm_rect_rotate(src, fb->width << 16, fb->height << 16, rotation);
169
166 /* Check scaling */ 170 /* Check scaling */
167 hscale = drm_rect_calc_hscale(src, dest, min_scale, max_scale); 171 hscale = drm_rect_calc_hscale(src, dest, min_scale, max_scale);
168 vscale = drm_rect_calc_vscale(src, dest, min_scale, max_scale); 172 vscale = drm_rect_calc_vscale(src, dest, min_scale, max_scale);
@@ -174,6 +178,9 @@ int drm_plane_helper_check_update(struct drm_plane *plane,
174 } 178 }
175 179
176 *visible = drm_rect_clip_scaled(src, dest, clip, hscale, vscale); 180 *visible = drm_rect_clip_scaled(src, dest, clip, hscale, vscale);
181
182 drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16, rotation);
183
177 if (!*visible) 184 if (!*visible)
178 /* 185 /*
179 * Plane isn't visible; some drivers can handle this 186 * Plane isn't visible; some drivers can handle this
@@ -219,10 +226,12 @@ EXPORT_SYMBOL(drm_plane_helper_check_update);
219 * 226 *
220 * Note that we make some assumptions about hardware limitations that may not be 227 * Note that we make some assumptions about hardware limitations that may not be
221 * true for all hardware -- 228 * true for all hardware --
222 * 1) Primary plane cannot be repositioned. 229 *
223 * 2) Primary plane cannot be scaled. 230 * 1. Primary plane cannot be repositioned.
224 * 3) Primary plane must cover the entire CRTC. 231 * 2. Primary plane cannot be scaled.
225 * 4) Subpixel positioning is not supported. 232 * 3. Primary plane must cover the entire CRTC.
233 * 4. Subpixel positioning is not supported.
234 *
226 * Drivers for hardware that don't have these restrictions can provide their 235 * Drivers for hardware that don't have these restrictions can provide their
227 * own implementation rather than using this helper. 236 * own implementation rather than using this helper.
228 * 237 *
@@ -265,6 +274,7 @@ int drm_primary_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
265 274
266 ret = drm_plane_helper_check_update(plane, crtc, fb, 275 ret = drm_plane_helper_check_update(plane, crtc, fb,
267 &src, &dest, &clip, 276 &src, &dest, &clip,
277 BIT(DRM_ROTATE_0),
268 DRM_PLANE_HELPER_NO_SCALING, 278 DRM_PLANE_HELPER_NO_SCALING,
269 DRM_PLANE_HELPER_NO_SCALING, 279 DRM_PLANE_HELPER_NO_SCALING,
270 false, false, &visible); 280 false, false, &visible);
diff --git a/drivers/gpu/drm/drm_platform.c b/drivers/gpu/drm/drm_platform.c
index 644169e1a029..2c819ef90090 100644
--- a/drivers/gpu/drm/drm_platform.c
+++ b/drivers/gpu/drm/drm_platform.c
@@ -68,24 +68,6 @@ err_free:
68 return ret; 68 return ret;
69} 69}
70 70
71int drm_platform_set_busid(struct drm_device *dev, struct drm_master *master)
72{
73 int id;
74
75 id = dev->platformdev->id;
76 if (id < 0)
77 id = 0;
78
79 master->unique = kasprintf(GFP_KERNEL, "platform:%s:%02d",
80 dev->platformdev->name, id);
81 if (!master->unique)
82 return -ENOMEM;
83
84 master->unique_len = strlen(master->unique);
85 return 0;
86}
87EXPORT_SYMBOL(drm_platform_set_busid);
88
89/** 71/**
90 * drm_platform_init - Register a platform device with the DRM subsystem 72 * drm_platform_init - Register a platform device with the DRM subsystem
91 * @driver: DRM device driver 73 * @driver: DRM device driver
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index aab0f3f1f42d..780589b420a4 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -593,7 +593,7 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
593 get_dma_buf(dma_buf); 593 get_dma_buf(dma_buf);
594 } 594 }
595 595
596 /* drm_gem_handle_create_tail unlocks dev->object_name_lock. */ 596 /* _handle_create_tail unconditionally unlocks dev->object_name_lock. */
597 ret = drm_gem_handle_create_tail(file_priv, obj, handle); 597 ret = drm_gem_handle_create_tail(file_priv, obj, handle);
598 drm_gem_object_unreference_unlocked(obj); 598 drm_gem_object_unreference_unlocked(obj);
599 if (ret) 599 if (ret)
@@ -601,11 +601,10 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
601 601
602 ret = drm_prime_add_buf_handle(&file_priv->prime, 602 ret = drm_prime_add_buf_handle(&file_priv->prime,
603 dma_buf, *handle); 603 dma_buf, *handle);
604 mutex_unlock(&file_priv->prime.lock);
604 if (ret) 605 if (ret)
605 goto fail; 606 goto fail;
606 607
607 mutex_unlock(&file_priv->prime.lock);
608
609 dma_buf_put(dma_buf); 608 dma_buf_put(dma_buf);
610 609
611 return 0; 610 return 0;
@@ -615,11 +614,14 @@ fail:
615 * to detach.. which seems ok.. 614 * to detach.. which seems ok..
616 */ 615 */
617 drm_gem_handle_delete(file_priv, *handle); 616 drm_gem_handle_delete(file_priv, *handle);
617 dma_buf_put(dma_buf);
618 return ret;
619
618out_unlock: 620out_unlock:
619 mutex_unlock(&dev->object_name_lock); 621 mutex_unlock(&dev->object_name_lock);
620out_put: 622out_put:
621 dma_buf_put(dma_buf);
622 mutex_unlock(&file_priv->prime.lock); 623 mutex_unlock(&file_priv->prime.lock);
624 dma_buf_put(dma_buf);
623 return ret; 625 return ret;
624} 626}
625EXPORT_SYMBOL(drm_gem_prime_fd_to_handle); 627EXPORT_SYMBOL(drm_gem_prime_fd_to_handle);
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 0329080d7f7c..a0df377d7d1c 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -82,13 +82,30 @@ drm_mode_validate_flag(const struct drm_display_mode *mode,
82 82
83static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector) 83static int drm_helper_probe_add_cmdline_mode(struct drm_connector *connector)
84{ 84{
85 struct drm_cmdline_mode *cmdline_mode;
85 struct drm_display_mode *mode; 86 struct drm_display_mode *mode;
86 87
87 if (!connector->cmdline_mode.specified) 88 cmdline_mode = &connector->cmdline_mode;
89 if (!cmdline_mode->specified)
88 return 0; 90 return 0;
89 91
92 /* Only add a GTF mode if we find no matching probed modes */
93 list_for_each_entry(mode, &connector->probed_modes, head) {
94 if (mode->hdisplay != cmdline_mode->xres ||
95 mode->vdisplay != cmdline_mode->yres)
96 continue;
97
98 if (cmdline_mode->refresh_specified) {
99 /* The probed mode's vrefresh is set until later */
100 if (drm_mode_vrefresh(mode) != cmdline_mode->refresh)
101 continue;
102 }
103
104 return 0;
105 }
106
90 mode = drm_mode_create_from_cmdline_mode(connector->dev, 107 mode = drm_mode_create_from_cmdline_mode(connector->dev,
91 &connector->cmdline_mode); 108 cmdline_mode);
92 if (mode == NULL) 109 if (mode == NULL)
93 return 0; 110 return 0;
94 111
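The duplicate check added above is visible with a forced mode on the kernel command line, for instance (the connector name is board-specific and only illustrative):

    video=HDMI-A-1:1024x768@60

If the connector's probed modes already contain a 1024x768 mode at 60 Hz, no extra GTF mode is generated for it; otherwise drm_mode_create_from_cmdline_mode() still adds one as before.
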
diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c
index 4f0f3b36d537..bf70431073f6 100644
--- a/drivers/gpu/drm/drm_scatter.c
+++ b/drivers/gpu/drm/drm_scatter.c
@@ -41,7 +41,7 @@
41static inline void *drm_vmalloc_dma(unsigned long size) 41static inline void *drm_vmalloc_dma(unsigned long size)
42{ 42{
43#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE) 43#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
44 return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL | _PAGE_NO_CACHE); 44 return __vmalloc(size, GFP_KERNEL, pgprot_noncached_wc(PAGE_KERNEL));
45#else 45#else
46 return vmalloc_32(size); 46 return vmalloc_32(size);
47#endif 47#endif
diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c
new file mode 100644
index 000000000000..0db36d27e90b
--- /dev/null
+++ b/drivers/gpu/drm/drm_simple_kms_helper.c
@@ -0,0 +1,206 @@
1/*
2 * Copyright (C) 2016 Noralf Trønnes
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10#include <drm/drmP.h>
11#include <drm/drm_atomic.h>
12#include <drm/drm_atomic_helper.h>
13#include <drm/drm_crtc_helper.h>
14#include <drm/drm_plane_helper.h>
15#include <drm/drm_simple_kms_helper.h>
16#include <linux/slab.h>
17
18/**
19 * DOC: overview
20 *
21 * This helper library provides helpers for drivers for simple display
22 * hardware.
23 *
24 * drm_simple_display_pipe_init() initializes a simple display pipeline
25 * which has only one full-screen scanout buffer feeding one output. The
26 * pipeline is represented by struct &drm_simple_display_pipe and binds
27 * together &drm_plane, &drm_crtc and &drm_encoder structures into one fixed
28 * entity. Some flexibility for code reuse is provided through a separately
29 * allocated &drm_connector object and supporting optional &drm_bridge
30 * encoder drivers.
31 */
32
33static const struct drm_encoder_funcs drm_simple_kms_encoder_funcs = {
34 .destroy = drm_encoder_cleanup,
35};
36
37static void drm_simple_kms_crtc_enable(struct drm_crtc *crtc)
38{
39 struct drm_simple_display_pipe *pipe;
40
41 pipe = container_of(crtc, struct drm_simple_display_pipe, crtc);
42 if (!pipe->funcs || !pipe->funcs->enable)
43 return;
44
45 pipe->funcs->enable(pipe, crtc->state);
46}
47
48static void drm_simple_kms_crtc_disable(struct drm_crtc *crtc)
49{
50 struct drm_simple_display_pipe *pipe;
51
52 pipe = container_of(crtc, struct drm_simple_display_pipe, crtc);
53 if (!pipe->funcs || !pipe->funcs->disable)
54 return;
55
56 pipe->funcs->disable(pipe);
57}
58
59static const struct drm_crtc_helper_funcs drm_simple_kms_crtc_helper_funcs = {
60 .disable = drm_simple_kms_crtc_disable,
61 .enable = drm_simple_kms_crtc_enable,
62};
63
64static const struct drm_crtc_funcs drm_simple_kms_crtc_funcs = {
65 .reset = drm_atomic_helper_crtc_reset,
66 .destroy = drm_crtc_cleanup,
67 .set_config = drm_atomic_helper_set_config,
68 .page_flip = drm_atomic_helper_page_flip,
69 .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
70 .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
71};
72
73static int drm_simple_kms_plane_atomic_check(struct drm_plane *plane,
74 struct drm_plane_state *plane_state)
75{
76 struct drm_rect src = {
77 .x1 = plane_state->src_x,
78 .y1 = plane_state->src_y,
79 .x2 = plane_state->src_x + plane_state->src_w,
80 .y2 = plane_state->src_y + plane_state->src_h,
81 };
82 struct drm_rect dest = {
83 .x1 = plane_state->crtc_x,
84 .y1 = plane_state->crtc_y,
85 .x2 = plane_state->crtc_x + plane_state->crtc_w,
86 .y2 = plane_state->crtc_y + plane_state->crtc_h,
87 };
88 struct drm_rect clip = { 0 };
89 struct drm_simple_display_pipe *pipe;
90 struct drm_crtc_state *crtc_state;
91 bool visible;
92 int ret;
93
94 pipe = container_of(plane, struct drm_simple_display_pipe, plane);
95 crtc_state = drm_atomic_get_existing_crtc_state(plane_state->state,
96 &pipe->crtc);
97 if (crtc_state->enable != !!plane_state->crtc)
98 return -EINVAL; /* plane must match crtc enable state */
99
100 if (!crtc_state->enable)
101 return 0; /* nothing to check when disabling or disabled */
102
103 clip.x2 = crtc_state->adjusted_mode.hdisplay;
104 clip.y2 = crtc_state->adjusted_mode.vdisplay;
105 ret = drm_plane_helper_check_update(plane, &pipe->crtc,
106 plane_state->fb,
107 &src, &dest, &clip,
108 plane_state->rotation,
109 DRM_PLANE_HELPER_NO_SCALING,
110 DRM_PLANE_HELPER_NO_SCALING,
111 false, true, &visible);
112 if (ret)
113 return ret;
114
115 if (!visible)
116 return -EINVAL;
117
118 if (!pipe->funcs || !pipe->funcs->check)
119 return 0;
120
121 return pipe->funcs->check(pipe, plane_state, crtc_state);
122}
123
124static void drm_simple_kms_plane_atomic_update(struct drm_plane *plane,
125 struct drm_plane_state *pstate)
126{
127 struct drm_simple_display_pipe *pipe;
128
129 pipe = container_of(plane, struct drm_simple_display_pipe, plane);
130 if (!pipe->funcs || !pipe->funcs->update)
131 return;
132
133 pipe->funcs->update(pipe, pstate);
134}
135
136static const struct drm_plane_helper_funcs drm_simple_kms_plane_helper_funcs = {
137 .atomic_check = drm_simple_kms_plane_atomic_check,
138 .atomic_update = drm_simple_kms_plane_atomic_update,
139};
140
141static const struct drm_plane_funcs drm_simple_kms_plane_funcs = {
142 .update_plane = drm_atomic_helper_update_plane,
143 .disable_plane = drm_atomic_helper_disable_plane,
144 .destroy = drm_plane_cleanup,
145 .reset = drm_atomic_helper_plane_reset,
146 .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
147 .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
148};
149
150/**
151 * drm_simple_display_pipe_init - Initialize a simple display pipeline
152 * @dev: DRM device
153 * @pipe: simple display pipe object to initialize
154 * @funcs: callbacks for the display pipe (optional)
155 * @formats: array of supported formats (%DRM_FORMAT_*)
156 * @format_count: number of elements in @formats
157 * @connector: connector to attach and register
158 *
 159 * Sets up a display pipeline which consists of a really simple
160 * plane-crtc-encoder pipe coupled with the provided connector.
161 * Teardown of a simple display pipe is all handled automatically by the drm
162 * core through calling drm_mode_config_cleanup(). Drivers afterwards need to
163 * release the memory for the structure themselves.
164 *
165 * Returns:
166 * Zero on success, negative error code on failure.
167 */
168int drm_simple_display_pipe_init(struct drm_device *dev,
169 struct drm_simple_display_pipe *pipe,
170 const struct drm_simple_display_pipe_funcs *funcs,
171 const uint32_t *formats, unsigned int format_count,
172 struct drm_connector *connector)
173{
174 struct drm_encoder *encoder = &pipe->encoder;
175 struct drm_plane *plane = &pipe->plane;
176 struct drm_crtc *crtc = &pipe->crtc;
177 int ret;
178
179 pipe->connector = connector;
180 pipe->funcs = funcs;
181
182 drm_plane_helper_add(plane, &drm_simple_kms_plane_helper_funcs);
183 ret = drm_universal_plane_init(dev, plane, 0,
184 &drm_simple_kms_plane_funcs,
185 formats, format_count,
186 DRM_PLANE_TYPE_PRIMARY, NULL);
187 if (ret)
188 return ret;
189
190 drm_crtc_helper_add(crtc, &drm_simple_kms_crtc_helper_funcs);
191 ret = drm_crtc_init_with_planes(dev, crtc, plane, NULL,
192 &drm_simple_kms_crtc_funcs, NULL);
193 if (ret)
194 return ret;
195
196 encoder->possible_crtcs = 1 << drm_crtc_index(crtc);
197 ret = drm_encoder_init(dev, encoder, &drm_simple_kms_encoder_funcs,
198 DRM_MODE_ENCODER_NONE, NULL);
199 if (ret)
200 return ret;
201
202 return drm_mode_connector_attach_encoder(connector, encoder);
203}
204EXPORT_SYMBOL(drm_simple_display_pipe_init);
205
206MODULE_LICENSE("GPL");
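A sketch of the driver side this helper targets: one primary plane, one CRTC and one encoder collapsed into a pipe, with the connector supplied by the driver. All foo_* names, struct foo_priv and the format list are illustrative assumptions, not part of this patch:

#include <drm/drmP.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_simple_kms_helper.h>

struct foo_priv {
	struct drm_device *drm;
	struct drm_simple_display_pipe pipe;
	struct drm_connector connector;
};

static void foo_pipe_enable(struct drm_simple_display_pipe *pipe,
			    struct drm_crtc_state *crtc_state)
{
	/* Start scanout using crtc_state->adjusted_mode. */
}

static void foo_pipe_disable(struct drm_simple_display_pipe *pipe)
{
	/* Stop scanout. */
}

static void foo_pipe_update(struct drm_simple_display_pipe *pipe,
			    struct drm_plane_state *old_state)
{
	/* Latch pipe->plane.state->fb into the hardware. */
}

static const struct drm_simple_display_pipe_funcs foo_pipe_funcs = {
	.enable = foo_pipe_enable,
	.disable = foo_pipe_disable,
	.update = foo_pipe_update,
};

static const uint32_t foo_formats[] = {
	DRM_FORMAT_XRGB8888,
};

static int foo_modeset_init(struct foo_priv *priv)
{
	/* The connector must already be initialized by the driver. */
	return drm_simple_display_pipe_init(priv->drm, &priv->pipe,
					    &foo_pipe_funcs,
					    foo_formats,
					    ARRAY_SIZE(foo_formats),
					    &priv->connector);
}
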
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index fa7fadce8063..32dd821b7202 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -32,75 +32,6 @@ static struct device_type drm_sysfs_device_minor = {
32 32
33struct class *drm_class; 33struct class *drm_class;
34 34
35/**
36 * __drm_class_suspend - internal DRM class suspend routine
37 * @dev: Linux device to suspend
38 * @state: power state to enter
39 *
40 * Just figures out what the actual struct drm_device associated with
41 * @dev is and calls its suspend hook, if present.
42 */
43static int __drm_class_suspend(struct device *dev, pm_message_t state)
44{
45 if (dev->type == &drm_sysfs_device_minor) {
46 struct drm_minor *drm_minor = to_drm_minor(dev);
47 struct drm_device *drm_dev = drm_minor->dev;
48
49 if (drm_minor->type == DRM_MINOR_LEGACY &&
50 !drm_core_check_feature(drm_dev, DRIVER_MODESET) &&
51 drm_dev->driver->suspend)
52 return drm_dev->driver->suspend(drm_dev, state);
53 }
54 return 0;
55}
56
57/**
58 * drm_class_suspend - internal DRM class suspend hook. Simply calls
59 * __drm_class_suspend() with the correct pm state.
60 * @dev: Linux device to suspend
61 */
62static int drm_class_suspend(struct device *dev)
63{
64 return __drm_class_suspend(dev, PMSG_SUSPEND);
65}
66
67/**
68 * drm_class_freeze - internal DRM class freeze hook. Simply calls
69 * __drm_class_suspend() with the correct pm state.
70 * @dev: Linux device to freeze
71 */
72static int drm_class_freeze(struct device *dev)
73{
74 return __drm_class_suspend(dev, PMSG_FREEZE);
75}
76
77/**
78 * drm_class_resume - DRM class resume hook
79 * @dev: Linux device to resume
80 *
81 * Just figures out what the actual struct drm_device associated with
82 * @dev is and calls its resume hook, if present.
83 */
84static int drm_class_resume(struct device *dev)
85{
86 if (dev->type == &drm_sysfs_device_minor) {
87 struct drm_minor *drm_minor = to_drm_minor(dev);
88 struct drm_device *drm_dev = drm_minor->dev;
89
90 if (drm_minor->type == DRM_MINOR_LEGACY &&
91 !drm_core_check_feature(drm_dev, DRIVER_MODESET) &&
92 drm_dev->driver->resume)
93 return drm_dev->driver->resume(drm_dev);
94 }
95 return 0;
96}
97
98static const struct dev_pm_ops drm_class_dev_pm_ops = {
99 .suspend = drm_class_suspend,
100 .resume = drm_class_resume,
101 .freeze = drm_class_freeze,
102};
103
104static char *drm_devnode(struct device *dev, umode_t *mode) 35static char *drm_devnode(struct device *dev, umode_t *mode)
105{ 36{
106 return kasprintf(GFP_KERNEL, "dri/%s", dev_name(dev)); 37 return kasprintf(GFP_KERNEL, "dri/%s", dev_name(dev));
@@ -131,8 +62,6 @@ int drm_sysfs_init(void)
131 if (IS_ERR(drm_class)) 62 if (IS_ERR(drm_class))
132 return PTR_ERR(drm_class); 63 return PTR_ERR(drm_class);
133 64
134 drm_class->pm = &drm_class_dev_pm_ops;
135
136 err = class_create_file(drm_class, &class_attr_version.attr); 65 err = class_create_file(drm_class, &class_attr_version.attr);
137 if (err) { 66 if (err) {
138 class_destroy(drm_class); 67 class_destroy(drm_class);
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index ac9f4b3ec615..caa4e4ca616d 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -80,7 +80,7 @@ static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
80 pgprot_t tmp = vm_get_page_prot(vma->vm_flags); 80 pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
81 81
82#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE) 82#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
83 tmp |= _PAGE_NO_CACHE; 83 tmp = pgprot_noncached_wc(tmp);
84#endif 84#endif
85 return tmp; 85 return tmp;
86} 86}
@@ -593,7 +593,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
593 * pages and mappings in fault() 593 * pages and mappings in fault()
594 */ 594 */
595#if defined(__powerpc__) 595#if defined(__powerpc__)
596 pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE; 596 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
597#endif 597#endif
598 vma->vm_ops = &drm_vm_ops; 598 vma->vm_ops = &drm_vm_ops;
599 break; 599 break;
@@ -670,57 +670,3 @@ void drm_legacy_vma_flush(struct drm_device *dev)
670 kfree(vma); 670 kfree(vma);
671 } 671 }
672} 672}
673
674int drm_vma_info(struct seq_file *m, void *data)
675{
676 struct drm_info_node *node = (struct drm_info_node *) m->private;
677 struct drm_device *dev = node->minor->dev;
678 struct drm_vma_entry *pt;
679 struct vm_area_struct *vma;
680 unsigned long vma_count = 0;
681#if defined(__i386__)
682 unsigned int pgprot;
683#endif
684
685 mutex_lock(&dev->struct_mutex);
686 list_for_each_entry(pt, &dev->vmalist, head)
687 vma_count++;
688
689 seq_printf(m, "vma use count: %lu, high_memory = %pK, 0x%pK\n",
690 vma_count, high_memory,
691 (void *)(unsigned long)virt_to_phys(high_memory));
692
693 list_for_each_entry(pt, &dev->vmalist, head) {
694 vma = pt->vma;
695 if (!vma)
696 continue;
697 seq_printf(m,
698 "\n%5d 0x%pK-0x%pK %c%c%c%c%c%c 0x%08lx000",
699 pt->pid,
700 (void *)vma->vm_start, (void *)vma->vm_end,
701 vma->vm_flags & VM_READ ? 'r' : '-',
702 vma->vm_flags & VM_WRITE ? 'w' : '-',
703 vma->vm_flags & VM_EXEC ? 'x' : '-',
704 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
705 vma->vm_flags & VM_LOCKED ? 'l' : '-',
706 vma->vm_flags & VM_IO ? 'i' : '-',
707 vma->vm_pgoff);
708
709#if defined(__i386__)
710 pgprot = pgprot_val(vma->vm_page_prot);
711 seq_printf(m, " %c%c%c%c%c%c%c%c%c",
712 pgprot & _PAGE_PRESENT ? 'p' : '-',
713 pgprot & _PAGE_RW ? 'w' : 'r',
714 pgprot & _PAGE_USER ? 'u' : 's',
715 pgprot & _PAGE_PWT ? 't' : 'b',
716 pgprot & _PAGE_PCD ? 'u' : 'c',
717 pgprot & _PAGE_ACCESSED ? 'a' : '-',
718 pgprot & _PAGE_DIRTY ? 'd' : '-',
719 pgprot & _PAGE_PSE ? 'm' : 'k',
720 pgprot & _PAGE_GLOBAL ? 'g' : 'l');
721#endif
722 seq_printf(m, "\n");
723 }
724 mutex_unlock(&dev->struct_mutex);
725 return 0;
726}
diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c
index 2f2ecde8285b..f306c8855978 100644
--- a/drivers/gpu/drm/drm_vma_manager.c
+++ b/drivers/gpu/drm/drm_vma_manager.c
@@ -127,6 +127,9 @@ EXPORT_SYMBOL(drm_vma_offset_manager_destroy);
127 * used to implement weakly referenced lookups using kref_get_unless_zero(). 127 * used to implement weakly referenced lookups using kref_get_unless_zero().
128 * 128 *
129 * Example: 129 * Example:
130 *
131 * ::
132 *
130 * drm_vma_offset_lock_lookup(mgr); 133 * drm_vma_offset_lock_lookup(mgr);
131 * node = drm_vma_offset_lookup_locked(mgr); 134 * node = drm_vma_offset_lookup_locked(mgr);
132 * if (node) 135 * if (node)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 3d4f56df8359..ffd1b32caa8d 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -91,10 +91,8 @@ static void load_gpu(struct drm_device *dev)
91 int ret; 91 int ret;
92 92
93 ret = etnaviv_gpu_init(g); 93 ret = etnaviv_gpu_init(g);
94 if (ret) { 94 if (ret)
95 dev_err(g->dev, "hw init failed: %d\n", ret);
96 priv->gpu[i] = NULL; 95 priv->gpu[i] = NULL;
97 }
98 } 96 }
99 } 97 }
100} 98}
@@ -496,7 +494,6 @@ static struct drm_driver etnaviv_drm_driver = {
496 DRIVER_RENDER, 494 DRIVER_RENDER,
497 .open = etnaviv_open, 495 .open = etnaviv_open,
498 .preclose = etnaviv_preclose, 496 .preclose = etnaviv_preclose,
499 .set_busid = drm_platform_set_busid,
500 .gem_free_object_unlocked = etnaviv_gem_free_object, 497 .gem_free_object_unlocked = etnaviv_gem_free_object,
501 .gem_vm_ops = &vm_ops, 498 .gem_vm_ops = &vm_ops,
502 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 499 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 8c6f750634af..5ce3603e6eac 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -535,8 +535,7 @@ void etnaviv_gem_describe_objects(struct etnaviv_drm_private *priv,
535 535
536static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj) 536static void etnaviv_gem_shmem_release(struct etnaviv_gem_object *etnaviv_obj)
537{ 537{
538 if (etnaviv_obj->vaddr) 538 vunmap(etnaviv_obj->vaddr);
539 vunmap(etnaviv_obj->vaddr);
540 put_pages(etnaviv_obj); 539 put_pages(etnaviv_obj);
541} 540}
542 541
@@ -670,9 +669,7 @@ static struct drm_gem_object *__etnaviv_gem_new(struct drm_device *dev,
670 return obj; 669 return obj;
671 670
672fail: 671fail:
673 if (obj) 672 drm_gem_object_unreference_unlocked(obj);
674 drm_gem_object_unreference_unlocked(obj);
675
676 return ERR_PTR(ret); 673 return ERR_PTR(ret);
677} 674}
678 675
@@ -916,15 +913,12 @@ int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
916 get_task_struct(current); 913 get_task_struct(current);
917 914
918 ret = etnaviv_gem_obj_add(dev, &etnaviv_obj->base); 915 ret = etnaviv_gem_obj_add(dev, &etnaviv_obj->base);
919 if (ret) { 916 if (ret)
920 drm_gem_object_unreference_unlocked(&etnaviv_obj->base); 917 goto unreference;
921 return ret;
922 }
923 918
924 ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle); 919 ret = drm_gem_handle_create(file, &etnaviv_obj->base, handle);
925 920unreference:
926 /* drop reference from allocate - handle holds it now */ 921 /* drop reference from allocate - handle holds it now */
927 drm_gem_object_unreference_unlocked(&etnaviv_obj->base); 922 drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
928
929 return ret; 923 return ret;
930} 924}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index ff6aa5dfb2d7..87ef34150d46 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -487,6 +487,47 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
487 return 0; 487 return 0;
488} 488}
489 489
490static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu)
491{
492 u32 pmc, ppc;
493
494 /* enable clock gating */
495 ppc = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
496 ppc |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
497
498 /* Disable stall module clock gating for 4.3.0.1 and 4.3.0.2 revs */
499 if (gpu->identity.revision == 0x4301 ||
500 gpu->identity.revision == 0x4302)
501 ppc |= VIVS_PM_POWER_CONTROLS_DISABLE_STALL_MODULE_CLOCK_GATING;
502
503 gpu_write(gpu, VIVS_PM_POWER_CONTROLS, ppc);
504
505 pmc = gpu_read(gpu, VIVS_PM_MODULE_CONTROLS);
506
507 /* Disable PA clock gating for GC400+ except for GC420 */
508 if (gpu->identity.model >= chipModel_GC400 &&
509 gpu->identity.model != chipModel_GC420)
510 pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PA;
511
512 /*
513 * Disable PE clock gating on revs < 5.0.0.0 when HZ is
514 * present without a bug fix.
515 */
516 if (gpu->identity.revision < 0x5000 &&
517 gpu->identity.minor_features0 & chipMinorFeatures0_HZ &&
518 !(gpu->identity.minor_features1 &
519 chipMinorFeatures1_DISABLE_PE_GATING))
520 pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PE;
521
522 if (gpu->identity.revision < 0x5422)
523 pmc |= BIT(15); /* Unknown bit */
524
525 pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_HZ;
526 pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_EZ;
527
528 gpu_write(gpu, VIVS_PM_MODULE_CONTROLS, pmc);
529}
530
490static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu) 531static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
491{ 532{
492 u16 prefetch; 533 u16 prefetch;
@@ -506,6 +547,9 @@ static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
506 gpu_write(gpu, VIVS_MC_DEBUG_MEMORY, mc_memory_debug); 547 gpu_write(gpu, VIVS_MC_DEBUG_MEMORY, mc_memory_debug);
507 } 548 }
508 549
550 /* enable module-level clock gating */
551 etnaviv_gpu_enable_mlcg(gpu);
552
509 /* 553 /*
510 * Update GPU AXI cache atttribute to "cacheable, no allocate". 554 * Update GPU AXI cache atttribute to "cacheable, no allocate".
511 * This is necessary to prevent the iMX6 SoC locking up. 555 * This is necessary to prevent the iMX6 SoC locking up.
@@ -553,8 +597,10 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
553 bool mmuv2; 597 bool mmuv2;
554 598
555 ret = pm_runtime_get_sync(gpu->dev); 599 ret = pm_runtime_get_sync(gpu->dev);
556 if (ret < 0) 600 if (ret < 0) {
601 dev_err(gpu->dev, "Failed to enable GPU power domain\n");
557 return ret; 602 return ret;
603 }
558 604
559 etnaviv_hw_identify(gpu); 605 etnaviv_hw_identify(gpu);
560 606
@@ -591,8 +637,10 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
591 } 637 }
592 638
593 ret = etnaviv_hw_reset(gpu); 639 ret = etnaviv_hw_reset(gpu);
594 if (ret) 640 if (ret) {
641 dev_err(gpu->dev, "GPU reset failed\n");
595 goto fail; 642 goto fail;
643 }
596 644
597 /* Setup IOMMU.. eventually we will (I think) do this once per context 645 /* Setup IOMMU.. eventually we will (I think) do this once per context
598 * and have separate page tables per context. For now, to keep things 646 * and have separate page tables per context. For now, to keep things
@@ -610,12 +658,14 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
610 } 658 }
611 659
612 if (!iommu) { 660 if (!iommu) {
661 dev_err(gpu->dev, "Failed to allocate GPU IOMMU domain\n");
613 ret = -ENOMEM; 662 ret = -ENOMEM;
614 goto fail; 663 goto fail;
615 } 664 }
616 665
617 gpu->mmu = etnaviv_iommu_new(gpu, iommu, version); 666 gpu->mmu = etnaviv_iommu_new(gpu, iommu, version);
618 if (!gpu->mmu) { 667 if (!gpu->mmu) {
668 dev_err(gpu->dev, "Failed to instantiate GPU IOMMU\n");
619 iommu_domain_free(iommu); 669 iommu_domain_free(iommu);
620 ret = -ENOMEM; 670 ret = -ENOMEM;
621 goto fail; 671 goto fail;
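
The new etnaviv_gpu_enable_mlcg() helper above turns module-level clock gating on, then selectively keeps individual units (PA, PE, RA HZ/EZ) ungated on cores and revisions with known errata. A small standalone C sketch of that decision logic follows; the VIVS_* bit values are copied from the state_hi.xml.h hunk further down, while the feature-bit positions and the identity struct are simplified placeholders rather than the driver's real definitions.

    #include <stdint.h>
    #include <stdio.h>

    /* Bit values as added to VIVS_PM_MODULE_CONTROLS below. */
    #define DISABLE_CLK_GATING_PA    0x00000010u
    #define DISABLE_CLK_GATING_PE    0x00000004u
    #define DISABLE_CLK_GATING_RA_EZ 0x00010000u
    #define DISABLE_CLK_GATING_RA_HZ 0x00020000u
    #define FEAT0_HZ                 (1u << 0)  /* placeholder bit positions */
    #define FEAT1_DISABLE_PE_GATING  (1u << 1)

    struct gpu_id {    /* simplified stand-in for the etnaviv identity struct */
        uint32_t model, revision, minor_features0, minor_features1;
    };

    /* Compute the PM_MODULE_CONTROLS value the way the new helper does. */
    static uint32_t mlcg_module_controls(const struct gpu_id *id)
    {
        uint32_t pmc = 0;

        if (id->model >= 0x400 && id->model != 0x420)   /* GC400+ except GC420 */
            pmc |= DISABLE_CLK_GATING_PA;
        if (id->revision < 0x5000 &&
            (id->minor_features0 & FEAT0_HZ) &&
            !(id->minor_features1 & FEAT1_DISABLE_PE_GATING))
            pmc |= DISABLE_CLK_GATING_PE;
        if (id->revision < 0x5422)
            pmc |= 1u << 15;                            /* unknown bit, kept as-is */
        pmc |= DISABLE_CLK_GATING_RA_HZ | DISABLE_CLK_GATING_RA_EZ;
        return pmc;
    }

    int main(void)
    {
        struct gpu_id gc2000 = { .model = 0x2000, .revision = 0x5108 };

        printf("PM_MODULE_CONTROLS = 0x%08x\n",
               (unsigned)mlcg_module_controls(&gc2000));
        return 0;
    }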
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index f5321e2f25ff..a69cdd526bf8 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -125,7 +125,7 @@ struct etnaviv_gpu {
125 u32 completed_fence; 125 u32 completed_fence;
126 u32 retired_fence; 126 u32 retired_fence;
127 wait_queue_head_t fence_event; 127 wait_queue_head_t fence_event;
128 unsigned int fence_context; 128 u64 fence_context;
129 spinlock_t fence_spinlock; 129 spinlock_t fence_spinlock;
130 130
131 /* worker for handling active-list retiring: */ 131 /* worker for handling active-list retiring: */
diff --git a/drivers/gpu/drm/etnaviv/state_hi.xml.h b/drivers/gpu/drm/etnaviv/state_hi.xml.h
index 6a7de5f1454a..807a3d9e0dd5 100644
--- a/drivers/gpu/drm/etnaviv/state_hi.xml.h
+++ b/drivers/gpu/drm/etnaviv/state_hi.xml.h
@@ -218,6 +218,13 @@ Copyright (C) 2015
218#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_FE 0x00000001 218#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_FE 0x00000001
219#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_DE 0x00000002 219#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_DE 0x00000002
220#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PE 0x00000004 220#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PE 0x00000004
221#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_SH 0x00000008
222#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PA 0x00000010
223#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_SE 0x00000020
224#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA 0x00000040
225#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_TX 0x00000080
226#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_EZ 0x00010000
227#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_HZ 0x00020000
221 228
222#define VIVS_PM_MODULE_STATUS 0x00000108 229#define VIVS_PM_MODULE_STATUS 0x00000108
223#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_FE 0x00000001 230#define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_FE 0x00000001
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index d814b3048ee5..83f61c513b7e 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -2,10 +2,6 @@ config DRM_EXYNOS
2 tristate "DRM Support for Samsung SoC EXYNOS Series" 2 tristate "DRM Support for Samsung SoC EXYNOS Series"
3 depends on OF && DRM && (ARCH_S3C64XX || ARCH_EXYNOS || ARCH_MULTIPLATFORM) 3 depends on OF && DRM && (ARCH_S3C64XX || ARCH_EXYNOS || ARCH_MULTIPLATFORM)
4 select DRM_KMS_HELPER 4 select DRM_KMS_HELPER
5 select DRM_KMS_FB_HELPER
6 select FB_CFB_FILLRECT
7 select FB_CFB_COPYAREA
8 select FB_CFB_IMAGEBLIT
9 select VIDEOMODE_HELPERS 5 select VIDEOMODE_HELPERS
10 help 6 help
11 Choose this option if you have a Samsung SoC EXYNOS chipset. 7 Choose this option if you have a Samsung SoC EXYNOS chipset.
@@ -15,7 +11,7 @@ if DRM_EXYNOS
15 11
16config DRM_EXYNOS_IOMMU 12config DRM_EXYNOS_IOMMU
17 bool 13 bool
18 depends on EXYNOS_IOMMU && ARM_DMA_USE_IOMMU 14 depends on EXYNOS_IOMMU
19 default y 15 default y
20 16
21comment "CRTCs" 17comment "CRTCs"
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index 4c1fb3f8b5a6..4f0850585b8e 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -67,10 +67,10 @@ static int exynos_dp_poweroff(struct analogix_dp_plat_data *plat_data)
67 return exynos_dp_crtc_clock_enable(plat_data, false); 67 return exynos_dp_crtc_clock_enable(plat_data, false);
68} 68}
69 69
70static int exynos_dp_get_modes(struct analogix_dp_plat_data *plat_data) 70static int exynos_dp_get_modes(struct analogix_dp_plat_data *plat_data,
71 struct drm_connector *connector)
71{ 72{
72 struct exynos_dp_device *dp = to_dp(plat_data); 73 struct exynos_dp_device *dp = to_dp(plat_data);
73 struct drm_connector *connector = dp->connector;
74 struct drm_display_mode *mode; 74 struct drm_display_mode *mode;
75 int num_modes = 0; 75 int num_modes = 0;
76 76
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index 5e38e749ac17..ad6b73c7fc59 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -93,17 +93,8 @@ static int exynos_dpi_get_modes(struct drm_connector *connector)
93 return 0; 93 return 0;
94} 94}
95 95
96static struct drm_encoder *
97exynos_dpi_best_encoder(struct drm_connector *connector)
98{
99 struct exynos_dpi *ctx = connector_to_dpi(connector);
100
101 return &ctx->encoder;
102}
103
104static const struct drm_connector_helper_funcs exynos_dpi_connector_helper_funcs = { 96static const struct drm_connector_helper_funcs exynos_dpi_connector_helper_funcs = {
105 .get_modes = exynos_dpi_get_modes, 97 .get_modes = exynos_dpi_get_modes,
106 .best_encoder = exynos_dpi_best_encoder,
107}; 98};
108 99
109static int exynos_dpi_create_connector(struct drm_encoder *encoder) 100static int exynos_dpi_create_connector(struct drm_encoder *encoder)
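
This connector (and the DSI, VIDI and HDMI connectors later in the series) drops its .best_encoder callback: each of these connectors drives exactly one encoder, so the DRM core's default pick is sufficient. A minimal mock of what that default amounts to, with simplified structures rather than the actual drm_connector/drm_encoder types:

    #include <stddef.h>
    #include <stdio.h>

    struct encoder   { const char *name; };
    struct connector {
        struct encoder *encoders[4];   /* encoders this connector can feed; 1:1 here */
        size_t num_encoders;
    };

    /* Simplified model of the core fallback: with a single attached encoder,
     * "best" is simply the first entry, so per-driver callbacks that only
     * return &ctx->encoder add nothing. */
    static struct encoder *default_best_encoder(struct connector *c)
    {
        return c->num_encoders ? c->encoders[0] : NULL;
    }

    int main(void)
    {
        struct encoder dsi = { "DSI encoder" };
        struct connector conn = { .encoders = { &dsi }, .num_encoders = 1 };

        printf("best encoder: %s\n", default_best_encoder(&conn)->name);
        return 0;
    }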
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 2dd820e23b0c..877d2efa28e2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -159,12 +159,7 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
159 DRM_INFO("Exynos DRM: using %s device for DMA mapping operations\n", 159 DRM_INFO("Exynos DRM: using %s device for DMA mapping operations\n",
160 dev_name(private->dma_dev)); 160 dev_name(private->dma_dev));
161 161
162 /* 162 /* create common IOMMU mapping for all devices attached to Exynos DRM */
163 * create mapping to manage iommu table and set a pointer to iommu
164 * mapping structure to iommu_mapping of private data.
165 * also this iommu_mapping can be used to check if iommu is supported
166 * or not.
167 */
168 ret = drm_create_iommu_mapping(dev); 163 ret = drm_create_iommu_mapping(dev);
169 if (ret < 0) { 164 if (ret < 0) {
170 DRM_ERROR("failed to create iommu mapping.\n"); 165 DRM_ERROR("failed to create iommu mapping.\n");
@@ -267,6 +262,8 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
267{ 262{
268 struct exynos_drm_private *priv = dev->dev_private; 263 struct exynos_drm_private *priv = dev->dev_private;
269 struct exynos_atomic_commit *commit; 264 struct exynos_atomic_commit *commit;
265 struct drm_crtc *crtc;
266 struct drm_crtc_state *crtc_state;
270 int i, ret; 267 int i, ret;
271 268
272 commit = kzalloc(sizeof(*commit), GFP_KERNEL); 269 commit = kzalloc(sizeof(*commit), GFP_KERNEL);
@@ -288,10 +285,8 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
288 /* Wait until all affected CRTCs have completed previous commits and 285 /* Wait until all affected CRTCs have completed previous commits and
289 * mark them as pending. 286 * mark them as pending.
290 */ 287 */
291 for (i = 0; i < dev->mode_config.num_crtc; ++i) { 288 for_each_crtc_in_state(state, crtc, crtc_state, i)
292 if (state->crtcs[i]) 289 commit->crtcs |= drm_crtc_mask(crtc);
293 commit->crtcs |= 1 << drm_crtc_index(state->crtcs[i]);
294 }
295 290
296 wait_event(priv->wait, !commit_is_pending(priv, commit->crtcs)); 291 wait_event(priv->wait, !commit_is_pending(priv, commit->crtcs));
297 292
@@ -299,7 +294,7 @@ int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
299 priv->pending |= commit->crtcs; 294 priv->pending |= commit->crtcs;
300 spin_unlock(&priv->lock); 295 spin_unlock(&priv->lock);
301 296
302 drm_atomic_helper_swap_state(dev, state); 297 drm_atomic_helper_swap_state(state, true);
303 298
304 if (nonblock) 299 if (nonblock)
305 schedule_work(&commit->work); 300 schedule_work(&commit->work);
@@ -407,7 +402,6 @@ static struct drm_driver exynos_drm_driver = {
407 .preclose = exynos_drm_preclose, 402 .preclose = exynos_drm_preclose,
408 .lastclose = exynos_drm_lastclose, 403 .lastclose = exynos_drm_lastclose,
409 .postclose = exynos_drm_postclose, 404 .postclose = exynos_drm_postclose,
410 .set_busid = drm_platform_set_busid,
411 .get_vblank_counter = drm_vblank_no_hw_counter, 405 .get_vblank_counter = drm_vblank_no_hw_counter,
412 .enable_vblank = exynos_drm_crtc_enable_vblank, 406 .enable_vblank = exynos_drm_crtc_enable_vblank,
413 .disable_vblank = exynos_drm_crtc_disable_vblank, 407 .disable_vblank = exynos_drm_crtc_disable_vblank,
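
exynos_atomic_commit() now derives its pending mask from the CRTCs actually carried in the atomic state, via for_each_crtc_in_state() and drm_crtc_mask(), instead of scanning every CRTC index. A standalone sketch of the bitmask bookkeeping, with the DRM structures mocked:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct crtc { unsigned int index; };

    /* drm_crtc_mask() is just "1 << crtc index"; the commit records which
     * CRTCs it touches so later commits can wait on exactly those bits. */
    static uint32_t crtc_mask(const struct crtc *c) { return 1u << c->index; }

    int main(void)
    {
        struct crtc crtcs_in_state[] = { { .index = 0 }, { .index = 2 } };
        uint32_t pending = 0;

        for (size_t i = 0; i < sizeof(crtcs_in_state) / sizeof(crtcs_in_state[0]); i++)
            pending |= crtc_mask(&crtcs_in_state[i]);

        printf("pending CRTC mask: 0x%x\n", (unsigned)pending);  /* 0x5: CRTCs 0 and 2 */
        return 0;
    }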
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index cc33ec9296e7..b39d521f093d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -224,8 +224,6 @@ struct exynos_drm_private {
224 struct drm_property *plane_zpos_property; 224 struct drm_property *plane_zpos_property;
225 225
226 struct device *dma_dev; 226 struct device *dma_dev;
227 unsigned long da_start;
228 unsigned long da_space_size;
229 void *mapping; 227 void *mapping;
230 228
231 unsigned int pipe; 229 unsigned int pipe;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 601ecf8006a7..e07cb1fe4860 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1566,17 +1566,8 @@ static int exynos_dsi_get_modes(struct drm_connector *connector)
1566 return 0; 1566 return 0;
1567} 1567}
1568 1568
1569static struct drm_encoder *
1570exynos_dsi_best_encoder(struct drm_connector *connector)
1571{
1572 struct exynos_dsi *dsi = connector_to_dsi(connector);
1573
1574 return &dsi->encoder;
1575}
1576
1577static const struct drm_connector_helper_funcs exynos_dsi_connector_helper_funcs = { 1569static const struct drm_connector_helper_funcs exynos_dsi_connector_helper_funcs = {
1578 .get_modes = exynos_dsi_get_modes, 1570 .get_modes = exynos_dsi_get_modes,
1579 .best_encoder = exynos_dsi_best_encoder,
1580}; 1571};
1581 1572
1582static int exynos_dsi_create_connector(struct drm_encoder *encoder) 1573static int exynos_dsi_create_connector(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 67dcd6831291..fb49443bfd32 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -269,8 +269,7 @@ static void exynos_drm_fbdev_destroy(struct drm_device *dev,
269 struct exynos_drm_gem *exynos_gem = exynos_fbd->exynos_gem; 269 struct exynos_drm_gem *exynos_gem = exynos_fbd->exynos_gem;
270 struct drm_framebuffer *fb; 270 struct drm_framebuffer *fb;
271 271
272 if (exynos_gem->kvaddr) 272 vunmap(exynos_gem->kvaddr);
273 vunmap(exynos_gem->kvaddr);
274 273
275 /* release drm framebuffer and real buffer */ 274 /* release drm framebuffer and real buffer */
276 if (fb_helper->fb && fb_helper->fb->funcs) { 275 if (fb_helper->fb && fb_helper->fb->funcs) {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.c b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
index 7ca09ee19656..0f373702414e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.c
@@ -14,13 +14,27 @@
14 14
15#include <linux/dma-mapping.h> 15#include <linux/dma-mapping.h>
16#include <linux/iommu.h> 16#include <linux/iommu.h>
17#include <linux/kref.h>
18
19#include <asm/dma-iommu.h>
20 17
21#include "exynos_drm_drv.h" 18#include "exynos_drm_drv.h"
22#include "exynos_drm_iommu.h" 19#include "exynos_drm_iommu.h"
23 20
21static inline int configure_dma_max_seg_size(struct device *dev)
22{
23 if (!dev->dma_parms)
24 dev->dma_parms = kzalloc(sizeof(*dev->dma_parms), GFP_KERNEL);
25 if (!dev->dma_parms)
26 return -ENOMEM;
27
28 dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
29 return 0;
30}
31
32static inline void clear_dma_max_seg_size(struct device *dev)
33{
34 kfree(dev->dma_parms);
35 dev->dma_parms = NULL;
36}
37
24/* 38/*
25 * drm_create_iommu_mapping - create a mapping structure 39 * drm_create_iommu_mapping - create a mapping structure
26 * 40 *
@@ -28,38 +42,22 @@
28 */ 42 */
29int drm_create_iommu_mapping(struct drm_device *drm_dev) 43int drm_create_iommu_mapping(struct drm_device *drm_dev)
30{ 44{
31 struct dma_iommu_mapping *mapping = NULL;
32 struct exynos_drm_private *priv = drm_dev->dev_private; 45 struct exynos_drm_private *priv = drm_dev->dev_private;
33 46
34 if (!priv->da_start) 47 return __exynos_iommu_create_mapping(priv, EXYNOS_DEV_ADDR_START,
35 priv->da_start = EXYNOS_DEV_ADDR_START; 48 EXYNOS_DEV_ADDR_SIZE);
36 if (!priv->da_space_size)
37 priv->da_space_size = EXYNOS_DEV_ADDR_SIZE;
38
39 mapping = arm_iommu_create_mapping(&platform_bus_type, priv->da_start,
40 priv->da_space_size);
41
42 if (IS_ERR(mapping))
43 return PTR_ERR(mapping);
44
45 priv->mapping = mapping;
46
47 return 0;
48} 49}
49 50
50/* 51/*
51 * drm_release_iommu_mapping - release iommu mapping structure 52 * drm_release_iommu_mapping - release iommu mapping structure
52 * 53 *
53 * @drm_dev: DRM device 54 * @drm_dev: DRM device
54 *
55 * if mapping->kref becomes 0 then all things related to iommu mapping
56 * will be released
57 */ 55 */
58void drm_release_iommu_mapping(struct drm_device *drm_dev) 56void drm_release_iommu_mapping(struct drm_device *drm_dev)
59{ 57{
60 struct exynos_drm_private *priv = drm_dev->dev_private; 58 struct exynos_drm_private *priv = drm_dev->dev_private;
61 59
62 arm_iommu_release_mapping(priv->mapping); 60 __exynos_iommu_release_mapping(priv);
63} 61}
64 62
65/* 63/*
@@ -77,25 +75,19 @@ int drm_iommu_attach_device(struct drm_device *drm_dev,
77 struct exynos_drm_private *priv = drm_dev->dev_private; 75 struct exynos_drm_private *priv = drm_dev->dev_private;
78 int ret; 76 int ret;
79 77
80 if (!priv->mapping) 78 if (get_dma_ops(priv->dma_dev) != get_dma_ops(subdrv_dev)) {
81 return 0; 79 DRM_ERROR("Device %s lacks support for IOMMU\n",
82 80 dev_name(subdrv_dev));
83 subdrv_dev->dma_parms = devm_kzalloc(subdrv_dev, 81 return -EINVAL;
84 sizeof(*subdrv_dev->dma_parms), 82 }
85 GFP_KERNEL);
86 if (!subdrv_dev->dma_parms)
87 return -ENOMEM;
88
89 dma_set_max_seg_size(subdrv_dev, 0xffffffffu);
90
91 if (subdrv_dev->archdata.mapping)
92 arm_iommu_detach_device(subdrv_dev);
93 83
94 ret = arm_iommu_attach_device(subdrv_dev, priv->mapping); 84 ret = configure_dma_max_seg_size(subdrv_dev);
95 if (ret < 0) { 85 if (ret)
96 DRM_DEBUG_KMS("failed iommu attach.\n");
97 return ret; 86 return ret;
98 } 87
88 ret = __exynos_iommu_attach(priv, subdrv_dev);
89 if (ret)
90 clear_dma_max_seg_size(subdrv_dev);
99 91
100 return 0; 92 return 0;
101} 93}
@@ -113,10 +105,7 @@ void drm_iommu_detach_device(struct drm_device *drm_dev,
113 struct device *subdrv_dev) 105 struct device *subdrv_dev)
114{ 106{
115 struct exynos_drm_private *priv = drm_dev->dev_private; 107 struct exynos_drm_private *priv = drm_dev->dev_private;
116 struct dma_iommu_mapping *mapping = priv->mapping;
117
118 if (!mapping || !mapping->domain)
119 return;
120 108
121 arm_iommu_detach_device(subdrv_dev); 109 __exynos_iommu_detach(priv, subdrv_dev);
110 clear_dma_max_seg_size(subdrv_dev);
122} 111}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
index 5ffebe02ee4d..c8de4913fdbe 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
@@ -17,6 +17,97 @@
17 17
18#ifdef CONFIG_DRM_EXYNOS_IOMMU 18#ifdef CONFIG_DRM_EXYNOS_IOMMU
19 19
20#if defined(CONFIG_ARM_DMA_USE_IOMMU)
21#include <asm/dma-iommu.h>
22
23static inline int __exynos_iommu_create_mapping(struct exynos_drm_private *priv,
24 unsigned long start, unsigned long size)
25{
26 priv->mapping = arm_iommu_create_mapping(&platform_bus_type, start,
27 size);
28 return IS_ERR(priv->mapping);
29}
30
31static inline void
32__exynos_iommu_release_mapping(struct exynos_drm_private *priv)
33{
34 arm_iommu_release_mapping(priv->mapping);
35}
36
37static inline int __exynos_iommu_attach(struct exynos_drm_private *priv,
38 struct device *dev)
39{
40 if (dev->archdata.mapping)
41 arm_iommu_detach_device(dev);
42
43 return arm_iommu_attach_device(dev, priv->mapping);
44}
45
46static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
47 struct device *dev)
48{
49 arm_iommu_detach_device(dev);
50}
51
52#elif defined(CONFIG_IOMMU_DMA)
53#include <linux/dma-iommu.h>
54
55static inline int __exynos_iommu_create_mapping(struct exynos_drm_private *priv,
56 unsigned long start, unsigned long size)
57{
58 struct iommu_domain *domain;
59 int ret;
60
61 domain = iommu_domain_alloc(priv->dma_dev->bus);
62 if (!domain)
63 return -ENOMEM;
64
65 ret = iommu_get_dma_cookie(domain);
66 if (ret)
67 goto free_domain;
68
69 ret = iommu_dma_init_domain(domain, start, size);
70 if (ret)
71 goto put_cookie;
72
73 priv->mapping = domain;
74 return 0;
75
76put_cookie:
77 iommu_put_dma_cookie(domain);
78free_domain:
79 iommu_domain_free(domain);
80 return ret;
81}
82
83static inline void __exynos_iommu_release_mapping(struct exynos_drm_private *priv)
84{
85 struct iommu_domain *domain = priv->mapping;
86
87 iommu_put_dma_cookie(domain);
88 iommu_domain_free(domain);
89 priv->mapping = NULL;
90}
91
92static inline int __exynos_iommu_attach(struct exynos_drm_private *priv,
93 struct device *dev)
94{
95 struct iommu_domain *domain = priv->mapping;
96
97 return iommu_attach_device(domain, dev);
98}
99
100static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
101 struct device *dev)
102{
103 struct iommu_domain *domain = priv->mapping;
104
105 iommu_detach_device(domain, dev);
106}
107#else
108#error Unsupported architecture and IOMMU/DMA-mapping glue code
109#endif
110
20int drm_create_iommu_mapping(struct drm_device *drm_dev); 111int drm_create_iommu_mapping(struct drm_device *drm_dev);
21 112
22void drm_release_iommu_mapping(struct drm_device *drm_dev); 113void drm_release_iommu_mapping(struct drm_device *drm_dev);
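
The header now carries two interchangeable sets of static inline glue, one per IOMMU/DMA-mapping backend, selected at build time so callers such as drm_iommu_attach_device() stay backend-agnostic. A toy illustration of that compile-time dispatch pattern (the backends and config symbol below are hypothetical, not the Exynos ones):

    #include <stdio.h>

    /* Pick one backend the way the header does with CONFIG_* symbols. */
    #define USE_BACKEND_A 1

    #if defined(USE_BACKEND_A)
    static inline int backend_attach(const char *dev) { printf("A: attach %s\n", dev); return 0; }
    #elif defined(USE_BACKEND_B)
    static inline int backend_attach(const char *dev) { printf("B: attach %s\n", dev); return 0; }
    #else
    #error Unsupported configuration
    #endif

    /* Callers stay backend-agnostic, like drm_iommu_attach_device() above. */
    int main(void)
    {
        return backend_attach("exynos-drm");
    }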
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 608b0afa337f..e8f6c92b2a36 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -378,16 +378,8 @@ static int vidi_get_modes(struct drm_connector *connector)
378 return drm_add_edid_modes(connector, edid); 378 return drm_add_edid_modes(connector, edid);
379} 379}
380 380
381static struct drm_encoder *vidi_best_encoder(struct drm_connector *connector)
382{
383 struct vidi_context *ctx = ctx_from_connector(connector);
384
385 return &ctx->encoder;
386}
387
388static const struct drm_connector_helper_funcs vidi_connector_helper_funcs = { 381static const struct drm_connector_helper_funcs vidi_connector_helper_funcs = {
389 .get_modes = vidi_get_modes, 382 .get_modes = vidi_get_modes,
390 .best_encoder = vidi_best_encoder,
391}; 383};
392 384
393static int vidi_create_connector(struct drm_encoder *encoder) 385static int vidi_create_connector(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 58de5a430508..2275efe41acd 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -937,17 +937,9 @@ static int hdmi_mode_valid(struct drm_connector *connector,
937 return MODE_OK; 937 return MODE_OK;
938} 938}
939 939
940static struct drm_encoder *hdmi_best_encoder(struct drm_connector *connector)
941{
942 struct hdmi_context *hdata = connector_to_hdmi(connector);
943
944 return &hdata->encoder;
945}
946
947static const struct drm_connector_helper_funcs hdmi_connector_helper_funcs = { 940static const struct drm_connector_helper_funcs hdmi_connector_helper_funcs = {
948 .get_modes = hdmi_get_modes, 941 .get_modes = hdmi_get_modes,
949 .mode_valid = hdmi_mode_valid, 942 .mode_valid = hdmi_mode_valid,
950 .best_encoder = hdmi_best_encoder,
951}; 943};
952 944
953static int hdmi_create_connector(struct drm_encoder *encoder) 945static int hdmi_create_connector(struct drm_encoder *encoder)
@@ -1828,6 +1820,7 @@ static int hdmi_probe(struct platform_device *pdev)
1828 DRM_ERROR("Failed to find ddc node in device tree\n"); 1820 DRM_ERROR("Failed to find ddc node in device tree\n");
1829 return -ENODEV; 1821 return -ENODEV;
1830 } 1822 }
1823 of_node_put(dev->of_node);
1831 1824
1832out_get_ddc_adpt: 1825out_get_ddc_adpt:
1833 hdata->ddc_adpt = of_find_i2c_adapter_by_node(ddc_node); 1826 hdata->ddc_adpt = of_find_i2c_adapter_by_node(ddc_node);
@@ -1846,6 +1839,7 @@ out_get_ddc_adpt:
1846 ret = -ENODEV; 1839 ret = -ENODEV;
1847 goto err_ddc; 1840 goto err_ddc;
1848 } 1841 }
1842 of_node_put(dev->of_node);
1849 1843
1850out_get_phy_port: 1844out_get_phy_port:
1851 if (hdata->drv_data->is_apb_phy) { 1845 if (hdata->drv_data->is_apb_phy) {
diff --git a/drivers/gpu/drm/fsl-dcu/Kconfig b/drivers/gpu/drm/fsl-dcu/Kconfig
index b9c714de6e40..14a72c4c496d 100644
--- a/drivers/gpu/drm/fsl-dcu/Kconfig
+++ b/drivers/gpu/drm/fsl-dcu/Kconfig
@@ -5,12 +5,7 @@ config DRM_FSL_DCU
5 select BACKLIGHT_LCD_SUPPORT 5 select BACKLIGHT_LCD_SUPPORT
6 select DRM_KMS_HELPER 6 select DRM_KMS_HELPER
7 select DRM_KMS_CMA_HELPER 7 select DRM_KMS_CMA_HELPER
8 select DRM_KMS_FB_HELPER
9 select DRM_PANEL 8 select DRM_PANEL
10 select FB_SYS_FILLRECT
11 select FB_SYS_COPYAREA
12 select FB_SYS_IMAGEBLIT
13 select FB_SYS_FOPS
14 select REGMAP_MMIO 9 select REGMAP_MMIO
15 select VIDEOMODE_HELPERS 10 select VIDEOMODE_HELPERS
16 help 11 help
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
index 89c0084c2814..3371635cd4d7 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c
@@ -22,20 +22,21 @@
22#include "fsl_dcu_drm_drv.h" 22#include "fsl_dcu_drm_drv.h"
23#include "fsl_dcu_drm_plane.h" 23#include "fsl_dcu_drm_plane.h"
24 24
25static void fsl_dcu_drm_crtc_atomic_begin(struct drm_crtc *crtc, 25static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc,
26 struct drm_crtc_state *old_crtc_state) 26 struct drm_crtc_state *old_crtc_state)
27{ 27{
28} 28 struct drm_pending_vblank_event *event = crtc->state->event;
29 29
30static int fsl_dcu_drm_crtc_atomic_check(struct drm_crtc *crtc, 30 if (event) {
31 struct drm_crtc_state *state) 31 crtc->state->event = NULL;
32{
33 return 0;
34}
35 32
36static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc, 33 spin_lock_irq(&crtc->dev->event_lock);
37 struct drm_crtc_state *old_crtc_state) 34 if (drm_crtc_vblank_get(crtc) == 0)
38{ 35 drm_crtc_arm_vblank_event(crtc, event);
36 else
37 drm_crtc_send_vblank_event(crtc, event);
38 spin_unlock_irq(&crtc->dev->event_lock);
39 }
39} 40}
40 41
41static void fsl_dcu_drm_disable_crtc(struct drm_crtc *crtc) 42static void fsl_dcu_drm_disable_crtc(struct drm_crtc *crtc)
@@ -43,6 +44,8 @@ static void fsl_dcu_drm_disable_crtc(struct drm_crtc *crtc)
43 struct drm_device *dev = crtc->dev; 44 struct drm_device *dev = crtc->dev;
44 struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; 45 struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
45 46
47 drm_crtc_vblank_off(crtc);
48
46 regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE, 49 regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
47 DCU_MODE_DCU_MODE_MASK, 50 DCU_MODE_DCU_MODE_MASK,
48 DCU_MODE_DCU_MODE(DCU_MODE_OFF)); 51 DCU_MODE_DCU_MODE(DCU_MODE_OFF));
@@ -60,6 +63,8 @@ static void fsl_dcu_drm_crtc_enable(struct drm_crtc *crtc)
60 DCU_MODE_DCU_MODE(DCU_MODE_NORMAL)); 63 DCU_MODE_DCU_MODE(DCU_MODE_NORMAL));
61 regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE, 64 regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
62 DCU_UPDATE_MODE_READREG); 65 DCU_UPDATE_MODE_READREG);
66
67 drm_crtc_vblank_on(crtc);
63} 68}
64 69
65static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc) 70static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
@@ -117,8 +122,6 @@ static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
117} 122}
118 123
119static const struct drm_crtc_helper_funcs fsl_dcu_drm_crtc_helper_funcs = { 124static const struct drm_crtc_helper_funcs fsl_dcu_drm_crtc_helper_funcs = {
120 .atomic_begin = fsl_dcu_drm_crtc_atomic_begin,
121 .atomic_check = fsl_dcu_drm_crtc_atomic_check,
122 .atomic_flush = fsl_dcu_drm_crtc_atomic_flush, 125 .atomic_flush = fsl_dcu_drm_crtc_atomic_flush,
123 .disable = fsl_dcu_drm_disable_crtc, 126 .disable = fsl_dcu_drm_disable_crtc,
124 .enable = fsl_dcu_drm_crtc_enable, 127 .enable = fsl_dcu_drm_crtc_enable,
@@ -138,9 +141,10 @@ int fsl_dcu_drm_crtc_create(struct fsl_dcu_drm_device *fsl_dev)
138{ 141{
139 struct drm_plane *primary; 142 struct drm_plane *primary;
140 struct drm_crtc *crtc = &fsl_dev->crtc; 143 struct drm_crtc *crtc = &fsl_dev->crtc;
141 unsigned int i, j, reg_num;
142 int ret; 144 int ret;
143 145
146 fsl_dcu_drm_init_planes(fsl_dev->drm);
147
144 primary = fsl_dcu_drm_primary_create_plane(fsl_dev->drm); 148 primary = fsl_dcu_drm_primary_create_plane(fsl_dev->drm);
145 if (!primary) 149 if (!primary)
146 return -ENOMEM; 150 return -ENOMEM;
@@ -154,19 +158,5 @@ int fsl_dcu_drm_crtc_create(struct fsl_dcu_drm_device *fsl_dev)
154 158
155 drm_crtc_helper_add(crtc, &fsl_dcu_drm_crtc_helper_funcs); 159 drm_crtc_helper_add(crtc, &fsl_dcu_drm_crtc_helper_funcs);
156 160
157 if (!strcmp(fsl_dev->soc->name, "ls1021a"))
158 reg_num = LS1021A_LAYER_REG_NUM;
159 else
160 reg_num = VF610_LAYER_REG_NUM;
161 for (i = 0; i < fsl_dev->soc->total_layer; i++) {
162 for (j = 1; j <= reg_num; j++)
163 regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(i, j), 0);
164 }
165 regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
166 DCU_MODE_DCU_MODE_MASK,
167 DCU_MODE_DCU_MODE(DCU_MODE_OFF));
168 regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
169 DCU_UPDATE_MODE_READREG);
170
171 return 0; 161 return 0;
172} 162}
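
The rewritten atomic_flush hands any pending pageflip event to the vblank code: when a vblank reference can be taken, the event is armed and completed from the next vblank interrupt; otherwise it is sent immediately so userspace is not left waiting. The hisilicon kirin driver gains the same pattern later in this pull. A simplified userspace model of that choice, with the DRM calls replaced by stubs (the real drm_crtc_vblank_get() returns 0 on success; the stub below returns true on success instead):

    #include <stdbool.h>
    #include <stdio.h>

    struct vblank_event { int id; };

    /* Stand-ins for drm_crtc_vblank_get()/arm/send; in the kernel the armed
     * event is completed from the vblank interrupt handler. */
    static bool vblank_get(bool vblank_enabled)    { return vblank_enabled; }
    static void arm_event(struct vblank_event *e)  { printf("event %d armed for next vblank\n", e->id); }
    static void send_event(struct vblank_event *e) { printf("event %d sent immediately\n", e->id); }

    static void flush_event(struct vblank_event *e, bool vblank_enabled)
    {
        if (!e)
            return;
        if (vblank_get(vblank_enabled))   /* stub: true means the reference was taken */
            arm_event(e);
        else
            send_event(e);
    }

    int main(void)
    {
        struct vblank_event ev = { .id = 1 };

        flush_event(&ev, true);    /* CRTC running: complete on vblank */
        flush_event(&ev, false);   /* CRTC off: complete right away */
        return 0;
    }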
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
index dc723f7ead7d..7882387f9bff 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c
@@ -11,6 +11,7 @@
11 11
12#include <linux/clk.h> 12#include <linux/clk.h>
13#include <linux/clk-provider.h> 13#include <linux/clk-provider.h>
14#include <linux/console.h>
14#include <linux/io.h> 15#include <linux/io.h>
15#include <linux/mfd/syscon.h> 16#include <linux/mfd/syscon.h>
16#include <linux/mm.h> 17#include <linux/mm.h>
@@ -22,6 +23,7 @@
22#include <linux/regmap.h> 23#include <linux/regmap.h>
23 24
24#include <drm/drmP.h> 25#include <drm/drmP.h>
26#include <drm/drm_atomic_helper.h>
25#include <drm/drm_crtc_helper.h> 27#include <drm/drm_crtc_helper.h>
26#include <drm/drm_fb_cma_helper.h> 28#include <drm/drm_fb_cma_helper.h>
27#include <drm/drm_gem_cma_helper.h> 29#include <drm/drm_gem_cma_helper.h>
@@ -42,10 +44,8 @@ static const struct regmap_config fsl_dcu_regmap_config = {
42 .reg_bits = 32, 44 .reg_bits = 32,
43 .reg_stride = 4, 45 .reg_stride = 4,
44 .val_bits = 32, 46 .val_bits = 32,
45 .cache_type = REGCACHE_FLAT,
46 47
47 .volatile_reg = fsl_dcu_drm_is_volatile_reg, 48 .volatile_reg = fsl_dcu_drm_is_volatile_reg,
48 .max_register = 0x11fc,
49}; 49};
50 50
51static int fsl_dcu_drm_irq_init(struct drm_device *dev) 51static int fsl_dcu_drm_irq_init(struct drm_device *dev)
@@ -199,7 +199,7 @@ static struct drm_driver fsl_dcu_drm_driver = {
199 .get_vblank_counter = drm_vblank_no_hw_counter, 199 .get_vblank_counter = drm_vblank_no_hw_counter,
200 .enable_vblank = fsl_dcu_drm_enable_vblank, 200 .enable_vblank = fsl_dcu_drm_enable_vblank,
201 .disable_vblank = fsl_dcu_drm_disable_vblank, 201 .disable_vblank = fsl_dcu_drm_disable_vblank,
202 .gem_free_object = drm_gem_cma_free_object, 202 .gem_free_object_unlocked = drm_gem_cma_free_object,
203 .gem_vm_ops = &drm_gem_cma_vm_ops, 203 .gem_vm_ops = &drm_gem_cma_vm_ops,
204 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 204 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
205 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 205 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
@@ -229,11 +229,26 @@ static int fsl_dcu_drm_pm_suspend(struct device *dev)
229 if (!fsl_dev) 229 if (!fsl_dev)
230 return 0; 230 return 0;
231 231
232 disable_irq(fsl_dev->irq);
232 drm_kms_helper_poll_disable(fsl_dev->drm); 233 drm_kms_helper_poll_disable(fsl_dev->drm);
233 regcache_cache_only(fsl_dev->regmap, true); 234
234 regcache_mark_dirty(fsl_dev->regmap); 235 console_lock();
235 clk_disable(fsl_dev->clk); 236 drm_fbdev_cma_set_suspend(fsl_dev->fbdev, 1);
236 clk_unprepare(fsl_dev->clk); 237 console_unlock();
238
239 fsl_dev->state = drm_atomic_helper_suspend(fsl_dev->drm);
240 if (IS_ERR(fsl_dev->state)) {
241 console_lock();
242 drm_fbdev_cma_set_suspend(fsl_dev->fbdev, 0);
243 console_unlock();
244
245 drm_kms_helper_poll_enable(fsl_dev->drm);
246 enable_irq(fsl_dev->irq);
247 return PTR_ERR(fsl_dev->state);
248 }
249
250 clk_disable_unprepare(fsl_dev->pix_clk);
251 clk_disable_unprepare(fsl_dev->clk);
237 252
238 return 0; 253 return 0;
239} 254}
@@ -246,21 +261,27 @@ static int fsl_dcu_drm_pm_resume(struct device *dev)
246 if (!fsl_dev) 261 if (!fsl_dev)
247 return 0; 262 return 0;
248 263
249 ret = clk_enable(fsl_dev->clk); 264 ret = clk_prepare_enable(fsl_dev->clk);
250 if (ret < 0) { 265 if (ret < 0) {
251 dev_err(dev, "failed to enable dcu clk\n"); 266 dev_err(dev, "failed to enable dcu clk\n");
252 clk_unprepare(fsl_dev->clk);
253 return ret; 267 return ret;
254 } 268 }
255 ret = clk_prepare(fsl_dev->clk); 269
270 ret = clk_prepare_enable(fsl_dev->pix_clk);
256 if (ret < 0) { 271 if (ret < 0) {
257 dev_err(dev, "failed to prepare dcu clk\n"); 272 dev_err(dev, "failed to enable pix clk\n");
258 return ret; 273 return ret;
259 } 274 }
260 275
276 fsl_dcu_drm_init_planes(fsl_dev->drm);
277 drm_atomic_helper_resume(fsl_dev->drm, fsl_dev->state);
278
279 console_lock();
280 drm_fbdev_cma_set_suspend(fsl_dev->fbdev, 0);
281 console_unlock();
282
261 drm_kms_helper_poll_enable(fsl_dev->drm); 283 drm_kms_helper_poll_enable(fsl_dev->drm);
262 regcache_cache_only(fsl_dev->regmap, false); 284 enable_irq(fsl_dev->irq);
263 regcache_sync(fsl_dev->regmap);
264 285
265 return 0; 286 return 0;
266} 287}
@@ -274,12 +295,14 @@ static const struct fsl_dcu_soc_data fsl_dcu_ls1021a_data = {
274 .name = "ls1021a", 295 .name = "ls1021a",
275 .total_layer = 16, 296 .total_layer = 16,
276 .max_layer = 4, 297 .max_layer = 4,
298 .layer_regs = LS1021A_LAYER_REG_NUM,
277}; 299};
278 300
279static const struct fsl_dcu_soc_data fsl_dcu_vf610_data = { 301static const struct fsl_dcu_soc_data fsl_dcu_vf610_data = {
280 .name = "vf610", 302 .name = "vf610",
281 .total_layer = 64, 303 .total_layer = 64,
282 .max_layer = 6, 304 .max_layer = 6,
305 .layer_regs = VF610_LAYER_REG_NUM,
283}; 306};
284 307
285static const struct of_device_id fsl_dcu_of_match[] = { 308static const struct of_device_id fsl_dcu_of_match[] = {
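
Suspend/resume now goes through drm_atomic_helper_suspend()/resume() and uses the full clk_prepare_enable()/clk_disable_unprepare() pairing for both the bus and pixel clocks. A toy sketch of that balanced pairing; the rollback on a pixel-clock failure is added here for completeness and is not part of the hunk above:

    #include <stdio.h>

    /* Toy clock with prepare/enable counters; the stubs model the pairing
     * only, not the real clk API. */
    struct clk { const char *name; int prepared, enabled; };

    static int  clk_prepare_enable(struct clk *c)    { c->prepared++; c->enabled++; return 0; }
    static void clk_disable_unprepare(struct clk *c) { c->enabled--; c->prepared--; }

    static void dcu_suspend(struct clk *bus, struct clk *pix)
    {
        clk_disable_unprepare(pix);
        clk_disable_unprepare(bus);
    }

    static int dcu_resume(struct clk *bus, struct clk *pix)
    {
        if (clk_prepare_enable(bus))
            return -1;
        if (clk_prepare_enable(pix)) {
            clk_disable_unprepare(bus);   /* keep the counts balanced on failure */
            return -1;
        }
        return 0;
    }

    int main(void)
    {
        struct clk bus = { "dcu", 1, 1 }, pix = { "pix", 1, 1 };

        dcu_suspend(&bus, &pix);
        dcu_resume(&bus, &pix);
        printf("%s: prepared=%d enabled=%d\n", bus.name, bus.prepared, bus.enabled);
        printf("%s: prepared=%d enabled=%d\n", pix.name, pix.prepared, pix.enabled);
        return 0;
    }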
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
index c275f900ff23..3b371fe7491e 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h
@@ -175,6 +175,7 @@ struct fsl_dcu_soc_data {
175 unsigned int total_layer; 175 unsigned int total_layer;
176 /*max layer number DCU supported*/ 176 /*max layer number DCU supported*/
177 unsigned int max_layer; 177 unsigned int max_layer;
178 unsigned int layer_regs;
178}; 179};
179 180
180struct fsl_dcu_drm_device { 181struct fsl_dcu_drm_device {
@@ -193,6 +194,7 @@ struct fsl_dcu_drm_device {
193 struct drm_encoder encoder; 194 struct drm_encoder encoder;
194 struct fsl_dcu_drm_connector connector; 195 struct fsl_dcu_drm_connector connector;
195 const struct fsl_dcu_soc_data *soc; 196 const struct fsl_dcu_soc_data *soc;
197 struct drm_atomic_state *state;
196}; 198};
197 199
198void fsl_dcu_fbdev_init(struct drm_device *dev); 200void fsl_dcu_fbdev_init(struct drm_device *dev);
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c
index c564ec612b59..d9d6cc1c8e39 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_kms.c
@@ -37,23 +37,22 @@ int fsl_dcu_drm_modeset_init(struct fsl_dcu_drm_device *fsl_dev)
37 37
38 ret = fsl_dcu_drm_crtc_create(fsl_dev); 38 ret = fsl_dcu_drm_crtc_create(fsl_dev);
39 if (ret) 39 if (ret)
40 return ret; 40 goto err;
41 41
42 ret = fsl_dcu_drm_encoder_create(fsl_dev, &fsl_dev->crtc); 42 ret = fsl_dcu_drm_encoder_create(fsl_dev, &fsl_dev->crtc);
43 if (ret) 43 if (ret)
44 goto fail_encoder; 44 goto err;
45 45
46 ret = fsl_dcu_drm_connector_create(fsl_dev, &fsl_dev->encoder); 46 ret = fsl_dcu_create_outputs(fsl_dev);
47 if (ret) 47 if (ret)
48 goto fail_connector; 48 goto err;
49 49
50 drm_mode_config_reset(fsl_dev->drm); 50 drm_mode_config_reset(fsl_dev->drm);
51 drm_kms_helper_poll_init(fsl_dev->drm); 51 drm_kms_helper_poll_init(fsl_dev->drm);
52 52
53 return 0; 53 return 0;
54fail_encoder: 54
55 fsl_dev->crtc.funcs->destroy(&fsl_dev->crtc); 55err:
56fail_connector: 56 drm_mode_config_cleanup(fsl_dev->drm);
57 fsl_dev->encoder.funcs->destroy(&fsl_dev->encoder);
58 return ret; 57 return ret;
59} 58}
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_output.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_output.h
index 7093109fbc21..5a7b88e19e44 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_output.h
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_output.h
@@ -25,9 +25,8 @@ to_fsl_dcu_connector(struct drm_connector *con)
25 : NULL; 25 : NULL;
26} 26}
27 27
28int fsl_dcu_drm_connector_create(struct fsl_dcu_drm_device *fsl_dev,
29 struct drm_encoder *encoder);
30int fsl_dcu_drm_encoder_create(struct fsl_dcu_drm_device *fsl_dev, 28int fsl_dcu_drm_encoder_create(struct fsl_dcu_drm_device *fsl_dev,
31 struct drm_crtc *crtc); 29 struct drm_crtc *crtc);
30int fsl_dcu_create_outputs(struct fsl_dcu_drm_device *fsl_dev);
32 31
33#endif /* __FSL_DCU_DRM_CONNECTOR_H__ */ 32#endif /* __FSL_DCU_DRM_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
index 274558b3b32b..e50467a0deb0 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c
@@ -217,6 +217,22 @@ static const u32 fsl_dcu_drm_plane_formats[] = {
217 DRM_FORMAT_YUV422, 217 DRM_FORMAT_YUV422,
218}; 218};
219 219
220void fsl_dcu_drm_init_planes(struct drm_device *dev)
221{
222 struct fsl_dcu_drm_device *fsl_dev = dev->dev_private;
223 int i, j;
224
225 for (i = 0; i < fsl_dev->soc->total_layer; i++) {
226 for (j = 1; j <= fsl_dev->soc->layer_regs; j++)
227 regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(i, j), 0);
228 }
229 regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE,
230 DCU_MODE_DCU_MODE_MASK,
231 DCU_MODE_DCU_MODE(DCU_MODE_OFF));
232 regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE,
233 DCU_UPDATE_MODE_READREG);
234}
235
220struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev) 236struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev)
221{ 237{
222 struct drm_plane *primary; 238 struct drm_plane *primary;
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.h
index d657f088d859..8ee45f813ee8 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.h
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.h
@@ -12,6 +12,7 @@
12#ifndef __FSL_DCU_DRM_PLANE_H__ 12#ifndef __FSL_DCU_DRM_PLANE_H__
13#define __FSL_DCU_DRM_PLANE_H__ 13#define __FSL_DCU_DRM_PLANE_H__
14 14
15void fsl_dcu_drm_init_planes(struct drm_device *dev);
15struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev); 16struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev);
16 17
17#endif /* __FSL_DCU_DRM_PLANE_H__ */ 18#endif /* __FSL_DCU_DRM_PLANE_H__ */
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index 98c998da91eb..26edcc899712 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -10,6 +10,7 @@
10 */ 10 */
11 11
12#include <linux/backlight.h> 12#include <linux/backlight.h>
13#include <linux/of_graph.h>
13 14
14#include <drm/drmP.h> 15#include <drm/drmP.h>
15#include <drm/drm_atomic_helper.h> 16#include <drm/drm_atomic_helper.h>
@@ -102,14 +103,6 @@ static const struct drm_connector_funcs fsl_dcu_drm_connector_funcs = {
102 .reset = drm_atomic_helper_connector_reset, 103 .reset = drm_atomic_helper_connector_reset,
103}; 104};
104 105
105static struct drm_encoder *
106fsl_dcu_drm_connector_best_encoder(struct drm_connector *connector)
107{
108 struct fsl_dcu_drm_connector *fsl_con = to_fsl_dcu_connector(connector);
109
110 return fsl_con->encoder;
111}
112
113static int fsl_dcu_drm_connector_get_modes(struct drm_connector *connector) 106static int fsl_dcu_drm_connector_get_modes(struct drm_connector *connector)
114{ 107{
115 struct fsl_dcu_drm_connector *fsl_connector; 108 struct fsl_dcu_drm_connector *fsl_connector;
@@ -136,17 +129,16 @@ static int fsl_dcu_drm_connector_mode_valid(struct drm_connector *connector,
136} 129}
137 130
138static const struct drm_connector_helper_funcs connector_helper_funcs = { 131static const struct drm_connector_helper_funcs connector_helper_funcs = {
139 .best_encoder = fsl_dcu_drm_connector_best_encoder,
140 .get_modes = fsl_dcu_drm_connector_get_modes, 132 .get_modes = fsl_dcu_drm_connector_get_modes,
141 .mode_valid = fsl_dcu_drm_connector_mode_valid, 133 .mode_valid = fsl_dcu_drm_connector_mode_valid,
142}; 134};
143 135
144int fsl_dcu_drm_connector_create(struct fsl_dcu_drm_device *fsl_dev, 136static int fsl_dcu_attach_panel(struct fsl_dcu_drm_device *fsl_dev,
145 struct drm_encoder *encoder) 137 struct drm_panel *panel)
146{ 138{
139 struct drm_encoder *encoder = &fsl_dev->encoder;
147 struct drm_connector *connector = &fsl_dev->connector.base; 140 struct drm_connector *connector = &fsl_dev->connector.base;
148 struct drm_mode_config *mode_config = &fsl_dev->drm->mode_config; 141 struct drm_mode_config *mode_config = &fsl_dev->drm->mode_config;
149 struct device_node *panel_node;
150 int ret; 142 int ret;
151 143
152 fsl_dev->connector.encoder = encoder; 144 fsl_dev->connector.encoder = encoder;
@@ -170,21 +162,7 @@ int fsl_dcu_drm_connector_create(struct fsl_dcu_drm_device *fsl_dev,
170 mode_config->dpms_property, 162 mode_config->dpms_property,
171 DRM_MODE_DPMS_OFF); 163 DRM_MODE_DPMS_OFF);
172 164
173 panel_node = of_parse_phandle(fsl_dev->np, "fsl,panel", 0); 165 ret = drm_panel_attach(panel, connector);
174 if (!panel_node) {
175 dev_err(fsl_dev->dev, "fsl,panel property not found\n");
176 ret = -ENODEV;
177 goto err_sysfs;
178 }
179
180 fsl_dev->connector.panel = of_drm_find_panel(panel_node);
181 if (!fsl_dev->connector.panel) {
182 ret = -EPROBE_DEFER;
183 goto err_panel;
184 }
185 of_node_put(panel_node);
186
187 ret = drm_panel_attach(fsl_dev->connector.panel, connector);
188 if (ret) { 166 if (ret) {
189 dev_err(fsl_dev->dev, "failed to attach panel\n"); 167 dev_err(fsl_dev->dev, "failed to attach panel\n");
190 goto err_sysfs; 168 goto err_sysfs;
@@ -192,11 +170,62 @@ int fsl_dcu_drm_connector_create(struct fsl_dcu_drm_device *fsl_dev,
192 170
193 return 0; 171 return 0;
194 172
195err_panel:
196 of_node_put(panel_node);
197err_sysfs: 173err_sysfs:
198 drm_connector_unregister(connector); 174 drm_connector_unregister(connector);
199err_cleanup: 175err_cleanup:
200 drm_connector_cleanup(connector); 176 drm_connector_cleanup(connector);
201 return ret; 177 return ret;
202} 178}
179
180static int fsl_dcu_attach_endpoint(struct fsl_dcu_drm_device *fsl_dev,
181 const struct of_endpoint *ep)
182{
183 struct drm_bridge *bridge;
184 struct device_node *np;
185
186 np = of_graph_get_remote_port_parent(ep->local_node);
187
188 fsl_dev->connector.panel = of_drm_find_panel(np);
189 if (fsl_dev->connector.panel) {
190 of_node_put(np);
191 return fsl_dcu_attach_panel(fsl_dev, fsl_dev->connector.panel);
192 }
193
194 bridge = of_drm_find_bridge(np);
195 of_node_put(np);
196 if (!bridge)
197 return -ENODEV;
198
199 fsl_dev->encoder.bridge = bridge;
200 bridge->encoder = &fsl_dev->encoder;
201
202 return drm_bridge_attach(fsl_dev->drm, bridge);
203}
204
205int fsl_dcu_create_outputs(struct fsl_dcu_drm_device *fsl_dev)
206{
207 struct of_endpoint ep;
208 struct device_node *ep_node, *panel_node;
209 int ret;
210
211 /* This is for backward compatibility */
212 panel_node = of_parse_phandle(fsl_dev->np, "fsl,panel", 0);
213 if (panel_node) {
214 fsl_dev->connector.panel = of_drm_find_panel(panel_node);
215 of_node_put(panel_node);
216 if (!fsl_dev->connector.panel)
217 return -EPROBE_DEFER;
218 return fsl_dcu_attach_panel(fsl_dev, fsl_dev->connector.panel);
219 }
220
221 ep_node = of_graph_get_next_endpoint(fsl_dev->np, NULL);
222 if (!ep_node)
223 return -ENODEV;
224
225 ret = of_graph_parse_endpoint(ep_node, &ep);
226 of_node_put(ep_node);
227 if (ret)
228 return -ENODEV;
229
230 return fsl_dcu_attach_endpoint(fsl_dev, &ep);
231}
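
fsl_dcu_create_outputs() keeps the legacy fsl,panel phandle path and otherwise walks the OF graph endpoint, preferring a panel on the remote end and falling back to a bridge. A toy model of that lookup order, with the OF/DRM registries replaced by trivial stand-ins:

    #include <stdio.h>

    /* Stand-ins for of_drm_find_panel()/of_drm_find_bridge(). */
    static const char *find_panel(const char *node)  { return node && node[0] == 'p' ? "panel"  : NULL; }
    static const char *find_bridge(const char *node) { return node && node[0] == 'b' ? "bridge" : NULL; }

    /* Mirror of the new fsl_dcu_attach_endpoint() flow: prefer a panel on
     * the remote end of the endpoint, otherwise attach the bridge there. */
    static int attach_endpoint(const char *remote_node)
    {
        const char *dev = find_panel(remote_node);

        if (!dev)
            dev = find_bridge(remote_node);
        if (!dev)
            return -19;   /* -ENODEV */
        printf("attached %s\n", dev);
        return 0;
    }

    int main(void)
    {
        attach_endpoint("panel@0");
        attach_endpoint("bridge@0");
        return 0;
    }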
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_tcon.c b/drivers/gpu/drm/fsl-dcu/fsl_tcon.c
index bbe34f1c0505..bca09ea24632 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_tcon.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_tcon.c
@@ -92,6 +92,7 @@ struct fsl_tcon *fsl_tcon_init(struct device *dev)
92 goto err_node_put; 92 goto err_node_put;
93 } 93 }
94 94
95 of_node_put(np);
95 clk_prepare_enable(tcon->ipg_clk); 96 clk_prepare_enable(tcon->ipg_clk);
96 97
97 dev_info(dev, "Using TCON in bypass mode\n"); 98 dev_info(dev, "Using TCON in bypass mode\n");
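
Several hunks in this pull (exynos_hdmi earlier, fsl_tcon here) add of_node_put() calls so that references taken by of_parse_phandle() and the OF-graph helpers are dropped once the node is no longer needed. A toy refcounting sketch of why the put is required; the names mirror the OF API but the signatures and bodies are purely illustrative:

    #include <stdio.h>

    struct device_node { const char *name; int refcount; };

    /* A successful lookup returns the node with an elevated refcount ... */
    static struct device_node *of_parse_phandle(struct device_node *np)
    {
        np->refcount++;
        return np;
    }

    /* ... so every lookup must be paired with of_node_put(). */
    static void of_node_put(struct device_node *np)
    {
        if (np)
            np->refcount--;
    }

    int main(void)
    {
        struct device_node panel = { .name = "panel", .refcount = 1 };
        struct device_node *np = of_parse_phandle(&panel);

        /* use the node: read properties, find the panel/bridge, ... */
        of_node_put(np);

        printf("%s refcount back to %d\n", panel.name, panel.refcount);
        return 0;
    }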
diff --git a/drivers/gpu/drm/gma500/Kconfig b/drivers/gpu/drm/gma500/Kconfig
index 17f928ec84ea..8906d67494fc 100644
--- a/drivers/gpu/drm/gma500/Kconfig
+++ b/drivers/gpu/drm/gma500/Kconfig
@@ -1,11 +1,7 @@
1config DRM_GMA500 1config DRM_GMA500
2 tristate "Intel GMA5/600 KMS Framebuffer" 2 tristate "Intel GMA5/600 KMS Framebuffer"
3 depends on DRM && PCI && X86 3 depends on DRM && PCI && X86
4 select FB_CFB_COPYAREA
5 select FB_CFB_FILLRECT
6 select FB_CFB_IMAGEBLIT
7 select DRM_KMS_HELPER 4 select DRM_KMS_HELPER
8 select DRM_KMS_FB_HELPER
9 select DRM_TTM 5 select DRM_TTM
10 # GMA500 depends on ACPI_VIDEO when ACPI is enabled, just like i915 6 # GMA500 depends on ACPI_VIDEO when ACPI is enabled, just like i915
11 select ACPI_VIDEO if ACPI 7 select ACPI_VIDEO if ACPI
diff --git a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index 28f9d90988ff..563f193fcfac 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -246,8 +246,7 @@ static void cdv_hdmi_destroy(struct drm_connector *connector)
246{ 246{
247 struct gma_encoder *gma_encoder = gma_attached_encoder(connector); 247 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
248 248
249 if (gma_encoder->i2c_bus) 249 psb_intel_i2c_destroy(gma_encoder->i2c_bus);
250 psb_intel_i2c_destroy(gma_encoder->i2c_bus);
251 drm_connector_unregister(connector); 250 drm_connector_unregister(connector);
252 drm_connector_cleanup(connector); 251 drm_connector_cleanup(connector);
253 kfree(connector); 252 kfree(connector);
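
The gma500 cleanups in this area drop the if (ptr) guards in front of psb_intel_i2c_destroy() and pci_dev_put(): like kfree(), these helpers tolerate a NULL argument (or are made to in this series), so the guard is redundant. A minimal sketch of that NULL-tolerant cleanup convention, using a hypothetical destroy helper:

    #include <stdio.h>
    #include <stdlib.h>

    struct i2c_bus { char *name; };

    /* Hypothetical destroy helper written in the kfree() style:
     * calling it with NULL is a no-op, so callers need no guard. */
    static void bus_destroy(struct i2c_bus *bus)
    {
        if (!bus)
            return;
        free(bus->name);
        free(bus);
    }

    int main(void)
    {
        struct i2c_bus *bus = NULL;   /* e.g. probe failed before creating it */

        bus_destroy(bus);             /* fine: no "if (bus)" needed at the call site */
        printf("cleanup path ran without a guard\n");
        return 0;
    }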
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index 813ef23a8054..38dc89083148 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -444,8 +444,7 @@ static void cdv_intel_lvds_destroy(struct drm_connector *connector)
444{ 444{
445 struct gma_encoder *gma_encoder = gma_attached_encoder(connector); 445 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
446 446
447 if (gma_encoder->i2c_bus) 447 psb_intel_i2c_destroy(gma_encoder->i2c_bus);
448 psb_intel_i2c_destroy(gma_encoder->i2c_bus);
449 drm_connector_unregister(connector); 448 drm_connector_unregister(connector);
450 drm_connector_cleanup(connector); 449 drm_connector_cleanup(connector);
451 kfree(connector); 450 kfree(connector);
@@ -780,12 +779,10 @@ out:
780failed_find: 779failed_find:
781 mutex_unlock(&dev->mode_config.mutex); 780 mutex_unlock(&dev->mode_config.mutex);
782 printk(KERN_ERR "Failed find\n"); 781 printk(KERN_ERR "Failed find\n");
783 if (gma_encoder->ddc_bus) 782 psb_intel_i2c_destroy(gma_encoder->ddc_bus);
784 psb_intel_i2c_destroy(gma_encoder->ddc_bus);
785failed_ddc: 783failed_ddc:
786 printk(KERN_ERR "Failed DDC\n"); 784 printk(KERN_ERR "Failed DDC\n");
787 if (gma_encoder->i2c_bus) 785 psb_intel_i2c_destroy(gma_encoder->i2c_bus);
788 psb_intel_i2c_destroy(gma_encoder->i2c_bus);
789failed_blc_i2c: 786failed_blc_i2c:
790 printk(KERN_ERR "Failed BLC\n"); 787 printk(KERN_ERR "Failed BLC\n");
791 drm_encoder_cleanup(encoder); 788 drm_encoder_cleanup(encoder);
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 7440bf90ac9c..0fcdce0817de 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -184,12 +184,6 @@ static int psbfb_mmap(struct fb_info *info, struct vm_area_struct *vma)
184 return 0; 184 return 0;
185} 185}
186 186
187static int psbfb_ioctl(struct fb_info *info, unsigned int cmd,
188 unsigned long arg)
189{
190 return -ENOTTY;
191}
192
193static struct fb_ops psbfb_ops = { 187static struct fb_ops psbfb_ops = {
194 .owner = THIS_MODULE, 188 .owner = THIS_MODULE,
195 .fb_check_var = drm_fb_helper_check_var, 189 .fb_check_var = drm_fb_helper_check_var,
@@ -201,7 +195,6 @@ static struct fb_ops psbfb_ops = {
201 .fb_imageblit = drm_fb_helper_cfb_imageblit, 195 .fb_imageblit = drm_fb_helper_cfb_imageblit,
202 .fb_mmap = psbfb_mmap, 196 .fb_mmap = psbfb_mmap,
203 .fb_sync = psbfb_sync, 197 .fb_sync = psbfb_sync,
204 .fb_ioctl = psbfb_ioctl,
205}; 198};
206 199
207static struct fb_ops psbfb_roll_ops = { 200static struct fb_ops psbfb_roll_ops = {
@@ -215,7 +208,6 @@ static struct fb_ops psbfb_roll_ops = {
215 .fb_imageblit = drm_fb_helper_cfb_imageblit, 208 .fb_imageblit = drm_fb_helper_cfb_imageblit,
216 .fb_pan_display = psbfb_pan, 209 .fb_pan_display = psbfb_pan,
217 .fb_mmap = psbfb_mmap, 210 .fb_mmap = psbfb_mmap,
218 .fb_ioctl = psbfb_ioctl,
219}; 211};
220 212
221static struct fb_ops psbfb_unaccel_ops = { 213static struct fb_ops psbfb_unaccel_ops = {
@@ -228,7 +220,6 @@ static struct fb_ops psbfb_unaccel_ops = {
228 .fb_copyarea = drm_fb_helper_cfb_copyarea, 220 .fb_copyarea = drm_fb_helper_cfb_copyarea,
229 .fb_imageblit = drm_fb_helper_cfb_imageblit, 221 .fb_imageblit = drm_fb_helper_cfb_imageblit,
230 .fb_mmap = psbfb_mmap, 222 .fb_mmap = psbfb_mmap,
231 .fb_ioctl = psbfb_ioctl,
232}; 223};
233 224
234/** 225/**
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index c95406e6f44d..1a1cf7a3b5ef 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -175,20 +175,21 @@ void gma_crtc_load_lut(struct drm_crtc *crtc)
175 } 175 }
176} 176}
177 177
178void gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue, 178int gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue,
179 u32 start, u32 size) 179 u32 size)
180{ 180{
181 struct gma_crtc *gma_crtc = to_gma_crtc(crtc); 181 struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
182 int i; 182 int i;
183 int end = (start + size > 256) ? 256 : start + size;
184 183
185 for (i = start; i < end; i++) { 184 for (i = 0; i < size; i++) {
186 gma_crtc->lut_r[i] = red[i] >> 8; 185 gma_crtc->lut_r[i] = red[i] >> 8;
187 gma_crtc->lut_g[i] = green[i] >> 8; 186 gma_crtc->lut_g[i] = green[i] >> 8;
188 gma_crtc->lut_b[i] = blue[i] >> 8; 187 gma_crtc->lut_b[i] = blue[i] >> 8;
189 } 188 }
190 189
191 gma_crtc_load_lut(crtc); 190 gma_crtc_load_lut(crtc);
191
192 return 0;
192} 193}
193 194
194/** 195/**
@@ -281,7 +282,7 @@ void gma_crtc_dpms(struct drm_crtc *crtc, int mode)
281 REG_WRITE(VGACNTRL, VGA_DISP_DISABLE); 282 REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
282 283
283 /* Turn off vblank interrupts */ 284 /* Turn off vblank interrupts */
284 drm_vblank_off(dev, pipe); 285 drm_crtc_vblank_off(crtc);
285 286
286 /* Wait for vblank for the disable to take effect */ 287 /* Wait for vblank for the disable to take effect */
287 gma_wait_for_vblank(dev); 288 gma_wait_for_vblank(dev);
diff --git a/drivers/gpu/drm/gma500/gma_display.h b/drivers/gpu/drm/gma500/gma_display.h
index b2491c65f053..e72dd08b701b 100644
--- a/drivers/gpu/drm/gma500/gma_display.h
+++ b/drivers/gpu/drm/gma500/gma_display.h
@@ -72,8 +72,8 @@ extern int gma_crtc_cursor_set(struct drm_crtc *crtc,
72 uint32_t width, uint32_t height); 72 uint32_t width, uint32_t height);
73extern int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y); 73extern int gma_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
74extern void gma_crtc_load_lut(struct drm_crtc *crtc); 74extern void gma_crtc_load_lut(struct drm_crtc *crtc);
75extern void gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, 75extern int gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
76 u16 *blue, u32 start, u32 size); 76 u16 *blue, u32 size);
77extern void gma_crtc_dpms(struct drm_crtc *crtc, int mode); 77extern void gma_crtc_dpms(struct drm_crtc *crtc, int mode);
78extern void gma_crtc_prepare(struct drm_crtc *crtc); 78extern void gma_crtc_prepare(struct drm_crtc *crtc);
79extern void gma_crtc_commit(struct drm_crtc *crtc); 79extern void gma_crtc_commit(struct drm_crtc *crtc);
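
gma_crtc_gamma_set() now matches the updated drm_crtc_funcs.gamma_set prototype: no start offset, just a table size, with DRM's 16-bit-per-channel ramp shifted down into the hardware's 8-bit LUT. A standalone sketch of that conversion; the 256-entry size and the >> 8 downshift mirror the hunk, everything else is mocked:

    #include <stdint.h>
    #include <stdio.h>

    #define LUT_SIZE 256

    /* Downshift DRM's 16-bit gamma ramp into an 8-bit hardware LUT,
     * as gma_crtc_gamma_set() does with "red[i] >> 8". */
    static void gamma_set(uint8_t lut_r[LUT_SIZE], uint8_t lut_g[LUT_SIZE],
                          uint8_t lut_b[LUT_SIZE], const uint16_t *red,
                          const uint16_t *green, const uint16_t *blue, uint32_t size)
    {
        for (uint32_t i = 0; i < size && i < LUT_SIZE; i++) {
            lut_r[i] = red[i] >> 8;
            lut_g[i] = green[i] >> 8;
            lut_b[i] = blue[i] >> 8;
        }
    }

    int main(void)
    {
        static uint16_t r[LUT_SIZE], g[LUT_SIZE], b[LUT_SIZE];
        static uint8_t lr[LUT_SIZE], lg[LUT_SIZE], lb[LUT_SIZE];

        for (int i = 0; i < LUT_SIZE; i++)   /* identity ramp, like psb_intel_crtc_init() */
            r[i] = g[i] = b[i] = (uint16_t)(i << 8);

        gamma_set(lr, lg, lb, r, g, b, LUT_SIZE);
        printf("entry 128 -> %u (expected 128)\n", (unsigned)lr[128]);
        return 0;
    }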
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 82b8ce418b27..50eb944fb78a 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -210,10 +210,8 @@ static int psb_driver_unload(struct drm_device *dev)
210 iounmap(dev_priv->aux_reg); 210 iounmap(dev_priv->aux_reg);
211 dev_priv->aux_reg = NULL; 211 dev_priv->aux_reg = NULL;
212 } 212 }
213 if (dev_priv->aux_pdev) 213 pci_dev_put(dev_priv->aux_pdev);
214 pci_dev_put(dev_priv->aux_pdev); 214 pci_dev_put(dev_priv->lpc_pdev);
215 if (dev_priv->lpc_pdev)
216 pci_dev_put(dev_priv->lpc_pdev);
217 215
218 /* Destroy VBT data */ 216 /* Destroy VBT data */
219 psb_intel_destroy_bios(dev); 217 psb_intel_destroy_bios(dev);
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 398015be87e4..7b6c84925098 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -491,7 +491,6 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe,
491 struct drm_psb_private *dev_priv = dev->dev_private; 491 struct drm_psb_private *dev_priv = dev->dev_private;
492 struct gma_crtc *gma_crtc; 492 struct gma_crtc *gma_crtc;
493 int i; 493 int i;
494 uint16_t *r_base, *g_base, *b_base;
495 494
496 /* We allocate a extra array of drm_connector pointers 495 /* We allocate a extra array of drm_connector pointers
497 * for fbdev after the crtc */ 496 * for fbdev after the crtc */
@@ -519,16 +518,10 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe,
519 gma_crtc->pipe = pipe; 518 gma_crtc->pipe = pipe;
520 gma_crtc->plane = pipe; 519 gma_crtc->plane = pipe;
521 520
522 r_base = gma_crtc->base.gamma_store;
523 g_base = r_base + 256;
524 b_base = g_base + 256;
525 for (i = 0; i < 256; i++) { 521 for (i = 0; i < 256; i++) {
526 gma_crtc->lut_r[i] = i; 522 gma_crtc->lut_r[i] = i;
527 gma_crtc->lut_g[i] = i; 523 gma_crtc->lut_g[i] = i;
528 gma_crtc->lut_b[i] = i; 524 gma_crtc->lut_b[i] = i;
529 r_base[i] = i << 8;
530 g_base[i] = i << 8;
531 b_base[i] = i << 8;
532 525
533 gma_crtc->lut_adj[i] = 0; 526 gma_crtc->lut_adj[i] = 0;
534 } 527 }
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index b1b93317d054..e55733ca46d2 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -561,8 +561,7 @@ void psb_intel_lvds_destroy(struct drm_connector *connector)
561 struct gma_encoder *gma_encoder = gma_attached_encoder(connector); 561 struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
562 struct psb_intel_lvds_priv *lvds_priv = gma_encoder->dev_priv; 562 struct psb_intel_lvds_priv *lvds_priv = gma_encoder->dev_priv;
563 563
564 if (lvds_priv->ddc_bus) 564 psb_intel_i2c_destroy(lvds_priv->ddc_bus);
565 psb_intel_i2c_destroy(lvds_priv->ddc_bus);
566 drm_connector_unregister(connector); 565 drm_connector_unregister(connector);
567 drm_connector_cleanup(connector); 566 drm_connector_cleanup(connector);
568 kfree(connector); 567 kfree(connector);
@@ -835,11 +834,9 @@ out:
835 834
836failed_find: 835failed_find:
837 mutex_unlock(&dev->mode_config.mutex); 836 mutex_unlock(&dev->mode_config.mutex);
838 if (lvds_priv->ddc_bus) 837 psb_intel_i2c_destroy(lvds_priv->ddc_bus);
839 psb_intel_i2c_destroy(lvds_priv->ddc_bus);
840failed_ddc: 838failed_ddc:
841 if (lvds_priv->i2c_bus) 839 psb_intel_i2c_destroy(lvds_priv->i2c_bus);
842 psb_intel_i2c_destroy(lvds_priv->i2c_bus);
843failed_blc_i2c: 840failed_blc_i2c:
844 drm_encoder_cleanup(encoder); 841 drm_encoder_cleanup(encoder);
845 drm_connector_cleanup(connector); 842 drm_connector_cleanup(connector);
diff --git a/drivers/gpu/drm/hisilicon/kirin/Kconfig b/drivers/gpu/drm/hisilicon/kirin/Kconfig
index ea0df6115f7e..499f64405dac 100644
--- a/drivers/gpu/drm/hisilicon/kirin/Kconfig
+++ b/drivers/gpu/drm/hisilicon/kirin/Kconfig
@@ -4,6 +4,7 @@ config DRM_HISI_KIRIN
4 select DRM_KMS_HELPER 4 select DRM_KMS_HELPER
5 select DRM_GEM_CMA_HELPER 5 select DRM_GEM_CMA_HELPER
6 select DRM_KMS_CMA_HELPER 6 select DRM_KMS_CMA_HELPER
7 select HISI_KIRIN_DW_DSI
7 help 8 help
8 Choose this option if you have a hisilicon Kirin chipsets(hi6220). 9 Choose this option if you have a hisilicon Kirin chipsets(hi6220).
9 If M is selected the module will be called kirin-drm. 10 If M is selected the module will be called kirin-drm.
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
index fba6372d060e..c3707d47cd89 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
@@ -487,6 +487,7 @@ static void ade_crtc_enable(struct drm_crtc *crtc)
487 ade_set_medianoc_qos(acrtc); 487 ade_set_medianoc_qos(acrtc);
488 ade_display_enable(acrtc); 488 ade_display_enable(acrtc);
489 ade_dump_regs(ctx->base); 489 ade_dump_regs(ctx->base);
490 drm_crtc_vblank_on(crtc);
490 acrtc->enable = true; 491 acrtc->enable = true;
491} 492}
492 493
@@ -498,17 +499,11 @@ static void ade_crtc_disable(struct drm_crtc *crtc)
498 if (!acrtc->enable) 499 if (!acrtc->enable)
499 return; 500 return;
500 501
502 drm_crtc_vblank_off(crtc);
501 ade_power_down(ctx); 503 ade_power_down(ctx);
502 acrtc->enable = false; 504 acrtc->enable = false;
503} 505}
504 506
505static int ade_crtc_atomic_check(struct drm_crtc *crtc,
506 struct drm_crtc_state *state)
507{
508 /* do nothing */
509 return 0;
510}
511
512static void ade_crtc_mode_set_nofb(struct drm_crtc *crtc) 507static void ade_crtc_mode_set_nofb(struct drm_crtc *crtc)
513{ 508{
514 struct ade_crtc *acrtc = to_ade_crtc(crtc); 509 struct ade_crtc *acrtc = to_ade_crtc(crtc);
@@ -537,6 +532,7 @@ static void ade_crtc_atomic_flush(struct drm_crtc *crtc,
537{ 532{
538 struct ade_crtc *acrtc = to_ade_crtc(crtc); 533 struct ade_crtc *acrtc = to_ade_crtc(crtc);
539 struct ade_hw_ctx *ctx = acrtc->ctx; 534 struct ade_hw_ctx *ctx = acrtc->ctx;
535 struct drm_pending_vblank_event *event = crtc->state->event;
540 void __iomem *base = ctx->base; 536 void __iomem *base = ctx->base;
541 537
542 /* only crtc is enabled regs take effect */ 538 /* only crtc is enabled regs take effect */
@@ -545,12 +541,22 @@ static void ade_crtc_atomic_flush(struct drm_crtc *crtc,
545 /* flush ade registers */ 541 /* flush ade registers */
546 writel(ADE_ENABLE, base + ADE_EN); 542 writel(ADE_ENABLE, base + ADE_EN);
547 } 543 }
544
545 if (event) {
546 crtc->state->event = NULL;
547
548 spin_lock_irq(&crtc->dev->event_lock);
549 if (drm_crtc_vblank_get(crtc) == 0)
550 drm_crtc_arm_vblank_event(crtc, event);
551 else
552 drm_crtc_send_vblank_event(crtc, event);
553 spin_unlock_irq(&crtc->dev->event_lock);
554 }
548} 555}
549 556
550static const struct drm_crtc_helper_funcs ade_crtc_helper_funcs = { 557static const struct drm_crtc_helper_funcs ade_crtc_helper_funcs = {
551 .enable = ade_crtc_enable, 558 .enable = ade_crtc_enable,
552 .disable = ade_crtc_disable, 559 .disable = ade_crtc_disable,
553 .atomic_check = ade_crtc_atomic_check,
554 .mode_set_nofb = ade_crtc_mode_set_nofb, 560 .mode_set_nofb = ade_crtc_mode_set_nofb,
555 .atomic_begin = ade_crtc_atomic_begin, 561 .atomic_begin = ade_crtc_atomic_begin,
556 .atomic_flush = ade_crtc_atomic_flush, 562 .atomic_flush = ade_crtc_atomic_flush,
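
Read side by side, the ade_crtc changes above pair drm_crtc_vblank_on()/off() in enable/disable with pageflip-event completion in atomic_flush. A consolidated sketch of that flush-time pattern, using the same DRM helpers as the hunk:

#include <drm/drmP.h>
#include <drm/drm_crtc.h>

static void example_crtc_atomic_flush(struct drm_crtc *crtc,
				      struct drm_crtc_state *old_state)
{
	struct drm_pending_vblank_event *event = crtc->state->event;

	if (event) {
		crtc->state->event = NULL;

		spin_lock_irq(&crtc->dev->event_lock);
		if (drm_crtc_vblank_get(crtc) == 0)
			/* completed from the next vblank interrupt */
			drm_crtc_arm_vblank_event(crtc, event);
		else
			/* no vblank reference available, complete it now */
			drm_crtc_send_vblank_event(crtc, event);
		spin_unlock_irq(&crtc->dev->event_lock);
	}
}
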
@@ -961,21 +967,21 @@ static int ade_dts_parse(struct platform_device *pdev, struct ade_hw_ctx *ctx)
961 } 967 }
962 968
963 ctx->ade_core_clk = devm_clk_get(dev, "clk_ade_core"); 969 ctx->ade_core_clk = devm_clk_get(dev, "clk_ade_core");
964 if (!ctx->ade_core_clk) { 970 if (IS_ERR(ctx->ade_core_clk)) {
965 DRM_ERROR("failed to parse clk ADE_CORE\n"); 971 DRM_ERROR("failed to parse clk ADE_CORE\n");
966 return -ENODEV; 972 return PTR_ERR(ctx->ade_core_clk);
967 } 973 }
968 974
969 ctx->media_noc_clk = devm_clk_get(dev, "clk_codec_jpeg"); 975 ctx->media_noc_clk = devm_clk_get(dev, "clk_codec_jpeg");
970 if (!ctx->media_noc_clk) { 976 if (IS_ERR(ctx->media_noc_clk)) {
971 DRM_ERROR("failed to parse clk CODEC_JPEG\n"); 977 DRM_ERROR("failed to parse clk CODEC_JPEG\n");
972 return -ENODEV; 978 return PTR_ERR(ctx->media_noc_clk);
973 } 979 }
974 980
975 ctx->ade_pix_clk = devm_clk_get(dev, "clk_ade_pix"); 981 ctx->ade_pix_clk = devm_clk_get(dev, "clk_ade_pix");
976 if (!ctx->ade_pix_clk) { 982 if (IS_ERR(ctx->ade_pix_clk)) {
977 DRM_ERROR("failed to parse clk ADE_PIX\n"); 983 DRM_ERROR("failed to parse clk ADE_PIX\n");
978 return -ENODEV; 984 return PTR_ERR(ctx->ade_pix_clk);
979 } 985 }
980 986
981 return 0; 987 return 0;
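
The clock hunk above fixes a common devm_clk_get() misuse: the helper reports failure through an ERR_PTR-encoded pointer rather than NULL, so the result must be tested with IS_ERR() and the error propagated with PTR_ERR(). A minimal sketch, with "clk_example" as a made-up clock name:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int example_get_clock(struct device *dev, struct clk **out)
{
	struct clk *clk = devm_clk_get(dev, "clk_example");

	if (IS_ERR(clk)) {
		dev_err(dev, "failed to get clk_example\n");
		return PTR_ERR(clk);	/* e.g. -EPROBE_DEFER or -ENOENT */
	}

	*out = clk;
	return 0;
}
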
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
index 3f94785fbcca..1edd9bc80294 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c
@@ -171,9 +171,8 @@ static struct drm_driver kirin_drm_driver = {
171 .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME | 171 .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME |
172 DRIVER_ATOMIC | DRIVER_HAVE_IRQ, 172 DRIVER_ATOMIC | DRIVER_HAVE_IRQ,
173 .fops = &kirin_drm_fops, 173 .fops = &kirin_drm_fops,
174 .set_busid = drm_platform_set_busid,
175 174
176 .gem_free_object = drm_gem_cma_free_object, 175 .gem_free_object_unlocked = drm_gem_cma_free_object,
177 .gem_vm_ops = &drm_gem_cma_vm_ops, 176 .gem_vm_ops = &drm_gem_cma_vm_ops,
178 .dumb_create = kirin_gem_cma_dumb_create, 177 .dumb_create = kirin_gem_cma_dumb_create,
179 .dumb_map_offset = drm_gem_cma_dumb_map_offset, 178 .dumb_map_offset = drm_gem_cma_dumb_map_offset,
@@ -221,19 +220,12 @@ static int kirin_drm_bind(struct device *dev)
221 if (ret) 220 if (ret)
222 goto err_kms_cleanup; 221 goto err_kms_cleanup;
223 222
224 /* connectors should be registered after drm device register */
225 ret = drm_connector_register_all(drm_dev);
226 if (ret)
227 goto err_drm_dev_unregister;
228
229 DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", 223 DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
230 driver->name, driver->major, driver->minor, driver->patchlevel, 224 driver->name, driver->major, driver->minor, driver->patchlevel,
231 driver->date, drm_dev->primary->index); 225 driver->date, drm_dev->primary->index);
232 226
233 return 0; 227 return 0;
234 228
235err_drm_dev_unregister:
236 drm_dev_unregister(drm_dev);
237err_kms_cleanup: 229err_kms_cleanup:
238 kirin_drm_kms_cleanup(drm_dev); 230 kirin_drm_kms_cleanup(drm_dev);
239err_drm_dev_unref: 231err_drm_dev_unref:
@@ -246,7 +238,6 @@ static void kirin_drm_unbind(struct device *dev)
246{ 238{
247 struct drm_device *drm_dev = dev_get_drvdata(dev); 239 struct drm_device *drm_dev = dev_get_drvdata(dev);
248 240
249 drm_connector_unregister_all(drm_dev);
250 drm_dev_unregister(drm_dev); 241 drm_dev_unregister(drm_dev);
251 kirin_drm_kms_cleanup(drm_dev); 242 kirin_drm_kms_cleanup(drm_dev);
252 drm_dev_unref(drm_dev); 243 drm_dev_unref(drm_dev);
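
With connector registration now handled by the DRM core inside drm_dev_register(), the kirin bind path above shrinks to alloc, KMS init, register. A sketch of that simplified sequence under the 4.8-era API; example_drm_driver, example_kms_init() and example_kms_cleanup() are stand-ins for the driver's own symbols:

#include <drm/drmP.h>

static int example_drm_bind(struct device *dev)
{
	struct drm_device *drm;
	int ret;

	drm = drm_dev_alloc(&example_drm_driver, dev);
	if (!drm)
		return -ENOMEM;

	ret = example_kms_init(drm);
	if (ret)
		goto err_unref;

	/* connector registration now happens inside drm_dev_register() */
	ret = drm_dev_register(drm, 0);
	if (ret)
		goto err_kms_cleanup;

	return 0;

err_kms_cleanup:
	example_kms_cleanup(drm);
err_unref:
	drm_dev_unref(drm);
	return ret;
}
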
diff --git a/drivers/gpu/drm/i2c/Kconfig b/drivers/gpu/drm/i2c/Kconfig
index 22c7ed63a001..4d341db462a2 100644
--- a/drivers/gpu/drm/i2c/Kconfig
+++ b/drivers/gpu/drm/i2c/Kconfig
@@ -1,12 +1,6 @@
1menu "I2C encoder or helper chips" 1menu "I2C encoder or helper chips"
2 depends on DRM && DRM_KMS_HELPER && I2C 2 depends on DRM && DRM_KMS_HELPER && I2C
3 3
4config DRM_I2C_ADV7511
5 tristate "AV7511 encoder"
6 select REGMAP_I2C
7 help
8 Support for the Analog Device ADV7511(W) and ADV7513 HDMI encoders.
9
10config DRM_I2C_CH7006 4config DRM_I2C_CH7006
11 tristate "Chrontel ch7006 TV encoder" 5 tristate "Chrontel ch7006 TV encoder"
12 default m if DRM_NOUVEAU 6 default m if DRM_NOUVEAU
diff --git a/drivers/gpu/drm/i2c/Makefile b/drivers/gpu/drm/i2c/Makefile
index 2c72eb584ab7..43aa33baebed 100644
--- a/drivers/gpu/drm/i2c/Makefile
+++ b/drivers/gpu/drm/i2c/Makefile
@@ -1,7 +1,5 @@
1ccflags-y := -Iinclude/drm 1ccflags-y := -Iinclude/drm
2 2
3obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511.o
4
5ch7006-y := ch7006_drv.o ch7006_mode.o 3ch7006-y := ch7006_drv.o ch7006_mode.o
6obj-$(CONFIG_DRM_I2C_CH7006) += ch7006.o 4obj-$(CONFIG_DRM_I2C_CH7006) += ch7006.o
7 5
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c
index 0594c45f7164..e9e8ae2ec06b 100644
--- a/drivers/gpu/drm/i2c/ch7006_drv.c
+++ b/drivers/gpu/drm/i2c/ch7006_drv.c
@@ -361,13 +361,8 @@ static int ch7006_encoder_set_property(struct drm_encoder *encoder,
361 361
362 /* Disable the crtc to ensure a full modeset is 362 /* Disable the crtc to ensure a full modeset is
363 * performed whenever it's turned on again. */ 363 * performed whenever it's turned on again. */
364 if (crtc) { 364 if (crtc)
365 struct drm_mode_set modeset = { 365 drm_crtc_force_disable(crtc);
366 .crtc = crtc,
367 };
368
369 drm_mode_set_config_internal(&modeset);
370 }
371 } 366 }
372 367
373 return 0; 368 return 0;
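
The ch7006 hunk above swaps the open-coded "modeset with no mode and no fb" idiom for drm_crtc_force_disable(). Roughly what that helper amounts to, paraphrased rather than copied from the DRM core:

#include <drm/drm_crtc.h>

static int example_force_disable(struct drm_crtc *crtc)
{
	struct drm_mode_set set = {
		.crtc = crtc,	/* no mode, no fb: the CRTC is switched off */
	};

	return drm_mode_set_config_internal(&set);
}
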
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 29a32b11953b..7769e469118f 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -57,6 +57,28 @@ config DRM_I915_USERPTR
57 57
58 If in doubt, say "Y". 58 If in doubt, say "Y".
59 59
60config DRM_I915_GVT
61 bool "Enable Intel GVT-g graphics virtualization host support"
62 depends on DRM_I915
63 default n
64 help
65 Choose this option if you want to enable Intel GVT-g graphics
66 virtualization technology host support with integrated graphics.
67 With GVT-g, it's possible to have one integrated graphics
68 device shared by multiple VMs under different hypervisors.
69
70 Note that at least one hypervisor like Xen or KVM is required for
 71	  this driver to work, and it only supports newer devices from
 72	  Broadwell onwards. For further information and a setup guide, you
 73	  can visit: http://01.org/igvt-g.
 74
 75	  For now it is just a stub to support the modifications of i915 for
 76	  the GVT device model. It requires at least one MPT module for
 77	  Xen/KVM and the other components of the GVT device model to work.
 78	  Use it at your own risk.
79
80 If in doubt, say "N".
81
60menu "drm/i915 Debugging" 82menu "drm/i915 Debugging"
61depends on DRM_I915 83depends on DRM_I915
62depends on EXPERT 84depends on EXPERT
diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug
index 8f404103341d..cee87bfd10c4 100644
--- a/drivers/gpu/drm/i915/Kconfig.debug
+++ b/drivers/gpu/drm/i915/Kconfig.debug
@@ -18,6 +18,9 @@ config DRM_I915_WERROR
18config DRM_I915_DEBUG 18config DRM_I915_DEBUG
19 bool "Enable additional driver debugging" 19 bool "Enable additional driver debugging"
20 depends on DRM_I915 20 depends on DRM_I915
21 select PREEMPT_COUNT
22 select X86_MSR # used by igt/pm_rpm
23 select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks)
21 default n 24 default n
22 help 25 help
23 Choose this option to turn on extra driver debugging that may affect 26 Choose this option to turn on extra driver debugging that may affect
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 0b88ba0f3c1f..684fc1cd08fa 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -10,9 +10,11 @@ subdir-ccflags-$(CONFIG_DRM_I915_WERROR) := -Werror
10i915-y := i915_drv.o \ 10i915-y := i915_drv.o \
11 i915_irq.o \ 11 i915_irq.o \
12 i915_params.o \ 12 i915_params.o \
13 i915_pci.o \
13 i915_suspend.o \ 14 i915_suspend.o \
14 i915_sysfs.o \ 15 i915_sysfs.o \
15 intel_csr.o \ 16 intel_csr.o \
17 intel_device_info.o \
16 intel_pm.o \ 18 intel_pm.o \
17 intel_runtime_pm.o 19 intel_runtime_pm.o
18 20
@@ -37,6 +39,7 @@ i915-y += i915_cmd_parser.o \
37 i915_gem_userptr.o \ 39 i915_gem_userptr.o \
38 i915_gpu_error.o \ 40 i915_gpu_error.o \
39 i915_trace_points.o \ 41 i915_trace_points.o \
42 intel_breadcrumbs.o \
40 intel_lrc.o \ 43 intel_lrc.o \
41 intel_mocs.o \ 44 intel_mocs.o \
42 intel_ringbuffer.o \ 45 intel_ringbuffer.o \
@@ -59,6 +62,7 @@ i915-y += intel_audio.o \
59 intel_bios.o \ 62 intel_bios.o \
60 intel_color.o \ 63 intel_color.o \
61 intel_display.o \ 64 intel_display.o \
65 intel_dpio_phy.o \
62 intel_dpll_mgr.o \ 66 intel_dpll_mgr.o \
63 intel_fbc.o \ 67 intel_fbc.o \
64 intel_fifo_underrun.o \ 68 intel_fifo_underrun.o \
@@ -81,10 +85,12 @@ i915-y += dvo_ch7017.o \
81 dvo_tfp410.o \ 85 dvo_tfp410.o \
82 intel_crt.o \ 86 intel_crt.o \
83 intel_ddi.o \ 87 intel_ddi.o \
88 intel_dp_aux_backlight.o \
84 intel_dp_link_training.o \ 89 intel_dp_link_training.o \
85 intel_dp_mst.o \ 90 intel_dp_mst.o \
86 intel_dp.o \ 91 intel_dp.o \
87 intel_dsi.o \ 92 intel_dsi.o \
93 intel_dsi_dcs_backlight.o \
88 intel_dsi_panel_vbt.o \ 94 intel_dsi_panel_vbt.o \
89 intel_dsi_pll.o \ 95 intel_dsi_pll.o \
90 intel_dvo.o \ 96 intel_dvo.o \
@@ -98,8 +104,10 @@ i915-y += dvo_ch7017.o \
98# virtual gpu code 104# virtual gpu code
99i915-y += i915_vgpu.o 105i915-y += i915_vgpu.o
100 106
101# legacy horrors 107ifeq ($(CONFIG_DRM_I915_GVT),y)
102i915-y += i915_dma.o 108i915-y += intel_gvt.o
109include $(src)/gvt/Makefile
110endif
103 111
104obj-$(CONFIG_DRM_I915) += i915.o 112obj-$(CONFIG_DRM_I915) += i915.o
105 113
diff --git a/drivers/gpu/drm/i915/gvt/Makefile b/drivers/gpu/drm/i915/gvt/Makefile
new file mode 100644
index 000000000000..d0f21a6ad60d
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/Makefile
@@ -0,0 +1,5 @@
1GVT_DIR := gvt
2GVT_SOURCE := gvt.o
3
4ccflags-y += -I$(src) -I$(src)/$(GVT_DIR) -Wall
5i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
diff --git a/drivers/gpu/drm/i915/gvt/debug.h b/drivers/gpu/drm/i915/gvt/debug.h
new file mode 100644
index 000000000000..7ef412be665f
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/debug.h
@@ -0,0 +1,34 @@
1/*
2 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24#ifndef __GVT_DEBUG_H__
25#define __GVT_DEBUG_H__
26
27#define gvt_dbg_core(fmt, args...) \
28 DRM_DEBUG_DRIVER("gvt: core: "fmt, ##args)
29
30/*
31 * Other GVT debug stuff will be introduced in the GVT device model patches.
32 */
33
34#endif
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
new file mode 100644
index 000000000000..927f4579f5b6
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -0,0 +1,145 @@
1/*
2 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24#include <linux/types.h>
25#include <xen/xen.h>
26
27#include "i915_drv.h"
28
29struct intel_gvt_host intel_gvt_host;
30
31static const char * const supported_hypervisors[] = {
32 [INTEL_GVT_HYPERVISOR_XEN] = "XEN",
33 [INTEL_GVT_HYPERVISOR_KVM] = "KVM",
34};
35
36/**
37 * intel_gvt_init_host - Load MPT modules and detect if we're running in host
38 * @gvt: intel gvt device
39 *
 40 * This function is called at the driver loading stage. If it fails to find a
 41 * loadable MPT module, or detects that we are currently running inside a VM,
 42 * GVT-g will be disabled.
43 *
44 * Returns:
45 * Zero on success, negative error code if failed.
46 *
47 */
48int intel_gvt_init_host(void)
49{
50 if (intel_gvt_host.initialized)
51 return 0;
52
53 /* Xen DOM U */
54 if (xen_domain() && !xen_initial_domain())
55 return -ENODEV;
56
57 /* Try to load MPT modules for hypervisors */
58 if (xen_initial_domain()) {
59 /* In Xen dom0 */
60 intel_gvt_host.mpt = try_then_request_module(
61 symbol_get(xengt_mpt), "xengt");
62 intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_XEN;
63 } else {
64 /* not in Xen. Try KVMGT */
65 intel_gvt_host.mpt = try_then_request_module(
66 symbol_get(kvmgt_mpt), "kvm");
67 intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_KVM;
68 }
69
 70	/* Failed to load an MPT module - bail out */
71 if (!intel_gvt_host.mpt)
72 return -EINVAL;
73
74 /* Try to detect if we're running in host instead of VM. */
75 if (!intel_gvt_hypervisor_detect_host())
76 return -ENODEV;
77
78 gvt_dbg_core("Running with hypervisor %s in host mode\n",
79 supported_hypervisors[intel_gvt_host.hypervisor_type]);
80
81 intel_gvt_host.initialized = true;
82 return 0;
83}
84
85static void init_device_info(struct intel_gvt *gvt)
86{
87 if (IS_BROADWELL(gvt->dev_priv))
88 gvt->device_info.max_support_vgpus = 8;
89 /* This function will grow large in GVT device model patches. */
90}
91
92/**
93 * intel_gvt_clean_device - clean a GVT device
94 * @gvt: intel gvt device
95 *
96 * This function is called at the driver unloading stage, to free the
97 * resources owned by a GVT device.
98 *
99 */
100void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
101{
102 struct intel_gvt *gvt = &dev_priv->gvt;
103
104 if (WARN_ON(!gvt->initialized))
105 return;
106
107 /* Other de-initialization of GVT components will be introduced. */
108
109 gvt->initialized = false;
110}
111
112/**
113 * intel_gvt_init_device - initialize a GVT device
114 * @dev_priv: drm i915 private data
115 *
116 * This function is called at the initialization stage, to initialize
117 * necessary GVT components.
118 *
119 * Returns:
120 * Zero on success, negative error code if failed.
121 *
122 */
123int intel_gvt_init_device(struct drm_i915_private *dev_priv)
124{
125 struct intel_gvt *gvt = &dev_priv->gvt;
126 /*
 127	 * The GVT device cannot be initialized until intel_gvt_host has
 128	 * been initialized first.
129 */
130 if (WARN_ON(!intel_gvt_host.initialized))
131 return -EINVAL;
132
133 if (WARN_ON(gvt->initialized))
134 return -EEXIST;
135
136 gvt_dbg_core("init gvt device\n");
137
138 init_device_info(gvt);
139 /*
 140	 * Other initialization of GVT components will be introduced here.
141 */
142 gvt_dbg_core("gvt device creation is done\n");
143 gvt->initialized = true;
144 return 0;
145}
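
intel_gvt_init_host() above leans on try_then_request_module() to pull in an MPT module on demand. A sketch of what that macro boils down to for the Xen case, assuming the xengt_mpt declaration from gvt/hypercall.h is in scope; the reference taken by symbol_get() would normally be dropped again with symbol_put() once the module is no longer needed.

#include <linux/kmod.h>
#include <linux/module.h>
#include "hypercall.h"

static struct intel_gvt_mpt *example_get_xengt_mpt(void)
{
	struct intel_gvt_mpt *mpt = symbol_get(xengt_mpt);

	if (!mpt) {
		/* not loaded yet: ask modprobe for it, then look again */
		request_module("xengt");
		mpt = symbol_get(xengt_mpt);
	}
	return mpt;	/* NULL if no MPT module could be found */
}
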
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
new file mode 100644
index 000000000000..fb619a6e519d
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -0,0 +1,69 @@
1/*
2 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24#ifndef _GVT_H_
25#define _GVT_H_
26
27#include "debug.h"
28#include "hypercall.h"
29
30#define GVT_MAX_VGPU 8
31
32enum {
33 INTEL_GVT_HYPERVISOR_XEN = 0,
34 INTEL_GVT_HYPERVISOR_KVM,
35};
36
37struct intel_gvt_host {
38 bool initialized;
39 int hypervisor_type;
40 struct intel_gvt_mpt *mpt;
41};
42
43extern struct intel_gvt_host intel_gvt_host;
44
45/* Describe per-platform limitations. */
46struct intel_gvt_device_info {
47 u32 max_support_vgpus;
48 /* This data structure will grow bigger in GVT device model patches */
49};
50
51struct intel_vgpu {
52 struct intel_gvt *gvt;
53 int id;
54 unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
55};
56
57struct intel_gvt {
58 struct mutex lock;
59 bool initialized;
60
61 struct drm_i915_private *dev_priv;
62 struct idr vgpu_idr; /* vGPU IDR pool */
63
64 struct intel_gvt_device_info device_info;
65};
66
67#include "mpt.h"
68
69#endif
diff --git a/drivers/gpu/drm/i915/gvt/hypercall.h b/drivers/gpu/drm/i915/gvt/hypercall.h
new file mode 100644
index 000000000000..254df8bf1f35
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/hypercall.h
@@ -0,0 +1,38 @@
1/*
2 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24#ifndef _GVT_HYPERCALL_H_
25#define _GVT_HYPERCALL_H_
26
27/*
28 * Specific GVT-g MPT modules function collections. Currently GVT-g supports
29 * both Xen and KVM by providing dedicated hypervisor-related MPT modules.
30 */
31struct intel_gvt_mpt {
32 int (*detect_host)(void);
33};
34
35extern struct intel_gvt_mpt xengt_mpt;
36extern struct intel_gvt_mpt kvmgt_mpt;
37
38#endif /* _GVT_HYPERCALL_H_ */
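
For completeness, a sketch of the counterpart a hypervisor MPT module would export so that the symbol_get() lookup in gvt.c above can find it. The detect_host body is illustrative only; a real Xen module would query the hypervisor. Note that the call site in intel_gvt_init_host() bails out when detect_host() returns zero, so as merged here the host case is signalled with a nonzero value.

#include <linux/module.h>
#include "hypercall.h"

static int example_detect_host(void)
{
	/* nonzero: running in the host / privileged domain */
	return 1;
}

struct intel_gvt_mpt xengt_mpt = {
	.detect_host = example_detect_host,
};
EXPORT_SYMBOL_GPL(xengt_mpt);

MODULE_LICENSE("GPL");
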
diff --git a/drivers/gpu/drm/i915/gvt/mpt.h b/drivers/gpu/drm/i915/gvt/mpt.h
new file mode 100644
index 000000000000..03601e3ffa7c
--- /dev/null
+++ b/drivers/gpu/drm/i915/gvt/mpt.h
@@ -0,0 +1,49 @@
1/*
2 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24#ifndef _GVT_MPT_H_
25#define _GVT_MPT_H_
26
27/**
28 * DOC: Hypervisor Service APIs for GVT-g Core Logic
29 *
30 * This is the glue layer between specific hypervisor MPT modules and GVT-g core
31 * logic. Each kind of hypervisor MPT module provides a collection of function
32 * callbacks and will be attached to GVT host when the driver is loading.
33 * GVT-g core logic will call these APIs to request specific services from
34 * hypervisor.
35 */
36
37/**
38 * intel_gvt_hypervisor_detect_host - check if GVT-g is running within
 39 * hypervisor host/privileged domain
40 *
41 * Returns:
42 * Zero on success, -ENODEV if current kernel is running inside a VM
43 */
44static inline int intel_gvt_hypervisor_detect_host(void)
45{
46 return intel_gvt_host.mpt->detect_host();
47}
48
49#endif /* _GVT_MPT_H_ */
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index a337f33bec5b..b0fd6a7b0603 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -215,7 +215,8 @@ static const struct drm_i915_cmd_descriptor hsw_render_cmds[] = {
215 CMD( MI_RS_CONTEXT, SMI, F, 1, S ), 215 CMD( MI_RS_CONTEXT, SMI, F, 1, S ),
216 CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ), 216 CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, M ),
217 CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ), 217 CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ),
218 CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, R ), 218 CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, W,
219 .reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 } ),
219 CMD( MI_RS_STORE_DATA_IMM, SMI, !F, 0xFF, S ), 220 CMD( MI_RS_STORE_DATA_IMM, SMI, !F, 0xFF, S ),
220 CMD( MI_LOAD_URB_MEM, SMI, !F, 0xFF, S ), 221 CMD( MI_LOAD_URB_MEM, SMI, !F, 0xFF, S ),
221 CMD( MI_STORE_URB_MEM, SMI, !F, 0xFF, S ), 222 CMD( MI_STORE_URB_MEM, SMI, !F, 0xFF, S ),
@@ -736,7 +737,7 @@ static void fini_hash_table(struct intel_engine_cs *engine)
736 737
737/** 738/**
738 * i915_cmd_parser_init_ring() - set cmd parser related fields for a ringbuffer 739 * i915_cmd_parser_init_ring() - set cmd parser related fields for a ringbuffer
739 * @ring: the ringbuffer to initialize 740 * @engine: the engine to initialize
740 * 741 *
741 * Optionally initializes fields related to batch buffer command parsing in the 742 * Optionally initializes fields related to batch buffer command parsing in the
742 * struct intel_engine_cs based on whether the platform requires software 743 * struct intel_engine_cs based on whether the platform requires software
@@ -750,12 +751,12 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
750 int cmd_table_count; 751 int cmd_table_count;
751 int ret; 752 int ret;
752 753
753 if (!IS_GEN7(engine->dev)) 754 if (!IS_GEN7(engine->i915))
754 return 0; 755 return 0;
755 756
756 switch (engine->id) { 757 switch (engine->id) {
757 case RCS: 758 case RCS:
758 if (IS_HASWELL(engine->dev)) { 759 if (IS_HASWELL(engine->i915)) {
759 cmd_tables = hsw_render_ring_cmds; 760 cmd_tables = hsw_render_ring_cmds;
760 cmd_table_count = 761 cmd_table_count =
761 ARRAY_SIZE(hsw_render_ring_cmds); 762 ARRAY_SIZE(hsw_render_ring_cmds);
@@ -764,7 +765,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
764 cmd_table_count = ARRAY_SIZE(gen7_render_cmds); 765 cmd_table_count = ARRAY_SIZE(gen7_render_cmds);
765 } 766 }
766 767
767 if (IS_HASWELL(engine->dev)) { 768 if (IS_HASWELL(engine->i915)) {
768 engine->reg_tables = hsw_render_reg_tables; 769 engine->reg_tables = hsw_render_reg_tables;
769 engine->reg_table_count = ARRAY_SIZE(hsw_render_reg_tables); 770 engine->reg_table_count = ARRAY_SIZE(hsw_render_reg_tables);
770 } else { 771 } else {
@@ -780,7 +781,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
780 engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask; 781 engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask;
781 break; 782 break;
782 case BCS: 783 case BCS:
783 if (IS_HASWELL(engine->dev)) { 784 if (IS_HASWELL(engine->i915)) {
784 cmd_tables = hsw_blt_ring_cmds; 785 cmd_tables = hsw_blt_ring_cmds;
785 cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds); 786 cmd_table_count = ARRAY_SIZE(hsw_blt_ring_cmds);
786 } else { 787 } else {
@@ -788,7 +789,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
788 cmd_table_count = ARRAY_SIZE(gen7_blt_cmds); 789 cmd_table_count = ARRAY_SIZE(gen7_blt_cmds);
789 } 790 }
790 791
791 if (IS_HASWELL(engine->dev)) { 792 if (IS_HASWELL(engine->i915)) {
792 engine->reg_tables = hsw_blt_reg_tables; 793 engine->reg_tables = hsw_blt_reg_tables;
793 engine->reg_table_count = ARRAY_SIZE(hsw_blt_reg_tables); 794 engine->reg_table_count = ARRAY_SIZE(hsw_blt_reg_tables);
794 } else { 795 } else {
@@ -829,7 +830,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
829 830
830/** 831/**
831 * i915_cmd_parser_fini_ring() - clean up cmd parser related fields 832 * i915_cmd_parser_fini_ring() - clean up cmd parser related fields
832 * @ring: the ringbuffer to clean up 833 * @engine: the engine to clean up
833 * 834 *
834 * Releases any resources related to command parsing that may have been 835 * Releases any resources related to command parsing that may have been
835 * initialized for the specified ring. 836 * initialized for the specified ring.
@@ -1023,7 +1024,7 @@ unpin_src:
1023 1024
1024/** 1025/**
1025 * i915_needs_cmd_parser() - should a given ring use software command parsing? 1026 * i915_needs_cmd_parser() - should a given ring use software command parsing?
1026 * @ring: the ring in question 1027 * @engine: the engine in question
1027 * 1028 *
1028 * Only certain platforms require software batch buffer command parsing, and 1029 * Only certain platforms require software batch buffer command parsing, and
1029 * only when enabled via module parameter. 1030 * only when enabled via module parameter.
@@ -1035,7 +1036,7 @@ bool i915_needs_cmd_parser(struct intel_engine_cs *engine)
1035 if (!engine->needs_cmd_parser) 1036 if (!engine->needs_cmd_parser)
1036 return false; 1037 return false;
1037 1038
1038 if (!USES_PPGTT(engine->dev)) 1039 if (!USES_PPGTT(engine->i915))
1039 return false; 1040 return false;
1040 1041
1041 return (i915.enable_cmd_parser == 1); 1042 return (i915.enable_cmd_parser == 1);
@@ -1098,6 +1099,11 @@ static bool check_cmd(const struct intel_engine_cs *engine,
1098 return false; 1099 return false;
1099 } 1100 }
1100 1101
1102 if (desc->cmd.value == MI_LOAD_REGISTER_REG) {
1103 DRM_DEBUG_DRIVER("CMD: Rejected LRR to OACONTROL\n");
1104 return false;
1105 }
1106
1101 if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1)) 1107 if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1))
1102 *oacontrol_set = (cmd[offset + 1] != 0); 1108 *oacontrol_set = (cmd[offset + 1] != 0);
1103 } 1109 }
@@ -1113,6 +1119,12 @@ static bool check_cmd(const struct intel_engine_cs *engine,
1113 return false; 1119 return false;
1114 } 1120 }
1115 1121
1122 if (desc->cmd.value == MI_LOAD_REGISTER_REG) {
1123 DRM_DEBUG_DRIVER("CMD: Rejected LRR to masked register 0x%08X\n",
1124 reg_addr);
1125 return false;
1126 }
1127
1116 if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1) && 1128 if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1) &&
1117 (offset + 2 > length || 1129 (offset + 2 > length ||
1118 (cmd[offset + 1] & reg->mask) != reg->value)) { 1130 (cmd[offset + 1] & reg->mask) != reg->value)) {
@@ -1164,7 +1176,7 @@ static bool check_cmd(const struct intel_engine_cs *engine,
1164 1176
1165/** 1177/**
1166 * i915_parse_cmds() - parse a submitted batch buffer for privilege violations 1178 * i915_parse_cmds() - parse a submitted batch buffer for privilege violations
1167 * @ring: the ring on which the batch is to execute 1179 * @engine: the engine on which the batch is to execute
1168 * @batch_obj: the batch buffer in question 1180 * @batch_obj: the batch buffer in question
1169 * @shadow_batch_obj: copy of the batch buffer in question 1181 * @shadow_batch_obj: copy of the batch buffer in question
1170 * @batch_start_offset: byte offset in the batch at which execution starts 1182 * @batch_start_offset: byte offset in the batch at which execution starts
@@ -1269,14 +1281,28 @@ int i915_parse_cmds(struct intel_engine_cs *engine,
1269 1281
1270/** 1282/**
1271 * i915_cmd_parser_get_version() - get the cmd parser version number 1283 * i915_cmd_parser_get_version() - get the cmd parser version number
1284 * @dev_priv: i915 device private
1272 * 1285 *
1273 * The cmd parser maintains a simple increasing integer version number suitable 1286 * The cmd parser maintains a simple increasing integer version number suitable
1274 * for passing to userspace clients to determine what operations are permitted. 1287 * for passing to userspace clients to determine what operations are permitted.
1275 * 1288 *
1276 * Return: the current version number of the cmd parser 1289 * Return: the current version number of the cmd parser
1277 */ 1290 */
1278int i915_cmd_parser_get_version(void) 1291int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
1279{ 1292{
1293 struct intel_engine_cs *engine;
1294 bool active = false;
1295
1296 /* If the command parser is not enabled, report 0 - unsupported */
1297 for_each_engine(engine, dev_priv) {
1298 if (i915_needs_cmd_parser(engine)) {
1299 active = true;
1300 break;
1301 }
1302 }
1303 if (!active)
1304 return 0;
1305
1280 /* 1306 /*
1281 * Command parser version history 1307 * Command parser version history
1282 * 1308 *
@@ -1288,6 +1314,7 @@ int i915_cmd_parser_get_version(void)
1288 * 4. L3 atomic chicken bits of HSW_SCRATCH1 and HSW_ROW_CHICKEN3. 1314 * 4. L3 atomic chicken bits of HSW_SCRATCH1 and HSW_ROW_CHICKEN3.
1289 * 5. GPGPU dispatch compute indirect registers. 1315 * 5. GPGPU dispatch compute indirect registers.
1290 * 6. TIMESTAMP register and Haswell CS GPR registers 1316 * 6. TIMESTAMP register and Haswell CS GPR registers
1317 * 7. Allow MI_LOAD_REGISTER_REG between whitelisted registers.
1291 */ 1318 */
1292 return 6; 1319 return 7;
1293} 1320}
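
Since i915_cmd_parser_get_version() now takes the device and reports 0 when no engine actually uses the parser, the value userspace sees is per device. A hedged userspace-side sketch of reading it through the long-standing I915_PARAM_CMD_PARSER_VERSION getparam; include paths and error handling depend on the libdrm installation.

#include <sys/ioctl.h>
#include <xf86drm.h>
#include <i915_drm.h>

static int query_cmd_parser_version(int drm_fd)
{
	int version = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_CMD_PARSER_VERSION,
		.value = &version,
	};

	if (drmIoctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return -1;	/* getparam ioctl failed */

	return version;	/* 0 means the parser is not active on this device */
}
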
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 2a6e12956baf..844fea795bae 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -89,17 +89,17 @@ static int i915_capabilities(struct seq_file *m, void *data)
89 return 0; 89 return 0;
90} 90}
91 91
92static const char get_active_flag(struct drm_i915_gem_object *obj) 92static char get_active_flag(struct drm_i915_gem_object *obj)
93{ 93{
94 return obj->active ? '*' : ' '; 94 return obj->active ? '*' : ' ';
95} 95}
96 96
97static const char get_pin_flag(struct drm_i915_gem_object *obj) 97static char get_pin_flag(struct drm_i915_gem_object *obj)
98{ 98{
99 return obj->pin_display ? 'p' : ' '; 99 return obj->pin_display ? 'p' : ' ';
100} 100}
101 101
102static const char get_tiling_flag(struct drm_i915_gem_object *obj) 102static char get_tiling_flag(struct drm_i915_gem_object *obj)
103{ 103{
104 switch (obj->tiling_mode) { 104 switch (obj->tiling_mode) {
105 default: 105 default:
@@ -109,12 +109,12 @@ static const char get_tiling_flag(struct drm_i915_gem_object *obj)
109 } 109 }
110} 110}
111 111
112static inline const char get_global_flag(struct drm_i915_gem_object *obj) 112static char get_global_flag(struct drm_i915_gem_object *obj)
113{ 113{
114 return i915_gem_obj_to_ggtt(obj) ? 'g' : ' '; 114 return i915_gem_obj_to_ggtt(obj) ? 'g' : ' ';
115} 115}
116 116
117static inline const char get_pin_mapped_flag(struct drm_i915_gem_object *obj) 117static char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
118{ 118{
119 return obj->mapping ? 'M' : ' '; 119 return obj->mapping ? 'M' : ' ';
120} 120}
@@ -199,13 +199,6 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
199 seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits); 199 seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
200} 200}
201 201
202static void describe_ctx(struct seq_file *m, struct intel_context *ctx)
203{
204 seq_putc(m, ctx->legacy_hw_ctx.initialized ? 'I' : 'i');
205 seq_putc(m, ctx->remap_slice ? 'R' : 'r');
206 seq_putc(m, ' ');
207}
208
209static int i915_gem_object_list_info(struct seq_file *m, void *data) 202static int i915_gem_object_list_info(struct seq_file *m, void *data)
210{ 203{
211 struct drm_info_node *node = m->private; 204 struct drm_info_node *node = m->private;
@@ -272,7 +265,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data)
272{ 265{
273 struct drm_info_node *node = m->private; 266 struct drm_info_node *node = m->private;
274 struct drm_device *dev = node->minor->dev; 267 struct drm_device *dev = node->minor->dev;
275 struct drm_i915_private *dev_priv = dev->dev_private; 268 struct drm_i915_private *dev_priv = to_i915(dev);
276 struct drm_i915_gem_object *obj; 269 struct drm_i915_gem_object *obj;
277 u64 total_obj_size, total_gtt_size; 270 u64 total_obj_size, total_gtt_size;
278 LIST_HEAD(stolen); 271 LIST_HEAD(stolen);
@@ -424,6 +417,42 @@ static void print_batch_pool_stats(struct seq_file *m,
424 print_file_stats(m, "[k]batch pool", stats); 417 print_file_stats(m, "[k]batch pool", stats);
425} 418}
426 419
420static int per_file_ctx_stats(int id, void *ptr, void *data)
421{
422 struct i915_gem_context *ctx = ptr;
423 int n;
424
425 for (n = 0; n < ARRAY_SIZE(ctx->engine); n++) {
426 if (ctx->engine[n].state)
427 per_file_stats(0, ctx->engine[n].state, data);
428 if (ctx->engine[n].ringbuf)
429 per_file_stats(0, ctx->engine[n].ringbuf->obj, data);
430 }
431
432 return 0;
433}
434
435static void print_context_stats(struct seq_file *m,
436 struct drm_i915_private *dev_priv)
437{
438 struct file_stats stats;
439 struct drm_file *file;
440
441 memset(&stats, 0, sizeof(stats));
442
443 mutex_lock(&dev_priv->drm.struct_mutex);
444 if (dev_priv->kernel_context)
445 per_file_ctx_stats(0, dev_priv->kernel_context, &stats);
446
447 list_for_each_entry(file, &dev_priv->drm.filelist, lhead) {
448 struct drm_i915_file_private *fpriv = file->driver_priv;
449 idr_for_each(&fpriv->context_idr, per_file_ctx_stats, &stats);
450 }
451 mutex_unlock(&dev_priv->drm.struct_mutex);
452
453 print_file_stats(m, "[k]contexts", stats);
454}
455
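
print_context_stats() above walks every open file's context_idr with idr_for_each(). A small self-contained sketch of that callback pattern; the stats structure and callback names are stand-ins:

#include <linux/idr.h>

struct example_stats {
	unsigned long count;
};

static int example_ctx_cb(int id, void *ptr, void *data)
{
	struct example_stats *stats = data;

	stats->count++;		/* invoked once per object stored in the IDR */
	return 0;		/* a nonzero return would stop the walk early */
}

static void example_count_contexts(struct idr *context_idr,
				   struct example_stats *stats)
{
	idr_for_each(context_idr, example_ctx_cb, stats);
}
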
427#define count_vmas(list, member) do { \ 456#define count_vmas(list, member) do { \
428 list_for_each_entry(vma, list, member) { \ 457 list_for_each_entry(vma, list, member) { \
429 size += i915_gem_obj_total_ggtt_size(vma->obj); \ 458 size += i915_gem_obj_total_ggtt_size(vma->obj); \
@@ -528,10 +557,10 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
528 557
529 seq_putc(m, '\n'); 558 seq_putc(m, '\n');
530 print_batch_pool_stats(m, dev_priv); 559 print_batch_pool_stats(m, dev_priv);
531
532 mutex_unlock(&dev->struct_mutex); 560 mutex_unlock(&dev->struct_mutex);
533 561
534 mutex_lock(&dev->filelist_mutex); 562 mutex_lock(&dev->filelist_mutex);
563 print_context_stats(m, dev_priv);
535 list_for_each_entry_reverse(file, &dev->filelist, lhead) { 564 list_for_each_entry_reverse(file, &dev->filelist, lhead) {
536 struct file_stats stats; 565 struct file_stats stats;
537 struct task_struct *task; 566 struct task_struct *task;
@@ -562,7 +591,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
562 struct drm_info_node *node = m->private; 591 struct drm_info_node *node = m->private;
563 struct drm_device *dev = node->minor->dev; 592 struct drm_device *dev = node->minor->dev;
564 uintptr_t list = (uintptr_t) node->info_ent->data; 593 uintptr_t list = (uintptr_t) node->info_ent->data;
565 struct drm_i915_private *dev_priv = dev->dev_private; 594 struct drm_i915_private *dev_priv = to_i915(dev);
566 struct drm_i915_gem_object *obj; 595 struct drm_i915_gem_object *obj;
567 u64 total_obj_size, total_gtt_size; 596 u64 total_obj_size, total_gtt_size;
568 int count, ret; 597 int count, ret;
@@ -596,7 +625,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
596{ 625{
597 struct drm_info_node *node = m->private; 626 struct drm_info_node *node = m->private;
598 struct drm_device *dev = node->minor->dev; 627 struct drm_device *dev = node->minor->dev;
599 struct drm_i915_private *dev_priv = dev->dev_private; 628 struct drm_i915_private *dev_priv = to_i915(dev);
600 struct intel_crtc *crtc; 629 struct intel_crtc *crtc;
601 int ret; 630 int ret;
602 631
@@ -607,18 +636,20 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
607 for_each_intel_crtc(dev, crtc) { 636 for_each_intel_crtc(dev, crtc) {
608 const char pipe = pipe_name(crtc->pipe); 637 const char pipe = pipe_name(crtc->pipe);
609 const char plane = plane_name(crtc->plane); 638 const char plane = plane_name(crtc->plane);
610 struct intel_unpin_work *work; 639 struct intel_flip_work *work;
611 640
612 spin_lock_irq(&dev->event_lock); 641 spin_lock_irq(&dev->event_lock);
613 work = crtc->unpin_work; 642 work = crtc->flip_work;
614 if (work == NULL) { 643 if (work == NULL) {
615 seq_printf(m, "No flip due on pipe %c (plane %c)\n", 644 seq_printf(m, "No flip due on pipe %c (plane %c)\n",
616 pipe, plane); 645 pipe, plane);
617 } else { 646 } else {
647 u32 pending;
618 u32 addr; 648 u32 addr;
619 649
620 if (atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) { 650 pending = atomic_read(&work->pending);
621 seq_printf(m, "Flip queued on pipe %c (plane %c)\n", 651 if (pending) {
652 seq_printf(m, "Flip ioctl preparing on pipe %c (plane %c)\n",
622 pipe, plane); 653 pipe, plane);
623 } else { 654 } else {
624 seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n", 655 seq_printf(m, "Flip pending (waiting for vsync) on pipe %c (plane %c)\n",
@@ -631,18 +662,14 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
631 engine->name, 662 engine->name,
632 i915_gem_request_get_seqno(work->flip_queued_req), 663 i915_gem_request_get_seqno(work->flip_queued_req),
633 dev_priv->next_seqno, 664 dev_priv->next_seqno,
634 engine->get_seqno(engine), 665 intel_engine_get_seqno(engine),
635 i915_gem_request_completed(work->flip_queued_req, true)); 666 i915_gem_request_completed(work->flip_queued_req));
636 } else 667 } else
637 seq_printf(m, "Flip not associated with any ring\n"); 668 seq_printf(m, "Flip not associated with any ring\n");
638 seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n", 669 seq_printf(m, "Flip queued on frame %d, (was ready on frame %d), now %d\n",
639 work->flip_queued_vblank, 670 work->flip_queued_vblank,
640 work->flip_ready_vblank, 671 work->flip_ready_vblank,
641 drm_crtc_vblank_count(&crtc->base)); 672 intel_crtc_get_vblank_counter(crtc));
642 if (work->enable_stall_check)
643 seq_puts(m, "Stall check enabled, ");
644 else
645 seq_puts(m, "Stall check waiting for page flip ioctl, ");
646 seq_printf(m, "%d prepares\n", atomic_read(&work->pending)); 673 seq_printf(m, "%d prepares\n", atomic_read(&work->pending));
647 674
648 if (INTEL_INFO(dev)->gen >= 4) 675 if (INTEL_INFO(dev)->gen >= 4)
@@ -668,7 +695,7 @@ static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
668{ 695{
669 struct drm_info_node *node = m->private; 696 struct drm_info_node *node = m->private;
670 struct drm_device *dev = node->minor->dev; 697 struct drm_device *dev = node->minor->dev;
671 struct drm_i915_private *dev_priv = dev->dev_private; 698 struct drm_i915_private *dev_priv = to_i915(dev);
672 struct drm_i915_gem_object *obj; 699 struct drm_i915_gem_object *obj;
673 struct intel_engine_cs *engine; 700 struct intel_engine_cs *engine;
674 int total = 0; 701 int total = 0;
@@ -713,7 +740,7 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
713{ 740{
714 struct drm_info_node *node = m->private; 741 struct drm_info_node *node = m->private;
715 struct drm_device *dev = node->minor->dev; 742 struct drm_device *dev = node->minor->dev;
716 struct drm_i915_private *dev_priv = dev->dev_private; 743 struct drm_i915_private *dev_priv = to_i915(dev);
717 struct intel_engine_cs *engine; 744 struct intel_engine_cs *engine;
718 struct drm_i915_gem_request *req; 745 struct drm_i915_gem_request *req;
719 int ret, any; 746 int ret, any;
@@ -761,17 +788,29 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
761static void i915_ring_seqno_info(struct seq_file *m, 788static void i915_ring_seqno_info(struct seq_file *m,
762 struct intel_engine_cs *engine) 789 struct intel_engine_cs *engine)
763{ 790{
791 struct intel_breadcrumbs *b = &engine->breadcrumbs;
792 struct rb_node *rb;
793
764 seq_printf(m, "Current sequence (%s): %x\n", 794 seq_printf(m, "Current sequence (%s): %x\n",
765 engine->name, engine->get_seqno(engine)); 795 engine->name, intel_engine_get_seqno(engine));
766 seq_printf(m, "Current user interrupts (%s): %x\n", 796 seq_printf(m, "Current user interrupts (%s): %lx\n",
767 engine->name, READ_ONCE(engine->user_interrupts)); 797 engine->name, READ_ONCE(engine->breadcrumbs.irq_wakeups));
798
799 spin_lock(&b->lock);
800 for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
801 struct intel_wait *w = container_of(rb, typeof(*w), node);
802
803 seq_printf(m, "Waiting (%s): %s [%d] on %x\n",
804 engine->name, w->tsk->comm, w->tsk->pid, w->seqno);
805 }
806 spin_unlock(&b->lock);
768} 807}
769 808
770static int i915_gem_seqno_info(struct seq_file *m, void *data) 809static int i915_gem_seqno_info(struct seq_file *m, void *data)
771{ 810{
772 struct drm_info_node *node = m->private; 811 struct drm_info_node *node = m->private;
773 struct drm_device *dev = node->minor->dev; 812 struct drm_device *dev = node->minor->dev;
774 struct drm_i915_private *dev_priv = dev->dev_private; 813 struct drm_i915_private *dev_priv = to_i915(dev);
775 struct intel_engine_cs *engine; 814 struct intel_engine_cs *engine;
776 int ret; 815 int ret;
777 816
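
The seqno debugfs hunk above is an early user of the new intel_breadcrumbs bookkeeping: waiters live in an rbtree (b->waiters) guarded by b->lock, and dumping them is a plain rbtree walk. A readable restatement of that loop, assuming the i915 internal headers are in scope:

static void example_dump_waiters(struct seq_file *m,
				 struct intel_engine_cs *engine)
{
	struct intel_breadcrumbs *b = &engine->breadcrumbs;
	struct rb_node *rb;

	spin_lock(&b->lock);
	for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
		struct intel_wait *w = container_of(rb, typeof(*w), node);

		/* one line per task currently waiting on this engine */
		seq_printf(m, "Waiting (%s): %s [%d] on %x\n",
			   engine->name, w->tsk->comm, w->tsk->pid, w->seqno);
	}
	spin_unlock(&b->lock);
}
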
@@ -794,7 +833,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
794{ 833{
795 struct drm_info_node *node = m->private; 834 struct drm_info_node *node = m->private;
796 struct drm_device *dev = node->minor->dev; 835 struct drm_device *dev = node->minor->dev;
797 struct drm_i915_private *dev_priv = dev->dev_private; 836 struct drm_i915_private *dev_priv = to_i915(dev);
798 struct intel_engine_cs *engine; 837 struct intel_engine_cs *engine;
799 int ret, i, pipe; 838 int ret, i, pipe;
800 839
@@ -985,7 +1024,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
985{ 1024{
986 struct drm_info_node *node = m->private; 1025 struct drm_info_node *node = m->private;
987 struct drm_device *dev = node->minor->dev; 1026 struct drm_device *dev = node->minor->dev;
988 struct drm_i915_private *dev_priv = dev->dev_private; 1027 struct drm_i915_private *dev_priv = to_i915(dev);
989 int i, ret; 1028 int i, ret;
990 1029
991 ret = mutex_lock_interruptible(&dev->struct_mutex); 1030 ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -1013,7 +1052,7 @@ static int i915_hws_info(struct seq_file *m, void *data)
1013{ 1052{
1014 struct drm_info_node *node = m->private; 1053 struct drm_info_node *node = m->private;
1015 struct drm_device *dev = node->minor->dev; 1054 struct drm_device *dev = node->minor->dev;
1016 struct drm_i915_private *dev_priv = dev->dev_private; 1055 struct drm_i915_private *dev_priv = to_i915(dev);
1017 struct intel_engine_cs *engine; 1056 struct intel_engine_cs *engine;
1018 const u32 *hws; 1057 const u32 *hws;
1019 int i; 1058 int i;
@@ -1124,7 +1163,7 @@ static int
1124i915_next_seqno_get(void *data, u64 *val) 1163i915_next_seqno_get(void *data, u64 *val)
1125{ 1164{
1126 struct drm_device *dev = data; 1165 struct drm_device *dev = data;
1127 struct drm_i915_private *dev_priv = dev->dev_private; 1166 struct drm_i915_private *dev_priv = to_i915(dev);
1128 int ret; 1167 int ret;
1129 1168
1130 ret = mutex_lock_interruptible(&dev->struct_mutex); 1169 ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -1161,7 +1200,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
1161{ 1200{
1162 struct drm_info_node *node = m->private; 1201 struct drm_info_node *node = m->private;
1163 struct drm_device *dev = node->minor->dev; 1202 struct drm_device *dev = node->minor->dev;
1164 struct drm_i915_private *dev_priv = dev->dev_private; 1203 struct drm_i915_private *dev_priv = to_i915(dev);
1165 int ret = 0; 1204 int ret = 0;
1166 1205
1167 intel_runtime_pm_get(dev_priv); 1206 intel_runtime_pm_get(dev_priv);
@@ -1281,6 +1320,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
1281 } 1320 }
1282 seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n", 1321 seq_printf(m, "PM IER=0x%08x IMR=0x%08x ISR=0x%08x IIR=0x%08x, MASK=0x%08x\n",
1283 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask); 1322 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask);
1323 seq_printf(m, "pm_intr_keep: 0x%08x\n", dev_priv->rps.pm_intr_keep);
1284 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); 1324 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
1285 seq_printf(m, "Render p-state ratio: %d\n", 1325 seq_printf(m, "Render p-state ratio: %d\n",
1286 (gt_perf_status & (IS_GEN9(dev) ? 0x1ff00 : 0xff00)) >> 8); 1326 (gt_perf_status & (IS_GEN9(dev) ? 0x1ff00 : 0xff00)) >> 8);
@@ -1363,7 +1403,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
1363{ 1403{
1364 struct drm_info_node *node = m->private; 1404 struct drm_info_node *node = m->private;
1365 struct drm_device *dev = node->minor->dev; 1405 struct drm_device *dev = node->minor->dev;
1366 struct drm_i915_private *dev_priv = dev->dev_private; 1406 struct drm_i915_private *dev_priv = to_i915(dev);
1367 struct intel_engine_cs *engine; 1407 struct intel_engine_cs *engine;
1368 u64 acthd[I915_NUM_ENGINES]; 1408 u64 acthd[I915_NUM_ENGINES];
1369 u32 seqno[I915_NUM_ENGINES]; 1409 u32 seqno[I915_NUM_ENGINES];
@@ -1380,10 +1420,10 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
1380 1420
1381 for_each_engine_id(engine, dev_priv, id) { 1421 for_each_engine_id(engine, dev_priv, id) {
1382 acthd[id] = intel_ring_get_active_head(engine); 1422 acthd[id] = intel_ring_get_active_head(engine);
1383 seqno[id] = engine->get_seqno(engine); 1423 seqno[id] = intel_engine_get_seqno(engine);
1384 } 1424 }
1385 1425
1386 i915_get_extra_instdone(dev, instdone); 1426 i915_get_extra_instdone(dev_priv, instdone);
1387 1427
1388 intel_runtime_pm_put(dev_priv); 1428 intel_runtime_pm_put(dev_priv);
1389 1429
@@ -1400,9 +1440,11 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
1400 engine->hangcheck.seqno, 1440 engine->hangcheck.seqno,
1401 seqno[id], 1441 seqno[id],
1402 engine->last_submitted_seqno); 1442 engine->last_submitted_seqno);
1403 seq_printf(m, "\tuser interrupts = %x [current %x]\n", 1443 seq_printf(m, "\twaiters? %d\n",
1444 intel_engine_has_waiter(engine));
1445 seq_printf(m, "\tuser interrupts = %lx [current %lx]\n",
1404 engine->hangcheck.user_interrupts, 1446 engine->hangcheck.user_interrupts,
1405 READ_ONCE(engine->user_interrupts)); 1447 READ_ONCE(engine->breadcrumbs.irq_wakeups));
1406 seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n", 1448 seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
1407 (long long)engine->hangcheck.acthd, 1449 (long long)engine->hangcheck.acthd,
1408 (long long)acthd[id]); 1450 (long long)acthd[id]);
@@ -1432,7 +1474,7 @@ static int ironlake_drpc_info(struct seq_file *m)
1432{ 1474{
1433 struct drm_info_node *node = m->private; 1475 struct drm_info_node *node = m->private;
1434 struct drm_device *dev = node->minor->dev; 1476 struct drm_device *dev = node->minor->dev;
1435 struct drm_i915_private *dev_priv = dev->dev_private; 1477 struct drm_i915_private *dev_priv = to_i915(dev);
1436 u32 rgvmodectl, rstdbyctl; 1478 u32 rgvmodectl, rstdbyctl;
1437 u16 crstandvid; 1479 u16 crstandvid;
1438 int ret; 1480 int ret;
@@ -1500,7 +1542,7 @@ static int i915_forcewake_domains(struct seq_file *m, void *data)
1500{ 1542{
1501 struct drm_info_node *node = m->private; 1543 struct drm_info_node *node = m->private;
1502 struct drm_device *dev = node->minor->dev; 1544 struct drm_device *dev = node->minor->dev;
1503 struct drm_i915_private *dev_priv = dev->dev_private; 1545 struct drm_i915_private *dev_priv = to_i915(dev);
1504 struct intel_uncore_forcewake_domain *fw_domain; 1546 struct intel_uncore_forcewake_domain *fw_domain;
1505 1547
1506 spin_lock_irq(&dev_priv->uncore.lock); 1548 spin_lock_irq(&dev_priv->uncore.lock);
@@ -1518,7 +1560,7 @@ static int vlv_drpc_info(struct seq_file *m)
1518{ 1560{
1519 struct drm_info_node *node = m->private; 1561 struct drm_info_node *node = m->private;
1520 struct drm_device *dev = node->minor->dev; 1562 struct drm_device *dev = node->minor->dev;
1521 struct drm_i915_private *dev_priv = dev->dev_private; 1563 struct drm_i915_private *dev_priv = to_i915(dev);
1522 u32 rpmodectl1, rcctl1, pw_status; 1564 u32 rpmodectl1, rcctl1, pw_status;
1523 1565
1524 intel_runtime_pm_get(dev_priv); 1566 intel_runtime_pm_get(dev_priv);
@@ -1558,7 +1600,7 @@ static int gen6_drpc_info(struct seq_file *m)
1558{ 1600{
1559 struct drm_info_node *node = m->private; 1601 struct drm_info_node *node = m->private;
1560 struct drm_device *dev = node->minor->dev; 1602 struct drm_device *dev = node->minor->dev;
1561 struct drm_i915_private *dev_priv = dev->dev_private; 1603 struct drm_i915_private *dev_priv = to_i915(dev);
1562 u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0; 1604 u32 rpmodectl1, gt_core_status, rcctl1, rc6vids = 0;
1563 unsigned forcewake_count; 1605 unsigned forcewake_count;
1564 int count = 0, ret; 1606 int count = 0, ret;
@@ -1670,7 +1712,7 @@ static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
1670{ 1712{
1671 struct drm_info_node *node = m->private; 1713 struct drm_info_node *node = m->private;
1672 struct drm_device *dev = node->minor->dev; 1714 struct drm_device *dev = node->minor->dev;
1673 struct drm_i915_private *dev_priv = dev->dev_private; 1715 struct drm_i915_private *dev_priv = to_i915(dev);
1674 1716
1675 seq_printf(m, "FB tracking busy bits: 0x%08x\n", 1717 seq_printf(m, "FB tracking busy bits: 0x%08x\n",
1676 dev_priv->fb_tracking.busy_bits); 1718 dev_priv->fb_tracking.busy_bits);
@@ -1685,7 +1727,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
1685{ 1727{
1686 struct drm_info_node *node = m->private; 1728 struct drm_info_node *node = m->private;
1687 struct drm_device *dev = node->minor->dev; 1729 struct drm_device *dev = node->minor->dev;
1688 struct drm_i915_private *dev_priv = dev->dev_private; 1730 struct drm_i915_private *dev_priv = to_i915(dev);
1689 1731
1690 if (!HAS_FBC(dev)) { 1732 if (!HAS_FBC(dev)) {
1691 seq_puts(m, "FBC unsupported on this chipset\n"); 1733 seq_puts(m, "FBC unsupported on this chipset\n");
@@ -1715,7 +1757,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
1715static int i915_fbc_fc_get(void *data, u64 *val) 1757static int i915_fbc_fc_get(void *data, u64 *val)
1716{ 1758{
1717 struct drm_device *dev = data; 1759 struct drm_device *dev = data;
1718 struct drm_i915_private *dev_priv = dev->dev_private; 1760 struct drm_i915_private *dev_priv = to_i915(dev);
1719 1761
1720 if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev)) 1762 if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
1721 return -ENODEV; 1763 return -ENODEV;
@@ -1728,7 +1770,7 @@ static int i915_fbc_fc_get(void *data, u64 *val)
1728static int i915_fbc_fc_set(void *data, u64 val) 1770static int i915_fbc_fc_set(void *data, u64 val)
1729{ 1771{
1730 struct drm_device *dev = data; 1772 struct drm_device *dev = data;
1731 struct drm_i915_private *dev_priv = dev->dev_private; 1773 struct drm_i915_private *dev_priv = to_i915(dev);
1732 u32 reg; 1774 u32 reg;
1733 1775
1734 if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev)) 1776 if (INTEL_INFO(dev)->gen < 7 || !HAS_FBC(dev))
@@ -1755,7 +1797,7 @@ static int i915_ips_status(struct seq_file *m, void *unused)
1755{ 1797{
1756 struct drm_info_node *node = m->private; 1798 struct drm_info_node *node = m->private;
1757 struct drm_device *dev = node->minor->dev; 1799 struct drm_device *dev = node->minor->dev;
1758 struct drm_i915_private *dev_priv = dev->dev_private; 1800 struct drm_i915_private *dev_priv = to_i915(dev);
1759 1801
1760 if (!HAS_IPS(dev)) { 1802 if (!HAS_IPS(dev)) {
1761 seq_puts(m, "not supported\n"); 1803 seq_puts(m, "not supported\n");
@@ -1785,7 +1827,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
1785{ 1827{
1786 struct drm_info_node *node = m->private; 1828 struct drm_info_node *node = m->private;
1787 struct drm_device *dev = node->minor->dev; 1829 struct drm_device *dev = node->minor->dev;
1788 struct drm_i915_private *dev_priv = dev->dev_private; 1830 struct drm_i915_private *dev_priv = to_i915(dev);
1789 bool sr_enabled = false; 1831 bool sr_enabled = false;
1790 1832
1791 intel_runtime_pm_get(dev_priv); 1833 intel_runtime_pm_get(dev_priv);
@@ -1814,7 +1856,7 @@ static int i915_emon_status(struct seq_file *m, void *unused)
1814{ 1856{
1815 struct drm_info_node *node = m->private; 1857 struct drm_info_node *node = m->private;
1816 struct drm_device *dev = node->minor->dev; 1858 struct drm_device *dev = node->minor->dev;
1817 struct drm_i915_private *dev_priv = dev->dev_private; 1859 struct drm_i915_private *dev_priv = to_i915(dev);
1818 unsigned long temp, chipset, gfx; 1860 unsigned long temp, chipset, gfx;
1819 int ret; 1861 int ret;
1820 1862
@@ -1842,7 +1884,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
1842{ 1884{
1843 struct drm_info_node *node = m->private; 1885 struct drm_info_node *node = m->private;
1844 struct drm_device *dev = node->minor->dev; 1886 struct drm_device *dev = node->minor->dev;
1845 struct drm_i915_private *dev_priv = dev->dev_private; 1887 struct drm_i915_private *dev_priv = to_i915(dev);
1846 int ret = 0; 1888 int ret = 0;
1847 int gpu_freq, ia_freq; 1889 int gpu_freq, ia_freq;
1848 unsigned int max_gpu_freq, min_gpu_freq; 1890 unsigned int max_gpu_freq, min_gpu_freq;
@@ -1897,7 +1939,7 @@ static int i915_opregion(struct seq_file *m, void *unused)
1897{ 1939{
1898 struct drm_info_node *node = m->private; 1940 struct drm_info_node *node = m->private;
1899 struct drm_device *dev = node->minor->dev; 1941 struct drm_device *dev = node->minor->dev;
1900 struct drm_i915_private *dev_priv = dev->dev_private; 1942 struct drm_i915_private *dev_priv = to_i915(dev);
1901 struct intel_opregion *opregion = &dev_priv->opregion; 1943 struct intel_opregion *opregion = &dev_priv->opregion;
1902 int ret; 1944 int ret;
1903 1945
@@ -1918,7 +1960,7 @@ static int i915_vbt(struct seq_file *m, void *unused)
1918{ 1960{
1919 struct drm_info_node *node = m->private; 1961 struct drm_info_node *node = m->private;
1920 struct drm_device *dev = node->minor->dev; 1962 struct drm_device *dev = node->minor->dev;
1921 struct drm_i915_private *dev_priv = dev->dev_private; 1963 struct drm_i915_private *dev_priv = to_i915(dev);
1922 struct intel_opregion *opregion = &dev_priv->opregion; 1964 struct intel_opregion *opregion = &dev_priv->opregion;
1923 1965
1924 if (opregion->vbt) 1966 if (opregion->vbt)
@@ -1940,19 +1982,19 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
1940 return ret; 1982 return ret;
1941 1983
1942#ifdef CONFIG_DRM_FBDEV_EMULATION 1984#ifdef CONFIG_DRM_FBDEV_EMULATION
1943 if (to_i915(dev)->fbdev) { 1985 if (to_i915(dev)->fbdev) {
1944 fbdev_fb = to_intel_framebuffer(to_i915(dev)->fbdev->helper.fb); 1986 fbdev_fb = to_intel_framebuffer(to_i915(dev)->fbdev->helper.fb);
1945 1987
1946 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ", 1988 seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
1947 fbdev_fb->base.width, 1989 fbdev_fb->base.width,
1948 fbdev_fb->base.height, 1990 fbdev_fb->base.height,
1949 fbdev_fb->base.depth, 1991 fbdev_fb->base.depth,
1950 fbdev_fb->base.bits_per_pixel, 1992 fbdev_fb->base.bits_per_pixel,
1951 fbdev_fb->base.modifier[0], 1993 fbdev_fb->base.modifier[0],
1952 drm_framebuffer_read_refcount(&fbdev_fb->base)); 1994 drm_framebuffer_read_refcount(&fbdev_fb->base));
1953 describe_obj(m, fbdev_fb->obj); 1995 describe_obj(m, fbdev_fb->obj);
1954 seq_putc(m, '\n'); 1996 seq_putc(m, '\n');
1955 } 1997 }
1956#endif 1998#endif
1957 1999
1958 mutex_lock(&dev->mode_config.fb_lock); 2000 mutex_lock(&dev->mode_config.fb_lock);
@@ -1989,10 +2031,9 @@ static int i915_context_status(struct seq_file *m, void *unused)
1989{ 2031{
1990 struct drm_info_node *node = m->private; 2032 struct drm_info_node *node = m->private;
1991 struct drm_device *dev = node->minor->dev; 2033 struct drm_device *dev = node->minor->dev;
1992 struct drm_i915_private *dev_priv = dev->dev_private; 2034 struct drm_i915_private *dev_priv = to_i915(dev);
1993 struct intel_engine_cs *engine; 2035 struct intel_engine_cs *engine;
1994 struct intel_context *ctx; 2036 struct i915_gem_context *ctx;
1995 enum intel_engine_id id;
1996 int ret; 2037 int ret;
1997 2038
1998 ret = mutex_lock_interruptible(&dev->struct_mutex); 2039 ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -2000,32 +2041,36 @@ static int i915_context_status(struct seq_file *m, void *unused)
2000 return ret; 2041 return ret;
2001 2042
2002 list_for_each_entry(ctx, &dev_priv->context_list, link) { 2043 list_for_each_entry(ctx, &dev_priv->context_list, link) {
2003 if (!i915.enable_execlists && 2044 seq_printf(m, "HW context %u ", ctx->hw_id);
2004 ctx->legacy_hw_ctx.rcs_state == NULL) 2045 if (IS_ERR(ctx->file_priv)) {
2005 continue; 2046 seq_puts(m, "(deleted) ");
2006 2047 } else if (ctx->file_priv) {
2007 seq_puts(m, "HW context "); 2048 struct pid *pid = ctx->file_priv->file->pid;
2008 describe_ctx(m, ctx); 2049 struct task_struct *task;
2009 if (ctx == dev_priv->kernel_context)
2010 seq_printf(m, "(kernel context) ");
2011 2050
2012 if (i915.enable_execlists) { 2051 task = get_pid_task(pid, PIDTYPE_PID);
2013 seq_putc(m, '\n'); 2052 if (task) {
2014 for_each_engine_id(engine, dev_priv, id) { 2053 seq_printf(m, "(%s [%d]) ",
2015 struct drm_i915_gem_object *ctx_obj = 2054 task->comm, task->pid);
2016 ctx->engine[id].state; 2055 put_task_struct(task);
2017 struct intel_ringbuffer *ringbuf =
2018 ctx->engine[id].ringbuf;
2019
2020 seq_printf(m, "%s: ", engine->name);
2021 if (ctx_obj)
2022 describe_obj(m, ctx_obj);
2023 if (ringbuf)
2024 describe_ctx_ringbuf(m, ringbuf);
2025 seq_putc(m, '\n');
2026 } 2056 }
2027 } else { 2057 } else {
2028 describe_obj(m, ctx->legacy_hw_ctx.rcs_state); 2058 seq_puts(m, "(kernel) ");
2059 }
2060
2061 seq_putc(m, ctx->remap_slice ? 'R' : 'r');
2062 seq_putc(m, '\n');
2063
2064 for_each_engine(engine, dev_priv) {
2065 struct intel_context *ce = &ctx->engine[engine->id];
2066
2067 seq_printf(m, "%s: ", engine->name);
2068 seq_putc(m, ce->initialised ? 'I' : 'i');
2069 if (ce->state)
2070 describe_obj(m, ce->state);
2071 if (ce->ringbuf)
2072 describe_ctx_ringbuf(m, ce->ringbuf);
2073 seq_putc(m, '\n');
2029 } 2074 }
2030 2075
2031 seq_putc(m, '\n'); 2076 seq_putc(m, '\n');
@@ -2037,24 +2082,22 @@ static int i915_context_status(struct seq_file *m, void *unused)
2037} 2082}
2038 2083
2039static void i915_dump_lrc_obj(struct seq_file *m, 2084static void i915_dump_lrc_obj(struct seq_file *m,
2040 struct intel_context *ctx, 2085 struct i915_gem_context *ctx,
2041 struct intel_engine_cs *engine) 2086 struct intel_engine_cs *engine)
2042{ 2087{
2088 struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
2043 struct page *page; 2089 struct page *page;
2044 uint32_t *reg_state; 2090 uint32_t *reg_state;
2045 int j; 2091 int j;
2046 struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
2047 unsigned long ggtt_offset = 0; 2092 unsigned long ggtt_offset = 0;
2048 2093
2094 seq_printf(m, "CONTEXT: %s %u\n", engine->name, ctx->hw_id);
2095
2049 if (ctx_obj == NULL) { 2096 if (ctx_obj == NULL) {
2050 seq_printf(m, "Context on %s with no gem object\n", 2097 seq_puts(m, "\tNot allocated\n");
2051 engine->name);
2052 return; 2098 return;
2053 } 2099 }
2054 2100
2055 seq_printf(m, "CONTEXT: %s %u\n", engine->name,
2056 intel_execlists_ctx_id(ctx, engine));
2057
2058 if (!i915_gem_obj_ggtt_bound(ctx_obj)) 2101 if (!i915_gem_obj_ggtt_bound(ctx_obj))
2059 seq_puts(m, "\tNot bound in GGTT\n"); 2102 seq_puts(m, "\tNot bound in GGTT\n");
2060 else 2103 else
@@ -2085,9 +2128,9 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
2085{ 2128{
2086 struct drm_info_node *node = (struct drm_info_node *) m->private; 2129 struct drm_info_node *node = (struct drm_info_node *) m->private;
2087 struct drm_device *dev = node->minor->dev; 2130 struct drm_device *dev = node->minor->dev;
2088 struct drm_i915_private *dev_priv = dev->dev_private; 2131 struct drm_i915_private *dev_priv = to_i915(dev);
2089 struct intel_engine_cs *engine; 2132 struct intel_engine_cs *engine;
2090 struct intel_context *ctx; 2133 struct i915_gem_context *ctx;
2091 int ret; 2134 int ret;
2092 2135
2093 if (!i915.enable_execlists) { 2136 if (!i915.enable_execlists) {
@@ -2100,10 +2143,8 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
2100 return ret; 2143 return ret;
2101 2144
2102 list_for_each_entry(ctx, &dev_priv->context_list, link) 2145 list_for_each_entry(ctx, &dev_priv->context_list, link)
2103 if (ctx != dev_priv->kernel_context) { 2146 for_each_engine(engine, dev_priv)
2104 for_each_engine(engine, dev_priv) 2147 i915_dump_lrc_obj(m, ctx, engine);
2105 i915_dump_lrc_obj(m, ctx, engine);
2106 }
2107 2148
2108 mutex_unlock(&dev->struct_mutex); 2149 mutex_unlock(&dev->struct_mutex);
2109 2150
@@ -2114,7 +2155,7 @@ static int i915_execlists(struct seq_file *m, void *data)
2114{ 2155{
2115 struct drm_info_node *node = (struct drm_info_node *)m->private; 2156 struct drm_info_node *node = (struct drm_info_node *)m->private;
2116 struct drm_device *dev = node->minor->dev; 2157 struct drm_device *dev = node->minor->dev;
2117 struct drm_i915_private *dev_priv = dev->dev_private; 2158 struct drm_i915_private *dev_priv = to_i915(dev);
2118 struct intel_engine_cs *engine; 2159 struct intel_engine_cs *engine;
2119 u32 status_pointer; 2160 u32 status_pointer;
2120 u8 read_pointer; 2161 u8 read_pointer;
@@ -2174,8 +2215,8 @@ static int i915_execlists(struct seq_file *m, void *data)
2174 2215
2175 seq_printf(m, "\t%d requests in queue\n", count); 2216 seq_printf(m, "\t%d requests in queue\n", count);
2176 if (head_req) { 2217 if (head_req) {
2177 seq_printf(m, "\tHead request id: %u\n", 2218 seq_printf(m, "\tHead request context: %u\n",
2178 intel_execlists_ctx_id(head_req->ctx, engine)); 2219 head_req->ctx->hw_id);
2179 seq_printf(m, "\tHead request tail: %u\n", 2220 seq_printf(m, "\tHead request tail: %u\n",
2180 head_req->tail); 2221 head_req->tail);
2181 } 2222 }
@@ -2217,7 +2258,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
2217{ 2258{
2218 struct drm_info_node *node = m->private; 2259 struct drm_info_node *node = m->private;
2219 struct drm_device *dev = node->minor->dev; 2260 struct drm_device *dev = node->minor->dev;
2220 struct drm_i915_private *dev_priv = dev->dev_private; 2261 struct drm_i915_private *dev_priv = to_i915(dev);
2221 int ret; 2262 int ret;
2222 2263
2223 ret = mutex_lock_interruptible(&dev->struct_mutex); 2264 ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -2269,7 +2310,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
2269 2310
2270static int per_file_ctx(int id, void *ptr, void *data) 2311static int per_file_ctx(int id, void *ptr, void *data)
2271{ 2312{
2272 struct intel_context *ctx = ptr; 2313 struct i915_gem_context *ctx = ptr;
2273 struct seq_file *m = data; 2314 struct seq_file *m = data;
2274 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt; 2315 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
2275 2316
@@ -2290,7 +2331,7 @@ static int per_file_ctx(int id, void *ptr, void *data)
2290 2331
2291static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev) 2332static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
2292{ 2333{
2293 struct drm_i915_private *dev_priv = dev->dev_private; 2334 struct drm_i915_private *dev_priv = to_i915(dev);
2294 struct intel_engine_cs *engine; 2335 struct intel_engine_cs *engine;
2295 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt; 2336 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2296 int i; 2337 int i;
@@ -2311,15 +2352,15 @@ static void gen8_ppgtt_info(struct seq_file *m, struct drm_device *dev)
2311 2352
2312static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev) 2353static void gen6_ppgtt_info(struct seq_file *m, struct drm_device *dev)
2313{ 2354{
2314 struct drm_i915_private *dev_priv = dev->dev_private; 2355 struct drm_i915_private *dev_priv = to_i915(dev);
2315 struct intel_engine_cs *engine; 2356 struct intel_engine_cs *engine;
2316 2357
2317 if (INTEL_INFO(dev)->gen == 6) 2358 if (IS_GEN6(dev_priv))
2318 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE)); 2359 seq_printf(m, "GFX_MODE: 0x%08x\n", I915_READ(GFX_MODE));
2319 2360
2320 for_each_engine(engine, dev_priv) { 2361 for_each_engine(engine, dev_priv) {
2321 seq_printf(m, "%s\n", engine->name); 2362 seq_printf(m, "%s\n", engine->name);
2322 if (INTEL_INFO(dev)->gen == 7) 2363 if (IS_GEN7(dev_priv))
2323 seq_printf(m, "GFX_MODE: 0x%08x\n", 2364 seq_printf(m, "GFX_MODE: 0x%08x\n",
2324 I915_READ(RING_MODE_GEN7(engine))); 2365 I915_READ(RING_MODE_GEN7(engine)));
2325 seq_printf(m, "PP_DIR_BASE: 0x%08x\n", 2366 seq_printf(m, "PP_DIR_BASE: 0x%08x\n",
@@ -2345,7 +2386,7 @@ static int i915_ppgtt_info(struct seq_file *m, void *data)
2345{ 2386{
2346 struct drm_info_node *node = m->private; 2387 struct drm_info_node *node = m->private;
2347 struct drm_device *dev = node->minor->dev; 2388 struct drm_device *dev = node->minor->dev;
2348 struct drm_i915_private *dev_priv = dev->dev_private; 2389 struct drm_i915_private *dev_priv = to_i915(dev);
2349 struct drm_file *file; 2390 struct drm_file *file;
2350 2391
2351 int ret = mutex_lock_interruptible(&dev->struct_mutex); 2392 int ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -2388,7 +2429,7 @@ static int count_irq_waiters(struct drm_i915_private *i915)
2388 int count = 0; 2429 int count = 0;
2389 2430
2390 for_each_engine(engine, i915) 2431 for_each_engine(engine, i915)
2391 count += engine->irq_refcount; 2432 count += intel_engine_has_waiter(engine);
2392 2433
2393 return count; 2434 return count;
2394} 2435}
@@ -2397,11 +2438,12 @@ static int i915_rps_boost_info(struct seq_file *m, void *data)
2397{ 2438{
2398 struct drm_info_node *node = m->private; 2439 struct drm_info_node *node = m->private;
2399 struct drm_device *dev = node->minor->dev; 2440 struct drm_device *dev = node->minor->dev;
2400 struct drm_i915_private *dev_priv = dev->dev_private; 2441 struct drm_i915_private *dev_priv = to_i915(dev);
2401 struct drm_file *file; 2442 struct drm_file *file;
2402 2443
2403 seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled); 2444 seq_printf(m, "RPS enabled? %d\n", dev_priv->rps.enabled);
2404 seq_printf(m, "GPU busy? %d\n", dev_priv->mm.busy); 2445 seq_printf(m, "GPU busy? %s [%x]\n",
2446 yesno(dev_priv->gt.awake), dev_priv->gt.active_engines);
2405 seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv)); 2447 seq_printf(m, "CPU waiting? %d\n", count_irq_waiters(dev_priv));
2406 seq_printf(m, "Frequency requested %d; min hard:%d, soft:%d; max soft:%d, hard:%d\n", 2448 seq_printf(m, "Frequency requested %d; min hard:%d, soft:%d; max soft:%d, hard:%d\n",
2407 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq), 2449 intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
@@ -2442,7 +2484,7 @@ static int i915_llc(struct seq_file *m, void *data)
2442{ 2484{
2443 struct drm_info_node *node = m->private; 2485 struct drm_info_node *node = m->private;
2444 struct drm_device *dev = node->minor->dev; 2486 struct drm_device *dev = node->minor->dev;
2445 struct drm_i915_private *dev_priv = dev->dev_private; 2487 struct drm_i915_private *dev_priv = to_i915(dev);
2446 const bool edram = INTEL_GEN(dev_priv) > 8; 2488 const bool edram = INTEL_GEN(dev_priv) > 8;
2447 2489
2448 seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev))); 2490 seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
@@ -2455,7 +2497,7 @@ static int i915_llc(struct seq_file *m, void *data)
2455static int i915_guc_load_status_info(struct seq_file *m, void *data) 2497static int i915_guc_load_status_info(struct seq_file *m, void *data)
2456{ 2498{
2457 struct drm_info_node *node = m->private; 2499 struct drm_info_node *node = m->private;
2458 struct drm_i915_private *dev_priv = node->minor->dev->dev_private; 2500 struct drm_i915_private *dev_priv = to_i915(node->minor->dev);
2459 struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; 2501 struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
2460 u32 tmp, i; 2502 u32 tmp, i;
2461 2503
@@ -2510,15 +2552,16 @@ static void i915_guc_client_info(struct seq_file *m,
2510 seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n", 2552 seq_printf(m, "\tWQ size %d, offset: 0x%x, tail %d\n",
2511 client->wq_size, client->wq_offset, client->wq_tail); 2553 client->wq_size, client->wq_offset, client->wq_tail);
2512 2554
2555 seq_printf(m, "\tWork queue full: %u\n", client->no_wq_space);
2513 seq_printf(m, "\tFailed to queue: %u\n", client->q_fail); 2556 seq_printf(m, "\tFailed to queue: %u\n", client->q_fail);
2514 seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail); 2557 seq_printf(m, "\tFailed doorbell: %u\n", client->b_fail);
2515 seq_printf(m, "\tLast submission result: %d\n", client->retcode); 2558 seq_printf(m, "\tLast submission result: %d\n", client->retcode);
2516 2559
2517 for_each_engine(engine, dev_priv) { 2560 for_each_engine(engine, dev_priv) {
2518 seq_printf(m, "\tSubmissions: %llu %s\n", 2561 seq_printf(m, "\tSubmissions: %llu %s\n",
2519 client->submissions[engine->guc_id], 2562 client->submissions[engine->id],
2520 engine->name); 2563 engine->name);
2521 tot += client->submissions[engine->guc_id]; 2564 tot += client->submissions[engine->id];
2522 } 2565 }
2523 seq_printf(m, "\tTotal: %llu\n", tot); 2566 seq_printf(m, "\tTotal: %llu\n", tot);
2524} 2567}
@@ -2527,7 +2570,7 @@ static int i915_guc_info(struct seq_file *m, void *data)
2527{ 2570{
2528 struct drm_info_node *node = m->private; 2571 struct drm_info_node *node = m->private;
2529 struct drm_device *dev = node->minor->dev; 2572 struct drm_device *dev = node->minor->dev;
2530 struct drm_i915_private *dev_priv = dev->dev_private; 2573 struct drm_i915_private *dev_priv = to_i915(dev);
2531 struct intel_guc guc; 2574 struct intel_guc guc;
2532 struct i915_guc_client client = {}; 2575 struct i915_guc_client client = {};
2533 struct intel_engine_cs *engine; 2576 struct intel_engine_cs *engine;
@@ -2546,6 +2589,10 @@ static int i915_guc_info(struct seq_file *m, void *data)
2546 2589
2547 mutex_unlock(&dev->struct_mutex); 2590 mutex_unlock(&dev->struct_mutex);
2548 2591
2592 seq_printf(m, "Doorbell map:\n");
2593 seq_printf(m, "\t%*pb\n", GUC_MAX_DOORBELLS, guc.doorbell_bitmap);
2594 seq_printf(m, "Doorbell next cacheline: 0x%x\n\n", guc.db_cacheline);
2595
2549 seq_printf(m, "GuC total action count: %llu\n", guc.action_count); 2596 seq_printf(m, "GuC total action count: %llu\n", guc.action_count);
2550 seq_printf(m, "GuC action failure count: %u\n", guc.action_fail); 2597 seq_printf(m, "GuC action failure count: %u\n", guc.action_fail);
2551 seq_printf(m, "GuC last action command: 0x%x\n", guc.action_cmd); 2598 seq_printf(m, "GuC last action command: 0x%x\n", guc.action_cmd);
@@ -2555,9 +2602,9 @@ static int i915_guc_info(struct seq_file *m, void *data)
2555 seq_printf(m, "\nGuC submissions:\n"); 2602 seq_printf(m, "\nGuC submissions:\n");
2556 for_each_engine(engine, dev_priv) { 2603 for_each_engine(engine, dev_priv) {
2557 seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n", 2604 seq_printf(m, "\t%-24s: %10llu, last seqno 0x%08x\n",
2558 engine->name, guc.submissions[engine->guc_id], 2605 engine->name, guc.submissions[engine->id],
2559 guc.last_seqno[engine->guc_id]); 2606 guc.last_seqno[engine->id]);
2560 total += guc.submissions[engine->guc_id]; 2607 total += guc.submissions[engine->id];
2561 } 2608 }
2562 seq_printf(m, "\t%s: %llu\n", "Total", total); 2609 seq_printf(m, "\t%s: %llu\n", "Total", total);
2563 2610
@@ -2573,7 +2620,7 @@ static int i915_guc_log_dump(struct seq_file *m, void *data)
2573{ 2620{
2574 struct drm_info_node *node = m->private; 2621 struct drm_info_node *node = m->private;
2575 struct drm_device *dev = node->minor->dev; 2622 struct drm_device *dev = node->minor->dev;
2576 struct drm_i915_private *dev_priv = dev->dev_private; 2623 struct drm_i915_private *dev_priv = to_i915(dev);
2577 struct drm_i915_gem_object *log_obj = dev_priv->guc.log_obj; 2624 struct drm_i915_gem_object *log_obj = dev_priv->guc.log_obj;
2578 u32 *log; 2625 u32 *log;
2579 int i = 0, pg; 2626 int i = 0, pg;
@@ -2601,7 +2648,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data)
2601{ 2648{
2602 struct drm_info_node *node = m->private; 2649 struct drm_info_node *node = m->private;
2603 struct drm_device *dev = node->minor->dev; 2650 struct drm_device *dev = node->minor->dev;
2604 struct drm_i915_private *dev_priv = dev->dev_private; 2651 struct drm_i915_private *dev_priv = to_i915(dev);
2605 u32 psrperf = 0; 2652 u32 psrperf = 0;
2606 u32 stat[3]; 2653 u32 stat[3];
2607 enum pipe pipe; 2654 enum pipe pipe;
@@ -2669,7 +2716,6 @@ static int i915_sink_crc(struct seq_file *m, void *data)
2669{ 2716{
2670 struct drm_info_node *node = m->private; 2717 struct drm_info_node *node = m->private;
2671 struct drm_device *dev = node->minor->dev; 2718 struct drm_device *dev = node->minor->dev;
2672 struct intel_encoder *encoder;
2673 struct intel_connector *connector; 2719 struct intel_connector *connector;
2674 struct intel_dp *intel_dp = NULL; 2720 struct intel_dp *intel_dp = NULL;
2675 int ret; 2721 int ret;
@@ -2677,18 +2723,19 @@ static int i915_sink_crc(struct seq_file *m, void *data)
2677 2723
2678 drm_modeset_lock_all(dev); 2724 drm_modeset_lock_all(dev);
2679 for_each_intel_connector(dev, connector) { 2725 for_each_intel_connector(dev, connector) {
2726 struct drm_crtc *crtc;
2680 2727
2681 if (connector->base.dpms != DRM_MODE_DPMS_ON) 2728 if (!connector->base.state->best_encoder)
2682 continue; 2729 continue;
2683 2730
2684 if (!connector->base.encoder) 2731 crtc = connector->base.state->crtc;
2732 if (!crtc->state->active)
2685 continue; 2733 continue;
2686 2734
2687 encoder = to_intel_encoder(connector->base.encoder); 2735 if (connector->base.connector_type != DRM_MODE_CONNECTOR_eDP)
2688 if (encoder->type != INTEL_OUTPUT_EDP)
2689 continue; 2736 continue;
2690 2737
2691 intel_dp = enc_to_intel_dp(&encoder->base); 2738 intel_dp = enc_to_intel_dp(connector->base.state->best_encoder);
2692 2739
2693 ret = intel_dp_sink_crc(intel_dp, crc); 2740 ret = intel_dp_sink_crc(intel_dp, crc);
2694 if (ret) 2741 if (ret)
@@ -2709,7 +2756,7 @@ static int i915_energy_uJ(struct seq_file *m, void *data)
2709{ 2756{
2710 struct drm_info_node *node = m->private; 2757 struct drm_info_node *node = m->private;
2711 struct drm_device *dev = node->minor->dev; 2758 struct drm_device *dev = node->minor->dev;
2712 struct drm_i915_private *dev_priv = dev->dev_private; 2759 struct drm_i915_private *dev_priv = to_i915(dev);
2713 u64 power; 2760 u64 power;
2714 u32 units; 2761 u32 units;
2715 2762
@@ -2735,12 +2782,12 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
2735{ 2782{
2736 struct drm_info_node *node = m->private; 2783 struct drm_info_node *node = m->private;
2737 struct drm_device *dev = node->minor->dev; 2784 struct drm_device *dev = node->minor->dev;
2738 struct drm_i915_private *dev_priv = dev->dev_private; 2785 struct drm_i915_private *dev_priv = to_i915(dev);
2739 2786
2740 if (!HAS_RUNTIME_PM(dev_priv)) 2787 if (!HAS_RUNTIME_PM(dev_priv))
2741 seq_puts(m, "Runtime power management not supported\n"); 2788 seq_puts(m, "Runtime power management not supported\n");
2742 2789
2743 seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->mm.busy)); 2790 seq_printf(m, "GPU idle: %s\n", yesno(!dev_priv->gt.awake));
2744 seq_printf(m, "IRQs disabled: %s\n", 2791 seq_printf(m, "IRQs disabled: %s\n",
2745 yesno(!intel_irqs_enabled(dev_priv))); 2792 yesno(!intel_irqs_enabled(dev_priv)));
2746#ifdef CONFIG_PM 2793#ifdef CONFIG_PM
@@ -2750,8 +2797,8 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
2750 seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n"); 2797 seq_printf(m, "Device Power Management (CONFIG_PM) disabled\n");
2751#endif 2798#endif
2752 seq_printf(m, "PCI device power state: %s [%d]\n", 2799 seq_printf(m, "PCI device power state: %s [%d]\n",
2753 pci_power_name(dev_priv->dev->pdev->current_state), 2800 pci_power_name(dev_priv->drm.pdev->current_state),
2754 dev_priv->dev->pdev->current_state); 2801 dev_priv->drm.pdev->current_state);
2755 2802
2756 return 0; 2803 return 0;
2757} 2804}
@@ -2760,7 +2807,7 @@ static int i915_power_domain_info(struct seq_file *m, void *unused)
2760{ 2807{
2761 struct drm_info_node *node = m->private; 2808 struct drm_info_node *node = m->private;
2762 struct drm_device *dev = node->minor->dev; 2809 struct drm_device *dev = node->minor->dev;
2763 struct drm_i915_private *dev_priv = dev->dev_private; 2810 struct drm_i915_private *dev_priv = to_i915(dev);
2764 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2811 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2765 int i; 2812 int i;
2766 2813
@@ -2795,7 +2842,7 @@ static int i915_dmc_info(struct seq_file *m, void *unused)
2795{ 2842{
2796 struct drm_info_node *node = m->private; 2843 struct drm_info_node *node = m->private;
2797 struct drm_device *dev = node->minor->dev; 2844 struct drm_device *dev = node->minor->dev;
2798 struct drm_i915_private *dev_priv = dev->dev_private; 2845 struct drm_i915_private *dev_priv = to_i915(dev);
2799 struct intel_csr *csr; 2846 struct intel_csr *csr;
2800 2847
2801 if (!HAS_CSR(dev)) { 2848 if (!HAS_CSR(dev)) {
@@ -2918,7 +2965,7 @@ static void intel_dp_info(struct seq_file *m,
2918 2965
2919 seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]); 2966 seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
2920 seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio)); 2967 seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
2921 if (intel_encoder->type == INTEL_OUTPUT_EDP) 2968 if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
2922 intel_panel_info(m, &intel_connector->panel); 2969 intel_panel_info(m, &intel_connector->panel);
2923} 2970}
2924 2971
@@ -2957,14 +3004,26 @@ static void intel_connector_info(struct seq_file *m,
2957 seq_printf(m, "\tCEA rev: %d\n", 3004 seq_printf(m, "\tCEA rev: %d\n",
2958 connector->display_info.cea_rev); 3005 connector->display_info.cea_rev);
2959 } 3006 }
2960 if (intel_encoder) { 3007
2961 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT || 3008 if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
2962 intel_encoder->type == INTEL_OUTPUT_EDP) 3009 return;
2963 intel_dp_info(m, intel_connector); 3010
2964 else if (intel_encoder->type == INTEL_OUTPUT_HDMI) 3011 switch (connector->connector_type) {
2965 intel_hdmi_info(m, intel_connector); 3012 case DRM_MODE_CONNECTOR_DisplayPort:
2966 else if (intel_encoder->type == INTEL_OUTPUT_LVDS) 3013 case DRM_MODE_CONNECTOR_eDP:
3014 intel_dp_info(m, intel_connector);
3015 break;
3016 case DRM_MODE_CONNECTOR_LVDS:
3017 if (intel_encoder->type == INTEL_OUTPUT_LVDS)
2967 intel_lvds_info(m, intel_connector); 3018 intel_lvds_info(m, intel_connector);
3019 break;
3020 case DRM_MODE_CONNECTOR_HDMIA:
3021 if (intel_encoder->type == INTEL_OUTPUT_HDMI ||
3022 intel_encoder->type == INTEL_OUTPUT_UNKNOWN)
3023 intel_hdmi_info(m, intel_connector);
3024 break;
3025 default:
3026 break;
2968 } 3027 }
2969 3028
2970 seq_printf(m, "\tmodes:\n"); 3029 seq_printf(m, "\tmodes:\n");
@@ -2974,7 +3033,7 @@ static void intel_connector_info(struct seq_file *m,
2974 3033
2975static bool cursor_active(struct drm_device *dev, int pipe) 3034static bool cursor_active(struct drm_device *dev, int pipe)
2976{ 3035{
2977 struct drm_i915_private *dev_priv = dev->dev_private; 3036 struct drm_i915_private *dev_priv = to_i915(dev);
2978 u32 state; 3037 u32 state;
2979 3038
2980 if (IS_845G(dev) || IS_I865G(dev)) 3039 if (IS_845G(dev) || IS_I865G(dev))
@@ -2987,7 +3046,7 @@ static bool cursor_active(struct drm_device *dev, int pipe)
2987 3046
2988static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y) 3047static bool cursor_position(struct drm_device *dev, int pipe, int *x, int *y)
2989{ 3048{
2990 struct drm_i915_private *dev_priv = dev->dev_private; 3049 struct drm_i915_private *dev_priv = to_i915(dev);
2991 u32 pos; 3050 u32 pos;
2992 3051
2993 pos = I915_READ(CURPOS(pipe)); 3052 pos = I915_READ(CURPOS(pipe));
@@ -3108,7 +3167,7 @@ static int i915_display_info(struct seq_file *m, void *unused)
3108{ 3167{
3109 struct drm_info_node *node = m->private; 3168 struct drm_info_node *node = m->private;
3110 struct drm_device *dev = node->minor->dev; 3169 struct drm_device *dev = node->minor->dev;
3111 struct drm_i915_private *dev_priv = dev->dev_private; 3170 struct drm_i915_private *dev_priv = to_i915(dev);
3112 struct intel_crtc *crtc; 3171 struct intel_crtc *crtc;
3113 struct drm_connector *connector; 3172 struct drm_connector *connector;
3114 3173
@@ -3163,13 +3222,13 @@ static int i915_semaphore_status(struct seq_file *m, void *unused)
3163{ 3222{
3164 struct drm_info_node *node = (struct drm_info_node *) m->private; 3223 struct drm_info_node *node = (struct drm_info_node *) m->private;
3165 struct drm_device *dev = node->minor->dev; 3224 struct drm_device *dev = node->minor->dev;
3166 struct drm_i915_private *dev_priv = dev->dev_private; 3225 struct drm_i915_private *dev_priv = to_i915(dev);
3167 struct intel_engine_cs *engine; 3226 struct intel_engine_cs *engine;
3168 int num_rings = hweight32(INTEL_INFO(dev)->ring_mask); 3227 int num_rings = hweight32(INTEL_INFO(dev)->ring_mask);
3169 enum intel_engine_id id; 3228 enum intel_engine_id id;
3170 int j, ret; 3229 int j, ret;
3171 3230
3172 if (!i915_semaphore_is_enabled(dev)) { 3231 if (!i915_semaphore_is_enabled(dev_priv)) {
3173 seq_puts(m, "Semaphores are disabled\n"); 3232 seq_puts(m, "Semaphores are disabled\n");
3174 return 0; 3233 return 0;
3175 } 3234 }
@@ -3236,7 +3295,7 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
3236{ 3295{
3237 struct drm_info_node *node = (struct drm_info_node *) m->private; 3296 struct drm_info_node *node = (struct drm_info_node *) m->private;
3238 struct drm_device *dev = node->minor->dev; 3297 struct drm_device *dev = node->minor->dev;
3239 struct drm_i915_private *dev_priv = dev->dev_private; 3298 struct drm_i915_private *dev_priv = to_i915(dev);
3240 int i; 3299 int i;
3241 3300
3242 drm_modeset_lock_all(dev); 3301 drm_modeset_lock_all(dev);
@@ -3266,7 +3325,7 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
3266 struct intel_engine_cs *engine; 3325 struct intel_engine_cs *engine;
3267 struct drm_info_node *node = (struct drm_info_node *) m->private; 3326 struct drm_info_node *node = (struct drm_info_node *) m->private;
3268 struct drm_device *dev = node->minor->dev; 3327 struct drm_device *dev = node->minor->dev;
3269 struct drm_i915_private *dev_priv = dev->dev_private; 3328 struct drm_i915_private *dev_priv = to_i915(dev);
3270 struct i915_workarounds *workarounds = &dev_priv->workarounds; 3329 struct i915_workarounds *workarounds = &dev_priv->workarounds;
3271 enum intel_engine_id id; 3330 enum intel_engine_id id;
3272 3331
@@ -3304,7 +3363,7 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
3304{ 3363{
3305 struct drm_info_node *node = m->private; 3364 struct drm_info_node *node = m->private;
3306 struct drm_device *dev = node->minor->dev; 3365 struct drm_device *dev = node->minor->dev;
3307 struct drm_i915_private *dev_priv = dev->dev_private; 3366 struct drm_i915_private *dev_priv = to_i915(dev);
3308 struct skl_ddb_allocation *ddb; 3367 struct skl_ddb_allocation *ddb;
3309 struct skl_ddb_entry *entry; 3368 struct skl_ddb_entry *entry;
3310 enum pipe pipe; 3369 enum pipe pipe;
@@ -3342,31 +3401,16 @@ static int i915_ddb_info(struct seq_file *m, void *unused)
3342static void drrs_status_per_crtc(struct seq_file *m, 3401static void drrs_status_per_crtc(struct seq_file *m,
3343 struct drm_device *dev, struct intel_crtc *intel_crtc) 3402 struct drm_device *dev, struct intel_crtc *intel_crtc)
3344{ 3403{
3345 struct intel_encoder *intel_encoder; 3404 struct drm_i915_private *dev_priv = to_i915(dev);
3346 struct drm_i915_private *dev_priv = dev->dev_private;
3347 struct i915_drrs *drrs = &dev_priv->drrs; 3405 struct i915_drrs *drrs = &dev_priv->drrs;
3348 int vrefresh = 0; 3406 int vrefresh = 0;
3407 struct drm_connector *connector;
3349 3408
3350 for_each_encoder_on_crtc(dev, &intel_crtc->base, intel_encoder) { 3409 drm_for_each_connector(connector, dev) {
3351 /* Encoder connected on this CRTC */ 3410 if (connector->state->crtc != &intel_crtc->base)
3352 switch (intel_encoder->type) { 3411 continue;
3353 case INTEL_OUTPUT_EDP: 3412
3354 seq_puts(m, "eDP:\n"); 3413 seq_printf(m, "%s:\n", connector->name);
3355 break;
3356 case INTEL_OUTPUT_DSI:
3357 seq_puts(m, "DSI:\n");
3358 break;
3359 case INTEL_OUTPUT_HDMI:
3360 seq_puts(m, "HDMI:\n");
3361 break;
3362 case INTEL_OUTPUT_DISPLAYPORT:
3363 seq_puts(m, "DP:\n");
3364 break;
3365 default:
3366 seq_printf(m, "Other encoder (id=%d).\n",
3367 intel_encoder->type);
3368 return;
3369 }
3370 } 3414 }
3371 3415
3372 if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT) 3416 if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
@@ -3429,18 +3473,16 @@ static int i915_drrs_status(struct seq_file *m, void *unused)
3429 struct intel_crtc *intel_crtc; 3473 struct intel_crtc *intel_crtc;
3430 int active_crtc_cnt = 0; 3474 int active_crtc_cnt = 0;
3431 3475
3476 drm_modeset_lock_all(dev);
3432 for_each_intel_crtc(dev, intel_crtc) { 3477 for_each_intel_crtc(dev, intel_crtc) {
3433 drm_modeset_lock(&intel_crtc->base.mutex, NULL);
3434
3435 if (intel_crtc->base.state->active) { 3478 if (intel_crtc->base.state->active) {
3436 active_crtc_cnt++; 3479 active_crtc_cnt++;
3437 seq_printf(m, "\nCRTC %d: ", active_crtc_cnt); 3480 seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);
3438 3481
3439 drrs_status_per_crtc(m, dev, intel_crtc); 3482 drrs_status_per_crtc(m, dev, intel_crtc);
3440 } 3483 }
3441
3442 drm_modeset_unlock(&intel_crtc->base.mutex);
3443 } 3484 }
3485 drm_modeset_unlock_all(dev);
3444 3486
3445 if (!active_crtc_cnt) 3487 if (!active_crtc_cnt)
3446 seq_puts(m, "No active crtc found\n"); 3488 seq_puts(m, "No active crtc found\n");
@@ -3458,17 +3500,23 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused)
3458{ 3500{
3459 struct drm_info_node *node = (struct drm_info_node *) m->private; 3501 struct drm_info_node *node = (struct drm_info_node *) m->private;
3460 struct drm_device *dev = node->minor->dev; 3502 struct drm_device *dev = node->minor->dev;
3461 struct drm_encoder *encoder;
3462 struct intel_encoder *intel_encoder; 3503 struct intel_encoder *intel_encoder;
3463 struct intel_digital_port *intel_dig_port; 3504 struct intel_digital_port *intel_dig_port;
3505 struct drm_connector *connector;
3506
3464 drm_modeset_lock_all(dev); 3507 drm_modeset_lock_all(dev);
3465 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 3508 drm_for_each_connector(connector, dev) {
3466 intel_encoder = to_intel_encoder(encoder); 3509 if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
3467 if (intel_encoder->type != INTEL_OUTPUT_DISPLAYPORT)
3468 continue; 3510 continue;
3469 intel_dig_port = enc_to_dig_port(encoder); 3511
3512 intel_encoder = intel_attached_encoder(connector);
3513 if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
3514 continue;
3515
3516 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
3470 if (!intel_dig_port->dp.can_mst) 3517 if (!intel_dig_port->dp.can_mst)
3471 continue; 3518 continue;
3519
3472 seq_printf(m, "MST Source Port %c\n", 3520 seq_printf(m, "MST Source Port %c\n",
3473 port_name(intel_dig_port->port)); 3521 port_name(intel_dig_port->port));
3474 drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr); 3522 drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
@@ -3480,7 +3528,7 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused)
3480static int i915_pipe_crc_open(struct inode *inode, struct file *filep) 3528static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
3481{ 3529{
3482 struct pipe_crc_info *info = inode->i_private; 3530 struct pipe_crc_info *info = inode->i_private;
3483 struct drm_i915_private *dev_priv = info->dev->dev_private; 3531 struct drm_i915_private *dev_priv = to_i915(info->dev);
3484 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; 3532 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
3485 3533
3486 if (info->pipe >= INTEL_INFO(info->dev)->num_pipes) 3534 if (info->pipe >= INTEL_INFO(info->dev)->num_pipes)
@@ -3504,7 +3552,7 @@ static int i915_pipe_crc_open(struct inode *inode, struct file *filep)
3504static int i915_pipe_crc_release(struct inode *inode, struct file *filep) 3552static int i915_pipe_crc_release(struct inode *inode, struct file *filep)
3505{ 3553{
3506 struct pipe_crc_info *info = inode->i_private; 3554 struct pipe_crc_info *info = inode->i_private;
3507 struct drm_i915_private *dev_priv = info->dev->dev_private; 3555 struct drm_i915_private *dev_priv = to_i915(info->dev);
3508 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; 3556 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
3509 3557
3510 spin_lock_irq(&pipe_crc->lock); 3558 spin_lock_irq(&pipe_crc->lock);
@@ -3532,7 +3580,7 @@ i915_pipe_crc_read(struct file *filep, char __user *user_buf, size_t count,
3532{ 3580{
3533 struct pipe_crc_info *info = filep->private_data; 3581 struct pipe_crc_info *info = filep->private_data;
3534 struct drm_device *dev = info->dev; 3582 struct drm_device *dev = info->dev;
3535 struct drm_i915_private *dev_priv = dev->dev_private; 3583 struct drm_i915_private *dev_priv = to_i915(dev);
3536 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe]; 3584 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[info->pipe];
3537 char buf[PIPE_CRC_BUFFER_LEN]; 3585 char buf[PIPE_CRC_BUFFER_LEN];
3538 int n_entries; 3586 int n_entries;
@@ -3665,7 +3713,7 @@ static const char *pipe_crc_source_name(enum intel_pipe_crc_source source)
3665static int display_crc_ctl_show(struct seq_file *m, void *data) 3713static int display_crc_ctl_show(struct seq_file *m, void *data)
3666{ 3714{
3667 struct drm_device *dev = m->private; 3715 struct drm_device *dev = m->private;
3668 struct drm_i915_private *dev_priv = dev->dev_private; 3716 struct drm_i915_private *dev_priv = to_i915(dev);
3669 int i; 3717 int i;
3670 3718
3671 for (i = 0; i < I915_MAX_PIPES; i++) 3719 for (i = 0; i < I915_MAX_PIPES; i++)
@@ -3726,7 +3774,7 @@ static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
3726 case INTEL_OUTPUT_TVOUT: 3774 case INTEL_OUTPUT_TVOUT:
3727 *source = INTEL_PIPE_CRC_SOURCE_TV; 3775 *source = INTEL_PIPE_CRC_SOURCE_TV;
3728 break; 3776 break;
3729 case INTEL_OUTPUT_DISPLAYPORT: 3777 case INTEL_OUTPUT_DP:
3730 case INTEL_OUTPUT_EDP: 3778 case INTEL_OUTPUT_EDP:
3731 dig_port = enc_to_dig_port(&encoder->base); 3779 dig_port = enc_to_dig_port(&encoder->base);
3732 switch (dig_port->port) { 3780 switch (dig_port->port) {
@@ -3759,7 +3807,7 @@ static int vlv_pipe_crc_ctl_reg(struct drm_device *dev,
3759 enum intel_pipe_crc_source *source, 3807 enum intel_pipe_crc_source *source,
3760 uint32_t *val) 3808 uint32_t *val)
3761{ 3809{
3762 struct drm_i915_private *dev_priv = dev->dev_private; 3810 struct drm_i915_private *dev_priv = to_i915(dev);
3763 bool need_stable_symbols = false; 3811 bool need_stable_symbols = false;
3764 3812
3765 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) { 3813 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
@@ -3830,7 +3878,7 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
3830 enum intel_pipe_crc_source *source, 3878 enum intel_pipe_crc_source *source,
3831 uint32_t *val) 3879 uint32_t *val)
3832{ 3880{
3833 struct drm_i915_private *dev_priv = dev->dev_private; 3881 struct drm_i915_private *dev_priv = to_i915(dev);
3834 bool need_stable_symbols = false; 3882 bool need_stable_symbols = false;
3835 3883
3836 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) { 3884 if (*source == INTEL_PIPE_CRC_SOURCE_AUTO) {
@@ -3904,7 +3952,7 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_device *dev,
3904static void vlv_undo_pipe_scramble_reset(struct drm_device *dev, 3952static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
3905 enum pipe pipe) 3953 enum pipe pipe)
3906{ 3954{
3907 struct drm_i915_private *dev_priv = dev->dev_private; 3955 struct drm_i915_private *dev_priv = to_i915(dev);
3908 uint32_t tmp = I915_READ(PORT_DFT2_G4X); 3956 uint32_t tmp = I915_READ(PORT_DFT2_G4X);
3909 3957
3910 switch (pipe) { 3958 switch (pipe) {
@@ -3929,7 +3977,7 @@ static void vlv_undo_pipe_scramble_reset(struct drm_device *dev,
3929static void g4x_undo_pipe_scramble_reset(struct drm_device *dev, 3977static void g4x_undo_pipe_scramble_reset(struct drm_device *dev,
3930 enum pipe pipe) 3978 enum pipe pipe)
3931{ 3979{
3932 struct drm_i915_private *dev_priv = dev->dev_private; 3980 struct drm_i915_private *dev_priv = to_i915(dev);
3933 uint32_t tmp = I915_READ(PORT_DFT2_G4X); 3981 uint32_t tmp = I915_READ(PORT_DFT2_G4X);
3934 3982
3935 if (pipe == PIPE_A) 3983 if (pipe == PIPE_A)
@@ -3972,7 +4020,7 @@ static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
3972 4020
3973static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev, bool enable) 4021static void hsw_trans_edp_pipe_A_crc_wa(struct drm_device *dev, bool enable)
3974{ 4022{
3975 struct drm_i915_private *dev_priv = dev->dev_private; 4023 struct drm_i915_private *dev_priv = to_i915(dev);
3976 struct intel_crtc *crtc = 4024 struct intel_crtc *crtc =
3977 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]); 4025 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_A]);
3978 struct intel_crtc_state *pipe_config; 4026 struct intel_crtc_state *pipe_config;
@@ -4040,7 +4088,7 @@ static int ivb_pipe_crc_ctl_reg(struct drm_device *dev,
4040static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe, 4088static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
4041 enum intel_pipe_crc_source source) 4089 enum intel_pipe_crc_source source)
4042{ 4090{
4043 struct drm_i915_private *dev_priv = dev->dev_private; 4091 struct drm_i915_private *dev_priv = to_i915(dev);
4044 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 4092 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
4045 struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, 4093 struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev,
4046 pipe)); 4094 pipe));
@@ -4547,7 +4595,7 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
4547static int pri_wm_latency_show(struct seq_file *m, void *data) 4595static int pri_wm_latency_show(struct seq_file *m, void *data)
4548{ 4596{
4549 struct drm_device *dev = m->private; 4597 struct drm_device *dev = m->private;
4550 struct drm_i915_private *dev_priv = dev->dev_private; 4598 struct drm_i915_private *dev_priv = to_i915(dev);
4551 const uint16_t *latencies; 4599 const uint16_t *latencies;
4552 4600
4553 if (INTEL_INFO(dev)->gen >= 9) 4601 if (INTEL_INFO(dev)->gen >= 9)
@@ -4563,7 +4611,7 @@ static int pri_wm_latency_show(struct seq_file *m, void *data)
4563static int spr_wm_latency_show(struct seq_file *m, void *data) 4611static int spr_wm_latency_show(struct seq_file *m, void *data)
4564{ 4612{
4565 struct drm_device *dev = m->private; 4613 struct drm_device *dev = m->private;
4566 struct drm_i915_private *dev_priv = dev->dev_private; 4614 struct drm_i915_private *dev_priv = to_i915(dev);
4567 const uint16_t *latencies; 4615 const uint16_t *latencies;
4568 4616
4569 if (INTEL_INFO(dev)->gen >= 9) 4617 if (INTEL_INFO(dev)->gen >= 9)
@@ -4579,7 +4627,7 @@ static int spr_wm_latency_show(struct seq_file *m, void *data)
4579static int cur_wm_latency_show(struct seq_file *m, void *data) 4627static int cur_wm_latency_show(struct seq_file *m, void *data)
4580{ 4628{
4581 struct drm_device *dev = m->private; 4629 struct drm_device *dev = m->private;
4582 struct drm_i915_private *dev_priv = dev->dev_private; 4630 struct drm_i915_private *dev_priv = to_i915(dev);
4583 const uint16_t *latencies; 4631 const uint16_t *latencies;
4584 4632
4585 if (INTEL_INFO(dev)->gen >= 9) 4633 if (INTEL_INFO(dev)->gen >= 9)
@@ -4670,7 +4718,7 @@ static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
4670{ 4718{
4671 struct seq_file *m = file->private_data; 4719 struct seq_file *m = file->private_data;
4672 struct drm_device *dev = m->private; 4720 struct drm_device *dev = m->private;
4673 struct drm_i915_private *dev_priv = dev->dev_private; 4721 struct drm_i915_private *dev_priv = to_i915(dev);
4674 uint16_t *latencies; 4722 uint16_t *latencies;
4675 4723
4676 if (INTEL_INFO(dev)->gen >= 9) 4724 if (INTEL_INFO(dev)->gen >= 9)
@@ -4686,7 +4734,7 @@ static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
4686{ 4734{
4687 struct seq_file *m = file->private_data; 4735 struct seq_file *m = file->private_data;
4688 struct drm_device *dev = m->private; 4736 struct drm_device *dev = m->private;
4689 struct drm_i915_private *dev_priv = dev->dev_private; 4737 struct drm_i915_private *dev_priv = to_i915(dev);
4690 uint16_t *latencies; 4738 uint16_t *latencies;
4691 4739
4692 if (INTEL_INFO(dev)->gen >= 9) 4740 if (INTEL_INFO(dev)->gen >= 9)
@@ -4702,7 +4750,7 @@ static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
4702{ 4750{
4703 struct seq_file *m = file->private_data; 4751 struct seq_file *m = file->private_data;
4704 struct drm_device *dev = m->private; 4752 struct drm_device *dev = m->private;
4705 struct drm_i915_private *dev_priv = dev->dev_private; 4753 struct drm_i915_private *dev_priv = to_i915(dev);
4706 uint16_t *latencies; 4754 uint16_t *latencies;
4707 4755
4708 if (INTEL_INFO(dev)->gen >= 9) 4756 if (INTEL_INFO(dev)->gen >= 9)
@@ -4744,7 +4792,7 @@ static int
4744i915_wedged_get(void *data, u64 *val) 4792i915_wedged_get(void *data, u64 *val)
4745{ 4793{
4746 struct drm_device *dev = data; 4794 struct drm_device *dev = data;
4747 struct drm_i915_private *dev_priv = dev->dev_private; 4795 struct drm_i915_private *dev_priv = to_i915(dev);
4748 4796
4749 *val = i915_terminally_wedged(&dev_priv->gpu_error); 4797 *val = i915_terminally_wedged(&dev_priv->gpu_error);
4750 4798
@@ -4755,7 +4803,7 @@ static int
4755i915_wedged_set(void *data, u64 val) 4803i915_wedged_set(void *data, u64 val)
4756{ 4804{
4757 struct drm_device *dev = data; 4805 struct drm_device *dev = data;
4758 struct drm_i915_private *dev_priv = dev->dev_private; 4806 struct drm_i915_private *dev_priv = to_i915(dev);
4759 4807
4760 /* 4808 /*
4761 * There is no safeguard against this debugfs entry colliding 4809 * There is no safeguard against this debugfs entry colliding
@@ -4770,7 +4818,7 @@ i915_wedged_set(void *data, u64 val)
4770 4818
4771 intel_runtime_pm_get(dev_priv); 4819 intel_runtime_pm_get(dev_priv);
4772 4820
4773 i915_handle_error(dev, val, 4821 i915_handle_error(dev_priv, val,
4774 "Manually setting wedged to %llu", val); 4822 "Manually setting wedged to %llu", val);
4775 4823
4776 intel_runtime_pm_put(dev_priv); 4824 intel_runtime_pm_put(dev_priv);
@@ -4783,44 +4831,10 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
4783 "%llu\n"); 4831 "%llu\n");
4784 4832
4785static int 4833static int
4786i915_ring_stop_get(void *data, u64 *val)
4787{
4788 struct drm_device *dev = data;
4789 struct drm_i915_private *dev_priv = dev->dev_private;
4790
4791 *val = dev_priv->gpu_error.stop_rings;
4792
4793 return 0;
4794}
4795
4796static int
4797i915_ring_stop_set(void *data, u64 val)
4798{
4799 struct drm_device *dev = data;
4800 struct drm_i915_private *dev_priv = dev->dev_private;
4801 int ret;
4802
4803 DRM_DEBUG_DRIVER("Stopping rings 0x%08llx\n", val);
4804
4805 ret = mutex_lock_interruptible(&dev->struct_mutex);
4806 if (ret)
4807 return ret;
4808
4809 dev_priv->gpu_error.stop_rings = val;
4810 mutex_unlock(&dev->struct_mutex);
4811
4812 return 0;
4813}
4814
4815DEFINE_SIMPLE_ATTRIBUTE(i915_ring_stop_fops,
4816 i915_ring_stop_get, i915_ring_stop_set,
4817 "0x%08llx\n");
4818
4819static int
4820i915_ring_missed_irq_get(void *data, u64 *val) 4834i915_ring_missed_irq_get(void *data, u64 *val)
4821{ 4835{
4822 struct drm_device *dev = data; 4836 struct drm_device *dev = data;
4823 struct drm_i915_private *dev_priv = dev->dev_private; 4837 struct drm_i915_private *dev_priv = to_i915(dev);
4824 4838
4825 *val = dev_priv->gpu_error.missed_irq_rings; 4839 *val = dev_priv->gpu_error.missed_irq_rings;
4826 return 0; 4840 return 0;
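The hunk above removes the i915_ring_stop get/set pair together with its DEFINE_SIMPLE_ATTRIBUTE wrapper. For readers unfamiliar with the idiom, a generic sketch of how such a u64 debugfs attribute is wired up follows; the "demo" names are hypothetical and not taken from the driver:

    /* Generic DEFINE_SIMPLE_ATTRIBUTE idiom, as used by the i915_*_fops here. */
    #include <linux/debugfs.h>
    #include <linux/fs.h>

    static u64 demo_value;

    static int demo_get(void *data, u64 *val)
    {
            *val = demo_value;
            return 0;
    }

    static int demo_set(void *data, u64 val)
    {
            demo_value = val;
            return 0;
    }

    /* Expands to demo_fops: file_operations whose read/write call get/set. */
    DEFINE_SIMPLE_ATTRIBUTE(demo_fops, demo_get, demo_set, "0x%08llx\n");

    /*
     * Registered from an init path, for example:
     *     debugfs_create_file("demo", S_IRUGO | S_IWUSR,
     *                         parent_dentry, private_data, &demo_fops);
     */

The format string ("0x%08llx\n" here, "%llu\n" for i915_wedged above) controls how the value is printed and parsed through the generated read/write handlers.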
@@ -4830,7 +4844,7 @@ static int
4830i915_ring_missed_irq_set(void *data, u64 val) 4844i915_ring_missed_irq_set(void *data, u64 val)
4831{ 4845{
4832 struct drm_device *dev = data; 4846 struct drm_device *dev = data;
4833 struct drm_i915_private *dev_priv = dev->dev_private; 4847 struct drm_i915_private *dev_priv = to_i915(dev);
4834 int ret; 4848 int ret;
4835 4849
4836 /* Lock against concurrent debugfs callers */ 4850 /* Lock against concurrent debugfs callers */
@@ -4851,7 +4865,7 @@ static int
4851i915_ring_test_irq_get(void *data, u64 *val) 4865i915_ring_test_irq_get(void *data, u64 *val)
4852{ 4866{
4853 struct drm_device *dev = data; 4867 struct drm_device *dev = data;
4854 struct drm_i915_private *dev_priv = dev->dev_private; 4868 struct drm_i915_private *dev_priv = to_i915(dev);
4855 4869
4856 *val = dev_priv->gpu_error.test_irq_rings; 4870 *val = dev_priv->gpu_error.test_irq_rings;
4857 4871
@@ -4862,18 +4876,11 @@ static int
4862i915_ring_test_irq_set(void *data, u64 val) 4876i915_ring_test_irq_set(void *data, u64 val)
4863{ 4877{
4864 struct drm_device *dev = data; 4878 struct drm_device *dev = data;
4865 struct drm_i915_private *dev_priv = dev->dev_private; 4879 struct drm_i915_private *dev_priv = to_i915(dev);
4866 int ret;
4867 4880
4881 val &= INTEL_INFO(dev_priv)->ring_mask;
4868 DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val); 4882 DRM_DEBUG_DRIVER("Masking interrupts on rings 0x%08llx\n", val);
4869
4870 /* Lock against concurrent debugfs callers */
4871 ret = mutex_lock_interruptible(&dev->struct_mutex);
4872 if (ret)
4873 return ret;
4874
4875 dev_priv->gpu_error.test_irq_rings = val; 4883 dev_priv->gpu_error.test_irq_rings = val;
4876 mutex_unlock(&dev->struct_mutex);
4877 4884
4878 return 0; 4885 return 0;
4879} 4886}
@@ -4902,7 +4909,7 @@ static int
4902i915_drop_caches_set(void *data, u64 val) 4909i915_drop_caches_set(void *data, u64 val)
4903{ 4910{
4904 struct drm_device *dev = data; 4911 struct drm_device *dev = data;
4905 struct drm_i915_private *dev_priv = dev->dev_private; 4912 struct drm_i915_private *dev_priv = to_i915(dev);
4906 int ret; 4913 int ret;
4907 4914
4908 DRM_DEBUG("Dropping caches: 0x%08llx\n", val); 4915 DRM_DEBUG("Dropping caches: 0x%08llx\n", val);
@@ -4914,13 +4921,13 @@ i915_drop_caches_set(void *data, u64 val)
4914 return ret; 4921 return ret;
4915 4922
4916 if (val & DROP_ACTIVE) { 4923 if (val & DROP_ACTIVE) {
4917 ret = i915_gpu_idle(dev); 4924 ret = i915_gem_wait_for_idle(dev_priv);
4918 if (ret) 4925 if (ret)
4919 goto unlock; 4926 goto unlock;
4920 } 4927 }
4921 4928
4922 if (val & (DROP_RETIRE | DROP_ACTIVE)) 4929 if (val & (DROP_RETIRE | DROP_ACTIVE))
4923 i915_gem_retire_requests(dev); 4930 i915_gem_retire_requests(dev_priv);
4924 4931
4925 if (val & DROP_BOUND) 4932 if (val & DROP_BOUND)
4926 i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND); 4933 i915_gem_shrink(dev_priv, LONG_MAX, I915_SHRINK_BOUND);
@@ -4942,7 +4949,7 @@ static int
4942i915_max_freq_get(void *data, u64 *val) 4949i915_max_freq_get(void *data, u64 *val)
4943{ 4950{
4944 struct drm_device *dev = data; 4951 struct drm_device *dev = data;
4945 struct drm_i915_private *dev_priv = dev->dev_private; 4952 struct drm_i915_private *dev_priv = to_i915(dev);
4946 int ret; 4953 int ret;
4947 4954
4948 if (INTEL_INFO(dev)->gen < 6) 4955 if (INTEL_INFO(dev)->gen < 6)
@@ -4964,7 +4971,7 @@ static int
4964i915_max_freq_set(void *data, u64 val) 4971i915_max_freq_set(void *data, u64 val)
4965{ 4972{
4966 struct drm_device *dev = data; 4973 struct drm_device *dev = data;
4967 struct drm_i915_private *dev_priv = dev->dev_private; 4974 struct drm_i915_private *dev_priv = to_i915(dev);
4968 u32 hw_max, hw_min; 4975 u32 hw_max, hw_min;
4969 int ret; 4976 int ret;
4970 4977
@@ -4994,7 +5001,7 @@ i915_max_freq_set(void *data, u64 val)
4994 5001
4995 dev_priv->rps.max_freq_softlimit = val; 5002 dev_priv->rps.max_freq_softlimit = val;
4996 5003
4997 intel_set_rps(dev, val); 5004 intel_set_rps(dev_priv, val);
4998 5005
4999 mutex_unlock(&dev_priv->rps.hw_lock); 5006 mutex_unlock(&dev_priv->rps.hw_lock);
5000 5007
@@ -5009,7 +5016,7 @@ static int
5009i915_min_freq_get(void *data, u64 *val) 5016i915_min_freq_get(void *data, u64 *val)
5010{ 5017{
5011 struct drm_device *dev = data; 5018 struct drm_device *dev = data;
5012 struct drm_i915_private *dev_priv = dev->dev_private; 5019 struct drm_i915_private *dev_priv = to_i915(dev);
5013 int ret; 5020 int ret;
5014 5021
5015 if (INTEL_INFO(dev)->gen < 6) 5022 if (INTEL_INFO(dev)->gen < 6)
@@ -5031,7 +5038,7 @@ static int
5031i915_min_freq_set(void *data, u64 val) 5038i915_min_freq_set(void *data, u64 val)
5032{ 5039{
5033 struct drm_device *dev = data; 5040 struct drm_device *dev = data;
5034 struct drm_i915_private *dev_priv = dev->dev_private; 5041 struct drm_i915_private *dev_priv = to_i915(dev);
5035 u32 hw_max, hw_min; 5042 u32 hw_max, hw_min;
5036 int ret; 5043 int ret;
5037 5044
@@ -5061,7 +5068,7 @@ i915_min_freq_set(void *data, u64 val)
5061 5068
5062 dev_priv->rps.min_freq_softlimit = val; 5069 dev_priv->rps.min_freq_softlimit = val;
5063 5070
5064 intel_set_rps(dev, val); 5071 intel_set_rps(dev_priv, val);
5065 5072
5066 mutex_unlock(&dev_priv->rps.hw_lock); 5073 mutex_unlock(&dev_priv->rps.hw_lock);
5067 5074
@@ -5076,7 +5083,7 @@ static int
5076i915_cache_sharing_get(void *data, u64 *val) 5083i915_cache_sharing_get(void *data, u64 *val)
5077{ 5084{
5078 struct drm_device *dev = data; 5085 struct drm_device *dev = data;
5079 struct drm_i915_private *dev_priv = dev->dev_private; 5086 struct drm_i915_private *dev_priv = to_i915(dev);
5080 u32 snpcr; 5087 u32 snpcr;
5081 int ret; 5088 int ret;
5082 5089
@@ -5091,7 +5098,7 @@ i915_cache_sharing_get(void *data, u64 *val)
5091 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR); 5098 snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
5092 5099
5093 intel_runtime_pm_put(dev_priv); 5100 intel_runtime_pm_put(dev_priv);
5094 mutex_unlock(&dev_priv->dev->struct_mutex); 5101 mutex_unlock(&dev_priv->drm.struct_mutex);
5095 5102
5096 *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT; 5103 *val = (snpcr & GEN6_MBC_SNPCR_MASK) >> GEN6_MBC_SNPCR_SHIFT;
5097 5104
@@ -5102,7 +5109,7 @@ static int
5102i915_cache_sharing_set(void *data, u64 val) 5109i915_cache_sharing_set(void *data, u64 val)
5103{ 5110{
5104 struct drm_device *dev = data; 5111 struct drm_device *dev = data;
5105 struct drm_i915_private *dev_priv = dev->dev_private; 5112 struct drm_i915_private *dev_priv = to_i915(dev);
5106 u32 snpcr; 5113 u32 snpcr;
5107 5114
5108 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 5115 if (!(IS_GEN6(dev) || IS_GEN7(dev)))
@@ -5139,7 +5146,7 @@ struct sseu_dev_status {
5139static void cherryview_sseu_device_status(struct drm_device *dev, 5146static void cherryview_sseu_device_status(struct drm_device *dev,
5140 struct sseu_dev_status *stat) 5147 struct sseu_dev_status *stat)
5141{ 5148{
5142 struct drm_i915_private *dev_priv = dev->dev_private; 5149 struct drm_i915_private *dev_priv = to_i915(dev);
5143 int ss_max = 2; 5150 int ss_max = 2;
5144 int ss; 5151 int ss;
5145 u32 sig1[ss_max], sig2[ss_max]; 5152 u32 sig1[ss_max], sig2[ss_max];
@@ -5171,7 +5178,7 @@ static void cherryview_sseu_device_status(struct drm_device *dev,
5171static void gen9_sseu_device_status(struct drm_device *dev, 5178static void gen9_sseu_device_status(struct drm_device *dev,
5172 struct sseu_dev_status *stat) 5179 struct sseu_dev_status *stat)
5173{ 5180{
5174 struct drm_i915_private *dev_priv = dev->dev_private; 5181 struct drm_i915_private *dev_priv = to_i915(dev);
5175 int s_max = 3, ss_max = 4; 5182 int s_max = 3, ss_max = 4;
5176 int s, ss; 5183 int s, ss;
5177 u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2]; 5184 u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2];
@@ -5236,7 +5243,7 @@ static void gen9_sseu_device_status(struct drm_device *dev,
5236static void broadwell_sseu_device_status(struct drm_device *dev, 5243static void broadwell_sseu_device_status(struct drm_device *dev,
5237 struct sseu_dev_status *stat) 5244 struct sseu_dev_status *stat)
5238{ 5245{
5239 struct drm_i915_private *dev_priv = dev->dev_private; 5246 struct drm_i915_private *dev_priv = to_i915(dev);
5240 int s; 5247 int s;
5241 u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO); 5248 u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO);
5242 5249
@@ -5278,6 +5285,10 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
5278 INTEL_INFO(dev)->eu_total); 5285 INTEL_INFO(dev)->eu_total);
5279 seq_printf(m, " Available EU Per Subslice: %u\n", 5286 seq_printf(m, " Available EU Per Subslice: %u\n",
5280 INTEL_INFO(dev)->eu_per_subslice); 5287 INTEL_INFO(dev)->eu_per_subslice);
5288 seq_printf(m, " Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev)));
5289 if (HAS_POOLED_EU(dev))
5290 seq_printf(m, " Min EU in pool: %u\n",
5291 INTEL_INFO(dev)->min_eu_in_pool);
5281 seq_printf(m, " Has Slice Power Gating: %s\n", 5292 seq_printf(m, " Has Slice Power Gating: %s\n",
5282 yesno(INTEL_INFO(dev)->has_slice_pg)); 5293 yesno(INTEL_INFO(dev)->has_slice_pg));
5283 seq_printf(m, " Has Subslice Power Gating: %s\n", 5294 seq_printf(m, " Has Subslice Power Gating: %s\n",
@@ -5311,7 +5322,7 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
5311static int i915_forcewake_open(struct inode *inode, struct file *file) 5322static int i915_forcewake_open(struct inode *inode, struct file *file)
5312{ 5323{
5313 struct drm_device *dev = inode->i_private; 5324 struct drm_device *dev = inode->i_private;
5314 struct drm_i915_private *dev_priv = dev->dev_private; 5325 struct drm_i915_private *dev_priv = to_i915(dev);
5315 5326
5316 if (INTEL_INFO(dev)->gen < 6) 5327 if (INTEL_INFO(dev)->gen < 6)
5317 return 0; 5328 return 0;
@@ -5325,7 +5336,7 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
5325static int i915_forcewake_release(struct inode *inode, struct file *file) 5336static int i915_forcewake_release(struct inode *inode, struct file *file)
5326{ 5337{
5327 struct drm_device *dev = inode->i_private; 5338 struct drm_device *dev = inode->i_private;
5328 struct drm_i915_private *dev_priv = dev->dev_private; 5339 struct drm_i915_private *dev_priv = to_i915(dev);
5329 5340
5330 if (INTEL_INFO(dev)->gen < 6) 5341 if (INTEL_INFO(dev)->gen < 6)
5331 return 0; 5342 return 0;
@@ -5441,7 +5452,6 @@ static const struct i915_debugfs_files {
5441 {"i915_max_freq", &i915_max_freq_fops}, 5452 {"i915_max_freq", &i915_max_freq_fops},
5442 {"i915_min_freq", &i915_min_freq_fops}, 5453 {"i915_min_freq", &i915_min_freq_fops},
5443 {"i915_cache_sharing", &i915_cache_sharing_fops}, 5454 {"i915_cache_sharing", &i915_cache_sharing_fops},
5444 {"i915_ring_stop", &i915_ring_stop_fops},
5445 {"i915_ring_missed_irq", &i915_ring_missed_irq_fops}, 5455 {"i915_ring_missed_irq", &i915_ring_missed_irq_fops},
5446 {"i915_ring_test_irq", &i915_ring_test_irq_fops}, 5456 {"i915_ring_test_irq", &i915_ring_test_irq_fops},
5447 {"i915_gem_drop_caches", &i915_drop_caches_fops}, 5457 {"i915_gem_drop_caches", &i915_drop_caches_fops},
@@ -5459,7 +5469,7 @@ static const struct i915_debugfs_files {
5459 5469
5460void intel_display_crc_init(struct drm_device *dev) 5470void intel_display_crc_init(struct drm_device *dev)
5461{ 5471{
5462 struct drm_i915_private *dev_priv = dev->dev_private; 5472 struct drm_i915_private *dev_priv = to_i915(dev);
5463 enum pipe pipe; 5473 enum pipe pipe;
5464 5474
5465 for_each_pipe(dev_priv, pipe) { 5475 for_each_pipe(dev_priv, pipe) {
@@ -5471,8 +5481,9 @@ void intel_display_crc_init(struct drm_device *dev)
5471 } 5481 }
5472} 5482}
5473 5483
5474int i915_debugfs_init(struct drm_minor *minor) 5484int i915_debugfs_register(struct drm_i915_private *dev_priv)
5475{ 5485{
5486 struct drm_minor *minor = dev_priv->drm.primary;
5476 int ret, i; 5487 int ret, i;
5477 5488
5478 ret = i915_forcewake_create(minor->debugfs_root, minor); 5489 ret = i915_forcewake_create(minor->debugfs_root, minor);
@@ -5498,8 +5509,9 @@ int i915_debugfs_init(struct drm_minor *minor)
5498 minor->debugfs_root, minor); 5509 minor->debugfs_root, minor);
5499} 5510}
5500 5511
5501void i915_debugfs_cleanup(struct drm_minor *minor) 5512void i915_debugfs_unregister(struct drm_i915_private *dev_priv)
5502{ 5513{
5514 struct drm_minor *minor = dev_priv->drm.primary;
5503 int i; 5515 int i;
5504 5516
5505 drm_debugfs_remove_files(i915_debugfs_list, 5517 drm_debugfs_remove_files(i915_debugfs_list,
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
deleted file mode 100644
index b3198fcd0536..000000000000
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ /dev/null
@@ -1,1587 +0,0 @@
1/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
2 */
3/*
4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
17 * of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 */
28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
31#include <drm/drmP.h>
32#include <drm/drm_crtc_helper.h>
33#include <drm/drm_fb_helper.h>
34#include <drm/drm_legacy.h>
35#include "intel_drv.h"
36#include <drm/i915_drm.h>
37#include "i915_drv.h"
38#include "i915_vgpu.h"
39#include "i915_trace.h"
40#include <linux/pci.h>
41#include <linux/console.h>
42#include <linux/vt.h>
43#include <linux/vgaarb.h>
44#include <linux/acpi.h>
45#include <linux/pnp.h>
46#include <linux/vga_switcheroo.h>
47#include <linux/slab.h>
48#include <acpi/video.h>
49#include <linux/pm.h>
50#include <linux/pm_runtime.h>
51#include <linux/oom.h>
52
53static unsigned int i915_load_fail_count;
54
55bool __i915_inject_load_failure(const char *func, int line)
56{
57 if (i915_load_fail_count >= i915.inject_load_failure)
58 return false;
59
60 if (++i915_load_fail_count == i915.inject_load_failure) {
61 DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
62 i915.inject_load_failure, func, line);
63 return true;
64 }
65
66 return false;
67}
68
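The helper above counts "checkpoints" (each call to i915_inject_load_failure()) and reports a failure exactly once, at the checkpoint selected by the i915.inject_load_failure module parameter. A standalone userspace sketch of that counting pattern; the step names and the fail_at value are invented purely for illustration:

#include <stdio.h>

static unsigned int fail_at = 3;	/* stands in for i915.inject_load_failure */
static unsigned int checkpoint;

static int inject_failure(const char *step)
{
	if (!fail_at || checkpoint >= fail_at)
		return 0;
	if (++checkpoint == fail_at) {
		printf("injecting failure at checkpoint %u (%s)\n", fail_at, step);
		return 1;
	}
	return 0;
}

int main(void)
{
	static const char *steps[] = { "init_early", "init_mmio", "init_hw", "modeset_init" };
	unsigned int i;

	for (i = 0; i < 4; i++)
		if (inject_failure(steps[i]))
			printf("step %s would abort here\n", steps[i]);
	return 0;
}

With fail_at = 3, only the third checkpoint ("init_hw") reports a failure, mirroring how the driver aborts one specific load step when the parameter is set.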
69#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
70#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
71 "providing the dmesg log by booting with drm.debug=0xf"
72
73void
74__i915_printk(struct drm_i915_private *dev_priv, const char *level,
75 const char *fmt, ...)
76{
77 static bool shown_bug_once;
78 struct device *dev = dev_priv->dev->dev;
79 bool is_error = level[1] <= KERN_ERR[1];
80 bool is_debug = level[1] == KERN_DEBUG[1];
81 struct va_format vaf;
82 va_list args;
83
84 if (is_debug && !(drm_debug & DRM_UT_DRIVER))
85 return;
86
87 va_start(args, fmt);
88
89 vaf.fmt = fmt;
90 vaf.va = &args;
91
92 dev_printk(level, dev, "[" DRM_NAME ":%ps] %pV",
93 __builtin_return_address(0), &vaf);
94
95 if (is_error && !shown_bug_once) {
96 dev_notice(dev, "%s", FDO_BUG_MSG);
97 shown_bug_once = true;
98 }
99
100 va_end(args);
101}
102
103static bool i915_error_injected(struct drm_i915_private *dev_priv)
104{
105 return i915.inject_load_failure &&
106 i915_load_fail_count == i915.inject_load_failure;
107}
108
109#define i915_load_error(dev_priv, fmt, ...) \
110 __i915_printk(dev_priv, \
111 i915_error_injected(dev_priv) ? KERN_DEBUG : KERN_ERR, \
112 fmt, ##__VA_ARGS__)
113
114static int i915_getparam(struct drm_device *dev, void *data,
115 struct drm_file *file_priv)
116{
117 struct drm_i915_private *dev_priv = dev->dev_private;
118 drm_i915_getparam_t *param = data;
119 int value;
120
121 switch (param->param) {
122 case I915_PARAM_IRQ_ACTIVE:
123 case I915_PARAM_ALLOW_BATCHBUFFER:
124 case I915_PARAM_LAST_DISPATCH:
125 /* Reject all old ums/dri params. */
126 return -ENODEV;
127 case I915_PARAM_CHIPSET_ID:
128 value = dev->pdev->device;
129 break;
130 case I915_PARAM_REVISION:
131 value = dev->pdev->revision;
132 break;
133 case I915_PARAM_HAS_GEM:
134 value = 1;
135 break;
136 case I915_PARAM_NUM_FENCES_AVAIL:
137 value = dev_priv->num_fence_regs;
138 break;
139 case I915_PARAM_HAS_OVERLAY:
140 value = dev_priv->overlay ? 1 : 0;
141 break;
142 case I915_PARAM_HAS_PAGEFLIPPING:
143 value = 1;
144 break;
145 case I915_PARAM_HAS_EXECBUF2:
146 /* depends on GEM */
147 value = 1;
148 break;
149 case I915_PARAM_HAS_BSD:
150 value = intel_engine_initialized(&dev_priv->engine[VCS]);
151 break;
152 case I915_PARAM_HAS_BLT:
153 value = intel_engine_initialized(&dev_priv->engine[BCS]);
154 break;
155 case I915_PARAM_HAS_VEBOX:
156 value = intel_engine_initialized(&dev_priv->engine[VECS]);
157 break;
158 case I915_PARAM_HAS_BSD2:
159 value = intel_engine_initialized(&dev_priv->engine[VCS2]);
160 break;
161 case I915_PARAM_HAS_RELAXED_FENCING:
162 value = 1;
163 break;
164 case I915_PARAM_HAS_COHERENT_RINGS:
165 value = 1;
166 break;
167 case I915_PARAM_HAS_EXEC_CONSTANTS:
168 value = INTEL_INFO(dev)->gen >= 4;
169 break;
170 case I915_PARAM_HAS_RELAXED_DELTA:
171 value = 1;
172 break;
173 case I915_PARAM_HAS_GEN7_SOL_RESET:
174 value = 1;
175 break;
176 case I915_PARAM_HAS_LLC:
177 value = HAS_LLC(dev);
178 break;
179 case I915_PARAM_HAS_WT:
180 value = HAS_WT(dev);
181 break;
182 case I915_PARAM_HAS_ALIASING_PPGTT:
183 value = USES_PPGTT(dev);
184 break;
185 case I915_PARAM_HAS_WAIT_TIMEOUT:
186 value = 1;
187 break;
188 case I915_PARAM_HAS_SEMAPHORES:
189 value = i915_semaphore_is_enabled(dev);
190 break;
191 case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
192 value = 1;
193 break;
194 case I915_PARAM_HAS_SECURE_BATCHES:
195 value = capable(CAP_SYS_ADMIN);
196 break;
197 case I915_PARAM_HAS_PINNED_BATCHES:
198 value = 1;
199 break;
200 case I915_PARAM_HAS_EXEC_NO_RELOC:
201 value = 1;
202 break;
203 case I915_PARAM_HAS_EXEC_HANDLE_LUT:
204 value = 1;
205 break;
206 case I915_PARAM_CMD_PARSER_VERSION:
207 value = i915_cmd_parser_get_version();
208 break;
209 case I915_PARAM_HAS_COHERENT_PHYS_GTT:
210 value = 1;
211 break;
212 case I915_PARAM_MMAP_VERSION:
213 value = 1;
214 break;
215 case I915_PARAM_SUBSLICE_TOTAL:
216 value = INTEL_INFO(dev)->subslice_total;
217 if (!value)
218 return -ENODEV;
219 break;
220 case I915_PARAM_EU_TOTAL:
221 value = INTEL_INFO(dev)->eu_total;
222 if (!value)
223 return -ENODEV;
224 break;
225 case I915_PARAM_HAS_GPU_RESET:
226 value = i915.enable_hangcheck &&
227 intel_has_gpu_reset(dev);
228 break;
229 case I915_PARAM_HAS_RESOURCE_STREAMER:
230 value = HAS_RESOURCE_STREAMER(dev);
231 break;
232 case I915_PARAM_HAS_EXEC_SOFTPIN:
233 value = 1;
234 break;
235 default:
236 DRM_DEBUG("Unknown parameter %d\n", param->param);
237 return -EINVAL;
238 }
239
240 if (copy_to_user(param->value, &value, sizeof(int))) {
241 DRM_ERROR("copy_to_user failed\n");
242 return -EFAULT;
243 }
244
245 return 0;
246}
247
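i915_getparam() above services the GETPARAM ioctl that userspace uses to query driver and hardware capabilities. A hypothetical userspace sketch follows; the device node path and the <drm/i915_drm.h> header location are assumptions about the build environment, and error handling is kept minimal:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/i915_drm.h>

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);	/* assumed device node */
	int value = 0;
	drm_i915_getparam_t gp = {
		.param = I915_PARAM_CHIPSET_ID,
		.value = &value,
	};

	if (fd < 0)
		return 1;

	/* The kernel side of this call is i915_getparam() above. */
	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0)
		printf("PCI device id: 0x%04x\n", value);

	close(fd);
	return 0;
}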
248static int i915_get_bridge_dev(struct drm_device *dev)
249{
250 struct drm_i915_private *dev_priv = dev->dev_private;
251
252 dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
253 if (!dev_priv->bridge_dev) {
254 DRM_ERROR("bridge device not found\n");
255 return -1;
256 }
257 return 0;
258}
259
260/* Allocate space for the MCH regs if needed, return nonzero on error */
261static int
262intel_alloc_mchbar_resource(struct drm_device *dev)
263{
264 struct drm_i915_private *dev_priv = dev->dev_private;
265 int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
266 u32 temp_lo, temp_hi = 0;
267 u64 mchbar_addr;
268 int ret;
269
270 if (INTEL_INFO(dev)->gen >= 4)
271 pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
272 pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
273 mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
274
275 /* If ACPI doesn't have it, assume we need to allocate it ourselves */
276#ifdef CONFIG_PNP
277 if (mchbar_addr &&
278 pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
279 return 0;
280#endif
281
282 /* Get some space for it */
283 dev_priv->mch_res.name = "i915 MCHBAR";
284 dev_priv->mch_res.flags = IORESOURCE_MEM;
285 ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
286 &dev_priv->mch_res,
287 MCHBAR_SIZE, MCHBAR_SIZE,
288 PCIBIOS_MIN_MEM,
289 0, pcibios_align_resource,
290 dev_priv->bridge_dev);
291 if (ret) {
292 DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
293 dev_priv->mch_res.start = 0;
294 return ret;
295 }
296
297 if (INTEL_INFO(dev)->gen >= 4)
298 pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
299 upper_32_bits(dev_priv->mch_res.start));
300
301 pci_write_config_dword(dev_priv->bridge_dev, reg,
302 lower_32_bits(dev_priv->mch_res.start));
303 return 0;
304}
305
 306/* Setup MCHBAR if possible; remember whether we need to disable it again on teardown */
307static void
308intel_setup_mchbar(struct drm_device *dev)
309{
310 struct drm_i915_private *dev_priv = dev->dev_private;
311 int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
312 u32 temp;
313 bool enabled;
314
315 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
316 return;
317
318 dev_priv->mchbar_need_disable = false;
319
320 if (IS_I915G(dev) || IS_I915GM(dev)) {
321 pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
322 enabled = !!(temp & DEVEN_MCHBAR_EN);
323 } else {
324 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
325 enabled = temp & 1;
326 }
327
328 /* If it's already enabled, don't have to do anything */
329 if (enabled)
330 return;
331
332 if (intel_alloc_mchbar_resource(dev))
333 return;
334
335 dev_priv->mchbar_need_disable = true;
336
337 /* Space is allocated or reserved, so enable it. */
338 if (IS_I915G(dev) || IS_I915GM(dev)) {
339 pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
340 temp | DEVEN_MCHBAR_EN);
341 } else {
342 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
343 pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
344 }
345}
346
347static void
348intel_teardown_mchbar(struct drm_device *dev)
349{
350 struct drm_i915_private *dev_priv = dev->dev_private;
351 int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
352
353 if (dev_priv->mchbar_need_disable) {
354 if (IS_I915G(dev) || IS_I915GM(dev)) {
355 u32 deven_val;
356
357 pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
358 &deven_val);
359 deven_val &= ~DEVEN_MCHBAR_EN;
360 pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
361 deven_val);
362 } else {
363 u32 mchbar_val;
364
365 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
366 &mchbar_val);
367 mchbar_val &= ~1;
368 pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
369 mchbar_val);
370 }
371 }
372
373 if (dev_priv->mch_res.start)
374 release_resource(&dev_priv->mch_res);
375}
376
 377/* true = enable decode, false = disable decode */
378static unsigned int i915_vga_set_decode(void *cookie, bool state)
379{
380 struct drm_device *dev = cookie;
381
382 intel_modeset_vga_set_state(dev, state);
383 if (state)
384 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
385 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
386 else
387 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
388}
389
390static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
391{
392 struct drm_device *dev = pci_get_drvdata(pdev);
393 pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
394
395 if (state == VGA_SWITCHEROO_ON) {
396 pr_info("switched on\n");
397 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
398 /* i915 resume handler doesn't set to D0 */
399 pci_set_power_state(dev->pdev, PCI_D0);
400 i915_resume_switcheroo(dev);
401 dev->switch_power_state = DRM_SWITCH_POWER_ON;
402 } else {
403 pr_info("switched off\n");
404 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
405 i915_suspend_switcheroo(dev, pmm);
406 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
407 }
408}
409
410static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
411{
412 struct drm_device *dev = pci_get_drvdata(pdev);
413
414 /*
415 * FIXME: open_count is protected by drm_global_mutex but that would lead to
416 * locking inversion with the driver load path. And the access here is
417 * completely racy anyway. So don't bother with locking for now.
418 */
419 return dev->open_count == 0;
420}
421
422static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
423 .set_gpu_state = i915_switcheroo_set_state,
424 .reprobe = NULL,
425 .can_switch = i915_switcheroo_can_switch,
426};
427
428static int i915_load_modeset_init(struct drm_device *dev)
429{
430 struct drm_i915_private *dev_priv = dev->dev_private;
431 int ret;
432
433 if (i915_inject_load_failure())
434 return -ENODEV;
435
436 ret = intel_bios_init(dev_priv);
437 if (ret)
438 DRM_INFO("failed to find VBIOS tables\n");
439
 440	/* If we have more than one VGA card, we need to arbitrate access
441 * to the common VGA resources.
442 *
443 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
444 * then we do not take part in VGA arbitration and the
445 * vga_client_register() fails with -ENODEV.
446 */
447 ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
448 if (ret && ret != -ENODEV)
449 goto out;
450
451 intel_register_dsm_handler();
452
453 ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false);
454 if (ret)
455 goto cleanup_vga_client;
456
457 intel_power_domains_init_hw(dev_priv, false);
458
459 intel_csr_ucode_init(dev_priv);
460
461 ret = intel_irq_install(dev_priv);
462 if (ret)
463 goto cleanup_csr;
464
465 intel_setup_gmbus(dev);
466
467 /* Important: The output setup functions called by modeset_init need
468 * working irqs for e.g. gmbus and dp aux transfers. */
469 intel_modeset_init(dev);
470
471 intel_guc_ucode_init(dev);
472
473 ret = i915_gem_init(dev);
474 if (ret)
475 goto cleanup_irq;
476
477 intel_modeset_gem_init(dev);
478
479 if (INTEL_INFO(dev)->num_pipes == 0)
480 return 0;
481
482 ret = intel_fbdev_init(dev);
483 if (ret)
484 goto cleanup_gem;
485
486 /* Only enable hotplug handling once the fbdev is fully set up. */
487 intel_hpd_init(dev_priv);
488
489 /*
490 * Some ports require correctly set-up hpd registers for detection to
491 * work properly (leading to ghost connected connector status), e.g. VGA
492 * on gm45. Hence we can only set up the initial fbdev config after hpd
493 * irqs are fully enabled. Now we should scan for the initial config
494 * only once hotplug handling is enabled, but due to screwed-up locking
 495	 * around kms/fbdev init we can't protect the fbdev initial config
 496	 * scanning against hotplug events. Hence do this first and ignore the
 497	 * tiny window where we will lose hotplug notifications.
498 */
499 intel_fbdev_initial_config_async(dev);
500
501 drm_kms_helper_poll_init(dev);
502
503 return 0;
504
505cleanup_gem:
506 mutex_lock(&dev->struct_mutex);
507 i915_gem_cleanup_engines(dev);
508 i915_gem_context_fini(dev);
509 mutex_unlock(&dev->struct_mutex);
510cleanup_irq:
511 intel_guc_ucode_fini(dev);
512 drm_irq_uninstall(dev);
513 intel_teardown_gmbus(dev);
514cleanup_csr:
515 intel_csr_ucode_fini(dev_priv);
516 intel_power_domains_fini(dev_priv);
517 vga_switcheroo_unregister_client(dev->pdev);
518cleanup_vga_client:
519 vga_client_register(dev->pdev, NULL, NULL, NULL);
520out:
521 return ret;
522}
523
524#if IS_ENABLED(CONFIG_FB)
525static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
526{
527 struct apertures_struct *ap;
528 struct pci_dev *pdev = dev_priv->dev->pdev;
529 struct i915_ggtt *ggtt = &dev_priv->ggtt;
530 bool primary;
531 int ret;
532
533 ap = alloc_apertures(1);
534 if (!ap)
535 return -ENOMEM;
536
537 ap->ranges[0].base = ggtt->mappable_base;
538 ap->ranges[0].size = ggtt->mappable_end;
539
540 primary =
541 pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
542
543 ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
544
545 kfree(ap);
546
547 return ret;
548}
549#else
550static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
551{
552 return 0;
553}
554#endif
555
556#if !defined(CONFIG_VGA_CONSOLE)
557static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
558{
559 return 0;
560}
561#elif !defined(CONFIG_DUMMY_CONSOLE)
562static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
563{
564 return -ENODEV;
565}
566#else
567static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
568{
569 int ret = 0;
570
571 DRM_INFO("Replacing VGA console driver\n");
572
573 console_lock();
574 if (con_is_bound(&vga_con))
575 ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
576 if (ret == 0) {
577 ret = do_unregister_con_driver(&vga_con);
578
579 /* Ignore "already unregistered". */
580 if (ret == -ENODEV)
581 ret = 0;
582 }
583 console_unlock();
584
585 return ret;
586}
587#endif
588
589static void i915_dump_device_info(struct drm_i915_private *dev_priv)
590{
591 const struct intel_device_info *info = &dev_priv->info;
592
593#define PRINT_S(name) "%s"
594#define SEP_EMPTY
595#define PRINT_FLAG(name) info->name ? #name "," : ""
596#define SEP_COMMA ,
597 DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags="
598 DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
599 info->gen,
600 dev_priv->dev->pdev->device,
601 dev_priv->dev->pdev->revision,
602 DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
603#undef PRINT_S
604#undef SEP_EMPTY
605#undef PRINT_FLAG
606#undef SEP_COMMA
607}
608
609static void cherryview_sseu_info_init(struct drm_device *dev)
610{
611 struct drm_i915_private *dev_priv = dev->dev_private;
612 struct intel_device_info *info;
613 u32 fuse, eu_dis;
614
615 info = (struct intel_device_info *)&dev_priv->info;
616 fuse = I915_READ(CHV_FUSE_GT);
617
618 info->slice_total = 1;
619
620 if (!(fuse & CHV_FGT_DISABLE_SS0)) {
621 info->subslice_per_slice++;
622 eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
623 CHV_FGT_EU_DIS_SS0_R1_MASK);
624 info->eu_total += 8 - hweight32(eu_dis);
625 }
626
627 if (!(fuse & CHV_FGT_DISABLE_SS1)) {
628 info->subslice_per_slice++;
629 eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK |
630 CHV_FGT_EU_DIS_SS1_R1_MASK);
631 info->eu_total += 8 - hweight32(eu_dis);
632 }
633
634 info->subslice_total = info->subslice_per_slice;
635 /*
 636	 * CHV is expected to always have a uniform distribution of EU
637 * across subslices.
638 */
639 info->eu_per_subslice = info->subslice_total ?
640 info->eu_total / info->subslice_total :
641 0;
642 /*
643 * CHV supports subslice power gating on devices with more than
644 * one subslice, and supports EU power gating on devices with
645 * more than one EU pair per subslice.
646 */
647 info->has_slice_pg = 0;
648 info->has_subslice_pg = (info->subslice_total > 1);
649 info->has_eu_pg = (info->eu_per_subslice > 2);
650}
651
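The fuse decoding above reduces to population-count arithmetic: each subslice starts from 8 EUs and loses one for every disable bit set in the fuse. A self-contained sketch of that arithmetic, using an invented fuse value and a stand-in for the kernel's hweight32():

#include <stdio.h>

/* Stand-in for hweight32(): count the set bits in a 32-bit value. */
static unsigned int popcount32(unsigned int v)
{
	unsigned int n = 0;

	while (v) {
		n += v & 1;
		v >>= 1;
	}
	return n;
}

int main(void)
{
	unsigned int eu_dis = 0x0000000f;	/* invented fuse value: 4 EUs fused off */
	unsigned int eu_enabled = 8 - popcount32(eu_dis);

	printf("EUs enabled in this subslice: %u\n", eu_enabled);	/* prints 4 */
	return 0;
}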
652static void gen9_sseu_info_init(struct drm_device *dev)
653{
654 struct drm_i915_private *dev_priv = dev->dev_private;
655 struct intel_device_info *info;
656 int s_max = 3, ss_max = 4, eu_max = 8;
657 int s, ss;
658 u32 fuse2, s_enable, ss_disable, eu_disable;
659 u8 eu_mask = 0xff;
660
661 info = (struct intel_device_info *)&dev_priv->info;
662 fuse2 = I915_READ(GEN8_FUSE2);
663 s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >>
664 GEN8_F2_S_ENA_SHIFT;
665 ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >>
666 GEN9_F2_SS_DIS_SHIFT;
667
668 info->slice_total = hweight32(s_enable);
669 /*
670 * The subslice disable field is global, i.e. it applies
671 * to each of the enabled slices.
672 */
673 info->subslice_per_slice = ss_max - hweight32(ss_disable);
674 info->subslice_total = info->slice_total *
675 info->subslice_per_slice;
676
677 /*
678 * Iterate through enabled slices and subslices to
679 * count the total enabled EU.
680 */
681 for (s = 0; s < s_max; s++) {
682 if (!(s_enable & (0x1 << s)))
683 /* skip disabled slice */
684 continue;
685
686 eu_disable = I915_READ(GEN9_EU_DISABLE(s));
687 for (ss = 0; ss < ss_max; ss++) {
688 int eu_per_ss;
689
690 if (ss_disable & (0x1 << ss))
691 /* skip disabled subslice */
692 continue;
693
694 eu_per_ss = eu_max - hweight8((eu_disable >> (ss*8)) &
695 eu_mask);
696
697 /*
 698			 * Record which subslice(s) have 7 EUs. We
699 * can tune the hash used to spread work among
700 * subslices if they are unbalanced.
701 */
702 if (eu_per_ss == 7)
703 info->subslice_7eu[s] |= 1 << ss;
704
705 info->eu_total += eu_per_ss;
706 }
707 }
708
709 /*
710 * SKL is expected to always have a uniform distribution
711 * of EU across subslices with the exception that any one
712 * EU in any one subslice may be fused off for die
713 * recovery. BXT is expected to be perfectly uniform in EU
714 * distribution.
715 */
716 info->eu_per_subslice = info->subslice_total ?
717 DIV_ROUND_UP(info->eu_total,
718 info->subslice_total) : 0;
719 /*
720 * SKL supports slice power gating on devices with more than
721 * one slice, and supports EU power gating on devices with
722 * more than one EU pair per subslice. BXT supports subslice
723 * power gating on devices with more than one subslice, and
724 * supports EU power gating on devices with more than one EU
725 * pair per subslice.
726 */
727 info->has_slice_pg = ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
728 (info->slice_total > 1));
729 info->has_subslice_pg = (IS_BROXTON(dev) && (info->subslice_total > 1));
730 info->has_eu_pg = (info->eu_per_subslice > 2);
731}
732
733static void broadwell_sseu_info_init(struct drm_device *dev)
734{
735 struct drm_i915_private *dev_priv = dev->dev_private;
736 struct intel_device_info *info;
737 const int s_max = 3, ss_max = 3, eu_max = 8;
738 int s, ss;
739 u32 fuse2, eu_disable[s_max], s_enable, ss_disable;
740
741 fuse2 = I915_READ(GEN8_FUSE2);
742 s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
743 ss_disable = (fuse2 & GEN8_F2_SS_DIS_MASK) >> GEN8_F2_SS_DIS_SHIFT;
744
745 eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
746 eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
747 ((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
748 (32 - GEN8_EU_DIS0_S1_SHIFT));
749 eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
750 ((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
751 (32 - GEN8_EU_DIS1_S2_SHIFT));
752
753
754 info = (struct intel_device_info *)&dev_priv->info;
755 info->slice_total = hweight32(s_enable);
756
757 /*
758 * The subslice disable field is global, i.e. it applies
759 * to each of the enabled slices.
760 */
761 info->subslice_per_slice = ss_max - hweight32(ss_disable);
762 info->subslice_total = info->slice_total * info->subslice_per_slice;
763
764 /*
765 * Iterate through enabled slices and subslices to
766 * count the total enabled EU.
767 */
768 for (s = 0; s < s_max; s++) {
769 if (!(s_enable & (0x1 << s)))
770 /* skip disabled slice */
771 continue;
772
773 for (ss = 0; ss < ss_max; ss++) {
774 u32 n_disabled;
775
776 if (ss_disable & (0x1 << ss))
777 /* skip disabled subslice */
778 continue;
779
780 n_disabled = hweight8(eu_disable[s] >> (ss * eu_max));
781
782 /*
783 * Record which subslices have 7 EUs.
784 */
785 if (eu_max - n_disabled == 7)
786 info->subslice_7eu[s] |= 1 << ss;
787
788 info->eu_total += eu_max - n_disabled;
789 }
790 }
791
792 /*
793 * BDW is expected to always have a uniform distribution of EU across
794 * subslices with the exception that any one EU in any one subslice may
795 * be fused off for die recovery.
796 */
797 info->eu_per_subslice = info->subslice_total ?
798 DIV_ROUND_UP(info->eu_total, info->subslice_total) : 0;
799
800 /*
801 * BDW supports slice power gating on devices with more than
802 * one slice.
803 */
804 info->has_slice_pg = (info->slice_total > 1);
805 info->has_subslice_pg = 0;
806 info->has_eu_pg = 0;
807}
808
809/*
810 * Determine various intel_device_info fields at runtime.
811 *
812 * Use it when either:
813 * - it's judged too laborious to fill n static structures with the limit
814 * when a simple if statement does the job,
815 * - run-time checks (eg read fuse/strap registers) are needed.
816 *
817 * This function needs to be called:
818 * - after the MMIO has been setup as we are reading registers,
819 * - after the PCH has been detected,
820 * - before the first usage of the fields it can tweak.
821 */
822static void intel_device_info_runtime_init(struct drm_device *dev)
823{
824 struct drm_i915_private *dev_priv = dev->dev_private;
825 struct intel_device_info *info;
826 enum pipe pipe;
827
828 info = (struct intel_device_info *)&dev_priv->info;
829
830 /*
831 * Skylake and Broxton currently don't expose the topmost plane as its
832 * use is exclusive with the legacy cursor and we only want to expose
833 * one of those, not both. Until we can safely expose the topmost plane
834 * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
835 * we don't expose the topmost plane at all to prevent ABI breakage
836 * down the line.
837 */
838 if (IS_BROXTON(dev)) {
839 info->num_sprites[PIPE_A] = 2;
840 info->num_sprites[PIPE_B] = 2;
841 info->num_sprites[PIPE_C] = 1;
842 } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
843 for_each_pipe(dev_priv, pipe)
844 info->num_sprites[pipe] = 2;
845 else
846 for_each_pipe(dev_priv, pipe)
847 info->num_sprites[pipe] = 1;
848
849 if (i915.disable_display) {
850 DRM_INFO("Display disabled (module parameter)\n");
851 info->num_pipes = 0;
852 } else if (info->num_pipes > 0 &&
853 (INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) &&
854 HAS_PCH_SPLIT(dev)) {
855 u32 fuse_strap = I915_READ(FUSE_STRAP);
856 u32 sfuse_strap = I915_READ(SFUSE_STRAP);
857
858 /*
859 * SFUSE_STRAP is supposed to have a bit signalling the display
860 * is fused off. Unfortunately it seems that, at least in
861 * certain cases, fused off display means that PCH display
862 * reads don't land anywhere. In that case, we read 0s.
863 *
864 * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
865 * should be set when taking over after the firmware.
866 */
867 if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
868 sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
869 (dev_priv->pch_type == PCH_CPT &&
870 !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
871 DRM_INFO("Display fused off, disabling\n");
872 info->num_pipes = 0;
873 } else if (fuse_strap & IVB_PIPE_C_DISABLE) {
874 DRM_INFO("PipeC fused off\n");
875 info->num_pipes -= 1;
876 }
877 } else if (info->num_pipes > 0 && INTEL_INFO(dev)->gen == 9) {
878 u32 dfsm = I915_READ(SKL_DFSM);
879 u8 disabled_mask = 0;
880 bool invalid;
881 int num_bits;
882
883 if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
884 disabled_mask |= BIT(PIPE_A);
885 if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
886 disabled_mask |= BIT(PIPE_B);
887 if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
888 disabled_mask |= BIT(PIPE_C);
889
890 num_bits = hweight8(disabled_mask);
891
892 switch (disabled_mask) {
893 case BIT(PIPE_A):
894 case BIT(PIPE_B):
895 case BIT(PIPE_A) | BIT(PIPE_B):
896 case BIT(PIPE_A) | BIT(PIPE_C):
897 invalid = true;
898 break;
899 default:
900 invalid = false;
901 }
902
903 if (num_bits > info->num_pipes || invalid)
904 DRM_ERROR("invalid pipe fuse configuration: 0x%x\n",
905 disabled_mask);
906 else
907 info->num_pipes -= num_bits;
908 }
909
910 /* Initialize slice/subslice/EU info */
911 if (IS_CHERRYVIEW(dev))
912 cherryview_sseu_info_init(dev);
913 else if (IS_BROADWELL(dev))
914 broadwell_sseu_info_init(dev);
915 else if (INTEL_INFO(dev)->gen >= 9)
916 gen9_sseu_info_init(dev);
917
918 /* Snooping is broken on BXT A stepping. */
919 info->has_snoop = !info->has_llc;
920 info->has_snoop &= !IS_BXT_REVID(dev, 0, BXT_REVID_A1);
921
922 DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
923 DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
924 DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
925 DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total);
926 DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice);
927 DRM_DEBUG_DRIVER("has slice power gating: %s\n",
928 info->has_slice_pg ? "y" : "n");
929 DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
930 info->has_subslice_pg ? "y" : "n");
931 DRM_DEBUG_DRIVER("has EU power gating: %s\n",
932 info->has_eu_pg ? "y" : "n");
933}
934
935static void intel_init_dpio(struct drm_i915_private *dev_priv)
936{
937 /*
938 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
939 * CHV x1 PHY (DP/HDMI D)
940 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
941 */
942 if (IS_CHERRYVIEW(dev_priv)) {
943 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
944 DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
945 } else if (IS_VALLEYVIEW(dev_priv)) {
946 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
947 }
948}
949
950static int i915_workqueues_init(struct drm_i915_private *dev_priv)
951{
952 /*
953 * The i915 workqueue is primarily used for batched retirement of
954 * requests (and thus managing bo) once the task has been completed
955 * by the GPU. i915_gem_retire_requests() is called directly when we
956 * need high-priority retirement, such as waiting for an explicit
957 * bo.
958 *
959 * It is also used for periodic low-priority events, such as
960 * idle-timers and recording error state.
961 *
962 * All tasks on the workqueue are expected to acquire the dev mutex
963 * so there is no point in running more than one instance of the
964 * workqueue at any time. Use an ordered one.
965 */
966 dev_priv->wq = alloc_ordered_workqueue("i915", 0);
967 if (dev_priv->wq == NULL)
968 goto out_err;
969
970 dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
971 if (dev_priv->hotplug.dp_wq == NULL)
972 goto out_free_wq;
973
974 dev_priv->gpu_error.hangcheck_wq =
975 alloc_ordered_workqueue("i915-hangcheck", 0);
976 if (dev_priv->gpu_error.hangcheck_wq == NULL)
977 goto out_free_dp_wq;
978
979 return 0;
980
981out_free_dp_wq:
982 destroy_workqueue(dev_priv->hotplug.dp_wq);
983out_free_wq:
984 destroy_workqueue(dev_priv->wq);
985out_err:
986 DRM_ERROR("Failed to allocate workqueues.\n");
987
988 return -ENOMEM;
989}
990
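The comment in i915_workqueues_init() explains why an ordered workqueue is used: every queued task takes the dev mutex, so running them concurrently buys nothing. A minimal kernel-context sketch of the same pattern on an invented driver structure (not i915 code); a matching destroy_workqueue() would be needed on teardown:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct foo_dev {
	struct workqueue_struct *wq;
	struct work_struct retire_work;
};

static void foo_retire(struct work_struct *work)
{
	struct foo_dev *foo = container_of(work, struct foo_dev, retire_work);

	/* Low-priority housekeeping; serialized with everything else on foo->wq. */
	(void)foo;
}

static int foo_init(struct foo_dev *foo)
{
	/* Ordered queue: at most one work item runs at a time, in queueing order. */
	foo->wq = alloc_ordered_workqueue("foo", 0);
	if (!foo->wq)
		return -ENOMEM;

	INIT_WORK(&foo->retire_work, foo_retire);
	queue_work(foo->wq, &foo->retire_work);
	return 0;
}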
991static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
992{
993 destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
994 destroy_workqueue(dev_priv->hotplug.dp_wq);
995 destroy_workqueue(dev_priv->wq);
996}
997
998/**
999 * i915_driver_init_early - setup state not requiring device access
1000 * @dev_priv: device private
1001 *
1002 * Initialize everything that is a "SW-only" state, that is state not
1003 * requiring accessing the device or exposing the driver via kernel internal
1004 * or userspace interfaces. Example steps belonging here: lock initialization,
1005 * system memory allocation, setting up device specific attributes and
1006 * function hooks not requiring accessing the device.
1007 */
1008static int i915_driver_init_early(struct drm_i915_private *dev_priv,
1009 struct drm_device *dev,
1010 struct intel_device_info *info)
1011{
1012 struct intel_device_info *device_info;
1013 int ret = 0;
1014
1015 if (i915_inject_load_failure())
1016 return -ENODEV;
1017
1018 /* Setup the write-once "constant" device info */
1019 device_info = (struct intel_device_info *)&dev_priv->info;
1020 memcpy(device_info, info, sizeof(dev_priv->info));
1021 device_info->device_id = dev->pdev->device;
1022
1023 spin_lock_init(&dev_priv->irq_lock);
1024 spin_lock_init(&dev_priv->gpu_error.lock);
1025 mutex_init(&dev_priv->backlight_lock);
1026 spin_lock_init(&dev_priv->uncore.lock);
1027 spin_lock_init(&dev_priv->mm.object_stat_lock);
1028 spin_lock_init(&dev_priv->mmio_flip_lock);
1029 mutex_init(&dev_priv->sb_lock);
1030 mutex_init(&dev_priv->modeset_restore_lock);
1031 mutex_init(&dev_priv->av_mutex);
1032 mutex_init(&dev_priv->wm.wm_mutex);
1033 mutex_init(&dev_priv->pps_mutex);
1034
1035 ret = i915_workqueues_init(dev_priv);
1036 if (ret < 0)
1037 return ret;
1038
1039 /* This must be called before any calls to HAS_PCH_* */
1040 intel_detect_pch(dev);
1041
1042 intel_pm_setup(dev);
1043 intel_init_dpio(dev_priv);
1044 intel_power_domains_init(dev_priv);
1045 intel_irq_init(dev_priv);
1046 intel_init_display_hooks(dev_priv);
1047 intel_init_clock_gating_hooks(dev_priv);
1048 intel_init_audio_hooks(dev_priv);
1049 i915_gem_load_init(dev);
1050
1051 intel_display_crc_init(dev);
1052
1053 i915_dump_device_info(dev_priv);
1054
1055 /* Not all pre-production machines fall into this category, only the
1056 * very first ones. Almost everything should work, except for maybe
1057 * suspend/resume. And we don't implement workarounds that affect only
1058 * pre-production machines. */
1059 if (IS_HSW_EARLY_SDV(dev))
1060 DRM_INFO("This is an early pre-production Haswell machine. "
1061 "It may not be fully functional.\n");
1062
1063 return 0;
1064}
1065
1066/**
1067 * i915_driver_cleanup_early - cleanup the setup done in i915_driver_init_early()
1068 * @dev_priv: device private
1069 */
1070static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
1071{
1072 i915_gem_load_cleanup(dev_priv->dev);
1073 i915_workqueues_cleanup(dev_priv);
1074}
1075
1076static int i915_mmio_setup(struct drm_device *dev)
1077{
1078 struct drm_i915_private *dev_priv = to_i915(dev);
1079 int mmio_bar;
1080 int mmio_size;
1081
1082 mmio_bar = IS_GEN2(dev) ? 1 : 0;
1083 /*
1084 * Before gen4, the registers and the GTT are behind different BARs.
1085 * However, from gen4 onwards, the registers and the GTT are shared
1086 * in the same BAR, so we want to restrict this ioremap from
1087 * clobbering the GTT which we want ioremap_wc instead. Fortunately,
1088 * the register BAR remains the same size for all the earlier
1089 * generations up to Ironlake.
1090 */
1091 if (INTEL_INFO(dev)->gen < 5)
1092 mmio_size = 512 * 1024;
1093 else
1094 mmio_size = 2 * 1024 * 1024;
1095 dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
1096 if (dev_priv->regs == NULL) {
1097 DRM_ERROR("failed to map registers\n");
1098
1099 return -EIO;
1100 }
1101
1102 /* Try to make sure MCHBAR is enabled before poking at it */
1103 intel_setup_mchbar(dev);
1104
1105 return 0;
1106}
1107
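i915_mmio_setup() deliberately limits the ioremap to the register portion of the BAR so the GTT half can later be mapped write-combining. A sketch of the same pci_iomap() restriction for a hypothetical device; the helper name and the register offset are invented:

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/pci.h>

/* Map only the first 512 KiB of BAR 0 (the pre-gen5 register window size
 * mentioned above) and read one register through the mapping. */
static int foo_peek_mmio(struct pci_dev *pdev)
{
	void __iomem *regs = pci_iomap(pdev, 0, 512 * 1024);

	if (!regs)
		return -EIO;

	pr_info("register 0x0 reads 0x%08x\n", ioread32(regs));
	pci_iounmap(pdev, regs);
	return 0;
}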
1108static void i915_mmio_cleanup(struct drm_device *dev)
1109{
1110 struct drm_i915_private *dev_priv = to_i915(dev);
1111
1112 intel_teardown_mchbar(dev);
1113 pci_iounmap(dev->pdev, dev_priv->regs);
1114}
1115
1116/**
1117 * i915_driver_init_mmio - setup device MMIO
1118 * @dev_priv: device private
1119 *
1120 * Setup minimal device state necessary for MMIO accesses later in the
1121 * initialization sequence. The setup here should avoid any other device-wide
1122 * side effects or exposing the driver via kernel internal or user space
1123 * interfaces.
1124 */
1125static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
1126{
1127 struct drm_device *dev = dev_priv->dev;
1128 int ret;
1129
1130 if (i915_inject_load_failure())
1131 return -ENODEV;
1132
1133 if (i915_get_bridge_dev(dev))
1134 return -EIO;
1135
1136 ret = i915_mmio_setup(dev);
1137 if (ret < 0)
1138 goto put_bridge;
1139
1140 intel_uncore_init(dev);
1141
1142 return 0;
1143
1144put_bridge:
1145 pci_dev_put(dev_priv->bridge_dev);
1146
1147 return ret;
1148}
1149
1150/**
1151 * i915_driver_cleanup_mmio - cleanup the setup done in i915_driver_init_mmio()
1152 * @dev_priv: device private
1153 */
1154static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
1155{
1156 struct drm_device *dev = dev_priv->dev;
1157
1158 intel_uncore_fini(dev);
1159 i915_mmio_cleanup(dev);
1160 pci_dev_put(dev_priv->bridge_dev);
1161}
1162
1163/**
1164 * i915_driver_init_hw - setup state requiring device access
1165 * @dev_priv: device private
1166 *
1167 * Setup state that requires accessing the device, but doesn't require
1168 * exposing the driver via kernel internal or userspace interfaces.
1169 */
1170static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
1171{
1172 struct drm_device *dev = dev_priv->dev;
1173 struct i915_ggtt *ggtt = &dev_priv->ggtt;
1174 uint32_t aperture_size;
1175 int ret;
1176
1177 if (i915_inject_load_failure())
1178 return -ENODEV;
1179
1180 intel_device_info_runtime_init(dev);
1181
1182 ret = i915_ggtt_init_hw(dev);
1183 if (ret)
1184 return ret;
1185
1186 ret = i915_ggtt_enable_hw(dev);
1187 if (ret) {
1188 DRM_ERROR("failed to enable GGTT\n");
1189 goto out_ggtt;
1190 }
1191
1192 /* WARNING: Apparently we must kick fbdev drivers before vgacon,
1193 * otherwise the vga fbdev driver falls over. */
1194 ret = i915_kick_out_firmware_fb(dev_priv);
1195 if (ret) {
1196 DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
1197 goto out_ggtt;
1198 }
1199
1200 ret = i915_kick_out_vgacon(dev_priv);
1201 if (ret) {
1202 DRM_ERROR("failed to remove conflicting VGA console\n");
1203 goto out_ggtt;
1204 }
1205
1206 pci_set_master(dev->pdev);
1207
1208 /* overlay on gen2 is broken and can't address above 1G */
1209 if (IS_GEN2(dev))
1210 dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
1211
1212 /* 965GM sometimes incorrectly writes to hardware status page (HWS)
1213 * using 32bit addressing, overwriting memory if HWS is located
1214 * above 4GB.
1215 *
1216 * The documentation also mentions an issue with undefined
1217 * behaviour if any general state is accessed within a page above 4GB,
1218 * which also needs to be handled carefully.
1219 */
1220 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
1221 dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
1222
1223 aperture_size = ggtt->mappable_end;
1224
1225 ggtt->mappable =
1226 io_mapping_create_wc(ggtt->mappable_base,
1227 aperture_size);
1228 if (!ggtt->mappable) {
1229 ret = -EIO;
1230 goto out_ggtt;
1231 }
1232
1233 ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base,
1234 aperture_size);
1235
1236 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
1237 PM_QOS_DEFAULT_VALUE);
1238
1239 intel_uncore_sanitize(dev);
1240
1241 intel_opregion_setup(dev);
1242
1243 i915_gem_load_init_fences(dev_priv);
1244
1245 /* On the 945G/GM, the chipset reports the MSI capability on the
1246 * integrated graphics even though the support isn't actually there
1247 * according to the published specs. It doesn't appear to function
1248 * correctly in testing on 945G.
1249 * This may be a side effect of MSI having been made available for PEG
1250 * and the registers being closely associated.
1251 *
1252 * According to chipset errata, on the 965GM, MSI interrupts may
 1253	 * be lost or delayed, but we use them anyway to avoid
1254 * stuck interrupts on some machines.
1255 */
1256 if (!IS_I945G(dev) && !IS_I945GM(dev)) {
1257 if (pci_enable_msi(dev->pdev) < 0)
1258 DRM_DEBUG_DRIVER("can't enable MSI");
1259 }
1260
1261 return 0;
1262
1263out_ggtt:
1264 i915_ggtt_cleanup_hw(dev);
1265
1266 return ret;
1267}
1268
1269/**
1270 * i915_driver_cleanup_hw - cleanup the setup done in i915_driver_init_hw()
1271 * @dev_priv: device private
1272 */
1273static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
1274{
1275 struct drm_device *dev = dev_priv->dev;
1276 struct i915_ggtt *ggtt = &dev_priv->ggtt;
1277
1278 if (dev->pdev->msi_enabled)
1279 pci_disable_msi(dev->pdev);
1280
1281 pm_qos_remove_request(&dev_priv->pm_qos);
1282 arch_phys_wc_del(ggtt->mtrr);
1283 io_mapping_free(ggtt->mappable);
1284 i915_ggtt_cleanup_hw(dev);
1285}
1286
1287/**
1288 * i915_driver_register - register the driver with the rest of the system
1289 * @dev_priv: device private
1290 *
1291 * Perform any steps necessary to make the driver available via kernel
1292 * internal or userspace interfaces.
1293 */
1294static void i915_driver_register(struct drm_i915_private *dev_priv)
1295{
1296 struct drm_device *dev = dev_priv->dev;
1297
1298 i915_gem_shrinker_init(dev_priv);
1299 /*
1300 * Notify a valid surface after modesetting,
1301 * when running inside a VM.
1302 */
1303 if (intel_vgpu_active(dev))
1304 I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);
1305
1306 i915_setup_sysfs(dev);
1307
1308 if (INTEL_INFO(dev_priv)->num_pipes) {
1309 /* Must be done after probing outputs */
1310 intel_opregion_init(dev);
1311 acpi_video_register();
1312 }
1313
1314 if (IS_GEN5(dev_priv))
1315 intel_gpu_ips_init(dev_priv);
1316
1317 i915_audio_component_init(dev_priv);
1318}
1319
1320/**
 1321 * i915_driver_unregister - cleanup the registration done in i915_driver_register()
1322 * @dev_priv: device private
1323 */
1324static void i915_driver_unregister(struct drm_i915_private *dev_priv)
1325{
1326 i915_audio_component_cleanup(dev_priv);
1327 intel_gpu_ips_teardown();
1328 acpi_video_unregister();
1329 intel_opregion_fini(dev_priv->dev);
1330 i915_teardown_sysfs(dev_priv->dev);
1331 i915_gem_shrinker_cleanup(dev_priv);
1332}
1333
1334/**
1335 * i915_driver_load - setup chip and create an initial config
1336 * @dev: DRM device
1337 * @flags: startup flags
1338 *
1339 * The driver load routine has to do several things:
1340 * - drive output discovery via intel_modeset_init()
1341 * - initialize the memory manager
1342 * - allocate initial config memory
1343 * - setup the DRM framebuffer with the allocated memory
1344 */
1345int i915_driver_load(struct drm_device *dev, unsigned long flags)
1346{
1347 struct drm_i915_private *dev_priv;
1348 int ret = 0;
1349
1350 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
1351 if (dev_priv == NULL)
1352 return -ENOMEM;
1353
1354 dev->dev_private = dev_priv;
1355 /* Must be set before calling __i915_printk */
1356 dev_priv->dev = dev;
1357
1358 ret = i915_driver_init_early(dev_priv, dev,
1359 (struct intel_device_info *)flags);
1360
1361 if (ret < 0)
1362 goto out_free_priv;
1363
1364 intel_runtime_pm_get(dev_priv);
1365
1366 ret = i915_driver_init_mmio(dev_priv);
1367 if (ret < 0)
1368 goto out_runtime_pm_put;
1369
1370 ret = i915_driver_init_hw(dev_priv);
1371 if (ret < 0)
1372 goto out_cleanup_mmio;
1373
1374 /*
1375 * TODO: move the vblank init and parts of modeset init steps into one
1376 * of the i915_driver_init_/i915_driver_register functions according
1377 * to the role/effect of the given init step.
1378 */
1379 if (INTEL_INFO(dev)->num_pipes) {
1380 ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
1381 if (ret)
1382 goto out_cleanup_hw;
1383 }
1384
1385 ret = i915_load_modeset_init(dev);
1386 if (ret < 0)
1387 goto out_cleanup_vblank;
1388
1389 i915_driver_register(dev_priv);
1390
1391 intel_runtime_pm_enable(dev_priv);
1392
1393 intel_runtime_pm_put(dev_priv);
1394
1395 return 0;
1396
1397out_cleanup_vblank:
1398 drm_vblank_cleanup(dev);
1399out_cleanup_hw:
1400 i915_driver_cleanup_hw(dev_priv);
1401out_cleanup_mmio:
1402 i915_driver_cleanup_mmio(dev_priv);
1403out_runtime_pm_put:
1404 intel_runtime_pm_put(dev_priv);
1405 i915_driver_cleanup_early(dev_priv);
1406out_free_priv:
1407 i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret);
1408
1409 kfree(dev_priv);
1410
1411 return ret;
1412}
1413
1414int i915_driver_unload(struct drm_device *dev)
1415{
1416 struct drm_i915_private *dev_priv = dev->dev_private;
1417 int ret;
1418
1419 intel_fbdev_fini(dev);
1420
1421 ret = i915_gem_suspend(dev);
1422 if (ret) {
1423 DRM_ERROR("failed to idle hardware: %d\n", ret);
1424 return ret;
1425 }
1426
1427 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
1428
1429 i915_driver_unregister(dev_priv);
1430
1431 drm_vblank_cleanup(dev);
1432
1433 intel_modeset_cleanup(dev);
1434
1435 /*
1436 * free the memory space allocated for the child device
1437 * config parsed from VBT
1438 */
1439 if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
1440 kfree(dev_priv->vbt.child_dev);
1441 dev_priv->vbt.child_dev = NULL;
1442 dev_priv->vbt.child_dev_num = 0;
1443 }
1444 kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
1445 dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
1446 kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
1447 dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
1448
1449 vga_switcheroo_unregister_client(dev->pdev);
1450 vga_client_register(dev->pdev, NULL, NULL, NULL);
1451
1452 intel_csr_ucode_fini(dev_priv);
1453
1454 /* Free error state after interrupts are fully disabled. */
1455 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
1456 i915_destroy_error_state(dev);
1457
1458 /* Flush any outstanding unpin_work. */
1459 flush_workqueue(dev_priv->wq);
1460
1461 intel_guc_ucode_fini(dev);
1462 mutex_lock(&dev->struct_mutex);
1463 i915_gem_cleanup_engines(dev);
1464 i915_gem_context_fini(dev);
1465 mutex_unlock(&dev->struct_mutex);
1466 intel_fbc_cleanup_cfb(dev_priv);
1467
1468 intel_power_domains_fini(dev_priv);
1469
1470 i915_driver_cleanup_hw(dev_priv);
1471 i915_driver_cleanup_mmio(dev_priv);
1472
1473 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
1474
1475 i915_driver_cleanup_early(dev_priv);
1476 kfree(dev_priv);
1477
1478 return 0;
1479}
1480
1481int i915_driver_open(struct drm_device *dev, struct drm_file *file)
1482{
1483 int ret;
1484
1485 ret = i915_gem_open(dev, file);
1486 if (ret)
1487 return ret;
1488
1489 return 0;
1490}
1491
1492/**
1493 * i915_driver_lastclose - clean up after all DRM clients have exited
1494 * @dev: DRM device
1495 *
1496 * Take care of cleaning up after all DRM clients have exited. In the
1497 * mode setting case, we want to restore the kernel's initial mode (just
1498 * in case the last client left us in a bad state).
1499 *
1500 * Additionally, in the non-mode setting case, we'll tear down the GTT
 1501 * and DMA structures, since the kernel won't be using them, and clean
 1502 * up any GEM state.
1503 */
1504void i915_driver_lastclose(struct drm_device *dev)
1505{
1506 intel_fbdev_restore_mode(dev);
1507 vga_switcheroo_process_delayed_switch();
1508}
1509
1510void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
1511{
1512 mutex_lock(&dev->struct_mutex);
1513 i915_gem_context_close(dev, file);
1514 i915_gem_release(dev, file);
1515 mutex_unlock(&dev->struct_mutex);
1516}
1517
1518void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
1519{
1520 struct drm_i915_file_private *file_priv = file->driver_priv;
1521
1522 kfree(file_priv);
1523}
1524
1525static int
1526i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
1527 struct drm_file *file)
1528{
1529 return -ENODEV;
1530}
1531
1532const struct drm_ioctl_desc i915_ioctls[] = {
1533 DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1534 DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
1535 DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
1536 DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
1537 DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
1538 DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
1539 DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
1540 DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1541 DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
1542 DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
1543 DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1544 DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
1545 DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1546 DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1547 DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH),
1548 DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
1549 DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1550 DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1551 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
1552 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW),
1553 DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
1554 DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
1555 DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1556 DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
1557 DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
1558 DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1559 DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1560 DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
1561 DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
1562 DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
1563 DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
1564 DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
1565 DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
1566 DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
1567 DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
1568 DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_RENDER_ALLOW),
1569 DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_RENDER_ALLOW),
1570 DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
1571 DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
1572 DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
1573 DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW),
1574 DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW),
1575 DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW),
1576 DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW),
1577 DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
1578 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
1579 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
1580 DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
1581 DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_RENDER_ALLOW),
1582 DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
1583 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
1584 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
1585};
1586
1587int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);
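The DRM_RENDER_ALLOW flag in the table above marks the ioctls reachable through a render node. A hypothetical userspace sketch exercising one of them, I915_GEM_CREATE; the render node path and the header location are assumptions about the test system:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/i915_drm.h>

int main(void)
{
	/* Render node: only DRM_RENDER_ALLOW ioctls from the table are permitted. */
	int fd = open("/dev/dri/renderD128", O_RDWR);
	struct drm_i915_gem_create create = { .size = 4096 };

	if (fd < 0)
		return 1;

	if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) == 0)
		printf("created GEM handle %u\n", create.handle);

	close(fd);
	return 0;
}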
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 85c4debf47e0..95ddd56b89f0 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -27,400 +27,92 @@
27 * 27 *
28 */ 28 */
29 29
30#include <linux/device.h>
31#include <linux/acpi.h> 30#include <linux/acpi.h>
32#include <drm/drmP.h> 31#include <linux/device.h>
33#include <drm/i915_drm.h> 32#include <linux/oom.h>
34#include "i915_drv.h"
35#include "i915_trace.h"
36#include "intel_drv.h"
37
38#include <linux/apple-gmux.h>
39#include <linux/console.h>
40#include <linux/module.h> 33#include <linux/module.h>
34#include <linux/pci.h>
35#include <linux/pm.h>
41#include <linux/pm_runtime.h> 36#include <linux/pm_runtime.h>
37#include <linux/pnp.h>
38#include <linux/slab.h>
42#include <linux/vgaarb.h> 39#include <linux/vgaarb.h>
43#include <linux/vga_switcheroo.h> 40#include <linux/vga_switcheroo.h>
44#include <drm/drm_crtc_helper.h> 41#include <linux/vt.h>
45 42#include <acpi/video.h>
46static struct drm_driver driver;
47
48#define GEN_DEFAULT_PIPEOFFSETS \
49 .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
50 PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
51 .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
52 TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
53 .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
54
55#define GEN_CHV_PIPEOFFSETS \
56 .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
57 CHV_PIPE_C_OFFSET }, \
58 .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
59 CHV_TRANSCODER_C_OFFSET, }, \
60 .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
61 CHV_PALETTE_C_OFFSET }
62
63#define CURSOR_OFFSETS \
64 .cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }
65
66#define IVB_CURSOR_OFFSETS \
67 .cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }
68
69#define BDW_COLORS \
70 .color = { .degamma_lut_size = 512, .gamma_lut_size = 512 }
71#define CHV_COLORS \
72 .color = { .degamma_lut_size = 65, .gamma_lut_size = 257 }
73
74static const struct intel_device_info intel_i830_info = {
75 .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
76 .has_overlay = 1, .overlay_needs_physical = 1,
77 .ring_mask = RENDER_RING,
78 GEN_DEFAULT_PIPEOFFSETS,
79 CURSOR_OFFSETS,
80};
81
82static const struct intel_device_info intel_845g_info = {
83 .gen = 2, .num_pipes = 1,
84 .has_overlay = 1, .overlay_needs_physical = 1,
85 .ring_mask = RENDER_RING,
86 GEN_DEFAULT_PIPEOFFSETS,
87 CURSOR_OFFSETS,
88};
89
90static const struct intel_device_info intel_i85x_info = {
91 .gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
92 .cursor_needs_physical = 1,
93 .has_overlay = 1, .overlay_needs_physical = 1,
94 .has_fbc = 1,
95 .ring_mask = RENDER_RING,
96 GEN_DEFAULT_PIPEOFFSETS,
97 CURSOR_OFFSETS,
98};
99
100static const struct intel_device_info intel_i865g_info = {
101 .gen = 2, .num_pipes = 1,
102 .has_overlay = 1, .overlay_needs_physical = 1,
103 .ring_mask = RENDER_RING,
104 GEN_DEFAULT_PIPEOFFSETS,
105 CURSOR_OFFSETS,
106};
107
108static const struct intel_device_info intel_i915g_info = {
109 .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
110 .has_overlay = 1, .overlay_needs_physical = 1,
111 .ring_mask = RENDER_RING,
112 GEN_DEFAULT_PIPEOFFSETS,
113 CURSOR_OFFSETS,
114};
115static const struct intel_device_info intel_i915gm_info = {
116 .gen = 3, .is_mobile = 1, .num_pipes = 2,
117 .cursor_needs_physical = 1,
118 .has_overlay = 1, .overlay_needs_physical = 1,
119 .supports_tv = 1,
120 .has_fbc = 1,
121 .ring_mask = RENDER_RING,
122 GEN_DEFAULT_PIPEOFFSETS,
123 CURSOR_OFFSETS,
124};
125static const struct intel_device_info intel_i945g_info = {
126 .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
127 .has_overlay = 1, .overlay_needs_physical = 1,
128 .ring_mask = RENDER_RING,
129 GEN_DEFAULT_PIPEOFFSETS,
130 CURSOR_OFFSETS,
131};
132static const struct intel_device_info intel_i945gm_info = {
133 .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
134 .has_hotplug = 1, .cursor_needs_physical = 1,
135 .has_overlay = 1, .overlay_needs_physical = 1,
136 .supports_tv = 1,
137 .has_fbc = 1,
138 .ring_mask = RENDER_RING,
139 GEN_DEFAULT_PIPEOFFSETS,
140 CURSOR_OFFSETS,
141};
142
143static const struct intel_device_info intel_i965g_info = {
144 .gen = 4, .is_broadwater = 1, .num_pipes = 2,
145 .has_hotplug = 1,
146 .has_overlay = 1,
147 .ring_mask = RENDER_RING,
148 GEN_DEFAULT_PIPEOFFSETS,
149 CURSOR_OFFSETS,
150};
151
152static const struct intel_device_info intel_i965gm_info = {
153 .gen = 4, .is_crestline = 1, .num_pipes = 2,
154 .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
155 .has_overlay = 1,
156 .supports_tv = 1,
157 .ring_mask = RENDER_RING,
158 GEN_DEFAULT_PIPEOFFSETS,
159 CURSOR_OFFSETS,
160};
161
162static const struct intel_device_info intel_g33_info = {
163 .gen = 3, .is_g33 = 1, .num_pipes = 2,
164 .need_gfx_hws = 1, .has_hotplug = 1,
165 .has_overlay = 1,
166 .ring_mask = RENDER_RING,
167 GEN_DEFAULT_PIPEOFFSETS,
168 CURSOR_OFFSETS,
169};
170
171static const struct intel_device_info intel_g45_info = {
172 .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
173 .has_pipe_cxsr = 1, .has_hotplug = 1,
174 .ring_mask = RENDER_RING | BSD_RING,
175 GEN_DEFAULT_PIPEOFFSETS,
176 CURSOR_OFFSETS,
177};
178
179static const struct intel_device_info intel_gm45_info = {
180 .gen = 4, .is_g4x = 1, .num_pipes = 2,
181 .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
182 .has_pipe_cxsr = 1, .has_hotplug = 1,
183 .supports_tv = 1,
184 .ring_mask = RENDER_RING | BSD_RING,
185 GEN_DEFAULT_PIPEOFFSETS,
186 CURSOR_OFFSETS,
187};
188
189static const struct intel_device_info intel_pineview_info = {
190 .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
191 .need_gfx_hws = 1, .has_hotplug = 1,
192 .has_overlay = 1,
193 GEN_DEFAULT_PIPEOFFSETS,
194 CURSOR_OFFSETS,
195};
196
197static const struct intel_device_info intel_ironlake_d_info = {
198 .gen = 5, .num_pipes = 2,
199 .need_gfx_hws = 1, .has_hotplug = 1,
200 .ring_mask = RENDER_RING | BSD_RING,
201 GEN_DEFAULT_PIPEOFFSETS,
202 CURSOR_OFFSETS,
203};
204 43
205static const struct intel_device_info intel_ironlake_m_info = { 44#include <drm/drmP.h>
206 .gen = 5, .is_mobile = 1, .num_pipes = 2, 45#include <drm/drm_crtc_helper.h>
207 .need_gfx_hws = 1, .has_hotplug = 1, 46#include <drm/i915_drm.h>
208 .has_fbc = 1,
209 .ring_mask = RENDER_RING | BSD_RING,
210 GEN_DEFAULT_PIPEOFFSETS,
211 CURSOR_OFFSETS,
212};
213
214static const struct intel_device_info intel_sandybridge_d_info = {
215 .gen = 6, .num_pipes = 2,
216 .need_gfx_hws = 1, .has_hotplug = 1,
217 .has_fbc = 1,
218 .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
219 .has_llc = 1,
220 GEN_DEFAULT_PIPEOFFSETS,
221 CURSOR_OFFSETS,
222};
223
224static const struct intel_device_info intel_sandybridge_m_info = {
225 .gen = 6, .is_mobile = 1, .num_pipes = 2,
226 .need_gfx_hws = 1, .has_hotplug = 1,
227 .has_fbc = 1,
228 .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
229 .has_llc = 1,
230 GEN_DEFAULT_PIPEOFFSETS,
231 CURSOR_OFFSETS,
232};
233
234#define GEN7_FEATURES \
235 .gen = 7, .num_pipes = 3, \
236 .need_gfx_hws = 1, .has_hotplug = 1, \
237 .has_fbc = 1, \
238 .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
239 .has_llc = 1, \
240 GEN_DEFAULT_PIPEOFFSETS, \
241 IVB_CURSOR_OFFSETS
242
243static const struct intel_device_info intel_ivybridge_d_info = {
244 GEN7_FEATURES,
245 .is_ivybridge = 1,
246};
247
248static const struct intel_device_info intel_ivybridge_m_info = {
249 GEN7_FEATURES,
250 .is_ivybridge = 1,
251 .is_mobile = 1,
252};
253
254static const struct intel_device_info intel_ivybridge_q_info = {
255 GEN7_FEATURES,
256 .is_ivybridge = 1,
257 .num_pipes = 0, /* legal, last one wins */
258};
259
260#define VLV_FEATURES \
261 .gen = 7, .num_pipes = 2, \
262 .need_gfx_hws = 1, .has_hotplug = 1, \
263 .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
264 .display_mmio_offset = VLV_DISPLAY_BASE, \
265 GEN_DEFAULT_PIPEOFFSETS, \
266 CURSOR_OFFSETS
267
268static const struct intel_device_info intel_valleyview_m_info = {
269 VLV_FEATURES,
270 .is_valleyview = 1,
271 .is_mobile = 1,
272};
273 47
274static const struct intel_device_info intel_valleyview_d_info = { 48#include "i915_drv.h"
275 VLV_FEATURES, 49#include "i915_trace.h"
276 .is_valleyview = 1, 50#include "i915_vgpu.h"
277}; 51#include "intel_drv.h"
278 52
279#define HSW_FEATURES \ 53static struct drm_driver driver;
280 GEN7_FEATURES, \
281 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
282 .has_ddi = 1, \
283 .has_fpga_dbg = 1
284 54
285static const struct intel_device_info intel_haswell_d_info = { 55static unsigned int i915_load_fail_count;
286 HSW_FEATURES,
287 .is_haswell = 1,
288};
289 56
290static const struct intel_device_info intel_haswell_m_info = { 57bool __i915_inject_load_failure(const char *func, int line)
291 HSW_FEATURES, 58{
292 .is_haswell = 1, 59 if (i915_load_fail_count >= i915.inject_load_failure)
293 .is_mobile = 1, 60 return false;
294};
295 61
296#define BDW_FEATURES \ 62 if (++i915_load_fail_count == i915.inject_load_failure) {
297 HSW_FEATURES, \ 63 DRM_INFO("Injecting failure at checkpoint %u [%s:%d]\n",
298 BDW_COLORS 64 i915.inject_load_failure, func, line);
65 return true;
66 }
299 67
300static const struct intel_device_info intel_broadwell_d_info = { 68 return false;
301 BDW_FEATURES, 69}
302 .gen = 8,
303};
304 70
305static const struct intel_device_info intel_broadwell_m_info = { 71#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
306 BDW_FEATURES, 72#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
307 .gen = 8, .is_mobile = 1, 73 "providing the dmesg log by booting with drm.debug=0xf"
308};
309 74
310static const struct intel_device_info intel_broadwell_gt3d_info = { 75void
311 BDW_FEATURES, 76__i915_printk(struct drm_i915_private *dev_priv, const char *level,
312 .gen = 8, 77 const char *fmt, ...)
313 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING, 78{
314}; 79 static bool shown_bug_once;
80 struct device *dev = dev_priv->drm.dev;
81 bool is_error = level[1] <= KERN_ERR[1];
82 bool is_debug = level[1] == KERN_DEBUG[1];
83 struct va_format vaf;
84 va_list args;
85
86 if (is_debug && !(drm_debug & DRM_UT_DRIVER))
87 return;
315 88
316static const struct intel_device_info intel_broadwell_gt3m_info = { 89 va_start(args, fmt);
317 BDW_FEATURES,
318 .gen = 8, .is_mobile = 1,
319 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
320};
321 90
322static const struct intel_device_info intel_cherryview_info = { 91 vaf.fmt = fmt;
323 .gen = 8, .num_pipes = 3, 92 vaf.va = &args;
324 .need_gfx_hws = 1, .has_hotplug = 1,
325 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
326 .is_cherryview = 1,
327 .display_mmio_offset = VLV_DISPLAY_BASE,
328 GEN_CHV_PIPEOFFSETS,
329 CURSOR_OFFSETS,
330 CHV_COLORS,
331};
332 93
333static const struct intel_device_info intel_skylake_info = { 94 dev_printk(level, dev, "[" DRM_NAME ":%ps] %pV",
334 BDW_FEATURES, 95 __builtin_return_address(0), &vaf);
335 .is_skylake = 1,
336 .gen = 9,
337};
338 96
339static const struct intel_device_info intel_skylake_gt3_info = { 97 if (is_error && !shown_bug_once) {
340 BDW_FEATURES, 98 dev_notice(dev, "%s", FDO_BUG_MSG);
341 .is_skylake = 1, 99 shown_bug_once = true;
342 .gen = 9, 100 }
343 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
344};
345 101
346static const struct intel_device_info intel_broxton_info = { 102 va_end(args);
347 .is_preliminary = 1, 103}
348 .is_broxton = 1,
349 .gen = 9,
350 .need_gfx_hws = 1, .has_hotplug = 1,
351 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
352 .num_pipes = 3,
353 .has_ddi = 1,
354 .has_fpga_dbg = 1,
355 .has_fbc = 1,
356 GEN_DEFAULT_PIPEOFFSETS,
357 IVB_CURSOR_OFFSETS,
358 BDW_COLORS,
359};
360 104
361static const struct intel_device_info intel_kabylake_info = { 105static bool i915_error_injected(struct drm_i915_private *dev_priv)
362 BDW_FEATURES, 106{
363 .is_kabylake = 1, 107 return i915.inject_load_failure &&
364 .gen = 9, 108 i915_load_fail_count == i915.inject_load_failure;
365}; 109}
366 110
367static const struct intel_device_info intel_kabylake_gt3_info = { 111#define i915_load_error(dev_priv, fmt, ...) \
368 BDW_FEATURES, 112 __i915_printk(dev_priv, \
369 .is_kabylake = 1, 113 i915_error_injected(dev_priv) ? KERN_DEBUG : KERN_ERR, \
370 .gen = 9, 114 fmt, ##__VA_ARGS__)
371 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
372};
373 115
374/*
375 * Make sure any device matches here are from most specific to most
376 * general. For example, since the Quanta match is based on the subsystem
377 * and subvendor IDs, we need it to come before the more general IVB
378 * PCI ID matches, otherwise we'll use the wrong info struct above.
379 */
380static const struct pci_device_id pciidlist[] = {
381 INTEL_I830_IDS(&intel_i830_info),
382 INTEL_I845G_IDS(&intel_845g_info),
383 INTEL_I85X_IDS(&intel_i85x_info),
384 INTEL_I865G_IDS(&intel_i865g_info),
385 INTEL_I915G_IDS(&intel_i915g_info),
386 INTEL_I915GM_IDS(&intel_i915gm_info),
387 INTEL_I945G_IDS(&intel_i945g_info),
388 INTEL_I945GM_IDS(&intel_i945gm_info),
389 INTEL_I965G_IDS(&intel_i965g_info),
390 INTEL_G33_IDS(&intel_g33_info),
391 INTEL_I965GM_IDS(&intel_i965gm_info),
392 INTEL_GM45_IDS(&intel_gm45_info),
393 INTEL_G45_IDS(&intel_g45_info),
394 INTEL_PINEVIEW_IDS(&intel_pineview_info),
395 INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),
396 INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),
397 INTEL_SNB_D_IDS(&intel_sandybridge_d_info),
398 INTEL_SNB_M_IDS(&intel_sandybridge_m_info),
399 INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */
400 INTEL_IVB_M_IDS(&intel_ivybridge_m_info),
401 INTEL_IVB_D_IDS(&intel_ivybridge_d_info),
402 INTEL_HSW_D_IDS(&intel_haswell_d_info),
403 INTEL_HSW_M_IDS(&intel_haswell_m_info),
404 INTEL_VLV_M_IDS(&intel_valleyview_m_info),
405 INTEL_VLV_D_IDS(&intel_valleyview_d_info),
406 INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),
407 INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),
408 INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info),
409 INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info),
410 INTEL_CHV_IDS(&intel_cherryview_info),
411 INTEL_SKL_GT1_IDS(&intel_skylake_info),
412 INTEL_SKL_GT2_IDS(&intel_skylake_info),
413 INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),
414 INTEL_SKL_GT4_IDS(&intel_skylake_gt3_info),
415 INTEL_BXT_IDS(&intel_broxton_info),
416 INTEL_KBL_GT1_IDS(&intel_kabylake_info),
417 INTEL_KBL_GT2_IDS(&intel_kabylake_info),
418 INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
419 INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
420 {0, 0, 0}
421};
422
423MODULE_DEVICE_TABLE(pci, pciidlist);
424 116
425static enum intel_pch intel_virt_detect_pch(struct drm_device *dev) 117static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
426{ 118{
@@ -450,9 +142,9 @@ static enum intel_pch intel_virt_detect_pch(struct drm_device *dev)
450 return ret; 142 return ret;
451} 143}
452 144
453void intel_detect_pch(struct drm_device *dev) 145static void intel_detect_pch(struct drm_device *dev)
454{ 146{
455 struct drm_i915_private *dev_priv = dev->dev_private; 147 struct drm_i915_private *dev_priv = to_i915(dev);
456 struct pci_dev *pch = NULL; 148 struct pci_dev *pch = NULL;
457 149
458 /* In all current cases, num_pipes is equivalent to the PCH_NOP setting 150 /* In all current cases, num_pipes is equivalent to the PCH_NOP setting
@@ -519,8 +211,10 @@ void intel_detect_pch(struct drm_device *dev)
519 } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) || 211 } else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
520 (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) || 212 (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
521 ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) && 213 ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
522 pch->subsystem_vendor == 0x1af4 && 214 pch->subsystem_vendor ==
523 pch->subsystem_device == 0x1100)) { 215 PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
216 pch->subsystem_device ==
217 PCI_SUBDEVICE_ID_QEMU)) {
524 dev_priv->pch_type = intel_virt_detect_pch(dev); 218 dev_priv->pch_type = intel_virt_detect_pch(dev);
525 } else 219 } else
526 continue; 220 continue;
@@ -534,9 +228,9 @@ void intel_detect_pch(struct drm_device *dev)
534 pci_dev_put(pch); 228 pci_dev_put(pch);
535} 229}
536 230
537bool i915_semaphore_is_enabled(struct drm_device *dev) 231bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv)
538{ 232{
539 if (INTEL_INFO(dev)->gen < 6) 233 if (INTEL_GEN(dev_priv) < 6)
540 return false; 234 return false;
541 235
542 if (i915.semaphores >= 0) 236 if (i915.semaphores >= 0)
@@ -546,22 +240,1172 @@ bool i915_semaphore_is_enabled(struct drm_device *dev)
546 if (i915.enable_execlists) 240 if (i915.enable_execlists)
547 return false; 241 return false;
548 242
549 /* Until we get further testing... */
550 if (IS_GEN8(dev))
551 return false;
552
553#ifdef CONFIG_INTEL_IOMMU 243#ifdef CONFIG_INTEL_IOMMU
554 /* Enable semaphores on SNB when IO remapping is off */ 244 /* Enable semaphores on SNB when IO remapping is off */
555 if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) 245 if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped)
556 return false; 246 return false;
557#endif 247#endif
558 248
559 return true; 249 return true;
560} 250}
561 251
252static int i915_getparam(struct drm_device *dev, void *data,
253 struct drm_file *file_priv)
254{
255 struct drm_i915_private *dev_priv = to_i915(dev);
256 drm_i915_getparam_t *param = data;
257 int value;
258
259 switch (param->param) {
260 case I915_PARAM_IRQ_ACTIVE:
261 case I915_PARAM_ALLOW_BATCHBUFFER:
262 case I915_PARAM_LAST_DISPATCH:
263 /* Reject all old ums/dri params. */
264 return -ENODEV;
265 case I915_PARAM_CHIPSET_ID:
266 value = dev->pdev->device;
267 break;
268 case I915_PARAM_REVISION:
269 value = dev->pdev->revision;
270 break;
271 case I915_PARAM_HAS_GEM:
272 value = 1;
273 break;
274 case I915_PARAM_NUM_FENCES_AVAIL:
275 value = dev_priv->num_fence_regs;
276 break;
277 case I915_PARAM_HAS_OVERLAY:
278 value = dev_priv->overlay ? 1 : 0;
279 break;
280 case I915_PARAM_HAS_PAGEFLIPPING:
281 value = 1;
282 break;
283 case I915_PARAM_HAS_EXECBUF2:
284 /* depends on GEM */
285 value = 1;
286 break;
287 case I915_PARAM_HAS_BSD:
288 value = intel_engine_initialized(&dev_priv->engine[VCS]);
289 break;
290 case I915_PARAM_HAS_BLT:
291 value = intel_engine_initialized(&dev_priv->engine[BCS]);
292 break;
293 case I915_PARAM_HAS_VEBOX:
294 value = intel_engine_initialized(&dev_priv->engine[VECS]);
295 break;
296 case I915_PARAM_HAS_BSD2:
297 value = intel_engine_initialized(&dev_priv->engine[VCS2]);
298 break;
299 case I915_PARAM_HAS_RELAXED_FENCING:
300 value = 1;
301 break;
302 case I915_PARAM_HAS_COHERENT_RINGS:
303 value = 1;
304 break;
305 case I915_PARAM_HAS_EXEC_CONSTANTS:
306 value = INTEL_INFO(dev)->gen >= 4;
307 break;
308 case I915_PARAM_HAS_RELAXED_DELTA:
309 value = 1;
310 break;
311 case I915_PARAM_HAS_GEN7_SOL_RESET:
312 value = 1;
313 break;
314 case I915_PARAM_HAS_LLC:
315 value = HAS_LLC(dev);
316 break;
317 case I915_PARAM_HAS_WT:
318 value = HAS_WT(dev);
319 break;
320 case I915_PARAM_HAS_ALIASING_PPGTT:
321 value = USES_PPGTT(dev);
322 break;
323 case I915_PARAM_HAS_WAIT_TIMEOUT:
324 value = 1;
325 break;
326 case I915_PARAM_HAS_SEMAPHORES:
327 value = i915_semaphore_is_enabled(dev_priv);
328 break;
329 case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
330 value = 1;
331 break;
332 case I915_PARAM_HAS_SECURE_BATCHES:
333 value = capable(CAP_SYS_ADMIN);
334 break;
335 case I915_PARAM_HAS_PINNED_BATCHES:
336 value = 1;
337 break;
338 case I915_PARAM_HAS_EXEC_NO_RELOC:
339 value = 1;
340 break;
341 case I915_PARAM_HAS_EXEC_HANDLE_LUT:
342 value = 1;
343 break;
344 case I915_PARAM_CMD_PARSER_VERSION:
345 value = i915_cmd_parser_get_version(dev_priv);
346 break;
347 case I915_PARAM_HAS_COHERENT_PHYS_GTT:
348 value = 1;
349 break;
350 case I915_PARAM_MMAP_VERSION:
351 value = 1;
352 break;
353 case I915_PARAM_SUBSLICE_TOTAL:
354 value = INTEL_INFO(dev)->subslice_total;
355 if (!value)
356 return -ENODEV;
357 break;
358 case I915_PARAM_EU_TOTAL:
359 value = INTEL_INFO(dev)->eu_total;
360 if (!value)
361 return -ENODEV;
362 break;
363 case I915_PARAM_HAS_GPU_RESET:
364 value = i915.enable_hangcheck && intel_has_gpu_reset(dev_priv);
365 break;
366 case I915_PARAM_HAS_RESOURCE_STREAMER:
367 value = HAS_RESOURCE_STREAMER(dev);
368 break;
369 case I915_PARAM_HAS_EXEC_SOFTPIN:
370 value = 1;
371 break;
372 case I915_PARAM_HAS_POOLED_EU:
373 value = HAS_POOLED_EU(dev);
374 break;
375 case I915_PARAM_MIN_EU_IN_POOL:
376 value = INTEL_INFO(dev)->min_eu_in_pool;
377 break;
378 default:
379 DRM_DEBUG("Unknown parameter %d\n", param->param);
380 return -EINVAL;
381 }
382
383 if (put_user(value, param->value))
384 return -EFAULT;
385
386 return 0;
387}
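
For context, userspace reaches i915_getparam() through the GETPARAM ioctl. A minimal sketch of such a caller, assuming the i915 uapi header (the include path may differ between raw kernel headers and libdrm) and an already-open DRM file descriptor; this is illustrative only, not part of the patch:

	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	/* Returns nonzero if the BSD (video) engine is exposed; 0 otherwise. */
	static int example_has_bsd(int drm_fd)
	{
		drm_i915_getparam_t gp = { .param = I915_PARAM_HAS_BSD };
		int value = 0;

		gp.value = &value;
		if (ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp))
			return 0;	/* ioctl failed or param unknown */

		return value;
	}
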
388
389static int i915_get_bridge_dev(struct drm_device *dev)
390{
391 struct drm_i915_private *dev_priv = to_i915(dev);
392
393 dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
394 if (!dev_priv->bridge_dev) {
395 DRM_ERROR("bridge device not found\n");
396 return -1;
397 }
398 return 0;
399}
400
401/* Allocate space for the MCH regs if needed, return nonzero on error */
402static int
403intel_alloc_mchbar_resource(struct drm_device *dev)
404{
405 struct drm_i915_private *dev_priv = to_i915(dev);
406 int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
407 u32 temp_lo, temp_hi = 0;
408 u64 mchbar_addr;
409 int ret;
410
411 if (INTEL_INFO(dev)->gen >= 4)
412 pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
413 pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
414 mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
415
416 /* If ACPI doesn't have it, assume we need to allocate it ourselves */
417#ifdef CONFIG_PNP
418 if (mchbar_addr &&
419 pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
420 return 0;
421#endif
422
423 /* Get some space for it */
424 dev_priv->mch_res.name = "i915 MCHBAR";
425 dev_priv->mch_res.flags = IORESOURCE_MEM;
426 ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
427 &dev_priv->mch_res,
428 MCHBAR_SIZE, MCHBAR_SIZE,
429 PCIBIOS_MIN_MEM,
430 0, pcibios_align_resource,
431 dev_priv->bridge_dev);
432 if (ret) {
433 DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
434 dev_priv->mch_res.start = 0;
435 return ret;
436 }
437
438 if (INTEL_INFO(dev)->gen >= 4)
439 pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
440 upper_32_bits(dev_priv->mch_res.start));
441
442 pci_write_config_dword(dev_priv->bridge_dev, reg,
443 lower_32_bits(dev_priv->mch_res.start));
444 return 0;
445}
446
 447/* Set up MCHBAR if possible; flag it for teardown if we had to enable it ourselves */
448static void
449intel_setup_mchbar(struct drm_device *dev)
450{
451 struct drm_i915_private *dev_priv = to_i915(dev);
452 int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
453 u32 temp;
454 bool enabled;
455
456 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
457 return;
458
459 dev_priv->mchbar_need_disable = false;
460
461 if (IS_I915G(dev) || IS_I915GM(dev)) {
462 pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
463 enabled = !!(temp & DEVEN_MCHBAR_EN);
464 } else {
465 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
466 enabled = temp & 1;
467 }
468
469 /* If it's already enabled, don't have to do anything */
470 if (enabled)
471 return;
472
473 if (intel_alloc_mchbar_resource(dev))
474 return;
475
476 dev_priv->mchbar_need_disable = true;
477
478 /* Space is allocated or reserved, so enable it. */
479 if (IS_I915G(dev) || IS_I915GM(dev)) {
480 pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
481 temp | DEVEN_MCHBAR_EN);
482 } else {
483 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
484 pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
485 }
486}
487
488static void
489intel_teardown_mchbar(struct drm_device *dev)
490{
491 struct drm_i915_private *dev_priv = to_i915(dev);
492 int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
493
494 if (dev_priv->mchbar_need_disable) {
495 if (IS_I915G(dev) || IS_I915GM(dev)) {
496 u32 deven_val;
497
498 pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
499 &deven_val);
500 deven_val &= ~DEVEN_MCHBAR_EN;
501 pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
502 deven_val);
503 } else {
504 u32 mchbar_val;
505
506 pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
507 &mchbar_val);
508 mchbar_val &= ~1;
509 pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
510 mchbar_val);
511 }
512 }
513
514 if (dev_priv->mch_res.start)
515 release_resource(&dev_priv->mch_res);
516}
517
 518/* true = enable decode, false = disable decode */
519static unsigned int i915_vga_set_decode(void *cookie, bool state)
520{
521 struct drm_device *dev = cookie;
522
523 intel_modeset_vga_set_state(dev, state);
524 if (state)
525 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
526 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
527 else
528 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
529}
530
531static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
532{
533 struct drm_device *dev = pci_get_drvdata(pdev);
534 pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
535
536 if (state == VGA_SWITCHEROO_ON) {
537 pr_info("switched on\n");
538 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
539 /* i915 resume handler doesn't set to D0 */
540 pci_set_power_state(dev->pdev, PCI_D0);
541 i915_resume_switcheroo(dev);
542 dev->switch_power_state = DRM_SWITCH_POWER_ON;
543 } else {
544 pr_info("switched off\n");
545 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
546 i915_suspend_switcheroo(dev, pmm);
547 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
548 }
549}
550
551static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
552{
553 struct drm_device *dev = pci_get_drvdata(pdev);
554
555 /*
556 * FIXME: open_count is protected by drm_global_mutex but that would lead to
557 * locking inversion with the driver load path. And the access here is
558 * completely racy anyway. So don't bother with locking for now.
559 */
560 return dev->open_count == 0;
561}
562
563static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
564 .set_gpu_state = i915_switcheroo_set_state,
565 .reprobe = NULL,
566 .can_switch = i915_switcheroo_can_switch,
567};
568
569static void i915_gem_fini(struct drm_device *dev)
570{
571 struct drm_i915_private *dev_priv = to_i915(dev);
572
573 /*
 574 * Neither the BIOS, ourselves nor any other kernel
575 * expects the system to be in execlists mode on startup,
576 * so we need to reset the GPU back to legacy mode. And the only
577 * known way to disable logical contexts is through a GPU reset.
578 *
579 * So in order to leave the system in a known default configuration,
580 * always reset the GPU upon unload. Afterwards we then clean up the
581 * GEM state tracking, flushing off the requests and leaving the
582 * system in a known idle state.
583 *
 584 * Note that it is of the utmost importance that the GPU is idle and
585 * all stray writes are flushed *before* we dismantle the backing
586 * storage for the pinned objects.
587 *
 588 * However, since we are uncertain that resetting the GPU on older
589 * machines is a good idea, we don't - just in case it leaves the
590 * machine in an unusable condition.
591 */
592 if (HAS_HW_CONTEXTS(dev)) {
593 int reset = intel_gpu_reset(dev_priv, ALL_ENGINES);
594 WARN_ON(reset && reset != -ENODEV);
595 }
596
597 mutex_lock(&dev->struct_mutex);
598 i915_gem_reset(dev);
599 i915_gem_cleanup_engines(dev);
600 i915_gem_context_fini(dev);
601 mutex_unlock(&dev->struct_mutex);
602
603 WARN_ON(!list_empty(&to_i915(dev)->context_list));
604}
605
606static int i915_load_modeset_init(struct drm_device *dev)
607{
608 struct drm_i915_private *dev_priv = to_i915(dev);
609 int ret;
610
611 if (i915_inject_load_failure())
612 return -ENODEV;
613
614 ret = intel_bios_init(dev_priv);
615 if (ret)
616 DRM_INFO("failed to find VBIOS tables\n");
617
618 /* If we have > 1 VGA cards, then we need to arbitrate access
619 * to the common VGA resources.
620 *
621 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
622 * then we do not take part in VGA arbitration and the
623 * vga_client_register() fails with -ENODEV.
624 */
625 ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
626 if (ret && ret != -ENODEV)
627 goto out;
628
629 intel_register_dsm_handler();
630
631 ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false);
632 if (ret)
633 goto cleanup_vga_client;
634
635 /* must happen before intel_power_domains_init_hw() on VLV/CHV */
636 intel_update_rawclk(dev_priv);
637
638 intel_power_domains_init_hw(dev_priv, false);
639
640 intel_csr_ucode_init(dev_priv);
641
642 ret = intel_irq_install(dev_priv);
643 if (ret)
644 goto cleanup_csr;
645
646 intel_setup_gmbus(dev);
647
648 /* Important: The output setup functions called by modeset_init need
649 * working irqs for e.g. gmbus and dp aux transfers. */
650 intel_modeset_init(dev);
651
652 intel_guc_init(dev);
653
654 ret = i915_gem_init(dev);
655 if (ret)
656 goto cleanup_irq;
657
658 intel_modeset_gem_init(dev);
659
660 if (INTEL_INFO(dev)->num_pipes == 0)
661 return 0;
662
663 ret = intel_fbdev_init(dev);
664 if (ret)
665 goto cleanup_gem;
666
667 /* Only enable hotplug handling once the fbdev is fully set up. */
668 intel_hpd_init(dev_priv);
669
670 drm_kms_helper_poll_init(dev);
671
672 return 0;
673
674cleanup_gem:
675 i915_gem_fini(dev);
676cleanup_irq:
677 intel_guc_fini(dev);
678 drm_irq_uninstall(dev);
679 intel_teardown_gmbus(dev);
680cleanup_csr:
681 intel_csr_ucode_fini(dev_priv);
682 intel_power_domains_fini(dev_priv);
683 vga_switcheroo_unregister_client(dev->pdev);
684cleanup_vga_client:
685 vga_client_register(dev->pdev, NULL, NULL, NULL);
686out:
687 return ret;
688}
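
The error path of i915_load_modeset_init() uses the usual kernel unwind idiom: each successfully acquired resource gets a label, and a failure jumps to the label that releases everything acquired so far, in reverse order. A generic sketch of the same pattern, with hypothetical acquire_X()/release_X() helpers; only the control flow matters:

	static int example_init(struct example_state *st)
	{
		int ret;

		ret = acquire_a(st);
		if (ret)
			goto out;		/* nothing to undo yet */

		ret = acquire_b(st);
		if (ret)
			goto cleanup_a;		/* undo A only */

		ret = acquire_c(st);
		if (ret)
			goto cleanup_b;		/* undo B, then fall through to A */

		return 0;

	cleanup_b:
		release_b(st);
	cleanup_a:
		release_a(st);
	out:
		return ret;
	}
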
689
690#if IS_ENABLED(CONFIG_FB)
691static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
692{
693 struct apertures_struct *ap;
694 struct pci_dev *pdev = dev_priv->drm.pdev;
695 struct i915_ggtt *ggtt = &dev_priv->ggtt;
696 bool primary;
697 int ret;
698
699 ap = alloc_apertures(1);
700 if (!ap)
701 return -ENOMEM;
702
703 ap->ranges[0].base = ggtt->mappable_base;
704 ap->ranges[0].size = ggtt->mappable_end;
705
706 primary =
707 pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
708
709 ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary);
710
711 kfree(ap);
712
713 return ret;
714}
715#else
716static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
717{
718 return 0;
719}
720#endif
721
722#if !defined(CONFIG_VGA_CONSOLE)
723static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
724{
725 return 0;
726}
727#elif !defined(CONFIG_DUMMY_CONSOLE)
728static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
729{
730 return -ENODEV;
731}
732#else
733static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
734{
735 int ret = 0;
736
737 DRM_INFO("Replacing VGA console driver\n");
738
739 console_lock();
740 if (con_is_bound(&vga_con))
741 ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
742 if (ret == 0) {
743 ret = do_unregister_con_driver(&vga_con);
744
745 /* Ignore "already unregistered". */
746 if (ret == -ENODEV)
747 ret = 0;
748 }
749 console_unlock();
750
751 return ret;
752}
753#endif
754
755static void intel_init_dpio(struct drm_i915_private *dev_priv)
756{
757 /*
758 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
759 * CHV x1 PHY (DP/HDMI D)
760 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
761 */
762 if (IS_CHERRYVIEW(dev_priv)) {
763 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
764 DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
765 } else if (IS_VALLEYVIEW(dev_priv)) {
766 DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
767 }
768}
769
770static int i915_workqueues_init(struct drm_i915_private *dev_priv)
771{
772 /*
773 * The i915 workqueue is primarily used for batched retirement of
774 * requests (and thus managing bo) once the task has been completed
775 * by the GPU. i915_gem_retire_requests() is called directly when we
776 * need high-priority retirement, such as waiting for an explicit
777 * bo.
778 *
779 * It is also used for periodic low-priority events, such as
780 * idle-timers and recording error state.
781 *
782 * All tasks on the workqueue are expected to acquire the dev mutex
783 * so there is no point in running more than one instance of the
784 * workqueue at any time. Use an ordered one.
785 */
786 dev_priv->wq = alloc_ordered_workqueue("i915", 0);
787 if (dev_priv->wq == NULL)
788 goto out_err;
789
790 dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
791 if (dev_priv->hotplug.dp_wq == NULL)
792 goto out_free_wq;
793
794 return 0;
795
796out_free_wq:
797 destroy_workqueue(dev_priv->wq);
798out_err:
799 DRM_ERROR("Failed to allocate workqueues.\n");
800
801 return -ENOMEM;
802}
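
Work is later deferred onto these queues with the standard workqueue API; because dev_priv->wq is ordered, items run strictly one at a time. A minimal sketch, where struct retire_work and retire_fn are hypothetical stand-ins for the driver's real work items:

	#include <linux/workqueue.h>

	struct retire_work {
		struct work_struct base;
		struct drm_i915_private *i915;
	};

	static void retire_fn(struct work_struct *work)
	{
		struct retire_work *rw = container_of(work, struct retire_work, base);

		/* Runs one item at a time because dev_priv->wq is ordered. */
		(void)rw->i915;
	}

	static void example_queue_retire(struct drm_i915_private *dev_priv,
					 struct retire_work *rw)
	{
		rw->i915 = dev_priv;
		INIT_WORK(&rw->base, retire_fn);
		queue_work(dev_priv->wq, &rw->base);
	}
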
803
804static void i915_workqueues_cleanup(struct drm_i915_private *dev_priv)
805{
806 destroy_workqueue(dev_priv->hotplug.dp_wq);
807 destroy_workqueue(dev_priv->wq);
808}
809
810/**
811 * i915_driver_init_early - setup state not requiring device access
812 * @dev_priv: device private
813 *
 814 * Initialize everything that is "SW-only" state, that is, state not
 815 * requiring access to the device or exposing the driver via kernel internal
816 * or userspace interfaces. Example steps belonging here: lock initialization,
817 * system memory allocation, setting up device specific attributes and
818 * function hooks not requiring accessing the device.
819 */
820static int i915_driver_init_early(struct drm_i915_private *dev_priv,
821 const struct pci_device_id *ent)
822{
823 const struct intel_device_info *match_info =
824 (struct intel_device_info *)ent->driver_data;
825 struct intel_device_info *device_info;
826 int ret = 0;
827
828 if (i915_inject_load_failure())
829 return -ENODEV;
830
831 /* Setup the write-once "constant" device info */
832 device_info = mkwrite_device_info(dev_priv);
833 memcpy(device_info, match_info, sizeof(*device_info));
834 device_info->device_id = dev_priv->drm.pdev->device;
835
836 BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE);
837 device_info->gen_mask = BIT(device_info->gen - 1);
838
839 spin_lock_init(&dev_priv->irq_lock);
840 spin_lock_init(&dev_priv->gpu_error.lock);
841 mutex_init(&dev_priv->backlight_lock);
842 spin_lock_init(&dev_priv->uncore.lock);
843 spin_lock_init(&dev_priv->mm.object_stat_lock);
844 spin_lock_init(&dev_priv->mmio_flip_lock);
845 mutex_init(&dev_priv->sb_lock);
846 mutex_init(&dev_priv->modeset_restore_lock);
847 mutex_init(&dev_priv->av_mutex);
848 mutex_init(&dev_priv->wm.wm_mutex);
849 mutex_init(&dev_priv->pps_mutex);
850
851 ret = i915_workqueues_init(dev_priv);
852 if (ret < 0)
853 return ret;
854
855 ret = intel_gvt_init(dev_priv);
856 if (ret < 0)
857 goto err_workqueues;
858
859 /* This must be called before any calls to HAS_PCH_* */
860 intel_detect_pch(&dev_priv->drm);
861
862 intel_pm_setup(&dev_priv->drm);
863 intel_init_dpio(dev_priv);
864 intel_power_domains_init(dev_priv);
865 intel_irq_init(dev_priv);
866 intel_init_display_hooks(dev_priv);
867 intel_init_clock_gating_hooks(dev_priv);
868 intel_init_audio_hooks(dev_priv);
869 i915_gem_load_init(&dev_priv->drm);
870
871 intel_display_crc_init(&dev_priv->drm);
872
873 intel_device_info_dump(dev_priv);
874
875 /* Not all pre-production machines fall into this category, only the
876 * very first ones. Almost everything should work, except for maybe
877 * suspend/resume. And we don't implement workarounds that affect only
878 * pre-production machines. */
879 if (IS_HSW_EARLY_SDV(dev_priv))
880 DRM_INFO("This is an early pre-production Haswell machine. "
881 "It may not be fully functional.\n");
882
883 return 0;
884
885err_workqueues:
886 i915_workqueues_cleanup(dev_priv);
887 return ret;
888}
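
The gen_mask set up above (BIT(gen - 1)) turns later generation checks into single mask tests rather than integer comparisons. A hedged sketch of how a range check could be built on top of it; the helper name below is hypothetical, while BIT/GENMASK are the standard kernel macros:

	#include <linux/bitops.h>

	/* True if the device generation lies in [from, to], inclusive. */
	static inline bool example_gen_in_range(unsigned int gen_mask,
						unsigned int from, unsigned int to)
	{
		return gen_mask & GENMASK(to - 1, from - 1);
	}

	/* e.g. example_gen_in_range(device_info->gen_mask, 6, 8) covers gen6..gen8 */
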
889
890/**
891 * i915_driver_cleanup_early - cleanup the setup done in i915_driver_init_early()
892 * @dev_priv: device private
893 */
894static void i915_driver_cleanup_early(struct drm_i915_private *dev_priv)
895{
896 i915_gem_load_cleanup(&dev_priv->drm);
897 i915_workqueues_cleanup(dev_priv);
898}
899
900static int i915_mmio_setup(struct drm_device *dev)
901{
902 struct drm_i915_private *dev_priv = to_i915(dev);
903 int mmio_bar;
904 int mmio_size;
905
906 mmio_bar = IS_GEN2(dev) ? 1 : 0;
907 /*
908 * Before gen4, the registers and the GTT are behind different BARs.
909 * However, from gen4 onwards, the registers and the GTT are shared
 910 * in the same BAR, so we want to restrict this ioremap to avoid
 911 * clobbering the GTT, which we want to map with ioremap_wc instead. Fortunately,
912 * the register BAR remains the same size for all the earlier
913 * generations up to Ironlake.
914 */
915 if (INTEL_INFO(dev)->gen < 5)
916 mmio_size = 512 * 1024;
917 else
918 mmio_size = 2 * 1024 * 1024;
919 dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
920 if (dev_priv->regs == NULL) {
921 DRM_ERROR("failed to map registers\n");
922
923 return -EIO;
924 }
925
926 /* Try to make sure MCHBAR is enabled before poking at it */
927 intel_setup_mchbar(dev);
928
929 return 0;
930}
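
Once dev_priv->regs is mapped via pci_iomap(), register access is ordinary MMIO at fixed offsets. The driver proper wraps this in the intel_uncore layer (I915_READ/I915_WRITE with forcewake handling), but the underlying operation reduces to readl/writel, roughly as in this illustrative sketch:

	#include <linux/io.h>

	/* Raw accessors for illustration only; real code goes through intel_uncore. */
	static u32 example_mmio_read(struct drm_i915_private *dev_priv, u32 offset)
	{
		return readl(dev_priv->regs + offset);
	}

	static void example_mmio_write(struct drm_i915_private *dev_priv,
				       u32 offset, u32 val)
	{
		writel(val, dev_priv->regs + offset);
	}
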
931
932static void i915_mmio_cleanup(struct drm_device *dev)
933{
934 struct drm_i915_private *dev_priv = to_i915(dev);
935
936 intel_teardown_mchbar(dev);
937 pci_iounmap(dev->pdev, dev_priv->regs);
938}
939
940/**
941 * i915_driver_init_mmio - setup device MMIO
942 * @dev_priv: device private
943 *
944 * Setup minimal device state necessary for MMIO accesses later in the
945 * initialization sequence. The setup here should avoid any other device-wide
946 * side effects or exposing the driver via kernel internal or user space
947 * interfaces.
948 */
949static int i915_driver_init_mmio(struct drm_i915_private *dev_priv)
950{
951 struct drm_device *dev = &dev_priv->drm;
952 int ret;
953
954 if (i915_inject_load_failure())
955 return -ENODEV;
956
957 if (i915_get_bridge_dev(dev))
958 return -EIO;
959
960 ret = i915_mmio_setup(dev);
961 if (ret < 0)
962 goto put_bridge;
963
964 intel_uncore_init(dev_priv);
965
966 return 0;
967
968put_bridge:
969 pci_dev_put(dev_priv->bridge_dev);
970
971 return ret;
972}
973
974/**
975 * i915_driver_cleanup_mmio - cleanup the setup done in i915_driver_init_mmio()
976 * @dev_priv: device private
977 */
978static void i915_driver_cleanup_mmio(struct drm_i915_private *dev_priv)
979{
980 struct drm_device *dev = &dev_priv->drm;
981
982 intel_uncore_fini(dev_priv);
983 i915_mmio_cleanup(dev);
984 pci_dev_put(dev_priv->bridge_dev);
985}
986
987static void intel_sanitize_options(struct drm_i915_private *dev_priv)
988{
989 i915.enable_execlists =
990 intel_sanitize_enable_execlists(dev_priv,
991 i915.enable_execlists);
992
993 /*
994 * i915.enable_ppgtt is read-only, so do an early pass to validate the
995 * user's requested state against the hardware/driver capabilities. We
996 * do this now so that we can print out any log messages once rather
997 * than every time we check intel_enable_ppgtt().
998 */
999 i915.enable_ppgtt =
1000 intel_sanitize_enable_ppgtt(dev_priv, i915.enable_ppgtt);
1001 DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
1002}
1003
1004/**
1005 * i915_driver_init_hw - setup state requiring device access
1006 * @dev_priv: device private
1007 *
1008 * Setup state that requires accessing the device, but doesn't require
1009 * exposing the driver via kernel internal or userspace interfaces.
1010 */
1011static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
1012{
1013 struct drm_device *dev = &dev_priv->drm;
1014 struct i915_ggtt *ggtt = &dev_priv->ggtt;
1015 uint32_t aperture_size;
1016 int ret;
1017
1018 if (i915_inject_load_failure())
1019 return -ENODEV;
1020
1021 intel_device_info_runtime_init(dev_priv);
1022
1023 intel_sanitize_options(dev_priv);
1024
1025 ret = i915_ggtt_init_hw(dev);
1026 if (ret)
1027 return ret;
1028
1029 ret = i915_ggtt_enable_hw(dev);
1030 if (ret) {
1031 DRM_ERROR("failed to enable GGTT\n");
1032 goto out_ggtt;
1033 }
1034
1035 /* WARNING: Apparently we must kick fbdev drivers before vgacon,
1036 * otherwise the vga fbdev driver falls over. */
1037 ret = i915_kick_out_firmware_fb(dev_priv);
1038 if (ret) {
1039 DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
1040 goto out_ggtt;
1041 }
1042
1043 ret = i915_kick_out_vgacon(dev_priv);
1044 if (ret) {
1045 DRM_ERROR("failed to remove conflicting VGA console\n");
1046 goto out_ggtt;
1047 }
1048
1049 pci_set_master(dev->pdev);
1050
1051 /* overlay on gen2 is broken and can't address above 1G */
1052 if (IS_GEN2(dev)) {
1053 ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
1054 if (ret) {
1055 DRM_ERROR("failed to set DMA mask\n");
1056
1057 goto out_ggtt;
1058 }
1059 }
1060
1061
1062 /* 965GM sometimes incorrectly writes to hardware status page (HWS)
1063 * using 32bit addressing, overwriting memory if HWS is located
1064 * above 4GB.
1065 *
1066 * The documentation also mentions an issue with undefined
1067 * behaviour if any general state is accessed within a page above 4GB,
1068 * which also needs to be handled carefully.
1069 */
1070 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) {
1071 ret = dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
1072
1073 if (ret) {
1074 DRM_ERROR("failed to set DMA mask\n");
1075
1076 goto out_ggtt;
1077 }
1078 }
1079
1080 aperture_size = ggtt->mappable_end;
1081
1082 ggtt->mappable =
1083 io_mapping_create_wc(ggtt->mappable_base,
1084 aperture_size);
1085 if (!ggtt->mappable) {
1086 ret = -EIO;
1087 goto out_ggtt;
1088 }
1089
1090 ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base,
1091 aperture_size);
1092
1093 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
1094 PM_QOS_DEFAULT_VALUE);
1095
1096 intel_uncore_sanitize(dev_priv);
1097
1098 intel_opregion_setup(dev_priv);
1099
1100 i915_gem_load_init_fences(dev_priv);
1101
1102 /* On the 945G/GM, the chipset reports the MSI capability on the
1103 * integrated graphics even though the support isn't actually there
1104 * according to the published specs. It doesn't appear to function
1105 * correctly in testing on 945G.
1106 * This may be a side effect of MSI having been made available for PEG
1107 * and the registers being closely associated.
1108 *
1109 * According to chipset errata, on the 965GM, MSI interrupts may
 1110 * be lost or delayed, but we use them anyway to avoid
1111 * stuck interrupts on some machines.
1112 */
1113 if (!IS_I945G(dev) && !IS_I945GM(dev)) {
1114 if (pci_enable_msi(dev->pdev) < 0)
1115 DRM_DEBUG_DRIVER("can't enable MSI");
1116 }
1117
1118 return 0;
1119
1120out_ggtt:
1121 i915_ggtt_cleanup_hw(dev);
1122
1123 return ret;
1124}
1125
1126/**
1127 * i915_driver_cleanup_hw - cleanup the setup done in i915_driver_init_hw()
1128 * @dev_priv: device private
1129 */
1130static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
1131{
1132 struct drm_device *dev = &dev_priv->drm;
1133 struct i915_ggtt *ggtt = &dev_priv->ggtt;
1134
1135 if (dev->pdev->msi_enabled)
1136 pci_disable_msi(dev->pdev);
1137
1138 pm_qos_remove_request(&dev_priv->pm_qos);
1139 arch_phys_wc_del(ggtt->mtrr);
1140 io_mapping_free(ggtt->mappable);
1141 i915_ggtt_cleanup_hw(dev);
1142}
1143
1144/**
1145 * i915_driver_register - register the driver with the rest of the system
1146 * @dev_priv: device private
1147 *
1148 * Perform any steps necessary to make the driver available via kernel
1149 * internal or userspace interfaces.
1150 */
1151static void i915_driver_register(struct drm_i915_private *dev_priv)
1152{
1153 struct drm_device *dev = &dev_priv->drm;
1154
1155 i915_gem_shrinker_init(dev_priv);
1156
1157 /*
1158 * Notify a valid surface after modesetting,
1159 * when running inside a VM.
1160 */
1161 if (intel_vgpu_active(dev_priv))
1162 I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);
1163
1164 /* Reveal our presence to userspace */
1165 if (drm_dev_register(dev, 0) == 0) {
1166 i915_debugfs_register(dev_priv);
1167 i915_setup_sysfs(dev);
1168 } else
1169 DRM_ERROR("Failed to register driver for userspace access!\n");
1170
1171 if (INTEL_INFO(dev_priv)->num_pipes) {
1172 /* Must be done after probing outputs */
1173 intel_opregion_register(dev_priv);
1174 acpi_video_register();
1175 }
1176
1177 if (IS_GEN5(dev_priv))
1178 intel_gpu_ips_init(dev_priv);
1179
1180 i915_audio_component_init(dev_priv);
1181
1182 /*
1183 * Some ports require correctly set-up hpd registers for detection to
 1184 * work properly (otherwise reporting ghost connected connector status), e.g. VGA
1185 * on gm45. Hence we can only set up the initial fbdev config after hpd
1186 * irqs are fully enabled. We do it last so that the async config
1187 * cannot run before the connectors are registered.
1188 */
1189 intel_fbdev_initial_config_async(dev);
1190}
1191
1192/**
 1193 * i915_driver_unregister - cleanup the registration done in i915_driver_register()
1194 * @dev_priv: device private
1195 */
1196static void i915_driver_unregister(struct drm_i915_private *dev_priv)
1197{
1198 i915_audio_component_cleanup(dev_priv);
1199
1200 intel_gpu_ips_teardown();
1201 acpi_video_unregister();
1202 intel_opregion_unregister(dev_priv);
1203
1204 i915_teardown_sysfs(&dev_priv->drm);
1205 i915_debugfs_unregister(dev_priv);
1206 drm_dev_unregister(&dev_priv->drm);
1207
1208 i915_gem_shrinker_cleanup(dev_priv);
1209}
1210
1211/**
1212 * i915_driver_load - setup chip and create an initial config
1213 * @dev: DRM device
1214 * @flags: startup flags
1215 *
1216 * The driver load routine has to do several things:
1217 * - drive output discovery via intel_modeset_init()
1218 * - initialize the memory manager
1219 * - allocate initial config memory
1220 * - setup the DRM framebuffer with the allocated memory
1221 */
1222int i915_driver_load(struct pci_dev *pdev, const struct pci_device_id *ent)
1223{
1224 struct drm_i915_private *dev_priv;
1225 int ret;
1226
1227 if (i915.nuclear_pageflip)
1228 driver.driver_features |= DRIVER_ATOMIC;
1229
1230 ret = -ENOMEM;
1231 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
1232 if (dev_priv)
1233 ret = drm_dev_init(&dev_priv->drm, &driver, &pdev->dev);
1234 if (ret) {
1235 dev_printk(KERN_ERR, &pdev->dev,
1236 "[" DRM_NAME ":%s] allocation failed\n", __func__);
1237 kfree(dev_priv);
1238 return ret;
1239 }
1240
1241 dev_priv->drm.pdev = pdev;
1242 dev_priv->drm.dev_private = dev_priv;
1243
1244 ret = pci_enable_device(pdev);
1245 if (ret)
1246 goto out_free_priv;
1247
1248 pci_set_drvdata(pdev, &dev_priv->drm);
1249
1250 ret = i915_driver_init_early(dev_priv, ent);
1251 if (ret < 0)
1252 goto out_pci_disable;
1253
1254 intel_runtime_pm_get(dev_priv);
1255
1256 ret = i915_driver_init_mmio(dev_priv);
1257 if (ret < 0)
1258 goto out_runtime_pm_put;
1259
1260 ret = i915_driver_init_hw(dev_priv);
1261 if (ret < 0)
1262 goto out_cleanup_mmio;
1263
1264 /*
1265 * TODO: move the vblank init and parts of modeset init steps into one
1266 * of the i915_driver_init_/i915_driver_register functions according
1267 * to the role/effect of the given init step.
1268 */
1269 if (INTEL_INFO(dev_priv)->num_pipes) {
1270 ret = drm_vblank_init(&dev_priv->drm,
1271 INTEL_INFO(dev_priv)->num_pipes);
1272 if (ret)
1273 goto out_cleanup_hw;
1274 }
1275
1276 ret = i915_load_modeset_init(&dev_priv->drm);
1277 if (ret < 0)
1278 goto out_cleanup_vblank;
1279
1280 i915_driver_register(dev_priv);
1281
1282 intel_runtime_pm_enable(dev_priv);
1283
1284 intel_runtime_pm_put(dev_priv);
1285
1286 return 0;
1287
1288out_cleanup_vblank:
1289 drm_vblank_cleanup(&dev_priv->drm);
1290out_cleanup_hw:
1291 i915_driver_cleanup_hw(dev_priv);
1292out_cleanup_mmio:
1293 i915_driver_cleanup_mmio(dev_priv);
1294out_runtime_pm_put:
1295 intel_runtime_pm_put(dev_priv);
1296 i915_driver_cleanup_early(dev_priv);
1297out_pci_disable:
1298 pci_disable_device(pdev);
1299out_free_priv:
1300 i915_load_error(dev_priv, "Device initialization failed (%d)\n", ret);
1301 drm_dev_unref(&dev_priv->drm);
1302 return ret;
1303}
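
i915_driver_load() now takes the pci_dev and pci_device_id directly, so it can be driven from a conventional PCI probe callback; the actual probe/remove glue lives in a separate file in this series. A minimal sketch of such a caller, where example_pci_probe/example_pci_remove are hypothetical names and the rest is standard PCI/DRM API:

	#include <linux/pci.h>
	#include <drm/drmP.h>

	static int example_pci_probe(struct pci_dev *pdev,
				     const struct pci_device_id *ent)
	{
		/* Bind only to function 0; early hardware used function 1 as a
		 * placeholder for multi-head. */
		if (PCI_FUNC(pdev->devfn))
			return -ENODEV;

		return i915_driver_load(pdev, ent);
	}

	static void example_pci_remove(struct pci_dev *pdev)
	{
		struct drm_device *dev = pci_get_drvdata(pdev);

		i915_driver_unload(dev);
		drm_dev_unref(dev);
	}
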
1304
1305void i915_driver_unload(struct drm_device *dev)
1306{
1307 struct drm_i915_private *dev_priv = to_i915(dev);
1308
1309 intel_fbdev_fini(dev);
1310
1311 if (i915_gem_suspend(dev))
1312 DRM_ERROR("failed to idle hardware; continuing to unload!\n");
1313
1314 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
1315
1316 i915_driver_unregister(dev_priv);
1317
1318 drm_vblank_cleanup(dev);
1319
1320 intel_modeset_cleanup(dev);
1321
1322 /*
1323 * free the memory space allocated for the child device
1324 * config parsed from VBT
1325 */
1326 if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
1327 kfree(dev_priv->vbt.child_dev);
1328 dev_priv->vbt.child_dev = NULL;
1329 dev_priv->vbt.child_dev_num = 0;
1330 }
1331 kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
1332 dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
1333 kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
1334 dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
1335
1336 vga_switcheroo_unregister_client(dev->pdev);
1337 vga_client_register(dev->pdev, NULL, NULL, NULL);
1338
1339 intel_csr_ucode_fini(dev_priv);
1340
1341 /* Free error state after interrupts are fully disabled. */
1342 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
1343 i915_destroy_error_state(dev);
1344
1345 /* Flush any outstanding unpin_work. */
1346 flush_workqueue(dev_priv->wq);
1347
1348 intel_guc_fini(dev);
1349 i915_gem_fini(dev);
1350 intel_fbc_cleanup_cfb(dev_priv);
1351
1352 intel_power_domains_fini(dev_priv);
1353
1354 i915_driver_cleanup_hw(dev_priv);
1355 i915_driver_cleanup_mmio(dev_priv);
1356
1357 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
1358
1359 i915_driver_cleanup_early(dev_priv);
1360}
1361
1362static int i915_driver_open(struct drm_device *dev, struct drm_file *file)
1363{
1364 int ret;
1365
1366 ret = i915_gem_open(dev, file);
1367 if (ret)
1368 return ret;
1369
1370 return 0;
1371}
1372
1373/**
1374 * i915_driver_lastclose - clean up after all DRM clients have exited
1375 * @dev: DRM device
1376 *
1377 * Take care of cleaning up after all DRM clients have exited. In the
1378 * mode setting case, we want to restore the kernel's initial mode (just
1379 * in case the last client left us in a bad state).
1380 *
1381 * Additionally, in the non-mode setting case, we'll tear down the GTT
 1382 * and DMA structures, since the kernel won't be using them, and clean
1383 * up any GEM state.
1384 */
1385static void i915_driver_lastclose(struct drm_device *dev)
1386{
1387 intel_fbdev_restore_mode(dev);
1388 vga_switcheroo_process_delayed_switch();
1389}
1390
1391static void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
1392{
1393 mutex_lock(&dev->struct_mutex);
1394 i915_gem_context_close(dev, file);
1395 i915_gem_release(dev, file);
1396 mutex_unlock(&dev->struct_mutex);
1397}
1398
1399static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
1400{
1401 struct drm_i915_file_private *file_priv = file->driver_priv;
1402
1403 kfree(file_priv);
1404}
1405
562static void intel_suspend_encoders(struct drm_i915_private *dev_priv) 1406static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
563{ 1407{
564 struct drm_device *dev = dev_priv->dev; 1408 struct drm_device *dev = &dev_priv->drm;
565 struct intel_encoder *encoder; 1409 struct intel_encoder *encoder;
566 1410
567 drm_modeset_lock_all(dev); 1411 drm_modeset_lock_all(dev);
@@ -586,7 +1430,7 @@ static bool suspend_to_idle(struct drm_i915_private *dev_priv)
586 1430
587static int i915_drm_suspend(struct drm_device *dev) 1431static int i915_drm_suspend(struct drm_device *dev)
588{ 1432{
589 struct drm_i915_private *dev_priv = dev->dev_private; 1433 struct drm_i915_private *dev_priv = to_i915(dev);
590 pci_power_t opregion_target_state; 1434 pci_power_t opregion_target_state;
591 int error; 1435 int error;
592 1436
@@ -614,7 +1458,7 @@ static int i915_drm_suspend(struct drm_device *dev)
614 1458
615 intel_guc_suspend(dev); 1459 intel_guc_suspend(dev);
616 1460
617 intel_suspend_gt_powersave(dev); 1461 intel_suspend_gt_powersave(dev_priv);
618 1462
619 intel_display_suspend(dev); 1463 intel_display_suspend(dev);
620 1464
@@ -632,10 +1476,10 @@ static int i915_drm_suspend(struct drm_device *dev)
632 i915_save_state(dev); 1476 i915_save_state(dev);
633 1477
634 opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold; 1478 opregion_target_state = suspend_to_idle(dev_priv) ? PCI_D1 : PCI_D3cold;
635 intel_opregion_notify_adapter(dev, opregion_target_state); 1479 intel_opregion_notify_adapter(dev_priv, opregion_target_state);
636 1480
637 intel_uncore_forcewake_reset(dev, false); 1481 intel_uncore_forcewake_reset(dev_priv, false);
638 intel_opregion_fini(dev); 1482 intel_opregion_unregister(dev_priv);
639 1483
640 intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true); 1484 intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED, true);
641 1485
@@ -653,7 +1497,7 @@ out:
653 1497
654static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation) 1498static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
655{ 1499{
656 struct drm_i915_private *dev_priv = drm_dev->dev_private; 1500 struct drm_i915_private *dev_priv = to_i915(drm_dev);
657 bool fw_csr; 1501 bool fw_csr;
658 int ret; 1502 int ret;
659 1503
@@ -715,7 +1559,7 @@ int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
715{ 1559{
716 int error; 1560 int error;
717 1561
718 if (!dev || !dev->dev_private) { 1562 if (!dev) {
719 DRM_ERROR("dev: %p\n", dev); 1563 DRM_ERROR("dev: %p\n", dev);
720 DRM_ERROR("DRM not initialized, aborting suspend.\n"); 1564 DRM_ERROR("DRM not initialized, aborting suspend.\n");
721 return -ENODEV; 1565 return -ENODEV;
@@ -737,7 +1581,7 @@ int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state)
737 1581
738static int i915_drm_resume(struct drm_device *dev) 1582static int i915_drm_resume(struct drm_device *dev)
739{ 1583{
740 struct drm_i915_private *dev_priv = dev->dev_private; 1584 struct drm_i915_private *dev_priv = to_i915(dev);
741 int ret; 1585 int ret;
742 1586
743 disable_rpm_wakeref_asserts(dev_priv); 1587 disable_rpm_wakeref_asserts(dev_priv);
@@ -753,7 +1597,7 @@ static int i915_drm_resume(struct drm_device *dev)
753 mutex_unlock(&dev->struct_mutex); 1597 mutex_unlock(&dev->struct_mutex);
754 1598
755 i915_restore_state(dev); 1599 i915_restore_state(dev);
756 intel_opregion_setup(dev); 1600 intel_opregion_setup(dev_priv);
757 1601
758 intel_init_pch_refclk(dev); 1602 intel_init_pch_refclk(dev);
759 drm_mode_config_reset(dev); 1603 drm_mode_config_reset(dev);
@@ -771,7 +1615,7 @@ static int i915_drm_resume(struct drm_device *dev)
771 mutex_lock(&dev->struct_mutex); 1615 mutex_lock(&dev->struct_mutex);
772 if (i915_gem_init_hw(dev)) { 1616 if (i915_gem_init_hw(dev)) {
773 DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n"); 1617 DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
774 atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter); 1618 atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
775 } 1619 }
776 mutex_unlock(&dev->struct_mutex); 1620 mutex_unlock(&dev->struct_mutex);
777 1621
@@ -781,7 +1625,7 @@ static int i915_drm_resume(struct drm_device *dev)
781 1625
782 spin_lock_irq(&dev_priv->irq_lock); 1626 spin_lock_irq(&dev_priv->irq_lock);
783 if (dev_priv->display.hpd_irq_setup) 1627 if (dev_priv->display.hpd_irq_setup)
784 dev_priv->display.hpd_irq_setup(dev); 1628 dev_priv->display.hpd_irq_setup(dev_priv);
785 spin_unlock_irq(&dev_priv->irq_lock); 1629 spin_unlock_irq(&dev_priv->irq_lock);
786 1630
787 intel_dp_mst_resume(dev); 1631 intel_dp_mst_resume(dev);
@@ -798,7 +1642,7 @@ static int i915_drm_resume(struct drm_device *dev)
798 /* Config may have changed between suspend and resume */ 1642 /* Config may have changed between suspend and resume */
799 drm_helper_hpd_irq_event(dev); 1643 drm_helper_hpd_irq_event(dev);
800 1644
801 intel_opregion_init(dev); 1645 intel_opregion_register(dev_priv);
802 1646
803 intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false); 1647 intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING, false);
804 1648
@@ -806,7 +1650,7 @@ static int i915_drm_resume(struct drm_device *dev)
806 dev_priv->modeset_restore = MODESET_DONE; 1650 dev_priv->modeset_restore = MODESET_DONE;
807 mutex_unlock(&dev_priv->modeset_restore_lock); 1651 mutex_unlock(&dev_priv->modeset_restore_lock);
808 1652
809 intel_opregion_notify_adapter(dev, PCI_D0); 1653 intel_opregion_notify_adapter(dev_priv, PCI_D0);
810 1654
811 drm_kms_helper_poll_enable(dev); 1655 drm_kms_helper_poll_enable(dev);
812 1656
@@ -817,7 +1661,7 @@ static int i915_drm_resume(struct drm_device *dev)
817 1661
818static int i915_drm_resume_early(struct drm_device *dev) 1662static int i915_drm_resume_early(struct drm_device *dev)
819{ 1663{
820 struct drm_i915_private *dev_priv = dev->dev_private; 1664 struct drm_i915_private *dev_priv = to_i915(dev);
821 int ret; 1665 int ret;
822 1666
823 /* 1667 /*
@@ -874,9 +1718,9 @@ static int i915_drm_resume_early(struct drm_device *dev)
874 DRM_ERROR("Resume prepare failed: %d, continuing anyway\n", 1718 DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
875 ret); 1719 ret);
876 1720
877 intel_uncore_early_sanitize(dev, true); 1721 intel_uncore_early_sanitize(dev_priv, true);
878 1722
879 if (IS_BROXTON(dev)) { 1723 if (IS_BROXTON(dev_priv)) {
880 if (!dev_priv->suspended_to_idle) 1724 if (!dev_priv->suspended_to_idle)
881 gen9_sanitize_dc_state(dev_priv); 1725 gen9_sanitize_dc_state(dev_priv);
882 bxt_disable_dc9(dev_priv); 1726 bxt_disable_dc9(dev_priv);
@@ -884,7 +1728,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
884 hsw_disable_pc8(dev_priv); 1728 hsw_disable_pc8(dev_priv);
885 } 1729 }
886 1730
887 intel_uncore_sanitize(dev); 1731 intel_uncore_sanitize(dev_priv);
888 1732
889 if (IS_BROXTON(dev_priv) || 1733 if (IS_BROXTON(dev_priv) ||
890 !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload)) 1734 !(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
@@ -927,14 +1771,14 @@ int i915_resume_switcheroo(struct drm_device *dev)
927 * - re-init interrupt state 1771 * - re-init interrupt state
928 * - re-init display 1772 * - re-init display
929 */ 1773 */
930int i915_reset(struct drm_device *dev) 1774int i915_reset(struct drm_i915_private *dev_priv)
931{ 1775{
932 struct drm_i915_private *dev_priv = dev->dev_private; 1776 struct drm_device *dev = &dev_priv->drm;
933 struct i915_gpu_error *error = &dev_priv->gpu_error; 1777 struct i915_gpu_error *error = &dev_priv->gpu_error;
934 unsigned reset_counter; 1778 unsigned reset_counter;
935 int ret; 1779 int ret;
936 1780
937 intel_reset_gt_powersave(dev); 1781 intel_reset_gt_powersave(dev_priv);
938 1782
939 mutex_lock(&dev->struct_mutex); 1783 mutex_lock(&dev->struct_mutex);
940 1784
@@ -948,24 +1792,11 @@ int i915_reset(struct drm_device *dev)
948 goto error; 1792 goto error;
949 } 1793 }
950 1794
951 i915_gem_reset(dev); 1795 pr_notice("drm/i915: Resetting chip after gpu hang\n");
952
953 ret = intel_gpu_reset(dev, ALL_ENGINES);
954
955 /* Also reset the gpu hangman. */
956 if (error->stop_rings != 0) {
957 DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
958 error->stop_rings = 0;
959 if (ret == -ENODEV) {
960 DRM_INFO("Reset not implemented, but ignoring "
961 "error for simulated gpu hangs\n");
962 ret = 0;
963 }
964 }
965 1796
966 if (i915_stop_ring_allow_warn(dev_priv)) 1797 i915_gem_reset(dev);
967 pr_notice("drm/i915: Resetting chip after gpu hang\n");
968 1798
1799 ret = intel_gpu_reset(dev_priv, ALL_ENGINES);
969 if (ret) { 1800 if (ret) {
970 if (ret != -ENODEV) 1801 if (ret != -ENODEV)
971 DRM_ERROR("Failed to reset chip: %i\n", ret); 1802 DRM_ERROR("Failed to reset chip: %i\n", ret);
@@ -1005,7 +1836,7 @@ int i915_reset(struct drm_device *dev)
1005 * of re-init after reset. 1836 * of re-init after reset.
1006 */ 1837 */
1007 if (INTEL_INFO(dev)->gen > 5) 1838 if (INTEL_INFO(dev)->gen > 5)
1008 intel_enable_gt_powersave(dev); 1839 intel_enable_gt_powersave(dev_priv);
1009 1840
1010 return 0; 1841 return 0;
1011 1842
@@ -1015,51 +1846,12 @@ error:
1015 return ret; 1846 return ret;
1016} 1847}
1017 1848
1018static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1019{
1020 struct intel_device_info *intel_info =
1021 (struct intel_device_info *) ent->driver_data;
1022
1023 if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
1024 DRM_INFO("This hardware requires preliminary hardware support.\n"
1025 "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
1026 return -ENODEV;
1027 }
1028
1029 /* Only bind to function 0 of the device. Early generations
1030 * used function 1 as a placeholder for multi-head. This causes
1031 * us confusion instead, especially on the systems where both
1032 * functions have the same PCI-ID!
1033 */
1034 if (PCI_FUNC(pdev->devfn))
1035 return -ENODEV;
1036
1037 /*
1038 * apple-gmux is needed on dual GPU MacBook Pro
1039 * to probe the panel if we're the inactive GPU.
1040 */
1041 if (IS_ENABLED(CONFIG_VGA_ARB) && IS_ENABLED(CONFIG_VGA_SWITCHEROO) &&
1042 apple_gmux_present() && pdev != vga_default_device() &&
1043 !vga_switcheroo_handler_flags())
1044 return -EPROBE_DEFER;
1045
1046 return drm_get_pci_dev(pdev, ent, &driver);
1047}
1048
1049static void
1050i915_pci_remove(struct pci_dev *pdev)
1051{
1052 struct drm_device *dev = pci_get_drvdata(pdev);
1053
1054 drm_put_dev(dev);
1055}
1056
1057static int i915_pm_suspend(struct device *dev) 1849static int i915_pm_suspend(struct device *dev)
1058{ 1850{
1059 struct pci_dev *pdev = to_pci_dev(dev); 1851 struct pci_dev *pdev = to_pci_dev(dev);
1060 struct drm_device *drm_dev = pci_get_drvdata(pdev); 1852 struct drm_device *drm_dev = pci_get_drvdata(pdev);
1061 1853
1062 if (!drm_dev || !drm_dev->dev_private) { 1854 if (!drm_dev) {
1063 dev_err(dev, "DRM not initialized, aborting suspend.\n"); 1855 dev_err(dev, "DRM not initialized, aborting suspend.\n");
1064 return -ENODEV; 1856 return -ENODEV;
1065 } 1857 }
@@ -1072,7 +1864,7 @@ static int i915_pm_suspend(struct device *dev)
1072 1864
1073static int i915_pm_suspend_late(struct device *dev) 1865static int i915_pm_suspend_late(struct device *dev)
1074{ 1866{
1075 struct drm_device *drm_dev = dev_to_i915(dev)->dev; 1867 struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
1076 1868
1077 /* 1869 /*
1078 * We have a suspend ordering issue with the snd-hda driver also 1870 * We have a suspend ordering issue with the snd-hda driver also
@@ -1091,7 +1883,7 @@ static int i915_pm_suspend_late(struct device *dev)
1091 1883
1092static int i915_pm_poweroff_late(struct device *dev) 1884static int i915_pm_poweroff_late(struct device *dev)
1093{ 1885{
1094 struct drm_device *drm_dev = dev_to_i915(dev)->dev; 1886 struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
1095 1887
1096 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) 1888 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1097 return 0; 1889 return 0;
@@ -1101,7 +1893,7 @@ static int i915_pm_poweroff_late(struct device *dev)
1101 1893
1102static int i915_pm_resume_early(struct device *dev) 1894static int i915_pm_resume_early(struct device *dev)
1103{ 1895{
1104 struct drm_device *drm_dev = dev_to_i915(dev)->dev; 1896 struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
1105 1897
1106 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) 1898 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1107 return 0; 1899 return 0;
@@ -1111,7 +1903,7 @@ static int i915_pm_resume_early(struct device *dev)
1111 1903
1112static int i915_pm_resume(struct device *dev) 1904static int i915_pm_resume(struct device *dev)
1113{ 1905{
1114 struct drm_device *drm_dev = dev_to_i915(dev)->dev; 1906 struct drm_device *drm_dev = &dev_to_i915(dev)->drm;
1115 1907
1116 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) 1908 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1117 return 0; 1909 return 0;
@@ -1119,6 +1911,49 @@ static int i915_pm_resume(struct device *dev)
1119 return i915_drm_resume(drm_dev); 1911 return i915_drm_resume(drm_dev);
1120} 1912}
1121 1913
1914/* freeze: before creating the hibernation_image */
1915static int i915_pm_freeze(struct device *dev)
1916{
1917 return i915_pm_suspend(dev);
1918}
1919
1920static int i915_pm_freeze_late(struct device *dev)
1921{
1922 int ret;
1923
1924 ret = i915_pm_suspend_late(dev);
1925 if (ret)
1926 return ret;
1927
1928 ret = i915_gem_freeze_late(dev_to_i915(dev));
1929 if (ret)
1930 return ret;
1931
1932 return 0;
1933}
1934
1935/* thaw: called after creating the hibernation image, but before turning off. */
1936static int i915_pm_thaw_early(struct device *dev)
1937{
1938 return i915_pm_resume_early(dev);
1939}
1940
1941static int i915_pm_thaw(struct device *dev)
1942{
1943 return i915_pm_resume(dev);
1944}
1945
1946/* restore: called after loading the hibernation image. */
1947static int i915_pm_restore_early(struct device *dev)
1948{
1949 return i915_pm_resume_early(dev);
1950}
1951
1952static int i915_pm_restore(struct device *dev)
1953{
1954 return i915_pm_resume(dev);
1955}
1956
1122/* 1957/*
1123 * Save all Gunit registers that may be lost after a D3 and a subsequent 1958 * Save all Gunit registers that may be lost after a D3 and a subsequent
1124 * S0i[R123] transition. The list of registers needing a save/restore is 1959 * S0i[R123] transition. The list of registers needing a save/restore is
@@ -1318,8 +2153,6 @@ int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
1318 u32 val; 2153 u32 val;
1319 int err; 2154 int err;
1320 2155
1321#define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT)
1322
1323 val = I915_READ(VLV_GTLC_SURVIVABILITY_REG); 2156 val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
1324 val &= ~VLV_GFX_CLK_FORCE_ON_BIT; 2157 val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
1325 if (force_on) 2158 if (force_on)
@@ -1329,13 +2162,16 @@ int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
1329 if (!force_on) 2162 if (!force_on)
1330 return 0; 2163 return 0;
1331 2164
1332 err = wait_for(COND, 20); 2165 err = intel_wait_for_register(dev_priv,
2166 VLV_GTLC_SURVIVABILITY_REG,
2167 VLV_GFX_CLK_STATUS_BIT,
2168 VLV_GFX_CLK_STATUS_BIT,
2169 20);
1333 if (err) 2170 if (err)
1334 DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n", 2171 DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
1335 I915_READ(VLV_GTLC_SURVIVABILITY_REG)); 2172 I915_READ(VLV_GTLC_SURVIVABILITY_REG));
1336 2173
1337 return err; 2174 return err;
1338#undef COND
1339} 2175}
1340 2176
1341static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow) 2177static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
@@ -1350,13 +2186,15 @@ static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
1350 I915_WRITE(VLV_GTLC_WAKE_CTRL, val); 2186 I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
1351 POSTING_READ(VLV_GTLC_WAKE_CTRL); 2187 POSTING_READ(VLV_GTLC_WAKE_CTRL);
1352 2188
1353#define COND (!!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEACK) == \ 2189 err = intel_wait_for_register(dev_priv,
1354 allow) 2190 VLV_GTLC_PW_STATUS,
1355 err = wait_for(COND, 1); 2191 VLV_GTLC_ALLOWWAKEACK,
2192 allow,
2193 1);
1356 if (err) 2194 if (err)
1357 DRM_ERROR("timeout disabling GT waking\n"); 2195 DRM_ERROR("timeout disabling GT waking\n");
2196
1358 return err; 2197 return err;
1359#undef COND
1360} 2198}
1361 2199
1362static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv, 2200static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
@@ -1368,8 +2206,7 @@ static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
1368 2206
1369 mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK; 2207 mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
1370 val = wait_for_on ? mask : 0; 2208 val = wait_for_on ? mask : 0;
1371#define COND ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val) 2209 if ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
1372 if (COND)
1373 return 0; 2210 return 0;
1374 2211
1375 DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n", 2212 DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
@@ -1380,13 +2217,14 @@ static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
1380 * RC6 transitioning can be delayed up to 2 msec (see 2217 * RC6 transitioning can be delayed up to 2 msec (see
1381 * valleyview_enable_rps), use 3 msec for safety. 2218 * valleyview_enable_rps), use 3 msec for safety.
1382 */ 2219 */
1383 err = wait_for(COND, 3); 2220 err = intel_wait_for_register(dev_priv,
2221 VLV_GTLC_PW_STATUS, mask, val,
2222 3);
1384 if (err) 2223 if (err)
1385 DRM_ERROR("timeout waiting for GT wells to go %s\n", 2224 DRM_ERROR("timeout waiting for GT wells to go %s\n",
1386 onoff(wait_for_on)); 2225 onoff(wait_for_on));
1387 2226
1388 return err; 2227 return err;
1389#undef COND
1390} 2228}
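
The three wait_for(COND, ...) sites above, each with its own local COND macro, are converted to intel_wait_for_register(dev_priv, reg, mask, value, timeout_ms). A rough, illustrative sketch of the poll-until-match behaviour that call stands for (not the driver's actual helper, which is defined elsewhere in this series):

	/* Illustrative sketch only; assumes <linux/delay.h> and <linux/jiffies.h>. */
	static int poll_reg_until(struct drm_i915_private *dev_priv,
				  i915_reg_t reg, u32 mask, u32 value,
				  unsigned int timeout_ms)
	{
		unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

		do {
			if ((I915_READ(reg) & mask) == value)
				return 0;		/* condition met */
			usleep_range(10, 50);		/* back off briefly between reads */
		} while (time_before(jiffies, deadline));

		return -ETIMEDOUT;			/* caller logs the timeout */
	}
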
1391 2229
1392static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv) 2230static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
@@ -1443,7 +2281,7 @@ err1:
1443static int vlv_resume_prepare(struct drm_i915_private *dev_priv, 2281static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
1444 bool rpm_resume) 2282 bool rpm_resume)
1445{ 2283{
1446 struct drm_device *dev = dev_priv->dev; 2284 struct drm_device *dev = &dev_priv->drm;
1447 int err; 2285 int err;
1448 int ret; 2286 int ret;
1449 2287
@@ -1479,10 +2317,10 @@ static int intel_runtime_suspend(struct device *device)
1479{ 2317{
1480 struct pci_dev *pdev = to_pci_dev(device); 2318 struct pci_dev *pdev = to_pci_dev(device);
1481 struct drm_device *dev = pci_get_drvdata(pdev); 2319 struct drm_device *dev = pci_get_drvdata(pdev);
1482 struct drm_i915_private *dev_priv = dev->dev_private; 2320 struct drm_i915_private *dev_priv = to_i915(dev);
1483 int ret; 2321 int ret;
1484 2322
1485 if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev)))) 2323 if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6())))
1486 return -ENODEV; 2324 return -ENODEV;
1487 2325
1488 if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev))) 2326 if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
@@ -1517,11 +2355,8 @@ static int intel_runtime_suspend(struct device *device)
1517 i915_gem_release_all_mmaps(dev_priv); 2355 i915_gem_release_all_mmaps(dev_priv);
1518 mutex_unlock(&dev->struct_mutex); 2356 mutex_unlock(&dev->struct_mutex);
1519 2357
1520 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
1521
1522 intel_guc_suspend(dev); 2358 intel_guc_suspend(dev);
1523 2359
1524 intel_suspend_gt_powersave(dev);
1525 intel_runtime_pm_disable_interrupts(dev_priv); 2360 intel_runtime_pm_disable_interrupts(dev_priv);
1526 2361
1527 ret = 0; 2362 ret = 0;
@@ -1543,7 +2378,7 @@ static int intel_runtime_suspend(struct device *device)
1543 return ret; 2378 return ret;
1544 } 2379 }
1545 2380
1546 intel_uncore_forcewake_reset(dev, false); 2381 intel_uncore_forcewake_reset(dev_priv, false);
1547 2382
1548 enable_rpm_wakeref_asserts(dev_priv); 2383 enable_rpm_wakeref_asserts(dev_priv);
1549 WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count)); 2384 WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
@@ -1557,14 +2392,14 @@ static int intel_runtime_suspend(struct device *device)
1557 * FIXME: We really should find a document that references the arguments 2392 * FIXME: We really should find a document that references the arguments
1558 * used below! 2393 * used below!
1559 */ 2394 */
1560 if (IS_BROADWELL(dev)) { 2395 if (IS_BROADWELL(dev_priv)) {
1561 /* 2396 /*
1562 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop 2397 * On Broadwell, if we use PCI_D1 the PCH DDI ports will stop
1563 * being detected, and the call we do at intel_runtime_resume() 2398 * being detected, and the call we do at intel_runtime_resume()
1564 * won't be able to restore them. Since PCI_D3hot matches the 2399 * won't be able to restore them. Since PCI_D3hot matches the
1565 * actual specification and appears to be working, use it. 2400 * actual specification and appears to be working, use it.
1566 */ 2401 */
1567 intel_opregion_notify_adapter(dev, PCI_D3hot); 2402 intel_opregion_notify_adapter(dev_priv, PCI_D3hot);
1568 } else { 2403 } else {
1569 /* 2404 /*
1570 * current versions of firmware which depend on this opregion 2405 * current versions of firmware which depend on this opregion
@@ -1573,11 +2408,14 @@ static int intel_runtime_suspend(struct device *device)
1573 * to distinguish it from notifications that might be sent via 2408 * to distinguish it from notifications that might be sent via
1574 * the suspend path. 2409 * the suspend path.
1575 */ 2410 */
1576 intel_opregion_notify_adapter(dev, PCI_D1); 2411 intel_opregion_notify_adapter(dev_priv, PCI_D1);
1577 } 2412 }
1578 2413
1579 assert_forcewakes_inactive(dev_priv); 2414 assert_forcewakes_inactive(dev_priv);
1580 2415
2416 if (!IS_VALLEYVIEW(dev_priv) || !IS_CHERRYVIEW(dev_priv))
2417 intel_hpd_poll_init(dev_priv);
2418
1581 DRM_DEBUG_KMS("Device suspended\n"); 2419 DRM_DEBUG_KMS("Device suspended\n");
1582 return 0; 2420 return 0;
1583} 2421}
@@ -1586,7 +2424,7 @@ static int intel_runtime_resume(struct device *device)
1586{ 2424{
1587 struct pci_dev *pdev = to_pci_dev(device); 2425 struct pci_dev *pdev = to_pci_dev(device);
1588 struct drm_device *dev = pci_get_drvdata(pdev); 2426 struct drm_device *dev = pci_get_drvdata(pdev);
1589 struct drm_i915_private *dev_priv = dev->dev_private; 2427 struct drm_i915_private *dev_priv = to_i915(dev);
1590 int ret = 0; 2428 int ret = 0;
1591 2429
1592 if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev))) 2430 if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
@@ -1597,7 +2435,7 @@ static int intel_runtime_resume(struct device *device)
1597 WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count)); 2435 WARN_ON_ONCE(atomic_read(&dev_priv->pm.wakeref_count));
1598 disable_rpm_wakeref_asserts(dev_priv); 2436 disable_rpm_wakeref_asserts(dev_priv);
1599 2437
1600 intel_opregion_notify_adapter(dev, PCI_D0); 2438 intel_opregion_notify_adapter(dev_priv, PCI_D0);
1601 dev_priv->pm.suspended = false; 2439 dev_priv->pm.suspended = false;
1602 if (intel_uncore_unclaimed_mmio(dev_priv)) 2440 if (intel_uncore_unclaimed_mmio(dev_priv))
1603 DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n"); 2441 DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
@@ -1624,7 +2462,7 @@ static int intel_runtime_resume(struct device *device)
1624 * we can do is to hope that things will still work (and disable RPM). 2462 * we can do is to hope that things will still work (and disable RPM).
1625 */ 2463 */
1626 i915_gem_init_swizzling(dev); 2464 i915_gem_init_swizzling(dev);
1627 gen6_update_ring_freq(dev); 2465 gen6_update_ring_freq(dev_priv);
1628 2466
1629 intel_runtime_pm_enable_interrupts(dev_priv); 2467 intel_runtime_pm_enable_interrupts(dev_priv);
1630 2468
@@ -1636,8 +2474,6 @@ static int intel_runtime_resume(struct device *device)
1636 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) 2474 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
1637 intel_hpd_init(dev_priv); 2475 intel_hpd_init(dev_priv);
1638 2476
1639 intel_enable_gt_powersave(dev);
1640
1641 enable_rpm_wakeref_asserts(dev_priv); 2477 enable_rpm_wakeref_asserts(dev_priv);
1642 2478
1643 if (ret) 2479 if (ret)
@@ -1648,7 +2484,7 @@ static int intel_runtime_resume(struct device *device)
1648 return ret; 2484 return ret;
1649} 2485}
1650 2486
1651static const struct dev_pm_ops i915_pm_ops = { 2487const struct dev_pm_ops i915_pm_ops = {
1652 /* 2488 /*
1653 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND, 2489 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
1654 * PMSG_RESUME] 2490 * PMSG_RESUME]
@@ -1673,14 +2509,14 @@ static const struct dev_pm_ops i915_pm_ops = {
1673 * @restore, @restore_early : called after rebooting and restoring the 2509 * @restore, @restore_early : called after rebooting and restoring the
1674 * hibernation image [PMSG_RESTORE] 2510 * hibernation image [PMSG_RESTORE]
1675 */ 2511 */
1676 .freeze = i915_pm_suspend, 2512 .freeze = i915_pm_freeze,
1677 .freeze_late = i915_pm_suspend_late, 2513 .freeze_late = i915_pm_freeze_late,
1678 .thaw_early = i915_pm_resume_early, 2514 .thaw_early = i915_pm_thaw_early,
1679 .thaw = i915_pm_resume, 2515 .thaw = i915_pm_thaw,
1680 .poweroff = i915_pm_suspend, 2516 .poweroff = i915_pm_suspend,
1681 .poweroff_late = i915_pm_poweroff_late, 2517 .poweroff_late = i915_pm_poweroff_late,
1682 .restore_early = i915_pm_resume_early, 2518 .restore_early = i915_pm_restore_early,
1683 .restore = i915_pm_resume, 2519 .restore = i915_pm_restore,
1684 2520
1685 /* S0ix (via runtime suspend) event handlers */ 2521 /* S0ix (via runtime suspend) event handlers */
1686 .runtime_suspend = intel_runtime_suspend, 2522 .runtime_suspend = intel_runtime_suspend,
@@ -1707,6 +2543,68 @@ static const struct file_operations i915_driver_fops = {
1707 .llseek = noop_llseek, 2543 .llseek = noop_llseek,
1708}; 2544};
1709 2545
2546static int
2547i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
2548 struct drm_file *file)
2549{
2550 return -ENODEV;
2551}
2552
2553static const struct drm_ioctl_desc i915_ioctls[] = {
2554 DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2555 DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
2556 DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
2557 DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
2558 DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
2559 DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
2560 DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
2561 DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2562 DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
2563 DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
2564 DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2565 DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
2566 DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2567 DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2568 DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH),
2569 DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
2570 DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2571 DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2572 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
2573 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW),
2574 DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
2575 DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
2576 DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
2577 DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_RENDER_ALLOW),
2578 DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_RENDER_ALLOW),
2579 DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
2580 DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2581 DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2582 DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_RENDER_ALLOW),
2583 DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_RENDER_ALLOW),
2584 DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_RENDER_ALLOW),
2585 DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_RENDER_ALLOW),
2586 DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_RENDER_ALLOW),
2587 DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_RENDER_ALLOW),
2588 DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_RENDER_ALLOW),
2589 DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_RENDER_ALLOW),
2590 DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_RENDER_ALLOW),
2591 DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW),
2592 DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
2593 DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW),
2594 DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
2595 DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
2596 DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW),
2597 DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW),
2598 DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
2599 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
2600 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_RENDER_ALLOW),
2601 DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_RENDER_ALLOW),
2602 DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_gem_context_reset_stats_ioctl, DRM_RENDER_ALLOW),
2603 DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_RENDER_ALLOW),
2604 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_RENDER_ALLOW),
2605 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_RENDER_ALLOW),
2606};
2607
1710static struct drm_driver driver = { 2608static struct drm_driver driver = {
1711 /* Don't use MTRRs here; the Xserver or userspace app should 2609 /* Don't use MTRRs here; the Xserver or userspace app should
1712 * deal with them for Intel hardware. 2610 * deal with them for Intel hardware.
@@ -1714,18 +2612,12 @@ static struct drm_driver driver = {
1714 .driver_features = 2612 .driver_features =
1715 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME | 2613 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
1716 DRIVER_RENDER | DRIVER_MODESET, 2614 DRIVER_RENDER | DRIVER_MODESET,
1717 .load = i915_driver_load,
1718 .unload = i915_driver_unload,
1719 .open = i915_driver_open, 2615 .open = i915_driver_open,
1720 .lastclose = i915_driver_lastclose, 2616 .lastclose = i915_driver_lastclose,
1721 .preclose = i915_driver_preclose, 2617 .preclose = i915_driver_preclose,
1722 .postclose = i915_driver_postclose, 2618 .postclose = i915_driver_postclose,
1723 .set_busid = drm_pci_set_busid, 2619 .set_busid = drm_pci_set_busid,
1724 2620
1725#if defined(CONFIG_DEBUG_FS)
1726 .debugfs_init = i915_debugfs_init,
1727 .debugfs_cleanup = i915_debugfs_cleanup,
1728#endif
1729 .gem_free_object = i915_gem_free_object, 2621 .gem_free_object = i915_gem_free_object,
1730 .gem_vm_ops = &i915_gem_vm_ops, 2622 .gem_vm_ops = &i915_gem_vm_ops,
1731 2623
@@ -1738,6 +2630,7 @@ static struct drm_driver driver = {
1738 .dumb_map_offset = i915_gem_mmap_gtt, 2630 .dumb_map_offset = i915_gem_mmap_gtt,
1739 .dumb_destroy = drm_gem_dumb_destroy, 2631 .dumb_destroy = drm_gem_dumb_destroy,
1740 .ioctls = i915_ioctls, 2632 .ioctls = i915_ioctls,
2633 .num_ioctls = ARRAY_SIZE(i915_ioctls),
1741 .fops = &i915_driver_fops, 2634 .fops = &i915_driver_fops,
1742 .name = DRIVER_NAME, 2635 .name = DRIVER_NAME,
1743 .desc = DRIVER_DESC, 2636 .desc = DRIVER_DESC,
@@ -1746,56 +2639,3 @@ static struct drm_driver driver = {
1746 .minor = DRIVER_MINOR, 2639 .minor = DRIVER_MINOR,
1747 .patchlevel = DRIVER_PATCHLEVEL, 2640 .patchlevel = DRIVER_PATCHLEVEL,
1748}; 2641};
1749
1750static struct pci_driver i915_pci_driver = {
1751 .name = DRIVER_NAME,
1752 .id_table = pciidlist,
1753 .probe = i915_pci_probe,
1754 .remove = i915_pci_remove,
1755 .driver.pm = &i915_pm_ops,
1756};
1757
1758static int __init i915_init(void)
1759{
1760 driver.num_ioctls = i915_max_ioctl;
1761
1762 /*
1763 * Enable KMS by default, unless explicitly overriden by
1764 * either the i915.modeset prarameter or by the
1765 * vga_text_mode_force boot option.
1766 */
1767
1768 if (i915.modeset == 0)
1769 driver.driver_features &= ~DRIVER_MODESET;
1770
1771 if (vgacon_text_force() && i915.modeset == -1)
1772 driver.driver_features &= ~DRIVER_MODESET;
1773
1774 if (!(driver.driver_features & DRIVER_MODESET)) {
1775 /* Silently fail loading to not upset userspace. */
1776 DRM_DEBUG_DRIVER("KMS and UMS disabled.\n");
1777 return 0;
1778 }
1779
1780 if (i915.nuclear_pageflip)
1781 driver.driver_features |= DRIVER_ATOMIC;
1782
1783 return drm_pci_init(&driver, &i915_pci_driver);
1784}
1785
1786static void __exit i915_exit(void)
1787{
1788 if (!(driver.driver_features & DRIVER_MODESET))
1789 return; /* Never loaded a driver. */
1790
1791 drm_pci_exit(&driver, &i915_pci_driver);
1792}
1793
1794module_init(i915_init);
1795module_exit(i915_exit);
1796
1797MODULE_AUTHOR("Tungsten Graphics, Inc.");
1798MODULE_AUTHOR("Intel Corporation");
1799
1800MODULE_DESCRIPTION(DRIVER_DESC);
1801MODULE_LICENSE("GPL and additional rights");
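
The PCI probe/remove hooks, the pci_driver definition and the module init/exit boilerplate all disappear from this file, and i915_pm_ops loses its static qualifier, so the device registration presumably now lives in a separate file of this series that references it. Under that assumption, the relocated registration side would look roughly like this (hypothetical sketch, not the actual new file):

	/* Hypothetical sketch of the relocated PCI registration. */
	extern const struct dev_pm_ops i915_pm_ops;	/* now non-static, see above */

	static struct pci_driver i915_pci_driver = {
		.name = DRIVER_NAME,
		.id_table = pciidlist,
		.probe = i915_pci_probe,	/* moved along with the ID table */
		.remove = i915_pci_remove,
		.driver.pm = &i915_pm_ops,
	};

	module_pci_driver(i915_pci_driver);
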
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index bc3f2e6842e7..915a3d0acff3 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -47,6 +47,7 @@
47#include <drm/intel-gtt.h> 47#include <drm/intel-gtt.h>
48#include <drm/drm_legacy.h> /* for struct drm_dma_handle */ 48#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
49#include <drm/drm_gem.h> 49#include <drm/drm_gem.h>
50#include <drm/drm_auth.h>
50 51
51#include "i915_params.h" 52#include "i915_params.h"
52#include "i915_reg.h" 53#include "i915_reg.h"
@@ -61,12 +62,14 @@
61#include "i915_gem_gtt.h" 62#include "i915_gem_gtt.h"
62#include "i915_gem_render_state.h" 63#include "i915_gem_render_state.h"
63 64
65#include "intel_gvt.h"
66
64/* General customization: 67/* General customization:
65 */ 68 */
66 69
67#define DRIVER_NAME "i915" 70#define DRIVER_NAME "i915"
68#define DRIVER_DESC "Intel Graphics" 71#define DRIVER_DESC "Intel Graphics"
69#define DRIVER_DATE "20160425" 72#define DRIVER_DATE "20160711"
70 73
71#undef WARN_ON 74#undef WARN_ON
72/* Many gcc seem to no see through this and fall over :( */ 75/* Many gcc seem to no see through this and fall over :( */
@@ -281,6 +284,9 @@ struct i915_hotplug {
281 u32 short_port_mask; 284 u32 short_port_mask;
282 struct work_struct dig_port_work; 285 struct work_struct dig_port_work;
283 286
287 struct work_struct poll_init_work;
288 bool poll_enabled;
289
284 /* 290 /*
285 * if we get a HPD irq from DP and a HPD irq from non-DP 291 * if we get a HPD irq from DP and a HPD irq from non-DP
286 * the non-DP HPD could block the workqueue on a mode config 292 * the non-DP HPD could block the workqueue on a mode config
@@ -317,21 +323,36 @@ struct i915_hotplug {
317 for_each_if ((__ports_mask) & (1 << (__port))) 323 for_each_if ((__ports_mask) & (1 << (__port)))
318 324
319#define for_each_crtc(dev, crtc) \ 325#define for_each_crtc(dev, crtc) \
320 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) 326 list_for_each_entry(crtc, &(dev)->mode_config.crtc_list, head)
321 327
322#define for_each_intel_plane(dev, intel_plane) \ 328#define for_each_intel_plane(dev, intel_plane) \
323 list_for_each_entry(intel_plane, \ 329 list_for_each_entry(intel_plane, \
324 &dev->mode_config.plane_list, \ 330 &(dev)->mode_config.plane_list, \
325 base.head) 331 base.head)
326 332
333#define for_each_intel_plane_mask(dev, intel_plane, plane_mask) \
334 list_for_each_entry(intel_plane, \
335 &(dev)->mode_config.plane_list, \
336 base.head) \
337 for_each_if ((plane_mask) & \
338 (1 << drm_plane_index(&intel_plane->base)))
339
327#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \ 340#define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \
328 list_for_each_entry(intel_plane, \ 341 list_for_each_entry(intel_plane, \
329 &(dev)->mode_config.plane_list, \ 342 &(dev)->mode_config.plane_list, \
330 base.head) \ 343 base.head) \
331 for_each_if ((intel_plane)->pipe == (intel_crtc)->pipe) 344 for_each_if ((intel_plane)->pipe == (intel_crtc)->pipe)
332 345
333#define for_each_intel_crtc(dev, intel_crtc) \ 346#define for_each_intel_crtc(dev, intel_crtc) \
334 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head) 347 list_for_each_entry(intel_crtc, \
348 &(dev)->mode_config.crtc_list, \
349 base.head)
350
351#define for_each_intel_crtc_mask(dev, intel_crtc, crtc_mask) \
352 list_for_each_entry(intel_crtc, \
353 &(dev)->mode_config.crtc_list, \
354 base.head) \
355 for_each_if ((crtc_mask) & (1 << drm_crtc_index(&intel_crtc->base)))
335 356
336#define for_each_intel_encoder(dev, intel_encoder) \ 357#define for_each_intel_encoder(dev, intel_encoder) \
337 list_for_each_entry(intel_encoder, \ 358 list_for_each_entry(intel_encoder, \
@@ -340,7 +361,7 @@ struct i915_hotplug {
340 361
341#define for_each_intel_connector(dev, intel_connector) \ 362#define for_each_intel_connector(dev, intel_connector) \
342 list_for_each_entry(intel_connector, \ 363 list_for_each_entry(intel_connector, \
343 &dev->mode_config.connector_list, \ 364 &(dev)->mode_config.connector_list, \
344 base.head) 365 base.head)
345 366
346#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \ 367#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
@@ -462,6 +483,7 @@ struct drm_i915_error_state {
462 struct timeval time; 483 struct timeval time;
463 484
464 char error_msg[128]; 485 char error_msg[128];
486 bool simulated;
465 int iommu; 487 int iommu;
466 u32 reset_count; 488 u32 reset_count;
467 u32 suspend_count; 489 u32 suspend_count;
@@ -493,6 +515,7 @@ struct drm_i915_error_state {
493 bool valid; 515 bool valid;
494 /* Software tracked state */ 516 /* Software tracked state */
495 bool waiting; 517 bool waiting;
518 int num_waiters;
496 int hangcheck_score; 519 int hangcheck_score;
497 enum intel_ring_hangcheck_action hangcheck_action; 520 enum intel_ring_hangcheck_action hangcheck_action;
498 int num_requests; 521 int num_requests;
@@ -538,6 +561,12 @@ struct drm_i915_error_state {
538 u32 tail; 561 u32 tail;
539 } *requests; 562 } *requests;
540 563
564 struct drm_i915_error_waiter {
565 char comm[TASK_COMM_LEN];
566 pid_t pid;
567 u32 seqno;
568 } *waiters;
569
541 struct { 570 struct {
542 u32 gfx_mode; 571 u32 gfx_mode;
543 union { 572 union {
@@ -588,6 +617,7 @@ struct drm_i915_display_funcs {
588 struct intel_crtc_state *newstate); 617 struct intel_crtc_state *newstate);
589 void (*initial_watermarks)(struct intel_crtc_state *cstate); 618 void (*initial_watermarks)(struct intel_crtc_state *cstate);
590 void (*optimize_watermarks)(struct intel_crtc_state *cstate); 619 void (*optimize_watermarks)(struct intel_crtc_state *cstate);
620 int (*compute_global_watermarks)(struct drm_atomic_state *state);
591 void (*update_wm)(struct drm_crtc *crtc); 621 void (*update_wm)(struct drm_crtc *crtc);
592 int (*modeset_calc_cdclk)(struct drm_atomic_state *state); 622 int (*modeset_calc_cdclk)(struct drm_atomic_state *state);
593 void (*modeset_commit_cdclk)(struct drm_atomic_state *state); 623 void (*modeset_commit_cdclk)(struct drm_atomic_state *state);
@@ -612,7 +642,7 @@ struct drm_i915_display_funcs {
612 struct drm_i915_gem_object *obj, 642 struct drm_i915_gem_object *obj,
613 struct drm_i915_gem_request *req, 643 struct drm_i915_gem_request *req,
614 uint32_t flags); 644 uint32_t flags);
615 void (*hpd_irq_setup)(struct drm_device *dev); 645 void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);
616 /* clock updates for mode set */ 646 /* clock updates for mode set */
617 /* cursor updates */ 647 /* cursor updates */
618 /* render clock increase/decrease */ 648 /* render clock increase/decrease */
@@ -735,6 +765,7 @@ struct intel_csr {
735 func(is_valleyview) sep \ 765 func(is_valleyview) sep \
736 func(is_cherryview) sep \ 766 func(is_cherryview) sep \
737 func(is_haswell) sep \ 767 func(is_haswell) sep \
768 func(is_broadwell) sep \
738 func(is_skylake) sep \ 769 func(is_skylake) sep \
739 func(is_broxton) sep \ 770 func(is_broxton) sep \
740 func(is_kabylake) sep \ 771 func(is_kabylake) sep \
@@ -749,7 +780,8 @@ struct intel_csr {
749 func(has_llc) sep \ 780 func(has_llc) sep \
750 func(has_snoop) sep \ 781 func(has_snoop) sep \
751 func(has_ddi) sep \ 782 func(has_ddi) sep \
752 func(has_fpga_dbg) 783 func(has_fpga_dbg) sep \
784 func(has_pooled_eu)
753 785
754#define DEFINE_FLAG(name) u8 name:1 786#define DEFINE_FLAG(name) u8 name:1
755#define SEP_SEMICOLON ; 787#define SEP_SEMICOLON ;
@@ -757,9 +789,10 @@ struct intel_csr {
757struct intel_device_info { 789struct intel_device_info {
758 u32 display_mmio_offset; 790 u32 display_mmio_offset;
759 u16 device_id; 791 u16 device_id;
760 u8 num_pipes:3; 792 u8 num_pipes;
761 u8 num_sprites[I915_MAX_PIPES]; 793 u8 num_sprites[I915_MAX_PIPES];
762 u8 gen; 794 u8 gen;
795 u16 gen_mask;
763 u8 ring_mask; /* Rings supported by the HW */ 796 u8 ring_mask; /* Rings supported by the HW */
764 DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON); 797 DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
765 /* Register offsets for the various display pipes and transcoders */ 798 /* Register offsets for the various display pipes and transcoders */
@@ -774,6 +807,7 @@ struct intel_device_info {
774 u8 subslice_per_slice; 807 u8 subslice_per_slice;
775 u8 eu_total; 808 u8 eu_total;
776 u8 eu_per_subslice; 809 u8 eu_per_subslice;
810 u8 min_eu_in_pool;
777 /* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */ 811 /* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */
778 u8 subslice_7eu[3]; 812 u8 subslice_7eu[3];
779 u8 has_slice_pg:1; 813 u8 has_slice_pg:1;
@@ -821,9 +855,8 @@ struct i915_ctx_hang_stats {
821/* This must match up with the value previously used for execbuf2.rsvd1. */ 855/* This must match up with the value previously used for execbuf2.rsvd1. */
822#define DEFAULT_CONTEXT_HANDLE 0 856#define DEFAULT_CONTEXT_HANDLE 0
823 857
824#define CONTEXT_NO_ZEROMAP (1<<0)
825/** 858/**
826 * struct intel_context - as the name implies, represents a context. 859 * struct i915_gem_context - as the name implies, represents a context.
827 * @ref: reference count. 860 * @ref: reference count.
828 * @user_handle: userspace tracking identity for this context. 861 * @user_handle: userspace tracking identity for this context.
829 * @remap_slice: l3 row remapping information. 862 * @remap_slice: l3 row remapping information.
@@ -841,33 +874,40 @@ struct i915_ctx_hang_stats {
841 * Contexts are memory images used by the hardware to store copies of their 874 * Contexts are memory images used by the hardware to store copies of their
842 * internal state. 875 * internal state.
843 */ 876 */
844struct intel_context { 877struct i915_gem_context {
845 struct kref ref; 878 struct kref ref;
846 int user_handle;
847 uint8_t remap_slice;
848 struct drm_i915_private *i915; 879 struct drm_i915_private *i915;
849 int flags;
850 struct drm_i915_file_private *file_priv; 880 struct drm_i915_file_private *file_priv;
851 struct i915_ctx_hang_stats hang_stats;
852 struct i915_hw_ppgtt *ppgtt; 881 struct i915_hw_ppgtt *ppgtt;
853 882
854 /* Legacy ring buffer submission */ 883 struct i915_ctx_hang_stats hang_stats;
855 struct {
856 struct drm_i915_gem_object *rcs_state;
857 bool initialized;
858 } legacy_hw_ctx;
859 884
860 /* Execlists */ 885 /* Unique identifier for this context, used by the hw for tracking */
861 struct { 886 unsigned long flags;
887#define CONTEXT_NO_ZEROMAP BIT(0)
888#define CONTEXT_NO_ERROR_CAPTURE BIT(1)
889 unsigned hw_id;
890 u32 user_handle;
891
892 u32 ggtt_alignment;
893
894 struct intel_context {
862 struct drm_i915_gem_object *state; 895 struct drm_i915_gem_object *state;
863 struct intel_ringbuffer *ringbuf; 896 struct intel_ringbuffer *ringbuf;
864 int pin_count;
865 struct i915_vma *lrc_vma; 897 struct i915_vma *lrc_vma;
866 u64 lrc_desc;
867 uint32_t *lrc_reg_state; 898 uint32_t *lrc_reg_state;
899 u64 lrc_desc;
900 int pin_count;
901 bool initialised;
868 } engine[I915_NUM_ENGINES]; 902 } engine[I915_NUM_ENGINES];
903 u32 ring_size;
904 u32 desc_template;
905 struct atomic_notifier_head status_notifier;
906 bool execlists_force_single_submission;
869 907
870 struct list_head link; 908 struct list_head link;
909
910 u8 remap_slice;
871}; 911};
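
struct intel_context is now the per-engine slice embedded in i915_gem_context (one entry per engine in ctx->engine[]), with the pin count, ring buffer and LRC state tracked per engine rather than per context. Code touching that state would now index by engine id, along these lines (illustrative only):

	/* Illustrative accessor for the per-engine slice of a context. */
	static bool context_engine_is_pinned(struct i915_gem_context *ctx,
					     struct intel_engine_cs *engine)
	{
		struct intel_context *ce = &ctx->engine[engine->id];

		return ce->pin_count != 0;
	}
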
872 912
873enum fb_op_origin { 913enum fb_op_origin {
@@ -1116,6 +1156,8 @@ struct intel_gen6_power_mgmt {
1116 bool interrupts_enabled; 1156 bool interrupts_enabled;
1117 u32 pm_iir; 1157 u32 pm_iir;
1118 1158
1159 u32 pm_intr_keep;
1160
1119 /* Frequencies are stored in potentially platform dependent multiples. 1161 /* Frequencies are stored in potentially platform dependent multiples.
1120 * In other words, *_freq needs to be multiplied by X to be interesting. 1162 * In other words, *_freq needs to be multiplied by X to be interesting.
1121 * Soft limits are those which are used for the dynamic reclocking done 1163 * Soft limits are those which are used for the dynamic reclocking done
@@ -1283,37 +1325,11 @@ struct i915_gem_mm {
1283 struct list_head fence_list; 1325 struct list_head fence_list;
1284 1326
1285 /** 1327 /**
1286 * We leave the user IRQ off as much as possible,
1287 * but this means that requests will finish and never
1288 * be retired once the system goes idle. Set a timer to
1289 * fire periodically while the ring is running. When it
1290 * fires, go retire requests.
1291 */
1292 struct delayed_work retire_work;
1293
1294 /**
1295 * When we detect an idle GPU, we want to turn on
1296 * powersaving features. So once we see that there
1297 * are no more requests outstanding and no more
1298 * arrive within a small period of time, we fire
1299 * off the idle_work.
1300 */
1301 struct delayed_work idle_work;
1302
1303 /**
1304 * Are we in a non-interruptible section of code like 1328 * Are we in a non-interruptible section of code like
1305 * modesetting? 1329 * modesetting?
1306 */ 1330 */
1307 bool interruptible; 1331 bool interruptible;
1308 1332
1309 /**
1310 * Is the GPU currently considered idle, or busy executing userspace
1311 * requests? Whilst idle, we attempt to power down the hardware and
1312 * display clocks. In order to reduce the effect on performance, there
1313 * is a slight delay before we do so.
1314 */
1315 bool busy;
1316
1317 /* the indicator for dispatch video commands on two BSD rings */ 1333 /* the indicator for dispatch video commands on two BSD rings */
1318 unsigned int bsd_ring_dispatch_index; 1334 unsigned int bsd_ring_dispatch_index;
1319 1335
@@ -1350,7 +1366,6 @@ struct i915_gpu_error {
1350 /* Hang gpu twice in this window and your context gets banned */ 1366 /* Hang gpu twice in this window and your context gets banned */
1351#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000) 1367#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)
1352 1368
1353 struct workqueue_struct *hangcheck_wq;
1354 struct delayed_work hangcheck_work; 1369 struct delayed_work hangcheck_work;
1355 1370
1356 /* For reset and error_state handling. */ 1371 /* For reset and error_state handling. */
@@ -1387,20 +1402,19 @@ struct i915_gpu_error {
1387#define I915_WEDGED (1 << 31) 1402#define I915_WEDGED (1 << 31)
1388 1403
1389 /** 1404 /**
1405 * Waitqueue to signal when a hang is detected. Used to for waiters
1406 * to release the struct_mutex for the reset to procede.
1407 */
1408 wait_queue_head_t wait_queue;
1409
1410 /**
1390 * Waitqueue to signal when the reset has completed. Used by clients 1411 * Waitqueue to signal when the reset has completed. Used by clients
1391 * that wait for dev_priv->mm.wedged to settle. 1412 * that wait for dev_priv->mm.wedged to settle.
1392 */ 1413 */
1393 wait_queue_head_t reset_queue; 1414 wait_queue_head_t reset_queue;
1394 1415
1395 /* Userspace knobs for gpu hang simulation;
1396 * combines both a ring mask, and extra flags
1397 */
1398 u32 stop_rings;
1399#define I915_STOP_RING_ALLOW_BAN (1 << 31)
1400#define I915_STOP_RING_ALLOW_WARN (1 << 30)
1401
1402 /* For missed irq/seqno simulation. */ 1416 /* For missed irq/seqno simulation. */
1403 unsigned int test_irq_rings; 1417 unsigned long test_irq_rings;
1404}; 1418};
1405 1419
1406enum modeset_restore { 1420enum modeset_restore {
@@ -1489,6 +1503,7 @@ struct intel_vbt_data {
1489 bool present; 1503 bool present;
1490 bool active_low_pwm; 1504 bool active_low_pwm;
1491 u8 min_brightness; /* min_brightness/255 of max */ 1505 u8 min_brightness; /* min_brightness/255 of max */
1506 enum intel_backlight_type type;
1492 } backlight; 1507 } backlight;
1493 1508
1494 /* MIPI DSI */ 1509 /* MIPI DSI */
@@ -1581,7 +1596,7 @@ struct skl_ddb_allocation {
1581}; 1596};
1582 1597
1583struct skl_wm_values { 1598struct skl_wm_values {
1584 bool dirty[I915_MAX_PIPES]; 1599 unsigned dirty_pipes;
1585 struct skl_ddb_allocation ddb; 1600 struct skl_ddb_allocation ddb;
1586 uint32_t wm_linetime[I915_MAX_PIPES]; 1601 uint32_t wm_linetime[I915_MAX_PIPES];
1587 uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8]; 1602 uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8];
@@ -1698,7 +1713,7 @@ struct i915_execbuffer_params {
1698 uint64_t batch_obj_vm_offset; 1713 uint64_t batch_obj_vm_offset;
1699 struct intel_engine_cs *engine; 1714 struct intel_engine_cs *engine;
1700 struct drm_i915_gem_object *batch_obj; 1715 struct drm_i915_gem_object *batch_obj;
1701 struct intel_context *ctx; 1716 struct i915_gem_context *ctx;
1702 struct drm_i915_gem_request *request; 1717 struct drm_i915_gem_request *request;
1703}; 1718};
1704 1719
@@ -1710,7 +1725,8 @@ struct intel_wm_config {
1710}; 1725};
1711 1726
1712struct drm_i915_private { 1727struct drm_i915_private {
1713 struct drm_device *dev; 1728 struct drm_device drm;
1729
1714 struct kmem_cache *objects; 1730 struct kmem_cache *objects;
1715 struct kmem_cache *vmas; 1731 struct kmem_cache *vmas;
1716 struct kmem_cache *requests; 1732 struct kmem_cache *requests;
@@ -1725,6 +1741,8 @@ struct drm_i915_private {
1725 1741
1726 struct i915_virtual_gpu vgpu; 1742 struct i915_virtual_gpu vgpu;
1727 1743
1744 struct intel_gvt gvt;
1745
1728 struct intel_guc guc; 1746 struct intel_guc guc;
1729 1747
1730 struct intel_csr csr; 1748 struct intel_csr csr;
@@ -1748,6 +1766,7 @@ struct drm_i915_private {
1748 wait_queue_head_t gmbus_wait_queue; 1766 wait_queue_head_t gmbus_wait_queue;
1749 1767
1750 struct pci_dev *bridge_dev; 1768 struct pci_dev *bridge_dev;
1769 struct i915_gem_context *kernel_context;
1751 struct intel_engine_cs engine[I915_NUM_ENGINES]; 1770 struct intel_engine_cs engine[I915_NUM_ENGINES];
1752 struct drm_i915_gem_object *semaphore_obj; 1771 struct drm_i915_gem_object *semaphore_obj;
1753 uint32_t last_seqno, next_seqno; 1772 uint32_t last_seqno, next_seqno;
@@ -1803,13 +1822,17 @@ struct drm_i915_private {
1803 int num_fence_regs; /* 8 on pre-965, 16 otherwise */ 1822 int num_fence_regs; /* 8 on pre-965, 16 otherwise */
1804 1823
1805 unsigned int fsb_freq, mem_freq, is_ddr3; 1824 unsigned int fsb_freq, mem_freq, is_ddr3;
1806 unsigned int skl_boot_cdclk; 1825 unsigned int skl_preferred_vco_freq;
1807 unsigned int cdclk_freq, max_cdclk_freq, atomic_cdclk_freq; 1826 unsigned int cdclk_freq, max_cdclk_freq, atomic_cdclk_freq;
1808 unsigned int max_dotclk_freq; 1827 unsigned int max_dotclk_freq;
1809 unsigned int rawclk_freq; 1828 unsigned int rawclk_freq;
1810 unsigned int hpll_freq; 1829 unsigned int hpll_freq;
1811 unsigned int czclk_freq; 1830 unsigned int czclk_freq;
1812 1831
1832 struct {
1833 unsigned int vco, ref;
1834 } cdclk_pll;
1835
1813 /** 1836 /**
1814 * wq - Driver workqueue for GEM. 1837 * wq - Driver workqueue for GEM.
1815 * 1838 *
@@ -1839,6 +1862,13 @@ struct drm_i915_private {
1839 DECLARE_HASHTABLE(mm_structs, 7); 1862 DECLARE_HASHTABLE(mm_structs, 7);
1840 struct mutex mm_lock; 1863 struct mutex mm_lock;
1841 1864
1865 /* The hw wants to have a stable context identifier for the lifetime
1866 * of the context (for OA, PASID, faults, etc). This is limited
1867 * in execlists to 21 bits.
1868 */
1869 struct ida context_hw_ida;
1870#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
1871
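
The comment above caps hardware context IDs at 21 bits; MAX_CONTEXT_HW_ID is the exclusive upper bound for allocations from context_hw_ida. The allocation presumably reduces to an ida_simple_get() call along these lines (hypothetical sketch, not the exact driver code):

	/* Hypothetical allocation of a stable hw_id for a new context. */
	static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned int *out)
	{
		int ret;

		ret = ida_simple_get(&dev_priv->context_hw_ida,
				     0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
		if (ret < 0)
			return ret;

		*out = ret;
		return 0;
	}
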
1842 /* Kernel Modesetting */ 1872 /* Kernel Modesetting */
1843 1873
1844 struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES]; 1874 struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
@@ -1951,9 +1981,6 @@ struct drm_i915_private {
1951 */ 1981 */
1952 uint16_t skl_latency[8]; 1982 uint16_t skl_latency[8];
1953 1983
1954 /* Committed wm config */
1955 struct intel_wm_config config;
1956
1957 /* 1984 /*
1958 * The skl_wm_values structure is a bit too big for stack 1985 * The skl_wm_values structure is a bit too big for stack
1959 * allocation, so we keep the staging struct where we store 1986 * allocation, so we keep the staging struct where we store
@@ -1976,6 +2003,13 @@ struct drm_i915_private {
1976 * cstate->wm.need_postvbl_update. 2003 * cstate->wm.need_postvbl_update.
1977 */ 2004 */
1978 struct mutex wm_mutex; 2005 struct mutex wm_mutex;
2006
2007 /*
2008 * Set during HW readout of watermarks/DDB. Some platforms
2009 * need to know when we're still using BIOS-provided values
2010 * (which we don't fully trust).
2011 */
2012 bool distrust_bios_wm;
1979 } wm; 2013 } wm;
1980 2014
1981 struct i915_runtime_pm pm; 2015 struct i915_runtime_pm pm;
@@ -1988,9 +2022,35 @@ struct drm_i915_private {
1988 int (*init_engines)(struct drm_device *dev); 2022 int (*init_engines)(struct drm_device *dev);
1989 void (*cleanup_engine)(struct intel_engine_cs *engine); 2023 void (*cleanup_engine)(struct intel_engine_cs *engine);
1990 void (*stop_engine)(struct intel_engine_cs *engine); 2024 void (*stop_engine)(struct intel_engine_cs *engine);
1991 } gt;
1992 2025
1993 struct intel_context *kernel_context; 2026 /**
2027 * Is the GPU currently considered idle, or busy executing
2028 * userspace requests? Whilst idle, we allow runtime power
2029 * management to power down the hardware and display clocks.
2030 * In order to reduce the effect on performance, there
2031 * is a slight delay before we do so.
2032 */
2033 unsigned int active_engines;
2034 bool awake;
2035
2036 /**
2037 * We leave the user IRQ off as much as possible,
2038 * but this means that requests will finish and never
2039 * be retired once the system goes idle. Set a timer to
2040 * fire periodically while the ring is running. When it
2041 * fires, go retire requests.
2042 */
2043 struct delayed_work retire_work;
2044
2045 /**
2046 * When we detect an idle GPU, we want to turn on
2047 * powersaving features. So once we see that there
2048 * are no more requests outstanding and no more
2049 * arrive within a small period of time, we fire
2050 * off the idle_work.
2051 */
2052 struct delayed_work idle_work;
2053 } gt;
1994 2054
1995 /* perform PHY state sanity checks? */ 2055 /* perform PHY state sanity checks? */
1996 bool chv_phy_assert[2]; 2056 bool chv_phy_assert[2];
@@ -2005,7 +2065,7 @@ struct drm_i915_private {
2005 2065
2006static inline struct drm_i915_private *to_i915(const struct drm_device *dev) 2066static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
2007{ 2067{
2008 return dev->dev_private; 2068 return container_of(dev, struct drm_i915_private, drm);
2009} 2069}
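
With struct drm_device embedded as dev_priv->drm, the to_i915() definition just above and &dev_priv->drm are exact inverses, which is what lets the call sites earlier in this patch replace dev->dev_private and dev_priv->dev with that pair. A minimal illustration:

	/* Illustrative only: the embedded drm_device round-trips via container_of(). */
	static bool i915_round_trip_ok(struct drm_device *dev)
	{
		struct drm_i915_private *dev_priv = to_i915(dev);

		return &dev_priv->drm == dev;	/* always true once drm is embedded */
	}
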
2010 2070
2011static inline struct drm_i915_private *dev_to_i915(struct device *dev) 2071static inline struct drm_i915_private *dev_to_i915(struct device *dev)
@@ -2176,6 +2236,7 @@ struct drm_i915_gem_object {
2176 2236
2177 unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS; 2237 unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
2178 2238
2239 unsigned int has_wc_mmap;
2179 unsigned int pin_display; 2240 unsigned int pin_display;
2180 2241
2181 struct sg_table *pages; 2242 struct sg_table *pages;
@@ -2228,9 +2289,81 @@ struct drm_i915_gem_object {
2228}; 2289};
2229#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) 2290#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
2230 2291
2231void i915_gem_track_fb(struct drm_i915_gem_object *old, 2292static inline bool
2232 struct drm_i915_gem_object *new, 2293i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
2233 unsigned frontbuffer_bits); 2294{
2295 return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
2296}
2297
2298/*
2299 * Optimised SGL iterator for GEM objects
2300 */
2301static __always_inline struct sgt_iter {
2302 struct scatterlist *sgp;
2303 union {
2304 unsigned long pfn;
2305 dma_addr_t dma;
2306 };
2307 unsigned int curr;
2308 unsigned int max;
2309} __sgt_iter(struct scatterlist *sgl, bool dma) {
2310 struct sgt_iter s = { .sgp = sgl };
2311
2312 if (s.sgp) {
2313 s.max = s.curr = s.sgp->offset;
2314 s.max += s.sgp->length;
2315 if (dma)
2316 s.dma = sg_dma_address(s.sgp);
2317 else
2318 s.pfn = page_to_pfn(sg_page(s.sgp));
2319 }
2320
2321 return s;
2322}
2323
2324/**
2325 * __sg_next - return the next scatterlist entry in a list
2326 * @sg: The current sg entry
2327 *
2328 * Description:
2329 * If the entry is the last, return NULL; otherwise, step to the next
2330 * element in the array (@sg@+1). If that's a chain pointer, follow it;
2331 * otherwise just return the pointer to the current element.
2332 **/
2333static inline struct scatterlist *__sg_next(struct scatterlist *sg)
2334{
2335#ifdef CONFIG_DEBUG_SG
2336 BUG_ON(sg->sg_magic != SG_MAGIC);
2337#endif
2338 return sg_is_last(sg) ? NULL :
2339 likely(!sg_is_chain(++sg)) ? sg :
2340 sg_chain_ptr(sg);
2341}
2342
2343/**
2344 * for_each_sgt_dma - iterate over the DMA addresses of the given sg_table
2345 * @__dmap: DMA address (output)
2346 * @__iter: 'struct sgt_iter' (iterator state, internal)
2347 * @__sgt: sg_table to iterate over (input)
2348 */
2349#define for_each_sgt_dma(__dmap, __iter, __sgt) \
2350 for ((__iter) = __sgt_iter((__sgt)->sgl, true); \
2351 ((__dmap) = (__iter).dma + (__iter).curr); \
2352 (((__iter).curr += PAGE_SIZE) < (__iter).max) || \
2353 ((__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0))
2354
2355/**
2356 * for_each_sgt_page - iterate over the pages of the given sg_table
2357 * @__pp: page pointer (output)
2358 * @__iter: 'struct sgt_iter' (iterator state, internal)
2359 * @__sgt: sg_table to iterate over (input)
2360 */
2361#define for_each_sgt_page(__pp, __iter, __sgt) \
2362 for ((__iter) = __sgt_iter((__sgt)->sgl, false); \
2363 ((__pp) = (__iter).pfn == 0 ? NULL : \
2364 pfn_to_page((__iter).pfn + ((__iter).curr >> PAGE_SHIFT))); \
2365 (((__iter).curr += PAGE_SIZE) < (__iter).max) || \
2366 ((__iter) = __sgt_iter(__sg_next((__iter).sgp), false), 0))
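
for_each_sgt_dma() and for_each_sgt_page() above give callers a flat walk over a GEM object's backing store without open-coding scatterlist chaining. A hedged usage sketch, assuming a caller that already holds a populated obj->pages sg_table (the helper function itself is hypothetical):

	/* Hypothetical example: zero every backing page of an object. */
	static void zero_object_pages(struct drm_i915_gem_object *obj)
	{
		struct sgt_iter iter;
		struct page *page;

		for_each_sgt_page(page, iter, obj->pages) {
			void *vaddr = kmap_atomic(page);

			memset(vaddr, 0, PAGE_SIZE);
			kunmap_atomic(vaddr);
		}
	}
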
2234 2367
2235/** 2368/**
2236 * Request queue structure. 2369 * Request queue structure.
@@ -2252,7 +2385,7 @@ struct drm_i915_gem_request {
2252 /** On Which ring this request was generated */ 2385 /** On Which ring this request was generated */
2253 struct drm_i915_private *i915; 2386 struct drm_i915_private *i915;
2254 struct intel_engine_cs *engine; 2387 struct intel_engine_cs *engine;
2255 unsigned reset_counter; 2388 struct intel_signal_node signaling;
2256 2389
2257 /** GEM sequence number associated with the previous request, 2390 /** GEM sequence number associated with the previous request,
2258 * when the HWS breadcrumb is equal to this the GPU is processing 2391 * when the HWS breadcrumb is equal to this the GPU is processing
@@ -2279,6 +2412,9 @@ struct drm_i915_gem_request {
2279 /** Position in the ringbuffer of the end of the whole request */ 2412 /** Position in the ringbuffer of the end of the whole request */
2280 u32 tail; 2413 u32 tail;
2281 2414
2415 /** Preallocate space in the ringbuffer for the emitting the request */
2416 u32 reserved_space;
2417
2282 /** 2418 /**
2283 * Context and ring buffer related to this request 2419 * Context and ring buffer related to this request
2284 * Contexts are refcounted, so when this request is associated with a 2420 * Contexts are refcounted, so when this request is associated with a
@@ -2289,9 +2425,20 @@ struct drm_i915_gem_request {
2289 * i915_gem_request_free() will then decrement the refcount on the 2425 * i915_gem_request_free() will then decrement the refcount on the
2290 * context. 2426 * context.
2291 */ 2427 */
2292 struct intel_context *ctx; 2428 struct i915_gem_context *ctx;
2293 struct intel_ringbuffer *ringbuf; 2429 struct intel_ringbuffer *ringbuf;
2294 2430
2431 /**
2432 * Context related to the previous request.
2433 * As the contexts are accessed by the hardware until the switch is
2434 * completed to a new context, the hardware may still be writing
2435 * to the context object after the breadcrumb is visible. We must
2436 * not unpin/unbind/prune that object whilst still active and so
2437 * we keep the previous context pinned until the following (this)
2438 * request is retired.
2439 */
2440 struct i915_gem_context *previous_context;
2441
2295 /** Batch buffer related to this request if any (used for 2442 /** Batch buffer related to this request if any (used for
2296 error state dump only) */ 2443 error state dump only) */
2297 struct drm_i915_gem_object *batch_obj; 2444 struct drm_i915_gem_object *batch_obj;
@@ -2328,11 +2475,13 @@ struct drm_i915_gem_request {
2328 /** Execlists no. of times this request has been sent to the ELSP */ 2475 /** Execlists no. of times this request has been sent to the ELSP */
2329 int elsp_submitted; 2476 int elsp_submitted;
2330 2477
2478 /** Execlists context hardware id. */
2479 unsigned ctx_hw_id;
2331}; 2480};
2332 2481
2333struct drm_i915_gem_request * __must_check 2482struct drm_i915_gem_request * __must_check
2334i915_gem_request_alloc(struct intel_engine_cs *engine, 2483i915_gem_request_alloc(struct intel_engine_cs *engine,
2335 struct intel_context *ctx); 2484 struct i915_gem_context *ctx);
2336void i915_gem_request_free(struct kref *req_ref); 2485void i915_gem_request_free(struct kref *req_ref);
2337int i915_gem_request_add_to_client(struct drm_i915_gem_request *req, 2486int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
2338 struct drm_file *file); 2487 struct drm_file *file);
@@ -2360,23 +2509,9 @@ i915_gem_request_reference(struct drm_i915_gem_request *req)
2360static inline void 2509static inline void
2361i915_gem_request_unreference(struct drm_i915_gem_request *req) 2510i915_gem_request_unreference(struct drm_i915_gem_request *req)
2362{ 2511{
2363 WARN_ON(!mutex_is_locked(&req->engine->dev->struct_mutex));
2364 kref_put(&req->ref, i915_gem_request_free); 2512 kref_put(&req->ref, i915_gem_request_free);
2365} 2513}
2366 2514
2367static inline void
2368i915_gem_request_unreference__unlocked(struct drm_i915_gem_request *req)
2369{
2370 struct drm_device *dev;
2371
2372 if (!req)
2373 return;
2374
2375 dev = req->engine->dev;
2376 if (kref_put_mutex(&req->ref, i915_gem_request_free, &dev->struct_mutex))
2377 mutex_unlock(&dev->struct_mutex);
2378}
2379
2380static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst, 2515static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
2381 struct drm_i915_gem_request *src) 2516 struct drm_i915_gem_request *src)
2382{ 2517{
@@ -2504,9 +2639,29 @@ struct drm_i915_cmd_table {
2504#define INTEL_INFO(p) (&__I915__(p)->info) 2639#define INTEL_INFO(p) (&__I915__(p)->info)
2505#define INTEL_GEN(p) (INTEL_INFO(p)->gen) 2640#define INTEL_GEN(p) (INTEL_INFO(p)->gen)
2506#define INTEL_DEVID(p) (INTEL_INFO(p)->device_id) 2641#define INTEL_DEVID(p) (INTEL_INFO(p)->device_id)
2507#define INTEL_REVID(p) (__I915__(p)->dev->pdev->revision)
2508 2642
2509#define REVID_FOREVER 0xff 2643#define REVID_FOREVER 0xff
2644#define INTEL_REVID(p) (__I915__(p)->drm.pdev->revision)
2645
2646#define GEN_FOREVER (0)
2647/*
2648 * Returns true if Gen is in inclusive range [Start, End].
2649 *
2650 * Use GEN_FOREVER for unbound start and or end.
2651 */
2652#define IS_GEN(p, s, e) ({ \
2653 unsigned int __s = (s), __e = (e); \
2654 BUILD_BUG_ON(!__builtin_constant_p(s)); \
2655 BUILD_BUG_ON(!__builtin_constant_p(e)); \
2656 if ((__s) != GEN_FOREVER) \
2657 __s = (s) - 1; \
2658 if ((__e) == GEN_FOREVER) \
2659 __e = BITS_PER_LONG - 1; \
2660 else \
2661 __e = (e) - 1; \
2662 !!(INTEL_INFO(p)->gen_mask & GENMASK((__e), (__s))); \
2663})
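
IS_GEN(p, s, e) above checks an inclusive gen range against the new gen_mask, with GEN_FOREVER usable as an open bound on either side. Typical call sites would look like this (the helpers called inside the branches are hypothetical, for illustration only):

	/* Illustrative uses of the inclusive-range gen check. */
	if (IS_GEN(dev_priv, 6, 7))		/* gen6 or gen7 only */
		setup_gen6_style_rps(dev_priv);	/* hypothetical helper */

	if (IS_GEN(dev_priv, 9, GEN_FOREVER))	/* gen9 and everything newer */
		enable_dc_states(dev_priv);	/* hypothetical helper */
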
2664
2510/* 2665/*
2511 * Return true if revision is in range [since,until] inclusive. 2666 * Return true if revision is in range [since,until] inclusive.
2512 * 2667 *
@@ -2539,7 +2694,7 @@ struct drm_i915_cmd_table {
2539#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview) 2694#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
2540#define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_cherryview) 2695#define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_cherryview)
2541#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) 2696#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
2542#define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_cherryview && IS_GEN8(dev)) 2697#define IS_BROADWELL(dev) (INTEL_INFO(dev)->is_broadwell)
2543#define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake) 2698#define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake)
2544#define IS_BROXTON(dev) (INTEL_INFO(dev)->is_broxton) 2699#define IS_BROXTON(dev) (INTEL_INFO(dev)->is_broxton)
2545#define IS_KABYLAKE(dev) (INTEL_INFO(dev)->is_kabylake) 2700#define IS_KABYLAKE(dev) (INTEL_INFO(dev)->is_kabylake)
@@ -2591,6 +2746,8 @@ struct drm_i915_cmd_table {
2591#define SKL_REVID_D0 0x3 2746#define SKL_REVID_D0 0x3
2592#define SKL_REVID_E0 0x4 2747#define SKL_REVID_E0 0x4
2593#define SKL_REVID_F0 0x5 2748#define SKL_REVID_F0 0x5
2749#define SKL_REVID_G0 0x6
2750#define SKL_REVID_H0 0x7
2594 2751
2595#define IS_SKL_REVID(p, since, until) (IS_SKYLAKE(p) && IS_REVID(p, since, until)) 2752#define IS_SKL_REVID(p, since, until) (IS_SKYLAKE(p) && IS_REVID(p, since, until))
2596 2753
@@ -2616,29 +2773,34 @@ struct drm_i915_cmd_table {
2616 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular 2773 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
2617 * chips, etc.). 2774 * chips, etc.).
2618 */ 2775 */
2619#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2) 2776#define IS_GEN2(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(1)))
2620#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3) 2777#define IS_GEN3(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(2)))
2621#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4) 2778#define IS_GEN4(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(3)))
2622#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5) 2779#define IS_GEN5(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(4)))
2623#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6) 2780#define IS_GEN6(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(5)))
2624#define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7) 2781#define IS_GEN7(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(6)))
2625#define IS_GEN8(dev) (INTEL_INFO(dev)->gen == 8) 2782#define IS_GEN8(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(7)))
2626#define IS_GEN9(dev) (INTEL_INFO(dev)->gen == 9) 2783#define IS_GEN9(dev) (!!(INTEL_INFO(dev)->gen_mask & BIT(8)))
2627 2784
2628#define RENDER_RING (1<<RCS) 2785#define ENGINE_MASK(id) BIT(id)
2629#define BSD_RING (1<<VCS) 2786#define RENDER_RING ENGINE_MASK(RCS)
2630#define BLT_RING (1<<BCS) 2787#define BSD_RING ENGINE_MASK(VCS)
2631#define VEBOX_RING (1<<VECS) 2788#define BLT_RING ENGINE_MASK(BCS)
2632#define BSD2_RING (1<<VCS2) 2789#define VEBOX_RING ENGINE_MASK(VECS)
2633#define ALL_ENGINES (~0) 2790#define BSD2_RING ENGINE_MASK(VCS2)
2634 2791#define ALL_ENGINES (~0)
2635#define HAS_BSD(dev) (INTEL_INFO(dev)->ring_mask & BSD_RING) 2792
2636#define HAS_BSD2(dev) (INTEL_INFO(dev)->ring_mask & BSD2_RING) 2793#define HAS_ENGINE(dev_priv, id) \
2637#define HAS_BLT(dev) (INTEL_INFO(dev)->ring_mask & BLT_RING) 2794 (!!(INTEL_INFO(dev_priv)->ring_mask & ENGINE_MASK(id)))
2638#define HAS_VEBOX(dev) (INTEL_INFO(dev)->ring_mask & VEBOX_RING) 2795
2796#define HAS_BSD(dev_priv) HAS_ENGINE(dev_priv, VCS)
2797#define HAS_BSD2(dev_priv) HAS_ENGINE(dev_priv, VCS2)
2798#define HAS_BLT(dev_priv) HAS_ENGINE(dev_priv, BCS)
2799#define HAS_VEBOX(dev_priv) HAS_ENGINE(dev_priv, VECS)
2800
2639#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc) 2801#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
2640#define HAS_SNOOP(dev) (INTEL_INFO(dev)->has_snoop) 2802#define HAS_SNOOP(dev) (INTEL_INFO(dev)->has_snoop)
2641#define HAS_EDRAM(dev) (__I915__(dev)->edram_cap & EDRAM_ENABLED) 2803#define HAS_EDRAM(dev) (!!(__I915__(dev)->edram_cap & EDRAM_ENABLED))
2642#define HAS_WT(dev) ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \ 2804#define HAS_WT(dev) ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
2643 HAS_EDRAM(dev)) 2805 HAS_EDRAM(dev))
2644#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) 2806#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
@@ -2656,9 +2818,10 @@ struct drm_i915_cmd_table {
2656#define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev)) 2818#define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev))
2657 2819
2658/* WaRsDisableCoarsePowerGating:skl,bxt */ 2820/* WaRsDisableCoarsePowerGating:skl,bxt */
2659#define NEEDS_WaRsDisableCoarsePowerGating(dev) (IS_BXT_REVID(dev, 0, BXT_REVID_A1) || \ 2821#define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
2660 IS_SKL_GT3(dev) || \ 2822 (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1) || \
2661 IS_SKL_GT4(dev)) 2823 IS_SKL_GT3(dev_priv) || \
2824 IS_SKL_GT4(dev_priv))
2662 2825
2663/* 2826/*
2664 * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts 2827 * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
@@ -2696,12 +2859,18 @@ struct drm_i915_cmd_table {
2696 IS_CHERRYVIEW(dev) || IS_SKYLAKE(dev) || \ 2859 IS_CHERRYVIEW(dev) || IS_SKYLAKE(dev) || \
2697 IS_KABYLAKE(dev) || IS_BROXTON(dev)) 2860 IS_KABYLAKE(dev) || IS_BROXTON(dev))
2698#define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6) 2861#define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6)
2699#define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev)) 2862#define HAS_RC6p(dev) (IS_GEN6(dev) || IS_IVYBRIDGE(dev))
2700 2863
2701#define HAS_CSR(dev) (IS_GEN9(dev)) 2864#define HAS_CSR(dev) (IS_GEN9(dev))
2702 2865
2703#define HAS_GUC_UCODE(dev) (IS_GEN9(dev) && !IS_KABYLAKE(dev)) 2866/*
2704#define HAS_GUC_SCHED(dev) (IS_GEN9(dev) && !IS_KABYLAKE(dev)) 2867 * For now, anything with a GuC requires uCode loading, and then supports
2868 * command submission once loaded. But these are logically independent
2869 * properties, so we have separate macros to test them.
2870 */
2871#define HAS_GUC(dev) (IS_GEN9(dev))
2872#define HAS_GUC_UCODE(dev) (HAS_GUC(dev))
2873#define HAS_GUC_SCHED(dev) (HAS_GUC(dev))
2705 2874
2706#define HAS_RESOURCE_STREAMER(dev) (IS_HASWELL(dev) || \ 2875#define HAS_RESOURCE_STREAMER(dev) (IS_HASWELL(dev) || \
2707 INTEL_INFO(dev)->gen >= 8) 2876 INTEL_INFO(dev)->gen >= 8)
@@ -2710,6 +2879,8 @@ struct drm_i915_cmd_table {
2710 !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && \ 2879 !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && \
2711 !IS_BROXTON(dev)) 2880 !IS_BROXTON(dev))
2712 2881
2882#define HAS_POOLED_EU(dev) (INTEL_INFO(dev)->has_pooled_eu)
2883
2713#define INTEL_PCH_DEVICE_ID_MASK 0xff00 2884#define INTEL_PCH_DEVICE_ID_MASK 0xff00
2714#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 2885#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
2715#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 2886#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
@@ -2746,13 +2917,22 @@ struct drm_i915_cmd_table {
2746 2917
2747#include "i915_trace.h" 2918#include "i915_trace.h"
2748 2919
2749extern const struct drm_ioctl_desc i915_ioctls[]; 2920static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv)
2750extern int i915_max_ioctl; 2921{
2922#ifdef CONFIG_INTEL_IOMMU
2923 if (INTEL_GEN(dev_priv) >= 6 && intel_iommu_gfx_mapped)
2924 return true;
2925#endif
2926 return false;
2927}
2751 2928
2752extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state); 2929extern int i915_suspend_switcheroo(struct drm_device *dev, pm_message_t state);
2753extern int i915_resume_switcheroo(struct drm_device *dev); 2930extern int i915_resume_switcheroo(struct drm_device *dev);
2754 2931
2755/* i915_dma.c */ 2932int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
2933 int enable_ppgtt);
2934
2935/* i915_drv.c */
2756void __printf(3, 4) 2936void __printf(3, 4)
2757__i915_printk(struct drm_i915_private *dev_priv, const char *level, 2937__i915_printk(struct drm_i915_private *dev_priv, const char *level,
2758 const char *fmt, ...); 2938 const char *fmt, ...);
@@ -2760,21 +2940,13 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level,
2760#define i915_report_error(dev_priv, fmt, ...) \ 2940#define i915_report_error(dev_priv, fmt, ...) \
2761 __i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__) 2941 __i915_printk(dev_priv, KERN_ERR, fmt, ##__VA_ARGS__)
2762 2942
2763extern int i915_driver_load(struct drm_device *, unsigned long flags);
2764extern int i915_driver_unload(struct drm_device *);
2765extern int i915_driver_open(struct drm_device *dev, struct drm_file *file);
2766extern void i915_driver_lastclose(struct drm_device * dev);
2767extern void i915_driver_preclose(struct drm_device *dev,
2768 struct drm_file *file);
2769extern void i915_driver_postclose(struct drm_device *dev,
2770 struct drm_file *file);
2771#ifdef CONFIG_COMPAT 2943#ifdef CONFIG_COMPAT
2772extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, 2944extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
2773 unsigned long arg); 2945 unsigned long arg);
2774#endif 2946#endif
2775extern int intel_gpu_reset(struct drm_device *dev, u32 engine_mask); 2947extern int intel_gpu_reset(struct drm_i915_private *dev_priv, u32 engine_mask);
2776extern bool intel_has_gpu_reset(struct drm_device *dev); 2948extern bool intel_has_gpu_reset(struct drm_i915_private *dev_priv);
2777extern int i915_reset(struct drm_device *dev); 2949extern int i915_reset(struct drm_i915_private *dev_priv);
2778extern int intel_guc_reset(struct drm_i915_private *dev_priv); 2950extern int intel_guc_reset(struct drm_i915_private *dev_priv);
2779extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine); 2951extern void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
2780extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); 2952extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
@@ -2784,30 +2956,51 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
2784int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on); 2956int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
2785 2957
2786/* intel_hotplug.c */ 2958/* intel_hotplug.c */
2787void intel_hpd_irq_handler(struct drm_device *dev, u32 pin_mask, u32 long_mask); 2959void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
2960 u32 pin_mask, u32 long_mask);
2788void intel_hpd_init(struct drm_i915_private *dev_priv); 2961void intel_hpd_init(struct drm_i915_private *dev_priv);
2789void intel_hpd_init_work(struct drm_i915_private *dev_priv); 2962void intel_hpd_init_work(struct drm_i915_private *dev_priv);
2790void intel_hpd_cancel_work(struct drm_i915_private *dev_priv); 2963void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
2791bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port); 2964bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port);
2965bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
2966void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin);
2792 2967
2793/* i915_irq.c */ 2968/* i915_irq.c */
2794void i915_queue_hangcheck(struct drm_device *dev); 2969static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv)
2970{
2971 unsigned long delay;
2972
2973 if (unlikely(!i915.enable_hangcheck))
2974 return;
2975
2976 /* Don't continually defer the hangcheck so that it is always run at
2977 * least once after work has been scheduled on any ring. Otherwise,
2978 * we will ignore a hung ring if a second ring is kept busy.
2979 */
2980
2981 delay = round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES);
2982 queue_delayed_work(system_long_wq,
2983 &dev_priv->gpu_error.hangcheck_work, delay);
2984}
2985
2795__printf(3, 4) 2986__printf(3, 4)
2796void i915_handle_error(struct drm_device *dev, u32 engine_mask, 2987void i915_handle_error(struct drm_i915_private *dev_priv,
2988 u32 engine_mask,
2797 const char *fmt, ...); 2989 const char *fmt, ...);
2798 2990
2799extern void intel_irq_init(struct drm_i915_private *dev_priv); 2991extern void intel_irq_init(struct drm_i915_private *dev_priv);
2800int intel_irq_install(struct drm_i915_private *dev_priv); 2992int intel_irq_install(struct drm_i915_private *dev_priv);
2801void intel_irq_uninstall(struct drm_i915_private *dev_priv); 2993void intel_irq_uninstall(struct drm_i915_private *dev_priv);
2802 2994
2803extern void intel_uncore_sanitize(struct drm_device *dev); 2995extern void intel_uncore_sanitize(struct drm_i915_private *dev_priv);
2804extern void intel_uncore_early_sanitize(struct drm_device *dev, 2996extern void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
2805 bool restore_forcewake); 2997 bool restore_forcewake);
2806extern void intel_uncore_init(struct drm_device *dev); 2998extern void intel_uncore_init(struct drm_i915_private *dev_priv);
2807extern bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv); 2999extern bool intel_uncore_unclaimed_mmio(struct drm_i915_private *dev_priv);
2808extern bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv); 3000extern bool intel_uncore_arm_unclaimed_mmio_detection(struct drm_i915_private *dev_priv);
2809extern void intel_uncore_fini(struct drm_device *dev); 3001extern void intel_uncore_fini(struct drm_i915_private *dev_priv);
2810extern void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore); 3002extern void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
3003 bool restore);
2811const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id); 3004const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
2812void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, 3005void intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
2813 enum forcewake_domains domains); 3006 enum forcewake_domains domains);
@@ -2823,9 +3016,26 @@ void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
2823u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv); 3016u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
2824 3017
2825void assert_forcewakes_inactive(struct drm_i915_private *dev_priv); 3018void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
2826static inline bool intel_vgpu_active(struct drm_device *dev) 3019
3020int intel_wait_for_register(struct drm_i915_private *dev_priv,
3021 i915_reg_t reg,
3022 const u32 mask,
3023 const u32 value,
3024 const unsigned long timeout_ms);
3025int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
3026 i915_reg_t reg,
3027 const u32 mask,
3028 const u32 value,
3029 const unsigned long timeout_ms);
3030
3031static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
3032{
3033 return dev_priv->gvt.initialized;
3034}
3035
3036static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
2827{ 3037{
2828 return to_i915(dev)->vgpu.active; 3038 return dev_priv->vgpu.active;
2829} 3039}
2830 3040
2831void 3041void
@@ -2882,7 +3092,6 @@ ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, uint32_t bits)
2882 ibx_display_interrupt_update(dev_priv, bits, 0); 3092 ibx_display_interrupt_update(dev_priv, bits, 0);
2883} 3093}
2884 3094
2885
2886/* i915_gem.c */ 3095/* i915_gem.c */
2887int i915_gem_create_ioctl(struct drm_device *dev, void *data, 3096int i915_gem_create_ioctl(struct drm_device *dev, void *data,
2888 struct drm_file *file_priv); 3097 struct drm_file *file_priv);
@@ -2921,7 +3130,7 @@ int i915_gem_set_tiling(struct drm_device *dev, void *data,
2921 struct drm_file *file_priv); 3130 struct drm_file *file_priv);
2922int i915_gem_get_tiling(struct drm_device *dev, void *data, 3131int i915_gem_get_tiling(struct drm_device *dev, void *data,
2923 struct drm_file *file_priv); 3132 struct drm_file *file_priv);
2924int i915_gem_init_userptr(struct drm_device *dev); 3133void i915_gem_init_userptr(struct drm_i915_private *dev_priv);
2925int i915_gem_userptr_ioctl(struct drm_device *dev, void *data, 3134int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
2926 struct drm_file *file); 3135 struct drm_file *file);
2927int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, 3136int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
@@ -2931,11 +3140,13 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
2931void i915_gem_load_init(struct drm_device *dev); 3140void i915_gem_load_init(struct drm_device *dev);
2932void i915_gem_load_cleanup(struct drm_device *dev); 3141void i915_gem_load_cleanup(struct drm_device *dev);
2933void i915_gem_load_init_fences(struct drm_i915_private *dev_priv); 3142void i915_gem_load_init_fences(struct drm_i915_private *dev_priv);
3143int i915_gem_freeze_late(struct drm_i915_private *dev_priv);
3144
2934void *i915_gem_object_alloc(struct drm_device *dev); 3145void *i915_gem_object_alloc(struct drm_device *dev);
2935void i915_gem_object_free(struct drm_i915_gem_object *obj); 3146void i915_gem_object_free(struct drm_i915_gem_object *obj);
2936void i915_gem_object_init(struct drm_i915_gem_object *obj, 3147void i915_gem_object_init(struct drm_i915_gem_object *obj,
2937 const struct drm_i915_gem_object_ops *ops); 3148 const struct drm_i915_gem_object_ops *ops);
2938struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, 3149struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
2939 size_t size); 3150 size_t size);
2940struct drm_i915_gem_object *i915_gem_object_create_from_data( 3151struct drm_i915_gem_object *i915_gem_object_create_from_data(
2941 struct drm_device *dev, const void *data, size_t size); 3152 struct drm_device *dev, const void *data, size_t size);
@@ -2990,6 +3201,23 @@ static inline int __sg_page_count(struct scatterlist *sg)
2990struct page * 3201struct page *
2991i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n); 3202i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n);
2992 3203
3204static inline dma_addr_t
3205i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, int n)
3206{
3207 if (n < obj->get_page.last) {
3208 obj->get_page.sg = obj->pages->sgl;
3209 obj->get_page.last = 0;
3210 }
3211
3212 while (obj->get_page.last + __sg_page_count(obj->get_page.sg) <= n) {
3213 obj->get_page.last += __sg_page_count(obj->get_page.sg++);
3214 if (unlikely(sg_is_chain(obj->get_page.sg)))
3215 obj->get_page.sg = sg_chain_ptr(obj->get_page.sg);
3216 }
3217
3218 return sg_dma_address(obj->get_page.sg) + ((n - obj->get_page.last) << PAGE_SHIFT);
3219}
3220
2993static inline struct page * 3221static inline struct page *
2994i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n) 3222i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
2995{ 3223{
@@ -3066,6 +3294,11 @@ int i915_gem_dumb_create(struct drm_file *file_priv,
3066 struct drm_mode_create_dumb *args); 3294 struct drm_mode_create_dumb *args);
3067int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev, 3295int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
3068 uint32_t handle, uint64_t *offset); 3296 uint32_t handle, uint64_t *offset);
3297
3298void i915_gem_track_fb(struct drm_i915_gem_object *old,
3299 struct drm_i915_gem_object *new,
3300 unsigned frontbuffer_bits);
3301
3069/** 3302/**
3070 * Returns true if seq1 is later than seq2. 3303 * Returns true if seq1 is later than seq2.
3071 */ 3304 */
@@ -3075,31 +3308,34 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
3075 return (int32_t)(seq1 - seq2) >= 0; 3308 return (int32_t)(seq1 - seq2) >= 0;
3076} 3309}
3077 3310
3078static inline bool i915_gem_request_started(struct drm_i915_gem_request *req, 3311static inline bool i915_gem_request_started(const struct drm_i915_gem_request *req)
3079 bool lazy_coherency)
3080{ 3312{
3081 if (!lazy_coherency && req->engine->irq_seqno_barrier) 3313 return i915_seqno_passed(intel_engine_get_seqno(req->engine),
3082 req->engine->irq_seqno_barrier(req->engine);
3083 return i915_seqno_passed(req->engine->get_seqno(req->engine),
3084 req->previous_seqno); 3314 req->previous_seqno);
3085} 3315}
3086 3316
3087static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req, 3317static inline bool i915_gem_request_completed(const struct drm_i915_gem_request *req)
3088 bool lazy_coherency)
3089{ 3318{
3090 if (!lazy_coherency && req->engine->irq_seqno_barrier) 3319 return i915_seqno_passed(intel_engine_get_seqno(req->engine),
3091 req->engine->irq_seqno_barrier(req->engine);
3092 return i915_seqno_passed(req->engine->get_seqno(req->engine),
3093 req->seqno); 3320 req->seqno);
3094} 3321}
3095 3322
3096int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno); 3323bool __i915_spin_request(const struct drm_i915_gem_request *request,
3324 int state, unsigned long timeout_us);
3325static inline bool i915_spin_request(const struct drm_i915_gem_request *request,
3326 int state, unsigned long timeout_us)
3327{
3328 return (i915_gem_request_started(request) &&
3329 __i915_spin_request(request, state, timeout_us));
3330}
3331
3332int __must_check i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno);
3097int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno); 3333int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
3098 3334
3099struct drm_i915_gem_request * 3335struct drm_i915_gem_request *
3100i915_gem_find_active_request(struct intel_engine_cs *engine); 3336i915_gem_find_active_request(struct intel_engine_cs *engine);
3101 3337
3102bool i915_gem_retire_requests(struct drm_device *dev); 3338void i915_gem_retire_requests(struct drm_i915_private *dev_priv);
3103void i915_gem_retire_requests_ring(struct intel_engine_cs *engine); 3339void i915_gem_retire_requests_ring(struct intel_engine_cs *engine);
3104 3340
3105static inline u32 i915_reset_counter(struct i915_gpu_error *error) 3341static inline u32 i915_reset_counter(struct i915_gpu_error *error)
@@ -3142,27 +3378,14 @@ static inline u32 i915_reset_count(struct i915_gpu_error *error)
3142 return ((i915_reset_counter(error) & ~I915_WEDGED) + 1) / 2; 3378 return ((i915_reset_counter(error) & ~I915_WEDGED) + 1) / 2;
3143} 3379}
3144 3380
3145static inline bool i915_stop_ring_allow_ban(struct drm_i915_private *dev_priv)
3146{
3147 return dev_priv->gpu_error.stop_rings == 0 ||
3148 dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_BAN;
3149}
3150
3151static inline bool i915_stop_ring_allow_warn(struct drm_i915_private *dev_priv)
3152{
3153 return dev_priv->gpu_error.stop_rings == 0 ||
3154 dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_WARN;
3155}
3156
3157void i915_gem_reset(struct drm_device *dev); 3381void i915_gem_reset(struct drm_device *dev);
3158bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force); 3382bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
3159int __must_check i915_gem_init(struct drm_device *dev); 3383int __must_check i915_gem_init(struct drm_device *dev);
3160int i915_gem_init_engines(struct drm_device *dev); 3384int i915_gem_init_engines(struct drm_device *dev);
3161int __must_check i915_gem_init_hw(struct drm_device *dev); 3385int __must_check i915_gem_init_hw(struct drm_device *dev);
3162int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice);
3163void i915_gem_init_swizzling(struct drm_device *dev); 3386void i915_gem_init_swizzling(struct drm_device *dev);
3164void i915_gem_cleanup_engines(struct drm_device *dev); 3387void i915_gem_cleanup_engines(struct drm_device *dev);
3165int __must_check i915_gpu_idle(struct drm_device *dev); 3388int __must_check i915_gem_wait_for_idle(struct drm_i915_private *dev_priv);
3166int __must_check i915_gem_suspend(struct drm_device *dev); 3389int __must_check i915_gem_suspend(struct drm_device *dev);
3167void __i915_add_request(struct drm_i915_gem_request *req, 3390void __i915_add_request(struct drm_i915_gem_request *req,
3168 struct drm_i915_gem_object *batch_obj, 3391 struct drm_i915_gem_object *batch_obj,
@@ -3227,8 +3450,6 @@ bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
3227bool i915_gem_obj_bound(struct drm_i915_gem_object *o, 3450bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
3228 struct i915_address_space *vm); 3451 struct i915_address_space *vm);
3229 3452
3230unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
3231 struct i915_address_space *vm);
3232struct i915_vma * 3453struct i915_vma *
3233i915_gem_obj_to_vma(struct drm_i915_gem_object *obj, 3454i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
3234 struct i915_address_space *vm); 3455 struct i915_address_space *vm);
@@ -3263,14 +3484,8 @@ static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
3263 return i915_gem_obj_ggtt_bound_view(obj, &i915_ggtt_view_normal); 3484 return i915_gem_obj_ggtt_bound_view(obj, &i915_ggtt_view_normal);
3264} 3485}
3265 3486
3266static inline unsigned long 3487unsigned long
3267i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj) 3488i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj);
3268{
3269 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
3270 struct i915_ggtt *ggtt = &dev_priv->ggtt;
3271
3272 return i915_gem_obj_size(obj, &ggtt->base);
3273}
3274 3489
3275static inline int __must_check 3490static inline int __must_check
3276i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj, 3491i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
@@ -3284,12 +3499,6 @@ i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
3284 alignment, flags | PIN_GLOBAL); 3499 alignment, flags | PIN_GLOBAL);
3285} 3500}
3286 3501
3287static inline int
3288i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
3289{
3290 return i915_vma_unbind(i915_gem_obj_to_ggtt(obj));
3291}
3292
3293void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj, 3502void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
3294 const struct i915_ggtt_view *view); 3503 const struct i915_ggtt_view *view);
3295static inline void 3504static inline void
@@ -3313,28 +3522,44 @@ void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
3313 3522
3314/* i915_gem_context.c */ 3523/* i915_gem_context.c */
3315int __must_check i915_gem_context_init(struct drm_device *dev); 3524int __must_check i915_gem_context_init(struct drm_device *dev);
3525void i915_gem_context_lost(struct drm_i915_private *dev_priv);
3316void i915_gem_context_fini(struct drm_device *dev); 3526void i915_gem_context_fini(struct drm_device *dev);
3317void i915_gem_context_reset(struct drm_device *dev); 3527void i915_gem_context_reset(struct drm_device *dev);
3318int i915_gem_context_open(struct drm_device *dev, struct drm_file *file); 3528int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
3319int i915_gem_context_enable(struct drm_i915_gem_request *req);
3320void i915_gem_context_close(struct drm_device *dev, struct drm_file *file); 3529void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
3321int i915_switch_context(struct drm_i915_gem_request *req); 3530int i915_switch_context(struct drm_i915_gem_request *req);
3322struct intel_context *
3323i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
3324void i915_gem_context_free(struct kref *ctx_ref); 3531void i915_gem_context_free(struct kref *ctx_ref);
3325struct drm_i915_gem_object * 3532struct drm_i915_gem_object *
3326i915_gem_alloc_context_obj(struct drm_device *dev, size_t size); 3533i915_gem_alloc_context_obj(struct drm_device *dev, size_t size);
3327static inline void i915_gem_context_reference(struct intel_context *ctx) 3534struct i915_gem_context *
3535i915_gem_context_create_gvt(struct drm_device *dev);
3536
3537static inline struct i915_gem_context *
3538i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
3539{
3540 struct i915_gem_context *ctx;
3541
3542 lockdep_assert_held(&file_priv->dev_priv->drm.struct_mutex);
3543
3544 ctx = idr_find(&file_priv->context_idr, id);
3545 if (!ctx)
3546 return ERR_PTR(-ENOENT);
3547
3548 return ctx;
3549}
3550
3551static inline void i915_gem_context_reference(struct i915_gem_context *ctx)
3328{ 3552{
3329 kref_get(&ctx->ref); 3553 kref_get(&ctx->ref);
3330} 3554}
3331 3555
3332static inline void i915_gem_context_unreference(struct intel_context *ctx) 3556static inline void i915_gem_context_unreference(struct i915_gem_context *ctx)
3333{ 3557{
3558 lockdep_assert_held(&ctx->i915->drm.struct_mutex);
3334 kref_put(&ctx->ref, i915_gem_context_free); 3559 kref_put(&ctx->ref, i915_gem_context_free);
3335} 3560}
3336 3561
3337static inline bool i915_gem_context_is_default(const struct intel_context *c) 3562static inline bool i915_gem_context_is_default(const struct i915_gem_context *c)
3338{ 3563{
3339 return c->user_handle == DEFAULT_CONTEXT_HANDLE; 3564 return c->user_handle == DEFAULT_CONTEXT_HANDLE;
3340} 3565}
@@ -3347,6 +3572,8 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
3347 struct drm_file *file_priv); 3572 struct drm_file *file_priv);
3348int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, 3573int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
3349 struct drm_file *file_priv); 3574 struct drm_file *file_priv);
3575int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
3576 struct drm_file *file);
3350 3577
3351/* i915_gem_evict.c */ 3578/* i915_gem_evict.c */
3352int __must_check i915_gem_evict_something(struct drm_device *dev, 3579int __must_check i915_gem_evict_something(struct drm_device *dev,
@@ -3361,9 +3588,9 @@ int __must_check i915_gem_evict_for_vma(struct i915_vma *target);
3361int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle); 3588int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
3362 3589
3363/* belongs in i915_gem_gtt.h */ 3590/* belongs in i915_gem_gtt.h */
3364static inline void i915_gem_chipset_flush(struct drm_device *dev) 3591static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)
3365{ 3592{
3366 if (INTEL_INFO(dev)->gen < 6) 3593 if (INTEL_GEN(dev_priv) < 6)
3367 intel_gtt_chipset_flush(); 3594 intel_gtt_chipset_flush();
3368} 3595}
3369 3596
@@ -3404,7 +3631,7 @@ void i915_gem_shrinker_cleanup(struct drm_i915_private *dev_priv);
3404/* i915_gem_tiling.c */ 3631/* i915_gem_tiling.c */
3405static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) 3632static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
3406{ 3633{
3407 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 3634 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
3408 3635
3409 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && 3636 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
3410 obj->tiling_mode != I915_TILING_NONE; 3637 obj->tiling_mode != I915_TILING_NONE;
@@ -3418,12 +3645,14 @@ int i915_verify_lists(struct drm_device *dev);
3418#endif 3645#endif
3419 3646
3420/* i915_debugfs.c */ 3647/* i915_debugfs.c */
3421int i915_debugfs_init(struct drm_minor *minor);
3422void i915_debugfs_cleanup(struct drm_minor *minor);
3423#ifdef CONFIG_DEBUG_FS 3648#ifdef CONFIG_DEBUG_FS
3649int i915_debugfs_register(struct drm_i915_private *dev_priv);
3650void i915_debugfs_unregister(struct drm_i915_private *dev_priv);
3424int i915_debugfs_connector_add(struct drm_connector *connector); 3651int i915_debugfs_connector_add(struct drm_connector *connector);
3425void intel_display_crc_init(struct drm_device *dev); 3652void intel_display_crc_init(struct drm_device *dev);
3426#else 3653#else
3654static inline int i915_debugfs_register(struct drm_i915_private *dev_priv) {return 0;}
3655static inline void i915_debugfs_unregister(struct drm_i915_private *dev_priv) {}
3427static inline int i915_debugfs_connector_add(struct drm_connector *connector) 3656static inline int i915_debugfs_connector_add(struct drm_connector *connector)
3428{ return 0; } 3657{ return 0; }
3429static inline void intel_display_crc_init(struct drm_device *dev) {} 3658static inline void intel_display_crc_init(struct drm_device *dev) {}
@@ -3442,18 +3671,19 @@ static inline void i915_error_state_buf_release(
3442{ 3671{
3443 kfree(eb->buf); 3672 kfree(eb->buf);
3444} 3673}
3445void i915_capture_error_state(struct drm_device *dev, u32 engine_mask, 3674void i915_capture_error_state(struct drm_i915_private *dev_priv,
3675 u32 engine_mask,
3446 const char *error_msg); 3676 const char *error_msg);
3447void i915_error_state_get(struct drm_device *dev, 3677void i915_error_state_get(struct drm_device *dev,
3448 struct i915_error_state_file_priv *error_priv); 3678 struct i915_error_state_file_priv *error_priv);
3449void i915_error_state_put(struct i915_error_state_file_priv *error_priv); 3679void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
3450void i915_destroy_error_state(struct drm_device *dev); 3680void i915_destroy_error_state(struct drm_device *dev);
3451 3681
3452void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone); 3682void i915_get_extra_instdone(struct drm_i915_private *dev_priv, uint32_t *instdone);
3453const char *i915_cache_level_str(struct drm_i915_private *i915, int type); 3683const char *i915_cache_level_str(struct drm_i915_private *i915, int type);
3454 3684
3455/* i915_cmd_parser.c */ 3685/* i915_cmd_parser.c */
3456int i915_cmd_parser_get_version(void); 3686int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv);
3457int i915_cmd_parser_init_ring(struct intel_engine_cs *engine); 3687int i915_cmd_parser_init_ring(struct intel_engine_cs *engine);
3458void i915_cmd_parser_fini_ring(struct intel_engine_cs *engine); 3688void i915_cmd_parser_fini_ring(struct intel_engine_cs *engine);
3459bool i915_needs_cmd_parser(struct intel_engine_cs *engine); 3689bool i915_needs_cmd_parser(struct intel_engine_cs *engine);
@@ -3502,31 +3732,33 @@ bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv,
3502 3732
3503/* intel_opregion.c */ 3733/* intel_opregion.c */
3504#ifdef CONFIG_ACPI 3734#ifdef CONFIG_ACPI
3505extern int intel_opregion_setup(struct drm_device *dev); 3735extern int intel_opregion_setup(struct drm_i915_private *dev_priv);
3506extern void intel_opregion_init(struct drm_device *dev); 3736extern void intel_opregion_register(struct drm_i915_private *dev_priv);
3507extern void intel_opregion_fini(struct drm_device *dev); 3737extern void intel_opregion_unregister(struct drm_i915_private *dev_priv);
3508extern void intel_opregion_asle_intr(struct drm_device *dev); 3738extern void intel_opregion_asle_intr(struct drm_i915_private *dev_priv);
3509extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, 3739extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
3510 bool enable); 3740 bool enable);
3511extern int intel_opregion_notify_adapter(struct drm_device *dev, 3741extern int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
3512 pci_power_t state); 3742 pci_power_t state);
3513extern int intel_opregion_get_panel_type(struct drm_device *dev); 3743extern int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv);
3514#else 3744#else
3515static inline int intel_opregion_setup(struct drm_device *dev) { return 0; } 3745static inline int intel_opregion_setup(struct drm_i915_private *dev) { return 0; }
3516static inline void intel_opregion_init(struct drm_device *dev) { return; } 3746static inline void intel_opregion_register(struct drm_i915_private *dev_priv) { }
3517static inline void intel_opregion_fini(struct drm_device *dev) { return; } 3747static inline void intel_opregion_unregister(struct drm_i915_private *dev_priv) { }
3518static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; } 3748static inline void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
3749{
3750}
3519static inline int 3751static inline int
3520intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable) 3752intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
3521{ 3753{
3522 return 0; 3754 return 0;
3523} 3755}
3524static inline int 3756static inline int
3525intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state) 3757intel_opregion_notify_adapter(struct drm_i915_private *dev, pci_power_t state)
3526{ 3758{
3527 return 0; 3759 return 0;
3528} 3760}
3529static inline int intel_opregion_get_panel_type(struct drm_device *dev) 3761static inline int intel_opregion_get_panel_type(struct drm_i915_private *dev)
3530{ 3762{
3531 return -ENODEV; 3763 return -ENODEV;
3532} 3764}
@@ -3541,36 +3773,45 @@ static inline void intel_register_dsm_handler(void) { return; }
3541static inline void intel_unregister_dsm_handler(void) { return; } 3773static inline void intel_unregister_dsm_handler(void) { return; }
3542#endif /* CONFIG_ACPI */ 3774#endif /* CONFIG_ACPI */
3543 3775
3776/* intel_device_info.c */
3777static inline struct intel_device_info *
3778mkwrite_device_info(struct drm_i915_private *dev_priv)
3779{
3780 return (struct intel_device_info *)&dev_priv->info;
3781}
3782
3783void intel_device_info_runtime_init(struct drm_i915_private *dev_priv);
3784void intel_device_info_dump(struct drm_i915_private *dev_priv);
3785
3544/* modesetting */ 3786/* modesetting */
3545extern void intel_modeset_init_hw(struct drm_device *dev); 3787extern void intel_modeset_init_hw(struct drm_device *dev);
3546extern void intel_modeset_init(struct drm_device *dev); 3788extern void intel_modeset_init(struct drm_device *dev);
3547extern void intel_modeset_gem_init(struct drm_device *dev); 3789extern void intel_modeset_gem_init(struct drm_device *dev);
3548extern void intel_modeset_cleanup(struct drm_device *dev); 3790extern void intel_modeset_cleanup(struct drm_device *dev);
3549extern void intel_connector_unregister(struct intel_connector *); 3791extern int intel_connector_register(struct drm_connector *);
3792extern void intel_connector_unregister(struct drm_connector *);
3550extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); 3793extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
3551extern void intel_display_resume(struct drm_device *dev); 3794extern void intel_display_resume(struct drm_device *dev);
3552extern void i915_redisable_vga(struct drm_device *dev); 3795extern void i915_redisable_vga(struct drm_device *dev);
3553extern void i915_redisable_vga_power_on(struct drm_device *dev); 3796extern void i915_redisable_vga_power_on(struct drm_device *dev);
3554extern bool ironlake_set_drps(struct drm_device *dev, u8 val); 3797extern bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val);
3555extern void intel_init_pch_refclk(struct drm_device *dev); 3798extern void intel_init_pch_refclk(struct drm_device *dev);
3556extern void intel_set_rps(struct drm_device *dev, u8 val); 3799extern void intel_set_rps(struct drm_i915_private *dev_priv, u8 val);
3557extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, 3800extern void intel_set_memory_cxsr(struct drm_i915_private *dev_priv,
3558 bool enable); 3801 bool enable);
3559extern void intel_detect_pch(struct drm_device *dev);
3560extern int intel_enable_rc6(const struct drm_device *dev);
3561 3802
3562extern bool i915_semaphore_is_enabled(struct drm_device *dev); 3803extern bool i915_semaphore_is_enabled(struct drm_i915_private *dev_priv);
3563int i915_reg_read_ioctl(struct drm_device *dev, void *data, 3804int i915_reg_read_ioctl(struct drm_device *dev, void *data,
3564 struct drm_file *file); 3805 struct drm_file *file);
3565int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
3566 struct drm_file *file);
3567 3806
3568/* overlay */ 3807/* overlay */
3569extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); 3808extern struct intel_overlay_error_state *
3809intel_overlay_capture_error_state(struct drm_i915_private *dev_priv);
3570extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e, 3810extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
3571 struct intel_overlay_error_state *error); 3811 struct intel_overlay_error_state *error);
3572 3812
3573extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev); 3813extern struct intel_display_error_state *
3814intel_display_capture_error_state(struct drm_i915_private *dev_priv);
3574extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e, 3815extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
3575 struct drm_device *dev, 3816 struct drm_device *dev,
3576 struct intel_display_error_state *error); 3817 struct intel_display_error_state *error);
@@ -3599,6 +3840,24 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
3599u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg); 3840u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
3600void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val); 3841void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
3601 3842
3843/* intel_dpio_phy.c */
3844void chv_set_phy_signal_level(struct intel_encoder *encoder,
3845 u32 deemph_reg_value, u32 margin_reg_value,
3846 bool uniq_trans_scale);
3847void chv_data_lane_soft_reset(struct intel_encoder *encoder,
3848 bool reset);
3849void chv_phy_pre_pll_enable(struct intel_encoder *encoder);
3850void chv_phy_pre_encoder_enable(struct intel_encoder *encoder);
3851void chv_phy_release_cl2_override(struct intel_encoder *encoder);
3852void chv_phy_post_pll_disable(struct intel_encoder *encoder);
3853
3854void vlv_set_phy_signal_level(struct intel_encoder *encoder,
3855 u32 demph_reg_value, u32 preemph_reg_value,
3856 u32 uniqtranscale_reg_value, u32 tx3_demph);
3857void vlv_phy_pre_pll_enable(struct intel_encoder *encoder);
3858void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder);
3859void vlv_phy_reset_lanes(struct intel_encoder *encoder);
3860
3602int intel_gpu_freq(struct drm_i915_private *dev_priv, int val); 3861int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
3603int intel_freq_opcode(struct drm_i915_private *dev_priv, int val); 3862int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
3604 3863
@@ -3672,6 +3931,7 @@ __raw_write(64, q)
3672 */ 3931 */
3673#define I915_READ_FW(reg__) __raw_i915_read32(dev_priv, (reg__)) 3932#define I915_READ_FW(reg__) __raw_i915_read32(dev_priv, (reg__))
3674#define I915_WRITE_FW(reg__, val__) __raw_i915_write32(dev_priv, (reg__), (val__)) 3933#define I915_WRITE_FW(reg__, val__) __raw_i915_write32(dev_priv, (reg__), (val__))
3934#define I915_WRITE64_FW(reg__, val__) __raw_i915_write64(dev_priv, (reg__), (val__))
3675#define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__) 3935#define POSTING_READ_FW(reg__) (void)I915_READ_FW(reg__)
3676 3936
3677/* "Broadcast RGB" property */ 3937/* "Broadcast RGB" property */
@@ -3735,12 +3995,80 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
3735 schedule_timeout_uninterruptible(remaining_jiffies); 3995 schedule_timeout_uninterruptible(remaining_jiffies);
3736 } 3996 }
3737} 3997}
3738 3998static inline bool __i915_request_irq_complete(struct drm_i915_gem_request *req)
3739static inline void i915_trace_irq_get(struct intel_engine_cs *engine,
3740 struct drm_i915_gem_request *req)
3741{ 3999{
3742 if (engine->trace_irq_req == NULL && engine->irq_get(engine)) 4000 struct intel_engine_cs *engine = req->engine;
3743 i915_gem_request_assign(&engine->trace_irq_req, req); 4001
4002 /* Before we do the heavier coherent read of the seqno,
4003 * check the value (hopefully) in the CPU cacheline.
4004 */
4005 if (i915_gem_request_completed(req))
4006 return true;
4007
4008 /* Ensure our read of the seqno is coherent so that we
4009 * do not "miss an interrupt" (i.e. if this is the last
4010 * request and the seqno write from the GPU is not visible
4011 * by the time the interrupt fires, we will see that the
4012 * request is incomplete and go back to sleep awaiting
4013 * another interrupt that will never come.)
4014 *
4015 * Strictly, we only need to do this once after an interrupt,
4016 * but it is easier and safer to do it every time the waiter
4017 * is woken.
4018 */
4019 if (engine->irq_seqno_barrier &&
4020 READ_ONCE(engine->breadcrumbs.irq_seqno_bh) == current &&
4021 cmpxchg_relaxed(&engine->breadcrumbs.irq_posted, 1, 0)) {
4022 struct task_struct *tsk;
4023
4024 /* The ordering of irq_posted versus applying the barrier
4025 * is crucial. The clearing of the current irq_posted must
4026 * be visible before we perform the barrier operation,
4027 * such that if a subsequent interrupt arrives, irq_posted
4028 * is reasserted and our task rewoken (which causes us to
4029 * do another __i915_request_irq_complete() immediately
4030 * and reapply the barrier). Conversely, if the clear
4031 * occurs after the barrier, then an interrupt that arrived
4032 * whilst we waited on the barrier would not trigger a
4033 * barrier on the next pass, and the read may not see the
4034 * seqno update.
4035 */
4036 engine->irq_seqno_barrier(engine);
4037
4038 /* If we consume the irq, but we are no longer the bottom-half,
4039 * the real bottom-half may not have serialised their own
4040 * seqno check with the irq-barrier (i.e. may have inspected
4041 * the seqno before we believe it coherent since they see
4042 * irq_posted == false but we are still running).
4043 */
4044 rcu_read_lock();
4045 tsk = READ_ONCE(engine->breadcrumbs.irq_seqno_bh);
4046 if (tsk && tsk != current)
4047 /* Note that if the bottom-half is changed as we
4048 * are sending the wake-up, the new bottom-half will
4049 * be woken by whoever made the change. We only have
4050 * to worry about when we steal the irq-posted for
4051 * ourselves.
4052 */
4053 wake_up_process(tsk);
4054 rcu_read_unlock();
4055
4056 if (i915_gem_request_completed(req))
4057 return true;
4058 }
4059
4060 /* We need to check whether any gpu reset happened in between
4061 * the request being submitted and now. If a reset has occurred,
4062 * the seqno will have been advance past ours and our request
4063 * is complete. If we are in the process of handling a reset,
4064 * the request is effectively complete as the rendering will
4065 * be discarded, but we need to return in order to drop the
4066 * struct_mutex.
4067 */
4068 if (i915_reset_in_progress(&req->i915->gpu_error))
4069 return true;
4070
4071 return false;
3744} 4072}
3745 4073
3746#endif 4074#endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ed6117a0ee84..11681501d7b1 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -54,12 +54,33 @@ static bool cpu_cache_is_coherent(struct drm_device *dev,
54 54
55static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj) 55static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
56{ 56{
57 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
58 return false;
59
57 if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) 60 if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
58 return true; 61 return true;
59 62
60 return obj->pin_display; 63 return obj->pin_display;
61} 64}
62 65
66static int
67insert_mappable_node(struct drm_i915_private *i915,
68 struct drm_mm_node *node, u32 size)
69{
70 memset(node, 0, sizeof(*node));
71 return drm_mm_insert_node_in_range_generic(&i915->ggtt.base.mm, node,
72 size, 0, 0, 0,
73 i915->ggtt.mappable_end,
74 DRM_MM_SEARCH_DEFAULT,
75 DRM_MM_CREATE_DEFAULT);
76}
77
78static void
79remove_mappable_node(struct drm_mm_node *node)
80{
81 drm_mm_remove_node(node);
82}
83
63/* some bookkeeping */ 84/* some bookkeeping */
64static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv, 85static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
65 size_t size) 86 size_t size)
@@ -107,7 +128,7 @@ i915_gem_wait_for_error(struct i915_gpu_error *error)
107 128
108int i915_mutex_lock_interruptible(struct drm_device *dev) 129int i915_mutex_lock_interruptible(struct drm_device *dev)
109{ 130{
110 struct drm_i915_private *dev_priv = dev->dev_private; 131 struct drm_i915_private *dev_priv = to_i915(dev);
111 int ret; 132 int ret;
112 133
113 ret = i915_gem_wait_for_error(&dev_priv->gpu_error); 134 ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
@@ -177,7 +198,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
177 vaddr += PAGE_SIZE; 198 vaddr += PAGE_SIZE;
178 } 199 }
179 200
180 i915_gem_chipset_flush(obj->base.dev); 201 i915_gem_chipset_flush(to_i915(obj->base.dev));
181 202
182 st = kmalloc(sizeof(*st), GFP_KERNEL); 203 st = kmalloc(sizeof(*st), GFP_KERNEL);
183 if (st == NULL) 204 if (st == NULL)
@@ -347,7 +368,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
347 } 368 }
348 369
349 drm_clflush_virt_range(vaddr, args->size); 370 drm_clflush_virt_range(vaddr, args->size);
350 i915_gem_chipset_flush(dev); 371 i915_gem_chipset_flush(to_i915(dev));
351 372
352out: 373out:
353 intel_fb_obj_flush(obj, false, ORIGIN_CPU); 374 intel_fb_obj_flush(obj, false, ORIGIN_CPU);
@@ -356,13 +377,13 @@ out:
356 377
357void *i915_gem_object_alloc(struct drm_device *dev) 378void *i915_gem_object_alloc(struct drm_device *dev)
358{ 379{
359 struct drm_i915_private *dev_priv = dev->dev_private; 380 struct drm_i915_private *dev_priv = to_i915(dev);
360 return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL); 381 return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
361} 382}
362 383
363void i915_gem_object_free(struct drm_i915_gem_object *obj) 384void i915_gem_object_free(struct drm_i915_gem_object *obj)
364{ 385{
365 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 386 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
366 kmem_cache_free(dev_priv->objects, obj); 387 kmem_cache_free(dev_priv->objects, obj);
367} 388}
368 389
@@ -381,9 +402,9 @@ i915_gem_create(struct drm_file *file,
381 return -EINVAL; 402 return -EINVAL;
382 403
383 /* Allocate the new object */ 404 /* Allocate the new object */
384 obj = i915_gem_alloc_object(dev, size); 405 obj = i915_gem_object_create(dev, size);
385 if (obj == NULL) 406 if (IS_ERR(obj))
386 return -ENOMEM; 407 return PTR_ERR(obj);
387 408
388 ret = drm_gem_handle_create(file, &obj->base, &handle); 409 ret = drm_gem_handle_create(file, &obj->base, &handle);
389 /* drop reference from allocate - handle holds it now */ 410 /* drop reference from allocate - handle holds it now */
@@ -409,6 +430,9 @@ i915_gem_dumb_create(struct drm_file *file,
409 430
410/** 431/**
411 * Creates a new mm object and returns a handle to it. 432 * Creates a new mm object and returns a handle to it.
433 * @dev: drm device pointer
434 * @data: ioctl data blob
435 * @file: drm file pointer
412 */ 436 */
413int 437int
414i915_gem_create_ioctl(struct drm_device *dev, void *data, 438i915_gem_create_ioctl(struct drm_device *dev, void *data,
@@ -484,7 +508,7 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
484 508
485 *needs_clflush = 0; 509 *needs_clflush = 0;
486 510
487 if (WARN_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0)) 511 if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
488 return -EINVAL; 512 return -EINVAL;
489 513
490 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) { 514 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
@@ -585,6 +609,142 @@ shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
585 return ret ? - EFAULT : 0; 609 return ret ? - EFAULT : 0;
586} 610}
587 611
612static inline unsigned long
613slow_user_access(struct io_mapping *mapping,
614 uint64_t page_base, int page_offset,
615 char __user *user_data,
616 unsigned long length, bool pwrite)
617{
618 void __iomem *ioaddr;
619 void *vaddr;
620 uint64_t unwritten;
621
622 ioaddr = io_mapping_map_wc(mapping, page_base, PAGE_SIZE);
623 /* We can use the cpu mem copy function because this is X86. */
624 vaddr = (void __force *)ioaddr + page_offset;
625 if (pwrite)
626 unwritten = __copy_from_user(vaddr, user_data, length);
627 else
628 unwritten = __copy_to_user(user_data, vaddr, length);
629
630 io_mapping_unmap(ioaddr);
631 return unwritten;
632}
633
634static int
635i915_gem_gtt_pread(struct drm_device *dev,
636 struct drm_i915_gem_object *obj, uint64_t size,
637 uint64_t data_offset, uint64_t data_ptr)
638{
639 struct drm_i915_private *dev_priv = to_i915(dev);
640 struct i915_ggtt *ggtt = &dev_priv->ggtt;
641 struct drm_mm_node node;
642 char __user *user_data;
643 uint64_t remain;
644 uint64_t offset;
645 int ret;
646
647 ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
648 if (ret) {
649 ret = insert_mappable_node(dev_priv, &node, PAGE_SIZE);
650 if (ret)
651 goto out;
652
653 ret = i915_gem_object_get_pages(obj);
654 if (ret) {
655 remove_mappable_node(&node);
656 goto out;
657 }
658
659 i915_gem_object_pin_pages(obj);
660 } else {
661 node.start = i915_gem_obj_ggtt_offset(obj);
662 node.allocated = false;
663 ret = i915_gem_object_put_fence(obj);
664 if (ret)
665 goto out_unpin;
666 }
667
668 ret = i915_gem_object_set_to_gtt_domain(obj, false);
669 if (ret)
670 goto out_unpin;
671
672 user_data = u64_to_user_ptr(data_ptr);
673 remain = size;
674 offset = data_offset;
675
676 mutex_unlock(&dev->struct_mutex);
677 if (likely(!i915.prefault_disable)) {
678 ret = fault_in_multipages_writeable(user_data, remain);
679 if (ret) {
680 mutex_lock(&dev->struct_mutex);
681 goto out_unpin;
682 }
683 }
684
685 while (remain > 0) {
686 /* Operation in this page
687 *
688 * page_base = page offset within aperture
689 * page_offset = offset within page
690 * page_length = bytes to copy for this page
691 */
692 u32 page_base = node.start;
693 unsigned page_offset = offset_in_page(offset);
694 unsigned page_length = PAGE_SIZE - page_offset;
695 page_length = remain < page_length ? remain : page_length;
696 if (node.allocated) {
697 wmb();
698 ggtt->base.insert_page(&ggtt->base,
699 i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
700 node.start,
701 I915_CACHE_NONE, 0);
702 wmb();
703 } else {
704 page_base += offset & PAGE_MASK;
705 }
706 /* This is a slow read/write as it tries to read from
 707 * and write to user memory which may result in page
708 * faults, and so we cannot perform this under struct_mutex.
709 */
710 if (slow_user_access(ggtt->mappable, page_base,
711 page_offset, user_data,
712 page_length, false)) {
713 ret = -EFAULT;
714 break;
715 }
716
717 remain -= page_length;
718 user_data += page_length;
719 offset += page_length;
720 }
721
722 mutex_lock(&dev->struct_mutex);
723 if (ret == 0 && (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
724 /* The user has modified the object whilst we tried
725 * reading from it, and we now have no idea what domain
726 * the pages should be in. As we have just been touching
727 * them directly, flush everything back to the GTT
728 * domain.
729 */
730 ret = i915_gem_object_set_to_gtt_domain(obj, false);
731 }
732
733out_unpin:
734 if (node.allocated) {
735 wmb();
736 ggtt->base.clear_range(&ggtt->base,
737 node.start, node.size,
738 true);
739 i915_gem_object_unpin_pages(obj);
740 remove_mappable_node(&node);
741 } else {
742 i915_gem_object_ggtt_unpin(obj);
743 }
744out:
745 return ret;
746}
747
588static int 748static int
589i915_gem_shmem_pread(struct drm_device *dev, 749i915_gem_shmem_pread(struct drm_device *dev,
590 struct drm_i915_gem_object *obj, 750 struct drm_i915_gem_object *obj,
@@ -600,6 +760,9 @@ i915_gem_shmem_pread(struct drm_device *dev,
600 int needs_clflush = 0; 760 int needs_clflush = 0;
601 struct sg_page_iter sg_iter; 761 struct sg_page_iter sg_iter;
602 762
763 if (!i915_gem_object_has_struct_page(obj))
764 return -ENODEV;
765
603 user_data = u64_to_user_ptr(args->data_ptr); 766 user_data = u64_to_user_ptr(args->data_ptr);
604 remain = args->size; 767 remain = args->size;
605 768
@@ -672,6 +835,9 @@ out:
672 835
673/** 836/**
674 * Reads data from the object referenced by handle. 837 * Reads data from the object referenced by handle.
838 * @dev: drm device pointer
839 * @data: ioctl data blob
840 * @file: drm file pointer
675 * 841 *
676 * On error, the contents of *data are undefined. 842 * On error, the contents of *data are undefined.
677 */ 843 */
@@ -708,18 +874,15 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
708 goto out; 874 goto out;
709 } 875 }
710 876
711 /* prime objects have no backing filp to GEM pread/pwrite
712 * pages from.
713 */
714 if (!obj->base.filp) {
715 ret = -EINVAL;
716 goto out;
717 }
718
719 trace_i915_gem_object_pread(obj, args->offset, args->size); 877 trace_i915_gem_object_pread(obj, args->offset, args->size);
720 878
721 ret = i915_gem_shmem_pread(dev, obj, args, file); 879 ret = i915_gem_shmem_pread(dev, obj, args, file);
722 880
881 /* pread for non shmem backed objects */
882 if (ret == -EFAULT || ret == -ENODEV)
883 ret = i915_gem_gtt_pread(dev, obj, args->size,
884 args->offset, args->data_ptr);
885
723out: 886out:
724 drm_gem_object_unreference(&obj->base); 887 drm_gem_object_unreference(&obj->base);
725unlock: 888unlock:
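
The hunk above drops the old filp check from i915_gem_pread_ioctl() and instead always tries the shmem path first, falling back to the new GTT path only for the errors it signals (-EFAULT for an unfaultable user pointer, -ENODEV for an object without struct pages). A small sketch of that error-driven dispatch; the two helpers are stand-ins, not the driver's functions.

#include <errno.h>
#include <stdio.h>

/* stand-in for i915_gem_shmem_pread(): fails without struct-page backing */
static int shmem_read(int has_struct_page)
{
	return has_struct_page ? 0 : -ENODEV;
}

/* stand-in for i915_gem_gtt_pread(): reads through the GGTT aperture */
static int gtt_read(void)
{
	return 0;
}

static int do_pread(int has_struct_page)
{
	int ret = shmem_read(has_struct_page);

	/* pread for non shmem backed objects */
	if (ret == -EFAULT || ret == -ENODEV)
		ret = gtt_read();

	return ret;
}

int main(void)
{
	printf("shmem-backed object: %d\n", do_pread(1));
	printf("stolen/phys object : %d\n", do_pread(0));
	return 0;
}
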
@@ -753,60 +916,99 @@ fast_user_write(struct io_mapping *mapping,
753/** 916/**
754 * This is the fast pwrite path, where we copy the data directly from the 917 * This is the fast pwrite path, where we copy the data directly from the
755 * user into the GTT, uncached. 918 * user into the GTT, uncached.
919 * @dev: drm device pointer
920 * @obj: i915 gem object
921 * @args: pwrite arguments structure
922 * @file: drm file pointer
756 */ 923 */
757static int 924static int
758i915_gem_gtt_pwrite_fast(struct drm_device *dev, 925i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
759 struct drm_i915_gem_object *obj, 926 struct drm_i915_gem_object *obj,
760 struct drm_i915_gem_pwrite *args, 927 struct drm_i915_gem_pwrite *args,
761 struct drm_file *file) 928 struct drm_file *file)
762{ 929{
763 struct drm_i915_private *dev_priv = to_i915(dev); 930 struct i915_ggtt *ggtt = &i915->ggtt;
764 struct i915_ggtt *ggtt = &dev_priv->ggtt; 931 struct drm_device *dev = obj->base.dev;
765 ssize_t remain; 932 struct drm_mm_node node;
766 loff_t offset, page_base; 933 uint64_t remain, offset;
767 char __user *user_data; 934 char __user *user_data;
768 int page_offset, page_length, ret; 935 int ret;
936 bool hit_slow_path = false;
937
938 if (obj->tiling_mode != I915_TILING_NONE)
939 return -EFAULT;
769 940
770 ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK); 941 ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
771 if (ret) 942 if (ret) {
772 goto out; 943 ret = insert_mappable_node(i915, &node, PAGE_SIZE);
944 if (ret)
945 goto out;
946
947 ret = i915_gem_object_get_pages(obj);
948 if (ret) {
949 remove_mappable_node(&node);
950 goto out;
951 }
952
953 i915_gem_object_pin_pages(obj);
954 } else {
955 node.start = i915_gem_obj_ggtt_offset(obj);
956 node.allocated = false;
957 ret = i915_gem_object_put_fence(obj);
958 if (ret)
959 goto out_unpin;
960 }
773 961
774 ret = i915_gem_object_set_to_gtt_domain(obj, true); 962 ret = i915_gem_object_set_to_gtt_domain(obj, true);
775 if (ret) 963 if (ret)
776 goto out_unpin; 964 goto out_unpin;
777 965
778 ret = i915_gem_object_put_fence(obj); 966 intel_fb_obj_invalidate(obj, ORIGIN_GTT);
779 if (ret) 967 obj->dirty = true;
780 goto out_unpin;
781 968
782 user_data = u64_to_user_ptr(args->data_ptr); 969 user_data = u64_to_user_ptr(args->data_ptr);
970 offset = args->offset;
783 remain = args->size; 971 remain = args->size;
784 972 while (remain) {
785 offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
786
787 intel_fb_obj_invalidate(obj, ORIGIN_GTT);
788
789 while (remain > 0) {
790 /* Operation in this page 973 /* Operation in this page
791 * 974 *
792 * page_base = page offset within aperture 975 * page_base = page offset within aperture
793 * page_offset = offset within page 976 * page_offset = offset within page
794 * page_length = bytes to copy for this page 977 * page_length = bytes to copy for this page
795 */ 978 */
796 page_base = offset & PAGE_MASK; 979 u32 page_base = node.start;
797 page_offset = offset_in_page(offset); 980 unsigned page_offset = offset_in_page(offset);
798 page_length = remain; 981 unsigned page_length = PAGE_SIZE - page_offset;
799 if ((page_offset + remain) > PAGE_SIZE) 982 page_length = remain < page_length ? remain : page_length;
800 page_length = PAGE_SIZE - page_offset; 983 if (node.allocated) {
801 984 wmb(); /* flush the write before we modify the GGTT */
985 ggtt->base.insert_page(&ggtt->base,
986 i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
987 node.start, I915_CACHE_NONE, 0);
988 wmb(); /* flush modifications to the GGTT (insert_page) */
989 } else {
990 page_base += offset & PAGE_MASK;
991 }
802 /* If we get a fault while copying data, then (presumably) our 992 /* If we get a fault while copying data, then (presumably) our
803 * source page isn't available. Return the error and we'll 993 * source page isn't available. Return the error and we'll
804 * retry in the slow path. 994 * retry in the slow path.
995 * If the object is non-shmem backed, we retry with the
996 * path that handles page faults.
805 */ 997 */
806 if (fast_user_write(ggtt->mappable, page_base, 998 if (fast_user_write(ggtt->mappable, page_base,
807 page_offset, user_data, page_length)) { 999 page_offset, user_data, page_length)) {
808 ret = -EFAULT; 1000 hit_slow_path = true;
809 goto out_flush; 1001 mutex_unlock(&dev->struct_mutex);
1002 if (slow_user_access(ggtt->mappable,
1003 page_base,
1004 page_offset, user_data,
1005 page_length, true)) {
1006 ret = -EFAULT;
1007 mutex_lock(&dev->struct_mutex);
1008 goto out_flush;
1009 }
1010
1011 mutex_lock(&dev->struct_mutex);
810 } 1012 }
811 1013
812 remain -= page_length; 1014 remain -= page_length;
@@ -815,9 +1017,31 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
815 } 1017 }
816 1018
817out_flush: 1019out_flush:
1020 if (hit_slow_path) {
1021 if (ret == 0 &&
1022 (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
1023 /* The user has modified the object whilst we tried
1024 * reading from it, and we now have no idea what domain
1025 * the pages should be in. As we have just been touching
1026 * them directly, flush everything back to the GTT
1027 * domain.
1028 */
1029 ret = i915_gem_object_set_to_gtt_domain(obj, false);
1030 }
1031 }
1032
818 intel_fb_obj_flush(obj, false, ORIGIN_GTT); 1033 intel_fb_obj_flush(obj, false, ORIGIN_GTT);
819out_unpin: 1034out_unpin:
820 i915_gem_object_ggtt_unpin(obj); 1035 if (node.allocated) {
1036 wmb();
1037 ggtt->base.clear_range(&ggtt->base,
1038 node.start, node.size,
1039 true);
1040 i915_gem_object_unpin_pages(obj);
1041 remove_mappable_node(&node);
1042 } else {
1043 i915_gem_object_ggtt_unpin(obj);
1044 }
821out: 1045out:
822 return ret; 1046 return ret;
823} 1047}
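
When fast_user_write() faults, the rewritten i915_gem_gtt_pwrite_fast() above now drops struct_mutex, redoes the copy with the fault-capable slow_user_access(), then retakes the lock and remembers that the slow path ran so the GTT domain can be fixed up at the end. A loose userspace analogue of that drop-lock, retry, relock shape, using pthreads and hypothetical helper names (compile with -pthread).

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t struct_mutex = PTHREAD_MUTEX_INITIALIZER;

/* pretend non-blocking copy: not allowed to fault or sleep */
static bool fast_copy(char *dst, const char *src, size_t len, bool may_fault)
{
	if (may_fault)
		return false;	/* simulate the fast write faulting */
	memcpy(dst, src, len);
	return true;
}

/* blocking copy: may fault and sleep, so no lock may be held */
static void slow_copy(char *dst, const char *src, size_t len)
{
	memcpy(dst, src, len);
}

static bool write_chunk(char *dst, const char *src, size_t len, bool may_fault)
{
	bool hit_slow_path = false;

	pthread_mutex_lock(&struct_mutex);
	if (!fast_copy(dst, src, len, may_fault)) {
		hit_slow_path = true;
		pthread_mutex_unlock(&struct_mutex);
		slow_copy(dst, src, len);	/* faulting happens unlocked */
		pthread_mutex_lock(&struct_mutex);
	}
	pthread_mutex_unlock(&struct_mutex);

	return hit_slow_path;	/* caller fixes up domains if true */
}

int main(void)
{
	char dst[16] = "";

	printf("slow path used: %d\n", write_chunk(dst, "hello", 6, true));
	printf("dst = %s\n", dst);
	return 0;
}
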
@@ -1006,7 +1230,7 @@ out:
1006 } 1230 }
1007 1231
1008 if (needs_clflush_after) 1232 if (needs_clflush_after)
1009 i915_gem_chipset_flush(dev); 1233 i915_gem_chipset_flush(to_i915(dev));
1010 else 1234 else
1011 obj->cache_dirty = true; 1235 obj->cache_dirty = true;
1012 1236
@@ -1016,6 +1240,9 @@ out:
1016 1240
1017/** 1241/**
1018 * Writes data to the object referenced by handle. 1242 * Writes data to the object referenced by handle.
1243 * @dev: drm device
1244 * @data: ioctl data blob
1245 * @file: drm file
1019 * 1246 *
1020 * On error, the contents of the buffer that were to be modified are undefined. 1247 * On error, the contents of the buffer that were to be modified are undefined.
1021 */ 1248 */
@@ -1023,7 +1250,7 @@ int
1023i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, 1250i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1024 struct drm_file *file) 1251 struct drm_file *file)
1025{ 1252{
1026 struct drm_i915_private *dev_priv = dev->dev_private; 1253 struct drm_i915_private *dev_priv = to_i915(dev);
1027 struct drm_i915_gem_pwrite *args = data; 1254 struct drm_i915_gem_pwrite *args = data;
1028 struct drm_i915_gem_object *obj; 1255 struct drm_i915_gem_object *obj;
1029 int ret; 1256 int ret;
@@ -1062,14 +1289,6 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1062 goto out; 1289 goto out;
1063 } 1290 }
1064 1291
1065 /* prime objects have no backing filp to GEM pread/pwrite
1066 * pages from.
1067 */
1068 if (!obj->base.filp) {
1069 ret = -EINVAL;
1070 goto out;
1071 }
1072
1073 trace_i915_gem_object_pwrite(obj, args->offset, args->size); 1292 trace_i915_gem_object_pwrite(obj, args->offset, args->size);
1074 1293
1075 ret = -EFAULT; 1294 ret = -EFAULT;
@@ -1079,20 +1298,21 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1079 * pread/pwrite currently are reading and writing from the CPU 1298 * pread/pwrite currently are reading and writing from the CPU
1080 * perspective, requiring manual detiling by the client. 1299 * perspective, requiring manual detiling by the client.
1081 */ 1300 */
1082 if (obj->tiling_mode == I915_TILING_NONE && 1301 if (!i915_gem_object_has_struct_page(obj) ||
1083 obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
1084 cpu_write_needs_clflush(obj)) { 1302 cpu_write_needs_clflush(obj)) {
1085 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file); 1303 ret = i915_gem_gtt_pwrite_fast(dev_priv, obj, args, file);
1086 /* Note that the gtt paths might fail with non-page-backed user 1304 /* Note that the gtt paths might fail with non-page-backed user
1087 * pointers (e.g. gtt mappings when moving data between 1305 * pointers (e.g. gtt mappings when moving data between
1088 * textures). Fallback to the shmem path in that case. */ 1306 * textures). Fallback to the shmem path in that case. */
1089 } 1307 }
1090 1308
1091 if (ret == -EFAULT || ret == -ENOSPC) { 1309 if (ret == -EFAULT) {
1092 if (obj->phys_handle) 1310 if (obj->phys_handle)
1093 ret = i915_gem_phys_pwrite(obj, args, file); 1311 ret = i915_gem_phys_pwrite(obj, args, file);
1094 else 1312 else if (i915_gem_object_has_struct_page(obj))
1095 ret = i915_gem_shmem_pwrite(dev, obj, args, file); 1313 ret = i915_gem_shmem_pwrite(dev, obj, args, file);
1314 else
1315 ret = -ENODEV;
1096 } 1316 }
1097 1317
1098out: 1318out:
@@ -1123,17 +1343,6 @@ i915_gem_check_wedge(unsigned reset_counter, bool interruptible)
1123 return 0; 1343 return 0;
1124} 1344}
1125 1345
1126static void fake_irq(unsigned long data)
1127{
1128 wake_up_process((struct task_struct *)data);
1129}
1130
1131static bool missed_irq(struct drm_i915_private *dev_priv,
1132 struct intel_engine_cs *engine)
1133{
1134 return test_bit(engine->id, &dev_priv->gpu_error.missed_irq_rings);
1135}
1136
1137static unsigned long local_clock_us(unsigned *cpu) 1346static unsigned long local_clock_us(unsigned *cpu)
1138{ 1347{
1139 unsigned long t; 1348 unsigned long t;
@@ -1166,9 +1375,9 @@ static bool busywait_stop(unsigned long timeout, unsigned cpu)
1166 return this_cpu != cpu; 1375 return this_cpu != cpu;
1167} 1376}
1168 1377
1169static int __i915_spin_request(struct drm_i915_gem_request *req, int state) 1378bool __i915_spin_request(const struct drm_i915_gem_request *req,
1379 int state, unsigned long timeout_us)
1170{ 1380{
1171 unsigned long timeout;
1172 unsigned cpu; 1381 unsigned cpu;
1173 1382
1174 /* When waiting for high frequency requests, e.g. during synchronous 1383 /* When waiting for high frequency requests, e.g. during synchronous
@@ -1181,31 +1390,21 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
1181 * takes to sleep on a request, on the order of a microsecond. 1390 * takes to sleep on a request, on the order of a microsecond.
1182 */ 1391 */
1183 1392
1184 if (req->engine->irq_refcount) 1393 timeout_us += local_clock_us(&cpu);
1185 return -EBUSY; 1394 do {
1186 1395 if (i915_gem_request_completed(req))
1187 /* Only spin if we know the GPU is processing this request */ 1396 return true;
1188 if (!i915_gem_request_started(req, true))
1189 return -EAGAIN;
1190
1191 timeout = local_clock_us(&cpu) + 5;
1192 while (!need_resched()) {
1193 if (i915_gem_request_completed(req, true))
1194 return 0;
1195 1397
1196 if (signal_pending_state(state, current)) 1398 if (signal_pending_state(state, current))
1197 break; 1399 break;
1198 1400
1199 if (busywait_stop(timeout, cpu)) 1401 if (busywait_stop(timeout_us, cpu))
1200 break; 1402 break;
1201 1403
1202 cpu_relax_lowlatency(); 1404 cpu_relax_lowlatency();
1203 } 1405 } while (!need_resched());
1204 1406
1205 if (i915_gem_request_completed(req, false)) 1407 return false;
1206 return 0;
1207
1208 return -EAGAIN;
1209} 1408}
1210 1409
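
The rewritten spin above polls for at most a caller-supplied number of microseconds measured against local_clock_us(), bailing out early on completion, a pending signal, or need_resched(). A stripped-down userspace sketch of the same bounded poll; clock_us() stands in for local_clock_us() and the signal/scheduler checks are omitted.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static unsigned long clock_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000000ul + ts.tv_nsec / 1000;
}

static bool spin_request(atomic_bool *completed, unsigned long timeout_us)
{
	unsigned long timeout = clock_us() + timeout_us;

	do {
		if (atomic_load(completed))
			return true;	/* finished while spinning */
	} while ((long)(clock_us() - timeout) < 0);

	return false;			/* give up, fall back to sleeping */
}

int main(void)
{
	atomic_bool done = false;

	printf("completed within 5us: %d\n", spin_request(&done, 5));
	return 0;
}
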
1211/** 1410/**
@@ -1213,6 +1412,7 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
1213 * @req: duh! 1412 * @req: duh!
1214 * @interruptible: do an interruptible wait (normally yes) 1413 * @interruptible: do an interruptible wait (normally yes)
1215 * @timeout: in - how long to wait (NULL forever); out - how much time remaining 1414 * @timeout: in - how long to wait (NULL forever); out - how much time remaining
1415 * @rps: RPS client
1216 * 1416 *
1217 * Note: It is of utmost importance that the passed in seqno and reset_counter 1417 * Note: It is of utmost importance that the passed in seqno and reset_counter
1218 * values have been read by the caller in an smp safe manner. Where read-side 1418 * values have been read by the caller in an smp safe manner. Where read-side
@@ -1229,26 +1429,22 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
1229 s64 *timeout, 1429 s64 *timeout,
1230 struct intel_rps_client *rps) 1430 struct intel_rps_client *rps)
1231{ 1431{
1232 struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
1233 struct drm_device *dev = engine->dev;
1234 struct drm_i915_private *dev_priv = dev->dev_private;
1235 const bool irq_test_in_progress =
1236 ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_engine_flag(engine);
1237 int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE; 1432 int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
1238 DEFINE_WAIT(wait); 1433 DEFINE_WAIT(reset);
1239 unsigned long timeout_expire; 1434 struct intel_wait wait;
1435 unsigned long timeout_remain;
1240 s64 before = 0; /* Only to silence a compiler warning. */ 1436 s64 before = 0; /* Only to silence a compiler warning. */
1241 int ret; 1437 int ret = 0;
1242 1438
1243 WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled"); 1439 might_sleep();
1244 1440
1245 if (list_empty(&req->list)) 1441 if (list_empty(&req->list))
1246 return 0; 1442 return 0;
1247 1443
1248 if (i915_gem_request_completed(req, true)) 1444 if (i915_gem_request_completed(req))
1249 return 0; 1445 return 0;
1250 1446
1251 timeout_expire = 0; 1447 timeout_remain = MAX_SCHEDULE_TIMEOUT;
1252 if (timeout) { 1448 if (timeout) {
1253 if (WARN_ON(*timeout < 0)) 1449 if (WARN_ON(*timeout < 0))
1254 return -EINVAL; 1450 return -EINVAL;
@@ -1256,7 +1452,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
1256 if (*timeout == 0) 1452 if (*timeout == 0)
1257 return -ETIME; 1453 return -ETIME;
1258 1454
1259 timeout_expire = jiffies + nsecs_to_jiffies_timeout(*timeout); 1455 timeout_remain = nsecs_to_jiffies_timeout(*timeout);
1260 1456
1261 /* 1457 /*
1262 * Record current time in case interrupted by signal, or wedged. 1458 * Record current time in case interrupted by signal, or wedged.
@@ -1264,75 +1460,76 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
1264 before = ktime_get_raw_ns(); 1460 before = ktime_get_raw_ns();
1265 } 1461 }
1266 1462
1267 if (INTEL_INFO(dev_priv)->gen >= 6)
1268 gen6_rps_boost(dev_priv, rps, req->emitted_jiffies);
1269
1270 trace_i915_gem_request_wait_begin(req); 1463 trace_i915_gem_request_wait_begin(req);
1271 1464
1272 /* Optimistic spin for the next jiffie before touching IRQs */ 1465 /* This client is about to stall waiting for the GPU. In many cases
1273 ret = __i915_spin_request(req, state); 1466 * this is undesirable and limits the throughput of the system, as
1274 if (ret == 0) 1467 * many clients cannot continue processing user input/output whilst
1275 goto out; 1468 * blocked. RPS autotuning may take tens of milliseconds to respond
1276 1469 * to the GPU load and thus incurs additional latency for the client.
1277 if (!irq_test_in_progress && WARN_ON(!engine->irq_get(engine))) { 1470 * We can circumvent that by promoting the GPU frequency to maximum
1278 ret = -ENODEV; 1471 * before we wait. This makes the GPU throttle up much more quickly
1279 goto out; 1472 * (good for benchmarks and user experience, e.g. window animations),
1280 } 1473 * but at a cost of spending more power processing the workload
1474 * (bad for battery). Not all clients even want their results
1475 * immediately and for them we should just let the GPU select its own
1476 * frequency to maximise efficiency. To prevent a single client from
1477 * forcing the clocks too high for the whole system, we only allow
1478 * each client to waitboost once in a busy period.
1479 */
1480 if (INTEL_INFO(req->i915)->gen >= 6)
1481 gen6_rps_boost(req->i915, rps, req->emitted_jiffies);
1281 1482
1282 for (;;) { 1483 /* Optimistic spin for the next ~jiffie before touching IRQs */
1283 struct timer_list timer; 1484 if (i915_spin_request(req, state, 5))
1485 goto complete;
1284 1486
1285 prepare_to_wait(&engine->irq_queue, &wait, state); 1487 set_current_state(state);
1488 add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
1286 1489
1287 /* We need to check whether any gpu reset happened in between 1490 intel_wait_init(&wait, req->seqno);
1288 * the request being submitted and now. If a reset has occurred, 1491 if (intel_engine_add_wait(req->engine, &wait))
1289 * the request is effectively complete (we either are in the 1492 /* In order to check that we haven't missed the interrupt
1290 * process of or have discarded the rendering and completely 1493 * as we enabled it, we need to kick ourselves to do a
1291 * reset the GPU. The results of the request are lost and we 1494 * coherent check on the seqno before we sleep.
1292 * are free to continue on with the original operation.
1293 */ 1495 */
1294 if (req->reset_counter != i915_reset_counter(&dev_priv->gpu_error)) { 1496 goto wakeup;
1295 ret = 0;
1296 break;
1297 }
1298
1299 if (i915_gem_request_completed(req, false)) {
1300 ret = 0;
1301 break;
1302 }
1303 1497
1498 for (;;) {
1304 if (signal_pending_state(state, current)) { 1499 if (signal_pending_state(state, current)) {
1305 ret = -ERESTARTSYS; 1500 ret = -ERESTARTSYS;
1306 break; 1501 break;
1307 } 1502 }
1308 1503
1309 if (timeout && time_after_eq(jiffies, timeout_expire)) { 1504 timeout_remain = io_schedule_timeout(timeout_remain);
1505 if (timeout_remain == 0) {
1310 ret = -ETIME; 1506 ret = -ETIME;
1311 break; 1507 break;
1312 } 1508 }
1313 1509
1314 timer.function = NULL; 1510 if (intel_wait_complete(&wait))
1315 if (timeout || missed_irq(dev_priv, engine)) { 1511 break;
1316 unsigned long expire;
1317 1512
1318 setup_timer_on_stack(&timer, fake_irq, (unsigned long)current); 1513 set_current_state(state);
1319 expire = missed_irq(dev_priv, engine) ? jiffies + 1 : timeout_expire;
1320 mod_timer(&timer, expire);
1321 }
1322 1514
1323 io_schedule(); 1515wakeup:
1516 /* Carefully check if the request is complete, giving time
1517 * for the seqno to be visible following the interrupt.
1518 * We also have to check in case we are kicked by the GPU
1519 * reset in order to drop the struct_mutex.
1520 */
1521 if (__i915_request_irq_complete(req))
1522 break;
1324 1523
1325 if (timer.function) { 1524 /* Only spin if we know the GPU is processing this request */
1326 del_singleshot_timer_sync(&timer); 1525 if (i915_spin_request(req, state, 2))
1327 destroy_timer_on_stack(&timer); 1526 break;
1328 }
1329 } 1527 }
1330 if (!irq_test_in_progress) 1528 remove_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
1331 engine->irq_put(engine);
1332 1529
1333 finish_wait(&engine->irq_queue, &wait); 1530 intel_engine_remove_wait(req->engine, &wait);
1334 1531 __set_current_state(TASK_RUNNING);
1335out: 1532complete:
1336 trace_i915_gem_request_wait_end(req); 1533 trace_i915_gem_request_wait_end(req);
1337 1534
1338 if (timeout) { 1535 if (timeout) {
@@ -1351,6 +1548,22 @@ out:
1351 *timeout = 0; 1548 *timeout = 0;
1352 } 1549 }
1353 1550
1551 if (rps && req->seqno == req->engine->last_submitted_seqno) {
1552 /* The GPU is now idle and this client has stalled.
1553 * Since no other client has submitted a request in the
1554 * meantime, assume that this client is the only one
1555 * supplying work to the GPU but is unable to keep that
1556 * work supplied because it is waiting. Since the GPU is
1557 * then never kept fully busy, RPS autoclocking will
1558 * keep the clocks relatively low, causing further delays.
1559 * Compensate by giving the synchronous client credit for
1560 * a waitboost next time.
1561 */
1562 spin_lock(&req->i915->rps.client_lock);
1563 list_del_init(&rps->link);
1564 spin_unlock(&req->i915->rps.client_lock);
1565 }
1566
1354 return ret; 1567 return ret;
1355} 1568}
1356 1569
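
The reworked __i915_wait_request() above registers an intel_wait on the engine and then loops: sleep via io_schedule_timeout(), and on every wake-up re-check completion, pending signals and the remaining timeout. A loose userspace analogue of that shape, with a condition variable standing in for the breadcrumb wait-queue; the names are illustrative and the signal/GPU-reset handling is left out (compile with -pthread).

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct request {
	pthread_mutex_t lock;
	pthread_cond_t irq;	/* the "interrupt" that kicks waiters */
	bool completed;
};

static int wait_request(struct request *req, unsigned timeout_ms)
{
	struct timespec deadline;
	int ret = 0;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += timeout_ms / 1000;
	deadline.tv_nsec += (long)(timeout_ms % 1000) * 1000000L;
	if (deadline.tv_nsec >= 1000000000L) {
		deadline.tv_sec++;
		deadline.tv_nsec -= 1000000000L;
	}

	pthread_mutex_lock(&req->lock);
	while (!req->completed) {	/* always re-check after waking */
		if (pthread_cond_timedwait(&req->irq, &req->lock, &deadline)) {
			ret = -1;	/* timed out */
			break;
		}
	}
	pthread_mutex_unlock(&req->lock);

	return ret;
}

int main(void)
{
	struct request req = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.irq = PTHREAD_COND_INITIALIZER,
		.completed = false,
	};

	printf("wait returned %d (expect timeout)\n", wait_request(&req, 10));
	return 0;
}
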
@@ -1413,6 +1626,13 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
1413 list_del_init(&request->list); 1626 list_del_init(&request->list);
1414 i915_gem_request_remove_from_client(request); 1627 i915_gem_request_remove_from_client(request);
1415 1628
1629 if (request->previous_context) {
1630 if (i915.enable_execlists)
1631 intel_lr_context_unpin(request->previous_context,
1632 request->engine);
1633 }
1634
1635 i915_gem_context_unreference(request->ctx);
1416 i915_gem_request_unreference(request); 1636 i915_gem_request_unreference(request);
1417} 1637}
1418 1638
@@ -1422,7 +1642,7 @@ __i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
1422 struct intel_engine_cs *engine = req->engine; 1642 struct intel_engine_cs *engine = req->engine;
1423 struct drm_i915_gem_request *tmp; 1643 struct drm_i915_gem_request *tmp;
1424 1644
1425 lockdep_assert_held(&engine->dev->struct_mutex); 1645 lockdep_assert_held(&engine->i915->drm.struct_mutex);
1426 1646
1427 if (list_empty(&req->list)) 1647 if (list_empty(&req->list))
1428 return; 1648 return;
@@ -1440,6 +1660,7 @@ __i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
1440/** 1660/**
1441 * Waits for a request to be signaled, and cleans up the 1661 * Waits for a request to be signaled, and cleans up the
1442 * request and object lists appropriately for that event. 1662 * request and object lists appropriately for that event.
1663 * @req: request to wait on
1443 */ 1664 */
1444int 1665int
1445i915_wait_request(struct drm_i915_gem_request *req) 1666i915_wait_request(struct drm_i915_gem_request *req)
@@ -1450,14 +1671,14 @@ i915_wait_request(struct drm_i915_gem_request *req)
1450 1671
1451 interruptible = dev_priv->mm.interruptible; 1672 interruptible = dev_priv->mm.interruptible;
1452 1673
1453 BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex)); 1674 BUG_ON(!mutex_is_locked(&dev_priv->drm.struct_mutex));
1454 1675
1455 ret = __i915_wait_request(req, interruptible, NULL, NULL); 1676 ret = __i915_wait_request(req, interruptible, NULL, NULL);
1456 if (ret) 1677 if (ret)
1457 return ret; 1678 return ret;
1458 1679
1459 /* If the GPU hung, we want to keep the requests to find the guilty. */ 1680 /* If the GPU hung, we want to keep the requests to find the guilty. */
1460 if (req->reset_counter == i915_reset_counter(&dev_priv->gpu_error)) 1681 if (!i915_reset_in_progress(&dev_priv->gpu_error))
1461 __i915_gem_request_retire__upto(req); 1682 __i915_gem_request_retire__upto(req);
1462 1683
1463 return 0; 1684 return 0;
@@ -1466,6 +1687,8 @@ i915_wait_request(struct drm_i915_gem_request *req)
1466/** 1687/**
1467 * Ensures that all rendering to the object has completed and the object is 1688 * Ensures that all rendering to the object has completed and the object is
1468 * safe to unbind from the GTT or access from the CPU. 1689 * safe to unbind from the GTT or access from the CPU.
1690 * @obj: i915 gem object
1691 * @readonly: waiting for read access or write
1469 */ 1692 */
1470int 1693int
1471i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj, 1694i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
@@ -1516,7 +1739,7 @@ i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
1516 else if (obj->last_write_req == req) 1739 else if (obj->last_write_req == req)
1517 i915_gem_object_retire__write(obj); 1740 i915_gem_object_retire__write(obj);
1518 1741
1519 if (req->reset_counter == i915_reset_counter(&req->i915->gpu_error)) 1742 if (!i915_reset_in_progress(&req->i915->gpu_error))
1520 __i915_gem_request_retire__upto(req); 1743 __i915_gem_request_retire__upto(req);
1521} 1744}
1522 1745
@@ -1529,7 +1752,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
1529 bool readonly) 1752 bool readonly)
1530{ 1753{
1531 struct drm_device *dev = obj->base.dev; 1754 struct drm_device *dev = obj->base.dev;
1532 struct drm_i915_private *dev_priv = dev->dev_private; 1755 struct drm_i915_private *dev_priv = to_i915(dev);
1533 struct drm_i915_gem_request *requests[I915_NUM_ENGINES]; 1756 struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
1534 int ret, i, n = 0; 1757 int ret, i, n = 0;
1535 1758
@@ -1580,9 +1803,19 @@ static struct intel_rps_client *to_rps_client(struct drm_file *file)
1580 return &fpriv->rps; 1803 return &fpriv->rps;
1581} 1804}
1582 1805
1806static enum fb_op_origin
1807write_origin(struct drm_i915_gem_object *obj, unsigned domain)
1808{
1809 return domain == I915_GEM_DOMAIN_GTT && !obj->has_wc_mmap ?
1810 ORIGIN_GTT : ORIGIN_CPU;
1811}
1812
1583/** 1813/**
1584 * Called when user space prepares to use an object with the CPU, either 1814 * Called when user space prepares to use an object with the CPU, either
1585 * through the mmap ioctl's mapping or a GTT mapping. 1815 * through the mmap ioctl's mapping or a GTT mapping.
1816 * @dev: drm device
1817 * @data: ioctl data blob
1818 * @file: drm file
1586 */ 1819 */
1587int 1820int
1588i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, 1821i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
@@ -1633,9 +1866,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1633 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0); 1866 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1634 1867
1635 if (write_domain != 0) 1868 if (write_domain != 0)
1636 intel_fb_obj_invalidate(obj, 1869 intel_fb_obj_invalidate(obj, write_origin(obj, write_domain));
1637 write_domain == I915_GEM_DOMAIN_GTT ?
1638 ORIGIN_GTT : ORIGIN_CPU);
1639 1870
1640unref: 1871unref:
1641 drm_gem_object_unreference(&obj->base); 1872 drm_gem_object_unreference(&obj->base);
@@ -1646,6 +1877,9 @@ unlock:
1646 1877
1647/** 1878/**
1648 * Called when user space has done writes to this buffer 1879 * Called when user space has done writes to this buffer
1880 * @dev: drm device
1881 * @data: ioctl data blob
1882 * @file: drm file
1649 */ 1883 */
1650int 1884int
1651i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, 1885i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
@@ -1676,8 +1910,11 @@ unlock:
1676} 1910}
1677 1911
1678/** 1912/**
1679 * Maps the contents of an object, returning the address it is mapped 1913 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
1680 * into. 1914 * it is mapped to.
1915 * @dev: drm device
1916 * @data: ioctl data blob
1917 * @file: drm file
1681 * 1918 *
1682 * While the mapping holds a reference on the contents of the object, it doesn't 1919 * While the mapping holds a reference on the contents of the object, it doesn't
1683 * imply a ref on the object itself. 1920 * imply a ref on the object itself.
@@ -1736,6 +1973,9 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1736 else 1973 else
1737 addr = -ENOMEM; 1974 addr = -ENOMEM;
1738 up_write(&mm->mmap_sem); 1975 up_write(&mm->mmap_sem);
1976
1977 /* This may race, but that's ok, it only gets set */
1978 WRITE_ONCE(to_intel_bo(obj)->has_wc_mmap, true);
1739 } 1979 }
1740 drm_gem_object_unreference_unlocked(obj); 1980 drm_gem_object_unreference_unlocked(obj);
1741 if (IS_ERR((void *)addr)) 1981 if (IS_ERR((void *)addr))
@@ -1982,7 +2222,7 @@ i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1982 return size; 2222 return size;
1983 2223
1984 /* Previous chips need a power-of-two fence region when tiling */ 2224 /* Previous chips need a power-of-two fence region when tiling */
1985 if (INTEL_INFO(dev)->gen == 3) 2225 if (IS_GEN3(dev))
1986 gtt_size = 1024*1024; 2226 gtt_size = 1024*1024;
1987 else 2227 else
1988 gtt_size = 512*1024; 2228 gtt_size = 512*1024;
@@ -1995,7 +2235,10 @@ i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
1995 2235
1996/** 2236/**
1997 * i915_gem_get_gtt_alignment - return required GTT alignment for an object 2237 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
1998 * @obj: object to check 2238 * @dev: drm device
2239 * @size: object size
2240 * @tiling_mode: tiling mode
2241 * @fenced: is fenced alignment required or not
1999 * 2242 *
2000 * Return the required GTT alignment for an object, taking into account 2243 * Return the required GTT alignment for an object, taking into account
2001 * potential fence register mapping. 2244 * potential fence register mapping.
@@ -2021,7 +2264,7 @@ i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
2021 2264
2022static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj) 2265static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
2023{ 2266{
2024 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 2267 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2025 int ret; 2268 int ret;
2026 2269
2027 dev_priv->mm.shrinker_no_lock_stealing = true; 2270 dev_priv->mm.shrinker_no_lock_stealing = true;
@@ -2162,7 +2405,8 @@ i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
2162static void 2405static void
2163i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) 2406i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
2164{ 2407{
2165 struct sg_page_iter sg_iter; 2408 struct sgt_iter sgt_iter;
2409 struct page *page;
2166 int ret; 2410 int ret;
2167 2411
2168 BUG_ON(obj->madv == __I915_MADV_PURGED); 2412 BUG_ON(obj->madv == __I915_MADV_PURGED);
@@ -2184,9 +2428,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
2184 if (obj->madv == I915_MADV_DONTNEED) 2428 if (obj->madv == I915_MADV_DONTNEED)
2185 obj->dirty = 0; 2429 obj->dirty = 0;
2186 2430
2187 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { 2431 for_each_sgt_page(page, sgt_iter, obj->pages) {
2188 struct page *page = sg_page_iter_page(&sg_iter);
2189
2190 if (obj->dirty) 2432 if (obj->dirty)
2191 set_page_dirty(page); 2433 set_page_dirty(page);
2192 2434
@@ -2238,12 +2480,12 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
2238static int 2480static int
2239i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj) 2481i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2240{ 2482{
2241 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 2483 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2242 int page_count, i; 2484 int page_count, i;
2243 struct address_space *mapping; 2485 struct address_space *mapping;
2244 struct sg_table *st; 2486 struct sg_table *st;
2245 struct scatterlist *sg; 2487 struct scatterlist *sg;
2246 struct sg_page_iter sg_iter; 2488 struct sgt_iter sgt_iter;
2247 struct page *page; 2489 struct page *page;
2248 unsigned long last_pfn = 0; /* suppress gcc warning */ 2490 unsigned long last_pfn = 0; /* suppress gcc warning */
2249 int ret; 2491 int ret;
@@ -2340,8 +2582,8 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2340 2582
2341err_pages: 2583err_pages:
2342 sg_mark_end(sg); 2584 sg_mark_end(sg);
2343 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) 2585 for_each_sgt_page(page, sgt_iter, st)
2344 put_page(sg_page_iter_page(&sg_iter)); 2586 put_page(page);
2345 sg_free_table(st); 2587 sg_free_table(st);
2346 kfree(st); 2588 kfree(st);
2347 2589
@@ -2369,7 +2611,7 @@ err_pages:
2369int 2611int
2370i915_gem_object_get_pages(struct drm_i915_gem_object *obj) 2612i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2371{ 2613{
2372 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 2614 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2373 const struct drm_i915_gem_object_ops *ops = obj->ops; 2615 const struct drm_i915_gem_object_ops *ops = obj->ops;
2374 int ret; 2616 int ret;
2375 2617
@@ -2395,6 +2637,44 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2395 return 0; 2637 return 0;
2396} 2638}
2397 2639
2640/* The 'mapping' part of i915_gem_object_pin_map() below */
2641static void *i915_gem_object_map(const struct drm_i915_gem_object *obj)
2642{
2643 unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
2644 struct sg_table *sgt = obj->pages;
2645 struct sgt_iter sgt_iter;
2646 struct page *page;
2647 struct page *stack_pages[32];
2648 struct page **pages = stack_pages;
2649 unsigned long i = 0;
2650 void *addr;
2651
2652 /* A single page can always be kmapped */
2653 if (n_pages == 1)
2654 return kmap(sg_page(sgt->sgl));
2655
2656 if (n_pages > ARRAY_SIZE(stack_pages)) {
2657 /* Too big for stack -- allocate temporary array instead */
2658 pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
2659 if (!pages)
2660 return NULL;
2661 }
2662
2663 for_each_sgt_page(page, sgt_iter, sgt)
2664 pages[i++] = page;
2665
2666 /* Check that we have the expected number of pages */
2667 GEM_BUG_ON(i != n_pages);
2668
2669 addr = vmap(pages, n_pages, 0, PAGE_KERNEL);
2670
2671 if (pages != stack_pages)
2672 drm_free_large(pages);
2673
2674 return addr;
2675}
2676
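
The new i915_gem_object_map() above gathers the object's pages into an array before vmap(), using a small on-stack array for common object sizes and only heap-allocating via drm_malloc_gfp() for larger ones. The same stack-first, heap-fallback pattern in a generic runnable form, with plain malloc standing in for drm_malloc_gfp().

#include <stdio.h>
#include <stdlib.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static long sum_squares(unsigned long n)
{
	long stack_buf[32];
	long *buf = stack_buf;
	long total = 0;
	unsigned long i;

	if (n > ARRAY_SIZE(stack_buf)) {
		/* Too big for stack -- allocate temporary array instead */
		buf = malloc(n * sizeof(*buf));
		if (!buf)
			return -1;
	}

	for (i = 0; i < n; i++)
		buf[i] = (long)(i * i);

	for (i = 0; i < n; i++)
		total += buf[i];

	if (buf != stack_buf)
		free(buf);

	return total;
}

int main(void)
{
	printf("small: %ld, large: %ld\n", sum_squares(8), sum_squares(100));
	return 0;
}
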
2677/* get, pin, and map the pages of the object into kernel space */
2398void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj) 2678void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
2399{ 2679{
2400 int ret; 2680 int ret;
@@ -2407,29 +2687,9 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
2407 2687
2408 i915_gem_object_pin_pages(obj); 2688 i915_gem_object_pin_pages(obj);
2409 2689
2410 if (obj->mapping == NULL) { 2690 if (!obj->mapping) {
2411 struct page **pages; 2691 obj->mapping = i915_gem_object_map(obj);
2412 2692 if (!obj->mapping) {
2413 pages = NULL;
2414 if (obj->base.size == PAGE_SIZE)
2415 obj->mapping = kmap(sg_page(obj->pages->sgl));
2416 else
2417 pages = drm_malloc_gfp(obj->base.size >> PAGE_SHIFT,
2418 sizeof(*pages),
2419 GFP_TEMPORARY);
2420 if (pages != NULL) {
2421 struct sg_page_iter sg_iter;
2422 int n;
2423
2424 n = 0;
2425 for_each_sg_page(obj->pages->sgl, &sg_iter,
2426 obj->pages->nents, 0)
2427 pages[n++] = sg_page_iter_page(&sg_iter);
2428
2429 obj->mapping = vmap(pages, n, 0, PAGE_KERNEL);
2430 drm_free_large(pages);
2431 }
2432 if (obj->mapping == NULL) {
2433 i915_gem_object_unpin_pages(obj); 2693 i915_gem_object_unpin_pages(obj);
2434 return ERR_PTR(-ENOMEM); 2694 return ERR_PTR(-ENOMEM);
2435 } 2695 }
@@ -2502,9 +2762,8 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
2502} 2762}
2503 2763
2504static int 2764static int
2505i915_gem_init_seqno(struct drm_device *dev, u32 seqno) 2765i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
2506{ 2766{
2507 struct drm_i915_private *dev_priv = dev->dev_private;
2508 struct intel_engine_cs *engine; 2767 struct intel_engine_cs *engine;
2509 int ret; 2768 int ret;
2510 2769
@@ -2514,7 +2773,14 @@ i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
2514 if (ret) 2773 if (ret)
2515 return ret; 2774 return ret;
2516 } 2775 }
2517 i915_gem_retire_requests(dev); 2776 i915_gem_retire_requests(dev_priv);
2777
2778 /* If the seqno wraps around, we need to clear the breadcrumb rbtree */
2779 if (!i915_seqno_passed(seqno, dev_priv->next_seqno)) {
2780 while (intel_kick_waiters(dev_priv) ||
2781 intel_kick_signalers(dev_priv))
2782 yield();
2783 }
2518 2784
2519 /* Finally reset hw state */ 2785 /* Finally reset hw state */
2520 for_each_engine(engine, dev_priv) 2786 for_each_engine(engine, dev_priv)
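
The wrap handling above (kicking waiters and signalers until the breadcrumb trees drain) works because seqno ordering is evaluated with wrap-safe arithmetic, which is what i915_seqno_passed() amounts to: a signed-difference comparison of two u32 counters. A self-contained illustration of that comparison; the function name here is just for the example.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
	/* true if seq1 is at or after seq2, even across a u32 wrap */
	return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
	printf("%d\n", seqno_passed(5, 3));		/* 1 */
	printf("%d\n", seqno_passed(3, 5));		/* 0 */
	printf("%d\n", seqno_passed(2, 0xfffffffeu));	/* 1: wrapped */
	return 0;
}
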
@@ -2525,7 +2791,7 @@ i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
2525 2791
2526int i915_gem_set_seqno(struct drm_device *dev, u32 seqno) 2792int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
2527{ 2793{
2528 struct drm_i915_private *dev_priv = dev->dev_private; 2794 struct drm_i915_private *dev_priv = to_i915(dev);
2529 int ret; 2795 int ret;
2530 2796
2531 if (seqno == 0) 2797 if (seqno == 0)
@@ -2534,7 +2800,7 @@ int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
2534 /* HWS page needs to be set less than what we 2800 /* HWS page needs to be set less than what we
2535 * will inject to ring 2801 * will inject to ring
2536 */ 2802 */
2537 ret = i915_gem_init_seqno(dev, seqno - 1); 2803 ret = i915_gem_init_seqno(dev_priv, seqno - 1);
2538 if (ret) 2804 if (ret)
2539 return ret; 2805 return ret;
2540 2806
@@ -2550,13 +2816,11 @@ int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
2550} 2816}
2551 2817
2552int 2818int
2553i915_gem_get_seqno(struct drm_device *dev, u32 *seqno) 2819i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
2554{ 2820{
2555 struct drm_i915_private *dev_priv = dev->dev_private;
2556
2557 /* reserve 0 for non-seqno */ 2821 /* reserve 0 for non-seqno */
2558 if (dev_priv->next_seqno == 0) { 2822 if (dev_priv->next_seqno == 0) {
2559 int ret = i915_gem_init_seqno(dev, 0); 2823 int ret = i915_gem_init_seqno(dev_priv, 0);
2560 if (ret) 2824 if (ret)
2561 return ret; 2825 return ret;
2562 2826
@@ -2567,6 +2831,26 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
2567 return 0; 2831 return 0;
2568} 2832}
2569 2833
2834static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
2835{
2836 struct drm_i915_private *dev_priv = engine->i915;
2837
2838 dev_priv->gt.active_engines |= intel_engine_flag(engine);
2839 if (dev_priv->gt.awake)
2840 return;
2841
2842 intel_runtime_pm_get_noresume(dev_priv);
2843 dev_priv->gt.awake = true;
2844
2845 i915_update_gfx_val(dev_priv);
2846 if (INTEL_GEN(dev_priv) >= 6)
2847 gen6_rps_busy(dev_priv);
2848
2849 queue_delayed_work(dev_priv->wq,
2850 &dev_priv->gt.retire_work,
2851 round_jiffies_up_relative(HZ));
2852}
2853
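
The new i915_gem_mark_busy() above performs the idle-to-busy transition (taking a runtime-PM reference, notifying RPS, arming the retire worker) only once per busy period, with gt.awake guarding against repeats. A stripped-down sketch of that one-shot transition; the helpers are placeholders, not driver calls.

#include <stdbool.h>
#include <stdio.h>

struct gt {
	unsigned active_engines;
	bool awake;
};

/* placeholder for the expensive wake-up work done on the first request */
static void wake_hardware(void)
{
	puts("grab wakeref, notify RPS, arm retire worker");
}

static void mark_busy(struct gt *gt, unsigned engine_bit)
{
	gt->active_engines |= engine_bit;
	if (gt->awake)
		return;		/* already powered up and armed */

	gt->awake = true;
	wake_hardware();
}

int main(void)
{
	struct gt gt = { 0, false };

	mark_busy(&gt, 1 << 0);	/* first request: wakes the device */
	mark_busy(&gt, 1 << 1);	/* later requests: cheap flag update */
	return 0;
}
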
2570/* 2854/*
2571 * NB: This function is not allowed to fail. Doing so would mean the the 2855 * NB: This function is not allowed to fail. Doing so would mean the the
2572 * request is not being tracked for completion but the work itself is 2856 * request is not being tracked for completion but the work itself is
@@ -2577,16 +2861,15 @@ void __i915_add_request(struct drm_i915_gem_request *request,
2577 bool flush_caches) 2861 bool flush_caches)
2578{ 2862{
2579 struct intel_engine_cs *engine; 2863 struct intel_engine_cs *engine;
2580 struct drm_i915_private *dev_priv;
2581 struct intel_ringbuffer *ringbuf; 2864 struct intel_ringbuffer *ringbuf;
2582 u32 request_start; 2865 u32 request_start;
2866 u32 reserved_tail;
2583 int ret; 2867 int ret;
2584 2868
2585 if (WARN_ON(request == NULL)) 2869 if (WARN_ON(request == NULL))
2586 return; 2870 return;
2587 2871
2588 engine = request->engine; 2872 engine = request->engine;
2589 dev_priv = request->i915;
2590 ringbuf = request->ringbuf; 2873 ringbuf = request->ringbuf;
2591 2874
2592 /* 2875 /*
@@ -2594,9 +2877,10 @@ void __i915_add_request(struct drm_i915_gem_request *request,
2594 * should already have been reserved in the ring buffer. Let the ring 2877 * should already have been reserved in the ring buffer. Let the ring
2595 * know that it is time to use that space up. 2878 * know that it is time to use that space up.
2596 */ 2879 */
2597 intel_ring_reserved_space_use(ringbuf);
2598
2599 request_start = intel_ring_get_tail(ringbuf); 2880 request_start = intel_ring_get_tail(ringbuf);
2881 reserved_tail = request->reserved_space;
2882 request->reserved_space = 0;
2883
2600 /* 2884 /*
2601 * Emit any outstanding flushes - execbuf can fail to emit the flush 2885 * Emit any outstanding flushes - execbuf can fail to emit the flush
2602 * after having emitted the batchbuffer command. Hence we need to fix 2886 * after having emitted the batchbuffer command. Hence we need to fix
@@ -2651,56 +2935,42 @@ void __i915_add_request(struct drm_i915_gem_request *request,
2651 } 2935 }
2652 /* Not allowed to fail! */ 2936 /* Not allowed to fail! */
2653 WARN(ret, "emit|add_request failed: %d!\n", ret); 2937 WARN(ret, "emit|add_request failed: %d!\n", ret);
2654
2655 i915_queue_hangcheck(engine->dev);
2656
2657 queue_delayed_work(dev_priv->wq,
2658 &dev_priv->mm.retire_work,
2659 round_jiffies_up_relative(HZ));
2660 intel_mark_busy(dev_priv->dev);
2661
2662 /* Sanity check that the reserved size was large enough. */ 2938 /* Sanity check that the reserved size was large enough. */
2663 intel_ring_reserved_space_end(ringbuf); 2939 ret = intel_ring_get_tail(ringbuf) - request_start;
2940 if (ret < 0)
2941 ret += ringbuf->size;
2942 WARN_ONCE(ret > reserved_tail,
2943 "Not enough space reserved (%d bytes) "
2944 "for adding the request (%d bytes)\n",
2945 reserved_tail, ret);
2946
2947 i915_gem_mark_busy(engine);
2664} 2948}
2665 2949
2666static bool i915_context_is_banned(struct drm_i915_private *dev_priv, 2950static bool i915_context_is_banned(const struct i915_gem_context *ctx)
2667 const struct intel_context *ctx)
2668{ 2951{
2669 unsigned long elapsed; 2952 unsigned long elapsed;
2670 2953
2671 elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
2672
2673 if (ctx->hang_stats.banned) 2954 if (ctx->hang_stats.banned)
2674 return true; 2955 return true;
2675 2956
2957 elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
2676 if (ctx->hang_stats.ban_period_seconds && 2958 if (ctx->hang_stats.ban_period_seconds &&
2677 elapsed <= ctx->hang_stats.ban_period_seconds) { 2959 elapsed <= ctx->hang_stats.ban_period_seconds) {
2678 if (!i915_gem_context_is_default(ctx)) { 2960 DRM_DEBUG("context hanging too fast, banning!\n");
2679 DRM_DEBUG("context hanging too fast, banning!\n"); 2961 return true;
2680 return true;
2681 } else if (i915_stop_ring_allow_ban(dev_priv)) {
2682 if (i915_stop_ring_allow_warn(dev_priv))
2683 DRM_ERROR("gpu hanging too fast, banning!\n");
2684 return true;
2685 }
2686 } 2962 }
2687 2963
2688 return false; 2964 return false;
2689} 2965}
2690 2966
2691static void i915_set_reset_status(struct drm_i915_private *dev_priv, 2967static void i915_set_reset_status(struct i915_gem_context *ctx,
2692 struct intel_context *ctx,
2693 const bool guilty) 2968 const bool guilty)
2694{ 2969{
2695 struct i915_ctx_hang_stats *hs; 2970 struct i915_ctx_hang_stats *hs = &ctx->hang_stats;
2696
2697 if (WARN_ON(!ctx))
2698 return;
2699
2700 hs = &ctx->hang_stats;
2701 2971
2702 if (guilty) { 2972 if (guilty) {
2703 hs->banned = i915_context_is_banned(dev_priv, ctx); 2973 hs->banned = i915_context_is_banned(ctx);
2704 hs->batch_active++; 2974 hs->batch_active++;
2705 hs->guilty_ts = get_seconds(); 2975 hs->guilty_ts = get_seconds();
2706 } else { 2976 } else {
@@ -2712,27 +2982,15 @@ void i915_gem_request_free(struct kref *req_ref)
2712{ 2982{
2713 struct drm_i915_gem_request *req = container_of(req_ref, 2983 struct drm_i915_gem_request *req = container_of(req_ref,
2714 typeof(*req), ref); 2984 typeof(*req), ref);
2715 struct intel_context *ctx = req->ctx;
2716
2717 if (req->file_priv)
2718 i915_gem_request_remove_from_client(req);
2719
2720 if (ctx) {
2721 if (i915.enable_execlists && ctx != req->i915->kernel_context)
2722 intel_lr_context_unpin(ctx, req->engine);
2723
2724 i915_gem_context_unreference(ctx);
2725 }
2726
2727 kmem_cache_free(req->i915->requests, req); 2985 kmem_cache_free(req->i915->requests, req);
2728} 2986}
2729 2987
2730static inline int 2988static inline int
2731__i915_gem_request_alloc(struct intel_engine_cs *engine, 2989__i915_gem_request_alloc(struct intel_engine_cs *engine,
2732 struct intel_context *ctx, 2990 struct i915_gem_context *ctx,
2733 struct drm_i915_gem_request **req_out) 2991 struct drm_i915_gem_request **req_out)
2734{ 2992{
2735 struct drm_i915_private *dev_priv = to_i915(engine->dev); 2993 struct drm_i915_private *dev_priv = engine->i915;
2736 unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error); 2994 unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error);
2737 struct drm_i915_gem_request *req; 2995 struct drm_i915_gem_request *req;
2738 int ret; 2996 int ret;
@@ -2754,26 +3012,16 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
2754 if (req == NULL) 3012 if (req == NULL)
2755 return -ENOMEM; 3013 return -ENOMEM;
2756 3014
2757 ret = i915_gem_get_seqno(engine->dev, &req->seqno); 3015 ret = i915_gem_get_seqno(engine->i915, &req->seqno);
2758 if (ret) 3016 if (ret)
2759 goto err; 3017 goto err;
2760 3018
2761 kref_init(&req->ref); 3019 kref_init(&req->ref);
2762 req->i915 = dev_priv; 3020 req->i915 = dev_priv;
2763 req->engine = engine; 3021 req->engine = engine;
2764 req->reset_counter = reset_counter;
2765 req->ctx = ctx; 3022 req->ctx = ctx;
2766 i915_gem_context_reference(req->ctx); 3023 i915_gem_context_reference(req->ctx);
2767 3024
2768 if (i915.enable_execlists)
2769 ret = intel_logical_ring_alloc_request_extras(req);
2770 else
2771 ret = intel_ring_alloc_request_extras(req);
2772 if (ret) {
2773 i915_gem_context_unreference(req->ctx);
2774 goto err;
2775 }
2776
2777 /* 3025 /*
2778 * Reserve space in the ring buffer for all the commands required to 3026 * Reserve space in the ring buffer for all the commands required to
2779 * eventually emit this request. This is to guarantee that the 3027 * eventually emit this request. This is to guarantee that the
@@ -2781,24 +3029,20 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
2781 * to be redone if the request is not actually submitted straight 3029 * to be redone if the request is not actually submitted straight
2782 * away, e.g. because a GPU scheduler has deferred it. 3030 * away, e.g. because a GPU scheduler has deferred it.
2783 */ 3031 */
3032 req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
3033
2784 if (i915.enable_execlists) 3034 if (i915.enable_execlists)
2785 ret = intel_logical_ring_reserve_space(req); 3035 ret = intel_logical_ring_alloc_request_extras(req);
2786 else 3036 else
2787 ret = intel_ring_reserve_space(req); 3037 ret = intel_ring_alloc_request_extras(req);
2788 if (ret) { 3038 if (ret)
2789 /* 3039 goto err_ctx;
2790 * At this point, the request is fully allocated even if not
2791 * fully prepared. Thus it can be cleaned up using the proper
2792 * free code.
2793 */
2794 intel_ring_reserved_space_cancel(req->ringbuf);
2795 i915_gem_request_unreference(req);
2796 return ret;
2797 }
2798 3040
2799 *req_out = req; 3041 *req_out = req;
2800 return 0; 3042 return 0;
2801 3043
3044err_ctx:
3045 i915_gem_context_unreference(ctx);
2802err: 3046err:
2803 kmem_cache_free(dev_priv->requests, req); 3047 kmem_cache_free(dev_priv->requests, req);
2804 return ret; 3048 return ret;
@@ -2818,13 +3062,13 @@ err:
2818 */ 3062 */
2819struct drm_i915_gem_request * 3063struct drm_i915_gem_request *
2820i915_gem_request_alloc(struct intel_engine_cs *engine, 3064i915_gem_request_alloc(struct intel_engine_cs *engine,
2821 struct intel_context *ctx) 3065 struct i915_gem_context *ctx)
2822{ 3066{
2823 struct drm_i915_gem_request *req; 3067 struct drm_i915_gem_request *req;
2824 int err; 3068 int err;
2825 3069
2826 if (ctx == NULL) 3070 if (ctx == NULL)
2827 ctx = to_i915(engine->dev)->kernel_context; 3071 ctx = engine->i915->kernel_context;
2828 err = __i915_gem_request_alloc(engine, ctx, &req); 3072 err = __i915_gem_request_alloc(engine, ctx, &req);
2829 return err ? ERR_PTR(err) : req; 3073 return err ? ERR_PTR(err) : req;
2830} 3074}
@@ -2834,8 +3078,16 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
2834{ 3078{
2835 struct drm_i915_gem_request *request; 3079 struct drm_i915_gem_request *request;
2836 3080
3081 /* We are called by the error capture and reset at a random
3082 * point in time. In particular, note that neither is crucially
3083 * ordered with an interrupt. After a hang, the GPU is dead and we
3084 * assume that no more writes can happen (we waited long enough for
3085 * all writes that were in transaction to be flushed) - adding an
3086 * extra delay for a recent interrupt is pointless. Hence, we do
3087 * not need an engine->irq_seqno_barrier() before the seqno reads.
3088 */
2837 list_for_each_entry(request, &engine->request_list, list) { 3089 list_for_each_entry(request, &engine->request_list, list) {
2838 if (i915_gem_request_completed(request, false)) 3090 if (i915_gem_request_completed(request))
2839 continue; 3091 continue;
2840 3092
2841 return request; 3093 return request;
@@ -2844,27 +3096,23 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
2844 return NULL; 3096 return NULL;
2845} 3097}
2846 3098
2847static void i915_gem_reset_engine_status(struct drm_i915_private *dev_priv, 3099static void i915_gem_reset_engine_status(struct intel_engine_cs *engine)
2848 struct intel_engine_cs *engine)
2849{ 3100{
2850 struct drm_i915_gem_request *request; 3101 struct drm_i915_gem_request *request;
2851 bool ring_hung; 3102 bool ring_hung;
2852 3103
2853 request = i915_gem_find_active_request(engine); 3104 request = i915_gem_find_active_request(engine);
2854
2855 if (request == NULL) 3105 if (request == NULL)
2856 return; 3106 return;
2857 3107
2858 ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG; 3108 ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
2859 3109
2860 i915_set_reset_status(dev_priv, request->ctx, ring_hung); 3110 i915_set_reset_status(request->ctx, ring_hung);
2861
2862 list_for_each_entry_continue(request, &engine->request_list, list) 3111 list_for_each_entry_continue(request, &engine->request_list, list)
2863 i915_set_reset_status(dev_priv, request->ctx, false); 3112 i915_set_reset_status(request->ctx, false);
2864} 3113}
2865 3114
2866static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv, 3115static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
2867 struct intel_engine_cs *engine)
2868{ 3116{
2869 struct intel_ringbuffer *buffer; 3117 struct intel_ringbuffer *buffer;
2870 3118
@@ -2888,13 +3136,7 @@ static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv,
2888 /* Ensure irq handler finishes or is cancelled. */ 3136 /* Ensure irq handler finishes or is cancelled. */
2889 tasklet_kill(&engine->irq_tasklet); 3137 tasklet_kill(&engine->irq_tasklet);
2890 3138
2891 spin_lock_bh(&engine->execlist_lock); 3139 intel_execlists_cancel_requests(engine);
2892 /* list_splice_tail_init checks for empty lists */
2893 list_splice_tail_init(&engine->execlist_queue,
2894 &engine->execlist_retired_req_list);
2895 spin_unlock_bh(&engine->execlist_lock);
2896
2897 intel_execlists_retire_requests(engine);
2898 } 3140 }
2899 3141
2900 /* 3142 /*
@@ -2931,7 +3173,7 @@ static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv,
2931 3173
2932void i915_gem_reset(struct drm_device *dev) 3174void i915_gem_reset(struct drm_device *dev)
2933{ 3175{
2934 struct drm_i915_private *dev_priv = dev->dev_private; 3176 struct drm_i915_private *dev_priv = to_i915(dev);
2935 struct intel_engine_cs *engine; 3177 struct intel_engine_cs *engine;
2936 3178
2937 /* 3179 /*
@@ -2940,10 +3182,10 @@ void i915_gem_reset(struct drm_device *dev)
2940 * their reference to the objects, the inspection must be done first. 3182 * their reference to the objects, the inspection must be done first.
2941 */ 3183 */
2942 for_each_engine(engine, dev_priv) 3184 for_each_engine(engine, dev_priv)
2943 i915_gem_reset_engine_status(dev_priv, engine); 3185 i915_gem_reset_engine_status(engine);
2944 3186
2945 for_each_engine(engine, dev_priv) 3187 for_each_engine(engine, dev_priv)
2946 i915_gem_reset_engine_cleanup(dev_priv, engine); 3188 i915_gem_reset_engine_cleanup(engine);
2947 3189
2948 i915_gem_context_reset(dev); 3190 i915_gem_context_reset(dev);
2949 3191
@@ -2954,6 +3196,7 @@ void i915_gem_reset(struct drm_device *dev)
2954 3196
2955/** 3197/**
2956 * This function clears the request list as sequence numbers are passed. 3198 * This function clears the request list as sequence numbers are passed.
3199 * @engine: engine to retire requests on
2957 */ 3200 */
2958void 3201void
2959i915_gem_retire_requests_ring(struct intel_engine_cs *engine) 3202i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
@@ -2972,7 +3215,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
2972 struct drm_i915_gem_request, 3215 struct drm_i915_gem_request,
2973 list); 3216 list);
2974 3217
2975 if (!i915_gem_request_completed(request, true)) 3218 if (!i915_gem_request_completed(request))
2976 break; 3219 break;
2977 3220
2978 i915_gem_request_retire(request); 3221 i915_gem_request_retire(request);
@@ -2995,58 +3238,52 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
2995 i915_gem_object_retire__read(obj, engine->id); 3238 i915_gem_object_retire__read(obj, engine->id);
2996 } 3239 }
2997 3240
2998 if (unlikely(engine->trace_irq_req &&
2999 i915_gem_request_completed(engine->trace_irq_req, true))) {
3000 engine->irq_put(engine);
3001 i915_gem_request_assign(&engine->trace_irq_req, NULL);
3002 }
3003
3004 WARN_ON(i915_verify_lists(engine->dev)); 3241 WARN_ON(i915_verify_lists(engine->dev));
3005} 3242}
3006 3243
3007bool 3244void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
3008i915_gem_retire_requests(struct drm_device *dev)
3009{ 3245{
3010 struct drm_i915_private *dev_priv = dev->dev_private;
3011 struct intel_engine_cs *engine; 3246 struct intel_engine_cs *engine;
3012 bool idle = true; 3247
3248 lockdep_assert_held(&dev_priv->drm.struct_mutex);
3249
3250 if (dev_priv->gt.active_engines == 0)
3251 return;
3252
3253 GEM_BUG_ON(!dev_priv->gt.awake);
3013 3254
3014 for_each_engine(engine, dev_priv) { 3255 for_each_engine(engine, dev_priv) {
3015 i915_gem_retire_requests_ring(engine); 3256 i915_gem_retire_requests_ring(engine);
3016 idle &= list_empty(&engine->request_list); 3257 if (list_empty(&engine->request_list))
3017 if (i915.enable_execlists) { 3258 dev_priv->gt.active_engines &= ~intel_engine_flag(engine);
3018 spin_lock_bh(&engine->execlist_lock);
3019 idle &= list_empty(&engine->execlist_queue);
3020 spin_unlock_bh(&engine->execlist_lock);
3021
3022 intel_execlists_retire_requests(engine);
3023 }
3024 } 3259 }
3025 3260
3026 if (idle) 3261 if (dev_priv->gt.active_engines == 0)
3027 mod_delayed_work(dev_priv->wq, 3262 queue_delayed_work(dev_priv->wq,
3028 &dev_priv->mm.idle_work, 3263 &dev_priv->gt.idle_work,
3029 msecs_to_jiffies(100)); 3264 msecs_to_jiffies(100));
3030
3031 return idle;
3032} 3265}
3033 3266
3034static void 3267static void
3035i915_gem_retire_work_handler(struct work_struct *work) 3268i915_gem_retire_work_handler(struct work_struct *work)
3036{ 3269{
3037 struct drm_i915_private *dev_priv = 3270 struct drm_i915_private *dev_priv =
3038 container_of(work, typeof(*dev_priv), mm.retire_work.work); 3271 container_of(work, typeof(*dev_priv), gt.retire_work.work);
3039 struct drm_device *dev = dev_priv->dev; 3272 struct drm_device *dev = &dev_priv->drm;
3040 bool idle;
3041 3273
3042 /* Come back later if the device is busy... */ 3274 /* Come back later if the device is busy... */
3043 idle = false;
3044 if (mutex_trylock(&dev->struct_mutex)) { 3275 if (mutex_trylock(&dev->struct_mutex)) {
3045 idle = i915_gem_retire_requests(dev); 3276 i915_gem_retire_requests(dev_priv);
3046 mutex_unlock(&dev->struct_mutex); 3277 mutex_unlock(&dev->struct_mutex);
3047 } 3278 }
3048 if (!idle) 3279
3049 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 3280 /* Keep the retire handler running until we are finally idle.
3281 * We do not need to do this test under locking as in the worst-case
3282 * we queue the retire worker once too often.
3283 */
3284 if (READ_ONCE(dev_priv->gt.awake))
3285 queue_delayed_work(dev_priv->wq,
3286 &dev_priv->gt.retire_work,
3050 round_jiffies_up_relative(HZ)); 3287 round_jiffies_up_relative(HZ));
3051} 3288}
3052 3289
@@ -3054,25 +3291,55 @@ static void
3054i915_gem_idle_work_handler(struct work_struct *work) 3291i915_gem_idle_work_handler(struct work_struct *work)
3055{ 3292{
3056 struct drm_i915_private *dev_priv = 3293 struct drm_i915_private *dev_priv =
3057 container_of(work, typeof(*dev_priv), mm.idle_work.work); 3294 container_of(work, typeof(*dev_priv), gt.idle_work.work);
3058 struct drm_device *dev = dev_priv->dev; 3295 struct drm_device *dev = &dev_priv->drm;
3059 struct intel_engine_cs *engine; 3296 struct intel_engine_cs *engine;
3297 unsigned int stuck_engines;
3298 bool rearm_hangcheck;
3299
3300 if (!READ_ONCE(dev_priv->gt.awake))
3301 return;
3302
3303 if (READ_ONCE(dev_priv->gt.active_engines))
3304 return;
3305
3306 rearm_hangcheck =
3307 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
3308
3309 if (!mutex_trylock(&dev->struct_mutex)) {
3310 /* Currently busy, come back later */
3311 mod_delayed_work(dev_priv->wq,
3312 &dev_priv->gt.idle_work,
3313 msecs_to_jiffies(50));
3314 goto out_rearm;
3315 }
3316
3317 if (dev_priv->gt.active_engines)
3318 goto out_unlock;
3060 3319
3061 for_each_engine(engine, dev_priv) 3320 for_each_engine(engine, dev_priv)
3062 if (!list_empty(&engine->request_list)) 3321 i915_gem_batch_pool_fini(&engine->batch_pool);
3063 return;
3064 3322
3065 /* we probably should sync with hangcheck here, using cancel_work_sync. 3323 GEM_BUG_ON(!dev_priv->gt.awake);
3066 * Also locking seems to be fubar here, engine->request_list is protected 3324 dev_priv->gt.awake = false;
3067 * by dev->struct_mutex. */ 3325 rearm_hangcheck = false;
3068 3326
3069 intel_mark_idle(dev); 3327 stuck_engines = intel_kick_waiters(dev_priv);
3328 if (unlikely(stuck_engines)) {
3329 DRM_DEBUG_DRIVER("kicked stuck waiters...missed irq\n");
3330 dev_priv->gpu_error.missed_irq_rings |= stuck_engines;
3331 }
3070 3332
3071 if (mutex_trylock(&dev->struct_mutex)) { 3333 if (INTEL_GEN(dev_priv) >= 6)
3072 for_each_engine(engine, dev_priv) 3334 gen6_rps_idle(dev_priv);
3073 i915_gem_batch_pool_fini(&engine->batch_pool); 3335 intel_runtime_pm_put(dev_priv);
3336out_unlock:
3337 mutex_unlock(&dev->struct_mutex);
3074 3338
3075 mutex_unlock(&dev->struct_mutex); 3339out_rearm:
3340 if (rearm_hangcheck) {
3341 GEM_BUG_ON(!dev_priv->gt.awake);
3342 i915_queue_hangcheck(dev_priv);
3076 } 3343 }
3077} 3344}
3078 3345
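The idle handler above follows a double-checked parking sequence: peek at gt.awake and gt.active_engines with READ_ONCE outside any lock, cancel hangcheck, take struct_mutex with a trylock, re-check the busy state under the lock, and only then mark the device asleep (re-arming hangcheck if the attempt is abandoned). The fragment below sketches that pattern in isolation as plain userspace C on pthreads; none of the names are real i915 symbols.

#include <stdbool.h>
#include <pthread.h>

struct fake_gt {
	pthread_mutex_t lock;
	bool awake;
	unsigned int active;	/* outstanding work items */
};

/* Returns true if this call parked the device. */
static bool try_park(struct fake_gt *gt)
{
	bool parked = false;

	/* Unlocked peek: worst case we give up now and retry later. */
	if (!__atomic_load_n(&gt->awake, __ATOMIC_RELAXED) ||
	    __atomic_load_n(&gt->active, __ATOMIC_RELAXED))
		return false;

	if (pthread_mutex_trylock(&gt->lock) != 0)
		return false;		/* currently busy: back off */

	/* Re-check under the lock before committing to the state change. */
	if (gt->active == 0 && gt->awake) {
		gt->awake = false;
		parked = true;
	}

	pthread_mutex_unlock(&gt->lock);
	return parked;
}
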
@@ -3080,6 +3347,7 @@ i915_gem_idle_work_handler(struct work_struct *work)
3080 * Ensures that an object will eventually get non-busy by flushing any required 3347 * Ensures that an object will eventually get non-busy by flushing any required
3081 * write domains, emitting any outstanding lazy request and retiring any 3348 * write domains, emitting any outstanding lazy request and retiring any
3082 * completed requests. 3349 * completed requests.
3350 * @obj: object to flush
3083 */ 3351 */
3084static int 3352static int
3085i915_gem_object_flush_active(struct drm_i915_gem_object *obj) 3353i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
@@ -3096,14 +3364,8 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
3096 if (req == NULL) 3364 if (req == NULL)
3097 continue; 3365 continue;
3098 3366
3099 if (list_empty(&req->list)) 3367 if (i915_gem_request_completed(req))
3100 goto retire;
3101
3102 if (i915_gem_request_completed(req, true)) {
3103 __i915_gem_request_retire__upto(req);
3104retire:
3105 i915_gem_object_retire__read(obj, i); 3368 i915_gem_object_retire__read(obj, i);
3106 }
3107 } 3369 }
3108 3370
3109 return 0; 3371 return 0;
@@ -3111,7 +3373,9 @@ retire:
3111 3373
3112/** 3374/**
3113 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT 3375 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
3114 * @DRM_IOCTL_ARGS: standard ioctl arguments 3376 * @dev: drm device pointer
3377 * @data: ioctl data blob
3378 * @file: drm file pointer
3115 * 3379 *
3116 * Returns 0 if successful, else an error is returned with the remaining time in 3380 * Returns 0 if successful, else an error is returned with the remaining time in
3117 * the timeout parameter. 3381 * the timeout parameter.
@@ -3185,7 +3449,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
3185 ret = __i915_wait_request(req[i], true, 3449 ret = __i915_wait_request(req[i], true,
3186 args->timeout_ns > 0 ? &args->timeout_ns : NULL, 3450 args->timeout_ns > 0 ? &args->timeout_ns : NULL,
3187 to_rps_client(file)); 3451 to_rps_client(file));
3188 i915_gem_request_unreference__unlocked(req[i]); 3452 i915_gem_request_unreference(req[i]);
3189 } 3453 }
3190 return ret; 3454 return ret;
3191 3455
@@ -3208,10 +3472,10 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
3208 if (to == from) 3472 if (to == from)
3209 return 0; 3473 return 0;
3210 3474
3211 if (i915_gem_request_completed(from_req, true)) 3475 if (i915_gem_request_completed(from_req))
3212 return 0; 3476 return 0;
3213 3477
3214 if (!i915_semaphore_is_enabled(obj->base.dev)) { 3478 if (!i915_semaphore_is_enabled(to_i915(obj->base.dev))) {
3215 struct drm_i915_private *i915 = to_i915(obj->base.dev); 3479 struct drm_i915_private *i915 = to_i915(obj->base.dev);
3216 ret = __i915_wait_request(from_req, 3480 ret = __i915_wait_request(from_req,
3217 i915->mm.interruptible, 3481 i915->mm.interruptible,
@@ -3345,10 +3609,21 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
3345 old_write_domain); 3609 old_write_domain);
3346} 3610}
3347 3611
3612static void __i915_vma_iounmap(struct i915_vma *vma)
3613{
3614 GEM_BUG_ON(vma->pin_count);
3615
3616 if (vma->iomap == NULL)
3617 return;
3618
3619 io_mapping_unmap(vma->iomap);
3620 vma->iomap = NULL;
3621}
3622
3348static int __i915_vma_unbind(struct i915_vma *vma, bool wait) 3623static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
3349{ 3624{
3350 struct drm_i915_gem_object *obj = vma->obj; 3625 struct drm_i915_gem_object *obj = vma->obj;
3351 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 3626 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
3352 int ret; 3627 int ret;
3353 3628
3354 if (list_empty(&vma->obj_link)) 3629 if (list_empty(&vma->obj_link))
@@ -3377,6 +3652,8 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
3377 ret = i915_gem_object_put_fence(obj); 3652 ret = i915_gem_object_put_fence(obj);
3378 if (ret) 3653 if (ret)
3379 return ret; 3654 return ret;
3655
3656 __i915_vma_iounmap(vma);
3380 } 3657 }
3381 3658
3382 trace_i915_vma_unbind(vma); 3659 trace_i915_vma_unbind(vma);
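__i915_vma_iounmap(), added above, is written to be idempotent: it does nothing when no CPU mapping exists, and it clears vma->iomap so a later unbind cannot unmap the same region twice. A generic sketch of that teardown shape, using plain iounmap() rather than the io_mapping API and made-up names, is:

#include <linux/io.h>

struct mapping_owner {
	void __iomem *regs;	/* set up by a matching ioremap() elsewhere */
};

static void owner_iounmap(struct mapping_owner *o)
{
	if (o->regs == NULL)
		return;		/* nothing mapped: calling twice is harmless */

	iounmap(o->regs);
	o->regs = NULL;		/* record that the mapping is gone */
}
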
@@ -3422,26 +3699,16 @@ int __i915_vma_unbind_no_wait(struct i915_vma *vma)
3422 return __i915_vma_unbind(vma, false); 3699 return __i915_vma_unbind(vma, false);
3423} 3700}
3424 3701
3425int i915_gpu_idle(struct drm_device *dev) 3702int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv)
3426{ 3703{
3427 struct drm_i915_private *dev_priv = dev->dev_private;
3428 struct intel_engine_cs *engine; 3704 struct intel_engine_cs *engine;
3429 int ret; 3705 int ret;
3430 3706
3431 /* Flush everything onto the inactive list. */ 3707 lockdep_assert_held(&dev_priv->drm.struct_mutex);
3432 for_each_engine(engine, dev_priv) {
3433 if (!i915.enable_execlists) {
3434 struct drm_i915_gem_request *req;
3435 3708
3436 req = i915_gem_request_alloc(engine, NULL); 3709 for_each_engine(engine, dev_priv) {
3437 if (IS_ERR(req)) 3710 if (engine->last_context == NULL)
3438 return PTR_ERR(req); 3711 continue;
3439
3440 ret = i915_switch_context(req);
3441 i915_add_request_no_flush(req);
3442 if (ret)
3443 return ret;
3444 }
3445 3712
3446 ret = intel_engine_idle(engine); 3713 ret = intel_engine_idle(engine);
3447 if (ret) 3714 if (ret)
@@ -3488,6 +3755,11 @@ static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
3488/** 3755/**
3489 * Finds free space in the GTT aperture and binds the object or a view of it 3756 * Finds free space in the GTT aperture and binds the object or a view of it
3490 * there. 3757 * there.
3758 * @obj: object to bind
3759 * @vm: address space to bind into
3760 * @ggtt_view: global gtt view if applicable
3761 * @alignment: requested alignment
3762 * @flags: mask of PIN_* flags to use
3491 */ 3763 */
3492static struct i915_vma * 3764static struct i915_vma *
3493i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj, 3765i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
@@ -3731,7 +4003,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
3731 return; 4003 return;
3732 4004
3733 if (i915_gem_clflush_object(obj, obj->pin_display)) 4005 if (i915_gem_clflush_object(obj, obj->pin_display))
3734 i915_gem_chipset_flush(obj->base.dev); 4006 i915_gem_chipset_flush(to_i915(obj->base.dev));
3735 4007
3736 old_write_domain = obj->base.write_domain; 4008 old_write_domain = obj->base.write_domain;
3737 obj->base.write_domain = 0; 4009 obj->base.write_domain = 0;
@@ -3745,6 +4017,8 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
3745 4017
3746/** 4018/**
3747 * Moves a single object to the GTT read, and possibly write domain. 4019 * Moves a single object to the GTT read, and possibly write domain.
4020 * @obj: object to act on
4021 * @write: ask for write access or read only
3748 * 4022 *
3749 * This function returns when the move is complete, including waiting on 4023 * This function returns when the move is complete, including waiting on
3750 * flushes to occur. 4024 * flushes to occur.
@@ -3816,6 +4090,8 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3816 4090
3817/** 4091/**
3818 * Changes the cache-level of an object across all VMA. 4092 * Changes the cache-level of an object across all VMA.
4093 * @obj: object to act on
4094 * @cache_level: new cache level to set for the object
3819 * 4095 *
3820 * After this function returns, the object will be in the new cache-level 4096 * After this function returns, the object will be in the new cache-level
3821 * across all GTT and the contents of the backing storage will be coherent, 4097 * across all GTT and the contents of the backing storage will be coherent,
@@ -3925,11 +4201,9 @@ out:
3925 * object is now coherent at its new cache level (with respect 4201 * object is now coherent at its new cache level (with respect
3926 * to the access domain). 4202 * to the access domain).
3927 */ 4203 */
3928 if (obj->cache_dirty && 4204 if (obj->cache_dirty && cpu_write_needs_clflush(obj)) {
3929 obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
3930 cpu_write_needs_clflush(obj)) {
3931 if (i915_gem_clflush_object(obj, true)) 4205 if (i915_gem_clflush_object(obj, true))
3932 i915_gem_chipset_flush(obj->base.dev); 4206 i915_gem_chipset_flush(to_i915(obj->base.dev));
3933 } 4207 }
3934 4208
3935 return 0; 4209 return 0;
@@ -3967,7 +4241,7 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3967int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, 4241int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3968 struct drm_file *file) 4242 struct drm_file *file)
3969{ 4243{
3970 struct drm_i915_private *dev_priv = dev->dev_private; 4244 struct drm_i915_private *dev_priv = to_i915(dev);
3971 struct drm_i915_gem_caching *args = data; 4245 struct drm_i915_gem_caching *args = data;
3972 struct drm_i915_gem_object *obj; 4246 struct drm_i915_gem_object *obj;
3973 enum i915_cache_level level; 4247 enum i915_cache_level level;
@@ -4097,6 +4371,8 @@ i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
4097 4371
4098/** 4372/**
4099 * Moves a single object to the CPU read, and possibly write domain. 4373 * Moves a single object to the CPU read, and possibly write domain.
4374 * @obj: object to act on
4375 * @write: requesting write or read-only access
4100 * 4376 *
4101 * This function returns when the move is complete, including waiting on 4377 * This function returns when the move is complete, including waiting on
4102 * flushes to occur. 4378 * flushes to occur.
@@ -4159,7 +4435,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
4159static int 4435static int
4160i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file) 4436i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
4161{ 4437{
4162 struct drm_i915_private *dev_priv = dev->dev_private; 4438 struct drm_i915_private *dev_priv = to_i915(dev);
4163 struct drm_i915_file_private *file_priv = file->driver_priv; 4439 struct drm_i915_file_private *file_priv = file->driver_priv;
4164 unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES; 4440 unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
4165 struct drm_i915_gem_request *request, *target = NULL; 4441 struct drm_i915_gem_request *request, *target = NULL;
@@ -4195,10 +4471,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
4195 return 0; 4471 return 0;
4196 4472
4197 ret = __i915_wait_request(target, true, NULL, NULL); 4473 ret = __i915_wait_request(target, true, NULL, NULL);
4198 if (ret == 0) 4474 i915_gem_request_unreference(target);
4199 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
4200
4201 i915_gem_request_unreference__unlocked(target);
4202 4475
4203 return ret; 4476 return ret;
4204} 4477}
@@ -4256,7 +4529,7 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
4256 uint32_t alignment, 4529 uint32_t alignment,
4257 uint64_t flags) 4530 uint64_t flags)
4258{ 4531{
4259 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 4532 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
4260 struct i915_vma *vma; 4533 struct i915_vma *vma;
4261 unsigned bound; 4534 unsigned bound;
4262 int ret; 4535 int ret;
@@ -4420,7 +4693,7 @@ int
4420i915_gem_madvise_ioctl(struct drm_device *dev, void *data, 4693i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4421 struct drm_file *file_priv) 4694 struct drm_file *file_priv)
4422{ 4695{
4423 struct drm_i915_private *dev_priv = dev->dev_private; 4696 struct drm_i915_private *dev_priv = to_i915(dev);
4424 struct drm_i915_gem_madvise *args = data; 4697 struct drm_i915_gem_madvise *args = data;
4425 struct drm_i915_gem_object *obj; 4698 struct drm_i915_gem_object *obj;
4426 int ret; 4699 int ret;
@@ -4490,7 +4763,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
4490 obj->fence_reg = I915_FENCE_REG_NONE; 4763 obj->fence_reg = I915_FENCE_REG_NONE;
4491 obj->madv = I915_MADV_WILLNEED; 4764 obj->madv = I915_MADV_WILLNEED;
4492 4765
4493 i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size); 4766 i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
4494} 4767}
4495 4768
4496static const struct drm_i915_gem_object_ops i915_gem_object_ops = { 4769static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
@@ -4499,21 +4772,21 @@ static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4499 .put_pages = i915_gem_object_put_pages_gtt, 4772 .put_pages = i915_gem_object_put_pages_gtt,
4500}; 4773};
4501 4774
4502struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev, 4775struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
4503 size_t size) 4776 size_t size)
4504{ 4777{
4505 struct drm_i915_gem_object *obj; 4778 struct drm_i915_gem_object *obj;
4506 struct address_space *mapping; 4779 struct address_space *mapping;
4507 gfp_t mask; 4780 gfp_t mask;
4781 int ret;
4508 4782
4509 obj = i915_gem_object_alloc(dev); 4783 obj = i915_gem_object_alloc(dev);
4510 if (obj == NULL) 4784 if (obj == NULL)
4511 return NULL; 4785 return ERR_PTR(-ENOMEM);
4512 4786
4513 if (drm_gem_object_init(dev, &obj->base, size) != 0) { 4787 ret = drm_gem_object_init(dev, &obj->base, size);
4514 i915_gem_object_free(obj); 4788 if (ret)
4515 return NULL; 4789 goto fail;
4516 }
4517 4790
4518 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE; 4791 mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4519 if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) { 4792 if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
@@ -4550,6 +4823,11 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
4550 trace_i915_gem_object_create(obj); 4823 trace_i915_gem_object_create(obj);
4551 4824
4552 return obj; 4825 return obj;
4826
4827fail:
4828 i915_gem_object_free(obj);
4829
4830 return ERR_PTR(ret);
4553} 4831}
4554 4832
4555static bool discard_backing_storage(struct drm_i915_gem_object *obj) 4833static bool discard_backing_storage(struct drm_i915_gem_object *obj)
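The rename from i915_gem_alloc_object() to i915_gem_object_create() also changes the failure convention from returning NULL to returning ERR_PTR(-errno), so callers have to switch from NULL checks to IS_ERR()/PTR_ERR(); the i915_gem_batch_pool.c hunk further down shows one such conversion. A toy constructor/caller pair illustrating the convention, with placeholder names, might read:

#include <linux/err.h>
#include <linux/slab.h>

struct widget {
	unsigned int payload;
};

/* Constructor that reports failure as an encoded errno, never as NULL. */
static struct widget *widget_create(gfp_t gfp)
{
	struct widget *w = kzalloc(sizeof(*w), gfp);

	return w ? w : ERR_PTR(-ENOMEM);
}

static int widget_user(void)
{
	struct widget *w = widget_create(GFP_KERNEL);

	if (IS_ERR(w))
		return PTR_ERR(w);	/* propagate the precise error code */

	w->payload = 1;
	kfree(w);
	return 0;
}
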
@@ -4580,7 +4858,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
4580{ 4858{
4581 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj); 4859 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4582 struct drm_device *dev = obj->base.dev; 4860 struct drm_device *dev = obj->base.dev;
4583 struct drm_i915_private *dev_priv = dev->dev_private; 4861 struct drm_i915_private *dev_priv = to_i915(dev);
4584 struct i915_vma *vma, *next; 4862 struct i915_vma *vma, *next;
4585 4863
4586 intel_runtime_pm_get(dev_priv); 4864 intel_runtime_pm_get(dev_priv);
@@ -4655,16 +4933,12 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
4655struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj, 4933struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
4656 const struct i915_ggtt_view *view) 4934 const struct i915_ggtt_view *view)
4657{ 4935{
4658 struct drm_device *dev = obj->base.dev;
4659 struct drm_i915_private *dev_priv = to_i915(dev);
4660 struct i915_ggtt *ggtt = &dev_priv->ggtt;
4661 struct i915_vma *vma; 4936 struct i915_vma *vma;
4662 4937
4663 BUG_ON(!view); 4938 GEM_BUG_ON(!view);
4664 4939
4665 list_for_each_entry(vma, &obj->vma_list, obj_link) 4940 list_for_each_entry(vma, &obj->vma_list, obj_link)
4666 if (vma->vm == &ggtt->base && 4941 if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
4667 i915_ggtt_view_equal(&vma->ggtt_view, view))
4668 return vma; 4942 return vma;
4669 return NULL; 4943 return NULL;
4670} 4944}
@@ -4688,7 +4962,7 @@ void i915_gem_vma_destroy(struct i915_vma *vma)
4688static void 4962static void
4689i915_gem_stop_engines(struct drm_device *dev) 4963i915_gem_stop_engines(struct drm_device *dev)
4690{ 4964{
4691 struct drm_i915_private *dev_priv = dev->dev_private; 4965 struct drm_i915_private *dev_priv = to_i915(dev);
4692 struct intel_engine_cs *engine; 4966 struct intel_engine_cs *engine;
4693 4967
4694 for_each_engine(engine, dev_priv) 4968 for_each_engine(engine, dev_priv)
@@ -4698,27 +4972,28 @@ i915_gem_stop_engines(struct drm_device *dev)
4698int 4972int
4699i915_gem_suspend(struct drm_device *dev) 4973i915_gem_suspend(struct drm_device *dev)
4700{ 4974{
4701 struct drm_i915_private *dev_priv = dev->dev_private; 4975 struct drm_i915_private *dev_priv = to_i915(dev);
4702 int ret = 0; 4976 int ret = 0;
4703 4977
4704 mutex_lock(&dev->struct_mutex); 4978 mutex_lock(&dev->struct_mutex);
4705 ret = i915_gpu_idle(dev); 4979 ret = i915_gem_wait_for_idle(dev_priv);
4706 if (ret) 4980 if (ret)
4707 goto err; 4981 goto err;
4708 4982
4709 i915_gem_retire_requests(dev); 4983 i915_gem_retire_requests(dev_priv);
4710 4984
4711 i915_gem_stop_engines(dev); 4985 i915_gem_stop_engines(dev);
4986 i915_gem_context_lost(dev_priv);
4712 mutex_unlock(&dev->struct_mutex); 4987 mutex_unlock(&dev->struct_mutex);
4713 4988
4714 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work); 4989 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
4715 cancel_delayed_work_sync(&dev_priv->mm.retire_work); 4990 cancel_delayed_work_sync(&dev_priv->gt.retire_work);
4716 flush_delayed_work(&dev_priv->mm.idle_work); 4991 flush_delayed_work(&dev_priv->gt.idle_work);
4717 4992
4718 /* Assert that we successfully flushed all the work and 4993 /* Assert that we successfully flushed all the work and
4719 * reset the GPU back to its idle, low power state. 4994 * reset the GPU back to its idle, low power state.
4720 */ 4995 */
4721 WARN_ON(dev_priv->mm.busy); 4996 WARN_ON(dev_priv->gt.awake);
4722 4997
4723 return 0; 4998 return 0;
4724 4999
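i915_gem_suspend() above quiesces the GPU under struct_mutex (wait for idle, retire requests, stop the engines, mark the contexts lost), drops the lock, and only then synchronously cancels hangcheck and the retire worker and flushes the idle worker so it gets its final run, finishing with a WARN_ON that gt.awake really cleared. A generic "quiesce, then drain the deferred work" shape, with invented names, looks like:

#include <linux/bug.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

struct device_state {
	struct mutex lock;
	struct delayed_work retire_work;
	struct delayed_work idle_work;
	bool awake;
	int (*wait_for_idle)(struct device_state *ds);
};

static int device_quiesce(struct device_state *ds)
{
	int ret;

	mutex_lock(&ds->lock);
	ret = ds->wait_for_idle(ds);	/* flush all outstanding GPU work */
	mutex_unlock(&ds->lock);
	if (ret)
		return ret;

	/* No new work can arrive, so drain the deferred housekeeping.  The
	 * idle worker is flushed rather than cancelled so that it runs one
	 * last time and clears 'awake'. */
	cancel_delayed_work_sync(&ds->retire_work);
	flush_delayed_work(&ds->idle_work);

	WARN_ON(ds->awake);		/* everything should be parked now */
	return 0;
}
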
@@ -4727,40 +5002,9 @@ err:
4727 return ret; 5002 return ret;
4728} 5003}
4729 5004
4730int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
4731{
4732 struct intel_engine_cs *engine = req->engine;
4733 struct drm_device *dev = engine->dev;
4734 struct drm_i915_private *dev_priv = dev->dev_private;
4735 u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
4736 int i, ret;
4737
4738 if (!HAS_L3_DPF(dev) || !remap_info)
4739 return 0;
4740
4741 ret = intel_ring_begin(req, GEN7_L3LOG_SIZE / 4 * 3);
4742 if (ret)
4743 return ret;
4744
4745 /*
4746 * Note: We do not worry about the concurrent register cacheline hang
4747 * here because no other code should access these registers other than
4748 * at initialization time.
4749 */
4750 for (i = 0; i < GEN7_L3LOG_SIZE / 4; i++) {
4751 intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
4752 intel_ring_emit_reg(engine, GEN7_L3LOG(slice, i));
4753 intel_ring_emit(engine, remap_info[i]);
4754 }
4755
4756 intel_ring_advance(engine);
4757
4758 return ret;
4759}
4760
4761void i915_gem_init_swizzling(struct drm_device *dev) 5005void i915_gem_init_swizzling(struct drm_device *dev)
4762{ 5006{
4763 struct drm_i915_private *dev_priv = dev->dev_private; 5007 struct drm_i915_private *dev_priv = to_i915(dev);
4764 5008
4765 if (INTEL_INFO(dev)->gen < 5 || 5009 if (INTEL_INFO(dev)->gen < 5 ||
4766 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE) 5010 dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
@@ -4785,7 +5029,7 @@ void i915_gem_init_swizzling(struct drm_device *dev)
4785 5029
4786static void init_unused_ring(struct drm_device *dev, u32 base) 5030static void init_unused_ring(struct drm_device *dev, u32 base)
4787{ 5031{
4788 struct drm_i915_private *dev_priv = dev->dev_private; 5032 struct drm_i915_private *dev_priv = to_i915(dev);
4789 5033
4790 I915_WRITE(RING_CTL(base), 0); 5034 I915_WRITE(RING_CTL(base), 0);
4791 I915_WRITE(RING_HEAD(base), 0); 5035 I915_WRITE(RING_HEAD(base), 0);
@@ -4812,7 +5056,7 @@ static void init_unused_rings(struct drm_device *dev)
4812 5056
4813int i915_gem_init_engines(struct drm_device *dev) 5057int i915_gem_init_engines(struct drm_device *dev)
4814{ 5058{
4815 struct drm_i915_private *dev_priv = dev->dev_private; 5059 struct drm_i915_private *dev_priv = to_i915(dev);
4816 int ret; 5060 int ret;
4817 5061
4818 ret = intel_init_render_ring_buffer(dev); 5062 ret = intel_init_render_ring_buffer(dev);
@@ -4860,9 +5104,9 @@ cleanup_render_ring:
4860int 5104int
4861i915_gem_init_hw(struct drm_device *dev) 5105i915_gem_init_hw(struct drm_device *dev)
4862{ 5106{
4863 struct drm_i915_private *dev_priv = dev->dev_private; 5107 struct drm_i915_private *dev_priv = to_i915(dev);
4864 struct intel_engine_cs *engine; 5108 struct intel_engine_cs *engine;
4865 int ret, j; 5109 int ret;
4866 5110
4867 /* Double layer security blanket, see i915_gem_init() */ 5111 /* Double layer security blanket, see i915_gem_init() */
4868 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 5112 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
@@ -4914,59 +5158,10 @@ i915_gem_init_hw(struct drm_device *dev)
4914 intel_mocs_init_l3cc_table(dev); 5158 intel_mocs_init_l3cc_table(dev);
4915 5159
4916 /* We can't enable contexts until all firmware is loaded */ 5160 /* We can't enable contexts until all firmware is loaded */
4917 if (HAS_GUC_UCODE(dev)) { 5161 ret = intel_guc_setup(dev);
4918 ret = intel_guc_ucode_load(dev);
4919 if (ret) {
4920 DRM_ERROR("Failed to initialize GuC, error %d\n", ret);
4921 ret = -EIO;
4922 goto out;
4923 }
4924 }
4925
4926 /*
4927 * Increment the next seqno by 0x100 so we have a visible break
4928 * on re-initialisation
4929 */
4930 ret = i915_gem_set_seqno(dev, dev_priv->next_seqno+0x100);
4931 if (ret) 5162 if (ret)
4932 goto out; 5163 goto out;
4933 5164
4934 /* Now it is safe to go back round and do everything else: */
4935 for_each_engine(engine, dev_priv) {
4936 struct drm_i915_gem_request *req;
4937
4938 req = i915_gem_request_alloc(engine, NULL);
4939 if (IS_ERR(req)) {
4940 ret = PTR_ERR(req);
4941 break;
4942 }
4943
4944 if (engine->id == RCS) {
4945 for (j = 0; j < NUM_L3_SLICES(dev); j++) {
4946 ret = i915_gem_l3_remap(req, j);
4947 if (ret)
4948 goto err_request;
4949 }
4950 }
4951
4952 ret = i915_ppgtt_init_ring(req);
4953 if (ret)
4954 goto err_request;
4955
4956 ret = i915_gem_context_enable(req);
4957 if (ret)
4958 goto err_request;
4959
4960err_request:
4961 i915_add_request_no_flush(req);
4962 if (ret) {
4963 DRM_ERROR("Failed to enable %s, error=%d\n",
4964 engine->name, ret);
4965 i915_gem_cleanup_engines(dev);
4966 break;
4967 }
4968 }
4969
4970out: 5165out:
4971 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 5166 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4972 return ret; 5167 return ret;
@@ -4974,12 +5169,9 @@ out:
4974 5169
4975int i915_gem_init(struct drm_device *dev) 5170int i915_gem_init(struct drm_device *dev)
4976{ 5171{
4977 struct drm_i915_private *dev_priv = dev->dev_private; 5172 struct drm_i915_private *dev_priv = to_i915(dev);
4978 int ret; 5173 int ret;
4979 5174
4980 i915.enable_execlists = intel_sanitize_enable_execlists(dev,
4981 i915.enable_execlists);
4982
4983 mutex_lock(&dev->struct_mutex); 5175 mutex_lock(&dev->struct_mutex);
4984 5176
4985 if (!i915.enable_execlists) { 5177 if (!i915.enable_execlists) {
@@ -5002,10 +5194,7 @@ int i915_gem_init(struct drm_device *dev)
5002 */ 5194 */
5003 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 5195 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5004 5196
5005 ret = i915_gem_init_userptr(dev); 5197 i915_gem_init_userptr(dev_priv);
5006 if (ret)
5007 goto out_unlock;
5008
5009 i915_gem_init_ggtt(dev); 5198 i915_gem_init_ggtt(dev);
5010 5199
5011 ret = i915_gem_context_init(dev); 5200 ret = i915_gem_context_init(dev);
@@ -5037,19 +5226,11 @@ out_unlock:
5037void 5226void
5038i915_gem_cleanup_engines(struct drm_device *dev) 5227i915_gem_cleanup_engines(struct drm_device *dev)
5039{ 5228{
5040 struct drm_i915_private *dev_priv = dev->dev_private; 5229 struct drm_i915_private *dev_priv = to_i915(dev);
5041 struct intel_engine_cs *engine; 5230 struct intel_engine_cs *engine;
5042 5231
5043 for_each_engine(engine, dev_priv) 5232 for_each_engine(engine, dev_priv)
5044 dev_priv->gt.cleanup_engine(engine); 5233 dev_priv->gt.cleanup_engine(engine);
5045
5046 if (i915.enable_execlists)
5047 /*
5048 * Neither the BIOS, ourselves or any other kernel
5049 * expects the system to be in execlists mode on startup,
5050 * so we need to reset the GPU back to legacy mode.
5051 */
5052 intel_gpu_reset(dev, ALL_ENGINES);
5053} 5234}
5054 5235
5055static void 5236static void
@@ -5062,7 +5243,7 @@ init_engine_lists(struct intel_engine_cs *engine)
5062void 5243void
5063i915_gem_load_init_fences(struct drm_i915_private *dev_priv) 5244i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
5064{ 5245{
5065 struct drm_device *dev = dev_priv->dev; 5246 struct drm_device *dev = &dev_priv->drm;
5066 5247
5067 if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) && 5248 if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
5068 !IS_CHERRYVIEW(dev_priv)) 5249 !IS_CHERRYVIEW(dev_priv))
@@ -5073,7 +5254,7 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
5073 else 5254 else
5074 dev_priv->num_fence_regs = 8; 5255 dev_priv->num_fence_regs = 8;
5075 5256
5076 if (intel_vgpu_active(dev)) 5257 if (intel_vgpu_active(dev_priv))
5077 dev_priv->num_fence_regs = 5258 dev_priv->num_fence_regs =
5078 I915_READ(vgtif_reg(avail_rs.fence_num)); 5259 I915_READ(vgtif_reg(avail_rs.fence_num));
5079 5260
@@ -5086,7 +5267,7 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
5086void 5267void
5087i915_gem_load_init(struct drm_device *dev) 5268i915_gem_load_init(struct drm_device *dev)
5088{ 5269{
5089 struct drm_i915_private *dev_priv = dev->dev_private; 5270 struct drm_i915_private *dev_priv = to_i915(dev);
5090 int i; 5271 int i;
5091 5272
5092 dev_priv->objects = 5273 dev_priv->objects =
@@ -5114,22 +5295,15 @@ i915_gem_load_init(struct drm_device *dev)
5114 init_engine_lists(&dev_priv->engine[i]); 5295 init_engine_lists(&dev_priv->engine[i]);
5115 for (i = 0; i < I915_MAX_NUM_FENCES; i++) 5296 for (i = 0; i < I915_MAX_NUM_FENCES; i++)
5116 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); 5297 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
5117 INIT_DELAYED_WORK(&dev_priv->mm.retire_work, 5298 INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
5118 i915_gem_retire_work_handler); 5299 i915_gem_retire_work_handler);
5119 INIT_DELAYED_WORK(&dev_priv->mm.idle_work, 5300 INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
5120 i915_gem_idle_work_handler); 5301 i915_gem_idle_work_handler);
5302 init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
5121 init_waitqueue_head(&dev_priv->gpu_error.reset_queue); 5303 init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
5122 5304
5123 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL; 5305 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
5124 5306
5125 /*
5126 * Set initial sequence number for requests.
5127 * Using this number allows the wraparound to happen early,
5128 * catching any obvious problems.
5129 */
5130 dev_priv->next_seqno = ((u32)~0 - 0x1100);
5131 dev_priv->last_seqno = ((u32)~0 - 0x1101);
5132
5133 INIT_LIST_HEAD(&dev_priv->mm.fence_list); 5307 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
5134 5308
5135 init_waitqueue_head(&dev_priv->pending_flip_queue); 5309 init_waitqueue_head(&dev_priv->pending_flip_queue);
@@ -5148,6 +5322,34 @@ void i915_gem_load_cleanup(struct drm_device *dev)
5148 kmem_cache_destroy(dev_priv->objects); 5322 kmem_cache_destroy(dev_priv->objects);
5149} 5323}
5150 5324
5325int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
5326{
5327 struct drm_i915_gem_object *obj;
5328
5329 /* Called just before we write the hibernation image.
5330 *
5331 * We need to update the domain tracking to reflect that the CPU
5332 * will be accessing all the pages to create and restore from the
5333 * hibernation, and so upon restoration those pages will be in the
5334 * CPU domain.
5335 *
5336 * To make sure the hibernation image contains the latest state,
5337 * we update that state just before writing out the image.
5338 */
5339
5340 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
5341 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
5342 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
5343 }
5344
5345 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
5346 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
5347 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
5348 }
5349
5350 return 0;
5351}
5352
5151void i915_gem_release(struct drm_device *dev, struct drm_file *file) 5353void i915_gem_release(struct drm_device *dev, struct drm_file *file)
5152{ 5354{
5153 struct drm_i915_file_private *file_priv = file->driver_priv; 5355 struct drm_i915_file_private *file_priv = file->driver_priv;
@@ -5187,7 +5389,7 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
5187 return -ENOMEM; 5389 return -ENOMEM;
5188 5390
5189 file->driver_priv = file_priv; 5391 file->driver_priv = file_priv;
5190 file_priv->dev_priv = dev->dev_private; 5392 file_priv->dev_priv = to_i915(dev);
5191 file_priv->file = file; 5393 file_priv->file = file;
5192 INIT_LIST_HEAD(&file_priv->rps.link); 5394 INIT_LIST_HEAD(&file_priv->rps.link);
5193 5395
@@ -5233,7 +5435,7 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
5233u64 i915_gem_obj_offset(struct drm_i915_gem_object *o, 5435u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
5234 struct i915_address_space *vm) 5436 struct i915_address_space *vm)
5235{ 5437{
5236 struct drm_i915_private *dev_priv = o->base.dev->dev_private; 5438 struct drm_i915_private *dev_priv = to_i915(o->base.dev);
5237 struct i915_vma *vma; 5439 struct i915_vma *vma;
5238 5440
5239 WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base); 5441 WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
@@ -5254,13 +5456,10 @@ u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
5254u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o, 5456u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
5255 const struct i915_ggtt_view *view) 5457 const struct i915_ggtt_view *view)
5256{ 5458{
5257 struct drm_i915_private *dev_priv = to_i915(o->base.dev);
5258 struct i915_ggtt *ggtt = &dev_priv->ggtt;
5259 struct i915_vma *vma; 5459 struct i915_vma *vma;
5260 5460
5261 list_for_each_entry(vma, &o->vma_list, obj_link) 5461 list_for_each_entry(vma, &o->vma_list, obj_link)
5262 if (vma->vm == &ggtt->base && 5462 if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
5263 i915_ggtt_view_equal(&vma->ggtt_view, view))
5264 return vma->node.start; 5463 return vma->node.start;
5265 5464
5266 WARN(1, "global vma for this object not found. (view=%u)\n", view->type); 5465 WARN(1, "global vma for this object not found. (view=%u)\n", view->type);
@@ -5286,12 +5485,10 @@ bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
5286bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o, 5485bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
5287 const struct i915_ggtt_view *view) 5486 const struct i915_ggtt_view *view)
5288{ 5487{
5289 struct drm_i915_private *dev_priv = to_i915(o->base.dev);
5290 struct i915_ggtt *ggtt = &dev_priv->ggtt;
5291 struct i915_vma *vma; 5488 struct i915_vma *vma;
5292 5489
5293 list_for_each_entry(vma, &o->vma_list, obj_link) 5490 list_for_each_entry(vma, &o->vma_list, obj_link)
5294 if (vma->vm == &ggtt->base && 5491 if (vma->is_ggtt &&
5295 i915_ggtt_view_equal(&vma->ggtt_view, view) && 5492 i915_ggtt_view_equal(&vma->ggtt_view, view) &&
5296 drm_mm_node_allocated(&vma->node)) 5493 drm_mm_node_allocated(&vma->node))
5297 return true; 5494 return true;
@@ -5310,23 +5507,18 @@ bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
5310 return false; 5507 return false;
5311} 5508}
5312 5509
5313unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o, 5510unsigned long i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
5314 struct i915_address_space *vm)
5315{ 5511{
5316 struct drm_i915_private *dev_priv = o->base.dev->dev_private;
5317 struct i915_vma *vma; 5512 struct i915_vma *vma;
5318 5513
5319 WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base); 5514 GEM_BUG_ON(list_empty(&o->vma_list));
5320
5321 BUG_ON(list_empty(&o->vma_list));
5322 5515
5323 list_for_each_entry(vma, &o->vma_list, obj_link) { 5516 list_for_each_entry(vma, &o->vma_list, obj_link) {
5324 if (vma->is_ggtt && 5517 if (vma->is_ggtt &&
5325 vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) 5518 vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
5326 continue;
5327 if (vma->vm == vm)
5328 return vma->node.size; 5519 return vma->node.size;
5329 } 5520 }
5521
5330 return 0; 5522 return 0;
5331} 5523}
5332 5524
@@ -5347,7 +5539,7 @@ i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
5347 struct page *page; 5539 struct page *page;
5348 5540
5349 /* Only default objects have per-page dirty tracking */ 5541 /* Only default objects have per-page dirty tracking */
5350 if (WARN_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0)) 5542 if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
5351 return NULL; 5543 return NULL;
5352 5544
5353 page = i915_gem_object_get_page(obj, n); 5545 page = i915_gem_object_get_page(obj, n);
@@ -5365,8 +5557,8 @@ i915_gem_object_create_from_data(struct drm_device *dev,
5365 size_t bytes; 5557 size_t bytes;
5366 int ret; 5558 int ret;
5367 5559
5368 obj = i915_gem_alloc_object(dev, round_up(size, PAGE_SIZE)); 5560 obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE));
5369 if (IS_ERR_OR_NULL(obj)) 5561 if (IS_ERR(obj))
5370 return obj; 5562 return obj;
5371 5563
5372 ret = i915_gem_object_set_to_cpu_domain(obj, true); 5564 ret = i915_gem_object_set_to_cpu_domain(obj, true);
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
index 7bf2f3f2968e..3752d5daa4b2 100644
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
@@ -134,9 +134,9 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
134 if (obj == NULL) { 134 if (obj == NULL) {
135 int ret; 135 int ret;
136 136
137 obj = i915_gem_alloc_object(pool->dev, size); 137 obj = i915_gem_object_create(pool->dev, size);
138 if (obj == NULL) 138 if (IS_ERR(obj))
139 return ERR_PTR(-ENOMEM); 139 return obj;
140 140
141 ret = i915_gem_object_get_pages(obj); 141 ret = i915_gem_object_get_pages(obj);
142 if (ret) 142 if (ret)
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index e5acc3916f75..3c97f0e7a003 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -90,6 +90,8 @@
90#include "i915_drv.h" 90#include "i915_drv.h"
91#include "i915_trace.h" 91#include "i915_trace.h"
92 92
93#define ALL_L3_SLICES(dev) (1 << NUM_L3_SLICES(dev)) - 1
94
93/* This is a HW constraint. The value below is the largest known requirement 95/* This is a HW constraint. The value below is the largest known requirement
94 * I've seen in a spec to date, and that was a workaround for a non-shipping 96 * I've seen in a spec to date, and that was a workaround for a non-shipping
95 * part. It should be safe to decrease this, but it's more future proof as is. 97 * part. It should be safe to decrease this, but it's more future proof as is.
@@ -97,28 +99,27 @@
97#define GEN6_CONTEXT_ALIGN (64<<10) 99#define GEN6_CONTEXT_ALIGN (64<<10)
98#define GEN7_CONTEXT_ALIGN 4096 100#define GEN7_CONTEXT_ALIGN 4096
99 101
100static size_t get_context_alignment(struct drm_device *dev) 102static size_t get_context_alignment(struct drm_i915_private *dev_priv)
101{ 103{
102 if (IS_GEN6(dev)) 104 if (IS_GEN6(dev_priv))
103 return GEN6_CONTEXT_ALIGN; 105 return GEN6_CONTEXT_ALIGN;
104 106
105 return GEN7_CONTEXT_ALIGN; 107 return GEN7_CONTEXT_ALIGN;
106} 108}
107 109
108static int get_context_size(struct drm_device *dev) 110static int get_context_size(struct drm_i915_private *dev_priv)
109{ 111{
110 struct drm_i915_private *dev_priv = dev->dev_private;
111 int ret; 112 int ret;
112 u32 reg; 113 u32 reg;
113 114
114 switch (INTEL_INFO(dev)->gen) { 115 switch (INTEL_GEN(dev_priv)) {
115 case 6: 116 case 6:
116 reg = I915_READ(CXT_SIZE); 117 reg = I915_READ(CXT_SIZE);
117 ret = GEN6_CXT_TOTAL_SIZE(reg) * 64; 118 ret = GEN6_CXT_TOTAL_SIZE(reg) * 64;
118 break; 119 break;
119 case 7: 120 case 7:
120 reg = I915_READ(GEN7_CXT_SIZE); 121 reg = I915_READ(GEN7_CXT_SIZE);
121 if (IS_HASWELL(dev)) 122 if (IS_HASWELL(dev_priv))
122 ret = HSW_CXT_TOTAL_SIZE; 123 ret = HSW_CXT_TOTAL_SIZE;
123 else 124 else
124 ret = GEN7_CXT_TOTAL_SIZE(reg) * 64; 125 ret = GEN7_CXT_TOTAL_SIZE(reg) * 64;
@@ -133,7 +134,7 @@ static int get_context_size(struct drm_device *dev)
133 return ret; 134 return ret;
134} 135}
135 136
136static void i915_gem_context_clean(struct intel_context *ctx) 137static void i915_gem_context_clean(struct i915_gem_context *ctx)
137{ 138{
138 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt; 139 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
139 struct i915_vma *vma, *next; 140 struct i915_vma *vma, *next;
@@ -150,13 +151,12 @@ static void i915_gem_context_clean(struct intel_context *ctx)
150 151
151void i915_gem_context_free(struct kref *ctx_ref) 152void i915_gem_context_free(struct kref *ctx_ref)
152{ 153{
153 struct intel_context *ctx = container_of(ctx_ref, typeof(*ctx), ref); 154 struct i915_gem_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
155 int i;
154 156
157 lockdep_assert_held(&ctx->i915->drm.struct_mutex);
155 trace_i915_context_free(ctx); 158 trace_i915_context_free(ctx);
156 159
157 if (i915.enable_execlists)
158 intel_lr_context_free(ctx);
159
160 /* 160 /*
161 * This context is going away and we need to remove all VMAs still 161 * This context is going away and we need to remove all VMAs still
162 * around. This is to handle imported shared objects for which 162 * around. This is to handle imported shared objects for which
@@ -166,9 +166,22 @@ void i915_gem_context_free(struct kref *ctx_ref)
166 166
167 i915_ppgtt_put(ctx->ppgtt); 167 i915_ppgtt_put(ctx->ppgtt);
168 168
169 if (ctx->legacy_hw_ctx.rcs_state) 169 for (i = 0; i < I915_NUM_ENGINES; i++) {
170 drm_gem_object_unreference(&ctx->legacy_hw_ctx.rcs_state->base); 170 struct intel_context *ce = &ctx->engine[i];
171
172 if (!ce->state)
173 continue;
174
175 WARN_ON(ce->pin_count);
176 if (ce->ringbuf)
177 intel_ringbuffer_free(ce->ringbuf);
178
179 drm_gem_object_unreference(&ce->state->base);
180 }
181
171 list_del(&ctx->link); 182 list_del(&ctx->link);
183
184 ida_simple_remove(&ctx->i915->context_hw_ida, ctx->hw_id);
172 kfree(ctx); 185 kfree(ctx);
173} 186}
174 187
@@ -178,9 +191,11 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
178 struct drm_i915_gem_object *obj; 191 struct drm_i915_gem_object *obj;
179 int ret; 192 int ret;
180 193
181 obj = i915_gem_alloc_object(dev, size); 194 lockdep_assert_held(&dev->struct_mutex);
182 if (obj == NULL) 195
183 return ERR_PTR(-ENOMEM); 196 obj = i915_gem_object_create(dev, size);
197 if (IS_ERR(obj))
198 return obj;
184 199
185 /* 200 /*
186 * Try to make the context utilize L3 as well as LLC. 201 * Try to make the context utilize L3 as well as LLC.
@@ -209,22 +224,52 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
209 return obj; 224 return obj;
210} 225}
211 226
212static struct intel_context * 227static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
228{
229 int ret;
230
231 ret = ida_simple_get(&dev_priv->context_hw_ida,
232 0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
233 if (ret < 0) {
234 /* Contexts are only released when no longer active.
235 * Flush any pending retires to hopefully release some
236 * stale contexts and try again.
237 */
238 i915_gem_retire_requests(dev_priv);
239 ret = ida_simple_get(&dev_priv->context_hw_ida,
240 0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
241 if (ret < 0)
242 return ret;
243 }
244
245 *out = ret;
246 return 0;
247}
248
249static struct i915_gem_context *
213__create_hw_context(struct drm_device *dev, 250__create_hw_context(struct drm_device *dev,
214 struct drm_i915_file_private *file_priv) 251 struct drm_i915_file_private *file_priv)
215{ 252{
216 struct drm_i915_private *dev_priv = dev->dev_private; 253 struct drm_i915_private *dev_priv = to_i915(dev);
217 struct intel_context *ctx; 254 struct i915_gem_context *ctx;
218 int ret; 255 int ret;
219 256
220 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 257 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
221 if (ctx == NULL) 258 if (ctx == NULL)
222 return ERR_PTR(-ENOMEM); 259 return ERR_PTR(-ENOMEM);
223 260
261 ret = assign_hw_id(dev_priv, &ctx->hw_id);
262 if (ret) {
263 kfree(ctx);
264 return ERR_PTR(ret);
265 }
266
224 kref_init(&ctx->ref); 267 kref_init(&ctx->ref);
225 list_add_tail(&ctx->link, &dev_priv->context_list); 268 list_add_tail(&ctx->link, &dev_priv->context_list);
226 ctx->i915 = dev_priv; 269 ctx->i915 = dev_priv;
227 270
271 ctx->ggtt_alignment = get_context_alignment(dev_priv);
272
228 if (dev_priv->hw_context_size) { 273 if (dev_priv->hw_context_size) {
229 struct drm_i915_gem_object *obj = 274 struct drm_i915_gem_object *obj =
230 i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size); 275 i915_gem_alloc_context_obj(dev, dev_priv->hw_context_size);
@@ -232,7 +277,7 @@ __create_hw_context(struct drm_device *dev,
232 ret = PTR_ERR(obj); 277 ret = PTR_ERR(obj);
233 goto err_out; 278 goto err_out;
234 } 279 }
235 ctx->legacy_hw_ctx.rcs_state = obj; 280 ctx->engine[RCS].state = obj;
236 } 281 }
237 282
238 /* Default context will never have a file_priv */ 283 /* Default context will never have a file_priv */
@@ -249,9 +294,13 @@ __create_hw_context(struct drm_device *dev,
249 /* NB: Mark all slices as needing a remap so that when the context first 294 /* NB: Mark all slices as needing a remap so that when the context first
250 * loads it will restore whatever remap state already exists. If there 295 * loads it will restore whatever remap state already exists. If there
251 * is no remap info, it will be a NOP. */ 296 * is no remap info, it will be a NOP. */
252 ctx->remap_slice = (1 << NUM_L3_SLICES(dev)) - 1; 297 ctx->remap_slice = ALL_L3_SLICES(dev_priv);
253 298
254 ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD; 299 ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD;
300 ctx->ring_size = 4 * PAGE_SIZE;
301 ctx->desc_template = GEN8_CTX_ADDRESSING_MODE(dev_priv) <<
302 GEN8_CTX_ADDRESSING_MODE_SHIFT;
303 ATOMIC_INIT_NOTIFIER_HEAD(&ctx->status_notifier);
255 304
256 return ctx; 305 return ctx;
257 306
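assign_hw_id() above treats running out of hardware context IDs as potentially transient: it retires outstanding requests once and retries ida_simple_get() before reporting failure. The same allocate/reclaim/retry idiom, reduced to a standalone helper with a caller-supplied reclaim hook (all names below are invented for illustration), is:

#include <linux/gfp.h>
#include <linux/idr.h>

#define EXAMPLE_MAX_ID 1024

static int example_alloc_id(struct ida *ida, void (*reclaim)(void),
			    unsigned int *out)
{
	int ret;

	ret = ida_simple_get(ida, 0, EXAMPLE_MAX_ID, GFP_KERNEL);
	if (ret < 0) {
		/* The ID space may only be full because of stale users; ask
		 * the caller to release some and retry exactly once. */
		reclaim();
		ret = ida_simple_get(ida, 0, EXAMPLE_MAX_ID, GFP_KERNEL);
		if (ret < 0)
			return ret;
	}

	*out = ret;
	return 0;
}
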
@@ -265,44 +314,27 @@ err_out:
265 * context state of the GPU for applications that don't utilize HW contexts, as 314 * context state of the GPU for applications that don't utilize HW contexts, as
266 * well as an idle case. 315 * well as an idle case.
267 */ 316 */
268static struct intel_context * 317static struct i915_gem_context *
269i915_gem_create_context(struct drm_device *dev, 318i915_gem_create_context(struct drm_device *dev,
270 struct drm_i915_file_private *file_priv) 319 struct drm_i915_file_private *file_priv)
271{ 320{
272 const bool is_global_default_ctx = file_priv == NULL; 321 struct i915_gem_context *ctx;
273 struct intel_context *ctx;
274 int ret = 0;
275 322
276 BUG_ON(!mutex_is_locked(&dev->struct_mutex)); 323 lockdep_assert_held(&dev->struct_mutex);
277 324
278 ctx = __create_hw_context(dev, file_priv); 325 ctx = __create_hw_context(dev, file_priv);
279 if (IS_ERR(ctx)) 326 if (IS_ERR(ctx))
280 return ctx; 327 return ctx;
281 328
282 if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state) {
283 /* We may need to do things with the shrinker which
284 * require us to immediately switch back to the default
285 * context. This can cause a problem as pinning the
286 * default context also requires GTT space which may not
287 * be available. To avoid this we always pin the default
288 * context.
289 */
290 ret = i915_gem_obj_ggtt_pin(ctx->legacy_hw_ctx.rcs_state,
291 get_context_alignment(dev), 0);
292 if (ret) {
293 DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
294 goto err_destroy;
295 }
296 }
297
298 if (USES_FULL_PPGTT(dev)) { 329 if (USES_FULL_PPGTT(dev)) {
299 struct i915_hw_ppgtt *ppgtt = i915_ppgtt_create(dev, file_priv); 330 struct i915_hw_ppgtt *ppgtt = i915_ppgtt_create(dev, file_priv);
300 331
301 if (IS_ERR_OR_NULL(ppgtt)) { 332 if (IS_ERR(ppgtt)) {
302 DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n", 333 DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
303 PTR_ERR(ppgtt)); 334 PTR_ERR(ppgtt));
304 ret = PTR_ERR(ppgtt); 335 idr_remove(&file_priv->context_idr, ctx->user_handle);
305 goto err_unpin; 336 i915_gem_context_unreference(ctx);
337 return ERR_CAST(ppgtt);
306 } 338 }
307 339
308 ctx->ppgtt = ppgtt; 340 ctx->ppgtt = ppgtt;
@@ -311,76 +343,102 @@ i915_gem_create_context(struct drm_device *dev,
311 trace_i915_context_create(ctx); 343 trace_i915_context_create(ctx);
312 344
313 return ctx; 345 return ctx;
346}
314 347
315err_unpin: 348/**
316 if (is_global_default_ctx && ctx->legacy_hw_ctx.rcs_state) 349 * i915_gem_context_create_gvt - create a GVT GEM context
317 i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state); 350 * @dev: drm device *
318err_destroy: 351 *
319 idr_remove(&file_priv->context_idr, ctx->user_handle); 352 * This function is used to create a GVT specific GEM context.
320 i915_gem_context_unreference(ctx); 353 *
321 return ERR_PTR(ret); 354 * Returns:
355 * pointer to i915_gem_context on success, error pointer if failed
356 *
357 */
358struct i915_gem_context *
359i915_gem_context_create_gvt(struct drm_device *dev)
360{
361 struct i915_gem_context *ctx;
362 int ret;
363
364 if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
365 return ERR_PTR(-ENODEV);
366
367 ret = i915_mutex_lock_interruptible(dev);
368 if (ret)
369 return ERR_PTR(ret);
370
371 ctx = i915_gem_create_context(dev, NULL);
372 if (IS_ERR(ctx))
373 goto out;
374
375 ctx->execlists_force_single_submission = true;
376 ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */
377out:
378 mutex_unlock(&dev->struct_mutex);
379 return ctx;
322} 380}
323 381
324static void i915_gem_context_unpin(struct intel_context *ctx, 382static void i915_gem_context_unpin(struct i915_gem_context *ctx,
325 struct intel_engine_cs *engine) 383 struct intel_engine_cs *engine)
326{ 384{
327 if (i915.enable_execlists) { 385 if (i915.enable_execlists) {
328 intel_lr_context_unpin(ctx, engine); 386 intel_lr_context_unpin(ctx, engine);
329 } else { 387 } else {
330 if (engine->id == RCS && ctx->legacy_hw_ctx.rcs_state) 388 struct intel_context *ce = &ctx->engine[engine->id];
331 i915_gem_object_ggtt_unpin(ctx->legacy_hw_ctx.rcs_state); 389
390 if (ce->state)
391 i915_gem_object_ggtt_unpin(ce->state);
392
332 i915_gem_context_unreference(ctx); 393 i915_gem_context_unreference(ctx);
333 } 394 }
334} 395}
335 396
336void i915_gem_context_reset(struct drm_device *dev) 397void i915_gem_context_reset(struct drm_device *dev)
337{ 398{
338 struct drm_i915_private *dev_priv = dev->dev_private; 399 struct drm_i915_private *dev_priv = to_i915(dev);
339 int i; 400
401 lockdep_assert_held(&dev->struct_mutex);
340 402
341 if (i915.enable_execlists) { 403 if (i915.enable_execlists) {
342 struct intel_context *ctx; 404 struct i915_gem_context *ctx;
343 405
344 list_for_each_entry(ctx, &dev_priv->context_list, link) 406 list_for_each_entry(ctx, &dev_priv->context_list, link)
345 intel_lr_context_reset(dev_priv, ctx); 407 intel_lr_context_reset(dev_priv, ctx);
346 } 408 }
347 409
348 for (i = 0; i < I915_NUM_ENGINES; i++) { 410 i915_gem_context_lost(dev_priv);
349 struct intel_engine_cs *engine = &dev_priv->engine[i];
350
351 if (engine->last_context) {
352 i915_gem_context_unpin(engine->last_context, engine);
353 engine->last_context = NULL;
354 }
355 }
356
357 /* Force the GPU state to be reinitialised on enabling */
358 dev_priv->kernel_context->legacy_hw_ctx.initialized = false;
359} 411}
360 412
361int i915_gem_context_init(struct drm_device *dev) 413int i915_gem_context_init(struct drm_device *dev)
362{ 414{
363 struct drm_i915_private *dev_priv = dev->dev_private; 415 struct drm_i915_private *dev_priv = to_i915(dev);
364 struct intel_context *ctx; 416 struct i915_gem_context *ctx;
365 417
366 /* Init should only be called once per module load. Eventually the 418 /* Init should only be called once per module load. Eventually the
367 * restriction on the context_disabled check can be loosened. */ 419 * restriction on the context_disabled check can be loosened. */
368 if (WARN_ON(dev_priv->kernel_context)) 420 if (WARN_ON(dev_priv->kernel_context))
369 return 0; 421 return 0;
370 422
371 if (intel_vgpu_active(dev) && HAS_LOGICAL_RING_CONTEXTS(dev)) { 423 if (intel_vgpu_active(dev_priv) &&
424 HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
372 if (!i915.enable_execlists) { 425 if (!i915.enable_execlists) {
373 DRM_INFO("Only EXECLIST mode is supported in vgpu.\n"); 426 DRM_INFO("Only EXECLIST mode is supported in vgpu.\n");
374 return -EINVAL; 427 return -EINVAL;
375 } 428 }
376 } 429 }
377 430
431 /* Using the simple ida interface, the max is limited by sizeof(int) */
432 BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX);
433 ida_init(&dev_priv->context_hw_ida);
434
378 if (i915.enable_execlists) { 435 if (i915.enable_execlists) {
379 /* NB: intentionally left blank. We will allocate our own 436 /* NB: intentionally left blank. We will allocate our own
380 * backing objects as we need them, thank you very much */ 437 * backing objects as we need them, thank you very much */
381 dev_priv->hw_context_size = 0; 438 dev_priv->hw_context_size = 0;
382 } else if (HAS_HW_CONTEXTS(dev)) { 439 } else if (HAS_HW_CONTEXTS(dev_priv)) {
383 dev_priv->hw_context_size = round_up(get_context_size(dev), 4096); 440 dev_priv->hw_context_size =
441 round_up(get_context_size(dev_priv), 4096);
384 if (dev_priv->hw_context_size > (1<<20)) { 442 if (dev_priv->hw_context_size > (1<<20)) {
385 DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n", 443 DRM_DEBUG_DRIVER("Disabling HW Contexts; invalid size %d\n",
386 dev_priv->hw_context_size); 444 dev_priv->hw_context_size);
@@ -403,67 +461,60 @@ int i915_gem_context_init(struct drm_device *dev)
403 return 0; 461 return 0;
404} 462}
405 463
406void i915_gem_context_fini(struct drm_device *dev) 464void i915_gem_context_lost(struct drm_i915_private *dev_priv)
407{ 465{
408 struct drm_i915_private *dev_priv = dev->dev_private; 466 struct intel_engine_cs *engine;
409 struct intel_context *dctx = dev_priv->kernel_context;
410 int i;
411
412 if (dctx->legacy_hw_ctx.rcs_state) {
413 /* The only known way to stop the gpu from accessing the hw context is
414 * to reset it. Do this as the very last operation to avoid confusing
415 * other code, leading to spurious errors. */
416 intel_gpu_reset(dev, ALL_ENGINES);
417 467
418 /* When default context is created and switched to, base object refcount 468 lockdep_assert_held(&dev_priv->drm.struct_mutex);
419 * will be 2 (+1 from object creation and +1 from do_switch()).
420 * i915_gem_context_fini() will be called after gpu_idle() has switched
421 * to default context. So we need to unreference the base object once
422 * to offset the do_switch part, so that i915_gem_context_unreference()
423 * can then free the base object correctly. */
424 WARN_ON(!dev_priv->engine[RCS].last_context);
425
426 i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
427 }
428
429 for (i = I915_NUM_ENGINES; --i >= 0;) {
430 struct intel_engine_cs *engine = &dev_priv->engine[i];
431 469
470 for_each_engine(engine, dev_priv) {
432 if (engine->last_context) { 471 if (engine->last_context) {
433 i915_gem_context_unpin(engine->last_context, engine); 472 i915_gem_context_unpin(engine->last_context, engine);
434 engine->last_context = NULL; 473 engine->last_context = NULL;
435 } 474 }
436 } 475 }
437 476
438 i915_gem_context_unreference(dctx); 477 /* Force the GPU state to be restored on enabling */
439 dev_priv->kernel_context = NULL; 478 if (!i915.enable_execlists) {
440} 479 struct i915_gem_context *ctx;
441 480
442int i915_gem_context_enable(struct drm_i915_gem_request *req) 481 list_for_each_entry(ctx, &dev_priv->context_list, link) {
443{ 482 if (!i915_gem_context_is_default(ctx))
444 struct intel_engine_cs *engine = req->engine; 483 continue;
445 int ret;
446 484
447 if (i915.enable_execlists) { 485 for_each_engine(engine, dev_priv)
448 if (engine->init_context == NULL) 486 ctx->engine[engine->id].initialised = false;
449 return 0;
450 487
451 ret = engine->init_context(req); 488 ctx->remap_slice = ALL_L3_SLICES(dev_priv);
452 } else 489 }
453 ret = i915_switch_context(req);
454 490
455 if (ret) { 491 for_each_engine(engine, dev_priv) {
456 DRM_ERROR("ring init context: %d\n", ret); 492 struct intel_context *kce =
457 return ret; 493 &dev_priv->kernel_context->engine[engine->id];
494
495 kce->initialised = true;
496 }
458 } 497 }
498}
459 499
460 return 0; 500void i915_gem_context_fini(struct drm_device *dev)
501{
502 struct drm_i915_private *dev_priv = to_i915(dev);
503 struct i915_gem_context *dctx = dev_priv->kernel_context;
504
505 lockdep_assert_held(&dev->struct_mutex);
506
507 i915_gem_context_unreference(dctx);
508 dev_priv->kernel_context = NULL;
509
510 ida_destroy(&dev_priv->context_hw_ida);
461} 511}
462 512
463static int context_idr_cleanup(int id, void *p, void *data) 513static int context_idr_cleanup(int id, void *p, void *data)
464{ 514{
465 struct intel_context *ctx = p; 515 struct i915_gem_context *ctx = p;
466 516
517 ctx->file_priv = ERR_PTR(-EBADF);
467 i915_gem_context_unreference(ctx); 518 i915_gem_context_unreference(ctx);
468 return 0; 519 return 0;
469} 520}
@@ -471,7 +522,7 @@ static int context_idr_cleanup(int id, void *p, void *data)
471int i915_gem_context_open(struct drm_device *dev, struct drm_file *file) 522int i915_gem_context_open(struct drm_device *dev, struct drm_file *file)
472{ 523{
473 struct drm_i915_file_private *file_priv = file->driver_priv; 524 struct drm_i915_file_private *file_priv = file->driver_priv;
474 struct intel_context *ctx; 525 struct i915_gem_context *ctx;
475 526
476 idr_init(&file_priv->context_idr); 527 idr_init(&file_priv->context_idr);
477 528
@@ -491,31 +542,22 @@ void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
491{ 542{
492 struct drm_i915_file_private *file_priv = file->driver_priv; 543 struct drm_i915_file_private *file_priv = file->driver_priv;
493 544
545 lockdep_assert_held(&dev->struct_mutex);
546
494 idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL); 547 idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
495 idr_destroy(&file_priv->context_idr); 548 idr_destroy(&file_priv->context_idr);
496} 549}
497 550
498struct intel_context *
499i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id)
500{
501 struct intel_context *ctx;
502
503 ctx = (struct intel_context *)idr_find(&file_priv->context_idr, id);
504 if (!ctx)
505 return ERR_PTR(-ENOENT);
506
507 return ctx;
508}
509
510static inline int 551static inline int
511mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags) 552mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
512{ 553{
554 struct drm_i915_private *dev_priv = req->i915;
513 struct intel_engine_cs *engine = req->engine; 555 struct intel_engine_cs *engine = req->engine;
514 u32 flags = hw_flags | MI_MM_SPACE_GTT; 556 u32 flags = hw_flags | MI_MM_SPACE_GTT;
515 const int num_rings = 557 const int num_rings =
516 /* Use an extended w/a on ivb+ if signalling from other rings */ 558 /* Use an extended w/a on ivb+ if signalling from other rings */
517 i915_semaphore_is_enabled(engine->dev) ? 559 i915_semaphore_is_enabled(dev_priv) ?
518 hweight32(INTEL_INFO(engine->dev)->ring_mask) - 1 : 560 hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1 :
519 0; 561 0;
520 int len, ret; 562 int len, ret;
521 563
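
The removed i915_gem_context_get() helper (and its replacement i915_gem_context_lookup(), which the ioctl paths later in this diff call — presumably now a header inline, not shown here) returns either a valid context pointer or an encoded errno such as ERR_PTR(-ENOENT), and callers branch on IS_ERR()/PTR_ERR(). A minimal, self-contained sketch of that convention, assuming only standard C; the kernel's real macros live in <linux/err.h>:

#include <errno.h>
#include <stdio.h>

/*
 * Userspace sketch of the kernel's error-pointer convention: small
 * negative errno values are folded into the top of the pointer range,
 * so one return value can carry either a valid pointer or an error.
 */
#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
    return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct ctx { int id; };

static struct ctx table[4] = { { 1 }, { 2 }, { 3 }, { 4 } };

/* Analogue of the lookup: valid pointer on success, ERR_PTR(-ENOENT)
 * when the id is unknown, so the caller never has to guess which it got. */
static struct ctx *lookup(unsigned int id)
{
    return id < 4 ? &table[id] : ERR_PTR(-ENOENT);
}

int main(void)
{
    struct ctx *c = lookup(7);

    if (IS_ERR(c))
        printf("lookup failed: %ld\n", PTR_ERR(c));
    else
        printf("found ctx %d\n", c->id);
    return 0;
}
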
@@ -524,21 +566,21 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
524 * explicitly, so we rely on the value at ring init, stored in 566 * explicitly, so we rely on the value at ring init, stored in
525 * itlb_before_ctx_switch. 567 * itlb_before_ctx_switch.
526 */ 568 */
527 if (IS_GEN6(engine->dev)) { 569 if (IS_GEN6(dev_priv)) {
528 ret = engine->flush(req, I915_GEM_GPU_DOMAINS, 0); 570 ret = engine->flush(req, I915_GEM_GPU_DOMAINS, 0);
529 if (ret) 571 if (ret)
530 return ret; 572 return ret;
531 } 573 }
532 574
533 /* These flags are for resource streamer on HSW+ */ 575 /* These flags are for resource streamer on HSW+ */
534 if (IS_HASWELL(engine->dev) || INTEL_INFO(engine->dev)->gen >= 8) 576 if (IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8)
535 flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN); 577 flags |= (HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN);
536 else if (INTEL_INFO(engine->dev)->gen < 8) 578 else if (INTEL_GEN(dev_priv) < 8)
537 flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN); 579 flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
538 580
539 581
540 len = 4; 582 len = 4;
541 if (INTEL_INFO(engine->dev)->gen >= 7) 583 if (INTEL_GEN(dev_priv) >= 7)
542 len += 2 + (num_rings ? 4*num_rings + 6 : 0); 584 len += 2 + (num_rings ? 4*num_rings + 6 : 0);
543 585
544 ret = intel_ring_begin(req, len); 586 ret = intel_ring_begin(req, len);
@@ -546,14 +588,14 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
546 return ret; 588 return ret;
547 589
548 /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */ 590 /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
549 if (INTEL_INFO(engine->dev)->gen >= 7) { 591 if (INTEL_GEN(dev_priv) >= 7) {
550 intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_DISABLE); 592 intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_DISABLE);
551 if (num_rings) { 593 if (num_rings) {
552 struct intel_engine_cs *signaller; 594 struct intel_engine_cs *signaller;
553 595
554 intel_ring_emit(engine, 596 intel_ring_emit(engine,
555 MI_LOAD_REGISTER_IMM(num_rings)); 597 MI_LOAD_REGISTER_IMM(num_rings));
556 for_each_engine(signaller, to_i915(engine->dev)) { 598 for_each_engine(signaller, dev_priv) {
557 if (signaller == engine) 599 if (signaller == engine)
558 continue; 600 continue;
559 601
@@ -568,7 +610,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
568 intel_ring_emit(engine, MI_NOOP); 610 intel_ring_emit(engine, MI_NOOP);
569 intel_ring_emit(engine, MI_SET_CONTEXT); 611 intel_ring_emit(engine, MI_SET_CONTEXT);
570 intel_ring_emit(engine, 612 intel_ring_emit(engine,
571 i915_gem_obj_ggtt_offset(req->ctx->legacy_hw_ctx.rcs_state) | 613 i915_gem_obj_ggtt_offset(req->ctx->engine[RCS].state) |
572 flags); 614 flags);
573 /* 615 /*
574 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP 616 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
@@ -576,14 +618,14 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
576 */ 618 */
577 intel_ring_emit(engine, MI_NOOP); 619 intel_ring_emit(engine, MI_NOOP);
578 620
579 if (INTEL_INFO(engine->dev)->gen >= 7) { 621 if (INTEL_GEN(dev_priv) >= 7) {
580 if (num_rings) { 622 if (num_rings) {
581 struct intel_engine_cs *signaller; 623 struct intel_engine_cs *signaller;
582 i915_reg_t last_reg = {}; /* keep gcc quiet */ 624 i915_reg_t last_reg = {}; /* keep gcc quiet */
583 625
584 intel_ring_emit(engine, 626 intel_ring_emit(engine,
585 MI_LOAD_REGISTER_IMM(num_rings)); 627 MI_LOAD_REGISTER_IMM(num_rings));
586 for_each_engine(signaller, to_i915(engine->dev)) { 628 for_each_engine(signaller, dev_priv) {
587 if (signaller == engine) 629 if (signaller == engine)
588 continue; 630 continue;
589 631
@@ -609,45 +651,83 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
609 return ret; 651 return ret;
610} 652}
611 653
612static inline bool skip_rcs_switch(struct intel_engine_cs *engine, 654static int remap_l3(struct drm_i915_gem_request *req, int slice)
613 struct intel_context *to) 655{
656 u32 *remap_info = req->i915->l3_parity.remap_info[slice];
657 struct intel_engine_cs *engine = req->engine;
658 int i, ret;
659
660 if (!remap_info)
661 return 0;
662
663 ret = intel_ring_begin(req, GEN7_L3LOG_SIZE/4 * 2 + 2);
664 if (ret)
665 return ret;
666
667 /*
668 * Note: We do not worry about the concurrent register cacheline hang
669 * here because no other code should access these registers other than
670 * at initialization time.
671 */
672 intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4));
673 for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
674 intel_ring_emit_reg(engine, GEN7_L3LOG(slice, i));
675 intel_ring_emit(engine, remap_info[i]);
676 }
677 intel_ring_emit(engine, MI_NOOP);
678 intel_ring_advance(engine);
679
680 return 0;
681}
682
683static inline bool skip_rcs_switch(struct i915_hw_ppgtt *ppgtt,
684 struct intel_engine_cs *engine,
685 struct i915_gem_context *to)
614{ 686{
615 if (to->remap_slice) 687 if (to->remap_slice)
616 return false; 688 return false;
617 689
618 if (!to->legacy_hw_ctx.initialized) 690 if (!to->engine[RCS].initialised)
619 return false; 691 return false;
620 692
621 if (to->ppgtt && 693 if (ppgtt && (intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
622 !(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings))
623 return false; 694 return false;
624 695
625 return to == engine->last_context; 696 return to == engine->last_context;
626} 697}
627 698
628static bool 699static bool
629needs_pd_load_pre(struct intel_engine_cs *engine, struct intel_context *to) 700needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt,
701 struct intel_engine_cs *engine,
702 struct i915_gem_context *to)
630{ 703{
631 if (!to->ppgtt) 704 if (!ppgtt)
632 return false; 705 return false;
633 706
707 /* Always load the ppgtt on first use */
708 if (!engine->last_context)
709 return true;
710
711 /* Same context without new entries, skip */
634 if (engine->last_context == to && 712 if (engine->last_context == to &&
635 !(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings)) 713 !(intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
636 return false; 714 return false;
637 715
638 if (engine->id != RCS) 716 if (engine->id != RCS)
639 return true; 717 return true;
640 718
641 if (INTEL_INFO(engine->dev)->gen < 8) 719 if (INTEL_GEN(engine->i915) < 8)
642 return true; 720 return true;
643 721
644 return false; 722 return false;
645} 723}
646 724
647static bool 725static bool
648needs_pd_load_post(struct intel_context *to, u32 hw_flags) 726needs_pd_load_post(struct i915_hw_ppgtt *ppgtt,
727 struct i915_gem_context *to,
728 u32 hw_flags)
649{ 729{
650 if (!to->ppgtt) 730 if (!ppgtt)
651 return false; 731 return false;
652 732
653 if (!IS_GEN8(to->i915)) 733 if (!IS_GEN8(to->i915))
@@ -661,18 +741,19 @@ needs_pd_load_post(struct intel_context *to, u32 hw_flags)
661 741
662static int do_rcs_switch(struct drm_i915_gem_request *req) 742static int do_rcs_switch(struct drm_i915_gem_request *req)
663{ 743{
664 struct intel_context *to = req->ctx; 744 struct i915_gem_context *to = req->ctx;
665 struct intel_engine_cs *engine = req->engine; 745 struct intel_engine_cs *engine = req->engine;
666 struct intel_context *from; 746 struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
747 struct i915_gem_context *from;
667 u32 hw_flags; 748 u32 hw_flags;
668 int ret, i; 749 int ret, i;
669 750
670 if (skip_rcs_switch(engine, to)) 751 if (skip_rcs_switch(ppgtt, engine, to))
671 return 0; 752 return 0;
672 753
673 /* Trying to pin first makes error handling easier. */ 754 /* Trying to pin first makes error handling easier. */
674 ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state, 755 ret = i915_gem_obj_ggtt_pin(to->engine[RCS].state,
675 get_context_alignment(engine->dev), 756 to->ggtt_alignment,
676 0); 757 0);
677 if (ret) 758 if (ret)
678 return ret; 759 return ret;
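
do_rcs_switch() above (and the legacy i915_switch_context() path later in this file) now resolves the address space once with `to->ppgtt ?: req->i915->mm.aliasing_ppgtt`, GCC's conditional operator with the middle operand omitted, so every later check uses one ppgtt pointer whether the context owns a full PPGTT or falls back to the shared aliasing one. A tiny sketch of that operator, assuming a GNU-C-compatible compiler; the struct below is a stand-in, not the driver type:

#include <stdio.h>

struct ppgtt { const char *name; };

int main(void)
{
    struct ppgtt aliasing = { "aliasing" };
    struct ppgtt full = { "full" };
    struct ppgtt *ctx_ppgtt = NULL;   /* context without its own PPGTT */

    /* GNU extension: "a ?: b" evaluates a once and yields it if non-zero,
     * otherwise b -- the same fallback used for the aliasing PPGTT. */
    struct ppgtt *vm = ctx_ppgtt ?: &aliasing;
    printf("%s\n", vm->name);         /* "aliasing" */

    ctx_ppgtt = &full;
    vm = ctx_ppgtt ?: &aliasing;
    printf("%s\n", vm->name);         /* "full" */
    return 0;
}
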
@@ -694,37 +775,32 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
694 * 775 *
695 * XXX: We need a real interface to do this instead of trickery. 776 * XXX: We need a real interface to do this instead of trickery.
696 */ 777 */
697 ret = i915_gem_object_set_to_gtt_domain(to->legacy_hw_ctx.rcs_state, false); 778 ret = i915_gem_object_set_to_gtt_domain(to->engine[RCS].state, false);
698 if (ret) 779 if (ret)
699 goto unpin_out; 780 goto unpin_out;
700 781
701 if (needs_pd_load_pre(engine, to)) { 782 if (needs_pd_load_pre(ppgtt, engine, to)) {
702 /* Older GENs and non render rings still want the load first, 783 /* Older GENs and non render rings still want the load first,
703 * "PP_DCLV followed by PP_DIR_BASE register through Load 784 * "PP_DCLV followed by PP_DIR_BASE register through Load
704 * Register Immediate commands in Ring Buffer before submitting 785 * Register Immediate commands in Ring Buffer before submitting
705 * a context."*/ 786 * a context."*/
706 trace_switch_mm(engine, to); 787 trace_switch_mm(engine, to);
707 ret = to->ppgtt->switch_mm(to->ppgtt, req); 788 ret = ppgtt->switch_mm(ppgtt, req);
708 if (ret) 789 if (ret)
709 goto unpin_out; 790 goto unpin_out;
710 } 791 }
711 792
712 if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to)) 793 if (!to->engine[RCS].initialised || i915_gem_context_is_default(to))
713 /* NB: If we inhibit the restore, the context is not allowed to 794 /* NB: If we inhibit the restore, the context is not allowed to
714 * die because future work may end up depending on valid address 795 * die because future work may end up depending on valid address
715 * space. This means we must enforce that a page table load 796 * space. This means we must enforce that a page table load
716 * occur when this occurs. */ 797 * occur when this occurs. */
717 hw_flags = MI_RESTORE_INHIBIT; 798 hw_flags = MI_RESTORE_INHIBIT;
718 else if (to->ppgtt && 799 else if (ppgtt && intel_engine_flag(engine) & ppgtt->pd_dirty_rings)
719 intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings)
720 hw_flags = MI_FORCE_RESTORE; 800 hw_flags = MI_FORCE_RESTORE;
721 else 801 else
722 hw_flags = 0; 802 hw_flags = 0;
723 803
724 /* We should never emit switch_mm more than once */
725 WARN_ON(needs_pd_load_pre(engine, to) &&
726 needs_pd_load_post(to, hw_flags));
727
728 if (to != from || (hw_flags & MI_FORCE_RESTORE)) { 804 if (to != from || (hw_flags & MI_FORCE_RESTORE)) {
729 ret = mi_set_context(req, hw_flags); 805 ret = mi_set_context(req, hw_flags);
730 if (ret) 806 if (ret)
@@ -738,8 +814,8 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
738 * MI_SET_CONTEXT instead of when the next seqno has completed. 814 * MI_SET_CONTEXT instead of when the next seqno has completed.
739 */ 815 */
740 if (from != NULL) { 816 if (from != NULL) {
741 from->legacy_hw_ctx.rcs_state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION; 817 from->engine[RCS].state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
742 i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->legacy_hw_ctx.rcs_state), req); 818 i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->engine[RCS].state), req);
743 /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the 819 /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
744 * whole damn pipeline, we don't need to explicitly mark the 820 * whole damn pipeline, we don't need to explicitly mark the
745 * object dirty. The only exception is that the context must be 821 * object dirty. The only exception is that the context must be
@@ -747,10 +823,10 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
747 * able to defer doing this until we know the object would be 823 * able to defer doing this until we know the object would be
748 * swapped, but there is no way to do that yet. 824 * swapped, but there is no way to do that yet.
749 */ 825 */
750 from->legacy_hw_ctx.rcs_state->dirty = 1; 826 from->engine[RCS].state->dirty = 1;
751 827
752 /* obj is kept alive until the next request by its active ref */ 828 /* obj is kept alive until the next request by its active ref */
753 i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state); 829 i915_gem_object_ggtt_unpin(from->engine[RCS].state);
754 i915_gem_context_unreference(from); 830 i915_gem_context_unreference(from);
755 } 831 }
756 i915_gem_context_reference(to); 832 i915_gem_context_reference(to);
@@ -759,9 +835,9 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
759 /* GEN8 does *not* require an explicit reload if the PDPs have been 835 /* GEN8 does *not* require an explicit reload if the PDPs have been
760 * setup, and we do not wish to move them. 836 * setup, and we do not wish to move them.
761 */ 837 */
762 if (needs_pd_load_post(to, hw_flags)) { 838 if (needs_pd_load_post(ppgtt, to, hw_flags)) {
763 trace_switch_mm(engine, to); 839 trace_switch_mm(engine, to);
764 ret = to->ppgtt->switch_mm(to->ppgtt, req); 840 ret = ppgtt->switch_mm(ppgtt, req);
765 /* The hardware context switch is emitted, but we haven't 841 /* The hardware context switch is emitted, but we haven't
766 * actually changed the state - so it's probably safe to bail 842 * actually changed the state - so it's probably safe to bail
767 * here. Still, let the user know something dangerous has 843 * here. Still, let the user know something dangerous has
@@ -771,33 +847,33 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
771 return ret; 847 return ret;
772 } 848 }
773 849
774 if (to->ppgtt) 850 if (ppgtt)
775 to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine); 851 ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
776 852
777 for (i = 0; i < MAX_L3_SLICES; i++) { 853 for (i = 0; i < MAX_L3_SLICES; i++) {
778 if (!(to->remap_slice & (1<<i))) 854 if (!(to->remap_slice & (1<<i)))
779 continue; 855 continue;
780 856
781 ret = i915_gem_l3_remap(req, i); 857 ret = remap_l3(req, i);
782 if (ret) 858 if (ret)
783 return ret; 859 return ret;
784 860
785 to->remap_slice &= ~(1<<i); 861 to->remap_slice &= ~(1<<i);
786 } 862 }
787 863
788 if (!to->legacy_hw_ctx.initialized) { 864 if (!to->engine[RCS].initialised) {
789 if (engine->init_context) { 865 if (engine->init_context) {
790 ret = engine->init_context(req); 866 ret = engine->init_context(req);
791 if (ret) 867 if (ret)
792 return ret; 868 return ret;
793 } 869 }
794 to->legacy_hw_ctx.initialized = true; 870 to->engine[RCS].initialised = true;
795 } 871 }
796 872
797 return 0; 873 return 0;
798 874
799unpin_out: 875unpin_out:
800 i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state); 876 i915_gem_object_ggtt_unpin(to->engine[RCS].state);
801 return ret; 877 return ret;
802} 878}
803 879
@@ -817,25 +893,24 @@ unpin_out:
817int i915_switch_context(struct drm_i915_gem_request *req) 893int i915_switch_context(struct drm_i915_gem_request *req)
818{ 894{
819 struct intel_engine_cs *engine = req->engine; 895 struct intel_engine_cs *engine = req->engine;
820 struct drm_i915_private *dev_priv = req->i915;
821 896
822 WARN_ON(i915.enable_execlists); 897 WARN_ON(i915.enable_execlists);
823 WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex)); 898 lockdep_assert_held(&req->i915->drm.struct_mutex);
824 899
825 if (engine->id != RCS || 900 if (!req->ctx->engine[engine->id].state) {
826 req->ctx->legacy_hw_ctx.rcs_state == NULL) { 901 struct i915_gem_context *to = req->ctx;
827 struct intel_context *to = req->ctx; 902 struct i915_hw_ppgtt *ppgtt =
903 to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
828 904
829 if (needs_pd_load_pre(engine, to)) { 905 if (needs_pd_load_pre(ppgtt, engine, to)) {
830 int ret; 906 int ret;
831 907
832 trace_switch_mm(engine, to); 908 trace_switch_mm(engine, to);
833 ret = to->ppgtt->switch_mm(to->ppgtt, req); 909 ret = ppgtt->switch_mm(ppgtt, req);
834 if (ret) 910 if (ret)
835 return ret; 911 return ret;
836 912
837 /* Doing a PD load always reloads the page dirs */ 913 ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
838 to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
839 } 914 }
840 915
841 if (to != engine->last_context) { 916 if (to != engine->last_context) {
@@ -861,7 +936,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
861{ 936{
862 struct drm_i915_gem_context_create *args = data; 937 struct drm_i915_gem_context_create *args = data;
863 struct drm_i915_file_private *file_priv = file->driver_priv; 938 struct drm_i915_file_private *file_priv = file->driver_priv;
864 struct intel_context *ctx; 939 struct i915_gem_context *ctx;
865 int ret; 940 int ret;
866 941
867 if (!contexts_enabled(dev)) 942 if (!contexts_enabled(dev))
@@ -890,7 +965,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
890{ 965{
891 struct drm_i915_gem_context_destroy *args = data; 966 struct drm_i915_gem_context_destroy *args = data;
892 struct drm_i915_file_private *file_priv = file->driver_priv; 967 struct drm_i915_file_private *file_priv = file->driver_priv;
893 struct intel_context *ctx; 968 struct i915_gem_context *ctx;
894 int ret; 969 int ret;
895 970
896 if (args->pad != 0) 971 if (args->pad != 0)
@@ -903,13 +978,13 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
903 if (ret) 978 if (ret)
904 return ret; 979 return ret;
905 980
906 ctx = i915_gem_context_get(file_priv, args->ctx_id); 981 ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
907 if (IS_ERR(ctx)) { 982 if (IS_ERR(ctx)) {
908 mutex_unlock(&dev->struct_mutex); 983 mutex_unlock(&dev->struct_mutex);
909 return PTR_ERR(ctx); 984 return PTR_ERR(ctx);
910 } 985 }
911 986
912 idr_remove(&ctx->file_priv->context_idr, ctx->user_handle); 987 idr_remove(&file_priv->context_idr, ctx->user_handle);
913 i915_gem_context_unreference(ctx); 988 i915_gem_context_unreference(ctx);
914 mutex_unlock(&dev->struct_mutex); 989 mutex_unlock(&dev->struct_mutex);
915 990
@@ -922,14 +997,14 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
922{ 997{
923 struct drm_i915_file_private *file_priv = file->driver_priv; 998 struct drm_i915_file_private *file_priv = file->driver_priv;
924 struct drm_i915_gem_context_param *args = data; 999 struct drm_i915_gem_context_param *args = data;
925 struct intel_context *ctx; 1000 struct i915_gem_context *ctx;
926 int ret; 1001 int ret;
927 1002
928 ret = i915_mutex_lock_interruptible(dev); 1003 ret = i915_mutex_lock_interruptible(dev);
929 if (ret) 1004 if (ret)
930 return ret; 1005 return ret;
931 1006
932 ctx = i915_gem_context_get(file_priv, args->ctx_id); 1007 ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
933 if (IS_ERR(ctx)) { 1008 if (IS_ERR(ctx)) {
934 mutex_unlock(&dev->struct_mutex); 1009 mutex_unlock(&dev->struct_mutex);
935 return PTR_ERR(ctx); 1010 return PTR_ERR(ctx);
@@ -951,6 +1026,9 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
951 else 1026 else
952 args->value = to_i915(dev)->ggtt.base.total; 1027 args->value = to_i915(dev)->ggtt.base.total;
953 break; 1028 break;
1029 case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
1030 args->value = !!(ctx->flags & CONTEXT_NO_ERROR_CAPTURE);
1031 break;
954 default: 1032 default:
955 ret = -EINVAL; 1033 ret = -EINVAL;
956 break; 1034 break;
@@ -965,14 +1043,14 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
965{ 1043{
966 struct drm_i915_file_private *file_priv = file->driver_priv; 1044 struct drm_i915_file_private *file_priv = file->driver_priv;
967 struct drm_i915_gem_context_param *args = data; 1045 struct drm_i915_gem_context_param *args = data;
968 struct intel_context *ctx; 1046 struct i915_gem_context *ctx;
969 int ret; 1047 int ret;
970 1048
971 ret = i915_mutex_lock_interruptible(dev); 1049 ret = i915_mutex_lock_interruptible(dev);
972 if (ret) 1050 if (ret)
973 return ret; 1051 return ret;
974 1052
975 ctx = i915_gem_context_get(file_priv, args->ctx_id); 1053 ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
976 if (IS_ERR(ctx)) { 1054 if (IS_ERR(ctx)) {
977 mutex_unlock(&dev->struct_mutex); 1055 mutex_unlock(&dev->struct_mutex);
978 return PTR_ERR(ctx); 1056 return PTR_ERR(ctx);
@@ -996,6 +1074,16 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
996 ctx->flags |= args->value ? CONTEXT_NO_ZEROMAP : 0; 1074 ctx->flags |= args->value ? CONTEXT_NO_ZEROMAP : 0;
997 } 1075 }
998 break; 1076 break;
1077 case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
1078 if (args->size) {
1079 ret = -EINVAL;
1080 } else {
1081 if (args->value)
1082 ctx->flags |= CONTEXT_NO_ERROR_CAPTURE;
1083 else
1084 ctx->flags &= ~CONTEXT_NO_ERROR_CAPTURE;
1085 }
1086 break;
999 default: 1087 default:
1000 ret = -EINVAL; 1088 ret = -EINVAL;
1001 break; 1089 break;
@@ -1004,3 +1092,42 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
1004 1092
1005 return ret; 1093 return ret;
1006} 1094}
1095
1096int i915_gem_context_reset_stats_ioctl(struct drm_device *dev,
1097 void *data, struct drm_file *file)
1098{
1099 struct drm_i915_private *dev_priv = to_i915(dev);
1100 struct drm_i915_reset_stats *args = data;
1101 struct i915_ctx_hang_stats *hs;
1102 struct i915_gem_context *ctx;
1103 int ret;
1104
1105 if (args->flags || args->pad)
1106 return -EINVAL;
1107
1108 if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
1109 return -EPERM;
1110
1111 ret = i915_mutex_lock_interruptible(dev);
1112 if (ret)
1113 return ret;
1114
1115 ctx = i915_gem_context_lookup(file->driver_priv, args->ctx_id);
1116 if (IS_ERR(ctx)) {
1117 mutex_unlock(&dev->struct_mutex);
1118 return PTR_ERR(ctx);
1119 }
1120 hs = &ctx->hang_stats;
1121
1122 if (capable(CAP_SYS_ADMIN))
1123 args->reset_count = i915_reset_count(&dev_priv->gpu_error);
1124 else
1125 args->reset_count = 0;
1126
1127 args->batch_active = hs->batch_active;
1128 args->batch_pending = hs->batch_pending;
1129
1130 mutex_unlock(&dev->struct_mutex);
1131
1132 return 0;
1133}
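
The new I915_CONTEXT_PARAM_NO_ERROR_CAPTURE parameter is stored as one bit in ctx->flags: setparam ORs the CONTEXT_NO_ERROR_CAPTURE bit in or masks it out, and getparam reports it with !!(flags & bit) so userspace sees exactly 0 or 1. A self-contained sketch of that set/clear/test pattern; the bit value below is illustrative, not the driver's define:

#include <stdio.h>

#define CTX_NO_ERROR_CAPTURE (1u << 1)   /* illustrative bit position */

static unsigned int flags;

static void set_param(int value)
{
    if (value)
        flags |= CTX_NO_ERROR_CAPTURE;    /* set the bit */
    else
        flags &= ~CTX_NO_ERROR_CAPTURE;   /* clear the bit */
}

static int get_param(void)
{
    /* Double negation collapses any non-zero mask result to exactly 1. */
    return !!(flags & CTX_NO_ERROR_CAPTURE);
}

int main(void)
{
    set_param(1);
    printf("%d\n", get_param());   /* 1 */
    set_param(0);
    printf("%d\n", get_param());   /* 0 */
    return 0;
}
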
diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.h b/drivers/gpu/drm/i915/i915_gem_dmabuf.h
new file mode 100644
index 000000000000..91315557e421
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.h
@@ -0,0 +1,45 @@
1/*
2 * Copyright 2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 */
24
25#ifndef _I915_GEM_DMABUF_H_
26#define _I915_GEM_DMABUF_H_
27
28#include <linux/dma-buf.h>
29
30static inline struct reservation_object *
31i915_gem_object_get_dmabuf_resv(struct drm_i915_gem_object *obj)
32{
33 struct dma_buf *dma_buf;
34
35 if (obj->base.dma_buf)
36 dma_buf = obj->base.dma_buf;
37 else if (obj->base.import_attach)
38 dma_buf = obj->base.import_attach->dmabuf;
39 else
40 return NULL;
41
42 return dma_buf->resv;
43}
44
45#endif
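
The new i915_gem_dmabuf.h helper returns the reservation object of whichever dma-buf is attached to a GEM object — the buffer it exported (obj->base.dma_buf) or the one it was imported through (import_attach->dmabuf) — and NULL when the object is not shared at all. A standalone sketch of the same "first available source, else NULL" selection; the struct names are stand-ins, not the DRM types:

#include <stdio.h>
#include <stddef.h>

struct resv { int id; };
struct shared_buf { struct resv *resv; };
struct attachment { struct shared_buf *buf; };

struct object {
    struct shared_buf *exported;   /* set if we exported the object */
    struct attachment *imported;   /* set if we imported it from elsewhere */
};

/* Mirror of the header's selection logic: prefer the exported buffer,
 * fall back to the import attachment, otherwise report "not shared". */
static struct resv *get_resv(const struct object *obj)
{
    struct shared_buf *buf;

    if (obj->exported)
        buf = obj->exported;
    else if (obj->imported)
        buf = obj->imported->buf;
    else
        return NULL;

    return buf->resv;
}

int main(void)
{
    struct resv r = { 42 };
    struct shared_buf b = { &r };
    struct object exported = { &b, NULL };
    struct object private  = { NULL, NULL };

    printf("%d\n", get_resv(&exported) ? get_resv(&exported)->id : -1); /* 42 */
    printf("%d\n", get_resv(&private)  ? get_resv(&private)->id  : -1); /* -1 */
    return 0;
}
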
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index ea1f8d1bd228..3c1280ec7ff6 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -33,6 +33,37 @@
33#include "intel_drv.h" 33#include "intel_drv.h"
34#include "i915_trace.h" 34#include "i915_trace.h"
35 35
36static int switch_to_pinned_context(struct drm_i915_private *dev_priv)
37{
38 struct intel_engine_cs *engine;
39
40 if (i915.enable_execlists)
41 return 0;
42
43 for_each_engine(engine, dev_priv) {
44 struct drm_i915_gem_request *req;
45 int ret;
46
47 if (engine->last_context == NULL)
48 continue;
49
50 if (engine->last_context == dev_priv->kernel_context)
51 continue;
52
53 req = i915_gem_request_alloc(engine, dev_priv->kernel_context);
54 if (IS_ERR(req))
55 return PTR_ERR(req);
56
57 ret = i915_switch_context(req);
58 i915_add_request_no_flush(req);
59 if (ret)
60 return ret;
61 }
62
63 return 0;
64}
65
66
36static bool 67static bool
37mark_free(struct i915_vma *vma, struct list_head *unwind) 68mark_free(struct i915_vma *vma, struct list_head *unwind)
38{ 69{
@@ -150,11 +181,19 @@ none:
150 181
151 /* Only idle the GPU and repeat the search once */ 182 /* Only idle the GPU and repeat the search once */
152 if (pass++ == 0) { 183 if (pass++ == 0) {
153 ret = i915_gpu_idle(dev); 184 struct drm_i915_private *dev_priv = to_i915(dev);
185
186 if (i915_is_ggtt(vm)) {
187 ret = switch_to_pinned_context(dev_priv);
188 if (ret)
189 return ret;
190 }
191
192 ret = i915_gem_wait_for_idle(dev_priv);
154 if (ret) 193 if (ret)
155 return ret; 194 return ret;
156 195
157 i915_gem_retire_requests(dev); 196 i915_gem_retire_requests(dev_priv);
158 goto search_again; 197 goto search_again;
159 } 198 }
160 199
@@ -261,11 +300,19 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
261 trace_i915_gem_evict_vm(vm); 300 trace_i915_gem_evict_vm(vm);
262 301
263 if (do_idle) { 302 if (do_idle) {
264 ret = i915_gpu_idle(vm->dev); 303 struct drm_i915_private *dev_priv = to_i915(vm->dev);
304
305 if (i915_is_ggtt(vm)) {
306 ret = switch_to_pinned_context(dev_priv);
307 if (ret)
308 return ret;
309 }
310
311 ret = i915_gem_wait_for_idle(dev_priv);
265 if (ret) 312 if (ret)
266 return ret; 313 return ret;
267 314
268 i915_gem_retire_requests(vm->dev); 315 i915_gem_retire_requests(dev_priv);
269 316
270 WARN_ON(!list_empty(&vm->active_list)); 317 WARN_ON(!list_empty(&vm->active_list));
271 } 318 }
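
The eviction paths keep their "search, then idle once and search again" shape, but before idling a GGTT eviction they now switch every engine back to the pinned kernel context so user contexts themselves become evictable, and they call i915_gem_wait_for_idle()/i915_gem_retire_requests() on dev_priv instead of the old i915_gpu_idle(dev). A small standalone sketch of that retry-once control flow; the helpers here are placeholders, not the driver functions:

#include <stdio.h>
#include <stdbool.h>

/* Placeholders standing in for the real search / idle / retire steps. */
static int attempts_needed = 2;          /* pretend the second pass succeeds */

static bool try_to_find_space(void)        { return --attempts_needed <= 0; }
static int  switch_to_pinned_context(void) { puts("switch to kernel ctx"); return 0; }
static int  wait_for_idle(void)            { puts("wait for idle"); return 0; }
static void retire_requests(void)          { puts("retire requests"); }

static int evict_something(bool is_ggtt)
{
    int pass = 0;

search_again:
    if (try_to_find_space())
        return 0;

    /* Only idle the GPU and repeat the search once. */
    if (pass++ == 0) {
        if (is_ggtt && switch_to_pinned_context())
            return -1;
        if (wait_for_idle())
            return -1;
        retire_requests();
        goto search_again;
    }

    return -1;                           /* -ENOSPC in the real code */
}

int main(void)
{
    printf("result: %d\n", evict_something(true));
    return 0;
}

Moving the engines onto the kernel context first matters because a context whose state object is still the engine's last_context is pinned and cannot be evicted, which would defeat the idle-and-retry pass.
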
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 33df74d98269..1978633e7549 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -714,7 +714,7 @@ eb_vma_misplaced(struct i915_vma *vma)
714static int 714static int
715i915_gem_execbuffer_reserve(struct intel_engine_cs *engine, 715i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
716 struct list_head *vmas, 716 struct list_head *vmas,
717 struct intel_context *ctx, 717 struct i915_gem_context *ctx,
718 bool *need_relocs) 718 bool *need_relocs)
719{ 719{
720 struct drm_i915_gem_object *obj; 720 struct drm_i915_gem_object *obj;
@@ -722,7 +722,7 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
722 struct i915_address_space *vm; 722 struct i915_address_space *vm;
723 struct list_head ordered_vmas; 723 struct list_head ordered_vmas;
724 struct list_head pinned_vmas; 724 struct list_head pinned_vmas;
725 bool has_fenced_gpu_access = INTEL_INFO(engine->dev)->gen < 4; 725 bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4;
726 int retry; 726 int retry;
727 727
728 i915_gem_retire_requests_ring(engine); 728 i915_gem_retire_requests_ring(engine);
@@ -826,7 +826,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
826 struct intel_engine_cs *engine, 826 struct intel_engine_cs *engine,
827 struct eb_vmas *eb, 827 struct eb_vmas *eb,
828 struct drm_i915_gem_exec_object2 *exec, 828 struct drm_i915_gem_exec_object2 *exec,
829 struct intel_context *ctx) 829 struct i915_gem_context *ctx)
830{ 830{
831 struct drm_i915_gem_relocation_entry *reloc; 831 struct drm_i915_gem_relocation_entry *reloc;
832 struct i915_address_space *vm; 832 struct i915_address_space *vm;
@@ -963,7 +963,7 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
963 } 963 }
964 964
965 if (flush_chipset) 965 if (flush_chipset)
966 i915_gem_chipset_flush(req->engine->dev); 966 i915_gem_chipset_flush(req->engine->i915);
967 967
968 if (flush_domains & I915_GEM_DOMAIN_GTT) 968 if (flush_domains & I915_GEM_DOMAIN_GTT)
969 wmb(); 969 wmb();
@@ -1063,17 +1063,17 @@ validate_exec_list(struct drm_device *dev,
1063 return 0; 1063 return 0;
1064} 1064}
1065 1065
1066static struct intel_context * 1066static struct i915_gem_context *
1067i915_gem_validate_context(struct drm_device *dev, struct drm_file *file, 1067i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
1068 struct intel_engine_cs *engine, const u32 ctx_id) 1068 struct intel_engine_cs *engine, const u32 ctx_id)
1069{ 1069{
1070 struct intel_context *ctx = NULL; 1070 struct i915_gem_context *ctx = NULL;
1071 struct i915_ctx_hang_stats *hs; 1071 struct i915_ctx_hang_stats *hs;
1072 1072
1073 if (engine->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE) 1073 if (engine->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
1074 return ERR_PTR(-EINVAL); 1074 return ERR_PTR(-EINVAL);
1075 1075
1076 ctx = i915_gem_context_get(file->driver_priv, ctx_id); 1076 ctx = i915_gem_context_lookup(file->driver_priv, ctx_id);
1077 if (IS_ERR(ctx)) 1077 if (IS_ERR(ctx))
1078 return ctx; 1078 return ctx;
1079 1079
@@ -1083,14 +1083,6 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
1083 return ERR_PTR(-EIO); 1083 return ERR_PTR(-EIO);
1084 } 1084 }
1085 1085
1086 if (i915.enable_execlists && !ctx->engine[engine->id].state) {
1087 int ret = intel_lr_context_deferred_alloc(ctx, engine);
1088 if (ret) {
1089 DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret);
1090 return ERR_PTR(ret);
1091 }
1092 }
1093
1094 return ctx; 1086 return ctx;
1095} 1087}
1096 1088
@@ -1125,7 +1117,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
1125 if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) { 1117 if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
1126 i915_gem_request_assign(&obj->last_fenced_req, req); 1118 i915_gem_request_assign(&obj->last_fenced_req, req);
1127 if (entry->flags & __EXEC_OBJECT_HAS_FENCE) { 1119 if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
1128 struct drm_i915_private *dev_priv = to_i915(engine->dev); 1120 struct drm_i915_private *dev_priv = engine->i915;
1129 list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list, 1121 list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
1130 &dev_priv->mm.fence_list); 1122 &dev_priv->mm.fence_list);
1131 } 1123 }
@@ -1150,7 +1142,7 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
1150 struct drm_i915_gem_request *req) 1142 struct drm_i915_gem_request *req)
1151{ 1143{
1152 struct intel_engine_cs *engine = req->engine; 1144 struct intel_engine_cs *engine = req->engine;
1153 struct drm_i915_private *dev_priv = dev->dev_private; 1145 struct drm_i915_private *dev_priv = to_i915(dev);
1154 int ret, i; 1146 int ret, i;
1155 1147
1156 if (!IS_GEN7(dev) || engine != &dev_priv->engine[RCS]) { 1148 if (!IS_GEN7(dev) || engine != &dev_priv->engine[RCS]) {
@@ -1233,7 +1225,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
1233{ 1225{
1234 struct drm_device *dev = params->dev; 1226 struct drm_device *dev = params->dev;
1235 struct intel_engine_cs *engine = params->engine; 1227 struct intel_engine_cs *engine = params->engine;
1236 struct drm_i915_private *dev_priv = dev->dev_private; 1228 struct drm_i915_private *dev_priv = to_i915(dev);
1237 u64 exec_start, exec_len; 1229 u64 exec_start, exec_len;
1238 int instp_mode; 1230 int instp_mode;
1239 u32 instp_mask; 1231 u32 instp_mask;
@@ -1336,10 +1328,10 @@ gen8_dispatch_bsd_ring(struct drm_i915_private *dev_priv, struct drm_file *file)
1336 /* Check whether the file_priv has already selected one ring. */ 1328 /* Check whether the file_priv has already selected one ring. */
1337 if ((int)file_priv->bsd_ring < 0) { 1329 if ((int)file_priv->bsd_ring < 0) {
1338 /* If not, use the ping-pong mechanism to select one. */ 1330 /* If not, use the ping-pong mechanism to select one. */
1339 mutex_lock(&dev_priv->dev->struct_mutex); 1331 mutex_lock(&dev_priv->drm.struct_mutex);
1340 file_priv->bsd_ring = dev_priv->mm.bsd_ring_dispatch_index; 1332 file_priv->bsd_ring = dev_priv->mm.bsd_ring_dispatch_index;
1341 dev_priv->mm.bsd_ring_dispatch_index ^= 1; 1333 dev_priv->mm.bsd_ring_dispatch_index ^= 1;
1342 mutex_unlock(&dev_priv->dev->struct_mutex); 1334 mutex_unlock(&dev_priv->drm.struct_mutex);
1343 } 1335 }
1344 1336
1345 return file_priv->bsd_ring; 1337 return file_priv->bsd_ring;
@@ -1436,7 +1428,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1436 struct drm_i915_gem_object *batch_obj; 1428 struct drm_i915_gem_object *batch_obj;
1437 struct drm_i915_gem_exec_object2 shadow_exec_entry; 1429 struct drm_i915_gem_exec_object2 shadow_exec_entry;
1438 struct intel_engine_cs *engine; 1430 struct intel_engine_cs *engine;
1439 struct intel_context *ctx; 1431 struct i915_gem_context *ctx;
1440 struct i915_address_space *vm; 1432 struct i915_address_space *vm;
1441 struct i915_execbuffer_params params_master; /* XXX: will be removed later */ 1433 struct i915_execbuffer_params params_master; /* XXX: will be removed later */
1442 struct i915_execbuffer_params *params = &params_master; 1434 struct i915_execbuffer_params *params = &params_master;
@@ -1454,7 +1446,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1454 1446
1455 dispatch_flags = 0; 1447 dispatch_flags = 0;
1456 if (args->flags & I915_EXEC_SECURE) { 1448 if (args->flags & I915_EXEC_SECURE) {
1457 if (!file->is_master || !capable(CAP_SYS_ADMIN)) 1449 if (!drm_is_current_master(file) || !capable(CAP_SYS_ADMIN))
1458 return -EPERM; 1450 return -EPERM;
1459 1451
1460 dispatch_flags |= I915_DISPATCH_SECURE; 1452 dispatch_flags |= I915_DISPATCH_SECURE;
@@ -1485,6 +1477,12 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1485 dispatch_flags |= I915_DISPATCH_RS; 1477 dispatch_flags |= I915_DISPATCH_RS;
1486 } 1478 }
1487 1479
1480 /* Take a local wakeref for preparing to dispatch the execbuf as
1481 * we expect to access the hardware fairly frequently in the
1482 * process. Upon first dispatch, we acquire another prolonged
1483 * wakeref that we hold until the GPU has been idle for at least
1484 * 100ms.
1485 */
1488 intel_runtime_pm_get(dev_priv); 1486 intel_runtime_pm_get(dev_priv);
1489 1487
1490 ret = i915_mutex_lock_interruptible(dev); 1488 ret = i915_mutex_lock_interruptible(dev);
@@ -1561,7 +1559,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1561 batch_obj, 1559 batch_obj,
1562 args->batch_start_offset, 1560 args->batch_start_offset,
1563 args->batch_len, 1561 args->batch_len,
1564 file->is_master); 1562 drm_is_current_master(file));
1565 if (IS_ERR(parsed_batch_obj)) { 1563 if (IS_ERR(parsed_batch_obj)) {
1566 ret = PTR_ERR(parsed_batch_obj); 1564 ret = PTR_ERR(parsed_batch_obj);
1567 goto err; 1565 goto err;
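
gen8_dispatch_bsd_ring() keeps its ping-pong assignment: the first time a file submits, it takes the current dispatch index under struct_mutex (now reached via dev_priv->drm) and XOR-toggles the index for the next file, after which the choice sticks for that file. A self-contained sketch of that two-way round-robin, with locking elided and illustrative names:

#include <stdio.h>

static int bsd_dispatch_index;           /* toggles 0, 1, 0, 1, ... */

struct file_priv {
    int bsd_ring;                        /* -1 until a ring has been chosen */
};

static int pick_bsd_ring(struct file_priv *fp)
{
    if (fp->bsd_ring < 0) {
        /* First submission from this file: take the current index and
         * flip it so the next new file gets the other engine. */
        fp->bsd_ring = bsd_dispatch_index;
        bsd_dispatch_index ^= 1;
    }
    return fp->bsd_ring;
}

int main(void)
{
    struct file_priv a = { -1 }, b = { -1 };

    printf("a -> %d\n", pick_bsd_ring(&a));  /* 0 */
    printf("b -> %d\n", pick_bsd_ring(&b));  /* 1 */
    printf("a -> %d\n", pick_bsd_ring(&a));  /* still 0: choice is sticky */
    return 0;
}
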
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
index a2b938ec01a7..251d7a95af89 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence.c
@@ -58,7 +58,7 @@
58static void i965_write_fence_reg(struct drm_device *dev, int reg, 58static void i965_write_fence_reg(struct drm_device *dev, int reg,
59 struct drm_i915_gem_object *obj) 59 struct drm_i915_gem_object *obj)
60{ 60{
61 struct drm_i915_private *dev_priv = dev->dev_private; 61 struct drm_i915_private *dev_priv = to_i915(dev);
62 i915_reg_t fence_reg_lo, fence_reg_hi; 62 i915_reg_t fence_reg_lo, fence_reg_hi;
63 int fence_pitch_shift; 63 int fence_pitch_shift;
64 64
@@ -117,7 +117,7 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
117static void i915_write_fence_reg(struct drm_device *dev, int reg, 117static void i915_write_fence_reg(struct drm_device *dev, int reg,
118 struct drm_i915_gem_object *obj) 118 struct drm_i915_gem_object *obj)
119{ 119{
120 struct drm_i915_private *dev_priv = dev->dev_private; 120 struct drm_i915_private *dev_priv = to_i915(dev);
121 u32 val; 121 u32 val;
122 122
123 if (obj) { 123 if (obj) {
@@ -156,7 +156,7 @@ static void i915_write_fence_reg(struct drm_device *dev, int reg,
156static void i830_write_fence_reg(struct drm_device *dev, int reg, 156static void i830_write_fence_reg(struct drm_device *dev, int reg,
157 struct drm_i915_gem_object *obj) 157 struct drm_i915_gem_object *obj)
158{ 158{
159 struct drm_i915_private *dev_priv = dev->dev_private; 159 struct drm_i915_private *dev_priv = to_i915(dev);
160 uint32_t val; 160 uint32_t val;
161 161
162 if (obj) { 162 if (obj) {
@@ -193,7 +193,7 @@ inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
193static void i915_gem_write_fence(struct drm_device *dev, int reg, 193static void i915_gem_write_fence(struct drm_device *dev, int reg,
194 struct drm_i915_gem_object *obj) 194 struct drm_i915_gem_object *obj)
195{ 195{
196 struct drm_i915_private *dev_priv = dev->dev_private; 196 struct drm_i915_private *dev_priv = to_i915(dev);
197 197
198 /* Ensure that all CPU reads are completed before installing a fence 198 /* Ensure that all CPU reads are completed before installing a fence
199 * and all writes before removing the fence. 199 * and all writes before removing the fence.
@@ -229,7 +229,7 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
229 struct drm_i915_fence_reg *fence, 229 struct drm_i915_fence_reg *fence,
230 bool enable) 230 bool enable)
231{ 231{
232 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 232 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
233 int reg = fence_number(dev_priv, fence); 233 int reg = fence_number(dev_priv, fence);
234 234
235 i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL); 235 i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
@@ -286,7 +286,7 @@ i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
286int 286int
287i915_gem_object_put_fence(struct drm_i915_gem_object *obj) 287i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
288{ 288{
289 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 289 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
290 struct drm_i915_fence_reg *fence; 290 struct drm_i915_fence_reg *fence;
291 int ret; 291 int ret;
292 292
@@ -311,7 +311,7 @@ i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
311static struct drm_i915_fence_reg * 311static struct drm_i915_fence_reg *
312i915_find_fence_reg(struct drm_device *dev) 312i915_find_fence_reg(struct drm_device *dev)
313{ 313{
314 struct drm_i915_private *dev_priv = dev->dev_private; 314 struct drm_i915_private *dev_priv = to_i915(dev);
315 struct drm_i915_fence_reg *reg, *avail; 315 struct drm_i915_fence_reg *reg, *avail;
316 int i; 316 int i;
317 317
@@ -367,7 +367,7 @@ int
367i915_gem_object_get_fence(struct drm_i915_gem_object *obj) 367i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
368{ 368{
369 struct drm_device *dev = obj->base.dev; 369 struct drm_device *dev = obj->base.dev;
370 struct drm_i915_private *dev_priv = dev->dev_private; 370 struct drm_i915_private *dev_priv = to_i915(dev);
371 bool enable = obj->tiling_mode != I915_TILING_NONE; 371 bool enable = obj->tiling_mode != I915_TILING_NONE;
372 struct drm_i915_fence_reg *reg; 372 struct drm_i915_fence_reg *reg;
373 int ret; 373 int ret;
@@ -433,7 +433,7 @@ bool
433i915_gem_object_pin_fence(struct drm_i915_gem_object *obj) 433i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
434{ 434{
435 if (obj->fence_reg != I915_FENCE_REG_NONE) { 435 if (obj->fence_reg != I915_FENCE_REG_NONE) {
436 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 436 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
437 struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj); 437 struct i915_vma *ggtt_vma = i915_gem_obj_to_ggtt(obj);
438 438
439 WARN_ON(!ggtt_vma || 439 WARN_ON(!ggtt_vma ||
@@ -457,7 +457,7 @@ void
457i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj) 457i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
458{ 458{
459 if (obj->fence_reg != I915_FENCE_REG_NONE) { 459 if (obj->fence_reg != I915_FENCE_REG_NONE) {
460 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 460 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
461 WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0); 461 WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
462 dev_priv->fence_regs[obj->fence_reg].pin_count--; 462 dev_priv->fence_regs[obj->fence_reg].pin_count--;
463 } 463 }
@@ -472,7 +472,7 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
472 */ 472 */
473void i915_gem_restore_fences(struct drm_device *dev) 473void i915_gem_restore_fences(struct drm_device *dev)
474{ 474{
475 struct drm_i915_private *dev_priv = dev->dev_private; 475 struct drm_i915_private *dev_priv = to_i915(dev);
476 int i; 476 int i;
477 477
478 for (i = 0; i < dev_priv->num_fence_regs; i++) { 478 for (i = 0; i < dev_priv->num_fence_regs; i++) {
@@ -549,7 +549,7 @@ void i915_gem_restore_fences(struct drm_device *dev)
549void 549void
550i915_gem_detect_bit_6_swizzle(struct drm_device *dev) 550i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
551{ 551{
552 struct drm_i915_private *dev_priv = dev->dev_private; 552 struct drm_i915_private *dev_priv = to_i915(dev);
553 uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; 553 uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
554 uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; 554 uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
555 555
@@ -745,15 +745,15 @@ i915_gem_swizzle_page(struct page *page)
745void 745void
746i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj) 746i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
747{ 747{
748 struct sg_page_iter sg_iter; 748 struct sgt_iter sgt_iter;
749 struct page *page;
749 int i; 750 int i;
750 751
751 if (obj->bit_17 == NULL) 752 if (obj->bit_17 == NULL)
752 return; 753 return;
753 754
754 i = 0; 755 i = 0;
755 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { 756 for_each_sgt_page(page, sgt_iter, obj->pages) {
756 struct page *page = sg_page_iter_page(&sg_iter);
757 char new_bit_17 = page_to_phys(page) >> 17; 757 char new_bit_17 = page_to_phys(page) >> 17;
758 if ((new_bit_17 & 0x1) != 758 if ((new_bit_17 & 0x1) !=
759 (test_bit(i, obj->bit_17) != 0)) { 759 (test_bit(i, obj->bit_17) != 0)) {
@@ -775,7 +775,8 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
775void 775void
776i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj) 776i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
777{ 777{
778 struct sg_page_iter sg_iter; 778 struct sgt_iter sgt_iter;
779 struct page *page;
779 int page_count = obj->base.size >> PAGE_SHIFT; 780 int page_count = obj->base.size >> PAGE_SHIFT;
780 int i; 781 int i;
781 782
@@ -790,8 +791,9 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
790 } 791 }
791 792
792 i = 0; 793 i = 0;
793 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { 794
794 if (page_to_phys(sg_page_iter_page(&sg_iter)) & (1 << 17)) 795 for_each_sgt_page(page, sgt_iter, obj->pages) {
796 if (page_to_phys(page) & (1 << 17))
795 __set_bit(i, obj->bit_17); 797 __set_bit(i, obj->bit_17);
796 else 798 else
797 __clear_bit(i, obj->bit_17); 799 __clear_bit(i, obj->bit_17);
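
i915_gem_object_save_bit_17_swizzle() now walks the object's pages with the new for_each_sgt_page() iterator, but the bookkeeping is unchanged: for each page it records in a per-object bitmap whether bit 17 of the physical address is set, so swizzling can be reapplied after the pages come back from swap. A minimal sketch of that recording step with plain addresses and a small bitmap, in standard C only; the kernel iterator and __set_bit()/__clear_bit() are not reproduced:

#include <stdio.h>
#include <stdint.h>

#define NPAGES 4

static unsigned long bit17_bitmap;       /* one bit of state per page */

static void save_bit17(const uint64_t phys[], int npages)
{
    int i;

    for (i = 0; i < npages; i++) {
        if (phys[i] & (1ull << 17))
            bit17_bitmap |= 1ul << i;    /* __set_bit() analogue */
        else
            bit17_bitmap &= ~(1ul << i); /* __clear_bit() analogue */
    }
}

int main(void)
{
    /* Fake physical addresses: pages 1 and 3 have bit 17 set. */
    const uint64_t phys[NPAGES] = { 0x00000, 0x20000, 0x40000, 0x60000 };

    save_bit17(phys, NPAGES);
    printf("bitmap: 0x%lx\n", bit17_bitmap);   /* 0xa -> pages 1 and 3 */
    return 0;
}
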
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 92acdff9dad3..10f1e32767e6 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -93,6 +93,13 @@
93 * 93 *
94 */ 94 */
95 95
96static inline struct i915_ggtt *
97i915_vm_to_ggtt(struct i915_address_space *vm)
98{
99 GEM_BUG_ON(!i915_is_ggtt(vm));
100 return container_of(vm, struct i915_ggtt, base);
101}
102
96static int 103static int
97i915_get_ggtt_vma_pages(struct i915_vma *vma); 104i915_get_ggtt_vma_pages(struct i915_vma *vma);
98 105
@@ -103,25 +110,29 @@ const struct i915_ggtt_view i915_ggtt_view_rotated = {
103 .type = I915_GGTT_VIEW_ROTATED, 110 .type = I915_GGTT_VIEW_ROTATED,
104}; 111};
105 112
106static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt) 113int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
114 int enable_ppgtt)
107{ 115{
108 bool has_aliasing_ppgtt; 116 bool has_aliasing_ppgtt;
109 bool has_full_ppgtt; 117 bool has_full_ppgtt;
110 bool has_full_48bit_ppgtt; 118 bool has_full_48bit_ppgtt;
111 119
112 has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6; 120 has_aliasing_ppgtt = INTEL_GEN(dev_priv) >= 6;
113 has_full_ppgtt = INTEL_INFO(dev)->gen >= 7; 121 has_full_ppgtt = INTEL_GEN(dev_priv) >= 7;
114 has_full_48bit_ppgtt = IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9; 122 has_full_48bit_ppgtt =
123 IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9;
115 124
116 if (intel_vgpu_active(dev)) 125 if (intel_vgpu_active(dev_priv))
117 has_full_ppgtt = false; /* emulation is too hard */ 126 has_full_ppgtt = false; /* emulation is too hard */
118 127
128 if (!has_aliasing_ppgtt)
129 return 0;
130
119 /* 131 /*
120 * We don't allow disabling PPGTT for gen9+ as it's a requirement for 132 * We don't allow disabling PPGTT for gen9+ as it's a requirement for
121 * execlists, the sole mechanism available to submit work. 133 * execlists, the sole mechanism available to submit work.
122 */ 134 */
123 if (INTEL_INFO(dev)->gen < 9 && 135 if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9)
124 (enable_ppgtt == 0 || !has_aliasing_ppgtt))
125 return 0; 136 return 0;
126 137
127 if (enable_ppgtt == 1) 138 if (enable_ppgtt == 1)
@@ -135,19 +146,19 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
135 146
136#ifdef CONFIG_INTEL_IOMMU 147#ifdef CONFIG_INTEL_IOMMU
137 /* Disable ppgtt on SNB if VT-d is on. */ 148 /* Disable ppgtt on SNB if VT-d is on. */
138 if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) { 149 if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped) {
139 DRM_INFO("Disabling PPGTT because VT-d is on\n"); 150 DRM_INFO("Disabling PPGTT because VT-d is on\n");
140 return 0; 151 return 0;
141 } 152 }
142#endif 153#endif
143 154
144 /* Early VLV doesn't have this */ 155 /* Early VLV doesn't have this */
145 if (IS_VALLEYVIEW(dev) && dev->pdev->revision < 0xb) { 156 if (IS_VALLEYVIEW(dev_priv) && dev_priv->drm.pdev->revision < 0xb) {
146 DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n"); 157 DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
147 return 0; 158 return 0;
148 } 159 }
149 160
150 if (INTEL_INFO(dev)->gen >= 8 && i915.enable_execlists) 161 if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists)
151 return has_full_48bit_ppgtt ? 3 : 2; 162 return has_full_48bit_ppgtt ? 3 : 2;
152 else 163 else
153 return has_aliasing_ppgtt ? 1 : 0; 164 return has_aliasing_ppgtt ? 1 : 0;
@@ -866,6 +877,7 @@ static void gen8_free_page_tables(struct drm_device *dev,
866static int gen8_init_scratch(struct i915_address_space *vm) 877static int gen8_init_scratch(struct i915_address_space *vm)
867{ 878{
868 struct drm_device *dev = vm->dev; 879 struct drm_device *dev = vm->dev;
880 int ret;
869 881
870 vm->scratch_page = alloc_scratch_page(dev); 882 vm->scratch_page = alloc_scratch_page(dev);
871 if (IS_ERR(vm->scratch_page)) 883 if (IS_ERR(vm->scratch_page))
@@ -873,24 +885,21 @@ static int gen8_init_scratch(struct i915_address_space *vm)
873 885
874 vm->scratch_pt = alloc_pt(dev); 886 vm->scratch_pt = alloc_pt(dev);
875 if (IS_ERR(vm->scratch_pt)) { 887 if (IS_ERR(vm->scratch_pt)) {
876 free_scratch_page(dev, vm->scratch_page); 888 ret = PTR_ERR(vm->scratch_pt);
877 return PTR_ERR(vm->scratch_pt); 889 goto free_scratch_page;
878 } 890 }
879 891
880 vm->scratch_pd = alloc_pd(dev); 892 vm->scratch_pd = alloc_pd(dev);
881 if (IS_ERR(vm->scratch_pd)) { 893 if (IS_ERR(vm->scratch_pd)) {
882 free_pt(dev, vm->scratch_pt); 894 ret = PTR_ERR(vm->scratch_pd);
883 free_scratch_page(dev, vm->scratch_page); 895 goto free_pt;
884 return PTR_ERR(vm->scratch_pd);
885 } 896 }
886 897
887 if (USES_FULL_48BIT_PPGTT(dev)) { 898 if (USES_FULL_48BIT_PPGTT(dev)) {
888 vm->scratch_pdp = alloc_pdp(dev); 899 vm->scratch_pdp = alloc_pdp(dev);
889 if (IS_ERR(vm->scratch_pdp)) { 900 if (IS_ERR(vm->scratch_pdp)) {
890 free_pd(dev, vm->scratch_pd); 901 ret = PTR_ERR(vm->scratch_pdp);
891 free_pt(dev, vm->scratch_pt); 902 goto free_pd;
892 free_scratch_page(dev, vm->scratch_page);
893 return PTR_ERR(vm->scratch_pdp);
894 } 903 }
895 } 904 }
896 905
@@ -900,6 +909,15 @@ static int gen8_init_scratch(struct i915_address_space *vm)
900 gen8_initialize_pdp(vm, vm->scratch_pdp); 909 gen8_initialize_pdp(vm, vm->scratch_pdp);
901 910
902 return 0; 911 return 0;
912
913free_pd:
914 free_pd(dev, vm->scratch_pd);
915free_pt:
916 free_pt(dev, vm->scratch_pt);
917free_scratch_page:
918 free_scratch_page(dev, vm->scratch_page);
919
920 return ret;
903} 921}
904 922
905static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create) 923static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
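
gen8_init_scratch() is reworked so each allocation failure jumps to a label that unwinds only what was already set up, in reverse order, rather than repeating the free calls inline at every error site. A compact standalone sketch of that goto-unwind idiom; the resource names are placeholders:

#include <stdio.h>
#include <stdlib.h>

/* Toy resources standing in for the scratch page / pt / pd allocations. */
static void *alloc_res(const char *name) { printf("alloc %s\n", name); return malloc(1); }
static void  free_res(void *r, const char *name) { printf("free  %s\n", name); free(r); }

static int init_scratch(int fail_at)
{
    void *page, *pt, *pd;
    int ret;

    page = alloc_res("page");
    if (!page)
        return -1;

    pt = fail_at == 1 ? NULL : alloc_res("pt");
    if (!pt) {
        ret = -1;
        goto free_page;
    }

    pd = fail_at == 2 ? NULL : alloc_res("pd");
    if (!pd) {
        ret = -1;
        goto free_pt;
    }

    return 0;                 /* success: caller now owns page, pt and pd */

free_pt:
    free_res(pt, "pt");
free_page:
    free_res(page, "page");
    return ret;
}

int main(void)
{
    printf("-> %d\n", init_scratch(2));   /* pd fails, pt and page unwind */
    return 0;
}

The payoff is that adding one more scratch level means adding one allocation site and one label, instead of touching the cleanup code in every earlier error branch.
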
@@ -978,7 +996,7 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
978{ 996{
979 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); 997 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
980 998
981 if (intel_vgpu_active(vm->dev)) 999 if (intel_vgpu_active(to_i915(vm->dev)))
982 gen8_ppgtt_notify_vgt(ppgtt, false); 1000 gen8_ppgtt_notify_vgt(ppgtt, false);
983 1001
984 if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) 1002 if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
@@ -1529,14 +1547,14 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
1529 0, 0, 1547 0, 0,
1530 GEN8_PML4E_SHIFT); 1548 GEN8_PML4E_SHIFT);
1531 1549
1532 if (intel_vgpu_active(ppgtt->base.dev)) { 1550 if (intel_vgpu_active(to_i915(ppgtt->base.dev))) {
1533 ret = gen8_preallocate_top_level_pdps(ppgtt); 1551 ret = gen8_preallocate_top_level_pdps(ppgtt);
1534 if (ret) 1552 if (ret)
1535 goto free_scratch; 1553 goto free_scratch;
1536 } 1554 }
1537 } 1555 }
1538 1556
1539 if (intel_vgpu_active(ppgtt->base.dev)) 1557 if (intel_vgpu_active(to_i915(ppgtt->base.dev)))
1540 gen8_ppgtt_notify_vgt(ppgtt, true); 1558 gen8_ppgtt_notify_vgt(ppgtt, true);
1541 1559
1542 return 0; 1560 return 0;
@@ -1552,13 +1570,13 @@ static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
1552 struct i915_page_table *unused; 1570 struct i915_page_table *unused;
1553 gen6_pte_t scratch_pte; 1571 gen6_pte_t scratch_pte;
1554 uint32_t pd_entry; 1572 uint32_t pd_entry;
1555 uint32_t pte, pde, temp; 1573 uint32_t pte, pde;
1556 uint32_t start = ppgtt->base.start, length = ppgtt->base.total; 1574 uint32_t start = ppgtt->base.start, length = ppgtt->base.total;
1557 1575
1558 scratch_pte = vm->pte_encode(px_dma(vm->scratch_page), 1576 scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
1559 I915_CACHE_LLC, true, 0); 1577 I915_CACHE_LLC, true, 0);
1560 1578
1561 gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde) { 1579 gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) {
1562 u32 expected; 1580 u32 expected;
1563 gen6_pte_t *pt_vaddr; 1581 gen6_pte_t *pt_vaddr;
1564 const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]); 1582 const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]);
@@ -1622,9 +1640,9 @@ static void gen6_write_page_range(struct drm_i915_private *dev_priv,
1622{ 1640{
1623 struct i915_ggtt *ggtt = &dev_priv->ggtt; 1641 struct i915_ggtt *ggtt = &dev_priv->ggtt;
1624 struct i915_page_table *pt; 1642 struct i915_page_table *pt;
1625 uint32_t pde, temp; 1643 uint32_t pde;
1626 1644
1627 gen6_for_each_pde(pt, pd, start, length, temp, pde) 1645 gen6_for_each_pde(pt, pd, start, length, pde)
1628 gen6_write_pde(pd, pde, pt); 1646 gen6_write_pde(pd, pde, pt);
1629 1647
1630 /* Make sure write is complete before other code can use this page 1648 /* Make sure write is complete before other code can use this page
@@ -1665,17 +1683,6 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
1665 return 0; 1683 return 0;
1666} 1684}
1667 1685
1668static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
1669 struct drm_i915_gem_request *req)
1670{
1671 struct intel_engine_cs *engine = req->engine;
1672 struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
1673
1674 I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
1675 I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
1676 return 0;
1677}
1678
1679static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt, 1686static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
1680 struct drm_i915_gem_request *req) 1687 struct drm_i915_gem_request *req)
1681{ 1688{
@@ -1713,21 +1720,16 @@ static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
1713 struct drm_i915_gem_request *req) 1720 struct drm_i915_gem_request *req)
1714{ 1721{
1715 struct intel_engine_cs *engine = req->engine; 1722 struct intel_engine_cs *engine = req->engine;
1716 struct drm_device *dev = ppgtt->base.dev; 1723 struct drm_i915_private *dev_priv = req->i915;
1717 struct drm_i915_private *dev_priv = dev->dev_private;
1718
1719 1724
1720 I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G); 1725 I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
1721 I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt)); 1726 I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
1722
1723 POSTING_READ(RING_PP_DIR_DCLV(engine));
1724
1725 return 0; 1727 return 0;
1726} 1728}
1727 1729
1728static void gen8_ppgtt_enable(struct drm_device *dev) 1730static void gen8_ppgtt_enable(struct drm_device *dev)
1729{ 1731{
1730 struct drm_i915_private *dev_priv = dev->dev_private; 1732 struct drm_i915_private *dev_priv = to_i915(dev);
1731 struct intel_engine_cs *engine; 1733 struct intel_engine_cs *engine;
1732 1734
1733 for_each_engine(engine, dev_priv) { 1735 for_each_engine(engine, dev_priv) {
@@ -1739,7 +1741,7 @@ static void gen8_ppgtt_enable(struct drm_device *dev)
1739 1741
1740static void gen7_ppgtt_enable(struct drm_device *dev) 1742static void gen7_ppgtt_enable(struct drm_device *dev)
1741{ 1743{
1742 struct drm_i915_private *dev_priv = dev->dev_private; 1744 struct drm_i915_private *dev_priv = to_i915(dev);
1743 struct intel_engine_cs *engine; 1745 struct intel_engine_cs *engine;
1744 uint32_t ecochk, ecobits; 1746 uint32_t ecochk, ecobits;
1745 1747
@@ -1764,7 +1766,7 @@ static void gen7_ppgtt_enable(struct drm_device *dev)
1764 1766
1765static void gen6_ppgtt_enable(struct drm_device *dev) 1767static void gen6_ppgtt_enable(struct drm_device *dev)
1766{ 1768{
1767 struct drm_i915_private *dev_priv = dev->dev_private; 1769 struct drm_i915_private *dev_priv = to_i915(dev);
1768 uint32_t ecochk, gab_ctl, ecobits; 1770 uint32_t ecochk, gab_ctl, ecobits;
1769 1771
1770 ecobits = I915_READ(GAC_ECO_BITS); 1772 ecobits = I915_READ(GAC_ECO_BITS);
@@ -1821,20 +1823,19 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
1821 enum i915_cache_level cache_level, u32 flags) 1823 enum i915_cache_level cache_level, u32 flags)
1822{ 1824{
1823 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); 1825 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1824 gen6_pte_t *pt_vaddr;
1825 unsigned first_entry = start >> PAGE_SHIFT; 1826 unsigned first_entry = start >> PAGE_SHIFT;
1826 unsigned act_pt = first_entry / GEN6_PTES; 1827 unsigned act_pt = first_entry / GEN6_PTES;
1827 unsigned act_pte = first_entry % GEN6_PTES; 1828 unsigned act_pte = first_entry % GEN6_PTES;
1828 struct sg_page_iter sg_iter; 1829 gen6_pte_t *pt_vaddr = NULL;
1830 struct sgt_iter sgt_iter;
1831 dma_addr_t addr;
1829 1832
1830 pt_vaddr = NULL; 1833 for_each_sgt_dma(addr, sgt_iter, pages) {
1831 for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
1832 if (pt_vaddr == NULL) 1834 if (pt_vaddr == NULL)
1833 pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]); 1835 pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
1834 1836
1835 pt_vaddr[act_pte] = 1837 pt_vaddr[act_pte] =
1836 vm->pte_encode(sg_page_iter_dma_address(&sg_iter), 1838 vm->pte_encode(addr, cache_level, true, flags);
1837 cache_level, true, flags);
1838 1839
1839 if (++act_pte == GEN6_PTES) { 1840 if (++act_pte == GEN6_PTES) {
1840 kunmap_px(ppgtt, pt_vaddr); 1841 kunmap_px(ppgtt, pt_vaddr);
@@ -1843,6 +1844,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
1843 act_pte = 0; 1844 act_pte = 0;
1844 } 1845 }
1845 } 1846 }
1847
1846 if (pt_vaddr) 1848 if (pt_vaddr)
1847 kunmap_px(ppgtt, pt_vaddr); 1849 kunmap_px(ppgtt, pt_vaddr);
1848} 1850}
@@ -1857,7 +1859,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
1857 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); 1859 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1858 struct i915_page_table *pt; 1860 struct i915_page_table *pt;
1859 uint32_t start, length, start_save, length_save; 1861 uint32_t start, length, start_save, length_save;
1860 uint32_t pde, temp; 1862 uint32_t pde;
1861 int ret; 1863 int ret;
1862 1864
1863 if (WARN_ON(start_in + length_in > ppgtt->base.total)) 1865 if (WARN_ON(start_in + length_in > ppgtt->base.total))
@@ -1873,7 +1875,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
1873 * need allocation. The second stage marks the in-use ptes within the page 1875 * need allocation. The second stage marks the in-use ptes within the page
1874 * tables. 1876 * tables.
1875 */ 1877 */
1876 gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) { 1878 gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) {
1877 if (pt != vm->scratch_pt) { 1879 if (pt != vm->scratch_pt) {
1878 WARN_ON(bitmap_empty(pt->used_ptes, GEN6_PTES)); 1880 WARN_ON(bitmap_empty(pt->used_ptes, GEN6_PTES));
1879 continue; 1881 continue;
@@ -1898,7 +1900,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
1898 start = start_save; 1900 start = start_save;
1899 length = length_save; 1901 length = length_save;
1900 1902
1901 gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) { 1903 gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) {
1902 DECLARE_BITMAP(tmp_bitmap, GEN6_PTES); 1904 DECLARE_BITMAP(tmp_bitmap, GEN6_PTES);
1903 1905
1904 bitmap_zero(tmp_bitmap, GEN6_PTES); 1906 bitmap_zero(tmp_bitmap, GEN6_PTES);
@@ -1967,15 +1969,16 @@ static void gen6_free_scratch(struct i915_address_space *vm)
1967static void gen6_ppgtt_cleanup(struct i915_address_space *vm) 1969static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
1968{ 1970{
1969 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); 1971 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1972 struct i915_page_directory *pd = &ppgtt->pd;
1973 struct drm_device *dev = vm->dev;
1970 struct i915_page_table *pt; 1974 struct i915_page_table *pt;
1971 uint32_t pde; 1975 uint32_t pde;
1972 1976
1973 drm_mm_remove_node(&ppgtt->node); 1977 drm_mm_remove_node(&ppgtt->node);
1974 1978
1975 gen6_for_all_pdes(pt, ppgtt, pde) { 1979 gen6_for_all_pdes(pt, pd, pde)
1976 if (pt != vm->scratch_pt) 1980 if (pt != vm->scratch_pt)
1977 free_pt(ppgtt->base.dev, pt); 1981 free_pt(dev, pt);
1978 }
1979 1982
1980 gen6_free_scratch(vm); 1983 gen6_free_scratch(vm);
1981} 1984}
@@ -2041,9 +2044,9 @@ static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
2041 uint64_t start, uint64_t length) 2044 uint64_t start, uint64_t length)
2042{ 2045{
2043 struct i915_page_table *unused; 2046 struct i915_page_table *unused;
2044 uint32_t pde, temp; 2047 uint32_t pde;
2045 2048
2046 gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde) 2049 gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde)
2047 ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt; 2050 ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt;
2048} 2051}
2049 2052
@@ -2055,18 +2058,15 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
2055 int ret; 2058 int ret;
2056 2059
2057 ppgtt->base.pte_encode = ggtt->base.pte_encode; 2060 ppgtt->base.pte_encode = ggtt->base.pte_encode;
2058 if (IS_GEN6(dev)) { 2061 if (intel_vgpu_active(dev_priv) || IS_GEN6(dev))
2059 ppgtt->switch_mm = gen6_mm_switch; 2062 ppgtt->switch_mm = gen6_mm_switch;
2060 } else if (IS_HASWELL(dev)) { 2063 else if (IS_HASWELL(dev))
2061 ppgtt->switch_mm = hsw_mm_switch; 2064 ppgtt->switch_mm = hsw_mm_switch;
2062 } else if (IS_GEN7(dev)) { 2065 else if (IS_GEN7(dev))
2063 ppgtt->switch_mm = gen7_mm_switch; 2066 ppgtt->switch_mm = gen7_mm_switch;
2064 } else 2067 else
2065 BUG(); 2068 BUG();
2066 2069
2067 if (intel_vgpu_active(dev))
2068 ppgtt->switch_mm = vgpu_mm_switch;
2069
2070 ret = gen6_ppgtt_alloc(ppgtt); 2070 ret = gen6_ppgtt_alloc(ppgtt);
2071 if (ret) 2071 if (ret)
2072 return ret; 2072 return ret;
@@ -2115,7 +2115,7 @@ static void i915_address_space_init(struct i915_address_space *vm,
2115 struct drm_i915_private *dev_priv) 2115 struct drm_i915_private *dev_priv)
2116{ 2116{
2117 drm_mm_init(&vm->mm, vm->start, vm->total); 2117 drm_mm_init(&vm->mm, vm->start, vm->total);
2118 vm->dev = dev_priv->dev; 2118 vm->dev = &dev_priv->drm;
2119 INIT_LIST_HEAD(&vm->active_list); 2119 INIT_LIST_HEAD(&vm->active_list);
2120 INIT_LIST_HEAD(&vm->inactive_list); 2120 INIT_LIST_HEAD(&vm->inactive_list);
2121 list_add_tail(&vm->global_link, &dev_priv->vm_list); 2121 list_add_tail(&vm->global_link, &dev_priv->vm_list);
@@ -2123,7 +2123,7 @@ static void i915_address_space_init(struct i915_address_space *vm,
2123 2123
2124static void gtt_write_workarounds(struct drm_device *dev) 2124static void gtt_write_workarounds(struct drm_device *dev)
2125{ 2125{
2126 struct drm_i915_private *dev_priv = dev->dev_private; 2126 struct drm_i915_private *dev_priv = to_i915(dev);
2127 2127
2128 /* This function is for gtt related workarounds. This function is 2128 /* This function is for gtt related workarounds. This function is
2129 * called on driver load and after a GPU reset, so you can place 2129 * called on driver load and after a GPU reset, so you can place
@@ -2140,9 +2140,9 @@ static void gtt_write_workarounds(struct drm_device *dev)
2140 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT); 2140 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
2141} 2141}
2142 2142
2143int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt) 2143static int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
2144{ 2144{
2145 struct drm_i915_private *dev_priv = dev->dev_private; 2145 struct drm_i915_private *dev_priv = to_i915(dev);
2146 int ret = 0; 2146 int ret = 0;
2147 2147
2148 ret = __hw_ppgtt_init(dev, ppgtt); 2148 ret = __hw_ppgtt_init(dev, ppgtt);
@@ -2179,20 +2179,6 @@ int i915_ppgtt_init_hw(struct drm_device *dev)
2179 return 0; 2179 return 0;
2180} 2180}
2181 2181
2182int i915_ppgtt_init_ring(struct drm_i915_gem_request *req)
2183{
2184 struct drm_i915_private *dev_priv = req->i915;
2185 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
2186
2187 if (i915.enable_execlists)
2188 return 0;
2189
2190 if (!ppgtt)
2191 return 0;
2192
2193 return ppgtt->switch_mm(ppgtt, req);
2194}
2195
2196struct i915_hw_ppgtt * 2182struct i915_hw_ppgtt *
2197i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv) 2183i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv)
2198{ 2184{
@@ -2257,8 +2243,8 @@ static bool do_idling(struct drm_i915_private *dev_priv)
2257 2243
2258 if (unlikely(ggtt->do_idle_maps)) { 2244 if (unlikely(ggtt->do_idle_maps)) {
2259 dev_priv->mm.interruptible = false; 2245 dev_priv->mm.interruptible = false;
2260 if (i915_gpu_idle(dev_priv->dev)) { 2246 if (i915_gem_wait_for_idle(dev_priv)) {
2261 DRM_ERROR("Couldn't idle GPU\n"); 2247 DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
2262 /* Wait a bit, in hopes it avoids the hang */ 2248 /* Wait a bit, in hopes it avoids the hang */
2263 udelay(10); 2249 udelay(10);
2264 } 2250 }
@@ -2275,12 +2261,11 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
2275 dev_priv->mm.interruptible = interruptible; 2261 dev_priv->mm.interruptible = interruptible;
2276} 2262}
2277 2263
2278void i915_check_and_clear_faults(struct drm_device *dev) 2264void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
2279{ 2265{
2280 struct drm_i915_private *dev_priv = dev->dev_private;
2281 struct intel_engine_cs *engine; 2266 struct intel_engine_cs *engine;
2282 2267
2283 if (INTEL_INFO(dev)->gen < 6) 2268 if (INTEL_INFO(dev_priv)->gen < 6)
2284 return; 2269 return;
2285 2270
2286 for_each_engine(engine, dev_priv) { 2271 for_each_engine(engine, dev_priv) {
@@ -2324,7 +2309,7 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
2324 if (INTEL_INFO(dev)->gen < 6) 2309 if (INTEL_INFO(dev)->gen < 6)
2325 return; 2310 return;
2326 2311
2327 i915_check_and_clear_faults(dev); 2312 i915_check_and_clear_faults(dev_priv);
2328 2313
2329 ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total, 2314 ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
2330 true); 2315 true);
@@ -2352,29 +2337,49 @@ static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
2352#endif 2337#endif
2353} 2338}
2354 2339
2340static void gen8_ggtt_insert_page(struct i915_address_space *vm,
2341 dma_addr_t addr,
2342 uint64_t offset,
2343 enum i915_cache_level level,
2344 u32 unused)
2345{
2346 struct drm_i915_private *dev_priv = to_i915(vm->dev);
2347 gen8_pte_t __iomem *pte =
2348 (gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
2349 (offset >> PAGE_SHIFT);
2350 int rpm_atomic_seq;
2351
2352 rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
2353
2354 gen8_set_pte(pte, gen8_pte_encode(addr, level, true));
2355
2356 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
2357 POSTING_READ(GFX_FLSH_CNTL_GEN6);
2358
2359 assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
2360}
2361
2355static void gen8_ggtt_insert_entries(struct i915_address_space *vm, 2362static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
2356 struct sg_table *st, 2363 struct sg_table *st,
2357 uint64_t start, 2364 uint64_t start,
2358 enum i915_cache_level level, u32 unused) 2365 enum i915_cache_level level, u32 unused)
2359{ 2366{
2360 struct drm_i915_private *dev_priv = to_i915(vm->dev); 2367 struct drm_i915_private *dev_priv = to_i915(vm->dev);
2361 struct i915_ggtt *ggtt = &dev_priv->ggtt; 2368 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2362 unsigned first_entry = start >> PAGE_SHIFT; 2369 struct sgt_iter sgt_iter;
2363 gen8_pte_t __iomem *gtt_entries = 2370 gen8_pte_t __iomem *gtt_entries;
2364 (gen8_pte_t __iomem *)ggtt->gsm + first_entry; 2371 gen8_pte_t gtt_entry;
2365 int i = 0; 2372 dma_addr_t addr;
2366 struct sg_page_iter sg_iter;
2367 dma_addr_t addr = 0; /* shut up gcc */
2368 int rpm_atomic_seq; 2373 int rpm_atomic_seq;
2374 int i = 0;
2369 2375
2370 rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv); 2376 rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
2371 2377
2372 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { 2378 gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);
2373 addr = sg_dma_address(sg_iter.sg) + 2379
2374 (sg_iter.sg_pgoffset << PAGE_SHIFT); 2380 for_each_sgt_dma(addr, sgt_iter, st) {
2375 gen8_set_pte(&gtt_entries[i], 2381 gtt_entry = gen8_pte_encode(addr, level, true);
2376 gen8_pte_encode(addr, level, true)); 2382 gen8_set_pte(&gtt_entries[i++], gtt_entry);
2377 i++;
2378 } 2383 }
2379 2384
2380 /* 2385 /*
@@ -2385,8 +2390,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
2385 * hardware should work, we must keep this posting read for paranoia. 2390 * hardware should work, we must keep this posting read for paranoia.
2386 */ 2391 */
2387 if (i != 0) 2392 if (i != 0)
2388 WARN_ON(readq(&gtt_entries[i-1]) 2393 WARN_ON(readq(&gtt_entries[i-1]) != gtt_entry);
2389 != gen8_pte_encode(addr, level, true));
2390 2394
2391 /* This next bit makes the above posting read even more important. We 2395 /* This next bit makes the above posting read even more important. We
2392 * want to flush the TLBs only after we're certain all the PTE updates 2396 * want to flush the TLBs only after we're certain all the PTE updates
@@ -2424,6 +2428,28 @@ static void gen8_ggtt_insert_entries__BKL(struct i915_address_space *vm,
2424 stop_machine(gen8_ggtt_insert_entries__cb, &arg, NULL); 2428 stop_machine(gen8_ggtt_insert_entries__cb, &arg, NULL);
2425} 2429}
2426 2430
2431static void gen6_ggtt_insert_page(struct i915_address_space *vm,
2432 dma_addr_t addr,
2433 uint64_t offset,
2434 enum i915_cache_level level,
2435 u32 flags)
2436{
2437 struct drm_i915_private *dev_priv = to_i915(vm->dev);
2438 gen6_pte_t __iomem *pte =
2439 (gen6_pte_t __iomem *)dev_priv->ggtt.gsm +
2440 (offset >> PAGE_SHIFT);
2441 int rpm_atomic_seq;
2442
2443 rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
2444
2445 iowrite32(vm->pte_encode(addr, level, true, flags), pte);
2446
2447 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
2448 POSTING_READ(GFX_FLSH_CNTL_GEN6);
2449
2450 assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
2451}
2452
2427/* 2453/*
2428 * Binds an object into the global gtt with the specified cache level. The object 2454 * Binds an object into the global gtt with the specified cache level. The object
2429 * will be accessible to the GPU via commands whose operands reference offsets 2455 * will be accessible to the GPU via commands whose operands reference offsets
@@ -2436,21 +2462,21 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
2436 enum i915_cache_level level, u32 flags) 2462 enum i915_cache_level level, u32 flags)
2437{ 2463{
2438 struct drm_i915_private *dev_priv = to_i915(vm->dev); 2464 struct drm_i915_private *dev_priv = to_i915(vm->dev);
2439 struct i915_ggtt *ggtt = &dev_priv->ggtt; 2465 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2440 unsigned first_entry = start >> PAGE_SHIFT; 2466 struct sgt_iter sgt_iter;
2441 gen6_pte_t __iomem *gtt_entries = 2467 gen6_pte_t __iomem *gtt_entries;
2442 (gen6_pte_t __iomem *)ggtt->gsm + first_entry; 2468 gen6_pte_t gtt_entry;
2443 int i = 0; 2469 dma_addr_t addr;
2444 struct sg_page_iter sg_iter;
2445 dma_addr_t addr = 0;
2446 int rpm_atomic_seq; 2470 int rpm_atomic_seq;
2471 int i = 0;
2447 2472
2448 rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv); 2473 rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
2449 2474
2450 for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) { 2475 gtt_entries = (gen6_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);
2451 addr = sg_page_iter_dma_address(&sg_iter); 2476
2452 iowrite32(vm->pte_encode(addr, level, true, flags), &gtt_entries[i]); 2477 for_each_sgt_dma(addr, sgt_iter, st) {
2453 i++; 2478 gtt_entry = vm->pte_encode(addr, level, true, flags);
2479 iowrite32(gtt_entry, &gtt_entries[i++]);
2454 } 2480 }
2455 2481
2456 /* XXX: This serves as a posting read to make sure that the PTE has 2482 /* XXX: This serves as a posting read to make sure that the PTE has
@@ -2459,10 +2485,8 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
2459 * of NUMA access patterns. Therefore, even with the way we assume 2485 * of NUMA access patterns. Therefore, even with the way we assume
2460 * hardware should work, we must keep this posting read for paranoia. 2486 * hardware should work, we must keep this posting read for paranoia.
2461 */ 2487 */
2462 if (i != 0) { 2488 if (i != 0)
2463 unsigned long gtt = readl(&gtt_entries[i-1]); 2489 WARN_ON(readl(&gtt_entries[i-1]) != gtt_entry);
2464 WARN_ON(gtt != vm->pte_encode(addr, level, true, flags));
2465 }
2466 2490
2467 /* This next bit makes the above posting read even more important. We 2491 /* This next bit makes the above posting read even more important. We
2468 * want to flush the TLBs only after we're certain all the PTE updates 2492 * want to flush the TLBs only after we're certain all the PTE updates
@@ -2474,13 +2498,20 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
2474 assert_rpm_atomic_end(dev_priv, rpm_atomic_seq); 2498 assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
2475} 2499}
2476 2500
2501static void nop_clear_range(struct i915_address_space *vm,
2502 uint64_t start,
2503 uint64_t length,
2504 bool use_scratch)
2505{
2506}
2507
2477static void gen8_ggtt_clear_range(struct i915_address_space *vm, 2508static void gen8_ggtt_clear_range(struct i915_address_space *vm,
2478 uint64_t start, 2509 uint64_t start,
2479 uint64_t length, 2510 uint64_t length,
2480 bool use_scratch) 2511 bool use_scratch)
2481{ 2512{
2482 struct drm_i915_private *dev_priv = to_i915(vm->dev); 2513 struct drm_i915_private *dev_priv = to_i915(vm->dev);
2483 struct i915_ggtt *ggtt = &dev_priv->ggtt; 2514 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2484 unsigned first_entry = start >> PAGE_SHIFT; 2515 unsigned first_entry = start >> PAGE_SHIFT;
2485 unsigned num_entries = length >> PAGE_SHIFT; 2516 unsigned num_entries = length >> PAGE_SHIFT;
2486 gen8_pte_t scratch_pte, __iomem *gtt_base = 2517 gen8_pte_t scratch_pte, __iomem *gtt_base =
@@ -2512,7 +2543,7 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
2512 bool use_scratch) 2543 bool use_scratch)
2513{ 2544{
2514 struct drm_i915_private *dev_priv = to_i915(vm->dev); 2545 struct drm_i915_private *dev_priv = to_i915(vm->dev);
2515 struct i915_ggtt *ggtt = &dev_priv->ggtt; 2546 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2516 unsigned first_entry = start >> PAGE_SHIFT; 2547 unsigned first_entry = start >> PAGE_SHIFT;
2517 unsigned num_entries = length >> PAGE_SHIFT; 2548 unsigned num_entries = length >> PAGE_SHIFT;
2518 gen6_pte_t scratch_pte, __iomem *gtt_base = 2549 gen6_pte_t scratch_pte, __iomem *gtt_base =
@@ -2538,12 +2569,30 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
2538 assert_rpm_atomic_end(dev_priv, rpm_atomic_seq); 2569 assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
2539} 2570}
2540 2571
2572static void i915_ggtt_insert_page(struct i915_address_space *vm,
2573 dma_addr_t addr,
2574 uint64_t offset,
2575 enum i915_cache_level cache_level,
2576 u32 unused)
2577{
2578 struct drm_i915_private *dev_priv = to_i915(vm->dev);
2579 unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2580 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
2581 int rpm_atomic_seq;
2582
2583 rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
2584
2585 intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
2586
2587 assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
2588}
2589
2541static void i915_ggtt_insert_entries(struct i915_address_space *vm, 2590static void i915_ggtt_insert_entries(struct i915_address_space *vm,
2542 struct sg_table *pages, 2591 struct sg_table *pages,
2543 uint64_t start, 2592 uint64_t start,
2544 enum i915_cache_level cache_level, u32 unused) 2593 enum i915_cache_level cache_level, u32 unused)
2545{ 2594{
2546 struct drm_i915_private *dev_priv = vm->dev->dev_private; 2595 struct drm_i915_private *dev_priv = to_i915(vm->dev);
2547 unsigned int flags = (cache_level == I915_CACHE_NONE) ? 2596 unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2548 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; 2597 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
2549 int rpm_atomic_seq; 2598 int rpm_atomic_seq;
@@ -2561,7 +2610,7 @@ static void i915_ggtt_clear_range(struct i915_address_space *vm,
2561 uint64_t length, 2610 uint64_t length,
2562 bool unused) 2611 bool unused)
2563{ 2612{
2564 struct drm_i915_private *dev_priv = vm->dev->dev_private; 2613 struct drm_i915_private *dev_priv = to_i915(vm->dev);
2565 unsigned first_entry = start >> PAGE_SHIFT; 2614 unsigned first_entry = start >> PAGE_SHIFT;
2566 unsigned num_entries = length >> PAGE_SHIFT; 2615 unsigned num_entries = length >> PAGE_SHIFT;
2567 int rpm_atomic_seq; 2616 int rpm_atomic_seq;
@@ -2642,7 +2691,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
2642static void ggtt_unbind_vma(struct i915_vma *vma) 2691static void ggtt_unbind_vma(struct i915_vma *vma)
2643{ 2692{
2644 struct drm_device *dev = vma->vm->dev; 2693 struct drm_device *dev = vma->vm->dev;
2645 struct drm_i915_private *dev_priv = dev->dev_private; 2694 struct drm_i915_private *dev_priv = to_i915(dev);
2646 struct drm_i915_gem_object *obj = vma->obj; 2695 struct drm_i915_gem_object *obj = vma->obj;
2647 const uint64_t size = min_t(uint64_t, 2696 const uint64_t size = min_t(uint64_t,
2648 obj->base.size, 2697 obj->base.size,
@@ -2668,7 +2717,7 @@ static void ggtt_unbind_vma(struct i915_vma *vma)
2668void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj) 2717void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
2669{ 2718{
2670 struct drm_device *dev = obj->base.dev; 2719 struct drm_device *dev = obj->base.dev;
2671 struct drm_i915_private *dev_priv = dev->dev_private; 2720 struct drm_i915_private *dev_priv = to_i915(dev);
2672 bool interruptible; 2721 bool interruptible;
2673 2722
2674 interruptible = do_idling(dev_priv); 2723 interruptible = do_idling(dev_priv);
@@ -2727,11 +2776,9 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
2727 i915_address_space_init(&ggtt->base, dev_priv); 2776 i915_address_space_init(&ggtt->base, dev_priv);
2728 ggtt->base.total += PAGE_SIZE; 2777 ggtt->base.total += PAGE_SIZE;
2729 2778
2730 if (intel_vgpu_active(dev)) { 2779 ret = intel_vgt_balloon(dev_priv);
2731 ret = intel_vgt_balloon(dev); 2780 if (ret)
2732 if (ret) 2781 return ret;
2733 return ret;
2734 }
2735 2782
2736 if (!HAS_LLC(dev)) 2783 if (!HAS_LLC(dev))
2737 ggtt->base.mm.color_adjust = i915_gtt_color_adjust; 2784 ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
@@ -2831,8 +2878,7 @@ void i915_ggtt_cleanup_hw(struct drm_device *dev)
2831 i915_gem_cleanup_stolen(dev); 2878 i915_gem_cleanup_stolen(dev);
2832 2879
2833 if (drm_mm_initialized(&ggtt->base.mm)) { 2880 if (drm_mm_initialized(&ggtt->base.mm)) {
2834 if (intel_vgpu_active(dev)) 2881 intel_vgt_deballoon(dev_priv);
2835 intel_vgt_deballoon();
2836 2882
2837 drm_mm_takedown(&ggtt->base.mm); 2883 drm_mm_takedown(&ggtt->base.mm);
2838 list_del(&ggtt->base.global_link); 2884 list_del(&ggtt->base.global_link);
@@ -3069,13 +3115,16 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
3069 3115
3070 ret = ggtt_probe_common(dev, ggtt->size); 3116 ret = ggtt_probe_common(dev, ggtt->size);
3071 3117
3072 ggtt->base.clear_range = gen8_ggtt_clear_range;
3073 if (IS_CHERRYVIEW(dev_priv))
3074 ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL;
3075 else
3076 ggtt->base.insert_entries = gen8_ggtt_insert_entries;
3077 ggtt->base.bind_vma = ggtt_bind_vma; 3118 ggtt->base.bind_vma = ggtt_bind_vma;
3078 ggtt->base.unbind_vma = ggtt_unbind_vma; 3119 ggtt->base.unbind_vma = ggtt_unbind_vma;
3120 ggtt->base.insert_page = gen8_ggtt_insert_page;
3121 ggtt->base.clear_range = nop_clear_range;
3122 if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
3123 ggtt->base.clear_range = gen8_ggtt_clear_range;
3124
3125 ggtt->base.insert_entries = gen8_ggtt_insert_entries;
3126 if (IS_CHERRYVIEW(dev_priv))
3127 ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL;
3079 3128
3080 return ret; 3129 return ret;
3081} 3130}
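With full PPGTT in use and no VT-d scanout workaround required, clear_range is deliberately left as the no-op nop_clear_range, so stale GGTT entries are simply overwritten on the next bind. For reference, a sketch of how a caller is expected to use the newly wired insert_page hook; vm, addr and offset are assumed to come from the caller:

/* Sketch, not driver code: bind a single page through the new vfunc. */
static void sketch_bind_one_page(struct i915_address_space *vm,
                                 dma_addr_t addr, uint64_t offset)
{
        vm->insert_page(vm, addr, offset, I915_CACHE_NONE, 0);
}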
@@ -3108,6 +3157,7 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
3108 ret = ggtt_probe_common(dev, ggtt->size); 3157 ret = ggtt_probe_common(dev, ggtt->size);
3109 3158
3110 ggtt->base.clear_range = gen6_ggtt_clear_range; 3159 ggtt->base.clear_range = gen6_ggtt_clear_range;
3160 ggtt->base.insert_page = gen6_ggtt_insert_page;
3111 ggtt->base.insert_entries = gen6_ggtt_insert_entries; 3161 ggtt->base.insert_entries = gen6_ggtt_insert_entries;
3112 ggtt->base.bind_vma = ggtt_bind_vma; 3162 ggtt->base.bind_vma = ggtt_bind_vma;
3113 ggtt->base.unbind_vma = ggtt_unbind_vma; 3163 ggtt->base.unbind_vma = ggtt_unbind_vma;
@@ -3129,7 +3179,7 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
3129 struct drm_i915_private *dev_priv = to_i915(dev); 3179 struct drm_i915_private *dev_priv = to_i915(dev);
3130 int ret; 3180 int ret;
3131 3181
3132 ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL); 3182 ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
3133 if (!ret) { 3183 if (!ret) {
3134 DRM_ERROR("failed to set up gmch\n"); 3184 DRM_ERROR("failed to set up gmch\n");
3135 return -EIO; 3185 return -EIO;
@@ -3138,7 +3188,8 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
3138 intel_gtt_get(&ggtt->base.total, &ggtt->stolen_size, 3188 intel_gtt_get(&ggtt->base.total, &ggtt->stolen_size,
3139 &ggtt->mappable_base, &ggtt->mappable_end); 3189 &ggtt->mappable_base, &ggtt->mappable_end);
3140 3190
3141 ggtt->do_idle_maps = needs_idle_maps(dev_priv->dev); 3191 ggtt->do_idle_maps = needs_idle_maps(&dev_priv->drm);
3192 ggtt->base.insert_page = i915_ggtt_insert_page;
3142 ggtt->base.insert_entries = i915_ggtt_insert_entries; 3193 ggtt->base.insert_entries = i915_ggtt_insert_entries;
3143 ggtt->base.clear_range = i915_ggtt_clear_range; 3194 ggtt->base.clear_range = i915_ggtt_clear_range;
3144 ggtt->base.bind_vma = ggtt_bind_vma; 3195 ggtt->base.bind_vma = ggtt_bind_vma;
@@ -3219,14 +3270,6 @@ int i915_ggtt_init_hw(struct drm_device *dev)
3219 if (intel_iommu_gfx_mapped) 3270 if (intel_iommu_gfx_mapped)
3220 DRM_INFO("VT-d active for gfx access\n"); 3271 DRM_INFO("VT-d active for gfx access\n");
3221#endif 3272#endif
3222 /*
3223 * i915.enable_ppgtt is read-only, so do an early pass to validate the
3224 * user's requested state against the hardware/driver capabilities. We
3225 * do this now so that we can print out any log messages once rather
3226 * than every time we check intel_enable_ppgtt().
3227 */
3228 i915.enable_ppgtt = sanitize_enable_ppgtt(dev, i915.enable_ppgtt);
3229 DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
3230 3273
3231 return 0; 3274 return 0;
3232 3275
@@ -3250,9 +3293,8 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
3250 struct i915_ggtt *ggtt = &dev_priv->ggtt; 3293 struct i915_ggtt *ggtt = &dev_priv->ggtt;
3251 struct drm_i915_gem_object *obj; 3294 struct drm_i915_gem_object *obj;
3252 struct i915_vma *vma; 3295 struct i915_vma *vma;
3253 bool flush;
3254 3296
3255 i915_check_and_clear_faults(dev); 3297 i915_check_and_clear_faults(dev_priv);
3256 3298
3257 /* First fill our portion of the GTT with scratch pages */ 3299 /* First fill our portion of the GTT with scratch pages */
3258 ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total, 3300 ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
@@ -3260,19 +3302,16 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
3260 3302
3261 /* Cache flush objects bound into GGTT and rebind them. */ 3303 /* Cache flush objects bound into GGTT and rebind them. */
3262 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 3304 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
3263 flush = false;
3264 list_for_each_entry(vma, &obj->vma_list, obj_link) { 3305 list_for_each_entry(vma, &obj->vma_list, obj_link) {
3265 if (vma->vm != &ggtt->base) 3306 if (vma->vm != &ggtt->base)
3266 continue; 3307 continue;
3267 3308
3268 WARN_ON(i915_vma_bind(vma, obj->cache_level, 3309 WARN_ON(i915_vma_bind(vma, obj->cache_level,
3269 PIN_UPDATE)); 3310 PIN_UPDATE));
3270
3271 flush = true;
3272 } 3311 }
3273 3312
3274 if (flush) 3313 if (obj->pin_display)
3275 i915_gem_clflush_object(obj, obj->pin_display); 3314 WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
3276 } 3315 }
3277 3316
3278 if (INTEL_INFO(dev)->gen >= 8) { 3317 if (INTEL_INFO(dev)->gen >= 8) {
@@ -3398,9 +3437,11 @@ static struct sg_table *
3398intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info, 3437intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
3399 struct drm_i915_gem_object *obj) 3438 struct drm_i915_gem_object *obj)
3400{ 3439{
3440 const size_t n_pages = obj->base.size / PAGE_SIZE;
3401 unsigned int size_pages = rot_info->plane[0].width * rot_info->plane[0].height; 3441 unsigned int size_pages = rot_info->plane[0].width * rot_info->plane[0].height;
3402 unsigned int size_pages_uv; 3442 unsigned int size_pages_uv;
3403 struct sg_page_iter sg_iter; 3443 struct sgt_iter sgt_iter;
3444 dma_addr_t dma_addr;
3404 unsigned long i; 3445 unsigned long i;
3405 dma_addr_t *page_addr_list; 3446 dma_addr_t *page_addr_list;
3406 struct sg_table *st; 3447 struct sg_table *st;
@@ -3409,7 +3450,7 @@ intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
3409 int ret = -ENOMEM; 3450 int ret = -ENOMEM;
3410 3451
3411 /* Allocate a temporary list of source pages for random access. */ 3452 /* Allocate a temporary list of source pages for random access. */
3412 page_addr_list = drm_malloc_gfp(obj->base.size / PAGE_SIZE, 3453 page_addr_list = drm_malloc_gfp(n_pages,
3413 sizeof(dma_addr_t), 3454 sizeof(dma_addr_t),
3414 GFP_TEMPORARY); 3455 GFP_TEMPORARY);
3415 if (!page_addr_list) 3456 if (!page_addr_list)
@@ -3432,11 +3473,10 @@ intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
3432 3473
3433 /* Populate source page list from the object. */ 3474 /* Populate source page list from the object. */
3434 i = 0; 3475 i = 0;
3435 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { 3476 for_each_sgt_dma(dma_addr, sgt_iter, obj->pages)
3436 page_addr_list[i] = sg_page_iter_dma_address(&sg_iter); 3477 page_addr_list[i++] = dma_addr;
3437 i++;
3438 }
3439 3478
3479 GEM_BUG_ON(i != n_pages);
3440 st->nents = 0; 3480 st->nents = 0;
3441 sg = st->sgl; 3481 sg = st->sgl;
3442 3482
@@ -3634,3 +3674,29 @@ i915_ggtt_view_size(struct drm_i915_gem_object *obj,
3634 return obj->base.size; 3674 return obj->base.size;
3635 } 3675 }
3636} 3676}
3677
3678void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
3679{
3680 void __iomem *ptr;
3681
3682 lockdep_assert_held(&vma->vm->dev->struct_mutex);
3683 if (WARN_ON(!vma->obj->map_and_fenceable))
3684 return ERR_PTR(-ENODEV);
3685
3686 GEM_BUG_ON(!vma->is_ggtt);
3687 GEM_BUG_ON((vma->bound & GLOBAL_BIND) == 0);
3688
3689 ptr = vma->iomap;
3690 if (ptr == NULL) {
3691 ptr = io_mapping_map_wc(i915_vm_to_ggtt(vma->vm)->mappable,
3692 vma->node.start,
3693 vma->node.size);
3694 if (ptr == NULL)
3695 return ERR_PTR(-ENOMEM);
3696
3697 vma->iomap = ptr;
3698 }
3699
3700 vma->pin_count++;
3701 return ptr;
3702}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 0008543d55f6..aa5f31d1c2ed 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -34,6 +34,8 @@
34#ifndef __I915_GEM_GTT_H__ 34#ifndef __I915_GEM_GTT_H__
35#define __I915_GEM_GTT_H__ 35#define __I915_GEM_GTT_H__
36 36
37#include <linux/io-mapping.h>
38
37struct drm_i915_file_private; 39struct drm_i915_file_private;
38 40
39typedef uint32_t gen6_pte_t; 41typedef uint32_t gen6_pte_t;
@@ -175,6 +177,7 @@ struct i915_vma {
175 struct drm_mm_node node; 177 struct drm_mm_node node;
176 struct drm_i915_gem_object *obj; 178 struct drm_i915_gem_object *obj;
177 struct i915_address_space *vm; 179 struct i915_address_space *vm;
180 void __iomem *iomap;
178 181
179 /** Flags and address space this VMA is bound to */ 182 /** Flags and address space this VMA is bound to */
180#define GLOBAL_BIND (1<<0) 183#define GLOBAL_BIND (1<<0)
@@ -316,6 +319,11 @@ struct i915_address_space {
316 uint64_t start, 319 uint64_t start,
317 uint64_t length, 320 uint64_t length,
318 bool use_scratch); 321 bool use_scratch);
322 void (*insert_page)(struct i915_address_space *vm,
323 dma_addr_t addr,
324 uint64_t offset,
325 enum i915_cache_level cache_level,
326 u32 flags);
319 void (*insert_entries)(struct i915_address_space *vm, 327 void (*insert_entries)(struct i915_address_space *vm,
320 struct sg_table *st, 328 struct sg_table *st,
321 uint64_t start, 329 uint64_t start,
@@ -382,27 +390,27 @@ struct i915_hw_ppgtt {
382 void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m); 390 void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
383}; 391};
384 392
385/* For each pde iterates over every pde between from start until start + length. 393/*
386 * If start, and start+length are not perfectly divisible, the macro will round 394 * gen6_for_each_pde() iterates over every pde from start until start+length.
387 * down, and up as needed. The macro modifies pde, start, and length. Dev is 395 * If start and start+length are not perfectly divisible, the macro will round
388 * only used to differentiate shift values. Temp is temp. On gen6/7, start = 0, 396 * down and up as needed. Start=0 and length=2G effectively iterates over
389 * and length = 2G effectively iterates over every PDE in the system. 397 * every PDE in the system. The macro modifies ALL its parameters except 'pd',
390 * 398 * so each of the other parameters should preferably be a simple variable, or
391 * XXX: temp is not actually needed, but it saves doing the ALIGN operation. 399 * at most an lvalue with no side-effects!
392 */ 400 */
393#define gen6_for_each_pde(pt, pd, start, length, temp, iter) \ 401#define gen6_for_each_pde(pt, pd, start, length, iter) \
394 for (iter = gen6_pde_index(start); \ 402 for (iter = gen6_pde_index(start); \
395 length > 0 && iter < I915_PDES ? \ 403 length > 0 && iter < I915_PDES && \
396 (pt = (pd)->page_table[iter]), 1 : 0; \ 404 (pt = (pd)->page_table[iter], true); \
397 iter++, \ 405 ({ u32 temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT); \
398 temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT) - start, \ 406 temp = min(temp - start, length); \
399 temp = min_t(unsigned, temp, length), \ 407 start += temp, length -= temp; }), ++iter)
400 start += temp, length -= temp) 408
401 409#define gen6_for_all_pdes(pt, pd, iter) \
402#define gen6_for_all_pdes(pt, ppgtt, iter) \ 410 for (iter = 0; \
403 for (iter = 0; \ 411 iter < I915_PDES && \
404 pt = ppgtt->pd.page_table[iter], iter < I915_PDES; \ 412 (pt = (pd)->page_table[iter], true); \
405 iter++) 413 ++iter)
406 414
407static inline uint32_t i915_pte_index(uint64_t address, uint32_t pde_shift) 415static inline uint32_t i915_pte_index(uint64_t address, uint32_t pde_shift)
408{ 416{
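A minimal usage sketch of the reworked gen6_for_each_pde() described in the comment above, matching the call sites updated earlier in this diff. Note that the macro consumes start and length, so callers that need the original values afterwards must save copies first (as gen6_alloc_va_range does with start_save/length_save):

/* Sketch: walk every page table covering [start, start + length). */
static void sketch_walk_pdes(struct i915_hw_ppgtt *ppgtt,
                             uint32_t start, uint32_t length)
{
        struct i915_page_table *pt;
        uint32_t pde;

        gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde)
                DRM_DEBUG_DRIVER("pde %u -> pt %p\n", pde, pt);
}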
@@ -518,9 +526,7 @@ int i915_ggtt_enable_hw(struct drm_device *dev);
518void i915_gem_init_ggtt(struct drm_device *dev); 526void i915_gem_init_ggtt(struct drm_device *dev);
519void i915_ggtt_cleanup_hw(struct drm_device *dev); 527void i915_ggtt_cleanup_hw(struct drm_device *dev);
520 528
521int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
522int i915_ppgtt_init_hw(struct drm_device *dev); 529int i915_ppgtt_init_hw(struct drm_device *dev);
523int i915_ppgtt_init_ring(struct drm_i915_gem_request *req);
524void i915_ppgtt_release(struct kref *kref); 530void i915_ppgtt_release(struct kref *kref);
525struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev, 531struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev,
526 struct drm_i915_file_private *fpriv); 532 struct drm_i915_file_private *fpriv);
@@ -535,7 +541,7 @@ static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
535 kref_put(&ppgtt->ref, i915_ppgtt_release); 541 kref_put(&ppgtt->ref, i915_ppgtt_release);
536} 542}
537 543
538void i915_check_and_clear_faults(struct drm_device *dev); 544void i915_check_and_clear_faults(struct drm_i915_private *dev_priv);
539void i915_gem_suspend_gtt_mappings(struct drm_device *dev); 545void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
540void i915_gem_restore_gtt_mappings(struct drm_device *dev); 546void i915_gem_restore_gtt_mappings(struct drm_device *dev);
541 547
@@ -560,4 +566,36 @@ size_t
560i915_ggtt_view_size(struct drm_i915_gem_object *obj, 566i915_ggtt_view_size(struct drm_i915_gem_object *obj,
561 const struct i915_ggtt_view *view); 567 const struct i915_ggtt_view *view);
562 568
569/**
570 * i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture
571 * @vma: VMA to iomap
572 *
573 * The passed in VMA has to be pinned in the global GTT mappable region.
574 * An extra pinning of the VMA is acquired for the return iomapping,
575 * the caller must call i915_vma_unpin_iomap to relinquish the pinning
576 * after the iomapping is no longer required.
577 *
578 * Callers must hold the struct_mutex.
579 *
580 * Returns a valid iomapped pointer or ERR_PTR.
581 */
582void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
583
584/**
585 * i915_vma_unpin_iomap - unpins the mapping returned from i915_vma_pin_iomap()
586 * @vma: VMA to unpin
587 *
588 * Unpins the previously iomapped VMA from i915_vma_pin_iomap().
589 *
590 * Callers must hold the struct_mutex. This function is only valid to be
591 * called on a VMA previously iomapped by the caller with i915_vma_pin_iomap().
592 */
593static inline void i915_vma_unpin_iomap(struct i915_vma *vma)
594{
595 lockdep_assert_held(&vma->vm->dev->struct_mutex);
596 GEM_BUG_ON(vma->pin_count == 0);
597 GEM_BUG_ON(vma->iomap == NULL);
598 vma->pin_count--;
599}
600
563#endif 601#endif
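A usage sketch for the iomap helpers declared above, assuming the VMA is already bound into the mappable GGTT and struct_mutex is held; error handling is kept minimal and the write is only illustrative:

/* Sketch: write one dword through the aperture mapping of a GGTT VMA. */
static int sketch_poke_vma(struct i915_vma *vma, u32 value)
{
        void __iomem *ptr;

        ptr = i915_vma_pin_iomap(vma);
        if (IS_ERR(ptr))
                return PTR_ERR(ptr);

        iowrite32(value, ptr);
        i915_vma_unpin_iomap(vma);
        return 0;
}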
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 71611bf21fca..f75bbd67a13a 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -29,7 +29,7 @@
29#include "intel_renderstate.h" 29#include "intel_renderstate.h"
30 30
31static const struct intel_renderstate_rodata * 31static const struct intel_renderstate_rodata *
32render_state_get_rodata(struct drm_device *dev, const int gen) 32render_state_get_rodata(const int gen)
33{ 33{
34 switch (gen) { 34 switch (gen) {
35 case 6: 35 case 6:
@@ -45,21 +45,22 @@ render_state_get_rodata(struct drm_device *dev, const int gen)
45 return NULL; 45 return NULL;
46} 46}
47 47
48static int render_state_init(struct render_state *so, struct drm_device *dev) 48static int render_state_init(struct render_state *so,
49 struct drm_i915_private *dev_priv)
49{ 50{
50 int ret; 51 int ret;
51 52
52 so->gen = INTEL_INFO(dev)->gen; 53 so->gen = INTEL_GEN(dev_priv);
53 so->rodata = render_state_get_rodata(dev, so->gen); 54 so->rodata = render_state_get_rodata(so->gen);
54 if (so->rodata == NULL) 55 if (so->rodata == NULL)
55 return 0; 56 return 0;
56 57
57 if (so->rodata->batch_items * 4 > 4096) 58 if (so->rodata->batch_items * 4 > 4096)
58 return -EINVAL; 59 return -EINVAL;
59 60
60 so->obj = i915_gem_alloc_object(dev, 4096); 61 so->obj = i915_gem_object_create(&dev_priv->drm, 4096);
61 if (so->obj == NULL) 62 if (IS_ERR(so->obj))
62 return -ENOMEM; 63 return PTR_ERR(so->obj);
63 64
64 ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0); 65 ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0);
65 if (ret) 66 if (ret)
@@ -93,6 +94,7 @@ free_gem:
93 94
94static int render_state_setup(struct render_state *so) 95static int render_state_setup(struct render_state *so)
95{ 96{
97 struct drm_device *dev = so->obj->base.dev;
96 const struct intel_renderstate_rodata *rodata = so->rodata; 98 const struct intel_renderstate_rodata *rodata = so->rodata;
97 unsigned int i = 0, reloc_index = 0; 99 unsigned int i = 0, reloc_index = 0;
98 struct page *page; 100 struct page *page;
@@ -134,6 +136,33 @@ static int render_state_setup(struct render_state *so)
134 136
135 so->aux_batch_offset = i * sizeof(u32); 137 so->aux_batch_offset = i * sizeof(u32);
136 138
139 if (HAS_POOLED_EU(dev)) {
140 /*
141 * We always program 3x6 pool config but depending upon which
142 * subslice is disabled HW drops down to appropriate config
143 * shown below.
144 *
145 * In the below table 2x6 config always refers to
146 * fused-down version, native 2x6 is not available and can
147 * be ignored
148 *
149 * SNo subslices config eu pool configuration
150 * -----------------------------------------------------------
151 * 1 3 subslices enabled (3x6) - 0x00777000 (9+9)
152 * 2 ss0 disabled (2x6) - 0x00777000 (3+9)
153 * 3 ss1 disabled (2x6) - 0x00770000 (6+6)
154 * 4 ss2 disabled (2x6) - 0x00007000 (9+3)
155 */
156 u32 eu_pool_config = 0x00777000;
157
158 OUT_BATCH(d, i, GEN9_MEDIA_POOL_STATE);
159 OUT_BATCH(d, i, GEN9_MEDIA_POOL_ENABLE);
160 OUT_BATCH(d, i, eu_pool_config);
161 OUT_BATCH(d, i, 0);
162 OUT_BATCH(d, i, 0);
163 OUT_BATCH(d, i, 0);
164 }
165
137 OUT_BATCH(d, i, MI_BATCH_BUFFER_END); 166 OUT_BATCH(d, i, MI_BATCH_BUFFER_END);
138 so->aux_batch_size = (i * sizeof(u32)) - so->aux_batch_offset; 167 so->aux_batch_size = (i * sizeof(u32)) - so->aux_batch_offset;
139 168
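The table in the comment above folds all fused configurations onto a single register value. A sketch of the words emitted by that hunk, assuming the same 3x6 default is always programmed and the hardware drops the bits for any disabled subslice:

/* Sketch of the pooled-EU batch: command, enable, one config word and
 * three reserved zero dwords. */
static const u32 sketch_pool_batch[] = {
        GEN9_MEDIA_POOL_STATE,
        GEN9_MEDIA_POOL_ENABLE,
        0x00777000,     /* 3x6: 9 + 9 EUs; fused parts ignore unused bits */
        0, 0, 0,
};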
@@ -177,7 +206,7 @@ int i915_gem_render_state_prepare(struct intel_engine_cs *engine,
177 if (WARN_ON(engine->id != RCS)) 206 if (WARN_ON(engine->id != RCS))
178 return -ENOENT; 207 return -ENOENT;
179 208
180 ret = render_state_init(so, engine->dev); 209 ret = render_state_init(so, engine->i915);
181 if (ret) 210 if (ret)
182 return ret; 211 return ret;
183 212
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 66571466e9a8..6f10b421487b 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -131,7 +131,16 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
131 unsigned long count = 0; 131 unsigned long count = 0;
132 132
133 trace_i915_gem_shrink(dev_priv, target, flags); 133 trace_i915_gem_shrink(dev_priv, target, flags);
134 i915_gem_retire_requests(dev_priv->dev); 134 i915_gem_retire_requests(dev_priv);
135
136 /*
137 * Unbinding of objects will require HW access; Let us not wake the
138 * device just to recover a little memory. If absolutely necessary,
139 * we will force the wake during oom-notifier.
140 */
141 if ((flags & I915_SHRINK_BOUND) &&
142 !intel_runtime_pm_get_if_in_use(dev_priv))
143 flags &= ~I915_SHRINK_BOUND;
135 144
136 /* 145 /*
137 * As we may completely rewrite the (un)bound list whilst unbinding 146 * As we may completely rewrite the (un)bound list whilst unbinding
@@ -197,7 +206,10 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
197 list_splice(&still_in_list, phase->list); 206 list_splice(&still_in_list, phase->list);
198 } 207 }
199 208
200 i915_gem_retire_requests(dev_priv->dev); 209 if (flags & I915_SHRINK_BOUND)
210 intel_runtime_pm_put(dev_priv);
211
212 i915_gem_retire_requests(dev_priv);
201 213
202 return count; 214 return count;
203} 215}
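The shrinker change above follows a get-if-in-use pattern so that memory reclaim never powers the device up just to unbind objects; the wakeref is only forced later from the oom notifier. A condensed sketch of that pattern, with the actual shrinking elided:

/* Sketch of the runtime-PM guard added around bound-object shrinking. */
static unsigned long sketch_shrink(struct drm_i915_private *dev_priv,
                                   unsigned flags)
{
        unsigned long count = 0;

        if ((flags & I915_SHRINK_BOUND) &&
            !intel_runtime_pm_get_if_in_use(dev_priv))
                flags &= ~I915_SHRINK_BOUND;    /* device asleep, skip bound */

        /* ... unbind and release pages here ... */

        if (flags & I915_SHRINK_BOUND)
                intel_runtime_pm_put(dev_priv);

        return count;
}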
@@ -245,7 +257,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
245{ 257{
246 struct drm_i915_private *dev_priv = 258 struct drm_i915_private *dev_priv =
247 container_of(shrinker, struct drm_i915_private, mm.shrinker); 259 container_of(shrinker, struct drm_i915_private, mm.shrinker);
248 struct drm_device *dev = dev_priv->dev; 260 struct drm_device *dev = &dev_priv->drm;
249 struct drm_i915_gem_object *obj; 261 struct drm_i915_gem_object *obj;
250 unsigned long count; 262 unsigned long count;
251 bool unlock; 263 bool unlock;
@@ -253,6 +265,8 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
253 if (!i915_gem_shrinker_lock(dev, &unlock)) 265 if (!i915_gem_shrinker_lock(dev, &unlock))
254 return 0; 266 return 0;
255 267
268 i915_gem_retire_requests(dev_priv);
269
256 count = 0; 270 count = 0;
257 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) 271 list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
258 if (can_release_pages(obj)) 272 if (can_release_pages(obj))
@@ -274,7 +288,7 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
274{ 288{
275 struct drm_i915_private *dev_priv = 289 struct drm_i915_private *dev_priv =
276 container_of(shrinker, struct drm_i915_private, mm.shrinker); 290 container_of(shrinker, struct drm_i915_private, mm.shrinker);
277 struct drm_device *dev = dev_priv->dev; 291 struct drm_device *dev = &dev_priv->drm;
278 unsigned long freed; 292 unsigned long freed;
279 bool unlock; 293 bool unlock;
280 294
@@ -309,7 +323,7 @@ i915_gem_shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv,
309{ 323{
310 unsigned long timeout = msecs_to_jiffies(timeout_ms) + 1; 324 unsigned long timeout = msecs_to_jiffies(timeout_ms) + 1;
311 325
312 while (!i915_gem_shrinker_lock(dev_priv->dev, &slu->unlock)) { 326 while (!i915_gem_shrinker_lock(&dev_priv->drm, &slu->unlock)) {
313 schedule_timeout_killable(1); 327 schedule_timeout_killable(1);
314 if (fatal_signal_pending(current)) 328 if (fatal_signal_pending(current))
315 return false; 329 return false;
@@ -330,7 +344,7 @@ i915_gem_shrinker_unlock_uninterruptible(struct drm_i915_private *dev_priv,
330{ 344{
331 dev_priv->mm.interruptible = slu->was_interruptible; 345 dev_priv->mm.interruptible = slu->was_interruptible;
332 if (slu->unlock) 346 if (slu->unlock)
333 mutex_unlock(&dev_priv->dev->struct_mutex); 347 mutex_unlock(&dev_priv->drm.struct_mutex);
334} 348}
335 349
336static int 350static int
@@ -345,7 +359,9 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
345 if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000)) 359 if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
346 return NOTIFY_DONE; 360 return NOTIFY_DONE;
347 361
362 intel_runtime_pm_get(dev_priv);
348 freed_pages = i915_gem_shrink_all(dev_priv); 363 freed_pages = i915_gem_shrink_all(dev_priv);
364 intel_runtime_pm_put(dev_priv);
349 365
350 /* Because we may be allocating inside our own driver, we cannot 366 /* Because we may be allocating inside our own driver, we cannot
351 * assert that there are no objects with pinned pages that are not 367 * assert that there are no objects with pinned pages that are not
@@ -386,17 +402,35 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
386 struct drm_i915_private *dev_priv = 402 struct drm_i915_private *dev_priv =
387 container_of(nb, struct drm_i915_private, mm.vmap_notifier); 403 container_of(nb, struct drm_i915_private, mm.vmap_notifier);
388 struct shrinker_lock_uninterruptible slu; 404 struct shrinker_lock_uninterruptible slu;
389 unsigned long freed_pages; 405 struct i915_vma *vma, *next;
406 unsigned long freed_pages = 0;
407 int ret;
390 408
391 if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000)) 409 if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
392 return NOTIFY_DONE; 410 return NOTIFY_DONE;
393 411
394 freed_pages = i915_gem_shrink(dev_priv, -1UL, 412 /* Force everything onto the inactive lists */
395 I915_SHRINK_BOUND | 413 ret = i915_gem_wait_for_idle(dev_priv);
396 I915_SHRINK_UNBOUND | 414 if (ret)
397 I915_SHRINK_ACTIVE | 415 goto out;
398 I915_SHRINK_VMAPS); 416
417 intel_runtime_pm_get(dev_priv);
418 freed_pages += i915_gem_shrink(dev_priv, -1UL,
419 I915_SHRINK_BOUND |
420 I915_SHRINK_UNBOUND |
421 I915_SHRINK_ACTIVE |
422 I915_SHRINK_VMAPS);
423 intel_runtime_pm_put(dev_priv);
424
425 /* We also want to clear any cached iomaps as they wrap vmap */
426 list_for_each_entry_safe(vma, next,
427 &dev_priv->ggtt.base.inactive_list, vm_link) {
428 unsigned long count = vma->node.size >> PAGE_SHIFT;
429 if (vma->iomap && i915_vma_unbind(vma) == 0)
430 freed_pages += count;
431 }
399 432
433out:
400 i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu); 434 i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);
401 435
402 *(unsigned long *)ptr += freed_pages; 436 *(unsigned long *)ptr += freed_pages;
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 44004e3f09e4..66be299a1486 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -111,9 +111,9 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
111 if (INTEL_INFO(dev)->gen >= 3) { 111 if (INTEL_INFO(dev)->gen >= 3) {
112 u32 bsm; 112 u32 bsm;
113 113
114 pci_read_config_dword(dev->pdev, BSM, &bsm); 114 pci_read_config_dword(dev->pdev, INTEL_BSM, &bsm);
115 115
116 base = bsm & BSM_MASK; 116 base = bsm & INTEL_BSM_MASK;
117 } else if (IS_I865G(dev)) { 117 } else if (IS_I865G(dev)) {
118 u16 toud = 0; 118 u16 toud = 0;
119 119
@@ -270,7 +270,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
270 270
271void i915_gem_cleanup_stolen(struct drm_device *dev) 271void i915_gem_cleanup_stolen(struct drm_device *dev)
272{ 272{
273 struct drm_i915_private *dev_priv = dev->dev_private; 273 struct drm_i915_private *dev_priv = to_i915(dev);
274 274
275 if (!drm_mm_initialized(&dev_priv->mm.stolen)) 275 if (!drm_mm_initialized(&dev_priv->mm.stolen))
276 return; 276 return;
@@ -550,7 +550,7 @@ static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
550static void 550static void
551i915_gem_object_release_stolen(struct drm_i915_gem_object *obj) 551i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
552{ 552{
553 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 553 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
554 554
555 if (obj->stolen) { 555 if (obj->stolen) {
556 i915_gem_stolen_remove_node(dev_priv, obj->stolen); 556 i915_gem_stolen_remove_node(dev_priv, obj->stolen);
@@ -601,7 +601,7 @@ cleanup:
601struct drm_i915_gem_object * 601struct drm_i915_gem_object *
602i915_gem_object_create_stolen(struct drm_device *dev, u32 size) 602i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
603{ 603{
604 struct drm_i915_private *dev_priv = dev->dev_private; 604 struct drm_i915_private *dev_priv = to_i915(dev);
605 struct drm_i915_gem_object *obj; 605 struct drm_i915_gem_object *obj;
606 struct drm_mm_node *stolen; 606 struct drm_mm_node *stolen;
607 int ret; 607 int ret;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index b9bdb34032cd..8030199731db 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -125,7 +125,7 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
125 if (INTEL_INFO(obj->base.dev)->gen >= 4) 125 if (INTEL_INFO(obj->base.dev)->gen >= 4)
126 return true; 126 return true;
127 127
128 if (INTEL_INFO(obj->base.dev)->gen == 3) { 128 if (IS_GEN3(obj->base.dev)) {
129 if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK) 129 if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
130 return false; 130 return false;
131 } else { 131 } else {
@@ -162,7 +162,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
162 struct drm_file *file) 162 struct drm_file *file)
163{ 163{
164 struct drm_i915_gem_set_tiling *args = data; 164 struct drm_i915_gem_set_tiling *args = data;
165 struct drm_i915_private *dev_priv = dev->dev_private; 165 struct drm_i915_private *dev_priv = to_i915(dev);
166 struct drm_i915_gem_object *obj; 166 struct drm_i915_gem_object *obj;
167 int ret = 0; 167 int ret = 0;
168 168
@@ -229,7 +229,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
229 */ 229 */
230 if (obj->map_and_fenceable && 230 if (obj->map_and_fenceable &&
231 !i915_gem_object_fence_ok(obj, args->tiling_mode)) 231 !i915_gem_object_fence_ok(obj, args->tiling_mode))
232 ret = i915_gem_object_ggtt_unbind(obj); 232 ret = i915_vma_unbind(i915_gem_obj_to_ggtt(obj));
233 233
234 if (ret == 0) { 234 if (ret == 0) {
235 if (obj->pages && 235 if (obj->pages &&
@@ -294,7 +294,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
294 struct drm_file *file) 294 struct drm_file *file)
295{ 295{
296 struct drm_i915_gem_get_tiling *args = data; 296 struct drm_i915_gem_get_tiling *args = data;
297 struct drm_i915_private *dev_priv = dev->dev_private; 297 struct drm_i915_private *dev_priv = to_i915(dev);
298 struct drm_i915_gem_object *obj; 298 struct drm_i915_gem_object *obj;
299 299
300 obj = to_intel_bo(drm_gem_object_lookup(file, args->handle)); 300 obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 32d9726e38b1..2314c88323e3 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -706,7 +706,8 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
706static void 706static void
707i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj) 707i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
708{ 708{
709 struct sg_page_iter sg_iter; 709 struct sgt_iter sgt_iter;
710 struct page *page;
710 711
711 BUG_ON(obj->userptr.work != NULL); 712 BUG_ON(obj->userptr.work != NULL);
712 __i915_gem_userptr_set_active(obj, false); 713 __i915_gem_userptr_set_active(obj, false);
@@ -716,9 +717,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj)
716 717
717 i915_gem_gtt_finish_object(obj); 718 i915_gem_gtt_finish_object(obj);
718 719
719 for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) { 720 for_each_sgt_page(page, sgt_iter, obj->pages) {
720 struct page *page = sg_page_iter_page(&sg_iter);
721
722 if (obj->dirty) 721 if (obj->dirty)
723 set_page_dirty(page); 722 set_page_dirty(page);
724 723
@@ -855,11 +854,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file
855 return 0; 854 return 0;
856} 855}
857 856
858int 857void i915_gem_init_userptr(struct drm_i915_private *dev_priv)
859i915_gem_init_userptr(struct drm_device *dev)
860{ 858{
861 struct drm_i915_private *dev_priv = to_i915(dev);
862 mutex_init(&dev_priv->mm_lock); 859 mutex_init(&dev_priv->mm_lock);
863 hash_init(dev_priv->mm_structs); 860 hash_init(dev_priv->mm_structs);
864 return 0;
865} 861}
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 89725c9efc25..9d73d2216adc 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -332,7 +332,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
332 const struct i915_error_state_file_priv *error_priv) 332 const struct i915_error_state_file_priv *error_priv)
333{ 333{
334 struct drm_device *dev = error_priv->dev; 334 struct drm_device *dev = error_priv->dev;
335 struct drm_i915_private *dev_priv = dev->dev_private; 335 struct drm_i915_private *dev_priv = to_i915(dev);
336 struct drm_i915_error_state *error = error_priv->error; 336 struct drm_i915_error_state *error = error_priv->error;
337 struct drm_i915_error_object *obj; 337 struct drm_i915_error_object *obj;
338 int i, j, offset, elt; 338 int i, j, offset, elt;
@@ -411,7 +411,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
411 err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg); 411 err_printf(m, "DONE_REG: 0x%08x\n", error->done_reg);
412 } 412 }
413 413
414 if (INTEL_INFO(dev)->gen == 7) 414 if (IS_GEN7(dev))
415 err_printf(m, "ERR_INT: 0x%08x\n", error->err_int); 415 err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
416 416
417 for (i = 0; i < ARRAY_SIZE(error->ring); i++) 417 for (i = 0; i < ARRAY_SIZE(error->ring); i++)
@@ -463,6 +463,18 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
463 } 463 }
464 } 464 }
465 465
466 if (error->ring[i].num_waiters) {
467 err_printf(m, "%s --- %d waiters\n",
468 dev_priv->engine[i].name,
469 error->ring[i].num_waiters);
470 for (j = 0; j < error->ring[i].num_waiters; j++) {
471 err_printf(m, " seqno 0x%08x for %s [%d]\n",
472 error->ring[i].waiters[j].seqno,
473 error->ring[i].waiters[j].comm,
474 error->ring[i].waiters[j].pid);
475 }
476 }
477
466 if ((obj = error->ring[i].ringbuffer)) { 478 if ((obj = error->ring[i].ringbuffer)) {
467 err_printf(m, "%s --- ringbuffer = 0x%08x\n", 479 err_printf(m, "%s --- ringbuffer = 0x%08x\n",
468 dev_priv->engine[i].name, 480 dev_priv->engine[i].name,
@@ -488,7 +500,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
488 hws_page[elt+1], 500 hws_page[elt+1],
489 hws_page[elt+2], 501 hws_page[elt+2],
490 hws_page[elt+3]); 502 hws_page[elt+3]);
491 offset += 16; 503 offset += 16;
492 } 504 }
493 } 505 }
494 506
@@ -605,8 +617,9 @@ static void i915_error_state_free(struct kref *error_ref)
605 i915_error_object_free(error->ring[i].ringbuffer); 617 i915_error_object_free(error->ring[i].ringbuffer);
606 i915_error_object_free(error->ring[i].hws_page); 618 i915_error_object_free(error->ring[i].hws_page);
607 i915_error_object_free(error->ring[i].ctx); 619 i915_error_object_free(error->ring[i].ctx);
608 kfree(error->ring[i].requests);
609 i915_error_object_free(error->ring[i].wa_ctx); 620 i915_error_object_free(error->ring[i].wa_ctx);
621 kfree(error->ring[i].requests);
622 kfree(error->ring[i].waiters);
610 } 623 }
611 624
612 i915_error_object_free(error->semaphore_obj); 625 i915_error_object_free(error->semaphore_obj);
@@ -824,19 +837,18 @@ static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
824 return error_code; 837 return error_code;
825} 838}
826 839
827static void i915_gem_record_fences(struct drm_device *dev, 840static void i915_gem_record_fences(struct drm_i915_private *dev_priv,
828 struct drm_i915_error_state *error) 841 struct drm_i915_error_state *error)
829{ 842{
830 struct drm_i915_private *dev_priv = dev->dev_private;
831 int i; 843 int i;
832 844
833 if (IS_GEN3(dev) || IS_GEN2(dev)) { 845 if (IS_GEN3(dev_priv) || IS_GEN2(dev_priv)) {
834 for (i = 0; i < dev_priv->num_fence_regs; i++) 846 for (i = 0; i < dev_priv->num_fence_regs; i++)
835 error->fence[i] = I915_READ(FENCE_REG(i)); 847 error->fence[i] = I915_READ(FENCE_REG(i));
836 } else if (IS_GEN5(dev) || IS_GEN4(dev)) { 848 } else if (IS_GEN5(dev_priv) || IS_GEN4(dev_priv)) {
837 for (i = 0; i < dev_priv->num_fence_regs; i++) 849 for (i = 0; i < dev_priv->num_fence_regs; i++)
838 error->fence[i] = I915_READ64(FENCE_REG_965_LO(i)); 850 error->fence[i] = I915_READ64(FENCE_REG_965_LO(i));
839 } else if (INTEL_INFO(dev)->gen >= 6) { 851 } else if (INTEL_GEN(dev_priv) >= 6) {
840 for (i = 0; i < dev_priv->num_fence_regs; i++) 852 for (i = 0; i < dev_priv->num_fence_regs; i++)
841 error->fence[i] = I915_READ64(FENCE_REG_GEN6_LO(i)); 853 error->fence[i] = I915_READ64(FENCE_REG_GEN6_LO(i));
842 } 854 }
@@ -851,7 +863,7 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
851 struct intel_engine_cs *to; 863 struct intel_engine_cs *to;
852 enum intel_engine_id id; 864 enum intel_engine_id id;
853 865
854 if (!i915_semaphore_is_enabled(dev_priv->dev)) 866 if (!i915_semaphore_is_enabled(dev_priv))
855 return; 867 return;
856 868
857 if (!error->semaphore_obj) 869 if (!error->semaphore_obj)
@@ -893,31 +905,71 @@ static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
893 } 905 }
894} 906}
895 907
896static void i915_record_ring_state(struct drm_device *dev, 908static void engine_record_waiters(struct intel_engine_cs *engine,
909 struct drm_i915_error_ring *ering)
910{
911 struct intel_breadcrumbs *b = &engine->breadcrumbs;
912 struct drm_i915_error_waiter *waiter;
913 struct rb_node *rb;
914 int count;
915
916 ering->num_waiters = 0;
917 ering->waiters = NULL;
918
919 spin_lock(&b->lock);
920 count = 0;
921 for (rb = rb_first(&b->waiters); rb != NULL; rb = rb_next(rb))
922 count++;
923 spin_unlock(&b->lock);
924
925 waiter = NULL;
926 if (count)
927 waiter = kmalloc_array(count,
928 sizeof(struct drm_i915_error_waiter),
929 GFP_ATOMIC);
930 if (!waiter)
931 return;
932
933 ering->waiters = waiter;
934
935 spin_lock(&b->lock);
936 for (rb = rb_first(&b->waiters); rb; rb = rb_next(rb)) {
937 struct intel_wait *w = container_of(rb, typeof(*w), node);
938
939 strcpy(waiter->comm, w->tsk->comm);
940 waiter->pid = w->tsk->pid;
941 waiter->seqno = w->seqno;
942 waiter++;
943
944 if (++ering->num_waiters == count)
945 break;
946 }
947 spin_unlock(&b->lock);
948}
949
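The new engine_record_waiters() above snapshots the breadcrumbs rb-tree in two passes: count the waiters under the spinlock, drop the lock to do the GFP_ATOMIC allocation, then re-take the lock and copy at most the counted number of entries, since the tree may have changed in between. The following is a rough userspace sketch of that count/unlock/allocate/relock/copy pattern; all names (waiter_snapshot, struct waiter, and so on) are invented for illustration and are not the i915 API.

/* Illustrative sketch of the count -> unlock -> allocate -> relock -> copy
 * pattern used when snapshotting a waiter list that cannot be walked while
 * allocating memory. Names here are hypothetical, not the i915 API. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct waiter {
        struct waiter *next;
        unsigned int seqno;
        char comm[16];
};

struct waiter_list {
        pthread_mutex_t lock;
        struct waiter *head;
};

struct waiter_snap {
        unsigned int seqno;
        char comm[16];
};

/* Returns the number of entries copied into *out (caller frees). */
static int snapshot_waiters(struct waiter_list *wl, struct waiter_snap **out)
{
        struct waiter_snap *snap;
        struct waiter *w;
        int count = 0, n = 0;

        /* Pass 1: count under the lock (no allocation while locked). */
        pthread_mutex_lock(&wl->lock);
        for (w = wl->head; w; w = w->next)
                count++;
        pthread_mutex_unlock(&wl->lock);

        *out = NULL;
        if (!count)
                return 0;

        snap = calloc(count, sizeof(*snap));
        if (!snap)
                return 0;

        /* Pass 2: copy, but never past the count we sized the array for;
         * the list may have grown while the lock was dropped. */
        pthread_mutex_lock(&wl->lock);
        for (w = wl->head; w && n < count; w = w->next, n++) {
                snap[n].seqno = w->seqno;
                strncpy(snap[n].comm, w->comm, sizeof(snap[n].comm) - 1);
        }
        pthread_mutex_unlock(&wl->lock);

        *out = snap;
        return n;
}

int main(void)
{
        struct waiter a = { NULL, 2, "taskB" }, b = { &a, 1, "taskA" };
        struct waiter_list wl = { PTHREAD_MUTEX_INITIALIZER, &b };
        struct waiter_snap *snap;
        int i, n = snapshot_waiters(&wl, &snap);

        for (i = 0; i < n; i++)
                printf("seqno %u for %s\n", snap[i].seqno, snap[i].comm);
        free(snap);
        return 0;
}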
950static void i915_record_ring_state(struct drm_i915_private *dev_priv,
897 struct drm_i915_error_state *error, 951 struct drm_i915_error_state *error,
898 struct intel_engine_cs *engine, 952 struct intel_engine_cs *engine,
899 struct drm_i915_error_ring *ering) 953 struct drm_i915_error_ring *ering)
900{ 954{
901 struct drm_i915_private *dev_priv = dev->dev_private; 955 if (INTEL_GEN(dev_priv) >= 6) {
902
903 if (INTEL_INFO(dev)->gen >= 6) {
904 ering->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base)); 956 ering->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
905 ering->fault_reg = I915_READ(RING_FAULT_REG(engine)); 957 ering->fault_reg = I915_READ(RING_FAULT_REG(engine));
906 if (INTEL_INFO(dev)->gen >= 8) 958 if (INTEL_GEN(dev_priv) >= 8)
907 gen8_record_semaphore_state(dev_priv, error, engine, 959 gen8_record_semaphore_state(dev_priv, error, engine,
908 ering); 960 ering);
909 else 961 else
910 gen6_record_semaphore_state(dev_priv, engine, ering); 962 gen6_record_semaphore_state(dev_priv, engine, ering);
911 } 963 }
912 964
913 if (INTEL_INFO(dev)->gen >= 4) { 965 if (INTEL_GEN(dev_priv) >= 4) {
914 ering->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base)); 966 ering->faddr = I915_READ(RING_DMA_FADD(engine->mmio_base));
915 ering->ipeir = I915_READ(RING_IPEIR(engine->mmio_base)); 967 ering->ipeir = I915_READ(RING_IPEIR(engine->mmio_base));
916 ering->ipehr = I915_READ(RING_IPEHR(engine->mmio_base)); 968 ering->ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
917 ering->instdone = I915_READ(RING_INSTDONE(engine->mmio_base)); 969 ering->instdone = I915_READ(RING_INSTDONE(engine->mmio_base));
918 ering->instps = I915_READ(RING_INSTPS(engine->mmio_base)); 970 ering->instps = I915_READ(RING_INSTPS(engine->mmio_base));
919 ering->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base)); 971 ering->bbaddr = I915_READ(RING_BBADDR(engine->mmio_base));
920 if (INTEL_INFO(dev)->gen >= 8) { 972 if (INTEL_GEN(dev_priv) >= 8) {
921 ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32; 973 ering->faddr |= (u64) I915_READ(RING_DMA_FADD_UDW(engine->mmio_base)) << 32;
922 ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32; 974 ering->bbaddr |= (u64) I915_READ(RING_BBADDR_UDW(engine->mmio_base)) << 32;
923 } 975 }
@@ -929,20 +981,20 @@ static void i915_record_ring_state(struct drm_device *dev,
929 ering->instdone = I915_READ(GEN2_INSTDONE); 981 ering->instdone = I915_READ(GEN2_INSTDONE);
930 } 982 }
931 983
932 ering->waiting = waitqueue_active(&engine->irq_queue); 984 ering->waiting = intel_engine_has_waiter(engine);
933 ering->instpm = I915_READ(RING_INSTPM(engine->mmio_base)); 985 ering->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
934 ering->acthd = intel_ring_get_active_head(engine); 986 ering->acthd = intel_ring_get_active_head(engine);
935 ering->seqno = engine->get_seqno(engine); 987 ering->seqno = intel_engine_get_seqno(engine);
936 ering->last_seqno = engine->last_submitted_seqno; 988 ering->last_seqno = engine->last_submitted_seqno;
937 ering->start = I915_READ_START(engine); 989 ering->start = I915_READ_START(engine);
938 ering->head = I915_READ_HEAD(engine); 990 ering->head = I915_READ_HEAD(engine);
939 ering->tail = I915_READ_TAIL(engine); 991 ering->tail = I915_READ_TAIL(engine);
940 ering->ctl = I915_READ_CTL(engine); 992 ering->ctl = I915_READ_CTL(engine);
941 993
942 if (I915_NEED_GFX_HWS(dev)) { 994 if (I915_NEED_GFX_HWS(dev_priv)) {
943 i915_reg_t mmio; 995 i915_reg_t mmio;
944 996
945 if (IS_GEN7(dev)) { 997 if (IS_GEN7(dev_priv)) {
946 switch (engine->id) { 998 switch (engine->id) {
947 default: 999 default:
948 case RCS: 1000 case RCS:
@@ -958,7 +1010,7 @@ static void i915_record_ring_state(struct drm_device *dev,
958 mmio = VEBOX_HWS_PGA_GEN7; 1010 mmio = VEBOX_HWS_PGA_GEN7;
959 break; 1011 break;
960 } 1012 }
961 } else if (IS_GEN6(engine->dev)) { 1013 } else if (IS_GEN6(engine->i915)) {
962 mmio = RING_HWS_PGA_GEN6(engine->mmio_base); 1014 mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
963 } else { 1015 } else {
964 /* XXX: gen8 returns to sanity */ 1016 /* XXX: gen8 returns to sanity */
@@ -971,18 +1023,18 @@ static void i915_record_ring_state(struct drm_device *dev,
971 ering->hangcheck_score = engine->hangcheck.score; 1023 ering->hangcheck_score = engine->hangcheck.score;
972 ering->hangcheck_action = engine->hangcheck.action; 1024 ering->hangcheck_action = engine->hangcheck.action;
973 1025
974 if (USES_PPGTT(dev)) { 1026 if (USES_PPGTT(dev_priv)) {
975 int i; 1027 int i;
976 1028
977 ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine)); 1029 ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(engine));
978 1030
979 if (IS_GEN6(dev)) 1031 if (IS_GEN6(dev_priv))
980 ering->vm_info.pp_dir_base = 1032 ering->vm_info.pp_dir_base =
981 I915_READ(RING_PP_DIR_BASE_READ(engine)); 1033 I915_READ(RING_PP_DIR_BASE_READ(engine));
982 else if (IS_GEN7(dev)) 1034 else if (IS_GEN7(dev_priv))
983 ering->vm_info.pp_dir_base = 1035 ering->vm_info.pp_dir_base =
984 I915_READ(RING_PP_DIR_BASE(engine)); 1036 I915_READ(RING_PP_DIR_BASE(engine));
985 else if (INTEL_INFO(dev)->gen >= 8) 1037 else if (INTEL_GEN(dev_priv) >= 8)
986 for (i = 0; i < 4; i++) { 1038 for (i = 0; i < 4; i++) {
987 ering->vm_info.pdp[i] = 1039 ering->vm_info.pdp[i] =
988 I915_READ(GEN8_RING_PDP_UDW(engine, i)); 1040 I915_READ(GEN8_RING_PDP_UDW(engine, i));
@@ -998,7 +1050,7 @@ static void i915_gem_record_active_context(struct intel_engine_cs *engine,
998 struct drm_i915_error_state *error, 1050 struct drm_i915_error_state *error,
999 struct drm_i915_error_ring *ering) 1051 struct drm_i915_error_ring *ering)
1000{ 1052{
1001 struct drm_i915_private *dev_priv = engine->dev->dev_private; 1053 struct drm_i915_private *dev_priv = engine->i915;
1002 struct drm_i915_gem_object *obj; 1054 struct drm_i915_gem_object *obj;
1003 1055
1004 /* Currently render ring is the only HW context user */ 1056 /* Currently render ring is the only HW context user */
@@ -1016,34 +1068,33 @@ static void i915_gem_record_active_context(struct intel_engine_cs *engine,
1016 } 1068 }
1017} 1069}
1018 1070
1019static void i915_gem_record_rings(struct drm_device *dev, 1071static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
1020 struct drm_i915_error_state *error) 1072 struct drm_i915_error_state *error)
1021{ 1073{
1022 struct drm_i915_private *dev_priv = to_i915(dev);
1023 struct i915_ggtt *ggtt = &dev_priv->ggtt; 1074 struct i915_ggtt *ggtt = &dev_priv->ggtt;
1024 struct drm_i915_gem_request *request; 1075 struct drm_i915_gem_request *request;
1025 int i, count; 1076 int i, count;
1026 1077
1027 for (i = 0; i < I915_NUM_ENGINES; i++) { 1078 for (i = 0; i < I915_NUM_ENGINES; i++) {
1028 struct intel_engine_cs *engine = &dev_priv->engine[i]; 1079 struct intel_engine_cs *engine = &dev_priv->engine[i];
1029 struct intel_ringbuffer *rbuf;
1030 1080
1031 error->ring[i].pid = -1; 1081 error->ring[i].pid = -1;
1032 1082
1033 if (engine->dev == NULL) 1083 if (!intel_engine_initialized(engine))
1034 continue; 1084 continue;
1035 1085
1036 error->ring[i].valid = true; 1086 error->ring[i].valid = true;
1037 1087
1038 i915_record_ring_state(dev, error, engine, &error->ring[i]); 1088 i915_record_ring_state(dev_priv, error, engine, &error->ring[i]);
1089 engine_record_waiters(engine, &error->ring[i]);
1039 1090
1040 request = i915_gem_find_active_request(engine); 1091 request = i915_gem_find_active_request(engine);
1041 if (request) { 1092 if (request) {
1042 struct i915_address_space *vm; 1093 struct i915_address_space *vm;
1094 struct intel_ringbuffer *rb;
1043 1095
1044 vm = request->ctx && request->ctx->ppgtt ? 1096 vm = request->ctx->ppgtt ?
1045 &request->ctx->ppgtt->base : 1097 &request->ctx->ppgtt->base : &ggtt->base;
1046 &ggtt->base;
1047 1098
1048 /* We need to copy these to an anonymous buffer 1099 /* We need to copy these to an anonymous buffer
1049 * as the simplest method to avoid being overwritten 1100 * as the simplest method to avoid being overwritten
@@ -1070,26 +1121,17 @@ static void i915_gem_record_rings(struct drm_device *dev,
1070 } 1121 }
1071 rcu_read_unlock(); 1122 rcu_read_unlock();
1072 } 1123 }
1073 }
1074 1124
1075 if (i915.enable_execlists) { 1125 error->simulated |=
1076 /* TODO: This is only a small fix to keep basic error 1126 request->ctx->flags & CONTEXT_NO_ERROR_CAPTURE;
1077 * capture working, but we need to add more information
1078 * for it to be useful (e.g. dump the context being
1079 * executed).
1080 */
1081 if (request)
1082 rbuf = request->ctx->engine[engine->id].ringbuf;
1083 else
1084 rbuf = dev_priv->kernel_context->engine[engine->id].ringbuf;
1085 } else
1086 rbuf = engine->buffer;
1087
1088 error->ring[i].cpu_ring_head = rbuf->head;
1089 error->ring[i].cpu_ring_tail = rbuf->tail;
1090 1127
1091 error->ring[i].ringbuffer = 1128 rb = request->ringbuf;
1092 i915_error_ggtt_object_create(dev_priv, rbuf->obj); 1129 error->ring[i].cpu_ring_head = rb->head;
1130 error->ring[i].cpu_ring_tail = rb->tail;
1131 error->ring[i].ringbuffer =
1132 i915_error_ggtt_object_create(dev_priv,
1133 rb->obj);
1134 }
1093 1135
1094 error->ring[i].hws_page = 1136 error->ring[i].hws_page =
1095 i915_error_ggtt_object_create(dev_priv, 1137 i915_error_ggtt_object_create(dev_priv,
@@ -1234,7 +1276,7 @@ static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
1234static void i915_capture_reg_state(struct drm_i915_private *dev_priv, 1276static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
1235 struct drm_i915_error_state *error) 1277 struct drm_i915_error_state *error)
1236{ 1278{
1237 struct drm_device *dev = dev_priv->dev; 1279 struct drm_device *dev = &dev_priv->drm;
1238 int i; 1280 int i;
1239 1281
1240 /* General organization 1282 /* General organization
@@ -1301,15 +1343,14 @@ static void i915_capture_reg_state(struct drm_i915_private *dev_priv,
1301 error->eir = I915_READ(EIR); 1343 error->eir = I915_READ(EIR);
1302 error->pgtbl_er = I915_READ(PGTBL_ER); 1344 error->pgtbl_er = I915_READ(PGTBL_ER);
1303 1345
1304 i915_get_extra_instdone(dev, error->extra_instdone); 1346 i915_get_extra_instdone(dev_priv, error->extra_instdone);
1305} 1347}
1306 1348
1307static void i915_error_capture_msg(struct drm_device *dev, 1349static void i915_error_capture_msg(struct drm_i915_private *dev_priv,
1308 struct drm_i915_error_state *error, 1350 struct drm_i915_error_state *error,
1309 u32 engine_mask, 1351 u32 engine_mask,
1310 const char *error_msg) 1352 const char *error_msg)
1311{ 1353{
1312 struct drm_i915_private *dev_priv = dev->dev_private;
1313 u32 ecode; 1354 u32 ecode;
1314 int ring_id = -1, len; 1355 int ring_id = -1, len;
1315 1356
@@ -1317,7 +1358,7 @@ static void i915_error_capture_msg(struct drm_device *dev,
1317 1358
1318 len = scnprintf(error->error_msg, sizeof(error->error_msg), 1359 len = scnprintf(error->error_msg, sizeof(error->error_msg),
1319 "GPU HANG: ecode %d:%d:0x%08x", 1360 "GPU HANG: ecode %d:%d:0x%08x",
1320 INTEL_INFO(dev)->gen, ring_id, ecode); 1361 INTEL_GEN(dev_priv), ring_id, ecode);
1321 1362
1322 if (ring_id != -1 && error->ring[ring_id].pid != -1) 1363 if (ring_id != -1 && error->ring[ring_id].pid != -1)
1323 len += scnprintf(error->error_msg + len, 1364 len += scnprintf(error->error_msg + len,
@@ -1352,14 +1393,17 @@ static void i915_capture_gen_state(struct drm_i915_private *dev_priv,
1352 * out a structure which becomes available in debugfs for user level tools 1393 * out a structure which becomes available in debugfs for user level tools
1353 * to pick up. 1394 * to pick up.
1354 */ 1395 */
1355void i915_capture_error_state(struct drm_device *dev, u32 engine_mask, 1396void i915_capture_error_state(struct drm_i915_private *dev_priv,
1397 u32 engine_mask,
1356 const char *error_msg) 1398 const char *error_msg)
1357{ 1399{
1358 static bool warned; 1400 static bool warned;
1359 struct drm_i915_private *dev_priv = dev->dev_private;
1360 struct drm_i915_error_state *error; 1401 struct drm_i915_error_state *error;
1361 unsigned long flags; 1402 unsigned long flags;
1362 1403
1404 if (READ_ONCE(dev_priv->gpu_error.first_error))
1405 return;
1406
1363 /* Account for pipe specific data like PIPE*STAT */ 1407 /* Account for pipe specific data like PIPE*STAT */
1364 error = kzalloc(sizeof(*error), GFP_ATOMIC); 1408 error = kzalloc(sizeof(*error), GFP_ATOMIC);
1365 if (!error) { 1409 if (!error) {
@@ -1372,23 +1416,25 @@ void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
1372 i915_capture_gen_state(dev_priv, error); 1416 i915_capture_gen_state(dev_priv, error);
1373 i915_capture_reg_state(dev_priv, error); 1417 i915_capture_reg_state(dev_priv, error);
1374 i915_gem_capture_buffers(dev_priv, error); 1418 i915_gem_capture_buffers(dev_priv, error);
1375 i915_gem_record_fences(dev, error); 1419 i915_gem_record_fences(dev_priv, error);
1376 i915_gem_record_rings(dev, error); 1420 i915_gem_record_rings(dev_priv, error);
1377 1421
1378 do_gettimeofday(&error->time); 1422 do_gettimeofday(&error->time);
1379 1423
1380 error->overlay = intel_overlay_capture_error_state(dev); 1424 error->overlay = intel_overlay_capture_error_state(dev_priv);
1381 error->display = intel_display_capture_error_state(dev); 1425 error->display = intel_display_capture_error_state(dev_priv);
1382 1426
1383 i915_error_capture_msg(dev, error, engine_mask, error_msg); 1427 i915_error_capture_msg(dev_priv, error, engine_mask, error_msg);
1384 DRM_INFO("%s\n", error->error_msg); 1428 DRM_INFO("%s\n", error->error_msg);
1385 1429
1386 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); 1430 if (!error->simulated) {
1387 if (dev_priv->gpu_error.first_error == NULL) { 1431 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
1388 dev_priv->gpu_error.first_error = error; 1432 if (!dev_priv->gpu_error.first_error) {
1389 error = NULL; 1433 dev_priv->gpu_error.first_error = error;
1434 error = NULL;
1435 }
1436 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
1390 } 1437 }
1391 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
1392 1438
1393 if (error) { 1439 if (error) {
1394 i915_error_state_free(&error->ref); 1440 i915_error_state_free(&error->ref);
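The reworked capture path above bails out early with an unlocked peek at gpu_error.first_error and only publishes a non-simulated capture under the spinlock if the slot is still empty, so only the first hang is ever recorded. A minimal userspace sketch of that "record only the first error" publication follows; the names and the __atomic_load_n() builtin are stand-ins for the driver's READ_ONCE()/spinlock usage, not its actual types.

/* Sketch of "keep only the first error": a cheap unlocked peek, then a
 * locked test-and-set. Hypothetical names, not the driver's structures. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct error_state { int code; };

static pthread_mutex_t error_lock = PTHREAD_MUTEX_INITIALIZER;
static struct error_state *first_error;        /* NULL until the first capture */

static void capture_error(int code, int simulated)
{
        struct error_state *e;

        /* Fast path: someone already captured one, skip the expensive work. */
        if (__atomic_load_n(&first_error, __ATOMIC_RELAXED))
                return;

        e = malloc(sizeof(*e));
        if (!e)
                return;
        e->code = code;        /* ...expensive state capture would go here... */

        if (!simulated) {
                pthread_mutex_lock(&error_lock);
                if (!first_error) {
                        first_error = e;
                        e = NULL;        /* published; do not free */
                }
                pthread_mutex_unlock(&error_lock);
        }

        free(e);        /* lost the race, or simulated: discard the capture */
}

int main(void)
{
        capture_error(42, 0);
        capture_error(7, 0);        /* ignored: first_error already set */
        printf("first error code: %d\n", first_error->code);
        return 0;
}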
@@ -1400,7 +1446,8 @@ void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
1400 DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n"); 1446 DRM_INFO("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
1401 DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n"); 1447 DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
1402 DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n"); 1448 DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n");
1403 DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", dev->primary->index); 1449 DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n",
1450 dev_priv->drm.primary->index);
1404 warned = true; 1451 warned = true;
1405 } 1452 }
1406} 1453}
@@ -1408,7 +1455,7 @@ void i915_capture_error_state(struct drm_device *dev, u32 engine_mask,
1408void i915_error_state_get(struct drm_device *dev, 1455void i915_error_state_get(struct drm_device *dev,
1409 struct i915_error_state_file_priv *error_priv) 1456 struct i915_error_state_file_priv *error_priv)
1410{ 1457{
1411 struct drm_i915_private *dev_priv = dev->dev_private; 1458 struct drm_i915_private *dev_priv = to_i915(dev);
1412 1459
1413 spin_lock_irq(&dev_priv->gpu_error.lock); 1460 spin_lock_irq(&dev_priv->gpu_error.lock);
1414 error_priv->error = dev_priv->gpu_error.first_error; 1461 error_priv->error = dev_priv->gpu_error.first_error;
@@ -1426,7 +1473,7 @@ void i915_error_state_put(struct i915_error_state_file_priv *error_priv)
1426 1473
1427void i915_destroy_error_state(struct drm_device *dev) 1474void i915_destroy_error_state(struct drm_device *dev)
1428{ 1475{
1429 struct drm_i915_private *dev_priv = dev->dev_private; 1476 struct drm_i915_private *dev_priv = to_i915(dev);
1430 struct drm_i915_error_state *error; 1477 struct drm_i915_error_state *error;
1431 1478
1432 spin_lock_irq(&dev_priv->gpu_error.lock); 1479 spin_lock_irq(&dev_priv->gpu_error.lock);
@@ -1450,17 +1497,17 @@ const char *i915_cache_level_str(struct drm_i915_private *i915, int type)
1450} 1497}
1451 1498
1452/* NB: please notice the memset */ 1499/* NB: please notice the memset */
1453void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone) 1500void i915_get_extra_instdone(struct drm_i915_private *dev_priv,
1501 uint32_t *instdone)
1454{ 1502{
1455 struct drm_i915_private *dev_priv = dev->dev_private;
1456 memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG); 1503 memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
1457 1504
1458 if (IS_GEN2(dev) || IS_GEN3(dev)) 1505 if (IS_GEN2(dev_priv) || IS_GEN3(dev_priv))
1459 instdone[0] = I915_READ(GEN2_INSTDONE); 1506 instdone[0] = I915_READ(GEN2_INSTDONE);
1460 else if (IS_GEN4(dev) || IS_GEN5(dev) || IS_GEN6(dev)) { 1507 else if (IS_GEN4(dev_priv) || IS_GEN5(dev_priv) || IS_GEN6(dev_priv)) {
1461 instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE)); 1508 instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
1462 instdone[1] = I915_READ(GEN4_INSTDONE1); 1509 instdone[1] = I915_READ(GEN4_INSTDONE1);
1463 } else if (INTEL_INFO(dev)->gen >= 7) { 1510 } else if (INTEL_GEN(dev_priv) >= 7) {
1464 instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE)); 1511 instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE));
1465 instdone[1] = I915_READ(GEN7_SC_INSTDONE); 1512 instdone[1] = I915_READ(GEN7_SC_INSTDONE);
1466 instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE); 1513 instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
diff --git a/drivers/gpu/drm/i915/i915_guc_reg.h b/drivers/gpu/drm/i915/i915_guc_reg.h
index 80786d9f9ad3..cf5a65be4fe0 100644
--- a/drivers/gpu/drm/i915/i915_guc_reg.h
+++ b/drivers/gpu/drm/i915/i915_guc_reg.h
@@ -67,11 +67,11 @@
67#define GUC_WOPCM_OFFSET_VALUE 0x80000 /* 512KB */ 67#define GUC_WOPCM_OFFSET_VALUE 0x80000 /* 512KB */
68#define GUC_MAX_IDLE_COUNT _MMIO(0xC3E4) 68#define GUC_MAX_IDLE_COUNT _MMIO(0xC3E4)
69 69
70/* Defines WOPCM space available to GuC firmware */
70#define GUC_WOPCM_SIZE _MMIO(0xc050) 71#define GUC_WOPCM_SIZE _MMIO(0xc050)
71#define GUC_WOPCM_SIZE_VALUE (0x80 << 12) /* 512KB */
72
73/* GuC addresses below GUC_WOPCM_TOP don't map through the GTT */ 72/* GuC addresses below GUC_WOPCM_TOP don't map through the GTT */
74#define GUC_WOPCM_TOP (GUC_WOPCM_SIZE_VALUE) 73#define GUC_WOPCM_TOP (0x80 << 12) /* 512KB */
74#define BXT_GUC_WOPCM_RC6_RESERVED (0x10 << 12) /* 64KB */
75 75
76#define GEN8_GT_PM_CONFIG _MMIO(0x138140) 76#define GEN8_GT_PM_CONFIG _MMIO(0x138140)
77#define GEN9LP_GT_PM_CONFIG _MMIO(0x138140) 77#define GEN9LP_GT_PM_CONFIG _MMIO(0x138140)
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index d40c13fb6643..2112e029db6a 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -97,8 +97,14 @@ static int host2guc_action(struct intel_guc *guc, u32 *data, u32 len)
97 97
98 I915_WRITE(HOST2GUC_INTERRUPT, HOST2GUC_TRIGGER); 98 I915_WRITE(HOST2GUC_INTERRUPT, HOST2GUC_TRIGGER);
99 99
100 /* No HOST2GUC command should take longer than 10ms */ 100 /*
101 ret = wait_for_atomic(host2guc_action_response(dev_priv, &status), 10); 101 * Fast commands should complete in less than 10us, so sample quickly
102 * up to that length of time, then switch to a slower sleep-wait loop.
103 * No HOST2GUC command should ever take longer than 10ms.
104 */
105 ret = wait_for_us(host2guc_action_response(dev_priv, &status), 10);
106 if (ret)
107 ret = wait_for(host2guc_action_response(dev_priv, &status), 10);
102 if (status != GUC2HOST_STATUS_SUCCESS) { 108 if (status != GUC2HOST_STATUS_SUCCESS) {
103 /* 109 /*
104 * Either the GuC explicitly returned an error (which 110 * Either the GuC explicitly returned an error (which
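The new wait in host2guc_action() above is two-phase: busy-poll for up to 10us for the common fast completions, then fall back to a sleeping poll with the 10ms overall budget (wait_for_us()/wait_for() in the driver). A self-contained sketch of that two-phase wait is shown below, assuming a caller-supplied cond() predicate; the helper names and the 100us sleep interval are invented for illustration.

/* Two-phase completion wait: a short busy-poll for fast responses, then a
 * slower sleeping poll up to a hard deadline. Illustrative only. */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static long long now_ns(void)
{
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (long long)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* Returns 0 if cond() became true within the budget, -1 on timeout. */
static int wait_for_cond(bool (*cond)(void),
                         long long spin_ns, long long total_ns)
{
        long long start = now_ns();

        /* Phase 1: busy-poll, cheap for sub-10us completions. */
        while (now_ns() - start < spin_ns)
                if (cond())
                        return 0;

        /* Phase 2: sleep between samples until the overall deadline. */
        while (now_ns() - start < total_ns) {
                struct timespec nap = { 0, 100 * 1000 };        /* 100us */

                if (cond())
                        return 0;
                nanosleep(&nap, NULL);
        }
        return cond() ? 0 : -1;
}

static bool always_ready(void) { return true; }

int main(void)
{
        int ret = wait_for_cond(always_ready, 10 * 1000, 10 * 1000 * 1000);

        printf("wait returned %d\n", ret);
        return 0;
}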
@@ -153,13 +159,11 @@ static int host2guc_sample_forcewake(struct intel_guc *guc,
153 struct i915_guc_client *client) 159 struct i915_guc_client *client)
154{ 160{
155 struct drm_i915_private *dev_priv = guc_to_i915(guc); 161 struct drm_i915_private *dev_priv = guc_to_i915(guc);
156 struct drm_device *dev = dev_priv->dev;
157 u32 data[2]; 162 u32 data[2];
158 163
159 data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE; 164 data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE;
160 /* WaRsDisableCoarsePowerGating:skl,bxt */ 165 /* WaRsDisableCoarsePowerGating:skl,bxt */
161 if (!intel_enable_rc6(dev) || 166 if (!intel_enable_rc6() || NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
162 NEEDS_WaRsDisableCoarsePowerGating(dev))
163 data[1] = 0; 167 data[1] = 0;
164 else 168 else
165 /* bit 0 and 1 are for Render and Media domain separately */ 169 /* bit 0 and 1 are for Render and Media domain separately */
@@ -175,94 +179,88 @@ static int host2guc_sample_forcewake(struct intel_guc *guc,
175 * client object which contains the page being used for the doorbell 179 * client object which contains the page being used for the doorbell
176 */ 180 */
177 181
178static void guc_init_doorbell(struct intel_guc *guc, 182static int guc_update_doorbell_id(struct intel_guc *guc,
179 struct i915_guc_client *client) 183 struct i915_guc_client *client,
184 u16 new_id)
180{ 185{
186 struct sg_table *sg = guc->ctx_pool_obj->pages;
187 void *doorbell_bitmap = guc->doorbell_bitmap;
181 struct guc_doorbell_info *doorbell; 188 struct guc_doorbell_info *doorbell;
189 struct guc_context_desc desc;
190 size_t len;
182 191
183 doorbell = client->client_base + client->doorbell_offset; 192 doorbell = client->client_base + client->doorbell_offset;
184 193
185 doorbell->db_status = GUC_DOORBELL_ENABLED; 194 if (client->doorbell_id != GUC_INVALID_DOORBELL_ID &&
186 doorbell->cookie = 0; 195 test_bit(client->doorbell_id, doorbell_bitmap)) {
187} 196 /* Deactivate the old doorbell */
188 197 doorbell->db_status = GUC_DOORBELL_DISABLED;
189static int guc_ring_doorbell(struct i915_guc_client *gc) 198 (void)host2guc_release_doorbell(guc, client);
190{ 199 __clear_bit(client->doorbell_id, doorbell_bitmap);
191 struct guc_process_desc *desc; 200 }
192 union guc_doorbell_qw db_cmp, db_exc, db_ret;
193 union guc_doorbell_qw *db;
194 int attempt = 2, ret = -EAGAIN;
195
196 desc = gc->client_base + gc->proc_desc_offset;
197
198 /* Update the tail so it is visible to GuC */
199 desc->tail = gc->wq_tail;
200
201 /* current cookie */
202 db_cmp.db_status = GUC_DOORBELL_ENABLED;
203 db_cmp.cookie = gc->cookie;
204
205 /* cookie to be updated */
206 db_exc.db_status = GUC_DOORBELL_ENABLED;
207 db_exc.cookie = gc->cookie + 1;
208 if (db_exc.cookie == 0)
209 db_exc.cookie = 1;
210
211 /* pointer of current doorbell cacheline */
212 db = gc->client_base + gc->doorbell_offset;
213
214 while (attempt--) {
215 /* lets ring the doorbell */
216 db_ret.value_qw = atomic64_cmpxchg((atomic64_t *)db,
217 db_cmp.value_qw, db_exc.value_qw);
218
219 /* if the exchange was successfully executed */
220 if (db_ret.value_qw == db_cmp.value_qw) {
221 /* db was successfully rung */
222 gc->cookie = db_exc.cookie;
223 ret = 0;
224 break;
225 }
226 201
227 /* XXX: doorbell was lost and need to acquire it again */ 202 /* Update the GuC's idea of the doorbell ID */
228 if (db_ret.db_status == GUC_DOORBELL_DISABLED) 203 len = sg_pcopy_to_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
229 break; 204 sizeof(desc) * client->ctx_index);
205 if (len != sizeof(desc))
206 return -EFAULT;
207 desc.db_id = new_id;
208 len = sg_pcopy_from_buffer(sg->sgl, sg->nents, &desc, sizeof(desc),
209 sizeof(desc) * client->ctx_index);
210 if (len != sizeof(desc))
211 return -EFAULT;
230 212
231 DRM_ERROR("Cookie mismatch. Expected %d, returned %d\n", 213 client->doorbell_id = new_id;
232 db_cmp.cookie, db_ret.cookie); 214 if (new_id == GUC_INVALID_DOORBELL_ID)
215 return 0;
233 216
234 /* update the cookie to newly read cookie from GuC */ 217 /* Activate the new doorbell */
235 db_cmp.cookie = db_ret.cookie; 218 __set_bit(new_id, doorbell_bitmap);
236 db_exc.cookie = db_ret.cookie + 1; 219 doorbell->cookie = 0;
237 if (db_exc.cookie == 0) 220 doorbell->db_status = GUC_DOORBELL_ENABLED;
238 db_exc.cookie = 1; 221 return host2guc_allocate_doorbell(guc, client);
239 } 222}
240 223
241 return ret; 224static int guc_init_doorbell(struct intel_guc *guc,
225 struct i915_guc_client *client,
226 uint16_t db_id)
227{
228 return guc_update_doorbell_id(guc, client, db_id);
242} 229}
243 230
244static void guc_disable_doorbell(struct intel_guc *guc, 231static void guc_disable_doorbell(struct intel_guc *guc,
245 struct i915_guc_client *client) 232 struct i915_guc_client *client)
246{ 233{
247 struct drm_i915_private *dev_priv = guc_to_i915(guc); 234 (void)guc_update_doorbell_id(guc, client, GUC_INVALID_DOORBELL_ID);
248 struct guc_doorbell_info *doorbell;
249 i915_reg_t drbreg = GEN8_DRBREGL(client->doorbell_id);
250 int value;
251
252 doorbell = client->client_base + client->doorbell_offset;
253 235
254 doorbell->db_status = GUC_DOORBELL_DISABLED; 236 /* XXX: wait for any interrupts */
237 /* XXX: wait for workqueue to drain */
238}
255 239
256 I915_WRITE(drbreg, I915_READ(drbreg) & ~GEN8_DRB_VALID); 240static uint16_t
241select_doorbell_register(struct intel_guc *guc, uint32_t priority)
242{
243 /*
244 * The bitmap tracks which doorbell registers are currently in use.
245 * It is split into two halves; the first half is used for normal
246 * priority contexts, the second half for high-priority ones.
247 * Note that logically higher priorities are numerically less than
248 * normal ones, so the test below means "is it high-priority?"
249 */
250 const bool hi_pri = (priority <= GUC_CTX_PRIORITY_HIGH);
251 const uint16_t half = GUC_MAX_DOORBELLS / 2;
252 const uint16_t start = hi_pri ? half : 0;
253 const uint16_t end = start + half;
254 uint16_t id;
257 255
258 value = I915_READ(drbreg); 256 id = find_next_zero_bit(guc->doorbell_bitmap, end, start);
259 WARN_ON((value & GEN8_DRB_VALID) != 0); 257 if (id == end)
258 id = GUC_INVALID_DOORBELL_ID;
260 259
261 I915_WRITE(GEN8_DRBREGU(client->doorbell_id), 0); 260 DRM_DEBUG_DRIVER("assigned %s priority doorbell id 0x%x\n",
262 I915_WRITE(drbreg, 0); 261 hi_pri ? "high" : "normal", id);
263 262
264 /* XXX: wait for any interrupts */ 263 return id;
265 /* XXX: wait for workqueue to drain */
266} 264}
267 265
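select_doorbell_register() above searches only the half of the doorbell bitmap that matches the client's priority: high-priority clients (numerically lower priority values) draw from the upper half, everything else from the lower half, and the bit is only marked in use later when the doorbell is actually programmed. Below is a small sketch of that split-range first-zero-bit search over a plain uint64_t bitmap; the constants and names are made up for the example.

/* Split-range allocation from a bitmap: normal-priority IDs come from the
 * lower half, high-priority IDs from the upper half. Hypothetical example. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_DOORBELLS        256U
#define INVALID_ID           0xffffU

static uint64_t doorbell_bitmap[MAX_DOORBELLS / 64];

static bool bit_is_set(unsigned int id)
{
        return doorbell_bitmap[id / 64] & (1ULL << (id % 64));
}

static unsigned int select_doorbell(bool high_priority)
{
        const unsigned int half = MAX_DOORBELLS / 2;
        const unsigned int start = high_priority ? half : 0;
        const unsigned int end = start + half;
        unsigned int id;

        for (id = start; id < end; id++)
                if (!bit_is_set(id))
                        return id;        /* caller marks it in use later */

        return INVALID_ID;
}

int main(void)
{
        doorbell_bitmap[0] = 0x3;        /* IDs 0 and 1 already taken */
        printf("normal: %u, high: %u\n",
               select_doorbell(false), select_doorbell(true));
        return 0;
}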
268/* 266/*
@@ -289,37 +287,6 @@ static uint32_t select_doorbell_cacheline(struct intel_guc *guc)
289 return offset; 287 return offset;
290} 288}
291 289
292static uint16_t assign_doorbell(struct intel_guc *guc, uint32_t priority)
293{
294 /*
295 * The bitmap is split into two halves; the first half is used for
296 * normal priority contexts, the second half for high-priority ones.
297 * Note that logically higher priorities are numerically less than
298 * normal ones, so the test below means "is it high-priority?"
299 */
300 const bool hi_pri = (priority <= GUC_CTX_PRIORITY_HIGH);
301 const uint16_t half = GUC_MAX_DOORBELLS / 2;
302 const uint16_t start = hi_pri ? half : 0;
303 const uint16_t end = start + half;
304 uint16_t id;
305
306 id = find_next_zero_bit(guc->doorbell_bitmap, end, start);
307 if (id == end)
308 id = GUC_INVALID_DOORBELL_ID;
309 else
310 bitmap_set(guc->doorbell_bitmap, id, 1);
311
312 DRM_DEBUG_DRIVER("assigned %s priority doorbell id 0x%x\n",
313 hi_pri ? "high" : "normal", id);
314
315 return id;
316}
317
318static void release_doorbell(struct intel_guc *guc, uint16_t id)
319{
320 bitmap_clear(guc->doorbell_bitmap, id, 1);
321}
322
323/* 290/*
324 * Initialise the process descriptor shared with the GuC firmware. 291 * Initialise the process descriptor shared with the GuC firmware.
325 */ 292 */
@@ -361,10 +328,9 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
361 struct drm_i915_gem_object *client_obj = client->client_obj; 328 struct drm_i915_gem_object *client_obj = client->client_obj;
362 struct drm_i915_private *dev_priv = guc_to_i915(guc); 329 struct drm_i915_private *dev_priv = guc_to_i915(guc);
363 struct intel_engine_cs *engine; 330 struct intel_engine_cs *engine;
364 struct intel_context *ctx = client->owner; 331 struct i915_gem_context *ctx = client->owner;
365 struct guc_context_desc desc; 332 struct guc_context_desc desc;
366 struct sg_table *sg; 333 struct sg_table *sg;
367 enum intel_engine_id id;
368 u32 gfx_addr; 334 u32 gfx_addr;
369 335
370 memset(&desc, 0, sizeof(desc)); 336 memset(&desc, 0, sizeof(desc));
@@ -374,10 +340,10 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
374 desc.priority = client->priority; 340 desc.priority = client->priority;
375 desc.db_id = client->doorbell_id; 341 desc.db_id = client->doorbell_id;
376 342
377 for_each_engine_id(engine, dev_priv, id) { 343 for_each_engine(engine, dev_priv) {
344 struct intel_context *ce = &ctx->engine[engine->id];
378 struct guc_execlist_context *lrc = &desc.lrc[engine->guc_id]; 345 struct guc_execlist_context *lrc = &desc.lrc[engine->guc_id];
379 struct drm_i915_gem_object *obj; 346 struct drm_i915_gem_object *obj;
380 uint64_t ctx_desc;
381 347
382 /* TODO: We have a design issue to be solved here. Only when we 348 /* TODO: We have a design issue to be solved here. Only when we
383 * receive the first batch, we know which engine is used by the 349 * receive the first batch, we know which engine is used by the
@@ -386,20 +352,18 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
386 * for now who owns a GuC client. But for future owner of GuC 352 * for now who owns a GuC client. But for future owner of GuC
387 * client, need to make sure lrc is pinned prior to enter here. 353 * client, need to make sure lrc is pinned prior to enter here.
388 */ 354 */
389 obj = ctx->engine[id].state; 355 if (!ce->state)
390 if (!obj)
391 break; /* XXX: continue? */ 356 break; /* XXX: continue? */
392 357
393 ctx_desc = intel_lr_context_descriptor(ctx, engine); 358 lrc->context_desc = lower_32_bits(ce->lrc_desc);
394 lrc->context_desc = (u32)ctx_desc;
395 359
396 /* The state page is after PPHWSP */ 360 /* The state page is after PPHWSP */
397 gfx_addr = i915_gem_obj_ggtt_offset(obj); 361 gfx_addr = i915_gem_obj_ggtt_offset(ce->state);
398 lrc->ring_lcra = gfx_addr + LRC_STATE_PN * PAGE_SIZE; 362 lrc->ring_lcra = gfx_addr + LRC_STATE_PN * PAGE_SIZE;
399 lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) | 363 lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
400 (engine->guc_id << GUC_ELC_ENGINE_OFFSET); 364 (engine->guc_id << GUC_ELC_ENGINE_OFFSET);
401 365
402 obj = ctx->engine[id].ringbuf->obj; 366 obj = ce->ringbuf->obj;
403 gfx_addr = i915_gem_obj_ggtt_offset(obj); 367 gfx_addr = i915_gem_obj_ggtt_offset(obj);
404 368
405 lrc->ring_begin = gfx_addr; 369 lrc->ring_begin = gfx_addr;
@@ -427,7 +391,7 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
427 desc.wq_size = client->wq_size; 391 desc.wq_size = client->wq_size;
428 392
429 /* 393 /*
430 * XXX: Take LRCs from an existing intel_context if this is not an 394 * XXX: Take LRCs from an existing context if this is not an
431 * IsKMDCreatedContext client 395 * IsKMDCreatedContext client
432 */ 396 */
433 desc.desc_private = (uintptr_t)client; 397 desc.desc_private = (uintptr_t)client;
@@ -451,47 +415,64 @@ static void guc_fini_ctx_desc(struct intel_guc *guc,
451 sizeof(desc) * client->ctx_index); 415 sizeof(desc) * client->ctx_index);
452} 416}
453 417
454int i915_guc_wq_check_space(struct i915_guc_client *gc) 418/**
419 * i915_guc_wq_check_space() - check that the GuC can accept a request
420 * @request: request associated with the commands
421 *
422 * Return: 0 if space is available
423 * -EAGAIN if space is not currently available
424 *
425 * This function must be called (and must return 0) before a request
426 * is submitted to the GuC via i915_guc_submit() below. Once a result
427 * of 0 has been returned, it remains valid until (but only until)
428 * the next call to submit().
429 *
430 * This precheck allows the caller to determine in advance that space
431 * will be available for the next submission before committing resources
432 * to it, and helps avoid late failures with complicated recovery paths.
433 */
434int i915_guc_wq_check_space(struct drm_i915_gem_request *request)
455{ 435{
436 const size_t wqi_size = sizeof(struct guc_wq_item);
437 struct i915_guc_client *gc = request->i915->guc.execbuf_client;
456 struct guc_process_desc *desc; 438 struct guc_process_desc *desc;
457 u32 size = sizeof(struct guc_wq_item); 439 u32 freespace;
458 int ret = -ETIMEDOUT, timeout_counter = 200;
459 440
460 if (!gc) 441 GEM_BUG_ON(gc == NULL);
461 return 0;
462 442
463 desc = gc->client_base + gc->proc_desc_offset; 443 desc = gc->client_base + gc->proc_desc_offset;
464 444
465 while (timeout_counter-- > 0) { 445 freespace = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
466 if (CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size) >= size) { 446 if (likely(freespace >= wqi_size))
467 ret = 0; 447 return 0;
468 break;
469 }
470 448
471 if (timeout_counter) 449 gc->no_wq_space += 1;
472 usleep_range(1000, 2000);
473 };
474 450
475 return ret; 451 return -EAGAIN;
476} 452}
477 453
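The rewritten i915_guc_wq_check_space() above no longer sleeps: it compares the CIRC_SPACE() of the shared work queue against one work-queue-item size and returns -EAGAIN if the item does not fit, leaving retry policy to the caller. CIRC_SPACE() is the standard linux/circ_buf.h idiom for a power-of-two ring that keeps one slot unused. A self-contained sketch of that precheck follows; the struct and field names are illustrative, not the GuC client layout.

/* Free-space precheck on a power-of-two circular buffer, mirroring the
 * linux/circ_buf.h CIRC_SPACE() idiom. Names are illustrative. */
#include <stdio.h>

#define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
#define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

struct wq {
        unsigned int head;        /* consumer index (bytes) */
        unsigned int tail;        /* producer index (bytes) */
        unsigned int size;        /* power of two */
};

/* Return 0 if one item of item_size bytes fits, -1 (try again later) if not. */
static int wq_check_space(const struct wq *wq, unsigned int item_size)
{
        unsigned int freespace = CIRC_SPACE(wq->tail, wq->head, wq->size);

        return freespace >= item_size ? 0 : -1;
}

int main(void)
{
        struct wq wq = { .head = 0, .tail = 4080, .size = 4096 };

        /* Only 15 bytes free (one slot is always kept empty), so a
         * 16-byte work item does not fit yet. */
        printf("space for 16B item: %s\n",
               wq_check_space(&wq, 16) ? "no" : "yes");
        return 0;
}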
478static int guc_add_workqueue_item(struct i915_guc_client *gc, 454static void guc_add_workqueue_item(struct i915_guc_client *gc,
479 struct drm_i915_gem_request *rq) 455 struct drm_i915_gem_request *rq)
480{ 456{
457 /* wqi_len is in DWords, and does not include the one-word header */
458 const size_t wqi_size = sizeof(struct guc_wq_item);
459 const u32 wqi_len = wqi_size/sizeof(u32) - 1;
481 struct guc_process_desc *desc; 460 struct guc_process_desc *desc;
482 struct guc_wq_item *wqi; 461 struct guc_wq_item *wqi;
483 void *base; 462 void *base;
484 u32 tail, wq_len, wq_off, space; 463 u32 freespace, tail, wq_off, wq_page;
485 464
486 desc = gc->client_base + gc->proc_desc_offset; 465 desc = gc->client_base + gc->proc_desc_offset;
487 space = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
488 if (WARN_ON(space < sizeof(struct guc_wq_item)))
489 return -ENOSPC; /* shouldn't happen */
490 466
491 /* postincrement WQ tail for next time */ 467 /* Free space is guaranteed, see i915_guc_wq_check_space() above */
492 wq_off = gc->wq_tail; 468 freespace = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
493 gc->wq_tail += sizeof(struct guc_wq_item); 469 GEM_BUG_ON(freespace < wqi_size);
494 gc->wq_tail &= gc->wq_size - 1; 470
471 /* The GuC firmware wants the tail index in QWords, not bytes */
472 tail = rq->tail;
473 GEM_BUG_ON(tail & 7);
474 tail >>= 3;
475 GEM_BUG_ON(tail > WQ_RING_TAIL_MAX);
495 476
496 /* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we 477 /* For now workqueue item is 4 DWs; workqueue buffer is 2 pages. So we
497 * should not have the case where structure wqi is across page, neither 478 * should not have the case where structure wqi is across page, neither
@@ -500,19 +481,23 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc,
500 * XXX: if not the case, we need save data to a temp wqi and copy it to 481 * XXX: if not the case, we need save data to a temp wqi and copy it to
501 * workqueue buffer dw by dw. 482 * workqueue buffer dw by dw.
502 */ 483 */
503 WARN_ON(sizeof(struct guc_wq_item) != 16); 484 BUILD_BUG_ON(wqi_size != 16);
504 WARN_ON(wq_off & 3); 485
486 /* postincrement WQ tail for next time */
487 wq_off = gc->wq_tail;
488 gc->wq_tail += wqi_size;
489 gc->wq_tail &= gc->wq_size - 1;
490 GEM_BUG_ON(wq_off & (wqi_size - 1));
505 491
506 /* wq starts from the page after doorbell / process_desc */ 492 /* WQ starts from the page after doorbell / process_desc */
507 base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 493 wq_page = (wq_off + GUC_DB_SIZE) >> PAGE_SHIFT;
508 (wq_off + GUC_DB_SIZE) >> PAGE_SHIFT));
509 wq_off &= PAGE_SIZE - 1; 494 wq_off &= PAGE_SIZE - 1;
495 base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, wq_page));
510 wqi = (struct guc_wq_item *)((char *)base + wq_off); 496 wqi = (struct guc_wq_item *)((char *)base + wq_off);
511 497
512 /* len does not include the header */ 498 /* Now fill in the 4-word work queue item */
513 wq_len = sizeof(struct guc_wq_item) / sizeof(u32) - 1;
514 wqi->header = WQ_TYPE_INORDER | 499 wqi->header = WQ_TYPE_INORDER |
515 (wq_len << WQ_LEN_SHIFT) | 500 (wqi_len << WQ_LEN_SHIFT) |
516 (rq->engine->guc_id << WQ_TARGET_SHIFT) | 501 (rq->engine->guc_id << WQ_TARGET_SHIFT) |
517 WQ_NO_WCFLUSH_WAIT; 502 WQ_NO_WCFLUSH_WAIT;
518 503
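guc_add_workqueue_item() above converts the request tail from bytes to QWords (tail >> 3, after asserting 8-byte alignment) and packs the one-word item header from the item type, a length that excludes the header word, and the target engine. The sketch below shows that packing in isolation; the shift positions and constants are placeholders chosen for the example, not the GuC ABI, and the real code additionally shifts the ring tail into its field.

/* Packing a 4-DWord work-queue item header: type | (len - 1) | engine.
 * Shift/mask values here are placeholders, not the GuC ABI. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define WQ_TYPE_INORDER        0x0u
#define WQ_LEN_SHIFT           16
#define WQ_TARGET_SHIFT        10

struct wq_item {
        uint32_t header;
        uint32_t context_desc;
        uint32_t ring_tail;        /* in QWords */
        uint32_t fence_id;
};

static struct wq_item make_item(uint32_t engine_id, uint32_t ctx_desc,
                                uint32_t tail_bytes, uint32_t seqno)
{
        /* Length is in DWords and does not include the one-word header. */
        const uint32_t wqi_len = sizeof(struct wq_item) / sizeof(uint32_t) - 1;
        struct wq_item wqi;

        assert((tail_bytes & 7) == 0);        /* firmware wants QWords, not bytes */

        wqi.header = WQ_TYPE_INORDER |
                     (wqi_len << WQ_LEN_SHIFT) |
                     (engine_id << WQ_TARGET_SHIFT);
        wqi.context_desc = ctx_desc;
        wqi.ring_tail = tail_bytes >> 3;
        wqi.fence_id = seqno;
        return wqi;
}

int main(void)
{
        struct wq_item wqi = make_item(1, 0xdeadbeef, 0x140, 42);

        printf("header 0x%08x tail(qw) %u\n", wqi.header, wqi.ring_tail);
        return 0;
}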
@@ -520,48 +505,105 @@ static int guc_add_workqueue_item(struct i915_guc_client *gc,
520 wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, 505 wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx,
521 rq->engine); 506 rq->engine);
522 507
523 /* The GuC firmware wants the tail index in QWords, not bytes */
524 tail = rq->ringbuf->tail >> 3;
525 wqi->ring_tail = tail << WQ_RING_TAIL_SHIFT; 508 wqi->ring_tail = tail << WQ_RING_TAIL_SHIFT;
526 wqi->fence_id = 0; /*XXX: what fence to be here */ 509 wqi->fence_id = rq->seqno;
527 510
528 kunmap_atomic(base); 511 kunmap_atomic(base);
512}
529 513
530 return 0; 514static int guc_ring_doorbell(struct i915_guc_client *gc)
515{
516 struct guc_process_desc *desc;
517 union guc_doorbell_qw db_cmp, db_exc, db_ret;
518 union guc_doorbell_qw *db;
519 int attempt = 2, ret = -EAGAIN;
520
521 desc = gc->client_base + gc->proc_desc_offset;
522
523 /* Update the tail so it is visible to GuC */
524 desc->tail = gc->wq_tail;
525
526 /* current cookie */
527 db_cmp.db_status = GUC_DOORBELL_ENABLED;
528 db_cmp.cookie = gc->cookie;
529
530 /* cookie to be updated */
531 db_exc.db_status = GUC_DOORBELL_ENABLED;
532 db_exc.cookie = gc->cookie + 1;
533 if (db_exc.cookie == 0)
534 db_exc.cookie = 1;
535
536 /* pointer of current doorbell cacheline */
537 db = gc->client_base + gc->doorbell_offset;
538
539 while (attempt--) {
540 /* lets ring the doorbell */
541 db_ret.value_qw = atomic64_cmpxchg((atomic64_t *)db,
542 db_cmp.value_qw, db_exc.value_qw);
543
544 /* if the exchange was successfully executed */
545 if (db_ret.value_qw == db_cmp.value_qw) {
546 /* db was successfully rung */
547 gc->cookie = db_exc.cookie;
548 ret = 0;
549 break;
550 }
551
552 /* XXX: doorbell was lost and need to acquire it again */
553 if (db_ret.db_status == GUC_DOORBELL_DISABLED)
554 break;
555
556 DRM_ERROR("Cookie mismatch. Expected %d, returned %d\n",
557 db_cmp.cookie, db_ret.cookie);
558
559 /* update the cookie to newly read cookie from GuC */
560 db_cmp.cookie = db_ret.cookie;
561 db_exc.cookie = db_ret.cookie + 1;
562 if (db_exc.cookie == 0)
563 db_exc.cookie = 1;
564 }
565
566 return ret;
531} 567}
532 568
533/** 569/**
534 * i915_guc_submit() - Submit commands through GuC 570 * i915_guc_submit() - Submit commands through GuC
535 * @client: the guc client where commands will go through
536 * @rq: request associated with the commands 571 * @rq: request associated with the commands
537 * 572 *
538 * Return: 0 if succeed 573 * Return: 0 on success, otherwise an errno.
574 * (Note: nonzero really shouldn't happen!)
575 *
576 * The caller must have already called i915_guc_wq_check_space() above
577 * with a result of 0 (success) since the last request submission. This
578 * guarantees that there is space in the work queue for the new request,
579 * so enqueuing the item cannot fail.
580 *
581 * Bad Things Will Happen if the caller violates this protocol e.g. calls
582 * submit() when check() says there's no space, or calls submit() multiple
583 * times with no intervening check().
584 *
585 * The only error here arises if the doorbell hardware isn't functioning
586 * as expected, which really shouldn't happen.
539 */ 587 */
540int i915_guc_submit(struct i915_guc_client *client, 588int i915_guc_submit(struct drm_i915_gem_request *rq)
541 struct drm_i915_gem_request *rq)
542{ 589{
543 struct intel_guc *guc = client->guc; 590 unsigned int engine_id = rq->engine->id;
544 unsigned int engine_id = rq->engine->guc_id; 591 struct intel_guc *guc = &rq->i915->guc;
545 int q_ret, b_ret; 592 struct i915_guc_client *client = guc->execbuf_client;
593 int b_ret;
546 594
547 q_ret = guc_add_workqueue_item(client, rq); 595 guc_add_workqueue_item(client, rq);
548 if (q_ret == 0) 596 b_ret = guc_ring_doorbell(client);
549 b_ret = guc_ring_doorbell(client);
550 597
551 client->submissions[engine_id] += 1; 598 client->submissions[engine_id] += 1;
552 if (q_ret) { 599 client->retcode = b_ret;
553 client->q_fail += 1; 600 if (b_ret)
554 client->retcode = q_ret;
555 } else if (b_ret) {
556 client->b_fail += 1; 601 client->b_fail += 1;
557 client->retcode = q_ret = b_ret; 602
558 } else {
559 client->retcode = 0;
560 }
561 guc->submissions[engine_id] += 1; 603 guc->submissions[engine_id] += 1;
562 guc->last_seqno[engine_id] = rq->seqno; 604 guc->last_seqno[engine_id] = rq->seqno;
563 605
564 return q_ret; 606 return b_ret;
565} 607}
566 608
567/* 609/*
@@ -572,7 +614,7 @@ int i915_guc_submit(struct i915_guc_client *client,
572 614
573/** 615/**
574 * gem_allocate_guc_obj() - Allocate gem object for GuC usage 616 * gem_allocate_guc_obj() - Allocate gem object for GuC usage
575 * @dev: drm device 617 * @dev_priv: driver private data structure
576 * @size: size of object 618 * @size: size of object
577 * 619 *
578 * This is a wrapper to create a gem obj. In order to use it inside GuC, the 620 * This is a wrapper to create a gem obj. In order to use it inside GuC, the
@@ -581,14 +623,13 @@ int i915_guc_submit(struct i915_guc_client *client,
581 * 623 *
582 * Return: A drm_i915_gem_object if successful, otherwise NULL. 624 * Return: A drm_i915_gem_object if successful, otherwise NULL.
583 */ 625 */
584static struct drm_i915_gem_object *gem_allocate_guc_obj(struct drm_device *dev, 626static struct drm_i915_gem_object *
585 u32 size) 627gem_allocate_guc_obj(struct drm_i915_private *dev_priv, u32 size)
586{ 628{
587 struct drm_i915_private *dev_priv = dev->dev_private;
588 struct drm_i915_gem_object *obj; 629 struct drm_i915_gem_object *obj;
589 630
590 obj = i915_gem_alloc_object(dev, size); 631 obj = i915_gem_object_create(&dev_priv->drm, size);
591 if (!obj) 632 if (IS_ERR(obj))
592 return NULL; 633 return NULL;
593 634
594 if (i915_gem_object_get_pages(obj)) { 635 if (i915_gem_object_get_pages(obj)) {
@@ -623,10 +664,10 @@ static void gem_release_guc_obj(struct drm_i915_gem_object *obj)
623 drm_gem_object_unreference(&obj->base); 664 drm_gem_object_unreference(&obj->base);
624} 665}
625 666
626static void guc_client_free(struct drm_device *dev, 667static void
627 struct i915_guc_client *client) 668guc_client_free(struct drm_i915_private *dev_priv,
669 struct i915_guc_client *client)
628{ 670{
629 struct drm_i915_private *dev_priv = dev->dev_private;
630 struct intel_guc *guc = &dev_priv->guc; 671 struct intel_guc *guc = &dev_priv->guc;
631 672
632 if (!client) 673 if (!client)
@@ -639,17 +680,10 @@ static void guc_client_free(struct drm_device *dev,
639 680
640 if (client->client_base) { 681 if (client->client_base) {
641 /* 682 /*
642 * If we got as far as setting up a doorbell, make sure 683 * If we got as far as setting up a doorbell, make sure we
643 * we shut it down before unmapping & deallocating the 684 * shut it down before unmapping & deallocating the memory.
644 * memory. So first disable the doorbell, then tell the
645 * GuC that we've finished with it, finally deallocate
646 * it in our bitmap
647 */ 685 */
648 if (client->doorbell_id != GUC_INVALID_DOORBELL_ID) { 686 guc_disable_doorbell(guc, client);
649 guc_disable_doorbell(guc, client);
650 host2guc_release_doorbell(guc, client);
651 release_doorbell(guc, client->doorbell_id);
652 }
653 687
654 kunmap(kmap_to_page(client->client_base)); 688 kunmap(kmap_to_page(client->client_base));
655 } 689 }
@@ -664,9 +698,51 @@ static void guc_client_free(struct drm_device *dev,
664 kfree(client); 698 kfree(client);
665} 699}
666 700
701/*
702 * Borrow the first client to set up & tear down every doorbell
703 * in turn, to ensure that all doorbell h/w is (re)initialised.
704 */
705static void guc_init_doorbell_hw(struct intel_guc *guc)
706{
707 struct drm_i915_private *dev_priv = guc_to_i915(guc);
708 struct i915_guc_client *client = guc->execbuf_client;
709 uint16_t db_id, i;
710 int err;
711
712 db_id = client->doorbell_id;
713
714 for (i = 0; i < GUC_MAX_DOORBELLS; ++i) {
715 i915_reg_t drbreg = GEN8_DRBREGL(i);
716 u32 value = I915_READ(drbreg);
717
718 err = guc_update_doorbell_id(guc, client, i);
719
720 /* Report update failure or unexpectedly active doorbell */
721 if (err || (i != db_id && (value & GUC_DOORBELL_ENABLED)))
722 DRM_DEBUG_DRIVER("Doorbell %d (reg 0x%x) was 0x%x, err %d\n",
723 i, drbreg.reg, value, err);
724 }
725
726 /* Restore to original value */
727 err = guc_update_doorbell_id(guc, client, db_id);
728 if (err)
729 DRM_ERROR("Failed to restore doorbell to %d, err %d\n",
730 db_id, err);
731
732 for (i = 0; i < GUC_MAX_DOORBELLS; ++i) {
733 i915_reg_t drbreg = GEN8_DRBREGL(i);
734 u32 value = I915_READ(drbreg);
735
736 if (i != db_id && (value & GUC_DOORBELL_ENABLED))
737 DRM_DEBUG_DRIVER("Doorbell %d (reg 0x%x) finally 0x%x\n",
738 i, drbreg.reg, value);
739
740 }
741}
742
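guc_init_doorbell_hw() above borrows the single execbuf client to cycle through every possible doorbell ID, so any hardware state left over from a previous firmware load is reprogrammed to a known value, then restores the client's real ID and reports any doorbell that is still unexpectedly enabled. A toy sketch of that sweep-and-restore loop over an array standing in for the doorbell registers follows; none of these names exist in the driver.

/* Sweep every doorbell slot to force it to a known state, then restore the
 * original assignment. The register array is a stand-in for MMIO. */
#include <stdio.h>

#define MAX_DOORBELLS        16
#define DB_ENABLED           0x1

static unsigned int doorbell_reg[MAX_DOORBELLS];        /* fake MMIO */
static unsigned int current_id;

static int program_doorbell(unsigned int new_id)
{
        doorbell_reg[current_id] &= ~DB_ENABLED;        /* release old slot */
        current_id = new_id;
        doorbell_reg[new_id] |= DB_ENABLED;             /* claim new slot */
        return 0;
}

static void reinit_doorbell_hw(unsigned int saved_id)
{
        unsigned int i;

        for (i = 0; i < MAX_DOORBELLS; i++) {
                unsigned int before = doorbell_reg[i];

                /* Report update failure or an unexpectedly active doorbell. */
                if (program_doorbell(i) ||
                    (i != saved_id && (before & DB_ENABLED)))
                        printf("doorbell %u was 0x%x\n", i, before);
        }

        /* Put the client back on its real doorbell. */
        if (program_doorbell(saved_id))
                printf("failed to restore doorbell %u\n", saved_id);
}

int main(void)
{
        doorbell_reg[3] = DB_ENABLED;        /* stale state from a previous run */
        current_id = 0;
        reinit_doorbell_hw(0);
        return 0;
}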
667/** 743/**
668 * guc_client_alloc() - Allocate an i915_guc_client 744 * guc_client_alloc() - Allocate an i915_guc_client
669 * @dev: drm device 745 * @dev_priv: driver private data structure
670 * @priority: four levels priority _CRITICAL, _HIGH, _NORMAL and _LOW 746 * @priority: four levels priority _CRITICAL, _HIGH, _NORMAL and _LOW
671 * The kernel client to replace ExecList submission is created with 747 * The kernel client to replace ExecList submission is created with
672 * NORMAL priority. Priority of a client for scheduler can be HIGH, 748 * NORMAL priority. Priority of a client for scheduler can be HIGH,
@@ -676,14 +752,15 @@ static void guc_client_free(struct drm_device *dev,
676 * 752 *
677 * Return: An i915_guc_client object if success, else NULL. 753 * Return: An i915_guc_client object if success, else NULL.
678 */ 754 */
679static struct i915_guc_client *guc_client_alloc(struct drm_device *dev, 755static struct i915_guc_client *
680 uint32_t priority, 756guc_client_alloc(struct drm_i915_private *dev_priv,
681 struct intel_context *ctx) 757 uint32_t priority,
758 struct i915_gem_context *ctx)
682{ 759{
683 struct i915_guc_client *client; 760 struct i915_guc_client *client;
684 struct drm_i915_private *dev_priv = dev->dev_private;
685 struct intel_guc *guc = &dev_priv->guc; 761 struct intel_guc *guc = &dev_priv->guc;
686 struct drm_i915_gem_object *obj; 762 struct drm_i915_gem_object *obj;
763 uint16_t db_id;
687 764
688 client = kzalloc(sizeof(*client), GFP_KERNEL); 765 client = kzalloc(sizeof(*client), GFP_KERNEL);
689 if (!client) 766 if (!client)
@@ -702,7 +779,7 @@ static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
702 } 779 }
703 780
704 /* The first page is doorbell/proc_desc. Two followed pages are wq. */ 781 /* The first page is doorbell/proc_desc. Two followed pages are wq. */
705 obj = gem_allocate_guc_obj(dev, GUC_DB_SIZE + GUC_WQ_SIZE); 782 obj = gem_allocate_guc_obj(dev_priv, GUC_DB_SIZE + GUC_WQ_SIZE);
706 if (!obj) 783 if (!obj)
707 goto err; 784 goto err;
708 785
@@ -712,6 +789,11 @@ static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
712 client->wq_offset = GUC_DB_SIZE; 789 client->wq_offset = GUC_DB_SIZE;
713 client->wq_size = GUC_WQ_SIZE; 790 client->wq_size = GUC_WQ_SIZE;
714 791
792 db_id = select_doorbell_register(guc, client->priority);
793 if (db_id == GUC_INVALID_DOORBELL_ID)
794 /* XXX: evict a doorbell instead? */
795 goto err;
796
715 client->doorbell_offset = select_doorbell_cacheline(guc); 797 client->doorbell_offset = select_doorbell_cacheline(guc);
716 798
717 /* 799 /*
@@ -724,29 +806,22 @@ static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
724 else 806 else
725 client->proc_desc_offset = (GUC_DB_SIZE / 2); 807 client->proc_desc_offset = (GUC_DB_SIZE / 2);
726 808
727 client->doorbell_id = assign_doorbell(guc, client->priority);
728 if (client->doorbell_id == GUC_INVALID_DOORBELL_ID)
729 /* XXX: evict a doorbell instead */
730 goto err;
731
732 guc_init_proc_desc(guc, client); 809 guc_init_proc_desc(guc, client);
733 guc_init_ctx_desc(guc, client); 810 guc_init_ctx_desc(guc, client);
734 guc_init_doorbell(guc, client); 811 if (guc_init_doorbell(guc, client, db_id))
735
736 /* XXX: Any cache flushes needed? General domain mgmt calls? */
737
738 if (host2guc_allocate_doorbell(guc, client))
739 goto err; 812 goto err;
740 813
741 DRM_DEBUG_DRIVER("new priority %u client %p: ctx_index %u db_id %u\n", 814 DRM_DEBUG_DRIVER("new priority %u client %p: ctx_index %u\n",
742 priority, client, client->ctx_index, client->doorbell_id); 815 priority, client, client->ctx_index);
816 DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%x\n",
817 client->doorbell_id, client->doorbell_offset);
743 818
744 return client; 819 return client;
745 820
746err: 821err:
747 DRM_ERROR("FAILED to create priority %u GuC client!\n", priority); 822 DRM_ERROR("FAILED to create priority %u GuC client!\n", priority);
748 823
749 guc_client_free(dev, client); 824 guc_client_free(dev_priv, client);
750 return NULL; 825 return NULL;
751} 826}
752 827
@@ -771,7 +846,7 @@ static void guc_create_log(struct intel_guc *guc)
771 846
772 obj = guc->log_obj; 847 obj = guc->log_obj;
773 if (!obj) { 848 if (!obj) {
774 obj = gem_allocate_guc_obj(dev_priv->dev, size); 849 obj = gem_allocate_guc_obj(dev_priv, size);
775 if (!obj) { 850 if (!obj) {
776 /* logging will be off */ 851 /* logging will be off */
777 i915.guc_log_level = -1; 852 i915.guc_log_level = -1;
@@ -831,7 +906,7 @@ static void guc_create_ads(struct intel_guc *guc)
831 906
832 obj = guc->ads_obj; 907 obj = guc->ads_obj;
833 if (!obj) { 908 if (!obj) {
834 obj = gem_allocate_guc_obj(dev_priv->dev, PAGE_ALIGN(size)); 909 obj = gem_allocate_guc_obj(dev_priv, PAGE_ALIGN(size));
835 if (!obj) 910 if (!obj)
836 return; 911 return;
837 912
@@ -885,66 +960,65 @@ static void guc_create_ads(struct intel_guc *guc)
885 * Set up the memory resources to be shared with the GuC. At this point, 960 * Set up the memory resources to be shared with the GuC. At this point,
886 * we require just one object that can be mapped through the GGTT. 961 * we require just one object that can be mapped through the GGTT.
887 */ 962 */
888int i915_guc_submission_init(struct drm_device *dev) 963int i915_guc_submission_init(struct drm_i915_private *dev_priv)
889{ 964{
890 struct drm_i915_private *dev_priv = dev->dev_private;
891 const size_t ctxsize = sizeof(struct guc_context_desc); 965 const size_t ctxsize = sizeof(struct guc_context_desc);
892 const size_t poolsize = GUC_MAX_GPU_CONTEXTS * ctxsize; 966 const size_t poolsize = GUC_MAX_GPU_CONTEXTS * ctxsize;
893 const size_t gemsize = round_up(poolsize, PAGE_SIZE); 967 const size_t gemsize = round_up(poolsize, PAGE_SIZE);
894 struct intel_guc *guc = &dev_priv->guc; 968 struct intel_guc *guc = &dev_priv->guc;
895 969
970 /* Wipe bitmap & delete client in case of reinitialisation */
971 bitmap_clear(guc->doorbell_bitmap, 0, GUC_MAX_DOORBELLS);
972 i915_guc_submission_disable(dev_priv);
973
896 if (!i915.enable_guc_submission) 974 if (!i915.enable_guc_submission)
897 return 0; /* not enabled */ 975 return 0; /* not enabled */
898 976
899 if (guc->ctx_pool_obj) 977 if (guc->ctx_pool_obj)
900 return 0; /* already allocated */ 978 return 0; /* already allocated */
901 979
902 guc->ctx_pool_obj = gem_allocate_guc_obj(dev_priv->dev, gemsize); 980 guc->ctx_pool_obj = gem_allocate_guc_obj(dev_priv, gemsize);
903 if (!guc->ctx_pool_obj) 981 if (!guc->ctx_pool_obj)
904 return -ENOMEM; 982 return -ENOMEM;
905 983
906 ida_init(&guc->ctx_ids); 984 ida_init(&guc->ctx_ids);
907
908 guc_create_log(guc); 985 guc_create_log(guc);
909
910 guc_create_ads(guc); 986 guc_create_ads(guc);
911 987
912 return 0; 988 return 0;
913} 989}
914 990
915int i915_guc_submission_enable(struct drm_device *dev) 991int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
916{ 992{
917 struct drm_i915_private *dev_priv = dev->dev_private;
918 struct intel_guc *guc = &dev_priv->guc; 993 struct intel_guc *guc = &dev_priv->guc;
919 struct intel_context *ctx = dev_priv->kernel_context;
920 struct i915_guc_client *client; 994 struct i915_guc_client *client;
921 995
922 /* client for execbuf submission */ 996 /* client for execbuf submission */
923 client = guc_client_alloc(dev, GUC_CTX_PRIORITY_KMD_NORMAL, ctx); 997 client = guc_client_alloc(dev_priv,
998 GUC_CTX_PRIORITY_KMD_NORMAL,
999 dev_priv->kernel_context);
924 if (!client) { 1000 if (!client) {
925 DRM_ERROR("Failed to create execbuf guc_client\n"); 1001 DRM_ERROR("Failed to create execbuf guc_client\n");
926 return -ENOMEM; 1002 return -ENOMEM;
927 } 1003 }
928 1004
929 guc->execbuf_client = client; 1005 guc->execbuf_client = client;
930
931 host2guc_sample_forcewake(guc, client); 1006 host2guc_sample_forcewake(guc, client);
1007 guc_init_doorbell_hw(guc);
932 1008
933 return 0; 1009 return 0;
934} 1010}
935 1011
936void i915_guc_submission_disable(struct drm_device *dev) 1012void i915_guc_submission_disable(struct drm_i915_private *dev_priv)
937{ 1013{
938 struct drm_i915_private *dev_priv = dev->dev_private;
939 struct intel_guc *guc = &dev_priv->guc; 1014 struct intel_guc *guc = &dev_priv->guc;
940 1015
941 guc_client_free(dev, guc->execbuf_client); 1016 guc_client_free(dev_priv, guc->execbuf_client);
942 guc->execbuf_client = NULL; 1017 guc->execbuf_client = NULL;
943} 1018}
944 1019
945void i915_guc_submission_fini(struct drm_device *dev) 1020void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
946{ 1021{
947 struct drm_i915_private *dev_priv = dev->dev_private;
948 struct intel_guc *guc = &dev_priv->guc; 1022 struct intel_guc *guc = &dev_priv->guc;
949 1023
950 gem_release_guc_obj(dev_priv->guc.ads_obj); 1024 gem_release_guc_obj(dev_priv->guc.ads_obj);
@@ -965,12 +1039,12 @@ void i915_guc_submission_fini(struct drm_device *dev)
965 */ 1039 */
966int intel_guc_suspend(struct drm_device *dev) 1040int intel_guc_suspend(struct drm_device *dev)
967{ 1041{
968 struct drm_i915_private *dev_priv = dev->dev_private; 1042 struct drm_i915_private *dev_priv = to_i915(dev);
969 struct intel_guc *guc = &dev_priv->guc; 1043 struct intel_guc *guc = &dev_priv->guc;
970 struct intel_context *ctx; 1044 struct i915_gem_context *ctx;
971 u32 data[3]; 1045 u32 data[3];
972 1046
973 if (!i915.enable_guc_submission) 1047 if (guc->guc_fw.guc_fw_load_status != GUC_FIRMWARE_SUCCESS)
974 return 0; 1048 return 0;
975 1049
976 ctx = dev_priv->kernel_context; 1050 ctx = dev_priv->kernel_context;
@@ -991,12 +1065,12 @@ int intel_guc_suspend(struct drm_device *dev)
991 */ 1065 */
992int intel_guc_resume(struct drm_device *dev) 1066int intel_guc_resume(struct drm_device *dev)
993{ 1067{
994 struct drm_i915_private *dev_priv = dev->dev_private; 1068 struct drm_i915_private *dev_priv = to_i915(dev);
995 struct intel_guc *guc = &dev_priv->guc; 1069 struct intel_guc *guc = &dev_priv->guc;
996 struct intel_context *ctx; 1070 struct i915_gem_context *ctx;
997 u32 data[3]; 1071 u32 data[3];
998 1072
999 if (!i915.enable_guc_submission) 1073 if (guc->guc_fw.guc_fw_load_status != GUC_FIRMWARE_SUCCESS)
1000 return 0; 1074 return 0;
1001 1075
1002 ctx = dev_priv->kernel_context; 1076 ctx = dev_priv->kernel_context;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index aab47f7bb61b..1c2aec392412 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -259,12 +259,12 @@ static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
259 dev_priv->gt_irq_mask &= ~interrupt_mask; 259 dev_priv->gt_irq_mask &= ~interrupt_mask;
260 dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask); 260 dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
261 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 261 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
262 POSTING_READ(GTIMR);
263} 262}
264 263
265void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 264void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
266{ 265{
267 ilk_update_gt_irq(dev_priv, mask, mask); 266 ilk_update_gt_irq(dev_priv, mask, mask);
267 POSTING_READ_FW(GTIMR);
268} 268}
269 269
270void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask) 270void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
@@ -336,9 +336,8 @@ void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
336 __gen6_disable_pm_irq(dev_priv, mask); 336 __gen6_disable_pm_irq(dev_priv, mask);
337} 337}
338 338
339void gen6_reset_rps_interrupts(struct drm_device *dev) 339void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
340{ 340{
341 struct drm_i915_private *dev_priv = dev->dev_private;
342 i915_reg_t reg = gen6_pm_iir(dev_priv); 341 i915_reg_t reg = gen6_pm_iir(dev_priv);
343 342
344 spin_lock_irq(&dev_priv->irq_lock); 343 spin_lock_irq(&dev_priv->irq_lock);
@@ -349,14 +348,11 @@ void gen6_reset_rps_interrupts(struct drm_device *dev)
349 spin_unlock_irq(&dev_priv->irq_lock); 348 spin_unlock_irq(&dev_priv->irq_lock);
350} 349}
351 350
352void gen6_enable_rps_interrupts(struct drm_device *dev) 351void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
353{ 352{
354 struct drm_i915_private *dev_priv = dev->dev_private;
355
356 spin_lock_irq(&dev_priv->irq_lock); 353 spin_lock_irq(&dev_priv->irq_lock);
357 354 WARN_ON_ONCE(dev_priv->rps.pm_iir);
358 WARN_ON(dev_priv->rps.pm_iir); 355 WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
359 WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
360 dev_priv->rps.interrupts_enabled = true; 356 dev_priv->rps.interrupts_enabled = true;
361 I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) | 357 I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
362 dev_priv->pm_rps_events); 358 dev_priv->pm_rps_events);
@@ -367,32 +363,13 @@ void gen6_enable_rps_interrupts(struct drm_device *dev)
367 363
368u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask) 364u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
369{ 365{
370 /* 366 return (mask & ~dev_priv->rps.pm_intr_keep);
371 * SNB,IVB can while VLV,CHV may hard hang on looping batchbuffer
372 * if GEN6_PM_UP_EI_EXPIRED is masked.
373 *
374 * TODO: verify if this can be reproduced on VLV,CHV.
375 */
376 if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
377 mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;
378
379 if (INTEL_INFO(dev_priv)->gen >= 8)
380 mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
381
382 return mask;
383} 367}
384 368
385void gen6_disable_rps_interrupts(struct drm_device *dev) 369void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
386{ 370{
387 struct drm_i915_private *dev_priv = dev->dev_private;
388
389 spin_lock_irq(&dev_priv->irq_lock); 371 spin_lock_irq(&dev_priv->irq_lock);
390 dev_priv->rps.interrupts_enabled = false; 372 dev_priv->rps.interrupts_enabled = false;
391 spin_unlock_irq(&dev_priv->irq_lock);
392
393 cancel_work_sync(&dev_priv->rps.work);
394
395 spin_lock_irq(&dev_priv->irq_lock);
396 373
397 I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0)); 374 I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));
398 375
@@ -401,8 +378,15 @@ void gen6_disable_rps_interrupts(struct drm_device *dev)
401 ~dev_priv->pm_rps_events); 378 ~dev_priv->pm_rps_events);
402 379
403 spin_unlock_irq(&dev_priv->irq_lock); 380 spin_unlock_irq(&dev_priv->irq_lock);
381 synchronize_irq(dev_priv->drm.irq);
404 382
405 synchronize_irq(dev->irq); 383 /* Now that we will not be generating any more work, flush any
384 * outstanding tasks. As we are called on the RPS idle path,
385 * we will reset the GPU to minimum frequencies, so the current
386 * state of the worker can be discarded.
387 */
388 cancel_work_sync(&dev_priv->rps.work);
389 gen6_reset_rps_interrupts(dev_priv);
406} 390}
407 391
408/** 392/**
@@ -582,7 +566,7 @@ i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
582 u32 enable_mask; 566 u32 enable_mask;
583 567
584 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 568 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
585 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev, 569 enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm,
586 status_mask); 570 status_mask);
587 else 571 else
588 enable_mask = status_mask << 16; 572 enable_mask = status_mask << 16;
@@ -596,7 +580,7 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
596 u32 enable_mask; 580 u32 enable_mask;
597 581
598 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 582 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
599 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev, 583 enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm,
600 status_mask); 584 status_mask);
601 else 585 else
602 enable_mask = status_mask << 16; 586 enable_mask = status_mask << 16;
@@ -605,19 +589,17 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
605 589
606/** 590/**
607 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion 591 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
608 * @dev: drm device 592 * @dev_priv: i915 device private
609 */ 593 */
610static void i915_enable_asle_pipestat(struct drm_device *dev) 594static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
611{ 595{
612 struct drm_i915_private *dev_priv = dev->dev_private; 596 if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv))
613
614 if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
615 return; 597 return;
616 598
617 spin_lock_irq(&dev_priv->irq_lock); 599 spin_lock_irq(&dev_priv->irq_lock);
618 600
619 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS); 601 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
620 if (INTEL_INFO(dev)->gen >= 4) 602 if (INTEL_GEN(dev_priv) >= 4)
621 i915_enable_pipestat(dev_priv, PIPE_A, 603 i915_enable_pipestat(dev_priv, PIPE_A,
622 PIPE_LEGACY_BLC_EVENT_STATUS); 604 PIPE_LEGACY_BLC_EVENT_STATUS);
623 605
@@ -685,7 +667,7 @@ static u32 i8xx_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
685 */ 667 */
686static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe) 668static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
687{ 669{
688 struct drm_i915_private *dev_priv = dev->dev_private; 670 struct drm_i915_private *dev_priv = to_i915(dev);
689 i915_reg_t high_frame, low_frame; 671 i915_reg_t high_frame, low_frame;
690 u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal; 672 u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
691 struct intel_crtc *intel_crtc = 673 struct intel_crtc *intel_crtc =
@@ -732,7 +714,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
732 714
733static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe) 715static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
734{ 716{
735 struct drm_i915_private *dev_priv = dev->dev_private; 717 struct drm_i915_private *dev_priv = to_i915(dev);
736 718
737 return I915_READ(PIPE_FRMCOUNT_G4X(pipe)); 719 return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
738} 720}
@@ -741,7 +723,7 @@ static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
741static int __intel_get_crtc_scanline(struct intel_crtc *crtc) 723static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
742{ 724{
743 struct drm_device *dev = crtc->base.dev; 725 struct drm_device *dev = crtc->base.dev;
744 struct drm_i915_private *dev_priv = dev->dev_private; 726 struct drm_i915_private *dev_priv = to_i915(dev);
745 const struct drm_display_mode *mode = &crtc->base.hwmode; 727 const struct drm_display_mode *mode = &crtc->base.hwmode;
746 enum pipe pipe = crtc->pipe; 728 enum pipe pipe = crtc->pipe;
747 int position, vtotal; 729 int position, vtotal;
@@ -750,7 +732,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
750 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 732 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
751 vtotal /= 2; 733 vtotal /= 2;
752 734
753 if (IS_GEN2(dev)) 735 if (IS_GEN2(dev_priv))
754 position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2; 736 position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
755 else 737 else
756 position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; 738 position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
@@ -767,7 +749,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
767 * problem. We may need to extend this to include other platforms, 749 * problem. We may need to extend this to include other platforms,
768 * but so far testing only shows the problem on HSW. 750 * but so far testing only shows the problem on HSW.
769 */ 751 */
770 if (HAS_DDI(dev) && !position) { 752 if (HAS_DDI(dev_priv) && !position) {
771 int i, temp; 753 int i, temp;
772 754
773 for (i = 0; i < 100; i++) { 755 for (i = 0; i < 100; i++) {
@@ -793,7 +775,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
793 ktime_t *stime, ktime_t *etime, 775 ktime_t *stime, ktime_t *etime,
794 const struct drm_display_mode *mode) 776 const struct drm_display_mode *mode)
795{ 777{
796 struct drm_i915_private *dev_priv = dev->dev_private; 778 struct drm_i915_private *dev_priv = to_i915(dev);
797 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 779 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
798 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 780 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
799 int position; 781 int position;
@@ -835,7 +817,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
835 if (stime) 817 if (stime)
836 *stime = ktime_get(); 818 *stime = ktime_get();
837 819
838 if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { 820 if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
839 /* No obvious pixelcount register. Only query vertical 821 /* No obvious pixelcount register. Only query vertical
840 * scanout position from Display scan line register. 822 * scanout position from Display scan line register.
841 */ 823 */
@@ -897,7 +879,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
897 else 879 else
898 position += vtotal - vbl_end; 880 position += vtotal - vbl_end;
899 881
900 if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { 882 if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
901 *vpos = position; 883 *vpos = position;
902 *hpos = 0; 884 *hpos = 0;
903 } else { 885 } else {
@@ -914,7 +896,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
914 896
915int intel_get_crtc_scanline(struct intel_crtc *crtc) 897int intel_get_crtc_scanline(struct intel_crtc *crtc)
916{ 898{
917 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 899 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
918 unsigned long irqflags; 900 unsigned long irqflags;
919 int position; 901 int position;
920 902
@@ -955,9 +937,8 @@ static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
955 &crtc->hwmode); 937 &crtc->hwmode);
956} 938}
957 939
958static void ironlake_rps_change_irq_handler(struct drm_device *dev) 940static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
959{ 941{
960 struct drm_i915_private *dev_priv = dev->dev_private;
961 u32 busy_up, busy_down, max_avg, min_avg; 942 u32 busy_up, busy_down, max_avg, min_avg;
962 u8 new_delay; 943 u8 new_delay;
963 944
@@ -986,7 +967,7 @@ static void ironlake_rps_change_irq_handler(struct drm_device *dev)
986 new_delay = dev_priv->ips.min_delay; 967 new_delay = dev_priv->ips.min_delay;
987 } 968 }
988 969
989 if (ironlake_set_drps(dev, new_delay)) 970 if (ironlake_set_drps(dev_priv, new_delay))
990 dev_priv->ips.cur_delay = new_delay; 971 dev_priv->ips.cur_delay = new_delay;
991 972
992 spin_unlock(&mchdev_lock); 973 spin_unlock(&mchdev_lock);
@@ -996,13 +977,11 @@ static void ironlake_rps_change_irq_handler(struct drm_device *dev)
996 977
997static void notify_ring(struct intel_engine_cs *engine) 978static void notify_ring(struct intel_engine_cs *engine)
998{ 979{
999 if (!intel_engine_initialized(engine)) 980 smp_store_mb(engine->breadcrumbs.irq_posted, true);
1000 return; 981 if (intel_engine_wakeup(engine)) {
1001 982 trace_i915_gem_request_notify(engine);
1002 trace_i915_gem_request_notify(engine); 983 engine->breadcrumbs.irq_wakeups++;
1003 engine->user_interrupts++; 984 }
1004
1005 wake_up_all(&engine->irq_queue);
1006} 985}
1007 986
1008static void vlv_c0_read(struct drm_i915_private *dev_priv, 987static void vlv_c0_read(struct drm_i915_private *dev_priv,
@@ -1083,7 +1062,7 @@ static bool any_waiters(struct drm_i915_private *dev_priv)
1083 struct intel_engine_cs *engine; 1062 struct intel_engine_cs *engine;
1084 1063
1085 for_each_engine(engine, dev_priv) 1064 for_each_engine(engine, dev_priv)
1086 if (engine->irq_refcount) 1065 if (intel_engine_has_waiter(engine))
1087 return true; 1066 return true;
1088 1067
1089 return false; 1068 return false;
@@ -1104,13 +1083,6 @@ static void gen6_pm_rps_work(struct work_struct *work)
1104 return; 1083 return;
1105 } 1084 }
1106 1085
1107 /*
1108 * The RPS work is synced during runtime suspend, we don't require a
1109 * wakeref. TODO: instead of disabling the asserts make sure that we
1110 * always hold an RPM reference while the work is running.
1111 */
1112 DISABLE_RPM_WAKEREF_ASSERTS(dev_priv);
1113
1114 pm_iir = dev_priv->rps.pm_iir; 1086 pm_iir = dev_priv->rps.pm_iir;
1115 dev_priv->rps.pm_iir = 0; 1087 dev_priv->rps.pm_iir = 0;
1116 /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */ 1088 /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
@@ -1123,7 +1095,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
1123 WARN_ON(pm_iir & ~dev_priv->pm_rps_events); 1095 WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
1124 1096
1125 if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost) 1097 if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
1126 goto out; 1098 return;
1127 1099
1128 mutex_lock(&dev_priv->rps.hw_lock); 1100 mutex_lock(&dev_priv->rps.hw_lock);
1129 1101
@@ -1175,11 +1147,9 @@ static void gen6_pm_rps_work(struct work_struct *work)
1175 new_delay += adj; 1147 new_delay += adj;
1176 new_delay = clamp_t(int, new_delay, min, max); 1148 new_delay = clamp_t(int, new_delay, min, max);
1177 1149
1178 intel_set_rps(dev_priv->dev, new_delay); 1150 intel_set_rps(dev_priv, new_delay);
1179 1151
1180 mutex_unlock(&dev_priv->rps.hw_lock); 1152 mutex_unlock(&dev_priv->rps.hw_lock);
1181out:
1182 ENABLE_RPM_WAKEREF_ASSERTS(dev_priv);
1183} 1153}
1184 1154
1185 1155
@@ -1205,7 +1175,7 @@ static void ivybridge_parity_work(struct work_struct *work)
1205 * In order to prevent a get/put style interface, acquire struct mutex 1175 * In order to prevent a get/put style interface, acquire struct mutex
1206 * any time we access those registers. 1176 * any time we access those registers.
1207 */ 1177 */
1208 mutex_lock(&dev_priv->dev->struct_mutex); 1178 mutex_lock(&dev_priv->drm.struct_mutex);
1209 1179
1210 /* If we've screwed up tracking, just let the interrupt fire again */ 1180 /* If we've screwed up tracking, just let the interrupt fire again */
1211 if (WARN_ON(!dev_priv->l3_parity.which_slice)) 1181 if (WARN_ON(!dev_priv->l3_parity.which_slice))
@@ -1241,7 +1211,7 @@ static void ivybridge_parity_work(struct work_struct *work)
1241 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice); 1211 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
1242 parity_event[5] = NULL; 1212 parity_event[5] = NULL;
1243 1213
1244 kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj, 1214 kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
1245 KOBJ_CHANGE, parity_event); 1215 KOBJ_CHANGE, parity_event);
1246 1216
1247 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n", 1217 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
@@ -1261,7 +1231,7 @@ out:
1261 gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv)); 1231 gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
1262 spin_unlock_irq(&dev_priv->irq_lock); 1232 spin_unlock_irq(&dev_priv->irq_lock);
1263 1233
1264 mutex_unlock(&dev_priv->dev->struct_mutex); 1234 mutex_unlock(&dev_priv->drm.struct_mutex);
1265} 1235}
1266 1236
1267static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv, 1237static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
@@ -1287,8 +1257,7 @@ static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv
1287static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv, 1257static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
1288 u32 gt_iir) 1258 u32 gt_iir)
1289{ 1259{
1290 if (gt_iir & 1260 if (gt_iir & GT_RENDER_USER_INTERRUPT)
1291 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1292 notify_ring(&dev_priv->engine[RCS]); 1261 notify_ring(&dev_priv->engine[RCS]);
1293 if (gt_iir & ILK_BSD_USER_INTERRUPT) 1262 if (gt_iir & ILK_BSD_USER_INTERRUPT)
1294 notify_ring(&dev_priv->engine[VCS]); 1263 notify_ring(&dev_priv->engine[VCS]);
@@ -1297,9 +1266,7 @@ static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
1297static void snb_gt_irq_handler(struct drm_i915_private *dev_priv, 1266static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
1298 u32 gt_iir) 1267 u32 gt_iir)
1299{ 1268{
1300 1269 if (gt_iir & GT_RENDER_USER_INTERRUPT)
1301 if (gt_iir &
1302 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1303 notify_ring(&dev_priv->engine[RCS]); 1270 notify_ring(&dev_priv->engine[RCS]);
1304 if (gt_iir & GT_BSD_USER_INTERRUPT) 1271 if (gt_iir & GT_BSD_USER_INTERRUPT)
1305 notify_ring(&dev_priv->engine[VCS]); 1272 notify_ring(&dev_priv->engine[VCS]);
@@ -1506,27 +1473,23 @@ static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
1506 1473
1507} 1474}
1508 1475
1509static void gmbus_irq_handler(struct drm_device *dev) 1476static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
1510{ 1477{
1511 struct drm_i915_private *dev_priv = dev->dev_private;
1512
1513 wake_up_all(&dev_priv->gmbus_wait_queue); 1478 wake_up_all(&dev_priv->gmbus_wait_queue);
1514} 1479}
1515 1480
1516static void dp_aux_irq_handler(struct drm_device *dev) 1481static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
1517{ 1482{
1518 struct drm_i915_private *dev_priv = dev->dev_private;
1519
1520 wake_up_all(&dev_priv->gmbus_wait_queue); 1483 wake_up_all(&dev_priv->gmbus_wait_queue);
1521} 1484}
1522 1485
1523#if defined(CONFIG_DEBUG_FS) 1486#if defined(CONFIG_DEBUG_FS)
1524static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe, 1487static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1488 enum pipe pipe,
1525 uint32_t crc0, uint32_t crc1, 1489 uint32_t crc0, uint32_t crc1,
1526 uint32_t crc2, uint32_t crc3, 1490 uint32_t crc2, uint32_t crc3,
1527 uint32_t crc4) 1491 uint32_t crc4)
1528{ 1492{
1529 struct drm_i915_private *dev_priv = dev->dev_private;
1530 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 1493 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
1531 struct intel_pipe_crc_entry *entry; 1494 struct intel_pipe_crc_entry *entry;
1532 int head, tail; 1495 int head, tail;
@@ -1550,7 +1513,8 @@ static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1550 1513
1551 entry = &pipe_crc->entries[head]; 1514 entry = &pipe_crc->entries[head];
1552 1515
1553 entry->frame = dev->driver->get_vblank_counter(dev, pipe); 1516 entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm,
1517 pipe);
1554 entry->crc[0] = crc0; 1518 entry->crc[0] = crc0;
1555 entry->crc[1] = crc1; 1519 entry->crc[1] = crc1;
1556 entry->crc[2] = crc2; 1520 entry->crc[2] = crc2;
@@ -1566,27 +1530,26 @@ static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1566} 1530}
1567#else 1531#else
1568static inline void 1532static inline void
1569display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe, 1533display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1534 enum pipe pipe,
1570 uint32_t crc0, uint32_t crc1, 1535 uint32_t crc0, uint32_t crc1,
1571 uint32_t crc2, uint32_t crc3, 1536 uint32_t crc2, uint32_t crc3,
1572 uint32_t crc4) {} 1537 uint32_t crc4) {}
1573#endif 1538#endif
1574 1539
1575 1540
1576static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) 1541static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1542 enum pipe pipe)
1577{ 1543{
1578 struct drm_i915_private *dev_priv = dev->dev_private; 1544 display_pipe_crc_irq_handler(dev_priv, pipe,
1579
1580 display_pipe_crc_irq_handler(dev, pipe,
1581 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1545 I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1582 0, 0, 0, 0); 1546 0, 0, 0, 0);
1583} 1547}
1584 1548
1585static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) 1549static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1550 enum pipe pipe)
1586{ 1551{
1587 struct drm_i915_private *dev_priv = dev->dev_private; 1552 display_pipe_crc_irq_handler(dev_priv, pipe,
1588
1589 display_pipe_crc_irq_handler(dev, pipe,
1590 I915_READ(PIPE_CRC_RES_1_IVB(pipe)), 1553 I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1591 I915_READ(PIPE_CRC_RES_2_IVB(pipe)), 1554 I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1592 I915_READ(PIPE_CRC_RES_3_IVB(pipe)), 1555 I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
@@ -1594,22 +1557,22 @@ static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1594 I915_READ(PIPE_CRC_RES_5_IVB(pipe))); 1557 I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1595} 1558}
1596 1559
1597static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) 1560static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
1561 enum pipe pipe)
1598{ 1562{
1599 struct drm_i915_private *dev_priv = dev->dev_private;
1600 uint32_t res1, res2; 1563 uint32_t res1, res2;
1601 1564
1602 if (INTEL_INFO(dev)->gen >= 3) 1565 if (INTEL_GEN(dev_priv) >= 3)
1603 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe)); 1566 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1604 else 1567 else
1605 res1 = 0; 1568 res1 = 0;
1606 1569
1607 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) 1570 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1608 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe)); 1571 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1609 else 1572 else
1610 res2 = 0; 1573 res2 = 0;
1611 1574
1612 display_pipe_crc_irq_handler(dev, pipe, 1575 display_pipe_crc_irq_handler(dev_priv, pipe,
1613 I915_READ(PIPE_CRC_RES_RED(pipe)), 1576 I915_READ(PIPE_CRC_RES_RED(pipe)),
1614 I915_READ(PIPE_CRC_RES_GREEN(pipe)), 1577 I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1615 I915_READ(PIPE_CRC_RES_BLUE(pipe)), 1578 I915_READ(PIPE_CRC_RES_BLUE(pipe)),
@@ -1626,7 +1589,7 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1626 gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); 1589 gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1627 if (dev_priv->rps.interrupts_enabled) { 1590 if (dev_priv->rps.interrupts_enabled) {
1628 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events; 1591 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
1629 queue_work(dev_priv->wq, &dev_priv->rps.work); 1592 schedule_work(&dev_priv->rps.work);
1630 } 1593 }
1631 spin_unlock(&dev_priv->irq_lock); 1594 spin_unlock(&dev_priv->irq_lock);
1632 } 1595 }
@@ -1643,18 +1606,21 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1643 } 1606 }
1644} 1607}
1645 1608
1646static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe) 1609static bool intel_pipe_handle_vblank(struct drm_i915_private *dev_priv,
1610 enum pipe pipe)
1647{ 1611{
1648 if (!drm_handle_vblank(dev, pipe)) 1612 bool ret;
1649 return false;
1650 1613
1651 return true; 1614 ret = drm_handle_vblank(&dev_priv->drm, pipe);
1615 if (ret)
1616 intel_finish_page_flip_mmio(dev_priv, pipe);
1617
1618 return ret;
1652} 1619}
1653 1620
1654static void valleyview_pipestat_irq_ack(struct drm_device *dev, u32 iir, 1621static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv,
1655 u32 pipe_stats[I915_MAX_PIPES]) 1622 u32 iir, u32 pipe_stats[I915_MAX_PIPES])
1656{ 1623{
1657 struct drm_i915_private *dev_priv = dev->dev_private;
1658 int pipe; 1624 int pipe;
1659 1625
1660 spin_lock(&dev_priv->irq_lock); 1626 spin_lock(&dev_priv->irq_lock);
@@ -1710,31 +1676,28 @@ static void valleyview_pipestat_irq_ack(struct drm_device *dev, u32 iir,
1710 spin_unlock(&dev_priv->irq_lock); 1676 spin_unlock(&dev_priv->irq_lock);
1711} 1677}
1712 1678
1713static void valleyview_pipestat_irq_handler(struct drm_device *dev, 1679static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
1714 u32 pipe_stats[I915_MAX_PIPES]) 1680 u32 pipe_stats[I915_MAX_PIPES])
1715{ 1681{
1716 struct drm_i915_private *dev_priv = to_i915(dev);
1717 enum pipe pipe; 1682 enum pipe pipe;
1718 1683
1719 for_each_pipe(dev_priv, pipe) { 1684 for_each_pipe(dev_priv, pipe) {
1720 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && 1685 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
1721 intel_pipe_handle_vblank(dev, pipe)) 1686 intel_pipe_handle_vblank(dev_priv, pipe))
1722 intel_check_page_flip(dev, pipe); 1687 intel_check_page_flip(dev_priv, pipe);
1723 1688
1724 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) { 1689 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
1725 intel_prepare_page_flip(dev, pipe); 1690 intel_finish_page_flip_cs(dev_priv, pipe);
1726 intel_finish_page_flip(dev, pipe);
1727 }
1728 1691
1729 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1692 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1730 i9xx_pipe_crc_irq_handler(dev, pipe); 1693 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
1731 1694
1732 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 1695 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1733 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1696 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1734 } 1697 }
1735 1698
1736 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 1699 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1737 gmbus_irq_handler(dev); 1700 gmbus_irq_handler(dev_priv);
1738} 1701}
1739 1702
1740static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv) 1703static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
@@ -1747,12 +1710,13 @@ static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
1747 return hotplug_status; 1710 return hotplug_status;
1748} 1711}
1749 1712
1750static void i9xx_hpd_irq_handler(struct drm_device *dev, 1713static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1751 u32 hotplug_status) 1714 u32 hotplug_status)
1752{ 1715{
1753 u32 pin_mask = 0, long_mask = 0; 1716 u32 pin_mask = 0, long_mask = 0;
1754 1717
1755 if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { 1718 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
1719 IS_CHERRYVIEW(dev_priv)) {
1756 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; 1720 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1757 1721
1758 if (hotplug_trigger) { 1722 if (hotplug_trigger) {
@@ -1760,11 +1724,11 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev,
1760 hotplug_trigger, hpd_status_g4x, 1724 hotplug_trigger, hpd_status_g4x,
1761 i9xx_port_hotplug_long_detect); 1725 i9xx_port_hotplug_long_detect);
1762 1726
1763 intel_hpd_irq_handler(dev, pin_mask, long_mask); 1727 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1764 } 1728 }
1765 1729
1766 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) 1730 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1767 dp_aux_irq_handler(dev); 1731 dp_aux_irq_handler(dev_priv);
1768 } else { 1732 } else {
1769 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; 1733 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1770 1734
@@ -1772,7 +1736,7 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev,
1772 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger, 1736 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1773 hotplug_trigger, hpd_status_i915, 1737 hotplug_trigger, hpd_status_i915,
1774 i9xx_port_hotplug_long_detect); 1738 i9xx_port_hotplug_long_detect);
1775 intel_hpd_irq_handler(dev, pin_mask, long_mask); 1739 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1776 } 1740 }
1777 } 1741 }
1778} 1742}
@@ -1780,7 +1744,7 @@ static void i9xx_hpd_irq_handler(struct drm_device *dev,
1780static irqreturn_t valleyview_irq_handler(int irq, void *arg) 1744static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1781{ 1745{
1782 struct drm_device *dev = arg; 1746 struct drm_device *dev = arg;
1783 struct drm_i915_private *dev_priv = dev->dev_private; 1747 struct drm_i915_private *dev_priv = to_i915(dev);
1784 irqreturn_t ret = IRQ_NONE; 1748 irqreturn_t ret = IRQ_NONE;
1785 1749
1786 if (!intel_irqs_enabled(dev_priv)) 1750 if (!intel_irqs_enabled(dev_priv))
@@ -1831,7 +1795,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1831 1795
1832 /* Call regardless, as some status bits might not be 1796 /* Call regardless, as some status bits might not be
1833 * signalled in iir */ 1797 * signalled in iir */
1834 valleyview_pipestat_irq_ack(dev, iir, pipe_stats); 1798 valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1835 1799
1836 /* 1800 /*
1837 * VLV_IIR is single buffered, and reflects the level 1801 * VLV_IIR is single buffered, and reflects the level
@@ -1850,9 +1814,9 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1850 gen6_rps_irq_handler(dev_priv, pm_iir); 1814 gen6_rps_irq_handler(dev_priv, pm_iir);
1851 1815
1852 if (hotplug_status) 1816 if (hotplug_status)
1853 i9xx_hpd_irq_handler(dev, hotplug_status); 1817 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1854 1818
1855 valleyview_pipestat_irq_handler(dev, pipe_stats); 1819 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1856 } while (0); 1820 } while (0);
1857 1821
1858 enable_rpm_wakeref_asserts(dev_priv); 1822 enable_rpm_wakeref_asserts(dev_priv);
@@ -1863,7 +1827,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1863static irqreturn_t cherryview_irq_handler(int irq, void *arg) 1827static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1864{ 1828{
1865 struct drm_device *dev = arg; 1829 struct drm_device *dev = arg;
1866 struct drm_i915_private *dev_priv = dev->dev_private; 1830 struct drm_i915_private *dev_priv = to_i915(dev);
1867 irqreturn_t ret = IRQ_NONE; 1831 irqreturn_t ret = IRQ_NONE;
1868 1832
1869 if (!intel_irqs_enabled(dev_priv)) 1833 if (!intel_irqs_enabled(dev_priv))
@@ -1911,7 +1875,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1911 1875
1912 /* Call regardless, as some status bits might not be 1876 /* Call regardless, as some status bits might not be
1913 * signalled in iir */ 1877 * signalled in iir */
1914 valleyview_pipestat_irq_ack(dev, iir, pipe_stats); 1878 valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1915 1879
1916 /* 1880 /*
1917 * VLV_IIR is single buffered, and reflects the level 1881 * VLV_IIR is single buffered, and reflects the level
@@ -1927,9 +1891,9 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1927 gen8_gt_irq_handler(dev_priv, gt_iir); 1891 gen8_gt_irq_handler(dev_priv, gt_iir);
1928 1892
1929 if (hotplug_status) 1893 if (hotplug_status)
1930 i9xx_hpd_irq_handler(dev, hotplug_status); 1894 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1931 1895
1932 valleyview_pipestat_irq_handler(dev, pipe_stats); 1896 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1933 } while (0); 1897 } while (0);
1934 1898
1935 enable_rpm_wakeref_asserts(dev_priv); 1899 enable_rpm_wakeref_asserts(dev_priv);
@@ -1937,10 +1901,10 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1937 return ret; 1901 return ret;
1938} 1902}
1939 1903
1940static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger, 1904static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1905 u32 hotplug_trigger,
1941 const u32 hpd[HPD_NUM_PINS]) 1906 const u32 hpd[HPD_NUM_PINS])
1942{ 1907{
1943 struct drm_i915_private *dev_priv = to_i915(dev);
1944 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 1908 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1945 1909
1946 /* 1910 /*
@@ -1966,16 +1930,15 @@ static void ibx_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
1966 dig_hotplug_reg, hpd, 1930 dig_hotplug_reg, hpd,
1967 pch_port_hotplug_long_detect); 1931 pch_port_hotplug_long_detect);
1968 1932
1969 intel_hpd_irq_handler(dev, pin_mask, long_mask); 1933 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1970} 1934}
1971 1935
1972static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) 1936static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1973{ 1937{
1974 struct drm_i915_private *dev_priv = dev->dev_private;
1975 int pipe; 1938 int pipe;
1976 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 1939 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1977 1940
1978 ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx); 1941 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);
1979 1942
1980 if (pch_iir & SDE_AUDIO_POWER_MASK) { 1943 if (pch_iir & SDE_AUDIO_POWER_MASK) {
1981 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> 1944 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
@@ -1985,10 +1948,10 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
1985 } 1948 }
1986 1949
1987 if (pch_iir & SDE_AUX_MASK) 1950 if (pch_iir & SDE_AUX_MASK)
1988 dp_aux_irq_handler(dev); 1951 dp_aux_irq_handler(dev_priv);
1989 1952
1990 if (pch_iir & SDE_GMBUS) 1953 if (pch_iir & SDE_GMBUS)
1991 gmbus_irq_handler(dev); 1954 gmbus_irq_handler(dev_priv);
1992 1955
1993 if (pch_iir & SDE_AUDIO_HDCP_MASK) 1956 if (pch_iir & SDE_AUDIO_HDCP_MASK)
1994 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); 1957 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
@@ -2018,9 +1981,8 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
2018 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); 1981 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
2019} 1982}
2020 1983
2021static void ivb_err_int_handler(struct drm_device *dev) 1984static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
2022{ 1985{
2023 struct drm_i915_private *dev_priv = dev->dev_private;
2024 u32 err_int = I915_READ(GEN7_ERR_INT); 1986 u32 err_int = I915_READ(GEN7_ERR_INT);
2025 enum pipe pipe; 1987 enum pipe pipe;
2026 1988
@@ -2032,19 +1994,18 @@ static void ivb_err_int_handler(struct drm_device *dev)
2032 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 1994 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2033 1995
2034 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 1996 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
2035 if (IS_IVYBRIDGE(dev)) 1997 if (IS_IVYBRIDGE(dev_priv))
2036 ivb_pipe_crc_irq_handler(dev, pipe); 1998 ivb_pipe_crc_irq_handler(dev_priv, pipe);
2037 else 1999 else
2038 hsw_pipe_crc_irq_handler(dev, pipe); 2000 hsw_pipe_crc_irq_handler(dev_priv, pipe);
2039 } 2001 }
2040 } 2002 }
2041 2003
2042 I915_WRITE(GEN7_ERR_INT, err_int); 2004 I915_WRITE(GEN7_ERR_INT, err_int);
2043} 2005}
2044 2006
2045static void cpt_serr_int_handler(struct drm_device *dev) 2007static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
2046{ 2008{
2047 struct drm_i915_private *dev_priv = dev->dev_private;
2048 u32 serr_int = I915_READ(SERR_INT); 2009 u32 serr_int = I915_READ(SERR_INT);
2049 2010
2050 if (serr_int & SERR_INT_POISON) 2011 if (serr_int & SERR_INT_POISON)
@@ -2062,13 +2023,12 @@ static void cpt_serr_int_handler(struct drm_device *dev)
2062 I915_WRITE(SERR_INT, serr_int); 2023 I915_WRITE(SERR_INT, serr_int);
2063} 2024}
2064 2025
2065static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) 2026static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2066{ 2027{
2067 struct drm_i915_private *dev_priv = dev->dev_private;
2068 int pipe; 2028 int pipe;
2069 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; 2029 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
2070 2030
2071 ibx_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt); 2031 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);
2072 2032
2073 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { 2033 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
2074 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> 2034 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
@@ -2078,10 +2038,10 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
2078 } 2038 }
2079 2039
2080 if (pch_iir & SDE_AUX_MASK_CPT) 2040 if (pch_iir & SDE_AUX_MASK_CPT)
2081 dp_aux_irq_handler(dev); 2041 dp_aux_irq_handler(dev_priv);
2082 2042
2083 if (pch_iir & SDE_GMBUS_CPT) 2043 if (pch_iir & SDE_GMBUS_CPT)
2084 gmbus_irq_handler(dev); 2044 gmbus_irq_handler(dev_priv);
2085 2045
2086 if (pch_iir & SDE_AUDIO_CP_REQ_CPT) 2046 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
2087 DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); 2047 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
@@ -2096,12 +2056,11 @@ static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
2096 I915_READ(FDI_RX_IIR(pipe))); 2056 I915_READ(FDI_RX_IIR(pipe)));
2097 2057
2098 if (pch_iir & SDE_ERROR_CPT) 2058 if (pch_iir & SDE_ERROR_CPT)
2099 cpt_serr_int_handler(dev); 2059 cpt_serr_int_handler(dev_priv);
2100} 2060}
2101 2061
2102static void spt_irq_handler(struct drm_device *dev, u32 pch_iir) 2062static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2103{ 2063{
2104 struct drm_i915_private *dev_priv = dev->dev_private;
2105 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT & 2064 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
2106 ~SDE_PORTE_HOTPLUG_SPT; 2065 ~SDE_PORTE_HOTPLUG_SPT;
2107 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT; 2066 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
@@ -2130,16 +2089,16 @@ static void spt_irq_handler(struct drm_device *dev, u32 pch_iir)
2130 } 2089 }
2131 2090
2132 if (pin_mask) 2091 if (pin_mask)
2133 intel_hpd_irq_handler(dev, pin_mask, long_mask); 2092 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2134 2093
2135 if (pch_iir & SDE_GMBUS_CPT) 2094 if (pch_iir & SDE_GMBUS_CPT)
2136 gmbus_irq_handler(dev); 2095 gmbus_irq_handler(dev_priv);
2137} 2096}
2138 2097
2139static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger, 2098static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
2099 u32 hotplug_trigger,
2140 const u32 hpd[HPD_NUM_PINS]) 2100 const u32 hpd[HPD_NUM_PINS])
2141{ 2101{
2142 struct drm_i915_private *dev_priv = to_i915(dev);
2143 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2102 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2144 2103
2145 dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL); 2104 dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
@@ -2149,97 +2108,93 @@ static void ilk_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
2149 dig_hotplug_reg, hpd, 2108 dig_hotplug_reg, hpd,
2150 ilk_port_hotplug_long_detect); 2109 ilk_port_hotplug_long_detect);
2151 2110
2152 intel_hpd_irq_handler(dev, pin_mask, long_mask); 2111 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2153} 2112}
2154 2113
2155static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) 2114static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
2115 u32 de_iir)
2156{ 2116{
2157 struct drm_i915_private *dev_priv = dev->dev_private;
2158 enum pipe pipe; 2117 enum pipe pipe;
2159 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG; 2118 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
2160 2119
2161 if (hotplug_trigger) 2120 if (hotplug_trigger)
2162 ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ilk); 2121 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);
2163 2122
2164 if (de_iir & DE_AUX_CHANNEL_A) 2123 if (de_iir & DE_AUX_CHANNEL_A)
2165 dp_aux_irq_handler(dev); 2124 dp_aux_irq_handler(dev_priv);
2166 2125
2167 if (de_iir & DE_GSE) 2126 if (de_iir & DE_GSE)
2168 intel_opregion_asle_intr(dev); 2127 intel_opregion_asle_intr(dev_priv);
2169 2128
2170 if (de_iir & DE_POISON) 2129 if (de_iir & DE_POISON)
2171 DRM_ERROR("Poison interrupt\n"); 2130 DRM_ERROR("Poison interrupt\n");
2172 2131
2173 for_each_pipe(dev_priv, pipe) { 2132 for_each_pipe(dev_priv, pipe) {
2174 if (de_iir & DE_PIPE_VBLANK(pipe) && 2133 if (de_iir & DE_PIPE_VBLANK(pipe) &&
2175 intel_pipe_handle_vblank(dev, pipe)) 2134 intel_pipe_handle_vblank(dev_priv, pipe))
2176 intel_check_page_flip(dev, pipe); 2135 intel_check_page_flip(dev_priv, pipe);
2177 2136
2178 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 2137 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2179 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2138 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2180 2139
2181 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 2140 if (de_iir & DE_PIPE_CRC_DONE(pipe))
2182 i9xx_pipe_crc_irq_handler(dev, pipe); 2141 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2183 2142
2184 /* plane/pipes map 1:1 on ilk+ */ 2143 /* plane/pipes map 1:1 on ilk+ */
2185 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) { 2144 if (de_iir & DE_PLANE_FLIP_DONE(pipe))
2186 intel_prepare_page_flip(dev, pipe); 2145 intel_finish_page_flip_cs(dev_priv, pipe);
2187 intel_finish_page_flip_plane(dev, pipe);
2188 }
2189 } 2146 }
2190 2147
2191 /* check event from PCH */ 2148 /* check event from PCH */
2192 if (de_iir & DE_PCH_EVENT) { 2149 if (de_iir & DE_PCH_EVENT) {
2193 u32 pch_iir = I915_READ(SDEIIR); 2150 u32 pch_iir = I915_READ(SDEIIR);
2194 2151
2195 if (HAS_PCH_CPT(dev)) 2152 if (HAS_PCH_CPT(dev_priv))
2196 cpt_irq_handler(dev, pch_iir); 2153 cpt_irq_handler(dev_priv, pch_iir);
2197 else 2154 else
2198 ibx_irq_handler(dev, pch_iir); 2155 ibx_irq_handler(dev_priv, pch_iir);
2199 2156
2200 /* should clear PCH hotplug event before clear CPU irq */ 2157 /* should clear PCH hotplug event before clear CPU irq */
2201 I915_WRITE(SDEIIR, pch_iir); 2158 I915_WRITE(SDEIIR, pch_iir);
2202 } 2159 }
2203 2160
2204 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) 2161 if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT)
2205 ironlake_rps_change_irq_handler(dev); 2162 ironlake_rps_change_irq_handler(dev_priv);
2206} 2163}
2207 2164
2208static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) 2165static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
2166 u32 de_iir)
2209{ 2167{
2210 struct drm_i915_private *dev_priv = dev->dev_private;
2211 enum pipe pipe; 2168 enum pipe pipe;
2212 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB; 2169 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
2213 2170
2214 if (hotplug_trigger) 2171 if (hotplug_trigger)
2215 ilk_hpd_irq_handler(dev, hotplug_trigger, hpd_ivb); 2172 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);
2216 2173
2217 if (de_iir & DE_ERR_INT_IVB) 2174 if (de_iir & DE_ERR_INT_IVB)
2218 ivb_err_int_handler(dev); 2175 ivb_err_int_handler(dev_priv);
2219 2176
2220 if (de_iir & DE_AUX_CHANNEL_A_IVB) 2177 if (de_iir & DE_AUX_CHANNEL_A_IVB)
2221 dp_aux_irq_handler(dev); 2178 dp_aux_irq_handler(dev_priv);
2222 2179
2223 if (de_iir & DE_GSE_IVB) 2180 if (de_iir & DE_GSE_IVB)
2224 intel_opregion_asle_intr(dev); 2181 intel_opregion_asle_intr(dev_priv);
2225 2182
2226 for_each_pipe(dev_priv, pipe) { 2183 for_each_pipe(dev_priv, pipe) {
2227 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) && 2184 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
2228 intel_pipe_handle_vblank(dev, pipe)) 2185 intel_pipe_handle_vblank(dev_priv, pipe))
2229 intel_check_page_flip(dev, pipe); 2186 intel_check_page_flip(dev_priv, pipe);
2230 2187
2231 /* plane/pipes map 1:1 on ilk+ */ 2188 /* plane/pipes map 1:1 on ilk+ */
2232 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) { 2189 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
2233 intel_prepare_page_flip(dev, pipe); 2190 intel_finish_page_flip_cs(dev_priv, pipe);
2234 intel_finish_page_flip_plane(dev, pipe);
2235 }
2236 } 2191 }
2237 2192
2238 /* check event from PCH */ 2193 /* check event from PCH */
2239 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) { 2194 if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
2240 u32 pch_iir = I915_READ(SDEIIR); 2195 u32 pch_iir = I915_READ(SDEIIR);
2241 2196
2242 cpt_irq_handler(dev, pch_iir); 2197 cpt_irq_handler(dev_priv, pch_iir);
2243 2198
2244 /* clear PCH hotplug event before clear CPU irq */ 2199 /* clear PCH hotplug event before clear CPU irq */
2245 I915_WRITE(SDEIIR, pch_iir); 2200 I915_WRITE(SDEIIR, pch_iir);
@@ -2257,7 +2212,7 @@ static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
2257static irqreturn_t ironlake_irq_handler(int irq, void *arg) 2212static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2258{ 2213{
2259 struct drm_device *dev = arg; 2214 struct drm_device *dev = arg;
2260 struct drm_i915_private *dev_priv = dev->dev_private; 2215 struct drm_i915_private *dev_priv = to_i915(dev);
2261 u32 de_iir, gt_iir, de_ier, sde_ier = 0; 2216 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2262 irqreturn_t ret = IRQ_NONE; 2217 irqreturn_t ret = IRQ_NONE;
2263 2218
@@ -2277,7 +2232,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2277 * able to process them after we restore SDEIER (as soon as we restore 2232 * able to process them after we restore SDEIER (as soon as we restore
2278 * it, we'll get an interrupt if SDEIIR still has something to process 2233 * it, we'll get an interrupt if SDEIIR still has something to process
2279 * due to its back queue). */ 2234 * due to its back queue). */
2280 if (!HAS_PCH_NOP(dev)) { 2235 if (!HAS_PCH_NOP(dev_priv)) {
2281 sde_ier = I915_READ(SDEIER); 2236 sde_ier = I915_READ(SDEIER);
2282 I915_WRITE(SDEIER, 0); 2237 I915_WRITE(SDEIER, 0);
2283 POSTING_READ(SDEIER); 2238 POSTING_READ(SDEIER);
@@ -2289,7 +2244,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2289 if (gt_iir) { 2244 if (gt_iir) {
2290 I915_WRITE(GTIIR, gt_iir); 2245 I915_WRITE(GTIIR, gt_iir);
2291 ret = IRQ_HANDLED; 2246 ret = IRQ_HANDLED;
2292 if (INTEL_INFO(dev)->gen >= 6) 2247 if (INTEL_GEN(dev_priv) >= 6)
2293 snb_gt_irq_handler(dev_priv, gt_iir); 2248 snb_gt_irq_handler(dev_priv, gt_iir);
2294 else 2249 else
2295 ilk_gt_irq_handler(dev_priv, gt_iir); 2250 ilk_gt_irq_handler(dev_priv, gt_iir);
@@ -2299,13 +2254,13 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2299 if (de_iir) { 2254 if (de_iir) {
2300 I915_WRITE(DEIIR, de_iir); 2255 I915_WRITE(DEIIR, de_iir);
2301 ret = IRQ_HANDLED; 2256 ret = IRQ_HANDLED;
2302 if (INTEL_INFO(dev)->gen >= 7) 2257 if (INTEL_GEN(dev_priv) >= 7)
2303 ivb_display_irq_handler(dev, de_iir); 2258 ivb_display_irq_handler(dev_priv, de_iir);
2304 else 2259 else
2305 ilk_display_irq_handler(dev, de_iir); 2260 ilk_display_irq_handler(dev_priv, de_iir);
2306 } 2261 }
2307 2262
2308 if (INTEL_INFO(dev)->gen >= 6) { 2263 if (INTEL_GEN(dev_priv) >= 6) {
2309 u32 pm_iir = I915_READ(GEN6_PMIIR); 2264 u32 pm_iir = I915_READ(GEN6_PMIIR);
2310 if (pm_iir) { 2265 if (pm_iir) {
2311 I915_WRITE(GEN6_PMIIR, pm_iir); 2266 I915_WRITE(GEN6_PMIIR, pm_iir);
@@ -2316,7 +2271,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2316 2271
2317 I915_WRITE(DEIER, de_ier); 2272 I915_WRITE(DEIER, de_ier);
2318 POSTING_READ(DEIER); 2273 POSTING_READ(DEIER);
2319 if (!HAS_PCH_NOP(dev)) { 2274 if (!HAS_PCH_NOP(dev_priv)) {
2320 I915_WRITE(SDEIER, sde_ier); 2275 I915_WRITE(SDEIER, sde_ier);
2321 POSTING_READ(SDEIER); 2276 POSTING_READ(SDEIER);
2322 } 2277 }
@@ -2327,10 +2282,10 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2327 return ret; 2282 return ret;
2328} 2283}
2329 2284
2330static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger, 2285static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
2286 u32 hotplug_trigger,
2331 const u32 hpd[HPD_NUM_PINS]) 2287 const u32 hpd[HPD_NUM_PINS])
2332{ 2288{
2333 struct drm_i915_private *dev_priv = to_i915(dev);
2334 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0; 2289 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2335 2290
2336 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); 2291 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
@@ -2340,13 +2295,12 @@ static void bxt_hpd_irq_handler(struct drm_device *dev, u32 hotplug_trigger,
2340 dig_hotplug_reg, hpd, 2295 dig_hotplug_reg, hpd,
2341 bxt_port_hotplug_long_detect); 2296 bxt_port_hotplug_long_detect);
2342 2297
2343 intel_hpd_irq_handler(dev, pin_mask, long_mask); 2298 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2344} 2299}
2345 2300
2346static irqreturn_t 2301static irqreturn_t
2347gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) 2302gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2348{ 2303{
2349 struct drm_device *dev = dev_priv->dev;
2350 irqreturn_t ret = IRQ_NONE; 2304 irqreturn_t ret = IRQ_NONE;
2351 u32 iir; 2305 u32 iir;
2352 enum pipe pipe; 2306 enum pipe pipe;
@@ -2357,7 +2311,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2357 I915_WRITE(GEN8_DE_MISC_IIR, iir); 2311 I915_WRITE(GEN8_DE_MISC_IIR, iir);
2358 ret = IRQ_HANDLED; 2312 ret = IRQ_HANDLED;
2359 if (iir & GEN8_DE_MISC_GSE) 2313 if (iir & GEN8_DE_MISC_GSE)
2360 intel_opregion_asle_intr(dev); 2314 intel_opregion_asle_intr(dev_priv);
2361 else 2315 else
2362 DRM_ERROR("Unexpected DE Misc interrupt\n"); 2316 DRM_ERROR("Unexpected DE Misc interrupt\n");
2363 } 2317 }
@@ -2381,26 +2335,28 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2381 GEN9_AUX_CHANNEL_D; 2335 GEN9_AUX_CHANNEL_D;
2382 2336
2383 if (iir & tmp_mask) { 2337 if (iir & tmp_mask) {
2384 dp_aux_irq_handler(dev); 2338 dp_aux_irq_handler(dev_priv);
2385 found = true; 2339 found = true;
2386 } 2340 }
2387 2341
2388 if (IS_BROXTON(dev_priv)) { 2342 if (IS_BROXTON(dev_priv)) {
2389 tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK; 2343 tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
2390 if (tmp_mask) { 2344 if (tmp_mask) {
2391 bxt_hpd_irq_handler(dev, tmp_mask, hpd_bxt); 2345 bxt_hpd_irq_handler(dev_priv, tmp_mask,
2346 hpd_bxt);
2392 found = true; 2347 found = true;
2393 } 2348 }
2394 } else if (IS_BROADWELL(dev_priv)) { 2349 } else if (IS_BROADWELL(dev_priv)) {
2395 tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG; 2350 tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
2396 if (tmp_mask) { 2351 if (tmp_mask) {
2397 ilk_hpd_irq_handler(dev, tmp_mask, hpd_bdw); 2352 ilk_hpd_irq_handler(dev_priv,
2353 tmp_mask, hpd_bdw);
2398 found = true; 2354 found = true;
2399 } 2355 }
2400 } 2356 }
2401 2357
2402 if (IS_BROXTON(dev) && (iir & BXT_DE_PORT_GMBUS)) { 2358 if (IS_BROXTON(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
2403 gmbus_irq_handler(dev); 2359 gmbus_irq_handler(dev_priv);
2404 found = true; 2360 found = true;
2405 } 2361 }
2406 2362
@@ -2427,8 +2383,8 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2427 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir); 2383 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
2428 2384
2429 if (iir & GEN8_PIPE_VBLANK && 2385 if (iir & GEN8_PIPE_VBLANK &&
2430 intel_pipe_handle_vblank(dev, pipe)) 2386 intel_pipe_handle_vblank(dev_priv, pipe))
2431 intel_check_page_flip(dev, pipe); 2387 intel_check_page_flip(dev_priv, pipe);
2432 2388
2433 flip_done = iir; 2389 flip_done = iir;
2434 if (INTEL_INFO(dev_priv)->gen >= 9) 2390 if (INTEL_INFO(dev_priv)->gen >= 9)
@@ -2436,13 +2392,11 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2436 else 2392 else
2437 flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE; 2393 flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE;
2438 2394
2439 if (flip_done) { 2395 if (flip_done)
2440 intel_prepare_page_flip(dev, pipe); 2396 intel_finish_page_flip_cs(dev_priv, pipe);
2441 intel_finish_page_flip_plane(dev, pipe);
2442 }
2443 2397
2444 if (iir & GEN8_PIPE_CDCLK_CRC_DONE) 2398 if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
2445 hsw_pipe_crc_irq_handler(dev, pipe); 2399 hsw_pipe_crc_irq_handler(dev_priv, pipe);
2446 2400
2447 if (iir & GEN8_PIPE_FIFO_UNDERRUN) 2401 if (iir & GEN8_PIPE_FIFO_UNDERRUN)
2448 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 2402 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
@@ -2459,7 +2413,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2459 fault_errors); 2413 fault_errors);
2460 } 2414 }
2461 2415
2462 if (HAS_PCH_SPLIT(dev) && !HAS_PCH_NOP(dev) && 2416 if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
2463 master_ctl & GEN8_DE_PCH_IRQ) { 2417 master_ctl & GEN8_DE_PCH_IRQ) {
2464 /* 2418 /*
2465 * FIXME(BDW): Assume for now that the new interrupt handling 2419 * FIXME(BDW): Assume for now that the new interrupt handling
@@ -2472,9 +2426,9 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2472 ret = IRQ_HANDLED; 2426 ret = IRQ_HANDLED;
2473 2427
2474 if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv)) 2428 if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
2475 spt_irq_handler(dev, iir); 2429 spt_irq_handler(dev_priv, iir);
2476 else 2430 else
2477 cpt_irq_handler(dev, iir); 2431 cpt_irq_handler(dev_priv, iir);
2478 } else { 2432 } else {
2479 /* 2433 /*
2480 * Like on previous PCH there seems to be something 2434 * Like on previous PCH there seems to be something
@@ -2490,7 +2444,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2490static irqreturn_t gen8_irq_handler(int irq, void *arg) 2444static irqreturn_t gen8_irq_handler(int irq, void *arg)
2491{ 2445{
2492 struct drm_device *dev = arg; 2446 struct drm_device *dev = arg;
2493 struct drm_i915_private *dev_priv = dev->dev_private; 2447 struct drm_i915_private *dev_priv = to_i915(dev);
2494 u32 master_ctl; 2448 u32 master_ctl;
2495 u32 gt_iir[4] = {}; 2449 u32 gt_iir[4] = {};
2496 irqreturn_t ret; 2450 irqreturn_t ret;
@@ -2521,11 +2475,8 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
2521 return ret; 2475 return ret;
2522} 2476}
2523 2477
2524static void i915_error_wake_up(struct drm_i915_private *dev_priv, 2478static void i915_error_wake_up(struct drm_i915_private *dev_priv)
2525 bool reset_completed)
2526{ 2479{
2527 struct intel_engine_cs *engine;
2528
2529 /* 2480 /*
2530 * Notify all waiters for GPU completion events that reset state has 2481 * Notify all waiters for GPU completion events that reset state has
2531 * been changed, and that they need to restart their wait after 2482 * been changed, and that they need to restart their wait after
@@ -2534,36 +2485,28 @@ static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2534 */ 2485 */
2535 2486
2536 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */ 2487 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2537 for_each_engine(engine, dev_priv) 2488 wake_up_all(&dev_priv->gpu_error.wait_queue);
2538 wake_up_all(&engine->irq_queue);
2539 2489
2540 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */ 2490 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2541 wake_up_all(&dev_priv->pending_flip_queue); 2491 wake_up_all(&dev_priv->pending_flip_queue);
2542
2543 /*
2544 * Signal tasks blocked in i915_gem_wait_for_error that the pending
2545 * reset state is cleared.
2546 */
2547 if (reset_completed)
2548 wake_up_all(&dev_priv->gpu_error.reset_queue);
2549} 2492}
2550 2493
2551/** 2494/**
2552 * i915_reset_and_wakeup - do process context error handling work 2495 * i915_reset_and_wakeup - do process context error handling work
2553 * @dev: drm device 2496 * @dev_priv: i915 device private
2554 * 2497 *
2555 * Fire an error uevent so userspace can see that a hang or error 2498 * Fire an error uevent so userspace can see that a hang or error
2556 * was detected. 2499 * was detected.
2557 */ 2500 */
2558static void i915_reset_and_wakeup(struct drm_device *dev) 2501static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
2559{ 2502{
2560 struct drm_i915_private *dev_priv = to_i915(dev); 2503 struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
2561 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; 2504 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2562 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; 2505 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2563 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; 2506 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
2564 int ret; 2507 int ret;
2565 2508
2566 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event); 2509 kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
2567 2510
2568 /* 2511 /*
2569 * Note that there's only one work item which does gpu resets, so we 2512 * Note that there's only one work item which does gpu resets, so we
@@ -2577,8 +2520,7 @@ static void i915_reset_and_wakeup(struct drm_device *dev)
2577 */ 2520 */
2578 if (i915_reset_in_progress(&dev_priv->gpu_error)) { 2521 if (i915_reset_in_progress(&dev_priv->gpu_error)) {
2579 DRM_DEBUG_DRIVER("resetting chip\n"); 2522 DRM_DEBUG_DRIVER("resetting chip\n");
2580 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, 2523 kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
2581 reset_event);
2582 2524
2583 /* 2525 /*
2584 * In most cases it's guaranteed that we get here with an RPM 2526 * In most cases it's guaranteed that we get here with an RPM
@@ -2589,7 +2531,7 @@ static void i915_reset_and_wakeup(struct drm_device *dev)
2589 */ 2531 */
2590 intel_runtime_pm_get(dev_priv); 2532 intel_runtime_pm_get(dev_priv);
2591 2533
2592 intel_prepare_reset(dev); 2534 intel_prepare_reset(dev_priv);
2593 2535
2594 /* 2536 /*
2595 * All state reset _must_ be completed before we update the 2537 * All state reset _must_ be completed before we update the
@@ -2597,27 +2539,26 @@ static void i915_reset_and_wakeup(struct drm_device *dev)
2597 * pending state and not properly drop locks, resulting in 2539 * pending state and not properly drop locks, resulting in
2598 * deadlocks with the reset work. 2540 * deadlocks with the reset work.
2599 */ 2541 */
2600 ret = i915_reset(dev); 2542 ret = i915_reset(dev_priv);
2601 2543
2602 intel_finish_reset(dev); 2544 intel_finish_reset(dev_priv);
2603 2545
2604 intel_runtime_pm_put(dev_priv); 2546 intel_runtime_pm_put(dev_priv);
2605 2547
2606 if (ret == 0) 2548 if (ret == 0)
2607 kobject_uevent_env(&dev->primary->kdev->kobj, 2549 kobject_uevent_env(kobj,
2608 KOBJ_CHANGE, reset_done_event); 2550 KOBJ_CHANGE, reset_done_event);
2609 2551
2610 /* 2552 /*
2611 * Note: The wake_up also serves as a memory barrier so that 2553 * Note: The wake_up also serves as a memory barrier so that
2612 * waiters see the updated value of the reset counter atomic_t. 2554 * waiters see the updated value of the reset counter atomic_t.
2613 */ 2555 */
2614 i915_error_wake_up(dev_priv, true); 2556 wake_up_all(&dev_priv->gpu_error.reset_queue);
2615 } 2557 }
2616} 2558}
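The function above fires KOBJ_CHANGE uevents on the i915 master kobject so userspace can observe the error/reset lifecycle: the error event when a hang or error is detected, the reset event when the worker actually resets the chip, and the reset-done event only if i915_reset() returned success. Below is a minimal, hedged sketch of a userspace listener using libudev; the "ERROR" and "RESET" property names are an assumption here — they are whatever the I915_ERROR_UEVENT/I915_RESET_UEVENT macros in the uapi header expand to.

/* Hypothetical userspace sketch: watch drm uevents for i915 error/reset state. */
#include <libudev.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	struct udev *udev = udev_new();
	struct udev_monitor *mon = udev_monitor_new_from_netlink(udev, "kernel");
	struct pollfd pfd;

	udev_monitor_filter_add_match_subsystem_devtype(mon, "drm", NULL);
	udev_monitor_enable_receiving(mon);
	pfd.fd = udev_monitor_get_fd(mon);
	pfd.events = POLLIN;

	for (;;) {
		struct udev_device *dev;
		const char *error, *reset;

		if (poll(&pfd, 1, -1) <= 0)
			continue;
		dev = udev_monitor_receive_device(mon);
		if (!dev)
			continue;
		/* Property keys assumed to match I915_ERROR_UEVENT/I915_RESET_UEVENT. */
		error = udev_device_get_property_value(dev, "ERROR");
		reset = udev_device_get_property_value(dev, "RESET");
		if (reset && !strcmp(reset, "1"))
			printf("i915: reset in progress\n");
		else if (error && !strcmp(error, "1"))
			printf("i915: GPU error reported\n");
		else if (error && !strcmp(error, "0"))
			printf("i915: reset completed\n");
		udev_device_unref(dev);
	}
}

This mirrors the three kobject_uevent_env() calls above, now all issued against the single kobj pointer taken from dev_priv->drm.primary->kdev.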
2617 2559
2618static void i915_report_and_clear_eir(struct drm_device *dev) 2560static void i915_report_and_clear_eir(struct drm_i915_private *dev_priv)
2619{ 2561{
2620 struct drm_i915_private *dev_priv = dev->dev_private;
2621 uint32_t instdone[I915_NUM_INSTDONE_REG]; 2562 uint32_t instdone[I915_NUM_INSTDONE_REG];
2622 u32 eir = I915_READ(EIR); 2563 u32 eir = I915_READ(EIR);
2623 int pipe, i; 2564 int pipe, i;
@@ -2627,9 +2568,9 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
2627 2568
2628 pr_err("render error detected, EIR: 0x%08x\n", eir); 2569 pr_err("render error detected, EIR: 0x%08x\n", eir);
2629 2570
2630 i915_get_extra_instdone(dev, instdone); 2571 i915_get_extra_instdone(dev_priv, instdone);
2631 2572
2632 if (IS_G4X(dev)) { 2573 if (IS_G4X(dev_priv)) {
2633 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { 2574 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2634 u32 ipeir = I915_READ(IPEIR_I965); 2575 u32 ipeir = I915_READ(IPEIR_I965);
2635 2576
@@ -2651,7 +2592,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
2651 } 2592 }
2652 } 2593 }
2653 2594
2654 if (!IS_GEN2(dev)) { 2595 if (!IS_GEN2(dev_priv)) {
2655 if (eir & I915_ERROR_PAGE_TABLE) { 2596 if (eir & I915_ERROR_PAGE_TABLE) {
2656 u32 pgtbl_err = I915_READ(PGTBL_ER); 2597 u32 pgtbl_err = I915_READ(PGTBL_ER);
2657 pr_err("page table error\n"); 2598 pr_err("page table error\n");
@@ -2673,7 +2614,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
2673 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); 2614 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
2674 for (i = 0; i < ARRAY_SIZE(instdone); i++) 2615 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2675 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); 2616 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2676 if (INTEL_INFO(dev)->gen < 4) { 2617 if (INTEL_GEN(dev_priv) < 4) {
2677 u32 ipeir = I915_READ(IPEIR); 2618 u32 ipeir = I915_READ(IPEIR);
2678 2619
2679 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); 2620 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
@@ -2709,18 +2650,19 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
2709 2650
2710/** 2651/**
2711 * i915_handle_error - handle a gpu error 2652 * i915_handle_error - handle a gpu error
2712 * @dev: drm device 2653 * @dev_priv: i915 device private
2713 * @engine_mask: mask representing engines that are hung 2654 * @engine_mask: mask representing engines that are hung
2714 * Do some basic checking of register state at error time and 2655 * Do some basic checking of register state at error time and
2715 * dump it to the syslog. Also call i915_capture_error_state() to make 2656 * dump it to the syslog. Also call i915_capture_error_state() to make
2716 * sure we get a record and make it available in debugfs. Fire a uevent 2657 * sure we get a record and make it available in debugfs. Fire a uevent
2717 * so userspace knows something bad happened (should trigger collection 2658 * so userspace knows something bad happened (should trigger collection
2718 * of a ring dump etc.). 2659 * of a ring dump etc.).
2660 * @fmt: Error message format string
2719 */ 2661 */
2720void i915_handle_error(struct drm_device *dev, u32 engine_mask, 2662void i915_handle_error(struct drm_i915_private *dev_priv,
2663 u32 engine_mask,
2721 const char *fmt, ...) 2664 const char *fmt, ...)
2722{ 2665{
2723 struct drm_i915_private *dev_priv = dev->dev_private;
2724 va_list args; 2666 va_list args;
2725 char error_msg[80]; 2667 char error_msg[80];
2726 2668
@@ -2728,8 +2670,8 @@ void i915_handle_error(struct drm_device *dev, u32 engine_mask,
2728 vscnprintf(error_msg, sizeof(error_msg), fmt, args); 2670 vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2729 va_end(args); 2671 va_end(args);
2730 2672
2731 i915_capture_error_state(dev, engine_mask, error_msg); 2673 i915_capture_error_state(dev_priv, engine_mask, error_msg);
2732 i915_report_and_clear_eir(dev); 2674 i915_report_and_clear_eir(dev_priv);
2733 2675
2734 if (engine_mask) { 2676 if (engine_mask) {
2735 atomic_or(I915_RESET_IN_PROGRESS_FLAG, 2677 atomic_or(I915_RESET_IN_PROGRESS_FLAG,
@@ -2748,10 +2690,10 @@ void i915_handle_error(struct drm_device *dev, u32 engine_mask,
2748 * ensure that the waiters see the updated value of the reset 2690 * ensure that the waiters see the updated value of the reset
2749 * counter atomic_t. 2691 * counter atomic_t.
2750 */ 2692 */
2751 i915_error_wake_up(dev_priv, false); 2693 i915_error_wake_up(dev_priv);
2752 } 2694 }
2753 2695
2754 i915_reset_and_wakeup(dev); 2696 i915_reset_and_wakeup(dev_priv);
2755} 2697}
2756 2698
2757/* Called from drm generic code, passed 'crtc' which 2699/* Called from drm generic code, passed 'crtc' which
@@ -2759,7 +2701,7 @@ void i915_handle_error(struct drm_device *dev, u32 engine_mask,
2759 */ 2701 */
2760static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe) 2702static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
2761{ 2703{
2762 struct drm_i915_private *dev_priv = dev->dev_private; 2704 struct drm_i915_private *dev_priv = to_i915(dev);
2763 unsigned long irqflags; 2705 unsigned long irqflags;
2764 2706
2765 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2707 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -2776,7 +2718,7 @@ static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
2776 2718
2777static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe) 2719static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
2778{ 2720{
2779 struct drm_i915_private *dev_priv = dev->dev_private; 2721 struct drm_i915_private *dev_priv = to_i915(dev);
2780 unsigned long irqflags; 2722 unsigned long irqflags;
2781 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) : 2723 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2782 DE_PIPE_VBLANK(pipe); 2724 DE_PIPE_VBLANK(pipe);
@@ -2790,7 +2732,7 @@ static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
2790 2732
2791static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe) 2733static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
2792{ 2734{
2793 struct drm_i915_private *dev_priv = dev->dev_private; 2735 struct drm_i915_private *dev_priv = to_i915(dev);
2794 unsigned long irqflags; 2736 unsigned long irqflags;
2795 2737
2796 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2738 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -2803,7 +2745,7 @@ static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
2803 2745
2804static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe) 2746static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
2805{ 2747{
2806 struct drm_i915_private *dev_priv = dev->dev_private; 2748 struct drm_i915_private *dev_priv = to_i915(dev);
2807 unsigned long irqflags; 2749 unsigned long irqflags;
2808 2750
2809 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2751 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -2818,7 +2760,7 @@ static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
2818 */ 2760 */
2819static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe) 2761static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
2820{ 2762{
2821 struct drm_i915_private *dev_priv = dev->dev_private; 2763 struct drm_i915_private *dev_priv = to_i915(dev);
2822 unsigned long irqflags; 2764 unsigned long irqflags;
2823 2765
2824 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2766 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -2830,7 +2772,7 @@ static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
2830 2772
2831static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe) 2773static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
2832{ 2774{
2833 struct drm_i915_private *dev_priv = dev->dev_private; 2775 struct drm_i915_private *dev_priv = to_i915(dev);
2834 unsigned long irqflags; 2776 unsigned long irqflags;
2835 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) : 2777 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2836 DE_PIPE_VBLANK(pipe); 2778 DE_PIPE_VBLANK(pipe);
@@ -2842,7 +2784,7 @@ static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
2842 2784
2843static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe) 2785static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
2844{ 2786{
2845 struct drm_i915_private *dev_priv = dev->dev_private; 2787 struct drm_i915_private *dev_priv = to_i915(dev);
2846 unsigned long irqflags; 2788 unsigned long irqflags;
2847 2789
2848 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2790 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -2853,7 +2795,7 @@ static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
2853 2795
2854static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe) 2796static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
2855{ 2797{
2856 struct drm_i915_private *dev_priv = dev->dev_private; 2798 struct drm_i915_private *dev_priv = to_i915(dev);
2857 unsigned long irqflags; 2799 unsigned long irqflags;
2858 2800
2859 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2801 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
@@ -2869,9 +2811,9 @@ ring_idle(struct intel_engine_cs *engine, u32 seqno)
2869} 2811}
2870 2812
2871static bool 2813static bool
2872ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr) 2814ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr)
2873{ 2815{
2874 if (INTEL_INFO(dev)->gen >= 8) { 2816 if (INTEL_GEN(engine->i915) >= 8) {
2875 return (ipehr >> 23) == 0x1c; 2817 return (ipehr >> 23) == 0x1c;
2876 } else { 2818 } else {
2877 ipehr &= ~MI_SEMAPHORE_SYNC_MASK; 2819 ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
@@ -2884,10 +2826,10 @@ static struct intel_engine_cs *
2884semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr, 2826semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
2885 u64 offset) 2827 u64 offset)
2886{ 2828{
2887 struct drm_i915_private *dev_priv = engine->dev->dev_private; 2829 struct drm_i915_private *dev_priv = engine->i915;
2888 struct intel_engine_cs *signaller; 2830 struct intel_engine_cs *signaller;
2889 2831
2890 if (INTEL_INFO(dev_priv)->gen >= 8) { 2832 if (INTEL_GEN(dev_priv) >= 8) {
2891 for_each_engine(signaller, dev_priv) { 2833 for_each_engine(signaller, dev_priv) {
2892 if (engine == signaller) 2834 if (engine == signaller)
2893 continue; 2835 continue;
@@ -2916,7 +2858,7 @@ semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
2916static struct intel_engine_cs * 2858static struct intel_engine_cs *
2917semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno) 2859semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
2918{ 2860{
2919 struct drm_i915_private *dev_priv = engine->dev->dev_private; 2861 struct drm_i915_private *dev_priv = engine->i915;
2920 u32 cmd, ipehr, head; 2862 u32 cmd, ipehr, head;
2921 u64 offset = 0; 2863 u64 offset = 0;
2922 int i, backwards; 2864 int i, backwards;
@@ -2942,7 +2884,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
2942 return NULL; 2884 return NULL;
2943 2885
2944 ipehr = I915_READ(RING_IPEHR(engine->mmio_base)); 2886 ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
2945 if (!ipehr_is_semaphore_wait(engine->dev, ipehr)) 2887 if (!ipehr_is_semaphore_wait(engine, ipehr))
2946 return NULL; 2888 return NULL;
2947 2889
2948 /* 2890 /*
@@ -2954,7 +2896,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
2954 * ringbuffer itself. 2896 * ringbuffer itself.
2955 */ 2897 */
2956 head = I915_READ_HEAD(engine) & HEAD_ADDR; 2898 head = I915_READ_HEAD(engine) & HEAD_ADDR;
2957 backwards = (INTEL_INFO(engine->dev)->gen >= 8) ? 5 : 4; 2899 backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4;
2958 2900
2959 for (i = backwards; i; --i) { 2901 for (i = backwards; i; --i) {
2960 /* 2902 /*
@@ -2976,7 +2918,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
2976 return NULL; 2918 return NULL;
2977 2919
2978 *seqno = ioread32(engine->buffer->virtual_start + head + 4) + 1; 2920 *seqno = ioread32(engine->buffer->virtual_start + head + 4) + 1;
2979 if (INTEL_INFO(engine->dev)->gen >= 8) { 2921 if (INTEL_GEN(dev_priv) >= 8) {
2980 offset = ioread32(engine->buffer->virtual_start + head + 12); 2922 offset = ioread32(engine->buffer->virtual_start + head + 12);
2981 offset <<= 32; 2923 offset <<= 32;
2982 offset = ioread32(engine->buffer->virtual_start + head + 8); 2924 offset = ioread32(engine->buffer->virtual_start + head + 8);
@@ -2986,7 +2928,7 @@ semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
2986 2928
2987static int semaphore_passed(struct intel_engine_cs *engine) 2929static int semaphore_passed(struct intel_engine_cs *engine)
2988{ 2930{
2989 struct drm_i915_private *dev_priv = engine->dev->dev_private; 2931 struct drm_i915_private *dev_priv = engine->i915;
2990 struct intel_engine_cs *signaller; 2932 struct intel_engine_cs *signaller;
2991 u32 seqno; 2933 u32 seqno;
2992 2934
@@ -3000,7 +2942,7 @@ static int semaphore_passed(struct intel_engine_cs *engine)
3000 if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES) 2942 if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
3001 return -1; 2943 return -1;
3002 2944
3003 if (i915_seqno_passed(signaller->get_seqno(signaller), seqno)) 2945 if (i915_seqno_passed(intel_engine_get_seqno(signaller), seqno))
3004 return 1; 2946 return 1;
3005 2947
3006 /* cursory check for an unkickable deadlock */ 2948 /* cursory check for an unkickable deadlock */
@@ -3028,7 +2970,7 @@ static bool subunits_stuck(struct intel_engine_cs *engine)
3028 if (engine->id != RCS) 2970 if (engine->id != RCS)
3029 return true; 2971 return true;
3030 2972
3031 i915_get_extra_instdone(engine->dev, instdone); 2973 i915_get_extra_instdone(engine->i915, instdone);
3032 2974
3033 /* There might be unstable subunit states even when 2975 /* There might be unstable subunit states even when
3034 * actual head is not moving. Filter out the unstable ones by 2976 * actual head is not moving. Filter out the unstable ones by
@@ -3069,8 +3011,7 @@ head_stuck(struct intel_engine_cs *engine, u64 acthd)
3069static enum intel_ring_hangcheck_action 3011static enum intel_ring_hangcheck_action
3070ring_stuck(struct intel_engine_cs *engine, u64 acthd) 3012ring_stuck(struct intel_engine_cs *engine, u64 acthd)
3071{ 3013{
3072 struct drm_device *dev = engine->dev; 3014 struct drm_i915_private *dev_priv = engine->i915;
3073 struct drm_i915_private *dev_priv = dev->dev_private;
3074 enum intel_ring_hangcheck_action ha; 3015 enum intel_ring_hangcheck_action ha;
3075 u32 tmp; 3016 u32 tmp;
3076 3017
@@ -3078,7 +3019,7 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd)
3078 if (ha != HANGCHECK_HUNG) 3019 if (ha != HANGCHECK_HUNG)
3079 return ha; 3020 return ha;
3080 3021
3081 if (IS_GEN2(dev)) 3022 if (IS_GEN2(dev_priv))
3082 return HANGCHECK_HUNG; 3023 return HANGCHECK_HUNG;
3083 3024
3084 /* Is the chip hanging on a WAIT_FOR_EVENT? 3025 /* Is the chip hanging on a WAIT_FOR_EVENT?
@@ -3088,19 +3029,19 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd)
3088 */ 3029 */
3089 tmp = I915_READ_CTL(engine); 3030 tmp = I915_READ_CTL(engine);
3090 if (tmp & RING_WAIT) { 3031 if (tmp & RING_WAIT) {
3091 i915_handle_error(dev, 0, 3032 i915_handle_error(dev_priv, 0,
3092 "Kicking stuck wait on %s", 3033 "Kicking stuck wait on %s",
3093 engine->name); 3034 engine->name);
3094 I915_WRITE_CTL(engine, tmp); 3035 I915_WRITE_CTL(engine, tmp);
3095 return HANGCHECK_KICK; 3036 return HANGCHECK_KICK;
3096 } 3037 }
3097 3038
3098 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) { 3039 if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) {
3099 switch (semaphore_passed(engine)) { 3040 switch (semaphore_passed(engine)) {
3100 default: 3041 default:
3101 return HANGCHECK_HUNG; 3042 return HANGCHECK_HUNG;
3102 case 1: 3043 case 1:
3103 i915_handle_error(dev, 0, 3044 i915_handle_error(dev_priv, 0,
3104 "Kicking stuck semaphore on %s", 3045 "Kicking stuck semaphore on %s",
3105 engine->name); 3046 engine->name);
3106 I915_WRITE_CTL(engine, tmp); 3047 I915_WRITE_CTL(engine, tmp);
@@ -3113,23 +3054,21 @@ ring_stuck(struct intel_engine_cs *engine, u64 acthd)
3113 return HANGCHECK_HUNG; 3054 return HANGCHECK_HUNG;
3114} 3055}
3115 3056
3116static unsigned kick_waiters(struct intel_engine_cs *engine) 3057static unsigned long kick_waiters(struct intel_engine_cs *engine)
3117{ 3058{
3118 struct drm_i915_private *i915 = to_i915(engine->dev); 3059 struct drm_i915_private *i915 = engine->i915;
3119 unsigned user_interrupts = READ_ONCE(engine->user_interrupts); 3060 unsigned long irq_count = READ_ONCE(engine->breadcrumbs.irq_wakeups);
3120 3061
3121 if (engine->hangcheck.user_interrupts == user_interrupts && 3062 if (engine->hangcheck.user_interrupts == irq_count &&
3122 !test_and_set_bit(engine->id, &i915->gpu_error.missed_irq_rings)) { 3063 !test_and_set_bit(engine->id, &i915->gpu_error.missed_irq_rings)) {
3123 if (!(i915->gpu_error.test_irq_rings & intel_engine_flag(engine))) 3064 if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings))
3124 DRM_ERROR("Hangcheck timer elapsed... %s idle\n", 3065 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
3125 engine->name); 3066 engine->name);
3126 else 3067
3127 DRM_INFO("Fake missed irq on %s\n", 3068 intel_engine_enable_fake_irq(engine);
3128 engine->name);
3129 wake_up_all(&engine->irq_queue);
3130 } 3069 }
3131 3070
3132 return user_interrupts; 3071 return irq_count;
3133} 3072}
3134/* 3073/*
3135 * This is called when the chip hasn't reported back with completed 3074 * This is called when the chip hasn't reported back with completed
@@ -3144,11 +3083,9 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
3144 struct drm_i915_private *dev_priv = 3083 struct drm_i915_private *dev_priv =
3145 container_of(work, typeof(*dev_priv), 3084 container_of(work, typeof(*dev_priv),
3146 gpu_error.hangcheck_work.work); 3085 gpu_error.hangcheck_work.work);
3147 struct drm_device *dev = dev_priv->dev;
3148 struct intel_engine_cs *engine; 3086 struct intel_engine_cs *engine;
3149 enum intel_engine_id id; 3087 unsigned int hung = 0, stuck = 0;
3150 int busy_count = 0, rings_hung = 0; 3088 int busy_count = 0;
3151 bool stuck[I915_NUM_ENGINES] = { 0 };
3152#define BUSY 1 3089#define BUSY 1
3153#define KICK 5 3090#define KICK 5
3154#define HUNG 20 3091#define HUNG 20
@@ -3157,12 +3094,8 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
3157 if (!i915.enable_hangcheck) 3094 if (!i915.enable_hangcheck)
3158 return; 3095 return;
3159 3096
3160 /* 3097 if (!READ_ONCE(dev_priv->gt.awake))
3161 * The hangcheck work is synced during runtime suspend, we don't 3098 return;
3162 * require a wakeref. TODO: instead of disabling the asserts make
3163 * sure that we hold a reference when this work is running.
3164 */
3165 DISABLE_RPM_WAKEREF_ASSERTS(dev_priv);
3166 3099
3167 /* As enabling the GPU requires fairly extensive mmio access, 3100 /* As enabling the GPU requires fairly extensive mmio access,
3168 * periodically arm the mmio checker to see if we are triggering 3101 * periodically arm the mmio checker to see if we are triggering
@@ -3170,11 +3103,11 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
3170 */ 3103 */
3171 intel_uncore_arm_unclaimed_mmio_detection(dev_priv); 3104 intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
3172 3105
3173 for_each_engine_id(engine, dev_priv, id) { 3106 for_each_engine(engine, dev_priv) {
3107 bool busy = intel_engine_has_waiter(engine);
3174 u64 acthd; 3108 u64 acthd;
3175 u32 seqno; 3109 u32 seqno;
3176 unsigned user_interrupts; 3110 unsigned user_interrupts;
3177 bool busy = true;
3178 3111
3179 semaphore_clear_deadlocks(dev_priv); 3112 semaphore_clear_deadlocks(dev_priv);
3180 3113
@@ -3189,7 +3122,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
3189 engine->irq_seqno_barrier(engine); 3122 engine->irq_seqno_barrier(engine);
3190 3123
3191 acthd = intel_ring_get_active_head(engine); 3124 acthd = intel_ring_get_active_head(engine);
3192 seqno = engine->get_seqno(engine); 3125 seqno = intel_engine_get_seqno(engine);
3193 3126
3194 /* Reset stuck interrupts between batch advances */ 3127 /* Reset stuck interrupts between batch advances */
3195 user_interrupts = 0; 3128 user_interrupts = 0;
@@ -3197,12 +3130,11 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
3197 if (engine->hangcheck.seqno == seqno) { 3130 if (engine->hangcheck.seqno == seqno) {
3198 if (ring_idle(engine, seqno)) { 3131 if (ring_idle(engine, seqno)) {
3199 engine->hangcheck.action = HANGCHECK_IDLE; 3132 engine->hangcheck.action = HANGCHECK_IDLE;
3200 if (waitqueue_active(&engine->irq_queue)) { 3133 if (busy) {
3201 /* Safeguard against driver failure */ 3134 /* Safeguard against driver failure */
3202 user_interrupts = kick_waiters(engine); 3135 user_interrupts = kick_waiters(engine);
3203 engine->hangcheck.score += BUSY; 3136 engine->hangcheck.score += BUSY;
3204 } else 3137 }
3205 busy = false;
3206 } else { 3138 } else {
3207 /* We always increment the hangcheck score 3139 /* We always increment the hangcheck score
3208 * if the ring is busy and still processing 3140 * if the ring is busy and still processing
@@ -3234,10 +3166,15 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
3234 break; 3166 break;
3235 case HANGCHECK_HUNG: 3167 case HANGCHECK_HUNG:
3236 engine->hangcheck.score += HUNG; 3168 engine->hangcheck.score += HUNG;
3237 stuck[id] = true;
3238 break; 3169 break;
3239 } 3170 }
3240 } 3171 }
3172
3173 if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
3174 hung |= intel_engine_flag(engine);
3175 if (engine->hangcheck.action != HANGCHECK_HUNG)
3176 stuck |= intel_engine_flag(engine);
3177 }
3241 } else { 3178 } else {
3242 engine->hangcheck.action = HANGCHECK_ACTIVE; 3179 engine->hangcheck.action = HANGCHECK_ACTIVE;
3243 3180
@@ -3262,48 +3199,33 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
3262 busy_count += busy; 3199 busy_count += busy;
3263 } 3200 }
3264 3201
3265 for_each_engine_id(engine, dev_priv, id) { 3202 if (hung) {
3266 if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) { 3203 char msg[80];
3267 DRM_INFO("%s on %s\n", 3204 int len;
3268 stuck[id] ? "stuck" : "no progress",
3269 engine->name);
3270 rings_hung |= intel_engine_flag(engine);
3271 }
3272 }
3273 3205
3274 if (rings_hung) { 3206 /* If some rings hung but others were still busy, only
3275 i915_handle_error(dev, rings_hung, "Engine(s) hung"); 3207 * blame the hanging rings in the synopsis.
3276 goto out; 3208 */
3209 if (stuck != hung)
3210 hung &= ~stuck;
3211 len = scnprintf(msg, sizeof(msg),
3212 "%s on ", stuck == hung ? "No progress" : "Hang");
3213 for_each_engine_masked(engine, dev_priv, hung)
3214 len += scnprintf(msg + len, sizeof(msg) - len,
3215 "%s, ", engine->name);
3216 msg[len-2] = '\0';
3217
3218 return i915_handle_error(dev_priv, hung, msg);
3277 } 3219 }
3278 3220
3221 /* Reset timer in case GPU hangs without another request being added */
3279 if (busy_count) 3222 if (busy_count)
3280 /* Reset timer case chip hangs without another request 3223 i915_queue_hangcheck(dev_priv);
3281 * being added */
3282 i915_queue_hangcheck(dev);
3283
3284out:
3285 ENABLE_RPM_WAKEREF_ASSERTS(dev_priv);
3286}
3287
3288void i915_queue_hangcheck(struct drm_device *dev)
3289{
3290 struct i915_gpu_error *e = &to_i915(dev)->gpu_error;
3291
3292 if (!i915.enable_hangcheck)
3293 return;
3294
3295 /* Don't continually defer the hangcheck so that it is always run at
3296 * least once after work has been scheduled on any ring. Otherwise,
3297 * we will ignore a hung ring if a second ring is kept busy.
3298 */
3299
3300 queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
3301 round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
3302} 3224}
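The hunk above folds the old per-engine DRM_INFO loop into a single synopsis handed to i915_handle_error(): each hangcheck period the work function bumps an engine's score by BUSY (1), KICK (5) or HUNG (20) depending on how stuck it looks, an engine whose score reaches HANGCHECK_SCORE_RING_HUNG is collected into the hung mask, and engines that crossed the threshold while still being kicked (hangcheck action other than HANGCHECK_HUNG) are additionally collected into stuck; only the truly hung engines are blamed unless every offender is merely stuck. A small, self-contained sketch of that mask arithmetic follows — the engine flag values are made-up stand-ins for intel_engine_flag(), purely for illustration.

/* Illustrative only: the flag values below are hypothetical stand-ins. */
#include <stdio.h>

#define RCS_FLAG (1u << 0)	/* render engine */
#define BCS_FLAG (1u << 1)	/* blitter engine */

int main(void)
{
	unsigned int hung  = RCS_FLAG | BCS_FLAG;	/* both crossed the score threshold */
	unsigned int stuck = BCS_FLAG;			/* blitter was still being kicked */

	/* Same reduction as the driver: only blame rings that are really hung,
	 * unless every offender is merely "stuck". */
	if (stuck != hung)
		hung &= ~stuck;

	printf("%s on 0x%x\n", stuck == hung ? "No progress" : "Hang", hung);
	/* Prints "Hang on 0x1": only the render engine is named in the synopsis. */
	return 0;
}

When both masks are equal the synopsis instead begins with "No progress on", matching the scnprintf() format above; the real driver appends engine->name for each engine still set in hung via for_each_engine_masked().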
3303 3225
3304static void ibx_irq_reset(struct drm_device *dev) 3226static void ibx_irq_reset(struct drm_device *dev)
3305{ 3227{
3306 struct drm_i915_private *dev_priv = dev->dev_private; 3228 struct drm_i915_private *dev_priv = to_i915(dev);
3307 3229
3308 if (HAS_PCH_NOP(dev)) 3230 if (HAS_PCH_NOP(dev))
3309 return; 3231 return;
@@ -3324,7 +3246,7 @@ static void ibx_irq_reset(struct drm_device *dev)
3324 */ 3246 */
3325static void ibx_irq_pre_postinstall(struct drm_device *dev) 3247static void ibx_irq_pre_postinstall(struct drm_device *dev)
3326{ 3248{
3327 struct drm_i915_private *dev_priv = dev->dev_private; 3249 struct drm_i915_private *dev_priv = to_i915(dev);
3328 3250
3329 if (HAS_PCH_NOP(dev)) 3251 if (HAS_PCH_NOP(dev))
3330 return; 3252 return;
@@ -3336,7 +3258,7 @@ static void ibx_irq_pre_postinstall(struct drm_device *dev)
3336 3258
3337static void gen5_gt_irq_reset(struct drm_device *dev) 3259static void gen5_gt_irq_reset(struct drm_device *dev)
3338{ 3260{
3339 struct drm_i915_private *dev_priv = dev->dev_private; 3261 struct drm_i915_private *dev_priv = to_i915(dev);
3340 3262
3341 GEN5_IRQ_RESET(GT); 3263 GEN5_IRQ_RESET(GT);
3342 if (INTEL_INFO(dev)->gen >= 6) 3264 if (INTEL_INFO(dev)->gen >= 6)
@@ -3396,7 +3318,7 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3396*/ 3318*/
3397static void ironlake_irq_reset(struct drm_device *dev) 3319static void ironlake_irq_reset(struct drm_device *dev)
3398{ 3320{
3399 struct drm_i915_private *dev_priv = dev->dev_private; 3321 struct drm_i915_private *dev_priv = to_i915(dev);
3400 3322
3401 I915_WRITE(HWSTAM, 0xffffffff); 3323 I915_WRITE(HWSTAM, 0xffffffff);
3402 3324
@@ -3411,7 +3333,7 @@ static void ironlake_irq_reset(struct drm_device *dev)
3411 3333
3412static void valleyview_irq_preinstall(struct drm_device *dev) 3334static void valleyview_irq_preinstall(struct drm_device *dev)
3413{ 3335{
3414 struct drm_i915_private *dev_priv = dev->dev_private; 3336 struct drm_i915_private *dev_priv = to_i915(dev);
3415 3337
3416 I915_WRITE(VLV_MASTER_IER, 0); 3338 I915_WRITE(VLV_MASTER_IER, 0);
3417 POSTING_READ(VLV_MASTER_IER); 3339 POSTING_READ(VLV_MASTER_IER);
@@ -3434,7 +3356,7 @@ static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3434 3356
3435static void gen8_irq_reset(struct drm_device *dev) 3357static void gen8_irq_reset(struct drm_device *dev)
3436{ 3358{
3437 struct drm_i915_private *dev_priv = dev->dev_private; 3359 struct drm_i915_private *dev_priv = to_i915(dev);
3438 int pipe; 3360 int pipe;
3439 3361
3440 I915_WRITE(GEN8_MASTER_IRQ, 0); 3362 I915_WRITE(GEN8_MASTER_IRQ, 0);
@@ -3480,12 +3402,12 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
3480 spin_unlock_irq(&dev_priv->irq_lock); 3402 spin_unlock_irq(&dev_priv->irq_lock);
3481 3403
3482 /* make sure we're done processing display irqs */ 3404 /* make sure we're done processing display irqs */
3483 synchronize_irq(dev_priv->dev->irq); 3405 synchronize_irq(dev_priv->drm.irq);
3484} 3406}
3485 3407
3486static void cherryview_irq_preinstall(struct drm_device *dev) 3408static void cherryview_irq_preinstall(struct drm_device *dev)
3487{ 3409{
3488 struct drm_i915_private *dev_priv = dev->dev_private; 3410 struct drm_i915_private *dev_priv = to_i915(dev);
3489 3411
3490 I915_WRITE(GEN8_MASTER_IRQ, 0); 3412 I915_WRITE(GEN8_MASTER_IRQ, 0);
3491 POSTING_READ(GEN8_MASTER_IRQ); 3413 POSTING_READ(GEN8_MASTER_IRQ);
@@ -3500,31 +3422,29 @@ static void cherryview_irq_preinstall(struct drm_device *dev)
3500 spin_unlock_irq(&dev_priv->irq_lock); 3422 spin_unlock_irq(&dev_priv->irq_lock);
3501} 3423}
3502 3424
3503static u32 intel_hpd_enabled_irqs(struct drm_device *dev, 3425static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
3504 const u32 hpd[HPD_NUM_PINS]) 3426 const u32 hpd[HPD_NUM_PINS])
3505{ 3427{
3506 struct drm_i915_private *dev_priv = to_i915(dev);
3507 struct intel_encoder *encoder; 3428 struct intel_encoder *encoder;
3508 u32 enabled_irqs = 0; 3429 u32 enabled_irqs = 0;
3509 3430
3510 for_each_intel_encoder(dev, encoder) 3431 for_each_intel_encoder(&dev_priv->drm, encoder)
3511 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED) 3432 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
3512 enabled_irqs |= hpd[encoder->hpd_pin]; 3433 enabled_irqs |= hpd[encoder->hpd_pin];
3513 3434
3514 return enabled_irqs; 3435 return enabled_irqs;
3515} 3436}
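intel_hpd_enabled_irqs() above folds the per-pin hotplug tables into one enable mask: for every encoder whose pin is currently HPD_ENABLED it ORs in the platform-specific bit from the hpd[] array supplied by the caller (hpd_ibx, hpd_cpt, hpd_spt, hpd_bdw and friends, as the setup functions below show). A hypothetical sketch of that table-driven lookup follows; the enum, table contents and bit values are simplified stand-ins, not the driver's real SDE_*_HOTPLUG definitions.

/* Hypothetical illustration of the hpd[] table shape and its use. */
enum example_hpd_pin { HPD_PORT_B_X, HPD_PORT_C_X, HPD_PORT_D_X, HPD_NUM_PINS_X };

static const unsigned int example_hpd[HPD_NUM_PINS_X] = {
	[HPD_PORT_B_X] = 1u << 4,	/* stand-in for a SDE_PORTB_HOTPLUG-style bit */
	[HPD_PORT_C_X] = 1u << 8,
	[HPD_PORT_D_X] = 1u << 12,
};

/* Mirrors intel_hpd_enabled_irqs(): OR together the bits for enabled pins. */
static unsigned int example_enabled_irqs(const int enabled[HPD_NUM_PINS_X])
{
	unsigned int mask = 0;
	int pin;

	for (pin = 0; pin < HPD_NUM_PINS_X; pin++)
		if (enabled[pin])
			mask |= example_hpd[pin];
	return mask;
}

In the real driver the per-pin enable state lives in dev_priv->hotplug.stats[] and the table is chosen per platform/PCH, which is why the ibx/spt/ilk/bxt setup functions below each pass a different hpd_* array.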
3516 3437
3517static void ibx_hpd_irq_setup(struct drm_device *dev) 3438static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
3518{ 3439{
3519 struct drm_i915_private *dev_priv = dev->dev_private;
3520 u32 hotplug_irqs, hotplug, enabled_irqs; 3440 u32 hotplug_irqs, hotplug, enabled_irqs;
3521 3441
3522 if (HAS_PCH_IBX(dev)) { 3442 if (HAS_PCH_IBX(dev_priv)) {
3523 hotplug_irqs = SDE_HOTPLUG_MASK; 3443 hotplug_irqs = SDE_HOTPLUG_MASK;
3524 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ibx); 3444 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
3525 } else { 3445 } else {
3526 hotplug_irqs = SDE_HOTPLUG_MASK_CPT; 3446 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3527 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_cpt); 3447 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
3528 } 3448 }
3529 3449
3530 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3450 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
@@ -3543,18 +3463,17 @@ static void ibx_hpd_irq_setup(struct drm_device *dev)
3543 * When CPU and PCH are on the same package, port A 3463 * When CPU and PCH are on the same package, port A
3544 * HPD must be enabled in both north and south. 3464 * HPD must be enabled in both north and south.
3545 */ 3465 */
3546 if (HAS_PCH_LPT_LP(dev)) 3466 if (HAS_PCH_LPT_LP(dev_priv))
3547 hotplug |= PORTA_HOTPLUG_ENABLE; 3467 hotplug |= PORTA_HOTPLUG_ENABLE;
3548 I915_WRITE(PCH_PORT_HOTPLUG, hotplug); 3468 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3549} 3469}
3550 3470
3551static void spt_hpd_irq_setup(struct drm_device *dev) 3471static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3552{ 3472{
3553 struct drm_i915_private *dev_priv = dev->dev_private;
3554 u32 hotplug_irqs, hotplug, enabled_irqs; 3473 u32 hotplug_irqs, hotplug, enabled_irqs;
3555 3474
3556 hotplug_irqs = SDE_HOTPLUG_MASK_SPT; 3475 hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
3557 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_spt); 3476 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
3558 3477
3559 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); 3478 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3560 3479
@@ -3569,24 +3488,23 @@ static void spt_hpd_irq_setup(struct drm_device *dev)
3569 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug); 3488 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
3570} 3489}
3571 3490
3572static void ilk_hpd_irq_setup(struct drm_device *dev) 3491static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
3573{ 3492{
3574 struct drm_i915_private *dev_priv = dev->dev_private;
3575 u32 hotplug_irqs, hotplug, enabled_irqs; 3493 u32 hotplug_irqs, hotplug, enabled_irqs;
3576 3494
3577 if (INTEL_INFO(dev)->gen >= 8) { 3495 if (INTEL_GEN(dev_priv) >= 8) {
3578 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG; 3496 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
3579 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bdw); 3497 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);
3580 3498
3581 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3499 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3582 } else if (INTEL_INFO(dev)->gen >= 7) { 3500 } else if (INTEL_GEN(dev_priv) >= 7) {
3583 hotplug_irqs = DE_DP_A_HOTPLUG_IVB; 3501 hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
3584 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ivb); 3502 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);
3585 3503
3586 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3504 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3587 } else { 3505 } else {
3588 hotplug_irqs = DE_DP_A_HOTPLUG; 3506 hotplug_irqs = DE_DP_A_HOTPLUG;
3589 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_ilk); 3507 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);
3590 3508
3591 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs); 3509 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3592 } 3510 }
@@ -3601,15 +3519,14 @@ static void ilk_hpd_irq_setup(struct drm_device *dev)
3601 hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms; 3519 hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
3602 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug); 3520 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
3603 3521
3604 ibx_hpd_irq_setup(dev); 3522 ibx_hpd_irq_setup(dev_priv);
3605} 3523}
3606 3524
3607static void bxt_hpd_irq_setup(struct drm_device *dev) 3525static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3608{ 3526{
3609 struct drm_i915_private *dev_priv = dev->dev_private;
3610 u32 hotplug_irqs, hotplug, enabled_irqs; 3527 u32 hotplug_irqs, hotplug, enabled_irqs;
3611 3528
3612 enabled_irqs = intel_hpd_enabled_irqs(dev, hpd_bxt); 3529 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
3613 hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK; 3530 hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
3614 3531
3615 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs); 3532 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
@@ -3642,7 +3559,7 @@ static void bxt_hpd_irq_setup(struct drm_device *dev)
3642 3559
3643static void ibx_irq_postinstall(struct drm_device *dev) 3560static void ibx_irq_postinstall(struct drm_device *dev)
3644{ 3561{
3645 struct drm_i915_private *dev_priv = dev->dev_private; 3562 struct drm_i915_private *dev_priv = to_i915(dev);
3646 u32 mask; 3563 u32 mask;
3647 3564
3648 if (HAS_PCH_NOP(dev)) 3565 if (HAS_PCH_NOP(dev))
@@ -3659,7 +3576,7 @@ static void ibx_irq_postinstall(struct drm_device *dev)
3659 3576
3660static void gen5_gt_irq_postinstall(struct drm_device *dev) 3577static void gen5_gt_irq_postinstall(struct drm_device *dev)
3661{ 3578{
3662 struct drm_i915_private *dev_priv = dev->dev_private; 3579 struct drm_i915_private *dev_priv = to_i915(dev);
3663 u32 pm_irqs, gt_irqs; 3580 u32 pm_irqs, gt_irqs;
3664 3581
3665 pm_irqs = gt_irqs = 0; 3582 pm_irqs = gt_irqs = 0;
@@ -3673,8 +3590,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
3673 3590
3674 gt_irqs |= GT_RENDER_USER_INTERRUPT; 3591 gt_irqs |= GT_RENDER_USER_INTERRUPT;
3675 if (IS_GEN5(dev)) { 3592 if (IS_GEN5(dev)) {
3676 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT | 3593 gt_irqs |= ILK_BSD_USER_INTERRUPT;
3677 ILK_BSD_USER_INTERRUPT;
3678 } else { 3594 } else {
3679 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; 3595 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3680 } 3596 }
@@ -3696,7 +3612,7 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
3696 3612
3697static int ironlake_irq_postinstall(struct drm_device *dev) 3613static int ironlake_irq_postinstall(struct drm_device *dev)
3698{ 3614{
3699 struct drm_i915_private *dev_priv = dev->dev_private; 3615 struct drm_i915_private *dev_priv = to_i915(dev);
3700 u32 display_mask, extra_mask; 3616 u32 display_mask, extra_mask;
3701 3617
3702 if (INTEL_INFO(dev)->gen >= 7) { 3618 if (INTEL_INFO(dev)->gen >= 7) {
@@ -3775,7 +3691,7 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3775 3691
3776static int valleyview_irq_postinstall(struct drm_device *dev) 3692static int valleyview_irq_postinstall(struct drm_device *dev)
3777{ 3693{
3778 struct drm_i915_private *dev_priv = dev->dev_private; 3694 struct drm_i915_private *dev_priv = to_i915(dev);
3779 3695
3780 gen5_gt_irq_postinstall(dev); 3696 gen5_gt_irq_postinstall(dev);
3781 3697
@@ -3827,6 +3743,7 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3827 uint32_t de_pipe_enables; 3743 uint32_t de_pipe_enables;
3828 u32 de_port_masked = GEN8_AUX_CHANNEL_A; 3744 u32 de_port_masked = GEN8_AUX_CHANNEL_A;
3829 u32 de_port_enables; 3745 u32 de_port_enables;
3746 u32 de_misc_masked = GEN8_DE_MISC_GSE;
3830 enum pipe pipe; 3747 enum pipe pipe;
3831 3748
3832 if (INTEL_INFO(dev_priv)->gen >= 9) { 3749 if (INTEL_INFO(dev_priv)->gen >= 9) {
@@ -3862,11 +3779,12 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3862 de_pipe_enables); 3779 de_pipe_enables);
3863 3780
3864 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables); 3781 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
3782 GEN5_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
3865} 3783}
3866 3784
3867static int gen8_irq_postinstall(struct drm_device *dev) 3785static int gen8_irq_postinstall(struct drm_device *dev)
3868{ 3786{
3869 struct drm_i915_private *dev_priv = dev->dev_private; 3787 struct drm_i915_private *dev_priv = to_i915(dev);
3870 3788
3871 if (HAS_PCH_SPLIT(dev)) 3789 if (HAS_PCH_SPLIT(dev))
3872 ibx_irq_pre_postinstall(dev); 3790 ibx_irq_pre_postinstall(dev);
@@ -3885,7 +3803,7 @@ static int gen8_irq_postinstall(struct drm_device *dev)
3885 3803
3886static int cherryview_irq_postinstall(struct drm_device *dev) 3804static int cherryview_irq_postinstall(struct drm_device *dev)
3887{ 3805{
3888 struct drm_i915_private *dev_priv = dev->dev_private; 3806 struct drm_i915_private *dev_priv = to_i915(dev);
3889 3807
3890 gen8_gt_irq_postinstall(dev_priv); 3808 gen8_gt_irq_postinstall(dev_priv);
3891 3809
@@ -3902,7 +3820,7 @@ static int cherryview_irq_postinstall(struct drm_device *dev)
3902 3820
3903static void gen8_irq_uninstall(struct drm_device *dev) 3821static void gen8_irq_uninstall(struct drm_device *dev)
3904{ 3822{
3905 struct drm_i915_private *dev_priv = dev->dev_private; 3823 struct drm_i915_private *dev_priv = to_i915(dev);
3906 3824
3907 if (!dev_priv) 3825 if (!dev_priv)
3908 return; 3826 return;
@@ -3912,7 +3830,7 @@ static void gen8_irq_uninstall(struct drm_device *dev)
3912 3830
3913static void valleyview_irq_uninstall(struct drm_device *dev) 3831static void valleyview_irq_uninstall(struct drm_device *dev)
3914{ 3832{
3915 struct drm_i915_private *dev_priv = dev->dev_private; 3833 struct drm_i915_private *dev_priv = to_i915(dev);
3916 3834
3917 if (!dev_priv) 3835 if (!dev_priv)
3918 return; 3836 return;
@@ -3932,7 +3850,7 @@ static void valleyview_irq_uninstall(struct drm_device *dev)
3932 3850
3933static void cherryview_irq_uninstall(struct drm_device *dev) 3851static void cherryview_irq_uninstall(struct drm_device *dev)
3934{ 3852{
3935 struct drm_i915_private *dev_priv = dev->dev_private; 3853 struct drm_i915_private *dev_priv = to_i915(dev);
3936 3854
3937 if (!dev_priv) 3855 if (!dev_priv)
3938 return; 3856 return;
@@ -3952,7 +3870,7 @@ static void cherryview_irq_uninstall(struct drm_device *dev)
3952 3870
3953static void ironlake_irq_uninstall(struct drm_device *dev) 3871static void ironlake_irq_uninstall(struct drm_device *dev)
3954{ 3872{
3955 struct drm_i915_private *dev_priv = dev->dev_private; 3873 struct drm_i915_private *dev_priv = to_i915(dev);
3956 3874
3957 if (!dev_priv) 3875 if (!dev_priv)
3958 return; 3876 return;
@@ -3962,7 +3880,7 @@ static void ironlake_irq_uninstall(struct drm_device *dev)
3962 3880
3963static void i8xx_irq_preinstall(struct drm_device * dev) 3881static void i8xx_irq_preinstall(struct drm_device * dev)
3964{ 3882{
3965 struct drm_i915_private *dev_priv = dev->dev_private; 3883 struct drm_i915_private *dev_priv = to_i915(dev);
3966 int pipe; 3884 int pipe;
3967 3885
3968 for_each_pipe(dev_priv, pipe) 3886 for_each_pipe(dev_priv, pipe)
@@ -3974,7 +3892,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
3974 3892
3975static int i8xx_irq_postinstall(struct drm_device *dev) 3893static int i8xx_irq_postinstall(struct drm_device *dev)
3976{ 3894{
3977 struct drm_i915_private *dev_priv = dev->dev_private; 3895 struct drm_i915_private *dev_priv = to_i915(dev);
3978 3896
3979 I915_WRITE16(EMR, 3897 I915_WRITE16(EMR,
3980 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3898 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
@@ -4006,13 +3924,12 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
4006/* 3924/*
4007 * Returns true when a page flip has completed. 3925 * Returns true when a page flip has completed.
4008 */ 3926 */
4009static bool i8xx_handle_vblank(struct drm_device *dev, 3927static bool i8xx_handle_vblank(struct drm_i915_private *dev_priv,
4010 int plane, int pipe, u32 iir) 3928 int plane, int pipe, u32 iir)
4011{ 3929{
4012 struct drm_i915_private *dev_priv = dev->dev_private;
4013 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 3930 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
4014 3931
4015 if (!intel_pipe_handle_vblank(dev, pipe)) 3932 if (!intel_pipe_handle_vblank(dev_priv, pipe))
4016 return false; 3933 return false;
4017 3934
4018 if ((iir & flip_pending) == 0) 3935 if ((iir & flip_pending) == 0)
@@ -4027,19 +3944,18 @@ static bool i8xx_handle_vblank(struct drm_device *dev,
4027 if (I915_READ16(ISR) & flip_pending) 3944 if (I915_READ16(ISR) & flip_pending)
4028 goto check_page_flip; 3945 goto check_page_flip;
4029 3946
4030 intel_prepare_page_flip(dev, plane); 3947 intel_finish_page_flip_cs(dev_priv, pipe);
4031 intel_finish_page_flip(dev, pipe);
4032 return true; 3948 return true;
4033 3949
4034check_page_flip: 3950check_page_flip:
4035 intel_check_page_flip(dev, pipe); 3951 intel_check_page_flip(dev_priv, pipe);
4036 return false; 3952 return false;
4037} 3953}
4038 3954
4039static irqreturn_t i8xx_irq_handler(int irq, void *arg) 3955static irqreturn_t i8xx_irq_handler(int irq, void *arg)
4040{ 3956{
4041 struct drm_device *dev = arg; 3957 struct drm_device *dev = arg;
4042 struct drm_i915_private *dev_priv = dev->dev_private; 3958 struct drm_i915_private *dev_priv = to_i915(dev);
4043 u16 iir, new_iir; 3959 u16 iir, new_iir;
4044 u32 pipe_stats[2]; 3960 u32 pipe_stats[2];
4045 int pipe; 3961 int pipe;
@@ -4089,15 +4005,15 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
4089 4005
4090 for_each_pipe(dev_priv, pipe) { 4006 for_each_pipe(dev_priv, pipe) {
4091 int plane = pipe; 4007 int plane = pipe;
4092 if (HAS_FBC(dev)) 4008 if (HAS_FBC(dev_priv))
4093 plane = !plane; 4009 plane = !plane;
4094 4010
4095 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 4011 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4096 i8xx_handle_vblank(dev, plane, pipe, iir)) 4012 i8xx_handle_vblank(dev_priv, plane, pipe, iir))
4097 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 4013 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4098 4014
4099 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 4015 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4100 i9xx_pipe_crc_irq_handler(dev, pipe); 4016 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
4101 4017
4102 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 4018 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4103 intel_cpu_fifo_underrun_irq_handler(dev_priv, 4019 intel_cpu_fifo_underrun_irq_handler(dev_priv,
@@ -4116,7 +4032,7 @@ out:
4116 4032
4117static void i8xx_irq_uninstall(struct drm_device * dev) 4033static void i8xx_irq_uninstall(struct drm_device * dev)
4118{ 4034{
4119 struct drm_i915_private *dev_priv = dev->dev_private; 4035 struct drm_i915_private *dev_priv = to_i915(dev);
4120 int pipe; 4036 int pipe;
4121 4037
4122 for_each_pipe(dev_priv, pipe) { 4038 for_each_pipe(dev_priv, pipe) {
@@ -4131,7 +4047,7 @@ static void i8xx_irq_uninstall(struct drm_device * dev)
4131 4047
4132static void i915_irq_preinstall(struct drm_device * dev) 4048static void i915_irq_preinstall(struct drm_device * dev)
4133{ 4049{
4134 struct drm_i915_private *dev_priv = dev->dev_private; 4050 struct drm_i915_private *dev_priv = to_i915(dev);
4135 int pipe; 4051 int pipe;
4136 4052
4137 if (I915_HAS_HOTPLUG(dev)) { 4053 if (I915_HAS_HOTPLUG(dev)) {
@@ -4149,7 +4065,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
4149 4065
4150static int i915_irq_postinstall(struct drm_device *dev) 4066static int i915_irq_postinstall(struct drm_device *dev)
4151{ 4067{
4152 struct drm_i915_private *dev_priv = dev->dev_private; 4068 struct drm_i915_private *dev_priv = to_i915(dev);
4153 u32 enable_mask; 4069 u32 enable_mask;
4154 4070
4155 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 4071 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
@@ -4182,7 +4098,7 @@ static int i915_irq_postinstall(struct drm_device *dev)
4182 I915_WRITE(IER, enable_mask); 4098 I915_WRITE(IER, enable_mask);
4183 POSTING_READ(IER); 4099 POSTING_READ(IER);
4184 4100
4185 i915_enable_asle_pipestat(dev); 4101 i915_enable_asle_pipestat(dev_priv);
4186 4102
4187 /* Interrupt setup is already guaranteed to be single-threaded, this is 4103 /* Interrupt setup is already guaranteed to be single-threaded, this is
4188 * just to make the assert_spin_locked check happy. */ 4104 * just to make the assert_spin_locked check happy. */
@@ -4197,13 +4113,12 @@ static int i915_irq_postinstall(struct drm_device *dev)
4197/* 4113/*
4198 * Returns true when a page flip has completed. 4114 * Returns true when a page flip has completed.
4199 */ 4115 */
4200static bool i915_handle_vblank(struct drm_device *dev, 4116static bool i915_handle_vblank(struct drm_i915_private *dev_priv,
4201 int plane, int pipe, u32 iir) 4117 int plane, int pipe, u32 iir)
4202{ 4118{
4203 struct drm_i915_private *dev_priv = dev->dev_private;
4204 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); 4119 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
4205 4120
4206 if (!intel_pipe_handle_vblank(dev, pipe)) 4121 if (!intel_pipe_handle_vblank(dev_priv, pipe))
4207 return false; 4122 return false;
4208 4123
4209 if ((iir & flip_pending) == 0) 4124 if ((iir & flip_pending) == 0)
@@ -4218,19 +4133,18 @@ static bool i915_handle_vblank(struct drm_device *dev,
4218 if (I915_READ(ISR) & flip_pending) 4133 if (I915_READ(ISR) & flip_pending)
4219 goto check_page_flip; 4134 goto check_page_flip;
4220 4135
4221 intel_prepare_page_flip(dev, plane); 4136 intel_finish_page_flip_cs(dev_priv, pipe);
4222 intel_finish_page_flip(dev, pipe);
4223 return true; 4137 return true;
4224 4138
4225check_page_flip: 4139check_page_flip:
4226 intel_check_page_flip(dev, pipe); 4140 intel_check_page_flip(dev_priv, pipe);
4227 return false; 4141 return false;
4228} 4142}
4229 4143
4230static irqreturn_t i915_irq_handler(int irq, void *arg) 4144static irqreturn_t i915_irq_handler(int irq, void *arg)
4231{ 4145{
4232 struct drm_device *dev = arg; 4146 struct drm_device *dev = arg;
4233 struct drm_i915_private *dev_priv = dev->dev_private; 4147 struct drm_i915_private *dev_priv = to_i915(dev);
4234 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; 4148 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
4235 u32 flip_mask = 4149 u32 flip_mask =
4236 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4150 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
@@ -4273,11 +4187,11 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
4273 break; 4187 break;
4274 4188
4275 /* Consume port. Then clear IIR or we'll miss events */ 4189 /* Consume port. Then clear IIR or we'll miss events */
4276 if (I915_HAS_HOTPLUG(dev) && 4190 if (I915_HAS_HOTPLUG(dev_priv) &&
4277 iir & I915_DISPLAY_PORT_INTERRUPT) { 4191 iir & I915_DISPLAY_PORT_INTERRUPT) {
4278 u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 4192 u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4279 if (hotplug_status) 4193 if (hotplug_status)
4280 i9xx_hpd_irq_handler(dev, hotplug_status); 4194 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4281 } 4195 }
4282 4196
4283 I915_WRITE(IIR, iir & ~flip_mask); 4197 I915_WRITE(IIR, iir & ~flip_mask);
@@ -4288,18 +4202,18 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
4288 4202
4289 for_each_pipe(dev_priv, pipe) { 4203 for_each_pipe(dev_priv, pipe) {
4290 int plane = pipe; 4204 int plane = pipe;
4291 if (HAS_FBC(dev)) 4205 if (HAS_FBC(dev_priv))
4292 plane = !plane; 4206 plane = !plane;
4293 4207
4294 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && 4208 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4295 i915_handle_vblank(dev, plane, pipe, iir)) 4209 i915_handle_vblank(dev_priv, plane, pipe, iir))
4296 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); 4210 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4297 4211
4298 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 4212 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4299 blc_event = true; 4213 blc_event = true;
4300 4214
4301 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 4215 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4302 i9xx_pipe_crc_irq_handler(dev, pipe); 4216 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
4303 4217
4304 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 4218 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4305 intel_cpu_fifo_underrun_irq_handler(dev_priv, 4219 intel_cpu_fifo_underrun_irq_handler(dev_priv,
@@ -4307,7 +4221,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
4307 } 4221 }
4308 4222
4309 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 4223 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4310 intel_opregion_asle_intr(dev); 4224 intel_opregion_asle_intr(dev_priv);
4311 4225
4312 /* With MSI, interrupts are only generated when iir 4226 /* With MSI, interrupts are only generated when iir
4313 * transitions from zero to nonzero. If another bit got 4227 * transitions from zero to nonzero. If another bit got
@@ -4335,7 +4249,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
4335 4249
4336static void i915_irq_uninstall(struct drm_device * dev) 4250static void i915_irq_uninstall(struct drm_device * dev)
4337{ 4251{
4338 struct drm_i915_private *dev_priv = dev->dev_private; 4252 struct drm_i915_private *dev_priv = to_i915(dev);
4339 int pipe; 4253 int pipe;
4340 4254
4341 if (I915_HAS_HOTPLUG(dev)) { 4255 if (I915_HAS_HOTPLUG(dev)) {
@@ -4357,7 +4271,7 @@ static void i915_irq_uninstall(struct drm_device * dev)
4357 4271
4358static void i965_irq_preinstall(struct drm_device * dev) 4272static void i965_irq_preinstall(struct drm_device * dev)
4359{ 4273{
4360 struct drm_i915_private *dev_priv = dev->dev_private; 4274 struct drm_i915_private *dev_priv = to_i915(dev);
4361 int pipe; 4275 int pipe;
4362 4276
4363 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4277 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
@@ -4373,7 +4287,7 @@ static void i965_irq_preinstall(struct drm_device * dev)
4373 4287
4374static int i965_irq_postinstall(struct drm_device *dev) 4288static int i965_irq_postinstall(struct drm_device *dev)
4375{ 4289{
4376 struct drm_i915_private *dev_priv = dev->dev_private; 4290 struct drm_i915_private *dev_priv = to_i915(dev);
4377 u32 enable_mask; 4291 u32 enable_mask;
4378 u32 error_mask; 4292 u32 error_mask;
4379 4293
@@ -4391,7 +4305,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
4391 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); 4305 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
4392 enable_mask |= I915_USER_INTERRUPT; 4306 enable_mask |= I915_USER_INTERRUPT;
4393 4307
4394 if (IS_G4X(dev)) 4308 if (IS_G4X(dev_priv))
4395 enable_mask |= I915_BSD_USER_INTERRUPT; 4309 enable_mask |= I915_BSD_USER_INTERRUPT;
4396 4310
4397 /* Interrupt setup is already guaranteed to be single-threaded, this is 4311 /* Interrupt setup is already guaranteed to be single-threaded, this is
@@ -4406,7 +4320,7 @@ static int i965_irq_postinstall(struct drm_device *dev)
4406 * Enable some error detection, note the instruction error mask 4320 * Enable some error detection, note the instruction error mask
4407 * bit is reserved, so we leave it masked. 4321 * bit is reserved, so we leave it masked.
4408 */ 4322 */
4409 if (IS_G4X(dev)) { 4323 if (IS_G4X(dev_priv)) {
4410 error_mask = ~(GM45_ERROR_PAGE_TABLE | 4324 error_mask = ~(GM45_ERROR_PAGE_TABLE |
4411 GM45_ERROR_MEM_PRIV | 4325 GM45_ERROR_MEM_PRIV |
4412 GM45_ERROR_CP_PRIV | 4326 GM45_ERROR_CP_PRIV |
@@ -4424,26 +4338,25 @@ static int i965_irq_postinstall(struct drm_device *dev)
4424 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0); 4338 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4425 POSTING_READ(PORT_HOTPLUG_EN); 4339 POSTING_READ(PORT_HOTPLUG_EN);
4426 4340
4427 i915_enable_asle_pipestat(dev); 4341 i915_enable_asle_pipestat(dev_priv);
4428 4342
4429 return 0; 4343 return 0;
4430} 4344}
4431 4345
4432static void i915_hpd_irq_setup(struct drm_device *dev) 4346static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
4433{ 4347{
4434 struct drm_i915_private *dev_priv = dev->dev_private;
4435 u32 hotplug_en; 4348 u32 hotplug_en;
4436 4349
4437 assert_spin_locked(&dev_priv->irq_lock); 4350 assert_spin_locked(&dev_priv->irq_lock);
4438 4351
4439 /* Note HDMI and DP share hotplug bits */ 4352 /* Note HDMI and DP share hotplug bits */
4440 /* enable bits are the same for all generations */ 4353 /* enable bits are the same for all generations */
4441 hotplug_en = intel_hpd_enabled_irqs(dev, hpd_mask_i915); 4354 hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
4442 /* Programming the CRT detection parameters tends 4355 /* Programming the CRT detection parameters tends
4443 to generate a spurious hotplug event about three 4356 to generate a spurious hotplug event about three
4444 seconds later. So just do it once. 4357 seconds later. So just do it once.
4445 */ 4358 */
4446 if (IS_G4X(dev)) 4359 if (IS_G4X(dev_priv))
4447 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; 4360 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4448 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; 4361 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4449 4362
@@ -4458,7 +4371,7 @@ static void i915_hpd_irq_setup(struct drm_device *dev)
4458static irqreturn_t i965_irq_handler(int irq, void *arg) 4371static irqreturn_t i965_irq_handler(int irq, void *arg)
4459{ 4372{
4460 struct drm_device *dev = arg; 4373 struct drm_device *dev = arg;
4461 struct drm_i915_private *dev_priv = dev->dev_private; 4374 struct drm_i915_private *dev_priv = to_i915(dev);
4462 u32 iir, new_iir; 4375 u32 iir, new_iir;
4463 u32 pipe_stats[I915_MAX_PIPES]; 4376 u32 pipe_stats[I915_MAX_PIPES];
4464 int ret = IRQ_NONE, pipe; 4377 int ret = IRQ_NONE, pipe;
@@ -4510,7 +4423,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
4510 if (iir & I915_DISPLAY_PORT_INTERRUPT) { 4423 if (iir & I915_DISPLAY_PORT_INTERRUPT) {
4511 u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv); 4424 u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4512 if (hotplug_status) 4425 if (hotplug_status)
4513 i9xx_hpd_irq_handler(dev, hotplug_status); 4426 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4514 } 4427 }
4515 4428
4516 I915_WRITE(IIR, iir & ~flip_mask); 4429 I915_WRITE(IIR, iir & ~flip_mask);
@@ -4523,24 +4436,24 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
4523 4436
4524 for_each_pipe(dev_priv, pipe) { 4437 for_each_pipe(dev_priv, pipe) {
4525 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && 4438 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
4526 i915_handle_vblank(dev, pipe, pipe, iir)) 4439 i915_handle_vblank(dev_priv, pipe, pipe, iir))
4527 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe); 4440 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
4528 4441
4529 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) 4442 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4530 blc_event = true; 4443 blc_event = true;
4531 4444
4532 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 4445 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4533 i9xx_pipe_crc_irq_handler(dev, pipe); 4446 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
4534 4447
4535 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) 4448 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4536 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); 4449 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
4537 } 4450 }
4538 4451
4539 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 4452 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4540 intel_opregion_asle_intr(dev); 4453 intel_opregion_asle_intr(dev_priv);
4541 4454
4542 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 4455 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
4543 gmbus_irq_handler(dev); 4456 gmbus_irq_handler(dev_priv);
4544 4457
4545 /* With MSI, interrupts are only generated when iir 4458 /* With MSI, interrupts are only generated when iir
4546 * transitions from zero to nonzero. If another bit got 4459 * transitions from zero to nonzero. If another bit got
@@ -4567,7 +4480,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
4567 4480
4568static void i965_irq_uninstall(struct drm_device * dev) 4481static void i965_irq_uninstall(struct drm_device * dev)
4569{ 4482{
4570 struct drm_i915_private *dev_priv = dev->dev_private; 4483 struct drm_i915_private *dev_priv = to_i915(dev);
4571 int pipe; 4484 int pipe;
4572 4485
4573 if (!dev_priv) 4486 if (!dev_priv)
@@ -4597,7 +4510,7 @@ static void i965_irq_uninstall(struct drm_device * dev)
4597 */ 4510 */
4598void intel_irq_init(struct drm_i915_private *dev_priv) 4511void intel_irq_init(struct drm_i915_private *dev_priv)
4599{ 4512{
4600 struct drm_device *dev = dev_priv->dev; 4513 struct drm_device *dev = &dev_priv->drm;
4601 4514
4602 intel_hpd_init_work(dev_priv); 4515 intel_hpd_init_work(dev_priv);
4603 4516
@@ -4611,6 +4524,20 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
4611 else 4524 else
4612 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS; 4525 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4613 4526
4527 dev_priv->rps.pm_intr_keep = 0;
4528
4529 /*
 4530 * SNB and IVB can hard hang, while VLV and CHV may hard hang, on a
 4531 * looping batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
4532 *
4533 * TODO: verify if this can be reproduced on VLV,CHV.
4534 */
4535 if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
4536 dev_priv->rps.pm_intr_keep |= GEN6_PM_RP_UP_EI_EXPIRED;
4537
4538 if (INTEL_INFO(dev_priv)->gen >= 8)
4539 dev_priv->rps.pm_intr_keep |= GEN8_PMINTR_REDIRECT_TO_NON_DISP;
4540
4614 INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work, 4541 INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
4615 i915_hangcheck_elapsed); 4542 i915_hangcheck_elapsed);
4616 4543
@@ -4674,12 +4601,12 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
4674 dev->driver->disable_vblank = ironlake_disable_vblank; 4601 dev->driver->disable_vblank = ironlake_disable_vblank;
4675 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup; 4602 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4676 } else { 4603 } else {
4677 if (INTEL_INFO(dev_priv)->gen == 2) { 4604 if (IS_GEN2(dev_priv)) {
4678 dev->driver->irq_preinstall = i8xx_irq_preinstall; 4605 dev->driver->irq_preinstall = i8xx_irq_preinstall;
4679 dev->driver->irq_postinstall = i8xx_irq_postinstall; 4606 dev->driver->irq_postinstall = i8xx_irq_postinstall;
4680 dev->driver->irq_handler = i8xx_irq_handler; 4607 dev->driver->irq_handler = i8xx_irq_handler;
4681 dev->driver->irq_uninstall = i8xx_irq_uninstall; 4608 dev->driver->irq_uninstall = i8xx_irq_uninstall;
4682 } else if (INTEL_INFO(dev_priv)->gen == 3) { 4609 } else if (IS_GEN3(dev_priv)) {
4683 dev->driver->irq_preinstall = i915_irq_preinstall; 4610 dev->driver->irq_preinstall = i915_irq_preinstall;
4684 dev->driver->irq_postinstall = i915_irq_postinstall; 4611 dev->driver->irq_postinstall = i915_irq_postinstall;
4685 dev->driver->irq_uninstall = i915_irq_uninstall; 4612 dev->driver->irq_uninstall = i915_irq_uninstall;
@@ -4717,7 +4644,7 @@ int intel_irq_install(struct drm_i915_private *dev_priv)
4717 */ 4644 */
4718 dev_priv->pm.irqs_enabled = true; 4645 dev_priv->pm.irqs_enabled = true;
4719 4646
4720 return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq); 4647 return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
4721} 4648}
4722 4649
4723/** 4650/**
@@ -4729,7 +4656,7 @@ int intel_irq_install(struct drm_i915_private *dev_priv)
4729 */ 4656 */
4730void intel_irq_uninstall(struct drm_i915_private *dev_priv) 4657void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4731{ 4658{
4732 drm_irq_uninstall(dev_priv->dev); 4659 drm_irq_uninstall(&dev_priv->drm);
4733 intel_hpd_cancel_work(dev_priv); 4660 intel_hpd_cancel_work(dev_priv);
4734 dev_priv->pm.irqs_enabled = false; 4661 dev_priv->pm.irqs_enabled = false;
4735} 4662}
@@ -4743,9 +4670,9 @@ void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4743 */ 4670 */
4744void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv) 4671void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4745{ 4672{
4746 dev_priv->dev->driver->irq_uninstall(dev_priv->dev); 4673 dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
4747 dev_priv->pm.irqs_enabled = false; 4674 dev_priv->pm.irqs_enabled = false;
4748 synchronize_irq(dev_priv->dev->irq); 4675 synchronize_irq(dev_priv->drm.irq);
4749} 4676}
4750 4677
4751/** 4678/**
@@ -4758,6 +4685,6 @@ void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4758void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv) 4685void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4759{ 4686{
4760 dev_priv->pm.irqs_enabled = true; 4687 dev_priv->pm.irqs_enabled = true;
4761 dev_priv->dev->driver->irq_preinstall(dev_priv->dev); 4688 dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
4762 dev_priv->dev->driver->irq_postinstall(dev_priv->dev); 4689 dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
4763} 4690}
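The rps.pm_intr_keep mask initialised in intel_irq_init() above records the PM interrupt bits that must never be masked in GEN6_PMINTRMSK. A minimal sketch of how such a mask is typically honoured when the register value is built (the helper name is hypothetical, not taken from this series):

static u32 sanitize_rps_pm_mask(const struct drm_i915_private *dev_priv,
				u32 mask)
{
	/* Never mask off the bits the hardware requires to stay unmasked. */
	return mask & ~dev_priv->rps.pm_intr_keep;
}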
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 1779f02e6df8..b6e404c91eed 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -54,10 +54,13 @@ struct i915_params i915 __read_mostly = {
54 .verbose_state_checks = 1, 54 .verbose_state_checks = 1,
55 .nuclear_pageflip = 0, 55 .nuclear_pageflip = 0,
56 .edp_vswing = 0, 56 .edp_vswing = 0,
57 .enable_guc_submission = false, 57 .enable_guc_loading = 0,
58 .enable_guc_submission = 0,
58 .guc_log_level = -1, 59 .guc_log_level = -1,
59 .enable_dp_mst = true, 60 .enable_dp_mst = true,
60 .inject_load_failure = 0, 61 .inject_load_failure = 0,
62 .enable_dpcd_backlight = false,
63 .enable_gvt = false,
61}; 64};
62 65
63module_param_named(modeset, i915.modeset, int, 0400); 66module_param_named(modeset, i915.modeset, int, 0400);
@@ -197,8 +200,15 @@ MODULE_PARM_DESC(edp_vswing,
197 "(0=use value from vbt [default], 1=low power swing(200mV)," 200 "(0=use value from vbt [default], 1=low power swing(200mV),"
198 "2=default swing(400mV))"); 201 "2=default swing(400mV))");
199 202
200module_param_named_unsafe(enable_guc_submission, i915.enable_guc_submission, bool, 0400); 203module_param_named_unsafe(enable_guc_loading, i915.enable_guc_loading, int, 0400);
201MODULE_PARM_DESC(enable_guc_submission, "Enable GuC submission (default:false)"); 204MODULE_PARM_DESC(enable_guc_loading,
205 "Enable GuC firmware loading "
206 "(-1=auto, 0=never [default], 1=if available, 2=required)");
207
208module_param_named_unsafe(enable_guc_submission, i915.enable_guc_submission, int, 0400);
209MODULE_PARM_DESC(enable_guc_submission,
210 "Enable GuC submission "
211 "(-1=auto, 0=never [default], 1=if available, 2=required)");
202 212
203module_param_named(guc_log_level, i915.guc_log_level, int, 0400); 213module_param_named(guc_log_level, i915.guc_log_level, int, 0400);
204MODULE_PARM_DESC(guc_log_level, 214MODULE_PARM_DESC(guc_log_level,
@@ -210,3 +220,10 @@ MODULE_PARM_DESC(enable_dp_mst,
210module_param_named_unsafe(inject_load_failure, i915.inject_load_failure, uint, 0400); 220module_param_named_unsafe(inject_load_failure, i915.inject_load_failure, uint, 0400);
211MODULE_PARM_DESC(inject_load_failure, 221MODULE_PARM_DESC(inject_load_failure,
212 "Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)"); 222 "Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)");
223module_param_named(enable_dpcd_backlight, i915.enable_dpcd_backlight, bool, 0600);
224MODULE_PARM_DESC(enable_dpcd_backlight,
225 "Enable support for DPCD backlight control (default:false)");
226
227module_param_named(enable_gvt, i915.enable_gvt, bool, 0400);
228MODULE_PARM_DESC(enable_gvt,
229 "Enable support for Intel GVT-g graphics virtualization host support(default:false)");
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 02bc27804291..0ad020b4a925 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -45,6 +45,8 @@ struct i915_params {
45 int enable_ips; 45 int enable_ips;
46 int invert_brightness; 46 int invert_brightness;
47 int enable_cmd_parser; 47 int enable_cmd_parser;
48 int enable_guc_loading;
49 int enable_guc_submission;
48 int guc_log_level; 50 int guc_log_level;
49 int use_mmio_flip; 51 int use_mmio_flip;
50 int mmio_debug; 52 int mmio_debug;
@@ -57,10 +59,11 @@ struct i915_params {
57 bool load_detect_test; 59 bool load_detect_test;
58 bool reset; 60 bool reset;
59 bool disable_display; 61 bool disable_display;
60 bool enable_guc_submission;
61 bool verbose_state_checks; 62 bool verbose_state_checks;
62 bool nuclear_pageflip; 63 bool nuclear_pageflip;
63 bool enable_dp_mst; 64 bool enable_dp_mst;
65 bool enable_dpcd_backlight;
66 bool enable_gvt;
64}; 67};
65 68
66extern struct i915_params i915 __read_mostly; 69extern struct i915_params i915 __read_mostly;
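Both new GuC options share the -1/0/1/2 convention spelled out in the parameter descriptions above. A minimal sketch of how the "auto" value could be resolved against platform capability at load time (the helper and its has_guc argument are assumptions for illustration, not code from this series):

static int sanitize_guc_option(int value, bool has_guc)
{
	/* -1 = auto: enable only when the platform actually has a GuC */
	if (value < 0)
		return has_guc ? 1 : 0;

	/* 0 = never, 1 = if available, 2 = required */
	return value;
}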
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
new file mode 100644
index 000000000000..949c01686a66
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -0,0 +1,503 @@
1/*
2 * Copyright © 2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 */
24
25#include <linux/console.h>
26#include <linux/vgaarb.h>
27#include <linux/vga_switcheroo.h>
28
29#include "i915_drv.h"
30
31#define GEN_DEFAULT_PIPEOFFSETS \
32 .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
33 PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
34 .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
35 TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
36 .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
37
38#define GEN_CHV_PIPEOFFSETS \
39 .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
40 CHV_PIPE_C_OFFSET }, \
41 .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
42 CHV_TRANSCODER_C_OFFSET, }, \
43 .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
44 CHV_PALETTE_C_OFFSET }
45
46#define CURSOR_OFFSETS \
47 .cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }
48
49#define IVB_CURSOR_OFFSETS \
50 .cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }
51
52#define BDW_COLORS \
53 .color = { .degamma_lut_size = 512, .gamma_lut_size = 512 }
54#define CHV_COLORS \
55 .color = { .degamma_lut_size = 65, .gamma_lut_size = 257 }
56
57static const struct intel_device_info intel_i830_info = {
58 .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
59 .has_overlay = 1, .overlay_needs_physical = 1,
60 .ring_mask = RENDER_RING,
61 GEN_DEFAULT_PIPEOFFSETS,
62 CURSOR_OFFSETS,
63};
64
65static const struct intel_device_info intel_845g_info = {
66 .gen = 2, .num_pipes = 1,
67 .has_overlay = 1, .overlay_needs_physical = 1,
68 .ring_mask = RENDER_RING,
69 GEN_DEFAULT_PIPEOFFSETS,
70 CURSOR_OFFSETS,
71};
72
73static const struct intel_device_info intel_i85x_info = {
74 .gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
75 .cursor_needs_physical = 1,
76 .has_overlay = 1, .overlay_needs_physical = 1,
77 .has_fbc = 1,
78 .ring_mask = RENDER_RING,
79 GEN_DEFAULT_PIPEOFFSETS,
80 CURSOR_OFFSETS,
81};
82
83static const struct intel_device_info intel_i865g_info = {
84 .gen = 2, .num_pipes = 1,
85 .has_overlay = 1, .overlay_needs_physical = 1,
86 .ring_mask = RENDER_RING,
87 GEN_DEFAULT_PIPEOFFSETS,
88 CURSOR_OFFSETS,
89};
90
91static const struct intel_device_info intel_i915g_info = {
92 .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
93 .has_overlay = 1, .overlay_needs_physical = 1,
94 .ring_mask = RENDER_RING,
95 GEN_DEFAULT_PIPEOFFSETS,
96 CURSOR_OFFSETS,
97};
98static const struct intel_device_info intel_i915gm_info = {
99 .gen = 3, .is_mobile = 1, .num_pipes = 2,
100 .cursor_needs_physical = 1,
101 .has_overlay = 1, .overlay_needs_physical = 1,
102 .supports_tv = 1,
103 .has_fbc = 1,
104 .ring_mask = RENDER_RING,
105 GEN_DEFAULT_PIPEOFFSETS,
106 CURSOR_OFFSETS,
107};
108static const struct intel_device_info intel_i945g_info = {
109 .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
110 .has_overlay = 1, .overlay_needs_physical = 1,
111 .ring_mask = RENDER_RING,
112 GEN_DEFAULT_PIPEOFFSETS,
113 CURSOR_OFFSETS,
114};
115static const struct intel_device_info intel_i945gm_info = {
116 .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
117 .has_hotplug = 1, .cursor_needs_physical = 1,
118 .has_overlay = 1, .overlay_needs_physical = 1,
119 .supports_tv = 1,
120 .has_fbc = 1,
121 .ring_mask = RENDER_RING,
122 GEN_DEFAULT_PIPEOFFSETS,
123 CURSOR_OFFSETS,
124};
125
126static const struct intel_device_info intel_i965g_info = {
127 .gen = 4, .is_broadwater = 1, .num_pipes = 2,
128 .has_hotplug = 1,
129 .has_overlay = 1,
130 .ring_mask = RENDER_RING,
131 GEN_DEFAULT_PIPEOFFSETS,
132 CURSOR_OFFSETS,
133};
134
135static const struct intel_device_info intel_i965gm_info = {
136 .gen = 4, .is_crestline = 1, .num_pipes = 2,
137 .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
138 .has_overlay = 1,
139 .supports_tv = 1,
140 .ring_mask = RENDER_RING,
141 GEN_DEFAULT_PIPEOFFSETS,
142 CURSOR_OFFSETS,
143};
144
145static const struct intel_device_info intel_g33_info = {
146 .gen = 3, .is_g33 = 1, .num_pipes = 2,
147 .need_gfx_hws = 1, .has_hotplug = 1,
148 .has_overlay = 1,
149 .ring_mask = RENDER_RING,
150 GEN_DEFAULT_PIPEOFFSETS,
151 CURSOR_OFFSETS,
152};
153
154static const struct intel_device_info intel_g45_info = {
155 .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
156 .has_pipe_cxsr = 1, .has_hotplug = 1,
157 .ring_mask = RENDER_RING | BSD_RING,
158 GEN_DEFAULT_PIPEOFFSETS,
159 CURSOR_OFFSETS,
160};
161
162static const struct intel_device_info intel_gm45_info = {
163 .gen = 4, .is_g4x = 1, .num_pipes = 2,
164 .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
165 .has_pipe_cxsr = 1, .has_hotplug = 1,
166 .supports_tv = 1,
167 .ring_mask = RENDER_RING | BSD_RING,
168 GEN_DEFAULT_PIPEOFFSETS,
169 CURSOR_OFFSETS,
170};
171
172static const struct intel_device_info intel_pineview_info = {
173 .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
174 .need_gfx_hws = 1, .has_hotplug = 1,
175 .has_overlay = 1,
176 GEN_DEFAULT_PIPEOFFSETS,
177 CURSOR_OFFSETS,
178};
179
180static const struct intel_device_info intel_ironlake_d_info = {
181 .gen = 5, .num_pipes = 2,
182 .need_gfx_hws = 1, .has_hotplug = 1,
183 .ring_mask = RENDER_RING | BSD_RING,
184 GEN_DEFAULT_PIPEOFFSETS,
185 CURSOR_OFFSETS,
186};
187
188static const struct intel_device_info intel_ironlake_m_info = {
189 .gen = 5, .is_mobile = 1, .num_pipes = 2,
190 .need_gfx_hws = 1, .has_hotplug = 1,
191 .has_fbc = 1,
192 .ring_mask = RENDER_RING | BSD_RING,
193 GEN_DEFAULT_PIPEOFFSETS,
194 CURSOR_OFFSETS,
195};
196
197static const struct intel_device_info intel_sandybridge_d_info = {
198 .gen = 6, .num_pipes = 2,
199 .need_gfx_hws = 1, .has_hotplug = 1,
200 .has_fbc = 1,
201 .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
202 .has_llc = 1,
203 GEN_DEFAULT_PIPEOFFSETS,
204 CURSOR_OFFSETS,
205};
206
207static const struct intel_device_info intel_sandybridge_m_info = {
208 .gen = 6, .is_mobile = 1, .num_pipes = 2,
209 .need_gfx_hws = 1, .has_hotplug = 1,
210 .has_fbc = 1,
211 .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
212 .has_llc = 1,
213 GEN_DEFAULT_PIPEOFFSETS,
214 CURSOR_OFFSETS,
215};
216
217#define GEN7_FEATURES \
218 .gen = 7, .num_pipes = 3, \
219 .need_gfx_hws = 1, .has_hotplug = 1, \
220 .has_fbc = 1, \
221 .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
222 .has_llc = 1, \
223 GEN_DEFAULT_PIPEOFFSETS, \
224 IVB_CURSOR_OFFSETS
225
226static const struct intel_device_info intel_ivybridge_d_info = {
227 GEN7_FEATURES,
228 .is_ivybridge = 1,
229};
230
231static const struct intel_device_info intel_ivybridge_m_info = {
232 GEN7_FEATURES,
233 .is_ivybridge = 1,
234 .is_mobile = 1,
235};
236
237static const struct intel_device_info intel_ivybridge_q_info = {
238 GEN7_FEATURES,
239 .is_ivybridge = 1,
240 .num_pipes = 0, /* legal, last one wins */
241};
242
243#define VLV_FEATURES \
244 .gen = 7, .num_pipes = 2, \
245 .need_gfx_hws = 1, .has_hotplug = 1, \
246 .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
247 .display_mmio_offset = VLV_DISPLAY_BASE, \
248 GEN_DEFAULT_PIPEOFFSETS, \
249 CURSOR_OFFSETS
250
251static const struct intel_device_info intel_valleyview_m_info = {
252 VLV_FEATURES,
253 .is_valleyview = 1,
254 .is_mobile = 1,
255};
256
257static const struct intel_device_info intel_valleyview_d_info = {
258 VLV_FEATURES,
259 .is_valleyview = 1,
260};
261
262#define HSW_FEATURES \
263 GEN7_FEATURES, \
264 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \
265 .has_ddi = 1, \
266 .has_fpga_dbg = 1
267
268static const struct intel_device_info intel_haswell_d_info = {
269 HSW_FEATURES,
270 .is_haswell = 1,
271};
272
273static const struct intel_device_info intel_haswell_m_info = {
274 HSW_FEATURES,
275 .is_haswell = 1,
276 .is_mobile = 1,
277};
278
279#define BDW_FEATURES \
280 HSW_FEATURES, \
281 BDW_COLORS
282
283static const struct intel_device_info intel_broadwell_d_info = {
284 BDW_FEATURES,
285 .gen = 8,
286 .is_broadwell = 1,
287};
288
289static const struct intel_device_info intel_broadwell_m_info = {
290 BDW_FEATURES,
291 .gen = 8, .is_mobile = 1,
292 .is_broadwell = 1,
293};
294
295static const struct intel_device_info intel_broadwell_gt3d_info = {
296 BDW_FEATURES,
297 .gen = 8,
298 .is_broadwell = 1,
299 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
300};
301
302static const struct intel_device_info intel_broadwell_gt3m_info = {
303 BDW_FEATURES,
304 .gen = 8, .is_mobile = 1,
305 .is_broadwell = 1,
306 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
307};
308
309static const struct intel_device_info intel_cherryview_info = {
310 .gen = 8, .num_pipes = 3,
311 .need_gfx_hws = 1, .has_hotplug = 1,
312 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
313 .is_cherryview = 1,
314 .display_mmio_offset = VLV_DISPLAY_BASE,
315 GEN_CHV_PIPEOFFSETS,
316 CURSOR_OFFSETS,
317 CHV_COLORS,
318};
319
320static const struct intel_device_info intel_skylake_info = {
321 BDW_FEATURES,
322 .is_skylake = 1,
323 .gen = 9,
324};
325
326static const struct intel_device_info intel_skylake_gt3_info = {
327 BDW_FEATURES,
328 .is_skylake = 1,
329 .gen = 9,
330 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
331};
332
333static const struct intel_device_info intel_broxton_info = {
334 .is_broxton = 1,
335 .gen = 9,
336 .need_gfx_hws = 1, .has_hotplug = 1,
337 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
338 .num_pipes = 3,
339 .has_ddi = 1,
340 .has_fpga_dbg = 1,
341 .has_fbc = 1,
342 .has_pooled_eu = 0,
343 GEN_DEFAULT_PIPEOFFSETS,
344 IVB_CURSOR_OFFSETS,
345 BDW_COLORS,
346};
347
348static const struct intel_device_info intel_kabylake_info = {
349 BDW_FEATURES,
350 .is_kabylake = 1,
351 .gen = 9,
352};
353
354static const struct intel_device_info intel_kabylake_gt3_info = {
355 BDW_FEATURES,
356 .is_kabylake = 1,
357 .gen = 9,
358 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
359};
360
361/*
 362 * Make sure the device matches here go from most specific to most
363 * general. For example, since the Quanta match is based on the subsystem
364 * and subvendor IDs, we need it to come before the more general IVB
365 * PCI ID matches, otherwise we'll use the wrong info struct above.
366 */
367static const struct pci_device_id pciidlist[] = {
368 INTEL_I830_IDS(&intel_i830_info),
369 INTEL_I845G_IDS(&intel_845g_info),
370 INTEL_I85X_IDS(&intel_i85x_info),
371 INTEL_I865G_IDS(&intel_i865g_info),
372 INTEL_I915G_IDS(&intel_i915g_info),
373 INTEL_I915GM_IDS(&intel_i915gm_info),
374 INTEL_I945G_IDS(&intel_i945g_info),
375 INTEL_I945GM_IDS(&intel_i945gm_info),
376 INTEL_I965G_IDS(&intel_i965g_info),
377 INTEL_G33_IDS(&intel_g33_info),
378 INTEL_I965GM_IDS(&intel_i965gm_info),
379 INTEL_GM45_IDS(&intel_gm45_info),
380 INTEL_G45_IDS(&intel_g45_info),
381 INTEL_PINEVIEW_IDS(&intel_pineview_info),
382 INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),
383 INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),
384 INTEL_SNB_D_IDS(&intel_sandybridge_d_info),
385 INTEL_SNB_M_IDS(&intel_sandybridge_m_info),
386 INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */
387 INTEL_IVB_M_IDS(&intel_ivybridge_m_info),
388 INTEL_IVB_D_IDS(&intel_ivybridge_d_info),
389 INTEL_HSW_D_IDS(&intel_haswell_d_info),
390 INTEL_HSW_M_IDS(&intel_haswell_m_info),
391 INTEL_VLV_M_IDS(&intel_valleyview_m_info),
392 INTEL_VLV_D_IDS(&intel_valleyview_d_info),
393 INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),
394 INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),
395 INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info),
396 INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info),
397 INTEL_CHV_IDS(&intel_cherryview_info),
398 INTEL_SKL_GT1_IDS(&intel_skylake_info),
399 INTEL_SKL_GT2_IDS(&intel_skylake_info),
400 INTEL_SKL_GT3_IDS(&intel_skylake_gt3_info),
401 INTEL_SKL_GT4_IDS(&intel_skylake_gt3_info),
402 INTEL_BXT_IDS(&intel_broxton_info),
403 INTEL_KBL_GT1_IDS(&intel_kabylake_info),
404 INTEL_KBL_GT2_IDS(&intel_kabylake_info),
405 INTEL_KBL_GT3_IDS(&intel_kabylake_gt3_info),
406 INTEL_KBL_GT4_IDS(&intel_kabylake_gt3_info),
407 {0, 0, 0}
408};
409MODULE_DEVICE_TABLE(pci, pciidlist);
410
411extern int i915_driver_load(struct pci_dev *pdev,
412 const struct pci_device_id *ent);
413
414static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
415{
416 struct intel_device_info *intel_info =
417 (struct intel_device_info *) ent->driver_data;
418
419 if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
420 DRM_INFO("This hardware requires preliminary hardware support.\n"
421 "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
422 return -ENODEV;
423 }
424
425 /* Only bind to function 0 of the device. Early generations
 426 * used function 1 as a placeholder for multi-head. That only causes
 427 * confusion now, especially on systems where both
 428 * functions have the same PCI ID!
429 */
430 if (PCI_FUNC(pdev->devfn))
431 return -ENODEV;
432
433 /*
434 * apple-gmux is needed on dual GPU MacBook Pro
435 * to probe the panel if we're the inactive GPU.
436 */
437 if (vga_switcheroo_client_probe_defer(pdev))
438 return -EPROBE_DEFER;
439
440 return i915_driver_load(pdev, ent);
441}
442
443extern void i915_driver_unload(struct drm_device *dev);
444
445static void i915_pci_remove(struct pci_dev *pdev)
446{
447 struct drm_device *dev = pci_get_drvdata(pdev);
448
449 i915_driver_unload(dev);
450 drm_dev_unref(dev);
451}
452
453extern const struct dev_pm_ops i915_pm_ops;
454
455static struct pci_driver i915_pci_driver = {
456 .name = DRIVER_NAME,
457 .id_table = pciidlist,
458 .probe = i915_pci_probe,
459 .remove = i915_pci_remove,
460 .driver.pm = &i915_pm_ops,
461};
462
463static int __init i915_init(void)
464{
465 bool use_kms = true;
466
467 /*
 468 * Enable KMS by default, unless explicitly overridden by
 469 * either the i915.modeset parameter or the
470 * vga_text_mode_force boot option.
471 */
472
473 if (i915.modeset == 0)
474 use_kms = false;
475
476 if (vgacon_text_force() && i915.modeset == -1)
477 use_kms = false;
478
479 if (!use_kms) {
480 /* Silently fail loading to not upset userspace. */
481 DRM_DEBUG_DRIVER("KMS disabled.\n");
482 return 0;
483 }
484
485 return pci_register_driver(&i915_pci_driver);
486}
487
488static void __exit i915_exit(void)
489{
490 if (!i915_pci_driver.driver.owner)
491 return;
492
493 pci_unregister_driver(&i915_pci_driver);
494}
495
496module_init(i915_init);
497module_exit(i915_exit);
498
499MODULE_AUTHOR("Tungsten Graphics, Inc.");
500MODULE_AUTHOR("Intel Corporation");
501
502MODULE_DESCRIPTION(DRIVER_DESC);
503MODULE_LICENSE("GPL and additional rights");
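Each supported platform is described by one of the intel_device_info structures above and hooked up through pciidlist, keeping the most specific matches first as the comment above the table requires. A hypothetical new entry would follow the same pattern (the names below are placeholders, not real hardware):

static const struct intel_device_info intel_example_info = {
	BDW_FEATURES,
	.gen = 9,
};

/* ...and, placed appropriately in pciidlist[] before the terminator: */
/*	INTEL_EXAMPLE_IDS(&intel_example_info), */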
diff --git a/drivers/gpu/drm/i915/i915_pvinfo.h b/drivers/gpu/drm/i915/i915_pvinfo.h
new file mode 100644
index 000000000000..c0cb2974caac
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_pvinfo.h
@@ -0,0 +1,113 @@
1/*
2 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24#ifndef _I915_PVINFO_H_
25#define _I915_PVINFO_H_
26
27/* The MMIO offset of the shared info between guest and host emulator */
28#define VGT_PVINFO_PAGE 0x78000
29#define VGT_PVINFO_SIZE 0x1000
30
31/*
32 * The following structure pages are defined in GEN MMIO space
33 * for virtualization. (One page for now)
34 */
35#define VGT_MAGIC 0x4776544776544776ULL /* 'vGTvGTvG' */
36#define VGT_VERSION_MAJOR 1
37#define VGT_VERSION_MINOR 0
38
39#define INTEL_VGT_IF_VERSION_ENCODE(major, minor) ((major) << 16 | (minor))
40#define INTEL_VGT_IF_VERSION \
41 INTEL_VGT_IF_VERSION_ENCODE(VGT_VERSION_MAJOR, VGT_VERSION_MINOR)
42
43/*
44 * notifications from guest to vgpu device model
45 */
46enum vgt_g2v_type {
47 VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE = 2,
48 VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY,
49 VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE,
50 VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY,
51 VGT_G2V_EXECLIST_CONTEXT_CREATE,
52 VGT_G2V_EXECLIST_CONTEXT_DESTROY,
53 VGT_G2V_MAX,
54};
55
56struct vgt_if {
57 u64 magic; /* VGT_MAGIC */
58 uint16_t version_major;
59 uint16_t version_minor;
60 u32 vgt_id; /* ID of vGT instance */
61 u32 rsv1[12]; /* pad to offset 0x40 */
62 /*
 63 * Data structure to describe the ballooning info of resources.
 64 * Each VM can only have one contiguous area for now.
 65 * (May support scattered resources in the future)
66 * (starting from offset 0x40)
67 */
68 struct {
 69 /* Aperture register ballooning */
70 struct {
71 u32 base;
72 u32 size;
73 } mappable_gmadr; /* aperture */
 74 /* GMADR register ballooning */
75 struct {
76 u32 base;
77 u32 size;
78 } nonmappable_gmadr; /* non aperture */
79 /* allowed fence registers */
80 u32 fence_num;
81 u32 rsv2[3];
82 } avail_rs; /* available/assigned resource */
83 u32 rsv3[0x200 - 24]; /* pad to half page */
84 /*
85 * The bottom half page is for response from Gfx driver to hypervisor.
86 */
87 u32 rsv4;
88 u32 display_ready; /* ready for display owner switch */
89
90 u32 rsv5[4];
91
92 u32 g2v_notify;
93 u32 rsv6[7];
94
95 struct {
96 u32 lo;
97 u32 hi;
98 } pdp[4];
99
100 u32 execlist_context_descriptor_lo;
101 u32 execlist_context_descriptor_hi;
102
103 u32 rsv7[0x200 - 24]; /* pad to one page */
104} __packed;
105
106#define vgtif_reg(x) \
107 _MMIO((VGT_PVINFO_PAGE + offsetof(struct vgt_if, x)))
108
109/* vGPU display status to be used by the host side */
110#define VGT_DRV_DISPLAY_NOT_READY 0
111#define VGT_DRV_DISPLAY_READY 1 /* ready for display switch */
112
113#endif /* _I915_PVINFO_H_ */
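A guest driver can probe this page through vgtif_reg() to decide whether it is running on a vGPU, comparing the magic and version fields against the constants above. A minimal sketch (the I915_READ* MMIO helpers are assumed from the rest of the driver and are not defined in this header):

static bool vgpu_running(struct drm_i915_private *dev_priv)
{
	u64 magic = I915_READ64(vgtif_reg(magic));	/* assumed 64-bit MMIO read */
	u16 major = I915_READ16(vgtif_reg(version_major));
	u16 minor = I915_READ16(vgtif_reg(version_minor));

	return magic == VGT_MAGIC &&
	       INTEL_VGT_IF_VERSION_ENCODE(major, minor) == INTEL_VGT_IF_VERSION;
}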
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 3fcf7dd5b6ca..ce14fe09d962 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -445,6 +445,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
445 */ 445 */
446#define GFX_INSTR(opcode, flags) ((0x3 << 29) | ((opcode) << 24) | (flags)) 446#define GFX_INSTR(opcode, flags) ((0x3 << 29) | ((opcode) << 24) | (flags))
447 447
448#define GEN9_MEDIA_POOL_STATE ((0x3 << 29) | (0x2 << 27) | (0x5 << 16) | 4)
449#define GEN9_MEDIA_POOL_ENABLE (1 << 31)
448#define GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24)) 450#define GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24))
449#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19)) 451#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
450#define SC_UPDATE_SCISSOR (0x1<<1) 452#define SC_UPDATE_SCISSOR (0x1<<1)
@@ -716,6 +718,9 @@ enum skl_disp_power_wells {
716 /* Not actual bit groups. Used as IDs for lookup_power_well() */ 718 /* Not actual bit groups. Used as IDs for lookup_power_well() */
717 SKL_DISP_PW_ALWAYS_ON, 719 SKL_DISP_PW_ALWAYS_ON,
718 SKL_DISP_PW_DC_OFF, 720 SKL_DISP_PW_DC_OFF,
721
722 BXT_DPIO_CMN_A,
723 BXT_DPIO_CMN_BC,
719}; 724};
720 725
721#define SKL_POWER_WELL_STATE(pw) (1 << ((pw) * 2)) 726#define SKL_POWER_WELL_STATE(pw) (1 << ((pw) * 2))
@@ -889,7 +894,7 @@ enum skl_disp_power_wells {
889 * PLLs can be routed to any transcoder A/B/C. 894 * PLLs can be routed to any transcoder A/B/C.
890 * 895 *
 891 * Note: DDI0 is digital port B, DDI1 is digital port C, and DDI2 is 896 * Note: DDI0 is digital port B, DDI1 is digital port C, and DDI2 is
892 * digital port D (CHV) or port A (BXT). 897 * digital port D (CHV) or port A (BXT). ::
893 * 898 *
894 * 899 *
895 * Dual channel PHY (VLV/CHV/BXT) 900 * Dual channel PHY (VLV/CHV/BXT)
@@ -1276,6 +1281,15 @@ enum skl_disp_power_wells {
1276#define BXT_P_CR_GT_DISP_PWRON _MMIO(0x138090) 1281#define BXT_P_CR_GT_DISP_PWRON _MMIO(0x138090)
1277#define GT_DISPLAY_POWER_ON(phy) (1 << (phy)) 1282#define GT_DISPLAY_POWER_ON(phy) (1 << (phy))
1278 1283
1284#define _BXT_PHY_CTL_DDI_A 0x64C00
1285#define _BXT_PHY_CTL_DDI_B 0x64C10
1286#define _BXT_PHY_CTL_DDI_C 0x64C20
1287#define BXT_PHY_CMNLANE_POWERDOWN_ACK (1 << 10)
1288#define BXT_PHY_LANE_POWERDOWN_ACK (1 << 9)
1289#define BXT_PHY_LANE_ENABLED (1 << 8)
1290#define BXT_PHY_CTL(port) _MMIO_PORT(port, _BXT_PHY_CTL_DDI_A, \
1291 _BXT_PHY_CTL_DDI_B)
1292
1279#define _PHY_CTL_FAMILY_EDP 0x64C80 1293#define _PHY_CTL_FAMILY_EDP 0x64C80
1280#define _PHY_CTL_FAMILY_DDI 0x64C90 1294#define _PHY_CTL_FAMILY_DDI 0x64C90
1281#define COMMON_RESET_DIS (1 << 31) 1295#define COMMON_RESET_DIS (1 << 31)
@@ -1672,6 +1686,9 @@ enum skl_disp_power_wells {
1672 1686
1673#define GEN7_TLB_RD_ADDR _MMIO(0x4700) 1687#define GEN7_TLB_RD_ADDR _MMIO(0x4700)
1674 1688
1689#define GEN9_GAMT_ECO_REG_RW_IA _MMIO(0x4ab0)
1690#define GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS (1<<18)
1691
1675#define GAMT_CHKN_BIT_REG _MMIO(0x4ab8) 1692#define GAMT_CHKN_BIT_REG _MMIO(0x4ab8)
1676#define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1<<28) 1693#define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1<<28)
1677 1694
@@ -2171,6 +2188,9 @@ enum skl_disp_power_wells {
2171 2188
2172#define FBC_LL_SIZE (1536) 2189#define FBC_LL_SIZE (1536)
2173 2190
2191#define FBC_LLC_READ_CTRL _MMIO(0x9044)
2192#define FBC_LLC_FULLY_OPEN (1<<30)
2193
2174/* Framebuffer compression for GM45+ */ 2194/* Framebuffer compression for GM45+ */
2175#define DPFC_CB_BASE _MMIO(0x3200) 2195#define DPFC_CB_BASE _MMIO(0x3200)
2176#define DPFC_CONTROL _MMIO(0x3208) 2196#define DPFC_CONTROL _MMIO(0x3208)
@@ -2461,6 +2481,8 @@ enum skl_disp_power_wells {
2461#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f 2481#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
2462#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0 2482#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
2463 2483
2484#define RAWCLK_FREQ_VLV _MMIO(VLV_DISPLAY_BASE + 0x6024)
2485
2464#define _FPA0 0x6040 2486#define _FPA0 0x6040
2465#define _FPA1 0x6044 2487#define _FPA1 0x6044
2466#define _FPB0 0x6048 2488#define _FPB0 0x6048
@@ -3032,6 +3054,18 @@ enum skl_disp_power_wells {
3032/* Same as Haswell, but 72064 bytes now. */ 3054/* Same as Haswell, but 72064 bytes now. */
3033#define GEN8_CXT_TOTAL_SIZE (18 * PAGE_SIZE) 3055#define GEN8_CXT_TOTAL_SIZE (18 * PAGE_SIZE)
3034 3056
3057enum {
3058 INTEL_ADVANCED_CONTEXT = 0,
3059 INTEL_LEGACY_32B_CONTEXT,
3060 INTEL_ADVANCED_AD_CONTEXT,
3061 INTEL_LEGACY_64B_CONTEXT
3062};
3063
3064#define GEN8_CTX_ADDRESSING_MODE_SHIFT 3
3065#define GEN8_CTX_ADDRESSING_MODE(dev_priv) (USES_FULL_48BIT_PPGTT(dev_priv) ?\
3066 INTEL_LEGACY_64B_CONTEXT : \
3067 INTEL_LEGACY_32B_CONTEXT)
3068
3035#define CHV_CLK_CTL1 _MMIO(0x101100) 3069#define CHV_CLK_CTL1 _MMIO(0x101100)
3036#define VLV_CLK_CTL2 _MMIO(0x101104) 3070#define VLV_CLK_CTL2 _MMIO(0x101104)
3037#define CLK_CTL2_CZCOUNT_30NS_SHIFT 28 3071#define CLK_CTL2_CZCOUNT_30NS_SHIFT 28
@@ -6045,6 +6079,9 @@ enum skl_disp_power_wells {
6045#define FORCE_ARB_IDLE_PLANES (1 << 14) 6079#define FORCE_ARB_IDLE_PLANES (1 << 14)
6046#define SKL_EDP_PSR_FIX_RDWRAP (1 << 3) 6080#define SKL_EDP_PSR_FIX_RDWRAP (1 << 3)
6047 6081
6082#define CHICKEN_PAR2_1 _MMIO(0x42090)
6083#define KVM_CONFIG_CHANGE_NOTIFICATION_SELECT (1 << 14)
6084
6048#define _CHICKEN_PIPESL_1_A 0x420b0 6085#define _CHICKEN_PIPESL_1_A 0x420b0
6049#define _CHICKEN_PIPESL_1_B 0x420b4 6086#define _CHICKEN_PIPESL_1_B 0x420b4
6050#define HSW_FBCQ_DIS (1 << 22) 6087#define HSW_FBCQ_DIS (1 << 22)
@@ -6084,6 +6121,7 @@ enum skl_disp_power_wells {
6084 6121
6085#define FF_SLICE_CS_CHICKEN2 _MMIO(0x20e4) 6122#define FF_SLICE_CS_CHICKEN2 _MMIO(0x20e4)
6086#define GEN9_TSG_BARRIER_ACK_DISABLE (1<<8) 6123#define GEN9_TSG_BARRIER_ACK_DISABLE (1<<8)
6124#define GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE (1<<10)
6087 6125
6088#define GEN9_CS_DEBUG_MODE1 _MMIO(0x20ec) 6126#define GEN9_CS_DEBUG_MODE1 _MMIO(0x20ec)
6089#define GEN9_CTX_PREEMPT_REG _MMIO(0x2248) 6127#define GEN9_CTX_PREEMPT_REG _MMIO(0x2248)
@@ -6108,7 +6146,14 @@ enum skl_disp_power_wells {
6108#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000 6146#define VLV_B0_WA_L3SQCREG1_VALUE 0x00D30000
6109 6147
6110#define GEN8_L3SQCREG1 _MMIO(0xB100) 6148#define GEN8_L3SQCREG1 _MMIO(0xB100)
6111#define BDW_WA_L3SQCREG1_DEFAULT 0x784000 6149/*
 6150 * Note that on CHV the following has an off-by-one error wrt. BSpec.
6151 * Using the formula in BSpec leads to a hang, while the formula here works
6152 * fine and matches the formulas for all other platforms. A BSpec change
6153 * request has been filed to clarify this.
6154 */
6155#define L3_GENERAL_PRIO_CREDITS(x) (((x) >> 1) << 19)
6156#define L3_HIGH_PRIO_CREDITS(x) (((x) >> 1) << 14)
6112 6157
6113#define GEN7_L3CNTLREG1 _MMIO(0xB01C) 6158#define GEN7_L3CNTLREG1 _MMIO(0xB01C)
6114#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C 6159#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C
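As a quick sanity check of the new credit macros, the retired BDW_WA_L3SQCREG1_DEFAULT value decomposes exactly into them (plain arithmetic, not code from this series):

/*
 * L3_GENERAL_PRIO_CREDITS(30) | L3_HIGH_PRIO_CREDITS(2)
 *	= ((30 >> 1) << 19) | ((2 >> 1) << 14)
 *	= 0x780000 | 0x4000
 *	= 0x784000, the old BDW_WA_L3SQCREG1_DEFAULT value.
 */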
@@ -7028,7 +7073,8 @@ enum skl_disp_power_wells {
7028#define GEN6_RPDEUC _MMIO(0xA084) 7073#define GEN6_RPDEUC _MMIO(0xA084)
7029#define GEN6_RPDEUCSW _MMIO(0xA088) 7074#define GEN6_RPDEUCSW _MMIO(0xA088)
7030#define GEN6_RC_STATE _MMIO(0xA094) 7075#define GEN6_RC_STATE _MMIO(0xA094)
7031#define RC6_STATE (1 << 18) 7076#define RC_SW_TARGET_STATE_SHIFT 16
7077#define RC_SW_TARGET_STATE_MASK (7 << RC_SW_TARGET_STATE_SHIFT)
7032#define GEN6_RC1_WAKE_RATE_LIMIT _MMIO(0xA098) 7078#define GEN6_RC1_WAKE_RATE_LIMIT _MMIO(0xA098)
7033#define GEN6_RC6_WAKE_RATE_LIMIT _MMIO(0xA09C) 7079#define GEN6_RC6_WAKE_RATE_LIMIT _MMIO(0xA09C)
7034#define GEN6_RC6pp_WAKE_RATE_LIMIT _MMIO(0xA0A0) 7080#define GEN6_RC6pp_WAKE_RATE_LIMIT _MMIO(0xA0A0)
@@ -7042,13 +7088,17 @@ enum skl_disp_power_wells {
7042#define VLV_RCEDATA _MMIO(0xA0BC) 7088#define VLV_RCEDATA _MMIO(0xA0BC)
7043#define GEN6_RC6pp_THRESHOLD _MMIO(0xA0C0) 7089#define GEN6_RC6pp_THRESHOLD _MMIO(0xA0C0)
7044#define GEN6_PMINTRMSK _MMIO(0xA168) 7090#define GEN6_PMINTRMSK _MMIO(0xA168)
7045#define GEN8_PMINTR_REDIRECT_TO_NON_DISP (1<<31) 7091#define GEN8_PMINTR_REDIRECT_TO_NON_DISP (1<<31)
7092#define GEN8_MISC_CTRL0 _MMIO(0xA180)
7046#define VLV_PWRDWNUPCTL _MMIO(0xA294) 7093#define VLV_PWRDWNUPCTL _MMIO(0xA294)
7047#define GEN9_MEDIA_PG_IDLE_HYSTERESIS _MMIO(0xA0C4) 7094#define GEN9_MEDIA_PG_IDLE_HYSTERESIS _MMIO(0xA0C4)
7048#define GEN9_RENDER_PG_IDLE_HYSTERESIS _MMIO(0xA0C8) 7095#define GEN9_RENDER_PG_IDLE_HYSTERESIS _MMIO(0xA0C8)
7049#define GEN9_PG_ENABLE _MMIO(0xA210) 7096#define GEN9_PG_ENABLE _MMIO(0xA210)
7050#define GEN9_RENDER_PG_ENABLE (1<<0) 7097#define GEN9_RENDER_PG_ENABLE (1<<0)
7051#define GEN9_MEDIA_PG_ENABLE (1<<1) 7098#define GEN9_MEDIA_PG_ENABLE (1<<1)
7099#define GEN8_PUSHBUS_CONTROL _MMIO(0xA248)
7100#define GEN8_PUSHBUS_ENABLE _MMIO(0xA250)
7101#define GEN8_PUSHBUS_SHIFT _MMIO(0xA25C)
7052 7102
7053#define VLV_CHICKEN_3 _MMIO(VLV_DISPLAY_BASE + 0x7040C) 7103#define VLV_CHICKEN_3 _MMIO(VLV_DISPLAY_BASE + 0x7040C)
7054#define PIXEL_OVERLAP_CNT_MASK (3 << 30) 7104#define PIXEL_OVERLAP_CNT_MASK (3 << 30)
@@ -7578,14 +7628,15 @@ enum skl_disp_power_wells {
7578#define CDCLK_FREQ_540 (1<<26) 7628#define CDCLK_FREQ_540 (1<<26)
7579#define CDCLK_FREQ_337_308 (2<<26) 7629#define CDCLK_FREQ_337_308 (2<<26)
7580#define CDCLK_FREQ_675_617 (3<<26) 7630#define CDCLK_FREQ_675_617 (3<<26)
7581#define CDCLK_FREQ_DECIMAL_MASK (0x7ff)
7582
7583#define BXT_CDCLK_CD2X_DIV_SEL_MASK (3<<22) 7631#define BXT_CDCLK_CD2X_DIV_SEL_MASK (3<<22)
7584#define BXT_CDCLK_CD2X_DIV_SEL_1 (0<<22) 7632#define BXT_CDCLK_CD2X_DIV_SEL_1 (0<<22)
7585#define BXT_CDCLK_CD2X_DIV_SEL_1_5 (1<<22) 7633#define BXT_CDCLK_CD2X_DIV_SEL_1_5 (1<<22)
7586#define BXT_CDCLK_CD2X_DIV_SEL_2 (2<<22) 7634#define BXT_CDCLK_CD2X_DIV_SEL_2 (2<<22)
7587#define BXT_CDCLK_CD2X_DIV_SEL_4 (3<<22) 7635#define BXT_CDCLK_CD2X_DIV_SEL_4 (3<<22)
7636#define BXT_CDCLK_CD2X_PIPE(pipe) ((pipe)<<20)
7637#define BXT_CDCLK_CD2X_PIPE_NONE BXT_CDCLK_CD2X_PIPE(3)
7588#define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1<<16) 7638#define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1<<16)
7639#define CDCLK_FREQ_DECIMAL_MASK (0x7ff)
7589 7640
7590/* LCPLL_CTL */ 7641/* LCPLL_CTL */
7591#define LCPLL1_CTL _MMIO(0x46010) 7642#define LCPLL1_CTL _MMIO(0x46010)
@@ -8161,6 +8212,8 @@ enum skl_disp_power_wells {
8161#define _MIPIA_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb05c) 8212#define _MIPIA_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb05c)
8162#define _MIPIC_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb85c) 8213#define _MIPIC_EOT_DISABLE (dev_priv->mipi_mmio_base + 0xb85c)
8163#define MIPI_EOT_DISABLE(port) _MMIO_MIPI(port, _MIPIA_EOT_DISABLE, _MIPIC_EOT_DISABLE) 8214#define MIPI_EOT_DISABLE(port) _MMIO_MIPI(port, _MIPIA_EOT_DISABLE, _MIPIC_EOT_DISABLE)
8215#define BXT_DEFEATURE_DPI_FIFO_CTR (1 << 9)
8216#define BXT_DPHY_DEFEATURE_EN (1 << 8)
8164#define LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 7) 8217#define LP_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 7)
8165#define HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 6) 8218#define HS_RX_TIMEOUT_ERROR_RECOVERY_DISABLE (1 << 6)
8166#define LOW_CONTENTION_RECOVERY_DISABLE (1 << 5) 8219#define LOW_CONTENTION_RECOVERY_DISABLE (1 << 5)
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 34e061a9ef06..5cfe4c7716b4 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -31,7 +31,7 @@
31 31
32static void i915_save_display(struct drm_device *dev) 32static void i915_save_display(struct drm_device *dev)
33{ 33{
34 struct drm_i915_private *dev_priv = dev->dev_private; 34 struct drm_i915_private *dev_priv = to_i915(dev);
35 35
36 /* Display arbitration control */ 36 /* Display arbitration control */
37 if (INTEL_INFO(dev)->gen <= 4) 37 if (INTEL_INFO(dev)->gen <= 4)
@@ -63,7 +63,7 @@ static void i915_save_display(struct drm_device *dev)
63 63
64static void i915_restore_display(struct drm_device *dev) 64static void i915_restore_display(struct drm_device *dev)
65{ 65{
66 struct drm_i915_private *dev_priv = dev->dev_private; 66 struct drm_i915_private *dev_priv = to_i915(dev);
67 u32 mask = 0xffffffff; 67 u32 mask = 0xffffffff;
68 68
69 /* Display arbitration */ 69 /* Display arbitration */
@@ -103,7 +103,7 @@ static void i915_restore_display(struct drm_device *dev)
103 103
104int i915_save_state(struct drm_device *dev) 104int i915_save_state(struct drm_device *dev)
105{ 105{
106 struct drm_i915_private *dev_priv = dev->dev_private; 106 struct drm_i915_private *dev_priv = to_i915(dev);
107 int i; 107 int i;
108 108
109 mutex_lock(&dev->struct_mutex); 109 mutex_lock(&dev->struct_mutex);
@@ -148,7 +148,7 @@ int i915_save_state(struct drm_device *dev)
148 148
149int i915_restore_state(struct drm_device *dev) 149int i915_restore_state(struct drm_device *dev)
150{ 150{
151 struct drm_i915_private *dev_priv = dev->dev_private; 151 struct drm_i915_private *dev_priv = to_i915(dev);
152 int i; 152 int i;
153 153
154 mutex_lock(&dev->struct_mutex); 154 mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 2d576b7ff299..d61829e54f93 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -38,12 +38,12 @@
38static u32 calc_residency(struct drm_device *dev, 38static u32 calc_residency(struct drm_device *dev,
39 i915_reg_t reg) 39 i915_reg_t reg)
40{ 40{
41 struct drm_i915_private *dev_priv = dev->dev_private; 41 struct drm_i915_private *dev_priv = to_i915(dev);
42 u64 raw_time; /* 32b value may overflow during fixed point math */ 42 u64 raw_time; /* 32b value may overflow during fixed point math */
43 u64 units = 128ULL, div = 100000ULL; 43 u64 units = 128ULL, div = 100000ULL;
44 u32 ret; 44 u32 ret;
45 45
46 if (!intel_enable_rc6(dev)) 46 if (!intel_enable_rc6())
47 return 0; 47 return 0;
48 48
49 intel_runtime_pm_get(dev_priv); 49 intel_runtime_pm_get(dev_priv);
@@ -70,8 +70,7 @@ static u32 calc_residency(struct drm_device *dev,
70static ssize_t 70static ssize_t
71show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf) 71show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
72{ 72{
73 struct drm_minor *dminor = dev_to_drm_minor(kdev); 73 return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6());
74 return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
75} 74}
76 75
77static ssize_t 76static ssize_t
@@ -167,7 +166,7 @@ i915_l3_read(struct file *filp, struct kobject *kobj,
167 struct device *dev = kobj_to_dev(kobj); 166 struct device *dev = kobj_to_dev(kobj);
168 struct drm_minor *dminor = dev_to_drm_minor(dev); 167 struct drm_minor *dminor = dev_to_drm_minor(dev);
169 struct drm_device *drm_dev = dminor->dev; 168 struct drm_device *drm_dev = dminor->dev;
170 struct drm_i915_private *dev_priv = drm_dev->dev_private; 169 struct drm_i915_private *dev_priv = to_i915(drm_dev);
171 int slice = (int)(uintptr_t)attr->private; 170 int slice = (int)(uintptr_t)attr->private;
172 int ret; 171 int ret;
173 172
@@ -203,8 +202,8 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
203 struct device *dev = kobj_to_dev(kobj); 202 struct device *dev = kobj_to_dev(kobj);
204 struct drm_minor *dminor = dev_to_drm_minor(dev); 203 struct drm_minor *dminor = dev_to_drm_minor(dev);
205 struct drm_device *drm_dev = dminor->dev; 204 struct drm_device *drm_dev = dminor->dev;
206 struct drm_i915_private *dev_priv = drm_dev->dev_private; 205 struct drm_i915_private *dev_priv = to_i915(drm_dev);
207 struct intel_context *ctx; 206 struct i915_gem_context *ctx;
208 u32 *temp = NULL; /* Just here to make handling failures easy */ 207 u32 *temp = NULL; /* Just here to make handling failures easy */
209 int slice = (int)(uintptr_t)attr->private; 208 int slice = (int)(uintptr_t)attr->private;
210 int ret; 209 int ret;
@@ -228,13 +227,6 @@ i915_l3_write(struct file *filp, struct kobject *kobj,
228 } 227 }
229 } 228 }
230 229
231 ret = i915_gpu_idle(drm_dev);
232 if (ret) {
233 kfree(temp);
234 mutex_unlock(&drm_dev->struct_mutex);
235 return ret;
236 }
237
238 /* TODO: Ideally we really want a GPU reset here to make sure errors 230 /* TODO: Ideally we really want a GPU reset here to make sure errors
239 * aren't propagated. Since I cannot find a stable way to reset the GPU 231 * aren't propagated. Since I cannot find a stable way to reset the GPU
240 * at this point it is left as a TODO. 232 * at this point it is left as a TODO.
@@ -276,7 +268,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev,
276{ 268{
277 struct drm_minor *minor = dev_to_drm_minor(kdev); 269 struct drm_minor *minor = dev_to_drm_minor(kdev);
278 struct drm_device *dev = minor->dev; 270 struct drm_device *dev = minor->dev;
279 struct drm_i915_private *dev_priv = dev->dev_private; 271 struct drm_i915_private *dev_priv = to_i915(dev);
280 int ret; 272 int ret;
281 273
282 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 274 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
@@ -310,7 +302,7 @@ static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
310{ 302{
311 struct drm_minor *minor = dev_to_drm_minor(kdev); 303 struct drm_minor *minor = dev_to_drm_minor(kdev);
312 struct drm_device *dev = minor->dev; 304 struct drm_device *dev = minor->dev;
313 struct drm_i915_private *dev_priv = dev->dev_private; 305 struct drm_i915_private *dev_priv = to_i915(dev);
314 int ret; 306 int ret;
315 307
316 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 308 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
@@ -331,7 +323,7 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
331{ 323{
332 struct drm_minor *minor = dev_to_drm_minor(kdev); 324 struct drm_minor *minor = dev_to_drm_minor(kdev);
333 struct drm_device *dev = minor->dev; 325 struct drm_device *dev = minor->dev;
334 struct drm_i915_private *dev_priv = dev->dev_private; 326 struct drm_i915_private *dev_priv = to_i915(dev);
335 327
336 return snprintf(buf, PAGE_SIZE, 328 return snprintf(buf, PAGE_SIZE,
337 "%d\n", 329 "%d\n",
@@ -342,7 +334,7 @@ static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute
342{ 334{
343 struct drm_minor *minor = dev_to_drm_minor(kdev); 335 struct drm_minor *minor = dev_to_drm_minor(kdev);
344 struct drm_device *dev = minor->dev; 336 struct drm_device *dev = minor->dev;
345 struct drm_i915_private *dev_priv = dev->dev_private; 337 struct drm_i915_private *dev_priv = to_i915(dev);
346 int ret; 338 int ret;
347 339
348 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 340 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
@@ -360,7 +352,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
360{ 352{
361 struct drm_minor *minor = dev_to_drm_minor(kdev); 353 struct drm_minor *minor = dev_to_drm_minor(kdev);
362 struct drm_device *dev = minor->dev; 354 struct drm_device *dev = minor->dev;
363 struct drm_i915_private *dev_priv = dev->dev_private; 355 struct drm_i915_private *dev_priv = to_i915(dev);
364 u32 val; 356 u32 val;
365 ssize_t ret; 357 ssize_t ret;
366 358
@@ -397,7 +389,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev,
397 /* We still need *_set_rps to process the new max_delay and 389 /* We still need *_set_rps to process the new max_delay and
398 * update the interrupt limits and PMINTRMSK even though 390 * update the interrupt limits and PMINTRMSK even though
399 * frequency request may be unchanged. */ 391 * frequency request may be unchanged. */
400 intel_set_rps(dev, val); 392 intel_set_rps(dev_priv, val);
401 393
402 mutex_unlock(&dev_priv->rps.hw_lock); 394 mutex_unlock(&dev_priv->rps.hw_lock);
403 395
@@ -410,7 +402,7 @@ static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute
410{ 402{
411 struct drm_minor *minor = dev_to_drm_minor(kdev); 403 struct drm_minor *minor = dev_to_drm_minor(kdev);
412 struct drm_device *dev = minor->dev; 404 struct drm_device *dev = minor->dev;
413 struct drm_i915_private *dev_priv = dev->dev_private; 405 struct drm_i915_private *dev_priv = to_i915(dev);
414 int ret; 406 int ret;
415 407
416 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 408 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
@@ -428,7 +420,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
428{ 420{
429 struct drm_minor *minor = dev_to_drm_minor(kdev); 421 struct drm_minor *minor = dev_to_drm_minor(kdev);
430 struct drm_device *dev = minor->dev; 422 struct drm_device *dev = minor->dev;
431 struct drm_i915_private *dev_priv = dev->dev_private; 423 struct drm_i915_private *dev_priv = to_i915(dev);
432 u32 val; 424 u32 val;
433 ssize_t ret; 425 ssize_t ret;
434 426
@@ -461,7 +453,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev,
461 /* We still need *_set_rps to process the new min_delay and 453 /* We still need *_set_rps to process the new min_delay and
462 * update the interrupt limits and PMINTRMSK even though 454 * update the interrupt limits and PMINTRMSK even though
463 * frequency request may be unchanged. */ 455 * frequency request may be unchanged. */
464 intel_set_rps(dev, val); 456 intel_set_rps(dev_priv, val);
465 457
466 mutex_unlock(&dev_priv->rps.hw_lock); 458 mutex_unlock(&dev_priv->rps.hw_lock);
467 459
@@ -488,7 +480,7 @@ static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr
488{ 480{
489 struct drm_minor *minor = dev_to_drm_minor(kdev); 481 struct drm_minor *minor = dev_to_drm_minor(kdev);
490 struct drm_device *dev = minor->dev; 482 struct drm_device *dev = minor->dev;
491 struct drm_i915_private *dev_priv = dev->dev_private; 483 struct drm_i915_private *dev_priv = to_i915(dev);
492 u32 val; 484 u32 val;
493 485
494 if (attr == &dev_attr_gt_RP0_freq_mhz) 486 if (attr == &dev_attr_gt_RP0_freq_mhz)
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index dc0def210097..534154e05fbe 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -118,7 +118,7 @@ TRACE_EVENT(i915_gem_shrink,
118 ), 118 ),
119 119
120 TP_fast_assign( 120 TP_fast_assign(
121 __entry->dev = i915->dev->primary->index; 121 __entry->dev = i915->drm.primary->index;
122 __entry->target = target; 122 __entry->target = target;
123 __entry->flags = flags; 123 __entry->flags = flags;
124 ), 124 ),
@@ -462,7 +462,7 @@ TRACE_EVENT(i915_gem_ring_sync_to,
462 ), 462 ),
463 463
464 TP_fast_assign( 464 TP_fast_assign(
465 __entry->dev = from->dev->primary->index; 465 __entry->dev = from->i915->drm.primary->index;
466 __entry->sync_from = from->id; 466 __entry->sync_from = from->id;
467 __entry->sync_to = to_req->engine->id; 467 __entry->sync_to = to_req->engine->id;
468 __entry->seqno = i915_gem_request_get_seqno(req); 468 __entry->seqno = i915_gem_request_get_seqno(req);
@@ -486,13 +486,11 @@ TRACE_EVENT(i915_gem_ring_dispatch,
486 ), 486 ),
487 487
488 TP_fast_assign( 488 TP_fast_assign(
489 struct intel_engine_cs *engine = 489 __entry->dev = req->i915->drm.primary->index;
490 i915_gem_request_get_engine(req); 490 __entry->ring = req->engine->id;
491 __entry->dev = engine->dev->primary->index; 491 __entry->seqno = req->seqno;
492 __entry->ring = engine->id;
493 __entry->seqno = i915_gem_request_get_seqno(req);
494 __entry->flags = flags; 492 __entry->flags = flags;
495 i915_trace_irq_get(engine, req); 493 intel_engine_enable_signaling(req);
496 ), 494 ),
497 495
498 TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x", 496 TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
@@ -511,7 +509,7 @@ TRACE_EVENT(i915_gem_ring_flush,
511 ), 509 ),
512 510
513 TP_fast_assign( 511 TP_fast_assign(
514 __entry->dev = req->engine->dev->primary->index; 512 __entry->dev = req->i915->drm.primary->index;
515 __entry->ring = req->engine->id; 513 __entry->ring = req->engine->id;
516 __entry->invalidate = invalidate; 514 __entry->invalidate = invalidate;
517 __entry->flush = flush; 515 __entry->flush = flush;
@@ -533,11 +531,9 @@ DECLARE_EVENT_CLASS(i915_gem_request,
533 ), 531 ),
534 532
535 TP_fast_assign( 533 TP_fast_assign(
536 struct intel_engine_cs *engine = 534 __entry->dev = req->i915->drm.primary->index;
537 i915_gem_request_get_engine(req); 535 __entry->ring = req->engine->id;
538 __entry->dev = engine->dev->primary->index; 536 __entry->seqno = req->seqno;
539 __entry->ring = engine->id;
540 __entry->seqno = i915_gem_request_get_seqno(req);
541 ), 537 ),
542 538
543 TP_printk("dev=%u, ring=%u, seqno=%u", 539 TP_printk("dev=%u, ring=%u, seqno=%u",
@@ -560,9 +556,9 @@ TRACE_EVENT(i915_gem_request_notify,
560 ), 556 ),
561 557
562 TP_fast_assign( 558 TP_fast_assign(
563 __entry->dev = engine->dev->primary->index; 559 __entry->dev = engine->i915->drm.primary->index;
564 __entry->ring = engine->id; 560 __entry->ring = engine->id;
565 __entry->seqno = engine->get_seqno(engine); 561 __entry->seqno = intel_engine_get_seqno(engine);
566 ), 562 ),
567 563
568 TP_printk("dev=%u, ring=%u, seqno=%u", 564 TP_printk("dev=%u, ring=%u, seqno=%u",
@@ -597,13 +593,11 @@ TRACE_EVENT(i915_gem_request_wait_begin,
597 * less desirable. 593 * less desirable.
598 */ 594 */
599 TP_fast_assign( 595 TP_fast_assign(
600 struct intel_engine_cs *engine = 596 __entry->dev = req->i915->drm.primary->index;
601 i915_gem_request_get_engine(req); 597 __entry->ring = req->engine->id;
602 __entry->dev = engine->dev->primary->index; 598 __entry->seqno = req->seqno;
603 __entry->ring = engine->id;
604 __entry->seqno = i915_gem_request_get_seqno(req);
605 __entry->blocking = 599 __entry->blocking =
606 mutex_is_locked(&engine->dev->struct_mutex); 600 mutex_is_locked(&req->i915->drm.struct_mutex);
607 ), 601 ),
608 602
609 TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s", 603 TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
@@ -740,19 +734,19 @@ DEFINE_EVENT(i915_ppgtt, i915_ppgtt_release,
740 * the context. 734 * the context.
741 */ 735 */
742DECLARE_EVENT_CLASS(i915_context, 736DECLARE_EVENT_CLASS(i915_context,
743 TP_PROTO(struct intel_context *ctx), 737 TP_PROTO(struct i915_gem_context *ctx),
744 TP_ARGS(ctx), 738 TP_ARGS(ctx),
745 739
746 TP_STRUCT__entry( 740 TP_STRUCT__entry(
747 __field(u32, dev) 741 __field(u32, dev)
748 __field(struct intel_context *, ctx) 742 __field(struct i915_gem_context *, ctx)
749 __field(struct i915_address_space *, vm) 743 __field(struct i915_address_space *, vm)
750 ), 744 ),
751 745
752 TP_fast_assign( 746 TP_fast_assign(
753 __entry->ctx = ctx; 747 __entry->ctx = ctx;
754 __entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL; 748 __entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL;
755 __entry->dev = ctx->i915->dev->primary->index; 749 __entry->dev = ctx->i915->drm.primary->index;
756 ), 750 ),
757 751
758 TP_printk("dev=%u, ctx=%p, ctx_vm=%p", 752 TP_printk("dev=%u, ctx=%p, ctx_vm=%p",
@@ -760,12 +754,12 @@ DECLARE_EVENT_CLASS(i915_context,
760) 754)
761 755
762DEFINE_EVENT(i915_context, i915_context_create, 756DEFINE_EVENT(i915_context, i915_context_create,
763 TP_PROTO(struct intel_context *ctx), 757 TP_PROTO(struct i915_gem_context *ctx),
764 TP_ARGS(ctx) 758 TP_ARGS(ctx)
765); 759);
766 760
767DEFINE_EVENT(i915_context, i915_context_free, 761DEFINE_EVENT(i915_context, i915_context_free,
768 TP_PROTO(struct intel_context *ctx), 762 TP_PROTO(struct i915_gem_context *ctx),
769 TP_ARGS(ctx) 763 TP_ARGS(ctx)
770); 764);
771 765
@@ -777,13 +771,13 @@ DEFINE_EVENT(i915_context, i915_context_free,
777 * called only if full ppgtt is enabled. 771 * called only if full ppgtt is enabled.
778 */ 772 */
779TRACE_EVENT(switch_mm, 773TRACE_EVENT(switch_mm,
780 TP_PROTO(struct intel_engine_cs *engine, struct intel_context *to), 774 TP_PROTO(struct intel_engine_cs *engine, struct i915_gem_context *to),
781 775
782 TP_ARGS(engine, to), 776 TP_ARGS(engine, to),
783 777
784 TP_STRUCT__entry( 778 TP_STRUCT__entry(
785 __field(u32, ring) 779 __field(u32, ring)
786 __field(struct intel_context *, to) 780 __field(struct i915_gem_context *, to)
787 __field(struct i915_address_space *, vm) 781 __field(struct i915_address_space *, vm)
788 __field(u32, dev) 782 __field(u32, dev)
789 ), 783 ),
@@ -792,7 +786,7 @@ TRACE_EVENT(switch_mm,
792 __entry->ring = engine->id; 786 __entry->ring = engine->id;
793 __entry->to = to; 787 __entry->to = to;
794 __entry->vm = to->ppgtt? &to->ppgtt->base : NULL; 788 __entry->vm = to->ppgtt? &to->ppgtt->base : NULL;
795 __entry->dev = engine->dev->primary->index; 789 __entry->dev = engine->i915->drm.primary->index;
796 ), 790 ),
797 791
798 TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p", 792 TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p",
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index d02efb8cad4d..f6acb5a0e701 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -53,20 +53,19 @@
53 53
54/** 54/**
55 * i915_check_vgpu - detect virtual GPU 55 * i915_check_vgpu - detect virtual GPU
56 * @dev: drm device * 56 * @dev_priv: i915 device private
57 * 57 *
58 * This function is called at the initialization stage, to detect whether 58 * This function is called at the initialization stage, to detect whether
59 * running on a vGPU. 59 * running on a vGPU.
60 */ 60 */
61void i915_check_vgpu(struct drm_device *dev) 61void i915_check_vgpu(struct drm_i915_private *dev_priv)
62{ 62{
63 struct drm_i915_private *dev_priv = to_i915(dev);
64 uint64_t magic; 63 uint64_t magic;
65 uint32_t version; 64 uint32_t version;
66 65
67 BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE); 66 BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
68 67
69 if (!IS_HASWELL(dev)) 68 if (!IS_HASWELL(dev_priv))
70 return; 69 return;
71 70
72 magic = __raw_i915_read64(dev_priv, vgtif_reg(magic)); 71 magic = __raw_i915_read64(dev_priv, vgtif_reg(magic));
@@ -102,10 +101,13 @@ static struct _balloon_info_ bl_info;
102 * This function is called to deallocate the ballooned-out graphic memory, when 101 * This function is called to deallocate the ballooned-out graphic memory, when
103 * driver is unloaded or when ballooning fails. 102 * driver is unloaded or when ballooning fails.
104 */ 103 */
105void intel_vgt_deballoon(void) 104void intel_vgt_deballoon(struct drm_i915_private *dev_priv)
106{ 105{
107 int i; 106 int i;
108 107
108 if (!intel_vgpu_active(dev_priv))
109 return;
110
109 DRM_DEBUG("VGT deballoon.\n"); 111 DRM_DEBUG("VGT deballoon.\n");
110 112
111 for (i = 0; i < 4; i++) { 113 for (i = 0; i < 4; i++) {
@@ -151,36 +153,35 @@ static int vgt_balloon_space(struct drm_mm *mm,
151 * of its graphic space being zero. Yet there are some portions ballooned out( 153 * of its graphic space being zero. Yet there are some portions ballooned out(
152 * the shadow part, which are marked as reserved by drm allocator). From the 154 * the shadow part, which are marked as reserved by drm allocator). From the
153 * host point of view, the graphic address space is partitioned by multiple 155 * host point of view, the graphic address space is partitioned by multiple
154 * vGPUs in different VMs. 156 * vGPUs in different VMs. ::
155 * 157 *
156 * vGPU1 view Host view 158 * vGPU1 view Host view
157 * 0 ------> +-----------+ +-----------+ 159 * 0 ------> +-----------+ +-----------+
158 * ^ |///////////| | vGPU3 | 160 * ^ |###########| | vGPU3 |
159 * | |///////////| +-----------+ 161 * | |###########| +-----------+
160 * | |///////////| | vGPU2 | 162 * | |###########| | vGPU2 |
161 * | +-----------+ +-----------+ 163 * | +-----------+ +-----------+
162 * mappable GM | available | ==> | vGPU1 | 164 * mappable GM | available | ==> | vGPU1 |
163 * | +-----------+ +-----------+ 165 * | +-----------+ +-----------+
164 * | |///////////| | | 166 * | |###########| | |
165 * v |///////////| | Host | 167 * v |###########| | Host |
166 * +=======+===========+ +===========+ 168 * +=======+===========+ +===========+
167 * ^ |///////////| | vGPU3 | 169 * ^ |###########| | vGPU3 |
168 * | |///////////| +-----------+ 170 * | |###########| +-----------+
169 * | |///////////| | vGPU2 | 171 * | |###########| | vGPU2 |
170 * | +-----------+ +-----------+ 172 * | +-----------+ +-----------+
171 * unmappable GM | available | ==> | vGPU1 | 173 * unmappable GM | available | ==> | vGPU1 |
172 * | +-----------+ +-----------+ 174 * | +-----------+ +-----------+
173 * | |///////////| | | 175 * | |###########| | |
174 * | |///////////| | Host | 176 * | |###########| | Host |
175 * v |///////////| | | 177 * v |###########| | |
176 * total GM size ------> +-----------+ +-----------+ 178 * total GM size ------> +-----------+ +-----------+
177 * 179 *
178 * Returns: 180 * Returns:
179 * zero on success, non-zero if configuration invalid or ballooning failed 181 * zero on success, non-zero if configuration invalid or ballooning failed
180 */ 182 */
181int intel_vgt_balloon(struct drm_device *dev) 183int intel_vgt_balloon(struct drm_i915_private *dev_priv)
182{ 184{
183 struct drm_i915_private *dev_priv = to_i915(dev);
184 struct i915_ggtt *ggtt = &dev_priv->ggtt; 185 struct i915_ggtt *ggtt = &dev_priv->ggtt;
185 unsigned long ggtt_end = ggtt->base.start + ggtt->base.total; 186 unsigned long ggtt_end = ggtt->base.start + ggtt->base.total;
186 187
@@ -188,6 +189,9 @@ int intel_vgt_balloon(struct drm_device *dev)
188 unsigned long unmappable_base, unmappable_size, unmappable_end; 189 unsigned long unmappable_base, unmappable_size, unmappable_end;
189 int ret; 190 int ret;
190 191
192 if (!intel_vgpu_active(dev_priv))
193 return 0;
194
191 mappable_base = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.base)); 195 mappable_base = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.base));
192 mappable_size = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.size)); 196 mappable_size = I915_READ(vgtif_reg(avail_rs.mappable_gmadr.size));
193 unmappable_base = I915_READ(vgtif_reg(avail_rs.nonmappable_gmadr.base)); 197 unmappable_base = I915_READ(vgtif_reg(avail_rs.nonmappable_gmadr.base));
@@ -259,6 +263,6 @@ int intel_vgt_balloon(struct drm_device *dev)
259 263
260err: 264err:
261 DRM_ERROR("VGT balloon fail\n"); 265 DRM_ERROR("VGT balloon fail\n");
262 intel_vgt_deballoon(); 266 intel_vgt_deballoon(dev_priv);
263 return ret; 267 return ret;
264} 268}
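With intel_vgt_balloon() and intel_vgt_deballoon() now taking dev_priv and checking intel_vgpu_active() internally, call sites no longer need their own vGPU guard. A sketch of what a caller can look like after this change; the init/fini wrappers are illustrative and not the actual i915 GGTT code:

/* Sketch only: ballooning is requested unconditionally, the helpers
 * themselves bail out when no vGPU is active.
 */
static int example_ggtt_init(struct drm_i915_private *dev_priv)
{
	int ret;

	/* Returns 0 immediately when intel_vgpu_active() is false. */
	ret = intel_vgt_balloon(dev_priv);
	if (ret)
		return ret;

	return 0;
}

static void example_ggtt_fini(struct drm_i915_private *dev_priv)
{
	/* Likewise a no-op outside a vGPU. */
	intel_vgt_deballoon(dev_priv);
}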
diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h
index 3c83b47b5f69..3c3b2d24e830 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.h
+++ b/drivers/gpu/drm/i915/i915_vgpu.h
@@ -24,94 +24,10 @@
24#ifndef _I915_VGPU_H_ 24#ifndef _I915_VGPU_H_
25#define _I915_VGPU_H_ 25#define _I915_VGPU_H_
26 26
27/* The MMIO offset of the shared info between guest and host emulator */ 27#include "i915_pvinfo.h"
28#define VGT_PVINFO_PAGE 0x78000
29#define VGT_PVINFO_SIZE 0x1000
30 28
31/* 29void i915_check_vgpu(struct drm_i915_private *dev_priv);
32 * The following structure pages are defined in GEN MMIO space 30int intel_vgt_balloon(struct drm_i915_private *dev_priv);
33 * for virtualization. (One page for now) 31void intel_vgt_deballoon(struct drm_i915_private *dev_priv);
34 */
35#define VGT_MAGIC 0x4776544776544776ULL /* 'vGTvGTvG' */
36#define VGT_VERSION_MAJOR 1
37#define VGT_VERSION_MINOR 0
38
39#define INTEL_VGT_IF_VERSION_ENCODE(major, minor) ((major) << 16 | (minor))
40#define INTEL_VGT_IF_VERSION \
41 INTEL_VGT_IF_VERSION_ENCODE(VGT_VERSION_MAJOR, VGT_VERSION_MINOR)
42
43/*
44 * notifications from guest to vgpu device model
45 */
46enum vgt_g2v_type {
47 VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE = 2,
48 VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY,
49 VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE,
50 VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY,
51 VGT_G2V_EXECLIST_CONTEXT_CREATE,
52 VGT_G2V_EXECLIST_CONTEXT_DESTROY,
53 VGT_G2V_MAX,
54};
55
56struct vgt_if {
57 uint64_t magic; /* VGT_MAGIC */
58 uint16_t version_major;
59 uint16_t version_minor;
60 uint32_t vgt_id; /* ID of vGT instance */
61 uint32_t rsv1[12]; /* pad to offset 0x40 */
62 /*
63 * Data structure to describe the balooning info of resources.
64 * Each VM can only have one portion of continuous area for now.
65 * (May support scattered resource in future)
66 * (starting from offset 0x40)
67 */
68 struct {
69 /* Aperture register balooning */
70 struct {
71 uint32_t base;
72 uint32_t size;
73 } mappable_gmadr; /* aperture */
74 /* GMADR register balooning */
75 struct {
76 uint32_t base;
77 uint32_t size;
78 } nonmappable_gmadr; /* non aperture */
79 /* allowed fence registers */
80 uint32_t fence_num;
81 uint32_t rsv2[3];
82 } avail_rs; /* available/assigned resource */
83 uint32_t rsv3[0x200 - 24]; /* pad to half page */
84 /*
85 * The bottom half page is for response from Gfx driver to hypervisor.
86 */
87 uint32_t rsv4;
88 uint32_t display_ready; /* ready for display owner switch */
89
90 uint32_t rsv5[4];
91
92 uint32_t g2v_notify;
93 uint32_t rsv6[7];
94
95 struct {
96 uint32_t lo;
97 uint32_t hi;
98 } pdp[4];
99
100 uint32_t execlist_context_descriptor_lo;
101 uint32_t execlist_context_descriptor_hi;
102
103 uint32_t rsv7[0x200 - 24]; /* pad to one page */
104} __packed;
105
106#define vgtif_reg(x) \
107 _MMIO((VGT_PVINFO_PAGE + (long)&((struct vgt_if *)NULL)->x))
108
109/* vGPU display status to be used by the host side */
110#define VGT_DRV_DISPLAY_NOT_READY 0
111#define VGT_DRV_DISPLAY_READY 1 /* ready for display switch */
112
113extern void i915_check_vgpu(struct drm_device *dev);
114extern int intel_vgt_balloon(struct drm_device *dev);
115extern void intel_vgt_deballoon(void);
116 32
117#endif /* _I915_VGPU_H_ */ 33#endif /* _I915_VGPU_H_ */
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index 50ff90aea721..c5a166752eda 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -191,7 +191,7 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
191 191
192 /* plane scaler case: assign as a plane scaler */ 192 /* plane scaler case: assign as a plane scaler */
193 /* find the plane that set the bit as scaler_user */ 193 /* find the plane that set the bit as scaler_user */
194 plane = drm_state->planes[i]; 194 plane = drm_state->planes[i].ptr;
195 195
196 /* 196 /*
197 * to enable/disable hq mode, add planes that are using scaler 197 * to enable/disable hq mode, add planes that are using scaler
@@ -223,7 +223,8 @@ int intel_atomic_setup_scalers(struct drm_device *dev,
223 continue; 223 continue;
224 } 224 }
225 225
226 plane_state = to_intel_plane_state(drm_state->plane_states[i]); 226 plane_state = intel_atomic_get_existing_plane_state(drm_state,
227 intel_plane);
227 scaler_id = &plane_state->scaler_id; 228 scaler_id = &plane_state->scaler_id;
228 } 229 }
229 230
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index 02a7527ce7bb..6700a7be7f78 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -154,7 +154,7 @@ static bool audio_rate_need_prog(struct intel_crtc *crtc,
154{ 154{
155 if (((mode->clock == TMDS_297M) || 155 if (((mode->clock == TMDS_297M) ||
156 (mode->clock == TMDS_296M)) && 156 (mode->clock == TMDS_296M)) &&
157 intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) 157 intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI))
158 return true; 158 return true;
159 else 159 else
160 return false; 160 return false;
@@ -165,7 +165,7 @@ static bool intel_eld_uptodate(struct drm_connector *connector,
165 i915_reg_t reg_elda, uint32_t bits_elda, 165 i915_reg_t reg_elda, uint32_t bits_elda,
166 i915_reg_t reg_edid) 166 i915_reg_t reg_edid)
167{ 167{
168 struct drm_i915_private *dev_priv = connector->dev->dev_private; 168 struct drm_i915_private *dev_priv = to_i915(connector->dev);
169 uint8_t *eld = connector->eld; 169 uint8_t *eld = connector->eld;
170 uint32_t tmp; 170 uint32_t tmp;
171 int i; 171 int i;
@@ -189,7 +189,7 @@ static bool intel_eld_uptodate(struct drm_connector *connector,
189 189
190static void g4x_audio_codec_disable(struct intel_encoder *encoder) 190static void g4x_audio_codec_disable(struct intel_encoder *encoder)
191{ 191{
192 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 192 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
193 uint32_t eldv, tmp; 193 uint32_t eldv, tmp;
194 194
195 DRM_DEBUG_KMS("Disable audio codec\n"); 195 DRM_DEBUG_KMS("Disable audio codec\n");
@@ -210,7 +210,7 @@ static void g4x_audio_codec_enable(struct drm_connector *connector,
210 struct intel_encoder *encoder, 210 struct intel_encoder *encoder,
211 const struct drm_display_mode *adjusted_mode) 211 const struct drm_display_mode *adjusted_mode)
212{ 212{
213 struct drm_i915_private *dev_priv = connector->dev->dev_private; 213 struct drm_i915_private *dev_priv = to_i915(connector->dev);
214 uint8_t *eld = connector->eld; 214 uint8_t *eld = connector->eld;
215 uint32_t eldv; 215 uint32_t eldv;
216 uint32_t tmp; 216 uint32_t tmp;
@@ -247,7 +247,7 @@ static void g4x_audio_codec_enable(struct drm_connector *connector,
247 247
248static void hsw_audio_codec_disable(struct intel_encoder *encoder) 248static void hsw_audio_codec_disable(struct intel_encoder *encoder)
249{ 249{
250 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 250 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
251 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); 251 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
252 enum pipe pipe = intel_crtc->pipe; 252 enum pipe pipe = intel_crtc->pipe;
253 uint32_t tmp; 253 uint32_t tmp;
@@ -262,7 +262,7 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder)
262 tmp |= AUD_CONFIG_N_PROG_ENABLE; 262 tmp |= AUD_CONFIG_N_PROG_ENABLE;
263 tmp &= ~AUD_CONFIG_UPPER_N_MASK; 263 tmp &= ~AUD_CONFIG_UPPER_N_MASK;
264 tmp &= ~AUD_CONFIG_LOWER_N_MASK; 264 tmp &= ~AUD_CONFIG_LOWER_N_MASK;
265 if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT)) 265 if (intel_crtc_has_dp_encoder(intel_crtc->config))
266 tmp |= AUD_CONFIG_N_VALUE_INDEX; 266 tmp |= AUD_CONFIG_N_VALUE_INDEX;
267 I915_WRITE(HSW_AUD_CFG(pipe), tmp); 267 I915_WRITE(HSW_AUD_CFG(pipe), tmp);
268 268
@@ -279,7 +279,7 @@ static void hsw_audio_codec_enable(struct drm_connector *connector,
279 struct intel_encoder *encoder, 279 struct intel_encoder *encoder,
280 const struct drm_display_mode *adjusted_mode) 280 const struct drm_display_mode *adjusted_mode)
281{ 281{
282 struct drm_i915_private *dev_priv = connector->dev->dev_private; 282 struct drm_i915_private *dev_priv = to_i915(connector->dev);
283 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); 283 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
284 enum pipe pipe = intel_crtc->pipe; 284 enum pipe pipe = intel_crtc->pipe;
285 struct i915_audio_component *acomp = dev_priv->audio_component; 285 struct i915_audio_component *acomp = dev_priv->audio_component;
@@ -328,7 +328,7 @@ static void hsw_audio_codec_enable(struct drm_connector *connector,
328 tmp = I915_READ(HSW_AUD_CFG(pipe)); 328 tmp = I915_READ(HSW_AUD_CFG(pipe));
329 tmp &= ~AUD_CONFIG_N_VALUE_INDEX; 329 tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
330 tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK; 330 tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
331 if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT)) 331 if (intel_crtc_has_dp_encoder(intel_crtc->config))
332 tmp |= AUD_CONFIG_N_VALUE_INDEX; 332 tmp |= AUD_CONFIG_N_VALUE_INDEX;
333 else 333 else
334 tmp |= audio_config_hdmi_pixel_clock(adjusted_mode); 334 tmp |= audio_config_hdmi_pixel_clock(adjusted_mode);
@@ -357,7 +357,7 @@ static void hsw_audio_codec_enable(struct drm_connector *connector,
357 357
358static void ilk_audio_codec_disable(struct intel_encoder *encoder) 358static void ilk_audio_codec_disable(struct intel_encoder *encoder)
359{ 359{
360 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 360 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
361 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); 361 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
362 struct intel_digital_port *intel_dig_port = 362 struct intel_digital_port *intel_dig_port =
363 enc_to_dig_port(&encoder->base); 363 enc_to_dig_port(&encoder->base);
@@ -389,7 +389,7 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder)
389 tmp |= AUD_CONFIG_N_PROG_ENABLE; 389 tmp |= AUD_CONFIG_N_PROG_ENABLE;
390 tmp &= ~AUD_CONFIG_UPPER_N_MASK; 390 tmp &= ~AUD_CONFIG_UPPER_N_MASK;
391 tmp &= ~AUD_CONFIG_LOWER_N_MASK; 391 tmp &= ~AUD_CONFIG_LOWER_N_MASK;
392 if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT)) 392 if (intel_crtc_has_dp_encoder(intel_crtc->config))
393 tmp |= AUD_CONFIG_N_VALUE_INDEX; 393 tmp |= AUD_CONFIG_N_VALUE_INDEX;
394 I915_WRITE(aud_config, tmp); 394 I915_WRITE(aud_config, tmp);
395 395
@@ -405,7 +405,7 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
405 struct intel_encoder *encoder, 405 struct intel_encoder *encoder,
406 const struct drm_display_mode *adjusted_mode) 406 const struct drm_display_mode *adjusted_mode)
407{ 407{
408 struct drm_i915_private *dev_priv = connector->dev->dev_private; 408 struct drm_i915_private *dev_priv = to_i915(connector->dev);
409 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); 409 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
410 struct intel_digital_port *intel_dig_port = 410 struct intel_digital_port *intel_dig_port =
411 enc_to_dig_port(&encoder->base); 411 enc_to_dig_port(&encoder->base);
@@ -475,7 +475,7 @@ static void ilk_audio_codec_enable(struct drm_connector *connector,
475 tmp &= ~AUD_CONFIG_N_VALUE_INDEX; 475 tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
476 tmp &= ~AUD_CONFIG_N_PROG_ENABLE; 476 tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
477 tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK; 477 tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
478 if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT)) 478 if (intel_crtc_has_dp_encoder(intel_crtc->config))
479 tmp |= AUD_CONFIG_N_VALUE_INDEX; 479 tmp |= AUD_CONFIG_N_VALUE_INDEX;
480 else 480 else
481 tmp |= audio_config_hdmi_pixel_clock(adjusted_mode); 481 tmp |= audio_config_hdmi_pixel_clock(adjusted_mode);
@@ -496,7 +496,7 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
496 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; 496 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
497 struct drm_connector *connector; 497 struct drm_connector *connector;
498 struct drm_device *dev = encoder->dev; 498 struct drm_device *dev = encoder->dev;
499 struct drm_i915_private *dev_priv = dev->dev_private; 499 struct drm_i915_private *dev_priv = to_i915(dev);
500 struct i915_audio_component *acomp = dev_priv->audio_component; 500 struct i915_audio_component *acomp = dev_priv->audio_component;
501 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); 501 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
502 enum port port = intel_dig_port->port; 502 enum port port = intel_dig_port->port;
@@ -513,7 +513,7 @@ void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
513 513
514 /* ELD Conn_Type */ 514 /* ELD Conn_Type */
515 connector->eld[5] &= ~(3 << 2); 515 connector->eld[5] &= ~(3 << 2);
516 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) 516 if (intel_crtc_has_dp_encoder(crtc->config))
517 connector->eld[5] |= (1 << 2); 517 connector->eld[5] |= (1 << 2);
518 518
519 connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2; 519 connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
@@ -543,7 +543,7 @@ void intel_audio_codec_disable(struct intel_encoder *intel_encoder)
543{ 543{
544 struct drm_encoder *encoder = &intel_encoder->base; 544 struct drm_encoder *encoder = &intel_encoder->base;
545 struct drm_device *dev = encoder->dev; 545 struct drm_device *dev = encoder->dev;
546 struct drm_i915_private *dev_priv = dev->dev_private; 546 struct drm_i915_private *dev_priv = to_i915(dev);
547 struct i915_audio_component *acomp = dev_priv->audio_component; 547 struct i915_audio_component *acomp = dev_priv->audio_component;
548 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); 548 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
549 enum port port = intel_dig_port->port; 549 enum port port = intel_dig_port->port;
@@ -621,17 +621,11 @@ static void i915_audio_component_codec_wake_override(struct device *dev,
621static int i915_audio_component_get_cdclk_freq(struct device *dev) 621static int i915_audio_component_get_cdclk_freq(struct device *dev)
622{ 622{
623 struct drm_i915_private *dev_priv = dev_to_i915(dev); 623 struct drm_i915_private *dev_priv = dev_to_i915(dev);
624 int ret;
625 624
626 if (WARN_ON_ONCE(!HAS_DDI(dev_priv))) 625 if (WARN_ON_ONCE(!HAS_DDI(dev_priv)))
627 return -ENODEV; 626 return -ENODEV;
628 627
629 intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO); 628 return dev_priv->cdclk_freq;
630 ret = dev_priv->display.get_display_clock_speed(dev_priv->dev);
631
632 intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
633
634 return ret;
635} 629}
636 630
637static int i915_audio_component_sync_audio_rate(struct device *dev, 631static int i915_audio_component_sync_audio_rate(struct device *dev,
@@ -755,14 +749,14 @@ static int i915_audio_component_bind(struct device *i915_dev,
755 if (WARN_ON(acomp->ops || acomp->dev)) 749 if (WARN_ON(acomp->ops || acomp->dev))
756 return -EEXIST; 750 return -EEXIST;
757 751
758 drm_modeset_lock_all(dev_priv->dev); 752 drm_modeset_lock_all(&dev_priv->drm);
759 acomp->ops = &i915_audio_component_ops; 753 acomp->ops = &i915_audio_component_ops;
760 acomp->dev = i915_dev; 754 acomp->dev = i915_dev;
761 BUILD_BUG_ON(MAX_PORTS != I915_MAX_PORTS); 755 BUILD_BUG_ON(MAX_PORTS != I915_MAX_PORTS);
762 for (i = 0; i < ARRAY_SIZE(acomp->aud_sample_rate); i++) 756 for (i = 0; i < ARRAY_SIZE(acomp->aud_sample_rate); i++)
763 acomp->aud_sample_rate[i] = 0; 757 acomp->aud_sample_rate[i] = 0;
764 dev_priv->audio_component = acomp; 758 dev_priv->audio_component = acomp;
765 drm_modeset_unlock_all(dev_priv->dev); 759 drm_modeset_unlock_all(&dev_priv->drm);
766 760
767 return 0; 761 return 0;
768} 762}
@@ -773,11 +767,11 @@ static void i915_audio_component_unbind(struct device *i915_dev,
773 struct i915_audio_component *acomp = data; 767 struct i915_audio_component *acomp = data;
774 struct drm_i915_private *dev_priv = dev_to_i915(i915_dev); 768 struct drm_i915_private *dev_priv = dev_to_i915(i915_dev);
775 769
776 drm_modeset_lock_all(dev_priv->dev); 770 drm_modeset_lock_all(&dev_priv->drm);
777 acomp->ops = NULL; 771 acomp->ops = NULL;
778 acomp->dev = NULL; 772 acomp->dev = NULL;
779 dev_priv->audio_component = NULL; 773 dev_priv->audio_component = NULL;
780 drm_modeset_unlock_all(dev_priv->dev); 774 drm_modeset_unlock_all(&dev_priv->drm);
781} 775}
782 776
783static const struct component_ops i915_audio_component_bind_ops = { 777static const struct component_ops i915_audio_component_bind_ops = {
@@ -805,7 +799,7 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv)
805{ 799{
806 int ret; 800 int ret;
807 801
808 ret = component_add(dev_priv->dev->dev, &i915_audio_component_bind_ops); 802 ret = component_add(dev_priv->drm.dev, &i915_audio_component_bind_ops);
809 if (ret < 0) { 803 if (ret < 0) {
810 DRM_ERROR("failed to add audio component (%d)\n", ret); 804 DRM_ERROR("failed to add audio component (%d)\n", ret);
811 /* continue with reduced functionality */ 805 /* continue with reduced functionality */
@@ -827,6 +821,6 @@ void i915_audio_component_cleanup(struct drm_i915_private *dev_priv)
827 if (!dev_priv->audio_component_registered) 821 if (!dev_priv->audio_component_registered)
828 return; 822 return;
829 823
830 component_del(dev_priv->dev->dev, &i915_audio_component_bind_ops); 824 component_del(dev_priv->drm.dev, &i915_audio_component_bind_ops);
831 dev_priv->audio_component_registered = false; 825 dev_priv->audio_component_registered = false;
832} 826}
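i915_audio_component_get_cdclk_freq() now simply returns the cached dev_priv->cdclk_freq rather than taking the AUDIO power domain and re-reading the clock. Nothing changes for the bound consumer, which still calls through the ops table. A hedged sketch; the wrapper function is illustrative, and only acomp->ops, acomp->dev and the get_cdclk_freq callback come from the component interface used above:

/* Sketch of a bound consumer (e.g. the HDA side) querying CDCLK. */
static int example_query_cdclk(struct i915_audio_component *acomp)
{
	if (!acomp || !acomp->ops || !acomp->ops->get_cdclk_freq)
		return -ENODEV;

	/* Now a cheap read of the cached value on the i915 side; no
	 * display power reference is taken any more. */
	return acomp->ops->get_cdclk_freq(acomp->dev);
}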
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index b9022fa053d6..c6e69e4cfa83 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -218,7 +218,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
218 218
219 dev_priv->vbt.lvds_dither = lvds_options->pixel_dither; 219 dev_priv->vbt.lvds_dither = lvds_options->pixel_dither;
220 220
221 ret = intel_opregion_get_panel_type(dev_priv->dev); 221 ret = intel_opregion_get_panel_type(dev_priv);
222 if (ret >= 0) { 222 if (ret >= 0) {
223 WARN_ON(ret > 0xf); 223 WARN_ON(ret > 0xf);
224 panel_type = ret; 224 panel_type = ret;
@@ -323,6 +323,15 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv,
323 return; 323 return;
324 } 324 }
325 325
326 dev_priv->vbt.backlight.type = INTEL_BACKLIGHT_DISPLAY_DDI;
327 if (bdb->version >= 191 &&
328 get_blocksize(backlight_data) >= sizeof(*backlight_data)) {
329 const struct bdb_lfp_backlight_control_method *method;
330
331 method = &backlight_data->backlight_control[panel_type];
332 dev_priv->vbt.backlight.type = method->type;
333 }
334
326 dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz; 335 dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
327 dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm; 336 dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm;
328 dev_priv->vbt.backlight.min_brightness = entry->min_brightness; 337 dev_priv->vbt.backlight.min_brightness = entry->min_brightness;
@@ -768,6 +777,16 @@ parse_mipi_config(struct drm_i915_private *dev_priv,
768 return; 777 return;
769 } 778 }
770 779
780 /*
781 * These fields are introduced from the VBT version 197 onwards,
782 * so making sure that these bits are set zero in the previous
783 * versions.
784 */
785 if (dev_priv->vbt.dsi.config->dual_link && bdb->version < 197) {
786 dev_priv->vbt.dsi.config->dl_dcs_cabc_ports = 0;
787 dev_priv->vbt.dsi.config->dl_dcs_backlight_ports = 0;
788 }
789
771 /* We have mandatory mipi config blocks. Initialize as generic panel */ 790 /* We have mandatory mipi config blocks. Initialize as generic panel */
772 dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID; 791 dev_priv->vbt.dsi.panel_id = MIPI_DSI_GENERIC_PANEL_ID;
773} 792}
@@ -1407,7 +1426,7 @@ static const struct vbt_header *find_vbt(void __iomem *bios, size_t size)
1407int 1426int
1408intel_bios_init(struct drm_i915_private *dev_priv) 1427intel_bios_init(struct drm_i915_private *dev_priv)
1409{ 1428{
1410 struct pci_dev *pdev = dev_priv->dev->pdev; 1429 struct pci_dev *pdev = dev_priv->drm.pdev;
1411 const struct vbt_header *vbt = dev_priv->opregion.vbt; 1430 const struct vbt_header *vbt = dev_priv->opregion.vbt;
1412 const struct bdb_header *bdb; 1431 const struct bdb_header *bdb;
1413 u8 __iomem *bios = NULL; 1432 u8 __iomem *bios = NULL;
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index ab0ea315eddb..8405b5a367d7 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -30,6 +30,14 @@
30#ifndef _INTEL_BIOS_H_ 30#ifndef _INTEL_BIOS_H_
31#define _INTEL_BIOS_H_ 31#define _INTEL_BIOS_H_
32 32
33enum intel_backlight_type {
34 INTEL_BACKLIGHT_PMIC,
35 INTEL_BACKLIGHT_LPSS,
36 INTEL_BACKLIGHT_DISPLAY_DDI,
37 INTEL_BACKLIGHT_DSI_DCS,
38 INTEL_BACKLIGHT_PANEL_DRIVER_INTERFACE,
39};
40
33struct edp_power_seq { 41struct edp_power_seq {
34 u16 t1_t3; 42 u16 t1_t3;
35 u16 t8; 43 u16 t8;
@@ -113,7 +121,13 @@ struct mipi_config {
113 u16 dual_link:2; 121 u16 dual_link:2;
114 u16 lane_cnt:2; 122 u16 lane_cnt:2;
115 u16 pixel_overlap:3; 123 u16 pixel_overlap:3;
116 u16 rsvd3:9; 124 u16 rgb_flip:1;
125#define DL_DCS_PORT_A 0x00
126#define DL_DCS_PORT_C 0x01
127#define DL_DCS_PORT_A_AND_C 0x02
128 u16 dl_dcs_cabc_ports:2;
129 u16 dl_dcs_backlight_ports:2;
130 u16 rsvd3:4;
117 131
118 u16 rsvd4; 132 u16 rsvd4;
119 133
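The new dl_dcs_cabc_ports and dl_dcs_backlight_ports bitfields use the DL_DCS_PORT_* encoding above to say which DSI port(s) should receive DCS commands on dual-link panels. A purely illustrative decode, assuming a caller wants a simple two-bit port mask; the helper name and mask layout are made up, only the DL_DCS_PORT_* values come from the VBT definition:

/* Illustrative only: map the 2-bit VBT field to a hypothetical port mask. */
static unsigned int example_dcs_backlight_port_mask(u16 dl_dcs_backlight_ports)
{
	switch (dl_dcs_backlight_ports) {
	case DL_DCS_PORT_A:
		return BIT(0);			/* port A only */
	case DL_DCS_PORT_C:
		return BIT(1);			/* port C only */
	case DL_DCS_PORT_A_AND_C:
		return BIT(0) | BIT(1);		/* both ports */
	default:
		return 0;
	}
}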
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c
new file mode 100644
index 000000000000..b074f3d6d127
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c
@@ -0,0 +1,595 @@
1/*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 */
24
25#include <linux/kthread.h>
26
27#include "i915_drv.h"
28
29static void intel_breadcrumbs_fake_irq(unsigned long data)
30{
31 struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
32
33 /*
34 * The timer persists in case we cannot enable interrupts,
35 * or if we have previously seen seqno/interrupt incoherency
36 * ("missed interrupt" syndrome). Here the worker will wake up
37 * every jiffie in order to kick the oldest waiter to do the
38 * coherent seqno check.
39 */
40 rcu_read_lock();
41 if (intel_engine_wakeup(engine))
42 mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
43 rcu_read_unlock();
44}
45
46static void irq_enable(struct intel_engine_cs *engine)
47{
48 /* Enabling the IRQ may miss the generation of the interrupt, but
49 * we still need to force the barrier before reading the seqno,
50 * just in case.
51 */
52 engine->breadcrumbs.irq_posted = true;
53
54 spin_lock_irq(&engine->i915->irq_lock);
55 engine->irq_enable(engine);
56 spin_unlock_irq(&engine->i915->irq_lock);
57}
58
59static void irq_disable(struct intel_engine_cs *engine)
60{
61 spin_lock_irq(&engine->i915->irq_lock);
62 engine->irq_disable(engine);
63 spin_unlock_irq(&engine->i915->irq_lock);
64
65 engine->breadcrumbs.irq_posted = false;
66}
67
68static void __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b)
69{
70 struct intel_engine_cs *engine =
71 container_of(b, struct intel_engine_cs, breadcrumbs);
72 struct drm_i915_private *i915 = engine->i915;
73
74 assert_spin_locked(&b->lock);
75 if (b->rpm_wakelock)
76 return;
77
78 /* Since we are waiting on a request, the GPU should be busy
79 * and should have its own rpm reference. For completeness,
80 * record an rpm reference for ourselves to cover the
81 * interrupt we unmask.
82 */
83 intel_runtime_pm_get_noresume(i915);
84 b->rpm_wakelock = true;
85
86 /* No interrupts? Kick the waiter every jiffie! */
87 if (intel_irqs_enabled(i915)) {
88 if (!test_bit(engine->id, &i915->gpu_error.test_irq_rings))
89 irq_enable(engine);
90 b->irq_enabled = true;
91 }
92
93 if (!b->irq_enabled ||
94 test_bit(engine->id, &i915->gpu_error.missed_irq_rings))
95 mod_timer(&b->fake_irq, jiffies + 1);
96
97 /* Ensure that even if the GPU hangs, we get woken up.
98 *
99 * However, note that if no one is waiting, we never notice
100 * a gpu hang. Eventually, we will have to wait for a resource
101 * held by the GPU and so trigger a hangcheck. In the most
102 * pathological case, this will be upon memory starvation!
103 */
104 i915_queue_hangcheck(i915);
105}
106
107static void __intel_breadcrumbs_disable_irq(struct intel_breadcrumbs *b)
108{
109 struct intel_engine_cs *engine =
110 container_of(b, struct intel_engine_cs, breadcrumbs);
111
112 assert_spin_locked(&b->lock);
113 if (!b->rpm_wakelock)
114 return;
115
116 if (b->irq_enabled) {
117 irq_disable(engine);
118 b->irq_enabled = false;
119 }
120
121 intel_runtime_pm_put(engine->i915);
122 b->rpm_wakelock = false;
123}
124
125static inline struct intel_wait *to_wait(struct rb_node *node)
126{
127 return container_of(node, struct intel_wait, node);
128}
129
130static inline void __intel_breadcrumbs_finish(struct intel_breadcrumbs *b,
131 struct intel_wait *wait)
132{
133 assert_spin_locked(&b->lock);
134
135 /* This request is completed, so remove it from the tree, mark it as
136 * complete, and *then* wake up the associated task.
137 */
138 rb_erase(&wait->node, &b->waiters);
139 RB_CLEAR_NODE(&wait->node);
140
141 wake_up_process(wait->tsk); /* implicit smp_wmb() */
142}
143
144static bool __intel_engine_add_wait(struct intel_engine_cs *engine,
145 struct intel_wait *wait)
146{
147 struct intel_breadcrumbs *b = &engine->breadcrumbs;
148 struct rb_node **p, *parent, *completed;
149 bool first;
150 u32 seqno;
151
152 /* Insert the request into the retirement ordered list
153 * of waiters by walking the rbtree. If we are the oldest
154 * seqno in the tree (the first to be retired), then
155 * set ourselves as the bottom-half.
156 *
157 * As we descend the tree, prune completed branches since we hold the
158 * spinlock we know that the first_waiter must be delayed and can
159 * reduce some of the sequential wake up latency if we take action
160 * ourselves and wake up the completed tasks in parallel. Also, by
161 * removing stale elements in the tree, we may be able to reduce the
162 * ping-pong between the old bottom-half and ourselves as first-waiter.
163 */
164 first = true;
165 parent = NULL;
166 completed = NULL;
167 seqno = intel_engine_get_seqno(engine);
168
169 /* If the request completed before we managed to grab the spinlock,
170 * return now before adding ourselves to the rbtree. We let the
171 * current bottom-half handle any pending wakeups and instead
172 * try and get out of the way quickly.
173 */
174 if (i915_seqno_passed(seqno, wait->seqno)) {
175 RB_CLEAR_NODE(&wait->node);
176 return first;
177 }
178
179 p = &b->waiters.rb_node;
180 while (*p) {
181 parent = *p;
182 if (wait->seqno == to_wait(parent)->seqno) {
183 /* We have multiple waiters on the same seqno, select
184 * the highest priority task (that with the smallest
185 * task->prio) to serve as the bottom-half for this
186 * group.
187 */
188 if (wait->tsk->prio > to_wait(parent)->tsk->prio) {
189 p = &parent->rb_right;
190 first = false;
191 } else {
192 p = &parent->rb_left;
193 }
194 } else if (i915_seqno_passed(wait->seqno,
195 to_wait(parent)->seqno)) {
196 p = &parent->rb_right;
197 if (i915_seqno_passed(seqno, to_wait(parent)->seqno))
198 completed = parent;
199 else
200 first = false;
201 } else {
202 p = &parent->rb_left;
203 }
204 }
205 rb_link_node(&wait->node, parent, p);
206 rb_insert_color(&wait->node, &b->waiters);
207 GEM_BUG_ON(!first && !b->irq_seqno_bh);
208
209 if (completed) {
210 struct rb_node *next = rb_next(completed);
211
212 GEM_BUG_ON(!next && !first);
213 if (next && next != &wait->node) {
214 GEM_BUG_ON(first);
215 b->first_wait = to_wait(next);
216 smp_store_mb(b->irq_seqno_bh, b->first_wait->tsk);
217 /* As there is a delay between reading the current
218 * seqno, processing the completed tasks and selecting
219 * the next waiter, we may have missed the interrupt
220 * and so need for the next bottom-half to wakeup.
221 *
222 * Also as we enable the IRQ, we may miss the
223 * interrupt for that seqno, so we have to wake up
224 * the next bottom-half in order to do a coherent check
225 * in case the seqno passed.
226 */
227 __intel_breadcrumbs_enable_irq(b);
228 if (READ_ONCE(b->irq_posted))
229 wake_up_process(to_wait(next)->tsk);
230 }
231
232 do {
233 struct intel_wait *crumb = to_wait(completed);
234 completed = rb_prev(completed);
235 __intel_breadcrumbs_finish(b, crumb);
236 } while (completed);
237 }
238
239 if (first) {
240 GEM_BUG_ON(rb_first(&b->waiters) != &wait->node);
241 b->first_wait = wait;
242 smp_store_mb(b->irq_seqno_bh, wait->tsk);
243 /* After assigning ourselves as the new bottom-half, we must
244 * perform a cursory check to prevent a missed interrupt.
245 * Either we miss the interrupt whilst programming the hardware,
246 * or if there was a previous waiter (for a later seqno) they
247 * may be woken instead of us (due to the inherent race
248 * in the unlocked read of b->irq_seqno_bh in the irq handler)
249 * and so we miss the wake up.
250 */
251 __intel_breadcrumbs_enable_irq(b);
252 }
253 GEM_BUG_ON(!b->irq_seqno_bh);
254 GEM_BUG_ON(!b->first_wait);
255 GEM_BUG_ON(rb_first(&b->waiters) != &b->first_wait->node);
256
257 return first;
258}
259
260bool intel_engine_add_wait(struct intel_engine_cs *engine,
261 struct intel_wait *wait)
262{
263 struct intel_breadcrumbs *b = &engine->breadcrumbs;
264 bool first;
265
266 spin_lock(&b->lock);
267 first = __intel_engine_add_wait(engine, wait);
268 spin_unlock(&b->lock);
269
270 return first;
271}
272
273void intel_engine_enable_fake_irq(struct intel_engine_cs *engine)
274{
275 mod_timer(&engine->breadcrumbs.fake_irq, jiffies + 1);
276}
277
278static inline bool chain_wakeup(struct rb_node *rb, int priority)
279{
280 return rb && to_wait(rb)->tsk->prio <= priority;
281}
282
283static inline int wakeup_priority(struct intel_breadcrumbs *b,
284 struct task_struct *tsk)
285{
286 if (tsk == b->signaler)
287 return INT_MIN;
288 else
289 return tsk->prio;
290}
291
292void intel_engine_remove_wait(struct intel_engine_cs *engine,
293 struct intel_wait *wait)
294{
295 struct intel_breadcrumbs *b = &engine->breadcrumbs;
296
297 /* Quick check to see if this waiter was already decoupled from
298 * the tree by the bottom-half to avoid contention on the spinlock
299 * by the herd.
300 */
301 if (RB_EMPTY_NODE(&wait->node))
302 return;
303
304 spin_lock(&b->lock);
305
306 if (RB_EMPTY_NODE(&wait->node))
307 goto out_unlock;
308
309 if (b->first_wait == wait) {
310 const int priority = wakeup_priority(b, wait->tsk);
311 struct rb_node *next;
312
313 GEM_BUG_ON(b->irq_seqno_bh != wait->tsk);
314
315 /* We are the current bottom-half. Find the next candidate,
316 * the first waiter in the queue on the remaining oldest
317 * request. As multiple seqnos may complete in the time it
318 * takes us to wake up and find the next waiter, we have to
319 * wake up that waiter for it to perform its own coherent
320 * completion check.
321 */
322 next = rb_next(&wait->node);
323 if (chain_wakeup(next, priority)) {
324 /* If the next waiter is already complete,
325 * wake it up and continue onto the next waiter. So
326 * if have a small herd, they will wake up in parallel
327 * rather than sequentially, which should reduce
328 * the overall latency in waking all the completed
329 * clients.
330 *
331 * However, waking up a chain adds extra latency to
332 * the first_waiter. This is undesirable if that
333 * waiter is a high priority task.
334 */
335 u32 seqno = intel_engine_get_seqno(engine);
336
337 while (i915_seqno_passed(seqno, to_wait(next)->seqno)) {
338 struct rb_node *n = rb_next(next);
339
340 __intel_breadcrumbs_finish(b, to_wait(next));
341 next = n;
342 if (!chain_wakeup(next, priority))
343 break;
344 }
345 }
346
347 if (next) {
348 /* In our haste, we may have completed the first waiter
349 * before we enabled the interrupt. Do so now as we
350 * have a second waiter for a future seqno. Afterwards,
351 * we have to wake up that waiter in case we missed
352 * the interrupt, or if we have to handle an
353 * exception rather than a seqno completion.
354 */
355 b->first_wait = to_wait(next);
356 smp_store_mb(b->irq_seqno_bh, b->first_wait->tsk);
357 if (b->first_wait->seqno != wait->seqno)
358 __intel_breadcrumbs_enable_irq(b);
359 wake_up_process(b->irq_seqno_bh);
360 } else {
361 b->first_wait = NULL;
362 WRITE_ONCE(b->irq_seqno_bh, NULL);
363 __intel_breadcrumbs_disable_irq(b);
364 }
365 } else {
366 GEM_BUG_ON(rb_first(&b->waiters) == &wait->node);
367 }
368
369 GEM_BUG_ON(RB_EMPTY_NODE(&wait->node));
370 rb_erase(&wait->node, &b->waiters);
371
372out_unlock:
373 GEM_BUG_ON(b->first_wait == wait);
374 GEM_BUG_ON(rb_first(&b->waiters) !=
375 (b->first_wait ? &b->first_wait->node : NULL));
376 GEM_BUG_ON(!b->irq_seqno_bh ^ RB_EMPTY_ROOT(&b->waiters));
377 spin_unlock(&b->lock);
378}
379
380static bool signal_complete(struct drm_i915_gem_request *request)
381{
382 if (!request)
383 return false;
384
385 /* If another process served as the bottom-half it may have already
386 * signalled that this wait is already completed.
387 */
388 if (intel_wait_complete(&request->signaling.wait))
389 return true;
390
391 /* Carefully check if the request is complete, giving time for the
392 * seqno to be visible or if the GPU hung.
393 */
394 if (__i915_request_irq_complete(request))
395 return true;
396
397 return false;
398}
399
400static struct drm_i915_gem_request *to_signaler(struct rb_node *rb)
401{
402 return container_of(rb, struct drm_i915_gem_request, signaling.node);
403}
404
405static void signaler_set_rtpriority(void)
406{
407 struct sched_param param = { .sched_priority = 1 };
408
409 sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
410}
411
412static int intel_breadcrumbs_signaler(void *arg)
413{
414 struct intel_engine_cs *engine = arg;
415 struct intel_breadcrumbs *b = &engine->breadcrumbs;
416 struct drm_i915_gem_request *request;
417
418 /* Install ourselves with high priority to reduce signalling latency */
419 signaler_set_rtpriority();
420
421 do {
422 set_current_state(TASK_INTERRUPTIBLE);
423
424 /* We are either woken up by the interrupt bottom-half,
425 * or by a client adding a new signaller. In both cases,
426 * the GPU seqno may have advanced beyond our oldest signal.
427 * If it has, propagate the signal, remove the waiter and
428 * check again with the next oldest signal. Otherwise we
429 * need to wait for a new interrupt from the GPU or for
430 * a new client.
431 */
432 request = READ_ONCE(b->first_signal);
433 if (signal_complete(request)) {
434 /* Wake up all other completed waiters and select the
435 * next bottom-half for the next user interrupt.
436 */
437 intel_engine_remove_wait(engine,
438 &request->signaling.wait);
439
440 /* Find the next oldest signal. Note that as we have
441 * not been holding the lock, another client may
442 * have installed an even older signal than the one
443 * we just completed - so double check we are still
444 * the oldest before picking the next one.
445 */
446 spin_lock(&b->lock);
447 if (request == b->first_signal) {
448 struct rb_node *rb =
449 rb_next(&request->signaling.node);
450 b->first_signal = rb ? to_signaler(rb) : NULL;
451 }
452 rb_erase(&request->signaling.node, &b->signals);
453 spin_unlock(&b->lock);
454
455 i915_gem_request_unreference(request);
456 } else {
457 if (kthread_should_stop())
458 break;
459
460 schedule();
461 }
462 } while (1);
463 __set_current_state(TASK_RUNNING);
464
465 return 0;
466}
467
468void intel_engine_enable_signaling(struct drm_i915_gem_request *request)
469{
470 struct intel_engine_cs *engine = request->engine;
471 struct intel_breadcrumbs *b = &engine->breadcrumbs;
472 struct rb_node *parent, **p;
473 bool first, wakeup;
474
475 if (unlikely(READ_ONCE(request->signaling.wait.tsk)))
476 return;
477
478 spin_lock(&b->lock);
479 if (unlikely(request->signaling.wait.tsk)) {
480 wakeup = false;
481 goto unlock;
482 }
483
484 request->signaling.wait.tsk = b->signaler;
485 request->signaling.wait.seqno = request->seqno;
486 i915_gem_request_reference(request);
487
488 /* First add ourselves into the list of waiters, but register our
489 * bottom-half as the signaller thread. As per usual, only the oldest
490 * waiter (not just signaller) is tasked as the bottom-half waking
491 * up all completed waiters after the user interrupt.
492 *
493 * If we are the oldest waiter, enable the irq (after which we
494 * must double check that the seqno did not complete).
495 */
496 wakeup = __intel_engine_add_wait(engine, &request->signaling.wait);
497
498 /* Now insert ourselves into the retirement ordered list of signals
499 * on this engine. We track the oldest seqno as that will be the
500 * first signal to complete.
501 */
502 parent = NULL;
503 first = true;
504 p = &b->signals.rb_node;
505 while (*p) {
506 parent = *p;
507 if (i915_seqno_passed(request->seqno,
508 to_signaler(parent)->seqno)) {
509 p = &parent->rb_right;
510 first = false;
511 } else {
512 p = &parent->rb_left;
513 }
514 }
515 rb_link_node(&request->signaling.node, parent, p);
516 rb_insert_color(&request->signaling.node, &b->signals);
517 if (first)
518 smp_store_mb(b->first_signal, request);
519
520unlock:
521 spin_unlock(&b->lock);
522
523 if (wakeup)
524 wake_up_process(b->signaler);
525}
526
527int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine)
528{
529 struct intel_breadcrumbs *b = &engine->breadcrumbs;
530 struct task_struct *tsk;
531
532 spin_lock_init(&b->lock);
533 setup_timer(&b->fake_irq,
534 intel_breadcrumbs_fake_irq,
535 (unsigned long)engine);
536
537 /* Spawn a thread to provide a common bottom-half for all signals.
538 * As this is an asynchronous interface we cannot steal the current
539 * task for handling the bottom-half to the user interrupt, therefore
540 * we create a thread to do the coherent seqno dance after the
541 * interrupt and then signal the waitqueue (via the dma-buf/fence).
542 */
543 tsk = kthread_run(intel_breadcrumbs_signaler, engine,
544 "i915/signal:%d", engine->id);
545 if (IS_ERR(tsk))
546 return PTR_ERR(tsk);
547
548 b->signaler = tsk;
549
550 return 0;
551}
552
553void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine)
554{
555 struct intel_breadcrumbs *b = &engine->breadcrumbs;
556
557 if (!IS_ERR_OR_NULL(b->signaler))
558 kthread_stop(b->signaler);
559
560 del_timer_sync(&b->fake_irq);
561}
562
563unsigned int intel_kick_waiters(struct drm_i915_private *i915)
564{
565 struct intel_engine_cs *engine;
566 unsigned int mask = 0;
567
568 /* To avoid the task_struct disappearing beneath us as we wake up
569 * the process, we must first inspect the task_struct->state under the
570 * RCU lock, i.e. as we call wake_up_process() we must be holding the
571 * rcu_read_lock().
572 */
573 rcu_read_lock();
574 for_each_engine(engine, i915)
575 if (unlikely(intel_engine_wakeup(engine)))
576 mask |= intel_engine_flag(engine);
577 rcu_read_unlock();
578
579 return mask;
580}
581
582unsigned int intel_kick_signalers(struct drm_i915_private *i915)
583{
584 struct intel_engine_cs *engine;
585 unsigned int mask = 0;
586
587 for_each_engine(engine, i915) {
588 if (unlikely(READ_ONCE(engine->breadcrumbs.first_signal))) {
589 wake_up_process(engine->breadcrumbs.signaler);
590 mask |= intel_engine_flag(engine);
591 }
592 }
593
594 return mask;
595}
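The new breadcrumbs code centralises seqno waiting: each waiter registers an intel_wait in the per-engine rbtree, only the oldest waiter (the bottom-half) keeps the user interrupt enabled, and it wakes the others as their seqnos complete. A simplified waiter sketch using the API added above; hang handling and interruptible sleeps are omitted and the wrapper function is illustrative:

/* Sketch only: park the current task on an engine seqno and let the
 * breadcrumbs bottom-half (or the fake-irq timer) wake it.
 */
static void example_wait_for_seqno(struct intel_engine_cs *engine, u32 seqno)
{
	struct intel_wait wait;

	wait.tsk = current;
	wait.seqno = seqno;

	/* Returns true if we became the bottom-half (irq now enabled). */
	intel_engine_add_wait(engine, &wait);

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno))
			break;
		schedule();	/* woken by the bottom-half or fake irq */
	}
	__set_current_state(TASK_RUNNING);

	intel_engine_remove_wait(engine, &wait);
}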
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c
index 1b3f97449395..bc0fef3d3335 100644
--- a/drivers/gpu/drm/i915/intel_color.c
+++ b/drivers/gpu/drm/i915/intel_color.c
@@ -96,7 +96,7 @@ static void i9xx_load_csc_matrix(struct drm_crtc_state *crtc_state)
96{ 96{
97 struct drm_crtc *crtc = crtc_state->crtc; 97 struct drm_crtc *crtc = crtc_state->crtc;
98 struct drm_device *dev = crtc->dev; 98 struct drm_device *dev = crtc->dev;
99 struct drm_i915_private *dev_priv = dev->dev_private; 99 struct drm_i915_private *dev_priv = to_i915(dev);
100 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 100 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
101 int i, pipe = intel_crtc->pipe; 101 int i, pipe = intel_crtc->pipe;
102 uint16_t coeffs[9] = { 0, }; 102 uint16_t coeffs[9] = { 0, };
@@ -207,7 +207,7 @@ static void cherryview_load_csc_matrix(struct drm_crtc_state *state)
207{ 207{
208 struct drm_crtc *crtc = state->crtc; 208 struct drm_crtc *crtc = state->crtc;
209 struct drm_device *dev = crtc->dev; 209 struct drm_device *dev = crtc->dev;
210 struct drm_i915_private *dev_priv = dev->dev_private; 210 struct drm_i915_private *dev_priv = to_i915(dev);
211 int pipe = to_intel_crtc(crtc)->pipe; 211 int pipe = to_intel_crtc(crtc)->pipe;
212 uint32_t mode; 212 uint32_t mode;
213 213
@@ -255,7 +255,7 @@ static void cherryview_load_csc_matrix(struct drm_crtc_state *state)
255void intel_color_set_csc(struct drm_crtc_state *crtc_state) 255void intel_color_set_csc(struct drm_crtc_state *crtc_state)
256{ 256{
257 struct drm_device *dev = crtc_state->crtc->dev; 257 struct drm_device *dev = crtc_state->crtc->dev;
258 struct drm_i915_private *dev_priv = dev->dev_private; 258 struct drm_i915_private *dev_priv = to_i915(dev);
259 259
260 if (dev_priv->display.load_csc_matrix) 260 if (dev_priv->display.load_csc_matrix)
261 dev_priv->display.load_csc_matrix(crtc_state); 261 dev_priv->display.load_csc_matrix(crtc_state);
@@ -266,13 +266,13 @@ static void i9xx_load_luts_internal(struct drm_crtc *crtc,
266 struct drm_property_blob *blob) 266 struct drm_property_blob *blob)
267{ 267{
268 struct drm_device *dev = crtc->dev; 268 struct drm_device *dev = crtc->dev;
269 struct drm_i915_private *dev_priv = dev->dev_private; 269 struct drm_i915_private *dev_priv = to_i915(dev);
270 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 270 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
271 enum pipe pipe = intel_crtc->pipe; 271 enum pipe pipe = intel_crtc->pipe;
272 int i; 272 int i;
273 273
274 if (HAS_GMCH_DISPLAY(dev)) { 274 if (HAS_GMCH_DISPLAY(dev)) {
275 if (intel_crtc->config->has_dsi_encoder) 275 if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI))
276 assert_dsi_pll_enabled(dev_priv); 276 assert_dsi_pll_enabled(dev_priv);
277 else 277 else
278 assert_pll_enabled(dev_priv, pipe); 278 assert_pll_enabled(dev_priv, pipe);
@@ -313,7 +313,7 @@ static void haswell_load_luts(struct drm_crtc_state *crtc_state)
313{ 313{
314 struct drm_crtc *crtc = crtc_state->crtc; 314 struct drm_crtc *crtc = crtc_state->crtc;
315 struct drm_device *dev = crtc->dev; 315 struct drm_device *dev = crtc->dev;
316 struct drm_i915_private *dev_priv = dev->dev_private; 316 struct drm_i915_private *dev_priv = to_i915(dev);
317 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 317 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
318 struct intel_crtc_state *intel_crtc_state = 318 struct intel_crtc_state *intel_crtc_state =
319 to_intel_crtc_state(crtc_state); 319 to_intel_crtc_state(crtc_state);
@@ -343,7 +343,7 @@ static void broadwell_load_luts(struct drm_crtc_state *state)
343{ 343{
344 struct drm_crtc *crtc = state->crtc; 344 struct drm_crtc *crtc = state->crtc;
345 struct drm_device *dev = crtc->dev; 345 struct drm_device *dev = crtc->dev;
346 struct drm_i915_private *dev_priv = dev->dev_private; 346 struct drm_i915_private *dev_priv = to_i915(dev);
347 struct intel_crtc_state *intel_state = to_intel_crtc_state(state); 347 struct intel_crtc_state *intel_state = to_intel_crtc_state(state);
348 enum pipe pipe = to_intel_crtc(crtc)->pipe; 348 enum pipe pipe = to_intel_crtc(crtc)->pipe;
349 uint32_t i, lut_size = INTEL_INFO(dev)->color.degamma_lut_size; 349 uint32_t i, lut_size = INTEL_INFO(dev)->color.degamma_lut_size;
@@ -426,7 +426,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
426{ 426{
427 struct drm_crtc *crtc = state->crtc; 427 struct drm_crtc *crtc = state->crtc;
428 struct drm_device *dev = crtc->dev; 428 struct drm_device *dev = crtc->dev;
429 struct drm_i915_private *dev_priv = dev->dev_private; 429 struct drm_i915_private *dev_priv = to_i915(dev);
430 enum pipe pipe = to_intel_crtc(crtc)->pipe; 430 enum pipe pipe = to_intel_crtc(crtc)->pipe;
431 struct drm_color_lut *lut; 431 struct drm_color_lut *lut;
432 uint32_t i, lut_size; 432 uint32_t i, lut_size;
@@ -485,7 +485,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state)
485void intel_color_load_luts(struct drm_crtc_state *crtc_state) 485void intel_color_load_luts(struct drm_crtc_state *crtc_state)
486{ 486{
487 struct drm_device *dev = crtc_state->crtc->dev; 487 struct drm_device *dev = crtc_state->crtc->dev;
488 struct drm_i915_private *dev_priv = dev->dev_private; 488 struct drm_i915_private *dev_priv = to_i915(dev);
489 489
490 dev_priv->display.load_luts(crtc_state); 490 dev_priv->display.load_luts(crtc_state);
491} 491}
@@ -526,7 +526,7 @@ int intel_color_check(struct drm_crtc *crtc,
526void intel_color_init(struct drm_crtc *crtc) 526void intel_color_init(struct drm_crtc *crtc)
527{ 527{
528 struct drm_device *dev = crtc->dev; 528 struct drm_device *dev = crtc->dev;
529 struct drm_i915_private *dev_priv = dev->dev_private; 529 struct drm_i915_private *dev_priv = to_i915(dev);
530 530
531 drm_mode_crtc_set_gamma_size(crtc, 256); 531 drm_mode_crtc_set_gamma_size(crtc, 256);
532 532
@@ -547,7 +547,8 @@ void intel_color_init(struct drm_crtc *crtc)
547 /* Enable color management support when we have degamma & gamma LUTs. */ 547 /* Enable color management support when we have degamma & gamma LUTs. */
548 if (INTEL_INFO(dev)->color.degamma_lut_size != 0 && 548 if (INTEL_INFO(dev)->color.degamma_lut_size != 0 &&
549 INTEL_INFO(dev)->color.gamma_lut_size != 0) 549 INTEL_INFO(dev)->color.gamma_lut_size != 0)
550 drm_helper_crtc_enable_color_mgmt(crtc, 550 drm_crtc_enable_color_mgmt(crtc,
551 INTEL_INFO(dev)->color.degamma_lut_size, 551 INTEL_INFO(dev)->color.degamma_lut_size,
552 true,
552 INTEL_INFO(dev)->color.gamma_lut_size); 553 INTEL_INFO(dev)->color.gamma_lut_size);
553} 554}
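
The intel_color_init() hunk above switches from drm_helper_crtc_enable_color_mgmt() to drm_crtc_enable_color_mgmt(), which inserts an explicit has_ctm flag between the degamma and gamma LUT sizes. A minimal sketch of the new call shape for a hypothetical driver (the LUT sizes are invented for illustration, not i915's real per-platform values):

/* Hypothetical CRTC init path; LUT sizes are placeholders only. */
static void example_color_init(struct drm_crtc *crtc)
{
	drm_mode_crtc_set_gamma_size(crtc, 256);

	/* degamma size, "this CRTC has a CTM/CSC matrix", gamma size */
	drm_crtc_enable_color_mgmt(crtc, 33, true, 1024);
}
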
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 3fbb6fc66451..827b6ef4e9ae 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -67,7 +67,7 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
67 enum pipe *pipe) 67 enum pipe *pipe)
68{ 68{
69 struct drm_device *dev = encoder->base.dev; 69 struct drm_device *dev = encoder->base.dev;
70 struct drm_i915_private *dev_priv = dev->dev_private; 70 struct drm_i915_private *dev_priv = to_i915(dev);
71 struct intel_crt *crt = intel_encoder_to_crt(encoder); 71 struct intel_crt *crt = intel_encoder_to_crt(encoder);
72 enum intel_display_power_domain power_domain; 72 enum intel_display_power_domain power_domain;
73 u32 tmp; 73 u32 tmp;
@@ -98,7 +98,7 @@ out:
98 98
99static unsigned int intel_crt_get_flags(struct intel_encoder *encoder) 99static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
100{ 100{
101 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 101 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
102 struct intel_crt *crt = intel_encoder_to_crt(encoder); 102 struct intel_crt *crt = intel_encoder_to_crt(encoder);
103 u32 tmp, flags = 0; 103 u32 tmp, flags = 0;
104 104
@@ -146,7 +146,7 @@ static void hsw_crt_get_config(struct intel_encoder *encoder,
146static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode) 146static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
147{ 147{
148 struct drm_device *dev = encoder->base.dev; 148 struct drm_device *dev = encoder->base.dev;
149 struct drm_i915_private *dev_priv = dev->dev_private; 149 struct drm_i915_private *dev_priv = to_i915(dev);
150 struct intel_crt *crt = intel_encoder_to_crt(encoder); 150 struct intel_crt *crt = intel_encoder_to_crt(encoder);
151 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 151 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
152 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; 152 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
@@ -281,7 +281,7 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
281{ 281{
282 struct drm_device *dev = connector->dev; 282 struct drm_device *dev = connector->dev;
283 struct intel_crt *crt = intel_attached_crt(connector); 283 struct intel_crt *crt = intel_attached_crt(connector);
284 struct drm_i915_private *dev_priv = dev->dev_private; 284 struct drm_i915_private *dev_priv = to_i915(dev);
285 u32 adpa; 285 u32 adpa;
286 bool ret; 286 bool ret;
287 287
@@ -301,8 +301,10 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
301 301
302 I915_WRITE(crt->adpa_reg, adpa); 302 I915_WRITE(crt->adpa_reg, adpa);
303 303
304 if (wait_for((I915_READ(crt->adpa_reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0, 304 if (intel_wait_for_register(dev_priv,
305 1000)) 305 crt->adpa_reg,
306 ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 0,
307 1000))
306 DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER"); 308 DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
307 309
308 if (turn_off_dac) { 310 if (turn_off_dac) {
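
Because the side-by-side rendering splits it across several lines, the substance of the hunk above is easy to miss: the open-coded wait_for() poll on a masked register is replaced by the intel_wait_for_register() helper. Restated in plain before/after form, taken directly from the hunk:

/* before: open-coded masked-register poll, timeout in ms */
if (wait_for((I915_READ(crt->adpa_reg) &
	      ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0, 1000))
	DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");

/* after: helper takes the register, the mask and the expected value */
if (intel_wait_for_register(dev_priv,
			    crt->adpa_reg,
			    ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 0,
			    1000))
	DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
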
@@ -326,11 +328,26 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
326{ 328{
327 struct drm_device *dev = connector->dev; 329 struct drm_device *dev = connector->dev;
328 struct intel_crt *crt = intel_attached_crt(connector); 330 struct intel_crt *crt = intel_attached_crt(connector);
329 struct drm_i915_private *dev_priv = dev->dev_private; 331 struct drm_i915_private *dev_priv = to_i915(dev);
332 bool reenable_hpd;
330 u32 adpa; 333 u32 adpa;
331 bool ret; 334 bool ret;
332 u32 save_adpa; 335 u32 save_adpa;
333 336
337 /*
338 * Doing a force trigger causes a hpd interrupt to get sent, which can
339 * get us stuck in a loop if we're polling:
340 * - We enable power wells and reset the ADPA
341 * - output_poll_exec does force probe on VGA, triggering a hpd
342 * - HPD handler waits for poll to unlock dev->mode_config.mutex
343 * - output_poll_exec shuts off the ADPA, unlocks
344 * dev->mode_config.mutex
345 * - HPD handler runs, resets ADPA and brings us back to the start
346 *
347 * Just disable HPD interrupts here to prevent this
348 */
349 reenable_hpd = intel_hpd_disable(dev_priv, crt->base.hpd_pin);
350
334 save_adpa = adpa = I915_READ(crt->adpa_reg); 351 save_adpa = adpa = I915_READ(crt->adpa_reg);
335 DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa); 352 DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
336 353
@@ -338,8 +355,10 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
338 355
339 I915_WRITE(crt->adpa_reg, adpa); 356 I915_WRITE(crt->adpa_reg, adpa);
340 357
341 if (wait_for((I915_READ(crt->adpa_reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0, 358 if (intel_wait_for_register(dev_priv,
342 1000)) { 359 crt->adpa_reg,
360 ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 0,
361 1000)) {
343 DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER"); 362 DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
344 I915_WRITE(crt->adpa_reg, save_adpa); 363 I915_WRITE(crt->adpa_reg, save_adpa);
345 } 364 }
@@ -353,6 +372,9 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
353 372
354 DRM_DEBUG_KMS("valleyview hotplug adpa=0x%x, result %d\n", adpa, ret); 373 DRM_DEBUG_KMS("valleyview hotplug adpa=0x%x, result %d\n", adpa, ret);
355 374
375 if (reenable_hpd)
376 intel_hpd_enable(dev_priv, crt->base.hpd_pin);
377
356 return ret; 378 return ret;
357} 379}
358 380
@@ -367,7 +389,7 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
367static bool intel_crt_detect_hotplug(struct drm_connector *connector) 389static bool intel_crt_detect_hotplug(struct drm_connector *connector)
368{ 390{
369 struct drm_device *dev = connector->dev; 391 struct drm_device *dev = connector->dev;
370 struct drm_i915_private *dev_priv = dev->dev_private; 392 struct drm_i915_private *dev_priv = to_i915(dev);
371 u32 stat; 393 u32 stat;
372 bool ret = false; 394 bool ret = false;
373 int i, tries = 0; 395 int i, tries = 0;
@@ -394,9 +416,9 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
394 CRT_HOTPLUG_FORCE_DETECT, 416 CRT_HOTPLUG_FORCE_DETECT,
395 CRT_HOTPLUG_FORCE_DETECT); 417 CRT_HOTPLUG_FORCE_DETECT);
396 /* wait for FORCE_DETECT to go off */ 418 /* wait for FORCE_DETECT to go off */
397 if (wait_for((I915_READ(PORT_HOTPLUG_EN) & 419 if (intel_wait_for_register(dev_priv, PORT_HOTPLUG_EN,
398 CRT_HOTPLUG_FORCE_DETECT) == 0, 420 CRT_HOTPLUG_FORCE_DETECT, 0,
399 1000)) 421 1000))
400 DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off"); 422 DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off");
401 } 423 }
402 424
@@ -449,7 +471,7 @@ static int intel_crt_ddc_get_modes(struct drm_connector *connector,
449static bool intel_crt_detect_ddc(struct drm_connector *connector) 471static bool intel_crt_detect_ddc(struct drm_connector *connector)
450{ 472{
451 struct intel_crt *crt = intel_attached_crt(connector); 473 struct intel_crt *crt = intel_attached_crt(connector);
452 struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private; 474 struct drm_i915_private *dev_priv = to_i915(crt->base.base.dev);
453 struct edid *edid; 475 struct edid *edid;
454 struct i2c_adapter *i2c; 476 struct i2c_adapter *i2c;
455 477
@@ -485,7 +507,7 @@ static enum drm_connector_status
485intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe) 507intel_crt_load_detect(struct intel_crt *crt, uint32_t pipe)
486{ 508{
487 struct drm_device *dev = crt->base.base.dev; 509 struct drm_device *dev = crt->base.base.dev;
488 struct drm_i915_private *dev_priv = dev->dev_private; 510 struct drm_i915_private *dev_priv = to_i915(dev);
489 uint32_t save_bclrpat; 511 uint32_t save_bclrpat;
490 uint32_t save_vtotal; 512 uint32_t save_vtotal;
491 uint32_t vtotal, vactive; 513 uint32_t vtotal, vactive;
@@ -600,7 +622,7 @@ static enum drm_connector_status
600intel_crt_detect(struct drm_connector *connector, bool force) 622intel_crt_detect(struct drm_connector *connector, bool force)
601{ 623{
602 struct drm_device *dev = connector->dev; 624 struct drm_device *dev = connector->dev;
603 struct drm_i915_private *dev_priv = dev->dev_private; 625 struct drm_i915_private *dev_priv = to_i915(dev);
604 struct intel_crt *crt = intel_attached_crt(connector); 626 struct intel_crt *crt = intel_attached_crt(connector);
605 struct intel_encoder *intel_encoder = &crt->base; 627 struct intel_encoder *intel_encoder = &crt->base;
606 enum intel_display_power_domain power_domain; 628 enum intel_display_power_domain power_domain;
@@ -681,7 +703,7 @@ static void intel_crt_destroy(struct drm_connector *connector)
681static int intel_crt_get_modes(struct drm_connector *connector) 703static int intel_crt_get_modes(struct drm_connector *connector)
682{ 704{
683 struct drm_device *dev = connector->dev; 705 struct drm_device *dev = connector->dev;
684 struct drm_i915_private *dev_priv = dev->dev_private; 706 struct drm_i915_private *dev_priv = to_i915(dev);
685 struct intel_crt *crt = intel_attached_crt(connector); 707 struct intel_crt *crt = intel_attached_crt(connector);
686 struct intel_encoder *intel_encoder = &crt->base; 708 struct intel_encoder *intel_encoder = &crt->base;
687 enum intel_display_power_domain power_domain; 709 enum intel_display_power_domain power_domain;
@@ -713,11 +735,11 @@ static int intel_crt_set_property(struct drm_connector *connector,
713 return 0; 735 return 0;
714} 736}
715 737
716static void intel_crt_reset(struct drm_connector *connector) 738void intel_crt_reset(struct drm_encoder *encoder)
717{ 739{
718 struct drm_device *dev = connector->dev; 740 struct drm_device *dev = encoder->dev;
719 struct drm_i915_private *dev_priv = dev->dev_private; 741 struct drm_i915_private *dev_priv = to_i915(dev);
720 struct intel_crt *crt = intel_attached_crt(connector); 742 struct intel_crt *crt = intel_encoder_to_crt(to_intel_encoder(encoder));
721 743
722 if (INTEL_INFO(dev)->gen >= 5) { 744 if (INTEL_INFO(dev)->gen >= 5) {
723 u32 adpa; 745 u32 adpa;
@@ -739,10 +761,11 @@ static void intel_crt_reset(struct drm_connector *connector)
739 */ 761 */
740 762
741static const struct drm_connector_funcs intel_crt_connector_funcs = { 763static const struct drm_connector_funcs intel_crt_connector_funcs = {
742 .reset = intel_crt_reset,
743 .dpms = drm_atomic_helper_connector_dpms, 764 .dpms = drm_atomic_helper_connector_dpms,
744 .detect = intel_crt_detect, 765 .detect = intel_crt_detect,
745 .fill_modes = drm_helper_probe_single_connector_modes, 766 .fill_modes = drm_helper_probe_single_connector_modes,
767 .late_register = intel_connector_register,
768 .early_unregister = intel_connector_unregister,
746 .destroy = intel_crt_destroy, 769 .destroy = intel_crt_destroy,
747 .set_property = intel_crt_set_property, 770 .set_property = intel_crt_set_property,
748 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 771 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
@@ -753,10 +776,10 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = {
753static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = { 776static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = {
754 .mode_valid = intel_crt_mode_valid, 777 .mode_valid = intel_crt_mode_valid,
755 .get_modes = intel_crt_get_modes, 778 .get_modes = intel_crt_get_modes,
756 .best_encoder = intel_best_encoder,
757}; 779};
758 780
759static const struct drm_encoder_funcs intel_crt_enc_funcs = { 781static const struct drm_encoder_funcs intel_crt_enc_funcs = {
782 .reset = intel_crt_reset,
760 .destroy = intel_encoder_destroy, 783 .destroy = intel_encoder_destroy,
761}; 784};
762 785
@@ -791,7 +814,7 @@ void intel_crt_init(struct drm_device *dev)
791 struct drm_connector *connector; 814 struct drm_connector *connector;
792 struct intel_crt *crt; 815 struct intel_crt *crt;
793 struct intel_connector *intel_connector; 816 struct intel_connector *intel_connector;
794 struct drm_i915_private *dev_priv = dev->dev_private; 817 struct drm_i915_private *dev_priv = to_i915(dev);
795 i915_reg_t adpa_reg; 818 i915_reg_t adpa_reg;
796 u32 adpa; 819 u32 adpa;
797 820
@@ -839,7 +862,7 @@ void intel_crt_init(struct drm_device *dev)
839 &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); 862 &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
840 863
841 drm_encoder_init(dev, &crt->base.base, &intel_crt_enc_funcs, 864 drm_encoder_init(dev, &crt->base.base, &intel_crt_enc_funcs,
842 DRM_MODE_ENCODER_DAC, NULL); 865 DRM_MODE_ENCODER_DAC, "CRT");
843 866
844 intel_connector_attach_encoder(intel_connector, &crt->base); 867 intel_connector_attach_encoder(intel_connector, &crt->base);
845 868
@@ -876,12 +899,9 @@ void intel_crt_init(struct drm_device *dev)
876 crt->base.get_hw_state = intel_crt_get_hw_state; 899 crt->base.get_hw_state = intel_crt_get_hw_state;
877 } 900 }
878 intel_connector->get_hw_state = intel_connector_get_hw_state; 901 intel_connector->get_hw_state = intel_connector_get_hw_state;
879 intel_connector->unregister = intel_connector_unregister;
880 902
881 drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); 903 drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
882 904
883 drm_connector_register(connector);
884
885 if (!I915_HAS_HOTPLUG(dev)) 905 if (!I915_HAS_HOTPLUG(dev))
886 intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT; 906 intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
887 907
@@ -902,5 +922,5 @@ void intel_crt_init(struct drm_device *dev)
902 dev_priv->fdi_rx_config = I915_READ(FDI_RX_CTL(PIPE_A)) & fdi_config; 922 dev_priv->fdi_rx_config = I915_READ(FDI_RX_CTL(PIPE_A)) & fdi_config;
903 } 923 }
904 924
905 intel_crt_reset(connector); 925 intel_crt_reset(&crt->base.base);
906} 926}
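
Two related cleanups run through the intel_crt.c changes above: the connector is no longer registered by hand from intel_crt_init(), and intel_crt_reset() becomes an encoder-funcs .reset hook. The registration side now relies on the connector's .late_register/.early_unregister callbacks, which the DRM core drives at device (un)registration time. A reduced sketch of that shape, where example_detect and example_destroy stand in for a driver's own callbacks:

static const struct drm_connector_funcs example_connector_funcs = {
	.dpms = drm_atomic_helper_connector_dpms,
	.detect = example_detect,
	.fill_modes = drm_helper_probe_single_connector_modes,
	/* invoked by the core instead of an explicit
	 * drm_connector_register() call in the driver's init path */
	.late_register = intel_connector_register,
	.early_unregister = intel_connector_unregister,
	.destroy = example_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
};
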
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 2b3b428d9cd2..3edb9580928e 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -286,7 +286,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
286 uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes; 286 uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
287 uint32_t i; 287 uint32_t i;
288 uint32_t *dmc_payload; 288 uint32_t *dmc_payload;
289 uint32_t required_min_version; 289 uint32_t required_version;
290 290
291 if (!fw) 291 if (!fw)
292 return NULL; 292 return NULL;
@@ -303,24 +303,23 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
303 csr->version = css_header->version; 303 csr->version = css_header->version;
304 304
305 if (IS_KABYLAKE(dev_priv)) { 305 if (IS_KABYLAKE(dev_priv)) {
306 required_min_version = KBL_CSR_VERSION_REQUIRED; 306 required_version = KBL_CSR_VERSION_REQUIRED;
307 } else if (IS_SKYLAKE(dev_priv)) { 307 } else if (IS_SKYLAKE(dev_priv)) {
308 required_min_version = SKL_CSR_VERSION_REQUIRED; 308 required_version = SKL_CSR_VERSION_REQUIRED;
309 } else if (IS_BROXTON(dev_priv)) { 309 } else if (IS_BROXTON(dev_priv)) {
310 required_min_version = BXT_CSR_VERSION_REQUIRED; 310 required_version = BXT_CSR_VERSION_REQUIRED;
311 } else { 311 } else {
312 MISSING_CASE(INTEL_REVID(dev_priv)); 312 MISSING_CASE(INTEL_REVID(dev_priv));
313 required_min_version = 0; 313 required_version = 0;
314 } 314 }
315 315
316 if (csr->version < required_min_version) { 316 if (csr->version != required_version) {
317 DRM_INFO("Refusing to load old DMC firmware v%u.%u," 317 DRM_INFO("Refusing to load DMC firmware v%u.%u,"
318 " please upgrade to v%u.%u or later" 318 " please use v%u.%u [" FIRMWARE_URL "].\n",
319 " [" FIRMWARE_URL "].\n",
320 CSR_VERSION_MAJOR(csr->version), 319 CSR_VERSION_MAJOR(csr->version),
321 CSR_VERSION_MINOR(csr->version), 320 CSR_VERSION_MINOR(csr->version),
322 CSR_VERSION_MAJOR(required_min_version), 321 CSR_VERSION_MAJOR(required_version),
323 CSR_VERSION_MINOR(required_min_version)); 322 CSR_VERSION_MINOR(required_version));
324 return NULL; 323 return NULL;
325 } 324 }
326 325
@@ -413,7 +412,7 @@ static void csr_load_work_fn(struct work_struct *work)
413 csr = &dev_priv->csr; 412 csr = &dev_priv->csr;
414 413
415 ret = request_firmware(&fw, dev_priv->csr.fw_path, 414 ret = request_firmware(&fw, dev_priv->csr.fw_path,
416 &dev_priv->dev->pdev->dev); 415 &dev_priv->drm.pdev->dev);
417 if (fw) 416 if (fw)
418 dev_priv->csr.dmc_payload = parse_csr_fw(dev_priv, fw); 417 dev_priv->csr.dmc_payload = parse_csr_fw(dev_priv, fw);
419 418
@@ -427,7 +426,7 @@ static void csr_load_work_fn(struct work_struct *work)
427 CSR_VERSION_MAJOR(csr->version), 426 CSR_VERSION_MAJOR(csr->version),
428 CSR_VERSION_MINOR(csr->version)); 427 CSR_VERSION_MINOR(csr->version));
429 } else { 428 } else {
430 dev_notice(dev_priv->dev->dev, 429 dev_notice(dev_priv->drm.dev,
431 "Failed to load DMC firmware" 430 "Failed to load DMC firmware"
432 " [" FIRMWARE_URL "]," 431 " [" FIRMWARE_URL "],"
433 " disabling runtime power management.\n"); 432 " disabling runtime power management.\n");
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 01e523df363b..dd1d6fe12297 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -318,7 +318,7 @@ static void ddi_get_encoder_port(struct intel_encoder *intel_encoder,
318 default: 318 default:
319 WARN(1, "Invalid DDI encoder type %d\n", intel_encoder->type); 319 WARN(1, "Invalid DDI encoder type %d\n", intel_encoder->type);
320 /* fallthrough and treat as unknown */ 320 /* fallthrough and treat as unknown */
321 case INTEL_OUTPUT_DISPLAYPORT: 321 case INTEL_OUTPUT_DP:
322 case INTEL_OUTPUT_EDP: 322 case INTEL_OUTPUT_EDP:
323 case INTEL_OUTPUT_HDMI: 323 case INTEL_OUTPUT_HDMI:
324 case INTEL_OUTPUT_UNKNOWN: 324 case INTEL_OUTPUT_UNKNOWN:
@@ -482,7 +482,7 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
482 ddi_translations = ddi_translations_edp; 482 ddi_translations = ddi_translations_edp;
483 size = n_edp_entries; 483 size = n_edp_entries;
484 break; 484 break;
485 case INTEL_OUTPUT_DISPLAYPORT: 485 case INTEL_OUTPUT_DP:
486 case INTEL_OUTPUT_HDMI: 486 case INTEL_OUTPUT_HDMI:
487 ddi_translations = ddi_translations_dp; 487 ddi_translations = ddi_translations_dp;
488 size = n_dp_entries; 488 size = n_dp_entries;
@@ -543,7 +543,7 @@ static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
543void hsw_fdi_link_train(struct drm_crtc *crtc) 543void hsw_fdi_link_train(struct drm_crtc *crtc)
544{ 544{
545 struct drm_device *dev = crtc->dev; 545 struct drm_device *dev = crtc->dev;
546 struct drm_i915_private *dev_priv = dev->dev_private; 546 struct drm_i915_private *dev_priv = to_i915(dev);
547 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 547 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
548 struct intel_encoder *encoder; 548 struct intel_encoder *encoder;
549 u32 temp, i, rx_ctl_val; 549 u32 temp, i, rx_ctl_val;
@@ -834,7 +834,7 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
834 if (pipe_config->has_pch_encoder) 834 if (pipe_config->has_pch_encoder)
835 dotclock = intel_dotclock_calculate(pipe_config->port_clock, 835 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
836 &pipe_config->fdi_m_n); 836 &pipe_config->fdi_m_n);
837 else if (pipe_config->has_dp_encoder) 837 else if (intel_crtc_has_dp_encoder(pipe_config))
838 dotclock = intel_dotclock_calculate(pipe_config->port_clock, 838 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
839 &pipe_config->dp_m_n); 839 &pipe_config->dp_m_n);
840 else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp == 36) 840 else if (pipe_config->has_hdmi_sink && pipe_config->pipe_bpp == 36)
@@ -851,7 +851,7 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
851static void skl_ddi_clock_get(struct intel_encoder *encoder, 851static void skl_ddi_clock_get(struct intel_encoder *encoder,
852 struct intel_crtc_state *pipe_config) 852 struct intel_crtc_state *pipe_config)
853{ 853{
854 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 854 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
855 int link_clock = 0; 855 int link_clock = 0;
856 uint32_t dpll_ctl1, dpll; 856 uint32_t dpll_ctl1, dpll;
857 857
@@ -899,7 +899,7 @@ static void skl_ddi_clock_get(struct intel_encoder *encoder,
899static void hsw_ddi_clock_get(struct intel_encoder *encoder, 899static void hsw_ddi_clock_get(struct intel_encoder *encoder,
900 struct intel_crtc_state *pipe_config) 900 struct intel_crtc_state *pipe_config)
901{ 901{
902 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 902 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
903 int link_clock = 0; 903 int link_clock = 0;
904 u32 val, pll; 904 u32 val, pll;
905 905
@@ -948,7 +948,7 @@ static int bxt_calc_pll_link(struct drm_i915_private *dev_priv,
948{ 948{
949 struct intel_shared_dpll *pll; 949 struct intel_shared_dpll *pll;
950 struct intel_dpll_hw_state *state; 950 struct intel_dpll_hw_state *state;
951 intel_clock_t clock; 951 struct dpll clock;
952 952
953 /* For DDI ports we always use a shared PLL. */ 953 /* For DDI ports we always use a shared PLL. */
954 if (WARN_ON(dpll == DPLL_ID_PRIVATE)) 954 if (WARN_ON(dpll == DPLL_ID_PRIVATE))
@@ -971,7 +971,7 @@ static int bxt_calc_pll_link(struct drm_i915_private *dev_priv,
971static void bxt_ddi_clock_get(struct intel_encoder *encoder, 971static void bxt_ddi_clock_get(struct intel_encoder *encoder,
972 struct intel_crtc_state *pipe_config) 972 struct intel_crtc_state *pipe_config)
973{ 973{
974 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 974 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
975 enum port port = intel_ddi_get_encoder_port(encoder); 975 enum port port = intel_ddi_get_encoder_port(encoder);
976 uint32_t dpll = port; 976 uint32_t dpll = port;
977 977
@@ -1061,14 +1061,14 @@ bool intel_ddi_pll_select(struct intel_crtc *intel_crtc,
1061 1061
1062void intel_ddi_set_pipe_settings(struct drm_crtc *crtc) 1062void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
1063{ 1063{
1064 struct drm_i915_private *dev_priv = crtc->dev->dev_private; 1064 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
1065 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1065 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1066 struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); 1066 struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
1067 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 1067 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
1068 int type = intel_encoder->type; 1068 int type = intel_encoder->type;
1069 uint32_t temp; 1069 uint32_t temp;
1070 1070
1071 if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP_MST) { 1071 if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP || type == INTEL_OUTPUT_DP_MST) {
1072 WARN_ON(transcoder_is_dsi(cpu_transcoder)); 1072 WARN_ON(transcoder_is_dsi(cpu_transcoder));
1073 1073
1074 temp = TRANS_MSA_SYNC_CLK; 1074 temp = TRANS_MSA_SYNC_CLK;
@@ -1096,7 +1096,7 @@ void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state)
1096{ 1096{
1097 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1097 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1098 struct drm_device *dev = crtc->dev; 1098 struct drm_device *dev = crtc->dev;
1099 struct drm_i915_private *dev_priv = dev->dev_private; 1099 struct drm_i915_private *dev_priv = to_i915(dev);
1100 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 1100 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
1101 uint32_t temp; 1101 uint32_t temp;
1102 temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); 1102 temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
@@ -1113,7 +1113,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
1113 struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); 1113 struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
1114 struct drm_encoder *encoder = &intel_encoder->base; 1114 struct drm_encoder *encoder = &intel_encoder->base;
1115 struct drm_device *dev = crtc->dev; 1115 struct drm_device *dev = crtc->dev;
1116 struct drm_i915_private *dev_priv = dev->dev_private; 1116 struct drm_i915_private *dev_priv = to_i915(dev);
1117 enum pipe pipe = intel_crtc->pipe; 1117 enum pipe pipe = intel_crtc->pipe;
1118 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 1118 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
1119 enum port port = intel_ddi_get_encoder_port(intel_encoder); 1119 enum port port = intel_ddi_get_encoder_port(intel_encoder);
@@ -1182,7 +1182,7 @@ void intel_ddi_enable_transcoder_func(struct drm_crtc *crtc)
1182 temp |= TRANS_DDI_MODE_SELECT_FDI; 1182 temp |= TRANS_DDI_MODE_SELECT_FDI;
1183 temp |= (intel_crtc->config->fdi_lanes - 1) << 1; 1183 temp |= (intel_crtc->config->fdi_lanes - 1) << 1;
1184 1184
1185 } else if (type == INTEL_OUTPUT_DISPLAYPORT || 1185 } else if (type == INTEL_OUTPUT_DP ||
1186 type == INTEL_OUTPUT_EDP) { 1186 type == INTEL_OUTPUT_EDP) {
1187 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1187 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1188 1188
@@ -1223,7 +1223,7 @@ void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
1223bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector) 1223bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
1224{ 1224{
1225 struct drm_device *dev = intel_connector->base.dev; 1225 struct drm_device *dev = intel_connector->base.dev;
1226 struct drm_i915_private *dev_priv = dev->dev_private; 1226 struct drm_i915_private *dev_priv = to_i915(dev);
1227 struct intel_encoder *intel_encoder = intel_connector->encoder; 1227 struct intel_encoder *intel_encoder = intel_connector->encoder;
1228 int type = intel_connector->base.connector_type; 1228 int type = intel_connector->base.connector_type;
1229 enum port port = intel_ddi_get_encoder_port(intel_encoder); 1229 enum port port = intel_ddi_get_encoder_port(intel_encoder);
@@ -1285,7 +1285,7 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
1285 enum pipe *pipe) 1285 enum pipe *pipe)
1286{ 1286{
1287 struct drm_device *dev = encoder->base.dev; 1287 struct drm_device *dev = encoder->base.dev;
1288 struct drm_i915_private *dev_priv = dev->dev_private; 1288 struct drm_i915_private *dev_priv = to_i915(dev);
1289 enum port port = intel_ddi_get_encoder_port(encoder); 1289 enum port port = intel_ddi_get_encoder_port(encoder);
1290 enum intel_display_power_domain power_domain; 1290 enum intel_display_power_domain power_domain;
1291 u32 tmp; 1291 u32 tmp;
@@ -1342,6 +1342,14 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
1342 DRM_DEBUG_KMS("No pipe for ddi port %c found\n", port_name(port)); 1342 DRM_DEBUG_KMS("No pipe for ddi port %c found\n", port_name(port));
1343 1343
1344out: 1344out:
1345 if (ret && IS_BROXTON(dev_priv)) {
1346 tmp = I915_READ(BXT_PHY_CTL(port));
1347 if ((tmp & (BXT_PHY_LANE_POWERDOWN_ACK |
1348 BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED)
1349 DRM_ERROR("Port %c enabled but PHY powered down? "
1350 "(PHY_CTL %08x)\n", port_name(port), tmp);
1351 }
1352
1345 intel_display_power_put(dev_priv, power_domain); 1353 intel_display_power_put(dev_priv, power_domain);
1346 1354
1347 return ret; 1355 return ret;
@@ -1351,7 +1359,7 @@ void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
1351{ 1359{
1352 struct drm_crtc *crtc = &intel_crtc->base; 1360 struct drm_crtc *crtc = &intel_crtc->base;
1353 struct drm_device *dev = crtc->dev; 1361 struct drm_device *dev = crtc->dev;
1354 struct drm_i915_private *dev_priv = dev->dev_private; 1362 struct drm_i915_private *dev_priv = to_i915(dev);
1355 struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); 1363 struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
1356 enum port port = intel_ddi_get_encoder_port(intel_encoder); 1364 enum port port = intel_ddi_get_encoder_port(intel_encoder);
1357 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 1365 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
@@ -1363,7 +1371,7 @@ void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
1363 1371
1364void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc) 1372void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
1365{ 1373{
1366 struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private; 1374 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
1367 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 1375 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
1368 1376
1369 if (cpu_transcoder != TRANSCODER_EDP) 1377 if (cpu_transcoder != TRANSCODER_EDP)
@@ -1384,7 +1392,7 @@ static void skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
1384 dp_iboost = dev_priv->vbt.ddi_port_info[port].dp_boost_level; 1392 dp_iboost = dev_priv->vbt.ddi_port_info[port].dp_boost_level;
1385 hdmi_iboost = dev_priv->vbt.ddi_port_info[port].hdmi_boost_level; 1393 hdmi_iboost = dev_priv->vbt.ddi_port_info[port].hdmi_boost_level;
1386 1394
1387 if (type == INTEL_OUTPUT_DISPLAYPORT) { 1395 if (type == INTEL_OUTPUT_DP) {
1388 if (dp_iboost) { 1396 if (dp_iboost) {
1389 iboost = dp_iboost; 1397 iboost = dp_iboost;
1390 } else { 1398 } else {
@@ -1442,7 +1450,7 @@ static void bxt_ddi_vswing_sequence(struct drm_i915_private *dev_priv,
1442 if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) { 1450 if (type == INTEL_OUTPUT_EDP && dev_priv->vbt.edp.low_vswing) {
1443 n_entries = ARRAY_SIZE(bxt_ddi_translations_edp); 1451 n_entries = ARRAY_SIZE(bxt_ddi_translations_edp);
1444 ddi_translations = bxt_ddi_translations_edp; 1452 ddi_translations = bxt_ddi_translations_edp;
1445 } else if (type == INTEL_OUTPUT_DISPLAYPORT 1453 } else if (type == INTEL_OUTPUT_DP
1446 || type == INTEL_OUTPUT_EDP) { 1454 || type == INTEL_OUTPUT_EDP) {
1447 n_entries = ARRAY_SIZE(bxt_ddi_translations_dp); 1455 n_entries = ARRAY_SIZE(bxt_ddi_translations_dp);
1448 ddi_translations = bxt_ddi_translations_dp; 1456 ddi_translations = bxt_ddi_translations_dp;
@@ -1616,7 +1624,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
1616 1624
1617 intel_ddi_clk_select(intel_encoder, crtc->config); 1625 intel_ddi_clk_select(intel_encoder, crtc->config);
1618 1626
1619 if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { 1627 if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) {
1620 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1628 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1621 1629
1622 intel_dp_set_link_params(intel_dp, crtc->config); 1630 intel_dp_set_link_params(intel_dp, crtc->config);
@@ -1640,7 +1648,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
1640{ 1648{
1641 struct drm_encoder *encoder = &intel_encoder->base; 1649 struct drm_encoder *encoder = &intel_encoder->base;
1642 struct drm_device *dev = encoder->dev; 1650 struct drm_device *dev = encoder->dev;
1643 struct drm_i915_private *dev_priv = dev->dev_private; 1651 struct drm_i915_private *dev_priv = to_i915(dev);
1644 enum port port = intel_ddi_get_encoder_port(intel_encoder); 1652 enum port port = intel_ddi_get_encoder_port(intel_encoder);
1645 int type = intel_encoder->type; 1653 int type = intel_encoder->type;
1646 uint32_t val; 1654 uint32_t val;
@@ -1661,7 +1669,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
1661 if (wait) 1669 if (wait)
1662 intel_wait_ddi_buf_idle(dev_priv, port); 1670 intel_wait_ddi_buf_idle(dev_priv, port);
1663 1671
1664 if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { 1672 if (type == INTEL_OUTPUT_DP || type == INTEL_OUTPUT_EDP) {
1665 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1673 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1666 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF); 1674 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
1667 intel_edp_panel_vdd_on(intel_dp); 1675 intel_edp_panel_vdd_on(intel_dp);
@@ -1687,7 +1695,7 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
1687 struct drm_crtc *crtc = encoder->crtc; 1695 struct drm_crtc *crtc = encoder->crtc;
1688 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1696 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1689 struct drm_device *dev = encoder->dev; 1697 struct drm_device *dev = encoder->dev;
1690 struct drm_i915_private *dev_priv = dev->dev_private; 1698 struct drm_i915_private *dev_priv = to_i915(dev);
1691 enum port port = intel_ddi_get_encoder_port(intel_encoder); 1699 enum port port = intel_ddi_get_encoder_port(intel_encoder);
1692 int type = intel_encoder->type; 1700 int type = intel_encoder->type;
1693 1701
@@ -1726,7 +1734,7 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
1726 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1734 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1727 int type = intel_encoder->type; 1735 int type = intel_encoder->type;
1728 struct drm_device *dev = encoder->dev; 1736 struct drm_device *dev = encoder->dev;
1729 struct drm_i915_private *dev_priv = dev->dev_private; 1737 struct drm_i915_private *dev_priv = to_i915(dev);
1730 1738
1731 if (intel_crtc->config->has_audio) { 1739 if (intel_crtc->config->has_audio) {
1732 intel_audio_codec_disable(intel_encoder); 1740 intel_audio_codec_disable(intel_encoder);
@@ -1742,9 +1750,11 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
1742 } 1750 }
1743} 1751}
1744 1752
1745static bool broxton_phy_is_enabled(struct drm_i915_private *dev_priv, 1753bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
1746 enum dpio_phy phy) 1754 enum dpio_phy phy)
1747{ 1755{
1756 enum port port;
1757
1748 if (!(I915_READ(BXT_P_CR_GT_DISP_PWRON) & GT_DISPLAY_POWER_ON(phy))) 1758 if (!(I915_READ(BXT_P_CR_GT_DISP_PWRON) & GT_DISPLAY_POWER_ON(phy)))
1749 return false; 1759 return false;
1750 1760
@@ -1770,38 +1780,51 @@ static bool broxton_phy_is_enabled(struct drm_i915_private *dev_priv,
1770 return false; 1780 return false;
1771 } 1781 }
1772 1782
1783 for_each_port_masked(port,
1784 phy == DPIO_PHY0 ? BIT(PORT_B) | BIT(PORT_C) :
1785 BIT(PORT_A)) {
1786 u32 tmp = I915_READ(BXT_PHY_CTL(port));
1787
1788 if (tmp & BXT_PHY_CMNLANE_POWERDOWN_ACK) {
1789 DRM_DEBUG_DRIVER("DDI PHY %d powered, but common lane "
1790 "for port %c powered down "
1791 "(PHY_CTL %08x)\n",
1792 phy, port_name(port), tmp);
1793
1794 return false;
1795 }
1796 }
1797
1773 return true; 1798 return true;
1774} 1799}
1775 1800
1776static u32 broxton_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy) 1801static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
1777{ 1802{
1778 u32 val = I915_READ(BXT_PORT_REF_DW6(phy)); 1803 u32 val = I915_READ(BXT_PORT_REF_DW6(phy));
1779 1804
1780 return (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT; 1805 return (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT;
1781} 1806}
1782 1807
1783static void broxton_phy_wait_grc_done(struct drm_i915_private *dev_priv, 1808static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv,
1784 enum dpio_phy phy) 1809 enum dpio_phy phy)
1785{ 1810{
1786 if (wait_for(I915_READ(BXT_PORT_REF_DW3(phy)) & GRC_DONE, 10)) 1811 if (intel_wait_for_register(dev_priv,
1812 BXT_PORT_REF_DW3(phy),
1813 GRC_DONE, GRC_DONE,
1814 10))
1787 DRM_ERROR("timeout waiting for PHY%d GRC\n", phy); 1815 DRM_ERROR("timeout waiting for PHY%d GRC\n", phy);
1788} 1816}
1789 1817
1790static bool broxton_phy_verify_state(struct drm_i915_private *dev_priv, 1818void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
1791 enum dpio_phy phy);
1792
1793static void broxton_phy_init(struct drm_i915_private *dev_priv,
1794 enum dpio_phy phy)
1795{ 1819{
1796 enum port port; 1820 u32 val;
1797 u32 ports, val;
1798 1821
1799 if (broxton_phy_is_enabled(dev_priv, phy)) { 1822 if (bxt_ddi_phy_is_enabled(dev_priv, phy)) {
1800 /* Still read out the GRC value for state verification */ 1823 /* Still read out the GRC value for state verification */
1801 if (phy == DPIO_PHY0) 1824 if (phy == DPIO_PHY0)
1802 dev_priv->bxt_phy_grc = broxton_get_grc(dev_priv, phy); 1825 dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv, phy);
1803 1826
1804 if (broxton_phy_verify_state(dev_priv, phy)) { 1827 if (bxt_ddi_phy_verify_state(dev_priv, phy)) {
1805 DRM_DEBUG_DRIVER("DDI PHY %d already enabled, " 1828 DRM_DEBUG_DRIVER("DDI PHY %d already enabled, "
1806 "won't reprogram it\n", phy); 1829 "won't reprogram it\n", phy);
1807 1830
@@ -1810,8 +1833,6 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv,
1810 1833
1811 DRM_DEBUG_DRIVER("DDI PHY %d enabled with invalid state, " 1834 DRM_DEBUG_DRIVER("DDI PHY %d enabled with invalid state, "
1812 "force reprogramming it\n", phy); 1835 "force reprogramming it\n", phy);
1813 } else {
1814 DRM_DEBUG_DRIVER("DDI PHY %d not enabled, enabling it\n", phy);
1815 } 1836 }
1816 1837
1817 val = I915_READ(BXT_P_CR_GT_DISP_PWRON); 1838 val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
@@ -1831,28 +1852,6 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv,
1831 DRM_ERROR("timeout during PHY%d power on\n", phy); 1852 DRM_ERROR("timeout during PHY%d power on\n", phy);
1832 } 1853 }
1833 1854
1834 if (phy == DPIO_PHY0)
1835 ports = BIT(PORT_B) | BIT(PORT_C);
1836 else
1837 ports = BIT(PORT_A);
1838
1839 for_each_port_masked(port, ports) {
1840 int lane;
1841
1842 for (lane = 0; lane < 4; lane++) {
1843 val = I915_READ(BXT_PORT_TX_DW14_LN(port, lane));
1844 /*
1845 * Note that on CHV this flag is called UPAR, but has
1846 * the same function.
1847 */
1848 val &= ~LATENCY_OPTIM;
1849 if (lane != 1)
1850 val |= LATENCY_OPTIM;
1851
1852 I915_WRITE(BXT_PORT_TX_DW14_LN(port, lane), val);
1853 }
1854 }
1855
1856 /* Program PLL Rcomp code offset */ 1855 /* Program PLL Rcomp code offset */
1857 val = I915_READ(BXT_PORT_CL1CM_DW9(phy)); 1856 val = I915_READ(BXT_PORT_CL1CM_DW9(phy));
1858 val &= ~IREF0RC_OFFSET_MASK; 1857 val &= ~IREF0RC_OFFSET_MASK;
@@ -1899,10 +1898,7 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv,
1899 * the corresponding calibrated value from PHY1, and disable 1898 * the corresponding calibrated value from PHY1, and disable
1900 * the automatic calibration on PHY0. 1899 * the automatic calibration on PHY0.
1901 */ 1900 */
1902 broxton_phy_wait_grc_done(dev_priv, DPIO_PHY1); 1901 val = dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv, DPIO_PHY1);
1903
1904 val = dev_priv->bxt_phy_grc = broxton_get_grc(dev_priv,
1905 DPIO_PHY1);
1906 grc_code = val << GRC_CODE_FAST_SHIFT | 1902 grc_code = val << GRC_CODE_FAST_SHIFT |
1907 val << GRC_CODE_SLOW_SHIFT | 1903 val << GRC_CODE_SLOW_SHIFT |
1908 val; 1904 val;
@@ -1912,31 +1908,16 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv,
1912 val |= GRC_DIS | GRC_RDY_OVRD; 1908 val |= GRC_DIS | GRC_RDY_OVRD;
1913 I915_WRITE(BXT_PORT_REF_DW8(DPIO_PHY0), val); 1909 I915_WRITE(BXT_PORT_REF_DW8(DPIO_PHY0), val);
1914 } 1910 }
1915 /*
1916 * During PHY1 init delay waiting for GRC calibration to finish, since
1917 * it can happen in parallel with the subsequent PHY0 init.
1918 */
1919 1911
1920 val = I915_READ(BXT_PHY_CTL_FAMILY(phy)); 1912 val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
1921 val |= COMMON_RESET_DIS; 1913 val |= COMMON_RESET_DIS;
1922 I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val); 1914 I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
1923}
1924
1925void broxton_ddi_phy_init(struct drm_i915_private *dev_priv)
1926{
1927 /* Enable PHY1 first since it provides Rcomp for PHY0 */
1928 broxton_phy_init(dev_priv, DPIO_PHY1);
1929 broxton_phy_init(dev_priv, DPIO_PHY0);
1930 1915
1931 /* 1916 if (phy == DPIO_PHY1)
1932 * If BIOS enabled only PHY0 and not PHY1, we skipped waiting for the 1917 bxt_phy_wait_grc_done(dev_priv, DPIO_PHY1);
1933 * PHY1 GRC calibration to finish, so wait for it here.
1934 */
1935 broxton_phy_wait_grc_done(dev_priv, DPIO_PHY1);
1936} 1918}
1937 1919
1938static void broxton_phy_uninit(struct drm_i915_private *dev_priv, 1920void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
1939 enum dpio_phy phy)
1940{ 1921{
1941 uint32_t val; 1922 uint32_t val;
1942 1923
@@ -1949,12 +1930,6 @@ static void broxton_phy_uninit(struct drm_i915_private *dev_priv,
1949 I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val); 1930 I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
1950} 1931}
1951 1932
1952void broxton_ddi_phy_uninit(struct drm_i915_private *dev_priv)
1953{
1954 broxton_phy_uninit(dev_priv, DPIO_PHY1);
1955 broxton_phy_uninit(dev_priv, DPIO_PHY0);
1956}
1957
1958static bool __printf(6, 7) 1933static bool __printf(6, 7)
1959__phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy, 1934__phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1960 i915_reg_t reg, u32 mask, u32 expected, 1935 i915_reg_t reg, u32 mask, u32 expected,
@@ -1982,11 +1957,9 @@ __phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
1982 return false; 1957 return false;
1983} 1958}
1984 1959
1985static bool broxton_phy_verify_state(struct drm_i915_private *dev_priv, 1960bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
1986 enum dpio_phy phy) 1961 enum dpio_phy phy)
1987{ 1962{
1988 enum port port;
1989 u32 ports;
1990 uint32_t mask; 1963 uint32_t mask;
1991 bool ok; 1964 bool ok;
1992 1965
@@ -1994,27 +1967,11 @@ static bool broxton_phy_verify_state(struct drm_i915_private *dev_priv,
1994 __phy_reg_verify_state(dev_priv, phy, reg, mask, exp, fmt, \ 1967 __phy_reg_verify_state(dev_priv, phy, reg, mask, exp, fmt, \
1995 ## __VA_ARGS__) 1968 ## __VA_ARGS__)
1996 1969
1997 /* We expect the PHY to be always enabled */ 1970 if (!bxt_ddi_phy_is_enabled(dev_priv, phy))
1998 if (!broxton_phy_is_enabled(dev_priv, phy))
1999 return false; 1971 return false;
2000 1972
2001 ok = true; 1973 ok = true;
2002 1974
2003 if (phy == DPIO_PHY0)
2004 ports = BIT(PORT_B) | BIT(PORT_C);
2005 else
2006 ports = BIT(PORT_A);
2007
2008 for_each_port_masked(port, ports) {
2009 int lane;
2010
2011 for (lane = 0; lane < 4; lane++)
2012 ok &= _CHK(BXT_PORT_TX_DW14_LN(port, lane),
2013 LATENCY_OPTIM,
2014 lane != 1 ? LATENCY_OPTIM : 0,
2015 "BXT_PORT_TX_DW14_LN(%d, %d)", port, lane);
2016 }
2017
2018 /* PLL Rcomp code offset */ 1975 /* PLL Rcomp code offset */
2019 ok &= _CHK(BXT_PORT_CL1CM_DW9(phy), 1976 ok &= _CHK(BXT_PORT_CL1CM_DW9(phy),
2020 IREF0RC_OFFSET_MASK, 0xe4 << IREF0RC_OFFSET_SHIFT, 1977 IREF0RC_OFFSET_MASK, 0xe4 << IREF0RC_OFFSET_SHIFT,
@@ -2058,11 +2015,65 @@ static bool broxton_phy_verify_state(struct drm_i915_private *dev_priv,
2058#undef _CHK 2015#undef _CHK
2059} 2016}
2060 2017
2061void broxton_ddi_phy_verify_state(struct drm_i915_private *dev_priv) 2018static uint8_t
2019bxt_ddi_phy_calc_lane_lat_optim_mask(struct intel_encoder *encoder,
2020 struct intel_crtc_state *pipe_config)
2062{ 2021{
2063 if (!broxton_phy_verify_state(dev_priv, DPIO_PHY0) || 2022 switch (pipe_config->lane_count) {
2064 !broxton_phy_verify_state(dev_priv, DPIO_PHY1)) 2023 case 1:
2065 i915_report_error(dev_priv, "DDI PHY state mismatch\n"); 2024 return 0;
2025 case 2:
2026 return BIT(2) | BIT(0);
2027 case 4:
2028 return BIT(3) | BIT(2) | BIT(0);
2029 default:
2030 MISSING_CASE(pipe_config->lane_count);
2031
2032 return 0;
2033 }
2034}
2035
2036static void bxt_ddi_pre_pll_enable(struct intel_encoder *encoder)
2037{
2038 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2039 struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
2040 enum port port = dport->port;
2041 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
2042 int lane;
2043
2044 for (lane = 0; lane < 4; lane++) {
2045 u32 val = I915_READ(BXT_PORT_TX_DW14_LN(port, lane));
2046
2047 /*
2048 * Note that on CHV this flag is called UPAR, but has
2049 * the same function.
2050 */
2051 val &= ~LATENCY_OPTIM;
2052 if (intel_crtc->config->lane_lat_optim_mask & BIT(lane))
2053 val |= LATENCY_OPTIM;
2054
2055 I915_WRITE(BXT_PORT_TX_DW14_LN(port, lane), val);
2056 }
2057}
2058
2059static uint8_t
2060bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
2061{
2062 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2063 struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
2064 enum port port = dport->port;
2065 int lane;
2066 uint8_t mask;
2067
2068 mask = 0;
2069 for (lane = 0; lane < 4; lane++) {
2070 u32 val = I915_READ(BXT_PORT_TX_DW14_LN(port, lane));
2071
2072 if (val & LATENCY_OPTIM)
2073 mask |= BIT(lane);
2074 }
2075
2076 return mask;
2066} 2077}
2067 2078
2068void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp) 2079void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
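
The block above replaces Broxton's hard-coded per-lane latency-optimization programming with a mask: bxt_ddi_phy_calc_lane_lat_optim_mask() derives it from the lane count at compute_config time, and bxt_ddi_pre_pll_enable() applies it lane by lane. A rough worked sketch of what that means for a 4-lane link (the actual register write is elided):

static void example_apply_lane_lat_optim(u8 lane_lat_optim_mask)
{
	int lane;

	for (lane = 0; lane < 4; lane++) {
		/* For lane_count == 4 the mask is BIT(3)|BIT(2)|BIT(0),
		 * so lanes 0, 2 and 3 get LATENCY_OPTIM and lane 1 does
		 * not -- the same outcome as the old "lane != 1" rule
		 * that the earlier hunks removed from the PHY init path. */
		bool enable = lane_lat_optim_mask & BIT(lane);

		(void)enable;	/* BXT_PORT_TX_DW14_LN write elided */
	}
}
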
@@ -2113,7 +2124,7 @@ void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
2113 2124
2114void intel_ddi_fdi_disable(struct drm_crtc *crtc) 2125void intel_ddi_fdi_disable(struct drm_crtc *crtc)
2115{ 2126{
2116 struct drm_i915_private *dev_priv = crtc->dev->dev_private; 2127 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
2117 struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); 2128 struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
2118 uint32_t val; 2129 uint32_t val;
2119 2130
@@ -2146,7 +2157,7 @@ void intel_ddi_fdi_disable(struct drm_crtc *crtc)
2146void intel_ddi_get_config(struct intel_encoder *encoder, 2157void intel_ddi_get_config(struct intel_encoder *encoder,
2147 struct intel_crtc_state *pipe_config) 2158 struct intel_crtc_state *pipe_config)
2148{ 2159{
2149 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 2160 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2150 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); 2161 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
2151 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; 2162 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
2152 struct intel_hdmi *intel_hdmi; 2163 struct intel_hdmi *intel_hdmi;
@@ -2200,7 +2211,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
2200 break; 2211 break;
2201 case TRANS_DDI_MODE_SELECT_DP_SST: 2212 case TRANS_DDI_MODE_SELECT_DP_SST:
2202 case TRANS_DDI_MODE_SELECT_DP_MST: 2213 case TRANS_DDI_MODE_SELECT_DP_MST:
2203 pipe_config->has_dp_encoder = true;
2204 pipe_config->lane_count = 2214 pipe_config->lane_count =
2205 ((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1; 2215 ((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1;
2206 intel_dp_get_m_n(intel_crtc, pipe_config); 2216 intel_dp_get_m_n(intel_crtc, pipe_config);
@@ -2236,13 +2246,19 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
2236 } 2246 }
2237 2247
2238 intel_ddi_clock_get(encoder, pipe_config); 2248 intel_ddi_clock_get(encoder, pipe_config);
2249
2250 if (IS_BROXTON(dev_priv))
2251 pipe_config->lane_lat_optim_mask =
2252 bxt_ddi_phy_get_lane_lat_optim_mask(encoder);
2239} 2253}
2240 2254
2241static bool intel_ddi_compute_config(struct intel_encoder *encoder, 2255static bool intel_ddi_compute_config(struct intel_encoder *encoder,
2242 struct intel_crtc_state *pipe_config) 2256 struct intel_crtc_state *pipe_config)
2243{ 2257{
2258 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2244 int type = encoder->type; 2259 int type = encoder->type;
2245 int port = intel_ddi_get_encoder_port(encoder); 2260 int port = intel_ddi_get_encoder_port(encoder);
2261 int ret;
2246 2262
2247 WARN(type == INTEL_OUTPUT_UNKNOWN, "compute_config() on unknown output!\n"); 2263 WARN(type == INTEL_OUTPUT_UNKNOWN, "compute_config() on unknown output!\n");
2248 2264
@@ -2250,9 +2266,17 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder,
2250 pipe_config->cpu_transcoder = TRANSCODER_EDP; 2266 pipe_config->cpu_transcoder = TRANSCODER_EDP;
2251 2267
2252 if (type == INTEL_OUTPUT_HDMI) 2268 if (type == INTEL_OUTPUT_HDMI)
2253 return intel_hdmi_compute_config(encoder, pipe_config); 2269 ret = intel_hdmi_compute_config(encoder, pipe_config);
2254 else 2270 else
2255 return intel_dp_compute_config(encoder, pipe_config); 2271 ret = intel_dp_compute_config(encoder, pipe_config);
2272
2273 if (IS_BROXTON(dev_priv) && ret)
2274 pipe_config->lane_lat_optim_mask =
2275 bxt_ddi_phy_calc_lane_lat_optim_mask(encoder,
2276 pipe_config);
2277
2278 return ret;
2279
2256} 2280}
2257 2281
2258static const struct drm_encoder_funcs intel_ddi_funcs = { 2282static const struct drm_encoder_funcs intel_ddi_funcs = {
@@ -2297,7 +2321,7 @@ intel_ddi_init_hdmi_connector(struct intel_digital_port *intel_dig_port)
2297 2321
2298void intel_ddi_init(struct drm_device *dev, enum port port) 2322void intel_ddi_init(struct drm_device *dev, enum port port)
2299{ 2323{
2300 struct drm_i915_private *dev_priv = dev->dev_private; 2324 struct drm_i915_private *dev_priv = to_i915(dev);
2301 struct intel_digital_port *intel_dig_port; 2325 struct intel_digital_port *intel_dig_port;
2302 struct intel_encoder *intel_encoder; 2326 struct intel_encoder *intel_encoder;
2303 struct drm_encoder *encoder; 2327 struct drm_encoder *encoder;
@@ -2347,10 +2371,12 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
2347 encoder = &intel_encoder->base; 2371 encoder = &intel_encoder->base;
2348 2372
2349 drm_encoder_init(dev, encoder, &intel_ddi_funcs, 2373 drm_encoder_init(dev, encoder, &intel_ddi_funcs,
2350 DRM_MODE_ENCODER_TMDS, NULL); 2374 DRM_MODE_ENCODER_TMDS, "DDI %c", port_name(port));
2351 2375
2352 intel_encoder->compute_config = intel_ddi_compute_config; 2376 intel_encoder->compute_config = intel_ddi_compute_config;
2353 intel_encoder->enable = intel_enable_ddi; 2377 intel_encoder->enable = intel_enable_ddi;
2378 if (IS_BROXTON(dev_priv))
2379 intel_encoder->pre_pll_enable = bxt_ddi_pre_pll_enable;
2354 intel_encoder->pre_enable = intel_ddi_pre_enable; 2380 intel_encoder->pre_enable = intel_ddi_pre_enable;
2355 intel_encoder->disable = intel_disable_ddi; 2381 intel_encoder->disable = intel_disable_ddi;
2356 intel_encoder->post_disable = intel_ddi_post_disable; 2382 intel_encoder->post_disable = intel_ddi_post_disable;
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
new file mode 100644
index 000000000000..cba137f9ad3e
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -0,0 +1,388 @@
1/*
2 * Copyright © 2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 */
24
25#include "i915_drv.h"
26
27void intel_device_info_dump(struct drm_i915_private *dev_priv)
28{
29 const struct intel_device_info *info = &dev_priv->info;
30
31#define PRINT_S(name) "%s"
32#define SEP_EMPTY
33#define PRINT_FLAG(name) info->name ? #name "," : ""
34#define SEP_COMMA ,
35 DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags="
36 DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
37 info->gen,
38 dev_priv->drm.pdev->device,
39 dev_priv->drm.pdev->revision,
40 DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
41#undef PRINT_S
42#undef SEP_EMPTY
43#undef PRINT_FLAG
44#undef SEP_COMMA
45}
46
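
The PRINT_S/PRINT_FLAG pair in intel_device_info_dump() is an X-macro trick: the same DEV_INFO_FOR_EACH_FLAG() list is expanded once into "%s" conversions for the format string and once into the matching argument list. A cut-down illustration with a hypothetical two-entry list:

/* Hypothetical, reduced flag list for illustration only. */
#define EXAMPLE_FOR_EACH_FLAG(func, sep) \
	func(is_mobile) sep \
	func(has_llc)

#define PRINT_S(name) "%s"
#define SEP_EMPTY
#define PRINT_FLAG(name) info->name ? #name "," : ""
#define SEP_COMMA ,

/* EXAMPLE_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY)   -> "%s" "%s"
 * EXAMPLE_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA)
 *   -> info->is_mobile ? "is_mobile," : "",
 *      info->has_llc   ? "has_llc,"   : ""
 * so the DRM_DEBUG_DRIVER() call ends up printing only the names of
 * the flags that are actually set, each followed by a comma. */
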
47static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv)
48{
49 struct intel_device_info *info = mkwrite_device_info(dev_priv);
50 u32 fuse, eu_dis;
51
52 fuse = I915_READ(CHV_FUSE_GT);
53
54 info->slice_total = 1;
55
56 if (!(fuse & CHV_FGT_DISABLE_SS0)) {
57 info->subslice_per_slice++;
58 eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
59 CHV_FGT_EU_DIS_SS0_R1_MASK);
60 info->eu_total += 8 - hweight32(eu_dis);
61 }
62
63 if (!(fuse & CHV_FGT_DISABLE_SS1)) {
64 info->subslice_per_slice++;
65 eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK |
66 CHV_FGT_EU_DIS_SS1_R1_MASK);
67 info->eu_total += 8 - hweight32(eu_dis);
68 }
69
70 info->subslice_total = info->subslice_per_slice;
71 /*
72 * CHV expected to always have a uniform distribution of EU
73 * across subslices.
74 */
75 info->eu_per_subslice = info->subslice_total ?
76 info->eu_total / info->subslice_total :
77 0;
78 /*
79 * CHV supports subslice power gating on devices with more than
80 * one subslice, and supports EU power gating on devices with
81 * more than one EU pair per subslice.
82 */
83 info->has_slice_pg = 0;
84 info->has_subslice_pg = (info->subslice_total > 1);
85 info->has_eu_pg = (info->eu_per_subslice > 2);
86}
87
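
A rough worked example of the CHV fuse decode above, with an invented fuse value in which subslice 1 is disabled and subslice 0 has two EUs fused off:

/*
 *   subslice_per_slice = 1                 (only SS0 counts)
 *   eu_total           = 8 - hweight32(eu_dis) = 8 - 2 = 6
 *   subslice_total     = 1
 *   eu_per_subslice    = 6 / 1 = 6
 *   has_subslice_pg    = false             (needs more than one subslice)
 *   has_eu_pg          = true              (more than 2 EUs per subslice)
 */
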
88static void gen9_sseu_info_init(struct drm_i915_private *dev_priv)
89{
90 struct intel_device_info *info = mkwrite_device_info(dev_priv);
91 int s_max = 3, ss_max = 4, eu_max = 8;
92 int s, ss;
93 u32 fuse2, s_enable, ss_disable, eu_disable;
94 u8 eu_mask = 0xff;
95
96 fuse2 = I915_READ(GEN8_FUSE2);
97 s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
98 ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >> GEN9_F2_SS_DIS_SHIFT;
99
100 info->slice_total = hweight32(s_enable);
101 /*
102 * The subslice disable field is global, i.e. it applies
103 * to each of the enabled slices.
104 */
105 info->subslice_per_slice = ss_max - hweight32(ss_disable);
106 info->subslice_total = info->slice_total * info->subslice_per_slice;
107
108 /*
109 * Iterate through enabled slices and subslices to
110 * count the total enabled EU.
111 */
112 for (s = 0; s < s_max; s++) {
113 if (!(s_enable & BIT(s)))
114 /* skip disabled slice */
115 continue;
116
117 eu_disable = I915_READ(GEN9_EU_DISABLE(s));
118 for (ss = 0; ss < ss_max; ss++) {
119 int eu_per_ss;
120
121 if (ss_disable & BIT(ss))
122 /* skip disabled subslice */
123 continue;
124
125 eu_per_ss = eu_max - hweight8((eu_disable >> (ss*8)) &
126 eu_mask);
127
128 /*
129 * Record which subslice(s) has(have) 7 EUs. we
130 * can tune the hash used to spread work among
131 * subslices if they are unbalanced.
132 */
133 if (eu_per_ss == 7)
134 info->subslice_7eu[s] |= BIT(ss);
135
136 info->eu_total += eu_per_ss;
137 }
138 }
139
140 /*
141 * SKL is expected to always have a uniform distribution
142 * of EU across subslices with the exception that any one
143 * EU in any one subslice may be fused off for die
144 * recovery. BXT is expected to be perfectly uniform in EU
145 * distribution.
146 */
147 info->eu_per_subslice = info->subslice_total ?
148 DIV_ROUND_UP(info->eu_total,
149 info->subslice_total) : 0;
150 /*
151 * SKL supports slice power gating on devices with more than
152 * one slice, and supports EU power gating on devices with
153 * more than one EU pair per subslice. BXT supports subslice
154 * power gating on devices with more than one subslice, and
155 * supports EU power gating on devices with more than one EU
156 * pair per subslice.
157 */
158 info->has_slice_pg =
159 (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
160 info->slice_total > 1;
161 info->has_subslice_pg =
162 IS_BROXTON(dev_priv) && info->subslice_total > 1;
163 info->has_eu_pg = info->eu_per_subslice > 2;
164
165 if (IS_BROXTON(dev_priv)) {
166#define IS_SS_DISABLED(_ss_disable, ss) (_ss_disable & BIT(ss))
167 /*
168 * There is a HW issue in 2x6 fused down parts that requires
169 * Pooled EU to be enabled as a WA. The pool configuration
170 * changes depending upon which subslice is fused down. This
171 * doesn't affect if the device has all 3 subslices enabled.
172 */
173 /* WaEnablePooledEuFor2x6:bxt */
174 info->has_pooled_eu = ((info->subslice_per_slice == 3) ||
175 (info->subslice_per_slice == 2 &&
176 INTEL_REVID(dev_priv) < BXT_REVID_C0));
177
178 info->min_eu_in_pool = 0;
179 if (info->has_pooled_eu) {
180 if (IS_SS_DISABLED(ss_disable, 0) ||
181 IS_SS_DISABLED(ss_disable, 2))
182 info->min_eu_in_pool = 3;
183 else if (IS_SS_DISABLED(ss_disable, 1))
184 info->min_eu_in_pool = 6;
185 else
186 info->min_eu_in_pool = 9;
187 }
188#undef IS_SS_DISABLED
189 }
190}

static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);
	const int s_max = 3, ss_max = 3, eu_max = 8;
	int s, ss;
	u32 fuse2, eu_disable[s_max], s_enable, ss_disable;

	fuse2 = I915_READ(GEN8_FUSE2);
	s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
	ss_disable = (fuse2 & GEN8_F2_SS_DIS_MASK) >> GEN8_F2_SS_DIS_SHIFT;

	eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
	eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
			((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
			 (32 - GEN8_EU_DIS0_S1_SHIFT));
	eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
			((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
			 (32 - GEN8_EU_DIS1_S2_SHIFT));

	info->slice_total = hweight32(s_enable);

	/*
	 * The subslice disable field is global, i.e. it applies
	 * to each of the enabled slices.
	 */
	info->subslice_per_slice = ss_max - hweight32(ss_disable);
	info->subslice_total = info->slice_total * info->subslice_per_slice;

	/*
	 * Iterate through enabled slices and subslices to
	 * count the total enabled EU.
	 */
	for (s = 0; s < s_max; s++) {
		if (!(s_enable & (0x1 << s)))
			/* skip disabled slice */
			continue;

		for (ss = 0; ss < ss_max; ss++) {
			u32 n_disabled;

			if (ss_disable & (0x1 << ss))
				/* skip disabled subslice */
				continue;

			n_disabled = hweight8(eu_disable[s] >> (ss * eu_max));

			/*
			 * Record which subslices have 7 EUs.
			 */
			if (eu_max - n_disabled == 7)
				info->subslice_7eu[s] |= 1 << ss;

			info->eu_total += eu_max - n_disabled;
		}
	}

	/*
	 * BDW is expected to always have a uniform distribution of EU across
	 * subslices with the exception that any one EU in any one subslice may
	 * be fused off for die recovery.
	 */
	info->eu_per_subslice = info->subslice_total ?
		DIV_ROUND_UP(info->eu_total, info->subslice_total) : 0;

	/*
	 * BDW supports slice power gating on devices with more than
	 * one slice.
	 */
	info->has_slice_pg = (info->slice_total > 1);
	info->has_subslice_pg = 0;
	info->has_eu_pg = 0;
}

/*
 * Determine various intel_device_info fields at runtime.
 *
 * Use it when either:
 *   - it's judged too laborious to fill n static structures with the limit
 *     when a simple if statement does the job,
 *   - run-time checks (eg read fuse/strap registers) are needed.
 *
 * This function needs to be called:
 *   - after the MMIO has been setup as we are reading registers,
 *   - after the PCH has been detected,
 *   - before the first usage of the fields it can tweak.
 */
void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
{
	struct intel_device_info *info = mkwrite_device_info(dev_priv);
	enum pipe pipe;

	/*
	 * Skylake and Broxton currently don't expose the topmost plane as its
	 * use is exclusive with the legacy cursor and we only want to expose
	 * one of those, not both. Until we can safely expose the topmost plane
	 * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
	 * we don't expose the topmost plane at all to prevent ABI breakage
	 * down the line.
	 */
	if (IS_BROXTON(dev_priv)) {
		info->num_sprites[PIPE_A] = 2;
		info->num_sprites[PIPE_B] = 2;
		info->num_sprites[PIPE_C] = 1;
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		for_each_pipe(dev_priv, pipe)
			info->num_sprites[pipe] = 2;
	else
		for_each_pipe(dev_priv, pipe)
			info->num_sprites[pipe] = 1;

	if (i915.disable_display) {
		DRM_INFO("Display disabled (module parameter)\n");
		info->num_pipes = 0;
	} else if (info->num_pipes > 0 &&
		   (IS_GEN7(dev_priv) || IS_GEN8(dev_priv)) &&
		   HAS_PCH_SPLIT(dev_priv)) {
		u32 fuse_strap = I915_READ(FUSE_STRAP);
		u32 sfuse_strap = I915_READ(SFUSE_STRAP);

		/*
		 * SFUSE_STRAP is supposed to have a bit signalling the display
		 * is fused off. Unfortunately it seems that, at least in
		 * certain cases, fused off display means that PCH display
		 * reads don't land anywhere. In that case, we read 0s.
		 *
		 * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
		 * should be set when taking over after the firmware.
		 */
		if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
		    sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
		    (dev_priv->pch_type == PCH_CPT &&
		     !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
			DRM_INFO("Display fused off, disabling\n");
			info->num_pipes = 0;
		} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
			DRM_INFO("PipeC fused off\n");
			info->num_pipes -= 1;
		}
	} else if (info->num_pipes > 0 && IS_GEN9(dev_priv)) {
		u32 dfsm = I915_READ(SKL_DFSM);
		u8 disabled_mask = 0;
		bool invalid;
		int num_bits;

		if (dfsm & SKL_DFSM_PIPE_A_DISABLE)
			disabled_mask |= BIT(PIPE_A);
		if (dfsm & SKL_DFSM_PIPE_B_DISABLE)
			disabled_mask |= BIT(PIPE_B);
		if (dfsm & SKL_DFSM_PIPE_C_DISABLE)
			disabled_mask |= BIT(PIPE_C);

		num_bits = hweight8(disabled_mask);

		switch (disabled_mask) {
		case BIT(PIPE_A):
		case BIT(PIPE_B):
		case BIT(PIPE_A) | BIT(PIPE_B):
		case BIT(PIPE_A) | BIT(PIPE_C):
			invalid = true;
			break;
		default:
			invalid = false;
		}

		if (num_bits > info->num_pipes || invalid)
			DRM_ERROR("invalid pipe fuse configuration: 0x%x\n",
				  disabled_mask);
		else
			info->num_pipes -= num_bits;
	}

	/* Initialize slice/subslice/EU info */
	if (IS_CHERRYVIEW(dev_priv))
		cherryview_sseu_info_init(dev_priv);
	else if (IS_BROADWELL(dev_priv))
		broadwell_sseu_info_init(dev_priv);
	else if (INTEL_INFO(dev_priv)->gen >= 9)
		gen9_sseu_info_init(dev_priv);

	info->has_snoop = !info->has_llc;

	/* Snooping is broken on BXT A stepping. */
	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
		info->has_snoop = false;

	DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
	DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
	DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
	DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total);
	DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice);
	DRM_DEBUG_DRIVER("has slice power gating: %s\n",
			 info->has_slice_pg ? "y" : "n");
	DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
			 info->has_subslice_pg ? "y" : "n");
	DRM_DEBUG_DRIVER("has EU power gating: %s\n",
			 info->has_eu_pg ? "y" : "n");
}
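The three paths above (CHV, gen9, BDW) share one pattern: read a fuse register, mask out the per-unit disable bits, and turn the resulting mask into an enabled-unit count with a population count (hweight32()/hweight8() in the kernel). The short user-space sketch below is not part of the driver; it only illustrates that arithmetic with made-up fuse values, using __builtin_popcount() as a stand-in for hweight32().

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-in for the kernel's hweight32(). */
static unsigned int popcount32(uint32_t v)
{
	return (unsigned int)__builtin_popcount(v);
}

int main(void)
{
	/* Hypothetical fuse values: 2 of 3 slices enabled, 1 of 4
	 * subslices fused off, 3 EUs fused off per subslice. */
	uint32_t s_enable   = 0x3;  /* slice enable bits */
	uint32_t ss_disable = 0x8;  /* subslice disable bits */
	uint32_t eu_disable = 0x07; /* 8 EU-disable bits per subslice */
	const unsigned int ss_max = 4, eu_max = 8;

	unsigned int slice_total = popcount32(s_enable);
	unsigned int subslice_per_slice = ss_max - popcount32(ss_disable);
	unsigned int subslice_total = slice_total * subslice_per_slice;
	unsigned int eu_per_ss = eu_max - popcount32(eu_disable & 0xff);
	unsigned int eu_total = subslice_total * eu_per_ss;

	printf("slices=%u subslices=%u (%u/slice) EUs=%u (%u/subslice)\n",
	       slice_total, subslice_total, subslice_per_slice,
	       eu_total, eu_per_ss);
	return 0;
}

With these values the sketch prints "slices=2 subslices=6 (3/slice) EUs=30 (5/subslice)". The real functions additionally read per-slice EU-disable registers and skip fused-off slices and subslices rather than assuming uniform fusing.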
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3074c56a643d..c457eed76f1f 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -36,6 +36,7 @@
 #include "intel_drv.h"
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
+#include "i915_gem_dmabuf.h"
 #include "intel_dsi.h"
 #include "i915_trace.h"
 #include <drm/drm_atomic.h>
@@ -46,7 +47,11 @@
 #include <drm/drm_rect.h>
 #include <linux/dma_remapping.h>
 #include <linux/reservation.h>
-#include <linux/dma-buf.h>
+
+static bool is_mmio_work(struct intel_flip_work *work)
+{
+	return work->mmio_work.func;
+}
 
 /* Primary plane formats for gen <= 3 */
 static const uint32_t i8xx_primary_formats[] = {
@@ -117,20 +122,18 @@ static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
 static void ironlake_pfit_enable(struct intel_crtc *crtc);
 static void intel_modeset_setup_hw_state(struct drm_device *dev);
 static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
+static int ilk_max_pixel_rate(struct drm_atomic_state *state);
+static int bxt_calc_cdclk(int max_pixclk);
 
-typedef struct {
-	int min, max;
-} intel_range_t;
-
-typedef struct {
-	int dot_limit;
-	int p2_slow, p2_fast;
-} intel_p2_t;
-
-typedef struct intel_limit intel_limit_t;
 struct intel_limit {
-	intel_range_t dot, vco, n, m, m1, m2, p, p1;
-	intel_p2_t p2;
+	struct {
+		int min, max;
+	} dot, vco, n, m, m1, m2, p, p1;
+
+	struct {
+		int dot_limit;
+		int p2_slow, p2_fast;
+	} p2;
 };
 
 /* returns HPLL frequency in kHz */
@@ -185,6 +188,7 @@ intel_pch_rawclk(struct drm_i915_private *dev_priv)
 static int
 intel_vlv_hrawclk(struct drm_i915_private *dev_priv)
 {
+	/* RAWCLK_FREQ_VLV register updated from power well code */
 	return vlv_get_cck_clock_hpll(dev_priv, "hrawclk",
 				      CCK_DISPLAY_REF_CLOCK_CONTROL);
 }
@@ -218,7 +222,7 @@ intel_g4x_hrawclk(struct drm_i915_private *dev_priv)
 	}
 }
 
-static void intel_update_rawclk(struct drm_i915_private *dev_priv)
+void intel_update_rawclk(struct drm_i915_private *dev_priv)
 {
 	if (HAS_PCH_SPLIT(dev_priv))
 		dev_priv->rawclk_freq = intel_pch_rawclk(dev_priv);
@@ -255,7 +259,7 @@ intel_fdi_link_freq(struct drm_i915_private *dev_priv,
255 return 270000; 259 return 270000;
256} 260}
257 261
258static const intel_limit_t intel_limits_i8xx_dac = { 262static const struct intel_limit intel_limits_i8xx_dac = {
259 .dot = { .min = 25000, .max = 350000 }, 263 .dot = { .min = 25000, .max = 350000 },
260 .vco = { .min = 908000, .max = 1512000 }, 264 .vco = { .min = 908000, .max = 1512000 },
261 .n = { .min = 2, .max = 16 }, 265 .n = { .min = 2, .max = 16 },
@@ -268,7 +272,7 @@ static const intel_limit_t intel_limits_i8xx_dac = {
268 .p2_slow = 4, .p2_fast = 2 }, 272 .p2_slow = 4, .p2_fast = 2 },
269}; 273};
270 274
271static const intel_limit_t intel_limits_i8xx_dvo = { 275static const struct intel_limit intel_limits_i8xx_dvo = {
272 .dot = { .min = 25000, .max = 350000 }, 276 .dot = { .min = 25000, .max = 350000 },
273 .vco = { .min = 908000, .max = 1512000 }, 277 .vco = { .min = 908000, .max = 1512000 },
274 .n = { .min = 2, .max = 16 }, 278 .n = { .min = 2, .max = 16 },
@@ -281,7 +285,7 @@ static const intel_limit_t intel_limits_i8xx_dvo = {
281 .p2_slow = 4, .p2_fast = 4 }, 285 .p2_slow = 4, .p2_fast = 4 },
282}; 286};
283 287
284static const intel_limit_t intel_limits_i8xx_lvds = { 288static const struct intel_limit intel_limits_i8xx_lvds = {
285 .dot = { .min = 25000, .max = 350000 }, 289 .dot = { .min = 25000, .max = 350000 },
286 .vco = { .min = 908000, .max = 1512000 }, 290 .vco = { .min = 908000, .max = 1512000 },
287 .n = { .min = 2, .max = 16 }, 291 .n = { .min = 2, .max = 16 },
@@ -294,7 +298,7 @@ static const intel_limit_t intel_limits_i8xx_lvds = {
294 .p2_slow = 14, .p2_fast = 7 }, 298 .p2_slow = 14, .p2_fast = 7 },
295}; 299};
296 300
297static const intel_limit_t intel_limits_i9xx_sdvo = { 301static const struct intel_limit intel_limits_i9xx_sdvo = {
298 .dot = { .min = 20000, .max = 400000 }, 302 .dot = { .min = 20000, .max = 400000 },
299 .vco = { .min = 1400000, .max = 2800000 }, 303 .vco = { .min = 1400000, .max = 2800000 },
300 .n = { .min = 1, .max = 6 }, 304 .n = { .min = 1, .max = 6 },
@@ -307,7 +311,7 @@ static const intel_limit_t intel_limits_i9xx_sdvo = {
307 .p2_slow = 10, .p2_fast = 5 }, 311 .p2_slow = 10, .p2_fast = 5 },
308}; 312};
309 313
310static const intel_limit_t intel_limits_i9xx_lvds = { 314static const struct intel_limit intel_limits_i9xx_lvds = {
311 .dot = { .min = 20000, .max = 400000 }, 315 .dot = { .min = 20000, .max = 400000 },
312 .vco = { .min = 1400000, .max = 2800000 }, 316 .vco = { .min = 1400000, .max = 2800000 },
313 .n = { .min = 1, .max = 6 }, 317 .n = { .min = 1, .max = 6 },
@@ -321,7 +325,7 @@ static const intel_limit_t intel_limits_i9xx_lvds = {
321}; 325};
322 326
323 327
324static const intel_limit_t intel_limits_g4x_sdvo = { 328static const struct intel_limit intel_limits_g4x_sdvo = {
325 .dot = { .min = 25000, .max = 270000 }, 329 .dot = { .min = 25000, .max = 270000 },
326 .vco = { .min = 1750000, .max = 3500000}, 330 .vco = { .min = 1750000, .max = 3500000},
327 .n = { .min = 1, .max = 4 }, 331 .n = { .min = 1, .max = 4 },
@@ -336,7 +340,7 @@ static const intel_limit_t intel_limits_g4x_sdvo = {
336 }, 340 },
337}; 341};
338 342
339static const intel_limit_t intel_limits_g4x_hdmi = { 343static const struct intel_limit intel_limits_g4x_hdmi = {
340 .dot = { .min = 22000, .max = 400000 }, 344 .dot = { .min = 22000, .max = 400000 },
341 .vco = { .min = 1750000, .max = 3500000}, 345 .vco = { .min = 1750000, .max = 3500000},
342 .n = { .min = 1, .max = 4 }, 346 .n = { .min = 1, .max = 4 },
@@ -349,7 +353,7 @@ static const intel_limit_t intel_limits_g4x_hdmi = {
349 .p2_slow = 10, .p2_fast = 5 }, 353 .p2_slow = 10, .p2_fast = 5 },
350}; 354};
351 355
352static const intel_limit_t intel_limits_g4x_single_channel_lvds = { 356static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
353 .dot = { .min = 20000, .max = 115000 }, 357 .dot = { .min = 20000, .max = 115000 },
354 .vco = { .min = 1750000, .max = 3500000 }, 358 .vco = { .min = 1750000, .max = 3500000 },
355 .n = { .min = 1, .max = 3 }, 359 .n = { .min = 1, .max = 3 },
@@ -363,7 +367,7 @@ static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
363 }, 367 },
364}; 368};
365 369
366static const intel_limit_t intel_limits_g4x_dual_channel_lvds = { 370static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
367 .dot = { .min = 80000, .max = 224000 }, 371 .dot = { .min = 80000, .max = 224000 },
368 .vco = { .min = 1750000, .max = 3500000 }, 372 .vco = { .min = 1750000, .max = 3500000 },
369 .n = { .min = 1, .max = 3 }, 373 .n = { .min = 1, .max = 3 },
@@ -377,7 +381,7 @@ static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
377 }, 381 },
378}; 382};
379 383
380static const intel_limit_t intel_limits_pineview_sdvo = { 384static const struct intel_limit intel_limits_pineview_sdvo = {
381 .dot = { .min = 20000, .max = 400000}, 385 .dot = { .min = 20000, .max = 400000},
382 .vco = { .min = 1700000, .max = 3500000 }, 386 .vco = { .min = 1700000, .max = 3500000 },
383 /* Pineview's Ncounter is a ring counter */ 387 /* Pineview's Ncounter is a ring counter */
@@ -392,7 +396,7 @@ static const intel_limit_t intel_limits_pineview_sdvo = {
392 .p2_slow = 10, .p2_fast = 5 }, 396 .p2_slow = 10, .p2_fast = 5 },
393}; 397};
394 398
395static const intel_limit_t intel_limits_pineview_lvds = { 399static const struct intel_limit intel_limits_pineview_lvds = {
396 .dot = { .min = 20000, .max = 400000 }, 400 .dot = { .min = 20000, .max = 400000 },
397 .vco = { .min = 1700000, .max = 3500000 }, 401 .vco = { .min = 1700000, .max = 3500000 },
398 .n = { .min = 3, .max = 6 }, 402 .n = { .min = 3, .max = 6 },
@@ -410,7 +414,7 @@ static const intel_limit_t intel_limits_pineview_lvds = {
410 * We calculate clock using (register_value + 2) for N/M1/M2, so here 414 * We calculate clock using (register_value + 2) for N/M1/M2, so here
411 * the range value for them is (actual_value - 2). 415 * the range value for them is (actual_value - 2).
412 */ 416 */
413static const intel_limit_t intel_limits_ironlake_dac = { 417static const struct intel_limit intel_limits_ironlake_dac = {
414 .dot = { .min = 25000, .max = 350000 }, 418 .dot = { .min = 25000, .max = 350000 },
415 .vco = { .min = 1760000, .max = 3510000 }, 419 .vco = { .min = 1760000, .max = 3510000 },
416 .n = { .min = 1, .max = 5 }, 420 .n = { .min = 1, .max = 5 },
@@ -423,7 +427,7 @@ static const intel_limit_t intel_limits_ironlake_dac = {
423 .p2_slow = 10, .p2_fast = 5 }, 427 .p2_slow = 10, .p2_fast = 5 },
424}; 428};
425 429
426static const intel_limit_t intel_limits_ironlake_single_lvds = { 430static const struct intel_limit intel_limits_ironlake_single_lvds = {
427 .dot = { .min = 25000, .max = 350000 }, 431 .dot = { .min = 25000, .max = 350000 },
428 .vco = { .min = 1760000, .max = 3510000 }, 432 .vco = { .min = 1760000, .max = 3510000 },
429 .n = { .min = 1, .max = 3 }, 433 .n = { .min = 1, .max = 3 },
@@ -436,7 +440,7 @@ static const intel_limit_t intel_limits_ironlake_single_lvds = {
436 .p2_slow = 14, .p2_fast = 14 }, 440 .p2_slow = 14, .p2_fast = 14 },
437}; 441};
438 442
439static const intel_limit_t intel_limits_ironlake_dual_lvds = { 443static const struct intel_limit intel_limits_ironlake_dual_lvds = {
440 .dot = { .min = 25000, .max = 350000 }, 444 .dot = { .min = 25000, .max = 350000 },
441 .vco = { .min = 1760000, .max = 3510000 }, 445 .vco = { .min = 1760000, .max = 3510000 },
442 .n = { .min = 1, .max = 3 }, 446 .n = { .min = 1, .max = 3 },
@@ -450,7 +454,7 @@ static const intel_limit_t intel_limits_ironlake_dual_lvds = {
450}; 454};
451 455
452/* LVDS 100mhz refclk limits. */ 456/* LVDS 100mhz refclk limits. */
453static const intel_limit_t intel_limits_ironlake_single_lvds_100m = { 457static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
454 .dot = { .min = 25000, .max = 350000 }, 458 .dot = { .min = 25000, .max = 350000 },
455 .vco = { .min = 1760000, .max = 3510000 }, 459 .vco = { .min = 1760000, .max = 3510000 },
456 .n = { .min = 1, .max = 2 }, 460 .n = { .min = 1, .max = 2 },
@@ -463,7 +467,7 @@ static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
463 .p2_slow = 14, .p2_fast = 14 }, 467 .p2_slow = 14, .p2_fast = 14 },
464}; 468};
465 469
466static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = { 470static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
467 .dot = { .min = 25000, .max = 350000 }, 471 .dot = { .min = 25000, .max = 350000 },
468 .vco = { .min = 1760000, .max = 3510000 }, 472 .vco = { .min = 1760000, .max = 3510000 },
469 .n = { .min = 1, .max = 3 }, 473 .n = { .min = 1, .max = 3 },
@@ -476,7 +480,7 @@ static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
476 .p2_slow = 7, .p2_fast = 7 }, 480 .p2_slow = 7, .p2_fast = 7 },
477}; 481};
478 482
479static const intel_limit_t intel_limits_vlv = { 483static const struct intel_limit intel_limits_vlv = {
480 /* 484 /*
481 * These are the data rate limits (measured in fast clocks) 485 * These are the data rate limits (measured in fast clocks)
482 * since those are the strictest limits we have. The fast 486 * since those are the strictest limits we have. The fast
@@ -492,7 +496,7 @@ static const intel_limit_t intel_limits_vlv = {
492 .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */ 496 .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
493}; 497};
494 498
495static const intel_limit_t intel_limits_chv = { 499static const struct intel_limit intel_limits_chv = {
496 /* 500 /*
497 * These are the data rate limits (measured in fast clocks) 501 * These are the data rate limits (measured in fast clocks)
498 * since those are the strictest limits we have. The fast 502 * since those are the strictest limits we have. The fast
@@ -508,7 +512,7 @@ static const intel_limit_t intel_limits_chv = {
508 .p2 = { .p2_slow = 1, .p2_fast = 14 }, 512 .p2 = { .p2_slow = 1, .p2_fast = 14 },
509}; 513};
510 514
511static const intel_limit_t intel_limits_bxt = { 515static const struct intel_limit intel_limits_bxt = {
512 /* FIXME: find real dot limits */ 516 /* FIXME: find real dot limits */
513 .dot = { .min = 0, .max = INT_MAX }, 517 .dot = { .min = 0, .max = INT_MAX },
514 .vco = { .min = 4800000, .max = 6700000 }, 518 .vco = { .min = 4800000, .max = 6700000 },
@@ -526,52 +530,6 @@ needs_modeset(struct drm_crtc_state *state)
526 return drm_atomic_crtc_needs_modeset(state); 530 return drm_atomic_crtc_needs_modeset(state);
527} 531}
528 532
529/**
530 * Returns whether any output on the specified pipe is of the specified type
531 */
532bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type)
533{
534 struct drm_device *dev = crtc->base.dev;
535 struct intel_encoder *encoder;
536
537 for_each_encoder_on_crtc(dev, &crtc->base, encoder)
538 if (encoder->type == type)
539 return true;
540
541 return false;
542}
543
544/**
545 * Returns whether any output on the specified pipe will have the specified
546 * type after a staged modeset is complete, i.e., the same as
547 * intel_pipe_has_type() but looking at encoder->new_crtc instead of
548 * encoder->crtc.
549 */
550static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state,
551 int type)
552{
553 struct drm_atomic_state *state = crtc_state->base.state;
554 struct drm_connector *connector;
555 struct drm_connector_state *connector_state;
556 struct intel_encoder *encoder;
557 int i, num_connectors = 0;
558
559 for_each_connector_in_state(state, connector, connector_state, i) {
560 if (connector_state->crtc != crtc_state->base.crtc)
561 continue;
562
563 num_connectors++;
564
565 encoder = to_intel_encoder(connector_state->best_encoder);
566 if (encoder->type == type)
567 return true;
568 }
569
570 WARN_ON(num_connectors == 0);
571
572 return false;
573}
574
575/* 533/*
576 * Platform specific helpers to calculate the port PLL loopback- (clock.m), 534 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
577 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast 535 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
@@ -581,7 +539,7 @@ static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state,
581 * divided-down version of it. 539 * divided-down version of it.
582 */ 540 */
583/* m1 is reserved as 0 in Pineview, n is a ring counter */ 541/* m1 is reserved as 0 in Pineview, n is a ring counter */
584static int pnv_calc_dpll_params(int refclk, intel_clock_t *clock) 542static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
585{ 543{
586 clock->m = clock->m2 + 2; 544 clock->m = clock->m2 + 2;
587 clock->p = clock->p1 * clock->p2; 545 clock->p = clock->p1 * clock->p2;
@@ -598,7 +556,7 @@ static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
598 return 5 * (dpll->m1 + 2) + (dpll->m2 + 2); 556 return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
599} 557}
600 558
601static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock) 559static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
602{ 560{
603 clock->m = i9xx_dpll_compute_m(clock); 561 clock->m = i9xx_dpll_compute_m(clock);
604 clock->p = clock->p1 * clock->p2; 562 clock->p = clock->p1 * clock->p2;
@@ -610,7 +568,7 @@ static int i9xx_calc_dpll_params(int refclk, intel_clock_t *clock)
610 return clock->dot; 568 return clock->dot;
611} 569}
612 570
613static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock) 571static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
614{ 572{
615 clock->m = clock->m1 * clock->m2; 573 clock->m = clock->m1 * clock->m2;
616 clock->p = clock->p1 * clock->p2; 574 clock->p = clock->p1 * clock->p2;
@@ -622,7 +580,7 @@ static int vlv_calc_dpll_params(int refclk, intel_clock_t *clock)
622 return clock->dot / 5; 580 return clock->dot / 5;
623} 581}
624 582
625int chv_calc_dpll_params(int refclk, intel_clock_t *clock) 583int chv_calc_dpll_params(int refclk, struct dpll *clock)
626{ 584{
627 clock->m = clock->m1 * clock->m2; 585 clock->m = clock->m1 * clock->m2;
628 clock->p = clock->p1 * clock->p2; 586 clock->p = clock->p1 * clock->p2;
@@ -642,8 +600,8 @@ int chv_calc_dpll_params(int refclk, intel_clock_t *clock)
642 */ 600 */
643 601
644static bool intel_PLL_is_valid(struct drm_device *dev, 602static bool intel_PLL_is_valid(struct drm_device *dev,
645 const intel_limit_t *limit, 603 const struct intel_limit *limit,
646 const intel_clock_t *clock) 604 const struct dpll *clock)
647{ 605{
648 if (clock->n < limit->n.min || limit->n.max < clock->n) 606 if (clock->n < limit->n.min || limit->n.max < clock->n)
649 INTELPllInvalid("n out of range\n"); 607 INTELPllInvalid("n out of range\n");
@@ -678,13 +636,13 @@ static bool intel_PLL_is_valid(struct drm_device *dev,
678} 636}
679 637
680static int 638static int
681i9xx_select_p2_div(const intel_limit_t *limit, 639i9xx_select_p2_div(const struct intel_limit *limit,
682 const struct intel_crtc_state *crtc_state, 640 const struct intel_crtc_state *crtc_state,
683 int target) 641 int target)
684{ 642{
685 struct drm_device *dev = crtc_state->base.crtc->dev; 643 struct drm_device *dev = crtc_state->base.crtc->dev;
686 644
687 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { 645 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
688 /* 646 /*
689 * For LVDS just rely on its current settings for dual-channel. 647 * For LVDS just rely on its current settings for dual-channel.
690 * We haven't figured out how to reliably set up different 648 * We haven't figured out how to reliably set up different
@@ -713,13 +671,13 @@ i9xx_select_p2_div(const intel_limit_t *limit,
713 * divider from @match_clock used for LVDS downclocking. 671 * divider from @match_clock used for LVDS downclocking.
714 */ 672 */
715static bool 673static bool
716i9xx_find_best_dpll(const intel_limit_t *limit, 674i9xx_find_best_dpll(const struct intel_limit *limit,
717 struct intel_crtc_state *crtc_state, 675 struct intel_crtc_state *crtc_state,
718 int target, int refclk, intel_clock_t *match_clock, 676 int target, int refclk, struct dpll *match_clock,
719 intel_clock_t *best_clock) 677 struct dpll *best_clock)
720{ 678{
721 struct drm_device *dev = crtc_state->base.crtc->dev; 679 struct drm_device *dev = crtc_state->base.crtc->dev;
722 intel_clock_t clock; 680 struct dpll clock;
723 int err = target; 681 int err = target;
724 682
725 memset(best_clock, 0, sizeof(*best_clock)); 683 memset(best_clock, 0, sizeof(*best_clock));
@@ -770,13 +728,13 @@ i9xx_find_best_dpll(const intel_limit_t *limit,
770 * divider from @match_clock used for LVDS downclocking. 728 * divider from @match_clock used for LVDS downclocking.
771 */ 729 */
772static bool 730static bool
773pnv_find_best_dpll(const intel_limit_t *limit, 731pnv_find_best_dpll(const struct intel_limit *limit,
774 struct intel_crtc_state *crtc_state, 732 struct intel_crtc_state *crtc_state,
775 int target, int refclk, intel_clock_t *match_clock, 733 int target, int refclk, struct dpll *match_clock,
776 intel_clock_t *best_clock) 734 struct dpll *best_clock)
777{ 735{
778 struct drm_device *dev = crtc_state->base.crtc->dev; 736 struct drm_device *dev = crtc_state->base.crtc->dev;
779 intel_clock_t clock; 737 struct dpll clock;
780 int err = target; 738 int err = target;
781 739
782 memset(best_clock, 0, sizeof(*best_clock)); 740 memset(best_clock, 0, sizeof(*best_clock));
@@ -825,13 +783,13 @@ pnv_find_best_dpll(const intel_limit_t *limit,
825 * divider from @match_clock used for LVDS downclocking. 783 * divider from @match_clock used for LVDS downclocking.
826 */ 784 */
827static bool 785static bool
828g4x_find_best_dpll(const intel_limit_t *limit, 786g4x_find_best_dpll(const struct intel_limit *limit,
829 struct intel_crtc_state *crtc_state, 787 struct intel_crtc_state *crtc_state,
830 int target, int refclk, intel_clock_t *match_clock, 788 int target, int refclk, struct dpll *match_clock,
831 intel_clock_t *best_clock) 789 struct dpll *best_clock)
832{ 790{
833 struct drm_device *dev = crtc_state->base.crtc->dev; 791 struct drm_device *dev = crtc_state->base.crtc->dev;
834 intel_clock_t clock; 792 struct dpll clock;
835 int max_n; 793 int max_n;
836 bool found = false; 794 bool found = false;
837 /* approximately equals target * 0.00585 */ 795 /* approximately equals target * 0.00585 */
@@ -877,8 +835,8 @@ g4x_find_best_dpll(const intel_limit_t *limit,
877 * best configuration and error found so far. Return the calculated error. 835 * best configuration and error found so far. Return the calculated error.
878 */ 836 */
879static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq, 837static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
880 const intel_clock_t *calculated_clock, 838 const struct dpll *calculated_clock,
881 const intel_clock_t *best_clock, 839 const struct dpll *best_clock,
882 unsigned int best_error_ppm, 840 unsigned int best_error_ppm,
883 unsigned int *error_ppm) 841 unsigned int *error_ppm)
884{ 842{
@@ -918,14 +876,14 @@ static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
918 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 876 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
919 */ 877 */
920static bool 878static bool
921vlv_find_best_dpll(const intel_limit_t *limit, 879vlv_find_best_dpll(const struct intel_limit *limit,
922 struct intel_crtc_state *crtc_state, 880 struct intel_crtc_state *crtc_state,
923 int target, int refclk, intel_clock_t *match_clock, 881 int target, int refclk, struct dpll *match_clock,
924 intel_clock_t *best_clock) 882 struct dpll *best_clock)
925{ 883{
926 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 884 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
927 struct drm_device *dev = crtc->base.dev; 885 struct drm_device *dev = crtc->base.dev;
928 intel_clock_t clock; 886 struct dpll clock;
929 unsigned int bestppm = 1000000; 887 unsigned int bestppm = 1000000;
930 /* min update 19.2 MHz */ 888 /* min update 19.2 MHz */
931 int max_n = min(limit->n.max, refclk / 19200); 889 int max_n = min(limit->n.max, refclk / 19200);
@@ -977,15 +935,15 @@ vlv_find_best_dpll(const intel_limit_t *limit,
977 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 935 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
978 */ 936 */
979static bool 937static bool
980chv_find_best_dpll(const intel_limit_t *limit, 938chv_find_best_dpll(const struct intel_limit *limit,
981 struct intel_crtc_state *crtc_state, 939 struct intel_crtc_state *crtc_state,
982 int target, int refclk, intel_clock_t *match_clock, 940 int target, int refclk, struct dpll *match_clock,
983 intel_clock_t *best_clock) 941 struct dpll *best_clock)
984{ 942{
985 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); 943 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
986 struct drm_device *dev = crtc->base.dev; 944 struct drm_device *dev = crtc->base.dev;
987 unsigned int best_error_ppm; 945 unsigned int best_error_ppm;
988 intel_clock_t clock; 946 struct dpll clock;
989 uint64_t m2; 947 uint64_t m2;
990 int found = false; 948 int found = false;
991 949
@@ -1035,10 +993,10 @@ chv_find_best_dpll(const intel_limit_t *limit,
1035} 993}
1036 994
1037bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock, 995bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
1038 intel_clock_t *best_clock) 996 struct dpll *best_clock)
1039{ 997{
1040 int refclk = 100000; 998 int refclk = 100000;
1041 const intel_limit_t *limit = &intel_limits_bxt; 999 const struct intel_limit *limit = &intel_limits_bxt;
1042 1000
1043 return chv_find_best_dpll(limit, crtc_state, 1001 return chv_find_best_dpll(limit, crtc_state,
1044 target_clock, refclk, NULL, best_clock); 1002 target_clock, refclk, NULL, best_clock);
@@ -1076,7 +1034,7 @@ enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
1076 1034
1077static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe) 1035static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
1078{ 1036{
1079 struct drm_i915_private *dev_priv = dev->dev_private; 1037 struct drm_i915_private *dev_priv = to_i915(dev);
1080 i915_reg_t reg = PIPEDSL(pipe); 1038 i915_reg_t reg = PIPEDSL(pipe);
1081 u32 line1, line2; 1039 u32 line1, line2;
1082 u32 line_mask; 1040 u32 line_mask;
@@ -1112,7 +1070,7 @@ static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
1112static void intel_wait_for_pipe_off(struct intel_crtc *crtc) 1070static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
1113{ 1071{
1114 struct drm_device *dev = crtc->base.dev; 1072 struct drm_device *dev = crtc->base.dev;
1115 struct drm_i915_private *dev_priv = dev->dev_private; 1073 struct drm_i915_private *dev_priv = to_i915(dev);
1116 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; 1074 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
1117 enum pipe pipe = crtc->pipe; 1075 enum pipe pipe = crtc->pipe;
1118 1076
@@ -1120,8 +1078,9 @@ static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
 		i915_reg_t reg = PIPECONF(cpu_transcoder);
 
 		/* Wait for the Pipe State to go off */
-		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
-			     100))
+		if (intel_wait_for_register(dev_priv,
+					    reg, I965_PIPECONF_ACTIVE, 0,
+					    100))
 			WARN(1, "pipe_off wait timed out\n");
 	} else {
 		/* Wait for the display line to settle */
@@ -1203,7 +1162,7 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1203 u32 val; 1162 u32 val;
1204 1163
1205 /* ILK FDI PLL is always enabled */ 1164 /* ILK FDI PLL is always enabled */
1206 if (INTEL_INFO(dev_priv)->gen == 5) 1165 if (IS_GEN5(dev_priv))
1207 return; 1166 return;
1208 1167
1209 /* On Haswell, DDI ports are responsible for the FDI PLL setup */ 1168 /* On Haswell, DDI ports are responsible for the FDI PLL setup */
@@ -1230,7 +1189,7 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1230void assert_panel_unlocked(struct drm_i915_private *dev_priv, 1189void assert_panel_unlocked(struct drm_i915_private *dev_priv,
1231 enum pipe pipe) 1190 enum pipe pipe)
1232{ 1191{
1233 struct drm_device *dev = dev_priv->dev; 1192 struct drm_device *dev = &dev_priv->drm;
1234 i915_reg_t pp_reg; 1193 i915_reg_t pp_reg;
1235 u32 val; 1194 u32 val;
1236 enum pipe panel_pipe = PIPE_A; 1195 enum pipe panel_pipe = PIPE_A;
@@ -1272,7 +1231,7 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv,
1272static void assert_cursor(struct drm_i915_private *dev_priv, 1231static void assert_cursor(struct drm_i915_private *dev_priv,
1273 enum pipe pipe, bool state) 1232 enum pipe pipe, bool state)
1274{ 1233{
1275 struct drm_device *dev = dev_priv->dev; 1234 struct drm_device *dev = &dev_priv->drm;
1276 bool cur_state; 1235 bool cur_state;
1277 1236
1278 if (IS_845G(dev) || IS_I865G(dev)) 1237 if (IS_845G(dev) || IS_I865G(dev))
@@ -1334,7 +1293,7 @@ static void assert_plane(struct drm_i915_private *dev_priv,
1334static void assert_planes_disabled(struct drm_i915_private *dev_priv, 1293static void assert_planes_disabled(struct drm_i915_private *dev_priv,
1335 enum pipe pipe) 1294 enum pipe pipe)
1336{ 1295{
1337 struct drm_device *dev = dev_priv->dev; 1296 struct drm_device *dev = &dev_priv->drm;
1338 int i; 1297 int i;
1339 1298
1340 /* Primary planes are fixed to pipes on gen4+ */ 1299 /* Primary planes are fixed to pipes on gen4+ */
@@ -1360,7 +1319,7 @@ static void assert_planes_disabled(struct drm_i915_private *dev_priv,
1360static void assert_sprites_disabled(struct drm_i915_private *dev_priv, 1319static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
1361 enum pipe pipe) 1320 enum pipe pipe)
1362{ 1321{
1363 struct drm_device *dev = dev_priv->dev; 1322 struct drm_device *dev = &dev_priv->drm;
1364 int sprite; 1323 int sprite;
1365 1324
1366 if (INTEL_INFO(dev)->gen >= 9) { 1325 if (INTEL_INFO(dev)->gen >= 9) {
@@ -1540,7 +1499,11 @@ static void _vlv_enable_pll(struct intel_crtc *crtc,
1540 POSTING_READ(DPLL(pipe)); 1499 POSTING_READ(DPLL(pipe));
1541 udelay(150); 1500 udelay(150);
1542 1501
1543 if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1)) 1502 if (intel_wait_for_register(dev_priv,
1503 DPLL(pipe),
1504 DPLL_LOCK_VLV,
1505 DPLL_LOCK_VLV,
1506 1))
1544 DRM_ERROR("DPLL %d failed to lock\n", pipe); 1507 DRM_ERROR("DPLL %d failed to lock\n", pipe);
1545} 1508}
1546 1509
@@ -1589,7 +1552,9 @@ static void _chv_enable_pll(struct intel_crtc *crtc,
1589 I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll); 1552 I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
1590 1553
1591 /* Check PLL is locked */ 1554 /* Check PLL is locked */
1592 if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1)) 1555 if (intel_wait_for_register(dev_priv,
1556 DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
1557 1))
1593 DRM_ERROR("PLL %d failed to lock\n", pipe); 1558 DRM_ERROR("PLL %d failed to lock\n", pipe);
1594} 1559}
1595 1560
@@ -1635,9 +1600,10 @@ static int intel_num_dvo_pipes(struct drm_device *dev)
1635 struct intel_crtc *crtc; 1600 struct intel_crtc *crtc;
1636 int count = 0; 1601 int count = 0;
1637 1602
1638 for_each_intel_crtc(dev, crtc) 1603 for_each_intel_crtc(dev, crtc) {
1639 count += crtc->base.state->active && 1604 count += crtc->base.state->active &&
1640 intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO); 1605 intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO);
1606 }
1641 1607
1642 return count; 1608 return count;
1643} 1609}
@@ -1645,7 +1611,7 @@ static int intel_num_dvo_pipes(struct drm_device *dev)
1645static void i9xx_enable_pll(struct intel_crtc *crtc) 1611static void i9xx_enable_pll(struct intel_crtc *crtc)
1646{ 1612{
1647 struct drm_device *dev = crtc->base.dev; 1613 struct drm_device *dev = crtc->base.dev;
1648 struct drm_i915_private *dev_priv = dev->dev_private; 1614 struct drm_i915_private *dev_priv = to_i915(dev);
1649 i915_reg_t reg = DPLL(crtc->pipe); 1615 i915_reg_t reg = DPLL(crtc->pipe);
1650 u32 dpll = crtc->config->dpll_hw_state.dpll; 1616 u32 dpll = crtc->config->dpll_hw_state.dpll;
1651 1617
@@ -1717,12 +1683,12 @@ static void i9xx_enable_pll(struct intel_crtc *crtc)
1717static void i9xx_disable_pll(struct intel_crtc *crtc) 1683static void i9xx_disable_pll(struct intel_crtc *crtc)
1718{ 1684{
1719 struct drm_device *dev = crtc->base.dev; 1685 struct drm_device *dev = crtc->base.dev;
1720 struct drm_i915_private *dev_priv = dev->dev_private; 1686 struct drm_i915_private *dev_priv = to_i915(dev);
1721 enum pipe pipe = crtc->pipe; 1687 enum pipe pipe = crtc->pipe;
1722 1688
1723 /* Disable DVO 2x clock on both PLLs if necessary */ 1689 /* Disable DVO 2x clock on both PLLs if necessary */
1724 if (IS_I830(dev) && 1690 if (IS_I830(dev) &&
1725 intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) && 1691 intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DVO) &&
1726 !intel_num_dvo_pipes(dev)) { 1692 !intel_num_dvo_pipes(dev)) {
1727 I915_WRITE(DPLL(PIPE_B), 1693 I915_WRITE(DPLL(PIPE_B),
1728 I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE); 1694 I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
@@ -1809,7 +1775,9 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
1809 BUG(); 1775 BUG();
1810 } 1776 }
1811 1777
1812 if (wait_for((I915_READ(dpll_reg) & port_mask) == expected_mask, 1000)) 1778 if (intel_wait_for_register(dev_priv,
1779 dpll_reg, port_mask, expected_mask,
1780 1000))
1813 WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n", 1781 WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
1814 port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask); 1782 port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask);
1815} 1783}
@@ -1817,7 +1785,7 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
1817static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv, 1785static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1818 enum pipe pipe) 1786 enum pipe pipe)
1819{ 1787{
1820 struct drm_device *dev = dev_priv->dev; 1788 struct drm_device *dev = &dev_priv->drm;
1821 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 1789 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1822 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1790 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1823 i915_reg_t reg; 1791 i915_reg_t reg;
@@ -1850,7 +1818,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1850 * here for both 8bpc and 12bpc. 1818 * here for both 8bpc and 12bpc.
1851 */ 1819 */
1852 val &= ~PIPECONF_BPC_MASK; 1820 val &= ~PIPECONF_BPC_MASK;
1853 if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_HDMI)) 1821 if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_HDMI))
1854 val |= PIPECONF_8BPC; 1822 val |= PIPECONF_8BPC;
1855 else 1823 else
1856 val |= pipeconf_val & PIPECONF_BPC_MASK; 1824 val |= pipeconf_val & PIPECONF_BPC_MASK;
@@ -1859,7 +1827,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1859 val &= ~TRANS_INTERLACE_MASK; 1827 val &= ~TRANS_INTERLACE_MASK;
1860 if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) 1828 if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
1861 if (HAS_PCH_IBX(dev_priv) && 1829 if (HAS_PCH_IBX(dev_priv) &&
1862 intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO)) 1830 intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
1863 val |= TRANS_LEGACY_INTERLACED_ILK; 1831 val |= TRANS_LEGACY_INTERLACED_ILK;
1864 else 1832 else
1865 val |= TRANS_INTERLACED; 1833 val |= TRANS_INTERLACED;
@@ -1867,7 +1835,9 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1867 val |= TRANS_PROGRESSIVE; 1835 val |= TRANS_PROGRESSIVE;
1868 1836
1869 I915_WRITE(reg, val | TRANS_ENABLE); 1837 I915_WRITE(reg, val | TRANS_ENABLE);
1870 if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100)) 1838 if (intel_wait_for_register(dev_priv,
1839 reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
1840 100))
1871 DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe)); 1841 DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
1872} 1842}
1873 1843
@@ -1895,14 +1865,18 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1895 val |= TRANS_PROGRESSIVE; 1865 val |= TRANS_PROGRESSIVE;
1896 1866
1897 I915_WRITE(LPT_TRANSCONF, val); 1867 I915_WRITE(LPT_TRANSCONF, val);
1898 if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100)) 1868 if (intel_wait_for_register(dev_priv,
1869 LPT_TRANSCONF,
1870 TRANS_STATE_ENABLE,
1871 TRANS_STATE_ENABLE,
1872 100))
1899 DRM_ERROR("Failed to enable PCH transcoder\n"); 1873 DRM_ERROR("Failed to enable PCH transcoder\n");
1900} 1874}
1901 1875
1902static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv, 1876static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
1903 enum pipe pipe) 1877 enum pipe pipe)
1904{ 1878{
1905 struct drm_device *dev = dev_priv->dev; 1879 struct drm_device *dev = &dev_priv->drm;
1906 i915_reg_t reg; 1880 i915_reg_t reg;
1907 uint32_t val; 1881 uint32_t val;
1908 1882
@@ -1918,7 +1892,9 @@ static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
1918 val &= ~TRANS_ENABLE; 1892 val &= ~TRANS_ENABLE;
1919 I915_WRITE(reg, val); 1893 I915_WRITE(reg, val);
1920 /* wait for PCH transcoder off, transcoder state */ 1894 /* wait for PCH transcoder off, transcoder state */
1921 if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50)) 1895 if (intel_wait_for_register(dev_priv,
1896 reg, TRANS_STATE_ENABLE, 0,
1897 50))
1922 DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe)); 1898 DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
1923 1899
1924 if (HAS_PCH_CPT(dev)) { 1900 if (HAS_PCH_CPT(dev)) {
@@ -1938,7 +1914,9 @@ static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
1938 val &= ~TRANS_ENABLE; 1914 val &= ~TRANS_ENABLE;
1939 I915_WRITE(LPT_TRANSCONF, val); 1915 I915_WRITE(LPT_TRANSCONF, val);
1940 /* wait for PCH transcoder off, transcoder state */ 1916 /* wait for PCH transcoder off, transcoder state */
1941 if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50)) 1917 if (intel_wait_for_register(dev_priv,
1918 LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
1919 50))
1942 DRM_ERROR("Failed to disable PCH transcoder\n"); 1920 DRM_ERROR("Failed to disable PCH transcoder\n");
1943 1921
1944 /* Workaround: clear timing override bit. */ 1922 /* Workaround: clear timing override bit. */
@@ -1957,7 +1935,7 @@ static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
1957static void intel_enable_pipe(struct intel_crtc *crtc) 1935static void intel_enable_pipe(struct intel_crtc *crtc)
1958{ 1936{
1959 struct drm_device *dev = crtc->base.dev; 1937 struct drm_device *dev = crtc->base.dev;
1960 struct drm_i915_private *dev_priv = dev->dev_private; 1938 struct drm_i915_private *dev_priv = to_i915(dev);
1961 enum pipe pipe = crtc->pipe; 1939 enum pipe pipe = crtc->pipe;
1962 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; 1940 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
1963 enum pipe pch_transcoder; 1941 enum pipe pch_transcoder;
@@ -1981,7 +1959,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
1981 * need the check. 1959 * need the check.
1982 */ 1960 */
1983 if (HAS_GMCH_DISPLAY(dev_priv)) 1961 if (HAS_GMCH_DISPLAY(dev_priv))
1984 if (crtc->config->has_dsi_encoder) 1962 if (intel_crtc_has_type(crtc->config, INTEL_OUTPUT_DSI))
1985 assert_dsi_pll_enabled(dev_priv); 1963 assert_dsi_pll_enabled(dev_priv);
1986 else 1964 else
1987 assert_pll_enabled(dev_priv, pipe); 1965 assert_pll_enabled(dev_priv, pipe);
@@ -2030,7 +2008,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
2030 */ 2008 */
2031static void intel_disable_pipe(struct intel_crtc *crtc) 2009static void intel_disable_pipe(struct intel_crtc *crtc)
2032{ 2010{
2033 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 2011 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2034 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; 2012 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
2035 enum pipe pipe = crtc->pipe; 2013 enum pipe pipe = crtc->pipe;
2036 i915_reg_t reg; 2014 i915_reg_t reg;
@@ -2068,15 +2046,6 @@ static void intel_disable_pipe(struct intel_crtc *crtc)
2068 intel_wait_for_pipe_off(crtc); 2046 intel_wait_for_pipe_off(crtc);
2069} 2047}
2070 2048
2071static bool need_vtd_wa(struct drm_device *dev)
2072{
2073#ifdef CONFIG_INTEL_IOMMU
2074 if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
2075 return true;
2076#endif
2077 return false;
2078}
2079
2080static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv) 2049static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
2081{ 2050{
2082 return IS_GEN2(dev_priv) ? 2048 : 4096; 2051 return IS_GEN2(dev_priv) ? 2048 : 4096;
@@ -2241,7 +2210,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
2241 unsigned int rotation) 2210 unsigned int rotation)
2242{ 2211{
2243 struct drm_device *dev = fb->dev; 2212 struct drm_device *dev = fb->dev;
2244 struct drm_i915_private *dev_priv = dev->dev_private; 2213 struct drm_i915_private *dev_priv = to_i915(dev);
2245 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 2214 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2246 struct i915_ggtt_view view; 2215 struct i915_ggtt_view view;
2247 u32 alignment; 2216 u32 alignment;
@@ -2258,7 +2227,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
2258 * we should always have valid PTE following the scanout preventing 2227 * we should always have valid PTE following the scanout preventing
2259 * the VT-d warning. 2228 * the VT-d warning.
2260 */ 2229 */
2261 if (need_vtd_wa(dev) && alignment < 256 * 1024) 2230 if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
2262 alignment = 256 * 1024; 2231 alignment = 256 * 1024;
2263 2232
2264 /* 2233 /*
@@ -2309,7 +2278,7 @@ err_pm:
2309 return ret; 2278 return ret;
2310} 2279}
2311 2280
2312static void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation) 2281void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
2313{ 2282{
2314 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 2283 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2315 struct i915_ggtt_view view; 2284 struct i915_ggtt_view view;
@@ -2543,7 +2512,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
2543 struct intel_initial_plane_config *plane_config) 2512 struct intel_initial_plane_config *plane_config)
2544{ 2513{
2545 struct drm_device *dev = intel_crtc->base.dev; 2514 struct drm_device *dev = intel_crtc->base.dev;
2546 struct drm_i915_private *dev_priv = dev->dev_private; 2515 struct drm_i915_private *dev_priv = to_i915(dev);
2547 struct drm_crtc *c; 2516 struct drm_crtc *c;
2548 struct intel_crtc *i; 2517 struct intel_crtc *i;
2549 struct drm_i915_gem_object *obj; 2518 struct drm_i915_gem_object *obj;
@@ -2639,7 +2608,7 @@ static void i9xx_update_primary_plane(struct drm_plane *primary,
2639 const struct intel_plane_state *plane_state) 2608 const struct intel_plane_state *plane_state)
2640{ 2609{
2641 struct drm_device *dev = primary->dev; 2610 struct drm_device *dev = primary->dev;
2642 struct drm_i915_private *dev_priv = dev->dev_private; 2611 struct drm_i915_private *dev_priv = to_i915(dev);
2643 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); 2612 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
2644 struct drm_framebuffer *fb = plane_state->base.fb; 2613 struct drm_framebuffer *fb = plane_state->base.fb;
2645 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 2614 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
@@ -2752,7 +2721,7 @@ static void i9xx_disable_primary_plane(struct drm_plane *primary,
2752 struct drm_crtc *crtc) 2721 struct drm_crtc *crtc)
2753{ 2722{
2754 struct drm_device *dev = crtc->dev; 2723 struct drm_device *dev = crtc->dev;
2755 struct drm_i915_private *dev_priv = dev->dev_private; 2724 struct drm_i915_private *dev_priv = to_i915(dev);
2756 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2725 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2757 int plane = intel_crtc->plane; 2726 int plane = intel_crtc->plane;
2758 2727
@@ -2769,7 +2738,7 @@ static void ironlake_update_primary_plane(struct drm_plane *primary,
2769 const struct intel_plane_state *plane_state) 2738 const struct intel_plane_state *plane_state)
2770{ 2739{
2771 struct drm_device *dev = primary->dev; 2740 struct drm_device *dev = primary->dev;
2772 struct drm_i915_private *dev_priv = dev->dev_private; 2741 struct drm_i915_private *dev_priv = to_i915(dev);
2773 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); 2742 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
2774 struct drm_framebuffer *fb = plane_state->base.fb; 2743 struct drm_framebuffer *fb = plane_state->base.fb;
2775 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 2744 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
@@ -2897,7 +2866,7 @@ u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
2897static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id) 2866static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
2898{ 2867{
2899 struct drm_device *dev = intel_crtc->base.dev; 2868 struct drm_device *dev = intel_crtc->base.dev;
2900 struct drm_i915_private *dev_priv = dev->dev_private; 2869 struct drm_i915_private *dev_priv = to_i915(dev);
2901 2870
2902 I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0); 2871 I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
2903 I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0); 2872 I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
@@ -3007,7 +2976,7 @@ static void skylake_update_primary_plane(struct drm_plane *plane,
3007 const struct intel_plane_state *plane_state) 2976 const struct intel_plane_state *plane_state)
3008{ 2977{
3009 struct drm_device *dev = plane->dev; 2978 struct drm_device *dev = plane->dev;
3010 struct drm_i915_private *dev_priv = dev->dev_private; 2979 struct drm_i915_private *dev_priv = to_i915(dev);
3011 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); 2980 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
3012 struct drm_framebuffer *fb = plane_state->base.fb; 2981 struct drm_framebuffer *fb = plane_state->base.fb;
3013 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 2982 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
@@ -3091,7 +3060,7 @@ static void skylake_disable_primary_plane(struct drm_plane *primary,
3091 struct drm_crtc *crtc) 3060 struct drm_crtc *crtc)
3092{ 3061{
3093 struct drm_device *dev = crtc->dev; 3062 struct drm_device *dev = crtc->dev;
3094 struct drm_i915_private *dev_priv = dev->dev_private; 3063 struct drm_i915_private *dev_priv = to_i915(dev);
3095 int pipe = to_intel_crtc(crtc)->pipe; 3064 int pipe = to_intel_crtc(crtc)->pipe;
3096 3065
3097 I915_WRITE(PLANE_CTL(pipe, 0), 0); 3066 I915_WRITE(PLANE_CTL(pipe, 0), 0);
@@ -3110,17 +3079,12 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 	return -ENODEV;
 }
 
-static void intel_complete_page_flips(struct drm_device *dev)
+static void intel_complete_page_flips(struct drm_i915_private *dev_priv)
 {
-	struct drm_crtc *crtc;
-
-	for_each_crtc(dev, crtc) {
-		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-		enum plane plane = intel_crtc->plane;
+	struct intel_crtc *crtc;
 
-		intel_prepare_page_flip(dev, plane);
-		intel_finish_page_flip_plane(dev, plane);
-	}
+	for_each_intel_crtc(&dev_priv->drm, crtc)
+		intel_finish_page_flip_cs(dev_priv, crtc->pipe);
 }
 
 static void intel_update_primary_planes(struct drm_device *dev)
@@ -3143,41 +3107,39 @@ static void intel_update_primary_planes(struct drm_device *dev)
3143 } 3107 }
3144} 3108}
3145 3109
3146void intel_prepare_reset(struct drm_device *dev) 3110void intel_prepare_reset(struct drm_i915_private *dev_priv)
3147{ 3111{
3148 /* no reset support for gen2 */ 3112 /* no reset support for gen2 */
3149 if (IS_GEN2(dev)) 3113 if (IS_GEN2(dev_priv))
3150 return; 3114 return;
3151 3115
3152 /* reset doesn't touch the display */ 3116 /* reset doesn't touch the display */
3153 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) 3117 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
3154 return; 3118 return;
3155 3119
3156 drm_modeset_lock_all(dev); 3120 drm_modeset_lock_all(&dev_priv->drm);
3157 /* 3121 /*
3158 * Disabling the crtcs gracefully seems nicer. Also the 3122 * Disabling the crtcs gracefully seems nicer. Also the
3159 * g33 docs say we should at least disable all the planes. 3123 * g33 docs say we should at least disable all the planes.
3160 */ 3124 */
3161 intel_display_suspend(dev); 3125 intel_display_suspend(&dev_priv->drm);
3162} 3126}
3163 3127
3164void intel_finish_reset(struct drm_device *dev) 3128void intel_finish_reset(struct drm_i915_private *dev_priv)
3165{ 3129{
3166 struct drm_i915_private *dev_priv = to_i915(dev);
3167
3168 /* 3130 /*
3169 * Flips in the rings will be nuked by the reset, 3131 * Flips in the rings will be nuked by the reset,
3170 * so complete all pending flips so that user space 3132 * so complete all pending flips so that user space
3171 * will get its events and not get stuck. 3133 * will get its events and not get stuck.
3172 */ 3134 */
3173 intel_complete_page_flips(dev); 3135 intel_complete_page_flips(dev_priv);
3174 3136
3175 /* no reset support for gen2 */ 3137 /* no reset support for gen2 */
3176 if (IS_GEN2(dev)) 3138 if (IS_GEN2(dev_priv))
3177 return; 3139 return;
3178 3140
3179 /* reset doesn't touch the display */ 3141 /* reset doesn't touch the display */
3180 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) { 3142 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
3181 /* 3143 /*
3182 * Flips in the rings have been nuked by the reset, 3144 * Flips in the rings have been nuked by the reset,
3183 * so update the base address of all primary 3145 * so update the base address of all primary
@@ -3187,7 +3149,7 @@ void intel_finish_reset(struct drm_device *dev)
3187 * FIXME: Atomic will make this obsolete since we won't schedule 3149 * FIXME: Atomic will make this obsolete since we won't schedule
3188 * CS-based flips (which might get lost in gpu resets) any more. 3150 * CS-based flips (which might get lost in gpu resets) any more.
3189 */ 3151 */
3190 intel_update_primary_planes(dev); 3152 intel_update_primary_planes(&dev_priv->drm);
3191 return; 3153 return;
3192 } 3154 }
3193 3155
@@ -3198,18 +3160,18 @@ void intel_finish_reset(struct drm_device *dev)
3198 intel_runtime_pm_disable_interrupts(dev_priv); 3160 intel_runtime_pm_disable_interrupts(dev_priv);
3199 intel_runtime_pm_enable_interrupts(dev_priv); 3161 intel_runtime_pm_enable_interrupts(dev_priv);
3200 3162
3201 intel_modeset_init_hw(dev); 3163 intel_modeset_init_hw(&dev_priv->drm);
3202 3164
3203 spin_lock_irq(&dev_priv->irq_lock); 3165 spin_lock_irq(&dev_priv->irq_lock);
3204 if (dev_priv->display.hpd_irq_setup) 3166 if (dev_priv->display.hpd_irq_setup)
3205 dev_priv->display.hpd_irq_setup(dev); 3167 dev_priv->display.hpd_irq_setup(dev_priv);
3206 spin_unlock_irq(&dev_priv->irq_lock); 3168 spin_unlock_irq(&dev_priv->irq_lock);
3207 3169
3208 intel_display_resume(dev); 3170 intel_display_resume(&dev_priv->drm);
3209 3171
3210 intel_hpd_init(dev_priv); 3172 intel_hpd_init(dev_priv);
3211 3173
3212 drm_modeset_unlock_all(dev); 3174 drm_modeset_unlock_all(&dev_priv->drm);
3213} 3175}
3214 3176
3215static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc) 3177static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
@@ -3224,7 +3186,7 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
3224 return false; 3186 return false;
3225 3187
3226 spin_lock_irq(&dev->event_lock); 3188 spin_lock_irq(&dev->event_lock);
3227 pending = to_intel_crtc(crtc)->unpin_work != NULL; 3189 pending = to_intel_crtc(crtc)->flip_work != NULL;
3228 spin_unlock_irq(&dev->event_lock); 3190 spin_unlock_irq(&dev->event_lock);
3229 3191
3230 return pending; 3192 return pending;
@@ -3234,7 +3196,7 @@ static void intel_update_pipe_config(struct intel_crtc *crtc,
3234 struct intel_crtc_state *old_crtc_state) 3196 struct intel_crtc_state *old_crtc_state)
3235{ 3197{
3236 struct drm_device *dev = crtc->base.dev; 3198 struct drm_device *dev = crtc->base.dev;
3237 struct drm_i915_private *dev_priv = dev->dev_private; 3199 struct drm_i915_private *dev_priv = to_i915(dev);
3238 struct intel_crtc_state *pipe_config = 3200 struct intel_crtc_state *pipe_config =
3239 to_intel_crtc_state(crtc->base.state); 3201 to_intel_crtc_state(crtc->base.state);
3240 3202
@@ -3275,7 +3237,7 @@ static void intel_update_pipe_config(struct intel_crtc *crtc,
3275static void intel_fdi_normal_train(struct drm_crtc *crtc) 3237static void intel_fdi_normal_train(struct drm_crtc *crtc)
3276{ 3238{
3277 struct drm_device *dev = crtc->dev; 3239 struct drm_device *dev = crtc->dev;
3278 struct drm_i915_private *dev_priv = dev->dev_private; 3240 struct drm_i915_private *dev_priv = to_i915(dev);
3279 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3241 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3280 int pipe = intel_crtc->pipe; 3242 int pipe = intel_crtc->pipe;
3281 i915_reg_t reg; 3243 i915_reg_t reg;
@@ -3318,7 +3280,7 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc)
3318static void ironlake_fdi_link_train(struct drm_crtc *crtc) 3280static void ironlake_fdi_link_train(struct drm_crtc *crtc)
3319{ 3281{
3320 struct drm_device *dev = crtc->dev; 3282 struct drm_device *dev = crtc->dev;
3321 struct drm_i915_private *dev_priv = dev->dev_private; 3283 struct drm_i915_private *dev_priv = to_i915(dev);
3322 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3284 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3323 int pipe = intel_crtc->pipe; 3285 int pipe = intel_crtc->pipe;
3324 i915_reg_t reg; 3286 i915_reg_t reg;
@@ -3419,7 +3381,7 @@ static const int snb_b_fdi_train_param[] = {
3419static void gen6_fdi_link_train(struct drm_crtc *crtc) 3381static void gen6_fdi_link_train(struct drm_crtc *crtc)
3420{ 3382{
3421 struct drm_device *dev = crtc->dev; 3383 struct drm_device *dev = crtc->dev;
3422 struct drm_i915_private *dev_priv = dev->dev_private; 3384 struct drm_i915_private *dev_priv = to_i915(dev);
3423 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3385 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3424 int pipe = intel_crtc->pipe; 3386 int pipe = intel_crtc->pipe;
3425 i915_reg_t reg; 3387 i915_reg_t reg;
@@ -3552,7 +3514,7 @@ static void gen6_fdi_link_train(struct drm_crtc *crtc)
3552static void ivb_manual_fdi_link_train(struct drm_crtc *crtc) 3514static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
3553{ 3515{
3554 struct drm_device *dev = crtc->dev; 3516 struct drm_device *dev = crtc->dev;
3555 struct drm_i915_private *dev_priv = dev->dev_private; 3517 struct drm_i915_private *dev_priv = to_i915(dev);
3556 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3518 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3557 int pipe = intel_crtc->pipe; 3519 int pipe = intel_crtc->pipe;
3558 i915_reg_t reg; 3520 i915_reg_t reg;
@@ -3671,7 +3633,7 @@ train_done:
3671static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc) 3633static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
3672{ 3634{
3673 struct drm_device *dev = intel_crtc->base.dev; 3635 struct drm_device *dev = intel_crtc->base.dev;
3674 struct drm_i915_private *dev_priv = dev->dev_private; 3636 struct drm_i915_private *dev_priv = to_i915(dev);
3675 int pipe = intel_crtc->pipe; 3637 int pipe = intel_crtc->pipe;
3676 i915_reg_t reg; 3638 i915_reg_t reg;
3677 u32 temp; 3639 u32 temp;
@@ -3708,7 +3670,7 @@ static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
3708static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc) 3670static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
3709{ 3671{
3710 struct drm_device *dev = intel_crtc->base.dev; 3672 struct drm_device *dev = intel_crtc->base.dev;
3711 struct drm_i915_private *dev_priv = dev->dev_private; 3673 struct drm_i915_private *dev_priv = to_i915(dev);
3712 int pipe = intel_crtc->pipe; 3674 int pipe = intel_crtc->pipe;
3713 i915_reg_t reg; 3675 i915_reg_t reg;
3714 u32 temp; 3676 u32 temp;
@@ -3738,7 +3700,7 @@ static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
3738static void ironlake_fdi_disable(struct drm_crtc *crtc) 3700static void ironlake_fdi_disable(struct drm_crtc *crtc)
3739{ 3701{
3740 struct drm_device *dev = crtc->dev; 3702 struct drm_device *dev = crtc->dev;
3741 struct drm_i915_private *dev_priv = dev->dev_private; 3703 struct drm_i915_private *dev_priv = to_i915(dev);
3742 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3704 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3743 int pipe = intel_crtc->pipe; 3705 int pipe = intel_crtc->pipe;
3744 i915_reg_t reg; 3706 i915_reg_t reg;
@@ -3803,7 +3765,7 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev)
3803 if (atomic_read(&crtc->unpin_work_count) == 0) 3765 if (atomic_read(&crtc->unpin_work_count) == 0)
3804 continue; 3766 continue;
3805 3767
3806 if (crtc->unpin_work) 3768 if (crtc->flip_work)
3807 intel_wait_for_vblank(dev, crtc->pipe); 3769 intel_wait_for_vblank(dev, crtc->pipe);
3808 3770
3809 return true; 3771 return true;
@@ -3815,11 +3777,9 @@ bool intel_has_pending_fb_unpin(struct drm_device *dev)
3815static void page_flip_completed(struct intel_crtc *intel_crtc) 3777static void page_flip_completed(struct intel_crtc *intel_crtc)
3816{ 3778{
3817 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); 3779 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
3818 struct intel_unpin_work *work = intel_crtc->unpin_work; 3780 struct intel_flip_work *work = intel_crtc->flip_work;
3819 3781
3820 /* ensure that the unpin work is consistent wrt ->pending. */ 3782 intel_crtc->flip_work = NULL;
3821 smp_rmb();
3822 intel_crtc->unpin_work = NULL;
3823 3783
3824 if (work->event) 3784 if (work->event)
3825 drm_crtc_send_vblank_event(&intel_crtc->base, work->event); 3785 drm_crtc_send_vblank_event(&intel_crtc->base, work->event);
@@ -3827,7 +3787,7 @@ static void page_flip_completed(struct intel_crtc *intel_crtc)
3827 drm_crtc_vblank_put(&intel_crtc->base); 3787 drm_crtc_vblank_put(&intel_crtc->base);
3828 3788
3829 wake_up_all(&dev_priv->pending_flip_queue); 3789 wake_up_all(&dev_priv->pending_flip_queue);
3830 queue_work(dev_priv->wq, &work->work); 3790 queue_work(dev_priv->wq, &work->unpin_work);
3831 3791
3832 trace_i915_flip_complete(intel_crtc->plane, 3792 trace_i915_flip_complete(intel_crtc->plane,
3833 work->pending_flip_obj); 3793 work->pending_flip_obj);
@@ -3836,7 +3796,7 @@ static void page_flip_completed(struct intel_crtc *intel_crtc)
3836static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) 3796static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
3837{ 3797{
3838 struct drm_device *dev = crtc->dev; 3798 struct drm_device *dev = crtc->dev;
3839 struct drm_i915_private *dev_priv = dev->dev_private; 3799 struct drm_i915_private *dev_priv = to_i915(dev);
3840 long ret; 3800 long ret;
3841 3801
3842 WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue)); 3802 WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
@@ -3851,9 +3811,11 @@ static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
3851 3811
3852 if (ret == 0) { 3812 if (ret == 0) {
3853 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3813 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3814 struct intel_flip_work *work;
3854 3815
3855 spin_lock_irq(&dev->event_lock); 3816 spin_lock_irq(&dev->event_lock);
3856 if (intel_crtc->unpin_work) { 3817 work = intel_crtc->flip_work;
3818 if (work && !is_mmio_work(work)) {
3857 WARN_ONCE(1, "Removing stuck page flip\n"); 3819 WARN_ONCE(1, "Removing stuck page flip\n");
3858 page_flip_completed(intel_crtc); 3820 page_flip_completed(intel_crtc);
3859 } 3821 }
@@ -3997,7 +3959,7 @@ static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
3997 enum pipe pch_transcoder) 3959 enum pipe pch_transcoder)
3998{ 3960{
3999 struct drm_device *dev = crtc->base.dev; 3961 struct drm_device *dev = crtc->base.dev;
4000 struct drm_i915_private *dev_priv = dev->dev_private; 3962 struct drm_i915_private *dev_priv = to_i915(dev);
4001 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; 3963 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
4002 3964
4003 I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder), 3965 I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
@@ -4019,7 +3981,7 @@ static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
4019 3981
4020static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable) 3982static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
4021{ 3983{
4022 struct drm_i915_private *dev_priv = dev->dev_private; 3984 struct drm_i915_private *dev_priv = to_i915(dev);
4023 uint32_t temp; 3985 uint32_t temp;
4024 3986
4025 temp = I915_READ(SOUTH_CHICKEN1); 3987 temp = I915_READ(SOUTH_CHICKEN1);
@@ -4069,7 +4031,7 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc)
4069 struct intel_encoder *encoder; 4031 struct intel_encoder *encoder;
4070 4032
4071 for_each_encoder_on_crtc(dev, crtc, encoder) { 4033 for_each_encoder_on_crtc(dev, crtc, encoder) {
4072 if (encoder->type == INTEL_OUTPUT_DISPLAYPORT || 4034 if (encoder->type == INTEL_OUTPUT_DP ||
4073 encoder->type == INTEL_OUTPUT_EDP) 4035 encoder->type == INTEL_OUTPUT_EDP)
4074 return enc_to_dig_port(&encoder->base)->port; 4036 return enc_to_dig_port(&encoder->base)->port;
4075 } 4037 }
@@ -4088,7 +4050,7 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc)
4088static void ironlake_pch_enable(struct drm_crtc *crtc) 4050static void ironlake_pch_enable(struct drm_crtc *crtc)
4089{ 4051{
4090 struct drm_device *dev = crtc->dev; 4052 struct drm_device *dev = crtc->dev;
4091 struct drm_i915_private *dev_priv = dev->dev_private; 4053 struct drm_i915_private *dev_priv = to_i915(dev);
4092 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4054 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4093 int pipe = intel_crtc->pipe; 4055 int pipe = intel_crtc->pipe;
4094 u32 temp; 4056 u32 temp;
@@ -4138,7 +4100,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
4138 intel_fdi_normal_train(crtc); 4100 intel_fdi_normal_train(crtc);
4139 4101
4140 /* For PCH DP, enable TRANS_DP_CTL */ 4102 /* For PCH DP, enable TRANS_DP_CTL */
4141 if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) { 4103 if (HAS_PCH_CPT(dev) && intel_crtc_has_dp_encoder(intel_crtc->config)) {
4142 const struct drm_display_mode *adjusted_mode = 4104 const struct drm_display_mode *adjusted_mode =
4143 &intel_crtc->config->base.adjusted_mode; 4105 &intel_crtc->config->base.adjusted_mode;
4144 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5; 4106 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
@@ -4178,7 +4140,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
4178static void lpt_pch_enable(struct drm_crtc *crtc) 4140static void lpt_pch_enable(struct drm_crtc *crtc)
4179{ 4141{
4180 struct drm_device *dev = crtc->dev; 4142 struct drm_device *dev = crtc->dev;
4181 struct drm_i915_private *dev_priv = dev->dev_private; 4143 struct drm_i915_private *dev_priv = to_i915(dev);
4182 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4144 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4183 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 4145 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
4184 4146
@@ -4194,7 +4156,7 @@ static void lpt_pch_enable(struct drm_crtc *crtc)
4194 4156
4195static void cpt_verify_modeset(struct drm_device *dev, int pipe) 4157static void cpt_verify_modeset(struct drm_device *dev, int pipe)
4196{ 4158{
4197 struct drm_i915_private *dev_priv = dev->dev_private; 4159 struct drm_i915_private *dev_priv = to_i915(dev);
4198 i915_reg_t dslreg = PIPEDSL(pipe); 4160 i915_reg_t dslreg = PIPEDSL(pipe);
4199 u32 temp; 4161 u32 temp;
4200 4162
@@ -4281,8 +4243,9 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)
4281 struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc); 4243 struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
4282 const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode; 4244 const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
4283 4245
4284 DRM_DEBUG_KMS("Updating scaler for [CRTC:%i] scaler_user index %u.%u\n", 4246 DRM_DEBUG_KMS("Updating scaler for [CRTC:%d:%s] scaler_user index %u.%u\n",
4285 intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX); 4247 intel_crtc->base.base.id, intel_crtc->base.name,
4248 intel_crtc->pipe, SKL_CRTC_INDEX);
4286 4249
4287 return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX, 4250 return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
4288 &state->scaler_state.scaler_id, BIT(DRM_ROTATE_0), 4251 &state->scaler_state.scaler_id, BIT(DRM_ROTATE_0),
@@ -4312,9 +4275,9 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
4312 4275
4313 bool force_detach = !fb || !plane_state->visible; 4276 bool force_detach = !fb || !plane_state->visible;
4314 4277
4315 DRM_DEBUG_KMS("Updating scaler for [PLANE:%d] scaler_user index %u.%u\n", 4278 DRM_DEBUG_KMS("Updating scaler for [PLANE:%d:%s] scaler_user index %u.%u\n",
4316 intel_plane->base.base.id, intel_crtc->pipe, 4279 intel_plane->base.base.id, intel_plane->base.name,
4317 drm_plane_index(&intel_plane->base)); 4280 intel_crtc->pipe, drm_plane_index(&intel_plane->base));
4318 4281
4319 ret = skl_update_scaler(crtc_state, force_detach, 4282 ret = skl_update_scaler(crtc_state, force_detach,
4320 drm_plane_index(&intel_plane->base), 4283 drm_plane_index(&intel_plane->base),
@@ -4330,8 +4293,9 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
4330 4293
4331 /* check colorkey */ 4294 /* check colorkey */
4332 if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) { 4295 if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) {
4333 DRM_DEBUG_KMS("[PLANE:%d] scaling with color key not allowed", 4296 DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
4334 intel_plane->base.base.id); 4297 intel_plane->base.base.id,
4298 intel_plane->base.name);
4335 return -EINVAL; 4299 return -EINVAL;
4336 } 4300 }
4337 4301
@@ -4350,8 +4314,9 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
4350 case DRM_FORMAT_VYUY: 4314 case DRM_FORMAT_VYUY:
4351 break; 4315 break;
4352 default: 4316 default:
4353 DRM_DEBUG_KMS("[PLANE:%d] FB:%d unsupported scaling format 0x%x\n", 4317 DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
4354 intel_plane->base.base.id, fb->base.id, fb->pixel_format); 4318 intel_plane->base.base.id, intel_plane->base.name,
4319 fb->base.id, fb->pixel_format);
4355 return -EINVAL; 4320 return -EINVAL;
4356 } 4321 }
4357 4322
@@ -4369,7 +4334,7 @@ static void skylake_scaler_disable(struct intel_crtc *crtc)
4369static void skylake_pfit_enable(struct intel_crtc *crtc) 4334static void skylake_pfit_enable(struct intel_crtc *crtc)
4370{ 4335{
4371 struct drm_device *dev = crtc->base.dev; 4336 struct drm_device *dev = crtc->base.dev;
4372 struct drm_i915_private *dev_priv = dev->dev_private; 4337 struct drm_i915_private *dev_priv = to_i915(dev);
4373 int pipe = crtc->pipe; 4338 int pipe = crtc->pipe;
4374 struct intel_crtc_scaler_state *scaler_state = 4339 struct intel_crtc_scaler_state *scaler_state =
4375 &crtc->config->scaler_state; 4340 &crtc->config->scaler_state;
@@ -4397,7 +4362,7 @@ static void skylake_pfit_enable(struct intel_crtc *crtc)
4397static void ironlake_pfit_enable(struct intel_crtc *crtc) 4362static void ironlake_pfit_enable(struct intel_crtc *crtc)
4398{ 4363{
4399 struct drm_device *dev = crtc->base.dev; 4364 struct drm_device *dev = crtc->base.dev;
4400 struct drm_i915_private *dev_priv = dev->dev_private; 4365 struct drm_i915_private *dev_priv = to_i915(dev);
4401 int pipe = crtc->pipe; 4366 int pipe = crtc->pipe;
4402 4367
4403 if (crtc->config->pch_pfit.enabled) { 4368 if (crtc->config->pch_pfit.enabled) {
@@ -4418,7 +4383,7 @@ static void ironlake_pfit_enable(struct intel_crtc *crtc)
4418void hsw_enable_ips(struct intel_crtc *crtc) 4383void hsw_enable_ips(struct intel_crtc *crtc)
4419{ 4384{
4420 struct drm_device *dev = crtc->base.dev; 4385 struct drm_device *dev = crtc->base.dev;
4421 struct drm_i915_private *dev_priv = dev->dev_private; 4386 struct drm_i915_private *dev_priv = to_i915(dev);
4422 4387
4423 if (!crtc->config->ips_enabled) 4388 if (!crtc->config->ips_enabled)
4424 return; 4389 return;
@@ -4446,7 +4411,9 @@ void hsw_enable_ips(struct intel_crtc *crtc)
4446 * and don't wait for vblanks until the end of crtc_enable, then 4411 * and don't wait for vblanks until the end of crtc_enable, then
4447 * the HW state readout code will complain that the expected 4412 * the HW state readout code will complain that the expected
4448 * IPS_CTL value is not the one we read. */ 4413 * IPS_CTL value is not the one we read. */
4449 if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50)) 4414 if (intel_wait_for_register(dev_priv,
4415 IPS_CTL, IPS_ENABLE, IPS_ENABLE,
4416 50))
4450 DRM_ERROR("Timed out waiting for IPS enable\n"); 4417 DRM_ERROR("Timed out waiting for IPS enable\n");
4451 } 4418 }
4452} 4419}
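The IPS enable/disable paths above replace open-coded wait_for() polls on IPS_CTL with intel_wait_for_register(dev_priv, reg, mask, value, timeout_ms), which succeeds once (I915_READ(reg) & mask) == value or gives up after timeout_ms milliseconds. As a rough approximation only (the real helper also handles forcewake and splits the wait into a busy-wait and a sleeping phase), its semantics are close to:

static int wait_for_register_sketch(struct drm_i915_private *dev_priv,
				    i915_reg_t reg, u32 mask, u32 value,
				    unsigned int timeout_ms)
{
	/* 0 on success, -ETIMEDOUT if the condition never became true. */
	return wait_for((I915_READ(reg) & mask) == value, timeout_ms);
}

So the IPS enable wait above reads: poll IPS_CTL until the IPS_ENABLE bit is set, for at most 50 ms.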
@@ -4454,7 +4421,7 @@ void hsw_enable_ips(struct intel_crtc *crtc)
4454void hsw_disable_ips(struct intel_crtc *crtc) 4421void hsw_disable_ips(struct intel_crtc *crtc)
4455{ 4422{
4456 struct drm_device *dev = crtc->base.dev; 4423 struct drm_device *dev = crtc->base.dev;
4457 struct drm_i915_private *dev_priv = dev->dev_private; 4424 struct drm_i915_private *dev_priv = to_i915(dev);
4458 4425
4459 if (!crtc->config->ips_enabled) 4426 if (!crtc->config->ips_enabled)
4460 return; 4427 return;
@@ -4465,7 +4432,9 @@ void hsw_disable_ips(struct intel_crtc *crtc)
4465 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0)); 4432 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
4466 mutex_unlock(&dev_priv->rps.hw_lock); 4433 mutex_unlock(&dev_priv->rps.hw_lock);
4467 /* wait for pcode to finish disabling IPS, which may take up to 42ms */ 4434 /* wait for pcode to finish disabling IPS, which may take up to 42ms */
4468 if (wait_for((I915_READ(IPS_CTL) & IPS_ENABLE) == 0, 42)) 4435 if (intel_wait_for_register(dev_priv,
4436 IPS_CTL, IPS_ENABLE, 0,
4437 42))
4469 DRM_ERROR("Timed out waiting for IPS disable\n"); 4438 DRM_ERROR("Timed out waiting for IPS disable\n");
4470 } else { 4439 } else {
4471 I915_WRITE(IPS_CTL, 0); 4440 I915_WRITE(IPS_CTL, 0);
@@ -4480,7 +4449,7 @@ static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
4480{ 4449{
4481 if (intel_crtc->overlay) { 4450 if (intel_crtc->overlay) {
4482 struct drm_device *dev = intel_crtc->base.dev; 4451 struct drm_device *dev = intel_crtc->base.dev;
4483 struct drm_i915_private *dev_priv = dev->dev_private; 4452 struct drm_i915_private *dev_priv = to_i915(dev);
4484 4453
4485 mutex_lock(&dev->struct_mutex); 4454 mutex_lock(&dev->struct_mutex);
4486 dev_priv->mm.interruptible = false; 4455 dev_priv->mm.interruptible = false;
@@ -4508,7 +4477,7 @@ static void
4508intel_post_enable_primary(struct drm_crtc *crtc) 4477intel_post_enable_primary(struct drm_crtc *crtc)
4509{ 4478{
4510 struct drm_device *dev = crtc->dev; 4479 struct drm_device *dev = crtc->dev;
4511 struct drm_i915_private *dev_priv = dev->dev_private; 4480 struct drm_i915_private *dev_priv = to_i915(dev);
4512 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4481 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4513 int pipe = intel_crtc->pipe; 4482 int pipe = intel_crtc->pipe;
4514 4483
@@ -4540,7 +4509,7 @@ static void
4540intel_pre_disable_primary(struct drm_crtc *crtc) 4509intel_pre_disable_primary(struct drm_crtc *crtc)
4541{ 4510{
4542 struct drm_device *dev = crtc->dev; 4511 struct drm_device *dev = crtc->dev;
4543 struct drm_i915_private *dev_priv = dev->dev_private; 4512 struct drm_i915_private *dev_priv = to_i915(dev);
4544 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4513 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4545 int pipe = intel_crtc->pipe; 4514 int pipe = intel_crtc->pipe;
4546 4515
@@ -4567,7 +4536,7 @@ static void
4567intel_pre_disable_primary_noatomic(struct drm_crtc *crtc) 4536intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
4568{ 4537{
4569 struct drm_device *dev = crtc->dev; 4538 struct drm_device *dev = crtc->dev;
4570 struct drm_i915_private *dev_priv = dev->dev_private; 4539 struct drm_i915_private *dev_priv = to_i915(dev);
4571 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4540 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4572 int pipe = intel_crtc->pipe; 4541 int pipe = intel_crtc->pipe;
4573 4542
@@ -4626,7 +4595,7 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
4626{ 4595{
4627 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); 4596 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
4628 struct drm_device *dev = crtc->base.dev; 4597 struct drm_device *dev = crtc->base.dev;
4629 struct drm_i915_private *dev_priv = dev->dev_private; 4598 struct drm_i915_private *dev_priv = to_i915(dev);
4630 struct intel_crtc_state *pipe_config = 4599 struct intel_crtc_state *pipe_config =
4631 to_intel_crtc_state(crtc->base.state); 4600 to_intel_crtc_state(crtc->base.state);
4632 struct drm_atomic_state *old_state = old_crtc_state->base.state; 4601 struct drm_atomic_state *old_state = old_crtc_state->base.state;
@@ -4641,14 +4610,14 @@ static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
4641 struct intel_plane_state *old_primary_state = 4610 struct intel_plane_state *old_primary_state =
4642 to_intel_plane_state(old_pri_state); 4611 to_intel_plane_state(old_pri_state);
4643 4612
4644 intel_fbc_pre_update(crtc); 4613 intel_fbc_pre_update(crtc, pipe_config, primary_state);
4645 4614
4646 if (old_primary_state->visible && 4615 if (old_primary_state->visible &&
4647 (modeset || !primary_state->visible)) 4616 (modeset || !primary_state->visible))
4648 intel_pre_disable_primary(&crtc->base); 4617 intel_pre_disable_primary(&crtc->base);
4649 } 4618 }
4650 4619
4651 if (pipe_config->disable_cxsr) { 4620 if (pipe_config->disable_cxsr && HAS_GMCH_DISPLAY(dev)) {
4652 crtc->wm.cxsr_allowed = false; 4621 crtc->wm.cxsr_allowed = false;
4653 4622
4654 /* 4623 /*
@@ -4729,7 +4698,7 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask
4729static void ironlake_crtc_enable(struct drm_crtc *crtc) 4698static void ironlake_crtc_enable(struct drm_crtc *crtc)
4730{ 4699{
4731 struct drm_device *dev = crtc->dev; 4700 struct drm_device *dev = crtc->dev;
4732 struct drm_i915_private *dev_priv = dev->dev_private; 4701 struct drm_i915_private *dev_priv = to_i915(dev);
4733 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4702 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4734 struct intel_encoder *encoder; 4703 struct intel_encoder *encoder;
4735 int pipe = intel_crtc->pipe; 4704 int pipe = intel_crtc->pipe;
@@ -4757,7 +4726,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
4757 if (intel_crtc->config->has_pch_encoder) 4726 if (intel_crtc->config->has_pch_encoder)
4758 intel_prepare_shared_dpll(intel_crtc); 4727 intel_prepare_shared_dpll(intel_crtc);
4759 4728
4760 if (intel_crtc->config->has_dp_encoder) 4729 if (intel_crtc_has_dp_encoder(intel_crtc->config))
4761 intel_dp_set_m_n(intel_crtc, M1_N1); 4730 intel_dp_set_m_n(intel_crtc, M1_N1);
4762 4731
4763 intel_set_pipe_timings(intel_crtc); 4732 intel_set_pipe_timings(intel_crtc);
@@ -4826,7 +4795,7 @@ static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
4826static void haswell_crtc_enable(struct drm_crtc *crtc) 4795static void haswell_crtc_enable(struct drm_crtc *crtc)
4827{ 4796{
4828 struct drm_device *dev = crtc->dev; 4797 struct drm_device *dev = crtc->dev;
4829 struct drm_i915_private *dev_priv = dev->dev_private; 4798 struct drm_i915_private *dev_priv = to_i915(dev);
4830 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4799 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4831 struct intel_encoder *encoder; 4800 struct intel_encoder *encoder;
4832 int pipe = intel_crtc->pipe, hsw_workaround_pipe; 4801 int pipe = intel_crtc->pipe, hsw_workaround_pipe;
@@ -4841,13 +4810,17 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
4841 intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A, 4810 intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
4842 false); 4811 false);
4843 4812
4813 for_each_encoder_on_crtc(dev, crtc, encoder)
4814 if (encoder->pre_pll_enable)
4815 encoder->pre_pll_enable(encoder);
4816
4844 if (intel_crtc->config->shared_dpll) 4817 if (intel_crtc->config->shared_dpll)
4845 intel_enable_shared_dpll(intel_crtc); 4818 intel_enable_shared_dpll(intel_crtc);
4846 4819
4847 if (intel_crtc->config->has_dp_encoder) 4820 if (intel_crtc_has_dp_encoder(intel_crtc->config))
4848 intel_dp_set_m_n(intel_crtc, M1_N1); 4821 intel_dp_set_m_n(intel_crtc, M1_N1);
4849 4822
4850 if (!intel_crtc->config->has_dsi_encoder) 4823 if (!transcoder_is_dsi(cpu_transcoder))
4851 intel_set_pipe_timings(intel_crtc); 4824 intel_set_pipe_timings(intel_crtc);
4852 4825
4853 intel_set_pipe_src_size(intel_crtc); 4826 intel_set_pipe_src_size(intel_crtc);
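The has_dsi_encoder checks above become transcoder_is_dsi(cpu_transcoder), keying off the transcoder rather than a per-crtc-state flag. Assuming the BXT DSI transcoder enum values (TRANSCODER_DSI_A / TRANSCODER_DSI_C), such a predicate amounts to:

static inline bool transcoder_is_dsi(enum transcoder transcoder)
{
	return transcoder == TRANSCODER_DSI_A || transcoder == TRANSCODER_DSI_C;
}

This keeps the "skip pipe timings / pipeconf / DDI programming for DSI" special cases working per transcoder.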
@@ -4863,7 +4836,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
4863 &intel_crtc->config->fdi_m_n, NULL); 4836 &intel_crtc->config->fdi_m_n, NULL);
4864 } 4837 }
4865 4838
4866 if (!intel_crtc->config->has_dsi_encoder) 4839 if (!transcoder_is_dsi(cpu_transcoder))
4867 haswell_set_pipeconf(crtc); 4840 haswell_set_pipeconf(crtc);
4868 4841
4869 haswell_set_pipemisc(crtc); 4842 haswell_set_pipemisc(crtc);
@@ -4885,7 +4858,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
4885 if (intel_crtc->config->has_pch_encoder) 4858 if (intel_crtc->config->has_pch_encoder)
4886 dev_priv->display.fdi_link_train(crtc); 4859 dev_priv->display.fdi_link_train(crtc);
4887 4860
4888 if (!intel_crtc->config->has_dsi_encoder) 4861 if (!transcoder_is_dsi(cpu_transcoder))
4889 intel_ddi_enable_pipe_clock(intel_crtc); 4862 intel_ddi_enable_pipe_clock(intel_crtc);
4890 4863
4891 if (INTEL_INFO(dev)->gen >= 9) 4864 if (INTEL_INFO(dev)->gen >= 9)
@@ -4900,7 +4873,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
4900 intel_color_load_luts(&pipe_config->base); 4873 intel_color_load_luts(&pipe_config->base);
4901 4874
4902 intel_ddi_set_pipe_settings(crtc); 4875 intel_ddi_set_pipe_settings(crtc);
4903 if (!intel_crtc->config->has_dsi_encoder) 4876 if (!transcoder_is_dsi(cpu_transcoder))
4904 intel_ddi_enable_transcoder_func(crtc); 4877 intel_ddi_enable_transcoder_func(crtc);
4905 4878
4906 if (dev_priv->display.initial_watermarks != NULL) 4879 if (dev_priv->display.initial_watermarks != NULL)
@@ -4909,7 +4882,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
4909 intel_update_watermarks(crtc); 4882 intel_update_watermarks(crtc);
4910 4883
4911 /* XXX: Do the pipe assertions at the right place for BXT DSI. */ 4884 /* XXX: Do the pipe assertions at the right place for BXT DSI. */
4912 if (!intel_crtc->config->has_dsi_encoder) 4885 if (!transcoder_is_dsi(cpu_transcoder))
4913 intel_enable_pipe(intel_crtc); 4886 intel_enable_pipe(intel_crtc);
4914 4887
4915 if (intel_crtc->config->has_pch_encoder) 4888 if (intel_crtc->config->has_pch_encoder)
@@ -4946,7 +4919,7 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
4946static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force) 4919static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
4947{ 4920{
4948 struct drm_device *dev = crtc->base.dev; 4921 struct drm_device *dev = crtc->base.dev;
4949 struct drm_i915_private *dev_priv = dev->dev_private; 4922 struct drm_i915_private *dev_priv = to_i915(dev);
4950 int pipe = crtc->pipe; 4923 int pipe = crtc->pipe;
4951 4924
4952 /* To avoid upsetting the power well on haswell only disable the pfit if 4925 /* To avoid upsetting the power well on haswell only disable the pfit if
@@ -4961,7 +4934,7 @@ static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
4961static void ironlake_crtc_disable(struct drm_crtc *crtc) 4934static void ironlake_crtc_disable(struct drm_crtc *crtc)
4962{ 4935{
4963 struct drm_device *dev = crtc->dev; 4936 struct drm_device *dev = crtc->dev;
4964 struct drm_i915_private *dev_priv = dev->dev_private; 4937 struct drm_i915_private *dev_priv = to_i915(dev);
4965 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4938 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4966 struct intel_encoder *encoder; 4939 struct intel_encoder *encoder;
4967 int pipe = intel_crtc->pipe; 4940 int pipe = intel_crtc->pipe;
@@ -5024,7 +4997,7 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
5024static void haswell_crtc_disable(struct drm_crtc *crtc) 4997static void haswell_crtc_disable(struct drm_crtc *crtc)
5025{ 4998{
5026 struct drm_device *dev = crtc->dev; 4999 struct drm_device *dev = crtc->dev;
5027 struct drm_i915_private *dev_priv = dev->dev_private; 5000 struct drm_i915_private *dev_priv = to_i915(dev);
5028 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5001 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5029 struct intel_encoder *encoder; 5002 struct intel_encoder *encoder;
5030 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 5003 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
@@ -5042,13 +5015,13 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
5042 assert_vblank_disabled(crtc); 5015 assert_vblank_disabled(crtc);
5043 5016
5044 /* XXX: Do the pipe assertions at the right place for BXT DSI. */ 5017 /* XXX: Do the pipe assertions at the right place for BXT DSI. */
5045 if (!intel_crtc->config->has_dsi_encoder) 5018 if (!transcoder_is_dsi(cpu_transcoder))
5046 intel_disable_pipe(intel_crtc); 5019 intel_disable_pipe(intel_crtc);
5047 5020
5048 if (intel_crtc->config->dp_encoder_is_mst) 5021 if (intel_crtc->config->dp_encoder_is_mst)
5049 intel_ddi_set_vc_payload_alloc(crtc, false); 5022 intel_ddi_set_vc_payload_alloc(crtc, false);
5050 5023
5051 if (!intel_crtc->config->has_dsi_encoder) 5024 if (!transcoder_is_dsi(cpu_transcoder))
5052 intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder); 5025 intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
5053 5026
5054 if (INTEL_INFO(dev)->gen >= 9) 5027 if (INTEL_INFO(dev)->gen >= 9)
@@ -5056,7 +5029,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
5056 else 5029 else
5057 ironlake_pfit_disable(intel_crtc, false); 5030 ironlake_pfit_disable(intel_crtc, false);
5058 5031
5059 if (!intel_crtc->config->has_dsi_encoder) 5032 if (!transcoder_is_dsi(cpu_transcoder))
5060 intel_ddi_disable_pipe_clock(intel_crtc); 5033 intel_ddi_disable_pipe_clock(intel_crtc);
5061 5034
5062 for_each_encoder_on_crtc(dev, crtc, encoder) 5035 for_each_encoder_on_crtc(dev, crtc, encoder)
@@ -5076,7 +5049,7 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
5076static void i9xx_pfit_enable(struct intel_crtc *crtc) 5049static void i9xx_pfit_enable(struct intel_crtc *crtc)
5077{ 5050{
5078 struct drm_device *dev = crtc->base.dev; 5051 struct drm_device *dev = crtc->base.dev;
5079 struct drm_i915_private *dev_priv = dev->dev_private; 5052 struct drm_i915_private *dev_priv = to_i915(dev);
5080 struct intel_crtc_state *pipe_config = crtc->config; 5053 struct intel_crtc_state *pipe_config = crtc->config;
5081 5054
5082 if (!pipe_config->gmch_pfit.control) 5055 if (!pipe_config->gmch_pfit.control)
@@ -5146,7 +5119,7 @@ intel_display_port_power_domain(struct intel_encoder *intel_encoder)
5146 case INTEL_OUTPUT_UNKNOWN: 5119 case INTEL_OUTPUT_UNKNOWN:
5147 /* Only DDI platforms should ever use this output type */ 5120 /* Only DDI platforms should ever use this output type */
5148 WARN_ON_ONCE(!HAS_DDI(dev)); 5121 WARN_ON_ONCE(!HAS_DDI(dev));
5149 case INTEL_OUTPUT_DISPLAYPORT: 5122 case INTEL_OUTPUT_DP:
5150 case INTEL_OUTPUT_HDMI: 5123 case INTEL_OUTPUT_HDMI:
5151 case INTEL_OUTPUT_EDP: 5124 case INTEL_OUTPUT_EDP:
5152 intel_dig_port = enc_to_dig_port(&intel_encoder->base); 5125 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
@@ -5180,7 +5153,7 @@ intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
5180 * run the DP detection too. 5153 * run the DP detection too.
5181 */ 5154 */
5182 WARN_ON_ONCE(!HAS_DDI(dev)); 5155 WARN_ON_ONCE(!HAS_DDI(dev));
5183 case INTEL_OUTPUT_DISPLAYPORT: 5156 case INTEL_OUTPUT_DP:
5184 case INTEL_OUTPUT_EDP: 5157 case INTEL_OUTPUT_EDP:
5185 intel_dig_port = enc_to_dig_port(&intel_encoder->base); 5158 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
5186 return port_to_aux_power_domain(intel_dig_port->port); 5159 return port_to_aux_power_domain(intel_dig_port->port);
@@ -5228,7 +5201,7 @@ static unsigned long
5228modeset_get_crtc_power_domains(struct drm_crtc *crtc, 5201modeset_get_crtc_power_domains(struct drm_crtc *crtc,
5229 struct intel_crtc_state *crtc_state) 5202 struct intel_crtc_state *crtc_state)
5230{ 5203{
5231 struct drm_i915_private *dev_priv = crtc->dev->dev_private; 5204 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
5232 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5205 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5233 enum intel_display_power_domain domain; 5206 enum intel_display_power_domain domain;
5234 unsigned long domains, new_domains, old_domains; 5207 unsigned long domains, new_domains, old_domains;
@@ -5269,21 +5242,34 @@ static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
5269 return max_cdclk_freq*90/100; 5242 return max_cdclk_freq*90/100;
5270} 5243}
5271 5244
5245static int skl_calc_cdclk(int max_pixclk, int vco);
5246
5272static void intel_update_max_cdclk(struct drm_device *dev) 5247static void intel_update_max_cdclk(struct drm_device *dev)
5273{ 5248{
5274 struct drm_i915_private *dev_priv = dev->dev_private; 5249 struct drm_i915_private *dev_priv = to_i915(dev);
5275 5250
5276 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { 5251 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
5277 u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK; 5252 u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
5253 int max_cdclk, vco;
5254
5255 vco = dev_priv->skl_preferred_vco_freq;
5256 WARN_ON(vco != 8100000 && vco != 8640000);
5278 5257
5258 /*
5259 * Use the lower (vco 8640) cdclk values as a
5260 * first guess. skl_calc_cdclk() will correct it
5261 * if the preferred vco is 8100 instead.
5262 */
5279 if (limit == SKL_DFSM_CDCLK_LIMIT_675) 5263 if (limit == SKL_DFSM_CDCLK_LIMIT_675)
5280 dev_priv->max_cdclk_freq = 675000; 5264 max_cdclk = 617143;
5281 else if (limit == SKL_DFSM_CDCLK_LIMIT_540) 5265 else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
5282 dev_priv->max_cdclk_freq = 540000; 5266 max_cdclk = 540000;
5283 else if (limit == SKL_DFSM_CDCLK_LIMIT_450) 5267 else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
5284 dev_priv->max_cdclk_freq = 450000; 5268 max_cdclk = 432000;
5285 else 5269 else
5286 dev_priv->max_cdclk_freq = 337500; 5270 max_cdclk = 308571;
5271
5272 dev_priv->max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco);
5287 } else if (IS_BROXTON(dev)) { 5273 } else if (IS_BROXTON(dev)) {
5288 dev_priv->max_cdclk_freq = 624000; 5274 dev_priv->max_cdclk_freq = 624000;
5289 } else if (IS_BROADWELL(dev)) { 5275 } else if (IS_BROADWELL(dev)) {
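intel_compute_max_dotclk() keeps a 10% guardband under the maximum cdclk (the max_cdclk_freq*90/100 above), and on SKL/KBL the raw DFSM fuse limit is now passed through skl_calc_cdclk() so that the reported maximum is an achievable cdclk for the preferred VCO. A small standalone sketch of that arithmetic; the 617143 kHz figure is the 8640 MHz VCO value from the tables later in this patch.

#include <stdio.h>

static int max_dotclk_from_cdclk(int max_cdclk_khz)
{
	/* 10% guardband between cdclk and the fastest pixel clock allowed */
	return max_cdclk_khz * 90 / 100;
}

int main(void)
{
	/* DFSM limit 675 with an 8640 MHz preferred VCO -> 617143 kHz cdclk */
	printf("%d kHz\n", max_dotclk_from_cdclk(617143));	/* 555428 kHz */
	/* DFSM limit 675 with an 8100 MHz preferred VCO -> 675000 kHz cdclk */
	printf("%d kHz\n", max_dotclk_from_cdclk(675000));	/* 607500 kHz */
	return 0;
}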
@@ -5321,267 +5307,322 @@ static void intel_update_max_cdclk(struct drm_device *dev)
5321 5307
5322static void intel_update_cdclk(struct drm_device *dev) 5308static void intel_update_cdclk(struct drm_device *dev)
5323{ 5309{
5324 struct drm_i915_private *dev_priv = dev->dev_private; 5310 struct drm_i915_private *dev_priv = to_i915(dev);
5325 5311
5326 dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev); 5312 dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
5327 DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n", 5313
5328 dev_priv->cdclk_freq); 5314 if (INTEL_GEN(dev_priv) >= 9)
5315 DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz, VCO: %d kHz, ref: %d kHz\n",
5316 dev_priv->cdclk_freq, dev_priv->cdclk_pll.vco,
5317 dev_priv->cdclk_pll.ref);
5318 else
5319 DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
5320 dev_priv->cdclk_freq);
5329 5321
5330 /* 5322 /*
5331 * Program the gmbus_freq based on the cdclk frequency. 5323 * 9:0 CMBUS [sic] CDCLK frequency (cdfreq):
5332 * BSpec erroneously claims we should aim for 4MHz, but 5324 * Programmng [sic] note: bit[9:2] should be programmed to the number
5333 * in fact 1MHz is the correct frequency. 5325 * of cdclk that generates 4MHz reference clock freq which is used to
5326 * generate GMBus clock. This will vary with the cdclk freq.
5334 */ 5327 */
5335 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { 5328 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
5336 /*
5337 * Program the gmbus_freq based on the cdclk frequency.
5338 * BSpec erroneously claims we should aim for 4MHz, but
5339 * in fact 1MHz is the correct frequency.
5340 */
5341 I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000)); 5329 I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
5342 } 5330}
5343 5331
5344 if (dev_priv->max_cdclk_freq == 0) 5332/* convert from kHz to .1 fixpoint MHz with -1MHz offset */
5345 intel_update_max_cdclk(dev); 5333static int skl_cdclk_decimal(int cdclk)
5334{
5335 return DIV_ROUND_CLOSEST(cdclk - 1000, 500);
5346} 5336}
5347 5337
5348static void broxton_set_cdclk(struct drm_i915_private *dev_priv, int frequency) 5338static int bxt_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
5349{ 5339{
5350 uint32_t divider; 5340 int ratio;
5351 uint32_t ratio;
5352 uint32_t current_freq;
5353 int ret;
5354 5341
5355 /* frequency = 19.2MHz * ratio / 2 / div{1,1.5,2,4} */ 5342 if (cdclk == dev_priv->cdclk_pll.ref)
5356 switch (frequency) { 5343 return 0;
5344
5345 switch (cdclk) {
5346 default:
5347 MISSING_CASE(cdclk);
5357 case 144000: 5348 case 144000:
5349 case 288000:
5350 case 384000:
5351 case 576000:
5352 ratio = 60;
5353 break;
5354 case 624000:
5355 ratio = 65;
5356 break;
5357 }
5358
5359 return dev_priv->cdclk_pll.ref * ratio;
5360}
5361
5362static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
5363{
5364 I915_WRITE(BXT_DE_PLL_ENABLE, 0);
5365
5366 /* Timeout 200us */
5367 if (intel_wait_for_register(dev_priv,
5368 BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 0,
5369 1))
5370 DRM_ERROR("timeout waiting for DE PLL unlock\n");
5371
5372 dev_priv->cdclk_pll.vco = 0;
5373}
5374
5375static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
5376{
5377 int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk_pll.ref);
5378 u32 val;
5379
5380 val = I915_READ(BXT_DE_PLL_CTL);
5381 val &= ~BXT_DE_PLL_RATIO_MASK;
5382 val |= BXT_DE_PLL_RATIO(ratio);
5383 I915_WRITE(BXT_DE_PLL_CTL, val);
5384
5385 I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
5386
5387 /* Timeout 200us */
5388 if (intel_wait_for_register(dev_priv,
5389 BXT_DE_PLL_ENABLE,
5390 BXT_DE_PLL_LOCK,
5391 BXT_DE_PLL_LOCK,
5392 1))
5393 DRM_ERROR("timeout waiting for DE PLL lock\n");
5394
5395 dev_priv->cdclk_pll.vco = vco;
5396}
5397
5398static void bxt_set_cdclk(struct drm_i915_private *dev_priv, int cdclk)
5399{
5400 u32 val, divider;
5401 int vco, ret;
5402
5403 vco = bxt_de_pll_vco(dev_priv, cdclk);
5404
5405 DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco);
5406
5407 /* cdclk = vco / 2 / div{1,1.5,2,4} */
5408 switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
5409 case 8:
5358 divider = BXT_CDCLK_CD2X_DIV_SEL_4; 5410 divider = BXT_CDCLK_CD2X_DIV_SEL_4;
5359 ratio = BXT_DE_PLL_RATIO(60);
5360 break; 5411 break;
5361 case 288000: 5412 case 4:
5362 divider = BXT_CDCLK_CD2X_DIV_SEL_2; 5413 divider = BXT_CDCLK_CD2X_DIV_SEL_2;
5363 ratio = BXT_DE_PLL_RATIO(60);
5364 break; 5414 break;
5365 case 384000: 5415 case 3:
5366 divider = BXT_CDCLK_CD2X_DIV_SEL_1_5; 5416 divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
5367 ratio = BXT_DE_PLL_RATIO(60);
5368 break;
5369 case 576000:
5370 divider = BXT_CDCLK_CD2X_DIV_SEL_1;
5371 ratio = BXT_DE_PLL_RATIO(60);
5372 break; 5417 break;
5373 case 624000: 5418 case 2:
5374 divider = BXT_CDCLK_CD2X_DIV_SEL_1; 5419 divider = BXT_CDCLK_CD2X_DIV_SEL_1;
5375 ratio = BXT_DE_PLL_RATIO(65);
5376 break;
5377 case 19200:
5378 /*
5379 * Bypass frequency with DE PLL disabled. Init ratio, divider
5380 * to suppress GCC warning.
5381 */
5382 ratio = 0;
5383 divider = 0;
5384 break; 5420 break;
5385 default: 5421 default:
5386 DRM_ERROR("unsupported CDCLK freq %d", frequency); 5422 WARN_ON(cdclk != dev_priv->cdclk_pll.ref);
5423 WARN_ON(vco != 0);
5387 5424
5388 return; 5425 divider = BXT_CDCLK_CD2X_DIV_SEL_1;
5426 break;
5389 } 5427 }
5390 5428
5391 mutex_lock(&dev_priv->rps.hw_lock);
5392 /* Inform power controller of upcoming frequency change */ 5429 /* Inform power controller of upcoming frequency change */
5430 mutex_lock(&dev_priv->rps.hw_lock);
5393 ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, 5431 ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
5394 0x80000000); 5432 0x80000000);
5395 mutex_unlock(&dev_priv->rps.hw_lock); 5433 mutex_unlock(&dev_priv->rps.hw_lock);
5396 5434
5397 if (ret) { 5435 if (ret) {
5398 DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n", 5436 DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
5399 ret, frequency); 5437 ret, cdclk);
5400 return; 5438 return;
5401 } 5439 }
5402 5440
5403 current_freq = I915_READ(CDCLK_CTL) & CDCLK_FREQ_DECIMAL_MASK; 5441 if (dev_priv->cdclk_pll.vco != 0 &&
5404 /* convert from .1 fixpoint MHz with -1MHz offset to kHz */ 5442 dev_priv->cdclk_pll.vco != vco)
5405 current_freq = current_freq * 500 + 1000; 5443 bxt_de_pll_disable(dev_priv);
5406 5444
5407 /* 5445 if (dev_priv->cdclk_pll.vco != vco)
5408 * DE PLL has to be disabled when 5446 bxt_de_pll_enable(dev_priv, vco);
5409 * - setting to 19.2MHz (bypass, PLL isn't used)
5410 * - before setting to 624MHz (PLL needs toggling)
5411 * - before setting to any frequency from 624MHz (PLL needs toggling)
5412 */
5413 if (frequency == 19200 || frequency == 624000 ||
5414 current_freq == 624000) {
5415 I915_WRITE(BXT_DE_PLL_ENABLE, ~BXT_DE_PLL_PLL_ENABLE);
5416 /* Timeout 200us */
5417 if (wait_for(!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK),
5418 1))
5419 DRM_ERROR("timout waiting for DE PLL unlock\n");
5420 }
5421
5422 if (frequency != 19200) {
5423 uint32_t val;
5424
5425 val = I915_READ(BXT_DE_PLL_CTL);
5426 val &= ~BXT_DE_PLL_RATIO_MASK;
5427 val |= ratio;
5428 I915_WRITE(BXT_DE_PLL_CTL, val);
5429
5430 I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
5431 /* Timeout 200us */
5432 if (wait_for(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK, 1))
5433 DRM_ERROR("timeout waiting for DE PLL lock\n");
5434
5435 val = I915_READ(CDCLK_CTL);
5436 val &= ~BXT_CDCLK_CD2X_DIV_SEL_MASK;
5437 val |= divider;
5438 /*
5439 * Disable SSA Precharge when CD clock frequency < 500 MHz,
5440 * enable otherwise.
5441 */
5442 val &= ~BXT_CDCLK_SSA_PRECHARGE_ENABLE;
5443 if (frequency >= 500000)
5444 val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
5445 5447
5446 val &= ~CDCLK_FREQ_DECIMAL_MASK; 5448 val = divider | skl_cdclk_decimal(cdclk);
5447 /* convert from kHz to .1 fixpoint MHz with -1MHz offset */ 5449 /*
5448 val |= (frequency - 1000) / 500; 5450 * FIXME if only the cd2x divider needs changing, it could be done
5449 I915_WRITE(CDCLK_CTL, val); 5451 * without shutting off the pipe (if only one pipe is active).
5450 } 5452 */
5453 val |= BXT_CDCLK_CD2X_PIPE_NONE;
5454 /*
5455 * Disable SSA Precharge when CD clock frequency < 500 MHz,
5456 * enable otherwise.
5457 */
5458 if (cdclk >= 500000)
5459 val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
5460 I915_WRITE(CDCLK_CTL, val);
5451 5461
5452 mutex_lock(&dev_priv->rps.hw_lock); 5462 mutex_lock(&dev_priv->rps.hw_lock);
5453 ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, 5463 ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
5454 DIV_ROUND_UP(frequency, 25000)); 5464 DIV_ROUND_UP(cdclk, 25000));
5455 mutex_unlock(&dev_priv->rps.hw_lock); 5465 mutex_unlock(&dev_priv->rps.hw_lock);
5456 5466
5457 if (ret) { 5467 if (ret) {
5458 DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n", 5468 DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
5459 ret, frequency); 5469 ret, cdclk);
5460 return; 5470 return;
5461 } 5471 }
5462 5472
5463 intel_update_cdclk(dev_priv->dev); 5473 intel_update_cdclk(&dev_priv->drm);
5464} 5474}
5465 5475
5466static bool broxton_cdclk_is_enabled(struct drm_i915_private *dev_priv) 5476static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
5467{ 5477{
5468 if (!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_PLL_ENABLE)) 5478 u32 cdctl, expected;
5469 return false;
5470 5479
5471 /* TODO: Check for a valid CDCLK rate */ 5480 intel_update_cdclk(&dev_priv->drm);
5472 5481
5473 if (!(I915_READ(DBUF_CTL) & DBUF_POWER_REQUEST)) { 5482 if (dev_priv->cdclk_pll.vco == 0 ||
5474 DRM_DEBUG_DRIVER("CDCLK enabled, but DBUF power not requested\n"); 5483 dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
5484 goto sanitize;
5475 5485
5476 return false; 5486 /* DPLL okay; verify the cdclock
5477 } 5487 *
5488 * Some BIOS versions leave an incorrect decimal frequency value and
5489 * set reserved MBZ bits in CDCLK_CTL at least during exiting from S4,
5490 * so sanitize this register.
5491 */
5492 cdctl = I915_READ(CDCLK_CTL);
5493 /*
5494 * Let's ignore the pipe field, since BIOS could have configured the
5495 * dividers both synching to an active pipe, or asynchronously
5496 * (PIPE_NONE).
5497 */
5498 cdctl &= ~BXT_CDCLK_CD2X_PIPE_NONE;
5478 5499
5479 if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE)) { 5500 expected = (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) |
5480 DRM_DEBUG_DRIVER("CDCLK enabled, but DBUF power hasn't settled\n"); 5501 skl_cdclk_decimal(dev_priv->cdclk_freq);
5502 /*
5503 * Disable SSA Precharge when CD clock frequency < 500 MHz,
5504 * enable otherwise.
5505 */
5506 if (dev_priv->cdclk_freq >= 500000)
5507 expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
5481 5508
5482 return false; 5509 if (cdctl == expected)
5483 } 5510 /* All well; nothing to sanitize */
5511 return;
5484 5512
5485 return true; 5513sanitize:
5486} 5514 DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
5487 5515
5488bool broxton_cdclk_verify_state(struct drm_i915_private *dev_priv) 5516 /* force cdclk programming */
5489{ 5517 dev_priv->cdclk_freq = 0;
5490 return broxton_cdclk_is_enabled(dev_priv); 5518
5519 /* force full PLL disable + enable */
5520 dev_priv->cdclk_pll.vco = -1;
5491} 5521}
5492 5522
5493void broxton_init_cdclk(struct drm_i915_private *dev_priv) 5523void bxt_init_cdclk(struct drm_i915_private *dev_priv)
5494{ 5524{
5495 /* check if cd clock is enabled */ 5525 bxt_sanitize_cdclk(dev_priv);
5496 if (broxton_cdclk_is_enabled(dev_priv)) {
5497 DRM_DEBUG_KMS("CDCLK already enabled, won't reprogram it\n");
5498 return;
5499 }
5500 5526
5501 DRM_DEBUG_KMS("CDCLK not enabled, enabling it\n"); 5527 if (dev_priv->cdclk_freq != 0 && dev_priv->cdclk_pll.vco != 0)
5528 return;
5502 5529
5503 /* 5530 /*
5504 * FIXME: 5531 * FIXME:
5505 * - The initial CDCLK needs to be read from VBT. 5532 * - The initial CDCLK needs to be read from VBT.
5506 * Need to make this change after VBT has changes for BXT. 5533 * Need to make this change after VBT has changes for BXT.
5507 * - check if setting the max (or any) cdclk freq is really necessary
5508 * here, it belongs to modeset time
5509 */ 5534 */
5510 broxton_set_cdclk(dev_priv, 624000); 5535 bxt_set_cdclk(dev_priv, bxt_calc_cdclk(0));
5511 5536}
5512 I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
5513 POSTING_READ(DBUF_CTL);
5514 5537
5515 udelay(10); 5538void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
5539{
5540 bxt_set_cdclk(dev_priv, dev_priv->cdclk_pll.ref);
5541}
5516 5542
5517 if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE)) 5543static int skl_calc_cdclk(int max_pixclk, int vco)
5518 DRM_ERROR("DBuf power enable timeout!\n"); 5544{
5545 if (vco == 8640000) {
5546 if (max_pixclk > 540000)
5547 return 617143;
5548 else if (max_pixclk > 432000)
5549 return 540000;
5550 else if (max_pixclk > 308571)
5551 return 432000;
5552 else
5553 return 308571;
5554 } else {
5555 if (max_pixclk > 540000)
5556 return 675000;
5557 else if (max_pixclk > 450000)
5558 return 540000;
5559 else if (max_pixclk > 337500)
5560 return 450000;
5561 else
5562 return 337500;
5563 }
5519} 5564}
5520 5565
5521void broxton_uninit_cdclk(struct drm_i915_private *dev_priv) 5566static void
5567skl_dpll0_update(struct drm_i915_private *dev_priv)
5522{ 5568{
5523 I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST); 5569 u32 val;
5524 POSTING_READ(DBUF_CTL);
5525 5570
5526 udelay(10); 5571 dev_priv->cdclk_pll.ref = 24000;
5572 dev_priv->cdclk_pll.vco = 0;
5527 5573
5528 if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE) 5574 val = I915_READ(LCPLL1_CTL);
5529 DRM_ERROR("DBuf power disable timeout!\n"); 5575 if ((val & LCPLL_PLL_ENABLE) == 0)
5576 return;
5530 5577
5531 /* Set minimum (bypass) frequency, in effect turning off the DE PLL */ 5578 if (WARN_ON((val & LCPLL_PLL_LOCK) == 0))
5532 broxton_set_cdclk(dev_priv, 19200); 5579 return;
5533}
5534 5580
5535static const struct skl_cdclk_entry { 5581 val = I915_READ(DPLL_CTRL1);
5536 unsigned int freq;
5537 unsigned int vco;
5538} skl_cdclk_frequencies[] = {
5539 { .freq = 308570, .vco = 8640 },
5540 { .freq = 337500, .vco = 8100 },
5541 { .freq = 432000, .vco = 8640 },
5542 { .freq = 450000, .vco = 8100 },
5543 { .freq = 540000, .vco = 8100 },
5544 { .freq = 617140, .vco = 8640 },
5545 { .freq = 675000, .vco = 8100 },
5546};
5547 5582
5548static unsigned int skl_cdclk_decimal(unsigned int freq) 5583 if (WARN_ON((val & (DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) |
5549{ 5584 DPLL_CTRL1_SSC(SKL_DPLL0) |
5550 return (freq - 1000) / 500; 5585 DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) !=
5586 DPLL_CTRL1_OVERRIDE(SKL_DPLL0)))
5587 return;
5588
5589 switch (val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) {
5590 case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, SKL_DPLL0):
5591 case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, SKL_DPLL0):
5592 case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, SKL_DPLL0):
5593 case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, SKL_DPLL0):
5594 dev_priv->cdclk_pll.vco = 8100000;
5595 break;
5596 case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, SKL_DPLL0):
5597 case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, SKL_DPLL0):
5598 dev_priv->cdclk_pll.vco = 8640000;
5599 break;
5600 default:
5601 MISSING_CASE(val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
5602 break;
5603 }
5551} 5604}
5552 5605
5553static unsigned int skl_cdclk_get_vco(unsigned int freq) 5606void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco)
5554{ 5607{
5555 unsigned int i; 5608 bool changed = dev_priv->skl_preferred_vco_freq != vco;
5556
5557 for (i = 0; i < ARRAY_SIZE(skl_cdclk_frequencies); i++) {
5558 const struct skl_cdclk_entry *e = &skl_cdclk_frequencies[i];
5559 5609
5560 if (e->freq == freq) 5610 dev_priv->skl_preferred_vco_freq = vco;
5561 return e->vco;
5562 }
5563 5611
5564 return 8100; 5612 if (changed)
5613 intel_update_max_cdclk(&dev_priv->drm);
5565} 5614}
5566 5615
5567static void 5616static void
5568skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco) 5617skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
5569{ 5618{
5570 unsigned int min_freq; 5619 int min_cdclk = skl_calc_cdclk(0, vco);
5571 u32 val; 5620 u32 val;
5572 5621
5573 /* select the minimum CDCLK before enabling DPLL 0 */ 5622 WARN_ON(vco != 8100000 && vco != 8640000);
5574 val = I915_READ(CDCLK_CTL);
5575 val &= ~CDCLK_FREQ_SEL_MASK | ~CDCLK_FREQ_DECIMAL_MASK;
5576 val |= CDCLK_FREQ_337_308;
5577
5578 if (required_vco == 8640)
5579 min_freq = 308570;
5580 else
5581 min_freq = 337500;
5582
5583 val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_freq);
5584 5623
5624 /* select the minimum CDCLK before enabling DPLL 0 */
5625 val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_cdclk);
5585 I915_WRITE(CDCLK_CTL, val); 5626 I915_WRITE(CDCLK_CTL, val);
5586 POSTING_READ(CDCLK_CTL); 5627 POSTING_READ(CDCLK_CTL);
5587 5628
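Two conversions recur in the SKL/BXT cdclk rework above: skl_cdclk_decimal() encodes the cdclk as the .1-fixpoint-MHz-minus-1 value CDCLK_CTL expects, and skl_calc_cdclk() rounds a pixel clock up to the lowest legal cdclk for the active DPLL0 VCO. A standalone sketch that mirrors the values from the hunk above (the example pixel clock is only a rough 4k@60 figure):

#include <stdio.h>

/* Round-to-nearest division for positive operands (kernel DIV_ROUND_CLOSEST). */
static int div_round_closest(int x, int d)
{
	return (x + d / 2) / d;
}

/* CDCLK_CTL[9:0]: cdclk in .1 fixpoint MHz with a -1 MHz offset. */
static int skl_cdclk_decimal(int cdclk_khz)
{
	return div_round_closest(cdclk_khz - 1000, 500);
}

/* Lowest legal SKL cdclk at or above max_pixclk for the given DPLL0 VCO. */
static int skl_calc_cdclk(int max_pixclk, int vco)
{
	if (vco == 8640000) {
		if (max_pixclk > 540000) return 617143;
		if (max_pixclk > 432000) return 540000;
		if (max_pixclk > 308571) return 432000;
		return 308571;
	}
	if (max_pixclk > 540000) return 675000;
	if (max_pixclk > 450000) return 540000;
	if (max_pixclk > 337500) return 450000;
	return 337500;
}

int main(void)
{
	/* A ~533 MHz 4k@60 pixel clock needs the 540 MHz cdclk on either VCO. */
	int cdclk = skl_calc_cdclk(533000, 8100000);

	printf("cdclk=%d kHz decimal=0x%x\n", cdclk, skl_cdclk_decimal(cdclk));
	return 0;
}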
@@ -5592,14 +5633,14 @@ skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco)
5592 * 8100 while the eDP 1.4 alternate link rates need a VCO of 8640. 5633 * 8100 while the eDP 1.4 alternate link rates need a VCO of 8640.
5593 * The modeset code is responsible for the selection of the exact link 5634 * The modeset code is responsible for the selection of the exact link
5594 * rate later on, with the constraint of choosing a frequency that 5635 * rate later on, with the constraint of choosing a frequency that
5595 * works with required_vco. 5636 * works with vco.
5596 */ 5637 */
5597 val = I915_READ(DPLL_CTRL1); 5638 val = I915_READ(DPLL_CTRL1);
5598 5639
5599 val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) | 5640 val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) |
5600 DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)); 5641 DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
5601 val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0); 5642 val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
5602 if (required_vco == 8640) 5643 if (vco == 8640000)
5603 val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 5644 val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
5604 SKL_DPLL0); 5645 SKL_DPLL0);
5605 else 5646 else
@@ -5611,8 +5652,27 @@ skl_dpll0_enable(struct drm_i915_private *dev_priv, unsigned int required_vco)
5611 5652
5612 I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE); 5653 I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);
5613 5654
5614 if (wait_for(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK, 5)) 5655 if (intel_wait_for_register(dev_priv,
5656 LCPLL1_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
5657 5))
5615 DRM_ERROR("DPLL0 not locked\n"); 5658 DRM_ERROR("DPLL0 not locked\n");
5659
5660 dev_priv->cdclk_pll.vco = vco;
5661
5662 /* We'll want to keep using the current vco from now on. */
5663 skl_set_preferred_cdclk_vco(dev_priv, vco);
5664}
5665
5666static void
5667skl_dpll0_disable(struct drm_i915_private *dev_priv)
5668{
5669 I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
5670 if (intel_wait_for_register(dev_priv,
5671 LCPLL1_CTL, LCPLL_PLL_LOCK, 0,
5672 1))
5673 DRM_ERROR("Couldn't disable DPLL0\n");
5674
5675 dev_priv->cdclk_pll.vco = 0;
5616} 5676}
5617 5677
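For reference, a minimal standalone sketch of the relationship the new skl_dpll0_enable() relies on: each DPLL0 VCO implies a minimum cdclk (what skl_calc_cdclk(0, vco) is expected to return) and a link rate selection (810 for the 8100 MHz VCO, 1080 for 8640 MHz). The decimal-field encoding below is an assumption for illustration, not taken from this diff.

#include <stdio.h>

/* Assumed encoding of the CDCLK_CTL decimal field: cdclk in 0.5 MHz units,
 * offset by 1 MHz, rounded to nearest. Illustrative only. */
static int skl_cdclk_decimal_sketch(int cdclk_khz)
{
	return (cdclk_khz - 1000 + 250) / 500;
}

/* Minimum cdclk usable with each DPLL0 VCO, per the values in the hunk above. */
static int skl_min_cdclk_for_vco_sketch(int vco_khz)
{
	return vco_khz == 8640000 ? 308571 : 337500;
}

int main(void)
{
	const int vcos[] = { 8100000, 8640000 };

	for (int i = 0; i < 2; i++) {
		int cdclk = skl_min_cdclk_for_vco_sketch(vcos[i]);

		printf("vco %7d kHz -> min cdclk %6d kHz, decimal 0x%03x, link rate %s\n",
		       vcos[i], cdclk, skl_cdclk_decimal_sketch(cdclk),
		       vcos[i] == 8640000 ? "1080" : "810");
	}
	return 0;
}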
5618static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv) 5678static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv)
@@ -5642,12 +5702,14 @@ static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
5642 return false; 5702 return false;
5643} 5703}
5644 5704
5645static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq) 5705static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco)
5646{ 5706{
5647 struct drm_device *dev = dev_priv->dev; 5707 struct drm_device *dev = &dev_priv->drm;
5648 u32 freq_select, pcu_ack; 5708 u32 freq_select, pcu_ack;
5649 5709
5650 DRM_DEBUG_DRIVER("Changing CDCLK to %dKHz\n", freq); 5710 WARN_ON((cdclk == 24000) != (vco == 0));
5711
5712 DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco);
5651 5713
5652 if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) { 5714 if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) {
5653 DRM_ERROR("failed to inform PCU about cdclk change\n"); 5715 DRM_ERROR("failed to inform PCU about cdclk change\n");
@@ -5655,7 +5717,7 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
5655 } 5717 }
5656 5718
5657 /* set CDCLK_CTL */ 5719 /* set CDCLK_CTL */
5658 switch(freq) { 5720 switch (cdclk) {
5659 case 450000: 5721 case 450000:
5660 case 432000: 5722 case 432000:
5661 freq_select = CDCLK_FREQ_450_432; 5723 freq_select = CDCLK_FREQ_450_432;
@@ -5665,20 +5727,27 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
5665 freq_select = CDCLK_FREQ_540; 5727 freq_select = CDCLK_FREQ_540;
5666 pcu_ack = 2; 5728 pcu_ack = 2;
5667 break; 5729 break;
5668 case 308570: 5730 case 308571:
5669 case 337500: 5731 case 337500:
5670 default: 5732 default:
5671 freq_select = CDCLK_FREQ_337_308; 5733 freq_select = CDCLK_FREQ_337_308;
5672 pcu_ack = 0; 5734 pcu_ack = 0;
5673 break; 5735 break;
5674 case 617140: 5736 case 617143:
5675 case 675000: 5737 case 675000:
5676 freq_select = CDCLK_FREQ_675_617; 5738 freq_select = CDCLK_FREQ_675_617;
5677 pcu_ack = 3; 5739 pcu_ack = 3;
5678 break; 5740 break;
5679 } 5741 }
5680 5742
5681 I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(freq)); 5743 if (dev_priv->cdclk_pll.vco != 0 &&
5744 dev_priv->cdclk_pll.vco != vco)
5745 skl_dpll0_disable(dev_priv);
5746
5747 if (dev_priv->cdclk_pll.vco != vco)
5748 skl_dpll0_enable(dev_priv, vco);
5749
5750 I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(cdclk));
5682 POSTING_READ(CDCLK_CTL); 5751 POSTING_READ(CDCLK_CTL);
5683 5752
5684 /* inform PCU of the change */ 5753 /* inform PCU of the change */
@@ -5689,52 +5758,41 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, unsigned int freq)
5689 intel_update_cdclk(dev); 5758 intel_update_cdclk(dev);
5690} 5759}
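A standalone restatement of the cdclk to frequency-select / PCU-ack mapping used by skl_set_cdclk() above. The string names stand in for the real register constants, and the pcu_ack value of 1 for the 450/432 MHz case comes from context outside this hunk, so treat it as an assumption.

#include <stdio.h>

struct skl_cdclk_sel {
	int cdclk_khz;
	const char *freq_select;	/* placeholder for the CDCLK_FREQ_* constant */
	int pcu_ack;
};

static const struct skl_cdclk_sel skl_cdclk_table[] = {
	{ 450000, "CDCLK_FREQ_450_432", 1 },
	{ 432000, "CDCLK_FREQ_450_432", 1 },
	{ 540000, "CDCLK_FREQ_540",     2 },
	{ 308571, "CDCLK_FREQ_337_308", 0 },
	{ 337500, "CDCLK_FREQ_337_308", 0 },
	{ 617143, "CDCLK_FREQ_675_617", 3 },
	{ 675000, "CDCLK_FREQ_675_617", 3 },
};

int main(void)
{
	for (unsigned int i = 0; i < sizeof(skl_cdclk_table) / sizeof(skl_cdclk_table[0]); i++)
		printf("%6d kHz -> %s, pcu_ack=%d\n",
		       skl_cdclk_table[i].cdclk_khz,
		       skl_cdclk_table[i].freq_select,
		       skl_cdclk_table[i].pcu_ack);
	return 0;
}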
5691 5760
5761static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv);
5762
5692void skl_uninit_cdclk(struct drm_i915_private *dev_priv) 5763void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
5693{ 5764{
5694 /* disable DBUF power */ 5765 skl_set_cdclk(dev_priv, dev_priv->cdclk_pll.ref, 0);
5695 I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
5696 POSTING_READ(DBUF_CTL);
5697
5698 udelay(10);
5699
5700 if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
5701 DRM_ERROR("DBuf power disable timeout\n");
5702
5703 /* disable DPLL0 */
5704 I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
5705 if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1))
5706 DRM_ERROR("Couldn't disable DPLL0\n");
5707} 5766}
5708 5767
5709void skl_init_cdclk(struct drm_i915_private *dev_priv) 5768void skl_init_cdclk(struct drm_i915_private *dev_priv)
5710{ 5769{
5711 unsigned int required_vco; 5770 int cdclk, vco;
5712 5771
5713 /* DPLL0 not enabled (happens on early BIOS versions) */ 5772 skl_sanitize_cdclk(dev_priv);
5714 if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) {
5715 /* enable DPLL0 */
5716 required_vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk);
5717 skl_dpll0_enable(dev_priv, required_vco);
5718 }
5719 5773
5720 /* set CDCLK to the frequency the BIOS chose */ 5774 if (dev_priv->cdclk_freq != 0 && dev_priv->cdclk_pll.vco != 0) {
5721 skl_set_cdclk(dev_priv, dev_priv->skl_boot_cdclk); 5775 /*
5722 5776 * Use the current vco as our initial
5723 /* enable DBUF power */ 5777 * guess as to what the preferred vco is.
5724 I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST); 5778 */
5725 POSTING_READ(DBUF_CTL); 5779 if (dev_priv->skl_preferred_vco_freq == 0)
5780 skl_set_preferred_cdclk_vco(dev_priv,
5781 dev_priv->cdclk_pll.vco);
5782 return;
5783 }
5726 5784
5727 udelay(10); 5785 vco = dev_priv->skl_preferred_vco_freq;
5786 if (vco == 0)
5787 vco = 8100000;
5788 cdclk = skl_calc_cdclk(0, vco);
5728 5789
5729 if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE)) 5790 skl_set_cdclk(dev_priv, cdclk, vco);
5730 DRM_ERROR("DBuf power enable timeout\n");
5731} 5791}
5732 5792
5733int skl_sanitize_cdclk(struct drm_i915_private *dev_priv) 5793static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
5734{ 5794{
5735 uint32_t lcpll1 = I915_READ(LCPLL1_CTL); 5795 uint32_t cdctl, expected;
5736 uint32_t cdctl = I915_READ(CDCLK_CTL);
5737 int freq = dev_priv->skl_boot_cdclk;
5738 5796
5739 /* 5797 /*
5740	 * check if the pre-os initialized the display 5798
@@ -5744,8 +5802,10 @@ int skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
5744 if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0) 5802 if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
5745 goto sanitize; 5803 goto sanitize;
5746 5804
5805 intel_update_cdclk(&dev_priv->drm);
5747 /* Is PLL enabled and locked ? */ 5806 /* Is PLL enabled and locked ? */
5748 if (!((lcpll1 & LCPLL_PLL_ENABLE) && (lcpll1 & LCPLL_PLL_LOCK))) 5807 if (dev_priv->cdclk_pll.vco == 0 ||
5808 dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
5749 goto sanitize; 5809 goto sanitize;
5750 5810
5751 /* DPLL okay; verify the cdclock 5811 /* DPLL okay; verify the cdclock
@@ -5754,25 +5814,26 @@ int skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
5754 * decimal part is programmed wrong from BIOS where pre-os does not 5814 * decimal part is programmed wrong from BIOS where pre-os does not
5755 * enable display. Verify the same as well. 5815 * enable display. Verify the same as well.
5756 */ 5816 */
5757 if (cdctl == ((cdctl & CDCLK_FREQ_SEL_MASK) | skl_cdclk_decimal(freq))) 5817 cdctl = I915_READ(CDCLK_CTL);
5818 expected = (cdctl & CDCLK_FREQ_SEL_MASK) |
5819 skl_cdclk_decimal(dev_priv->cdclk_freq);
5820 if (cdctl == expected)
5758 /* All well; nothing to sanitize */ 5821 /* All well; nothing to sanitize */
5759 return false; 5822 return;
5823
5760sanitize: 5824sanitize:
5761 /* 5825 DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
5762 * As of now initialize with max cdclk till
5763 * we get dynamic cdclk support
5764 * */
5765 dev_priv->skl_boot_cdclk = dev_priv->max_cdclk_freq;
5766 skl_init_cdclk(dev_priv);
5767 5826
5768 /* we did have to sanitize */ 5827 /* force cdclk programming */
5769 return true; 5828 dev_priv->cdclk_freq = 0;
5829 /* force full PLL disable + enable */
5830 dev_priv->cdclk_pll.vco = -1;
5770} 5831}
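The sanitize path above boils down to one comparison: the decimal field programmed into CDCLK_CTL must match the cdclk frequency read back from hardware, otherwise the driver forces a full reprogram by clearing cdclk_freq and poisoning the cached VCO. A hedged sketch, with a placeholder mask and the same assumed decimal encoding as earlier:

#include <stdbool.h>
#include <stdio.h>

#define FREQ_SEL_MASK_SKETCH	0x0c000000u	/* placeholder, not the real mask */

static unsigned int cdclk_decimal_sketch(int cdclk_khz)
{
	return (unsigned int)((cdclk_khz - 1000 + 250) / 500);
}

static bool skl_cdclk_needs_sanitize_sketch(unsigned int cdctl, int cdclk_khz)
{
	unsigned int expected = (cdctl & FREQ_SEL_MASK_SKETCH) |
				cdclk_decimal_sketch(cdclk_khz);

	return cdctl != expected;
}

int main(void)
{
	/* e.g. BIOS left the decimal field zeroed while cdclk reads back 337500 kHz */
	printf("needs sanitize: %d\n",
	       skl_cdclk_needs_sanitize_sketch(0x0c000000u, 337500));
	return 0;
}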
5771 5832
5772/* Adjust CDclk dividers to allow high res or save power if possible */ 5833/* Adjust CDclk dividers to allow high res or save power if possible */
5773static void valleyview_set_cdclk(struct drm_device *dev, int cdclk) 5834static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
5774{ 5835{
5775 struct drm_i915_private *dev_priv = dev->dev_private; 5836 struct drm_i915_private *dev_priv = to_i915(dev);
5776 u32 val, cmd; 5837 u32 val, cmd;
5777 5838
5778 WARN_ON(dev_priv->display.get_display_clock_speed(dev) 5839 WARN_ON(dev_priv->display.get_display_clock_speed(dev)
@@ -5837,7 +5898,7 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
5837 5898
5838static void cherryview_set_cdclk(struct drm_device *dev, int cdclk) 5899static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
5839{ 5900{
5840 struct drm_i915_private *dev_priv = dev->dev_private; 5901 struct drm_i915_private *dev_priv = to_i915(dev);
5841 u32 val, cmd; 5902 u32 val, cmd;
5842 5903
5843 WARN_ON(dev_priv->display.get_display_clock_speed(dev) 5904 WARN_ON(dev_priv->display.get_display_clock_speed(dev)
@@ -5906,21 +5967,15 @@ static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
5906 return 200000; 5967 return 200000;
5907} 5968}
5908 5969
5909static int broxton_calc_cdclk(struct drm_i915_private *dev_priv, 5970static int bxt_calc_cdclk(int max_pixclk)
5910 int max_pixclk)
5911{ 5971{
5912 /* 5972 if (max_pixclk > 576000)
5913 * FIXME:
5914 * - remove the guardband, it's not needed on BXT
5915 * - set 19.2MHz bypass frequency if there are no active pipes
5916 */
5917 if (max_pixclk > 576000*9/10)
5918 return 624000; 5973 return 624000;
5919 else if (max_pixclk > 384000*9/10) 5974 else if (max_pixclk > 384000)
5920 return 576000; 5975 return 576000;
5921 else if (max_pixclk > 288000*9/10) 5976 else if (max_pixclk > 288000)
5922 return 384000; 5977 return 384000;
5923 else if (max_pixclk > 144000*9/10) 5978 else if (max_pixclk > 144000)
5924 return 288000; 5979 return 288000;
5925 else 5980 else
5926 return 144000; 5981 return 144000;
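The new bxt_calc_cdclk() is a pure threshold table; the 90% guardband from the removed code is gone, so the aggregate pixel clock is compared directly against each cdclk step. Restated standalone for reference:

#include <stdio.h>

static int bxt_calc_cdclk_sketch(int max_pixclk_khz)
{
	if (max_pixclk_khz > 576000)
		return 624000;
	else if (max_pixclk_khz > 384000)
		return 576000;
	else if (max_pixclk_khz > 288000)
		return 384000;
	else if (max_pixclk_khz > 144000)
		return 288000;
	else
		return 144000;
}

int main(void)
{
	const int samples[] = { 0, 148500, 297000, 594000 };

	for (int i = 0; i < 4; i++)
		printf("pixclk %6d kHz -> cdclk %6d kHz\n",
		       samples[i], bxt_calc_cdclk_sketch(samples[i]));
	return 0;
}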
@@ -5931,7 +5986,7 @@ static int intel_mode_max_pixclk(struct drm_device *dev,
5931 struct drm_atomic_state *state) 5986 struct drm_atomic_state *state)
5932{ 5987{
5933 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 5988 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
5934 struct drm_i915_private *dev_priv = dev->dev_private; 5989 struct drm_i915_private *dev_priv = to_i915(dev);
5935 struct drm_crtc *crtc; 5990 struct drm_crtc *crtc;
5936 struct drm_crtc_state *crtc_state; 5991 struct drm_crtc_state *crtc_state;
5937 unsigned max_pixclk = 0, i; 5992 unsigned max_pixclk = 0, i;
@@ -5958,14 +6013,11 @@ static int intel_mode_max_pixclk(struct drm_device *dev,
5958static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state) 6013static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
5959{ 6014{
5960 struct drm_device *dev = state->dev; 6015 struct drm_device *dev = state->dev;
5961 struct drm_i915_private *dev_priv = dev->dev_private; 6016 struct drm_i915_private *dev_priv = to_i915(dev);
5962 int max_pixclk = intel_mode_max_pixclk(dev, state); 6017 int max_pixclk = intel_mode_max_pixclk(dev, state);
5963 struct intel_atomic_state *intel_state = 6018 struct intel_atomic_state *intel_state =
5964 to_intel_atomic_state(state); 6019 to_intel_atomic_state(state);
5965 6020
5966 if (max_pixclk < 0)
5967 return max_pixclk;
5968
5969 intel_state->cdclk = intel_state->dev_cdclk = 6021 intel_state->cdclk = intel_state->dev_cdclk =
5970 valleyview_calc_cdclk(dev_priv, max_pixclk); 6022 valleyview_calc_cdclk(dev_priv, max_pixclk);
5971 6023
@@ -5975,22 +6027,17 @@ static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
5975 return 0; 6027 return 0;
5976} 6028}
5977 6029
5978static int broxton_modeset_calc_cdclk(struct drm_atomic_state *state) 6030static int bxt_modeset_calc_cdclk(struct drm_atomic_state *state)
5979{ 6031{
5980 struct drm_device *dev = state->dev; 6032 int max_pixclk = ilk_max_pixel_rate(state);
5981 struct drm_i915_private *dev_priv = dev->dev_private;
5982 int max_pixclk = intel_mode_max_pixclk(dev, state);
5983 struct intel_atomic_state *intel_state = 6033 struct intel_atomic_state *intel_state =
5984 to_intel_atomic_state(state); 6034 to_intel_atomic_state(state);
5985 6035
5986 if (max_pixclk < 0)
5987 return max_pixclk;
5988
5989 intel_state->cdclk = intel_state->dev_cdclk = 6036 intel_state->cdclk = intel_state->dev_cdclk =
5990 broxton_calc_cdclk(dev_priv, max_pixclk); 6037 bxt_calc_cdclk(max_pixclk);
5991 6038
5992 if (!intel_state->active_crtcs) 6039 if (!intel_state->active_crtcs)
5993 intel_state->dev_cdclk = broxton_calc_cdclk(dev_priv, 0); 6040 intel_state->dev_cdclk = bxt_calc_cdclk(0);
5994 6041
5995 return 0; 6042 return 0;
5996} 6043}
@@ -6034,7 +6081,7 @@ static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
6034static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state) 6081static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
6035{ 6082{
6036 struct drm_device *dev = old_state->dev; 6083 struct drm_device *dev = old_state->dev;
6037 struct drm_i915_private *dev_priv = dev->dev_private; 6084 struct drm_i915_private *dev_priv = to_i915(dev);
6038 struct intel_atomic_state *old_intel_state = 6085 struct intel_atomic_state *old_intel_state =
6039 to_intel_atomic_state(old_state); 6086 to_intel_atomic_state(old_state);
6040 unsigned req_cdclk = old_intel_state->dev_cdclk; 6087 unsigned req_cdclk = old_intel_state->dev_cdclk;
@@ -6073,14 +6120,14 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
6073 if (WARN_ON(intel_crtc->active)) 6120 if (WARN_ON(intel_crtc->active))
6074 return; 6121 return;
6075 6122
6076 if (intel_crtc->config->has_dp_encoder) 6123 if (intel_crtc_has_dp_encoder(intel_crtc->config))
6077 intel_dp_set_m_n(intel_crtc, M1_N1); 6124 intel_dp_set_m_n(intel_crtc, M1_N1);
6078 6125
6079 intel_set_pipe_timings(intel_crtc); 6126 intel_set_pipe_timings(intel_crtc);
6080 intel_set_pipe_src_size(intel_crtc); 6127 intel_set_pipe_src_size(intel_crtc);
6081 6128
6082 if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) { 6129 if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) {
6083 struct drm_i915_private *dev_priv = dev->dev_private; 6130 struct drm_i915_private *dev_priv = to_i915(dev);
6084 6131
6085 I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY); 6132 I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
6086 I915_WRITE(CHV_CANVAS(pipe), 0); 6133 I915_WRITE(CHV_CANVAS(pipe), 0);
@@ -6125,7 +6172,7 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
6125static void i9xx_set_pll_dividers(struct intel_crtc *crtc) 6172static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
6126{ 6173{
6127 struct drm_device *dev = crtc->base.dev; 6174 struct drm_device *dev = crtc->base.dev;
6128 struct drm_i915_private *dev_priv = dev->dev_private; 6175 struct drm_i915_private *dev_priv = to_i915(dev);
6129 6176
6130 I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0); 6177 I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
6131 I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1); 6178 I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
@@ -6146,7 +6193,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
6146 6193
6147 i9xx_set_pll_dividers(intel_crtc); 6194 i9xx_set_pll_dividers(intel_crtc);
6148 6195
6149 if (intel_crtc->config->has_dp_encoder) 6196 if (intel_crtc_has_dp_encoder(intel_crtc->config))
6150 intel_dp_set_m_n(intel_crtc, M1_N1); 6197 intel_dp_set_m_n(intel_crtc, M1_N1);
6151 6198
6152 intel_set_pipe_timings(intel_crtc); 6199 intel_set_pipe_timings(intel_crtc);
@@ -6182,7 +6229,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
6182static void i9xx_pfit_disable(struct intel_crtc *crtc) 6229static void i9xx_pfit_disable(struct intel_crtc *crtc)
6183{ 6230{
6184 struct drm_device *dev = crtc->base.dev; 6231 struct drm_device *dev = crtc->base.dev;
6185 struct drm_i915_private *dev_priv = dev->dev_private; 6232 struct drm_i915_private *dev_priv = to_i915(dev);
6186 6233
6187 if (!crtc->config->gmch_pfit.control) 6234 if (!crtc->config->gmch_pfit.control)
6188 return; 6235 return;
@@ -6197,7 +6244,7 @@ static void i9xx_pfit_disable(struct intel_crtc *crtc)
6197static void i9xx_crtc_disable(struct drm_crtc *crtc) 6244static void i9xx_crtc_disable(struct drm_crtc *crtc)
6198{ 6245{
6199 struct drm_device *dev = crtc->dev; 6246 struct drm_device *dev = crtc->dev;
6200 struct drm_i915_private *dev_priv = dev->dev_private; 6247 struct drm_i915_private *dev_priv = to_i915(dev);
6201 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6248 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6202 struct intel_encoder *encoder; 6249 struct intel_encoder *encoder;
6203 int pipe = intel_crtc->pipe; 6250 int pipe = intel_crtc->pipe;
@@ -6223,7 +6270,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
6223 if (encoder->post_disable) 6270 if (encoder->post_disable)
6224 encoder->post_disable(encoder); 6271 encoder->post_disable(encoder);
6225 6272
6226 if (!intel_crtc->config->has_dsi_encoder) { 6273 if (!intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_DSI)) {
6227 if (IS_CHERRYVIEW(dev)) 6274 if (IS_CHERRYVIEW(dev))
6228 chv_disable_pll(dev_priv, pipe); 6275 chv_disable_pll(dev_priv, pipe);
6229 else if (IS_VALLEYVIEW(dev)) 6276 else if (IS_VALLEYVIEW(dev))
@@ -6252,7 +6299,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
6252 return; 6299 return;
6253 6300
6254 if (to_intel_plane_state(crtc->primary->state)->visible) { 6301 if (to_intel_plane_state(crtc->primary->state)->visible) {
6255 WARN_ON(intel_crtc->unpin_work); 6302 WARN_ON(intel_crtc->flip_work);
6256 6303
6257 intel_pre_disable_primary_noatomic(crtc); 6304 intel_pre_disable_primary_noatomic(crtc);
6258 6305
@@ -6262,8 +6309,8 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
6262 6309
6263 dev_priv->display.crtc_disable(crtc); 6310 dev_priv->display.crtc_disable(crtc);
6264 6311
6265 DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was enabled, now disabled\n", 6312 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
6266 crtc->base.id); 6313 crtc->base.id, crtc->name);
6267 6314
6268 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0); 6315 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
6269 crtc->state->active = false; 6316 crtc->state->active = false;
@@ -6541,7 +6588,7 @@ static void hsw_compute_ips_config(struct intel_crtc *crtc,
6541 struct intel_crtc_state *pipe_config) 6588 struct intel_crtc_state *pipe_config)
6542{ 6589{
6543 struct drm_device *dev = crtc->base.dev; 6590 struct drm_device *dev = crtc->base.dev;
6544 struct drm_i915_private *dev_priv = dev->dev_private; 6591 struct drm_i915_private *dev_priv = to_i915(dev);
6545 6592
6546 pipe_config->ips_enabled = i915.enable_ips && 6593 pipe_config->ips_enabled = i915.enable_ips &&
6547 hsw_crtc_supports_ips(crtc) && 6594 hsw_crtc_supports_ips(crtc) &&
@@ -6561,12 +6608,12 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
6561 struct intel_crtc_state *pipe_config) 6608 struct intel_crtc_state *pipe_config)
6562{ 6609{
6563 struct drm_device *dev = crtc->base.dev; 6610 struct drm_device *dev = crtc->base.dev;
6564 struct drm_i915_private *dev_priv = dev->dev_private; 6611 struct drm_i915_private *dev_priv = to_i915(dev);
6565 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 6612 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
6613 int clock_limit = dev_priv->max_dotclk_freq;
6566 6614
6567 /* FIXME should check pixel clock limits on all platforms */
6568 if (INTEL_INFO(dev)->gen < 4) { 6615 if (INTEL_INFO(dev)->gen < 4) {
6569 int clock_limit = dev_priv->max_cdclk_freq * 9 / 10; 6616 clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
6570 6617
6571 /* 6618 /*
6572 * Enable double wide mode when the dot clock 6619 * Enable double wide mode when the dot clock
@@ -6574,16 +6621,16 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
6574 */ 6621 */
6575 if (intel_crtc_supports_double_wide(crtc) && 6622 if (intel_crtc_supports_double_wide(crtc) &&
6576 adjusted_mode->crtc_clock > clock_limit) { 6623 adjusted_mode->crtc_clock > clock_limit) {
6577 clock_limit *= 2; 6624 clock_limit = dev_priv->max_dotclk_freq;
6578 pipe_config->double_wide = true; 6625 pipe_config->double_wide = true;
6579 } 6626 }
6627 }
6580 6628
6581 if (adjusted_mode->crtc_clock > clock_limit) { 6629 if (adjusted_mode->crtc_clock > clock_limit) {
6582 DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n", 6630 DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
6583 adjusted_mode->crtc_clock, clock_limit, 6631 adjusted_mode->crtc_clock, clock_limit,
6584 yesno(pipe_config->double_wide)); 6632 yesno(pipe_config->double_wide));
6585 return -EINVAL; 6633 return -EINVAL;
6586 }
6587 } 6634 }
6588 6635
6589 /* 6636 /*
@@ -6592,7 +6639,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
6592 * - LVDS dual channel mode 6639 * - LVDS dual channel mode
6593 * - Double wide pipe 6640 * - Double wide pipe
6594 */ 6641 */
6595 if ((intel_pipe_will_have_type(pipe_config, INTEL_OUTPUT_LVDS) && 6642 if ((intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
6596 intel_is_dual_link_lvds(dev)) || pipe_config->double_wide) 6643 intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
6597 pipe_config->pipe_src_w &= ~1; 6644 pipe_config->pipe_src_w &= ~1;
6598 6645
@@ -6615,81 +6662,103 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
6615static int skylake_get_display_clock_speed(struct drm_device *dev) 6662static int skylake_get_display_clock_speed(struct drm_device *dev)
6616{ 6663{
6617 struct drm_i915_private *dev_priv = to_i915(dev); 6664 struct drm_i915_private *dev_priv = to_i915(dev);
6618 uint32_t lcpll1 = I915_READ(LCPLL1_CTL); 6665 uint32_t cdctl;
6619 uint32_t cdctl = I915_READ(CDCLK_CTL);
6620 uint32_t linkrate;
6621 6666
6622 if (!(lcpll1 & LCPLL_PLL_ENABLE)) 6667 skl_dpll0_update(dev_priv);
6623 return 24000; /* 24MHz is the cd freq with NSSC ref */
6624 6668
6625 if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540) 6669 if (dev_priv->cdclk_pll.vco == 0)
6626 return 540000; 6670 return dev_priv->cdclk_pll.ref;
6627 6671
6628 linkrate = (I915_READ(DPLL_CTRL1) & 6672 cdctl = I915_READ(CDCLK_CTL);
6629 DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) >> 1;
6630 6673
6631 if (linkrate == DPLL_CTRL1_LINK_RATE_2160 || 6674 if (dev_priv->cdclk_pll.vco == 8640000) {
6632 linkrate == DPLL_CTRL1_LINK_RATE_1080) {
6633 /* vco 8640 */
6634 switch (cdctl & CDCLK_FREQ_SEL_MASK) { 6675 switch (cdctl & CDCLK_FREQ_SEL_MASK) {
6635 case CDCLK_FREQ_450_432: 6676 case CDCLK_FREQ_450_432:
6636 return 432000; 6677 return 432000;
6637 case CDCLK_FREQ_337_308: 6678 case CDCLK_FREQ_337_308:
6638 return 308570; 6679 return 308571;
6680 case CDCLK_FREQ_540:
6681 return 540000;
6639 case CDCLK_FREQ_675_617: 6682 case CDCLK_FREQ_675_617:
6640 return 617140; 6683 return 617143;
6641 default: 6684 default:
6642 WARN(1, "Unknown cd freq selection\n"); 6685 MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
6643 } 6686 }
6644 } else { 6687 } else {
6645 /* vco 8100 */
6646 switch (cdctl & CDCLK_FREQ_SEL_MASK) { 6688 switch (cdctl & CDCLK_FREQ_SEL_MASK) {
6647 case CDCLK_FREQ_450_432: 6689 case CDCLK_FREQ_450_432:
6648 return 450000; 6690 return 450000;
6649 case CDCLK_FREQ_337_308: 6691 case CDCLK_FREQ_337_308:
6650 return 337500; 6692 return 337500;
6693 case CDCLK_FREQ_540:
6694 return 540000;
6651 case CDCLK_FREQ_675_617: 6695 case CDCLK_FREQ_675_617:
6652 return 675000; 6696 return 675000;
6653 default: 6697 default:
6654 WARN(1, "Unknown cd freq selection\n"); 6698 MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
6655 } 6699 }
6656 } 6700 }
6657 6701
6658 /* error case, do as if DPLL0 isn't enabled */ 6702 return dev_priv->cdclk_pll.ref;
6659 return 24000; 6703}
6704
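The reworked skylake_get_display_clock_speed() consults the cached DPLL0 state first and returns the reference clock when the PLL is off, instead of a hard-coded 24000; with the PLL running, the CDCLK_CTL select field decodes to VCO-dependent frequencies. A standalone restatement of that decode table:

#include <stdio.h>

enum skl_freq_sel_sketch { SEL_450_432, SEL_540, SEL_337_308, SEL_675_617 };

static int skl_decode_cdclk_sketch(int vco_khz, enum skl_freq_sel_sketch sel)
{
	if (vco_khz == 8640000) {
		switch (sel) {
		case SEL_450_432: return 432000;
		case SEL_540:     return 540000;
		case SEL_337_308: return 308571;
		case SEL_675_617: return 617143;
		}
	} else { /* 8100000 */
		switch (sel) {
		case SEL_450_432: return 450000;
		case SEL_540:     return 540000;
		case SEL_337_308: return 337500;
		case SEL_675_617: return 675000;
		}
	}
	return 0;
}

int main(void)
{
	printf("8640 MHz VCO, 675/617 select -> %d kHz\n",
	       skl_decode_cdclk_sketch(8640000, SEL_675_617));
	printf("8100 MHz VCO, 337/308 select -> %d kHz\n",
	       skl_decode_cdclk_sketch(8100000, SEL_337_308));
	return 0;
}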
6705static void bxt_de_pll_update(struct drm_i915_private *dev_priv)
6706{
6707 u32 val;
6708
6709 dev_priv->cdclk_pll.ref = 19200;
6710 dev_priv->cdclk_pll.vco = 0;
6711
6712 val = I915_READ(BXT_DE_PLL_ENABLE);
6713 if ((val & BXT_DE_PLL_PLL_ENABLE) == 0)
6714 return;
6715
6716 if (WARN_ON((val & BXT_DE_PLL_LOCK) == 0))
6717 return;
6718
6719 val = I915_READ(BXT_DE_PLL_CTL);
6720 dev_priv->cdclk_pll.vco = (val & BXT_DE_PLL_RATIO_MASK) *
6721 dev_priv->cdclk_pll.ref;
6660} 6722}
6661 6723
6662static int broxton_get_display_clock_speed(struct drm_device *dev) 6724static int broxton_get_display_clock_speed(struct drm_device *dev)
6663{ 6725{
6664 struct drm_i915_private *dev_priv = to_i915(dev); 6726 struct drm_i915_private *dev_priv = to_i915(dev);
6665 uint32_t cdctl = I915_READ(CDCLK_CTL); 6727 u32 divider;
6666 uint32_t pll_ratio = I915_READ(BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK; 6728 int div, vco;
6667 uint32_t pll_enab = I915_READ(BXT_DE_PLL_ENABLE); 6729
6668 int cdclk; 6730 bxt_de_pll_update(dev_priv);
6669 6731
6670 if (!(pll_enab & BXT_DE_PLL_PLL_ENABLE)) 6732 vco = dev_priv->cdclk_pll.vco;
6671 return 19200; 6733 if (vco == 0)
6734 return dev_priv->cdclk_pll.ref;
6672 6735
6673 cdclk = 19200 * pll_ratio / 2; 6736 divider = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK;
6674 6737
6675 switch (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) { 6738 switch (divider) {
6676 case BXT_CDCLK_CD2X_DIV_SEL_1: 6739 case BXT_CDCLK_CD2X_DIV_SEL_1:
6677 return cdclk; /* 576MHz or 624MHz */ 6740 div = 2;
6741 break;
6678 case BXT_CDCLK_CD2X_DIV_SEL_1_5: 6742 case BXT_CDCLK_CD2X_DIV_SEL_1_5:
6679 return cdclk * 2 / 3; /* 384MHz */ 6743 div = 3;
6744 break;
6680 case BXT_CDCLK_CD2X_DIV_SEL_2: 6745 case BXT_CDCLK_CD2X_DIV_SEL_2:
6681 return cdclk / 2; /* 288MHz */ 6746 div = 4;
6747 break;
6682 case BXT_CDCLK_CD2X_DIV_SEL_4: 6748 case BXT_CDCLK_CD2X_DIV_SEL_4:
6683 return cdclk / 4; /* 144MHz */ 6749 div = 8;
6750 break;
6751 default:
6752 MISSING_CASE(divider);
6753 return dev_priv->cdclk_pll.ref;
6684 } 6754 }
6685 6755
6686 /* error case, do as if DE PLL isn't enabled */ 6756 return DIV_ROUND_CLOSEST(vco, div);
6687 return 19200;
6688} 6757}
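On BXT the readback now goes through the DE PLL: the VCO is the programmed ratio times the 19.2 MHz reference, and cdclk is the VCO divided by the CD2X divider (divide-by-1/1.5/2/4 maps to div = 2, 3, 4, 8 because the field divides 2*cdclk). A small sketch; the example ratios are illustrative rather than taken from hardware:

#include <stdio.h>

static int bxt_decode_cdclk_sketch(int ratio, int div)
{
	int vco_khz = ratio * 19200;

	/* DIV_ROUND_CLOSEST(vco, div) */
	return (vco_khz + div / 2) / div;
}

int main(void)
{
	/* e.g. ratio 60 -> 1152 MHz VCO; CD2X divide-by-1 means div = 2 */
	printf("ratio 60, div 2 -> %d kHz\n", bxt_decode_cdclk_sketch(60, 2));
	printf("ratio 65, div 2 -> %d kHz\n", bxt_decode_cdclk_sketch(65, 2));
	return 0;
}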
6689 6758
6690static int broadwell_get_display_clock_speed(struct drm_device *dev) 6759static int broadwell_get_display_clock_speed(struct drm_device *dev)
6691{ 6760{
6692 struct drm_i915_private *dev_priv = dev->dev_private; 6761 struct drm_i915_private *dev_priv = to_i915(dev);
6693 uint32_t lcpll = I915_READ(LCPLL_CTL); 6762 uint32_t lcpll = I915_READ(LCPLL_CTL);
6694 uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK; 6763 uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
6695 6764
@@ -6709,7 +6778,7 @@ static int broadwell_get_display_clock_speed(struct drm_device *dev)
6709 6778
6710static int haswell_get_display_clock_speed(struct drm_device *dev) 6779static int haswell_get_display_clock_speed(struct drm_device *dev)
6711{ 6780{
6712 struct drm_i915_private *dev_priv = dev->dev_private; 6781 struct drm_i915_private *dev_priv = to_i915(dev);
6713 uint32_t lcpll = I915_READ(LCPLL_CTL); 6782 uint32_t lcpll = I915_READ(LCPLL_CTL);
6714 uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK; 6783 uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
6715 6784
@@ -6843,7 +6912,7 @@ static int i830_get_display_clock_speed(struct drm_device *dev)
6843 6912
6844static unsigned int intel_hpll_vco(struct drm_device *dev) 6913static unsigned int intel_hpll_vco(struct drm_device *dev)
6845{ 6914{
6846 struct drm_i915_private *dev_priv = dev->dev_private; 6915 struct drm_i915_private *dev_priv = to_i915(dev);
6847 static const unsigned int blb_vco[8] = { 6916 static const unsigned int blb_vco[8] = {
6848 [0] = 3200000, 6917 [0] = 3200000,
6849 [1] = 4000000, 6918 [1] = 4000000,
@@ -7063,7 +7132,7 @@ static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
7063 7132
7064static void i9xx_update_pll_dividers(struct intel_crtc *crtc, 7133static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
7065 struct intel_crtc_state *crtc_state, 7134 struct intel_crtc_state *crtc_state,
7066 intel_clock_t *reduced_clock) 7135 struct dpll *reduced_clock)
7067{ 7136{
7068 struct drm_device *dev = crtc->base.dev; 7137 struct drm_device *dev = crtc->base.dev;
7069 u32 fp, fp2 = 0; 7138 u32 fp, fp2 = 0;
@@ -7081,7 +7150,7 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
7081 crtc_state->dpll_hw_state.fp0 = fp; 7150 crtc_state->dpll_hw_state.fp0 = fp;
7082 7151
7083 crtc->lowfreq_avail = false; 7152 crtc->lowfreq_avail = false;
7084 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) && 7153 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7085 reduced_clock) { 7154 reduced_clock) {
7086 crtc_state->dpll_hw_state.fp1 = fp2; 7155 crtc_state->dpll_hw_state.fp1 = fp2;
7087 crtc->lowfreq_avail = true; 7156 crtc->lowfreq_avail = true;
@@ -7123,7 +7192,7 @@ static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
7123 struct intel_link_m_n *m_n) 7192 struct intel_link_m_n *m_n)
7124{ 7193{
7125 struct drm_device *dev = crtc->base.dev; 7194 struct drm_device *dev = crtc->base.dev;
7126 struct drm_i915_private *dev_priv = dev->dev_private; 7195 struct drm_i915_private *dev_priv = to_i915(dev);
7127 int pipe = crtc->pipe; 7196 int pipe = crtc->pipe;
7128 7197
7129 I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m); 7198 I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
@@ -7137,7 +7206,7 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
7137 struct intel_link_m_n *m2_n2) 7206 struct intel_link_m_n *m2_n2)
7138{ 7207{
7139 struct drm_device *dev = crtc->base.dev; 7208 struct drm_device *dev = crtc->base.dev;
7140 struct drm_i915_private *dev_priv = dev->dev_private; 7209 struct drm_i915_private *dev_priv = to_i915(dev);
7141 int pipe = crtc->pipe; 7210 int pipe = crtc->pipe;
7142 enum transcoder transcoder = crtc->config->cpu_transcoder; 7211 enum transcoder transcoder = crtc->config->cpu_transcoder;
7143 7212
@@ -7200,7 +7269,7 @@ static void vlv_compute_dpll(struct intel_crtc *crtc,
7200 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; 7269 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7201 7270
7202 /* DPLL not used with DSI, but still need the rest set up */ 7271 /* DPLL not used with DSI, but still need the rest set up */
7203 if (!pipe_config->has_dsi_encoder) 7272 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
7204 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE | 7273 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
7205 DPLL_EXT_BUFFER_ENABLE_VLV; 7274 DPLL_EXT_BUFFER_ENABLE_VLV;
7206 7275
@@ -7217,7 +7286,7 @@ static void chv_compute_dpll(struct intel_crtc *crtc,
7217 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; 7286 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7218 7287
7219 /* DPLL not used with DSI, but still need the rest set up */ 7288 /* DPLL not used with DSI, but still need the rest set up */
7220 if (!pipe_config->has_dsi_encoder) 7289 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
7221 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE; 7290 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
7222 7291
7223 pipe_config->dpll_hw_state.dpll_md = 7292 pipe_config->dpll_hw_state.dpll_md =
@@ -7228,7 +7297,7 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
7228 const struct intel_crtc_state *pipe_config) 7297 const struct intel_crtc_state *pipe_config)
7229{ 7298{
7230 struct drm_device *dev = crtc->base.dev; 7299 struct drm_device *dev = crtc->base.dev;
7231 struct drm_i915_private *dev_priv = dev->dev_private; 7300 struct drm_i915_private *dev_priv = to_i915(dev);
7232 enum pipe pipe = crtc->pipe; 7301 enum pipe pipe = crtc->pipe;
7233 u32 mdiv; 7302 u32 mdiv;
7234 u32 bestn, bestm1, bestm2, bestp1, bestp2; 7303 u32 bestn, bestm1, bestm2, bestp1, bestp2;
@@ -7287,15 +7356,15 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
7287 7356
7288 /* Set HBR and RBR LPF coefficients */ 7357 /* Set HBR and RBR LPF coefficients */
7289 if (pipe_config->port_clock == 162000 || 7358 if (pipe_config->port_clock == 162000 ||
7290 intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG) || 7359 intel_crtc_has_type(crtc->config, INTEL_OUTPUT_ANALOG) ||
7291 intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) 7360 intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI))
7292 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 7361 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
7293 0x009f0003); 7362 0x009f0003);
7294 else 7363 else
7295 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 7364 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
7296 0x00d0000f); 7365 0x00d0000f);
7297 7366
7298 if (pipe_config->has_dp_encoder) { 7367 if (intel_crtc_has_dp_encoder(pipe_config)) {
7299 /* Use SSC source */ 7368 /* Use SSC source */
7300 if (pipe == PIPE_A) 7369 if (pipe == PIPE_A)
7301 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 7370 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
@@ -7315,8 +7384,7 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
7315 7384
7316 coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe)); 7385 coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
7317 coreclk = (coreclk & 0x0000ff00) | 0x01c00000; 7386 coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
7318 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || 7387 if (intel_crtc_has_dp_encoder(crtc->config))
7319 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
7320 coreclk |= 0x01000000; 7388 coreclk |= 0x01000000;
7321 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk); 7389 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
7322 7390
@@ -7328,7 +7396,7 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
7328 const struct intel_crtc_state *pipe_config) 7396 const struct intel_crtc_state *pipe_config)
7329{ 7397{
7330 struct drm_device *dev = crtc->base.dev; 7398 struct drm_device *dev = crtc->base.dev;
7331 struct drm_i915_private *dev_priv = dev->dev_private; 7399 struct drm_i915_private *dev_priv = to_i915(dev);
7332 enum pipe pipe = crtc->pipe; 7400 enum pipe pipe = crtc->pipe;
7333 enum dpio_channel port = vlv_pipe_to_channel(pipe); 7401 enum dpio_channel port = vlv_pipe_to_channel(pipe);
7334 u32 loopfilter, tribuf_calcntr; 7402 u32 loopfilter, tribuf_calcntr;
@@ -7487,22 +7555,18 @@ void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe)
7487 7555
7488static void i9xx_compute_dpll(struct intel_crtc *crtc, 7556static void i9xx_compute_dpll(struct intel_crtc *crtc,
7489 struct intel_crtc_state *crtc_state, 7557 struct intel_crtc_state *crtc_state,
7490 intel_clock_t *reduced_clock) 7558 struct dpll *reduced_clock)
7491{ 7559{
7492 struct drm_device *dev = crtc->base.dev; 7560 struct drm_device *dev = crtc->base.dev;
7493 struct drm_i915_private *dev_priv = dev->dev_private; 7561 struct drm_i915_private *dev_priv = to_i915(dev);
7494 u32 dpll; 7562 u32 dpll;
7495 bool is_sdvo;
7496 struct dpll *clock = &crtc_state->dpll; 7563 struct dpll *clock = &crtc_state->dpll;
7497 7564
7498 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock); 7565 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
7499 7566
7500 is_sdvo = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO) ||
7501 intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI);
7502
7503 dpll = DPLL_VGA_MODE_DIS; 7567 dpll = DPLL_VGA_MODE_DIS;
7504 7568
7505 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) 7569 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
7506 dpll |= DPLLB_MODE_LVDS; 7570 dpll |= DPLLB_MODE_LVDS;
7507 else 7571 else
7508 dpll |= DPLLB_MODE_DAC_SERIAL; 7572 dpll |= DPLLB_MODE_DAC_SERIAL;
@@ -7512,10 +7576,11 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
7512 << SDVO_MULTIPLIER_SHIFT_HIRES; 7576 << SDVO_MULTIPLIER_SHIFT_HIRES;
7513 } 7577 }
7514 7578
7515 if (is_sdvo) 7579 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
7580 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
7516 dpll |= DPLL_SDVO_HIGH_SPEED; 7581 dpll |= DPLL_SDVO_HIGH_SPEED;
7517 7582
7518 if (crtc_state->has_dp_encoder) 7583 if (intel_crtc_has_dp_encoder(crtc_state))
7519 dpll |= DPLL_SDVO_HIGH_SPEED; 7584 dpll |= DPLL_SDVO_HIGH_SPEED;
7520 7585
7521 /* compute bitmask from p1 value */ 7586 /* compute bitmask from p1 value */
@@ -7545,7 +7610,7 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
7545 7610
7546 if (crtc_state->sdvo_tv_clock) 7611 if (crtc_state->sdvo_tv_clock)
7547 dpll |= PLL_REF_INPUT_TVCLKINBC; 7612 dpll |= PLL_REF_INPUT_TVCLKINBC;
7548 else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) && 7613 else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7549 intel_panel_use_ssc(dev_priv)) 7614 intel_panel_use_ssc(dev_priv))
7550 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 7615 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7551 else 7616 else
@@ -7563,10 +7628,10 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
7563 7628
7564static void i8xx_compute_dpll(struct intel_crtc *crtc, 7629static void i8xx_compute_dpll(struct intel_crtc *crtc,
7565 struct intel_crtc_state *crtc_state, 7630 struct intel_crtc_state *crtc_state,
7566 intel_clock_t *reduced_clock) 7631 struct dpll *reduced_clock)
7567{ 7632{
7568 struct drm_device *dev = crtc->base.dev; 7633 struct drm_device *dev = crtc->base.dev;
7569 struct drm_i915_private *dev_priv = dev->dev_private; 7634 struct drm_i915_private *dev_priv = to_i915(dev);
7570 u32 dpll; 7635 u32 dpll;
7571 struct dpll *clock = &crtc_state->dpll; 7636 struct dpll *clock = &crtc_state->dpll;
7572 7637
@@ -7574,7 +7639,7 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
7574 7639
7575 dpll = DPLL_VGA_MODE_DIS; 7640 dpll = DPLL_VGA_MODE_DIS;
7576 7641
7577 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { 7642 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7578 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 7643 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7579 } else { 7644 } else {
7580 if (clock->p1 == 2) 7645 if (clock->p1 == 2)
@@ -7585,10 +7650,10 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
7585 dpll |= PLL_P2_DIVIDE_BY_4; 7650 dpll |= PLL_P2_DIVIDE_BY_4;
7586 } 7651 }
7587 7652
7588 if (!IS_I830(dev) && intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO)) 7653 if (!IS_I830(dev) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
7589 dpll |= DPLL_DVO_2X_MODE; 7654 dpll |= DPLL_DVO_2X_MODE;
7590 7655
7591 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) && 7656 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7592 intel_panel_use_ssc(dev_priv)) 7657 intel_panel_use_ssc(dev_priv))
7593 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 7658 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7594 else 7659 else
@@ -7601,7 +7666,7 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
7601static void intel_set_pipe_timings(struct intel_crtc *intel_crtc) 7666static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
7602{ 7667{
7603 struct drm_device *dev = intel_crtc->base.dev; 7668 struct drm_device *dev = intel_crtc->base.dev;
7604 struct drm_i915_private *dev_priv = dev->dev_private; 7669 struct drm_i915_private *dev_priv = to_i915(dev);
7605 enum pipe pipe = intel_crtc->pipe; 7670 enum pipe pipe = intel_crtc->pipe;
7606 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 7671 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
7607 const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; 7672 const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
@@ -7618,7 +7683,7 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
7618 crtc_vtotal -= 1; 7683 crtc_vtotal -= 1;
7619 crtc_vblank_end -= 1; 7684 crtc_vblank_end -= 1;
7620 7685
7621 if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO)) 7686 if (intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
7622 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2; 7687 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
7623 else 7688 else
7624 vsyncshift = adjusted_mode->crtc_hsync_start - 7689 vsyncshift = adjusted_mode->crtc_hsync_start -
@@ -7663,7 +7728,7 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
7663static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc) 7728static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc)
7664{ 7729{
7665 struct drm_device *dev = intel_crtc->base.dev; 7730 struct drm_device *dev = intel_crtc->base.dev;
7666 struct drm_i915_private *dev_priv = dev->dev_private; 7731 struct drm_i915_private *dev_priv = to_i915(dev);
7667 enum pipe pipe = intel_crtc->pipe; 7732 enum pipe pipe = intel_crtc->pipe;
7668 7733
7669 /* pipesrc controls the size that is scaled from, which should 7734 /* pipesrc controls the size that is scaled from, which should
@@ -7678,7 +7743,7 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc,
7678 struct intel_crtc_state *pipe_config) 7743 struct intel_crtc_state *pipe_config)
7679{ 7744{
7680 struct drm_device *dev = crtc->base.dev; 7745 struct drm_device *dev = crtc->base.dev;
7681 struct drm_i915_private *dev_priv = dev->dev_private; 7746 struct drm_i915_private *dev_priv = to_i915(dev);
7682 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; 7747 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
7683 uint32_t tmp; 7748 uint32_t tmp;
7684 7749
@@ -7713,7 +7778,7 @@ static void intel_get_pipe_src_size(struct intel_crtc *crtc,
7713 struct intel_crtc_state *pipe_config) 7778 struct intel_crtc_state *pipe_config)
7714{ 7779{
7715 struct drm_device *dev = crtc->base.dev; 7780 struct drm_device *dev = crtc->base.dev;
7716 struct drm_i915_private *dev_priv = dev->dev_private; 7781 struct drm_i915_private *dev_priv = to_i915(dev);
7717 u32 tmp; 7782 u32 tmp;
7718 7783
7719 tmp = I915_READ(PIPESRC(crtc->pipe)); 7784 tmp = I915_READ(PIPESRC(crtc->pipe));
@@ -7751,7 +7816,7 @@ void intel_mode_from_pipe_config(struct drm_display_mode *mode,
7751static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc) 7816static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
7752{ 7817{
7753 struct drm_device *dev = intel_crtc->base.dev; 7818 struct drm_device *dev = intel_crtc->base.dev;
7754 struct drm_i915_private *dev_priv = dev->dev_private; 7819 struct drm_i915_private *dev_priv = to_i915(dev);
7755 uint32_t pipeconf; 7820 uint32_t pipeconf;
7756 7821
7757 pipeconf = 0; 7822 pipeconf = 0;
@@ -7797,7 +7862,7 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
7797 7862
7798 if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { 7863 if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
7799 if (INTEL_INFO(dev)->gen < 4 || 7864 if (INTEL_INFO(dev)->gen < 4 ||
7800 intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO)) 7865 intel_crtc_has_type(intel_crtc->config, INTEL_OUTPUT_SDVO))
7801 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; 7866 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
7802 else 7867 else
7803 pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT; 7868 pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
@@ -7816,21 +7881,21 @@ static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
7816 struct intel_crtc_state *crtc_state) 7881 struct intel_crtc_state *crtc_state)
7817{ 7882{
7818 struct drm_device *dev = crtc->base.dev; 7883 struct drm_device *dev = crtc->base.dev;
7819 struct drm_i915_private *dev_priv = dev->dev_private; 7884 struct drm_i915_private *dev_priv = to_i915(dev);
7820 const intel_limit_t *limit; 7885 const struct intel_limit *limit;
7821 int refclk = 48000; 7886 int refclk = 48000;
7822 7887
7823 memset(&crtc_state->dpll_hw_state, 0, 7888 memset(&crtc_state->dpll_hw_state, 0,
7824 sizeof(crtc_state->dpll_hw_state)); 7889 sizeof(crtc_state->dpll_hw_state));
7825 7890
7826 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { 7891 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7827 if (intel_panel_use_ssc(dev_priv)) { 7892 if (intel_panel_use_ssc(dev_priv)) {
7828 refclk = dev_priv->vbt.lvds_ssc_freq; 7893 refclk = dev_priv->vbt.lvds_ssc_freq;
7829 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); 7894 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7830 } 7895 }
7831 7896
7832 limit = &intel_limits_i8xx_lvds; 7897 limit = &intel_limits_i8xx_lvds;
7833 } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO)) { 7898 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
7834 limit = &intel_limits_i8xx_dvo; 7899 limit = &intel_limits_i8xx_dvo;
7835 } else { 7900 } else {
7836 limit = &intel_limits_i8xx_dac; 7901 limit = &intel_limits_i8xx_dac;
@@ -7852,14 +7917,14 @@ static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
7852 struct intel_crtc_state *crtc_state) 7917 struct intel_crtc_state *crtc_state)
7853{ 7918{
7854 struct drm_device *dev = crtc->base.dev; 7919 struct drm_device *dev = crtc->base.dev;
7855 struct drm_i915_private *dev_priv = dev->dev_private; 7920 struct drm_i915_private *dev_priv = to_i915(dev);
7856 const intel_limit_t *limit; 7921 const struct intel_limit *limit;
7857 int refclk = 96000; 7922 int refclk = 96000;
7858 7923
7859 memset(&crtc_state->dpll_hw_state, 0, 7924 memset(&crtc_state->dpll_hw_state, 0,
7860 sizeof(crtc_state->dpll_hw_state)); 7925 sizeof(crtc_state->dpll_hw_state));
7861 7926
7862 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { 7927 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7863 if (intel_panel_use_ssc(dev_priv)) { 7928 if (intel_panel_use_ssc(dev_priv)) {
7864 refclk = dev_priv->vbt.lvds_ssc_freq; 7929 refclk = dev_priv->vbt.lvds_ssc_freq;
7865 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); 7930 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
@@ -7869,10 +7934,10 @@ static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
7869 limit = &intel_limits_g4x_dual_channel_lvds; 7934 limit = &intel_limits_g4x_dual_channel_lvds;
7870 else 7935 else
7871 limit = &intel_limits_g4x_single_channel_lvds; 7936 limit = &intel_limits_g4x_single_channel_lvds;
7872 } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI) || 7937 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
7873 intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_ANALOG)) { 7938 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
7874 limit = &intel_limits_g4x_hdmi; 7939 limit = &intel_limits_g4x_hdmi;
7875 } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO)) { 7940 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
7876 limit = &intel_limits_g4x_sdvo; 7941 limit = &intel_limits_g4x_sdvo;
7877 } else { 7942 } else {
7878 /* The option is for other outputs */ 7943 /* The option is for other outputs */
@@ -7895,14 +7960,14 @@ static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
7895 struct intel_crtc_state *crtc_state) 7960 struct intel_crtc_state *crtc_state)
7896{ 7961{
7897 struct drm_device *dev = crtc->base.dev; 7962 struct drm_device *dev = crtc->base.dev;
7898 struct drm_i915_private *dev_priv = dev->dev_private; 7963 struct drm_i915_private *dev_priv = to_i915(dev);
7899 const intel_limit_t *limit; 7964 const struct intel_limit *limit;
7900 int refclk = 96000; 7965 int refclk = 96000;
7901 7966
7902 memset(&crtc_state->dpll_hw_state, 0, 7967 memset(&crtc_state->dpll_hw_state, 0,
7903 sizeof(crtc_state->dpll_hw_state)); 7968 sizeof(crtc_state->dpll_hw_state));
7904 7969
7905 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { 7970 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7906 if (intel_panel_use_ssc(dev_priv)) { 7971 if (intel_panel_use_ssc(dev_priv)) {
7907 refclk = dev_priv->vbt.lvds_ssc_freq; 7972 refclk = dev_priv->vbt.lvds_ssc_freq;
7908 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); 7973 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
@@ -7929,14 +7994,14 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
7929 struct intel_crtc_state *crtc_state) 7994 struct intel_crtc_state *crtc_state)
7930{ 7995{
7931 struct drm_device *dev = crtc->base.dev; 7996 struct drm_device *dev = crtc->base.dev;
7932 struct drm_i915_private *dev_priv = dev->dev_private; 7997 struct drm_i915_private *dev_priv = to_i915(dev);
7933 const intel_limit_t *limit; 7998 const struct intel_limit *limit;
7934 int refclk = 96000; 7999 int refclk = 96000;
7935 8000
7936 memset(&crtc_state->dpll_hw_state, 0, 8001 memset(&crtc_state->dpll_hw_state, 0,
7937 sizeof(crtc_state->dpll_hw_state)); 8002 sizeof(crtc_state->dpll_hw_state));
7938 8003
7939 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { 8004 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7940 if (intel_panel_use_ssc(dev_priv)) { 8005 if (intel_panel_use_ssc(dev_priv)) {
7941 refclk = dev_priv->vbt.lvds_ssc_freq; 8006 refclk = dev_priv->vbt.lvds_ssc_freq;
7942 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); 8007 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
@@ -7963,7 +8028,7 @@ static int chv_crtc_compute_clock(struct intel_crtc *crtc,
7963 struct intel_crtc_state *crtc_state) 8028 struct intel_crtc_state *crtc_state)
7964{ 8029{
7965 int refclk = 100000; 8030 int refclk = 100000;
7966 const intel_limit_t *limit = &intel_limits_chv; 8031 const struct intel_limit *limit = &intel_limits_chv;
7967 8032
7968 memset(&crtc_state->dpll_hw_state, 0, 8033 memset(&crtc_state->dpll_hw_state, 0,
7969 sizeof(crtc_state->dpll_hw_state)); 8034 sizeof(crtc_state->dpll_hw_state));
@@ -7984,7 +8049,7 @@ static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
7984 struct intel_crtc_state *crtc_state) 8049 struct intel_crtc_state *crtc_state)
7985{ 8050{
7986 int refclk = 100000; 8051 int refclk = 100000;
7987 const intel_limit_t *limit = &intel_limits_vlv; 8052 const struct intel_limit *limit = &intel_limits_vlv;
7988 8053
7989 memset(&crtc_state->dpll_hw_state, 0, 8054 memset(&crtc_state->dpll_hw_state, 0,
7990 sizeof(crtc_state->dpll_hw_state)); 8055 sizeof(crtc_state->dpll_hw_state));
@@ -8005,7 +8070,7 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
8005 struct intel_crtc_state *pipe_config) 8070 struct intel_crtc_state *pipe_config)
8006{ 8071{
8007 struct drm_device *dev = crtc->base.dev; 8072 struct drm_device *dev = crtc->base.dev;
8008 struct drm_i915_private *dev_priv = dev->dev_private; 8073 struct drm_i915_private *dev_priv = to_i915(dev);
8009 uint32_t tmp; 8074 uint32_t tmp;
8010 8075
8011 if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev))) 8076 if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
@@ -8032,9 +8097,9 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc,
8032 struct intel_crtc_state *pipe_config) 8097 struct intel_crtc_state *pipe_config)
8033{ 8098{
8034 struct drm_device *dev = crtc->base.dev; 8099 struct drm_device *dev = crtc->base.dev;
8035 struct drm_i915_private *dev_priv = dev->dev_private; 8100 struct drm_i915_private *dev_priv = to_i915(dev);
8036 int pipe = pipe_config->cpu_transcoder; 8101 int pipe = pipe_config->cpu_transcoder;
8037 intel_clock_t clock; 8102 struct dpll clock;
8038 u32 mdiv; 8103 u32 mdiv;
8039 int refclk = 100000; 8104 int refclk = 100000;
8040 8105
@@ -8060,7 +8125,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
8060 struct intel_initial_plane_config *plane_config) 8125 struct intel_initial_plane_config *plane_config)
8061{ 8126{
8062 struct drm_device *dev = crtc->base.dev; 8127 struct drm_device *dev = crtc->base.dev;
8063 struct drm_i915_private *dev_priv = dev->dev_private; 8128 struct drm_i915_private *dev_priv = to_i915(dev);
8064 u32 val, base, offset; 8129 u32 val, base, offset;
8065 int pipe = crtc->pipe, plane = crtc->plane; 8130 int pipe = crtc->pipe, plane = crtc->plane;
8066 int fourcc, pixel_format; 8131 int fourcc, pixel_format;
@@ -8128,10 +8193,10 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc,
8128 struct intel_crtc_state *pipe_config) 8193 struct intel_crtc_state *pipe_config)
8129{ 8194{
8130 struct drm_device *dev = crtc->base.dev; 8195 struct drm_device *dev = crtc->base.dev;
8131 struct drm_i915_private *dev_priv = dev->dev_private; 8196 struct drm_i915_private *dev_priv = to_i915(dev);
8132 int pipe = pipe_config->cpu_transcoder; 8197 int pipe = pipe_config->cpu_transcoder;
8133 enum dpio_channel port = vlv_pipe_to_channel(pipe); 8198 enum dpio_channel port = vlv_pipe_to_channel(pipe);
8134 intel_clock_t clock; 8199 struct dpll clock;
8135 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3; 8200 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
8136 int refclk = 100000; 8201 int refclk = 100000;
8137 8202
@@ -8162,7 +8227,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
8162 struct intel_crtc_state *pipe_config) 8227 struct intel_crtc_state *pipe_config)
8163{ 8228{
8164 struct drm_device *dev = crtc->base.dev; 8229 struct drm_device *dev = crtc->base.dev;
8165 struct drm_i915_private *dev_priv = dev->dev_private; 8230 struct drm_i915_private *dev_priv = to_i915(dev);
8166 enum intel_display_power_domain power_domain; 8231 enum intel_display_power_domain power_domain;
8167 uint32_t tmp; 8232 uint32_t tmp;
8168 bool ret; 8233 bool ret;
@@ -8273,7 +8338,7 @@ out:
8273 8338
8274static void ironlake_init_pch_refclk(struct drm_device *dev) 8339static void ironlake_init_pch_refclk(struct drm_device *dev)
8275{ 8340{
8276 struct drm_i915_private *dev_priv = dev->dev_private; 8341 struct drm_i915_private *dev_priv = to_i915(dev);
8277 struct intel_encoder *encoder; 8342 struct intel_encoder *encoder;
8278 int i; 8343 int i;
8279 u32 val, final; 8344 u32 val, final;
@@ -8544,7 +8609,7 @@ static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
8544static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread, 8609static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
8545 bool with_fdi) 8610 bool with_fdi)
8546{ 8611{
8547 struct drm_i915_private *dev_priv = dev->dev_private; 8612 struct drm_i915_private *dev_priv = to_i915(dev);
8548 uint32_t reg, tmp; 8613 uint32_t reg, tmp;
8549 8614
8550 if (WARN(with_fdi && !with_spread, "FDI requires downspread\n")) 8615 if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
@@ -8583,7 +8648,7 @@ static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
8583/* Sequence to disable CLKOUT_DP */ 8648/* Sequence to disable CLKOUT_DP */
8584static void lpt_disable_clkout_dp(struct drm_device *dev) 8649static void lpt_disable_clkout_dp(struct drm_device *dev)
8585{ 8650{
8586 struct drm_i915_private *dev_priv = dev->dev_private; 8651 struct drm_i915_private *dev_priv = to_i915(dev);
8587 uint32_t reg, tmp; 8652 uint32_t reg, tmp;
8588 8653
8589 mutex_lock(&dev_priv->sb_lock); 8654 mutex_lock(&dev_priv->sb_lock);
@@ -8704,7 +8769,7 @@ void intel_init_pch_refclk(struct drm_device *dev)
8704 8769
8705static void ironlake_set_pipeconf(struct drm_crtc *crtc) 8770static void ironlake_set_pipeconf(struct drm_crtc *crtc)
8706{ 8771{
8707 struct drm_i915_private *dev_priv = crtc->dev->dev_private; 8772 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
8708 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 8773 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8709 int pipe = intel_crtc->pipe; 8774 int pipe = intel_crtc->pipe;
8710 uint32_t val; 8775 uint32_t val;
@@ -8746,7 +8811,7 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc)
8746 8811
8747static void haswell_set_pipeconf(struct drm_crtc *crtc) 8812static void haswell_set_pipeconf(struct drm_crtc *crtc)
8748{ 8813{
8749 struct drm_i915_private *dev_priv = crtc->dev->dev_private; 8814 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
8750 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 8815 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8751 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 8816 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
8752 u32 val = 0; 8817 u32 val = 0;
@@ -8765,7 +8830,7 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc)
8765 8830
8766static void haswell_set_pipemisc(struct drm_crtc *crtc) 8831static void haswell_set_pipemisc(struct drm_crtc *crtc)
8767{ 8832{
8768 struct drm_i915_private *dev_priv = crtc->dev->dev_private; 8833 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
8769 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 8834 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8770 8835
8771 if (IS_BROADWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 9) { 8836 if (IS_BROADWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 9) {
@@ -8814,41 +8879,17 @@ static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
8814 8879
8815static void ironlake_compute_dpll(struct intel_crtc *intel_crtc, 8880static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
8816 struct intel_crtc_state *crtc_state, 8881 struct intel_crtc_state *crtc_state,
8817 intel_clock_t *reduced_clock) 8882 struct dpll *reduced_clock)
8818{ 8883{
8819 struct drm_crtc *crtc = &intel_crtc->base; 8884 struct drm_crtc *crtc = &intel_crtc->base;
8820 struct drm_device *dev = crtc->dev; 8885 struct drm_device *dev = crtc->dev;
8821 struct drm_i915_private *dev_priv = dev->dev_private; 8886 struct drm_i915_private *dev_priv = to_i915(dev);
8822 struct drm_atomic_state *state = crtc_state->base.state;
8823 struct drm_connector *connector;
8824 struct drm_connector_state *connector_state;
8825 struct intel_encoder *encoder;
8826 u32 dpll, fp, fp2; 8887 u32 dpll, fp, fp2;
8827 int factor, i; 8888 int factor;
8828 bool is_lvds = false, is_sdvo = false;
8829
8830 for_each_connector_in_state(state, connector, connector_state, i) {
8831 if (connector_state->crtc != crtc_state->base.crtc)
8832 continue;
8833
8834 encoder = to_intel_encoder(connector_state->best_encoder);
8835
8836 switch (encoder->type) {
8837 case INTEL_OUTPUT_LVDS:
8838 is_lvds = true;
8839 break;
8840 case INTEL_OUTPUT_SDVO:
8841 case INTEL_OUTPUT_HDMI:
8842 is_sdvo = true;
8843 break;
8844 default:
8845 break;
8846 }
8847 }
8848 8889
8849 /* Enable autotuning of the PLL clock (if permissible) */ 8890 /* Enable autotuning of the PLL clock (if permissible) */
8850 factor = 21; 8891 factor = 21;
8851 if (is_lvds) { 8892 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8852 if ((intel_panel_use_ssc(dev_priv) && 8893 if ((intel_panel_use_ssc(dev_priv) &&
8853 dev_priv->vbt.lvds_ssc_freq == 100000) || 8894 dev_priv->vbt.lvds_ssc_freq == 100000) ||
8854 (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev))) 8895 (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
@@ -8872,7 +8913,7 @@ static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
8872 8913
8873 dpll = 0; 8914 dpll = 0;
8874 8915
8875 if (is_lvds) 8916 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
8876 dpll |= DPLLB_MODE_LVDS; 8917 dpll |= DPLLB_MODE_LVDS;
8877 else 8918 else
8878 dpll |= DPLLB_MODE_DAC_SERIAL; 8919 dpll |= DPLLB_MODE_DAC_SERIAL;
@@ -8880,9 +8921,11 @@ static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
8880 dpll |= (crtc_state->pixel_multiplier - 1) 8921 dpll |= (crtc_state->pixel_multiplier - 1)
8881 << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; 8922 << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
8882 8923
8883 if (is_sdvo) 8924 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
8925 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
8884 dpll |= DPLL_SDVO_HIGH_SPEED; 8926 dpll |= DPLL_SDVO_HIGH_SPEED;
8885 if (crtc_state->has_dp_encoder) 8927
8928 if (intel_crtc_has_dp_encoder(crtc_state))
8886 dpll |= DPLL_SDVO_HIGH_SPEED; 8929 dpll |= DPLL_SDVO_HIGH_SPEED;
8887 8930
8888 /* compute bitmask from p1 value */ 8931 /* compute bitmask from p1 value */
@@ -8905,7 +8948,8 @@ static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
8905 break; 8948 break;
8906 } 8949 }
8907 8950
8908 if (is_lvds && intel_panel_use_ssc(dev_priv)) 8951 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
8952 intel_panel_use_ssc(dev_priv))
8909 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 8953 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
8910 else 8954 else
8911 dpll |= PLL_REF_INPUT_DREFCLK; 8955 dpll |= PLL_REF_INPUT_DREFCLK;
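
The ironlake_compute_dpll() hunk drops the per-connector loop that set is_lvds/is_sdvo and instead asks the crtc state directly via intel_crtc_has_type(). A plausible shape for that check is a precomputed bitmask of output types on the crtc state; the sketch below assumes that layout and is illustrative rather than the driver's exact helper.

    /* Hedged sketch: output-type checks against a precomputed bitmask instead
     * of walking every connector in the atomic state. Names are assumptions. */
    #include <stdbool.h>
    #include <stdio.h>

    enum output_type { OUTPUT_LVDS, OUTPUT_SDVO, OUTPUT_HDMI, OUTPUT_DP };

    struct crtc_state {
            unsigned int output_types;      /* bitmask of enum output_type */
    };

    static bool crtc_has_type(const struct crtc_state *state, enum output_type t)
    {
            return state->output_types & (1u << t);
    }

    int main(void)
    {
            struct crtc_state state = { .output_types = 1u << OUTPUT_LVDS };

            /* Mirrors "is_lvds" and "is_sdvo || is_hdmi" from the old loop. */
            printf("lvds=%d sdvo/hdmi=%d\n",
                   crtc_has_type(&state, OUTPUT_LVDS),
                   crtc_has_type(&state, OUTPUT_SDVO) ||
                   crtc_has_type(&state, OUTPUT_HDMI));
            return 0;
    }
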
@@ -8921,11 +8965,11 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
8921 struct intel_crtc_state *crtc_state) 8965 struct intel_crtc_state *crtc_state)
8922{ 8966{
8923 struct drm_device *dev = crtc->base.dev; 8967 struct drm_device *dev = crtc->base.dev;
8924 struct drm_i915_private *dev_priv = dev->dev_private; 8968 struct drm_i915_private *dev_priv = to_i915(dev);
8925 intel_clock_t reduced_clock; 8969 struct dpll reduced_clock;
8926 bool has_reduced_clock = false; 8970 bool has_reduced_clock = false;
8927 struct intel_shared_dpll *pll; 8971 struct intel_shared_dpll *pll;
8928 const intel_limit_t *limit; 8972 const struct intel_limit *limit;
8929 int refclk = 120000; 8973 int refclk = 120000;
8930 8974
8931 memset(&crtc_state->dpll_hw_state, 0, 8975 memset(&crtc_state->dpll_hw_state, 0,
@@ -8937,7 +8981,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
8937 if (!crtc_state->has_pch_encoder) 8981 if (!crtc_state->has_pch_encoder)
8938 return 0; 8982 return 0;
8939 8983
8940 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) { 8984 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8941 if (intel_panel_use_ssc(dev_priv)) { 8985 if (intel_panel_use_ssc(dev_priv)) {
8942 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", 8986 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
8943 dev_priv->vbt.lvds_ssc_freq); 8987 dev_priv->vbt.lvds_ssc_freq);
@@ -8976,7 +9020,7 @@ static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
8976 return -EINVAL; 9020 return -EINVAL;
8977 } 9021 }
8978 9022
8979 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) && 9023 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
8980 has_reduced_clock) 9024 has_reduced_clock)
8981 crtc->lowfreq_avail = true; 9025 crtc->lowfreq_avail = true;
8982 9026
@@ -8987,7 +9031,7 @@ static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
8987 struct intel_link_m_n *m_n) 9031 struct intel_link_m_n *m_n)
8988{ 9032{
8989 struct drm_device *dev = crtc->base.dev; 9033 struct drm_device *dev = crtc->base.dev;
8990 struct drm_i915_private *dev_priv = dev->dev_private; 9034 struct drm_i915_private *dev_priv = to_i915(dev);
8991 enum pipe pipe = crtc->pipe; 9035 enum pipe pipe = crtc->pipe;
8992 9036
8993 m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe)); 9037 m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
@@ -9005,7 +9049,7 @@ static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
9005 struct intel_link_m_n *m2_n2) 9049 struct intel_link_m_n *m2_n2)
9006{ 9050{
9007 struct drm_device *dev = crtc->base.dev; 9051 struct drm_device *dev = crtc->base.dev;
9008 struct drm_i915_private *dev_priv = dev->dev_private; 9052 struct drm_i915_private *dev_priv = to_i915(dev);
9009 enum pipe pipe = crtc->pipe; 9053 enum pipe pipe = crtc->pipe;
9010 9054
9011 if (INTEL_INFO(dev)->gen >= 5) { 9055 if (INTEL_INFO(dev)->gen >= 5) {
@@ -9063,7 +9107,7 @@ static void skylake_get_pfit_config(struct intel_crtc *crtc,
9063 struct intel_crtc_state *pipe_config) 9107 struct intel_crtc_state *pipe_config)
9064{ 9108{
9065 struct drm_device *dev = crtc->base.dev; 9109 struct drm_device *dev = crtc->base.dev;
9066 struct drm_i915_private *dev_priv = dev->dev_private; 9110 struct drm_i915_private *dev_priv = to_i915(dev);
9067 struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state; 9111 struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
9068 uint32_t ps_ctrl = 0; 9112 uint32_t ps_ctrl = 0;
9069 int id = -1; 9113 int id = -1;
@@ -9094,7 +9138,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc,
9094 struct intel_initial_plane_config *plane_config) 9138 struct intel_initial_plane_config *plane_config)
9095{ 9139{
9096 struct drm_device *dev = crtc->base.dev; 9140 struct drm_device *dev = crtc->base.dev;
9097 struct drm_i915_private *dev_priv = dev->dev_private; 9141 struct drm_i915_private *dev_priv = to_i915(dev);
9098 u32 val, base, offset, stride_mult, tiling; 9142 u32 val, base, offset, stride_mult, tiling;
9099 int pipe = crtc->pipe; 9143 int pipe = crtc->pipe;
9100 int fourcc, pixel_format; 9144 int fourcc, pixel_format;
@@ -9177,7 +9221,7 @@ static void ironlake_get_pfit_config(struct intel_crtc *crtc,
9177 struct intel_crtc_state *pipe_config) 9221 struct intel_crtc_state *pipe_config)
9178{ 9222{
9179 struct drm_device *dev = crtc->base.dev; 9223 struct drm_device *dev = crtc->base.dev;
9180 struct drm_i915_private *dev_priv = dev->dev_private; 9224 struct drm_i915_private *dev_priv = to_i915(dev);
9181 uint32_t tmp; 9225 uint32_t tmp;
9182 9226
9183 tmp = I915_READ(PF_CTL(crtc->pipe)); 9227 tmp = I915_READ(PF_CTL(crtc->pipe));
@@ -9202,7 +9246,7 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc,
9202 struct intel_initial_plane_config *plane_config) 9246 struct intel_initial_plane_config *plane_config)
9203{ 9247{
9204 struct drm_device *dev = crtc->base.dev; 9248 struct drm_device *dev = crtc->base.dev;
9205 struct drm_i915_private *dev_priv = dev->dev_private; 9249 struct drm_i915_private *dev_priv = to_i915(dev);
9206 u32 val, base, offset; 9250 u32 val, base, offset;
9207 int pipe = crtc->pipe; 9251 int pipe = crtc->pipe;
9208 int fourcc, pixel_format; 9252 int fourcc, pixel_format;
@@ -9270,7 +9314,7 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
9270 struct intel_crtc_state *pipe_config) 9314 struct intel_crtc_state *pipe_config)
9271{ 9315{
9272 struct drm_device *dev = crtc->base.dev; 9316 struct drm_device *dev = crtc->base.dev;
9273 struct drm_i915_private *dev_priv = dev->dev_private; 9317 struct drm_i915_private *dev_priv = to_i915(dev);
9274 enum intel_display_power_domain power_domain; 9318 enum intel_display_power_domain power_domain;
9275 uint32_t tmp; 9319 uint32_t tmp;
9276 bool ret; 9320 bool ret;
@@ -9320,6 +9364,10 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
9320 ironlake_get_fdi_m_n_config(crtc, pipe_config); 9364 ironlake_get_fdi_m_n_config(crtc, pipe_config);
9321 9365
9322 if (HAS_PCH_IBX(dev_priv)) { 9366 if (HAS_PCH_IBX(dev_priv)) {
9367 /*
9368 * The pipe->pch transcoder and pch transcoder->pll
9369 * mapping is fixed.
9370 */
9323 pll_id = (enum intel_dpll_id) crtc->pipe; 9371 pll_id = (enum intel_dpll_id) crtc->pipe;
9324 } else { 9372 } else {
9325 tmp = I915_READ(PCH_DPLL_SEL); 9373 tmp = I915_READ(PCH_DPLL_SEL);
@@ -9361,7 +9409,7 @@ out:
9361 9409
9362static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv) 9410static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
9363{ 9411{
9364 struct drm_device *dev = dev_priv->dev; 9412 struct drm_device *dev = &dev_priv->drm;
9365 struct intel_crtc *crtc; 9413 struct intel_crtc *crtc;
9366 9414
9367 for_each_intel_crtc(dev, crtc) 9415 for_each_intel_crtc(dev, crtc)
@@ -9395,7 +9443,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
9395 9443
9396static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv) 9444static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
9397{ 9445{
9398 struct drm_device *dev = dev_priv->dev; 9446 struct drm_device *dev = &dev_priv->drm;
9399 9447
9400 if (IS_HASWELL(dev)) 9448 if (IS_HASWELL(dev))
9401 return I915_READ(D_COMP_HSW); 9449 return I915_READ(D_COMP_HSW);
@@ -9405,7 +9453,7 @@ static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
9405 9453
9406static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val) 9454static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
9407{ 9455{
9408 struct drm_device *dev = dev_priv->dev; 9456 struct drm_device *dev = &dev_priv->drm;
9409 9457
9410 if (IS_HASWELL(dev)) { 9458 if (IS_HASWELL(dev)) {
9411 mutex_lock(&dev_priv->rps.hw_lock); 9459 mutex_lock(&dev_priv->rps.hw_lock);
@@ -9451,7 +9499,7 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
9451 I915_WRITE(LCPLL_CTL, val); 9499 I915_WRITE(LCPLL_CTL, val);
9452 POSTING_READ(LCPLL_CTL); 9500 POSTING_READ(LCPLL_CTL);
9453 9501
9454 if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1)) 9502 if (intel_wait_for_register(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1))
9455 DRM_ERROR("LCPLL still locked\n"); 9503 DRM_ERROR("LCPLL still locked\n");
9456 9504
9457 val = hsw_read_dcomp(dev_priv); 9505 val = hsw_read_dcomp(dev_priv);
@@ -9506,7 +9554,9 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
9506 val &= ~LCPLL_PLL_DISABLE; 9554 val &= ~LCPLL_PLL_DISABLE;
9507 I915_WRITE(LCPLL_CTL, val); 9555 I915_WRITE(LCPLL_CTL, val);
9508 9556
9509 if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5)) 9557 if (intel_wait_for_register(dev_priv,
9558 LCPLL_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
9559 5))
9510 DRM_ERROR("LCPLL not locked yet\n"); 9560 DRM_ERROR("LCPLL not locked yet\n");
9511 9561
9512 if (val & LCPLL_CD_SOURCE_FCLK) { 9562 if (val & LCPLL_CD_SOURCE_FCLK) {
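
Both LCPLL hunks replace open-coded wait_for() polling of LCPLL_CTL with intel_wait_for_register(dev_priv, reg, mask, value, timeout_ms). The userspace sketch below shows only the poll-until-match-or-timeout pattern; the real helper also chooses between busy-waiting and sleeping and handles forcewake, none of which is modelled here.

    /* Illustrative poll-with-timeout loop; not the i915 implementation. */
    #include <stdint.h>
    #include <time.h>

    static long elapsed_ms(const struct timespec *a, const struct timespec *b)
    {
            return (b->tv_sec - a->tv_sec) * 1000 +
                   (b->tv_nsec - a->tv_nsec) / 1000000;
    }

    /* Returns 0 once (reg & mask) == value, -1 if timeout_ms expires first,
     * matching the "if (wait(...)) report error" call sites in the hunks. */
    static int wait_for_register(volatile const uint32_t *reg, uint32_t mask,
                                 uint32_t value, unsigned int timeout_ms)
    {
            struct timespec start, now;

            clock_gettime(CLOCK_MONOTONIC, &start);
            for (;;) {
                    if ((*reg & mask) == value)
                            return 0;
                    clock_gettime(CLOCK_MONOTONIC, &now);
                    if (elapsed_ms(&start, &now) > (long)timeout_ms)
                            return -1;
            }
    }

    int main(void)
    {
            uint32_t fake_reg = 0x1;    /* bit already set: returns immediately */
            return wait_for_register(&fake_reg, 0x1, 0x1, 5);
    }
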
@@ -9520,7 +9570,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
9520 } 9570 }
9521 9571
9522 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 9572 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
9523 intel_update_cdclk(dev_priv->dev); 9573 intel_update_cdclk(&dev_priv->drm);
9524} 9574}
9525 9575
9526/* 9576/*
@@ -9548,7 +9598,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
9548 */ 9598 */
9549void hsw_enable_pc8(struct drm_i915_private *dev_priv) 9599void hsw_enable_pc8(struct drm_i915_private *dev_priv)
9550{ 9600{
9551 struct drm_device *dev = dev_priv->dev; 9601 struct drm_device *dev = &dev_priv->drm;
9552 uint32_t val; 9602 uint32_t val;
9553 9603
9554 DRM_DEBUG_KMS("Enabling package C8+\n"); 9604 DRM_DEBUG_KMS("Enabling package C8+\n");
@@ -9565,7 +9615,7 @@ void hsw_enable_pc8(struct drm_i915_private *dev_priv)
9565 9615
9566void hsw_disable_pc8(struct drm_i915_private *dev_priv) 9616void hsw_disable_pc8(struct drm_i915_private *dev_priv)
9567{ 9617{
9568 struct drm_device *dev = dev_priv->dev; 9618 struct drm_device *dev = &dev_priv->drm;
9569 uint32_t val; 9619 uint32_t val;
9570 9620
9571 DRM_DEBUG_KMS("Disabling package C8+\n"); 9621 DRM_DEBUG_KMS("Disabling package C8+\n");
@@ -9580,21 +9630,21 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv)
9580 } 9630 }
9581} 9631}
9582 9632
9583static void broxton_modeset_commit_cdclk(struct drm_atomic_state *old_state) 9633static void bxt_modeset_commit_cdclk(struct drm_atomic_state *old_state)
9584{ 9634{
9585 struct drm_device *dev = old_state->dev; 9635 struct drm_device *dev = old_state->dev;
9586 struct intel_atomic_state *old_intel_state = 9636 struct intel_atomic_state *old_intel_state =
9587 to_intel_atomic_state(old_state); 9637 to_intel_atomic_state(old_state);
9588 unsigned int req_cdclk = old_intel_state->dev_cdclk; 9638 unsigned int req_cdclk = old_intel_state->dev_cdclk;
9589 9639
9590 broxton_set_cdclk(to_i915(dev), req_cdclk); 9640 bxt_set_cdclk(to_i915(dev), req_cdclk);
9591} 9641}
9592 9642
9593/* compute the max rate for new configuration */ 9643/* compute the max rate for new configuration */
9594static int ilk_max_pixel_rate(struct drm_atomic_state *state) 9644static int ilk_max_pixel_rate(struct drm_atomic_state *state)
9595{ 9645{
9596 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 9646 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
9597 struct drm_i915_private *dev_priv = state->dev->dev_private; 9647 struct drm_i915_private *dev_priv = to_i915(state->dev);
9598 struct drm_crtc *crtc; 9648 struct drm_crtc *crtc;
9599 struct drm_crtc_state *cstate; 9649 struct drm_crtc_state *cstate;
9600 struct intel_crtc_state *crtc_state; 9650 struct intel_crtc_state *crtc_state;
@@ -9630,7 +9680,7 @@ static int ilk_max_pixel_rate(struct drm_atomic_state *state)
9630 9680
9631static void broadwell_set_cdclk(struct drm_device *dev, int cdclk) 9681static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
9632{ 9682{
9633 struct drm_i915_private *dev_priv = dev->dev_private; 9683 struct drm_i915_private *dev_priv = to_i915(dev);
9634 uint32_t val, data; 9684 uint32_t val, data;
9635 int ret; 9685 int ret;
9636 9686
@@ -9707,6 +9757,18 @@ static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
9707 cdclk, dev_priv->cdclk_freq); 9757 cdclk, dev_priv->cdclk_freq);
9708} 9758}
9709 9759
9760static int broadwell_calc_cdclk(int max_pixclk)
9761{
9762 if (max_pixclk > 540000)
9763 return 675000;
9764 else if (max_pixclk > 450000)
9765 return 540000;
9766 else if (max_pixclk > 337500)
9767 return 450000;
9768 else
9769 return 337500;
9770}
9771
9710static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state) 9772static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
9711{ 9773{
9712 struct drm_i915_private *dev_priv = to_i915(state->dev); 9774 struct drm_i915_private *dev_priv = to_i915(state->dev);
@@ -9718,14 +9780,7 @@ static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
9718 * FIXME should also account for plane ratio 9780 * FIXME should also account for plane ratio
9719 * once 64bpp pixel formats are supported. 9781 * once 64bpp pixel formats are supported.
9720 */ 9782 */
9721 if (max_pixclk > 540000) 9783 cdclk = broadwell_calc_cdclk(max_pixclk);
9722 cdclk = 675000;
9723 else if (max_pixclk > 450000)
9724 cdclk = 540000;
9725 else if (max_pixclk > 337500)
9726 cdclk = 450000;
9727 else
9728 cdclk = 337500;
9729 9784
9730 if (cdclk > dev_priv->max_cdclk_freq) { 9785 if (cdclk > dev_priv->max_cdclk_freq) {
9731 DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n", 9786 DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
@@ -9735,7 +9790,7 @@ static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
9735 9790
9736 intel_state->cdclk = intel_state->dev_cdclk = cdclk; 9791 intel_state->cdclk = intel_state->dev_cdclk = cdclk;
9737 if (!intel_state->active_crtcs) 9792 if (!intel_state->active_crtcs)
9738 intel_state->dev_cdclk = 337500; 9793 intel_state->dev_cdclk = broadwell_calc_cdclk(0);
9739 9794
9740 return 0; 9795 return 0;
9741} 9796}
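
The added broadwell_calc_cdclk() collapses the if/else ladder that used to live inline in broadwell_modeset_calc_cdclk() into one helper, so the same table also supplies the idle value: with no active CRTCs the new code asks for calc_cdclk(0), i.e. the lowest step, instead of hard-coding 337500. The values below are copied from the hunk; the surrounding program is only there to exercise them.

    #include <stdio.h>

    /* cdclk ladder from the hunk above, in kHz. */
    static int broadwell_calc_cdclk(int max_pixclk)
    {
            if (max_pixclk > 540000)
                    return 675000;
            else if (max_pixclk > 450000)
                    return 540000;
            else if (max_pixclk > 337500)
                    return 450000;
            else
                    return 337500;
    }

    int main(void)
    {
            printf("pixclk 500000 kHz -> cdclk %d kHz\n",
                   broadwell_calc_cdclk(500000));      /* 540000 */
            printf("no active crtcs   -> cdclk %d kHz\n",
                   broadwell_calc_cdclk(0));           /* 337500 */
            return 0;
    }
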
@@ -9750,13 +9805,51 @@ static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state)
9750 broadwell_set_cdclk(dev, req_cdclk); 9805 broadwell_set_cdclk(dev, req_cdclk);
9751} 9806}
9752 9807
9808static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
9809{
9810 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
9811 struct drm_i915_private *dev_priv = to_i915(state->dev);
9812 const int max_pixclk = ilk_max_pixel_rate(state);
9813 int vco = intel_state->cdclk_pll_vco;
9814 int cdclk;
9815
9816 /*
9817 * FIXME should also account for plane ratio
9818 * once 64bpp pixel formats are supported.
9819 */
9820 cdclk = skl_calc_cdclk(max_pixclk, vco);
9821
9822 /*
 9823	 * FIXME move the cdclk calculation to
 9824	 * compute_config() so we can fail gracefully.
9825 */
9826 if (cdclk > dev_priv->max_cdclk_freq) {
9827 DRM_ERROR("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
9828 cdclk, dev_priv->max_cdclk_freq);
9829 cdclk = dev_priv->max_cdclk_freq;
9830 }
9831
9832 intel_state->cdclk = intel_state->dev_cdclk = cdclk;
9833 if (!intel_state->active_crtcs)
9834 intel_state->dev_cdclk = skl_calc_cdclk(0, vco);
9835
9836 return 0;
9837}
9838
9839static void skl_modeset_commit_cdclk(struct drm_atomic_state *old_state)
9840{
9841 struct drm_i915_private *dev_priv = to_i915(old_state->dev);
9842 struct intel_atomic_state *intel_state = to_intel_atomic_state(old_state);
9843 unsigned int req_cdclk = intel_state->dev_cdclk;
9844 unsigned int req_vco = intel_state->cdclk_pll_vco;
9845
9846 skl_set_cdclk(dev_priv, req_cdclk, req_vco);
9847}
9848
9753static int haswell_crtc_compute_clock(struct intel_crtc *crtc, 9849static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
9754 struct intel_crtc_state *crtc_state) 9850 struct intel_crtc_state *crtc_state)
9755{ 9851{
9756 struct intel_encoder *intel_encoder = 9852 if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI)) {
9757 intel_ddi_get_crtc_new_encoder(crtc_state);
9758
9759 if (intel_encoder->type != INTEL_OUTPUT_DSI) {
9760 if (!intel_ddi_pll_select(crtc, crtc_state)) 9853 if (!intel_ddi_pll_select(crtc, crtc_state))
9761 return -EINVAL; 9854 return -EINVAL;
9762 } 9855 }
@@ -9866,10 +9959,14 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
9866 unsigned long *power_domain_mask) 9959 unsigned long *power_domain_mask)
9867{ 9960{
9868 struct drm_device *dev = crtc->base.dev; 9961 struct drm_device *dev = crtc->base.dev;
9869 struct drm_i915_private *dev_priv = dev->dev_private; 9962 struct drm_i915_private *dev_priv = to_i915(dev);
9870 enum intel_display_power_domain power_domain; 9963 enum intel_display_power_domain power_domain;
9871 u32 tmp; 9964 u32 tmp;
9872 9965
9966 /*
9967 * The pipe->transcoder mapping is fixed with the exception of the eDP
9968 * transcoder handled below.
9969 */
9873 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 9970 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
9874 9971
9875 /* 9972 /*
@@ -9913,14 +10010,12 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
9913 unsigned long *power_domain_mask) 10010 unsigned long *power_domain_mask)
9914{ 10011{
9915 struct drm_device *dev = crtc->base.dev; 10012 struct drm_device *dev = crtc->base.dev;
9916 struct drm_i915_private *dev_priv = dev->dev_private; 10013 struct drm_i915_private *dev_priv = to_i915(dev);
9917 enum intel_display_power_domain power_domain; 10014 enum intel_display_power_domain power_domain;
9918 enum port port; 10015 enum port port;
9919 enum transcoder cpu_transcoder; 10016 enum transcoder cpu_transcoder;
9920 u32 tmp; 10017 u32 tmp;
9921 10018
9922 pipe_config->has_dsi_encoder = false;
9923
9924 for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) { 10019 for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
9925 if (port == PORT_A) 10020 if (port == PORT_A)
9926 cpu_transcoder = TRANSCODER_DSI_A; 10021 cpu_transcoder = TRANSCODER_DSI_A;
@@ -9952,18 +10047,17 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
9952 continue; 10047 continue;
9953 10048
9954 pipe_config->cpu_transcoder = cpu_transcoder; 10049 pipe_config->cpu_transcoder = cpu_transcoder;
9955 pipe_config->has_dsi_encoder = true;
9956 break; 10050 break;
9957 } 10051 }
9958 10052
9959 return pipe_config->has_dsi_encoder; 10053 return transcoder_is_dsi(pipe_config->cpu_transcoder);
9960} 10054}
9961 10055
9962static void haswell_get_ddi_port_state(struct intel_crtc *crtc, 10056static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
9963 struct intel_crtc_state *pipe_config) 10057 struct intel_crtc_state *pipe_config)
9964{ 10058{
9965 struct drm_device *dev = crtc->base.dev; 10059 struct drm_device *dev = crtc->base.dev;
9966 struct drm_i915_private *dev_priv = dev->dev_private; 10060 struct drm_i915_private *dev_priv = to_i915(dev);
9967 struct intel_shared_dpll *pll; 10061 struct intel_shared_dpll *pll;
9968 enum port port; 10062 enum port port;
9969 uint32_t tmp; 10063 uint32_t tmp;
@@ -10006,7 +10100,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
10006 struct intel_crtc_state *pipe_config) 10100 struct intel_crtc_state *pipe_config)
10007{ 10101{
10008 struct drm_device *dev = crtc->base.dev; 10102 struct drm_device *dev = crtc->base.dev;
10009 struct drm_i915_private *dev_priv = dev->dev_private; 10103 struct drm_i915_private *dev_priv = to_i915(dev);
10010 enum intel_display_power_domain power_domain; 10104 enum intel_display_power_domain power_domain;
10011 unsigned long power_domain_mask; 10105 unsigned long power_domain_mask;
10012 bool active; 10106 bool active;
@@ -10020,18 +10114,16 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
10020 10114
10021 active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask); 10115 active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask);
10022 10116
10023 if (IS_BROXTON(dev_priv)) { 10117 if (IS_BROXTON(dev_priv) &&
10024 bxt_get_dsi_transcoder_state(crtc, pipe_config, 10118 bxt_get_dsi_transcoder_state(crtc, pipe_config, &power_domain_mask)) {
10025 &power_domain_mask); 10119 WARN_ON(active);
10026 WARN_ON(active && pipe_config->has_dsi_encoder); 10120 active = true;
10027 if (pipe_config->has_dsi_encoder)
10028 active = true;
10029 } 10121 }
10030 10122
10031 if (!active) 10123 if (!active)
10032 goto out; 10124 goto out;
10033 10125
10034 if (!pipe_config->has_dsi_encoder) { 10126 if (!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
10035 haswell_get_ddi_port_state(crtc, pipe_config); 10127 haswell_get_ddi_port_state(crtc, pipe_config);
10036 intel_get_pipe_timings(crtc, pipe_config); 10128 intel_get_pipe_timings(crtc, pipe_config);
10037 } 10129 }
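
With has_dsi_encoder gone, the readout path above infers "this pipe is driven by DSI" from the cpu_transcoder value alone. The sketch below assumes the enum spelling; the point is only that the DSI transcoders are distinct enum values, so a simple equality test replaces the extra bool.

    #include <stdbool.h>

    enum transcoder {
            TRANSCODER_A, TRANSCODER_B, TRANSCODER_C,
            TRANSCODER_EDP, TRANSCODER_DSI_A, TRANSCODER_DSI_C,
    };

    /* True only for the two DSI transcoders; regular pipe/eDP transcoders
     * fall through to false, replacing the separate has_dsi_encoder flag. */
    static bool transcoder_is_dsi(enum transcoder t)
    {
            return t == TRANSCODER_DSI_A || t == TRANSCODER_DSI_C;
    }
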
@@ -10082,7 +10174,7 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base,
10082 const struct intel_plane_state *plane_state) 10174 const struct intel_plane_state *plane_state)
10083{ 10175{
10084 struct drm_device *dev = crtc->dev; 10176 struct drm_device *dev = crtc->dev;
10085 struct drm_i915_private *dev_priv = dev->dev_private; 10177 struct drm_i915_private *dev_priv = to_i915(dev);
10086 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 10178 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10087 uint32_t cntl = 0, size = 0; 10179 uint32_t cntl = 0, size = 0;
10088 10180
@@ -10145,7 +10237,7 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
10145 const struct intel_plane_state *plane_state) 10237 const struct intel_plane_state *plane_state)
10146{ 10238{
10147 struct drm_device *dev = crtc->dev; 10239 struct drm_device *dev = crtc->dev;
10148 struct drm_i915_private *dev_priv = dev->dev_private; 10240 struct drm_i915_private *dev_priv = to_i915(dev);
10149 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 10241 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10150 int pipe = intel_crtc->pipe; 10242 int pipe = intel_crtc->pipe;
10151 uint32_t cntl = 0; 10243 uint32_t cntl = 0;
@@ -10193,7 +10285,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
10193 const struct intel_plane_state *plane_state) 10285 const struct intel_plane_state *plane_state)
10194{ 10286{
10195 struct drm_device *dev = crtc->dev; 10287 struct drm_device *dev = crtc->dev;
10196 struct drm_i915_private *dev_priv = dev->dev_private; 10288 struct drm_i915_private *dev_priv = to_i915(dev);
10197 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 10289 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10198 int pipe = intel_crtc->pipe; 10290 int pipe = intel_crtc->pipe;
10199 u32 base = intel_crtc->cursor_addr; 10291 u32 base = intel_crtc->cursor_addr;
@@ -10337,10 +10429,10 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
10337 struct drm_i915_gem_object *obj; 10429 struct drm_i915_gem_object *obj;
10338 struct drm_mode_fb_cmd2 mode_cmd = { 0 }; 10430 struct drm_mode_fb_cmd2 mode_cmd = { 0 };
10339 10431
10340 obj = i915_gem_alloc_object(dev, 10432 obj = i915_gem_object_create(dev,
10341 intel_framebuffer_size_for_mode(mode, bpp)); 10433 intel_framebuffer_size_for_mode(mode, bpp));
10342 if (obj == NULL) 10434 if (IS_ERR(obj))
10343 return ERR_PTR(-ENOMEM); 10435 return ERR_CAST(obj);
10344 10436
10345 mode_cmd.width = mode->hdisplay; 10437 mode_cmd.width = mode->hdisplay;
10346 mode_cmd.height = mode->vdisplay; 10438 mode_cmd.height = mode->vdisplay;
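
The framebuffer allocation hunk also changes the error convention: i915_gem_object_create() reports failure with an ERR_PTR-encoded errno rather than NULL, so the caller forwards the real error with ERR_CAST() instead of assuming -ENOMEM. A userspace rendition of that idiom, using the usual "top 4095 addresses encode an errno" trick, is sketched below.

    #include <errno.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    /* Toy allocator: returns a valid pointer or an encoded negative errno. */
    static void *create_object(int fail_with_errno)
    {
            static int object = 42;

            if (fail_with_errno)
                    return ERR_PTR(-(long)fail_with_errno);
            return &object;
    }

    int main(void)
    {
            void *obj = create_object(ENOSPC);

            if (IS_ERR(obj))        /* caller forwards the precise error */
                    printf("create failed: %ld\n", PTR_ERR(obj));
            return 0;
    }
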
@@ -10360,7 +10452,7 @@ mode_fits_in_fbdev(struct drm_device *dev,
10360 struct drm_display_mode *mode) 10452 struct drm_display_mode *mode)
10361{ 10453{
10362#ifdef CONFIG_DRM_FBDEV_EMULATION 10454#ifdef CONFIG_DRM_FBDEV_EMULATION
10363 struct drm_i915_private *dev_priv = dev->dev_private; 10455 struct drm_i915_private *dev_priv = to_i915(dev);
10364 struct drm_i915_gem_object *obj; 10456 struct drm_i915_gem_object *obj;
10365 struct drm_framebuffer *fb; 10457 struct drm_framebuffer *fb;
10366 10458
@@ -10630,7 +10722,7 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
10630static int i9xx_pll_refclk(struct drm_device *dev, 10722static int i9xx_pll_refclk(struct drm_device *dev,
10631 const struct intel_crtc_state *pipe_config) 10723 const struct intel_crtc_state *pipe_config)
10632{ 10724{
10633 struct drm_i915_private *dev_priv = dev->dev_private; 10725 struct drm_i915_private *dev_priv = to_i915(dev);
10634 u32 dpll = pipe_config->dpll_hw_state.dpll; 10726 u32 dpll = pipe_config->dpll_hw_state.dpll;
10635 10727
10636 if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN) 10728 if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
@@ -10648,11 +10740,11 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
10648 struct intel_crtc_state *pipe_config) 10740 struct intel_crtc_state *pipe_config)
10649{ 10741{
10650 struct drm_device *dev = crtc->base.dev; 10742 struct drm_device *dev = crtc->base.dev;
10651 struct drm_i915_private *dev_priv = dev->dev_private; 10743 struct drm_i915_private *dev_priv = to_i915(dev);
10652 int pipe = pipe_config->cpu_transcoder; 10744 int pipe = pipe_config->cpu_transcoder;
10653 u32 dpll = pipe_config->dpll_hw_state.dpll; 10745 u32 dpll = pipe_config->dpll_hw_state.dpll;
10654 u32 fp; 10746 u32 fp;
10655 intel_clock_t clock; 10747 struct dpll clock;
10656 int port_clock; 10748 int port_clock;
10657 int refclk = i9xx_pll_refclk(dev, pipe_config); 10749 int refclk = i9xx_pll_refclk(dev, pipe_config);
10658 10750
@@ -10774,7 +10866,7 @@ static void ironlake_pch_clock_get(struct intel_crtc *crtc,
10774struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, 10866struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
10775 struct drm_crtc *crtc) 10867 struct drm_crtc *crtc)
10776{ 10868{
10777 struct drm_i915_private *dev_priv = dev->dev_private; 10869 struct drm_i915_private *dev_priv = to_i915(dev);
10778 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 10870 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10779 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 10871 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
10780 struct drm_display_mode *mode; 10872 struct drm_display_mode *mode;
@@ -10826,48 +10918,20 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
10826 return mode; 10918 return mode;
10827} 10919}
10828 10920
10829void intel_mark_busy(struct drm_device *dev)
10830{
10831 struct drm_i915_private *dev_priv = dev->dev_private;
10832
10833 if (dev_priv->mm.busy)
10834 return;
10835
10836 intel_runtime_pm_get(dev_priv);
10837 i915_update_gfx_val(dev_priv);
10838 if (INTEL_INFO(dev)->gen >= 6)
10839 gen6_rps_busy(dev_priv);
10840 dev_priv->mm.busy = true;
10841}
10842
10843void intel_mark_idle(struct drm_device *dev)
10844{
10845 struct drm_i915_private *dev_priv = dev->dev_private;
10846
10847 if (!dev_priv->mm.busy)
10848 return;
10849
10850 dev_priv->mm.busy = false;
10851
10852 if (INTEL_INFO(dev)->gen >= 6)
10853 gen6_rps_idle(dev->dev_private);
10854
10855 intel_runtime_pm_put(dev_priv);
10856}
10857
10858static void intel_crtc_destroy(struct drm_crtc *crtc) 10921static void intel_crtc_destroy(struct drm_crtc *crtc)
10859{ 10922{
10860 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 10923 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10861 struct drm_device *dev = crtc->dev; 10924 struct drm_device *dev = crtc->dev;
10862 struct intel_unpin_work *work; 10925 struct intel_flip_work *work;
10863 10926
10864 spin_lock_irq(&dev->event_lock); 10927 spin_lock_irq(&dev->event_lock);
10865 work = intel_crtc->unpin_work; 10928 work = intel_crtc->flip_work;
10866 intel_crtc->unpin_work = NULL; 10929 intel_crtc->flip_work = NULL;
10867 spin_unlock_irq(&dev->event_lock); 10930 spin_unlock_irq(&dev->event_lock);
10868 10931
10869 if (work) { 10932 if (work) {
10870 cancel_work_sync(&work->work); 10933 cancel_work_sync(&work->mmio_work);
10934 cancel_work_sync(&work->unpin_work);
10871 kfree(work); 10935 kfree(work);
10872 } 10936 }
10873 10937
@@ -10878,12 +10942,15 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
10878 10942
10879static void intel_unpin_work_fn(struct work_struct *__work) 10943static void intel_unpin_work_fn(struct work_struct *__work)
10880{ 10944{
10881 struct intel_unpin_work *work = 10945 struct intel_flip_work *work =
10882 container_of(__work, struct intel_unpin_work, work); 10946 container_of(__work, struct intel_flip_work, unpin_work);
10883 struct intel_crtc *crtc = to_intel_crtc(work->crtc); 10947 struct intel_crtc *crtc = to_intel_crtc(work->crtc);
10884 struct drm_device *dev = crtc->base.dev; 10948 struct drm_device *dev = crtc->base.dev;
10885 struct drm_plane *primary = crtc->base.primary; 10949 struct drm_plane *primary = crtc->base.primary;
10886 10950
10951 if (is_mmio_work(work))
10952 flush_work(&work->mmio_work);
10953
10887 mutex_lock(&dev->struct_mutex); 10954 mutex_lock(&dev->struct_mutex);
10888 intel_unpin_fb_obj(work->old_fb, primary->state->rotation); 10955 intel_unpin_fb_obj(work->old_fb, primary->state->rotation);
10889 drm_gem_object_unreference(&work->pending_flip_obj->base); 10956 drm_gem_object_unreference(&work->pending_flip_obj->base);
@@ -10902,63 +10969,17 @@ static void intel_unpin_work_fn(struct work_struct *__work)
10902 kfree(work); 10969 kfree(work);
10903} 10970}
10904 10971
10905static void do_intel_finish_page_flip(struct drm_device *dev,
10906 struct drm_crtc *crtc)
10907{
10908 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10909 struct intel_unpin_work *work;
10910 unsigned long flags;
10911
10912 /* Ignore early vblank irqs */
10913 if (intel_crtc == NULL)
10914 return;
10915
10916 /*
10917 * This is called both by irq handlers and the reset code (to complete
10918 * lost pageflips) so needs the full irqsave spinlocks.
10919 */
10920 spin_lock_irqsave(&dev->event_lock, flags);
10921 work = intel_crtc->unpin_work;
10922
10923 /* Ensure we don't miss a work->pending update ... */
10924 smp_rmb();
10925
10926 if (work == NULL || atomic_read(&work->pending) < INTEL_FLIP_COMPLETE) {
10927 spin_unlock_irqrestore(&dev->event_lock, flags);
10928 return;
10929 }
10930
10931 page_flip_completed(intel_crtc);
10932
10933 spin_unlock_irqrestore(&dev->event_lock, flags);
10934}
10935
10936void intel_finish_page_flip(struct drm_device *dev, int pipe)
10937{
10938 struct drm_i915_private *dev_priv = dev->dev_private;
10939 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
10940
10941 do_intel_finish_page_flip(dev, crtc);
10942}
10943
10944void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
10945{
10946 struct drm_i915_private *dev_priv = dev->dev_private;
10947 struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
10948
10949 do_intel_finish_page_flip(dev, crtc);
10950}
10951
10952/* Is 'a' after or equal to 'b'? */ 10972/* Is 'a' after or equal to 'b'? */
10953static bool g4x_flip_count_after_eq(u32 a, u32 b) 10973static bool g4x_flip_count_after_eq(u32 a, u32 b)
10954{ 10974{
10955 return !((a - b) & 0x80000000); 10975 return !((a - b) & 0x80000000);
10956} 10976}
10957 10977
10958static bool page_flip_finished(struct intel_crtc *crtc) 10978static bool __pageflip_finished_cs(struct intel_crtc *crtc,
10979 struct intel_flip_work *work)
10959{ 10980{
10960 struct drm_device *dev = crtc->base.dev; 10981 struct drm_device *dev = crtc->base.dev;
10961 struct drm_i915_private *dev_priv = dev->dev_private; 10982 struct drm_i915_private *dev_priv = to_i915(dev);
10962 unsigned reset_counter; 10983 unsigned reset_counter;
10963 10984
10964 reset_counter = i915_reset_counter(&dev_priv->gpu_error); 10985 reset_counter = i915_reset_counter(&dev_priv->gpu_error);
@@ -10997,40 +11018,103 @@ static bool page_flip_finished(struct intel_crtc *crtc)
10997 * anyway, we don't really care. 11018 * anyway, we don't really care.
10998 */ 11019 */
10999 return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) == 11020 return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
11000 crtc->unpin_work->gtt_offset && 11021 crtc->flip_work->gtt_offset &&
11001 g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)), 11022 g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)),
11002 crtc->unpin_work->flip_count); 11023 crtc->flip_work->flip_count);
11003} 11024}
11004 11025
11005void intel_prepare_page_flip(struct drm_device *dev, int plane) 11026static bool
11027__pageflip_finished_mmio(struct intel_crtc *crtc,
11028 struct intel_flip_work *work)
11006{ 11029{
11007 struct drm_i915_private *dev_priv = dev->dev_private; 11030 /*
11008 struct intel_crtc *intel_crtc = 11031 * MMIO work completes when vblank is different from
11009 to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]); 11032 * flip_queued_vblank.
11033 *
11034 * Reset counter value doesn't matter, this is handled by
11035 * i915_wait_request finishing early, so no need to handle
11036 * reset here.
11037 */
11038 return intel_crtc_get_vblank_counter(crtc) != work->flip_queued_vblank;
11039}
11040
11041
11042static bool pageflip_finished(struct intel_crtc *crtc,
11043 struct intel_flip_work *work)
11044{
11045 if (!atomic_read(&work->pending))
11046 return false;
11047
11048 smp_rmb();
11049
11050 if (is_mmio_work(work))
11051 return __pageflip_finished_mmio(crtc, work);
11052 else
11053 return __pageflip_finished_cs(crtc, work);
11054}
11055
11056void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe)
11057{
11058 struct drm_device *dev = &dev_priv->drm;
11059 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
11060 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11061 struct intel_flip_work *work;
11010 unsigned long flags; 11062 unsigned long flags;
11011 11063
11064 /* Ignore early vblank irqs */
11065 if (!crtc)
11066 return;
11012 11067
11013 /* 11068 /*
11014 * This is called both by irq handlers and the reset code (to complete 11069 * This is called both by irq handlers and the reset code (to complete
11015 * lost pageflips) so needs the full irqsave spinlocks. 11070 * lost pageflips) so needs the full irqsave spinlocks.
11016 *
11017 * NB: An MMIO update of the plane base pointer will also
11018 * generate a page-flip completion irq, i.e. every modeset
11019 * is also accompanied by a spurious intel_prepare_page_flip().
11020 */ 11071 */
11021 spin_lock_irqsave(&dev->event_lock, flags); 11072 spin_lock_irqsave(&dev->event_lock, flags);
11022 if (intel_crtc->unpin_work && page_flip_finished(intel_crtc)) 11073 work = intel_crtc->flip_work;
11023 atomic_inc_not_zero(&intel_crtc->unpin_work->pending); 11074
11075 if (work != NULL &&
11076 !is_mmio_work(work) &&
11077 pageflip_finished(intel_crtc, work))
11078 page_flip_completed(intel_crtc);
11079
11024 spin_unlock_irqrestore(&dev->event_lock, flags); 11080 spin_unlock_irqrestore(&dev->event_lock, flags);
11025} 11081}
11026 11082
11027static inline void intel_mark_page_flip_active(struct intel_unpin_work *work) 11083void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe)
11028{ 11084{
11085 struct drm_device *dev = &dev_priv->drm;
11086 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
11087 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11088 struct intel_flip_work *work;
11089 unsigned long flags;
11090
11091 /* Ignore early vblank irqs */
11092 if (!crtc)
11093 return;
11094
11095 /*
11096 * This is called both by irq handlers and the reset code (to complete
11097 * lost pageflips) so needs the full irqsave spinlocks.
11098 */
11099 spin_lock_irqsave(&dev->event_lock, flags);
11100 work = intel_crtc->flip_work;
11101
11102 if (work != NULL &&
11103 is_mmio_work(work) &&
11104 pageflip_finished(intel_crtc, work))
11105 page_flip_completed(intel_crtc);
11106
11107 spin_unlock_irqrestore(&dev->event_lock, flags);
11108}
11109
11110static inline void intel_mark_page_flip_active(struct intel_crtc *crtc,
11111 struct intel_flip_work *work)
11112{
11113 work->flip_queued_vblank = intel_crtc_get_vblank_counter(crtc);
11114
11029 /* Ensure that the work item is consistent when activating it ... */ 11115 /* Ensure that the work item is consistent when activating it ... */
11030 smp_wmb(); 11116 smp_mb__before_atomic();
11031 atomic_set(&work->pending, INTEL_FLIP_PENDING); 11117 atomic_set(&work->pending, 1);
11032 /* and that it is marked active as soon as the irq could fire. */
11033 smp_wmb();
11034} 11118}
11035 11119
11036static int intel_gen2_queue_flip(struct drm_device *dev, 11120static int intel_gen2_queue_flip(struct drm_device *dev,
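
intel_mark_page_flip_active() now records flip_queued_vblank and only then sets work->pending, with smp_mb__before_atomic() on the write side paired against the smp_rmb() after the atomic_read() in pageflip_finished(). The C11 sketch below shows the same publish/consume ordering with release/acquire atomics; it illustrates the pairing, not the kernel primitives themselves.

    #include <stdatomic.h>
    #include <stdbool.h>

    struct flip_work {
            unsigned int flip_queued_vblank;
            atomic_int pending;
    };

    static void mark_flip_active(struct flip_work *work, unsigned int vblank)
    {
            work->flip_queued_vblank = vblank;           /* fill in the work... */
            atomic_store_explicit(&work->pending, 1,
                                  memory_order_release); /* ...then publish it */
    }

    /* MMIO-style completion check: once the work is published, the flip is
     * done when the vblank counter has moved past the queued value. */
    static bool flip_finished(struct flip_work *work, unsigned int current_vblank)
    {
            if (!atomic_load_explicit(&work->pending, memory_order_acquire))
                    return false;
            return current_vblank != work->flip_queued_vblank;
    }
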
@@ -11061,10 +11145,9 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
11061 intel_ring_emit(engine, MI_DISPLAY_FLIP | 11145 intel_ring_emit(engine, MI_DISPLAY_FLIP |
11062 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 11146 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11063 intel_ring_emit(engine, fb->pitches[0]); 11147 intel_ring_emit(engine, fb->pitches[0]);
11064 intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset); 11148 intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
11065 intel_ring_emit(engine, 0); /* aux display base address, unused */ 11149 intel_ring_emit(engine, 0); /* aux display base address, unused */
11066 11150
11067 intel_mark_page_flip_active(intel_crtc->unpin_work);
11068 return 0; 11151 return 0;
11069} 11152}
11070 11153
@@ -11093,10 +11176,9 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
11093 intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | 11176 intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 |
11094 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 11177 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11095 intel_ring_emit(engine, fb->pitches[0]); 11178 intel_ring_emit(engine, fb->pitches[0]);
11096 intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset); 11179 intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
11097 intel_ring_emit(engine, MI_NOOP); 11180 intel_ring_emit(engine, MI_NOOP);
11098 11181
11099 intel_mark_page_flip_active(intel_crtc->unpin_work);
11100 return 0; 11182 return 0;
11101} 11183}
11102 11184
@@ -11108,7 +11190,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
11108 uint32_t flags) 11190 uint32_t flags)
11109{ 11191{
11110 struct intel_engine_cs *engine = req->engine; 11192 struct intel_engine_cs *engine = req->engine;
11111 struct drm_i915_private *dev_priv = dev->dev_private; 11193 struct drm_i915_private *dev_priv = to_i915(dev);
11112 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11194 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11113 uint32_t pf, pipesrc; 11195 uint32_t pf, pipesrc;
11114 int ret; 11196 int ret;
@@ -11124,7 +11206,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
11124 intel_ring_emit(engine, MI_DISPLAY_FLIP | 11206 intel_ring_emit(engine, MI_DISPLAY_FLIP |
11125 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 11207 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11126 intel_ring_emit(engine, fb->pitches[0]); 11208 intel_ring_emit(engine, fb->pitches[0]);
11127 intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset | 11209 intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset |
11128 obj->tiling_mode); 11210 obj->tiling_mode);
11129 11211
11130 /* XXX Enabling the panel-fitter across page-flip is so far 11212 /* XXX Enabling the panel-fitter across page-flip is so far
@@ -11135,7 +11217,6 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
11135 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; 11217 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
11136 intel_ring_emit(engine, pf | pipesrc); 11218 intel_ring_emit(engine, pf | pipesrc);
11137 11219
11138 intel_mark_page_flip_active(intel_crtc->unpin_work);
11139 return 0; 11220 return 0;
11140} 11221}
11141 11222
@@ -11147,7 +11228,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
11147 uint32_t flags) 11228 uint32_t flags)
11148{ 11229{
11149 struct intel_engine_cs *engine = req->engine; 11230 struct intel_engine_cs *engine = req->engine;
11150 struct drm_i915_private *dev_priv = dev->dev_private; 11231 struct drm_i915_private *dev_priv = to_i915(dev);
11151 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11232 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11152 uint32_t pf, pipesrc; 11233 uint32_t pf, pipesrc;
11153 int ret; 11234 int ret;
@@ -11159,7 +11240,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
11159 intel_ring_emit(engine, MI_DISPLAY_FLIP | 11240 intel_ring_emit(engine, MI_DISPLAY_FLIP |
11160 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 11241 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11161 intel_ring_emit(engine, fb->pitches[0] | obj->tiling_mode); 11242 intel_ring_emit(engine, fb->pitches[0] | obj->tiling_mode);
11162 intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset); 11243 intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
11163 11244
11164 /* Contrary to the suggestions in the documentation, 11245 /* Contrary to the suggestions in the documentation,
11165 * "Enable Panel Fitter" does not seem to be required when page 11246 * "Enable Panel Fitter" does not seem to be required when page
@@ -11171,7 +11252,6 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
11171 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; 11252 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
11172 intel_ring_emit(engine, pf | pipesrc); 11253 intel_ring_emit(engine, pf | pipesrc);
11173 11254
11174 intel_mark_page_flip_active(intel_crtc->unpin_work);
11175 return 0; 11255 return 0;
11176} 11256}
11177 11257
@@ -11263,16 +11343,17 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
11263 11343
11264 intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | plane_bit); 11344 intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | plane_bit);
11265 intel_ring_emit(engine, (fb->pitches[0] | obj->tiling_mode)); 11345 intel_ring_emit(engine, (fb->pitches[0] | obj->tiling_mode));
11266 intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset); 11346 intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
11267 intel_ring_emit(engine, (MI_NOOP)); 11347 intel_ring_emit(engine, (MI_NOOP));
11268 11348
11269 intel_mark_page_flip_active(intel_crtc->unpin_work);
11270 return 0; 11349 return 0;
11271} 11350}
11272 11351
11273static bool use_mmio_flip(struct intel_engine_cs *engine, 11352static bool use_mmio_flip(struct intel_engine_cs *engine,
11274 struct drm_i915_gem_object *obj) 11353 struct drm_i915_gem_object *obj)
11275{ 11354{
11355 struct reservation_object *resv;
11356
11276 /* 11357 /*
11277 * This is not being used for older platforms, because 11358 * This is not being used for older platforms, because
11278 * non-availability of flip done interrupt forces us to use 11359 * non-availability of flip done interrupt forces us to use
@@ -11284,7 +11365,7 @@ static bool use_mmio_flip(struct intel_engine_cs *engine,
11284 if (engine == NULL) 11365 if (engine == NULL)
11285 return true; 11366 return true;
11286 11367
11287 if (INTEL_INFO(engine->dev)->gen < 5) 11368 if (INTEL_GEN(engine->i915) < 5)
11288 return false; 11369 return false;
11289 11370
11290 if (i915.use_mmio_flip < 0) 11371 if (i915.use_mmio_flip < 0)
@@ -11293,20 +11374,20 @@ static bool use_mmio_flip(struct intel_engine_cs *engine,
11293 return true; 11374 return true;
11294 else if (i915.enable_execlists) 11375 else if (i915.enable_execlists)
11295 return true; 11376 return true;
11296 else if (obj->base.dma_buf && 11377
11297 !reservation_object_test_signaled_rcu(obj->base.dma_buf->resv, 11378 resv = i915_gem_object_get_dmabuf_resv(obj);
11298 false)) 11379 if (resv && !reservation_object_test_signaled_rcu(resv, false))
11299 return true; 11380 return true;
11300 else 11381
11301 return engine != i915_gem_request_get_engine(obj->last_write_req); 11382 return engine != i915_gem_request_get_engine(obj->last_write_req);
11302} 11383}
11303 11384
11304static void skl_do_mmio_flip(struct intel_crtc *intel_crtc, 11385static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
11305 unsigned int rotation, 11386 unsigned int rotation,
11306 struct intel_unpin_work *work) 11387 struct intel_flip_work *work)
11307{ 11388{
11308 struct drm_device *dev = intel_crtc->base.dev; 11389 struct drm_device *dev = intel_crtc->base.dev;
11309 struct drm_i915_private *dev_priv = dev->dev_private; 11390 struct drm_i915_private *dev_priv = to_i915(dev);
11310 struct drm_framebuffer *fb = intel_crtc->base.primary->fb; 11391 struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
11311 const enum pipe pipe = intel_crtc->pipe; 11392 const enum pipe pipe = intel_crtc->pipe;
11312 u32 ctl, stride, tile_height; 11393 u32 ctl, stride, tile_height;
@@ -11355,10 +11436,10 @@ static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
11355} 11436}
11356 11437
11357static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc, 11438static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
11358 struct intel_unpin_work *work) 11439 struct intel_flip_work *work)
11359{ 11440{
11360 struct drm_device *dev = intel_crtc->base.dev; 11441 struct drm_device *dev = intel_crtc->base.dev;
11361 struct drm_i915_private *dev_priv = dev->dev_private; 11442 struct drm_i915_private *dev_priv = to_i915(dev);
11362 struct intel_framebuffer *intel_fb = 11443 struct intel_framebuffer *intel_fb =
11363 to_intel_framebuffer(intel_crtc->base.primary->fb); 11444 to_intel_framebuffer(intel_crtc->base.primary->fb);
11364 struct drm_i915_gem_object *obj = intel_fb->obj; 11445 struct drm_i915_gem_object *obj = intel_fb->obj;
@@ -11378,78 +11459,37 @@ static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
11378 POSTING_READ(DSPSURF(intel_crtc->plane)); 11459 POSTING_READ(DSPSURF(intel_crtc->plane));
11379} 11460}
11380 11461
11381/* 11462static void intel_mmio_flip_work_func(struct work_struct *w)
11382 * XXX: This is the temporary way to update the plane registers until we get
11383 * around to using the usual plane update functions for MMIO flips
11384 */
11385static void intel_do_mmio_flip(struct intel_mmio_flip *mmio_flip)
11386{
11387 struct intel_crtc *crtc = mmio_flip->crtc;
11388 struct intel_unpin_work *work;
11389
11390 spin_lock_irq(&crtc->base.dev->event_lock);
11391 work = crtc->unpin_work;
11392 spin_unlock_irq(&crtc->base.dev->event_lock);
11393 if (work == NULL)
11394 return;
11395
11396 intel_mark_page_flip_active(work);
11397
11398 intel_pipe_update_start(crtc);
11399
11400 if (INTEL_INFO(mmio_flip->i915)->gen >= 9)
11401 skl_do_mmio_flip(crtc, mmio_flip->rotation, work);
11402 else
11403		/* use_mmio_flip() restricts MMIO flips to ilk+ */
11404 ilk_do_mmio_flip(crtc, work);
11405
11406 intel_pipe_update_end(crtc);
11407}
11408
11409static void intel_mmio_flip_work_func(struct work_struct *work)
11410{ 11463{
11411 struct intel_mmio_flip *mmio_flip = 11464 struct intel_flip_work *work =
11412 container_of(work, struct intel_mmio_flip, work); 11465 container_of(w, struct intel_flip_work, mmio_work);
11466 struct intel_crtc *crtc = to_intel_crtc(work->crtc);
11467 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11413 struct intel_framebuffer *intel_fb = 11468 struct intel_framebuffer *intel_fb =
11414 to_intel_framebuffer(mmio_flip->crtc->base.primary->fb); 11469 to_intel_framebuffer(crtc->base.primary->fb);
11415 struct drm_i915_gem_object *obj = intel_fb->obj; 11470 struct drm_i915_gem_object *obj = intel_fb->obj;
11471 struct reservation_object *resv;
11416 11472
11417 if (mmio_flip->req) { 11473 if (work->flip_queued_req)
11418 WARN_ON(__i915_wait_request(mmio_flip->req, 11474 WARN_ON(__i915_wait_request(work->flip_queued_req,
11419 false, NULL, 11475 false, NULL,
11420 &mmio_flip->i915->rps.mmioflips)); 11476 &dev_priv->rps.mmioflips));
11421 i915_gem_request_unreference__unlocked(mmio_flip->req);
11422 }
11423 11477
11424 /* For framebuffer backed by dmabuf, wait for fence */ 11478 /* For framebuffer backed by dmabuf, wait for fence */
11425 if (obj->base.dma_buf) 11479 resv = i915_gem_object_get_dmabuf_resv(obj);
11426 WARN_ON(reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv, 11480 if (resv)
11427 false, false, 11481 WARN_ON(reservation_object_wait_timeout_rcu(resv, false, false,
11428 MAX_SCHEDULE_TIMEOUT) < 0); 11482 MAX_SCHEDULE_TIMEOUT) < 0);
11429 11483
11430 intel_do_mmio_flip(mmio_flip); 11484 intel_pipe_update_start(crtc);
11431 kfree(mmio_flip);
11432}
11433
11434static int intel_queue_mmio_flip(struct drm_device *dev,
11435 struct drm_crtc *crtc,
11436 struct drm_i915_gem_object *obj)
11437{
11438 struct intel_mmio_flip *mmio_flip;
11439
11440 mmio_flip = kmalloc(sizeof(*mmio_flip), GFP_KERNEL);
11441 if (mmio_flip == NULL)
11442 return -ENOMEM;
11443
11444 mmio_flip->i915 = to_i915(dev);
11445 mmio_flip->req = i915_gem_request_reference(obj->last_write_req);
11446 mmio_flip->crtc = to_intel_crtc(crtc);
11447 mmio_flip->rotation = crtc->primary->state->rotation;
11448 11485
11449 INIT_WORK(&mmio_flip->work, intel_mmio_flip_work_func); 11486 if (INTEL_GEN(dev_priv) >= 9)
11450 schedule_work(&mmio_flip->work); 11487 skl_do_mmio_flip(crtc, work->rotation, work);
11488 else
 11489		/* use_mmio_flip() restricts MMIO flips to ilk+ */
11490 ilk_do_mmio_flip(crtc, work);
11451 11491
11452 return 0; 11492 intel_pipe_update_end(crtc, work);
11453} 11493}
11454 11494
11455static int intel_default_queue_flip(struct drm_device *dev, 11495static int intel_default_queue_flip(struct drm_device *dev,
@@ -11462,37 +11502,32 @@ static int intel_default_queue_flip(struct drm_device *dev,
11462 return -ENODEV; 11502 return -ENODEV;
11463} 11503}
11464 11504
11465static bool __intel_pageflip_stall_check(struct drm_device *dev, 11505static bool __pageflip_stall_check_cs(struct drm_i915_private *dev_priv,
11466 struct drm_crtc *crtc) 11506 struct intel_crtc *intel_crtc,
11507 struct intel_flip_work *work)
11467{ 11508{
11468 struct drm_i915_private *dev_priv = dev->dev_private; 11509 u32 addr, vblank;
11469 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11470 struct intel_unpin_work *work = intel_crtc->unpin_work;
11471 u32 addr;
11472
11473 if (atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE)
11474 return true;
11475 11510
11476 if (atomic_read(&work->pending) < INTEL_FLIP_PENDING) 11511 if (!atomic_read(&work->pending))
11477 return false; 11512 return false;
11478 11513
11479 if (!work->enable_stall_check) 11514 smp_rmb();
11480 return false;
11481 11515
11516 vblank = intel_crtc_get_vblank_counter(intel_crtc);
11482 if (work->flip_ready_vblank == 0) { 11517 if (work->flip_ready_vblank == 0) {
11483 if (work->flip_queued_req && 11518 if (work->flip_queued_req &&
11484 !i915_gem_request_completed(work->flip_queued_req, true)) 11519 !i915_gem_request_completed(work->flip_queued_req))
11485 return false; 11520 return false;
11486 11521
11487 work->flip_ready_vblank = drm_crtc_vblank_count(crtc); 11522 work->flip_ready_vblank = vblank;
11488 } 11523 }
11489 11524
11490 if (drm_crtc_vblank_count(crtc) - work->flip_ready_vblank < 3) 11525 if (vblank - work->flip_ready_vblank < 3)
11491 return false; 11526 return false;
11492 11527
11493 /* Potential stall - if we see that the flip has happened, 11528 /* Potential stall - if we see that the flip has happened,
11494 * assume a missed interrupt. */ 11529 * assume a missed interrupt. */
11495 if (INTEL_INFO(dev)->gen >= 4) 11530 if (INTEL_GEN(dev_priv) >= 4)
11496 addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane))); 11531 addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane)));
11497 else 11532 else
11498 addr = I915_READ(DSPADDR(intel_crtc->plane)); 11533 addr = I915_READ(DSPADDR(intel_crtc->plane));
@@ -11504,12 +11539,12 @@ static bool __intel_pageflip_stall_check(struct drm_device *dev,
11504 return addr == work->gtt_offset; 11539 return addr == work->gtt_offset;
11505} 11540}
11506 11541
11507void intel_check_page_flip(struct drm_device *dev, int pipe) 11542void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
11508{ 11543{
11509 struct drm_i915_private *dev_priv = dev->dev_private; 11544 struct drm_device *dev = &dev_priv->drm;
11510 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 11545 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
11511 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11546 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11512 struct intel_unpin_work *work; 11547 struct intel_flip_work *work;
11513 11548
11514 WARN_ON(!in_interrupt()); 11549 WARN_ON(!in_interrupt());
11515 11550
@@ -11517,16 +11552,20 @@ void intel_check_page_flip(struct drm_device *dev, int pipe)
11517 return; 11552 return;
11518 11553
11519 spin_lock(&dev->event_lock); 11554 spin_lock(&dev->event_lock);
11520 work = intel_crtc->unpin_work; 11555 work = intel_crtc->flip_work;
11521 if (work != NULL && __intel_pageflip_stall_check(dev, crtc)) { 11556
11522 WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n", 11557 if (work != NULL && !is_mmio_work(work) &&
11523 work->flip_queued_vblank, drm_vblank_count(dev, pipe)); 11558 __pageflip_stall_check_cs(dev_priv, intel_crtc, work)) {
11559 WARN_ONCE(1,
11560 "Kicking stuck page flip: queued at %d, now %d\n",
11561 work->flip_queued_vblank, intel_crtc_get_vblank_counter(intel_crtc));
11524 page_flip_completed(intel_crtc); 11562 page_flip_completed(intel_crtc);
11525 work = NULL; 11563 work = NULL;
11526 } 11564 }
11527 if (work != NULL && 11565
11528 drm_vblank_count(dev, pipe) - work->flip_queued_vblank > 1) 11566 if (work != NULL && !is_mmio_work(work) &&
11529 intel_queue_rps_boost_for_request(dev, work->flip_queued_req); 11567 intel_crtc_get_vblank_counter(intel_crtc) - work->flip_queued_vblank > 1)
11568 intel_queue_rps_boost_for_request(work->flip_queued_req);
11530 spin_unlock(&dev->event_lock); 11569 spin_unlock(&dev->event_lock);
11531} 11570}
11532 11571
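The stall check above pairs atomic_read(&work->pending) with smp_rmb(), so the producer side must publish the flip bookkeeping before flagging the work as pending. A minimal sketch of that producer ordering, assuming it mirrors intel_mark_page_flip_active() (not shown in this excerpt):

static void example_mark_flip_active(struct intel_flip_work *work)
{
	/* Make the flip bookkeeping visible before the pending flag,
	 * pairing with the atomic_read() + smp_rmb() on the reader side. */
	smp_wmb();
	atomic_set(&work->pending, 1);
}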
@@ -11536,13 +11575,13 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
11536 uint32_t page_flip_flags) 11575 uint32_t page_flip_flags)
11537{ 11576{
11538 struct drm_device *dev = crtc->dev; 11577 struct drm_device *dev = crtc->dev;
11539 struct drm_i915_private *dev_priv = dev->dev_private; 11578 struct drm_i915_private *dev_priv = to_i915(dev);
11540 struct drm_framebuffer *old_fb = crtc->primary->fb; 11579 struct drm_framebuffer *old_fb = crtc->primary->fb;
11541 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 11580 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11542 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11581 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11543 struct drm_plane *primary = crtc->primary; 11582 struct drm_plane *primary = crtc->primary;
11544 enum pipe pipe = intel_crtc->pipe; 11583 enum pipe pipe = intel_crtc->pipe;
11545 struct intel_unpin_work *work; 11584 struct intel_flip_work *work;
11546 struct intel_engine_cs *engine; 11585 struct intel_engine_cs *engine;
11547 bool mmio_flip; 11586 bool mmio_flip;
11548 struct drm_i915_gem_request *request = NULL; 11587 struct drm_i915_gem_request *request = NULL;
@@ -11579,19 +11618,19 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
11579 work->event = event; 11618 work->event = event;
11580 work->crtc = crtc; 11619 work->crtc = crtc;
11581 work->old_fb = old_fb; 11620 work->old_fb = old_fb;
11582 INIT_WORK(&work->work, intel_unpin_work_fn); 11621 INIT_WORK(&work->unpin_work, intel_unpin_work_fn);
11583 11622
11584 ret = drm_crtc_vblank_get(crtc); 11623 ret = drm_crtc_vblank_get(crtc);
11585 if (ret) 11624 if (ret)
11586 goto free_work; 11625 goto free_work;
11587 11626
11588 /* We borrow the event spin lock for protecting unpin_work */ 11627 /* We borrow the event spin lock for protecting flip_work */
11589 spin_lock_irq(&dev->event_lock); 11628 spin_lock_irq(&dev->event_lock);
11590 if (intel_crtc->unpin_work) { 11629 if (intel_crtc->flip_work) {
11591 /* Before declaring the flip queue wedged, check if 11630 /* Before declaring the flip queue wedged, check if
11592 * the hardware completed the operation behind our backs. 11631 * the hardware completed the operation behind our backs.
11593 */ 11632 */
11594 if (__intel_pageflip_stall_check(dev, crtc)) { 11633 if (pageflip_finished(intel_crtc, intel_crtc->flip_work)) {
11595 DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n"); 11634 DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
11596 page_flip_completed(intel_crtc); 11635 page_flip_completed(intel_crtc);
11597 } else { 11636 } else {
@@ -11603,7 +11642,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
11603 return -EBUSY; 11642 return -EBUSY;
11604 } 11643 }
11605 } 11644 }
11606 intel_crtc->unpin_work = work; 11645 intel_crtc->flip_work = work;
11607 spin_unlock_irq(&dev->event_lock); 11646 spin_unlock_irq(&dev->event_lock);
11608 11647
11609 if (atomic_read(&intel_crtc->unpin_work_count) >= 2) 11648 if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
@@ -11615,7 +11654,9 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
11615 11654
11616 crtc->primary->fb = fb; 11655 crtc->primary->fb = fb;
11617 update_state_fb(crtc->primary); 11656 update_state_fb(crtc->primary);
11618 intel_fbc_pre_update(intel_crtc); 11657
11658 intel_fbc_pre_update(intel_crtc, intel_crtc->config,
11659 to_intel_plane_state(primary->state));
11619 11660
11620 work->pending_flip_obj = obj; 11661 work->pending_flip_obj = obj;
11621 11662
@@ -11658,6 +11699,11 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
11658 */ 11699 */
11659 if (!mmio_flip) { 11700 if (!mmio_flip) {
11660 ret = i915_gem_object_sync(obj, engine, &request); 11701 ret = i915_gem_object_sync(obj, engine, &request);
11702 if (!ret && !request) {
11703 request = i915_gem_request_alloc(engine, NULL);
11704 ret = PTR_ERR_OR_ZERO(request);
11705 }
11706
11661 if (ret) 11707 if (ret)
11662 goto cleanup_pending; 11708 goto cleanup_pending;
11663 } 11709 }
@@ -11669,38 +11715,28 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
11669 work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary), 11715 work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary),
11670 obj, 0); 11716 obj, 0);
11671 work->gtt_offset += intel_crtc->dspaddr_offset; 11717 work->gtt_offset += intel_crtc->dspaddr_offset;
11718 work->rotation = crtc->primary->state->rotation;
11672 11719
11673 if (mmio_flip) { 11720 if (mmio_flip) {
11674 ret = intel_queue_mmio_flip(dev, crtc, obj); 11721 INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);
11675 if (ret)
11676 goto cleanup_unpin;
11677 11722
11678 i915_gem_request_assign(&work->flip_queued_req, 11723 i915_gem_request_assign(&work->flip_queued_req,
11679 obj->last_write_req); 11724 obj->last_write_req);
11680 } else {
11681 if (!request) {
11682 request = i915_gem_request_alloc(engine, NULL);
11683 if (IS_ERR(request)) {
11684 ret = PTR_ERR(request);
11685 goto cleanup_unpin;
11686 }
11687 }
11688 11725
11726 schedule_work(&work->mmio_work);
11727 } else {
11728 i915_gem_request_assign(&work->flip_queued_req, request);
11689 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request, 11729 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
11690 page_flip_flags); 11730 page_flip_flags);
11691 if (ret) 11731 if (ret)
11692 goto cleanup_unpin; 11732 goto cleanup_unpin;
11693 11733
11694 i915_gem_request_assign(&work->flip_queued_req, request); 11734 intel_mark_page_flip_active(intel_crtc, work);
11695 }
11696 11735
11697 if (request)
11698 i915_add_request_no_flush(request); 11736 i915_add_request_no_flush(request);
11737 }
11699 11738
11700 work->flip_queued_vblank = drm_crtc_vblank_count(crtc); 11739 i915_gem_track_fb(intel_fb_obj(old_fb), obj,
11701 work->enable_stall_check = true;
11702
11703 i915_gem_track_fb(intel_fb_obj(work->old_fb), obj,
11704 to_intel_plane(primary)->frontbuffer_bit); 11740 to_intel_plane(primary)->frontbuffer_bit);
11705 mutex_unlock(&dev->struct_mutex); 11741 mutex_unlock(&dev->struct_mutex);
11706 11742
@@ -11726,7 +11762,7 @@ cleanup:
11726 drm_framebuffer_unreference(work->old_fb); 11762 drm_framebuffer_unreference(work->old_fb);
11727 11763
11728 spin_lock_irq(&dev->event_lock); 11764 spin_lock_irq(&dev->event_lock);
11729 intel_crtc->unpin_work = NULL; 11765 intel_crtc->flip_work = NULL;
11730 spin_unlock_irq(&dev->event_lock); 11766 spin_unlock_irq(&dev->event_lock);
11731 11767
11732 drm_crtc_vblank_put(crtc); 11768 drm_crtc_vblank_put(crtc);
@@ -11828,15 +11864,14 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
11828 struct drm_i915_private *dev_priv = to_i915(dev); 11864 struct drm_i915_private *dev_priv = to_i915(dev);
11829 struct intel_plane_state *old_plane_state = 11865 struct intel_plane_state *old_plane_state =
11830 to_intel_plane_state(plane->state); 11866 to_intel_plane_state(plane->state);
11831 int idx = intel_crtc->base.base.id, ret;
11832 bool mode_changed = needs_modeset(crtc_state); 11867 bool mode_changed = needs_modeset(crtc_state);
11833 bool was_crtc_enabled = crtc->state->active; 11868 bool was_crtc_enabled = crtc->state->active;
11834 bool is_crtc_enabled = crtc_state->active; 11869 bool is_crtc_enabled = crtc_state->active;
11835 bool turn_off, turn_on, visible, was_visible; 11870 bool turn_off, turn_on, visible, was_visible;
11836 struct drm_framebuffer *fb = plane_state->fb; 11871 struct drm_framebuffer *fb = plane_state->fb;
11872 int ret;
11837 11873
11838 if (crtc_state && INTEL_INFO(dev)->gen >= 9 && 11874 if (INTEL_GEN(dev) >= 9 && plane->type != DRM_PLANE_TYPE_CURSOR) {
11839 plane->type != DRM_PLANE_TYPE_CURSOR) {
11840 ret = skl_update_scaler_plane( 11875 ret = skl_update_scaler_plane(
11841 to_intel_crtc_state(crtc_state), 11876 to_intel_crtc_state(crtc_state),
11842 to_intel_plane_state(plane_state)); 11877 to_intel_plane_state(plane_state));
@@ -11854,6 +11889,11 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
11854 * Visibility is calculated as if the crtc was on, but 11889 * Visibility is calculated as if the crtc was on, but
11855 * after scaler setup everything depends on it being off 11890 * after scaler setup everything depends on it being off
11856 * when the crtc isn't active. 11891 * when the crtc isn't active.
11892 *
11893 * FIXME this is wrong for watermarks. Watermarks should also
11894 * be computed as if the pipe would be active. Perhaps move
11895 * per-plane wm computation to the .check_plane() hook, and
11896 * only combine the results from all planes in the current place?
11857 */ 11897 */
11858 if (!is_crtc_enabled) 11898 if (!is_crtc_enabled)
11859 to_intel_plane_state(plane_state)->visible = visible = false; 11899 to_intel_plane_state(plane_state)->visible = visible = false;
@@ -11867,11 +11907,15 @@ int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
11867 turn_off = was_visible && (!visible || mode_changed); 11907 turn_off = was_visible && (!visible || mode_changed);
11868 turn_on = visible && (!was_visible || mode_changed); 11908 turn_on = visible && (!was_visible || mode_changed);
11869 11909
11870 DRM_DEBUG_ATOMIC("[CRTC:%i] has [PLANE:%i] with fb %i\n", idx, 11910 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
11871 plane->base.id, fb ? fb->base.id : -1); 11911 intel_crtc->base.base.id,
11912 intel_crtc->base.name,
11913 plane->base.id, plane->name,
11914 fb ? fb->base.id : -1);
11872 11915
11873 DRM_DEBUG_ATOMIC("[PLANE:%i] visible %i -> %i, off %i, on %i, ms %i\n", 11916 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
11874 plane->base.id, was_visible, visible, 11917 plane->base.id, plane->name,
11918 was_visible, visible,
11875 turn_off, turn_on, mode_changed); 11919 turn_off, turn_on, mode_changed);
11876 11920
11877 if (turn_on) { 11921 if (turn_on) {
@@ -11944,31 +11988,11 @@ static bool check_single_encoder_cloning(struct drm_atomic_state *state,
11944 return true; 11988 return true;
11945} 11989}
11946 11990
11947static bool check_encoder_cloning(struct drm_atomic_state *state,
11948 struct intel_crtc *crtc)
11949{
11950 struct intel_encoder *encoder;
11951 struct drm_connector *connector;
11952 struct drm_connector_state *connector_state;
11953 int i;
11954
11955 for_each_connector_in_state(state, connector, connector_state, i) {
11956 if (connector_state->crtc != &crtc->base)
11957 continue;
11958
11959 encoder = to_intel_encoder(connector_state->best_encoder);
11960 if (!check_single_encoder_cloning(state, crtc, encoder))
11961 return false;
11962 }
11963
11964 return true;
11965}
11966
11967static int intel_crtc_atomic_check(struct drm_crtc *crtc, 11991static int intel_crtc_atomic_check(struct drm_crtc *crtc,
11968 struct drm_crtc_state *crtc_state) 11992 struct drm_crtc_state *crtc_state)
11969{ 11993{
11970 struct drm_device *dev = crtc->dev; 11994 struct drm_device *dev = crtc->dev;
11971 struct drm_i915_private *dev_priv = dev->dev_private; 11995 struct drm_i915_private *dev_priv = to_i915(dev);
11972 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11996 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11973 struct intel_crtc_state *pipe_config = 11997 struct intel_crtc_state *pipe_config =
11974 to_intel_crtc_state(crtc_state); 11998 to_intel_crtc_state(crtc_state);
@@ -11976,11 +12000,6 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
11976 int ret; 12000 int ret;
11977 bool mode_changed = needs_modeset(crtc_state); 12001 bool mode_changed = needs_modeset(crtc_state);
11978 12002
11979 if (mode_changed && !check_encoder_cloning(state, intel_crtc)) {
11980 DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
11981 return -EINVAL;
11982 }
11983
11984 if (mode_changed && !crtc_state->active) 12003 if (mode_changed && !crtc_state->active)
11985 pipe_config->update_wm_post = true; 12004 pipe_config->update_wm_post = true;
11986 12005
@@ -12033,7 +12052,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc,
12033 } 12052 }
12034 } else if (dev_priv->display.compute_intermediate_wm) { 12053 } else if (dev_priv->display.compute_intermediate_wm) {
12035 if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9) 12054 if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9)
12036 pipe_config->wm.intermediate = pipe_config->wm.optimal.ilk; 12055 pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal;
12037 } 12056 }
12038 12057
12039 if (INTEL_INFO(dev)->gen >= 9) { 12058 if (INTEL_INFO(dev)->gen >= 9) {
@@ -12168,7 +12187,8 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
12168 struct intel_plane_state *state; 12187 struct intel_plane_state *state;
12169 struct drm_framebuffer *fb; 12188 struct drm_framebuffer *fb;
12170 12189
12171 DRM_DEBUG_KMS("[CRTC:%d]%s config %p for pipe %c\n", crtc->base.base.id, 12190 DRM_DEBUG_KMS("[CRTC:%d:%s]%s config %p for pipe %c\n",
12191 crtc->base.base.id, crtc->base.name,
12172 context, pipe_config, pipe_name(crtc->pipe)); 12192 context, pipe_config, pipe_name(crtc->pipe));
12173 12193
12174 DRM_DEBUG_KMS("cpu_transcoder: %s\n", transcoder_name(pipe_config->cpu_transcoder)); 12194 DRM_DEBUG_KMS("cpu_transcoder: %s\n", transcoder_name(pipe_config->cpu_transcoder));
@@ -12181,14 +12201,14 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
12181 pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n, 12201 pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
12182 pipe_config->fdi_m_n.tu); 12202 pipe_config->fdi_m_n.tu);
12183 DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n", 12203 DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
12184 pipe_config->has_dp_encoder, 12204 intel_crtc_has_dp_encoder(pipe_config),
12185 pipe_config->lane_count, 12205 pipe_config->lane_count,
12186 pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n, 12206 pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
12187 pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n, 12207 pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
12188 pipe_config->dp_m_n.tu); 12208 pipe_config->dp_m_n.tu);
12189 12209
12190 DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n", 12210 DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
12191 pipe_config->has_dp_encoder, 12211 intel_crtc_has_dp_encoder(pipe_config),
12192 pipe_config->lane_count, 12212 pipe_config->lane_count,
12193 pipe_config->dp_m2_n2.gmch_m, 12213 pipe_config->dp_m2_n2.gmch_m,
12194 pipe_config->dp_m2_n2.gmch_n, 12214 pipe_config->dp_m2_n2.gmch_n,
@@ -12269,29 +12289,24 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
12269 state = to_intel_plane_state(plane->state); 12289 state = to_intel_plane_state(plane->state);
12270 fb = state->base.fb; 12290 fb = state->base.fb;
12271 if (!fb) { 12291 if (!fb) {
12272 DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d " 12292 DRM_DEBUG_KMS("[PLANE:%d:%s] disabled, scaler_id = %d\n",
12273 "disabled, scaler_id = %d\n", 12293 plane->base.id, plane->name, state->scaler_id);
12274 plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
12275 plane->base.id, intel_plane->pipe,
12276 (crtc->base.primary == plane) ? 0 : intel_plane->plane + 1,
12277 drm_plane_index(plane), state->scaler_id);
12278 continue; 12294 continue;
12279 } 12295 }
12280 12296
12281 DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d enabled", 12297 DRM_DEBUG_KMS("[PLANE:%d:%s] enabled",
12282 plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD", 12298 plane->base.id, plane->name);
12283 plane->base.id, intel_plane->pipe, 12299 DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = %s",
12284 crtc->base.primary == plane ? 0 : intel_plane->plane + 1, 12300 fb->base.id, fb->width, fb->height,
12285 drm_plane_index(plane)); 12301 drm_get_format_name(fb->pixel_format));
12286 DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = 0x%x", 12302 DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
12287 fb->base.id, fb->width, fb->height, fb->pixel_format); 12303 state->scaler_id,
12288 DRM_DEBUG_KMS("\tscaler:%d src (%u, %u) %ux%u dst (%u, %u) %ux%u\n", 12304 state->src.x1 >> 16, state->src.y1 >> 16,
12289 state->scaler_id, 12305 drm_rect_width(&state->src) >> 16,
12290 state->src.x1 >> 16, state->src.y1 >> 16, 12306 drm_rect_height(&state->src) >> 16,
12291 drm_rect_width(&state->src) >> 16, 12307 state->dst.x1, state->dst.y1,
12292 drm_rect_height(&state->src) >> 16, 12308 drm_rect_width(&state->dst),
12293 state->dst.x1, state->dst.y1, 12309 drm_rect_height(&state->dst));
12294 drm_rect_width(&state->dst), drm_rect_height(&state->dst));
12295 } 12310 }
12296} 12311}
12297 12312
@@ -12326,7 +12341,7 @@ static bool check_digital_port_conflicts(struct drm_atomic_state *state)
12326 case INTEL_OUTPUT_UNKNOWN: 12341 case INTEL_OUTPUT_UNKNOWN:
12327 if (WARN_ON(!HAS_DDI(dev))) 12342 if (WARN_ON(!HAS_DDI(dev)))
12328 break; 12343 break;
12329 case INTEL_OUTPUT_DISPLAYPORT: 12344 case INTEL_OUTPUT_DP:
12330 case INTEL_OUTPUT_HDMI: 12345 case INTEL_OUTPUT_HDMI:
12331 case INTEL_OUTPUT_EDP: 12346 case INTEL_OUTPUT_EDP:
12332 port_mask = 1 << enc_to_dig_port(&encoder->base)->port; 12347 port_mask = 1 << enc_to_dig_port(&encoder->base)->port;
@@ -12423,6 +12438,24 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
12423 &pipe_config->pipe_src_w, 12438 &pipe_config->pipe_src_w,
12424 &pipe_config->pipe_src_h); 12439 &pipe_config->pipe_src_h);
12425 12440
12441 for_each_connector_in_state(state, connector, connector_state, i) {
12442 if (connector_state->crtc != crtc)
12443 continue;
12444
12445 encoder = to_intel_encoder(connector_state->best_encoder);
12446
12447 if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
12448 DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
12449 goto fail;
12450 }
12451
12452 /*
12453 * Determine output_types before calling the .compute_config()
12454 * hooks so that the hooks can use this information safely.
12455 */
12456 pipe_config->output_types |= 1 << encoder->type;
12457 }
12458
12426encoder_retry: 12459encoder_retry:
12427 /* Ensure the port clock defaults are reset when retrying. */ 12460 /* Ensure the port clock defaults are reset when retrying. */
12428 pipe_config->port_clock = 0; 12461 pipe_config->port_clock = 0;
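With output_types now filled in before the encoder ->compute_config() hooks run, a hook can safely test which output types share the pipe. A hedged sketch of such a hook (the body is illustrative only; intel_crtc_has_type() is the helper used elsewhere in this diff):

static bool example_compute_config(struct intel_encoder *encoder,
				   struct intel_crtc_state *pipe_config)
{
	/* output_types is a bitmask of 1 << INTEL_OUTPUT_*, valid here
	 * because it is populated before any compute_config hook runs. */
	if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
		return false;	/* e.g. refuse cloning with HDMI */

	return true;
}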
@@ -12708,8 +12741,8 @@ intel_pipe_config_compare(struct drm_device *dev,
12708 PIPE_CONF_CHECK_I(fdi_lanes); 12741 PIPE_CONF_CHECK_I(fdi_lanes);
12709 PIPE_CONF_CHECK_M_N(fdi_m_n); 12742 PIPE_CONF_CHECK_M_N(fdi_m_n);
12710 12743
12711 PIPE_CONF_CHECK_I(has_dp_encoder);
12712 PIPE_CONF_CHECK_I(lane_count); 12744 PIPE_CONF_CHECK_I(lane_count);
12745 PIPE_CONF_CHECK_X(lane_lat_optim_mask);
12713 12746
12714 if (INTEL_INFO(dev)->gen < 8) { 12747 if (INTEL_INFO(dev)->gen < 8) {
12715 PIPE_CONF_CHECK_M_N(dp_m_n); 12748 PIPE_CONF_CHECK_M_N(dp_m_n);
@@ -12719,7 +12752,7 @@ intel_pipe_config_compare(struct drm_device *dev,
12719 } else 12752 } else
12720 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2); 12753 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
12721 12754
12722 PIPE_CONF_CHECK_I(has_dsi_encoder); 12755 PIPE_CONF_CHECK_X(output_types);
12723 12756
12724 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay); 12757 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
12725 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal); 12758 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
@@ -12838,7 +12871,7 @@ static void verify_wm_state(struct drm_crtc *crtc,
12838 struct drm_crtc_state *new_state) 12871 struct drm_crtc_state *new_state)
12839{ 12872{
12840 struct drm_device *dev = crtc->dev; 12873 struct drm_device *dev = crtc->dev;
12841 struct drm_i915_private *dev_priv = dev->dev_private; 12874 struct drm_i915_private *dev_priv = to_i915(dev);
12842 struct skl_ddb_allocation hw_ddb, *sw_ddb; 12875 struct skl_ddb_allocation hw_ddb, *sw_ddb;
12843 struct skl_ddb_entry *hw_entry, *sw_entry; 12876 struct skl_ddb_entry *hw_entry, *sw_entry;
12844 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 12877 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -12944,7 +12977,7 @@ verify_crtc_state(struct drm_crtc *crtc,
12944 struct drm_crtc_state *new_crtc_state) 12977 struct drm_crtc_state *new_crtc_state)
12945{ 12978{
12946 struct drm_device *dev = crtc->dev; 12979 struct drm_device *dev = crtc->dev;
12947 struct drm_i915_private *dev_priv = dev->dev_private; 12980 struct drm_i915_private *dev_priv = to_i915(dev);
12948 struct intel_encoder *encoder; 12981 struct intel_encoder *encoder;
12949 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 12982 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12950 struct intel_crtc_state *pipe_config, *sw_config; 12983 struct intel_crtc_state *pipe_config, *sw_config;
@@ -12958,7 +12991,7 @@ verify_crtc_state(struct drm_crtc *crtc,
12958 pipe_config->base.crtc = crtc; 12991 pipe_config->base.crtc = crtc;
12959 pipe_config->base.state = old_state; 12992 pipe_config->base.state = old_state;
12960 12993
12961 DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id); 12994 DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
12962 12995
12963 active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config); 12996 active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);
12964 12997
@@ -12987,8 +13020,10 @@ verify_crtc_state(struct drm_crtc *crtc,
12987 "Encoder connected to wrong pipe %c\n", 13020 "Encoder connected to wrong pipe %c\n",
12988 pipe_name(pipe)); 13021 pipe_name(pipe));
12989 13022
12990 if (active) 13023 if (active) {
13024 pipe_config->output_types |= 1 << encoder->type;
12991 encoder->get_config(encoder, pipe_config); 13025 encoder->get_config(encoder, pipe_config);
13026 }
12992 } 13027 }
12993 13028
12994 if (!new_crtc_state->active) 13029 if (!new_crtc_state->active)
@@ -13067,7 +13102,7 @@ verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
13067 struct drm_crtc_state *old_crtc_state, 13102 struct drm_crtc_state *old_crtc_state,
13068 struct drm_crtc_state *new_crtc_state) 13103 struct drm_crtc_state *new_crtc_state)
13069{ 13104{
13070 struct drm_i915_private *dev_priv = dev->dev_private; 13105 struct drm_i915_private *dev_priv = to_i915(dev);
13071 struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state); 13106 struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
13072 struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state); 13107 struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);
13073 13108
@@ -13106,7 +13141,7 @@ intel_modeset_verify_crtc(struct drm_crtc *crtc,
13106static void 13141static void
13107verify_disabled_dpll_state(struct drm_device *dev) 13142verify_disabled_dpll_state(struct drm_device *dev)
13108{ 13143{
13109 struct drm_i915_private *dev_priv = dev->dev_private; 13144 struct drm_i915_private *dev_priv = to_i915(dev);
13110 int i; 13145 int i;
13111 13146
13112 for (i = 0; i < dev_priv->num_shared_dpll; i++) 13147 for (i = 0; i < dev_priv->num_shared_dpll; i++)
@@ -13153,7 +13188,7 @@ static void update_scanline_offset(struct intel_crtc *crtc)
13153 13188
13154 crtc->scanline_offset = vtotal - 1; 13189 crtc->scanline_offset = vtotal - 1;
13155 } else if (HAS_DDI(dev) && 13190 } else if (HAS_DDI(dev) &&
13156 intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) { 13191 intel_crtc_has_type(crtc->config, INTEL_OUTPUT_HDMI)) {
13157 crtc->scanline_offset = 2; 13192 crtc->scanline_offset = 2;
13158 } else 13193 } else
13159 crtc->scanline_offset = 1; 13194 crtc->scanline_offset = 1;
@@ -13288,7 +13323,7 @@ static int intel_modeset_all_pipes(struct drm_atomic_state *state)
13288static int intel_modeset_checks(struct drm_atomic_state *state) 13323static int intel_modeset_checks(struct drm_atomic_state *state)
13289{ 13324{
13290 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 13325 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13291 struct drm_i915_private *dev_priv = state->dev->dev_private; 13326 struct drm_i915_private *dev_priv = to_i915(state->dev);
13292 struct drm_crtc *crtc; 13327 struct drm_crtc *crtc;
13293 struct drm_crtc_state *crtc_state; 13328 struct drm_crtc_state *crtc_state;
13294 int ret = 0, i; 13329 int ret = 0, i;
@@ -13306,6 +13341,9 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
13306 intel_state->active_crtcs |= 1 << i; 13341 intel_state->active_crtcs |= 1 << i;
13307 else 13342 else
13308 intel_state->active_crtcs &= ~(1 << i); 13343 intel_state->active_crtcs &= ~(1 << i);
13344
13345 if (crtc_state->active != crtc->state->active)
13346 intel_state->active_pipe_changes |= drm_crtc_mask(crtc);
13309 } 13347 }
13310 13348
13311 /* 13349 /*
@@ -13316,9 +13354,17 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
13316 * adjusted_mode bits in the crtc directly. 13354 * adjusted_mode bits in the crtc directly.
13317 */ 13355 */
13318 if (dev_priv->display.modeset_calc_cdclk) { 13356 if (dev_priv->display.modeset_calc_cdclk) {
13357 if (!intel_state->cdclk_pll_vco)
13358 intel_state->cdclk_pll_vco = dev_priv->cdclk_pll.vco;
13359 if (!intel_state->cdclk_pll_vco)
13360 intel_state->cdclk_pll_vco = dev_priv->skl_preferred_vco_freq;
13361
13319 ret = dev_priv->display.modeset_calc_cdclk(state); 13362 ret = dev_priv->display.modeset_calc_cdclk(state);
13363 if (ret < 0)
13364 return ret;
13320 13365
13321 if (!ret && intel_state->dev_cdclk != dev_priv->cdclk_freq) 13366 if (intel_state->dev_cdclk != dev_priv->cdclk_freq ||
13367 intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco)
13322 ret = intel_modeset_all_pipes(state); 13368 ret = intel_modeset_all_pipes(state);
13323 13369
13324 if (ret < 0) 13370 if (ret < 0)
@@ -13342,38 +13388,16 @@ static int intel_modeset_checks(struct drm_atomic_state *state)
13342 * phase. The code here should be run after the per-crtc and per-plane 'check' 13388 * phase. The code here should be run after the per-crtc and per-plane 'check'
13343 * handlers to ensure that all derived state has been updated. 13389 * handlers to ensure that all derived state has been updated.
13344 */ 13390 */
13345static void calc_watermark_data(struct drm_atomic_state *state) 13391static int calc_watermark_data(struct drm_atomic_state *state)
13346{ 13392{
13347 struct drm_device *dev = state->dev; 13393 struct drm_device *dev = state->dev;
13348 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 13394 struct drm_i915_private *dev_priv = to_i915(dev);
13349 struct drm_crtc *crtc;
13350 struct drm_crtc_state *cstate;
13351 struct drm_plane *plane;
13352 struct drm_plane_state *pstate;
13353
13354 /*
13355 * Calculate watermark configuration details now that derived
13356 * plane/crtc state is all properly updated.
13357 */
13358 drm_for_each_crtc(crtc, dev) {
13359 cstate = drm_atomic_get_existing_crtc_state(state, crtc) ?:
13360 crtc->state;
13361
13362 if (cstate->active)
13363 intel_state->wm_config.num_pipes_active++;
13364 }
13365 drm_for_each_legacy_plane(plane, dev) {
13366 pstate = drm_atomic_get_existing_plane_state(state, plane) ?:
13367 plane->state;
13368 13395
13369 if (!to_intel_plane_state(pstate)->visible) 13396 /* Is there platform-specific watermark information to calculate? */
13370 continue; 13397 if (dev_priv->display.compute_global_watermarks)
13398 return dev_priv->display.compute_global_watermarks(state);
13371 13399
13372 intel_state->wm_config.sprites_enabled = true; 13400 return 0;
13373 if (pstate->crtc_w != pstate->src_w >> 16 ||
13374 pstate->crtc_h != pstate->src_h >> 16)
13375 intel_state->wm_config.sprites_scaled = true;
13376 }
13377} 13401}
13378 13402
13379/** 13403/**
@@ -13403,14 +13427,13 @@ static int intel_atomic_check(struct drm_device *dev,
13403 if (crtc_state->mode.private_flags != crtc->state->mode.private_flags) 13427 if (crtc_state->mode.private_flags != crtc->state->mode.private_flags)
13404 crtc_state->mode_changed = true; 13428 crtc_state->mode_changed = true;
13405 13429
13406 if (!crtc_state->enable) { 13430 if (!needs_modeset(crtc_state))
13407 if (needs_modeset(crtc_state))
13408 any_ms = true;
13409 continue; 13431 continue;
13410 }
13411 13432
13412 if (!needs_modeset(crtc_state)) 13433 if (!crtc_state->enable) {
13434 any_ms = true;
13413 continue; 13435 continue;
13436 }
13414 13437
13415 /* FIXME: For only active_changed we shouldn't need to do any 13438 /* FIXME: For only active_changed we shouldn't need to do any
13416 * state recomputation at all. */ 13439 * state recomputation at all. */
@@ -13420,8 +13443,11 @@ static int intel_atomic_check(struct drm_device *dev,
13420 return ret; 13443 return ret;
13421 13444
13422 ret = intel_modeset_pipe_config(crtc, pipe_config); 13445 ret = intel_modeset_pipe_config(crtc, pipe_config);
13423 if (ret) 13446 if (ret) {
13447 intel_dump_pipe_config(to_intel_crtc(crtc),
13448 pipe_config, "[failed]");
13424 return ret; 13449 return ret;
13450 }
13425 13451
13426 if (i915.fastboot && 13452 if (i915.fastboot &&
13427 intel_pipe_config_compare(dev, 13453 intel_pipe_config_compare(dev,
@@ -13431,13 +13457,12 @@ static int intel_atomic_check(struct drm_device *dev,
13431 to_intel_crtc_state(crtc_state)->update_pipe = true; 13457 to_intel_crtc_state(crtc_state)->update_pipe = true;
13432 } 13458 }
13433 13459
13434 if (needs_modeset(crtc_state)) { 13460 if (needs_modeset(crtc_state))
13435 any_ms = true; 13461 any_ms = true;
13436 13462
13437 ret = drm_atomic_add_affected_planes(state, crtc); 13463 ret = drm_atomic_add_affected_planes(state, crtc);
13438 if (ret) 13464 if (ret)
13439 return ret; 13465 return ret;
13440 }
13441 13466
13442 intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config, 13467 intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
13443 needs_modeset(crtc_state) ? 13468 needs_modeset(crtc_state) ?
@@ -13457,27 +13482,20 @@ static int intel_atomic_check(struct drm_device *dev,
13457 return ret; 13482 return ret;
13458 13483
13459 intel_fbc_choose_crtc(dev_priv, state); 13484 intel_fbc_choose_crtc(dev_priv, state);
13460 calc_watermark_data(state); 13485 return calc_watermark_data(state);
13461
13462 return 0;
13463} 13486}
13464 13487
13465static int intel_atomic_prepare_commit(struct drm_device *dev, 13488static int intel_atomic_prepare_commit(struct drm_device *dev,
13466 struct drm_atomic_state *state, 13489 struct drm_atomic_state *state,
13467 bool nonblock) 13490 bool nonblock)
13468{ 13491{
13469 struct drm_i915_private *dev_priv = dev->dev_private; 13492 struct drm_i915_private *dev_priv = to_i915(dev);
13470 struct drm_plane_state *plane_state; 13493 struct drm_plane_state *plane_state;
13471 struct drm_crtc_state *crtc_state; 13494 struct drm_crtc_state *crtc_state;
13472 struct drm_plane *plane; 13495 struct drm_plane *plane;
13473 struct drm_crtc *crtc; 13496 struct drm_crtc *crtc;
13474 int i, ret; 13497 int i, ret;
13475 13498
13476 if (nonblock) {
13477 DRM_DEBUG_KMS("i915 does not yet support nonblocking commit\n");
13478 return -EINVAL;
13479 }
13480
13481 for_each_crtc_in_state(state, crtc, crtc_state, i) { 13499 for_each_crtc_in_state(state, crtc, crtc_state, i) {
13482 if (state->legacy_cursor_update) 13500 if (state->legacy_cursor_update)
13483 continue; 13501 continue;
@@ -13521,6 +13539,16 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
13521 return ret; 13539 return ret;
13522} 13540}
13523 13541
13542u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
13543{
13544 struct drm_device *dev = crtc->base.dev;
13545
13546 if (!dev->max_vblank_count)
13547 return drm_accurate_vblank_count(&crtc->base);
13548
13549 return dev->driver->get_vblank_counter(dev, crtc->pipe);
13550}
13551
13524static void intel_atomic_wait_for_vblanks(struct drm_device *dev, 13552static void intel_atomic_wait_for_vblanks(struct drm_device *dev,
13525 struct drm_i915_private *dev_priv, 13553 struct drm_i915_private *dev_priv,
13526 unsigned crtc_mask) 13554 unsigned crtc_mask)
@@ -13586,45 +13614,36 @@ static bool needs_vblank_wait(struct intel_crtc_state *crtc_state)
13586 return false; 13614 return false;
13587} 13615}
13588 13616
13589/** 13617static void intel_atomic_commit_tail(struct drm_atomic_state *state)
13590 * intel_atomic_commit - commit validated state object
13591 * @dev: DRM device
13592 * @state: the top-level driver state object
13593 * @nonblock: nonblocking commit
13594 *
13595 * This function commits a top-level state object that has been validated
13596 * with drm_atomic_helper_check().
13597 *
13598 * FIXME: Atomic modeset support for i915 is not yet complete. At the moment
13599 * we can only handle plane-related operations and do not yet support
13600 * nonblocking commit.
13601 *
13602 * RETURNS
13603 * Zero for success or -errno.
13604 */
13605static int intel_atomic_commit(struct drm_device *dev,
13606 struct drm_atomic_state *state,
13607 bool nonblock)
13608{ 13618{
13619 struct drm_device *dev = state->dev;
13609 struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 13620 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13610 struct drm_i915_private *dev_priv = dev->dev_private; 13621 struct drm_i915_private *dev_priv = to_i915(dev);
13611 struct drm_crtc_state *old_crtc_state; 13622 struct drm_crtc_state *old_crtc_state;
13612 struct drm_crtc *crtc; 13623 struct drm_crtc *crtc;
13613 struct intel_crtc_state *intel_cstate; 13624 struct intel_crtc_state *intel_cstate;
13614 int ret = 0, i; 13625 struct drm_plane *plane;
13626 struct drm_plane_state *plane_state;
13615 bool hw_check = intel_state->modeset; 13627 bool hw_check = intel_state->modeset;
13616 unsigned long put_domains[I915_MAX_PIPES] = {}; 13628 unsigned long put_domains[I915_MAX_PIPES] = {};
13617 unsigned crtc_vblank_mask = 0; 13629 unsigned crtc_vblank_mask = 0;
13630 int i, ret;
13618 13631
13619 ret = intel_atomic_prepare_commit(dev, state, nonblock); 13632 for_each_plane_in_state(state, plane, plane_state, i) {
13620 if (ret) { 13633 struct intel_plane_state *intel_plane_state =
13621 DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret); 13634 to_intel_plane_state(plane_state);
13622 return ret; 13635
13636 if (!intel_plane_state->wait_req)
13637 continue;
13638
13639 ret = __i915_wait_request(intel_plane_state->wait_req,
13640 true, NULL, NULL);
13641 /* EIO should be eaten, and we can't get interrupted in the
13642 * worker, and blocking commits have waited already. */
13643 WARN_ON(ret);
13623 } 13644 }
13624 13645
13625 drm_atomic_helper_swap_state(dev, state); 13646 drm_atomic_helper_wait_for_dependencies(state);
13626 dev_priv->wm.config = intel_state->wm_config;
13627 intel_shared_dpll_commit(state);
13628 13647
13629 if (intel_state->modeset) { 13648 if (intel_state->modeset) {
13630 memcpy(dev_priv->min_pixclk, intel_state->min_pixclk, 13649 memcpy(dev_priv->min_pixclk, intel_state->min_pixclk,
@@ -13679,7 +13698,8 @@ static int intel_atomic_commit(struct drm_device *dev,
13679 drm_atomic_helper_update_legacy_modeset_state(state->dev, state); 13698 drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
13680 13699
13681 if (dev_priv->display.modeset_commit_cdclk && 13700 if (dev_priv->display.modeset_commit_cdclk &&
13682 intel_state->dev_cdclk != dev_priv->cdclk_freq) 13701 (intel_state->dev_cdclk != dev_priv->cdclk_freq ||
13702 intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco))
13683 dev_priv->display.modeset_commit_cdclk(state); 13703 dev_priv->display.modeset_commit_cdclk(state);
13684 13704
13685 intel_modeset_verify_disabled(dev); 13705 intel_modeset_verify_disabled(dev);
@@ -13691,30 +13711,44 @@ static int intel_atomic_commit(struct drm_device *dev,
13691 bool modeset = needs_modeset(crtc->state); 13711 bool modeset = needs_modeset(crtc->state);
13692 struct intel_crtc_state *pipe_config = 13712 struct intel_crtc_state *pipe_config =
13693 to_intel_crtc_state(crtc->state); 13713 to_intel_crtc_state(crtc->state);
13694 bool update_pipe = !modeset && pipe_config->update_pipe;
13695 13714
13696 if (modeset && crtc->state->active) { 13715 if (modeset && crtc->state->active) {
13697 update_scanline_offset(to_intel_crtc(crtc)); 13716 update_scanline_offset(to_intel_crtc(crtc));
13698 dev_priv->display.crtc_enable(crtc); 13717 dev_priv->display.crtc_enable(crtc);
13699 } 13718 }
13700 13719
 13720				/* Complete events for now-disabled pipes here. */
13721 if (modeset && !crtc->state->active && crtc->state->event) {
13722 spin_lock_irq(&dev->event_lock);
13723 drm_crtc_send_vblank_event(crtc, crtc->state->event);
13724 spin_unlock_irq(&dev->event_lock);
13725
13726 crtc->state->event = NULL;
13727 }
13728
13701 if (!modeset) 13729 if (!modeset)
13702 intel_pre_plane_update(to_intel_crtc_state(old_crtc_state)); 13730 intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
13703 13731
13704 if (crtc->state->active && 13732 if (crtc->state->active &&
13705 drm_atomic_get_existing_plane_state(state, crtc->primary)) 13733 drm_atomic_get_existing_plane_state(state, crtc->primary))
13706 intel_fbc_enable(intel_crtc); 13734 intel_fbc_enable(intel_crtc, pipe_config, to_intel_plane_state(crtc->primary->state));
13707 13735
13708 if (crtc->state->active && 13736 if (crtc->state->active)
13709 (crtc->state->planes_changed || update_pipe))
13710 drm_atomic_helper_commit_planes_on_crtc(old_crtc_state); 13737 drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
13711 13738
13712 if (pipe_config->base.active && needs_vblank_wait(pipe_config)) 13739 if (pipe_config->base.active && needs_vblank_wait(pipe_config))
13713 crtc_vblank_mask |= 1 << i; 13740 crtc_vblank_mask |= 1 << i;
13714 } 13741 }
13715 13742
13716 /* FIXME: add subpixel order */ 13743 /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
13717 13744 * already, but still need the state for the delayed optimization. To
13745 * fix this:
13746 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
13747 * - schedule that vblank worker _before_ calling hw_done
 13748	 * - at the start of commit_tail, cancel it _synchronously
13749 * - switch over to the vblank wait helper in the core after that since
 13750	 *   we don't need our special handling any more.
13751 */
13718 if (!state->legacy_cursor_update) 13752 if (!state->legacy_cursor_update)
13719 intel_atomic_wait_for_vblanks(dev, dev_priv, crtc_vblank_mask); 13753 intel_atomic_wait_for_vblanks(dev, dev_priv, crtc_vblank_mask);
13720 13754
@@ -13741,6 +13775,8 @@ static int intel_atomic_commit(struct drm_device *dev,
13741 intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state); 13775 intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state);
13742 } 13776 }
13743 13777
13778 drm_atomic_helper_commit_hw_done(state);
13779
13744 if (intel_state->modeset) 13780 if (intel_state->modeset)
13745 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET); 13781 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
13746 13782
@@ -13748,6 +13784,8 @@ static int intel_atomic_commit(struct drm_device *dev,
13748 drm_atomic_helper_cleanup_planes(dev, state); 13784 drm_atomic_helper_cleanup_planes(dev, state);
13749 mutex_unlock(&dev->struct_mutex); 13785 mutex_unlock(&dev->struct_mutex);
13750 13786
13787 drm_atomic_helper_commit_cleanup_done(state);
13788
13751 drm_atomic_state_free(state); 13789 drm_atomic_state_free(state);
13752 13790
13753 /* As one of the primary mmio accessors, KMS has a high likelihood 13791 /* As one of the primary mmio accessors, KMS has a high likelihood
@@ -13762,6 +13800,86 @@ static int intel_atomic_commit(struct drm_device *dev,
13762 * can happen also when the device is completely off. 13800 * can happen also when the device is completely off.
13763 */ 13801 */
13764 intel_uncore_arm_unclaimed_mmio_detection(dev_priv); 13802 intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
13803}
13804
13805static void intel_atomic_commit_work(struct work_struct *work)
13806{
13807 struct drm_atomic_state *state = container_of(work,
13808 struct drm_atomic_state,
13809 commit_work);
13810 intel_atomic_commit_tail(state);
13811}
13812
13813static void intel_atomic_track_fbs(struct drm_atomic_state *state)
13814{
13815 struct drm_plane_state *old_plane_state;
13816 struct drm_plane *plane;
13817 struct drm_i915_gem_object *obj, *old_obj;
13818 struct intel_plane *intel_plane;
13819 int i;
13820
13821 mutex_lock(&state->dev->struct_mutex);
13822 for_each_plane_in_state(state, plane, old_plane_state, i) {
13823 obj = intel_fb_obj(plane->state->fb);
13824 old_obj = intel_fb_obj(old_plane_state->fb);
13825 intel_plane = to_intel_plane(plane);
13826
13827 i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
13828 }
13829 mutex_unlock(&state->dev->struct_mutex);
13830}
13831
13832/**
13833 * intel_atomic_commit - commit validated state object
13834 * @dev: DRM device
13835 * @state: the top-level driver state object
13836 * @nonblock: nonblocking commit
13837 *
13838 * This function commits a top-level state object that has been validated
13839 * with drm_atomic_helper_check().
13840 *
13841 * FIXME: Atomic modeset support for i915 is not yet complete. At the moment
13842 * nonblocking commits are only safe for pure plane updates. Everything else
13843 * should work though.
13844 *
13845 * RETURNS
13846 * Zero for success or -errno.
13847 */
13848static int intel_atomic_commit(struct drm_device *dev,
13849 struct drm_atomic_state *state,
13850 bool nonblock)
13851{
13852 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13853 struct drm_i915_private *dev_priv = to_i915(dev);
13854 int ret = 0;
13855
13856 if (intel_state->modeset && nonblock) {
13857 DRM_DEBUG_KMS("nonblocking commit for modeset not yet implemented.\n");
13858 return -EINVAL;
13859 }
13860
13861 ret = drm_atomic_helper_setup_commit(state, nonblock);
13862 if (ret)
13863 return ret;
13864
13865 INIT_WORK(&state->commit_work, intel_atomic_commit_work);
13866
13867 ret = intel_atomic_prepare_commit(dev, state, nonblock);
13868 if (ret) {
13869 DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
13870 return ret;
13871 }
13872
13873 drm_atomic_helper_swap_state(state, true);
13874 dev_priv->wm.distrust_bios_wm = false;
13875 dev_priv->wm.skl_results = intel_state->wm_results;
13876 intel_shared_dpll_commit(state);
13877 intel_atomic_track_fbs(state);
13878
13879 if (nonblock)
13880 queue_work(system_unbound_wq, &state->commit_work);
13881 else
13882 intel_atomic_commit_tail(state);
13765 13883
13766 return 0; 13884 return 0;
13767} 13885}
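A condensed sketch of the ordering contract the new commit path relies on from the atomic helpers, with the i915-specific programming elided (illustrative only; every helper call below appears in the hunk above):

static void example_commit_tail(struct drm_atomic_state *state)
{
	/* Wait for commits this one depends on to finish programming. */
	drm_atomic_helper_wait_for_dependencies(state);

	/* ... program pipes/planes, wait for the relevant vblanks ... */

	/* Signal the core so the next queued commit may proceed. */
	drm_atomic_helper_commit_hw_done(state);

	/* ... cleanup that may block (unpinning old framebuffers, etc.) ... */

	/* Tell the core all cleanup is done; the state can now be freed. */
	drm_atomic_helper_commit_cleanup_done(state);

	drm_atomic_state_free(state);
}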
@@ -13775,8 +13893,8 @@ void intel_crtc_restore_mode(struct drm_crtc *crtc)
13775 13893
13776 state = drm_atomic_state_alloc(dev); 13894 state = drm_atomic_state_alloc(dev);
13777 if (!state) { 13895 if (!state) {
13778 DRM_DEBUG_KMS("[CRTC:%d] crtc restore failed, out of memory", 13896 DRM_DEBUG_KMS("[CRTC:%d:%s] crtc restore failed, out of memory",
13779 crtc->base.id); 13897 crtc->base.id, crtc->name);
13780 return; 13898 return;
13781 } 13899 }
13782 13900
@@ -13806,8 +13924,50 @@ out:
13806 13924
13807#undef for_each_intel_crtc_masked 13925#undef for_each_intel_crtc_masked
13808 13926
13927/*
13928 * FIXME: Remove this once i915 is fully DRIVER_ATOMIC by calling
13929 * drm_atomic_helper_legacy_gamma_set() directly.
13930 */
13931static int intel_atomic_legacy_gamma_set(struct drm_crtc *crtc,
13932 u16 *red, u16 *green, u16 *blue,
13933 uint32_t size)
13934{
13935 struct drm_device *dev = crtc->dev;
13936 struct drm_mode_config *config = &dev->mode_config;
13937 struct drm_crtc_state *state;
13938 int ret;
13939
13940 ret = drm_atomic_helper_legacy_gamma_set(crtc, red, green, blue, size);
13941 if (ret)
13942 return ret;
13943
13944 /*
13945 * Make sure we update the legacy properties so this works when
13946 * atomic is not enabled.
13947 */
13948
13949 state = crtc->state;
13950
13951 drm_object_property_set_value(&crtc->base,
13952 config->degamma_lut_property,
13953 (state->degamma_lut) ?
13954 state->degamma_lut->base.id : 0);
13955
13956 drm_object_property_set_value(&crtc->base,
13957 config->ctm_property,
13958 (state->ctm) ?
13959 state->ctm->base.id : 0);
13960
13961 drm_object_property_set_value(&crtc->base,
13962 config->gamma_lut_property,
13963 (state->gamma_lut) ?
13964 state->gamma_lut->base.id : 0);
13965
13966 return 0;
13967}
13968
13809static const struct drm_crtc_funcs intel_crtc_funcs = { 13969static const struct drm_crtc_funcs intel_crtc_funcs = {
13810 .gamma_set = drm_atomic_helper_legacy_gamma_set, 13970 .gamma_set = intel_atomic_legacy_gamma_set,
13811 .set_config = drm_atomic_helper_set_config, 13971 .set_config = drm_atomic_helper_set_config,
13812 .set_property = drm_atomic_helper_crtc_set_property, 13972 .set_property = drm_atomic_helper_crtc_set_property,
13813 .destroy = intel_crtc_destroy, 13973 .destroy = intel_crtc_destroy,
@@ -13836,9 +13996,9 @@ intel_prepare_plane_fb(struct drm_plane *plane,
13836{ 13996{
13837 struct drm_device *dev = plane->dev; 13997 struct drm_device *dev = plane->dev;
13838 struct drm_framebuffer *fb = new_state->fb; 13998 struct drm_framebuffer *fb = new_state->fb;
13839 struct intel_plane *intel_plane = to_intel_plane(plane);
13840 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 13999 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
13841 struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb); 14000 struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
14001 struct reservation_object *resv;
13842 int ret = 0; 14002 int ret = 0;
13843 14003
13844 if (!obj && !old_obj) 14004 if (!obj && !old_obj)
@@ -13868,12 +14028,15 @@ intel_prepare_plane_fb(struct drm_plane *plane,
13868 } 14028 }
13869 } 14029 }
13870 14030
14031 if (!obj)
14032 return 0;
14033
13871 /* For framebuffer backed by dmabuf, wait for fence */ 14034 /* For framebuffer backed by dmabuf, wait for fence */
13872 if (obj && obj->base.dma_buf) { 14035 resv = i915_gem_object_get_dmabuf_resv(obj);
14036 if (resv) {
13873 long lret; 14037 long lret;
13874 14038
13875 lret = reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv, 14039 lret = reservation_object_wait_timeout_rcu(resv, false, true,
13876 false, true,
13877 MAX_SCHEDULE_TIMEOUT); 14040 MAX_SCHEDULE_TIMEOUT);
13878 if (lret == -ERESTARTSYS) 14041 if (lret == -ERESTARTSYS)
13879 return lret; 14042 return lret;
@@ -13881,9 +14044,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
13881 WARN(lret < 0, "waiting returns %li\n", lret); 14044 WARN(lret < 0, "waiting returns %li\n", lret);
13882 } 14045 }
13883 14046
13884 if (!obj) { 14047 if (plane->type == DRM_PLANE_TYPE_CURSOR &&
13885 ret = 0;
13886 } else if (plane->type == DRM_PLANE_TYPE_CURSOR &&
13887 INTEL_INFO(dev)->cursor_needs_physical) { 14048 INTEL_INFO(dev)->cursor_needs_physical) {
13888 int align = IS_I830(dev) ? 16 * 1024 : 256; 14049 int align = IS_I830(dev) ? 16 * 1024 : 256;
13889 ret = i915_gem_object_attach_phys(obj, align); 14050 ret = i915_gem_object_attach_phys(obj, align);
@@ -13894,15 +14055,11 @@ intel_prepare_plane_fb(struct drm_plane *plane,
13894 } 14055 }
13895 14056
13896 if (ret == 0) { 14057 if (ret == 0) {
13897 if (obj) { 14058 struct intel_plane_state *plane_state =
13898 struct intel_plane_state *plane_state = 14059 to_intel_plane_state(new_state);
13899 to_intel_plane_state(new_state);
13900 14060
13901 i915_gem_request_assign(&plane_state->wait_req, 14061 i915_gem_request_assign(&plane_state->wait_req,
13902 obj->last_write_req); 14062 obj->last_write_req);
13903 }
13904
13905 i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
13906 } 14063 }
13907 14064
13908 return ret; 14065 return ret;
@@ -13922,7 +14079,6 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
13922 const struct drm_plane_state *old_state) 14079 const struct drm_plane_state *old_state)
13923{ 14080{
13924 struct drm_device *dev = plane->dev; 14081 struct drm_device *dev = plane->dev;
13925 struct intel_plane *intel_plane = to_intel_plane(plane);
13926 struct intel_plane_state *old_intel_state; 14082 struct intel_plane_state *old_intel_state;
13927 struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb); 14083 struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
13928 struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb); 14084 struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);
@@ -13936,11 +14092,6 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
13936 !INTEL_INFO(dev)->cursor_needs_physical)) 14092 !INTEL_INFO(dev)->cursor_needs_physical))
13937 intel_unpin_fb_obj(old_state->fb, old_state->rotation); 14093 intel_unpin_fb_obj(old_state->fb, old_state->rotation);
13938 14094
13939 /* prepare_fb aborted? */
13940 if ((old_obj && (old_obj->frontbuffer_bits & intel_plane->frontbuffer_bit)) ||
13941 (obj && !(obj->frontbuffer_bits & intel_plane->frontbuffer_bit)))
13942 i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
13943
13944 i915_gem_request_assign(&old_intel_state->wait_req, NULL); 14095 i915_gem_request_assign(&old_intel_state->wait_req, NULL);
13945} 14096}
13946 14097
@@ -13948,15 +14099,11 @@ int
13948skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state) 14099skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state)
13949{ 14100{
13950 int max_scale; 14101 int max_scale;
13951 struct drm_device *dev;
13952 struct drm_i915_private *dev_priv;
13953 int crtc_clock, cdclk; 14102 int crtc_clock, cdclk;
13954 14103
13955 if (!intel_crtc || !crtc_state->base.enable) 14104 if (!intel_crtc || !crtc_state->base.enable)
13956 return DRM_PLANE_HELPER_NO_SCALING; 14105 return DRM_PLANE_HELPER_NO_SCALING;
13957 14106
13958 dev = intel_crtc->base.dev;
13959 dev_priv = dev->dev_private;
13960 crtc_clock = crtc_state->base.adjusted_mode.crtc_clock; 14107 crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
13961 cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk; 14108 cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk;
13962 14109
@@ -13996,6 +14143,7 @@ intel_check_primary_plane(struct drm_plane *plane,
13996 14143
13997 return drm_plane_helper_check_update(plane, crtc, fb, &state->src, 14144 return drm_plane_helper_check_update(plane, crtc, fb, &state->src,
13998 &state->dst, &state->clip, 14145 &state->dst, &state->clip,
14146 state->base.rotation,
13999 min_scale, max_scale, 14147 min_scale, max_scale,
14000 can_position, true, 14148 can_position, true,
14001 &state->visible); 14149 &state->visible);
@@ -14032,7 +14180,7 @@ static void intel_finish_crtc_commit(struct drm_crtc *crtc,
14032{ 14180{
14033 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 14181 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
14034 14182
14035 intel_pipe_update_end(intel_crtc); 14183 intel_pipe_update_end(intel_crtc, NULL);
14036} 14184}
14037 14185
14038/** 14186/**
@@ -14044,9 +14192,11 @@ static void intel_finish_crtc_commit(struct drm_crtc *crtc,
14044 */ 14192 */
14045void intel_plane_destroy(struct drm_plane *plane) 14193void intel_plane_destroy(struct drm_plane *plane)
14046{ 14194{
14047 struct intel_plane *intel_plane = to_intel_plane(plane); 14195 if (!plane)
14196 return;
14197
14048 drm_plane_cleanup(plane); 14198 drm_plane_cleanup(plane);
14049 kfree(intel_plane); 14199 kfree(to_intel_plane(plane));
14050} 14200}
14051 14201
14052const struct drm_plane_funcs intel_plane_funcs = { 14202const struct drm_plane_funcs intel_plane_funcs = {
@@ -14118,10 +14268,24 @@ static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
14118 primary->disable_plane = i9xx_disable_primary_plane; 14268 primary->disable_plane = i9xx_disable_primary_plane;
14119 } 14269 }
14120 14270
14121 ret = drm_universal_plane_init(dev, &primary->base, 0, 14271 if (INTEL_INFO(dev)->gen >= 9)
14122 &intel_plane_funcs, 14272 ret = drm_universal_plane_init(dev, &primary->base, 0,
14123 intel_primary_formats, num_formats, 14273 &intel_plane_funcs,
14124 DRM_PLANE_TYPE_PRIMARY, NULL); 14274 intel_primary_formats, num_formats,
14275 DRM_PLANE_TYPE_PRIMARY,
14276 "plane 1%c", pipe_name(pipe));
14277 else if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
14278 ret = drm_universal_plane_init(dev, &primary->base, 0,
14279 &intel_plane_funcs,
14280 intel_primary_formats, num_formats,
14281 DRM_PLANE_TYPE_PRIMARY,
14282 "primary %c", pipe_name(pipe));
14283 else
14284 ret = drm_universal_plane_init(dev, &primary->base, 0,
14285 &intel_plane_funcs,
14286 intel_primary_formats, num_formats,
14287 DRM_PLANE_TYPE_PRIMARY,
14288 "plane %c", plane_name(primary->plane));
14125 if (ret) 14289 if (ret)
14126 goto fail; 14290 goto fail;
14127 14291
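drm_universal_plane_init() gained printf-style name arguments in this cycle, which the hunk above uses to label primary planes per pipe. A hedged example of the same pattern for another plane type (plane and the overlay format arrays are placeholders, not from this diff):

	ret = drm_universal_plane_init(dev, &plane->base, 0,
				       &intel_plane_funcs,
				       overlay_formats, num_overlay_formats,
				       DRM_PLANE_TYPE_OVERLAY,
				       "overlay %c", pipe_name(pipe));
	if (ret)
		goto fail;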
@@ -14171,6 +14335,7 @@ intel_check_cursor_plane(struct drm_plane *plane,
14171 14335
14172 ret = drm_plane_helper_check_update(plane, crtc, fb, &state->src, 14336 ret = drm_plane_helper_check_update(plane, crtc, fb, &state->src,
14173 &state->dst, &state->clip, 14337 &state->dst, &state->clip,
14338 state->base.rotation,
14174 DRM_PLANE_HELPER_NO_SCALING, 14339 DRM_PLANE_HELPER_NO_SCALING,
14175 DRM_PLANE_HELPER_NO_SCALING, 14340 DRM_PLANE_HELPER_NO_SCALING,
14176 true, true, &state->visible); 14341 true, true, &state->visible);
@@ -14279,7 +14444,8 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
14279 &intel_plane_funcs, 14444 &intel_plane_funcs,
14280 intel_cursor_formats, 14445 intel_cursor_formats,
14281 ARRAY_SIZE(intel_cursor_formats), 14446 ARRAY_SIZE(intel_cursor_formats),
14282 DRM_PLANE_TYPE_CURSOR, NULL); 14447 DRM_PLANE_TYPE_CURSOR,
14448 "cursor %c", pipe_name(pipe));
14283 if (ret) 14449 if (ret)
14284 goto fail; 14450 goto fail;
14285 14451
@@ -14327,7 +14493,7 @@ static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_cr
14327 14493
14328static void intel_crtc_init(struct drm_device *dev, int pipe) 14494static void intel_crtc_init(struct drm_device *dev, int pipe)
14329{ 14495{
14330 struct drm_i915_private *dev_priv = dev->dev_private; 14496 struct drm_i915_private *dev_priv = to_i915(dev);
14331 struct intel_crtc *intel_crtc; 14497 struct intel_crtc *intel_crtc;
14332 struct intel_crtc_state *crtc_state = NULL; 14498 struct intel_crtc_state *crtc_state = NULL;
14333 struct drm_plane *primary = NULL; 14499 struct drm_plane *primary = NULL;
@@ -14364,7 +14530,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
14364 goto fail; 14530 goto fail;
14365 14531
14366 ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary, 14532 ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
14367 cursor, &intel_crtc_funcs, NULL); 14533 cursor, &intel_crtc_funcs,
14534 "pipe %c", pipe_name(pipe));
14368 if (ret) 14535 if (ret)
14369 goto fail; 14536 goto fail;
14370 14537
@@ -14398,10 +14565,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
14398 return; 14565 return;
14399 14566
14400fail: 14567fail:
14401 if (primary) 14568 intel_plane_destroy(primary);
14402 drm_plane_cleanup(primary); 14569 intel_plane_destroy(cursor);
14403 if (cursor)
14404 drm_plane_cleanup(cursor);
14405 kfree(crtc_state); 14570 kfree(crtc_state);
14406 kfree(intel_crtc); 14571 kfree(intel_crtc);
14407} 14572}
@@ -14427,11 +14592,8 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
14427 struct intel_crtc *crtc; 14592 struct intel_crtc *crtc;
14428 14593
14429 drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id); 14594 drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
14430 14595 if (!drmmode_crtc)
14431 if (!drmmode_crtc) {
14432 DRM_ERROR("no such CRTC id\n");
14433 return -ENOENT; 14596 return -ENOENT;
14434 }
14435 14597
14436 crtc = to_intel_crtc(drmmode_crtc); 14598 crtc = to_intel_crtc(drmmode_crtc);
14437 pipe_from_crtc_id->pipe = crtc->pipe; 14599 pipe_from_crtc_id->pipe = crtc->pipe;
@@ -14458,7 +14620,7 @@ static int intel_encoder_clones(struct intel_encoder *encoder)
14458 14620
14459static bool has_edp_a(struct drm_device *dev) 14621static bool has_edp_a(struct drm_device *dev)
14460{ 14622{
14461 struct drm_i915_private *dev_priv = dev->dev_private; 14623 struct drm_i915_private *dev_priv = to_i915(dev);
14462 14624
14463 if (!IS_MOBILE(dev)) 14625 if (!IS_MOBILE(dev))
14464 return false; 14626 return false;
@@ -14474,7 +14636,7 @@ static bool has_edp_a(struct drm_device *dev)
14474 14636
14475static bool intel_crt_present(struct drm_device *dev) 14637static bool intel_crt_present(struct drm_device *dev)
14476{ 14638{
14477 struct drm_i915_private *dev_priv = dev->dev_private; 14639 struct drm_i915_private *dev_priv = to_i915(dev);
14478 14640
14479 if (INTEL_INFO(dev)->gen >= 9) 14641 if (INTEL_INFO(dev)->gen >= 9)
14480 return false; 14642 return false;
@@ -14500,10 +14662,15 @@ static bool intel_crt_present(struct drm_device *dev)
14500 14662
14501static void intel_setup_outputs(struct drm_device *dev) 14663static void intel_setup_outputs(struct drm_device *dev)
14502{ 14664{
14503 struct drm_i915_private *dev_priv = dev->dev_private; 14665 struct drm_i915_private *dev_priv = to_i915(dev);
14504 struct intel_encoder *encoder; 14666 struct intel_encoder *encoder;
14505 bool dpd_is_edp = false; 14667 bool dpd_is_edp = false;
14506 14668
14669 /*
14670 * intel_edp_init_connector() depends on this completing first, to
14671		 * prevent the registration of both eDP and LVDS and the incorrect
14672 * sharing of the PPS.
14673 */
14507 intel_lvds_init(dev); 14674 intel_lvds_init(dev);
14508 14675
14509 if (intel_crt_present(dev)) 14676 if (intel_crt_present(dev))
@@ -15088,12 +15255,13 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
15088 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; 15255 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
15089 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 15256 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
15090 dev_priv->display.fdi_link_train = hsw_fdi_link_train; 15257 dev_priv->display.fdi_link_train = hsw_fdi_link_train;
15091 if (IS_BROADWELL(dev_priv)) { 15258 }
15092 dev_priv->display.modeset_commit_cdclk = 15259
15093 broadwell_modeset_commit_cdclk; 15260 if (IS_BROADWELL(dev_priv)) {
15094 dev_priv->display.modeset_calc_cdclk = 15261 dev_priv->display.modeset_commit_cdclk =
15095 broadwell_modeset_calc_cdclk; 15262 broadwell_modeset_commit_cdclk;
15096 } 15263 dev_priv->display.modeset_calc_cdclk =
15264 broadwell_modeset_calc_cdclk;
15097 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 15265 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
15098 dev_priv->display.modeset_commit_cdclk = 15266 dev_priv->display.modeset_commit_cdclk =
15099 valleyview_modeset_commit_cdclk; 15267 valleyview_modeset_commit_cdclk;
@@ -15101,9 +15269,14 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
15101 valleyview_modeset_calc_cdclk; 15269 valleyview_modeset_calc_cdclk;
15102 } else if (IS_BROXTON(dev_priv)) { 15270 } else if (IS_BROXTON(dev_priv)) {
15103 dev_priv->display.modeset_commit_cdclk = 15271 dev_priv->display.modeset_commit_cdclk =
15104 broxton_modeset_commit_cdclk; 15272 bxt_modeset_commit_cdclk;
15105 dev_priv->display.modeset_calc_cdclk = 15273 dev_priv->display.modeset_calc_cdclk =
15106 broxton_modeset_calc_cdclk; 15274 bxt_modeset_calc_cdclk;
15275 } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
15276 dev_priv->display.modeset_commit_cdclk =
15277 skl_modeset_commit_cdclk;
15278 dev_priv->display.modeset_calc_cdclk =
15279 skl_modeset_calc_cdclk;
15107 } 15280 }
15108 15281
15109 switch (INTEL_INFO(dev_priv)->gen) { 15282 switch (INTEL_INFO(dev_priv)->gen) {
@@ -15142,7 +15315,7 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
15142 */ 15315 */
15143static void quirk_pipea_force(struct drm_device *dev) 15316static void quirk_pipea_force(struct drm_device *dev)
15144{ 15317{
15145 struct drm_i915_private *dev_priv = dev->dev_private; 15318 struct drm_i915_private *dev_priv = to_i915(dev);
15146 15319
15147 dev_priv->quirks |= QUIRK_PIPEA_FORCE; 15320 dev_priv->quirks |= QUIRK_PIPEA_FORCE;
15148 DRM_INFO("applying pipe a force quirk\n"); 15321 DRM_INFO("applying pipe a force quirk\n");
@@ -15150,7 +15323,7 @@ static void quirk_pipea_force(struct drm_device *dev)
15150 15323
15151static void quirk_pipeb_force(struct drm_device *dev) 15324static void quirk_pipeb_force(struct drm_device *dev)
15152{ 15325{
15153 struct drm_i915_private *dev_priv = dev->dev_private; 15326 struct drm_i915_private *dev_priv = to_i915(dev);
15154 15327
15155 dev_priv->quirks |= QUIRK_PIPEB_FORCE; 15328 dev_priv->quirks |= QUIRK_PIPEB_FORCE;
15156 DRM_INFO("applying pipe b force quirk\n"); 15329 DRM_INFO("applying pipe b force quirk\n");
@@ -15161,7 +15334,7 @@ static void quirk_pipeb_force(struct drm_device *dev)
15161 */ 15334 */
15162static void quirk_ssc_force_disable(struct drm_device *dev) 15335static void quirk_ssc_force_disable(struct drm_device *dev)
15163{ 15336{
15164 struct drm_i915_private *dev_priv = dev->dev_private; 15337 struct drm_i915_private *dev_priv = to_i915(dev);
15165 dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE; 15338 dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
15166 DRM_INFO("applying lvds SSC disable quirk\n"); 15339 DRM_INFO("applying lvds SSC disable quirk\n");
15167} 15340}
@@ -15172,7 +15345,7 @@ static void quirk_ssc_force_disable(struct drm_device *dev)
15172 */ 15345 */
15173static void quirk_invert_brightness(struct drm_device *dev) 15346static void quirk_invert_brightness(struct drm_device *dev)
15174{ 15347{
15175 struct drm_i915_private *dev_priv = dev->dev_private; 15348 struct drm_i915_private *dev_priv = to_i915(dev);
15176 dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS; 15349 dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
15177 DRM_INFO("applying inverted panel brightness quirk\n"); 15350 DRM_INFO("applying inverted panel brightness quirk\n");
15178} 15351}
@@ -15180,7 +15353,7 @@ static void quirk_invert_brightness(struct drm_device *dev)
15180/* Some VBT's incorrectly indicate no backlight is present */ 15353/* Some VBT's incorrectly indicate no backlight is present */
15181static void quirk_backlight_present(struct drm_device *dev) 15354static void quirk_backlight_present(struct drm_device *dev)
15182{ 15355{
15183 struct drm_i915_private *dev_priv = dev->dev_private; 15356 struct drm_i915_private *dev_priv = to_i915(dev);
15184 dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT; 15357 dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
15185 DRM_INFO("applying backlight present quirk\n"); 15358 DRM_INFO("applying backlight present quirk\n");
15186} 15359}
@@ -15306,7 +15479,7 @@ static void intel_init_quirks(struct drm_device *dev)
15306/* Disable the VGA plane that we never use */ 15479/* Disable the VGA plane that we never use */
15307static void i915_disable_vga(struct drm_device *dev) 15480static void i915_disable_vga(struct drm_device *dev)
15308{ 15481{
15309 struct drm_i915_private *dev_priv = dev->dev_private; 15482 struct drm_i915_private *dev_priv = to_i915(dev);
15310 u8 sr1; 15483 u8 sr1;
15311 i915_reg_t vga_reg = i915_vgacntrl_reg(dev); 15484 i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
15312 15485
@@ -15324,14 +15497,14 @@ static void i915_disable_vga(struct drm_device *dev)
15324 15497
15325void intel_modeset_init_hw(struct drm_device *dev) 15498void intel_modeset_init_hw(struct drm_device *dev)
15326{ 15499{
15327 struct drm_i915_private *dev_priv = dev->dev_private; 15500 struct drm_i915_private *dev_priv = to_i915(dev);
15328 15501
15329 intel_update_cdclk(dev); 15502 intel_update_cdclk(dev);
15330 15503
15331 dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq; 15504 dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;
15332 15505
15333 intel_init_clock_gating(dev); 15506 intel_init_clock_gating(dev);
15334 intel_enable_gt_powersave(dev); 15507 intel_enable_gt_powersave(dev_priv);
15335} 15508}
15336 15509
15337/* 15510/*
@@ -15401,7 +15574,6 @@ retry:
15401 } 15574 }
15402 15575
15403 /* Write calculated watermark values back */ 15576 /* Write calculated watermark values back */
15404 to_i915(dev)->wm.config = to_intel_atomic_state(state)->wm_config;
15405 for_each_crtc_in_state(state, crtc, cstate, i) { 15577 for_each_crtc_in_state(state, crtc, cstate, i) {
15406 struct intel_crtc_state *cs = to_intel_crtc_state(cstate); 15578 struct intel_crtc_state *cs = to_intel_crtc_state(cstate);
15407 15579
@@ -15499,11 +15671,13 @@ void intel_modeset_init(struct drm_device *dev)
15499 } 15671 }
15500 15672
15501 intel_update_czclk(dev_priv); 15673 intel_update_czclk(dev_priv);
15502 intel_update_rawclk(dev_priv);
15503 intel_update_cdclk(dev); 15674 intel_update_cdclk(dev);
15504 15675
15505 intel_shared_dpll_init(dev); 15676 intel_shared_dpll_init(dev);
15506 15677
15678 if (dev_priv->max_cdclk_freq == 0)
15679 intel_update_max_cdclk(dev);
15680
15507 /* Just disable it once at startup */ 15681 /* Just disable it once at startup */
15508 i915_disable_vga(dev); 15682 i915_disable_vga(dev);
15509 intel_setup_outputs(dev); 15683 intel_setup_outputs(dev);
@@ -15571,7 +15745,7 @@ static bool
15571intel_check_plane_mapping(struct intel_crtc *crtc) 15745intel_check_plane_mapping(struct intel_crtc *crtc)
15572{ 15746{
15573 struct drm_device *dev = crtc->base.dev; 15747 struct drm_device *dev = crtc->base.dev;
15574 struct drm_i915_private *dev_priv = dev->dev_private; 15748 struct drm_i915_private *dev_priv = to_i915(dev);
15575 u32 val; 15749 u32 val;
15576 15750
15577 if (INTEL_INFO(dev)->num_pipes == 1) 15751 if (INTEL_INFO(dev)->num_pipes == 1)
@@ -15611,7 +15785,7 @@ static bool intel_encoder_has_connectors(struct intel_encoder *encoder)
15611static void intel_sanitize_crtc(struct intel_crtc *crtc) 15785static void intel_sanitize_crtc(struct intel_crtc *crtc)
15612{ 15786{
15613 struct drm_device *dev = crtc->base.dev; 15787 struct drm_device *dev = crtc->base.dev;
15614 struct drm_i915_private *dev_priv = dev->dev_private; 15788 struct drm_i915_private *dev_priv = to_i915(dev);
15615 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; 15789 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
15616 15790
15617 /* Clear any frame start delays used for debugging left by the BIOS */ 15791 /* Clear any frame start delays used for debugging left by the BIOS */
@@ -15644,8 +15818,8 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
15644 if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) { 15818 if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
15645 bool plane; 15819 bool plane;
15646 15820
15647 DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n", 15821 DRM_DEBUG_KMS("[CRTC:%d:%s] wrong plane connection detected!\n",
15648 crtc->base.base.id); 15822 crtc->base.base.id, crtc->base.name);
15649 15823
15650 /* Pipe has the wrong plane attached and the plane is active. 15824 /* Pipe has the wrong plane attached and the plane is active.
15651 * Temporarily change the plane mapping and disable everything 15825 * Temporarily change the plane mapping and disable everything
@@ -15736,7 +15910,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
15736 15910
15737void i915_redisable_vga_power_on(struct drm_device *dev) 15911void i915_redisable_vga_power_on(struct drm_device *dev)
15738{ 15912{
15739 struct drm_i915_private *dev_priv = dev->dev_private; 15913 struct drm_i915_private *dev_priv = to_i915(dev);
15740 i915_reg_t vga_reg = i915_vgacntrl_reg(dev); 15914 i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
15741 15915
15742 if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) { 15916 if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
@@ -15747,7 +15921,7 @@ void i915_redisable_vga_power_on(struct drm_device *dev)
15747 15921
15748void i915_redisable_vga(struct drm_device *dev) 15922void i915_redisable_vga(struct drm_device *dev)
15749{ 15923{
15750 struct drm_i915_private *dev_priv = dev->dev_private; 15924 struct drm_i915_private *dev_priv = to_i915(dev);
15751 15925
15752 /* This function can be called both from intel_modeset_setup_hw_state or 15926 /* This function can be called both from intel_modeset_setup_hw_state or
15753 * at a very early point in our resume sequence, where the power well 15927 * at a very early point in our resume sequence, where the power well
@@ -15787,7 +15961,7 @@ static void readout_plane_state(struct intel_crtc *crtc)
15787 15961
15788static void intel_modeset_readout_hw_state(struct drm_device *dev) 15962static void intel_modeset_readout_hw_state(struct drm_device *dev)
15789{ 15963{
15790 struct drm_i915_private *dev_priv = dev->dev_private; 15964 struct drm_i915_private *dev_priv = to_i915(dev);
15791 enum pipe pipe; 15965 enum pipe pipe;
15792 struct intel_crtc *crtc; 15966 struct intel_crtc *crtc;
15793 struct intel_encoder *encoder; 15967 struct intel_encoder *encoder;
@@ -15813,26 +15987,24 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
15813 if (crtc_state->base.active) { 15987 if (crtc_state->base.active) {
15814 dev_priv->active_crtcs |= 1 << crtc->pipe; 15988 dev_priv->active_crtcs |= 1 << crtc->pipe;
15815 15989
15816 if (IS_BROADWELL(dev_priv)) { 15990 if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
15817 pixclk = ilk_pipe_pixel_rate(crtc_state); 15991 pixclk = ilk_pipe_pixel_rate(crtc_state);
15818 15992 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
15819 /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
15820 if (crtc_state->ips_enabled)
15821 pixclk = DIV_ROUND_UP(pixclk * 100, 95);
15822 } else if (IS_VALLEYVIEW(dev_priv) ||
15823 IS_CHERRYVIEW(dev_priv) ||
15824 IS_BROXTON(dev_priv))
15825 pixclk = crtc_state->base.adjusted_mode.crtc_clock; 15993 pixclk = crtc_state->base.adjusted_mode.crtc_clock;
15826 else 15994 else
15827 WARN_ON(dev_priv->display.modeset_calc_cdclk); 15995 WARN_ON(dev_priv->display.modeset_calc_cdclk);
15996
15997 /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
15998 if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
15999 pixclk = DIV_ROUND_UP(pixclk * 100, 95);
15828 } 16000 }
15829 16001
15830 dev_priv->min_pixclk[crtc->pipe] = pixclk; 16002 dev_priv->min_pixclk[crtc->pipe] = pixclk;
15831 16003
15832 readout_plane_state(crtc); 16004 readout_plane_state(crtc);
15833 16005
15834 DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n", 16006 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
15835 crtc->base.base.id, 16007 crtc->base.base.id, crtc->base.name,
15836 crtc->active ? "enabled" : "disabled"); 16008 crtc->active ? "enabled" : "disabled");
15837 } 16009 }
15838 16010
@@ -15858,6 +16030,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
15858 if (encoder->get_hw_state(encoder, &pipe)) { 16030 if (encoder->get_hw_state(encoder, &pipe)) {
15859 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 16031 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
15860 encoder->base.crtc = &crtc->base; 16032 encoder->base.crtc = &crtc->base;
16033 crtc->config->output_types |= 1 << encoder->type;
15861 encoder->get_config(encoder, crtc->config); 16034 encoder->get_config(encoder, crtc->config);
15862 } else { 16035 } else {
15863 encoder->base.crtc = NULL; 16036 encoder->base.crtc = NULL;
@@ -15942,7 +16115,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
15942static void 16115static void
15943intel_modeset_setup_hw_state(struct drm_device *dev) 16116intel_modeset_setup_hw_state(struct drm_device *dev)
15944{ 16117{
15945 struct drm_i915_private *dev_priv = dev->dev_private; 16118 struct drm_i915_private *dev_priv = to_i915(dev);
15946 enum pipe pipe; 16119 enum pipe pipe;
15947 struct intel_crtc *crtc; 16120 struct intel_crtc *crtc;
15948 struct intel_encoder *encoder; 16121 struct intel_encoder *encoder;
@@ -16063,15 +16236,16 @@ retry:
16063 16236
16064void intel_modeset_gem_init(struct drm_device *dev) 16237void intel_modeset_gem_init(struct drm_device *dev)
16065{ 16238{
16239 struct drm_i915_private *dev_priv = to_i915(dev);
16066 struct drm_crtc *c; 16240 struct drm_crtc *c;
16067 struct drm_i915_gem_object *obj; 16241 struct drm_i915_gem_object *obj;
16068 int ret; 16242 int ret;
16069 16243
16070 intel_init_gt_powersave(dev); 16244 intel_init_gt_powersave(dev_priv);
16071 16245
16072 intel_modeset_init_hw(dev); 16246 intel_modeset_init_hw(dev);
16073 16247
16074 intel_setup_overlay(dev); 16248 intel_setup_overlay(dev_priv);
16075 16249
16076 /* 16250 /*
16077 * Make sure any fbs we allocated at startup are properly 16251 * Make sure any fbs we allocated at startup are properly
@@ -16097,26 +16271,36 @@ void intel_modeset_gem_init(struct drm_device *dev)
16097 c->state->plane_mask &= ~(1 << drm_plane_index(c->primary)); 16271 c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
16098 } 16272 }
16099 } 16273 }
16274}
16275
16276int intel_connector_register(struct drm_connector *connector)
16277{
16278 struct intel_connector *intel_connector = to_intel_connector(connector);
16279 int ret;
16280
16281 ret = intel_backlight_device_register(intel_connector);
16282 if (ret)
16283 goto err;
16100 16284
16101 intel_backlight_register(dev); 16285 return 0;
16286
16287err:
16288 return ret;
16102} 16289}
16103 16290
16104void intel_connector_unregister(struct intel_connector *intel_connector) 16291void intel_connector_unregister(struct drm_connector *connector)
16105{ 16292{
16106 struct drm_connector *connector = &intel_connector->base; 16293 struct intel_connector *intel_connector = to_intel_connector(connector);
16107 16294
16295 intel_backlight_device_unregister(intel_connector);
16108 intel_panel_destroy_backlight(connector); 16296 intel_panel_destroy_backlight(connector);
16109 drm_connector_unregister(connector);
16110} 16297}
16111 16298
16112void intel_modeset_cleanup(struct drm_device *dev) 16299void intel_modeset_cleanup(struct drm_device *dev)
16113{ 16300{
16114 struct drm_i915_private *dev_priv = dev->dev_private; 16301 struct drm_i915_private *dev_priv = to_i915(dev);
16115 struct intel_connector *connector;
16116
16117 intel_disable_gt_powersave(dev);
16118 16302
16119 intel_backlight_unregister(dev); 16303 intel_disable_gt_powersave(dev_priv);
16120 16304
16121 /* 16305 /*
16122 * Interrupts and polling as the first thing to avoid creating havoc. 16306 * Interrupts and polling as the first thing to avoid creating havoc.
@@ -16138,27 +16322,15 @@ void intel_modeset_cleanup(struct drm_device *dev)
16138 /* flush any delayed tasks or pending work */ 16322 /* flush any delayed tasks or pending work */
16139 flush_scheduled_work(); 16323 flush_scheduled_work();
16140 16324
16141 /* destroy the backlight and sysfs files before encoders/connectors */
16142 for_each_intel_connector(dev, connector)
16143 connector->unregister(connector);
16144
16145 drm_mode_config_cleanup(dev); 16325 drm_mode_config_cleanup(dev);
16146 16326
16147 intel_cleanup_overlay(dev); 16327 intel_cleanup_overlay(dev_priv);
16148 16328
16149 intel_cleanup_gt_powersave(dev); 16329 intel_cleanup_gt_powersave(dev_priv);
16150 16330
16151 intel_teardown_gmbus(dev); 16331 intel_teardown_gmbus(dev);
16152} 16332}
16153 16333
16154/*
16155 * Return which encoder is currently attached for connector.
16156 */
16157struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
16158{
16159 return &intel_attached_encoder(connector)->base;
16160}
16161
16162void intel_connector_attach_encoder(struct intel_connector *connector, 16334void intel_connector_attach_encoder(struct intel_connector *connector,
16163 struct intel_encoder *encoder) 16335 struct intel_encoder *encoder)
16164{ 16336{
@@ -16172,7 +16344,7 @@ void intel_connector_attach_encoder(struct intel_connector *connector,
16172 */ 16344 */
16173int intel_modeset_vga_set_state(struct drm_device *dev, bool state) 16345int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
16174{ 16346{
16175 struct drm_i915_private *dev_priv = dev->dev_private; 16347 struct drm_i915_private *dev_priv = to_i915(dev);
16176 unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL; 16348 unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
16177 u16 gmch_ctrl; 16349 u16 gmch_ctrl;
16178 16350
@@ -16242,9 +16414,8 @@ struct intel_display_error_state {
16242}; 16414};
16243 16415
16244struct intel_display_error_state * 16416struct intel_display_error_state *
16245intel_display_capture_error_state(struct drm_device *dev) 16417intel_display_capture_error_state(struct drm_i915_private *dev_priv)
16246{ 16418{
16247 struct drm_i915_private *dev_priv = dev->dev_private;
16248 struct intel_display_error_state *error; 16419 struct intel_display_error_state *error;
16249 int transcoders[] = { 16420 int transcoders[] = {
16250 TRANSCODER_A, 16421 TRANSCODER_A,
@@ -16254,14 +16425,14 @@ intel_display_capture_error_state(struct drm_device *dev)
16254 }; 16425 };
16255 int i; 16426 int i;
16256 16427
16257 if (INTEL_INFO(dev)->num_pipes == 0) 16428 if (INTEL_INFO(dev_priv)->num_pipes == 0)
16258 return NULL; 16429 return NULL;
16259 16430
16260 error = kzalloc(sizeof(*error), GFP_ATOMIC); 16431 error = kzalloc(sizeof(*error), GFP_ATOMIC);
16261 if (error == NULL) 16432 if (error == NULL)
16262 return NULL; 16433 return NULL;
16263 16434
16264 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 16435 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
16265 error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER); 16436 error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
16266 16437
16267 for_each_pipe(dev_priv, i) { 16438 for_each_pipe(dev_priv, i) {
@@ -16277,25 +16448,25 @@ intel_display_capture_error_state(struct drm_device *dev)
16277 16448
16278 error->plane[i].control = I915_READ(DSPCNTR(i)); 16449 error->plane[i].control = I915_READ(DSPCNTR(i));
16279 error->plane[i].stride = I915_READ(DSPSTRIDE(i)); 16450 error->plane[i].stride = I915_READ(DSPSTRIDE(i));
16280 if (INTEL_INFO(dev)->gen <= 3) { 16451 if (INTEL_GEN(dev_priv) <= 3) {
16281 error->plane[i].size = I915_READ(DSPSIZE(i)); 16452 error->plane[i].size = I915_READ(DSPSIZE(i));
16282 error->plane[i].pos = I915_READ(DSPPOS(i)); 16453 error->plane[i].pos = I915_READ(DSPPOS(i));
16283 } 16454 }
16284 if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) 16455 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
16285 error->plane[i].addr = I915_READ(DSPADDR(i)); 16456 error->plane[i].addr = I915_READ(DSPADDR(i));
16286 if (INTEL_INFO(dev)->gen >= 4) { 16457 if (INTEL_GEN(dev_priv) >= 4) {
16287 error->plane[i].surface = I915_READ(DSPSURF(i)); 16458 error->plane[i].surface = I915_READ(DSPSURF(i));
16288 error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i)); 16459 error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
16289 } 16460 }
16290 16461
16291 error->pipe[i].source = I915_READ(PIPESRC(i)); 16462 error->pipe[i].source = I915_READ(PIPESRC(i));
16292 16463
16293 if (HAS_GMCH_DISPLAY(dev)) 16464 if (HAS_GMCH_DISPLAY(dev_priv))
16294 error->pipe[i].stat = I915_READ(PIPESTAT(i)); 16465 error->pipe[i].stat = I915_READ(PIPESTAT(i));
16295 } 16466 }
16296 16467
16297 /* Note: this does not include DSI transcoders. */ 16468 /* Note: this does not include DSI transcoders. */
16298 error->num_transcoders = INTEL_INFO(dev)->num_pipes; 16469 error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes;
16299 if (HAS_DDI(dev_priv)) 16470 if (HAS_DDI(dev_priv))
16300 error->num_transcoders++; /* Account for eDP. */ 16471 error->num_transcoders++; /* Account for eDP. */
16301 16472
@@ -16329,7 +16500,7 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
16329 struct drm_device *dev, 16500 struct drm_device *dev,
16330 struct intel_display_error_state *error) 16501 struct intel_display_error_state *error)
16331{ 16502{
16332 struct drm_i915_private *dev_priv = dev->dev_private; 16503 struct drm_i915_private *dev_priv = to_i915(dev);
16333 int i; 16504 int i;
16334 16505
16335 if (!error) 16506 if (!error)
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 891107f92d9f..21b04c3eda41 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -131,11 +131,6 @@ static void vlv_steal_power_sequencer(struct drm_device *dev,
131 enum pipe pipe); 131 enum pipe pipe);
132static void intel_dp_unset_edid(struct intel_dp *intel_dp); 132static void intel_dp_unset_edid(struct intel_dp *intel_dp);
133 133
134static unsigned int intel_dp_unused_lane_mask(int lane_count)
135{
136 return ~((1 << lane_count) - 1) & 0xf;
137}
138
139static int 134static int
140intel_dp_max_link_bw(struct intel_dp *intel_dp) 135intel_dp_max_link_bw(struct intel_dp *intel_dp)
141{ 136{
@@ -267,7 +262,7 @@ static void pps_lock(struct intel_dp *intel_dp)
267 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 262 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
268 struct intel_encoder *encoder = &intel_dig_port->base; 263 struct intel_encoder *encoder = &intel_dig_port->base;
269 struct drm_device *dev = encoder->base.dev; 264 struct drm_device *dev = encoder->base.dev;
270 struct drm_i915_private *dev_priv = dev->dev_private; 265 struct drm_i915_private *dev_priv = to_i915(dev);
271 enum intel_display_power_domain power_domain; 266 enum intel_display_power_domain power_domain;
272 267
273 /* 268 /*
@@ -285,7 +280,7 @@ static void pps_unlock(struct intel_dp *intel_dp)
285 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 280 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
286 struct intel_encoder *encoder = &intel_dig_port->base; 281 struct intel_encoder *encoder = &intel_dig_port->base;
287 struct drm_device *dev = encoder->base.dev; 282 struct drm_device *dev = encoder->base.dev;
288 struct drm_i915_private *dev_priv = dev->dev_private; 283 struct drm_i915_private *dev_priv = to_i915(dev);
289 enum intel_display_power_domain power_domain; 284 enum intel_display_power_domain power_domain;
290 285
291 mutex_unlock(&dev_priv->pps_mutex); 286 mutex_unlock(&dev_priv->pps_mutex);
@@ -299,7 +294,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
299{ 294{
300 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 295 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
301 struct drm_device *dev = intel_dig_port->base.base.dev; 296 struct drm_device *dev = intel_dig_port->base.base.dev;
302 struct drm_i915_private *dev_priv = dev->dev_private; 297 struct drm_i915_private *dev_priv = to_i915(dev);
303 enum pipe pipe = intel_dp->pps_pipe; 298 enum pipe pipe = intel_dp->pps_pipe;
304 bool pll_enabled, release_cl_override = false; 299 bool pll_enabled, release_cl_override = false;
305 enum dpio_phy phy = DPIO_PHY(pipe); 300 enum dpio_phy phy = DPIO_PHY(pipe);
@@ -373,7 +368,7 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
373{ 368{
374 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 369 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
375 struct drm_device *dev = intel_dig_port->base.base.dev; 370 struct drm_device *dev = intel_dig_port->base.base.dev;
376 struct drm_i915_private *dev_priv = dev->dev_private; 371 struct drm_i915_private *dev_priv = to_i915(dev);
377 struct intel_encoder *encoder; 372 struct intel_encoder *encoder;
378 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B); 373 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
379 enum pipe pipe; 374 enum pipe pipe;
@@ -431,6 +426,37 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
431 return intel_dp->pps_pipe; 426 return intel_dp->pps_pipe;
432} 427}
433 428
429static int
430bxt_power_sequencer_idx(struct intel_dp *intel_dp)
431{
432 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
433 struct drm_device *dev = intel_dig_port->base.base.dev;
434 struct drm_i915_private *dev_priv = to_i915(dev);
435
436 lockdep_assert_held(&dev_priv->pps_mutex);
437
438 /* We should never land here with regular DP ports */
439 WARN_ON(!is_edp(intel_dp));
440
441 /*
442 * TODO: BXT has 2 PPS instances. The correct port->PPS instance
443	 * mapping needs to be retrieved from VBT; for now, just hard-code to
444	 * use instance #0 always.
445 */
446 if (!intel_dp->pps_reset)
447 return 0;
448
449 intel_dp->pps_reset = false;
450
451 /*
452	 * Only the HW needs to be reprogrammed; the SW state is fixed and
453	 * has been set up during connector init.
454 */
455 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
456
457 return 0;
458}
459
434typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv, 460typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
435 enum pipe pipe); 461 enum pipe pipe);
436 462
@@ -480,7 +506,7 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
480{ 506{
481 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 507 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
482 struct drm_device *dev = intel_dig_port->base.base.dev; 508 struct drm_device *dev = intel_dig_port->base.base.dev;
483 struct drm_i915_private *dev_priv = dev->dev_private; 509 struct drm_i915_private *dev_priv = to_i915(dev);
484 enum port port = intel_dig_port->port; 510 enum port port = intel_dig_port->port;
485 511
486 lockdep_assert_held(&dev_priv->pps_mutex); 512 lockdep_assert_held(&dev_priv->pps_mutex);
@@ -512,12 +538,13 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
512 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp); 538 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
513} 539}
514 540
515void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv) 541void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
516{ 542{
517 struct drm_device *dev = dev_priv->dev; 543 struct drm_device *dev = &dev_priv->drm;
518 struct intel_encoder *encoder; 544 struct intel_encoder *encoder;
519 545
520 if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev))) 546 if (WARN_ON(!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
547 !IS_BROXTON(dev)))
521 return; 548 return;
522 549
523 /* 550 /*
@@ -537,34 +564,71 @@ void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
537 continue; 564 continue;
538 565
539 intel_dp = enc_to_intel_dp(&encoder->base); 566 intel_dp = enc_to_intel_dp(&encoder->base);
540 intel_dp->pps_pipe = INVALID_PIPE; 567 if (IS_BROXTON(dev))
568 intel_dp->pps_reset = true;
569 else
570 intel_dp->pps_pipe = INVALID_PIPE;
571 }
572}
573
574struct pps_registers {
575 i915_reg_t pp_ctrl;
576 i915_reg_t pp_stat;
577 i915_reg_t pp_on;
578 i915_reg_t pp_off;
579 i915_reg_t pp_div;
580};
581
582static void intel_pps_get_registers(struct drm_i915_private *dev_priv,
583 struct intel_dp *intel_dp,
584 struct pps_registers *regs)
585{
586 memset(regs, 0, sizeof(*regs));
587
588 if (IS_BROXTON(dev_priv)) {
589 int idx = bxt_power_sequencer_idx(intel_dp);
590
591 regs->pp_ctrl = BXT_PP_CONTROL(idx);
592 regs->pp_stat = BXT_PP_STATUS(idx);
593 regs->pp_on = BXT_PP_ON_DELAYS(idx);
594 regs->pp_off = BXT_PP_OFF_DELAYS(idx);
595 } else if (HAS_PCH_SPLIT(dev_priv)) {
596 regs->pp_ctrl = PCH_PP_CONTROL;
597 regs->pp_stat = PCH_PP_STATUS;
598 regs->pp_on = PCH_PP_ON_DELAYS;
599 regs->pp_off = PCH_PP_OFF_DELAYS;
600 regs->pp_div = PCH_PP_DIVISOR;
601 } else {
602 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
603
604 regs->pp_ctrl = VLV_PIPE_PP_CONTROL(pipe);
605 regs->pp_stat = VLV_PIPE_PP_STATUS(pipe);
606 regs->pp_on = VLV_PIPE_PP_ON_DELAYS(pipe);
607 regs->pp_off = VLV_PIPE_PP_OFF_DELAYS(pipe);
608 regs->pp_div = VLV_PIPE_PP_DIVISOR(pipe);
541 } 609 }
542} 610}
543 611
544static i915_reg_t 612static i915_reg_t
545_pp_ctrl_reg(struct intel_dp *intel_dp) 613_pp_ctrl_reg(struct intel_dp *intel_dp)
546{ 614{
547 struct drm_device *dev = intel_dp_to_dev(intel_dp); 615 struct pps_registers regs;
548 616
549 if (IS_BROXTON(dev)) 617 intel_pps_get_registers(to_i915(intel_dp_to_dev(intel_dp)), intel_dp,
550 return BXT_PP_CONTROL(0); 618 &regs);
551 else if (HAS_PCH_SPLIT(dev)) 619
552 return PCH_PP_CONTROL; 620 return regs.pp_ctrl;
553 else
554 return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
555} 621}
556 622
557static i915_reg_t 623static i915_reg_t
558_pp_stat_reg(struct intel_dp *intel_dp) 624_pp_stat_reg(struct intel_dp *intel_dp)
559{ 625{
560 struct drm_device *dev = intel_dp_to_dev(intel_dp); 626 struct pps_registers regs;
561 627
562 if (IS_BROXTON(dev)) 628 intel_pps_get_registers(to_i915(intel_dp_to_dev(intel_dp)), intel_dp,
563 return BXT_PP_STATUS(0); 629 &regs);
564 else if (HAS_PCH_SPLIT(dev)) 630
565 return PCH_PP_STATUS; 631 return regs.pp_stat;
566 else
567 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
568} 632}
569 633
570/* Reboot notifier handler to shut down panel power to guarantee T12 timing 634
@@ -575,7 +639,7 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code,
575 struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp), 639 struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
576 edp_notifier); 640 edp_notifier);
577 struct drm_device *dev = intel_dp_to_dev(intel_dp); 641 struct drm_device *dev = intel_dp_to_dev(intel_dp);
578 struct drm_i915_private *dev_priv = dev->dev_private; 642 struct drm_i915_private *dev_priv = to_i915(dev);
579 643
580 if (!is_edp(intel_dp) || code != SYS_RESTART) 644 if (!is_edp(intel_dp) || code != SYS_RESTART)
581 return 0; 645 return 0;
@@ -606,7 +670,7 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code,
606static bool edp_have_panel_power(struct intel_dp *intel_dp) 670static bool edp_have_panel_power(struct intel_dp *intel_dp)
607{ 671{
608 struct drm_device *dev = intel_dp_to_dev(intel_dp); 672 struct drm_device *dev = intel_dp_to_dev(intel_dp);
609 struct drm_i915_private *dev_priv = dev->dev_private; 673 struct drm_i915_private *dev_priv = to_i915(dev);
610 674
611 lockdep_assert_held(&dev_priv->pps_mutex); 675 lockdep_assert_held(&dev_priv->pps_mutex);
612 676
@@ -620,7 +684,7 @@ static bool edp_have_panel_power(struct intel_dp *intel_dp)
620static bool edp_have_panel_vdd(struct intel_dp *intel_dp) 684static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
621{ 685{
622 struct drm_device *dev = intel_dp_to_dev(intel_dp); 686 struct drm_device *dev = intel_dp_to_dev(intel_dp);
623 struct drm_i915_private *dev_priv = dev->dev_private; 687 struct drm_i915_private *dev_priv = to_i915(dev);
624 688
625 lockdep_assert_held(&dev_priv->pps_mutex); 689 lockdep_assert_held(&dev_priv->pps_mutex);
626 690
@@ -635,7 +699,7 @@ static void
635intel_dp_check_edp(struct intel_dp *intel_dp) 699intel_dp_check_edp(struct intel_dp *intel_dp)
636{ 700{
637 struct drm_device *dev = intel_dp_to_dev(intel_dp); 701 struct drm_device *dev = intel_dp_to_dev(intel_dp);
638 struct drm_i915_private *dev_priv = dev->dev_private; 702 struct drm_i915_private *dev_priv = to_i915(dev);
639 703
640 if (!is_edp(intel_dp)) 704 if (!is_edp(intel_dp))
641 return; 705 return;
@@ -653,7 +717,7 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
653{ 717{
654 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 718 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
655 struct drm_device *dev = intel_dig_port->base.base.dev; 719 struct drm_device *dev = intel_dig_port->base.base.dev;
656 struct drm_i915_private *dev_priv = dev->dev_private; 720 struct drm_i915_private *dev_priv = to_i915(dev);
657 i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg; 721 i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
658 uint32_t status; 722 uint32_t status;
659 bool done; 723 bool done;
@@ -775,6 +839,7 @@ static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
775 DP_AUX_CH_CTL_TIME_OUT_1600us | 839 DP_AUX_CH_CTL_TIME_OUT_1600us |
776 DP_AUX_CH_CTL_RECEIVE_ERROR | 840 DP_AUX_CH_CTL_RECEIVE_ERROR |
777 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) | 841 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
842 DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
778 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32); 843 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
779} 844}
780 845
@@ -785,7 +850,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
785{ 850{
786 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 851 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
787 struct drm_device *dev = intel_dig_port->base.base.dev; 852 struct drm_device *dev = intel_dig_port->base.base.dev;
788 struct drm_i915_private *dev_priv = dev->dev_private; 853 struct drm_i915_private *dev_priv = to_i915(dev);
789 i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg; 854 i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
790 uint32_t aux_clock_divider; 855 uint32_t aux_clock_divider;
791 int i, ret, recv_bytes; 856 int i, ret, recv_bytes;
@@ -1181,48 +1246,21 @@ static void intel_aux_reg_init(struct intel_dp *intel_dp)
1181static void 1246static void
1182intel_dp_aux_fini(struct intel_dp *intel_dp) 1247intel_dp_aux_fini(struct intel_dp *intel_dp)
1183{ 1248{
1184 drm_dp_aux_unregister(&intel_dp->aux);
1185 kfree(intel_dp->aux.name); 1249 kfree(intel_dp->aux.name);
1186} 1250}
1187 1251
1188static int 1252static void
1189intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector) 1253intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
1190{ 1254{
1191 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 1255 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1192 enum port port = intel_dig_port->port; 1256 enum port port = intel_dig_port->port;
1193 int ret;
1194 1257
1195 intel_aux_reg_init(intel_dp); 1258 intel_aux_reg_init(intel_dp);
1259 drm_dp_aux_init(&intel_dp->aux);
1196 1260
1261 /* Failure to allocate our preferred name is not critical */
1197 intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", port_name(port)); 1262 intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", port_name(port));
1198 if (!intel_dp->aux.name)
1199 return -ENOMEM;
1200
1201 intel_dp->aux.dev = connector->base.kdev;
1202 intel_dp->aux.transfer = intel_dp_aux_transfer; 1263 intel_dp->aux.transfer = intel_dp_aux_transfer;
1203
1204 DRM_DEBUG_KMS("registering %s bus for %s\n",
1205 intel_dp->aux.name,
1206 connector->base.kdev->kobj.name);
1207
1208 ret = drm_dp_aux_register(&intel_dp->aux);
1209 if (ret < 0) {
1210 DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
1211 intel_dp->aux.name, ret);
1212 kfree(intel_dp->aux.name);
1213 return ret;
1214 }
1215
1216 return 0;
1217}
1218
1219static void
1220intel_dp_connector_unregister(struct intel_connector *intel_connector)
1221{
1222 struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);
1223
1224 intel_dp_aux_fini(intel_dp);
1225 intel_connector_unregister(intel_connector);
1226} 1264}
1227 1265
1228static int 1266static int
@@ -1435,7 +1473,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
1435 struct intel_crtc_state *pipe_config) 1473 struct intel_crtc_state *pipe_config)
1436{ 1474{
1437 struct drm_device *dev = encoder->base.dev; 1475 struct drm_device *dev = encoder->base.dev;
1438 struct drm_i915_private *dev_priv = dev->dev_private; 1476 struct drm_i915_private *dev_priv = to_i915(dev);
1439 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 1477 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1440 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1478 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1441 enum port port = dp_to_dig_port(intel_dp)->port; 1479 enum port port = dp_to_dig_port(intel_dp)->port;
@@ -1463,7 +1501,6 @@ intel_dp_compute_config(struct intel_encoder *encoder,
1463 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A) 1501 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
1464 pipe_config->has_pch_encoder = true; 1502 pipe_config->has_pch_encoder = true;
1465 1503
1466 pipe_config->has_dp_encoder = true;
1467 pipe_config->has_drrs = false; 1504 pipe_config->has_drrs = false;
1468 pipe_config->has_audio = intel_dp->has_audio && port != PORT_A; 1505 pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
1469 1506
@@ -1582,6 +1619,27 @@ found:
1582 &pipe_config->dp_m2_n2); 1619 &pipe_config->dp_m2_n2);
1583 } 1620 }
1584 1621
1622 /*
1623 * DPLL0 VCO may need to be adjusted to get the correct
1624 * clock for eDP. This will affect cdclk as well.
1625 */
1626 if (is_edp(intel_dp) &&
1627 (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))) {
1628 int vco;
1629
1630 switch (pipe_config->port_clock / 2) {
1631 case 108000:
1632 case 216000:
1633 vco = 8640000;
1634 break;
1635 default:
1636 vco = 8100000;
1637 break;
1638 }
1639
1640 to_intel_atomic_state(pipe_config->base.state)->cdclk_pll_vco = vco;
1641 }
1642
1585 if (!HAS_DDI(dev)) 1643 if (!HAS_DDI(dev))
1586 intel_dp_set_clock(encoder, pipe_config); 1644 intel_dp_set_clock(encoder, pipe_config);
1587 1645
@@ -1598,7 +1656,7 @@ void intel_dp_set_link_params(struct intel_dp *intel_dp,
1598static void intel_dp_prepare(struct intel_encoder *encoder) 1656static void intel_dp_prepare(struct intel_encoder *encoder)
1599{ 1657{
1600 struct drm_device *dev = encoder->base.dev; 1658 struct drm_device *dev = encoder->base.dev;
1601 struct drm_i915_private *dev_priv = dev->dev_private; 1659 struct drm_i915_private *dev_priv = to_i915(dev);
1602 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1660 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1603 enum port port = dp_to_dig_port(intel_dp)->port; 1661 enum port port = dp_to_dig_port(intel_dp)->port;
1604 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 1662 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
@@ -1686,16 +1744,21 @@ static void intel_dp_prepare(struct intel_encoder *encoder)
1686#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK) 1744#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1687#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE) 1745#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
1688 1746
1747static void intel_pps_verify_state(struct drm_i915_private *dev_priv,
1748 struct intel_dp *intel_dp);
1749
1689static void wait_panel_status(struct intel_dp *intel_dp, 1750static void wait_panel_status(struct intel_dp *intel_dp,
1690 u32 mask, 1751 u32 mask,
1691 u32 value) 1752 u32 value)
1692{ 1753{
1693 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1754 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1694 struct drm_i915_private *dev_priv = dev->dev_private; 1755 struct drm_i915_private *dev_priv = to_i915(dev);
1695 i915_reg_t pp_stat_reg, pp_ctrl_reg; 1756 i915_reg_t pp_stat_reg, pp_ctrl_reg;
1696 1757
1697 lockdep_assert_held(&dev_priv->pps_mutex); 1758 lockdep_assert_held(&dev_priv->pps_mutex);
1698 1759
1760 intel_pps_verify_state(dev_priv, intel_dp);
1761
1699 pp_stat_reg = _pp_stat_reg(intel_dp); 1762 pp_stat_reg = _pp_stat_reg(intel_dp);
1700 pp_ctrl_reg = _pp_ctrl_reg(intel_dp); 1763 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1701 1764
@@ -1704,8 +1767,9 @@ static void wait_panel_status(struct intel_dp *intel_dp,
1704 I915_READ(pp_stat_reg), 1767 I915_READ(pp_stat_reg),
1705 I915_READ(pp_ctrl_reg)); 1768 I915_READ(pp_ctrl_reg));
1706 1769
1707 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 1770 if (intel_wait_for_register(dev_priv,
1708 5 * USEC_PER_SEC, 10 * USEC_PER_MSEC)) 1771 pp_stat_reg, mask, value,
1772 5000))
1709 DRM_ERROR("Panel status timeout: status %08x control %08x\n", 1773 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
1710 I915_READ(pp_stat_reg), 1774 I915_READ(pp_stat_reg),
1711 I915_READ(pp_ctrl_reg)); 1775 I915_READ(pp_ctrl_reg));
@@ -1765,7 +1829,7 @@ static void edp_wait_backlight_off(struct intel_dp *intel_dp)
1765static u32 ironlake_get_pp_control(struct intel_dp *intel_dp) 1829static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1766{ 1830{
1767 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1831 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1768 struct drm_i915_private *dev_priv = dev->dev_private; 1832 struct drm_i915_private *dev_priv = to_i915(dev);
1769 u32 control; 1833 u32 control;
1770 1834
1771 lockdep_assert_held(&dev_priv->pps_mutex); 1835 lockdep_assert_held(&dev_priv->pps_mutex);
@@ -1788,7 +1852,7 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
1788 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1852 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1789 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 1853 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1790 struct intel_encoder *intel_encoder = &intel_dig_port->base; 1854 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1791 struct drm_i915_private *dev_priv = dev->dev_private; 1855 struct drm_i915_private *dev_priv = to_i915(dev);
1792 enum intel_display_power_domain power_domain; 1856 enum intel_display_power_domain power_domain;
1793 u32 pp; 1857 u32 pp;
1794 i915_reg_t pp_stat_reg, pp_ctrl_reg; 1858 i915_reg_t pp_stat_reg, pp_ctrl_reg;
@@ -1861,7 +1925,7 @@ void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1861static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp) 1925static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
1862{ 1926{
1863 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1927 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1864 struct drm_i915_private *dev_priv = dev->dev_private; 1928 struct drm_i915_private *dev_priv = to_i915(dev);
1865 struct intel_digital_port *intel_dig_port = 1929 struct intel_digital_port *intel_dig_port =
1866 dp_to_dig_port(intel_dp); 1930 dp_to_dig_port(intel_dp);
1867 struct intel_encoder *intel_encoder = &intel_dig_port->base; 1931 struct intel_encoder *intel_encoder = &intel_dig_port->base;
@@ -1930,8 +1994,7 @@ static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1930 */ 1994 */
1931static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) 1995static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1932{ 1996{
1933 struct drm_i915_private *dev_priv = 1997 struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
1934 intel_dp_to_dev(intel_dp)->dev_private;
1935 1998
1936 lockdep_assert_held(&dev_priv->pps_mutex); 1999 lockdep_assert_held(&dev_priv->pps_mutex);
1937 2000
@@ -1952,7 +2015,7 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1952static void edp_panel_on(struct intel_dp *intel_dp) 2015static void edp_panel_on(struct intel_dp *intel_dp)
1953{ 2016{
1954 struct drm_device *dev = intel_dp_to_dev(intel_dp); 2017 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1955 struct drm_i915_private *dev_priv = dev->dev_private; 2018 struct drm_i915_private *dev_priv = to_i915(dev);
1956 u32 pp; 2019 u32 pp;
1957 i915_reg_t pp_ctrl_reg; 2020 i915_reg_t pp_ctrl_reg;
1958 2021
@@ -2013,7 +2076,7 @@ static void edp_panel_off(struct intel_dp *intel_dp)
2013 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 2076 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2014 struct intel_encoder *intel_encoder = &intel_dig_port->base; 2077 struct intel_encoder *intel_encoder = &intel_dig_port->base;
2015 struct drm_device *dev = intel_dp_to_dev(intel_dp); 2078 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2016 struct drm_i915_private *dev_priv = dev->dev_private; 2079 struct drm_i915_private *dev_priv = to_i915(dev);
2017 enum intel_display_power_domain power_domain; 2080 enum intel_display_power_domain power_domain;
2018 u32 pp; 2081 u32 pp;
2019 i915_reg_t pp_ctrl_reg; 2082 i915_reg_t pp_ctrl_reg;
@@ -2065,7 +2128,7 @@ static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
2065{ 2128{
2066 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 2129 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2067 struct drm_device *dev = intel_dig_port->base.base.dev; 2130 struct drm_device *dev = intel_dig_port->base.base.dev;
2068 struct drm_i915_private *dev_priv = dev->dev_private; 2131 struct drm_i915_private *dev_priv = to_i915(dev);
2069 u32 pp; 2132 u32 pp;
2070 i915_reg_t pp_ctrl_reg; 2133 i915_reg_t pp_ctrl_reg;
2071 2134
@@ -2106,7 +2169,7 @@ void intel_edp_backlight_on(struct intel_dp *intel_dp)
2106static void _intel_edp_backlight_off(struct intel_dp *intel_dp) 2169static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2107{ 2170{
2108 struct drm_device *dev = intel_dp_to_dev(intel_dp); 2171 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2109 struct drm_i915_private *dev_priv = dev->dev_private; 2172 struct drm_i915_private *dev_priv = to_i915(dev);
2110 u32 pp; 2173 u32 pp;
2111 i915_reg_t pp_ctrl_reg; 2174 i915_reg_t pp_ctrl_reg;
2112 2175
@@ -2222,7 +2285,7 @@ static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
2222 * 2. Program DP PLL enable 2285 * 2. Program DP PLL enable
2223 */ 2286 */
2224 if (IS_GEN5(dev_priv)) 2287 if (IS_GEN5(dev_priv))
2225 intel_wait_for_vblank_if_active(dev_priv->dev, !crtc->pipe); 2288 intel_wait_for_vblank_if_active(&dev_priv->drm, !crtc->pipe);
2226 2289
2227 intel_dp->DP |= DP_PLL_ENABLE; 2290 intel_dp->DP |= DP_PLL_ENABLE;
2228 2291
@@ -2287,7 +2350,7 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2287 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 2350 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2288 enum port port = dp_to_dig_port(intel_dp)->port; 2351 enum port port = dp_to_dig_port(intel_dp)->port;
2289 struct drm_device *dev = encoder->base.dev; 2352 struct drm_device *dev = encoder->base.dev;
2290 struct drm_i915_private *dev_priv = dev->dev_private; 2353 struct drm_i915_private *dev_priv = to_i915(dev);
2291 enum intel_display_power_domain power_domain; 2354 enum intel_display_power_domain power_domain;
2292 u32 tmp; 2355 u32 tmp;
2293 bool ret; 2356 bool ret;
@@ -2340,7 +2403,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
2340 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 2403 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2341 u32 tmp, flags = 0; 2404 u32 tmp, flags = 0;
2342 struct drm_device *dev = encoder->base.dev; 2405 struct drm_device *dev = encoder->base.dev;
2343 struct drm_i915_private *dev_priv = dev->dev_private; 2406 struct drm_i915_private *dev_priv = to_i915(dev);
2344 enum port port = dp_to_dig_port(intel_dp)->port; 2407 enum port port = dp_to_dig_port(intel_dp)->port;
2345 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 2408 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2346 2409
@@ -2378,8 +2441,6 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
2378 !IS_CHERRYVIEW(dev) && tmp & DP_COLOR_RANGE_16_235) 2441 !IS_CHERRYVIEW(dev) && tmp & DP_COLOR_RANGE_16_235)
2379 pipe_config->limited_color_range = true; 2442 pipe_config->limited_color_range = true;
2380 2443
2381 pipe_config->has_dp_encoder = true;
2382
2383 pipe_config->lane_count = 2444 pipe_config->lane_count =
2384 ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1; 2445 ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
2385 2446
@@ -2460,55 +2521,11 @@ static void vlv_post_disable_dp(struct intel_encoder *encoder)
2460 intel_dp_link_down(intel_dp); 2521 intel_dp_link_down(intel_dp);
2461} 2522}
2462 2523
2463static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
2464 bool reset)
2465{
2466 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2467 enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
2468 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2469 enum pipe pipe = crtc->pipe;
2470 uint32_t val;
2471
2472 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2473 if (reset)
2474 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2475 else
2476 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2477 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
2478
2479 if (crtc->config->lane_count > 2) {
2480 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2481 if (reset)
2482 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2483 else
2484 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2485 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2486 }
2487
2488 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2489 val |= CHV_PCS_REQ_SOFTRESET_EN;
2490 if (reset)
2491 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2492 else
2493 val |= DPIO_PCS_CLK_SOFT_RESET;
2494 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
2495
2496 if (crtc->config->lane_count > 2) {
2497 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2498 val |= CHV_PCS_REQ_SOFTRESET_EN;
2499 if (reset)
2500 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2501 else
2502 val |= DPIO_PCS_CLK_SOFT_RESET;
2503 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2504 }
2505}
2506
2507static void chv_post_disable_dp(struct intel_encoder *encoder) 2524static void chv_post_disable_dp(struct intel_encoder *encoder)
2508{ 2525{
2509 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 2526 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2510 struct drm_device *dev = encoder->base.dev; 2527 struct drm_device *dev = encoder->base.dev;
2511 struct drm_i915_private *dev_priv = dev->dev_private; 2528 struct drm_i915_private *dev_priv = to_i915(dev);
2512 2529
2513 intel_dp_link_down(intel_dp); 2530 intel_dp_link_down(intel_dp);
2514 2531
@@ -2527,7 +2544,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
2527{ 2544{
2528 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 2545 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2529 struct drm_device *dev = intel_dig_port->base.base.dev; 2546 struct drm_device *dev = intel_dig_port->base.base.dev;
2530 struct drm_i915_private *dev_priv = dev->dev_private; 2547 struct drm_i915_private *dev_priv = to_i915(dev);
2531 enum port port = intel_dig_port->port; 2548 enum port port = intel_dig_port->port;
2532 2549
2533 if (HAS_DDI(dev)) { 2550 if (HAS_DDI(dev)) {
@@ -2607,7 +2624,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
2607static void intel_dp_enable_port(struct intel_dp *intel_dp) 2624static void intel_dp_enable_port(struct intel_dp *intel_dp)
2608{ 2625{
2609 struct drm_device *dev = intel_dp_to_dev(intel_dp); 2626 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2610 struct drm_i915_private *dev_priv = dev->dev_private; 2627 struct drm_i915_private *dev_priv = to_i915(dev);
2611 struct intel_crtc *crtc = 2628 struct intel_crtc *crtc =
2612 to_intel_crtc(dp_to_dig_port(intel_dp)->base.base.crtc); 2629 to_intel_crtc(dp_to_dig_port(intel_dp)->base.base.crtc);
2613 2630
@@ -2636,7 +2653,7 @@ static void intel_enable_dp(struct intel_encoder *encoder)
2636{ 2653{
2637 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 2654 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2638 struct drm_device *dev = encoder->base.dev; 2655 struct drm_device *dev = encoder->base.dev;
2639 struct drm_i915_private *dev_priv = dev->dev_private; 2656 struct drm_i915_private *dev_priv = to_i915(dev);
2640 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 2657 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2641 uint32_t dp_reg = I915_READ(intel_dp->output_reg); 2658 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
2642 enum pipe pipe = crtc->pipe; 2659 enum pipe pipe = crtc->pipe;
@@ -2709,7 +2726,7 @@ static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2709static void vlv_detach_power_sequencer(struct intel_dp *intel_dp) 2726static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2710{ 2727{
2711 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 2728 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2712 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private; 2729 struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
2713 enum pipe pipe = intel_dp->pps_pipe; 2730 enum pipe pipe = intel_dp->pps_pipe;
2714 i915_reg_t pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe); 2731 i915_reg_t pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2715 2732
@@ -2735,7 +2752,7 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2735static void vlv_steal_power_sequencer(struct drm_device *dev, 2752static void vlv_steal_power_sequencer(struct drm_device *dev,
2736 enum pipe pipe) 2753 enum pipe pipe)
2737{ 2754{
2738 struct drm_i915_private *dev_priv = dev->dev_private; 2755 struct drm_i915_private *dev_priv = to_i915(dev);
2739 struct intel_encoder *encoder; 2756 struct intel_encoder *encoder;
2740 2757
2741 lockdep_assert_held(&dev_priv->pps_mutex); 2758 lockdep_assert_held(&dev_priv->pps_mutex);
@@ -2773,7 +2790,7 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2773 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 2790 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2774 struct intel_encoder *encoder = &intel_dig_port->base; 2791 struct intel_encoder *encoder = &intel_dig_port->base;
2775 struct drm_device *dev = encoder->base.dev; 2792 struct drm_device *dev = encoder->base.dev;
2776 struct drm_i915_private *dev_priv = dev->dev_private; 2793 struct drm_i915_private *dev_priv = to_i915(dev);
2777 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 2794 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2778 2795
2779 lockdep_assert_held(&dev_priv->pps_mutex); 2796 lockdep_assert_held(&dev_priv->pps_mutex);
@@ -2811,266 +2828,38 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2811 2828
2812static void vlv_pre_enable_dp(struct intel_encoder *encoder) 2829static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2813{ 2830{
2814 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 2831 vlv_phy_pre_encoder_enable(encoder);
2815 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2816 struct drm_device *dev = encoder->base.dev;
2817 struct drm_i915_private *dev_priv = dev->dev_private;
2818 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
2819 enum dpio_channel port = vlv_dport_to_channel(dport);
2820 int pipe = intel_crtc->pipe;
2821 u32 val;
2822
2823 mutex_lock(&dev_priv->sb_lock);
2824
2825 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
2826 val = 0;
2827 if (pipe)
2828 val |= (1<<21);
2829 else
2830 val &= ~(1<<21);
2831 val |= 0x001000c4;
2832 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2833 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2834 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
2835
2836 mutex_unlock(&dev_priv->sb_lock);
2837 2832
2838 intel_enable_dp(encoder); 2833 intel_enable_dp(encoder);
2839} 2834}
2840 2835
2841static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder) 2836static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
2842{ 2837{
2843 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2844 struct drm_device *dev = encoder->base.dev;
2845 struct drm_i915_private *dev_priv = dev->dev_private;
2846 struct intel_crtc *intel_crtc =
2847 to_intel_crtc(encoder->base.crtc);
2848 enum dpio_channel port = vlv_dport_to_channel(dport);
2849 int pipe = intel_crtc->pipe;
2850
2851 intel_dp_prepare(encoder); 2838 intel_dp_prepare(encoder);
2852 2839
2853 /* Program Tx lane resets to default */ 2840 vlv_phy_pre_pll_enable(encoder);
2854 mutex_lock(&dev_priv->sb_lock);
2855 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
2856 DPIO_PCS_TX_LANE2_RESET |
2857 DPIO_PCS_TX_LANE1_RESET);
2858 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
2859 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2860 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2861 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2862 DPIO_PCS_CLK_SOFT_RESET);
2863
2864 /* Fix up inter-pair skew failure */
2865 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2866 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2867 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
2868 mutex_unlock(&dev_priv->sb_lock);
2869} 2841}
2870 2842
2871static void chv_pre_enable_dp(struct intel_encoder *encoder) 2843static void chv_pre_enable_dp(struct intel_encoder *encoder)
2872{ 2844{
2873 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 2845 chv_phy_pre_encoder_enable(encoder);
2874 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2875 struct drm_device *dev = encoder->base.dev;
2876 struct drm_i915_private *dev_priv = dev->dev_private;
2877 struct intel_crtc *intel_crtc =
2878 to_intel_crtc(encoder->base.crtc);
2879 enum dpio_channel ch = vlv_dport_to_channel(dport);
2880 int pipe = intel_crtc->pipe;
2881 int data, i, stagger;
2882 u32 val;
2883
2884 mutex_lock(&dev_priv->sb_lock);
2885
2886 /* allow hardware to manage TX FIFO reset source */
2887 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2888 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2889 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2890
2891 if (intel_crtc->config->lane_count > 2) {
2892 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2893 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2894 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2895 }
2896
2897 /* Program Tx lane latency optimal setting*/
2898 for (i = 0; i < intel_crtc->config->lane_count; i++) {
2899 /* Set the upar bit */
2900 if (intel_crtc->config->lane_count == 1)
2901 data = 0x0;
2902 else
2903 data = (i == 1) ? 0x0 : 0x1;
2904 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2905 data << DPIO_UPAR_SHIFT);
2906 }
2907
2908 /* Data lane stagger programming */
2909 if (intel_crtc->config->port_clock > 270000)
2910 stagger = 0x18;
2911 else if (intel_crtc->config->port_clock > 135000)
2912 stagger = 0xd;
2913 else if (intel_crtc->config->port_clock > 67500)
2914 stagger = 0x7;
2915 else if (intel_crtc->config->port_clock > 33750)
2916 stagger = 0x4;
2917 else
2918 stagger = 0x2;
2919
2920 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2921 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2922 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2923
2924 if (intel_crtc->config->lane_count > 2) {
2925 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2926 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2927 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2928 }
2929
2930 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
2931 DPIO_LANESTAGGER_STRAP(stagger) |
2932 DPIO_LANESTAGGER_STRAP_OVRD |
2933 DPIO_TX1_STAGGER_MASK(0x1f) |
2934 DPIO_TX1_STAGGER_MULT(6) |
2935 DPIO_TX2_STAGGER_MULT(0));
2936
2937 if (intel_crtc->config->lane_count > 2) {
2938 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
2939 DPIO_LANESTAGGER_STRAP(stagger) |
2940 DPIO_LANESTAGGER_STRAP_OVRD |
2941 DPIO_TX1_STAGGER_MASK(0x1f) |
2942 DPIO_TX1_STAGGER_MULT(7) |
2943 DPIO_TX2_STAGGER_MULT(5));
2944 }
2945
2946 /* Deassert data lane reset */
2947 chv_data_lane_soft_reset(encoder, false);
2948
2949 mutex_unlock(&dev_priv->sb_lock);
2950 2846
2951 intel_enable_dp(encoder); 2847 intel_enable_dp(encoder);
2952 2848
2953 /* Second common lane will stay alive on its own now */ 2849 /* Second common lane will stay alive on its own now */
2954 if (dport->release_cl2_override) { 2850 chv_phy_release_cl2_override(encoder);
2955 chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
2956 dport->release_cl2_override = false;
2957 }
2958} 2851}
2959 2852
2960static void chv_dp_pre_pll_enable(struct intel_encoder *encoder) 2853static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2961{ 2854{
2962 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2963 struct drm_device *dev = encoder->base.dev;
2964 struct drm_i915_private *dev_priv = dev->dev_private;
2965 struct intel_crtc *intel_crtc =
2966 to_intel_crtc(encoder->base.crtc);
2967 enum dpio_channel ch = vlv_dport_to_channel(dport);
2968 enum pipe pipe = intel_crtc->pipe;
2969 unsigned int lane_mask =
2970 intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
2971 u32 val;
2972
2973 intel_dp_prepare(encoder); 2855 intel_dp_prepare(encoder);
2974 2856
2975 /* 2857 chv_phy_pre_pll_enable(encoder);
2976 * Must trick the second common lane into life.
2977 * Otherwise we can't even access the PLL.
2978 */
2979 if (ch == DPIO_CH0 && pipe == PIPE_B)
2980 dport->release_cl2_override =
2981 !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
2982
2983 chv_phy_powergate_lanes(encoder, true, lane_mask);
2984
2985 mutex_lock(&dev_priv->sb_lock);
2986
2987 /* Assert data lane reset */
2988 chv_data_lane_soft_reset(encoder, true);
2989
2990 /* program left/right clock distribution */
2991 if (pipe != PIPE_B) {
2992 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2993 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2994 if (ch == DPIO_CH0)
2995 val |= CHV_BUFLEFTENA1_FORCE;
2996 if (ch == DPIO_CH1)
2997 val |= CHV_BUFRIGHTENA1_FORCE;
2998 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2999 } else {
3000 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3001 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3002 if (ch == DPIO_CH0)
3003 val |= CHV_BUFLEFTENA2_FORCE;
3004 if (ch == DPIO_CH1)
3005 val |= CHV_BUFRIGHTENA2_FORCE;
3006 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3007 }
3008
3009 /* program clock channel usage */
3010 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
3011 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3012 if (pipe != PIPE_B)
3013 val &= ~CHV_PCS_USEDCLKCHANNEL;
3014 else
3015 val |= CHV_PCS_USEDCLKCHANNEL;
3016 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
3017
3018 if (intel_crtc->config->lane_count > 2) {
3019 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
3020 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
3021 if (pipe != PIPE_B)
3022 val &= ~CHV_PCS_USEDCLKCHANNEL;
3023 else
3024 val |= CHV_PCS_USEDCLKCHANNEL;
3025 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
3026 }
3027
3028 /*
3029 * This is a bit weird since generally CL
3030 * matches the pipe, but here we need to
3031 * pick the CL based on the port.
3032 */
3033 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
3034 if (pipe != PIPE_B)
3035 val &= ~CHV_CMN_USEDCLKCHANNEL;
3036 else
3037 val |= CHV_CMN_USEDCLKCHANNEL;
3038 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
3039
3040 mutex_unlock(&dev_priv->sb_lock);
3041} 2858}
3042 2859
3043static void chv_dp_post_pll_disable(struct intel_encoder *encoder) 2860static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
3044{ 2861{
3045 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 2862 chv_phy_post_pll_disable(encoder);
3046 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
3047 u32 val;
3048
3049 mutex_lock(&dev_priv->sb_lock);
3050
3051 /* disable left/right clock distribution */
3052 if (pipe != PIPE_B) {
3053 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
3054 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
3055 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
3056 } else {
3057 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
3058 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
3059 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
3060 }
3061
3062 mutex_unlock(&dev_priv->sb_lock);
3063
3064 /*
3065 * Leave the power down bit cleared for at least one
3066 * lane so that chv_powergate_phy_ch() will power
3067 * on something when the channel is otherwise unused.
3068 * When the port is off and the override is removed
3069 * the lanes power down anyway, so otherwise it doesn't
3070 * really matter what the state of power down bits is
3071 * after this.
3072 */
3073 chv_phy_powergate_lanes(encoder, false, 0x0);
3074} 2863}
3075 2864
3076/* 2865/*
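The large deletions above fold the VLV/CHV sideband (DPIO) lane programming into shared PHY helpers called from the DP hooks: vlv_phy_pre_pll_enable()/vlv_phy_pre_encoder_enable(), their chv_* counterparts, plus chv_phy_release_cl2_override() and chv_phy_post_pll_disable(). A minimal sketch of the resulting call order on VLV, using only function names visible in these hunks (the helper bodies presumably carry the register writes removed here):

        /* Sketch only: enable ordering on VLV after the refactor. */
        static void sketch_vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
        {
                intel_dp_prepare(encoder);        /* port/link register setup */
                vlv_phy_pre_pll_enable(encoder);  /* lane resets, skew fixups */
        }

        static void sketch_vlv_pre_enable_dp(struct intel_encoder *encoder)
        {
                vlv_phy_pre_encoder_enable(encoder); /* PCS programming via sideband */
                intel_enable_dp(encoder);            /* then light up the port */
        }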
@@ -3089,7 +2878,7 @@ uint8_t
3089intel_dp_voltage_max(struct intel_dp *intel_dp) 2878intel_dp_voltage_max(struct intel_dp *intel_dp)
3090{ 2879{
3091 struct drm_device *dev = intel_dp_to_dev(intel_dp); 2880 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3092 struct drm_i915_private *dev_priv = dev->dev_private; 2881 struct drm_i915_private *dev_priv = to_i915(dev);
3093 enum port port = dp_to_dig_port(intel_dp)->port; 2882 enum port port = dp_to_dig_port(intel_dp)->port;
3094 2883
3095 if (IS_BROXTON(dev)) 2884 if (IS_BROXTON(dev))
@@ -3178,16 +2967,10 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3178 2967
3179static uint32_t vlv_signal_levels(struct intel_dp *intel_dp) 2968static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
3180{ 2969{
3181 struct drm_device *dev = intel_dp_to_dev(intel_dp); 2970 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3182 struct drm_i915_private *dev_priv = dev->dev_private;
3183 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3184 struct intel_crtc *intel_crtc =
3185 to_intel_crtc(dport->base.base.crtc);
3186 unsigned long demph_reg_value, preemph_reg_value, 2971 unsigned long demph_reg_value, preemph_reg_value,
3187 uniqtranscale_reg_value; 2972 uniqtranscale_reg_value;
3188 uint8_t train_set = intel_dp->train_set[0]; 2973 uint8_t train_set = intel_dp->train_set[0];
3189 enum dpio_channel port = vlv_dport_to_channel(dport);
3190 int pipe = intel_crtc->pipe;
3191 2974
3192 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 2975 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3193 case DP_TRAIN_PRE_EMPH_LEVEL_0: 2976 case DP_TRAIN_PRE_EMPH_LEVEL_0:
@@ -3262,37 +3045,18 @@ static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
3262 return 0; 3045 return 0;
3263 } 3046 }
3264 3047
3265 mutex_lock(&dev_priv->sb_lock); 3048 vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value,
3266 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000); 3049 uniqtranscale_reg_value, 0);
3267 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3268 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
3269 uniqtranscale_reg_value);
3270 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3271 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3272 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3273 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
3274 mutex_unlock(&dev_priv->sb_lock);
3275 3050
3276 return 0; 3051 return 0;
3277} 3052}
3278 3053
3279static bool chv_need_uniq_trans_scale(uint8_t train_set)
3280{
3281 return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3282 (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3283}
3284
3285static uint32_t chv_signal_levels(struct intel_dp *intel_dp) 3054static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
3286{ 3055{
3287 struct drm_device *dev = intel_dp_to_dev(intel_dp); 3056 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3288 struct drm_i915_private *dev_priv = dev->dev_private; 3057 u32 deemph_reg_value, margin_reg_value;
3289 struct intel_digital_port *dport = dp_to_dig_port(intel_dp); 3058 bool uniq_trans_scale = false;
3290 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
3291 u32 deemph_reg_value, margin_reg_value, val;
3292 uint8_t train_set = intel_dp->train_set[0]; 3059 uint8_t train_set = intel_dp->train_set[0];
3293 enum dpio_channel ch = vlv_dport_to_channel(dport);
3294 enum pipe pipe = intel_crtc->pipe;
3295 int i;
3296 3060
3297 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { 3061 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3298 case DP_TRAIN_PRE_EMPH_LEVEL_0: 3062 case DP_TRAIN_PRE_EMPH_LEVEL_0:
@@ -3312,7 +3076,7 @@ static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
3312 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: 3076 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3313 deemph_reg_value = 128; 3077 deemph_reg_value = 128;
3314 margin_reg_value = 154; 3078 margin_reg_value = 154;
3315 /* FIXME extra to set for 1200 */ 3079 uniq_trans_scale = true;
3316 break; 3080 break;
3317 default: 3081 default:
3318 return 0; 3082 return 0;
@@ -3364,88 +3128,8 @@ static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
3364 return 0; 3128 return 0;
3365 } 3129 }
3366 3130
3367 mutex_lock(&dev_priv->sb_lock); 3131 chv_set_phy_signal_level(encoder, deemph_reg_value,
3368 3132 margin_reg_value, uniq_trans_scale);
3369 /* Clear calc init */
3370 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3371 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3372 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3373 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3374 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3375
3376 if (intel_crtc->config->lane_count > 2) {
3377 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3378 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3379 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3380 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3381 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3382 }
3383
3384 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3385 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3386 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3387 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3388
3389 if (intel_crtc->config->lane_count > 2) {
3390 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3391 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3392 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3393 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3394 }
3395
3396 /* Program swing deemph */
3397 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3398 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3399 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3400 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3401 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3402 }
3403
3404 /* Program swing margin */
3405 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3406 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
3407
3408 val &= ~DPIO_SWING_MARGIN000_MASK;
3409 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
3410
3411 /*
3412 * Supposedly this value shouldn't matter when unique transition
3413 * scale is disabled, but in fact it does matter. Let's just
3414 * always program the same value and hope it's OK.
3415 */
3416 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3417 val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
3418
3419 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3420 }
3421
3422 /*
3423 * The document said it needs to set bit 27 for ch0 and bit 26
3424 * for ch1. Might be a typo in the doc.
3425 * For now, for this unique transition scale selection, set bit
3426 * 27 for ch0 and ch1.
3427 */
3428 for (i = 0; i < intel_crtc->config->lane_count; i++) {
3429 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
3430 if (chv_need_uniq_trans_scale(train_set))
3431 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
3432 else
3433 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3434 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
3435 }
3436
3437 /* Start swing calculation */
3438 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3439 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3440 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3441
3442 if (intel_crtc->config->lane_count > 2) {
3443 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3444 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3445 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3446 }
3447
3448 mutex_unlock(&dev_priv->sb_lock);
3449 3133
3450 return 0; 3134 return 0;
3451} 3135}
@@ -3612,7 +3296,7 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3612{ 3296{
3613 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 3297 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3614 struct drm_device *dev = intel_dig_port->base.base.dev; 3298 struct drm_device *dev = intel_dig_port->base.base.dev;
3615 struct drm_i915_private *dev_priv = dev->dev_private; 3299 struct drm_i915_private *dev_priv = to_i915(dev);
3616 enum port port = intel_dig_port->port; 3300 enum port port = intel_dig_port->port;
3617 uint32_t val; 3301 uint32_t val;
3618 3302
@@ -3634,8 +3318,10 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3634 if (port == PORT_A) 3318 if (port == PORT_A)
3635 return; 3319 return;
3636 3320
3637 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE), 3321 if (intel_wait_for_register(dev_priv,DP_TP_STATUS(port),
3638 1)) 3322 DP_TP_STATUS_IDLE_DONE,
3323 DP_TP_STATUS_IDLE_DONE,
3324 1))
3639 DRM_ERROR("Timed out waiting for DP idle patterns\n"); 3325 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3640} 3326}
3641 3327
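The hunk above replaces an open-coded wait_for() poll with intel_wait_for_register(). Read from the call site, the helper takes the register, a mask, the expected masked value, and a timeout in milliseconds, and returns non-zero on timeout; a sketch annotating the same call (parameter roles inferred from this call site, not from a header in this diff):

        /* Poll DP_TP_STATUS(port) until the idle-done bit is set, give up after 1 ms. */
        if (intel_wait_for_register(dev_priv, DP_TP_STATUS(port),
                                    DP_TP_STATUS_IDLE_DONE,  /* bits to test   */
                                    DP_TP_STATUS_IDLE_DONE,  /* value expected */
                                    1))                      /* timeout in ms  */
                DRM_ERROR("Timed out waiting for DP idle patterns\n");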
@@ -3646,7 +3332,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
3646 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc); 3332 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
3647 enum port port = intel_dig_port->port; 3333 enum port port = intel_dig_port->port;
3648 struct drm_device *dev = intel_dig_port->base.base.dev; 3334 struct drm_device *dev = intel_dig_port->base.base.dev;
3649 struct drm_i915_private *dev_priv = dev->dev_private; 3335 struct drm_i915_private *dev_priv = to_i915(dev);
3650 uint32_t DP = intel_dp->DP; 3336 uint32_t DP = intel_dp->DP;
3651 3337
3652 if (WARN_ON(HAS_DDI(dev))) 3338 if (WARN_ON(HAS_DDI(dev)))
@@ -3698,7 +3384,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
3698 I915_WRITE(intel_dp->output_reg, DP); 3384 I915_WRITE(intel_dp->output_reg, DP);
3699 POSTING_READ(intel_dp->output_reg); 3385 POSTING_READ(intel_dp->output_reg);
3700 3386
3701 intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A); 3387 intel_wait_for_vblank_if_active(&dev_priv->drm, PIPE_A);
3702 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true); 3388 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
3703 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true); 3389 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
3704 } 3390 }
@@ -3713,8 +3399,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
3713{ 3399{
3714 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 3400 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3715 struct drm_device *dev = dig_port->base.base.dev; 3401 struct drm_device *dev = dig_port->base.base.dev;
3716 struct drm_i915_private *dev_priv = dev->dev_private; 3402 struct drm_i915_private *dev_priv = to_i915(dev);
3717 uint8_t rev;
3718 3403
3719 if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd, 3404 if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
3720 sizeof(intel_dp->dpcd)) < 0) 3405 sizeof(intel_dp->dpcd)) < 0)
@@ -3771,6 +3456,15 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
3771 DRM_DEBUG_KMS("PSR2 %s on sink", 3456 DRM_DEBUG_KMS("PSR2 %s on sink",
3772 dev_priv->psr.psr2_support ? "supported" : "not supported"); 3457 dev_priv->psr.psr2_support ? "supported" : "not supported");
3773 } 3458 }
3459
3460 /* Read the eDP Display control capabilities registers */
3461 memset(intel_dp->edp_dpcd, 0, sizeof(intel_dp->edp_dpcd));
3462 if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3463 (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
3464 intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
3465 sizeof(intel_dp->edp_dpcd)))
3466 DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
3467 intel_dp->edp_dpcd);
3774 } 3468 }
3775 3469
3776 DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n", 3470 DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
@@ -3778,10 +3472,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
3778 yesno(drm_dp_tps3_supported(intel_dp->dpcd))); 3472 yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
3779 3473
3780 /* Intermediate frequency support */ 3474 /* Intermediate frequency support */
3781 if (is_edp(intel_dp) && 3475 if (is_edp(intel_dp) && (intel_dp->edp_dpcd[0] >= 0x03)) { /* eDP v1.4 or higher */
3782 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3783 (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
3784 (rev >= 0x03)) { /* eDP v1.4 or higher */
3785 __le16 sink_rates[DP_MAX_SUPPORTED_RATES]; 3476 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
3786 int i; 3477 int i;
3787 3478
@@ -4559,7 +4250,7 @@ intel_dp_long_pulse(struct intel_connector *intel_connector)
4559 } 4250 }
4560 4251
4561 if (intel_encoder->type != INTEL_OUTPUT_EDP) 4252 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4562 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; 4253 intel_encoder->type = INTEL_OUTPUT_DP;
4563 4254
4564 intel_dp_probe_oui(intel_dp); 4255 intel_dp_probe_oui(intel_dp);
4565 4256
@@ -4635,7 +4326,7 @@ intel_dp_detect(struct drm_connector *connector, bool force)
4635 /* MST devices are disconnected from a monitor POV */ 4326 /* MST devices are disconnected from a monitor POV */
4636 intel_dp_unset_edid(intel_dp); 4327 intel_dp_unset_edid(intel_dp);
4637 if (intel_encoder->type != INTEL_OUTPUT_EDP) 4328 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4638 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; 4329 intel_encoder->type = INTEL_OUTPUT_DP;
4639 return connector_status_disconnected; 4330 return connector_status_disconnected;
4640 } 4331 }
4641 4332
@@ -4674,7 +4365,7 @@ intel_dp_force(struct drm_connector *connector)
4674 intel_display_power_put(dev_priv, power_domain); 4365 intel_display_power_put(dev_priv, power_domain);
4675 4366
4676 if (intel_encoder->type != INTEL_OUTPUT_EDP) 4367 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4677 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; 4368 intel_encoder->type = INTEL_OUTPUT_DP;
4678} 4369}
4679 4370
4680static int intel_dp_get_modes(struct drm_connector *connector) 4371static int intel_dp_get_modes(struct drm_connector *connector)
@@ -4723,7 +4414,7 @@ intel_dp_set_property(struct drm_connector *connector,
4723 struct drm_property *property, 4414 struct drm_property *property,
4724 uint64_t val) 4415 uint64_t val)
4725{ 4416{
4726 struct drm_i915_private *dev_priv = connector->dev->dev_private; 4417 struct drm_i915_private *dev_priv = to_i915(connector->dev);
4727 struct intel_connector *intel_connector = to_intel_connector(connector); 4418 struct intel_connector *intel_connector = to_intel_connector(connector);
4728 struct intel_encoder *intel_encoder = intel_attached_encoder(connector); 4419 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4729 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base); 4420 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
@@ -4811,6 +4502,32 @@ done:
4811 return 0; 4502 return 0;
4812} 4503}
4813 4504
4505static int
4506intel_dp_connector_register(struct drm_connector *connector)
4507{
4508 struct intel_dp *intel_dp = intel_attached_dp(connector);
4509 int ret;
4510
4511 ret = intel_connector_register(connector);
4512 if (ret)
4513 return ret;
4514
4515 i915_debugfs_connector_add(connector);
4516
4517 DRM_DEBUG_KMS("registering %s bus for %s\n",
4518 intel_dp->aux.name, connector->kdev->kobj.name);
4519
4520 intel_dp->aux.dev = connector->kdev;
4521 return drm_dp_aux_register(&intel_dp->aux);
4522}
4523
4524static void
4525intel_dp_connector_unregister(struct drm_connector *connector)
4526{
4527 drm_dp_aux_unregister(&intel_attached_dp(connector)->aux);
4528 intel_connector_unregister(connector);
4529}
4530
4814static void 4531static void
4815intel_dp_connector_destroy(struct drm_connector *connector) 4532intel_dp_connector_destroy(struct drm_connector *connector)
4816{ 4533{
@@ -4851,6 +4568,9 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
4851 intel_dp->edp_notifier.notifier_call = NULL; 4568 intel_dp->edp_notifier.notifier_call = NULL;
4852 } 4569 }
4853 } 4570 }
4571
4572 intel_dp_aux_fini(intel_dp);
4573
4854 drm_encoder_cleanup(encoder); 4574 drm_encoder_cleanup(encoder);
4855 kfree(intel_dig_port); 4575 kfree(intel_dig_port);
4856} 4576}
@@ -4876,7 +4596,7 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4876{ 4596{
4877 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 4597 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4878 struct drm_device *dev = intel_dig_port->base.base.dev; 4598 struct drm_device *dev = intel_dig_port->base.base.dev;
4879 struct drm_i915_private *dev_priv = dev->dev_private; 4599 struct drm_i915_private *dev_priv = to_i915(dev);
4880 enum intel_display_power_domain power_domain; 4600 enum intel_display_power_domain power_domain;
4881 4601
4882 lockdep_assert_held(&dev_priv->pps_mutex); 4602 lockdep_assert_held(&dev_priv->pps_mutex);
@@ -4929,6 +4649,8 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = {
4929 .fill_modes = drm_helper_probe_single_connector_modes, 4649 .fill_modes = drm_helper_probe_single_connector_modes,
4930 .set_property = intel_dp_set_property, 4650 .set_property = intel_dp_set_property,
4931 .atomic_get_property = intel_connector_atomic_get_property, 4651 .atomic_get_property = intel_connector_atomic_get_property,
4652 .late_register = intel_dp_connector_register,
4653 .early_unregister = intel_dp_connector_unregister,
4932 .destroy = intel_dp_connector_destroy, 4654 .destroy = intel_dp_connector_destroy,
4933 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 4655 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
4934 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, 4656 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
@@ -4937,7 +4659,6 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = {
4937static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { 4659static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
4938 .get_modes = intel_dp_get_modes, 4660 .get_modes = intel_dp_get_modes,
4939 .mode_valid = intel_dp_mode_valid, 4661 .mode_valid = intel_dp_mode_valid,
4940 .best_encoder = intel_best_encoder,
4941}; 4662};
4942 4663
4943static const struct drm_encoder_funcs intel_dp_enc_funcs = { 4664static const struct drm_encoder_funcs intel_dp_enc_funcs = {
@@ -4951,13 +4672,13 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4951 struct intel_dp *intel_dp = &intel_dig_port->dp; 4672 struct intel_dp *intel_dp = &intel_dig_port->dp;
4952 struct intel_encoder *intel_encoder = &intel_dig_port->base; 4673 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4953 struct drm_device *dev = intel_dig_port->base.base.dev; 4674 struct drm_device *dev = intel_dig_port->base.base.dev;
4954 struct drm_i915_private *dev_priv = dev->dev_private; 4675 struct drm_i915_private *dev_priv = to_i915(dev);
4955 enum intel_display_power_domain power_domain; 4676 enum intel_display_power_domain power_domain;
4956 enum irqreturn ret = IRQ_NONE; 4677 enum irqreturn ret = IRQ_NONE;
4957 4678
4958 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP && 4679 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
4959 intel_dig_port->base.type != INTEL_OUTPUT_HDMI) 4680 intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
4960 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT; 4681 intel_dig_port->base.type = INTEL_OUTPUT_DP;
4961 4682
4962 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) { 4683 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4963 /* 4684 /*
@@ -5019,7 +4740,7 @@ put_power:
5019/* check the VBT to see whether the eDP is on another port */ 4740/* check the VBT to see whether the eDP is on another port */
5020bool intel_dp_is_edp(struct drm_device *dev, enum port port) 4741bool intel_dp_is_edp(struct drm_device *dev, enum port port)
5021{ 4742{
5022 struct drm_i915_private *dev_priv = dev->dev_private; 4743 struct drm_i915_private *dev_priv = to_i915(dev);
5023 4744
5024 /* 4745 /*
5025 * eDP not supported on g4x, so bail out early just 4746 * eDP not supported on g4x, so bail out early just
@@ -5061,82 +4782,93 @@ static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5061} 4782}
5062 4783
5063static void 4784static void
5064intel_dp_init_panel_power_sequencer(struct drm_device *dev, 4785intel_pps_readout_hw_state(struct drm_i915_private *dev_priv,
5065 struct intel_dp *intel_dp) 4786 struct intel_dp *intel_dp, struct edp_power_seq *seq)
5066{ 4787{
5067 struct drm_i915_private *dev_priv = dev->dev_private;
5068 struct edp_power_seq cur, vbt, spec,
5069 *final = &intel_dp->pps_delays;
5070 u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0; 4788 u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
5071 i915_reg_t pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg; 4789 struct pps_registers regs;
5072
5073 lockdep_assert_held(&dev_priv->pps_mutex);
5074
5075 /* already initialized? */
5076 if (final->t11_t12 != 0)
5077 return;
5078
5079 if (IS_BROXTON(dev)) {
5080 /*
5081 * TODO: BXT has 2 sets of PPS registers.
5082 * Correct Register for Broxton need to be identified
5083 * using VBT. hardcoding for now
5084 */
5085 pp_ctrl_reg = BXT_PP_CONTROL(0);
5086 pp_on_reg = BXT_PP_ON_DELAYS(0);
5087 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5088 } else if (HAS_PCH_SPLIT(dev)) {
5089 pp_ctrl_reg = PCH_PP_CONTROL;
5090 pp_on_reg = PCH_PP_ON_DELAYS;
5091 pp_off_reg = PCH_PP_OFF_DELAYS;
5092 pp_div_reg = PCH_PP_DIVISOR;
5093 } else {
5094 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5095 4790
5096 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe); 4791 intel_pps_get_registers(dev_priv, intel_dp, &regs);
5097 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5098 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5099 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5100 }
5101 4792
5102 /* Workaround: Need to write PP_CONTROL with the unlock key as 4793 /* Workaround: Need to write PP_CONTROL with the unlock key as
5103 * the very first thing. */ 4794 * the very first thing. */
5104 pp_ctl = ironlake_get_pp_control(intel_dp); 4795 pp_ctl = ironlake_get_pp_control(intel_dp);
5105 4796
5106 pp_on = I915_READ(pp_on_reg); 4797 pp_on = I915_READ(regs.pp_on);
5107 pp_off = I915_READ(pp_off_reg); 4798 pp_off = I915_READ(regs.pp_off);
5108 if (!IS_BROXTON(dev)) { 4799 if (!IS_BROXTON(dev_priv)) {
5109 I915_WRITE(pp_ctrl_reg, pp_ctl); 4800 I915_WRITE(regs.pp_ctrl, pp_ctl);
5110 pp_div = I915_READ(pp_div_reg); 4801 pp_div = I915_READ(regs.pp_div);
5111 } 4802 }
5112 4803
5113 /* Pull timing values out of registers */ 4804 /* Pull timing values out of registers */
5114 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >> 4805 seq->t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5115 PANEL_POWER_UP_DELAY_SHIFT; 4806 PANEL_POWER_UP_DELAY_SHIFT;
5116 4807
5117 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >> 4808 seq->t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5118 PANEL_LIGHT_ON_DELAY_SHIFT; 4809 PANEL_LIGHT_ON_DELAY_SHIFT;
5119 4810
5120 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >> 4811 seq->t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5121 PANEL_LIGHT_OFF_DELAY_SHIFT; 4812 PANEL_LIGHT_OFF_DELAY_SHIFT;
5122 4813
5123 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >> 4814 seq->t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5124 PANEL_POWER_DOWN_DELAY_SHIFT; 4815 PANEL_POWER_DOWN_DELAY_SHIFT;
5125 4816
5126 if (IS_BROXTON(dev)) { 4817 if (IS_BROXTON(dev_priv)) {
5127 u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >> 4818 u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
5128 BXT_POWER_CYCLE_DELAY_SHIFT; 4819 BXT_POWER_CYCLE_DELAY_SHIFT;
5129 if (tmp > 0) 4820 if (tmp > 0)
5130 cur.t11_t12 = (tmp - 1) * 1000; 4821 seq->t11_t12 = (tmp - 1) * 1000;
5131 else 4822 else
5132 cur.t11_t12 = 0; 4823 seq->t11_t12 = 0;
5133 } else { 4824 } else {
5134 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >> 4825 seq->t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
5135 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000; 4826 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
5136 } 4827 }
4828}
4829
4830static void
4831intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
4832{
4833 DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
4834 state_name,
4835 seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
4836}
4837
4838static void
4839intel_pps_verify_state(struct drm_i915_private *dev_priv,
4840 struct intel_dp *intel_dp)
4841{
4842 struct edp_power_seq hw;
4843 struct edp_power_seq *sw = &intel_dp->pps_delays;
4844
4845 intel_pps_readout_hw_state(dev_priv, intel_dp, &hw);
4846
4847 if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
4848 hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
4849 DRM_ERROR("PPS state mismatch\n");
4850 intel_pps_dump_state("sw", sw);
4851 intel_pps_dump_state("hw", &hw);
4852 }
4853}
4854
4855static void
4856intel_dp_init_panel_power_sequencer(struct drm_device *dev,
4857 struct intel_dp *intel_dp)
4858{
4859 struct drm_i915_private *dev_priv = to_i915(dev);
4860 struct edp_power_seq cur, vbt, spec,
4861 *final = &intel_dp->pps_delays;
4862
4863 lockdep_assert_held(&dev_priv->pps_mutex);
4864
4865 /* already initialized? */
4866 if (final->t11_t12 != 0)
4867 return;
5137 4868
5138 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", 4869 intel_pps_readout_hw_state(dev_priv, intel_dp, &cur);
5139 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12); 4870
4871 intel_pps_dump_state("cur", &cur);
5140 4872
5141 vbt = dev_priv->vbt.edp.pps; 4873 vbt = dev_priv->vbt.edp.pps;
5142 4874
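The hunk above splits the panel power sequencer handling into intel_pps_readout_hw_state(), which pulls the T1/T3, T8, T9, T10 and T11/T12 delays out of the PP registers into an edp_power_seq, intel_pps_dump_state() for logging, and intel_pps_verify_state() to compare the cached software delays against what the hardware actually holds. A sketch of a verify pass built only from those helpers (the wrapper name is illustrative):

        /* Sketch: cross-check cached PPS delays against the registers. */
        static void sketch_pps_check(struct drm_i915_private *dev_priv,
                                     struct intel_dp *intel_dp)
        {
                struct edp_power_seq hw;

                intel_pps_readout_hw_state(dev_priv, intel_dp, &hw);
                intel_pps_dump_state("hw", &hw);
                /* intel_pps_verify_state() does the same readout and logs on mismatch. */
                intel_pps_verify_state(dev_priv, intel_dp);
        }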
@@ -5152,8 +4884,7 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
5152 * too. */ 4884 * too. */
5153 spec.t11_t12 = (510 + 100) * 10; 4885 spec.t11_t12 = (510 + 100) * 10;
5154 4886
5155 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", 4887 intel_pps_dump_state("vbt", &vbt);
5156 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5157 4888
5158 /* Use the max of the register settings and vbt. If both are 4889 /* Use the max of the register settings and vbt. If both are
5159 * unset, fall back to the spec limits. */ 4890 * unset, fall back to the spec limits. */
@@ -5181,59 +4912,41 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
5181 4912
5182 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n", 4913 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5183 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); 4914 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
4915
4916 /*
4917 * We override the HW backlight delays to 1 because we do manual waits
4918 * on them. For T8, even BSpec recommends doing it. For T9, if we
4919 * don't do this, we'll end up waiting for the backlight off delay
4920 * twice: once when we do the manual sleep, and once when we disable
4921 * the panel and wait for the PP_STATUS bit to become zero.
4922 */
4923 final->t8 = 1;
4924 final->t9 = 1;
5184} 4925}
5185 4926
5186static void 4927static void
5187intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, 4928intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
5188 struct intel_dp *intel_dp) 4929 struct intel_dp *intel_dp)
5189{ 4930{
5190 struct drm_i915_private *dev_priv = dev->dev_private; 4931 struct drm_i915_private *dev_priv = to_i915(dev);
5191 u32 pp_on, pp_off, pp_div, port_sel = 0; 4932 u32 pp_on, pp_off, pp_div, port_sel = 0;
5192 int div = dev_priv->rawclk_freq / 1000; 4933 int div = dev_priv->rawclk_freq / 1000;
5193 i915_reg_t pp_on_reg, pp_off_reg, pp_div_reg, pp_ctrl_reg; 4934 struct pps_registers regs;
5194 enum port port = dp_to_dig_port(intel_dp)->port; 4935 enum port port = dp_to_dig_port(intel_dp)->port;
5195 const struct edp_power_seq *seq = &intel_dp->pps_delays; 4936 const struct edp_power_seq *seq = &intel_dp->pps_delays;
5196 4937
5197 lockdep_assert_held(&dev_priv->pps_mutex); 4938 lockdep_assert_held(&dev_priv->pps_mutex);
5198 4939
5199 if (IS_BROXTON(dev)) { 4940 intel_pps_get_registers(dev_priv, intel_dp, &regs);
5200 /*
5201 * TODO: BXT has 2 sets of PPS registers.
5202 * Correct Register for Broxton need to be identified
5203 * using VBT. hardcoding for now
5204 */
5205 pp_ctrl_reg = BXT_PP_CONTROL(0);
5206 pp_on_reg = BXT_PP_ON_DELAYS(0);
5207 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5208
5209 } else if (HAS_PCH_SPLIT(dev)) {
5210 pp_on_reg = PCH_PP_ON_DELAYS;
5211 pp_off_reg = PCH_PP_OFF_DELAYS;
5212 pp_div_reg = PCH_PP_DIVISOR;
5213 } else {
5214 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5215 4941
5216 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5217 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5218 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
5219 }
5220
5221 /*
5222 * And finally store the new values in the power sequencer. The
5223 * backlight delays are set to 1 because we do manual waits on them. For
5224 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5225 * we'll end up waiting for the backlight off delay twice: once when we
5226 * do the manual sleep, and once when we disable the panel and wait for
5227 * the PP_STATUS bit to become zero.
5228 */
5229 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) | 4942 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
5230 (1 << PANEL_LIGHT_ON_DELAY_SHIFT); 4943 (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
5231 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) | 4944 pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
5232 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT); 4945 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
5233 /* Compute the divisor for the pp clock, simply match the Bspec 4946 /* Compute the divisor for the pp clock, simply match the Bspec
5234 * formula. */ 4947 * formula. */
5235 if (IS_BROXTON(dev)) { 4948 if (IS_BROXTON(dev)) {
5236 pp_div = I915_READ(pp_ctrl_reg); 4949 pp_div = I915_READ(regs.pp_ctrl);
5237 pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK; 4950 pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
5238 pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000) 4951 pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
5239 << BXT_POWER_CYCLE_DELAY_SHIFT); 4952 << BXT_POWER_CYCLE_DELAY_SHIFT);
@@ -5256,19 +4969,19 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
5256 4969
5257 pp_on |= port_sel; 4970 pp_on |= port_sel;
5258 4971
5259 I915_WRITE(pp_on_reg, pp_on); 4972 I915_WRITE(regs.pp_on, pp_on);
5260 I915_WRITE(pp_off_reg, pp_off); 4973 I915_WRITE(regs.pp_off, pp_off);
5261 if (IS_BROXTON(dev)) 4974 if (IS_BROXTON(dev))
5262 I915_WRITE(pp_ctrl_reg, pp_div); 4975 I915_WRITE(regs.pp_ctrl, pp_div);
5263 else 4976 else
5264 I915_WRITE(pp_div_reg, pp_div); 4977 I915_WRITE(regs.pp_div, pp_div);
5265 4978
5266 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n", 4979 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
5267 I915_READ(pp_on_reg), 4980 I915_READ(regs.pp_on),
5268 I915_READ(pp_off_reg), 4981 I915_READ(regs.pp_off),
5269 IS_BROXTON(dev) ? 4982 IS_BROXTON(dev) ?
5270 (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) : 4983 (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK) :
5271 I915_READ(pp_div_reg)); 4984 I915_READ(regs.pp_div));
5272} 4985}
5273 4986
5274/** 4987/**
@@ -5285,7 +4998,7 @@ intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
5285 */ 4998 */
5286static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate) 4999static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
5287{ 5000{
5288 struct drm_i915_private *dev_priv = dev->dev_private; 5001 struct drm_i915_private *dev_priv = to_i915(dev);
5289 struct intel_encoder *encoder; 5002 struct intel_encoder *encoder;
5290 struct intel_digital_port *dig_port = NULL; 5003 struct intel_digital_port *dig_port = NULL;
5291 struct intel_dp *intel_dp = dev_priv->drrs.dp; 5004 struct intel_dp *intel_dp = dev_priv->drrs.dp;
@@ -5384,7 +5097,7 @@ static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
5384void intel_edp_drrs_enable(struct intel_dp *intel_dp) 5097void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5385{ 5098{
5386 struct drm_device *dev = intel_dp_to_dev(intel_dp); 5099 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5387 struct drm_i915_private *dev_priv = dev->dev_private; 5100 struct drm_i915_private *dev_priv = to_i915(dev);
5388 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 5101 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5389 struct drm_crtc *crtc = dig_port->base.base.crtc; 5102 struct drm_crtc *crtc = dig_port->base.base.crtc;
5390 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5103 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -5416,7 +5129,7 @@ unlock:
5416void intel_edp_drrs_disable(struct intel_dp *intel_dp) 5129void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5417{ 5130{
5418 struct drm_device *dev = intel_dp_to_dev(intel_dp); 5131 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5419 struct drm_i915_private *dev_priv = dev->dev_private; 5132 struct drm_i915_private *dev_priv = to_i915(dev);
5420 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 5133 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5421 struct drm_crtc *crtc = dig_port->base.base.crtc; 5134 struct drm_crtc *crtc = dig_port->base.base.crtc;
5422 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5135 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -5431,9 +5144,9 @@ void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5431 } 5144 }
5432 5145
5433 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) 5146 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5434 intel_dp_set_drrs_state(dev_priv->dev, 5147 intel_dp_set_drrs_state(&dev_priv->drm,
5435 intel_dp->attached_connector->panel. 5148 intel_dp->attached_connector->panel.
5436 fixed_mode->vrefresh); 5149 fixed_mode->vrefresh);
5437 5150
5438 dev_priv->drrs.dp = NULL; 5151 dev_priv->drrs.dp = NULL;
5439 mutex_unlock(&dev_priv->drrs.mutex); 5152 mutex_unlock(&dev_priv->drrs.mutex);
@@ -5463,9 +5176,9 @@ static void intel_edp_drrs_downclock_work(struct work_struct *work)
5463 goto unlock; 5176 goto unlock;
5464 5177
5465 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) 5178 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5466 intel_dp_set_drrs_state(dev_priv->dev, 5179 intel_dp_set_drrs_state(&dev_priv->drm,
5467 intel_dp->attached_connector->panel. 5180 intel_dp->attached_connector->panel.
5468 downclock_mode->vrefresh); 5181 downclock_mode->vrefresh);
5469 5182
5470unlock: 5183unlock:
5471 mutex_unlock(&dev_priv->drrs.mutex); 5184 mutex_unlock(&dev_priv->drrs.mutex);
@@ -5484,7 +5197,7 @@ unlock:
5484void intel_edp_drrs_invalidate(struct drm_device *dev, 5197void intel_edp_drrs_invalidate(struct drm_device *dev,
5485 unsigned frontbuffer_bits) 5198 unsigned frontbuffer_bits)
5486{ 5199{
5487 struct drm_i915_private *dev_priv = dev->dev_private; 5200 struct drm_i915_private *dev_priv = to_i915(dev);
5488 struct drm_crtc *crtc; 5201 struct drm_crtc *crtc;
5489 enum pipe pipe; 5202 enum pipe pipe;
5490 5203
@@ -5507,9 +5220,9 @@ void intel_edp_drrs_invalidate(struct drm_device *dev,
5507 5220
5508 /* invalidate means busy screen hence upclock */ 5221 /* invalidate means busy screen hence upclock */
5509 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) 5222 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5510 intel_dp_set_drrs_state(dev_priv->dev, 5223 intel_dp_set_drrs_state(&dev_priv->drm,
5511 dev_priv->drrs.dp->attached_connector->panel. 5224 dev_priv->drrs.dp->attached_connector->panel.
5512 fixed_mode->vrefresh); 5225 fixed_mode->vrefresh);
5513 5226
5514 mutex_unlock(&dev_priv->drrs.mutex); 5227 mutex_unlock(&dev_priv->drrs.mutex);
5515} 5228}
@@ -5529,7 +5242,7 @@ void intel_edp_drrs_invalidate(struct drm_device *dev,
5529void intel_edp_drrs_flush(struct drm_device *dev, 5242void intel_edp_drrs_flush(struct drm_device *dev,
5530 unsigned frontbuffer_bits) 5243 unsigned frontbuffer_bits)
5531{ 5244{
5532 struct drm_i915_private *dev_priv = dev->dev_private; 5245 struct drm_i915_private *dev_priv = to_i915(dev);
5533 struct drm_crtc *crtc; 5246 struct drm_crtc *crtc;
5534 enum pipe pipe; 5247 enum pipe pipe;
5535 5248
@@ -5552,9 +5265,9 @@ void intel_edp_drrs_flush(struct drm_device *dev,
5552 5265
5553 /* flush means busy screen hence upclock */ 5266 /* flush means busy screen hence upclock */
5554 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) 5267 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5555 intel_dp_set_drrs_state(dev_priv->dev, 5268 intel_dp_set_drrs_state(&dev_priv->drm,
5556 dev_priv->drrs.dp->attached_connector->panel. 5269 dev_priv->drrs.dp->attached_connector->panel.
5557 fixed_mode->vrefresh); 5270 fixed_mode->vrefresh);
5558 5271
5559 /* 5272 /*
5560 * flush also means no more activity hence schedule downclock, if all 5273 * flush also means no more activity hence schedule downclock, if all
@@ -5589,14 +5302,14 @@ void intel_edp_drrs_flush(struct drm_device *dev,
5589 * 5302 *
5590 * DRRS saves power by switching to low RR based on usage scenarios. 5303 * DRRS saves power by switching to low RR based on usage scenarios.
5591 * 5304 *
5592 * eDP DRRS:- 5305 * The implementation is based on frontbuffer tracking implementation. When
5593 * The implementation is based on frontbuffer tracking implementation. 5306 * there is a disturbance on the screen triggered by user activity or a periodic
5594 * When there is a disturbance on the screen triggered by user activity or a 5307 * system activity, DRRS is disabled (RR is changed to high RR). When there is
5595 * periodic system activity, DRRS is disabled (RR is changed to high RR). 5308 * no movement on screen, after a timeout of 1 second, a switch to low RR is
5596 * When there is no movement on screen, after a timeout of 1 second, a switch 5309 * made.
5597 * to low RR is made. 5310 *
5598 * For integration with frontbuffer tracking code, 5311 * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate()
5599 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called. 5312 * and intel_edp_drrs_flush() are called.
5600 * 5313 *
5601 * DRRS can be further extended to support other internal panels and also 5314 * DRRS can be further extended to support other internal panels and also
5602 * the scenario of video playback wherein RR is set based on the rate 5315 * the scenario of video playback wherein RR is set based on the rate
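The kernel-doc rewritten above describes how DRRS ties into frontbuffer tracking: activity forces the high refresh rate, and a one-second idle timeout drops back to the low one. A sketch of how a caller drives that from the two entry points named in the text (signatures taken from the intel_edp_drrs_invalidate()/intel_edp_drrs_flush() hunks earlier in this diff; the wrapper names are illustrative):

        /* Screen became busy: force the high refresh rate right away. */
        static void sketch_frontbuffer_invalidate(struct drm_device *dev,
                                                  unsigned frontbuffer_bits)
        {
                intel_edp_drrs_invalidate(dev, frontbuffer_bits);
        }

        /* Rendering flushed: upclock now; the delayed work drops back to the
         * low refresh rate after the idle timeout if nothing else happens. */
        static void sketch_frontbuffer_flush(struct drm_device *dev,
                                             unsigned frontbuffer_bits)
        {
                intel_edp_drrs_flush(dev, frontbuffer_bits);
        }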
@@ -5622,7 +5335,7 @@ intel_dp_drrs_init(struct intel_connector *intel_connector,
5622{ 5335{
5623 struct drm_connector *connector = &intel_connector->base; 5336 struct drm_connector *connector = &intel_connector->base;
5624 struct drm_device *dev = connector->dev; 5337 struct drm_device *dev = connector->dev;
5625 struct drm_i915_private *dev_priv = dev->dev_private; 5338 struct drm_i915_private *dev_priv = to_i915(dev);
5626 struct drm_display_mode *downclock_mode = NULL; 5339 struct drm_display_mode *downclock_mode = NULL;
5627 5340
5628 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work); 5341 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
@@ -5660,7 +5373,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
5660 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 5373 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5661 struct intel_encoder *intel_encoder = &intel_dig_port->base; 5374 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5662 struct drm_device *dev = intel_encoder->base.dev; 5375 struct drm_device *dev = intel_encoder->base.dev;
5663 struct drm_i915_private *dev_priv = dev->dev_private; 5376 struct drm_i915_private *dev_priv = to_i915(dev);
5664 struct drm_display_mode *fixed_mode = NULL; 5377 struct drm_display_mode *fixed_mode = NULL;
5665 struct drm_display_mode *downclock_mode = NULL; 5378 struct drm_display_mode *downclock_mode = NULL;
5666 bool has_dpcd; 5379 bool has_dpcd;
@@ -5671,8 +5384,32 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
5671 if (!is_edp(intel_dp)) 5384 if (!is_edp(intel_dp))
5672 return true; 5385 return true;
5673 5386
5387 /*
5388 * On IBX/CPT we may get here with LVDS already registered. Since the
5389 * driver uses the only internal power sequencer available for both
5390 * eDP and LVDS, bail out early in this case to prevent interfering
5391 * with an already powered-on LVDS power sequencer.
5392 */
5393 if (intel_get_lvds_encoder(dev)) {
5394 WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
5395 DRM_INFO("LVDS was detected, not registering eDP\n");
5396
5397 return false;
5398 }
5399
5674 pps_lock(intel_dp); 5400 pps_lock(intel_dp);
5401
5402 intel_dp_init_panel_power_timestamps(intel_dp);
5403
5404 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
5405 vlv_initial_power_sequencer_setup(intel_dp);
5406 } else {
5407 intel_dp_init_panel_power_sequencer(dev, intel_dp);
5408 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
5409 }
5410
5675 intel_edp_panel_vdd_sanitize(intel_dp); 5411 intel_edp_panel_vdd_sanitize(intel_dp);
5412
5676 pps_unlock(intel_dp); 5413 pps_unlock(intel_dp);
5677 5414
5678 /* Cache DPCD and EDID for edp. */ 5415 /* Cache DPCD and EDID for edp. */
@@ -5686,14 +5423,9 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
5686 } else { 5423 } else {
5687 /* if this fails, presume the device is a ghost */ 5424 /* if this fails, presume the device is a ghost */
5688 DRM_INFO("failed to retrieve link info, disabling eDP\n"); 5425 DRM_INFO("failed to retrieve link info, disabling eDP\n");
5689 return false; 5426 goto out_vdd_off;
5690 } 5427 }
5691 5428
5692 /* We now know it's not a ghost, init power sequence regs. */
5693 pps_lock(intel_dp);
5694 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
5695 pps_unlock(intel_dp);
5696
5697 mutex_lock(&dev->mode_config.mutex); 5429 mutex_lock(&dev->mode_config.mutex);
5698 edid = drm_get_edid(connector, &intel_dp->aux.ddc); 5430 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
5699 if (edid) { 5431 if (edid) {
@@ -5761,6 +5493,18 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
5761 intel_panel_setup_backlight(connector, pipe); 5493 intel_panel_setup_backlight(connector, pipe);
5762 5494
5763 return true; 5495 return true;
5496
5497out_vdd_off:
5498 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5499 /*
5500 * vdd might still be enabled due to the delayed vdd off.
5501 * Make sure vdd is actually turned off here.
5502 */
5503 pps_lock(intel_dp);
5504 edp_panel_vdd_off_sync(intel_dp);
5505 pps_unlock(intel_dp);
5506
5507 return false;
5764} 5508}
5765 5509
5766bool 5510bool
@@ -5771,9 +5515,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5771 struct intel_dp *intel_dp = &intel_dig_port->dp; 5515 struct intel_dp *intel_dp = &intel_dig_port->dp;
5772 struct intel_encoder *intel_encoder = &intel_dig_port->base; 5516 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5773 struct drm_device *dev = intel_encoder->base.dev; 5517 struct drm_device *dev = intel_encoder->base.dev;
5774 struct drm_i915_private *dev_priv = dev->dev_private; 5518 struct drm_i915_private *dev_priv = to_i915(dev);
5775 enum port port = intel_dig_port->port; 5519 enum port port = intel_dig_port->port;
5776 int type, ret; 5520 int type;
5777 5521
5778 if (WARN(intel_dig_port->max_lanes < 1, 5522 if (WARN(intel_dig_port->max_lanes < 1,
5779 "Not enough lanes (%d) for DP on port %c\n", 5523 "Not enough lanes (%d) for DP on port %c\n",
@@ -5832,17 +5576,17 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5832 connector->interlace_allowed = true; 5576 connector->interlace_allowed = true;
5833 connector->doublescan_allowed = 0; 5577 connector->doublescan_allowed = 0;
5834 5578
5579 intel_dp_aux_init(intel_dp, intel_connector);
5580
5835 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, 5581 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
5836 edp_panel_vdd_work); 5582 edp_panel_vdd_work);
5837 5583
5838 intel_connector_attach_encoder(intel_connector, intel_encoder); 5584 intel_connector_attach_encoder(intel_connector, intel_encoder);
5839 drm_connector_register(connector);
5840 5585
5841 if (HAS_DDI(dev)) 5586 if (HAS_DDI(dev))
5842 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; 5587 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
5843 else 5588 else
5844 intel_connector->get_hw_state = intel_connector_get_hw_state; 5589 intel_connector->get_hw_state = intel_connector_get_hw_state;
5845 intel_connector->unregister = intel_dp_connector_unregister;
5846 5590
5847 /* Set up the hotplug pin. */ 5591 /* Set up the hotplug pin. */
5848 switch (port) { 5592 switch (port) {
@@ -5867,22 +5611,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5867 BUG(); 5611 BUG();
5868 } 5612 }
5869 5613
5870 if (is_edp(intel_dp)) {
5871 pps_lock(intel_dp);
5872 intel_dp_init_panel_power_timestamps(intel_dp);
5873 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
5874 vlv_initial_power_sequencer_setup(intel_dp);
5875 else
5876 intel_dp_init_panel_power_sequencer(dev, intel_dp);
5877 pps_unlock(intel_dp);
5878 }
5879
5880 ret = intel_dp_aux_init(intel_dp, intel_connector);
5881 if (ret)
5882 goto fail;
5883
5884 /* init MST on ports that can support it */ 5614 /* init MST on ports that can support it */
5885 if (HAS_DP_MST(dev) && 5615 if (HAS_DP_MST(dev) && !is_edp(intel_dp) &&
5886 (port == PORT_B || port == PORT_C || port == PORT_D)) 5616 (port == PORT_B || port == PORT_C || port == PORT_D))
5887 intel_dp_mst_encoder_init(intel_dig_port, 5617 intel_dp_mst_encoder_init(intel_dig_port,
5888 intel_connector->base.base.id); 5618 intel_connector->base.base.id);
@@ -5904,22 +5634,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5904 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); 5634 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
5905 } 5635 }
5906 5636
5907 i915_debugfs_connector_add(connector);
5908
5909 return true; 5637 return true;
5910 5638
5911fail: 5639fail:
5912 if (is_edp(intel_dp)) {
5913 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5914 /*
5915 * vdd might still be enabled do to the delayed vdd off.
5916 * Make sure vdd is actually turned off here.
5917 */
5918 pps_lock(intel_dp);
5919 edp_panel_vdd_off_sync(intel_dp);
5920 pps_unlock(intel_dp);
5921 }
5922 drm_connector_unregister(connector);
5923 drm_connector_cleanup(connector); 5640 drm_connector_cleanup(connector);
5924 5641
5925 return false; 5642 return false;
@@ -5929,7 +5646,7 @@ bool intel_dp_init(struct drm_device *dev,
5929 i915_reg_t output_reg, 5646 i915_reg_t output_reg,
5930 enum port port) 5647 enum port port)
5931{ 5648{
5932 struct drm_i915_private *dev_priv = dev->dev_private; 5649 struct drm_i915_private *dev_priv = to_i915(dev);
5933 struct intel_digital_port *intel_dig_port; 5650 struct intel_digital_port *intel_dig_port;
5934 struct intel_encoder *intel_encoder; 5651 struct intel_encoder *intel_encoder;
5935 struct drm_encoder *encoder; 5652 struct drm_encoder *encoder;
@@ -5947,7 +5664,7 @@ bool intel_dp_init(struct drm_device *dev,
5947 encoder = &intel_encoder->base; 5664 encoder = &intel_encoder->base;
5948 5665
5949 if (drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs, 5666 if (drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
5950 DRM_MODE_ENCODER_TMDS, NULL)) 5667 DRM_MODE_ENCODER_TMDS, "DP %c", port_name(port)))
5951 goto err_encoder_init; 5668 goto err_encoder_init;
5952 5669
5953 intel_encoder->compute_config = intel_dp_compute_config; 5670 intel_encoder->compute_config = intel_dp_compute_config;
@@ -5977,7 +5694,7 @@ bool intel_dp_init(struct drm_device *dev,
5977 intel_dig_port->dp.output_reg = output_reg; 5694 intel_dig_port->dp.output_reg = output_reg;
5978 intel_dig_port->max_lanes = 4; 5695 intel_dig_port->max_lanes = 4;
5979 5696
5980 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; 5697 intel_encoder->type = INTEL_OUTPUT_DP;
5981 if (IS_CHERRYVIEW(dev)) { 5698 if (IS_CHERRYVIEW(dev)) {
5982 if (port == PORT_D) 5699 if (port == PORT_D)
5983 intel_encoder->crtc_mask = 1 << 2; 5700 intel_encoder->crtc_mask = 1 << 2;
@@ -6007,43 +5724,35 @@ err_connector_alloc:
6007 5724
6008void intel_dp_mst_suspend(struct drm_device *dev) 5725void intel_dp_mst_suspend(struct drm_device *dev)
6009{ 5726{
6010 struct drm_i915_private *dev_priv = dev->dev_private; 5727 struct drm_i915_private *dev_priv = to_i915(dev);
6011 int i; 5728 int i;
6012 5729
6013 /* disable MST */ 5730 /* disable MST */
6014 for (i = 0; i < I915_MAX_PORTS; i++) { 5731 for (i = 0; i < I915_MAX_PORTS; i++) {
6015 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i]; 5732 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6016 if (!intel_dig_port) 5733
5734 if (!intel_dig_port || !intel_dig_port->dp.can_mst)
6017 continue; 5735 continue;
6018 5736
6019 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) { 5737 if (intel_dig_port->dp.is_mst)
6020 if (!intel_dig_port->dp.can_mst) 5738 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6021 continue;
6022 if (intel_dig_port->dp.is_mst)
6023 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6024 }
6025 } 5739 }
6026} 5740}
6027 5741
6028void intel_dp_mst_resume(struct drm_device *dev) 5742void intel_dp_mst_resume(struct drm_device *dev)
6029{ 5743{
6030 struct drm_i915_private *dev_priv = dev->dev_private; 5744 struct drm_i915_private *dev_priv = to_i915(dev);
6031 int i; 5745 int i;
6032 5746
6033 for (i = 0; i < I915_MAX_PORTS; i++) { 5747 for (i = 0; i < I915_MAX_PORTS; i++) {
6034 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i]; 5748 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6035 if (!intel_dig_port) 5749 int ret;
6036 continue;
6037 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6038 int ret;
6039 5750
6040 if (!intel_dig_port->dp.can_mst) 5751 if (!intel_dig_port || !intel_dig_port->dp.can_mst)
6041 continue; 5752 continue;
6042 5753
6043 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr); 5754 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6044 if (ret != 0) { 5755 if (ret)
6045 intel_dp_check_mst_status(&intel_dig_port->dp); 5756 intel_dp_check_mst_status(&intel_dig_port->dp);
6046 }
6047 }
6048 } 5757 }
6049} 5758}
diff --git a/drivers/gpu/drm/i915/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
new file mode 100644
index 000000000000..6532e226db29
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dp_aux_backlight.c
@@ -0,0 +1,172 @@
1/*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 */
24
25#include "intel_drv.h"
26
27static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
28{
29 uint8_t reg_val = 0;
30
31 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER,
32 &reg_val) < 0) {
33 DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n",
34 DP_EDP_DISPLAY_CONTROL_REGISTER);
35 return;
36 }
37 if (enable)
38 reg_val |= DP_EDP_BACKLIGHT_ENABLE;
39 else
40 reg_val &= ~(DP_EDP_BACKLIGHT_ENABLE);
41
42 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER,
43 reg_val) != 1) {
44 DRM_DEBUG_KMS("Failed to %s aux backlight\n",
45 enable ? "enable" : "disable");
46 }
47}
48
49/*
50 * Read the current backlight value from DPCD register(s) based
51 * on whether 8-bit (MSB) or 16-bit (MSB and LSB) values are supported
52 */
53static uint32_t intel_dp_aux_get_backlight(struct intel_connector *connector)
54{
55 struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
56 uint8_t read_val[2] = { 0x0 };
57 uint16_t level = 0;
58
59 if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
60 &read_val, sizeof(read_val)) < 0) {
61 DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n",
62 DP_EDP_BACKLIGHT_BRIGHTNESS_MSB);
63 return 0;
64 }
65 level = read_val[0];
66 if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
67 level = (read_val[0] << 8 | read_val[1]);
68
69 return level;
70}
71
72/*
73 * Sends the current backlight level over the aux channel, checking whether
74 * it is using an 8-bit or 16-bit value (MSB and LSB)
75 */
76static void
77intel_dp_aux_set_backlight(struct intel_connector *connector, u32 level)
78{
79 struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
80 uint8_t vals[2] = { 0x0 };
81
82 vals[0] = level;
83
84 /* Write the MSB and/or LSB */
85 if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) {
86 vals[0] = (level & 0xFF00) >> 8;
87 vals[1] = (level & 0xFF);
88 }
89 if (drm_dp_dpcd_write(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
90 vals, sizeof(vals)) < 0) {
91 DRM_DEBUG_KMS("Failed to write aux backlight level\n");
92 return;
93 }
94}
95
96static void intel_dp_aux_enable_backlight(struct intel_connector *connector)
97{
98 struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
99 uint8_t dpcd_buf = 0;
100
101 set_aux_backlight_enable(intel_dp, true);
102
103 if ((drm_dp_dpcd_readb(&intel_dp->aux,
104 DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &dpcd_buf) == 1) &&
105 ((dpcd_buf & DP_EDP_BACKLIGHT_CONTROL_MODE_MASK) ==
106 DP_EDP_BACKLIGHT_CONTROL_MODE_PRESET))
107 drm_dp_dpcd_writeb(&intel_dp->aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
108 (dpcd_buf | DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD));
109}
110
111static void intel_dp_aux_disable_backlight(struct intel_connector *connector)
112{
113 set_aux_backlight_enable(enc_to_intel_dp(&connector->encoder->base), false);
114}
115
116static int intel_dp_aux_setup_backlight(struct intel_connector *connector,
117 enum pipe pipe)
118{
119 struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
120 struct intel_panel *panel = &connector->panel;
121
122 intel_dp_aux_enable_backlight(connector);
123
124 if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
125 panel->backlight.max = 0xFFFF;
126 else
127 panel->backlight.max = 0xFF;
128
129 panel->backlight.min = 0;
130 panel->backlight.level = intel_dp_aux_get_backlight(connector);
131
132 panel->backlight.enabled = panel->backlight.level != 0;
133
134 return 0;
135}
136
137static bool
138intel_dp_aux_display_control_capable(struct intel_connector *connector)
139{
140 struct intel_dp *intel_dp = enc_to_intel_dp(&connector->encoder->base);
141
142 /* Check the eDP Display control capabilities registers to determine if
143 * the panel can support backlight control over the aux channel
144 */
145 if (intel_dp->edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP &&
146 (intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP) &&
147 !((intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_PIN_ENABLE_CAP) ||
148 (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP))) {
149 DRM_DEBUG_KMS("AUX Backlight Control Supported!\n");
150 return true;
151 }
152 return false;
153}
154
155int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector)
156{
157 struct intel_panel *panel = &intel_connector->panel;
158
159 if (!i915.enable_dpcd_backlight)
160 return -ENODEV;
161
162 if (!intel_dp_aux_display_control_capable(intel_connector))
163 return -ENODEV;
164
165 panel->backlight.setup = intel_dp_aux_setup_backlight;
166 panel->backlight.enable = intel_dp_aux_enable_backlight;
167 panel->backlight.disable = intel_dp_aux_disable_backlight;
168 panel->backlight.set = intel_dp_aux_set_backlight;
169 panel->backlight.get = intel_dp_aux_get_backlight;
170
171 return 0;
172}
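
The new file plugs AUX/DPCD backlight control into the generic intel_panel backlight hooks. As a standalone illustration of the 8-bit vs 16-bit brightness encoding used by intel_dp_aux_set_backlight() above, a minimal sketch follows; the helper is not part of the driver (the real function always writes both bytes and derives the split from DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT):

#include <stdint.h>
#include <stddef.h>

/*
 * Sketch only: with a 16-bit capable TCON the level is split across the
 * MSB and LSB brightness registers, otherwise only the MSB register
 * carries the level.
 */
static size_t pack_aux_backlight_level(uint32_t level, int has_16bit,
                                       uint8_t buf[2])
{
        if (has_16bit) {
                buf[0] = (level >> 8) & 0xff;   /* DP_EDP_BACKLIGHT_BRIGHTNESS_MSB */
                buf[1] = level & 0xff;          /* DP_EDP_BACKLIGHT_BRIGHTNESS_LSB */
                return 2;
        }

        buf[0] = level & 0xff;                  /* 8-bit mode: MSB register only */
        buf[1] = 0;
        return 1;
}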
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 7a34090cef34..68a005d729e9 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -47,7 +47,6 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
47 47
48 pipe_config->dp_encoder_is_mst = true; 48 pipe_config->dp_encoder_is_mst = true;
49 pipe_config->has_pch_encoder = false; 49 pipe_config->has_pch_encoder = false;
50 pipe_config->has_dp_encoder = true;
51 bpp = 24; 50 bpp = 24;
52 /* 51 /*
53 * for MST we always configure max link bw - the spec doesn't 52 * for MST we always configure max link bw - the spec doesn't
@@ -140,7 +139,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder)
140 struct intel_digital_port *intel_dig_port = intel_mst->primary; 139 struct intel_digital_port *intel_dig_port = intel_mst->primary;
141 struct intel_dp *intel_dp = &intel_dig_port->dp; 140 struct intel_dp *intel_dp = &intel_dig_port->dp;
142 struct drm_device *dev = encoder->base.dev; 141 struct drm_device *dev = encoder->base.dev;
143 struct drm_i915_private *dev_priv = dev->dev_private; 142 struct drm_i915_private *dev_priv = to_i915(dev);
144 enum port port = intel_dig_port->port; 143 enum port port = intel_dig_port->port;
145 int ret; 144 int ret;
146 uint32_t temp; 145 uint32_t temp;
@@ -207,14 +206,17 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder)
207 struct intel_digital_port *intel_dig_port = intel_mst->primary; 206 struct intel_digital_port *intel_dig_port = intel_mst->primary;
208 struct intel_dp *intel_dp = &intel_dig_port->dp; 207 struct intel_dp *intel_dp = &intel_dig_port->dp;
209 struct drm_device *dev = intel_dig_port->base.base.dev; 208 struct drm_device *dev = intel_dig_port->base.base.dev;
210 struct drm_i915_private *dev_priv = dev->dev_private; 209 struct drm_i915_private *dev_priv = to_i915(dev);
211 enum port port = intel_dig_port->port; 210 enum port port = intel_dig_port->port;
212 int ret; 211 int ret;
213 212
214 DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links); 213 DRM_DEBUG_KMS("%d\n", intel_dp->active_mst_links);
215 214
216 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_ACT_SENT), 215 if (intel_wait_for_register(dev_priv,
217 1)) 216 DP_TP_STATUS(port),
217 DP_TP_STATUS_ACT_SENT,
218 DP_TP_STATUS_ACT_SENT,
219 1))
218 DRM_ERROR("Timed out waiting for ACT sent\n"); 220 DRM_ERROR("Timed out waiting for ACT sent\n");
219 221
220 ret = drm_dp_check_act_status(&intel_dp->mst_mgr); 222 ret = drm_dp_check_act_status(&intel_dp->mst_mgr);
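
The open-coded wait_for() poll on DP_TP_STATUS is replaced by intel_wait_for_register(dev_priv, reg, mask, value, timeout_ms), which succeeds once (read(reg) & mask) == value or gives up after the timeout. A rough, non-kernel sketch of that contract (the real i915 helper additionally busy-waits briefly before sleeping):

#include <stdint.h>

/*
 * Sketch of a masked register poll with the same argument order as the
 * intel_wait_for_register() calls above. read_reg() and sleep_ms() are
 * placeholders; this is not the i915 implementation.
 */
static int wait_for_register_sketch(uint32_t (*read_reg)(uint32_t reg),
                                    uint32_t reg, uint32_t mask,
                                    uint32_t value, unsigned int timeout_ms,
                                    void (*sleep_ms)(unsigned int ms))
{
        unsigned int waited_ms = 0;

        while ((read_reg(reg) & mask) != value) {
                if (waited_ms++ >= timeout_ms)
                        return -1;      /* -ETIMEDOUT in the kernel */
                sleep_ms(1);
        }

        return 0;
}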
@@ -239,12 +241,10 @@ static void intel_dp_mst_enc_get_config(struct intel_encoder *encoder,
239 struct intel_digital_port *intel_dig_port = intel_mst->primary; 241 struct intel_digital_port *intel_dig_port = intel_mst->primary;
240 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 242 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
241 struct drm_device *dev = encoder->base.dev; 243 struct drm_device *dev = encoder->base.dev;
242 struct drm_i915_private *dev_priv = dev->dev_private; 244 struct drm_i915_private *dev_priv = to_i915(dev);
243 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; 245 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
244 u32 temp, flags = 0; 246 u32 temp, flags = 0;
245 247
246 pipe_config->has_dp_encoder = true;
247
248 temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); 248 temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
249 if (temp & TRANS_DDI_PHSYNC) 249 if (temp & TRANS_DDI_PHSYNC)
250 flags |= DRM_MODE_FLAG_PHSYNC; 250 flags |= DRM_MODE_FLAG_PHSYNC;
@@ -336,6 +336,8 @@ static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
336 .fill_modes = drm_helper_probe_single_connector_modes, 336 .fill_modes = drm_helper_probe_single_connector_modes,
337 .set_property = intel_dp_mst_set_property, 337 .set_property = intel_dp_mst_set_property,
338 .atomic_get_property = intel_connector_atomic_get_property, 338 .atomic_get_property = intel_connector_atomic_get_property,
339 .late_register = intel_connector_register,
340 .early_unregister = intel_connector_unregister,
339 .destroy = intel_dp_mst_connector_destroy, 341 .destroy = intel_dp_mst_connector_destroy,
340 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 342 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
341 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, 343 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
@@ -455,7 +457,6 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
455 drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs, DRM_MODE_CONNECTOR_DisplayPort); 457 drm_connector_init(dev, connector, &intel_dp_mst_connector_funcs, DRM_MODE_CONNECTOR_DisplayPort);
456 drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs); 458 drm_connector_helper_add(connector, &intel_dp_mst_connector_helper_funcs);
457 459
458 intel_connector->unregister = intel_connector_unregister;
459 intel_connector->get_hw_state = intel_dp_mst_get_hw_state; 460 intel_connector->get_hw_state = intel_dp_mst_get_hw_state;
460 intel_connector->mst_port = intel_dp; 461 intel_connector->mst_port = intel_dp;
461 intel_connector->port = port; 462 intel_connector->port = port;
@@ -477,9 +478,11 @@ static void intel_dp_register_mst_connector(struct drm_connector *connector)
477{ 478{
478 struct intel_connector *intel_connector = to_intel_connector(connector); 479 struct intel_connector *intel_connector = to_intel_connector(connector);
479 struct drm_device *dev = connector->dev; 480 struct drm_device *dev = connector->dev;
481
480 drm_modeset_lock_all(dev); 482 drm_modeset_lock_all(dev);
481 intel_connector_add_to_fbdev(intel_connector); 483 intel_connector_add_to_fbdev(intel_connector);
482 drm_modeset_unlock_all(dev); 484 drm_modeset_unlock_all(dev);
485
483 drm_connector_register(&intel_connector->base); 486 drm_connector_register(&intel_connector->base);
484} 487}
485 488
@@ -489,7 +492,7 @@ static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
489 struct intel_connector *intel_connector = to_intel_connector(connector); 492 struct intel_connector *intel_connector = to_intel_connector(connector);
490 struct drm_device *dev = connector->dev; 493 struct drm_device *dev = connector->dev;
491 494
492 intel_connector->unregister(intel_connector); 495 drm_connector_unregister(connector);
493 496
494 /* need to nuke the connector */ 497 /* need to nuke the connector */
495 drm_modeset_lock_all(dev); 498 drm_modeset_lock_all(dev);
@@ -534,7 +537,7 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum
534 intel_mst->primary = intel_dig_port; 537 intel_mst->primary = intel_dig_port;
535 538
536 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_mst_enc_funcs, 539 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_mst_enc_funcs,
537 DRM_MODE_ENCODER_DPMST, NULL); 540 DRM_MODE_ENCODER_DPMST, "DP-MST %c", pipe_name(pipe));
538 541
539 intel_encoder->type = INTEL_OUTPUT_DP_MST; 542 intel_encoder->type = INTEL_OUTPUT_DP_MST;
540 intel_encoder->crtc_mask = 0x7; 543 intel_encoder->crtc_mask = 0x7;
diff --git a/drivers/gpu/drm/i915/intel_dpio_phy.c b/drivers/gpu/drm/i915/intel_dpio_phy.c
new file mode 100644
index 000000000000..047f48748944
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dpio_phy.c
@@ -0,0 +1,470 @@
1/*
2 * Copyright © 2014-2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24#include "intel_drv.h"
25
26void chv_set_phy_signal_level(struct intel_encoder *encoder,
27 u32 deemph_reg_value, u32 margin_reg_value,
28 bool uniq_trans_scale)
29{
30 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
31 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
32 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
33 enum dpio_channel ch = vlv_dport_to_channel(dport);
34 enum pipe pipe = intel_crtc->pipe;
35 u32 val;
36 int i;
37
38 mutex_lock(&dev_priv->sb_lock);
39
40 /* Clear calc init */
41 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
42 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
43 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
44 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
45 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
46
47 if (intel_crtc->config->lane_count > 2) {
48 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
49 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
50 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
51 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
52 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
53 }
54
55 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
56 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
57 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
58 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
59
60 if (intel_crtc->config->lane_count > 2) {
61 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
62 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
63 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
64 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
65 }
66
67 /* Program swing deemph */
68 for (i = 0; i < intel_crtc->config->lane_count; i++) {
69 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
70 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
71 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
72 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
73 }
74
75 /* Program swing margin */
76 for (i = 0; i < intel_crtc->config->lane_count; i++) {
77 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
78
79 val &= ~DPIO_SWING_MARGIN000_MASK;
80 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
81
82 /*
83 * Supposedly this value shouldn't matter when unique transition
84 * scale is disabled, but in fact it does matter. Let's just
85 * always program the same value and hope it's OK.
86 */
87 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
88 val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
89
90 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
91 }
92
93 /*
94 * The document said it needs to set bit 27 for ch0 and bit 26
95 * for ch1. Might be a typo in the doc.
96 * For now, for this unique transition scale selection, set bit
97 * 27 for ch0 and ch1.
98 */
99 for (i = 0; i < intel_crtc->config->lane_count; i++) {
100 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
101 if (uniq_trans_scale)
102 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
103 else
104 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
105 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
106 }
107
108 /* Start swing calculation */
109 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
110 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
111 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
112
113 if (intel_crtc->config->lane_count > 2) {
114 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
115 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
116 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
117 }
118
119 mutex_unlock(&dev_priv->sb_lock);
120
121}
122
123void chv_data_lane_soft_reset(struct intel_encoder *encoder,
124 bool reset)
125{
126 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
127 enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
128 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
129 enum pipe pipe = crtc->pipe;
130 uint32_t val;
131
132 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
133 if (reset)
134 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
135 else
136 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
137 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
138
139 if (crtc->config->lane_count > 2) {
140 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
141 if (reset)
142 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
143 else
144 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
145 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
146 }
147
148 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
149 val |= CHV_PCS_REQ_SOFTRESET_EN;
150 if (reset)
151 val &= ~DPIO_PCS_CLK_SOFT_RESET;
152 else
153 val |= DPIO_PCS_CLK_SOFT_RESET;
154 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
155
156 if (crtc->config->lane_count > 2) {
157 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
158 val |= CHV_PCS_REQ_SOFTRESET_EN;
159 if (reset)
160 val &= ~DPIO_PCS_CLK_SOFT_RESET;
161 else
162 val |= DPIO_PCS_CLK_SOFT_RESET;
163 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
164 }
165}
166
167void chv_phy_pre_pll_enable(struct intel_encoder *encoder)
168{
169 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
170 struct drm_device *dev = encoder->base.dev;
171 struct drm_i915_private *dev_priv = to_i915(dev);
172 struct intel_crtc *intel_crtc =
173 to_intel_crtc(encoder->base.crtc);
174 enum dpio_channel ch = vlv_dport_to_channel(dport);
175 enum pipe pipe = intel_crtc->pipe;
176 unsigned int lane_mask =
177 intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
178 u32 val;
179
180 /*
181 * Must trick the second common lane into life.
182 * Otherwise we can't even access the PLL.
183 */
184 if (ch == DPIO_CH0 && pipe == PIPE_B)
185 dport->release_cl2_override =
186 !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
187
188 chv_phy_powergate_lanes(encoder, true, lane_mask);
189
190 mutex_lock(&dev_priv->sb_lock);
191
192 /* Assert data lane reset */
193 chv_data_lane_soft_reset(encoder, true);
194
195 /* program left/right clock distribution */
196 if (pipe != PIPE_B) {
197 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
198 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
199 if (ch == DPIO_CH0)
200 val |= CHV_BUFLEFTENA1_FORCE;
201 if (ch == DPIO_CH1)
202 val |= CHV_BUFRIGHTENA1_FORCE;
203 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
204 } else {
205 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
206 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
207 if (ch == DPIO_CH0)
208 val |= CHV_BUFLEFTENA2_FORCE;
209 if (ch == DPIO_CH1)
210 val |= CHV_BUFRIGHTENA2_FORCE;
211 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
212 }
213
214 /* program clock channel usage */
215 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
216 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
217 if (pipe != PIPE_B)
218 val &= ~CHV_PCS_USEDCLKCHANNEL;
219 else
220 val |= CHV_PCS_USEDCLKCHANNEL;
221 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
222
223 if (intel_crtc->config->lane_count > 2) {
224 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
225 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
226 if (pipe != PIPE_B)
227 val &= ~CHV_PCS_USEDCLKCHANNEL;
228 else
229 val |= CHV_PCS_USEDCLKCHANNEL;
230 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
231 }
232
233 /*
234 * This is a bit weird since generally CL
235 * matches the pipe, but here we need to
236 * pick the CL based on the port.
237 */
238 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
239 if (pipe != PIPE_B)
240 val &= ~CHV_CMN_USEDCLKCHANNEL;
241 else
242 val |= CHV_CMN_USEDCLKCHANNEL;
243 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
244
245 mutex_unlock(&dev_priv->sb_lock);
246}
247
248void chv_phy_pre_encoder_enable(struct intel_encoder *encoder)
249{
250 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
251 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
252 struct drm_device *dev = encoder->base.dev;
253 struct drm_i915_private *dev_priv = to_i915(dev);
254 struct intel_crtc *intel_crtc =
255 to_intel_crtc(encoder->base.crtc);
256 enum dpio_channel ch = vlv_dport_to_channel(dport);
257 int pipe = intel_crtc->pipe;
258 int data, i, stagger;
259 u32 val;
260
261 mutex_lock(&dev_priv->sb_lock);
262
263 /* allow hardware to manage TX FIFO reset source */
264 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
265 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
266 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
267
268 if (intel_crtc->config->lane_count > 2) {
269 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
270 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
271 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
272 }
273
274 /* Program Tx lane latency optimal setting */
275 for (i = 0; i < intel_crtc->config->lane_count; i++) {
276 /* Set the upar bit */
277 if (intel_crtc->config->lane_count == 1)
278 data = 0x0;
279 else
280 data = (i == 1) ? 0x0 : 0x1;
281 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
282 data << DPIO_UPAR_SHIFT);
283 }
284
285 /* Data lane stagger programming */
286 if (intel_crtc->config->port_clock > 270000)
287 stagger = 0x18;
288 else if (intel_crtc->config->port_clock > 135000)
289 stagger = 0xd;
290 else if (intel_crtc->config->port_clock > 67500)
291 stagger = 0x7;
292 else if (intel_crtc->config->port_clock > 33750)
293 stagger = 0x4;
294 else
295 stagger = 0x2;
296
297 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
298 val |= DPIO_TX2_STAGGER_MASK(0x1f);
299 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
300
301 if (intel_crtc->config->lane_count > 2) {
302 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
303 val |= DPIO_TX2_STAGGER_MASK(0x1f);
304 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
305 }
306
307 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
308 DPIO_LANESTAGGER_STRAP(stagger) |
309 DPIO_LANESTAGGER_STRAP_OVRD |
310 DPIO_TX1_STAGGER_MASK(0x1f) |
311 DPIO_TX1_STAGGER_MULT(6) |
312 DPIO_TX2_STAGGER_MULT(0));
313
314 if (intel_crtc->config->lane_count > 2) {
315 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
316 DPIO_LANESTAGGER_STRAP(stagger) |
317 DPIO_LANESTAGGER_STRAP_OVRD |
318 DPIO_TX1_STAGGER_MASK(0x1f) |
319 DPIO_TX1_STAGGER_MULT(7) |
320 DPIO_TX2_STAGGER_MULT(5));
321 }
322
323 /* Deassert data lane reset */
324 chv_data_lane_soft_reset(encoder, false);
325
326 mutex_unlock(&dev_priv->sb_lock);
327}
328
329void chv_phy_release_cl2_override(struct intel_encoder *encoder)
330{
331 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
332 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
333
334 if (dport->release_cl2_override) {
335 chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
336 dport->release_cl2_override = false;
337 }
338}
339
340void chv_phy_post_pll_disable(struct intel_encoder *encoder)
341{
342 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
343 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
344 u32 val;
345
346 mutex_lock(&dev_priv->sb_lock);
347
348 /* disable left/right clock distribution */
349 if (pipe != PIPE_B) {
350 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
351 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
352 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
353 } else {
354 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
355 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
356 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
357 }
358
359 mutex_unlock(&dev_priv->sb_lock);
360
361 /*
362 * Leave the power down bit cleared for at least one
363 * lane so that chv_powergate_phy_ch() will power
364 * on something when the channel is otherwise unused.
365 * When the port is off and the override is removed
366 * the lanes power down anyway, so otherwise it doesn't
367 * really matter what the state of power down bits is
368 * after this.
369 */
370 chv_phy_powergate_lanes(encoder, false, 0x0);
371}
372
373void vlv_set_phy_signal_level(struct intel_encoder *encoder,
374 u32 demph_reg_value, u32 preemph_reg_value,
375 u32 uniqtranscale_reg_value, u32 tx3_demph)
376{
377 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
378 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
379 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
380 enum dpio_channel port = vlv_dport_to_channel(dport);
381 int pipe = intel_crtc->pipe;
382
383 mutex_lock(&dev_priv->sb_lock);
384 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
385 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
386 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
387 uniqtranscale_reg_value);
388 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
389
390 if (tx3_demph)
391 vlv_dpio_write(dev_priv, pipe, VLV_TX3_DW4(port), tx3_demph);
392
393 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
394 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
395 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
396 mutex_unlock(&dev_priv->sb_lock);
397}
398
399void vlv_phy_pre_pll_enable(struct intel_encoder *encoder)
400{
401 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
402 struct drm_device *dev = encoder->base.dev;
403 struct drm_i915_private *dev_priv = to_i915(dev);
404 struct intel_crtc *intel_crtc =
405 to_intel_crtc(encoder->base.crtc);
406 enum dpio_channel port = vlv_dport_to_channel(dport);
407 int pipe = intel_crtc->pipe;
408
409 /* Program Tx lane resets to default */
410 mutex_lock(&dev_priv->sb_lock);
411 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
412 DPIO_PCS_TX_LANE2_RESET |
413 DPIO_PCS_TX_LANE1_RESET);
414 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
415 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
416 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
417 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
418 DPIO_PCS_CLK_SOFT_RESET);
419
420 /* Fix up inter-pair skew failure */
421 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
422 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
423 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
424 mutex_unlock(&dev_priv->sb_lock);
425}
426
427void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder)
428{
429 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
430 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
431 struct drm_device *dev = encoder->base.dev;
432 struct drm_i915_private *dev_priv = to_i915(dev);
433 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
434 enum dpio_channel port = vlv_dport_to_channel(dport);
435 int pipe = intel_crtc->pipe;
436 u32 val;
437
438 mutex_lock(&dev_priv->sb_lock);
439
440 /* Enable clock channels for this port */
441 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
442 val = 0;
443 if (pipe)
444 val |= (1<<21);
445 else
446 val &= ~(1<<21);
447 val |= 0x001000c4;
448 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
449
450 /* Program lane clock */
451 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
452 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
453
454 mutex_unlock(&dev_priv->sb_lock);
455}
456
457void vlv_phy_reset_lanes(struct intel_encoder *encoder)
458{
459 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
460 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
461 struct intel_crtc *intel_crtc =
462 to_intel_crtc(encoder->base.crtc);
463 enum dpio_channel port = vlv_dport_to_channel(dport);
464 int pipe = intel_crtc->pipe;
465
466 mutex_lock(&dev_priv->sb_lock);
467 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000);
468 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060);
469 mutex_unlock(&dev_priv->sb_lock);
470}
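
Almost every routine in the new intel_dpio_phy.c repeats the same step: under dev_priv->sb_lock, read a DPIO PCS/TX register over the sideband bus, clear and set a few fields, write it back, and repeat for the second PCS group when more than two lanes are active. The recurring step, factored out purely for illustration (vlv_dpio_read()/vlv_dpio_write() are the real i915 sideband accessors; the helper itself is not part of the file):

/*
 * Illustration only: the sideband read-modify-write idiom used
 * throughout intel_dpio_phy.c. The caller must hold dev_priv->sb_lock.
 */
static void dpio_rmw_sketch(struct drm_i915_private *dev_priv,
                            enum pipe pipe, int reg, u32 clear, u32 set)
{
        u32 val;

        val = vlv_dpio_read(dev_priv, pipe, reg);
        val &= ~clear;
        val |= set;
        vlv_dpio_write(dev_priv, pipe, reg, val);
}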
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c
index 58f60b27837e..5c1f2d235ffa 100644
--- a/drivers/gpu/drm/i915/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c
@@ -83,7 +83,7 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
83void intel_prepare_shared_dpll(struct intel_crtc *crtc) 83void intel_prepare_shared_dpll(struct intel_crtc *crtc)
84{ 84{
85 struct drm_device *dev = crtc->base.dev; 85 struct drm_device *dev = crtc->base.dev;
86 struct drm_i915_private *dev_priv = dev->dev_private; 86 struct drm_i915_private *dev_priv = to_i915(dev);
87 struct intel_shared_dpll *pll = crtc->config->shared_dpll; 87 struct intel_shared_dpll *pll = crtc->config->shared_dpll;
88 88
89 if (WARN_ON(pll == NULL)) 89 if (WARN_ON(pll == NULL))
@@ -112,7 +112,7 @@ void intel_prepare_shared_dpll(struct intel_crtc *crtc)
112void intel_enable_shared_dpll(struct intel_crtc *crtc) 112void intel_enable_shared_dpll(struct intel_crtc *crtc)
113{ 113{
114 struct drm_device *dev = crtc->base.dev; 114 struct drm_device *dev = crtc->base.dev;
115 struct drm_i915_private *dev_priv = dev->dev_private; 115 struct drm_i915_private *dev_priv = to_i915(dev);
116 struct intel_shared_dpll *pll = crtc->config->shared_dpll; 116 struct intel_shared_dpll *pll = crtc->config->shared_dpll;
117 unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base); 117 unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base);
118 unsigned old_mask; 118 unsigned old_mask;
@@ -151,7 +151,7 @@ out:
151void intel_disable_shared_dpll(struct intel_crtc *crtc) 151void intel_disable_shared_dpll(struct intel_crtc *crtc)
152{ 152{
153 struct drm_device *dev = crtc->base.dev; 153 struct drm_device *dev = crtc->base.dev;
154 struct drm_i915_private *dev_priv = dev->dev_private; 154 struct drm_i915_private *dev_priv = to_i915(dev);
155 struct intel_shared_dpll *pll = crtc->config->shared_dpll; 155 struct intel_shared_dpll *pll = crtc->config->shared_dpll;
156 unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base); 156 unsigned crtc_mask = 1 << drm_crtc_index(&crtc->base);
157 157
@@ -191,7 +191,7 @@ intel_find_shared_dpll(struct intel_crtc *crtc,
191 enum intel_dpll_id range_min, 191 enum intel_dpll_id range_min,
192 enum intel_dpll_id range_max) 192 enum intel_dpll_id range_max)
193{ 193{
194 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 194 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
195 struct intel_shared_dpll *pll; 195 struct intel_shared_dpll *pll;
196 struct intel_shared_dpll_config *shared_dpll; 196 struct intel_shared_dpll_config *shared_dpll;
197 enum intel_dpll_id i; 197 enum intel_dpll_id i;
@@ -208,8 +208,8 @@ intel_find_shared_dpll(struct intel_crtc *crtc,
208 if (memcmp(&crtc_state->dpll_hw_state, 208 if (memcmp(&crtc_state->dpll_hw_state,
209 &shared_dpll[i].hw_state, 209 &shared_dpll[i].hw_state,
210 sizeof(crtc_state->dpll_hw_state)) == 0) { 210 sizeof(crtc_state->dpll_hw_state)) == 0) {
211 DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, active %x)\n", 211 DRM_DEBUG_KMS("[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n",
212 crtc->base.base.id, pll->name, 212 crtc->base.base.id, crtc->base.name, pll->name,
213 shared_dpll[i].crtc_mask, 213 shared_dpll[i].crtc_mask,
214 pll->active_mask); 214 pll->active_mask);
215 return pll; 215 return pll;
@@ -220,8 +220,8 @@ intel_find_shared_dpll(struct intel_crtc *crtc,
220 for (i = range_min; i <= range_max; i++) { 220 for (i = range_min; i <= range_max; i++) {
221 pll = &dev_priv->shared_dplls[i]; 221 pll = &dev_priv->shared_dplls[i];
222 if (shared_dpll[i].crtc_mask == 0) { 222 if (shared_dpll[i].crtc_mask == 0) {
223 DRM_DEBUG_KMS("CRTC:%d allocated %s\n", 223 DRM_DEBUG_KMS("[CRTC:%d:%s] allocated %s\n",
224 crtc->base.base.id, pll->name); 224 crtc->base.base.id, crtc->base.name, pll->name);
225 return pll; 225 return pll;
226 } 226 }
227 } 227 }
@@ -331,7 +331,7 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
331static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv, 331static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
332 struct intel_shared_dpll *pll) 332 struct intel_shared_dpll *pll)
333{ 333{
334 struct drm_device *dev = dev_priv->dev; 334 struct drm_device *dev = &dev_priv->drm;
335 struct intel_crtc *crtc; 335 struct intel_crtc *crtc;
336 336
337 /* Make sure no transcoder is still depending on us. */ 337 /* Make sure no transcoder is still depending on us. */
@@ -358,8 +358,8 @@ ibx_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
358 i = (enum intel_dpll_id) crtc->pipe; 358 i = (enum intel_dpll_id) crtc->pipe;
359 pll = &dev_priv->shared_dplls[i]; 359 pll = &dev_priv->shared_dplls[i];
360 360
361 DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n", 361 DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
362 crtc->base.base.id, pll->name); 362 crtc->base.base.id, crtc->base.name, pll->name);
363 } else { 363 } else {
364 pll = intel_find_shared_dpll(crtc, crtc_state, 364 pll = intel_find_shared_dpll(crtc, crtc_state,
365 DPLL_ID_PCH_PLL_A, 365 DPLL_ID_PCH_PLL_A,
@@ -713,7 +713,7 @@ hsw_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
713 pll = intel_find_shared_dpll(crtc, crtc_state, 713 pll = intel_find_shared_dpll(crtc, crtc_state,
714 DPLL_ID_WRPLL1, DPLL_ID_WRPLL2); 714 DPLL_ID_WRPLL1, DPLL_ID_WRPLL2);
715 715
716 } else if (encoder->type == INTEL_OUTPUT_DISPLAYPORT || 716 } else if (encoder->type == INTEL_OUTPUT_DP ||
717 encoder->type == INTEL_OUTPUT_DP_MST || 717 encoder->type == INTEL_OUTPUT_DP_MST ||
718 encoder->type == INTEL_OUTPUT_EDP) { 718 encoder->type == INTEL_OUTPUT_EDP) {
719 enum intel_dpll_id pll_id; 719 enum intel_dpll_id pll_id;
@@ -856,7 +856,11 @@ static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
856 I915_WRITE(regs[pll->id].ctl, 856 I915_WRITE(regs[pll->id].ctl,
857 I915_READ(regs[pll->id].ctl) | LCPLL_PLL_ENABLE); 857 I915_READ(regs[pll->id].ctl) | LCPLL_PLL_ENABLE);
858 858
859 if (wait_for(I915_READ(DPLL_STATUS) & DPLL_LOCK(pll->id), 5)) 859 if (intel_wait_for_register(dev_priv,
860 DPLL_STATUS,
861 DPLL_LOCK(pll->id),
862 DPLL_LOCK(pll->id),
863 5))
860 DRM_ERROR("DPLL %d not locked\n", pll->id); 864 DRM_ERROR("DPLL %d not locked\n", pll->id);
861} 865}
862 866
@@ -1222,7 +1226,7 @@ skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
1222 DPLL_CFGCR2_KDIV(wrpll_params.kdiv) | 1226 DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
1223 DPLL_CFGCR2_PDIV(wrpll_params.pdiv) | 1227 DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
1224 wrpll_params.central_freq; 1228 wrpll_params.central_freq;
1225 } else if (encoder->type == INTEL_OUTPUT_DISPLAYPORT || 1229 } else if (encoder->type == INTEL_OUTPUT_DP ||
1226 encoder->type == INTEL_OUTPUT_DP_MST || 1230 encoder->type == INTEL_OUTPUT_DP_MST ||
1227 encoder->type == INTEL_OUTPUT_EDP) { 1231 encoder->type == INTEL_OUTPUT_EDP) {
1228 switch (crtc_state->port_clock / 2) { 1232 switch (crtc_state->port_clock / 2) {
@@ -1239,9 +1243,6 @@ skl_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
1239 case 162000: 1243 case 162000:
1240 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0); 1244 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, 0);
1241 break; 1245 break;
1242 /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
1243 results in CDCLK change. Need to handle the change of CDCLK by
1244 disabling pipes and re-enabling them */
1245 case 108000: 1246 case 108000:
1246 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0); 1247 ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, 0);
1247 break; 1248 break;
@@ -1511,7 +1512,7 @@ bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
1511 int clock = crtc_state->port_clock; 1512 int clock = crtc_state->port_clock;
1512 1513
1513 if (encoder->type == INTEL_OUTPUT_HDMI) { 1514 if (encoder->type == INTEL_OUTPUT_HDMI) {
1514 intel_clock_t best_clock; 1515 struct dpll best_clock;
1515 1516
1516 /* Calculate HDMI div */ 1517 /* Calculate HDMI div */
1517 /* 1518 /*
@@ -1533,7 +1534,7 @@ bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
1533 clk_div.m2_frac_en = clk_div.m2_frac != 0; 1534 clk_div.m2_frac_en = clk_div.m2_frac != 0;
1534 1535
1535 vco = best_clock.vco; 1536 vco = best_clock.vco;
1536 } else if (encoder->type == INTEL_OUTPUT_DISPLAYPORT || 1537 } else if (encoder->type == INTEL_OUTPUT_DP ||
1537 encoder->type == INTEL_OUTPUT_EDP) { 1538 encoder->type == INTEL_OUTPUT_EDP) {
1538 int i; 1539 int i;
1539 1540
@@ -1616,8 +1617,8 @@ bxt_get_dpll(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state,
1616 i = (enum intel_dpll_id) intel_dig_port->port; 1617 i = (enum intel_dpll_id) intel_dig_port->port;
1617 pll = intel_get_shared_dpll_by_id(dev_priv, i); 1618 pll = intel_get_shared_dpll_by_id(dev_priv, i);
1618 1619
1619 DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n", 1620 DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
1620 crtc->base.base.id, pll->name); 1621 crtc->base.base.id, crtc->base.name, pll->name);
1621 1622
1622 intel_reference_shared_dpll(pll, crtc_state); 1623 intel_reference_shared_dpll(pll, crtc_state);
1623 1624
@@ -1635,19 +1636,11 @@ static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
1635 1636
1636static void intel_ddi_pll_init(struct drm_device *dev) 1637static void intel_ddi_pll_init(struct drm_device *dev)
1637{ 1638{
1638 struct drm_i915_private *dev_priv = dev->dev_private; 1639 struct drm_i915_private *dev_priv = to_i915(dev);
1639 uint32_t val = I915_READ(LCPLL_CTL); 1640
1640 1641 if (INTEL_GEN(dev_priv) < 9) {
1641 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { 1642 uint32_t val = I915_READ(LCPLL_CTL);
1642 int cdclk_freq; 1643
1643
1644 cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
1645 dev_priv->skl_boot_cdclk = cdclk_freq;
1646 if (skl_sanitize_cdclk(dev_priv))
1647 DRM_DEBUG_KMS("Sanitized cdclk programmed by pre-os\n");
1648 if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE))
1649 DRM_ERROR("LCPLL1 is disabled\n");
1650 } else if (!IS_BROXTON(dev_priv)) {
1651 /* 1644 /*
1652 * The LCPLL register should be turned on by the BIOS. For now 1645 * The LCPLL register should be turned on by the BIOS. For now
1653 * let's just check its state and print errors in case 1646 * let's just check its state and print errors in case
@@ -1730,7 +1723,7 @@ static const struct intel_dpll_mgr bxt_pll_mgr = {
1730 1723
1731void intel_shared_dpll_init(struct drm_device *dev) 1724void intel_shared_dpll_init(struct drm_device *dev)
1732{ 1725{
1733 struct drm_i915_private *dev_priv = dev->dev_private; 1726 struct drm_i915_private *dev_priv = to_i915(dev);
1734 const struct intel_dpll_mgr *dpll_mgr = NULL; 1727 const struct intel_dpll_mgr *dpll_mgr = NULL;
1735 const struct dpll_info *dpll_info; 1728 const struct dpll_info *dpll_info;
1736 int i; 1729 int i;
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index f7f0f01814f6..3329fc6a95f4 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -69,39 +69,63 @@
69}) 69})
70 70
71#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 1000) 71#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 1000)
72#define wait_for_us(COND, US) _wait_for((COND), (US), 1)
73 72
74/* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */ 73/* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */
75#if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT) 74#if defined(CONFIG_DRM_I915_DEBUG) && defined(CONFIG_PREEMPT_COUNT)
76# define _WAIT_FOR_ATOMIC_CHECK WARN_ON_ONCE(!in_atomic()) 75# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) WARN_ON_ONCE((ATOMIC) && !in_atomic())
77#else 76#else
78# define _WAIT_FOR_ATOMIC_CHECK do { } while (0) 77# define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) do { } while (0)
79#endif 78#endif
80 79
81#define _wait_for_atomic(COND, US) ({ \ 80#define _wait_for_atomic(COND, US, ATOMIC) \
82 unsigned long end__; \ 81({ \
83 int ret__ = 0; \ 82 int cpu, ret, timeout = (US) * 1000; \
84 _WAIT_FOR_ATOMIC_CHECK; \ 83 u64 base; \
84 _WAIT_FOR_ATOMIC_CHECK(ATOMIC); \
85 BUILD_BUG_ON((US) > 50000); \ 85 BUILD_BUG_ON((US) > 50000); \
86 end__ = (local_clock() >> 10) + (US) + 1; \ 86 if (!(ATOMIC)) { \
87 while (!(COND)) { \ 87 preempt_disable(); \
88 if (time_after((unsigned long)(local_clock() >> 10), end__)) { \ 88 cpu = smp_processor_id(); \
89 /* Unlike the regular wait_for(), this atomic variant \ 89 } \
90 * cannot be preempted (and we'll just ignore the issue\ 90 base = local_clock(); \
91 * of irq interruptions) and so we know that no time \ 91 for (;;) { \
92 * has passed since the last check of COND and can \ 92 u64 now = local_clock(); \
93 * immediately report the timeout. \ 93 if (!(ATOMIC)) \
94 */ \ 94 preempt_enable(); \
95 ret__ = -ETIMEDOUT; \ 95 if (COND) { \
96 ret = 0; \
97 break; \
98 } \
99 if (now - base >= timeout) { \
100 ret = -ETIMEDOUT; \
96 break; \ 101 break; \
97 } \ 102 } \
98 cpu_relax(); \ 103 cpu_relax(); \
104 if (!(ATOMIC)) { \
105 preempt_disable(); \
106 if (unlikely(cpu != smp_processor_id())) { \
107 timeout -= now - base; \
108 cpu = smp_processor_id(); \
109 base = local_clock(); \
110 } \
111 } \
99 } \ 112 } \
113 ret; \
114})
115
116#define wait_for_us(COND, US) \
117({ \
118 int ret__; \
119 BUILD_BUG_ON(!__builtin_constant_p(US)); \
120 if ((US) > 10) \
121 ret__ = _wait_for((COND), (US), 10); \
122 else \
123 ret__ = _wait_for_atomic((COND), (US), 0); \
100 ret__; \ 124 ret__; \
101}) 125})
102 126
103#define wait_for_atomic(COND, MS) _wait_for_atomic((COND), (MS) * 1000) 127#define wait_for_atomic(COND, MS) _wait_for_atomic((COND), (MS) * 1000, 1)
104#define wait_for_atomic_us(COND, US) _wait_for_atomic((COND), (US)) 128#define wait_for_atomic_us(COND, US) _wait_for_atomic((COND), (US), 1)
105 129
106#define KHz(x) (1000 * (x)) 130#define KHz(x) (1000 * (x))
107#define MHz(x) KHz(1000 * (x)) 131#define MHz(x) KHz(1000 * (x))
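
The reworked _wait_for_atomic() drops the old jiffy-derived deadline in favour of a nanosecond local_clock() budget: the condition is checked before declaring a timeout, and in the non-atomic case preemption is disabled only around each poll, with the deadline re-based if the task migrates to another CPU between polls. The core loop, written as a plain function with COND turned into a callback (a simplified, non-kernel sketch; the CPU-migration handling is omitted):

#include <stdint.h>

/*
 * Simplified sketch of the polling loop in _wait_for_atomic(): check the
 * condition first, then compare elapsed time against a nanosecond budget.
 * now_ns() stands in for local_clock().
 */
static int wait_for_atomic_sketch(int (*cond)(void *ctx), void *ctx,
                                  uint64_t timeout_us,
                                  uint64_t (*now_ns)(void))
{
        const uint64_t timeout_ns = timeout_us * 1000;
        const uint64_t base = now_ns();

        for (;;) {
                uint64_t now = now_ns();

                if (cond(ctx))
                        return 0;
                if (now - base >= timeout_ns)
                        return -1;      /* -ETIMEDOUT in the kernel */
                /* cpu_relax() goes here in the kernel loop */
        }
}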
@@ -135,7 +159,7 @@ enum intel_output_type {
135 INTEL_OUTPUT_LVDS = 4, 159 INTEL_OUTPUT_LVDS = 4,
136 INTEL_OUTPUT_TVOUT = 5, 160 INTEL_OUTPUT_TVOUT = 5,
137 INTEL_OUTPUT_HDMI = 6, 161 INTEL_OUTPUT_HDMI = 6,
138 INTEL_OUTPUT_DISPLAYPORT = 7, 162 INTEL_OUTPUT_DP = 7,
139 INTEL_OUTPUT_EDP = 8, 163 INTEL_OUTPUT_EDP = 8,
140 INTEL_OUTPUT_DSI = 9, 164 INTEL_OUTPUT_DSI = 9,
141 INTEL_OUTPUT_UNKNOWN = 10, 165 INTEL_OUTPUT_UNKNOWN = 10,
@@ -159,6 +183,7 @@ struct intel_framebuffer {
159struct intel_fbdev { 183struct intel_fbdev {
160 struct drm_fb_helper helper; 184 struct drm_fb_helper helper;
161 struct intel_framebuffer *fb; 185 struct intel_framebuffer *fb;
186 async_cookie_t cookie;
162 int preferred_bpp; 187 int preferred_bpp;
163}; 188};
164 189
@@ -242,14 +267,6 @@ struct intel_connector {
242 * and active (i.e. dpms ON state). */ 267 * and active (i.e. dpms ON state). */
243 bool (*get_hw_state)(struct intel_connector *); 268 bool (*get_hw_state)(struct intel_connector *);
244 269
245 /*
246 * Removes all interfaces through which the connector is accessible
247 * - like sysfs, debugfs entries -, so that no new operations can be
248 * started on the connector. Also makes sure all currently pending
249 * operations finish before returing.
250 */
251 void (*unregister)(struct intel_connector *);
252
253 /* Panel info for eDP and LVDS */ 270 /* Panel info for eDP and LVDS */
254 struct intel_panel panel; 271 struct intel_panel panel;
255 272
@@ -266,7 +283,7 @@ struct intel_connector {
266 struct intel_dp *mst_port; 283 struct intel_dp *mst_port;
267}; 284};
268 285
269typedef struct dpll { 286struct dpll {
270 /* given values */ 287 /* given values */
271 int n; 288 int n;
272 int m1, m2; 289 int m1, m2;
@@ -276,7 +293,7 @@ typedef struct dpll {
276 int vco; 293 int vco;
277 int m; 294 int m;
278 int p; 295 int p;
279} intel_clock_t; 296};
280 297
281struct intel_atomic_state { 298struct intel_atomic_state {
282 struct drm_atomic_state base; 299 struct drm_atomic_state base;
@@ -291,17 +308,32 @@ struct intel_atomic_state {
291 308
292 bool dpll_set, modeset; 309 bool dpll_set, modeset;
293 310
311 /*
312 * Does this transaction change the pipes that are active? This mask
313 * tracks which CRTCs have changed their active state at the end of
314 * the transaction (not counting the temporary disable during modesets).
315 * This mask should only be non-zero when intel_state->modeset is true,
316 * but the converse is not necessarily true; simply changing a mode may
317 * not flip the final active status of any CRTCs.
318 */
319 unsigned int active_pipe_changes;
320
294 unsigned int active_crtcs; 321 unsigned int active_crtcs;
295 unsigned int min_pixclk[I915_MAX_PIPES]; 322 unsigned int min_pixclk[I915_MAX_PIPES];
296 323
324 /* SKL/KBL Only */
325 unsigned int cdclk_pll_vco;
326
297 struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS]; 327 struct intel_shared_dpll_config shared_dpll[I915_NUM_PLLS];
298 struct intel_wm_config wm_config;
299 328
300 /* 329 /*
301 * Current watermarks can't be trusted during hardware readout, so 330 * Current watermarks can't be trusted during hardware readout, so
302 * don't bother calculating intermediate watermarks. 331 * don't bother calculating intermediate watermarks.
303 */ 332 */
304 bool skip_intermediate_wm; 333 bool skip_intermediate_wm;
334
335 /* Gen9+ only */
336 struct skl_wm_values wm_results;
305}; 337};
306 338
307struct intel_plane_state { 339struct intel_plane_state {
@@ -405,6 +437,48 @@ struct skl_pipe_wm {
405 uint32_t linetime; 437 uint32_t linetime;
406}; 438};
407 439
440struct intel_crtc_wm_state {
441 union {
442 struct {
443 /*
444 * Intermediate watermarks; these can be
445 * programmed immediately since they satisfy
446 * both the current configuration we're
447 * switching away from and the new
448 * configuration we're switching to.
449 */
450 struct intel_pipe_wm intermediate;
451
452 /*
453 * Optimal watermarks, programmed post-vblank
454 * when this state is committed.
455 */
456 struct intel_pipe_wm optimal;
457 } ilk;
458
459 struct {
460 /* gen9+ only needs 1-step wm programming */
461 struct skl_pipe_wm optimal;
462
463 /* cached plane data rate */
464 unsigned plane_data_rate[I915_MAX_PLANES];
465 unsigned plane_y_data_rate[I915_MAX_PLANES];
466
467 /* minimum block allocation */
468 uint16_t minimum_blocks[I915_MAX_PLANES];
469 uint16_t minimum_y_blocks[I915_MAX_PLANES];
470 } skl;
471 };
472
473 /*
474 * Platforms with two-step watermark programming will need to
475 * update watermark programming post-vblank to switch from the
476 * safe intermediate watermarks to the optimal final
477 * watermarks.
478 */
479 bool need_postvbl_update;
480};
481
408struct intel_crtc_state { 482struct intel_crtc_state {
409 struct drm_crtc_state base; 483 struct drm_crtc_state base;
410 484
@@ -448,12 +522,10 @@ struct intel_crtc_state {
448 */ 522 */
449 bool limited_color_range; 523 bool limited_color_range;
450 524
451 /* DP has a bunch of special case unfortunately, so mark the pipe 525 /* Bitmask of encoder types (enum intel_output_type)
452 * accordingly. */ 526 * driven by the pipe.
453 bool has_dp_encoder; 527 */
454 528 unsigned int output_types;
455 /* DSI has special cases */
456 bool has_dsi_encoder;
457 529
458 /* Whether we should send NULL infoframes. Required for audio. */ 530 /* Whether we should send NULL infoframes. Required for audio. */
459 bool has_hdmi_sink; 531 bool has_hdmi_sink;
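
With has_dp_encoder and has_dsi_encoder folded into the output_types bitmask, per-type checks become bit tests against enum intel_output_type; this series adds helpers along the lines of intel_crtc_has_type() for that purpose. Schematically (sketch only, not the exact helper):

/* Sketch: does this pipe drive an output of the given type? */
static inline bool crtc_has_type_sketch(const struct intel_crtc_state *state,
                                        enum intel_output_type type)
{
        return state->output_types & (1u << type);
}

/* e.g. the old "crtc_state->has_dp_encoder" check becomes roughly
 * crtc_has_type_sketch(crtc_state, INTEL_OUTPUT_DP). */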
@@ -522,6 +594,12 @@ struct intel_crtc_state {
522 594
523 uint8_t lane_count; 595 uint8_t lane_count;
524 596
597 /*
598 * Used by platforms having DP/HDMI PHY with programmable lane
599 * latency optimization.
600 */
601 uint8_t lane_lat_optim_mask;
602
525 /* Panel fitter controls for gen2-gen4 + VLV */ 603 /* Panel fitter controls for gen2-gen4 + VLV */
526 struct { 604 struct {
527 u32 control; 605 u32 control;
@@ -558,32 +636,7 @@ struct intel_crtc_state {
558 /* IVB sprite scaling w/a (WaCxSRDisabledForSpriteScaling:ivb) */ 636 /* IVB sprite scaling w/a (WaCxSRDisabledForSpriteScaling:ivb) */
559 bool disable_lp_wm; 637 bool disable_lp_wm;
560 638
561 struct { 639 struct intel_crtc_wm_state wm;
562 /*
563 * Optimal watermarks, programmed post-vblank when this state
564 * is committed.
565 */
566 union {
567 struct intel_pipe_wm ilk;
568 struct skl_pipe_wm skl;
569 } optimal;
570
571 /*
572 * Intermediate watermarks; these can be programmed immediately
573 * since they satisfy both the current configuration we're
574 * switching away from and the new configuration we're switching
575 * to.
576 */
577 struct intel_pipe_wm intermediate;
578
579 /*
580 * Platforms with two-step watermark programming will need to
581 * update watermark programming post-vblank to switch from the
582 * safe intermediate watermarks to the optimal final
583 * watermarks.
584 */
585 bool need_postvbl_update;
586 } wm;
587 640
588 /* Gamma mode programmed on the pipe */ 641 /* Gamma mode programmed on the pipe */
589 uint32_t gamma_mode; 642 uint32_t gamma_mode;
@@ -598,14 +651,6 @@ struct vlv_wm_state {
598 bool cxsr; 651 bool cxsr;
599}; 652};
600 653
601struct intel_mmio_flip {
602 struct work_struct work;
603 struct drm_i915_private *i915;
604 struct drm_i915_gem_request *req;
605 struct intel_crtc *crtc;
606 unsigned int rotation;
607};
608
609struct intel_crtc { 654struct intel_crtc {
610 struct drm_crtc base; 655 struct drm_crtc base;
611 enum pipe pipe; 656 enum pipe pipe;
@@ -620,7 +665,7 @@ struct intel_crtc {
620 unsigned long enabled_power_domains; 665 unsigned long enabled_power_domains;
621 bool lowfreq_avail; 666 bool lowfreq_avail;
622 struct intel_overlay *overlay; 667 struct intel_overlay *overlay;
623 struct intel_unpin_work *unpin_work; 668 struct intel_flip_work *flip_work;
624 669
625 atomic_t unpin_work_count; 670 atomic_t unpin_work_count;
626 671
@@ -815,6 +860,7 @@ struct intel_dp {
815 uint8_t dpcd[DP_RECEIVER_CAP_SIZE]; 860 uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
816 uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE]; 861 uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE];
817 uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; 862 uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
863 uint8_t edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE];
818 /* sink rates as reported by DP_SUPPORTED_LINK_RATES */ 864 /* sink rates as reported by DP_SUPPORTED_LINK_RATES */
819 uint8_t num_sink_rates; 865 uint8_t num_sink_rates;
820 int sink_rates[DP_MAX_SUPPORTED_RATES]; 866 int sink_rates[DP_MAX_SUPPORTED_RATES];
@@ -838,6 +884,11 @@ struct intel_dp {
838 * this port. Only relevant on VLV/CHV. 884 * this port. Only relevant on VLV/CHV.
839 */ 885 */
840 enum pipe pps_pipe; 886 enum pipe pps_pipe;
887 /*
888 * Set if the sequencer may be reset due to a power transition,
889 * requiring a reinitialization. Only relevant on BXT.
890 */
891 bool pps_reset;
841 struct edp_power_seq pps_delays; 892 struct edp_power_seq pps_delays;
842 893
843 bool can_mst; /* this port supports mst */ 894 bool can_mst; /* this port supports mst */
@@ -934,33 +985,32 @@ vlv_pipe_to_channel(enum pipe pipe)
934static inline struct drm_crtc * 985static inline struct drm_crtc *
935intel_get_crtc_for_pipe(struct drm_device *dev, int pipe) 986intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
936{ 987{
937 struct drm_i915_private *dev_priv = dev->dev_private; 988 struct drm_i915_private *dev_priv = to_i915(dev);
938 return dev_priv->pipe_to_crtc_mapping[pipe]; 989 return dev_priv->pipe_to_crtc_mapping[pipe];
939} 990}
940 991
941static inline struct drm_crtc * 992static inline struct drm_crtc *
942intel_get_crtc_for_plane(struct drm_device *dev, int plane) 993intel_get_crtc_for_plane(struct drm_device *dev, int plane)
943{ 994{
944 struct drm_i915_private *dev_priv = dev->dev_private; 995 struct drm_i915_private *dev_priv = to_i915(dev);
945 return dev_priv->plane_to_crtc_mapping[plane]; 996 return dev_priv->plane_to_crtc_mapping[plane];
946} 997}
947 998
948struct intel_unpin_work { 999struct intel_flip_work {
949 struct work_struct work; 1000 struct work_struct unpin_work;
1001 struct work_struct mmio_work;
1002
950 struct drm_crtc *crtc; 1003 struct drm_crtc *crtc;
951 struct drm_framebuffer *old_fb; 1004 struct drm_framebuffer *old_fb;
952 struct drm_i915_gem_object *pending_flip_obj; 1005 struct drm_i915_gem_object *pending_flip_obj;
953 struct drm_pending_vblank_event *event; 1006 struct drm_pending_vblank_event *event;
954 atomic_t pending; 1007 atomic_t pending;
955#define INTEL_FLIP_INACTIVE 0
956#define INTEL_FLIP_PENDING 1
957#define INTEL_FLIP_COMPLETE 2
958 u32 flip_count; 1008 u32 flip_count;
959 u32 gtt_offset; 1009 u32 gtt_offset;
960 struct drm_i915_gem_request *flip_queued_req; 1010 struct drm_i915_gem_request *flip_queued_req;
961 u32 flip_queued_vblank; 1011 u32 flip_queued_vblank;
962 u32 flip_ready_vblank; 1012 u32 flip_ready_vblank;
963 bool enable_stall_check; 1013 unsigned int rotation;
964}; 1014};
965 1015
966struct intel_load_detect_pipe { 1016struct intel_load_detect_pipe {
@@ -1029,9 +1079,9 @@ void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
1029void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); 1079void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
1030void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); 1080void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
1031void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); 1081void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
1032void gen6_reset_rps_interrupts(struct drm_device *dev); 1082void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv);
1033void gen6_enable_rps_interrupts(struct drm_device *dev); 1083void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv);
1034void gen6_disable_rps_interrupts(struct drm_device *dev); 1084void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv);
1035u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask); 1085u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask);
1036void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv); 1086void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv);
1037void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv); 1087void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv);
@@ -1052,7 +1102,7 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
1052 1102
1053/* intel_crt.c */ 1103/* intel_crt.c */
1054void intel_crt_init(struct drm_device *dev); 1104void intel_crt_init(struct drm_device *dev);
1055 1105void intel_crt_reset(struct drm_encoder *encoder);
1056 1106
1057/* intel_ddi.c */ 1107/* intel_ddi.c */
1058void intel_ddi_clk_select(struct intel_encoder *encoder, 1108void intel_ddi_clk_select(struct intel_encoder *encoder,
@@ -1110,14 +1160,16 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv);
1110void i915_audio_component_cleanup(struct drm_i915_private *dev_priv); 1160void i915_audio_component_cleanup(struct drm_i915_private *dev_priv);
1111 1161
1112/* intel_display.c */ 1162/* intel_display.c */
1163void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco);
1164void intel_update_rawclk(struct drm_i915_private *dev_priv);
1113int vlv_get_cck_clock(struct drm_i915_private *dev_priv, 1165int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
1114 const char *name, u32 reg, int ref_freq); 1166 const char *name, u32 reg, int ref_freq);
1115extern const struct drm_plane_funcs intel_plane_funcs; 1167extern const struct drm_plane_funcs intel_plane_funcs;
1116void intel_init_display_hooks(struct drm_i915_private *dev_priv); 1168void intel_init_display_hooks(struct drm_i915_private *dev_priv);
1117unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info); 1169unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info);
1118bool intel_has_pending_fb_unpin(struct drm_device *dev); 1170bool intel_has_pending_fb_unpin(struct drm_device *dev);
1119void intel_mark_busy(struct drm_device *dev); 1171void intel_mark_busy(struct drm_i915_private *dev_priv);
1120void intel_mark_idle(struct drm_device *dev); 1172void intel_mark_idle(struct drm_i915_private *dev_priv);
1121void intel_crtc_restore_mode(struct drm_crtc *crtc); 1173void intel_crtc_restore_mode(struct drm_crtc *crtc);
1122int intel_display_suspend(struct drm_device *dev); 1174int intel_display_suspend(struct drm_device *dev);
1123void intel_encoder_destroy(struct drm_encoder *encoder); 1175void intel_encoder_destroy(struct drm_encoder *encoder);
@@ -1126,7 +1178,6 @@ struct intel_connector *intel_connector_alloc(void);
1126bool intel_connector_get_hw_state(struct intel_connector *connector); 1178bool intel_connector_get_hw_state(struct intel_connector *connector);
1127void intel_connector_attach_encoder(struct intel_connector *connector, 1179void intel_connector_attach_encoder(struct intel_connector *connector,
1128 struct intel_encoder *encoder); 1180 struct intel_encoder *encoder);
1129struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
1130struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, 1181struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
1131 struct drm_crtc *crtc); 1182 struct drm_crtc *crtc);
1132enum pipe intel_get_pipe_from_connector(struct intel_connector *connector); 1183enum pipe intel_get_pipe_from_connector(struct intel_connector *connector);
@@ -1134,7 +1185,20 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
1134 struct drm_file *file_priv); 1185 struct drm_file *file_priv);
1135enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, 1186enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
1136 enum pipe pipe); 1187 enum pipe pipe);
1137bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type); 1188static inline bool
1189intel_crtc_has_type(const struct intel_crtc_state *crtc_state,
1190 enum intel_output_type type)
1191{
1192 return crtc_state->output_types & (1 << type);
1193}
1194static inline bool
1195intel_crtc_has_dp_encoder(const struct intel_crtc_state *crtc_state)
1196{
1197 return crtc_state->output_types &
1198 ((1 << INTEL_OUTPUT_DP) |
1199 (1 << INTEL_OUTPUT_DP_MST) |
1200 (1 << INTEL_OUTPUT_EDP));
1201}
1138static inline void 1202static inline void
1139intel_wait_for_vblank(struct drm_device *dev, int pipe) 1203intel_wait_for_vblank(struct drm_device *dev, int pipe)
1140{ 1204{
@@ -1149,6 +1213,9 @@ intel_wait_for_vblank_if_active(struct drm_device *dev, int pipe)
1149 if (crtc->active) 1213 if (crtc->active)
1150 intel_wait_for_vblank(dev, pipe); 1214 intel_wait_for_vblank(dev, pipe);
1151} 1215}
1216
1217u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc);
1218
1152int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp); 1219int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
1153void vlv_wait_port_ready(struct drm_i915_private *dev_priv, 1220void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
1154 struct intel_digital_port *dport, 1221 struct intel_digital_port *dport,
@@ -1162,14 +1229,14 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
1162 struct drm_modeset_acquire_ctx *ctx); 1229 struct drm_modeset_acquire_ctx *ctx);
1163int intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, 1230int intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
1164 unsigned int rotation); 1231 unsigned int rotation);
1232void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation);
1165struct drm_framebuffer * 1233struct drm_framebuffer *
1166__intel_framebuffer_create(struct drm_device *dev, 1234__intel_framebuffer_create(struct drm_device *dev,
1167 struct drm_mode_fb_cmd2 *mode_cmd, 1235 struct drm_mode_fb_cmd2 *mode_cmd,
1168 struct drm_i915_gem_object *obj); 1236 struct drm_i915_gem_object *obj);
1169void intel_prepare_page_flip(struct drm_device *dev, int plane); 1237void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe);
1170void intel_finish_page_flip(struct drm_device *dev, int pipe); 1238void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe);
1171void intel_finish_page_flip_plane(struct drm_device *dev, int plane); 1239void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe);
1172void intel_check_page_flip(struct drm_device *dev, int pipe);
1173int intel_prepare_plane_fb(struct drm_plane *plane, 1240int intel_prepare_plane_fb(struct drm_plane *plane,
1174 const struct drm_plane_state *new_state); 1241 const struct drm_plane_state *new_state);
1175void intel_cleanup_plane_fb(struct drm_plane *plane, 1242void intel_cleanup_plane_fb(struct drm_plane *plane,
@@ -1226,23 +1293,25 @@ u32 intel_compute_tile_offset(int *x, int *y,
1226 const struct drm_framebuffer *fb, int plane, 1293 const struct drm_framebuffer *fb, int plane,
1227 unsigned int pitch, 1294 unsigned int pitch,
1228 unsigned int rotation); 1295 unsigned int rotation);
1229void intel_prepare_reset(struct drm_device *dev); 1296void intel_prepare_reset(struct drm_i915_private *dev_priv);
1230void intel_finish_reset(struct drm_device *dev); 1297void intel_finish_reset(struct drm_i915_private *dev_priv);
1231void hsw_enable_pc8(struct drm_i915_private *dev_priv); 1298void hsw_enable_pc8(struct drm_i915_private *dev_priv);
1232void hsw_disable_pc8(struct drm_i915_private *dev_priv); 1299void hsw_disable_pc8(struct drm_i915_private *dev_priv);
1233void broxton_init_cdclk(struct drm_i915_private *dev_priv); 1300void bxt_init_cdclk(struct drm_i915_private *dev_priv);
1234void broxton_uninit_cdclk(struct drm_i915_private *dev_priv); 1301void bxt_uninit_cdclk(struct drm_i915_private *dev_priv);
1235bool broxton_cdclk_verify_state(struct drm_i915_private *dev_priv); 1302void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy);
1236void broxton_ddi_phy_init(struct drm_i915_private *dev_priv); 1303void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy);
1237void broxton_ddi_phy_uninit(struct drm_i915_private *dev_priv); 1304bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
1238void broxton_ddi_phy_verify_state(struct drm_i915_private *dev_priv); 1305 enum dpio_phy phy);
1306bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
1307 enum dpio_phy phy);
1239void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv); 1308void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv);
1240void bxt_enable_dc9(struct drm_i915_private *dev_priv); 1309void bxt_enable_dc9(struct drm_i915_private *dev_priv);
1241void bxt_disable_dc9(struct drm_i915_private *dev_priv); 1310void bxt_disable_dc9(struct drm_i915_private *dev_priv);
1242void gen9_enable_dc5(struct drm_i915_private *dev_priv); 1311void gen9_enable_dc5(struct drm_i915_private *dev_priv);
1243void skl_init_cdclk(struct drm_i915_private *dev_priv); 1312void skl_init_cdclk(struct drm_i915_private *dev_priv);
1244int skl_sanitize_cdclk(struct drm_i915_private *dev_priv);
1245void skl_uninit_cdclk(struct drm_i915_private *dev_priv); 1313void skl_uninit_cdclk(struct drm_i915_private *dev_priv);
1314unsigned int skl_cdclk_get_vco(unsigned int freq);
1246void skl_enable_dc6(struct drm_i915_private *dev_priv); 1315void skl_enable_dc6(struct drm_i915_private *dev_priv);
1247void skl_disable_dc6(struct drm_i915_private *dev_priv); 1316void skl_disable_dc6(struct drm_i915_private *dev_priv);
1248void intel_dp_get_m_n(struct intel_crtc *crtc, 1317void intel_dp_get_m_n(struct intel_crtc *crtc,
@@ -1250,8 +1319,8 @@ void intel_dp_get_m_n(struct intel_crtc *crtc,
1250void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n); 1319void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n);
1251int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n); 1320int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
1252bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock, 1321bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
1253 intel_clock_t *best_clock); 1322 struct dpll *best_clock);
1254int chv_calc_dpll_params(int refclk, intel_clock_t *pll_clock); 1323int chv_calc_dpll_params(int refclk, struct dpll *pll_clock);
1255 1324
1256bool intel_crtc_active(struct drm_crtc *crtc); 1325bool intel_crtc_active(struct drm_crtc *crtc);
1257void hsw_enable_ips(struct intel_crtc *crtc); 1326void hsw_enable_ips(struct intel_crtc *crtc);
@@ -1310,7 +1379,7 @@ void intel_dp_mst_resume(struct drm_device *dev);
1310int intel_dp_max_link_rate(struct intel_dp *intel_dp); 1379int intel_dp_max_link_rate(struct intel_dp *intel_dp);
1311int intel_dp_rate_select(struct intel_dp *intel_dp, int rate); 1380int intel_dp_rate_select(struct intel_dp *intel_dp, int rate);
1312void intel_dp_hot_plug(struct intel_encoder *intel_encoder); 1381void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
1313void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv); 1382void intel_power_sequencer_reset(struct drm_i915_private *dev_priv);
1314uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes); 1383uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes);
1315void intel_plane_destroy(struct drm_plane *plane); 1384void intel_plane_destroy(struct drm_plane *plane);
1316void intel_edp_drrs_enable(struct intel_dp *intel_dp); 1385void intel_edp_drrs_enable(struct intel_dp *intel_dp);
@@ -1337,15 +1406,27 @@ bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
1337bool 1406bool
1338intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]); 1407intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]);
1339 1408
1409static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
1410{
1411 return ~((1 << lane_count) - 1) & 0xf;
1412}
1413
1414/* intel_dp_aux_backlight.c */
1415int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector);
1416
1340/* intel_dp_mst.c */ 1417/* intel_dp_mst.c */
1341int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id); 1418int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
1342void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port); 1419void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
1343/* intel_dsi.c */ 1420/* intel_dsi.c */
1344void intel_dsi_init(struct drm_device *dev); 1421void intel_dsi_init(struct drm_device *dev);
1345 1422
1423/* intel_dsi_dcs_backlight.c */
1424int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector);
1346 1425
1347/* intel_dvo.c */ 1426/* intel_dvo.c */
1348void intel_dvo_init(struct drm_device *dev); 1427void intel_dvo_init(struct drm_device *dev);
1428/* intel_hotplug.c */
1429void intel_hpd_poll_init(struct drm_i915_private *dev_priv);
1349 1430
1350 1431
1351/* legacy fbdev emulation in intel_fbdev.c */ 1432/* legacy fbdev emulation in intel_fbdev.c */
@@ -1383,11 +1464,15 @@ static inline void intel_fbdev_restore_mode(struct drm_device *dev)
1383void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv, 1464void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
1384 struct drm_atomic_state *state); 1465 struct drm_atomic_state *state);
1385bool intel_fbc_is_active(struct drm_i915_private *dev_priv); 1466bool intel_fbc_is_active(struct drm_i915_private *dev_priv);
1386void intel_fbc_pre_update(struct intel_crtc *crtc); 1467void intel_fbc_pre_update(struct intel_crtc *crtc,
1468 struct intel_crtc_state *crtc_state,
1469 struct intel_plane_state *plane_state);
1387void intel_fbc_post_update(struct intel_crtc *crtc); 1470void intel_fbc_post_update(struct intel_crtc *crtc);
1388void intel_fbc_init(struct drm_i915_private *dev_priv); 1471void intel_fbc_init(struct drm_i915_private *dev_priv);
1389void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv); 1472void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv);
1390void intel_fbc_enable(struct intel_crtc *crtc); 1473void intel_fbc_enable(struct intel_crtc *crtc,
1474 struct intel_crtc_state *crtc_state,
1475 struct intel_plane_state *plane_state);
1391void intel_fbc_disable(struct intel_crtc *crtc); 1476void intel_fbc_disable(struct intel_crtc *crtc);
1392void intel_fbc_global_disable(struct drm_i915_private *dev_priv); 1477void intel_fbc_global_disable(struct drm_i915_private *dev_priv);
1393void intel_fbc_invalidate(struct drm_i915_private *dev_priv, 1478void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
@@ -1409,6 +1494,7 @@ void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable);
1409 1494
1410/* intel_lvds.c */ 1495/* intel_lvds.c */
1411void intel_lvds_init(struct drm_device *dev); 1496void intel_lvds_init(struct drm_device *dev);
1497struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev);
1412bool intel_is_dual_link_lvds(struct drm_device *dev); 1498bool intel_is_dual_link_lvds(struct drm_device *dev);
1413 1499
1414 1500
@@ -1422,13 +1508,13 @@ void intel_attach_aspect_ratio_property(struct drm_connector *connector);
1422 1508
1423 1509
1424/* intel_overlay.c */ 1510/* intel_overlay.c */
1425void intel_setup_overlay(struct drm_device *dev); 1511void intel_setup_overlay(struct drm_i915_private *dev_priv);
1426void intel_cleanup_overlay(struct drm_device *dev); 1512void intel_cleanup_overlay(struct drm_i915_private *dev_priv);
1427int intel_overlay_switch_off(struct intel_overlay *overlay); 1513int intel_overlay_switch_off(struct intel_overlay *overlay);
1428int intel_overlay_put_image(struct drm_device *dev, void *data, 1514int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
1429 struct drm_file *file_priv); 1515 struct drm_file *file_priv);
1430int intel_overlay_attrs(struct drm_device *dev, void *data, 1516int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
1431 struct drm_file *file_priv); 1517 struct drm_file *file_priv);
1432void intel_overlay_reset(struct drm_i915_private *dev_priv); 1518void intel_overlay_reset(struct drm_i915_private *dev_priv);
1433 1519
1434 1520
@@ -1447,7 +1533,8 @@ void intel_gmch_panel_fitting(struct intel_crtc *crtc,
1447 int fitting_mode); 1533 int fitting_mode);
1448void intel_panel_set_backlight_acpi(struct intel_connector *connector, 1534void intel_panel_set_backlight_acpi(struct intel_connector *connector,
1449 u32 level, u32 max); 1535 u32 level, u32 max);
1450int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe); 1536int intel_panel_setup_backlight(struct drm_connector *connector,
1537 enum pipe pipe);
1451void intel_panel_enable_backlight(struct intel_connector *connector); 1538void intel_panel_enable_backlight(struct intel_connector *connector);
1452void intel_panel_disable_backlight(struct intel_connector *connector); 1539void intel_panel_disable_backlight(struct intel_connector *connector);
1453void intel_panel_destroy_backlight(struct drm_connector *connector); 1540void intel_panel_destroy_backlight(struct drm_connector *connector);
@@ -1456,8 +1543,19 @@ extern struct drm_display_mode *intel_find_panel_downclock(
1456 struct drm_device *dev, 1543 struct drm_device *dev,
1457 struct drm_display_mode *fixed_mode, 1544 struct drm_display_mode *fixed_mode,
1458 struct drm_connector *connector); 1545 struct drm_connector *connector);
1459void intel_backlight_register(struct drm_device *dev); 1546
1460void intel_backlight_unregister(struct drm_device *dev); 1547#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
1548int intel_backlight_device_register(struct intel_connector *connector);
1549void intel_backlight_device_unregister(struct intel_connector *connector);
1550#else /* CONFIG_BACKLIGHT_CLASS_DEVICE */
1551static int intel_backlight_device_register(struct intel_connector *connector)
1552{
1553 return 0;
1554}
1555static inline void intel_backlight_device_unregister(struct intel_connector *connector)
1556{
1557}
1558#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */
1461 1559
1462 1560
1463/* intel_psr.c */ 1561/* intel_psr.c */
@@ -1599,21 +1697,20 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv);
1599void intel_pm_setup(struct drm_device *dev); 1697void intel_pm_setup(struct drm_device *dev);
1600void intel_gpu_ips_init(struct drm_i915_private *dev_priv); 1698void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
1601void intel_gpu_ips_teardown(void); 1699void intel_gpu_ips_teardown(void);
1602void intel_init_gt_powersave(struct drm_device *dev); 1700void intel_init_gt_powersave(struct drm_i915_private *dev_priv);
1603void intel_cleanup_gt_powersave(struct drm_device *dev); 1701void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv);
1604void intel_enable_gt_powersave(struct drm_device *dev); 1702void intel_enable_gt_powersave(struct drm_i915_private *dev_priv);
1605void intel_disable_gt_powersave(struct drm_device *dev); 1703void intel_disable_gt_powersave(struct drm_i915_private *dev_priv);
1606void intel_suspend_gt_powersave(struct drm_device *dev); 1704void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv);
1607void intel_reset_gt_powersave(struct drm_device *dev); 1705void intel_reset_gt_powersave(struct drm_i915_private *dev_priv);
1608void gen6_update_ring_freq(struct drm_device *dev); 1706void gen6_update_ring_freq(struct drm_i915_private *dev_priv);
1609void gen6_rps_busy(struct drm_i915_private *dev_priv); 1707void gen6_rps_busy(struct drm_i915_private *dev_priv);
1610void gen6_rps_reset_ei(struct drm_i915_private *dev_priv); 1708void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
1611void gen6_rps_idle(struct drm_i915_private *dev_priv); 1709void gen6_rps_idle(struct drm_i915_private *dev_priv);
1612void gen6_rps_boost(struct drm_i915_private *dev_priv, 1710void gen6_rps_boost(struct drm_i915_private *dev_priv,
1613 struct intel_rps_client *rps, 1711 struct intel_rps_client *rps,
1614 unsigned long submitted); 1712 unsigned long submitted);
1615void intel_queue_rps_boost_for_request(struct drm_device *dev, 1713void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req);
1616 struct drm_i915_gem_request *req);
1617void vlv_wm_get_hw_state(struct drm_device *dev); 1714void vlv_wm_get_hw_state(struct drm_device *dev);
1618void ilk_wm_get_hw_state(struct drm_device *dev); 1715void ilk_wm_get_hw_state(struct drm_device *dev);
1619void skl_wm_get_hw_state(struct drm_device *dev); 1716void skl_wm_get_hw_state(struct drm_device *dev);
@@ -1621,7 +1718,11 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
1621 struct skl_ddb_allocation *ddb /* out */); 1718 struct skl_ddb_allocation *ddb /* out */);
1622uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config); 1719uint32_t ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config);
1623bool ilk_disable_lp_wm(struct drm_device *dev); 1720bool ilk_disable_lp_wm(struct drm_device *dev);
1624int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6); 1721int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6);
1722static inline int intel_enable_rc6(void)
1723{
1724 return i915.enable_rc6;
1725}
1625 1726
1626/* intel_sdvo.c */ 1727/* intel_sdvo.c */
1627bool intel_sdvo_init(struct drm_device *dev, 1728bool intel_sdvo_init(struct drm_device *dev,
@@ -1633,7 +1734,7 @@ int intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane);
1633int intel_sprite_set_colorkey(struct drm_device *dev, void *data, 1734int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
1634 struct drm_file *file_priv); 1735 struct drm_file *file_priv);
1635void intel_pipe_update_start(struct intel_crtc *crtc); 1736void intel_pipe_update_start(struct intel_crtc *crtc);
1636void intel_pipe_update_end(struct intel_crtc *crtc); 1737void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work);
1637 1738
1638/* intel_tv.c */ 1739/* intel_tv.c */
1639void intel_tv_init(struct drm_device *dev); 1740void intel_tv_init(struct drm_device *dev);
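
For reference, the has_dp_encoder/has_dsi_encoder bools removed above are subsumed by the new output_types bitmask, queried through the intel_crtc_has_type() and intel_crtc_has_dp_encoder() helpers added to this header. A minimal sketch of a caller after the change; only the two helpers and INTEL_OUTPUT_DSI come from the patch, the wrapper function itself is hypothetical:

    #include "intel_drv.h"

    /* Hypothetical helper: not part of the patch, illustration only. */
    static bool pipe_drives_dsi_or_dp(const struct intel_crtc_state *crtc_state)
    {
            /* Replaces the old crtc_state->has_dsi_encoder flag. */
            if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
                    return true;

            /* Replaces crtc_state->has_dp_encoder; covers DP, DP MST and eDP. */
            return intel_crtc_has_dp_encoder(crtc_state);
    }
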
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 4756ef639648..de8e9fb51595 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -84,13 +84,15 @@ static void wait_for_dsi_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
84{ 84{
85 struct drm_encoder *encoder = &intel_dsi->base.base; 85 struct drm_encoder *encoder = &intel_dsi->base.base;
86 struct drm_device *dev = encoder->dev; 86 struct drm_device *dev = encoder->dev;
87 struct drm_i915_private *dev_priv = dev->dev_private; 87 struct drm_i915_private *dev_priv = to_i915(dev);
88 u32 mask; 88 u32 mask;
89 89
90 mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY | 90 mask = LP_CTRL_FIFO_EMPTY | HS_CTRL_FIFO_EMPTY |
91 LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY; 91 LP_DATA_FIFO_EMPTY | HS_DATA_FIFO_EMPTY;
92 92
93 if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & mask) == mask, 100)) 93 if (intel_wait_for_register(dev_priv,
94 MIPI_GEN_FIFO_STAT(port), mask, mask,
95 100))
94 DRM_ERROR("DPI FIFOs are not empty\n"); 96 DRM_ERROR("DPI FIFOs are not empty\n");
95} 97}
96 98
@@ -129,7 +131,7 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
129{ 131{
130 struct intel_dsi_host *intel_dsi_host = to_intel_dsi_host(host); 132 struct intel_dsi_host *intel_dsi_host = to_intel_dsi_host(host);
131 struct drm_device *dev = intel_dsi_host->intel_dsi->base.base.dev; 133 struct drm_device *dev = intel_dsi_host->intel_dsi->base.base.dev;
132 struct drm_i915_private *dev_priv = dev->dev_private; 134 struct drm_i915_private *dev_priv = to_i915(dev);
133 enum port port = intel_dsi_host->port; 135 enum port port = intel_dsi_host->port;
134 struct mipi_dsi_packet packet; 136 struct mipi_dsi_packet packet;
135 ssize_t ret; 137 ssize_t ret;
@@ -158,8 +160,10 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
158 160
159 /* note: this is never true for reads */ 161 /* note: this is never true for reads */
160 if (packet.payload_length) { 162 if (packet.payload_length) {
161 163 if (intel_wait_for_register(dev_priv,
162 if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & data_mask) == 0, 50)) 164 MIPI_GEN_FIFO_STAT(port),
165 data_mask, 0,
166 50))
163 DRM_ERROR("Timeout waiting for HS/LP DATA FIFO !full\n"); 167 DRM_ERROR("Timeout waiting for HS/LP DATA FIFO !full\n");
164 168
165 write_data(dev_priv, data_reg, packet.payload, 169 write_data(dev_priv, data_reg, packet.payload,
@@ -170,7 +174,10 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
170 I915_WRITE(MIPI_INTR_STAT(port), GEN_READ_DATA_AVAIL); 174 I915_WRITE(MIPI_INTR_STAT(port), GEN_READ_DATA_AVAIL);
171 } 175 }
172 176
173 if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & ctrl_mask) == 0, 50)) { 177 if (intel_wait_for_register(dev_priv,
178 MIPI_GEN_FIFO_STAT(port),
179 ctrl_mask, 0,
180 50)) {
174 DRM_ERROR("Timeout waiting for HS/LP CTRL FIFO !full\n"); 181 DRM_ERROR("Timeout waiting for HS/LP CTRL FIFO !full\n");
175 } 182 }
176 183
@@ -179,7 +186,10 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
179 /* ->rx_len is set only for reads */ 186 /* ->rx_len is set only for reads */
180 if (msg->rx_len) { 187 if (msg->rx_len) {
181 data_mask = GEN_READ_DATA_AVAIL; 188 data_mask = GEN_READ_DATA_AVAIL;
182 if (wait_for((I915_READ(MIPI_INTR_STAT(port)) & data_mask) == data_mask, 50)) 189 if (intel_wait_for_register(dev_priv,
190 MIPI_INTR_STAT(port),
191 data_mask, data_mask,
192 50))
183 DRM_ERROR("Timeout waiting for read data.\n"); 193 DRM_ERROR("Timeout waiting for read data.\n");
184 194
185 read_data(dev_priv, data_reg, msg->rx_buf, msg->rx_len); 195 read_data(dev_priv, data_reg, msg->rx_buf, msg->rx_len);
@@ -250,7 +260,7 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
250{ 260{
251 struct drm_encoder *encoder = &intel_dsi->base.base; 261 struct drm_encoder *encoder = &intel_dsi->base.base;
252 struct drm_device *dev = encoder->dev; 262 struct drm_device *dev = encoder->dev;
253 struct drm_i915_private *dev_priv = dev->dev_private; 263 struct drm_i915_private *dev_priv = to_i915(dev);
254 u32 mask; 264 u32 mask;
255 265
256 /* XXX: pipe, hs */ 266 /* XXX: pipe, hs */
@@ -269,7 +279,9 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
269 I915_WRITE(MIPI_DPI_CONTROL(port), cmd); 279 I915_WRITE(MIPI_DPI_CONTROL(port), cmd);
270 280
271 mask = SPL_PKT_SENT_INTERRUPT; 281 mask = SPL_PKT_SENT_INTERRUPT;
272 if (wait_for((I915_READ(MIPI_INTR_STAT(port)) & mask) == mask, 100)) 282 if (intel_wait_for_register(dev_priv,
283 MIPI_INTR_STAT(port), mask, mask,
284 100))
273 DRM_ERROR("Video mode command 0x%08x send failed.\n", cmd); 285 DRM_ERROR("Video mode command 0x%08x send failed.\n", cmd);
274 286
275 return 0; 287 return 0;
@@ -302,7 +314,7 @@ static inline bool is_cmd_mode(struct intel_dsi *intel_dsi)
302static bool intel_dsi_compute_config(struct intel_encoder *encoder, 314static bool intel_dsi_compute_config(struct intel_encoder *encoder,
303 struct intel_crtc_state *pipe_config) 315 struct intel_crtc_state *pipe_config)
304{ 316{
305 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 317 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
306 struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi, 318 struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
307 base); 319 base);
308 struct intel_connector *intel_connector = intel_dsi->attached_connector; 320 struct intel_connector *intel_connector = intel_dsi->attached_connector;
@@ -313,8 +325,6 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
313 325
314 DRM_DEBUG_KMS("\n"); 326 DRM_DEBUG_KMS("\n");
315 327
316 pipe_config->has_dsi_encoder = true;
317
318 if (fixed_mode) { 328 if (fixed_mode) {
319 intel_fixed_panel_mode(fixed_mode, adjusted_mode); 329 intel_fixed_panel_mode(fixed_mode, adjusted_mode);
320 330
@@ -348,7 +358,7 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
348 358
349static void bxt_dsi_device_ready(struct intel_encoder *encoder) 359static void bxt_dsi_device_ready(struct intel_encoder *encoder)
350{ 360{
351 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 361 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
352 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 362 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
353 enum port port; 363 enum port port;
354 u32 val; 364 u32 val;
@@ -387,7 +397,7 @@ static void bxt_dsi_device_ready(struct intel_encoder *encoder)
387 397
388static void vlv_dsi_device_ready(struct intel_encoder *encoder) 398static void vlv_dsi_device_ready(struct intel_encoder *encoder)
389{ 399{
390 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 400 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
391 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 401 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
392 enum port port; 402 enum port port;
393 u32 val; 403 u32 val;
@@ -437,7 +447,7 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder)
437static void intel_dsi_port_enable(struct intel_encoder *encoder) 447static void intel_dsi_port_enable(struct intel_encoder *encoder)
438{ 448{
439 struct drm_device *dev = encoder->base.dev; 449 struct drm_device *dev = encoder->base.dev;
440 struct drm_i915_private *dev_priv = dev->dev_private; 450 struct drm_i915_private *dev_priv = to_i915(dev);
441 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); 451 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
442 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 452 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
443 enum port port; 453 enum port port;
@@ -478,7 +488,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder)
478static void intel_dsi_port_disable(struct intel_encoder *encoder) 488static void intel_dsi_port_disable(struct intel_encoder *encoder)
479{ 489{
480 struct drm_device *dev = encoder->base.dev; 490 struct drm_device *dev = encoder->base.dev;
481 struct drm_i915_private *dev_priv = dev->dev_private; 491 struct drm_i915_private *dev_priv = to_i915(dev);
482 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 492 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
483 enum port port; 493 enum port port;
484 494
@@ -497,7 +507,7 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder)
497static void intel_dsi_enable(struct intel_encoder *encoder) 507static void intel_dsi_enable(struct intel_encoder *encoder)
498{ 508{
499 struct drm_device *dev = encoder->base.dev; 509 struct drm_device *dev = encoder->base.dev;
500 struct drm_i915_private *dev_priv = dev->dev_private; 510 struct drm_i915_private *dev_priv = to_i915(dev);
501 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 511 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
502 enum port port; 512 enum port port;
503 513
@@ -528,11 +538,10 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder);
528static void intel_dsi_pre_enable(struct intel_encoder *encoder) 538static void intel_dsi_pre_enable(struct intel_encoder *encoder)
529{ 539{
530 struct drm_device *dev = encoder->base.dev; 540 struct drm_device *dev = encoder->base.dev;
531 struct drm_i915_private *dev_priv = dev->dev_private; 541 struct drm_i915_private *dev_priv = to_i915(dev);
532 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 542 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
533 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 543 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
534 enum port port; 544 enum port port;
535 u32 tmp;
536 545
537 DRM_DEBUG_KMS("\n"); 546 DRM_DEBUG_KMS("\n");
538 547
@@ -551,11 +560,13 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder)
551 560
552 msleep(intel_dsi->panel_on_delay); 561 msleep(intel_dsi->panel_on_delay);
553 562
554 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { 563 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
564 u32 val;
565
555 /* Disable DPOunit clock gating, can stall pipe */ 566 /* Disable DPOunit clock gating, can stall pipe */
556 tmp = I915_READ(DSPCLK_GATE_D); 567 val = I915_READ(DSPCLK_GATE_D);
557 tmp |= DPOUNIT_CLOCK_GATE_DISABLE; 568 val |= DPOUNIT_CLOCK_GATE_DISABLE;
558 I915_WRITE(DSPCLK_GATE_D, tmp); 569 I915_WRITE(DSPCLK_GATE_D, val);
559 } 570 }
560 571
561 /* put device in ready state */ 572 /* put device in ready state */
@@ -601,7 +612,7 @@ static void intel_dsi_pre_disable(struct intel_encoder *encoder)
601static void intel_dsi_disable(struct intel_encoder *encoder) 612static void intel_dsi_disable(struct intel_encoder *encoder)
602{ 613{
603 struct drm_device *dev = encoder->base.dev; 614 struct drm_device *dev = encoder->base.dev;
604 struct drm_i915_private *dev_priv = dev->dev_private; 615 struct drm_i915_private *dev_priv = to_i915(dev);
605 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 616 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
606 enum port port; 617 enum port port;
607 u32 temp; 618 u32 temp;
@@ -640,7 +651,7 @@ static void intel_dsi_disable(struct intel_encoder *encoder)
640static void intel_dsi_clear_device_ready(struct intel_encoder *encoder) 651static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
641{ 652{
642 struct drm_device *dev = encoder->base.dev; 653 struct drm_device *dev = encoder->base.dev;
643 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 654 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
644 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 655 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
645 enum port port; 656 enum port port;
646 657
@@ -666,8 +677,9 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
666 /* Wait till Clock lanes are in LP-00 state for MIPI Port A 677 /* Wait till Clock lanes are in LP-00 state for MIPI Port A
667 * only. MIPI Port C has no similar bit for checking 678 * only. MIPI Port C has no similar bit for checking
668 */ 679 */
669 if (wait_for(((I915_READ(port_ctrl) & AFE_LATCHOUT) 680 if (intel_wait_for_register(dev_priv,
670 == 0x00000), 30)) 681 port_ctrl, AFE_LATCHOUT, 0,
682 30))
671 DRM_ERROR("DSI LP not going Low\n"); 683 DRM_ERROR("DSI LP not going Low\n");
672 684
673 /* Disable MIPI PHY transparent latch */ 685 /* Disable MIPI PHY transparent latch */
@@ -684,7 +696,7 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
684 696
685static void intel_dsi_post_disable(struct intel_encoder *encoder) 697static void intel_dsi_post_disable(struct intel_encoder *encoder)
686{ 698{
687 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 699 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
688 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 700 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
689 701
690 DRM_DEBUG_KMS("\n"); 702 DRM_DEBUG_KMS("\n");
@@ -693,7 +705,7 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder)
693 705
694 intel_dsi_clear_device_ready(encoder); 706 intel_dsi_clear_device_ready(encoder);
695 707
696 if (!IS_BROXTON(dev_priv)) { 708 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
697 u32 val; 709 u32 val;
698 710
699 val = I915_READ(DSPCLK_GATE_D); 711 val = I915_READ(DSPCLK_GATE_D);
@@ -719,7 +731,7 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder)
719static bool intel_dsi_get_hw_state(struct intel_encoder *encoder, 731static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
720 enum pipe *pipe) 732 enum pipe *pipe)
721{ 733{
722 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 734 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
723 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 735 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
724 struct drm_device *dev = encoder->base.dev; 736 struct drm_device *dev = encoder->base.dev;
725 enum intel_display_power_domain power_domain; 737 enum intel_display_power_domain power_domain;
@@ -793,7 +805,7 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
793 struct intel_crtc_state *pipe_config) 805 struct intel_crtc_state *pipe_config)
794{ 806{
795 struct drm_device *dev = encoder->base.dev; 807 struct drm_device *dev = encoder->base.dev;
796 struct drm_i915_private *dev_priv = dev->dev_private; 808 struct drm_i915_private *dev_priv = to_i915(dev);
797 struct drm_display_mode *adjusted_mode = 809 struct drm_display_mode *adjusted_mode =
798 &pipe_config->base.adjusted_mode; 810 &pipe_config->base.adjusted_mode;
799 struct drm_display_mode *adjusted_mode_sw; 811 struct drm_display_mode *adjusted_mode_sw;
@@ -953,8 +965,6 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
953 u32 pclk; 965 u32 pclk;
954 DRM_DEBUG_KMS("\n"); 966 DRM_DEBUG_KMS("\n");
955 967
956 pipe_config->has_dsi_encoder = true;
957
958 if (IS_BROXTON(dev)) 968 if (IS_BROXTON(dev))
959 bxt_dsi_get_pipe_config(encoder, pipe_config); 969 bxt_dsi_get_pipe_config(encoder, pipe_config);
960 970
@@ -1012,7 +1022,7 @@ static void set_dsi_timings(struct drm_encoder *encoder,
1012 const struct drm_display_mode *adjusted_mode) 1022 const struct drm_display_mode *adjusted_mode)
1013{ 1023{
1014 struct drm_device *dev = encoder->dev; 1024 struct drm_device *dev = encoder->dev;
1015 struct drm_i915_private *dev_priv = dev->dev_private; 1025 struct drm_i915_private *dev_priv = to_i915(dev);
1016 struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); 1026 struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
1017 enum port port; 1027 enum port port;
1018 unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format); 1028 unsigned int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
@@ -1098,7 +1108,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
1098{ 1108{
1099 struct drm_encoder *encoder = &intel_encoder->base; 1109 struct drm_encoder *encoder = &intel_encoder->base;
1100 struct drm_device *dev = encoder->dev; 1110 struct drm_device *dev = encoder->dev;
1101 struct drm_i915_private *dev_priv = dev->dev_private; 1111 struct drm_i915_private *dev_priv = to_i915(dev);
1102 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 1112 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
1103 struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); 1113 struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
1104 const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; 1114 const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
@@ -1171,6 +1181,12 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder)
1171 if (intel_dsi->clock_stop) 1181 if (intel_dsi->clock_stop)
1172 tmp |= CLOCKSTOP; 1182 tmp |= CLOCKSTOP;
1173 1183
1184 if (IS_BROXTON(dev_priv)) {
1185 tmp |= BXT_DPHY_DEFEATURE_EN;
1186 if (!is_cmd_mode(intel_dsi))
1187 tmp |= BXT_DEFEATURE_DPI_FIFO_CTR;
1188 }
1189
1174 for_each_dsi_port(port, intel_dsi->ports) { 1190 for_each_dsi_port(port, intel_dsi->ports) {
1175 I915_WRITE(MIPI_DSI_FUNC_PRG(port), val); 1191 I915_WRITE(MIPI_DSI_FUNC_PRG(port), val);
1176 1192
@@ -1378,12 +1394,13 @@ static const struct drm_encoder_funcs intel_dsi_funcs = {
1378static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs = { 1394static const struct drm_connector_helper_funcs intel_dsi_connector_helper_funcs = {
1379 .get_modes = intel_dsi_get_modes, 1395 .get_modes = intel_dsi_get_modes,
1380 .mode_valid = intel_dsi_mode_valid, 1396 .mode_valid = intel_dsi_mode_valid,
1381 .best_encoder = intel_best_encoder,
1382}; 1397};
1383 1398
1384static const struct drm_connector_funcs intel_dsi_connector_funcs = { 1399static const struct drm_connector_funcs intel_dsi_connector_funcs = {
1385 .dpms = drm_atomic_helper_connector_dpms, 1400 .dpms = drm_atomic_helper_connector_dpms,
1386 .detect = intel_dsi_detect, 1401 .detect = intel_dsi_detect,
1402 .late_register = intel_connector_register,
1403 .early_unregister = intel_connector_unregister,
1387 .destroy = intel_dsi_connector_destroy, 1404 .destroy = intel_dsi_connector_destroy,
1388 .fill_modes = drm_helper_probe_single_connector_modes, 1405 .fill_modes = drm_helper_probe_single_connector_modes,
1389 .set_property = intel_dsi_set_property, 1406 .set_property = intel_dsi_set_property,
@@ -1413,7 +1430,7 @@ void intel_dsi_init(struct drm_device *dev)
1413 struct intel_connector *intel_connector; 1430 struct intel_connector *intel_connector;
1414 struct drm_connector *connector; 1431 struct drm_connector *connector;
1415 struct drm_display_mode *scan, *fixed_mode = NULL; 1432 struct drm_display_mode *scan, *fixed_mode = NULL;
1416 struct drm_i915_private *dev_priv = dev->dev_private; 1433 struct drm_i915_private *dev_priv = to_i915(dev);
1417 enum port port; 1434 enum port port;
1418 unsigned int i; 1435 unsigned int i;
1419 1436
@@ -1449,7 +1466,7 @@ void intel_dsi_init(struct drm_device *dev)
1449 connector = &intel_connector->base; 1466 connector = &intel_connector->base;
1450 1467
1451 drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI, 1468 drm_encoder_init(dev, encoder, &intel_dsi_funcs, DRM_MODE_ENCODER_DSI,
1452 NULL); 1469 "DSI %c", port_name(port));
1453 1470
1454 intel_encoder->compute_config = intel_dsi_compute_config; 1471 intel_encoder->compute_config = intel_dsi_compute_config;
1455 intel_encoder->pre_enable = intel_dsi_pre_enable; 1472 intel_encoder->pre_enable = intel_dsi_pre_enable;
@@ -1460,7 +1477,6 @@ void intel_dsi_init(struct drm_device *dev)
1460 intel_encoder->get_config = intel_dsi_get_config; 1477 intel_encoder->get_config = intel_dsi_get_config;
1461 1478
1462 intel_connector->get_hw_state = intel_connector_get_hw_state; 1479 intel_connector->get_hw_state = intel_connector_get_hw_state;
1463 intel_connector->unregister = intel_connector_unregister;
1464 1480
1465 /* 1481 /*
1466 * On BYT/CHV, pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI 1482 * On BYT/CHV, pipe A maps to MIPI DSI port A, pipe B maps to MIPI DSI
@@ -1473,10 +1489,42 @@ void intel_dsi_init(struct drm_device *dev)
1473 else 1489 else
1474 intel_encoder->crtc_mask = BIT(PIPE_B); 1490 intel_encoder->crtc_mask = BIT(PIPE_B);
1475 1491
1476 if (dev_priv->vbt.dsi.config->dual_link) 1492 if (dev_priv->vbt.dsi.config->dual_link) {
1477 intel_dsi->ports = BIT(PORT_A) | BIT(PORT_C); 1493 intel_dsi->ports = BIT(PORT_A) | BIT(PORT_C);
1478 else 1494
1495 switch (dev_priv->vbt.dsi.config->dl_dcs_backlight_ports) {
1496 case DL_DCS_PORT_A:
1497 intel_dsi->dcs_backlight_ports = BIT(PORT_A);
1498 break;
1499 case DL_DCS_PORT_C:
1500 intel_dsi->dcs_backlight_ports = BIT(PORT_C);
1501 break;
1502 default:
1503 case DL_DCS_PORT_A_AND_C:
1504 intel_dsi->dcs_backlight_ports = BIT(PORT_A) | BIT(PORT_C);
1505 break;
1506 }
1507
1508 switch (dev_priv->vbt.dsi.config->dl_dcs_cabc_ports) {
1509 case DL_DCS_PORT_A:
1510 intel_dsi->dcs_cabc_ports = BIT(PORT_A);
1511 break;
1512 case DL_DCS_PORT_C:
1513 intel_dsi->dcs_cabc_ports = BIT(PORT_C);
1514 break;
1515 default:
1516 case DL_DCS_PORT_A_AND_C:
1517 intel_dsi->dcs_cabc_ports = BIT(PORT_A) | BIT(PORT_C);
1518 break;
1519 }
1520 } else {
1479 intel_dsi->ports = BIT(port); 1521 intel_dsi->ports = BIT(port);
1522 intel_dsi->dcs_backlight_ports = BIT(port);
1523 intel_dsi->dcs_cabc_ports = BIT(port);
1524 }
1525
1526 if (!dev_priv->vbt.dsi.config->cabc_supported)
1527 intel_dsi->dcs_cabc_ports = 0;
1480 1528
1481 /* Create a DSI host (and a device) for each port. */ 1529 /* Create a DSI host (and a device) for each port. */
1482 for_each_dsi_port(port, intel_dsi->ports) { 1530 for_each_dsi_port(port, intel_dsi->ports) {
@@ -1549,13 +1597,10 @@ void intel_dsi_init(struct drm_device *dev)
1549 connector->display_info.height_mm = fixed_mode->height_mm; 1597 connector->display_info.height_mm = fixed_mode->height_mm;
1550 1598
1551 intel_panel_init(&intel_connector->panel, fixed_mode, NULL); 1599 intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
1600 intel_panel_setup_backlight(connector, INVALID_PIPE);
1552 1601
1553 intel_dsi_add_properties(intel_connector); 1602 intel_dsi_add_properties(intel_connector);
1554 1603
1555 drm_connector_register(connector);
1556
1557 intel_panel_setup_backlight(connector, INVALID_PIPE);
1558
1559 return; 1604 return;
1560 1605
1561err: 1606err:
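
The intel_dsi.c hunks above convert open-coded register polls to the intel_wait_for_register() helper. The mapping, shown with the first converted poll (both versions are taken verbatim from the hunks above; the argument order is register, mask, expected value, timeout in ms, and a non-zero return still means the wait timed out):

    /* Before: hand-rolled poll. */
    if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(port)) & mask) == mask, 100))
            DRM_ERROR("DPI FIFOs are not empty\n");

    /* After: the same wait expressed through the shared helper. */
    if (intel_wait_for_register(dev_priv,
                                MIPI_GEN_FIFO_STAT(port), mask, mask,
                                100))
            DRM_ERROR("DPI FIFOs are not empty\n");
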
diff --git a/drivers/gpu/drm/i915/intel_dsi.h b/drivers/gpu/drm/i915/intel_dsi.h
index 61a6957fc6c2..5967ea6d6045 100644
--- a/drivers/gpu/drm/i915/intel_dsi.h
+++ b/drivers/gpu/drm/i915/intel_dsi.h
@@ -78,6 +78,10 @@ struct intel_dsi {
78 78
79 u8 escape_clk_div; 79 u8 escape_clk_div;
80 u8 dual_link; 80 u8 dual_link;
81
82 u16 dcs_backlight_ports;
83 u16 dcs_cabc_ports;
84
81 u8 pixel_overlap; 85 u8 pixel_overlap;
82 u32 port_bits; 86 u32 port_bits;
83 u32 bw_timer; 87 u32 bw_timer;
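
The two new u16 fields are per-port bitmasks, filled from the VBT dual-link settings in intel_dsi_init() above and walked with for_each_dsi_port() by the DCS backlight code added below. A small sketch of how such a mask is consumed; the logging function is hypothetical, while the field, the iterator macro and the port_name() usage come from the patch:

    /* Hypothetical: log which ports receive DCS backlight commands. */
    static void sketch_log_backlight_ports(struct intel_dsi *intel_dsi)
    {
            enum port port;

            /* Dual-link DL_DCS_PORT_A_AND_C ends up as BIT(PORT_A) | BIT(PORT_C). */
            for_each_dsi_port(port, intel_dsi->dcs_backlight_ports)
                    DRM_DEBUG_KMS("DCS backlight on port %c\n", port_name(port));
    }
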
diff --git a/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c
new file mode 100644
index 000000000000..ac7c6020c443
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dsi_dcs_backlight.c
@@ -0,0 +1,179 @@
1/*
2 * Copyright © 2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Author: Deepak M <m.deepak at intel.com>
24 */
25
26#include "intel_drv.h"
27#include "intel_dsi.h"
28#include "i915_drv.h"
29#include <video/mipi_display.h>
30#include <drm/drm_mipi_dsi.h>
31
32#define CONTROL_DISPLAY_BCTRL (1 << 5)
33#define CONTROL_DISPLAY_DD (1 << 3)
34#define CONTROL_DISPLAY_BL (1 << 2)
35
36#define POWER_SAVE_OFF (0 << 0)
37#define POWER_SAVE_LOW (1 << 0)
38#define POWER_SAVE_MEDIUM (2 << 0)
39#define POWER_SAVE_HIGH (3 << 0)
40#define POWER_SAVE_OUTDOOR_MODE (4 << 0)
41
42#define PANEL_PWM_MAX_VALUE 0xFF
43
44static u32 dcs_get_backlight(struct intel_connector *connector)
45{
46 struct intel_encoder *encoder = connector->encoder;
47 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
48 struct mipi_dsi_device *dsi_device;
49 u8 data;
50 enum port port;
51
52 /* FIXME: Need to take care of 16 bit brightness level */
53 for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
54 dsi_device = intel_dsi->dsi_hosts[port]->device;
55 mipi_dsi_dcs_read(dsi_device, MIPI_DCS_GET_DISPLAY_BRIGHTNESS,
56 &data, sizeof(data));
57 break;
58 }
59
60 return data;
61}
62
63static void dcs_set_backlight(struct intel_connector *connector, u32 level)
64{
65 struct intel_encoder *encoder = connector->encoder;
66 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
67 struct mipi_dsi_device *dsi_device;
68 u8 data = level;
69 enum port port;
70
71 /* FIXME: Need to take care of 16 bit brightness level */
72 for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
73 dsi_device = intel_dsi->dsi_hosts[port]->device;
74 mipi_dsi_dcs_write(dsi_device, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
75 &data, sizeof(data));
76 }
77}
78
79static void dcs_disable_backlight(struct intel_connector *connector)
80{
81 struct intel_encoder *encoder = connector->encoder;
82 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
83 struct mipi_dsi_device *dsi_device;
84 enum port port;
85
86 dcs_set_backlight(connector, 0);
87
88 for_each_dsi_port(port, intel_dsi->dcs_cabc_ports) {
89 u8 cabc = POWER_SAVE_OFF;
90
91 dsi_device = intel_dsi->dsi_hosts[port]->device;
92 mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_POWER_SAVE,
93 &cabc, sizeof(cabc));
94 }
95
96 for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
97 u8 ctrl = 0;
98
99 dsi_device = intel_dsi->dsi_hosts[port]->device;
100
101 mipi_dsi_dcs_read(dsi_device, MIPI_DCS_GET_CONTROL_DISPLAY,
102 &ctrl, sizeof(ctrl));
103
104 ctrl &= ~CONTROL_DISPLAY_BL;
105 ctrl &= ~CONTROL_DISPLAY_DD;
106 ctrl &= ~CONTROL_DISPLAY_BCTRL;
107
108 mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_CONTROL_DISPLAY,
109 &ctrl, sizeof(ctrl));
110 }
111}
112
113static void dcs_enable_backlight(struct intel_connector *connector)
114{
115 struct intel_encoder *encoder = connector->encoder;
116 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
117 struct intel_panel *panel = &connector->panel;
118 struct mipi_dsi_device *dsi_device;
119 enum port port;
120
121 for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
122 u8 ctrl = 0;
123
124 dsi_device = intel_dsi->dsi_hosts[port]->device;
125
126 mipi_dsi_dcs_read(dsi_device, MIPI_DCS_GET_CONTROL_DISPLAY,
127 &ctrl, sizeof(ctrl));
128
129 ctrl |= CONTROL_DISPLAY_BL;
130 ctrl |= CONTROL_DISPLAY_DD;
131 ctrl |= CONTROL_DISPLAY_BCTRL;
132
133 mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_CONTROL_DISPLAY,
134 &ctrl, sizeof(ctrl));
135 }
136
137 for_each_dsi_port(port, intel_dsi->dcs_cabc_ports) {
138 u8 cabc = POWER_SAVE_MEDIUM;
139
140 dsi_device = intel_dsi->dsi_hosts[port]->device;
141 mipi_dsi_dcs_write(dsi_device, MIPI_DCS_WRITE_POWER_SAVE,
142 &cabc, sizeof(cabc));
143 }
144
145 dcs_set_backlight(connector, panel->backlight.level);
146}
147
148static int dcs_setup_backlight(struct intel_connector *connector,
149 enum pipe unused)
150{
151 struct intel_panel *panel = &connector->panel;
152
153 panel->backlight.max = PANEL_PWM_MAX_VALUE;
154 panel->backlight.level = PANEL_PWM_MAX_VALUE;
155
156 return 0;
157}
158
159int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector)
160{
161 struct drm_device *dev = intel_connector->base.dev;
162 struct drm_i915_private *dev_priv = to_i915(dev);
163 struct intel_encoder *encoder = intel_connector->encoder;
164 struct intel_panel *panel = &intel_connector->panel;
165
166 if (dev_priv->vbt.backlight.type != INTEL_BACKLIGHT_DSI_DCS)
167 return -ENODEV;
168
169 if (WARN_ON(encoder->type != INTEL_OUTPUT_DSI))
170 return -EINVAL;
171
172 panel->backlight.setup = dcs_setup_backlight;
173 panel->backlight.enable = dcs_enable_backlight;
174 panel->backlight.disable = dcs_disable_backlight;
175 panel->backlight.set = dcs_set_backlight;
176 panel->backlight.get = dcs_get_backlight;
177
178 return 0;
179}
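
intel_dsi_dcs_init_backlight_funcs() only installs function pointers; the generic panel code invokes them through connector->panel.backlight. A rough sketch of that dispatch, assuming a connector already set up by the file above (the dispatcher is illustrative, not the actual intel_panel.c code; the field names and PANEL_PWM_MAX_VALUE come from this file):

    /* Illustrative dispatch path for a DCS-backlit panel. */
    static void sketch_set_brightness(struct intel_connector *connector, u32 level)
    {
            struct intel_panel *panel = &connector->panel;

            if (level > panel->backlight.max)  /* max == PANEL_PWM_MAX_VALUE here */
                    level = panel->backlight.max;

            panel->backlight.level = level;
            panel->backlight.set(connector, level);  /* -> dcs_set_backlight() */
    }
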
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
index e498f1c3221e..cd154ce6b6c1 100644
--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
@@ -95,6 +95,24 @@ static struct gpio_map vlv_gpio_table[] = {
95 { VLV_GPIO_NC_11_PANEL1_BKLTCTL }, 95 { VLV_GPIO_NC_11_PANEL1_BKLTCTL },
96}; 96};
97 97
98#define CHV_GPIO_IDX_START_N 0
99#define CHV_GPIO_IDX_START_E 73
100#define CHV_GPIO_IDX_START_SW 100
101#define CHV_GPIO_IDX_START_SE 198
102
103#define CHV_VBT_MAX_PINS_PER_FMLY 15
104
105#define CHV_GPIO_PAD_CFG0(f, i) (0x4400 + (f) * 0x400 + (i) * 8)
106#define CHV_GPIO_GPIOEN (1 << 15)
107#define CHV_GPIO_GPIOCFG_GPIO (0 << 8)
108#define CHV_GPIO_GPIOCFG_GPO (1 << 8)
109#define CHV_GPIO_GPIOCFG_GPI (2 << 8)
110#define CHV_GPIO_GPIOCFG_HIZ (3 << 8)
111#define CHV_GPIO_GPIOTXSTATE(state) ((!!(state)) << 1)
112
113#define CHV_GPIO_PAD_CFG1(f, i) (0x4400 + (f) * 0x400 + (i) * 8 + 4)
114#define CHV_GPIO_CFGLOCK (1 << 31)
115
98static inline enum port intel_dsi_seq_port_to_port(u8 port) 116static inline enum port intel_dsi_seq_port_to_port(u8 port)
99{ 117{
100 return port ? PORT_C : PORT_A; 118 return port ? PORT_C : PORT_A;
@@ -203,13 +221,14 @@ static void vlv_exec_gpio(struct drm_i915_private *dev_priv,
203 map = &vlv_gpio_table[gpio_index]; 221 map = &vlv_gpio_table[gpio_index];
204 222
205 if (dev_priv->vbt.dsi.seq_version >= 3) { 223 if (dev_priv->vbt.dsi.seq_version >= 3) {
206 DRM_DEBUG_KMS("GPIO element v3 not supported\n"); 224 /* XXX: this assumes vlv_gpio_table only has NC GPIOs. */
207 return; 225 port = IOSF_PORT_GPIO_NC;
208 } else { 226 } else {
209 if (gpio_source == 0) { 227 if (gpio_source == 0) {
210 port = IOSF_PORT_GPIO_NC; 228 port = IOSF_PORT_GPIO_NC;
211 } else if (gpio_source == 1) { 229 } else if (gpio_source == 1) {
212 port = IOSF_PORT_GPIO_SC; 230 DRM_DEBUG_KMS("SC gpio not supported\n");
231 return;
213 } else { 232 } else {
214 DRM_DEBUG_KMS("unknown gpio source %u\n", gpio_source); 233 DRM_DEBUG_KMS("unknown gpio source %u\n", gpio_source);
215 return; 234 return;
@@ -231,10 +250,60 @@ static void vlv_exec_gpio(struct drm_i915_private *dev_priv,
231 mutex_unlock(&dev_priv->sb_lock); 250 mutex_unlock(&dev_priv->sb_lock);
232} 251}
233 252
253static void chv_exec_gpio(struct drm_i915_private *dev_priv,
254 u8 gpio_source, u8 gpio_index, bool value)
255{
256 u16 cfg0, cfg1;
257 u16 family_num;
258 u8 port;
259
260 if (dev_priv->vbt.dsi.seq_version >= 3) {
261 if (gpio_index >= CHV_GPIO_IDX_START_SE) {
262 /* XXX: it's unclear whether 255->57 is part of SE. */
263 gpio_index -= CHV_GPIO_IDX_START_SE;
264 port = CHV_IOSF_PORT_GPIO_SE;
265 } else if (gpio_index >= CHV_GPIO_IDX_START_SW) {
266 gpio_index -= CHV_GPIO_IDX_START_SW;
267 port = CHV_IOSF_PORT_GPIO_SW;
268 } else if (gpio_index >= CHV_GPIO_IDX_START_E) {
269 gpio_index -= CHV_GPIO_IDX_START_E;
270 port = CHV_IOSF_PORT_GPIO_E;
271 } else {
272 port = CHV_IOSF_PORT_GPIO_N;
273 }
274 } else {
275 /* XXX: The spec is unclear about CHV GPIO on seq v2 */
276 if (gpio_source != 0) {
277 DRM_DEBUG_KMS("unknown gpio source %u\n", gpio_source);
278 return;
279 }
280
281 if (gpio_index >= CHV_GPIO_IDX_START_E) {
282 DRM_DEBUG_KMS("invalid gpio index %u for GPIO N\n",
283 gpio_index);
284 return;
285 }
286
287 port = CHV_IOSF_PORT_GPIO_N;
288 }
289
290 family_num = gpio_index / CHV_VBT_MAX_PINS_PER_FMLY;
291 gpio_index = gpio_index % CHV_VBT_MAX_PINS_PER_FMLY;
292
293 cfg0 = CHV_GPIO_PAD_CFG0(family_num, gpio_index);
294 cfg1 = CHV_GPIO_PAD_CFG1(family_num, gpio_index);
295
296 mutex_lock(&dev_priv->sb_lock);
297 vlv_iosf_sb_write(dev_priv, port, cfg1, 0);
298 vlv_iosf_sb_write(dev_priv, port, cfg0,
299 CHV_GPIO_GPIOCFG_GPO | CHV_GPIO_GPIOTXSTATE(value));
300 mutex_unlock(&dev_priv->sb_lock);
301}
302
234static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data) 303static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
235{ 304{
236 struct drm_device *dev = intel_dsi->base.base.dev; 305 struct drm_device *dev = intel_dsi->base.base.dev;
237 struct drm_i915_private *dev_priv = dev->dev_private; 306 struct drm_i915_private *dev_priv = to_i915(dev);
238 u8 gpio_source, gpio_index; 307 u8 gpio_source, gpio_index;
239 bool value; 308 bool value;
240 309
@@ -254,6 +323,8 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
254 323
255 if (IS_VALLEYVIEW(dev_priv)) 324 if (IS_VALLEYVIEW(dev_priv))
256 vlv_exec_gpio(dev_priv, gpio_source, gpio_index, value); 325 vlv_exec_gpio(dev_priv, gpio_source, gpio_index, value);
326 else if (IS_CHERRYVIEW(dev_priv))
327 chv_exec_gpio(dev_priv, gpio_source, gpio_index, value);
257 else 328 else
258 DRM_DEBUG_KMS("GPIO element not supported on this platform\n"); 329 DRM_DEBUG_KMS("GPIO element not supported on this platform\n");
259 330
@@ -398,7 +469,7 @@ static int vbt_panel_get_modes(struct drm_panel *panel)
398 struct vbt_panel *vbt_panel = to_vbt_panel(panel); 469 struct vbt_panel *vbt_panel = to_vbt_panel(panel);
399 struct intel_dsi *intel_dsi = vbt_panel->intel_dsi; 470 struct intel_dsi *intel_dsi = vbt_panel->intel_dsi;
400 struct drm_device *dev = intel_dsi->base.base.dev; 471 struct drm_device *dev = intel_dsi->base.base.dev;
401 struct drm_i915_private *dev_priv = dev->dev_private; 472 struct drm_i915_private *dev_priv = to_i915(dev);
402 struct drm_display_mode *mode; 473 struct drm_display_mode *mode;
403 474
404 if (!panel->connector) 475 if (!panel->connector)
@@ -426,7 +497,7 @@ static const struct drm_panel_funcs vbt_panel_funcs = {
426struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id) 497struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
427{ 498{
428 struct drm_device *dev = intel_dsi->base.base.dev; 499 struct drm_device *dev = intel_dsi->base.base.dev;
429 struct drm_i915_private *dev_priv = dev->dev_private; 500 struct drm_i915_private *dev_priv = to_i915(dev);
430 struct mipi_config *mipi_config = dev_priv->vbt.dsi.config; 501 struct mipi_config *mipi_config = dev_priv->vbt.dsi.config;
431 struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps; 502 struct mipi_pps_data *pps = dev_priv->vbt.dsi.pps;
432 struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode; 503 struct drm_display_mode *mode = dev_priv->vbt.lfp_lvds_vbt_mode;
@@ -578,14 +649,13 @@ struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id)
578 ); 649 );
579 650
580 /* 651 /*
581 * Exit zero is unified val ths_zero and ths_exit 652 * Exit zero is unified val ths_zero and ths_exit
582 * minimum value for ths_exit = 110ns 653 * minimum value for ths_exit = 110ns
583 * min (exit_zero_cnt * 2) = 110/UI 654 * min (exit_zero_cnt * 2) = 110/UI
584 * exit_zero_cnt = 55/UI 655 * exit_zero_cnt = 55/UI
585 */ 656 */
586 if (exit_zero_cnt < (55 * ui_den / ui_num)) 657 if (exit_zero_cnt < (55 * ui_den / ui_num) && (55 * ui_den) % ui_num)
587 if ((55 * ui_den) % ui_num) 658 exit_zero_cnt += 1;
588 exit_zero_cnt += 1;
589 659
590 /* clk zero count */ 660 /* clk zero count */
591 clk_zero_cnt = DIV_ROUND_UP( 661 clk_zero_cnt = DIV_ROUND_UP(
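
The new chv_exec_gpio() above first maps the flat VBT GPIO index onto a Cherryview GPIO community (N from 0, E from 73, SW from 100, SE from 198), then splits the remaining index into a 15-pin family plus a pad within it, and finally writes PAD_CFG1 = 0 and PAD_CFG0 = GPO | TXSTATE through the IOSF sideband. A condensed sketch of just the address math, reusing the constants introduced in the hunk:

/* Sketch of the CHV pad-config addressing used by chv_exec_gpio() above.
 * Each "family" holds up to 15 pins; CFG0/CFG1 sit 8 bytes apart per pad. */
#define CHV_VBT_MAX_PINS_PER_FMLY	15
#define CHV_GPIO_PAD_CFG0(f, i)		(0x4400 + (f) * 0x400 + (i) * 8)
#define CHV_GPIO_PAD_CFG1(f, i)		(0x4400 + (f) * 0x400 + (i) * 8 + 4)

static void example_chv_gpio_offsets(unsigned int gpio_index,
				     u16 *cfg0, u16 *cfg1)
{
	u16 family = gpio_index / CHV_VBT_MAX_PINS_PER_FMLY;
	u16 pad = gpio_index % CHV_VBT_MAX_PINS_PER_FMLY;

	*cfg0 = CHV_GPIO_PAD_CFG0(family, pad);
	*cfg1 = CHV_GPIO_PAD_CFG1(family, pad);
}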
diff --git a/drivers/gpu/drm/i915/intel_dsi_pll.c b/drivers/gpu/drm/i915/intel_dsi_pll.c
index 1765e6e18f2c..6ab58a01b18e 100644
--- a/drivers/gpu/drm/i915/intel_dsi_pll.c
+++ b/drivers/gpu/drm/i915/intel_dsi_pll.c
@@ -55,12 +55,10 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
55 struct intel_crtc_state *config, 55 struct intel_crtc_state *config,
56 int target_dsi_clk) 56 int target_dsi_clk)
57{ 57{
58 unsigned int calc_m = 0, calc_p = 0;
59 unsigned int m_min, m_max, p_min = 2, p_max = 6; 58 unsigned int m_min, m_max, p_min = 2, p_max = 6;
60 unsigned int m, n, p; 59 unsigned int m, n, p;
61 int ref_clk; 60 unsigned int calc_m, calc_p;
62 int delta = target_dsi_clk; 61 int delta, ref_clk;
63 u32 m_seed;
64 62
65 /* target_dsi_clk is expected in kHz */ 63 /* target_dsi_clk is expected in kHz */
66 if (target_dsi_clk < 300000 || target_dsi_clk > 1150000) { 64 if (target_dsi_clk < 300000 || target_dsi_clk > 1150000) {
@@ -80,6 +78,10 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
80 m_max = 92; 78 m_max = 92;
81 } 79 }
82 80
81 calc_p = p_min;
82 calc_m = m_min;
83 delta = abs(target_dsi_clk - (m_min * ref_clk) / (p_min * n));
84
83 for (m = m_min; m <= m_max && delta; m++) { 85 for (m = m_min; m <= m_max && delta; m++) {
84 for (p = p_min; p <= p_max && delta; p++) { 86 for (p = p_min; p <= p_max && delta; p++) {
85 /* 87 /*
@@ -97,11 +99,10 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
97 } 99 }
98 100
99 /* register has log2(N1), this works fine for powers of two */ 101 /* register has log2(N1), this works fine for powers of two */
100 n = ffs(n) - 1;
101 m_seed = lfsr_converts[calc_m - 62];
102 config->dsi_pll.ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + calc_p - 2); 102 config->dsi_pll.ctrl = 1 << (DSI_PLL_P1_POST_DIV_SHIFT + calc_p - 2);
103 config->dsi_pll.div = n << DSI_PLL_N1_DIV_SHIFT | 103 config->dsi_pll.div =
104 m_seed << DSI_PLL_M1_DIV_SHIFT; 104 (ffs(n) - 1) << DSI_PLL_N1_DIV_SHIFT |
105 (u32)lfsr_converts[calc_m - 62] << DSI_PLL_M1_DIV_SHIFT;
105 106
106 return 0; 107 return 0;
107} 108}
@@ -113,7 +114,7 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
113static int vlv_compute_dsi_pll(struct intel_encoder *encoder, 114static int vlv_compute_dsi_pll(struct intel_encoder *encoder,
114 struct intel_crtc_state *config) 115 struct intel_crtc_state *config)
115{ 116{
116 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 117 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
117 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 118 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
118 int ret; 119 int ret;
119 u32 dsi_clk; 120 u32 dsi_clk;
@@ -234,8 +235,11 @@ static void bxt_disable_dsi_pll(struct intel_encoder *encoder)
234 * PLL lock should deassert within 200us. 235 * PLL lock should deassert within 200us.
235 * Wait up to 1ms before timing out. 236 * Wait up to 1ms before timing out.
236 */ 237 */
237 if (wait_for((I915_READ(BXT_DSI_PLL_ENABLE) 238 if (intel_wait_for_register(dev_priv,
238 & BXT_DSI_PLL_LOCKED) == 0, 1)) 239 BXT_DSI_PLL_ENABLE,
240 BXT_DSI_PLL_LOCKED,
241 0,
242 1))
239 DRM_ERROR("Timeout waiting for PLL lock deassertion\n"); 243 DRM_ERROR("Timeout waiting for PLL lock deassertion\n");
240} 244}
241 245
@@ -321,7 +325,7 @@ static u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
321 u32 dsi_clk; 325 u32 dsi_clk;
322 u32 dsi_ratio; 326 u32 dsi_ratio;
323 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 327 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
324 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 328 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
325 329
326 /* Divide by zero */ 330 /* Divide by zero */
327 if (!pipe_bpp) { 331 if (!pipe_bpp) {
@@ -356,7 +360,7 @@ u32 intel_dsi_get_pclk(struct intel_encoder *encoder, int pipe_bpp,
356static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port) 360static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
357{ 361{
358 u32 temp; 362 u32 temp;
359 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 363 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
360 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 364 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
361 365
362 temp = I915_READ(MIPI_CTRL(port)); 366 temp = I915_READ(MIPI_CTRL(port));
@@ -370,7 +374,7 @@ static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
370static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port, 374static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port,
371 const struct intel_crtc_state *config) 375 const struct intel_crtc_state *config)
372{ 376{
373 struct drm_i915_private *dev_priv = dev->dev_private; 377 struct drm_i915_private *dev_priv = to_i915(dev);
374 u32 tmp; 378 u32 tmp;
375 u32 dsi_rate = 0; 379 u32 dsi_rate = 0;
376 u32 pll_ratio = 0; 380 u32 pll_ratio = 0;
@@ -465,7 +469,7 @@ static int bxt_compute_dsi_pll(struct intel_encoder *encoder,
465static void bxt_enable_dsi_pll(struct intel_encoder *encoder, 469static void bxt_enable_dsi_pll(struct intel_encoder *encoder,
466 const struct intel_crtc_state *config) 470 const struct intel_crtc_state *config)
467{ 471{
468 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 472 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
469 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 473 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base);
470 enum port port; 474 enum port port;
471 u32 val; 475 u32 val;
@@ -486,7 +490,11 @@ static void bxt_enable_dsi_pll(struct intel_encoder *encoder,
486 I915_WRITE(BXT_DSI_PLL_ENABLE, val); 490 I915_WRITE(BXT_DSI_PLL_ENABLE, val);
487 491
488 /* Timeout and fail if PLL not locked */ 492 /* Timeout and fail if PLL not locked */
489 if (wait_for(I915_READ(BXT_DSI_PLL_ENABLE) & BXT_DSI_PLL_LOCKED, 1)) { 493 if (intel_wait_for_register(dev_priv,
494 BXT_DSI_PLL_ENABLE,
495 BXT_DSI_PLL_LOCKED,
496 BXT_DSI_PLL_LOCKED,
497 1)) {
490 DRM_ERROR("Timed out waiting for DSI PLL to lock\n"); 498 DRM_ERROR("Timed out waiting for DSI PLL to lock\n");
491 return; 499 return;
492 } 500 }
@@ -542,7 +550,7 @@ static void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
542{ 550{
543 u32 tmp; 551 u32 tmp;
544 struct drm_device *dev = encoder->base.dev; 552 struct drm_device *dev = encoder->base.dev;
545 struct drm_i915_private *dev_priv = dev->dev_private; 553 struct drm_i915_private *dev_priv = to_i915(dev);
546 554
547 /* Clear old configurations */ 555 /* Clear old configurations */
548 tmp = I915_READ(BXT_MIPI_CLOCK_CTL); 556 tmp = I915_READ(BXT_MIPI_CLOCK_CTL);
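
In dsi_calc_mnp() above, the M/P divider search is now seeded with the lowest candidate (m_min, p_min) and its error before the loops run, so calc_m and calc_p can no longer be consumed uninitialized, and the register packing folds ffs(n) and the LFSR-converted M seed into a single expression. A reduced sketch of the search structure follows; the inner clock computation is written out as an assumption, since the hunk elides the loop body.

#include <linux/kernel.h>	/* abs() */

/* Illustrative exhaustive M/P search as reshaped by the hunk above:
 * seed the error and best candidates from the lowest M/P pair, then
 * stop early once an exact match (delta == 0) is found. */
static void example_dsi_mnp_search(int target_dsi_clk, int ref_clk, int n,
				   int m_min, int m_max,
				   int *best_m, int *best_p)
{
	const int p_min = 2, p_max = 6;
	int m, p, delta;

	*best_m = m_min;
	*best_p = p_min;
	delta = abs(target_dsi_clk - (m_min * ref_clk) / (p_min * n));

	for (m = m_min; m <= m_max && delta; m++) {
		for (p = p_min; p <= p_max && delta; p++) {
			int d = abs(target_dsi_clk - (m * ref_clk) / (p * n));

			if (d < delta) {
				*best_m = m;
				*best_p = p;
				delta = d;
			}
		}
	}
}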
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 286baec979c8..47bdf9dad0d3 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -122,7 +122,7 @@ static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector)
122static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector) 122static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
123{ 123{
124 struct drm_device *dev = connector->base.dev; 124 struct drm_device *dev = connector->base.dev;
125 struct drm_i915_private *dev_priv = dev->dev_private; 125 struct drm_i915_private *dev_priv = to_i915(dev);
126 struct intel_dvo *intel_dvo = intel_attached_dvo(&connector->base); 126 struct intel_dvo *intel_dvo = intel_attached_dvo(&connector->base);
127 u32 tmp; 127 u32 tmp;
128 128
@@ -138,7 +138,7 @@ static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
138 enum pipe *pipe) 138 enum pipe *pipe)
139{ 139{
140 struct drm_device *dev = encoder->base.dev; 140 struct drm_device *dev = encoder->base.dev;
141 struct drm_i915_private *dev_priv = dev->dev_private; 141 struct drm_i915_private *dev_priv = to_i915(dev);
142 struct intel_dvo *intel_dvo = enc_to_dvo(encoder); 142 struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
143 u32 tmp; 143 u32 tmp;
144 144
@@ -155,7 +155,7 @@ static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
155static void intel_dvo_get_config(struct intel_encoder *encoder, 155static void intel_dvo_get_config(struct intel_encoder *encoder,
156 struct intel_crtc_state *pipe_config) 156 struct intel_crtc_state *pipe_config)
157{ 157{
158 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 158 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
159 struct intel_dvo *intel_dvo = enc_to_dvo(encoder); 159 struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
160 u32 tmp, flags = 0; 160 u32 tmp, flags = 0;
161 161
@@ -176,7 +176,7 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
176 176
177static void intel_disable_dvo(struct intel_encoder *encoder) 177static void intel_disable_dvo(struct intel_encoder *encoder)
178{ 178{
179 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 179 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
180 struct intel_dvo *intel_dvo = enc_to_dvo(encoder); 180 struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
181 i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg; 181 i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
182 u32 temp = I915_READ(dvo_reg); 182 u32 temp = I915_READ(dvo_reg);
@@ -188,7 +188,7 @@ static void intel_disable_dvo(struct intel_encoder *encoder)
188 188
189static void intel_enable_dvo(struct intel_encoder *encoder) 189static void intel_enable_dvo(struct intel_encoder *encoder)
190{ 190{
191 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 191 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
192 struct intel_dvo *intel_dvo = enc_to_dvo(encoder); 192 struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
193 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 193 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
194 i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg; 194 i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
@@ -256,7 +256,7 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
256static void intel_dvo_pre_enable(struct intel_encoder *encoder) 256static void intel_dvo_pre_enable(struct intel_encoder *encoder)
257{ 257{
258 struct drm_device *dev = encoder->base.dev; 258 struct drm_device *dev = encoder->base.dev;
259 struct drm_i915_private *dev_priv = dev->dev_private; 259 struct drm_i915_private *dev_priv = to_i915(dev);
260 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 260 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
261 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; 261 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
262 struct intel_dvo *intel_dvo = enc_to_dvo(encoder); 262 struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
@@ -305,7 +305,7 @@ intel_dvo_detect(struct drm_connector *connector, bool force)
305 305
306static int intel_dvo_get_modes(struct drm_connector *connector) 306static int intel_dvo_get_modes(struct drm_connector *connector)
307{ 307{
308 struct drm_i915_private *dev_priv = connector->dev->dev_private; 308 struct drm_i915_private *dev_priv = to_i915(connector->dev);
309 const struct drm_display_mode *fixed_mode = 309 const struct drm_display_mode *fixed_mode =
310 to_intel_connector(connector)->panel.fixed_mode; 310 to_intel_connector(connector)->panel.fixed_mode;
311 311
@@ -341,6 +341,8 @@ static void intel_dvo_destroy(struct drm_connector *connector)
341static const struct drm_connector_funcs intel_dvo_connector_funcs = { 341static const struct drm_connector_funcs intel_dvo_connector_funcs = {
342 .dpms = drm_atomic_helper_connector_dpms, 342 .dpms = drm_atomic_helper_connector_dpms,
343 .detect = intel_dvo_detect, 343 .detect = intel_dvo_detect,
344 .late_register = intel_connector_register,
345 .early_unregister = intel_connector_unregister,
344 .destroy = intel_dvo_destroy, 346 .destroy = intel_dvo_destroy,
345 .fill_modes = drm_helper_probe_single_connector_modes, 347 .fill_modes = drm_helper_probe_single_connector_modes,
346 .atomic_get_property = intel_connector_atomic_get_property, 348 .atomic_get_property = intel_connector_atomic_get_property,
@@ -351,7 +353,6 @@ static const struct drm_connector_funcs intel_dvo_connector_funcs = {
351static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = { 353static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = {
352 .mode_valid = intel_dvo_mode_valid, 354 .mode_valid = intel_dvo_mode_valid,
353 .get_modes = intel_dvo_get_modes, 355 .get_modes = intel_dvo_get_modes,
354 .best_encoder = intel_best_encoder,
355}; 356};
356 357
357static void intel_dvo_enc_destroy(struct drm_encoder *encoder) 358static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
@@ -378,7 +379,7 @@ static struct drm_display_mode *
378intel_dvo_get_current_mode(struct drm_connector *connector) 379intel_dvo_get_current_mode(struct drm_connector *connector)
379{ 380{
380 struct drm_device *dev = connector->dev; 381 struct drm_device *dev = connector->dev;
381 struct drm_i915_private *dev_priv = dev->dev_private; 382 struct drm_i915_private *dev_priv = to_i915(dev);
382 struct intel_dvo *intel_dvo = intel_attached_dvo(connector); 383 struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
383 uint32_t dvo_val = I915_READ(intel_dvo->dev.dvo_reg); 384 uint32_t dvo_val = I915_READ(intel_dvo->dev.dvo_reg);
384 struct drm_display_mode *mode = NULL; 385 struct drm_display_mode *mode = NULL;
@@ -406,9 +407,21 @@ intel_dvo_get_current_mode(struct drm_connector *connector)
406 return mode; 407 return mode;
407} 408}
408 409
410static char intel_dvo_port_name(i915_reg_t dvo_reg)
411{
412 if (i915_mmio_reg_equal(dvo_reg, DVOA))
413 return 'A';
414 else if (i915_mmio_reg_equal(dvo_reg, DVOB))
415 return 'B';
416 else if (i915_mmio_reg_equal(dvo_reg, DVOC))
417 return 'C';
418 else
419 return '?';
420}
421
409void intel_dvo_init(struct drm_device *dev) 422void intel_dvo_init(struct drm_device *dev)
410{ 423{
411 struct drm_i915_private *dev_priv = dev->dev_private; 424 struct drm_i915_private *dev_priv = to_i915(dev);
412 struct intel_encoder *intel_encoder; 425 struct intel_encoder *intel_encoder;
413 struct intel_dvo *intel_dvo; 426 struct intel_dvo *intel_dvo;
414 struct intel_connector *intel_connector; 427 struct intel_connector *intel_connector;
@@ -428,8 +441,6 @@ void intel_dvo_init(struct drm_device *dev)
428 intel_dvo->attached_connector = intel_connector; 441 intel_dvo->attached_connector = intel_connector;
429 442
430 intel_encoder = &intel_dvo->base; 443 intel_encoder = &intel_dvo->base;
431 drm_encoder_init(dev, &intel_encoder->base,
432 &intel_dvo_enc_funcs, encoder_type, NULL);
433 444
434 intel_encoder->disable = intel_disable_dvo; 445 intel_encoder->disable = intel_disable_dvo;
435 intel_encoder->enable = intel_enable_dvo; 446 intel_encoder->enable = intel_enable_dvo;
@@ -438,7 +449,6 @@ void intel_dvo_init(struct drm_device *dev)
438 intel_encoder->compute_config = intel_dvo_compute_config; 449 intel_encoder->compute_config = intel_dvo_compute_config;
439 intel_encoder->pre_enable = intel_dvo_pre_enable; 450 intel_encoder->pre_enable = intel_dvo_pre_enable;
440 intel_connector->get_hw_state = intel_dvo_connector_get_hw_state; 451 intel_connector->get_hw_state = intel_dvo_connector_get_hw_state;
441 intel_connector->unregister = intel_connector_unregister;
442 452
443 /* Now, try to find a controller */ 453 /* Now, try to find a controller */
444 for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) { 454 for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
@@ -496,6 +506,10 @@ void intel_dvo_init(struct drm_device *dev)
496 if (!dvoinit) 506 if (!dvoinit)
497 continue; 507 continue;
498 508
509 drm_encoder_init(dev, &intel_encoder->base,
510 &intel_dvo_enc_funcs, encoder_type,
511 "DVO %c", intel_dvo_port_name(dvo->dvo_reg));
512
499 intel_encoder->type = INTEL_OUTPUT_DVO; 513 intel_encoder->type = INTEL_OUTPUT_DVO;
500 intel_encoder->crtc_mask = (1 << 0) | (1 << 1); 514 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
501 switch (dvo->type) { 515 switch (dvo->type) {
@@ -537,7 +551,6 @@ void intel_dvo_init(struct drm_device *dev)
537 intel_dvo->panel_wants_dither = true; 551 intel_dvo->panel_wants_dither = true;
538 } 552 }
539 553
540 drm_connector_register(connector);
541 return; 554 return;
542 } 555 }
543 556
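
The intel_dvo.c changes above follow two patterns repeated throughout this pull: drm_encoder_init() now takes a printf-style name ("DVO %c" here), and explicit drm_connector_register() calls give way to .late_register/.early_unregister hooks in drm_connector_funcs, so the DRM core drives registration and unregistration. A generic sketch of the hook-based pattern, with hypothetical stand-ins for the i915 helpers named in the hunk:

#include <drm/drm_crtc.h>

/* Hypothetical connector illustrating hook-based registration;
 * my_connector_register()/_unregister() stand in for
 * intel_connector_register()/_unregister(). */
static int my_connector_register(struct drm_connector *connector)
{
	/* e.g. create sysfs/debugfs entries, register a backlight, ... */
	return 0;
}

static void my_connector_unregister(struct drm_connector *connector)
{
	/* tear down whatever my_connector_register() created */
}

static const struct drm_connector_funcs my_connector_funcs = {
	.late_register = my_connector_register,
	.early_unregister = my_connector_unregister,
	/* .detect, .destroy, .fill_modes etc. as before */
};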
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 647127f3aaff..6a7ad3ed1463 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -124,7 +124,9 @@ static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
124 I915_WRITE(FBC_CONTROL, fbc_ctl); 124 I915_WRITE(FBC_CONTROL, fbc_ctl);
125 125
126 /* Wait for compressing bit to clear */ 126 /* Wait for compressing bit to clear */
127 if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) { 127 if (intel_wait_for_register(dev_priv,
128 FBC_STATUS, FBC_STAT_COMPRESSING, 0,
129 10)) {
128 DRM_DEBUG_KMS("FBC idle timed out\n"); 130 DRM_DEBUG_KMS("FBC idle timed out\n");
129 return; 131 return;
130 } 132 }
@@ -374,8 +376,9 @@ static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv)
374 * @dev_priv: i915 device instance 376 * @dev_priv: i915 device instance
375 * 377 *
376 * This function is used to verify the current state of FBC. 378 * This function is used to verify the current state of FBC.
379 *
377 * FIXME: This should be tracked in the plane config eventually 380 * FIXME: This should be tracked in the plane config eventually
378 * instead of queried at runtime for most callers. 381 * instead of queried at runtime for most callers.
379 */ 382 */
380bool intel_fbc_is_active(struct drm_i915_private *dev_priv) 383bool intel_fbc_is_active(struct drm_i915_private *dev_priv)
381{ 384{
@@ -389,7 +392,7 @@ static void intel_fbc_work_fn(struct work_struct *__work)
389 struct intel_fbc *fbc = &dev_priv->fbc; 392 struct intel_fbc *fbc = &dev_priv->fbc;
390 struct intel_fbc_work *work = &fbc->work; 393 struct intel_fbc_work *work = &fbc->work;
391 struct intel_crtc *crtc = fbc->crtc; 394 struct intel_crtc *crtc = fbc->crtc;
392 struct drm_vblank_crtc *vblank = &dev_priv->dev->vblank[crtc->pipe]; 395 struct drm_vblank_crtc *vblank = &dev_priv->drm.vblank[crtc->pipe];
393 396
394 if (drm_crtc_vblank_get(&crtc->base)) { 397 if (drm_crtc_vblank_get(&crtc->base)) {
395 DRM_ERROR("vblank not available for FBC on pipe %c\n", 398 DRM_ERROR("vblank not available for FBC on pipe %c\n",
@@ -442,7 +445,7 @@ out:
442 445
443static void intel_fbc_schedule_activation(struct intel_crtc *crtc) 446static void intel_fbc_schedule_activation(struct intel_crtc *crtc)
444{ 447{
445 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 448 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
446 struct intel_fbc *fbc = &dev_priv->fbc; 449 struct intel_fbc *fbc = &dev_priv->fbc;
447 struct intel_fbc_work *work = &fbc->work; 450 struct intel_fbc_work *work = &fbc->work;
448 451
@@ -480,10 +483,10 @@ static void intel_fbc_deactivate(struct drm_i915_private *dev_priv)
480 intel_fbc_hw_deactivate(dev_priv); 483 intel_fbc_hw_deactivate(dev_priv);
481} 484}
482 485
483static bool multiple_pipes_ok(struct intel_crtc *crtc) 486static bool multiple_pipes_ok(struct intel_crtc *crtc,
487 struct intel_plane_state *plane_state)
484{ 488{
485 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 489 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
486 struct drm_plane *primary = crtc->base.primary;
487 struct intel_fbc *fbc = &dev_priv->fbc; 490 struct intel_fbc *fbc = &dev_priv->fbc;
488 enum pipe pipe = crtc->pipe; 491 enum pipe pipe = crtc->pipe;
489 492
@@ -491,9 +494,7 @@ static bool multiple_pipes_ok(struct intel_crtc *crtc)
491 if (!no_fbc_on_multiple_pipes(dev_priv)) 494 if (!no_fbc_on_multiple_pipes(dev_priv))
492 return true; 495 return true;
493 496
494 WARN_ON(!drm_modeset_is_locked(&primary->mutex)); 497 if (plane_state->visible)
495
496 if (to_intel_plane_state(primary->state)->visible)
497 fbc->visible_pipes_mask |= (1 << pipe); 498 fbc->visible_pipes_mask |= (1 << pipe);
498 else 499 else
499 fbc->visible_pipes_mask &= ~(1 << pipe); 500 fbc->visible_pipes_mask &= ~(1 << pipe);
@@ -554,7 +555,7 @@ again:
554 555
555static int intel_fbc_alloc_cfb(struct intel_crtc *crtc) 556static int intel_fbc_alloc_cfb(struct intel_crtc *crtc)
556{ 557{
557 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 558 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
558 struct intel_fbc *fbc = &dev_priv->fbc; 559 struct intel_fbc *fbc = &dev_priv->fbc;
559 struct drm_mm_node *uninitialized_var(compressed_llb); 560 struct drm_mm_node *uninitialized_var(compressed_llb);
560 int size, fb_cpp, ret; 561 int size, fb_cpp, ret;
@@ -685,7 +686,7 @@ static bool pixel_format_is_valid(struct drm_i915_private *dev_priv,
685 */ 686 */
686static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc) 687static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
687{ 688{
688 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 689 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
689 struct intel_fbc *fbc = &dev_priv->fbc; 690 struct intel_fbc *fbc = &dev_priv->fbc;
690 unsigned int effective_w, effective_h, max_w, max_h; 691 unsigned int effective_w, effective_h, max_w, max_h;
691 692
@@ -708,21 +709,16 @@ static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc)
708 return effective_w <= max_w && effective_h <= max_h; 709 return effective_w <= max_w && effective_h <= max_h;
709} 710}
710 711
711static void intel_fbc_update_state_cache(struct intel_crtc *crtc) 712static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
713 struct intel_crtc_state *crtc_state,
714 struct intel_plane_state *plane_state)
712{ 715{
713 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 716 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
714 struct intel_fbc *fbc = &dev_priv->fbc; 717 struct intel_fbc *fbc = &dev_priv->fbc;
715 struct intel_fbc_state_cache *cache = &fbc->state_cache; 718 struct intel_fbc_state_cache *cache = &fbc->state_cache;
716 struct intel_crtc_state *crtc_state =
717 to_intel_crtc_state(crtc->base.state);
718 struct intel_plane_state *plane_state =
719 to_intel_plane_state(crtc->base.primary->state);
720 struct drm_framebuffer *fb = plane_state->base.fb; 719 struct drm_framebuffer *fb = plane_state->base.fb;
721 struct drm_i915_gem_object *obj; 720 struct drm_i915_gem_object *obj;
722 721
723 WARN_ON(!drm_modeset_is_locked(&crtc->base.mutex));
724 WARN_ON(!drm_modeset_is_locked(&crtc->base.primary->mutex));
725
726 cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags; 722 cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags;
727 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 723 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
728 cache->crtc.hsw_bdw_pixel_rate = 724 cache->crtc.hsw_bdw_pixel_rate =
@@ -740,7 +736,7 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc)
740 736
741 /* FIXME: We lack the proper locking here, so only run this on the 737 /* FIXME: We lack the proper locking here, so only run this on the
742 * platforms that need. */ 738 * platforms that need. */
743 if (INTEL_INFO(dev_priv)->gen >= 5 && INTEL_INFO(dev_priv)->gen < 7) 739 if (IS_GEN(dev_priv, 5, 6))
744 cache->fb.ilk_ggtt_offset = i915_gem_obj_ggtt_offset(obj); 740 cache->fb.ilk_ggtt_offset = i915_gem_obj_ggtt_offset(obj);
745 cache->fb.pixel_format = fb->pixel_format; 741 cache->fb.pixel_format = fb->pixel_format;
746 cache->fb.stride = fb->pitches[0]; 742 cache->fb.stride = fb->pitches[0];
@@ -750,7 +746,7 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc)
750 746
751static bool intel_fbc_can_activate(struct intel_crtc *crtc) 747static bool intel_fbc_can_activate(struct intel_crtc *crtc)
752{ 748{
753 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 749 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
754 struct intel_fbc *fbc = &dev_priv->fbc; 750 struct intel_fbc *fbc = &dev_priv->fbc;
755 struct intel_fbc_state_cache *cache = &fbc->state_cache; 751 struct intel_fbc_state_cache *cache = &fbc->state_cache;
756 752
@@ -822,22 +818,16 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
822 818
823static bool intel_fbc_can_choose(struct intel_crtc *crtc) 819static bool intel_fbc_can_choose(struct intel_crtc *crtc)
824{ 820{
825 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 821 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
826 struct intel_fbc *fbc = &dev_priv->fbc; 822 struct intel_fbc *fbc = &dev_priv->fbc;
827 bool enable_by_default = IS_BROADWELL(dev_priv);
828 823
829 if (intel_vgpu_active(dev_priv->dev)) { 824 if (intel_vgpu_active(dev_priv)) {
830 fbc->no_fbc_reason = "VGPU is active"; 825 fbc->no_fbc_reason = "VGPU is active";
831 return false; 826 return false;
832 } 827 }
833 828
834 if (i915.enable_fbc < 0 && !enable_by_default) {
835 fbc->no_fbc_reason = "disabled per chip default";
836 return false;
837 }
838
839 if (!i915.enable_fbc) { 829 if (!i915.enable_fbc) {
840 fbc->no_fbc_reason = "disabled per module param"; 830 fbc->no_fbc_reason = "disabled per module param or by default";
841 return false; 831 return false;
842 } 832 }
843 833
@@ -857,7 +847,7 @@ static bool intel_fbc_can_choose(struct intel_crtc *crtc)
857static void intel_fbc_get_reg_params(struct intel_crtc *crtc, 847static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
858 struct intel_fbc_reg_params *params) 848 struct intel_fbc_reg_params *params)
859{ 849{
860 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 850 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
861 struct intel_fbc *fbc = &dev_priv->fbc; 851 struct intel_fbc *fbc = &dev_priv->fbc;
862 struct intel_fbc_state_cache *cache = &fbc->state_cache; 852 struct intel_fbc_state_cache *cache = &fbc->state_cache;
863 853
@@ -886,9 +876,11 @@ static bool intel_fbc_reg_params_equal(struct intel_fbc_reg_params *params1,
886 return memcmp(params1, params2, sizeof(*params1)) == 0; 876 return memcmp(params1, params2, sizeof(*params1)) == 0;
887} 877}
888 878
889void intel_fbc_pre_update(struct intel_crtc *crtc) 879void intel_fbc_pre_update(struct intel_crtc *crtc,
880 struct intel_crtc_state *crtc_state,
881 struct intel_plane_state *plane_state)
890{ 882{
891 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 883 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
892 struct intel_fbc *fbc = &dev_priv->fbc; 884 struct intel_fbc *fbc = &dev_priv->fbc;
893 885
894 if (!fbc_supported(dev_priv)) 886 if (!fbc_supported(dev_priv))
@@ -896,7 +888,7 @@ void intel_fbc_pre_update(struct intel_crtc *crtc)
896 888
897 mutex_lock(&fbc->lock); 889 mutex_lock(&fbc->lock);
898 890
899 if (!multiple_pipes_ok(crtc)) { 891 if (!multiple_pipes_ok(crtc, plane_state)) {
900 fbc->no_fbc_reason = "more than one pipe active"; 892 fbc->no_fbc_reason = "more than one pipe active";
901 goto deactivate; 893 goto deactivate;
902 } 894 }
@@ -904,7 +896,7 @@ void intel_fbc_pre_update(struct intel_crtc *crtc)
904 if (!fbc->enabled || fbc->crtc != crtc) 896 if (!fbc->enabled || fbc->crtc != crtc)
905 goto unlock; 897 goto unlock;
906 898
907 intel_fbc_update_state_cache(crtc); 899 intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
908 900
909deactivate: 901deactivate:
910 intel_fbc_deactivate(dev_priv); 902 intel_fbc_deactivate(dev_priv);
@@ -914,7 +906,7 @@ unlock:
914 906
915static void __intel_fbc_post_update(struct intel_crtc *crtc) 907static void __intel_fbc_post_update(struct intel_crtc *crtc)
916{ 908{
917 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 909 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
918 struct intel_fbc *fbc = &dev_priv->fbc; 910 struct intel_fbc *fbc = &dev_priv->fbc;
919 struct intel_fbc_reg_params old_params; 911 struct intel_fbc_reg_params old_params;
920 912
@@ -947,7 +939,7 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc)
947 939
948void intel_fbc_post_update(struct intel_crtc *crtc) 940void intel_fbc_post_update(struct intel_crtc *crtc)
949{ 941{
950 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 942 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
951 struct intel_fbc *fbc = &dev_priv->fbc; 943 struct intel_fbc *fbc = &dev_priv->fbc;
952 944
953 if (!fbc_supported(dev_priv)) 945 if (!fbc_supported(dev_priv))
@@ -996,13 +988,13 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv,
996 if (!fbc_supported(dev_priv)) 988 if (!fbc_supported(dev_priv))
997 return; 989 return;
998 990
999 if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
1000 return;
1001
1002 mutex_lock(&fbc->lock); 991 mutex_lock(&fbc->lock);
1003 992
1004 fbc->busy_bits &= ~frontbuffer_bits; 993 fbc->busy_bits &= ~frontbuffer_bits;
1005 994
995 if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
996 goto out;
997
1006 if (!fbc->busy_bits && fbc->enabled && 998 if (!fbc->busy_bits && fbc->enabled &&
1007 (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) { 999 (frontbuffer_bits & intel_fbc_get_frontbuffer_bit(fbc))) {
1008 if (fbc->active) 1000 if (fbc->active)
@@ -1011,6 +1003,7 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv,
1011 __intel_fbc_post_update(fbc->crtc); 1003 __intel_fbc_post_update(fbc->crtc);
1012 } 1004 }
1013 1005
1006out:
1014 mutex_unlock(&fbc->lock); 1007 mutex_unlock(&fbc->lock);
1015} 1008}
1016 1009
@@ -1088,9 +1081,11 @@ out:
1088 * intel_fbc_enable multiple times for the same pipe without an 1081 * intel_fbc_enable multiple times for the same pipe without an
1089 * intel_fbc_disable in the middle, as long as it is deactivated. 1082 * intel_fbc_disable in the middle, as long as it is deactivated.
1090 */ 1083 */
1091void intel_fbc_enable(struct intel_crtc *crtc) 1084void intel_fbc_enable(struct intel_crtc *crtc,
1085 struct intel_crtc_state *crtc_state,
1086 struct intel_plane_state *plane_state)
1092{ 1087{
1093 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 1088 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1094 struct intel_fbc *fbc = &dev_priv->fbc; 1089 struct intel_fbc *fbc = &dev_priv->fbc;
1095 1090
1096 if (!fbc_supported(dev_priv)) 1091 if (!fbc_supported(dev_priv))
@@ -1101,19 +1096,19 @@ void intel_fbc_enable(struct intel_crtc *crtc)
1101 if (fbc->enabled) { 1096 if (fbc->enabled) {
1102 WARN_ON(fbc->crtc == NULL); 1097 WARN_ON(fbc->crtc == NULL);
1103 if (fbc->crtc == crtc) { 1098 if (fbc->crtc == crtc) {
1104 WARN_ON(!crtc->config->enable_fbc); 1099 WARN_ON(!crtc_state->enable_fbc);
1105 WARN_ON(fbc->active); 1100 WARN_ON(fbc->active);
1106 } 1101 }
1107 goto out; 1102 goto out;
1108 } 1103 }
1109 1104
1110 if (!crtc->config->enable_fbc) 1105 if (!crtc_state->enable_fbc)
1111 goto out; 1106 goto out;
1112 1107
1113 WARN_ON(fbc->active); 1108 WARN_ON(fbc->active);
1114 WARN_ON(fbc->crtc != NULL); 1109 WARN_ON(fbc->crtc != NULL);
1115 1110
1116 intel_fbc_update_state_cache(crtc); 1111 intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
1117 if (intel_fbc_alloc_cfb(crtc)) { 1112 if (intel_fbc_alloc_cfb(crtc)) {
1118 fbc->no_fbc_reason = "not enough stolen memory"; 1113 fbc->no_fbc_reason = "not enough stolen memory";
1119 goto out; 1114 goto out;
@@ -1161,7 +1156,7 @@ static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
1161 */ 1156 */
1162void intel_fbc_disable(struct intel_crtc *crtc) 1157void intel_fbc_disable(struct intel_crtc *crtc)
1163{ 1158{
1164 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 1159 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1165 struct intel_fbc *fbc = &dev_priv->fbc; 1160 struct intel_fbc *fbc = &dev_priv->fbc;
1166 1161
1167 if (!fbc_supported(dev_priv)) 1162 if (!fbc_supported(dev_priv))
@@ -1215,12 +1210,32 @@ void intel_fbc_init_pipe_state(struct drm_i915_private *dev_priv)
1215 if (!no_fbc_on_multiple_pipes(dev_priv)) 1210 if (!no_fbc_on_multiple_pipes(dev_priv))
1216 return; 1211 return;
1217 1212
1218 for_each_intel_crtc(dev_priv->dev, crtc) 1213 for_each_intel_crtc(&dev_priv->drm, crtc)
1219 if (intel_crtc_active(&crtc->base) && 1214 if (intel_crtc_active(&crtc->base) &&
1220 to_intel_plane_state(crtc->base.primary->state)->visible) 1215 to_intel_plane_state(crtc->base.primary->state)->visible)
1221 dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe); 1216 dev_priv->fbc.visible_pipes_mask |= (1 << crtc->pipe);
1222} 1217}
1223 1218
1219/*
1220 * The DDX driver changes its behavior depending on the value it reads from
1221 * i915.enable_fbc, so sanitize it by translating the default value into either
1222 * 0 or 1 in order to allow it to know what's going on.
1223 *
1224 * Notice that this is done at driver initialization and we still allow user
1225 * space to change the value during runtime without sanitizing it again. IGT
1226 * relies on being able to change i915.enable_fbc at runtime.
1227 */
1228static int intel_sanitize_fbc_option(struct drm_i915_private *dev_priv)
1229{
1230 if (i915.enable_fbc >= 0)
1231 return !!i915.enable_fbc;
1232
1233 if (IS_BROADWELL(dev_priv))
1234 return 1;
1235
1236 return 0;
1237}
1238
1224/** 1239/**
1225 * intel_fbc_init - Initialize FBC 1240 * intel_fbc_init - Initialize FBC
1226 * @dev_priv: the i915 device 1241 * @dev_priv: the i915 device
@@ -1238,6 +1253,9 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
1238 fbc->active = false; 1253 fbc->active = false;
1239 fbc->work.scheduled = false; 1254 fbc->work.scheduled = false;
1240 1255
1256 i915.enable_fbc = intel_sanitize_fbc_option(dev_priv);
1257 DRM_DEBUG_KMS("Sanitized enable_fbc value: %d\n", i915.enable_fbc);
1258
1241 if (!HAS_FBC(dev_priv)) { 1259 if (!HAS_FBC(dev_priv)) {
1242 fbc->no_fbc_reason = "unsupported by this chipset"; 1260 fbc->no_fbc_reason = "unsupported by this chipset";
1243 return; 1261 return;
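
intel_sanitize_fbc_option() above resolves the module parameter's -1 "auto" value into 0 or 1 once at init (Broadwell being the only chip defaulting to on), so both the driver's later checks and the DDX reading i915.enable_fbc see a plain boolean; runtime writes are deliberately left alone for IGT. The same policy, restated as a standalone helper with the parameter passed in rather than read from i915_params:

/* Condensed restatement of the sanitization in the hunk above. */
static int example_sanitize_fbc_option(int enable_fbc_param, bool is_broadwell)
{
	if (enable_fbc_param >= 0)
		return !!enable_fbc_param;	/* explicit user choice wins */

	return is_broadwell ? 1 : 0;		/* resolve "auto" (-1) per chip */
}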
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index ab8d09a81f14..86b00c6db1a6 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -150,10 +150,10 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
150 if (size * 2 < ggtt->stolen_usable_size) 150 if (size * 2 < ggtt->stolen_usable_size)
151 obj = i915_gem_object_create_stolen(dev, size); 151 obj = i915_gem_object_create_stolen(dev, size);
152 if (obj == NULL) 152 if (obj == NULL)
153 obj = i915_gem_alloc_object(dev, size); 153 obj = i915_gem_object_create(dev, size);
154 if (!obj) { 154 if (IS_ERR(obj)) {
155 DRM_ERROR("failed to allocate framebuffer\n"); 155 DRM_ERROR("failed to allocate framebuffer\n");
156 ret = -ENOMEM; 156 ret = PTR_ERR(obj);
157 goto out; 157 goto out;
158 } 158 }
159 159
@@ -186,9 +186,11 @@ static int intelfb_create(struct drm_fb_helper *helper,
186 struct i915_ggtt *ggtt = &dev_priv->ggtt; 186 struct i915_ggtt *ggtt = &dev_priv->ggtt;
187 struct fb_info *info; 187 struct fb_info *info;
188 struct drm_framebuffer *fb; 188 struct drm_framebuffer *fb;
189 struct i915_vma *vma;
189 struct drm_i915_gem_object *obj; 190 struct drm_i915_gem_object *obj;
190 int size, ret;
191 bool prealloc = false; 191 bool prealloc = false;
192 void *vaddr;
193 int ret;
192 194
193 if (intel_fb && 195 if (intel_fb &&
194 (sizes->fb_width > intel_fb->base.width || 196 (sizes->fb_width > intel_fb->base.width ||
@@ -214,7 +216,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
214 } 216 }
215 217
216 obj = intel_fb->obj; 218 obj = intel_fb->obj;
217 size = obj->base.size;
218 219
219 mutex_lock(&dev->struct_mutex); 220 mutex_lock(&dev->struct_mutex);
220 221
@@ -244,22 +245,23 @@ static int intelfb_create(struct drm_fb_helper *helper,
244 info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; 245 info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
245 info->fbops = &intelfb_ops; 246 info->fbops = &intelfb_ops;
246 247
248 vma = i915_gem_obj_to_ggtt(obj);
249
247 /* setup aperture base/size for vesafb takeover */ 250 /* setup aperture base/size for vesafb takeover */
248 info->apertures->ranges[0].base = dev->mode_config.fb_base; 251 info->apertures->ranges[0].base = dev->mode_config.fb_base;
249 info->apertures->ranges[0].size = ggtt->mappable_end; 252 info->apertures->ranges[0].size = ggtt->mappable_end;
250 253
251 info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj); 254 info->fix.smem_start = dev->mode_config.fb_base + vma->node.start;
252 info->fix.smem_len = size; 255 info->fix.smem_len = vma->node.size;
253 256
254 info->screen_base = 257 vaddr = i915_vma_pin_iomap(vma);
255 ioremap_wc(ggtt->mappable_base + i915_gem_obj_ggtt_offset(obj), 258 if (IS_ERR(vaddr)) {
256 size);
257 if (!info->screen_base) {
258 DRM_ERROR("Failed to remap framebuffer into virtual memory\n"); 259 DRM_ERROR("Failed to remap framebuffer into virtual memory\n");
259 ret = -ENOSPC; 260 ret = PTR_ERR(vaddr);
260 goto out_destroy_fbi; 261 goto out_destroy_fbi;
261 } 262 }
262 info->screen_size = size; 263 info->screen_base = vaddr;
264 info->screen_size = vma->node.size;
263 265
264 /* This driver doesn't need a VT switch to restore the mode on resume */ 266 /* This driver doesn't need a VT switch to restore the mode on resume */
265 info->skip_vt_switch = true; 267 info->skip_vt_switch = true;
@@ -287,7 +289,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
287out_destroy_fbi: 289out_destroy_fbi:
288 drm_fb_helper_release_fbi(helper); 290 drm_fb_helper_release_fbi(helper);
289out_unpin: 291out_unpin:
290 i915_gem_object_ggtt_unpin(obj); 292 intel_unpin_fb_obj(&ifbdev->fb->base, BIT(DRM_ROTATE_0));
291out_unlock: 293out_unlock:
292 mutex_unlock(&dev->struct_mutex); 294 mutex_unlock(&dev->struct_mutex);
293 return ret; 295 return ret;
@@ -360,23 +362,24 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
360 bool *enabled, int width, int height) 362 bool *enabled, int width, int height)
361{ 363{
362 struct drm_device *dev = fb_helper->dev; 364 struct drm_device *dev = fb_helper->dev;
365 unsigned long conn_configured, mask;
366 unsigned int count = min(fb_helper->connector_count, BITS_PER_LONG);
363 int i, j; 367 int i, j;
364 bool *save_enabled; 368 bool *save_enabled;
365 bool fallback = true; 369 bool fallback = true;
366 int num_connectors_enabled = 0; 370 int num_connectors_enabled = 0;
367 int num_connectors_detected = 0; 371 int num_connectors_detected = 0;
368 uint64_t conn_configured = 0, mask;
369 int pass = 0; 372 int pass = 0;
370 373
371 save_enabled = kcalloc(fb_helper->connector_count, sizeof(bool), 374 save_enabled = kcalloc(count, sizeof(bool), GFP_KERNEL);
372 GFP_KERNEL);
373 if (!save_enabled) 375 if (!save_enabled)
374 return false; 376 return false;
375 377
376 memcpy(save_enabled, enabled, fb_helper->connector_count); 378 memcpy(save_enabled, enabled, count);
377 mask = (1 << fb_helper->connector_count) - 1; 379 mask = BIT(count) - 1;
380 conn_configured = 0;
378retry: 381retry:
379 for (i = 0; i < fb_helper->connector_count; i++) { 382 for (i = 0; i < count; i++) {
380 struct drm_fb_helper_connector *fb_conn; 383 struct drm_fb_helper_connector *fb_conn;
381 struct drm_connector *connector; 384 struct drm_connector *connector;
382 struct drm_encoder *encoder; 385 struct drm_encoder *encoder;
@@ -386,7 +389,7 @@ retry:
386 fb_conn = fb_helper->connector_info[i]; 389 fb_conn = fb_helper->connector_info[i];
387 connector = fb_conn->connector; 390 connector = fb_conn->connector;
388 391
389 if (conn_configured & (1 << i)) 392 if (conn_configured & BIT(i))
390 continue; 393 continue;
391 394
392 if (pass == 0 && !connector->has_tile) 395 if (pass == 0 && !connector->has_tile)
@@ -398,7 +401,7 @@ retry:
398 if (!enabled[i]) { 401 if (!enabled[i]) {
399 DRM_DEBUG_KMS("connector %s not enabled, skipping\n", 402 DRM_DEBUG_KMS("connector %s not enabled, skipping\n",
400 connector->name); 403 connector->name);
401 conn_configured |= (1 << i); 404 conn_configured |= BIT(i);
402 continue; 405 continue;
403 } 406 }
404 407
@@ -417,7 +420,7 @@ retry:
417 DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n", 420 DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n",
418 connector->name); 421 connector->name);
419 enabled[i] = false; 422 enabled[i] = false;
420 conn_configured |= (1 << i); 423 conn_configured |= BIT(i);
421 continue; 424 continue;
422 } 425 }
423 426
@@ -430,14 +433,15 @@ retry:
430 intel_crtc->lut_b[j] = j; 433 intel_crtc->lut_b[j] = j;
431 } 434 }
432 435
433 new_crtc = intel_fb_helper_crtc(fb_helper, connector->state->crtc); 436 new_crtc = intel_fb_helper_crtc(fb_helper,
437 connector->state->crtc);
434 438
435 /* 439 /*
436 * Make sure we're not trying to drive multiple connectors 440 * Make sure we're not trying to drive multiple connectors
437 * with a single CRTC, since our cloning support may not 441 * with a single CRTC, since our cloning support may not
438 * match the BIOS. 442 * match the BIOS.
439 */ 443 */
440 for (j = 0; j < fb_helper->connector_count; j++) { 444 for (j = 0; j < count; j++) {
441 if (crtcs[j] == new_crtc) { 445 if (crtcs[j] == new_crtc) {
442 DRM_DEBUG_KMS("fallback: cloned configuration\n"); 446 DRM_DEBUG_KMS("fallback: cloned configuration\n");
443 goto bail; 447 goto bail;
@@ -488,15 +492,15 @@ retry:
488 } 492 }
489 crtcs[i] = new_crtc; 493 crtcs[i] = new_crtc;
490 494
491 DRM_DEBUG_KMS("connector %s on pipe %c [CRTC:%d]: %dx%d%s\n", 495 DRM_DEBUG_KMS("connector %s on [CRTC:%d:%s]: %dx%d%s\n",
492 connector->name, 496 connector->name,
493 pipe_name(to_intel_crtc(connector->state->crtc)->pipe),
494 connector->state->crtc->base.id, 497 connector->state->crtc->base.id,
498 connector->state->crtc->name,
495 modes[i]->hdisplay, modes[i]->vdisplay, 499 modes[i]->hdisplay, modes[i]->vdisplay,
496 modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" :""); 500 modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" :"");
497 501
498 fallback = false; 502 fallback = false;
499 conn_configured |= (1 << i); 503 conn_configured |= BIT(i);
500 } 504 }
501 505
502 if ((conn_configured & mask) != mask) { 506 if ((conn_configured & mask) != mask) {
@@ -520,7 +524,7 @@ retry:
520 if (fallback) { 524 if (fallback) {
521bail: 525bail:
522 DRM_DEBUG_KMS("Not using firmware configuration\n"); 526 DRM_DEBUG_KMS("Not using firmware configuration\n");
523 memcpy(enabled, save_enabled, fb_helper->connector_count); 527 memcpy(enabled, save_enabled, count);
524 kfree(save_enabled); 528 kfree(save_enabled);
525 return false; 529 return false;
526 } 530 }
@@ -536,8 +540,7 @@ static const struct drm_fb_helper_funcs intel_fb_helper_funcs = {
536 .fb_probe = intelfb_create, 540 .fb_probe = intelfb_create,
537}; 541};
538 542
539static void intel_fbdev_destroy(struct drm_device *dev, 543static void intel_fbdev_destroy(struct intel_fbdev *ifbdev)
540 struct intel_fbdev *ifbdev)
541{ 544{
542 /* We rely on the object-free to release the VMA pinning for 545 /* We rely on the object-free to release the VMA pinning for
543 * the info->screen_base mmaping. Leaking the VMA is simpler than 546 * the info->screen_base mmaping. Leaking the VMA is simpler than
@@ -550,9 +553,14 @@ static void intel_fbdev_destroy(struct drm_device *dev,
550 drm_fb_helper_fini(&ifbdev->helper); 553 drm_fb_helper_fini(&ifbdev->helper);
551 554
552 if (ifbdev->fb) { 555 if (ifbdev->fb) {
553 drm_framebuffer_unregister_private(&ifbdev->fb->base); 556 mutex_lock(&ifbdev->helper.dev->struct_mutex);
557 intel_unpin_fb_obj(&ifbdev->fb->base, BIT(DRM_ROTATE_0));
558 mutex_unlock(&ifbdev->helper.dev->struct_mutex);
559
554 drm_framebuffer_remove(&ifbdev->fb->base); 560 drm_framebuffer_remove(&ifbdev->fb->base);
555 } 561 }
562
563 kfree(ifbdev);
556} 564}
557 565
558/* 566/*
@@ -685,9 +693,9 @@ out:
685 693
686static void intel_fbdev_suspend_worker(struct work_struct *work) 694static void intel_fbdev_suspend_worker(struct work_struct *work)
687{ 695{
688 intel_fbdev_set_suspend(container_of(work, 696 intel_fbdev_set_suspend(&container_of(work,
689 struct drm_i915_private, 697 struct drm_i915_private,
690 fbdev_suspend_work)->dev, 698 fbdev_suspend_work)->drm,
691 FBINFO_STATE_RUNNING, 699 FBINFO_STATE_RUNNING,
692 true); 700 true);
693} 701}
@@ -695,7 +703,7 @@ static void intel_fbdev_suspend_worker(struct work_struct *work)
695int intel_fbdev_init(struct drm_device *dev) 703int intel_fbdev_init(struct drm_device *dev)
696{ 704{
697 struct intel_fbdev *ifbdev; 705 struct intel_fbdev *ifbdev;
698 struct drm_i915_private *dev_priv = dev->dev_private; 706 struct drm_i915_private *dev_priv = to_i915(dev);
699 int ret; 707 int ret;
700 708
701 if (WARN_ON(INTEL_INFO(dev)->num_pipes == 0)) 709 if (WARN_ON(INTEL_INFO(dev)->num_pipes == 0))
@@ -717,8 +725,6 @@ int intel_fbdev_init(struct drm_device *dev)
717 return ret; 725 return ret;
718 } 726 }
719 727
720 ifbdev->helper.atomic = true;
721
722 dev_priv->fbdev = ifbdev; 728 dev_priv->fbdev = ifbdev;
723 INIT_WORK(&dev_priv->fbdev_suspend_work, intel_fbdev_suspend_worker); 729 INIT_WORK(&dev_priv->fbdev_suspend_work, intel_fbdev_suspend_worker);
724 730
@@ -729,38 +735,50 @@ int intel_fbdev_init(struct drm_device *dev)
729 735
730static void intel_fbdev_initial_config(void *data, async_cookie_t cookie) 736static void intel_fbdev_initial_config(void *data, async_cookie_t cookie)
731{ 737{
732 struct drm_i915_private *dev_priv = data; 738 struct intel_fbdev *ifbdev = data;
733 struct intel_fbdev *ifbdev = dev_priv->fbdev;
734 739
735 /* Due to peculiar init order wrt to hpd handling this is separate. */ 740 /* Due to peculiar init order wrt to hpd handling this is separate. */
736 if (drm_fb_helper_initial_config(&ifbdev->helper, 741 if (drm_fb_helper_initial_config(&ifbdev->helper,
737 ifbdev->preferred_bpp)) 742 ifbdev->preferred_bpp))
738 intel_fbdev_fini(dev_priv->dev); 743 intel_fbdev_fini(ifbdev->helper.dev);
739} 744}
740 745
741void intel_fbdev_initial_config_async(struct drm_device *dev) 746void intel_fbdev_initial_config_async(struct drm_device *dev)
742{ 747{
743 async_schedule(intel_fbdev_initial_config, to_i915(dev)); 748 struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
749
750 ifbdev->cookie = async_schedule(intel_fbdev_initial_config, ifbdev);
751}
752
753static void intel_fbdev_sync(struct intel_fbdev *ifbdev)
754{
755 if (!ifbdev->cookie)
756 return;
757
758 /* Only serialises with all preceding async calls, hence +1 */
759 async_synchronize_cookie(ifbdev->cookie + 1);
760 ifbdev->cookie = 0;
744} 761}
745 762
746void intel_fbdev_fini(struct drm_device *dev) 763void intel_fbdev_fini(struct drm_device *dev)
747{ 764{
748 struct drm_i915_private *dev_priv = dev->dev_private; 765 struct drm_i915_private *dev_priv = to_i915(dev);
749 if (!dev_priv->fbdev) 766 struct intel_fbdev *ifbdev = dev_priv->fbdev;
767
768 if (!ifbdev)
750 return; 769 return;
751 770
752 flush_work(&dev_priv->fbdev_suspend_work); 771 flush_work(&dev_priv->fbdev_suspend_work);
753
754 if (!current_is_async()) 772 if (!current_is_async())
755 async_synchronize_full(); 773 intel_fbdev_sync(ifbdev);
756 intel_fbdev_destroy(dev, dev_priv->fbdev); 774
757 kfree(dev_priv->fbdev); 775 intel_fbdev_destroy(ifbdev);
758 dev_priv->fbdev = NULL; 776 dev_priv->fbdev = NULL;
759} 777}
760 778
761void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous) 779void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous)
762{ 780{
763 struct drm_i915_private *dev_priv = dev->dev_private; 781 struct drm_i915_private *dev_priv = to_i915(dev);
764 struct intel_fbdev *ifbdev = dev_priv->fbdev; 782 struct intel_fbdev *ifbdev = dev_priv->fbdev;
765 struct fb_info *info; 783 struct fb_info *info;
766 784
@@ -809,7 +827,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
809 827
810void intel_fbdev_output_poll_changed(struct drm_device *dev) 828void intel_fbdev_output_poll_changed(struct drm_device *dev)
811{ 829{
812 struct drm_i915_private *dev_priv = dev->dev_private; 830 struct drm_i915_private *dev_priv = to_i915(dev);
813 if (dev_priv->fbdev) 831 if (dev_priv->fbdev)
814 drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper); 832 drm_fb_helper_hotplug_event(&dev_priv->fbdev->helper);
815} 833}
@@ -817,13 +835,15 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev)
817void intel_fbdev_restore_mode(struct drm_device *dev) 835void intel_fbdev_restore_mode(struct drm_device *dev)
818{ 836{
819 int ret; 837 int ret;
820 struct drm_i915_private *dev_priv = dev->dev_private; 838 struct drm_i915_private *dev_priv = to_i915(dev);
821 struct intel_fbdev *ifbdev = dev_priv->fbdev; 839 struct intel_fbdev *ifbdev = dev_priv->fbdev;
822 struct drm_fb_helper *fb_helper; 840 struct drm_fb_helper *fb_helper;
823 841
824 if (!ifbdev) 842 if (!ifbdev)
825 return; 843 return;
826 844
845 intel_fbdev_sync(ifbdev);
846
827 fb_helper = &ifbdev->helper; 847 fb_helper = &ifbdev->helper;
828 848
829 ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper); 849 ret = drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
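
intel_fbdev now stores the async_schedule() cookie of its initial-config work and, in intel_fbdev_sync(), waits on exactly that call instead of async_synchronize_full(); the +1 is needed because async_synchronize_cookie() only serialises with calls scheduled strictly before the given cookie. The same pattern as a standalone sketch, using a hypothetical fbdev-like struct:

#include <linux/async.h>

struct example_fbdev {
	async_cookie_t cookie;
	/* ... */
};

static void example_initial_config(void *data, async_cookie_t cookie)
{
	struct example_fbdev *fbdev = data;

	/* heavy probing / initial modeset work runs here, off the
	 * driver load path */
	(void)fbdev;
}

static void example_schedule(struct example_fbdev *fbdev)
{
	fbdev->cookie = async_schedule(example_initial_config, fbdev);
}

static void example_sync(struct example_fbdev *fbdev)
{
	if (!fbdev->cookie)
		return;

	/* async_synchronize_cookie() waits for calls *before* the given
	 * cookie, so +1 also covers the call we scheduled ourselves. */
	async_synchronize_cookie(fbdev->cookie + 1);
	fbdev->cookie = 0;
}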
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c
index 9be839a242f9..2aa744081f09 100644
--- a/drivers/gpu/drm/i915/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c
@@ -50,7 +50,7 @@
50 50
51static bool ivb_can_enable_err_int(struct drm_device *dev) 51static bool ivb_can_enable_err_int(struct drm_device *dev)
52{ 52{
53 struct drm_i915_private *dev_priv = dev->dev_private; 53 struct drm_i915_private *dev_priv = to_i915(dev);
54 struct intel_crtc *crtc; 54 struct intel_crtc *crtc;
55 enum pipe pipe; 55 enum pipe pipe;
56 56
@@ -68,7 +68,7 @@ static bool ivb_can_enable_err_int(struct drm_device *dev)
68 68
69static bool cpt_can_enable_serr_int(struct drm_device *dev) 69static bool cpt_can_enable_serr_int(struct drm_device *dev)
70{ 70{
71 struct drm_i915_private *dev_priv = dev->dev_private; 71 struct drm_i915_private *dev_priv = to_i915(dev);
72 enum pipe pipe; 72 enum pipe pipe;
73 struct intel_crtc *crtc; 73 struct intel_crtc *crtc;
74 74
@@ -105,7 +105,7 @@ static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
105 enum pipe pipe, 105 enum pipe pipe,
106 bool enable, bool old) 106 bool enable, bool old)
107{ 107{
108 struct drm_i915_private *dev_priv = dev->dev_private; 108 struct drm_i915_private *dev_priv = to_i915(dev);
109 i915_reg_t reg = PIPESTAT(pipe); 109 i915_reg_t reg = PIPESTAT(pipe);
110 u32 pipestat = I915_READ(reg) & 0xffff0000; 110 u32 pipestat = I915_READ(reg) & 0xffff0000;
111 111
@@ -123,7 +123,7 @@ static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
123static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev, 123static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
124 enum pipe pipe, bool enable) 124 enum pipe pipe, bool enable)
125{ 125{
126 struct drm_i915_private *dev_priv = dev->dev_private; 126 struct drm_i915_private *dev_priv = to_i915(dev);
127 uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN : 127 uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
128 DE_PIPEB_FIFO_UNDERRUN; 128 DE_PIPEB_FIFO_UNDERRUN;
129 129
@@ -154,7 +154,7 @@ static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
154 enum pipe pipe, 154 enum pipe pipe,
155 bool enable, bool old) 155 bool enable, bool old)
156{ 156{
157 struct drm_i915_private *dev_priv = dev->dev_private; 157 struct drm_i915_private *dev_priv = to_i915(dev);
158 if (enable) { 158 if (enable) {
159 I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe)); 159 I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
160 160
@@ -176,7 +176,7 @@ static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
176static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev, 176static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
177 enum pipe pipe, bool enable) 177 enum pipe pipe, bool enable)
178{ 178{
179 struct drm_i915_private *dev_priv = dev->dev_private; 179 struct drm_i915_private *dev_priv = to_i915(dev);
180 180
181 if (enable) 181 if (enable)
182 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN); 182 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_FIFO_UNDERRUN);
@@ -188,7 +188,7 @@ static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
188 enum transcoder pch_transcoder, 188 enum transcoder pch_transcoder,
189 bool enable) 189 bool enable)
190{ 190{
191 struct drm_i915_private *dev_priv = dev->dev_private; 191 struct drm_i915_private *dev_priv = to_i915(dev);
192 uint32_t bit = (pch_transcoder == TRANSCODER_A) ? 192 uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
193 SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER; 193 SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
194 194
@@ -220,7 +220,7 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
220 enum transcoder pch_transcoder, 220 enum transcoder pch_transcoder,
221 bool enable, bool old) 221 bool enable, bool old)
222{ 222{
223 struct drm_i915_private *dev_priv = dev->dev_private; 223 struct drm_i915_private *dev_priv = to_i915(dev);
224 224
225 if (enable) { 225 if (enable) {
226 I915_WRITE(SERR_INT, 226 I915_WRITE(SERR_INT,
@@ -244,7 +244,7 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
244static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev, 244static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
245 enum pipe pipe, bool enable) 245 enum pipe pipe, bool enable)
246{ 246{
247 struct drm_i915_private *dev_priv = dev->dev_private; 247 struct drm_i915_private *dev_priv = to_i915(dev);
248 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 248 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
249 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 249 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
250 bool old; 250 bool old;
@@ -289,7 +289,7 @@ bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
289 bool ret; 289 bool ret;
290 290
291 spin_lock_irqsave(&dev_priv->irq_lock, flags); 291 spin_lock_irqsave(&dev_priv->irq_lock, flags);
292 ret = __intel_set_cpu_fifo_underrun_reporting(dev_priv->dev, pipe, 292 ret = __intel_set_cpu_fifo_underrun_reporting(&dev_priv->drm, pipe,
293 enable); 293 enable);
294 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 294 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
295 295
@@ -334,10 +334,12 @@ bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
334 intel_crtc->pch_fifo_underrun_disabled = !enable; 334 intel_crtc->pch_fifo_underrun_disabled = !enable;
335 335
336 if (HAS_PCH_IBX(dev_priv)) 336 if (HAS_PCH_IBX(dev_priv))
337 ibx_set_fifo_underrun_reporting(dev_priv->dev, pch_transcoder, 337 ibx_set_fifo_underrun_reporting(&dev_priv->drm,
338 pch_transcoder,
338 enable); 339 enable);
339 else 340 else
340 cpt_set_fifo_underrun_reporting(dev_priv->dev, pch_transcoder, 341 cpt_set_fifo_underrun_reporting(&dev_priv->drm,
342 pch_transcoder,
341 enable, old); 343 enable, old);
342 344
343 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 345 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
@@ -405,7 +407,7 @@ void intel_check_cpu_fifo_underruns(struct drm_i915_private *dev_priv)
405 407
406 spin_lock_irq(&dev_priv->irq_lock); 408 spin_lock_irq(&dev_priv->irq_lock);
407 409
408 for_each_intel_crtc(dev_priv->dev, crtc) { 410 for_each_intel_crtc(&dev_priv->drm, crtc) {
409 if (crtc->cpu_fifo_underrun_disabled) 411 if (crtc->cpu_fifo_underrun_disabled)
410 continue; 412 continue;
411 413
@@ -432,7 +434,7 @@ void intel_check_pch_fifo_underruns(struct drm_i915_private *dev_priv)
432 434
433 spin_lock_irq(&dev_priv->irq_lock); 435 spin_lock_irq(&dev_priv->irq_lock);
434 436
435 for_each_intel_crtc(dev_priv->dev, crtc) { 437 for_each_intel_crtc(&dev_priv->drm, crtc) {
436 if (crtc->pch_fifo_underrun_disabled) 438 if (crtc->pch_fifo_underrun_disabled)
437 continue; 439 continue;
438 440
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 9d79c4c3e256..3e3e743740c0 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -26,6 +26,7 @@
26 26
27#include "intel_guc_fwif.h" 27#include "intel_guc_fwif.h"
28#include "i915_guc_reg.h" 28#include "i915_guc_reg.h"
29#include "intel_ringbuffer.h"
29 30
30struct drm_i915_gem_request; 31struct drm_i915_gem_request;
31 32
@@ -48,14 +49,23 @@ struct drm_i915_gem_request;
48 * queue (a circular array of work items), again described in the process 49 * queue (a circular array of work items), again described in the process
49 * descriptor. Work queue pages are mapped momentarily as required. 50 * descriptor. Work queue pages are mapped momentarily as required.
50 * 51 *
51 * Finally, we also keep a few statistics here, including the number of 52 * We also keep a few statistics on failures. Ideally, these should all
52 * submissions to each engine, and a record of the last submission failure 53 * be zero!
53 * (if any). 54 * no_wq_space: times that the submission pre-check found no space was
55 * available in the work queue (note, the queue is shared,
56 * not per-engine). It is OK for this to be nonzero, but
57 * it should not be huge!
58 * q_fail: failed to enqueue a work item. This should never happen,
59 * because we check for space beforehand.
60 * b_fail: failed to ring the doorbell. This should never happen, unless
61 * somehow the hardware misbehaves, or maybe if the GuC firmware
62 * crashes? We probably need to reset the GPU to recover.
63 * retcode: errno from last guc_submit()
54 */ 64 */
55struct i915_guc_client { 65struct i915_guc_client {
56 struct drm_i915_gem_object *client_obj; 66 struct drm_i915_gem_object *client_obj;
57 void *client_base; /* first page (only) of above */ 67 void *client_base; /* first page (only) of above */
58 struct intel_context *owner; 68 struct i915_gem_context *owner;
59 struct intel_guc *guc; 69 struct intel_guc *guc;
60 uint32_t priority; 70 uint32_t priority;
61 uint32_t ctx_index; 71 uint32_t ctx_index;
@@ -71,12 +81,13 @@ struct i915_guc_client {
71 uint32_t wq_tail; 81 uint32_t wq_tail;
72 uint32_t unused; /* Was 'wq_head' */ 82 uint32_t unused; /* Was 'wq_head' */
73 83
74 /* GuC submission statistics & status */ 84 uint32_t no_wq_space;
75 uint64_t submissions[GUC_MAX_ENGINES_NUM]; 85 uint32_t q_fail; /* No longer used */
76 uint32_t q_fail;
77 uint32_t b_fail; 86 uint32_t b_fail;
78 int retcode; 87 int retcode;
79 int spare; /* pad to 32 DWords */ 88
89 /* Per-engine counts of GuC submissions */
90 uint64_t submissions[I915_NUM_ENGINES];
80}; 91};
81 92
82enum intel_guc_fw_status { 93enum intel_guc_fw_status {
@@ -133,25 +144,24 @@ struct intel_guc {
133 uint32_t action_fail; /* Total number of failures */ 144 uint32_t action_fail; /* Total number of failures */
134 int32_t action_err; /* Last error code */ 145 int32_t action_err; /* Last error code */
135 146
136 uint64_t submissions[GUC_MAX_ENGINES_NUM]; 147 uint64_t submissions[I915_NUM_ENGINES];
137 uint32_t last_seqno[GUC_MAX_ENGINES_NUM]; 148 uint32_t last_seqno[I915_NUM_ENGINES];
138}; 149};
139 150
140/* intel_guc_loader.c */ 151/* intel_guc_loader.c */
141extern void intel_guc_ucode_init(struct drm_device *dev); 152extern void intel_guc_init(struct drm_device *dev);
142extern int intel_guc_ucode_load(struct drm_device *dev); 153extern int intel_guc_setup(struct drm_device *dev);
143extern void intel_guc_ucode_fini(struct drm_device *dev); 154extern void intel_guc_fini(struct drm_device *dev);
144extern const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status); 155extern const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status);
145extern int intel_guc_suspend(struct drm_device *dev); 156extern int intel_guc_suspend(struct drm_device *dev);
146extern int intel_guc_resume(struct drm_device *dev); 157extern int intel_guc_resume(struct drm_device *dev);
147 158
148/* i915_guc_submission.c */ 159/* i915_guc_submission.c */
149int i915_guc_submission_init(struct drm_device *dev); 160int i915_guc_submission_init(struct drm_i915_private *dev_priv);
150int i915_guc_submission_enable(struct drm_device *dev); 161int i915_guc_submission_enable(struct drm_i915_private *dev_priv);
151int i915_guc_submit(struct i915_guc_client *client, 162int i915_guc_wq_check_space(struct drm_i915_gem_request *rq);
152 struct drm_i915_gem_request *rq); 163int i915_guc_submit(struct drm_i915_gem_request *rq);
153void i915_guc_submission_disable(struct drm_device *dev); 164void i915_guc_submission_disable(struct drm_i915_private *dev_priv);
154void i915_guc_submission_fini(struct drm_device *dev); 165void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
155int i915_guc_wq_check_space(struct i915_guc_client *client);
156 166
157#endif 167#endif
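The new fields replace the old submission/failure bookkeeping with explicit counters, sized by the driver's engine count (I915_NUM_ENGINES) rather than the firmware maximum. A hypothetical sketch of how counters of this shape are typically bumped along a submission path; the flow, names and error values below are invented, not the driver's:

#include <stdint.h>
#include <stdio.h>

enum toy_engine { TOY_RCS, TOY_BCS, TOY_VCS, TOY_NUM_ENGINES };

struct toy_guc_client {
	uint32_t no_wq_space;                  /* pre-check found the queue full */
	uint32_t b_fail;                       /* doorbell ring failed           */
	int      retcode;                      /* errno from last submit         */
	uint64_t submissions[TOY_NUM_ENGINES]; /* per-engine success count       */
};

static int toy_guc_submit(struct toy_guc_client *client, enum toy_engine e,
			  int wq_has_space, int doorbell_ok)
{
	if (!wq_has_space) {
		client->no_wq_space++;
		return client->retcode = -11;  /* EAGAIN-style result */
	}
	if (!doorbell_ok) {
		client->b_fail++;
		return client->retcode = -5;   /* EIO-style result */
	}
	client->submissions[e]++;
	return client->retcode = 0;
}

int main(void)
{
	struct toy_guc_client c = {0};

	toy_guc_submit(&c, TOY_RCS, 1, 1);
	toy_guc_submit(&c, TOY_RCS, 0, 1);
	printf("rcs submissions=%llu no_wq_space=%u\n",
	       (unsigned long long)c.submissions[TOY_RCS], c.no_wq_space);
	return 0;
}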
diff --git a/drivers/gpu/drm/i915/intel_guc_fwif.h b/drivers/gpu/drm/i915/intel_guc_fwif.h
index 2de57ffe5e18..944786d7075b 100644
--- a/drivers/gpu/drm/i915/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/intel_guc_fwif.h
@@ -71,7 +71,8 @@
71#define WQ_WORKLOAD_TOUCH (2 << WQ_WORKLOAD_SHIFT) 71#define WQ_WORKLOAD_TOUCH (2 << WQ_WORKLOAD_SHIFT)
72 72
73#define WQ_RING_TAIL_SHIFT 20 73#define WQ_RING_TAIL_SHIFT 20
74#define WQ_RING_TAIL_MASK (0x7FF << WQ_RING_TAIL_SHIFT) 74#define WQ_RING_TAIL_MAX 0x7FF /* 2^11 QWords */
75#define WQ_RING_TAIL_MASK (WQ_RING_TAIL_MAX << WQ_RING_TAIL_SHIFT)
75 76
76#define GUC_DOORBELL_ENABLED 1 77#define GUC_DOORBELL_ENABLED 1
77#define GUC_DOORBELL_DISABLED 0 78#define GUC_DOORBELL_DISABLED 0
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
index 876e5da44c4e..605c69658d2c 100644
--- a/drivers/gpu/drm/i915/intel_guc_loader.c
+++ b/drivers/gpu/drm/i915/intel_guc_loader.c
@@ -59,9 +59,15 @@
59 * 59 *
60 */ 60 */
61 61
62#define I915_SKL_GUC_UCODE "i915/skl_guc_ver6.bin" 62#define I915_SKL_GUC_UCODE "i915/skl_guc_ver6_1.bin"
63MODULE_FIRMWARE(I915_SKL_GUC_UCODE); 63MODULE_FIRMWARE(I915_SKL_GUC_UCODE);
64 64
65#define I915_BXT_GUC_UCODE "i915/bxt_guc_ver8_7.bin"
66MODULE_FIRMWARE(I915_BXT_GUC_UCODE);
67
68#define I915_KBL_GUC_UCODE "i915/kbl_guc_ver9_14.bin"
69MODULE_FIRMWARE(I915_KBL_GUC_UCODE);
70
65/* User-friendly representation of an enum */ 71/* User-friendly representation of an enum */
66const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status) 72const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status)
67{ 73{
@@ -84,7 +90,7 @@ static void direct_interrupts_to_host(struct drm_i915_private *dev_priv)
84 struct intel_engine_cs *engine; 90 struct intel_engine_cs *engine;
85 int irqs; 91 int irqs;
86 92
87 /* tell all command streamers NOT to forward interrupts and vblank to GuC */ 93 /* tell all command streamers NOT to forward interrupts or vblank to GuC */
88 irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER); 94 irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
89 irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING); 95 irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
90 for_each_engine(engine, dev_priv) 96 for_each_engine(engine, dev_priv)
@@ -100,10 +106,10 @@ static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv)
100{ 106{
101 struct intel_engine_cs *engine; 107 struct intel_engine_cs *engine;
102 int irqs; 108 int irqs;
109 u32 tmp;
103 110
104 /* tell all command streamers to forward interrupts and vblank to GuC */ 111 /* tell all command streamers to forward interrupts (but not vblank) to GuC */
105 irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_ALWAYS); 112 irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
106 irqs |= _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
107 for_each_engine(engine, dev_priv) 113 for_each_engine(engine, dev_priv)
108 I915_WRITE(RING_MODE_GEN7(engine), irqs); 114 I915_WRITE(RING_MODE_GEN7(engine), irqs);
109 115
@@ -114,6 +120,16 @@ static void direct_interrupts_to_guc(struct drm_i915_private *dev_priv)
114 I915_WRITE(GUC_BCS_RCS_IER, ~irqs); 120 I915_WRITE(GUC_BCS_RCS_IER, ~irqs);
115 I915_WRITE(GUC_VCS2_VCS1_IER, ~irqs); 121 I915_WRITE(GUC_VCS2_VCS1_IER, ~irqs);
116 I915_WRITE(GUC_WD_VECS_IER, ~irqs); 122 I915_WRITE(GUC_WD_VECS_IER, ~irqs);
123
124 /*
 125 * If GuC has routed PM interrupts to itself, don't keep the redirect
 126 * bit itself, but do keep the other interrupts that GuC leaves unmasked.
127 */
128 tmp = I915_READ(GEN6_PMINTRMSK);
129 if (tmp & GEN8_PMINTR_REDIRECT_TO_NON_DISP) {
130 dev_priv->rps.pm_intr_keep |= ~(tmp & ~GEN8_PMINTR_REDIRECT_TO_NON_DISP);
131 dev_priv->rps.pm_intr_keep &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
132 }
117} 133}
118 134
119static u32 get_gttype(struct drm_i915_private *dev_priv) 135static u32 get_gttype(struct drm_i915_private *dev_priv)
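The new pm_intr_keep logic records which PM interrupts the GuC has left unmasked so the host side never masks them later, while dropping the redirect control bit itself from the kept set. A toy sketch of the same bit arithmetic, with invented bit values standing in for the real register layout:

#include <stdint.h>
#include <stdio.h>

#define TOY_REDIRECT_TO_GUC (1u << 31) /* stand-in for GEN8_PMINTR_REDIRECT_TO_NON_DISP */

static uint32_t update_pm_intr_keep(uint32_t pmintrmsk, uint32_t keep)
{
	if (pmintrmsk & TOY_REDIRECT_TO_GUC) {
		keep |= ~(pmintrmsk & ~TOY_REDIRECT_TO_GUC); /* bits GuC unmasked */
		keep &= ~TOY_REDIRECT_TO_GUC;                /* drop the flag bit */
	}
	return keep;
}

int main(void)
{
	/* Mask register: redirect enabled, everything masked except bits 0 and 1. */
	uint32_t msk = TOY_REDIRECT_TO_GUC | 0x7ffffffcu;

	printf("pm_intr_keep = 0x%08x\n", update_pm_intr_keep(msk, 0)); /* 0x00000003 */
	return 0;
}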
@@ -281,13 +297,24 @@ static int guc_ucode_xfer_dma(struct drm_i915_private *dev_priv)
281 return ret; 297 return ret;
282} 298}
283 299
300static u32 guc_wopcm_size(struct drm_i915_private *dev_priv)
301{
302 u32 wopcm_size = GUC_WOPCM_TOP;
303
304 /* On BXT, the top of WOPCM is reserved for RC6 context */
305 if (IS_BROXTON(dev_priv))
306 wopcm_size -= BXT_GUC_WOPCM_RC6_RESERVED;
307
308 return wopcm_size;
309}
310
284/* 311/*
285 * Load the GuC firmware blob into the MinuteIA. 312 * Load the GuC firmware blob into the MinuteIA.
286 */ 313 */
287static int guc_ucode_xfer(struct drm_i915_private *dev_priv) 314static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
288{ 315{
289 struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; 316 struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
290 struct drm_device *dev = dev_priv->dev; 317 struct drm_device *dev = &dev_priv->drm;
291 int ret; 318 int ret;
292 319
293 ret = i915_gem_object_set_to_gtt_domain(guc_fw->guc_fw_obj, false); 320 ret = i915_gem_object_set_to_gtt_domain(guc_fw->guc_fw_obj, false);
@@ -308,7 +335,7 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
308 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 335 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
309 336
310 /* init WOPCM */ 337 /* init WOPCM */
311 I915_WRITE(GUC_WOPCM_SIZE, GUC_WOPCM_SIZE_VALUE); 338 I915_WRITE(GUC_WOPCM_SIZE, guc_wopcm_size(dev_priv));
312 I915_WRITE(DMA_GUC_WOPCM_OFFSET, GUC_WOPCM_OFFSET_VALUE); 339 I915_WRITE(DMA_GUC_WOPCM_OFFSET, GUC_WOPCM_OFFSET_VALUE);
313 340
314 /* Enable MIA caching. GuC clock gating is disabled. */ 341 /* Enable MIA caching. GuC clock gating is disabled. */
@@ -372,66 +399,63 @@ static int i915_reset_guc(struct drm_i915_private *dev_priv)
372} 399}
373 400
374/** 401/**
375 * intel_guc_ucode_load() - load GuC uCode into the device 402 * intel_guc_setup() - finish preparing the GuC for activity
376 * @dev: drm device 403 * @dev: drm device
377 * 404 *
378 * Called from gem_init_hw() during driver loading and also after a GPU reset. 405 * Called from gem_init_hw() during driver loading and also after a GPU reset.
379 * 406 *
 407 * The main action required here is to load the GuC uCode into the device.
380 * The firmware image should have already been fetched into memory by the 408 * The firmware image should have already been fetched into memory by the
381 * earlier call to intel_guc_ucode_init(), so here we need only check that 409 * earlier call to intel_guc_init(), so here we need only check that worked,
382 * is succeeded, and then transfer the image to the h/w. 410 * and then transfer the image to the h/w.
383 * 411 *
384 * Return: non-zero code on error 412 * Return: non-zero code on error
385 */ 413 */
386int intel_guc_ucode_load(struct drm_device *dev) 414int intel_guc_setup(struct drm_device *dev)
387{ 415{
388 struct drm_i915_private *dev_priv = dev->dev_private; 416 struct drm_i915_private *dev_priv = to_i915(dev);
389 struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; 417 struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
390 int retries, err = 0; 418 const char *fw_path = guc_fw->guc_fw_path;
419 int retries, ret, err;
391 420
392 if (!i915.enable_guc_submission) 421 DRM_DEBUG_DRIVER("GuC fw status: path %s, fetch %s, load %s\n",
393 return 0; 422 fw_path,
394
395 DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
396 intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status), 423 intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
397 intel_guc_fw_status_repr(guc_fw->guc_fw_load_status)); 424 intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
398 425
399 direct_interrupts_to_host(dev_priv); 426 /* Loading forbidden, or no firmware to load? */
400 427 if (!i915.enable_guc_loading) {
401 if (guc_fw->guc_fw_fetch_status == GUC_FIRMWARE_NONE) 428 err = 0;
402 return 0;
403
404 if (guc_fw->guc_fw_fetch_status == GUC_FIRMWARE_SUCCESS &&
405 guc_fw->guc_fw_load_status == GUC_FIRMWARE_FAIL)
406 return -ENOEXEC;
407
408 guc_fw->guc_fw_load_status = GUC_FIRMWARE_PENDING;
409
410 DRM_DEBUG_DRIVER("GuC fw fetch status %s\n",
411 intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status));
412
413 switch (guc_fw->guc_fw_fetch_status) {
414 case GUC_FIRMWARE_FAIL:
415 /* something went wrong :( */
416 err = -EIO;
417 goto fail; 429 goto fail;
418 430 } else if (fw_path == NULL) {
419 case GUC_FIRMWARE_NONE: 431 /* Device is known to have no uCode (e.g. no GuC) */
420 case GUC_FIRMWARE_PENDING:
421 default:
422 /* "can't happen" */
423 WARN_ONCE(1, "GuC fw %s invalid guc_fw_fetch_status %s [%d]\n",
424 guc_fw->guc_fw_path,
425 intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
426 guc_fw->guc_fw_fetch_status);
427 err = -ENXIO; 432 err = -ENXIO;
428 goto fail; 433 goto fail;
434 } else if (*fw_path == '\0') {
435 /* Device has a GuC but we don't know what f/w to load? */
436 DRM_INFO("No GuC firmware known for this platform\n");
437 err = -ENODEV;
438 goto fail;
439 }
429 440
430 case GUC_FIRMWARE_SUCCESS: 441 /* Fetch failed, or already fetched but failed to load? */
431 break; 442 if (guc_fw->guc_fw_fetch_status != GUC_FIRMWARE_SUCCESS) {
443 err = -EIO;
444 goto fail;
445 } else if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_FAIL) {
446 err = -ENOEXEC;
447 goto fail;
432 } 448 }
433 449
434 err = i915_guc_submission_init(dev); 450 direct_interrupts_to_host(dev_priv);
451
452 guc_fw->guc_fw_load_status = GUC_FIRMWARE_PENDING;
453
454 DRM_DEBUG_DRIVER("GuC fw status: fetch %s, load %s\n",
455 intel_guc_fw_status_repr(guc_fw->guc_fw_fetch_status),
456 intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
457
458 err = i915_guc_submission_init(dev_priv);
435 if (err) 459 if (err)
436 goto fail; 460 goto fail;
437 461
@@ -448,7 +472,7 @@ int intel_guc_ucode_load(struct drm_device *dev)
448 */ 472 */
449 err = i915_reset_guc(dev_priv); 473 err = i915_reset_guc(dev_priv);
450 if (err) { 474 if (err) {
451 DRM_ERROR("GuC reset failed, err %d\n", err); 475 DRM_ERROR("GuC reset failed: %d\n", err);
452 goto fail; 476 goto fail;
453 } 477 }
454 478
@@ -459,8 +483,8 @@ int intel_guc_ucode_load(struct drm_device *dev)
459 if (--retries == 0) 483 if (--retries == 0)
460 goto fail; 484 goto fail;
461 485
462 DRM_INFO("GuC fw load failed, err %d; will reset and " 486 DRM_INFO("GuC fw load failed: %d; will reset and "
463 "retry %d more time(s)\n", err, retries); 487 "retry %d more time(s)\n", err, retries);
464 } 488 }
465 489
466 guc_fw->guc_fw_load_status = GUC_FIRMWARE_SUCCESS; 490 guc_fw->guc_fw_load_status = GUC_FIRMWARE_SUCCESS;
@@ -470,10 +494,7 @@ int intel_guc_ucode_load(struct drm_device *dev)
470 intel_guc_fw_status_repr(guc_fw->guc_fw_load_status)); 494 intel_guc_fw_status_repr(guc_fw->guc_fw_load_status));
471 495
472 if (i915.enable_guc_submission) { 496 if (i915.enable_guc_submission) {
473 /* The execbuf_client will be recreated. Release it first. */ 497 err = i915_guc_submission_enable(dev_priv);
474 i915_guc_submission_disable(dev);
475
476 err = i915_guc_submission_enable(dev);
477 if (err) 498 if (err)
478 goto fail; 499 goto fail;
479 direct_interrupts_to_guc(dev_priv); 500 direct_interrupts_to_guc(dev_priv);
@@ -482,15 +503,50 @@ int intel_guc_ucode_load(struct drm_device *dev)
482 return 0; 503 return 0;
483 504
484fail: 505fail:
485 DRM_ERROR("GuC firmware load failed, err %d\n", err);
486 if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_PENDING) 506 if (guc_fw->guc_fw_load_status == GUC_FIRMWARE_PENDING)
487 guc_fw->guc_fw_load_status = GUC_FIRMWARE_FAIL; 507 guc_fw->guc_fw_load_status = GUC_FIRMWARE_FAIL;
488 508
489 direct_interrupts_to_host(dev_priv); 509 direct_interrupts_to_host(dev_priv);
490 i915_guc_submission_disable(dev); 510 i915_guc_submission_disable(dev_priv);
491 i915_guc_submission_fini(dev); 511 i915_guc_submission_fini(dev_priv);
492 512
493 return err; 513 /*
514 * We've failed to load the firmware :(
515 *
516 * Decide whether to disable GuC submission and fall back to
517 * execlist mode, and whether to hide the error by returning
518 * zero or to return -EIO, which the caller will treat as a
519 * nonfatal error (i.e. it doesn't prevent driver load, but
520 * marks the GPU as wedged until reset).
521 */
522 if (i915.enable_guc_loading > 1) {
523 ret = -EIO;
524 } else if (i915.enable_guc_submission > 1) {
525 ret = -EIO;
526 } else {
527 ret = 0;
528 }
529
530 if (err == 0 && !HAS_GUC_UCODE(dev))
531 ; /* Don't mention the GuC! */
532 else if (err == 0)
533 DRM_INFO("GuC firmware load skipped\n");
534 else if (ret != -EIO)
535 DRM_INFO("GuC firmware load failed: %d\n", err);
536 else
537 DRM_ERROR("GuC firmware load failed: %d\n", err);
538
539 if (i915.enable_guc_submission) {
540 if (fw_path == NULL)
541 DRM_INFO("GuC submission without firmware not supported\n");
542 if (ret == 0)
543 DRM_INFO("Falling back from GuC submission to execlist mode\n");
544 else
545 DRM_ERROR("GuC init failed: %d\n", ret);
546 }
547 i915.enable_guc_submission = 0;
548
549 return ret;
494} 550}
495 551
496static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw) 552static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
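The fail path above encodes a simple policy: a parameter value greater than 1 means the GuC was explicitly required, so the failure is surfaced as -EIO and the GPU is wedged; otherwise the error is swallowed and the driver falls back to execlist submission. A condensed restatement as a standalone function, for illustration only; the constant and function names are not the driver's:

#include <stdio.h>

#define TOY_EIO 5

static int guc_load_fail_policy(int enable_guc_loading, int enable_guc_submission)
{
	if (enable_guc_loading > 1 || enable_guc_submission > 1)
		return -TOY_EIO;   /* GuC was mandatory: wedge the GPU    */
	return 0;                  /* optional: fall back to execlists    */
}

int main(void)
{
	printf("loading=2 -> %d\n", guc_load_fail_policy(2, 1)); /* -5 */
	printf("loading=1 -> %d\n", guc_load_fail_policy(1, 1)); /*  0 */
	return 0;
}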
@@ -552,9 +608,7 @@ static void guc_fw_fetch(struct drm_device *dev, struct intel_guc_fw *guc_fw)
552 608
553 /* Header and uCode will be loaded to WOPCM. Size of the two. */ 609 /* Header and uCode will be loaded to WOPCM. Size of the two. */
554 size = guc_fw->header_size + guc_fw->ucode_size; 610 size = guc_fw->header_size + guc_fw->ucode_size;
555 611 if (size > guc_wopcm_size(to_i915(dev))) {
556 /* Top 32k of WOPCM is reserved (8K stack + 24k RC6 context). */
557 if (size > GUC_WOPCM_SIZE_VALUE - 0x8000) {
558 DRM_ERROR("Firmware is too large to fit in WOPCM\n"); 612 DRM_ERROR("Firmware is too large to fit in WOPCM\n");
559 goto fail; 613 goto fail;
560 } 614 }
@@ -617,22 +671,25 @@ fail:
617} 671}
618 672
619/** 673/**
620 * intel_guc_ucode_init() - define parameters and fetch firmware 674 * intel_guc_init() - define parameters and fetch firmware
621 * @dev: drm device 675 * @dev: drm device
622 * 676 *
623 * Called early during driver load, but after GEM is initialised. 677 * Called early during driver load, but after GEM is initialised.
624 * 678 *
625 * The firmware will be transferred to the GuC's memory later, 679 * The firmware will be transferred to the GuC's memory later,
626 * when intel_guc_ucode_load() is called. 680 * when intel_guc_setup() is called.
627 */ 681 */
628void intel_guc_ucode_init(struct drm_device *dev) 682void intel_guc_init(struct drm_device *dev)
629{ 683{
630 struct drm_i915_private *dev_priv = dev->dev_private; 684 struct drm_i915_private *dev_priv = to_i915(dev);
631 struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; 685 struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
632 const char *fw_path; 686 const char *fw_path;
633 687
634 if (!HAS_GUC_SCHED(dev)) 688 /* A negative value means "use platform default" */
635 i915.enable_guc_submission = false; 689 if (i915.enable_guc_loading < 0)
690 i915.enable_guc_loading = HAS_GUC_UCODE(dev);
691 if (i915.enable_guc_submission < 0)
692 i915.enable_guc_submission = HAS_GUC_SCHED(dev);
636 693
637 if (!HAS_GUC_UCODE(dev)) { 694 if (!HAS_GUC_UCODE(dev)) {
638 fw_path = NULL; 695 fw_path = NULL;
@@ -640,27 +697,30 @@ void intel_guc_ucode_init(struct drm_device *dev)
640 fw_path = I915_SKL_GUC_UCODE; 697 fw_path = I915_SKL_GUC_UCODE;
641 guc_fw->guc_fw_major_wanted = 6; 698 guc_fw->guc_fw_major_wanted = 6;
642 guc_fw->guc_fw_minor_wanted = 1; 699 guc_fw->guc_fw_minor_wanted = 1;
700 } else if (IS_BROXTON(dev)) {
701 fw_path = I915_BXT_GUC_UCODE;
702 guc_fw->guc_fw_major_wanted = 8;
703 guc_fw->guc_fw_minor_wanted = 7;
704 } else if (IS_KABYLAKE(dev)) {
705 fw_path = I915_KBL_GUC_UCODE;
706 guc_fw->guc_fw_major_wanted = 9;
707 guc_fw->guc_fw_minor_wanted = 14;
643 } else { 708 } else {
644 i915.enable_guc_submission = false;
645 fw_path = ""; /* unknown device */ 709 fw_path = ""; /* unknown device */
646 } 710 }
647 711
648 if (!i915.enable_guc_submission)
649 return;
650
651 guc_fw->guc_dev = dev; 712 guc_fw->guc_dev = dev;
652 guc_fw->guc_fw_path = fw_path; 713 guc_fw->guc_fw_path = fw_path;
653 guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE; 714 guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_NONE;
654 guc_fw->guc_fw_load_status = GUC_FIRMWARE_NONE; 715 guc_fw->guc_fw_load_status = GUC_FIRMWARE_NONE;
655 716
717 /* Early (and silent) return if GuC loading is disabled */
718 if (!i915.enable_guc_loading)
719 return;
656 if (fw_path == NULL) 720 if (fw_path == NULL)
657 return; 721 return;
658 722 if (*fw_path == '\0')
659 if (*fw_path == '\0') {
660 DRM_ERROR("No GuC firmware known for this platform\n");
661 guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_FAIL;
662 return; 723 return;
663 }
664 724
665 guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_PENDING; 725 guc_fw->guc_fw_fetch_status = GUC_FIRMWARE_PENDING;
666 DRM_DEBUG_DRIVER("GuC firmware pending, path %s\n", fw_path); 726 DRM_DEBUG_DRIVER("GuC firmware pending, path %s\n", fw_path);
@@ -669,18 +729,18 @@ void intel_guc_ucode_init(struct drm_device *dev)
669} 729}
670 730
671/** 731/**
672 * intel_guc_ucode_fini() - clean up all allocated resources 732 * intel_guc_fini() - clean up all allocated resources
673 * @dev: drm device 733 * @dev: drm device
674 */ 734 */
675void intel_guc_ucode_fini(struct drm_device *dev) 735void intel_guc_fini(struct drm_device *dev)
676{ 736{
677 struct drm_i915_private *dev_priv = dev->dev_private; 737 struct drm_i915_private *dev_priv = to_i915(dev);
678 struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw; 738 struct intel_guc_fw *guc_fw = &dev_priv->guc.guc_fw;
679 739
680 mutex_lock(&dev->struct_mutex); 740 mutex_lock(&dev->struct_mutex);
681 direct_interrupts_to_host(dev_priv); 741 direct_interrupts_to_host(dev_priv);
682 i915_guc_submission_disable(dev); 742 i915_guc_submission_disable(dev_priv);
683 i915_guc_submission_fini(dev); 743 i915_guc_submission_fini(dev_priv);
684 744
685 if (guc_fw->guc_fw_obj) 745 if (guc_fw->guc_fw_obj)
686 drm_gem_object_unreference(&guc_fw->guc_fw_obj->base); 746 drm_gem_object_unreference(&guc_fw->guc_fw_obj->base);
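intel_guc_init() now treats a negative module parameter as "use the platform default", derived from HAS_GUC_UCODE()/HAS_GUC_SCHED(), before picking a per-platform firmware path. A small sketch of that resolution rule, assuming invented helper names and a made-up capability flag:

#include <stdio.h>

static int resolve_param(int requested, int platform_default)
{
	/* < 0: auto (per platform), 0: off, > 0: on (or "required") */
	return requested < 0 ? platform_default : requested;
}

int main(void)
{
	int has_guc_ucode = 1;   /* pretend we're on a GuC-capable part */

	printf("auto  -> %d\n", resolve_param(-1, has_guc_ucode)); /* 1 */
	printf("off   -> %d\n", resolve_param(0,  has_guc_ucode)); /* 0 */
	printf("force -> %d\n", resolve_param(2,  has_guc_ucode)); /* 2 */
	return 0;
}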
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
new file mode 100644
index 000000000000..434f4d5c553d
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -0,0 +1,104 @@
1/*
2 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24#include "i915_drv.h"
25#include "intel_gvt.h"
26
27/**
28 * DOC: Intel GVT-g host support
29 *
30 * Intel GVT-g is a graphics virtualization technology which shares the
31 * GPU among multiple virtual machines on a time-sharing basis. Each
32 * virtual machine is presented a virtual GPU (vGPU), which has equivalent
 33 * features as the underlying physical GPU (pGPU), so the i915 driver can run
 34 * seamlessly in a virtual machine. This file provides the enlightenments
 35 * of GVT and the necessary components used by GVT in the i915 driver.
36 */
37
38static bool is_supported_device(struct drm_i915_private *dev_priv)
39{
40 if (IS_BROADWELL(dev_priv))
41 return true;
42 return false;
43}
44
45/**
46 * intel_gvt_init - initialize GVT components
47 * @dev_priv: drm i915 private data
48 *
49 * This function is called at the initialization stage to create a GVT device.
50 *
51 * Returns:
52 * Zero on success, negative error code if failed.
53 *
54 */
55int intel_gvt_init(struct drm_i915_private *dev_priv)
56{
57 int ret;
58
59 if (!i915.enable_gvt) {
60 DRM_DEBUG_DRIVER("GVT-g is disabled by kernel params\n");
61 return 0;
62 }
63
64 if (!is_supported_device(dev_priv)) {
65 DRM_DEBUG_DRIVER("Unsupported device. GVT-g is disabled\n");
66 goto bail;
67 }
68
69 /*
 70 * If we are not running in a host, or fail to find an MPT module, disable GVT-g
71 */
72 ret = intel_gvt_init_host();
73 if (ret) {
74 DRM_DEBUG_DRIVER("Not in host or MPT modules not found\n");
75 goto bail;
76 }
77
78 ret = intel_gvt_init_device(dev_priv);
79 if (ret) {
 80 DRM_DEBUG_DRIVER("Failed to init GVT device\n");
81 goto bail;
82 }
83
84 return 0;
85
86bail:
87 i915.enable_gvt = 0;
88 return 0;
89}
90
91/**
92 * intel_gvt_cleanup - cleanup GVT components when i915 driver is unloading
 93 * @dev_priv: drm i915 private data
94 *
95 * This function is called at the i915 driver unloading stage, to shutdown
96 * GVT components and release the related resources.
97 */
98void intel_gvt_cleanup(struct drm_i915_private *dev_priv)
99{
100 if (!intel_gvt_active(dev_priv))
101 return;
102
103 intel_gvt_clean_device(dev_priv);
104}
diff --git a/drivers/gpu/drm/i915/intel_gvt.h b/drivers/gpu/drm/i915/intel_gvt.h
new file mode 100644
index 000000000000..960211df74db
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_gvt.h
@@ -0,0 +1,45 @@
1/*
2 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 * SOFTWARE.
22 */
23
24#ifndef _INTEL_GVT_H_
25#define _INTEL_GVT_H_
26
27#include "gvt/gvt.h"
28
29#ifdef CONFIG_DRM_I915_GVT
30int intel_gvt_init(struct drm_i915_private *dev_priv);
31void intel_gvt_cleanup(struct drm_i915_private *dev_priv);
32int intel_gvt_init_device(struct drm_i915_private *dev_priv);
33void intel_gvt_clean_device(struct drm_i915_private *dev_priv);
34int intel_gvt_init_host(void);
35#else
36static inline int intel_gvt_init(struct drm_i915_private *dev_priv)
37{
38 return 0;
39}
40static inline void intel_gvt_cleanup(struct drm_i915_private *dev_priv)
41{
42}
43#endif
44
45#endif /* _INTEL_GVT_H_ */
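intel_gvt.h follows the usual compile-out convention: when CONFIG_DRM_I915_GVT is not set, static inline no-op stubs stand in for the real entry points so callers stay free of #ifdefs. A generic sketch of that pattern with placeholder names; only the shape mirrors the header above:

#include <stdio.h>

/* #define CONFIG_TOY_FEATURE 1 */

#ifdef CONFIG_TOY_FEATURE
int toy_feature_init(void);
void toy_feature_cleanup(void);
#else
static inline int toy_feature_init(void) { return 0; }   /* no-op stub */
static inline void toy_feature_cleanup(void) { }
#endif

int main(void)
{
	if (toy_feature_init())
		return 1;
	puts("driver load continues whether or not the feature is built in");
	toy_feature_cleanup();
	return 0;
}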
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index a8844702d11b..4df9f384910c 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -47,7 +47,7 @@ static void
47assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi) 47assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
48{ 48{
49 struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi); 49 struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
50 struct drm_i915_private *dev_priv = dev->dev_private; 50 struct drm_i915_private *dev_priv = to_i915(dev);
51 uint32_t enabled_bits; 51 uint32_t enabled_bits;
52 52
53 enabled_bits = HAS_DDI(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE; 53 enabled_bits = HAS_DDI(dev) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
@@ -138,7 +138,7 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
138{ 138{
139 const uint32_t *data = frame; 139 const uint32_t *data = frame;
140 struct drm_device *dev = encoder->dev; 140 struct drm_device *dev = encoder->dev;
141 struct drm_i915_private *dev_priv = dev->dev_private; 141 struct drm_i915_private *dev_priv = to_i915(dev);
142 u32 val = I915_READ(VIDEO_DIP_CTL); 142 u32 val = I915_READ(VIDEO_DIP_CTL);
143 int i; 143 int i;
144 144
@@ -192,7 +192,7 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
192{ 192{
193 const uint32_t *data = frame; 193 const uint32_t *data = frame;
194 struct drm_device *dev = encoder->dev; 194 struct drm_device *dev = encoder->dev;
195 struct drm_i915_private *dev_priv = dev->dev_private; 195 struct drm_i915_private *dev_priv = to_i915(dev);
196 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 196 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
197 i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); 197 i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
198 u32 val = I915_READ(reg); 198 u32 val = I915_READ(reg);
@@ -251,7 +251,7 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
251{ 251{
252 const uint32_t *data = frame; 252 const uint32_t *data = frame;
253 struct drm_device *dev = encoder->dev; 253 struct drm_device *dev = encoder->dev;
254 struct drm_i915_private *dev_priv = dev->dev_private; 254 struct drm_i915_private *dev_priv = to_i915(dev);
255 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 255 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
256 i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); 256 i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
257 u32 val = I915_READ(reg); 257 u32 val = I915_READ(reg);
@@ -308,7 +308,7 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
308{ 308{
309 const uint32_t *data = frame; 309 const uint32_t *data = frame;
310 struct drm_device *dev = encoder->dev; 310 struct drm_device *dev = encoder->dev;
311 struct drm_i915_private *dev_priv = dev->dev_private; 311 struct drm_i915_private *dev_priv = to_i915(dev);
312 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 312 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
313 i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe); 313 i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
314 u32 val = I915_READ(reg); 314 u32 val = I915_READ(reg);
@@ -366,7 +366,7 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
366{ 366{
367 const uint32_t *data = frame; 367 const uint32_t *data = frame;
368 struct drm_device *dev = encoder->dev; 368 struct drm_device *dev = encoder->dev;
369 struct drm_i915_private *dev_priv = dev->dev_private; 369 struct drm_i915_private *dev_priv = to_i915(dev);
370 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 370 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
371 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 371 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
372 i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder); 372 i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
@@ -508,7 +508,7 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
508 bool enable, 508 bool enable,
509 const struct drm_display_mode *adjusted_mode) 509 const struct drm_display_mode *adjusted_mode)
510{ 510{
511 struct drm_i915_private *dev_priv = encoder->dev->dev_private; 511 struct drm_i915_private *dev_priv = to_i915(encoder->dev);
512 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); 512 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
513 struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; 513 struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
514 i915_reg_t reg = VIDEO_DIP_CTL; 514 i915_reg_t reg = VIDEO_DIP_CTL;
@@ -629,7 +629,7 @@ static bool gcp_default_phase_possible(int pipe_bpp,
629 629
630static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder) 630static bool intel_hdmi_set_gcp_infoframe(struct drm_encoder *encoder)
631{ 631{
632 struct drm_i915_private *dev_priv = encoder->dev->dev_private; 632 struct drm_i915_private *dev_priv = to_i915(encoder->dev);
633 struct intel_crtc *crtc = to_intel_crtc(encoder->crtc); 633 struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
634 i915_reg_t reg; 634 i915_reg_t reg;
635 u32 val = 0; 635 u32 val = 0;
@@ -661,7 +661,7 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
661 bool enable, 661 bool enable,
662 const struct drm_display_mode *adjusted_mode) 662 const struct drm_display_mode *adjusted_mode)
663{ 663{
664 struct drm_i915_private *dev_priv = encoder->dev->dev_private; 664 struct drm_i915_private *dev_priv = to_i915(encoder->dev);
665 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 665 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
666 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); 666 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
667 struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; 667 struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
@@ -713,7 +713,7 @@ static void cpt_set_infoframes(struct drm_encoder *encoder,
713 bool enable, 713 bool enable,
714 const struct drm_display_mode *adjusted_mode) 714 const struct drm_display_mode *adjusted_mode)
715{ 715{
716 struct drm_i915_private *dev_priv = encoder->dev->dev_private; 716 struct drm_i915_private *dev_priv = to_i915(encoder->dev);
717 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 717 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
718 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 718 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
719 i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe); 719 i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
@@ -755,7 +755,7 @@ static void vlv_set_infoframes(struct drm_encoder *encoder,
755 bool enable, 755 bool enable,
756 const struct drm_display_mode *adjusted_mode) 756 const struct drm_display_mode *adjusted_mode)
757{ 757{
758 struct drm_i915_private *dev_priv = encoder->dev->dev_private; 758 struct drm_i915_private *dev_priv = to_i915(encoder->dev);
759 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); 759 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
760 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 760 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
761 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 761 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
@@ -807,7 +807,7 @@ static void hsw_set_infoframes(struct drm_encoder *encoder,
807 bool enable, 807 bool enable,
808 const struct drm_display_mode *adjusted_mode) 808 const struct drm_display_mode *adjusted_mode)
809{ 809{
810 struct drm_i915_private *dev_priv = encoder->dev->dev_private; 810 struct drm_i915_private *dev_priv = to_i915(encoder->dev);
811 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 811 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
812 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 812 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
813 i915_reg_t reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder); 813 i915_reg_t reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config->cpu_transcoder);
@@ -855,7 +855,7 @@ void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
855static void intel_hdmi_prepare(struct intel_encoder *encoder) 855static void intel_hdmi_prepare(struct intel_encoder *encoder)
856{ 856{
857 struct drm_device *dev = encoder->base.dev; 857 struct drm_device *dev = encoder->base.dev;
858 struct drm_i915_private *dev_priv = dev->dev_private; 858 struct drm_i915_private *dev_priv = to_i915(dev);
859 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 859 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
860 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); 860 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
861 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; 861 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
@@ -894,7 +894,7 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
894 enum pipe *pipe) 894 enum pipe *pipe)
895{ 895{
896 struct drm_device *dev = encoder->base.dev; 896 struct drm_device *dev = encoder->base.dev;
897 struct drm_i915_private *dev_priv = dev->dev_private; 897 struct drm_i915_private *dev_priv = to_i915(dev);
898 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); 898 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
899 enum intel_display_power_domain power_domain; 899 enum intel_display_power_domain power_domain;
900 u32 tmp; 900 u32 tmp;
@@ -931,7 +931,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
931{ 931{
932 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); 932 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
933 struct drm_device *dev = encoder->base.dev; 933 struct drm_device *dev = encoder->base.dev;
934 struct drm_i915_private *dev_priv = dev->dev_private; 934 struct drm_i915_private *dev_priv = to_i915(dev);
935 u32 tmp, flags = 0; 935 u32 tmp, flags = 0;
936 int dotclock; 936 int dotclock;
937 937
@@ -988,7 +988,7 @@ static void intel_enable_hdmi_audio(struct intel_encoder *encoder)
988static void g4x_enable_hdmi(struct intel_encoder *encoder) 988static void g4x_enable_hdmi(struct intel_encoder *encoder)
989{ 989{
990 struct drm_device *dev = encoder->base.dev; 990 struct drm_device *dev = encoder->base.dev;
991 struct drm_i915_private *dev_priv = dev->dev_private; 991 struct drm_i915_private *dev_priv = to_i915(dev);
992 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 992 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
993 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); 993 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
994 u32 temp; 994 u32 temp;
@@ -1009,7 +1009,7 @@ static void g4x_enable_hdmi(struct intel_encoder *encoder)
1009static void ibx_enable_hdmi(struct intel_encoder *encoder) 1009static void ibx_enable_hdmi(struct intel_encoder *encoder)
1010{ 1010{
1011 struct drm_device *dev = encoder->base.dev; 1011 struct drm_device *dev = encoder->base.dev;
1012 struct drm_i915_private *dev_priv = dev->dev_private; 1012 struct drm_i915_private *dev_priv = to_i915(dev);
1013 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 1013 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
1014 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); 1014 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
1015 u32 temp; 1015 u32 temp;
@@ -1058,7 +1058,7 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder)
1058static void cpt_enable_hdmi(struct intel_encoder *encoder) 1058static void cpt_enable_hdmi(struct intel_encoder *encoder)
1059{ 1059{
1060 struct drm_device *dev = encoder->base.dev; 1060 struct drm_device *dev = encoder->base.dev;
1061 struct drm_i915_private *dev_priv = dev->dev_private; 1061 struct drm_i915_private *dev_priv = to_i915(dev);
1062 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 1062 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
1063 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); 1063 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
1064 enum pipe pipe = crtc->pipe; 1064 enum pipe pipe = crtc->pipe;
@@ -1115,7 +1115,7 @@ static void vlv_enable_hdmi(struct intel_encoder *encoder)
1115static void intel_disable_hdmi(struct intel_encoder *encoder) 1115static void intel_disable_hdmi(struct intel_encoder *encoder)
1116{ 1116{
1117 struct drm_device *dev = encoder->base.dev; 1117 struct drm_device *dev = encoder->base.dev;
1118 struct drm_i915_private *dev_priv = dev->dev_private; 1118 struct drm_i915_private *dev_priv = to_i915(dev);
1119 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); 1119 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
1120 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 1120 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
1121 u32 temp; 1121 u32 temp;
@@ -1154,7 +1154,7 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
1154 I915_WRITE(intel_hdmi->hdmi_reg, temp); 1154 I915_WRITE(intel_hdmi->hdmi_reg, temp);
1155 POSTING_READ(intel_hdmi->hdmi_reg); 1155 POSTING_READ(intel_hdmi->hdmi_reg);
1156 1156
1157 intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A); 1157 intel_wait_for_vblank_if_active(&dev_priv->drm, PIPE_A);
1158 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true); 1158 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
1159 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true); 1159 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
1160 } 1160 }
@@ -1273,33 +1273,15 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
1273static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state) 1273static bool hdmi_12bpc_possible(struct intel_crtc_state *crtc_state)
1274{ 1274{
1275 struct drm_device *dev = crtc_state->base.crtc->dev; 1275 struct drm_device *dev = crtc_state->base.crtc->dev;
1276 struct drm_atomic_state *state;
1277 struct intel_encoder *encoder;
1278 struct drm_connector *connector;
1279 struct drm_connector_state *connector_state;
1280 int count = 0, count_hdmi = 0;
1281 int i;
1282 1276
1283 if (HAS_GMCH_DISPLAY(dev)) 1277 if (HAS_GMCH_DISPLAY(dev))
1284 return false; 1278 return false;
1285 1279
1286 state = crtc_state->base.state;
1287
1288 for_each_connector_in_state(state, connector, connector_state, i) {
1289 if (connector_state->crtc != crtc_state->base.crtc)
1290 continue;
1291
1292 encoder = to_intel_encoder(connector_state->best_encoder);
1293
1294 count_hdmi += encoder->type == INTEL_OUTPUT_HDMI;
1295 count++;
1296 }
1297
1298 /* 1280 /*
1299 * HDMI 12bpc affects the clocks, so it's only possible 1281 * HDMI 12bpc affects the clocks, so it's only possible
1300 * when not cloning with other encoder types. 1282 * when not cloning with other encoder types.
1301 */ 1283 */
1302 return count_hdmi > 0 && count_hdmi == count; 1284 return crtc_state->output_types == 1 << INTEL_OUTPUT_HDMI;
1303} 1285}
1304 1286
1305bool intel_hdmi_compute_config(struct intel_encoder *encoder, 1287bool intel_hdmi_compute_config(struct intel_encoder *encoder,
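The rewritten hdmi_12bpc_possible() relies on crtc_state->output_types being a bitmask with one bit per attached encoder type, so "HDMI and nothing else" reduces to an exact equality against the HDMI bit. A minimal sketch of that test, with made-up enum values:

#include <stdio.h>

enum toy_output { TOY_OUTPUT_HDMI = 3, TOY_OUTPUT_DP = 4 };

static int toy_hdmi_12bpc_possible(unsigned int output_types)
{
	/* 12bpc changes the clocks, so HDMI must not be cloned with anything. */
	return output_types == 1u << TOY_OUTPUT_HDMI;
}

int main(void)
{
	unsigned int hdmi_only = 1u << TOY_OUTPUT_HDMI;
	unsigned int cloned = hdmi_only | (1u << TOY_OUTPUT_DP);

	printf("hdmi only: %d\n", toy_hdmi_12bpc_possible(hdmi_only)); /* 1 */
	printf("cloned:    %d\n", toy_hdmi_12bpc_possible(cloned));    /* 0 */
	return 0;
}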
@@ -1575,7 +1557,7 @@ intel_hdmi_set_property(struct drm_connector *connector,
1575 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector); 1557 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
1576 struct intel_digital_port *intel_dig_port = 1558 struct intel_digital_port *intel_dig_port =
1577 hdmi_to_dig_port(intel_hdmi); 1559 hdmi_to_dig_port(intel_hdmi);
1578 struct drm_i915_private *dev_priv = connector->dev->dev_private; 1560 struct drm_i915_private *dev_priv = to_i915(connector->dev);
1579 int ret; 1561 int ret;
1580 1562
1581 ret = drm_object_property_set_value(&connector->base, property, val); 1563 ret = drm_object_property_set_value(&connector->base, property, val);
@@ -1674,39 +1656,16 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
1674 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); 1656 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
1675 struct intel_hdmi *intel_hdmi = &dport->hdmi; 1657 struct intel_hdmi *intel_hdmi = &dport->hdmi;
1676 struct drm_device *dev = encoder->base.dev; 1658 struct drm_device *dev = encoder->base.dev;
1677 struct drm_i915_private *dev_priv = dev->dev_private; 1659 struct drm_i915_private *dev_priv = to_i915(dev);
1678 struct intel_crtc *intel_crtc = 1660 struct intel_crtc *intel_crtc =
1679 to_intel_crtc(encoder->base.crtc); 1661 to_intel_crtc(encoder->base.crtc);
1680 const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; 1662 const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
1681 enum dpio_channel port = vlv_dport_to_channel(dport);
1682 int pipe = intel_crtc->pipe;
1683 u32 val;
1684 1663
1685 /* Enable clock channels for this port */ 1664 vlv_phy_pre_encoder_enable(encoder);
1686 mutex_lock(&dev_priv->sb_lock);
1687 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
1688 val = 0;
1689 if (pipe)
1690 val |= (1<<21);
1691 else
1692 val &= ~(1<<21);
1693 val |= 0x001000c4;
1694 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
1695 1665
1696 /* HDMI 1.0V-2dB */ 1666 /* HDMI 1.0V-2dB */
1697 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0); 1667 vlv_set_phy_signal_level(encoder, 0x2b245f5f, 0x00002000, 0x5578b83a,
1698 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), 0x2b245f5f); 1668 0x2b247878);
1699 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port), 0x5578b83a);
1700 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0c782040);
1701 vlv_dpio_write(dev_priv, pipe, VLV_TX3_DW4(port), 0x2b247878);
1702 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
1703 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), 0x00002000);
1704 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
1705
1706 /* Program lane clock */
1707 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
1708 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
1709 mutex_unlock(&dev_priv->sb_lock);
1710 1669
1711 intel_hdmi->set_infoframes(&encoder->base, 1670 intel_hdmi->set_infoframes(&encoder->base,
1712 intel_crtc->config->has_hdmi_sink, 1671 intel_crtc->config->has_hdmi_sink,
@@ -1719,213 +1678,33 @@ static void vlv_hdmi_pre_enable(struct intel_encoder *encoder)
1719 1678
1720static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder) 1679static void vlv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
1721{ 1680{
1722 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
1723 struct drm_device *dev = encoder->base.dev;
1724 struct drm_i915_private *dev_priv = dev->dev_private;
1725 struct intel_crtc *intel_crtc =
1726 to_intel_crtc(encoder->base.crtc);
1727 enum dpio_channel port = vlv_dport_to_channel(dport);
1728 int pipe = intel_crtc->pipe;
1729
1730 intel_hdmi_prepare(encoder); 1681 intel_hdmi_prepare(encoder);
1731 1682
1732 /* Program Tx lane resets to default */ 1683 vlv_phy_pre_pll_enable(encoder);
1733 mutex_lock(&dev_priv->sb_lock);
1734 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
1735 DPIO_PCS_TX_LANE2_RESET |
1736 DPIO_PCS_TX_LANE1_RESET);
1737 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
1738 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
1739 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
1740 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
1741 DPIO_PCS_CLK_SOFT_RESET);
1742
1743 /* Fix up inter-pair skew failure */
1744 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
1745 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
1746 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
1747
1748 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), 0x00002000);
1749 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
1750 mutex_unlock(&dev_priv->sb_lock);
1751}
1752
1753static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
1754 bool reset)
1755{
1756 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1757 enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
1758 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
1759 enum pipe pipe = crtc->pipe;
1760 uint32_t val;
1761
1762 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
1763 if (reset)
1764 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
1765 else
1766 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
1767 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
1768
1769 if (crtc->config->lane_count > 2) {
1770 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
1771 if (reset)
1772 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
1773 else
1774 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
1775 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
1776 }
1777
1778 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
1779 val |= CHV_PCS_REQ_SOFTRESET_EN;
1780 if (reset)
1781 val &= ~DPIO_PCS_CLK_SOFT_RESET;
1782 else
1783 val |= DPIO_PCS_CLK_SOFT_RESET;
1784 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
1785
1786 if (crtc->config->lane_count > 2) {
1787 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
1788 val |= CHV_PCS_REQ_SOFTRESET_EN;
1789 if (reset)
1790 val &= ~DPIO_PCS_CLK_SOFT_RESET;
1791 else
1792 val |= DPIO_PCS_CLK_SOFT_RESET;
1793 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
1794 }
1795} 1684}
1796 1685
1797static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder) 1686static void chv_hdmi_pre_pll_enable(struct intel_encoder *encoder)
1798{ 1687{
1799 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
1800 struct drm_device *dev = encoder->base.dev;
1801 struct drm_i915_private *dev_priv = dev->dev_private;
1802 struct intel_crtc *intel_crtc =
1803 to_intel_crtc(encoder->base.crtc);
1804 enum dpio_channel ch = vlv_dport_to_channel(dport);
1805 enum pipe pipe = intel_crtc->pipe;
1806 u32 val;
1807
1808 intel_hdmi_prepare(encoder); 1688 intel_hdmi_prepare(encoder);
1809 1689
1810 /* 1690 chv_phy_pre_pll_enable(encoder);
1811 * Must trick the second common lane into life.
1812 * Otherwise we can't even access the PLL.
1813 */
1814 if (ch == DPIO_CH0 && pipe == PIPE_B)
1815 dport->release_cl2_override =
1816 !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
1817
1818 chv_phy_powergate_lanes(encoder, true, 0x0);
1819
1820 mutex_lock(&dev_priv->sb_lock);
1821
1822 /* Assert data lane reset */
1823 chv_data_lane_soft_reset(encoder, true);
1824
1825 /* program left/right clock distribution */
1826 if (pipe != PIPE_B) {
1827 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
1828 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
1829 if (ch == DPIO_CH0)
1830 val |= CHV_BUFLEFTENA1_FORCE;
1831 if (ch == DPIO_CH1)
1832 val |= CHV_BUFRIGHTENA1_FORCE;
1833 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
1834 } else {
1835 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
1836 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
1837 if (ch == DPIO_CH0)
1838 val |= CHV_BUFLEFTENA2_FORCE;
1839 if (ch == DPIO_CH1)
1840 val |= CHV_BUFRIGHTENA2_FORCE;
1841 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
1842 }
1843
1844 /* program clock channel usage */
1845 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
1846 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
1847 if (pipe != PIPE_B)
1848 val &= ~CHV_PCS_USEDCLKCHANNEL;
1849 else
1850 val |= CHV_PCS_USEDCLKCHANNEL;
1851 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
1852
1853 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
1854 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
1855 if (pipe != PIPE_B)
1856 val &= ~CHV_PCS_USEDCLKCHANNEL;
1857 else
1858 val |= CHV_PCS_USEDCLKCHANNEL;
1859 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
1860
1861 /*
1862 * This a a bit weird since generally CL
1863 * matches the pipe, but here we need to
1864 * pick the CL based on the port.
1865 */
1866 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
1867 if (pipe != PIPE_B)
1868 val &= ~CHV_CMN_USEDCLKCHANNEL;
1869 else
1870 val |= CHV_CMN_USEDCLKCHANNEL;
1871 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
1872
1873 mutex_unlock(&dev_priv->sb_lock);
1874} 1691}
1875 1692
1876static void chv_hdmi_post_pll_disable(struct intel_encoder *encoder) 1693static void chv_hdmi_post_pll_disable(struct intel_encoder *encoder)
1877{ 1694{
1878 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 1695 chv_phy_post_pll_disable(encoder);
1879 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
1880 u32 val;
1881
1882 mutex_lock(&dev_priv->sb_lock);
1883
1884 /* disable left/right clock distribution */
1885 if (pipe != PIPE_B) {
1886 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
1887 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
1888 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
1889 } else {
1890 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
1891 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
1892 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
1893 }
1894
1895 mutex_unlock(&dev_priv->sb_lock);
1896
1897 /*
1898 * Leave the power down bit cleared for at least one
1899 * lane so that chv_powergate_phy_ch() will power
1900 * on something when the channel is otherwise unused.
1901 * When the port is off and the override is removed
1902 * the lanes power down anyway, so otherwise it doesn't
1903 * really matter what the state of power down bits is
1904 * after this.
1905 */
1906 chv_phy_powergate_lanes(encoder, false, 0x0);
1907} 1696}
1908 1697
1909static void vlv_hdmi_post_disable(struct intel_encoder *encoder) 1698static void vlv_hdmi_post_disable(struct intel_encoder *encoder)
1910{ 1699{
1911 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
1912 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
1913 struct intel_crtc *intel_crtc =
1914 to_intel_crtc(encoder->base.crtc);
1915 enum dpio_channel port = vlv_dport_to_channel(dport);
1916 int pipe = intel_crtc->pipe;
1917
1918 /* Reset lanes to avoid HDMI flicker (VLV w/a) */ 1700 /* Reset lanes to avoid HDMI flicker (VLV w/a) */
1919 mutex_lock(&dev_priv->sb_lock); 1701 vlv_phy_reset_lanes(encoder);
1920 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000);
1921 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060);
1922 mutex_unlock(&dev_priv->sb_lock);
1923} 1702}
1924 1703
1925static void chv_hdmi_post_disable(struct intel_encoder *encoder) 1704static void chv_hdmi_post_disable(struct intel_encoder *encoder)
1926{ 1705{
1927 struct drm_device *dev = encoder->base.dev; 1706 struct drm_device *dev = encoder->base.dev;
1928 struct drm_i915_private *dev_priv = dev->dev_private; 1707 struct drm_i915_private *dev_priv = to_i915(dev);
1929 1708
1930 mutex_lock(&dev_priv->sb_lock); 1709 mutex_lock(&dev_priv->sb_lock);
1931 1710
@@ -1940,142 +1719,16 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
1940 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); 1719 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
1941 struct intel_hdmi *intel_hdmi = &dport->hdmi; 1720 struct intel_hdmi *intel_hdmi = &dport->hdmi;
1942 struct drm_device *dev = encoder->base.dev; 1721 struct drm_device *dev = encoder->base.dev;
1943 struct drm_i915_private *dev_priv = dev->dev_private; 1722 struct drm_i915_private *dev_priv = to_i915(dev);
1944 struct intel_crtc *intel_crtc = 1723 struct intel_crtc *intel_crtc =
1945 to_intel_crtc(encoder->base.crtc); 1724 to_intel_crtc(encoder->base.crtc);
1946 const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; 1725 const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
1947 enum dpio_channel ch = vlv_dport_to_channel(dport);
1948 int pipe = intel_crtc->pipe;
1949 int data, i, stagger;
1950 u32 val;
1951 1726
1952 mutex_lock(&dev_priv->sb_lock); 1727 chv_phy_pre_encoder_enable(encoder);
1953
1954 /* allow hardware to manage TX FIFO reset source */
1955 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
1956 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
1957 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
1958
1959 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
1960 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
1961 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
1962
1963 /* Program Tx latency optimal setting */
1964 for (i = 0; i < 4; i++) {
1965 /* Set the upar bit */
1966 data = (i == 1) ? 0x0 : 0x1;
1967 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
1968 data << DPIO_UPAR_SHIFT);
1969 }
1970
1971 /* Data lane stagger programming */
1972 if (intel_crtc->config->port_clock > 270000)
1973 stagger = 0x18;
1974 else if (intel_crtc->config->port_clock > 135000)
1975 stagger = 0xd;
1976 else if (intel_crtc->config->port_clock > 67500)
1977 stagger = 0x7;
1978 else if (intel_crtc->config->port_clock > 33750)
1979 stagger = 0x4;
1980 else
1981 stagger = 0x2;
1982
1983 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
1984 val |= DPIO_TX2_STAGGER_MASK(0x1f);
1985 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
1986
1987 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
1988 val |= DPIO_TX2_STAGGER_MASK(0x1f);
1989 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
1990
1991 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
1992 DPIO_LANESTAGGER_STRAP(stagger) |
1993 DPIO_LANESTAGGER_STRAP_OVRD |
1994 DPIO_TX1_STAGGER_MASK(0x1f) |
1995 DPIO_TX1_STAGGER_MULT(6) |
1996 DPIO_TX2_STAGGER_MULT(0));
1997
1998 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
1999 DPIO_LANESTAGGER_STRAP(stagger) |
2000 DPIO_LANESTAGGER_STRAP_OVRD |
2001 DPIO_TX1_STAGGER_MASK(0x1f) |
2002 DPIO_TX1_STAGGER_MULT(7) |
2003 DPIO_TX2_STAGGER_MULT(5));
2004
2005 /* Deassert data lane reset */
2006 chv_data_lane_soft_reset(encoder, false);
2007
2008 /* Clear calc init */
2009 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
2010 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
2011 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
2012 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
2013 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
2014
2015 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
2016 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
2017 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
2018 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
2019 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
2020
2021 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
2022 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
2023 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
2024 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
2025
2026 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
2027 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
2028 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
2029 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
2030 1728
2031 /* FIXME: Program the support xxx V-dB */ 1729 /* FIXME: Program the support xxx V-dB */
2032 /* Use 800mV-0dB */ 1730 /* Use 800mV-0dB */
2033 for (i = 0; i < 4; i++) { 1731 chv_set_phy_signal_level(encoder, 128, 102, false);
2034 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
2035 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
2036 val |= 128 << DPIO_SWING_DEEMPH9P5_SHIFT;
2037 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
2038 }
2039
2040 for (i = 0; i < 4; i++) {
2041 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
2042
2043 val &= ~DPIO_SWING_MARGIN000_MASK;
2044 val |= 102 << DPIO_SWING_MARGIN000_SHIFT;
2045
2046 /*
2047 * Supposedly this value shouldn't matter when unique transition
2048 * scale is disabled, but in fact it does matter. Let's just
2049 * always program the same value and hope it's OK.
2050 */
2051 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
2052 val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
2053
2054 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
2055 }
2056
2057 /*
2058 * The document said it needs to set bit 27 for ch0 and bit 26
2059 * for ch1. Might be a typo in the doc.
2060 * For now, for this unique transition scale selection, set bit
2061 * 27 for ch0 and ch1.
2062 */
2063 for (i = 0; i < 4; i++) {
2064 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
2065 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
2066 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
2067 }
2068
2069 /* Start swing calculation */
2070 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
2071 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
2072 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
2073
2074 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
2075 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
2076 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
2077
2078 mutex_unlock(&dev_priv->sb_lock);
2079 1732
2080 intel_hdmi->set_infoframes(&encoder->base, 1733 intel_hdmi->set_infoframes(&encoder->base,
2081 intel_crtc->config->has_hdmi_sink, 1734 intel_crtc->config->has_hdmi_sink,
@@ -2086,10 +1739,7 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
2086 vlv_wait_port_ready(dev_priv, dport, 0x0); 1739 vlv_wait_port_ready(dev_priv, dport, 0x0);
2087 1740
2088 /* Second common lane will stay alive on its own now */ 1741 /* Second common lane will stay alive on its own now */
2089 if (dport->release_cl2_override) { 1742 chv_phy_release_cl2_override(encoder);
2090 chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
2091 dport->release_cl2_override = false;
2092 }
2093} 1743}
2094 1744
2095static void intel_hdmi_destroy(struct drm_connector *connector) 1745static void intel_hdmi_destroy(struct drm_connector *connector)
@@ -2106,6 +1756,8 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
2106 .fill_modes = drm_helper_probe_single_connector_modes, 1756 .fill_modes = drm_helper_probe_single_connector_modes,
2107 .set_property = intel_hdmi_set_property, 1757 .set_property = intel_hdmi_set_property,
2108 .atomic_get_property = intel_connector_atomic_get_property, 1758 .atomic_get_property = intel_connector_atomic_get_property,
1759 .late_register = intel_connector_register,
1760 .early_unregister = intel_connector_unregister,
2109 .destroy = intel_hdmi_destroy, 1761 .destroy = intel_hdmi_destroy,
2110 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 1762 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
2111 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, 1763 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
@@ -2114,7 +1766,6 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
2114static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = { 1766static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = {
2115 .get_modes = intel_hdmi_get_modes, 1767 .get_modes = intel_hdmi_get_modes,
2116 .mode_valid = intel_hdmi_mode_valid, 1768 .mode_valid = intel_hdmi_mode_valid,
2117 .best_encoder = intel_best_encoder,
2118}; 1769};
2119 1770
2120static const struct drm_encoder_funcs intel_hdmi_enc_funcs = { 1771static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
@@ -2138,7 +1789,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
2138 struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi; 1789 struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
2139 struct intel_encoder *intel_encoder = &intel_dig_port->base; 1790 struct intel_encoder *intel_encoder = &intel_dig_port->base;
2140 struct drm_device *dev = intel_encoder->base.dev; 1791 struct drm_device *dev = intel_encoder->base.dev;
2141 struct drm_i915_private *dev_priv = dev->dev_private; 1792 struct drm_i915_private *dev_priv = to_i915(dev);
2142 enum port port = intel_dig_port->port; 1793 enum port port = intel_dig_port->port;
2143 uint8_t alternate_ddc_pin; 1794 uint8_t alternate_ddc_pin;
2144 1795
@@ -2242,12 +1893,10 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
2242 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state; 1893 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
2243 else 1894 else
2244 intel_connector->get_hw_state = intel_connector_get_hw_state; 1895 intel_connector->get_hw_state = intel_connector_get_hw_state;
2245 intel_connector->unregister = intel_connector_unregister;
2246 1896
2247 intel_hdmi_add_properties(intel_hdmi, connector); 1897 intel_hdmi_add_properties(intel_hdmi, connector);
2248 1898
2249 intel_connector_attach_encoder(intel_connector, intel_encoder); 1899 intel_connector_attach_encoder(intel_connector, intel_encoder);
2250 drm_connector_register(connector);
2251 intel_hdmi->attached_connector = intel_connector; 1900 intel_hdmi->attached_connector = intel_connector;
2252 1901
2253 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written 1902 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
@@ -2280,7 +1929,7 @@ void intel_hdmi_init(struct drm_device *dev,
2280 intel_encoder = &intel_dig_port->base; 1929 intel_encoder = &intel_dig_port->base;
2281 1930
2282 drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs, 1931 drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
2283 DRM_MODE_ENCODER_TMDS, NULL); 1932 DRM_MODE_ENCODER_TMDS, "HDMI %c", port_name(port));
2284 1933
2285 intel_encoder->compute_config = intel_hdmi_compute_config; 1934 intel_encoder->compute_config = intel_hdmi_compute_config;
2286 if (HAS_PCH_SPLIT(dev)) { 1935 if (HAS_PCH_SPLIT(dev)) {
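
The intel_hdmi.c hunks above mostly delete open-coded VLV/CHV DPIO programming in favour of the shared vlv_phy_*/chv_phy_* helpers. As a hedged, self-contained illustration of the read-modify-write pattern the removed chv_data_lane_soft_reset() used (conditionally toggling lane and clock reset bits on a bool), here is a minimal userspace sketch; the register indices, bit values and the dpio_read/dpio_write stubs are placeholders, not the driver's real sideband API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative bit definitions only; values do not match the real DPIO map. */
#define PCS_TX_LANE1_RESET   (1u << 0)
#define PCS_TX_LANE2_RESET   (1u << 1)
#define PCS_CLK_SOFT_RESET   (1u << 2)
#define PCS_REQ_SOFTRESET_EN (1u << 3)

static uint32_t fake_dpio_regs[2];               /* stand-in for the sideband registers */
static uint32_t dpio_read(int reg) { return fake_dpio_regs[reg]; }
static void dpio_write(int reg, uint32_t v) { fake_dpio_regs[reg] = v; }

/* Mirror the conditional in the removed chv_data_lane_soft_reset(): when
 * 'reset' is true the lane/clock bits are cleared, otherwise they are set. */
static void data_lane_soft_reset(bool reset)
{
	uint32_t val;

	val = dpio_read(0);
	if (reset)
		val &= ~(PCS_TX_LANE2_RESET | PCS_TX_LANE1_RESET);
	else
		val |= PCS_TX_LANE2_RESET | PCS_TX_LANE1_RESET;
	dpio_write(0, val);

	val = dpio_read(1);
	val |= PCS_REQ_SOFTRESET_EN;
	if (reset)
		val &= ~PCS_CLK_SOFT_RESET;
	else
		val |= PCS_CLK_SOFT_RESET;
	dpio_write(1, val);
}

int main(void)
{
	data_lane_soft_reset(true);   /* assert reset */
	data_lane_soft_reset(false);  /* deassert reset */
	printf("DW0=0x%x DW1=0x%x\n",
	       (unsigned)fake_dpio_regs[0], (unsigned)fake_dpio_regs[1]);
	return 0;
}

In the patched driver this logic now lives in the shared PHY helpers, so the HDMI encoder hooks shrink to single calls such as vlv_phy_pre_pll_enable() and chv_phy_post_pll_disable().
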
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c
index bee673005d48..f48957ea100d 100644
--- a/drivers/gpu/drm/i915/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/intel_hotplug.c
@@ -144,7 +144,7 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
144 144
145static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv) 145static void intel_hpd_irq_storm_disable(struct drm_i915_private *dev_priv)
146{ 146{
147 struct drm_device *dev = dev_priv->dev; 147 struct drm_device *dev = &dev_priv->drm;
148 struct drm_mode_config *mode_config = &dev->mode_config; 148 struct drm_mode_config *mode_config = &dev->mode_config;
149 struct intel_connector *intel_connector; 149 struct intel_connector *intel_connector;
150 struct intel_encoder *intel_encoder; 150 struct intel_encoder *intel_encoder;
@@ -191,7 +191,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
191 struct drm_i915_private *dev_priv = 191 struct drm_i915_private *dev_priv =
192 container_of(work, typeof(*dev_priv), 192 container_of(work, typeof(*dev_priv),
193 hotplug.reenable_work.work); 193 hotplug.reenable_work.work);
194 struct drm_device *dev = dev_priv->dev; 194 struct drm_device *dev = &dev_priv->drm;
195 struct drm_mode_config *mode_config = &dev->mode_config; 195 struct drm_mode_config *mode_config = &dev->mode_config;
196 int i; 196 int i;
197 197
@@ -220,7 +220,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
220 } 220 }
221 } 221 }
222 if (dev_priv->display.hpd_irq_setup) 222 if (dev_priv->display.hpd_irq_setup)
223 dev_priv->display.hpd_irq_setup(dev); 223 dev_priv->display.hpd_irq_setup(dev_priv);
224 spin_unlock_irq(&dev_priv->irq_lock); 224 spin_unlock_irq(&dev_priv->irq_lock);
225 225
226 intel_runtime_pm_put(dev_priv); 226 intel_runtime_pm_put(dev_priv);
@@ -302,7 +302,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
302{ 302{
303 struct drm_i915_private *dev_priv = 303 struct drm_i915_private *dev_priv =
304 container_of(work, struct drm_i915_private, hotplug.hotplug_work); 304 container_of(work, struct drm_i915_private, hotplug.hotplug_work);
305 struct drm_device *dev = dev_priv->dev; 305 struct drm_device *dev = &dev_priv->drm;
306 struct drm_mode_config *mode_config = &dev->mode_config; 306 struct drm_mode_config *mode_config = &dev->mode_config;
307 struct intel_connector *intel_connector; 307 struct intel_connector *intel_connector;
308 struct intel_encoder *intel_encoder; 308 struct intel_encoder *intel_encoder;
@@ -346,7 +346,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
346 346
347/** 347/**
348 * intel_hpd_irq_handler - main hotplug irq handler 348 * intel_hpd_irq_handler - main hotplug irq handler
349 * @dev: drm device 349 * @dev_priv: drm_i915_private
350 * @pin_mask: a mask of hpd pins that have triggered the irq 350 * @pin_mask: a mask of hpd pins that have triggered the irq
351 * @long_mask: a mask of hpd pins that may be long hpd pulses 351 * @long_mask: a mask of hpd pins that may be long hpd pulses
352 * 352 *
@@ -360,10 +360,9 @@ static void i915_hotplug_work_func(struct work_struct *work)
360 * Here, we do hotplug irq storm detection and mitigation, and pass further 360 * Here, we do hotplug irq storm detection and mitigation, and pass further
361 * processing to appropriate bottom halves. 361 * processing to appropriate bottom halves.
362 */ 362 */
363void intel_hpd_irq_handler(struct drm_device *dev, 363void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
364 u32 pin_mask, u32 long_mask) 364 u32 pin_mask, u32 long_mask)
365{ 365{
366 struct drm_i915_private *dev_priv = dev->dev_private;
367 int i; 366 int i;
368 enum port port; 367 enum port port;
369 bool storm_detected = false; 368 bool storm_detected = false;
@@ -407,7 +406,7 @@ void intel_hpd_irq_handler(struct drm_device *dev,
407 * hotplug bits itself. So only WARN about unexpected 406 * hotplug bits itself. So only WARN about unexpected
408 * interrupts on saner platforms. 407 * interrupts on saner platforms.
409 */ 408 */
410 WARN_ONCE(!HAS_GMCH_DISPLAY(dev), 409 WARN_ONCE(!HAS_GMCH_DISPLAY(dev_priv),
411 "Received HPD interrupt on pin %d although disabled\n", i); 410 "Received HPD interrupt on pin %d although disabled\n", i);
412 continue; 411 continue;
413 } 412 }
@@ -427,7 +426,7 @@ void intel_hpd_irq_handler(struct drm_device *dev,
427 } 426 }
428 427
429 if (storm_detected) 428 if (storm_detected)
430 dev_priv->display.hpd_irq_setup(dev); 429 dev_priv->display.hpd_irq_setup(dev_priv);
431 spin_unlock(&dev_priv->irq_lock); 430 spin_unlock(&dev_priv->irq_lock);
432 431
433 /* 432 /*
@@ -453,20 +452,47 @@ void intel_hpd_irq_handler(struct drm_device *dev,
453 * 452 *
454 * This is a separate step from interrupt enabling to simplify the locking rules 453 * This is a separate step from interrupt enabling to simplify the locking rules
455 * in the driver load and resume code. 454 * in the driver load and resume code.
455 *
456 * Also see: intel_hpd_poll_init(), which enables connector polling
456 */ 457 */
457void intel_hpd_init(struct drm_i915_private *dev_priv) 458void intel_hpd_init(struct drm_i915_private *dev_priv)
458{ 459{
459 struct drm_device *dev = dev_priv->dev;
460 struct drm_mode_config *mode_config = &dev->mode_config;
461 struct drm_connector *connector;
462 int i; 460 int i;
463 461
464 for_each_hpd_pin(i) { 462 for_each_hpd_pin(i) {
465 dev_priv->hotplug.stats[i].count = 0; 463 dev_priv->hotplug.stats[i].count = 0;
466 dev_priv->hotplug.stats[i].state = HPD_ENABLED; 464 dev_priv->hotplug.stats[i].state = HPD_ENABLED;
467 } 465 }
466
467 WRITE_ONCE(dev_priv->hotplug.poll_enabled, false);
468 schedule_work(&dev_priv->hotplug.poll_init_work);
469
470 /*
471 * Interrupt setup is already guaranteed to be single-threaded, this is
472 * just to make the assert_spin_locked checks happy.
473 */
474 spin_lock_irq(&dev_priv->irq_lock);
475 if (dev_priv->display.hpd_irq_setup)
476 dev_priv->display.hpd_irq_setup(dev_priv);
477 spin_unlock_irq(&dev_priv->irq_lock);
478}
479
480void i915_hpd_poll_init_work(struct work_struct *work) {
481 struct drm_i915_private *dev_priv =
482 container_of(work, struct drm_i915_private,
483 hotplug.poll_init_work);
484 struct drm_device *dev = &dev_priv->drm;
485 struct drm_mode_config *mode_config = &dev->mode_config;
486 struct drm_connector *connector;
487 bool enabled;
488
489 mutex_lock(&dev->mode_config.mutex);
490
491 enabled = READ_ONCE(dev_priv->hotplug.poll_enabled);
492
468 list_for_each_entry(connector, &mode_config->connector_list, head) { 493 list_for_each_entry(connector, &mode_config->connector_list, head) {
469 struct intel_connector *intel_connector = to_intel_connector(connector); 494 struct intel_connector *intel_connector =
495 to_intel_connector(connector);
470 connector->polled = intel_connector->polled; 496 connector->polled = intel_connector->polled;
471 497
472 /* MST has a dynamic intel_connector->encoder and it's reprobing 498 /* MST has a dynamic intel_connector->encoder and it's reprobing
@@ -475,24 +501,62 @@ void intel_hpd_init(struct drm_i915_private *dev_priv)
475 continue; 501 continue;
476 502
477 if (!connector->polled && I915_HAS_HOTPLUG(dev) && 503 if (!connector->polled && I915_HAS_HOTPLUG(dev) &&
478 intel_connector->encoder->hpd_pin > HPD_NONE) 504 intel_connector->encoder->hpd_pin > HPD_NONE) {
479 connector->polled = DRM_CONNECTOR_POLL_HPD; 505 connector->polled = enabled ?
506 DRM_CONNECTOR_POLL_CONNECT |
507 DRM_CONNECTOR_POLL_DISCONNECT :
508 DRM_CONNECTOR_POLL_HPD;
509 }
480 } 510 }
481 511
512 if (enabled)
513 drm_kms_helper_poll_enable_locked(dev);
514
515 mutex_unlock(&dev->mode_config.mutex);
516
482 /* 517 /*
483 * Interrupt setup is already guaranteed to be single-threaded, this is 518 * We might have missed any hotplugs that happened while we were
484 * just to make the assert_spin_locked checks happy. 519 * in the middle of disabling polling
485 */ 520 */
486 spin_lock_irq(&dev_priv->irq_lock); 521 if (!enabled)
487 if (dev_priv->display.hpd_irq_setup) 522 drm_helper_hpd_irq_event(dev);
488 dev_priv->display.hpd_irq_setup(dev); 523}
489 spin_unlock_irq(&dev_priv->irq_lock); 524
525/**
526 * intel_hpd_poll_init - enables/disables polling for connectors with hpd
527 * @dev_priv: i915 device instance
528 * @enabled: Whether to enable or disable polling
529 *
530 * This function enables polling for all connectors, regardless of whether or
531 * not they support hotplug detection. Under certain conditions HPD may not be
532 * functional. On most Intel GPUs, this happens when we enter runtime suspend.
533 * On Valleyview and Cherryview systems, this also happens when we shut off all
534 * of the powerwells.
535 *
536 * Since this function can get called in contexts where we're already holding
 537 * dev->mode_config.mutex, we do the actual hotplug enabling in a separate
538 * worker.
539 *
540 * Also see: intel_hpd_init(), which restores hpd handling.
541 */
542void intel_hpd_poll_init(struct drm_i915_private *dev_priv)
543{
544 WRITE_ONCE(dev_priv->hotplug.poll_enabled, true);
545
546 /*
547 * We might already be holding dev->mode_config.mutex, so do this in a
 548 * separate worker
549 * As well, there's no issue if we race here since we always reschedule
550 * this worker anyway
551 */
552 schedule_work(&dev_priv->hotplug.poll_init_work);
490} 553}
491 554
492void intel_hpd_init_work(struct drm_i915_private *dev_priv) 555void intel_hpd_init_work(struct drm_i915_private *dev_priv)
493{ 556{
494 INIT_WORK(&dev_priv->hotplug.hotplug_work, i915_hotplug_work_func); 557 INIT_WORK(&dev_priv->hotplug.hotplug_work, i915_hotplug_work_func);
495 INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func); 558 INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func);
559 INIT_WORK(&dev_priv->hotplug.poll_init_work, i915_hpd_poll_init_work);
496 INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work, 560 INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work,
497 intel_hpd_irq_storm_reenable_work); 561 intel_hpd_irq_storm_reenable_work);
498} 562}
@@ -509,5 +573,33 @@ void intel_hpd_cancel_work(struct drm_i915_private *dev_priv)
509 573
510 cancel_work_sync(&dev_priv->hotplug.dig_port_work); 574 cancel_work_sync(&dev_priv->hotplug.dig_port_work);
511 cancel_work_sync(&dev_priv->hotplug.hotplug_work); 575 cancel_work_sync(&dev_priv->hotplug.hotplug_work);
576 cancel_work_sync(&dev_priv->hotplug.poll_init_work);
512 cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work); 577 cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work);
513} 578}
579
580bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
581{
582 bool ret = false;
583
584 if (pin == HPD_NONE)
585 return false;
586
587 spin_lock_irq(&dev_priv->irq_lock);
588 if (dev_priv->hotplug.stats[pin].state == HPD_ENABLED) {
589 dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
590 ret = true;
591 }
592 spin_unlock_irq(&dev_priv->irq_lock);
593
594 return ret;
595}
596
597void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin)
598{
599 if (pin == HPD_NONE)
600 return;
601
602 spin_lock_irq(&dev_priv->irq_lock);
603 dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
604 spin_unlock_irq(&dev_priv->irq_lock);
605}
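
The new intel_hpd_poll_init()/i915_hpd_poll_init_work() pair above flips a poll_enabled flag and reprograms connector polling from a worker, because the caller may already hold dev->mode_config.mutex. Below is a minimal sketch of the polling-mode choice made for connectors that have an HPD pin; the POLL_* values are hypothetical stand-ins for the real DRM_CONNECTOR_POLL_* constants, and the worker/locking machinery is deliberately omitted.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical flag values; the real ones live in the DRM headers. */
#define POLL_HPD        (1u << 0)
#define POLL_CONNECT    (1u << 1)
#define POLL_DISCONNECT (1u << 2)

/* Same decision as i915_hpd_poll_init_work(): while HPD is unusable
 * (poll_enabled set, e.g. during runtime suspend) fall back to full
 * connect/disconnect polling, otherwise rely on the HPD interrupt. */
static unsigned int pick_polling_mode(bool poll_enabled)
{
	return poll_enabled ? (POLL_CONNECT | POLL_DISCONNECT) : POLL_HPD;
}

int main(void)
{
	printf("hpd unavailable: 0x%x\n", pick_polling_mode(true));
	printf("hpd restored:    0x%x\n", pick_polling_mode(false));
	return 0;
}

Connectors without an HPD pin keep whatever polled value they already had; only the HPD-capable ones are switched between interrupt-driven detection and timed polling.
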
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 81de23098be7..1f266d7df2ec 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -113,7 +113,7 @@ to_intel_gmbus(struct i2c_adapter *i2c)
113void 113void
114intel_i2c_reset(struct drm_device *dev) 114intel_i2c_reset(struct drm_device *dev)
115{ 115{
116 struct drm_i915_private *dev_priv = dev->dev_private; 116 struct drm_i915_private *dev_priv = to_i915(dev);
117 117
118 I915_WRITE(GMBUS0, 0); 118 I915_WRITE(GMBUS0, 0);
119 I915_WRITE(GMBUS4, 0); 119 I915_WRITE(GMBUS4, 0);
@@ -138,7 +138,7 @@ static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
138static u32 get_reserved(struct intel_gmbus *bus) 138static u32 get_reserved(struct intel_gmbus *bus)
139{ 139{
140 struct drm_i915_private *dev_priv = bus->dev_priv; 140 struct drm_i915_private *dev_priv = bus->dev_priv;
141 struct drm_device *dev = dev_priv->dev; 141 struct drm_device *dev = &dev_priv->drm;
142 u32 reserved = 0; 142 u32 reserved = 0;
143 143
144 /* On most chips, these bits must be preserved in software. */ 144 /* On most chips, these bits must be preserved in software. */
@@ -212,7 +212,7 @@ intel_gpio_pre_xfer(struct i2c_adapter *adapter)
212 adapter); 212 adapter);
213 struct drm_i915_private *dev_priv = bus->dev_priv; 213 struct drm_i915_private *dev_priv = bus->dev_priv;
214 214
215 intel_i2c_reset(dev_priv->dev); 215 intel_i2c_reset(&dev_priv->drm);
216 intel_i2c_quirk_set(dev_priv, true); 216 intel_i2c_quirk_set(dev_priv, true);
217 set_data(bus, 1); 217 set_data(bus, 1);
218 set_clock(bus, 1); 218 set_clock(bus, 1);
@@ -298,15 +298,16 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
298{ 298{
299 int ret; 299 int ret;
300 300
301#define C ((I915_READ_NOTRACE(GMBUS2) & GMBUS_ACTIVE) == 0)
302
303 if (!HAS_GMBUS_IRQ(dev_priv)) 301 if (!HAS_GMBUS_IRQ(dev_priv))
304 return wait_for(C, 10); 302 return intel_wait_for_register(dev_priv,
303 GMBUS2, GMBUS_ACTIVE, 0,
304 10);
305 305
306 /* Important: The hw handles only the first bit, so set only one! */ 306 /* Important: The hw handles only the first bit, so set only one! */
307 I915_WRITE(GMBUS4, GMBUS_IDLE_EN); 307 I915_WRITE(GMBUS4, GMBUS_IDLE_EN);
308 308
309 ret = wait_event_timeout(dev_priv->gmbus_wait_queue, C, 309 ret = wait_event_timeout(dev_priv->gmbus_wait_queue,
310 (I915_READ_NOTRACE(GMBUS2) & GMBUS_ACTIVE) == 0,
310 msecs_to_jiffies_timeout(10)); 311 msecs_to_jiffies_timeout(10));
311 312
312 I915_WRITE(GMBUS4, 0); 313 I915_WRITE(GMBUS4, 0);
@@ -315,7 +316,6 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
315 return 0; 316 return 0;
316 else 317 else
317 return -ETIMEDOUT; 318 return -ETIMEDOUT;
318#undef C
319} 319}
320 320
321static int 321static int
@@ -632,7 +632,7 @@ static const struct i2c_algorithm gmbus_algorithm = {
632 */ 632 */
633int intel_setup_gmbus(struct drm_device *dev) 633int intel_setup_gmbus(struct drm_device *dev)
634{ 634{
635 struct drm_i915_private *dev_priv = dev->dev_private; 635 struct drm_i915_private *dev_priv = to_i915(dev);
636 struct intel_gmbus *bus; 636 struct intel_gmbus *bus;
637 unsigned int pin; 637 unsigned int pin;
638 int ret; 638 int ret;
@@ -688,7 +688,7 @@ int intel_setup_gmbus(struct drm_device *dev)
688 goto err; 688 goto err;
689 } 689 }
690 690
691 intel_i2c_reset(dev_priv->dev); 691 intel_i2c_reset(&dev_priv->drm);
692 692
693 return 0; 693 return 0;
694 694
@@ -736,7 +736,7 @@ void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
736 736
737void intel_teardown_gmbus(struct drm_device *dev) 737void intel_teardown_gmbus(struct drm_device *dev)
738{ 738{
739 struct drm_i915_private *dev_priv = dev->dev_private; 739 struct drm_i915_private *dev_priv = to_i915(dev);
740 struct intel_gmbus *bus; 740 struct intel_gmbus *bus;
741 unsigned int pin; 741 unsigned int pin;
742 742
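
The gmbus_wait_idle() hunk above drops the open-coded wait_for(C, 10) macro and calls intel_wait_for_register(dev_priv, GMBUS2, GMBUS_ACTIVE, 0, 10) instead. The following is a plain userspace approximation of that contract (poll until (reg & mask) == value or a millisecond timeout expires); the real helper goes through the uncore read paths and can sleep, and the GMBUS_ACTIVE mask used in main() is a made-up value for illustration only.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint32_t fake_gmbus2;                    /* stand-in for the GMBUS2 register */
static uint32_t read_reg(void) { return fake_gmbus2; }

static uint64_t now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

/* Poll until (reg & mask) == value or the timeout expires.
 * Returns 0 on success, -1 on timeout. */
static int wait_for_register(uint32_t mask, uint32_t value, unsigned int timeout_ms)
{
	uint64_t deadline = now_ms() + timeout_ms;

	do {
		if ((read_reg() & mask) == value)
			return 0;
	} while (now_ms() < deadline);

	return -1;
}

int main(void)
{
	fake_gmbus2 = 0;                        /* "GMBUS_ACTIVE" already clear */
	printf("idle wait: %d\n", wait_for_register(0x200, 0, 10));
	return 0;
}
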
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 7f2d8415ed8b..414ddda43922 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -208,31 +208,27 @@
208} while (0) 208} while (0)
209 209
210enum { 210enum {
211 ADVANCED_CONTEXT = 0,
212 LEGACY_32B_CONTEXT,
213 ADVANCED_AD_CONTEXT,
214 LEGACY_64B_CONTEXT
215};
216#define GEN8_CTX_ADDRESSING_MODE_SHIFT 3
217#define GEN8_CTX_ADDRESSING_MODE(dev) (USES_FULL_48BIT_PPGTT(dev) ?\
218 LEGACY_64B_CONTEXT :\
219 LEGACY_32B_CONTEXT)
220enum {
221 FAULT_AND_HANG = 0, 211 FAULT_AND_HANG = 0,
222 FAULT_AND_HALT, /* Debug only */ 212 FAULT_AND_HALT, /* Debug only */
223 FAULT_AND_STREAM, 213 FAULT_AND_STREAM,
224 FAULT_AND_CONTINUE /* Unsupported */ 214 FAULT_AND_CONTINUE /* Unsupported */
225}; 215};
226#define GEN8_CTX_ID_SHIFT 32 216#define GEN8_CTX_ID_SHIFT 32
217#define GEN8_CTX_ID_WIDTH 21
227#define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17 218#define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17
228#define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x26 219#define GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x26
229 220
230static int intel_lr_context_pin(struct intel_context *ctx, 221/* Typical size of the average request (2 pipecontrols and a MI_BB) */
222#define EXECLISTS_REQUEST_SIZE 64 /* bytes */
223
224static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
225 struct intel_engine_cs *engine);
226static int intel_lr_context_pin(struct i915_gem_context *ctx,
231 struct intel_engine_cs *engine); 227 struct intel_engine_cs *engine);
232 228
233/** 229/**
234 * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists 230 * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
235 * @dev: DRM device. 231 * @dev_priv: i915 device private
236 * @enable_execlists: value of i915.enable_execlists module parameter. 232 * @enable_execlists: value of i915.enable_execlists module parameter.
237 * 233 *
238 * Only certain platforms support Execlists (the prerequisites being 234 * Only certain platforms support Execlists (the prerequisites being
@@ -240,23 +236,22 @@ static int intel_lr_context_pin(struct intel_context *ctx,
240 * 236 *
241 * Return: 1 if Execlists is supported and has to be enabled. 237 * Return: 1 if Execlists is supported and has to be enabled.
242 */ 238 */
243int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists) 239int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv, int enable_execlists)
244{ 240{
245 WARN_ON(i915.enable_ppgtt == -1);
246
247 /* On platforms with execlist available, vGPU will only 241 /* On platforms with execlist available, vGPU will only
248 * support execlist mode, no ring buffer mode. 242 * support execlist mode, no ring buffer mode.
249 */ 243 */
250 if (HAS_LOGICAL_RING_CONTEXTS(dev) && intel_vgpu_active(dev)) 244 if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) && intel_vgpu_active(dev_priv))
251 return 1; 245 return 1;
252 246
253 if (INTEL_INFO(dev)->gen >= 9) 247 if (INTEL_GEN(dev_priv) >= 9)
254 return 1; 248 return 1;
255 249
256 if (enable_execlists == 0) 250 if (enable_execlists == 0)
257 return 0; 251 return 0;
258 252
259 if (HAS_LOGICAL_RING_CONTEXTS(dev) && USES_PPGTT(dev) && 253 if (HAS_LOGICAL_RING_CONTEXTS(dev_priv) &&
254 USES_PPGTT(dev_priv) &&
260 i915.use_mmio_flip >= 0) 255 i915.use_mmio_flip >= 0)
261 return 1; 256 return 1;
262 257
@@ -266,19 +261,17 @@ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists
266static void 261static void
267logical_ring_init_platform_invariants(struct intel_engine_cs *engine) 262logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
268{ 263{
269 struct drm_device *dev = engine->dev; 264 struct drm_i915_private *dev_priv = engine->i915;
270 265
271 if (IS_GEN8(dev) || IS_GEN9(dev)) 266 if (IS_GEN8(dev_priv) || IS_GEN9(dev_priv))
272 engine->idle_lite_restore_wa = ~0; 267 engine->idle_lite_restore_wa = ~0;
273 268
274 engine->disable_lite_restore_wa = (IS_SKL_REVID(dev, 0, SKL_REVID_B0) || 269 engine->disable_lite_restore_wa = (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
275 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) && 270 IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) &&
276 (engine->id == VCS || engine->id == VCS2); 271 (engine->id == VCS || engine->id == VCS2);
277 272
278 engine->ctx_desc_template = GEN8_CTX_VALID; 273 engine->ctx_desc_template = GEN8_CTX_VALID;
279 engine->ctx_desc_template |= GEN8_CTX_ADDRESSING_MODE(dev) << 274 if (IS_GEN8(dev_priv))
280 GEN8_CTX_ADDRESSING_MODE_SHIFT;
281 if (IS_GEN8(dev))
282 engine->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT; 275 engine->ctx_desc_template |= GEN8_CTX_L3LLC_COHERENT;
283 engine->ctx_desc_template |= GEN8_CTX_PRIVILEGE; 276 engine->ctx_desc_template |= GEN8_CTX_PRIVILEGE;
284 277
@@ -297,7 +290,7 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
297 * descriptor for a pinned context 290 * descriptor for a pinned context
298 * 291 *
299 * @ctx: Context to work on 292 * @ctx: Context to work on
300 * @ring: Engine the descriptor will be used with 293 * @engine: Engine the descriptor will be used with
301 * 294 *
302 * The context descriptor encodes various attributes of a context, 295 * The context descriptor encodes various attributes of a context,
303 * including its GTT address and some flags. Because it's fairly 296 * including its GTT address and some flags. Because it's fairly
@@ -305,62 +298,42 @@ logical_ring_init_platform_invariants(struct intel_engine_cs *engine)
305 * which remains valid until the context is unpinned. 298 * which remains valid until the context is unpinned.
306 * 299 *
307 * This is what a descriptor looks like, from LSB to MSB: 300 * This is what a descriptor looks like, from LSB to MSB:
308 * bits 0-11: flags, GEN8_CTX_* (cached in ctx_desc_template) 301 * bits 0-11: flags, GEN8_CTX_* (cached in ctx_desc_template)
309 * bits 12-31: LRCA, GTT address of (the HWSP of) this context 302 * bits 12-31: LRCA, GTT address of (the HWSP of) this context
310 * bits 32-51: ctx ID, a globally unique tag (the LRCA again!) 303 * bits 32-52: ctx ID, a globally unique tag
311 * bits 52-63: reserved, may encode the engine ID (for GuC) 304 * bits 53-54: mbz, reserved for use by hardware
305 * bits 55-63: group ID, currently unused and set to 0
312 */ 306 */
313static void 307static void
314intel_lr_context_descriptor_update(struct intel_context *ctx, 308intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
315 struct intel_engine_cs *engine) 309 struct intel_engine_cs *engine)
316{ 310{
317 uint64_t lrca, desc; 311 struct intel_context *ce = &ctx->engine[engine->id];
312 u64 desc;
318 313
319 lrca = ctx->engine[engine->id].lrc_vma->node.start + 314 BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (1<<GEN8_CTX_ID_WIDTH));
320 LRC_PPHWSP_PN * PAGE_SIZE;
321 315
322 desc = engine->ctx_desc_template; /* bits 0-11 */ 316 desc = ctx->desc_template; /* bits 3-4 */
323 desc |= lrca; /* bits 12-31 */ 317 desc |= engine->ctx_desc_template; /* bits 0-11 */
324 desc |= (lrca >> PAGE_SHIFT) << GEN8_CTX_ID_SHIFT; /* bits 32-51 */ 318 desc |= ce->lrc_vma->node.start + LRC_PPHWSP_PN * PAGE_SIZE;
319 /* bits 12-31 */
320 desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT; /* bits 32-52 */
325 321
326 ctx->engine[engine->id].lrc_desc = desc; 322 ce->lrc_desc = desc;
327} 323}
328 324
329uint64_t intel_lr_context_descriptor(struct intel_context *ctx, 325uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
330 struct intel_engine_cs *engine) 326 struct intel_engine_cs *engine)
331{ 327{
332 return ctx->engine[engine->id].lrc_desc; 328 return ctx->engine[engine->id].lrc_desc;
333} 329}
334 330
335/**
336 * intel_execlists_ctx_id() - get the Execlists Context ID
337 * @ctx: Context to get the ID for
338 * @ring: Engine to get the ID for
339 *
340 * Do not confuse with ctx->id! Unfortunately we have a name overload
341 * here: the old context ID we pass to userspace as a handler so that
342 * they can refer to a context, and the new context ID we pass to the
343 * ELSP so that the GPU can inform us of the context status via
344 * interrupts.
345 *
346 * The context ID is a portion of the context descriptor, so we can
347 * just extract the required part from the cached descriptor.
348 *
349 * Return: 20-bits globally unique context ID.
350 */
351u32 intel_execlists_ctx_id(struct intel_context *ctx,
352 struct intel_engine_cs *engine)
353{
354 return intel_lr_context_descriptor(ctx, engine) >> GEN8_CTX_ID_SHIFT;
355}
356
357static void execlists_elsp_write(struct drm_i915_gem_request *rq0, 331static void execlists_elsp_write(struct drm_i915_gem_request *rq0,
358 struct drm_i915_gem_request *rq1) 332 struct drm_i915_gem_request *rq1)
359{ 333{
360 334
361 struct intel_engine_cs *engine = rq0->engine; 335 struct intel_engine_cs *engine = rq0->engine;
362 struct drm_device *dev = engine->dev; 336 struct drm_i915_private *dev_priv = rq0->i915;
363 struct drm_i915_private *dev_priv = dev->dev_private;
364 uint64_t desc[2]; 337 uint64_t desc[2];
365 338
366 if (rq1) { 339 if (rq1) {
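
The rewritten intel_lr_context_descriptor_update() above packs the descriptor from the engine template (bits 0-11), the LRCA (bits 12-31) and the context's hw_id (bits 32-52), with BUILD_BUG_ON guarding the GEN8_CTX_ID_WIDTH limit. Here is a hedged, standalone sketch of that packing; the masks, the page-alignment assumption and the helper name are illustrative, not the driver's actual code.

#include <stdint.h>
#include <stdio.h>

#define CTX_ID_SHIFT 32          /* matches GEN8_CTX_ID_SHIFT in the hunk above */
#define CTX_ID_WIDTH 21          /* matches GEN8_CTX_ID_WIDTH */

/* Pack a descriptor the way the updated layout comment describes it:
 *   bits  0-11: per-engine flags (ctx_desc_template)
 *   bits 12-31: LRCA, the GGTT address of the context image
 *   bits 32-52: the context's globally unique hw_id */
static uint64_t make_ctx_descriptor(uint64_t flags, uint64_t lrca, uint32_t hw_id)
{
	uint64_t desc = flags & 0xfff;                       /* bits 0-11  */

	desc |= lrca & 0xfffff000;                           /* bits 12-31, page aligned */
	desc |= (uint64_t)(hw_id & ((1u << CTX_ID_WIDTH) - 1)) << CTX_ID_SHIFT;
	return desc;
}

int main(void)
{
	printf("desc = 0x%016llx\n",
	       (unsigned long long)make_ctx_descriptor(0x019, 0x12345000, 42));
	return 0;
}

Using the per-context hw_id instead of the LRCA as the context ID is what lets the execlists IRQ handler match a completed context against the head of the queue without re-deriving the GGTT address.
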
@@ -431,6 +404,20 @@ static void execlists_submit_requests(struct drm_i915_gem_request *rq0,
431 spin_unlock_irq(&dev_priv->uncore.lock); 404 spin_unlock_irq(&dev_priv->uncore.lock);
432} 405}
433 406
407static inline void execlists_context_status_change(
408 struct drm_i915_gem_request *rq,
409 unsigned long status)
410{
411 /*
412 * Only used when GVT-g is enabled now. When GVT-g is disabled,
413 * The compiler should eliminate this function as dead-code.
414 */
415 if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
416 return;
417
418 atomic_notifier_call_chain(&rq->ctx->status_notifier, status, rq);
419}
420
434static void execlists_context_unqueue(struct intel_engine_cs *engine) 421static void execlists_context_unqueue(struct intel_engine_cs *engine)
435{ 422{
436 struct drm_i915_gem_request *req0 = NULL, *req1 = NULL; 423 struct drm_i915_gem_request *req0 = NULL, *req1 = NULL;
@@ -442,7 +429,7 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine)
442 * If irqs are not active generate a warning as batches that finish 429 * If irqs are not active generate a warning as batches that finish
443 * without the irqs may get lost and a GPU Hang may occur. 430 * without the irqs may get lost and a GPU Hang may occur.
444 */ 431 */
445 WARN_ON(!intel_irqs_enabled(engine->dev->dev_private)); 432 WARN_ON(!intel_irqs_enabled(engine->i915));
446 433
447 /* Try to read in pairs */ 434 /* Try to read in pairs */
448 list_for_each_entry_safe(cursor, tmp, &engine->execlist_queue, 435 list_for_each_entry_safe(cursor, tmp, &engine->execlist_queue,
@@ -453,10 +440,24 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine)
453 /* Same ctx: ignore first request, as second request 440 /* Same ctx: ignore first request, as second request
454 * will update tail past first request's workload */ 441 * will update tail past first request's workload */
455 cursor->elsp_submitted = req0->elsp_submitted; 442 cursor->elsp_submitted = req0->elsp_submitted;
456 list_move_tail(&req0->execlist_link, 443 list_del(&req0->execlist_link);
457 &engine->execlist_retired_req_list); 444 i915_gem_request_unreference(req0);
458 req0 = cursor; 445 req0 = cursor;
459 } else { 446 } else {
447 if (IS_ENABLED(CONFIG_DRM_I915_GVT)) {
448 /*
449 * req0 (after merged) ctx requires single
450 * submission, stop picking
451 */
452 if (req0->ctx->execlists_force_single_submission)
453 break;
454 /*
455 * req0 ctx doesn't require single submission,
456 * but next req ctx requires, stop picking
457 */
458 if (cursor->ctx->execlists_force_single_submission)
459 break;
460 }
460 req1 = cursor; 461 req1 = cursor;
461 WARN_ON(req1->elsp_submitted); 462 WARN_ON(req1->elsp_submitted);
462 break; 463 break;
@@ -466,6 +467,12 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine)
466 if (unlikely(!req0)) 467 if (unlikely(!req0))
467 return; 468 return;
468 469
470 execlists_context_status_change(req0, INTEL_CONTEXT_SCHEDULE_IN);
471
472 if (req1)
473 execlists_context_status_change(req1,
474 INTEL_CONTEXT_SCHEDULE_IN);
475
469 if (req0->elsp_submitted & engine->idle_lite_restore_wa) { 476 if (req0->elsp_submitted & engine->idle_lite_restore_wa) {
470 /* 477 /*
471 * WaIdleLiteRestore: make sure we never cause a lite restore 478 * WaIdleLiteRestore: make sure we never cause a lite restore
@@ -486,7 +493,7 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine)
486} 493}
487 494
488static unsigned int 495static unsigned int
489execlists_check_remove_request(struct intel_engine_cs *engine, u32 request_id) 496execlists_check_remove_request(struct intel_engine_cs *engine, u32 ctx_id)
490{ 497{
491 struct drm_i915_gem_request *head_req; 498 struct drm_i915_gem_request *head_req;
492 499
@@ -496,19 +503,18 @@ execlists_check_remove_request(struct intel_engine_cs *engine, u32 request_id)
496 struct drm_i915_gem_request, 503 struct drm_i915_gem_request,
497 execlist_link); 504 execlist_link);
498 505
499 if (!head_req) 506 if (WARN_ON(!head_req || (head_req->ctx_hw_id != ctx_id)))
500 return 0; 507 return 0;
501
502 if (unlikely(intel_execlists_ctx_id(head_req->ctx, engine) != request_id))
503 return 0;
504 508
505 WARN(head_req->elsp_submitted == 0, "Never submitted head request\n"); 509 WARN(head_req->elsp_submitted == 0, "Never submitted head request\n");
506 510
507 if (--head_req->elsp_submitted > 0) 511 if (--head_req->elsp_submitted > 0)
508 return 0; 512 return 0;
509 513
510 list_move_tail(&head_req->execlist_link, 514 execlists_context_status_change(head_req, INTEL_CONTEXT_SCHEDULE_OUT);
511 &engine->execlist_retired_req_list); 515
516 list_del(&head_req->execlist_link);
517 i915_gem_request_unreference(head_req);
512 518
513 return 1; 519 return 1;
514} 520}
@@ -517,7 +523,7 @@ static u32
517get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer, 523get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
518 u32 *context_id) 524 u32 *context_id)
519{ 525{
520 struct drm_i915_private *dev_priv = engine->dev->dev_private; 526 struct drm_i915_private *dev_priv = engine->i915;
521 u32 status; 527 u32 status;
522 528
523 read_pointer %= GEN8_CSB_ENTRIES; 529 read_pointer %= GEN8_CSB_ENTRIES;
@@ -535,7 +541,7 @@ get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
535 541
536/** 542/**
537 * intel_lrc_irq_handler() - handle Context Switch interrupts 543 * intel_lrc_irq_handler() - handle Context Switch interrupts
538 * @engine: Engine Command Streamer to handle. 544 * @data: tasklet handler passed in unsigned long
539 * 545 *
540 * Check the unread Context Status Buffers and manage the submission of new 546 * Check the unread Context Status Buffers and manage the submission of new
541 * contexts to the ELSP accordingly. 547 * contexts to the ELSP accordingly.
@@ -543,7 +549,7 @@ get_context_status(struct intel_engine_cs *engine, unsigned int read_pointer,
543static void intel_lrc_irq_handler(unsigned long data) 549static void intel_lrc_irq_handler(unsigned long data)
544{ 550{
545 struct intel_engine_cs *engine = (struct intel_engine_cs *)data; 551 struct intel_engine_cs *engine = (struct intel_engine_cs *)data;
546 struct drm_i915_private *dev_priv = engine->dev->dev_private; 552 struct drm_i915_private *dev_priv = engine->i915;
547 u32 status_pointer; 553 u32 status_pointer;
548 unsigned int read_pointer, write_pointer; 554 unsigned int read_pointer, write_pointer;
549 u32 csb[GEN8_CSB_ENTRIES][2]; 555 u32 csb[GEN8_CSB_ENTRIES][2];
@@ -612,11 +618,6 @@ static void execlists_context_queue(struct drm_i915_gem_request *request)
612 struct drm_i915_gem_request *cursor; 618 struct drm_i915_gem_request *cursor;
613 int num_elements = 0; 619 int num_elements = 0;
614 620
615 if (request->ctx != request->i915->kernel_context)
616 intel_lr_context_pin(request->ctx, engine);
617
618 i915_gem_request_reference(request);
619
620 spin_lock_bh(&engine->execlist_lock); 621 spin_lock_bh(&engine->execlist_lock);
621 622
622 list_for_each_entry(cursor, &engine->execlist_queue, execlist_link) 623 list_for_each_entry(cursor, &engine->execlist_queue, execlist_link)
@@ -633,12 +634,14 @@ static void execlists_context_queue(struct drm_i915_gem_request *request)
633 if (request->ctx == tail_req->ctx) { 634 if (request->ctx == tail_req->ctx) {
634 WARN(tail_req->elsp_submitted != 0, 635 WARN(tail_req->elsp_submitted != 0,
635 "More than 2 already-submitted reqs queued\n"); 636 "More than 2 already-submitted reqs queued\n");
636 list_move_tail(&tail_req->execlist_link, 637 list_del(&tail_req->execlist_link);
637 &engine->execlist_retired_req_list); 638 i915_gem_request_unreference(tail_req);
638 } 639 }
639 } 640 }
640 641
642 i915_gem_request_reference(request);
641 list_add_tail(&request->execlist_link, &engine->execlist_queue); 643 list_add_tail(&request->execlist_link, &engine->execlist_queue);
644 request->ctx_hw_id = request->ctx->hw_id;
642 if (num_elements == 0) 645 if (num_elements == 0)
643 execlists_context_unqueue(engine); 646 execlists_context_unqueue(engine);
644 647
@@ -698,9 +701,23 @@ static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
698 701
699int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request) 702int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
700{ 703{
701 int ret = 0; 704 struct intel_engine_cs *engine = request->engine;
705 struct intel_context *ce = &request->ctx->engine[engine->id];
706 int ret;
702 707
703 request->ringbuf = request->ctx->engine[request->engine->id].ringbuf; 708 /* Flush enough space to reduce the likelihood of waiting after
709 * we start building the request - in which case we will just
710 * have to repeat work.
711 */
712 request->reserved_space += EXECLISTS_REQUEST_SIZE;
713
714 if (!ce->state) {
715 ret = execlists_context_deferred_alloc(request->ctx, engine);
716 if (ret)
717 return ret;
718 }
719
720 request->ringbuf = ce->ringbuf;
704 721
705 if (i915.enable_guc_submission) { 722 if (i915.enable_guc_submission) {
706 /* 723 /*
@@ -708,16 +725,39 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
708 * going any further, as the i915_add_request() call 725 * going any further, as the i915_add_request() call
709 * later on mustn't fail ... 726 * later on mustn't fail ...
710 */ 727 */
711 struct intel_guc *guc = &request->i915->guc; 728 ret = i915_guc_wq_check_space(request);
712
713 ret = i915_guc_wq_check_space(guc->execbuf_client);
714 if (ret) 729 if (ret)
715 return ret; 730 return ret;
716 } 731 }
717 732
718 if (request->ctx != request->i915->kernel_context) 733 ret = intel_lr_context_pin(request->ctx, engine);
719 ret = intel_lr_context_pin(request->ctx, request->engine); 734 if (ret)
735 return ret;
736
737 ret = intel_ring_begin(request, 0);
738 if (ret)
739 goto err_unpin;
740
741 if (!ce->initialised) {
742 ret = engine->init_context(request);
743 if (ret)
744 goto err_unpin;
745
746 ce->initialised = true;
747 }
748
749 /* Note that after this point, we have committed to using
750 * this request as it is being used to both track the
751 * state of engine initialisation and liveness of the
752 * golden renderstate above. Think twice before you try
753 * to cancel/unwind this request now.
754 */
720 755
756 request->reserved_space -= EXECLISTS_REQUEST_SIZE;
757 return 0;
758
759err_unpin:
760 intel_lr_context_unpin(request->ctx, engine);
721 return ret; 761 return ret;
722} 762}
723 763
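
The reworked intel_logical_ring_alloc_request_extras() above reserves EXECLISTS_REQUEST_SIZE bytes up front and releases the reservation once the request is committed, so the final breadcrumb emission is unlikely to block. A toy sketch of that reserve/commit bookkeeping, with a hypothetical struct standing in for drm_i915_gem_request:

#include <stdio.h>

#define EXECLISTS_REQUEST_SIZE 64   /* bytes, as in the hunk above */

struct toy_request {
	unsigned int reserved_space;
};

/* Reserve room early so later emit paths rarely have to wait for ring space;
 * once the request is committed, hand the reservation back for the
 * closing commands. */
static int toy_request_alloc(struct toy_request *rq)
{
	rq->reserved_space += EXECLISTS_REQUEST_SIZE;

	/* ... context pinning, ring_begin and init_context would happen here ... */

	rq->reserved_space -= EXECLISTS_REQUEST_SIZE;
	return 0;
}

int main(void)
{
	struct toy_request rq = { .reserved_space = 0 };

	printf("alloc: %d, reserved_space=%u\n",
	       toy_request_alloc(&rq), rq.reserved_space);
	return 0;
}
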
@@ -734,7 +774,6 @@ static int
734intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request) 774intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
735{ 775{
736 struct intel_ringbuffer *ringbuf = request->ringbuf; 776 struct intel_ringbuffer *ringbuf = request->ringbuf;
737 struct drm_i915_private *dev_priv = request->i915;
738 struct intel_engine_cs *engine = request->engine; 777 struct intel_engine_cs *engine = request->engine;
739 778
740 intel_logical_ring_advance(ringbuf); 779 intel_logical_ring_advance(ringbuf);
@@ -750,54 +789,28 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
750 intel_logical_ring_emit(ringbuf, MI_NOOP); 789 intel_logical_ring_emit(ringbuf, MI_NOOP);
751 intel_logical_ring_advance(ringbuf); 790 intel_logical_ring_advance(ringbuf);
752 791
753 if (intel_engine_stopped(engine)) 792 /* We keep the previous context alive until we retire the following
754 return 0; 793 * request. This ensures that any the context object is still pinned
755 794 * for any residual writes the HW makes into it on the context switch
756 if (engine->last_context != request->ctx) { 795 * into the next object following the breadcrumb. Otherwise, we may
757 if (engine->last_context) 796 * retire the context too early.
758 intel_lr_context_unpin(engine->last_context, engine); 797 */
759 if (request->ctx != request->i915->kernel_context) { 798 request->previous_context = engine->last_context;
760 intel_lr_context_pin(request->ctx, engine); 799 engine->last_context = request->ctx;
761 engine->last_context = request->ctx;
762 } else {
763 engine->last_context = NULL;
764 }
765 }
766 800
767 if (dev_priv->guc.execbuf_client) 801 if (i915.enable_guc_submission)
768 i915_guc_submit(dev_priv->guc.execbuf_client, request); 802 i915_guc_submit(request);
769 else 803 else
770 execlists_context_queue(request); 804 execlists_context_queue(request);
771 805
772 return 0; 806 return 0;
773} 807}
774 808
775int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request)
776{
777 /*
778 * The first call merely notes the reserve request and is common for
779 * all back ends. The subsequent localised _begin() call actually
780 * ensures that the reservation is available. Without the begin, if
781 * the request creator immediately submitted the request without
782 * adding any commands to it then there might not actually be
783 * sufficient room for the submission commands.
784 */
785 intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST);
786
787 return intel_ring_begin(request, 0);
788}
789
790/** 809/**
791 * execlists_submission() - submit a batchbuffer for execution, Execlists style 810 * execlists_submission() - submit a batchbuffer for execution, Execlists style
792 * @dev: DRM device. 811 * @params: execbuffer call parameters.
793 * @file: DRM file.
794 * @ring: Engine Command Streamer to submit to.
795 * @ctx: Context to employ for this submission.
796 * @args: execbuffer call arguments. 812 * @args: execbuffer call arguments.
797 * @vmas: list of vmas. 813 * @vmas: list of vmas.
798 * @batch_obj: the batchbuffer to submit.
799 * @exec_start: batchbuffer start virtual address pointer.
800 * @dispatch_flags: translated execbuffer call flags.
801 * 814 *
802 * This is the evil twin version of i915_gem_ringbuffer_submission. It abstracts 815 * This is the evil twin version of i915_gem_ringbuffer_submission. It abstracts
803 * away the submission details of the execbuffer ioctl call. 816 * away the submission details of the execbuffer ioctl call.
@@ -810,7 +823,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
810{ 823{
811 struct drm_device *dev = params->dev; 824 struct drm_device *dev = params->dev;
812 struct intel_engine_cs *engine = params->engine; 825 struct intel_engine_cs *engine = params->engine;
813 struct drm_i915_private *dev_priv = dev->dev_private; 826 struct drm_i915_private *dev_priv = to_i915(dev);
814 struct intel_ringbuffer *ringbuf = params->ctx->engine[engine->id].ringbuf; 827 struct intel_ringbuffer *ringbuf = params->ctx->engine[engine->id].ringbuf;
815 u64 exec_start; 828 u64 exec_start;
816 int instp_mode; 829 int instp_mode;
@@ -881,28 +894,18 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
881 return 0; 894 return 0;
882} 895}
883 896
884void intel_execlists_retire_requests(struct intel_engine_cs *engine) 897void intel_execlists_cancel_requests(struct intel_engine_cs *engine)
885{ 898{
886 struct drm_i915_gem_request *req, *tmp; 899 struct drm_i915_gem_request *req, *tmp;
887 struct list_head retired_list; 900 LIST_HEAD(cancel_list);
888 901
889 WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex)); 902 WARN_ON(!mutex_is_locked(&engine->i915->drm.struct_mutex));
890 if (list_empty(&engine->execlist_retired_req_list))
891 return;
892 903
893 INIT_LIST_HEAD(&retired_list);
894 spin_lock_bh(&engine->execlist_lock); 904 spin_lock_bh(&engine->execlist_lock);
895 list_replace_init(&engine->execlist_retired_req_list, &retired_list); 905 list_replace_init(&engine->execlist_queue, &cancel_list);
896 spin_unlock_bh(&engine->execlist_lock); 906 spin_unlock_bh(&engine->execlist_lock);
897 907
898 list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) { 908 list_for_each_entry_safe(req, tmp, &cancel_list, execlist_link) {
899 struct intel_context *ctx = req->ctx;
900 struct drm_i915_gem_object *ctx_obj =
901 ctx->engine[engine->id].state;
902
903 if (ctx_obj && (ctx != req->i915->kernel_context))
904 intel_lr_context_unpin(ctx, engine);
905
906 list_del(&req->execlist_link); 909 list_del(&req->execlist_link);
907 i915_gem_request_unreference(req); 910 i915_gem_request_unreference(req);
908 } 911 }
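
The rewritten cancel path uses the usual detach-then-drain idiom: list_replace_init() moves every request from execlist_queue onto a local list while the spinlock is held, and the unreference loop then runs with the lock dropped. The same idiom in isolation, with generic types standing in for the request structures:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct item {
	struct list_head link;
};

static void drain_queue(struct list_head *queue, spinlock_t *lock)
{
	struct item *it, *tmp;
	LIST_HEAD(stale);

	spin_lock(lock);
	list_replace_init(queue, &stale);	/* queue is now empty */
	spin_unlock(lock);

	/* walk the detached list without holding the lock */
	list_for_each_entry_safe(it, tmp, &stale, link) {
		list_del(&it->link);
		kfree(it);	/* stands in for the unreference above */
	}
}
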
@@ -910,7 +913,7 @@ void intel_execlists_retire_requests(struct intel_engine_cs *engine)
910 913
911void intel_logical_ring_stop(struct intel_engine_cs *engine) 914void intel_logical_ring_stop(struct intel_engine_cs *engine)
912{ 915{
913 struct drm_i915_private *dev_priv = engine->dev->dev_private; 916 struct drm_i915_private *dev_priv = engine->i915;
914 int ret; 917 int ret;
915 918
916 if (!intel_engine_initialized(engine)) 919 if (!intel_engine_initialized(engine))
@@ -923,7 +926,10 @@ void intel_logical_ring_stop(struct intel_engine_cs *engine)
923 926
924 /* TODO: Is this correct with Execlists enabled? */ 927 /* TODO: Is this correct with Execlists enabled? */
925 I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING)); 928 I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
926 if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) { 929 if (intel_wait_for_register(dev_priv,
930 RING_MI_MODE(engine->mmio_base),
931 MODE_IDLE, MODE_IDLE,
932 1000)) {
927 DRM_ERROR("%s :timed out trying to stop ring\n", engine->name); 933 DRM_ERROR("%s :timed out trying to stop ring\n", engine->name);
928 return; 934 return;
929 } 935 }
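
intel_wait_for_register() takes the register, a mask, the value expected under that mask and a timeout in milliseconds, replacing the open-coded wait_for() on MODE_IDLE. Roughly, such a helper reduces to a masked poll loop like the one below; the real implementation in intel_uncore.c additionally does a fast atomic phase and manages forcewake, so treat this purely as a sketch of the call's semantics:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

/* Sketch only: poll (reg & mask) == value until timeout_ms expires. */
static int wait_for_register_sketch(struct drm_i915_private *dev_priv,
				    i915_reg_t reg, u32 mask, u32 value,
				    unsigned int timeout_ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

	do {
		if ((I915_READ(reg) & mask) == value)
			return 0;
		usleep_range(10, 100);
	} while (time_before(jiffies, deadline));

	return -ETIMEDOUT;
}
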
@@ -946,25 +952,26 @@ int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
946 return 0; 952 return 0;
947} 953}
948 954
949static int intel_lr_context_do_pin(struct intel_context *ctx, 955static int intel_lr_context_pin(struct i915_gem_context *ctx,
950 struct intel_engine_cs *engine) 956 struct intel_engine_cs *engine)
951{ 957{
952 struct drm_device *dev = engine->dev; 958 struct drm_i915_private *dev_priv = ctx->i915;
953 struct drm_i915_private *dev_priv = dev->dev_private; 959 struct intel_context *ce = &ctx->engine[engine->id];
954 struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
955 struct intel_ringbuffer *ringbuf = ctx->engine[engine->id].ringbuf;
956 void *vaddr; 960 void *vaddr;
957 u32 *lrc_reg_state; 961 u32 *lrc_reg_state;
958 int ret; 962 int ret;
959 963
960 WARN_ON(!mutex_is_locked(&engine->dev->struct_mutex)); 964 lockdep_assert_held(&ctx->i915->drm.struct_mutex);
961 965
962 ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 966 if (ce->pin_count++)
963 PIN_OFFSET_BIAS | GUC_WOPCM_TOP); 967 return 0;
968
969 ret = i915_gem_obj_ggtt_pin(ce->state, GEN8_LR_CONTEXT_ALIGN,
970 PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
964 if (ret) 971 if (ret)
965 return ret; 972 goto err;
966 973
967 vaddr = i915_gem_object_pin_map(ctx_obj); 974 vaddr = i915_gem_object_pin_map(ce->state);
968 if (IS_ERR(vaddr)) { 975 if (IS_ERR(vaddr)) {
969 ret = PTR_ERR(vaddr); 976 ret = PTR_ERR(vaddr);
970 goto unpin_ctx_obj; 977 goto unpin_ctx_obj;
@@ -972,65 +979,54 @@ static int intel_lr_context_do_pin(struct intel_context *ctx,
972 979
973 lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE; 980 lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
974 981
975 ret = intel_pin_and_map_ringbuffer_obj(engine->dev, ringbuf); 982 ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ce->ringbuf);
976 if (ret) 983 if (ret)
977 goto unpin_map; 984 goto unpin_map;
978 985
979 ctx->engine[engine->id].lrc_vma = i915_gem_obj_to_ggtt(ctx_obj); 986 i915_gem_context_reference(ctx);
987 ce->lrc_vma = i915_gem_obj_to_ggtt(ce->state);
980 intel_lr_context_descriptor_update(ctx, engine); 988 intel_lr_context_descriptor_update(ctx, engine);
981 lrc_reg_state[CTX_RING_BUFFER_START+1] = ringbuf->vma->node.start; 989
982 ctx->engine[engine->id].lrc_reg_state = lrc_reg_state; 990 lrc_reg_state[CTX_RING_BUFFER_START+1] = ce->ringbuf->vma->node.start;
983 ctx_obj->dirty = true; 991 ce->lrc_reg_state = lrc_reg_state;
992 ce->state->dirty = true;
984 993
985 /* Invalidate GuC TLB. */ 994 /* Invalidate GuC TLB. */
986 if (i915.enable_guc_submission) 995 if (i915.enable_guc_submission)
987 I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE); 996 I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
988 997
989 return ret; 998 return 0;
990 999
991unpin_map: 1000unpin_map:
992 i915_gem_object_unpin_map(ctx_obj); 1001 i915_gem_object_unpin_map(ce->state);
993unpin_ctx_obj: 1002unpin_ctx_obj:
994 i915_gem_object_ggtt_unpin(ctx_obj); 1003 i915_gem_object_ggtt_unpin(ce->state);
995 1004err:
1005 ce->pin_count = 0;
996 return ret; 1006 return ret;
997} 1007}
998 1008
999static int intel_lr_context_pin(struct intel_context *ctx, 1009void intel_lr_context_unpin(struct i915_gem_context *ctx,
1000 struct intel_engine_cs *engine) 1010 struct intel_engine_cs *engine)
1001{ 1011{
1002 int ret = 0; 1012 struct intel_context *ce = &ctx->engine[engine->id];
1003 1013
1004 if (ctx->engine[engine->id].pin_count++ == 0) { 1014 lockdep_assert_held(&ctx->i915->drm.struct_mutex);
1005 ret = intel_lr_context_do_pin(ctx, engine); 1015 GEM_BUG_ON(ce->pin_count == 0);
1006 if (ret)
1007 goto reset_pin_count;
1008 1016
1009 i915_gem_context_reference(ctx); 1017 if (--ce->pin_count)
1010 } 1018 return;
1011 return ret;
1012 1019
1013reset_pin_count: 1020 intel_unpin_ringbuffer_obj(ce->ringbuf);
1014 ctx->engine[engine->id].pin_count = 0;
1015 return ret;
1016}
1017 1021
1018void intel_lr_context_unpin(struct intel_context *ctx, 1022 i915_gem_object_unpin_map(ce->state);
1019 struct intel_engine_cs *engine) 1023 i915_gem_object_ggtt_unpin(ce->state);
1020{
1021 struct drm_i915_gem_object *ctx_obj = ctx->engine[engine->id].state;
1022 1024
1023 WARN_ON(!mutex_is_locked(&ctx->i915->dev->struct_mutex)); 1025 ce->lrc_vma = NULL;
1024 if (--ctx->engine[engine->id].pin_count == 0) { 1026 ce->lrc_desc = 0;
1025 i915_gem_object_unpin_map(ctx_obj); 1027 ce->lrc_reg_state = NULL;
1026 intel_unpin_ringbuffer_obj(ctx->engine[engine->id].ringbuf);
1027 i915_gem_object_ggtt_unpin(ctx_obj);
1028 ctx->engine[engine->id].lrc_vma = NULL;
1029 ctx->engine[engine->id].lrc_desc = 0;
1030 ctx->engine[engine->id].lrc_reg_state = NULL;
1031 1028
1032 i915_gem_context_unreference(ctx); 1029 i915_gem_context_unreference(ctx);
1033 }
1034} 1030}
1035 1031
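
With the former do_pin/pin split folded together, the pin count is handled inline: the post-increment test makes only the 0 to 1 transition do the expensive work, the pre-decrement test makes only the final unpin tear things down, and the error path resets the count so a failed pin leaves the context unpinned. The bare pattern, with hypothetical helpers in place of the GGTT and ringbuffer operations:

/* Illustrative pin/unpin refcounting; the helpers below are hypothetical. */
int expensive_pin_work(void);
void expensive_unpin_work(void);

struct toy_ctx {
	unsigned int pin_count;
};

static int toy_pin(struct toy_ctx *ce)
{
	int ret;

	if (ce->pin_count++)		/* already pinned, just count it */
		return 0;

	ret = expensive_pin_work();
	if (ret)
		goto err;

	return 0;

err:
	ce->pin_count = 0;		/* undo the optimistic increment */
	return ret;
}

static void toy_unpin(struct toy_ctx *ce)
{
	if (--ce->pin_count)		/* still pinned by someone else */
		return;

	expensive_unpin_work();
}
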
1036static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req) 1032static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
@@ -1038,9 +1034,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
1038 int ret, i; 1034 int ret, i;
1039 struct intel_engine_cs *engine = req->engine; 1035 struct intel_engine_cs *engine = req->engine;
1040 struct intel_ringbuffer *ringbuf = req->ringbuf; 1036 struct intel_ringbuffer *ringbuf = req->ringbuf;
1041 struct drm_device *dev = engine->dev; 1037 struct i915_workarounds *w = &req->i915->workarounds;
1042 struct drm_i915_private *dev_priv = dev->dev_private;
1043 struct i915_workarounds *w = &dev_priv->workarounds;
1044 1038
1045 if (w->count == 0) 1039 if (w->count == 0)
1046 return 0; 1040 return 0;
@@ -1103,7 +1097,7 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
1103 uint32_t *const batch, 1097 uint32_t *const batch,
1104 uint32_t index) 1098 uint32_t index)
1105{ 1099{
1106 struct drm_i915_private *dev_priv = engine->dev->dev_private; 1100 struct drm_i915_private *dev_priv = engine->i915;
1107 uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES); 1101 uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);
1108 1102
1109 /* 1103 /*
@@ -1165,7 +1159,7 @@ static inline int wa_ctx_end(struct i915_wa_ctx_bb *wa_ctx,
1165/** 1159/**
1166 * gen8_init_indirectctx_bb() - initialize indirect ctx batch with WA 1160 * gen8_init_indirectctx_bb() - initialize indirect ctx batch with WA
1167 * 1161 *
1168 * @ring: only applicable for RCS 1162 * @engine: only applicable for RCS
1169 * @wa_ctx: structure representing wa_ctx 1163 * @wa_ctx: structure representing wa_ctx
1170 * offset: specifies start of the batch, should be cache-aligned. This is updated 1164 * offset: specifies start of the batch, should be cache-aligned. This is updated
1171 * with the offset value received as input. 1165 * with the offset value received as input.
@@ -1202,7 +1196,7 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
1202 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE); 1196 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
1203 1197
1204 /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */ 1198 /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
1205 if (IS_BROADWELL(engine->dev)) { 1199 if (IS_BROADWELL(engine->i915)) {
1206 int rc = gen8_emit_flush_coherentl3_wa(engine, batch, index); 1200 int rc = gen8_emit_flush_coherentl3_wa(engine, batch, index);
1207 if (rc < 0) 1201 if (rc < 0)
1208 return rc; 1202 return rc;
@@ -1239,7 +1233,7 @@ static int gen8_init_indirectctx_bb(struct intel_engine_cs *engine,
1239/** 1233/**
1240 * gen8_init_perctx_bb() - initialize per ctx batch with WA 1234 * gen8_init_perctx_bb() - initialize per ctx batch with WA
1241 * 1235 *
1242 * @ring: only applicable for RCS 1236 * @engine: only applicable for RCS
1243 * @wa_ctx: structure representing wa_ctx 1237 * @wa_ctx: structure representing wa_ctx
1244 * offset: specifies start of the batch, should be cache-aligned. 1238 * offset: specifies start of the batch, should be cache-aligned.
1245 * size: size of the batch in DWORDS but HW expects in terms of cachelines 1239 * size: size of the batch in DWORDS but HW expects in terms of cachelines
@@ -1274,13 +1268,12 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
1274 uint32_t *offset) 1268 uint32_t *offset)
1275{ 1269{
1276 int ret; 1270 int ret;
1277 struct drm_device *dev = engine->dev; 1271 struct drm_i915_private *dev_priv = engine->i915;
1278 struct drm_i915_private *dev_priv = dev->dev_private;
1279 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS); 1272 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1280 1273
1281 /* WaDisableCtxRestoreArbitration:skl,bxt */ 1274 /* WaDisableCtxRestoreArbitration:skl,bxt */
1282 if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) || 1275 if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_D0) ||
1283 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) 1276 IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
1284 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE); 1277 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_DISABLE);
1285 1278
1286 /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */ 1279 /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt */
@@ -1305,6 +1298,31 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
1305 wa_ctx_emit(batch, index, 0); 1298 wa_ctx_emit(batch, index, 0);
1306 wa_ctx_emit(batch, index, 0); 1299 wa_ctx_emit(batch, index, 0);
1307 } 1300 }
1301
1302 /* WaMediaPoolStateCmdInWABB:bxt */
1303 if (HAS_POOLED_EU(engine->i915)) {
1304 /*
1305	 * EU pool configuration is set up along with the golden context
1306	 * during context initialization. This value depends on the
1307	 * device type (2x6 or 3x6) and needs to be updated based
1308	 * on which subslice is disabled, especially for 2x6
1309	 * devices; however, it is safe to load the default
1310	 * configuration of a 3x6 device instead of masking off the
1311	 * corresponding bits, because the HW ignores bits of a disabled
1312	 * subslice and drops down to the appropriate config. Please
1313	 * see render_state_setup() in i915_gem_render_state.c for the
1314	 * possible configurations; to avoid duplication they are
1315	 * not shown here again.
1316 */
1317 u32 eu_pool_config = 0x00777000;
1318 wa_ctx_emit(batch, index, GEN9_MEDIA_POOL_STATE);
1319 wa_ctx_emit(batch, index, GEN9_MEDIA_POOL_ENABLE);
1320 wa_ctx_emit(batch, index, eu_pool_config);
1321 wa_ctx_emit(batch, index, 0);
1322 wa_ctx_emit(batch, index, 0);
1323 wa_ctx_emit(batch, index, 0);
1324 }
1325
1308 /* Pad to end of cacheline */ 1326 /* Pad to end of cacheline */
1309 while (index % CACHELINE_DWORDS) 1327 while (index % CACHELINE_DWORDS)
1310 wa_ctx_emit(batch, index, MI_NOOP); 1328 wa_ctx_emit(batch, index, MI_NOOP);
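
The batch sections emitted here are padded so the next section starts on a cacheline boundary; the loop above simply writes MI_NOOPs (which encode as zero) until the dword index is a multiple of CACHELINE_DWORDS. Reduced to a standalone helper, assuming 64-byte cachelines and 4-byte dwords:

#include <linux/types.h>

#define TOY_CACHELINE_BYTES	64
#define TOY_CACHELINE_DWORDS	(TOY_CACHELINE_BYTES / sizeof(u32))

/* Pad a dword index up to the next cacheline boundary (sketch). */
static u32 pad_to_cacheline(u32 *batch, u32 index)
{
	while (index % TOY_CACHELINE_DWORDS)
		batch[index++] = 0;	/* MI_NOOP */
	return index;
}
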
@@ -1317,12 +1335,11 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
1317 uint32_t *const batch, 1335 uint32_t *const batch,
1318 uint32_t *offset) 1336 uint32_t *offset)
1319{ 1337{
1320 struct drm_device *dev = engine->dev;
1321 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS); 1338 uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
1322 1339
1323 /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */ 1340 /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
1324 if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) || 1341 if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_B0) ||
1325 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { 1342 IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1)) {
1326 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1)); 1343 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(1));
1327 wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0); 1344 wa_ctx_emit_reg(batch, index, GEN9_SLICE_COMMON_ECO_CHICKEN0);
1328 wa_ctx_emit(batch, index, 1345 wa_ctx_emit(batch, index,
@@ -1331,7 +1348,7 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
1331 } 1348 }
1332 1349
1333 /* WaClearTdlStateAckDirtyBits:bxt */ 1350 /* WaClearTdlStateAckDirtyBits:bxt */
1334 if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) { 1351 if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_B0)) {
1335 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(4)); 1352 wa_ctx_emit(batch, index, MI_LOAD_REGISTER_IMM(4));
1336 1353
1337 wa_ctx_emit_reg(batch, index, GEN8_STATE_ACK); 1354 wa_ctx_emit_reg(batch, index, GEN8_STATE_ACK);
@@ -1350,8 +1367,8 @@ static int gen9_init_perctx_bb(struct intel_engine_cs *engine,
1350 } 1367 }
1351 1368
1352 /* WaDisableCtxRestoreArbitration:skl,bxt */ 1369 /* WaDisableCtxRestoreArbitration:skl,bxt */
1353 if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) || 1370 if (IS_SKL_REVID(engine->i915, 0, SKL_REVID_D0) ||
1354 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) 1371 IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
1355 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE); 1372 wa_ctx_emit(batch, index, MI_ARB_ON_OFF | MI_ARB_ENABLE);
1356 1373
1357 wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END); 1374 wa_ctx_emit(batch, index, MI_BATCH_BUFFER_END);
@@ -1363,11 +1380,13 @@ static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
1363{ 1380{
1364 int ret; 1381 int ret;
1365 1382
1366 engine->wa_ctx.obj = i915_gem_alloc_object(engine->dev, 1383 engine->wa_ctx.obj = i915_gem_object_create(&engine->i915->drm,
1367 PAGE_ALIGN(size)); 1384 PAGE_ALIGN(size));
1368 if (!engine->wa_ctx.obj) { 1385 if (IS_ERR(engine->wa_ctx.obj)) {
1369 DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n"); 1386 DRM_DEBUG_DRIVER("alloc LRC WA ctx backing obj failed.\n");
1370 return -ENOMEM; 1387 ret = PTR_ERR(engine->wa_ctx.obj);
1388 engine->wa_ctx.obj = NULL;
1389 return ret;
1371 } 1390 }
1372 1391
1373 ret = i915_gem_obj_ggtt_pin(engine->wa_ctx.obj, PAGE_SIZE, 0); 1392 ret = i915_gem_obj_ggtt_pin(engine->wa_ctx.obj, PAGE_SIZE, 0);
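
The allocation switches from i915_gem_alloc_object(), which returned NULL on failure, to i915_gem_object_create(), which returns an ERR_PTR-encoded errno, so the caller now checks IS_ERR(), extracts the code with PTR_ERR() and clears the stored pointer before returning. The general shape of that conversion, with a hypothetical allocator:

#include <linux/err.h>
#include <linux/types.h>

struct obj;
struct obj *create_object(size_t size);	/* hypothetical; may return ERR_PTR */

static int setup_object(struct obj **out, size_t size)
{
	struct obj *o = create_object(size);

	if (IS_ERR(o)) {
		*out = NULL;		/* never cache an ERR_PTR */
		return PTR_ERR(o);	/* propagate the real errno */
	}

	*out = o;
	return 0;
}
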
@@ -1401,9 +1420,9 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
1401 WARN_ON(engine->id != RCS); 1420 WARN_ON(engine->id != RCS);
1402 1421
1403 /* update this when WA for higher Gen are added */ 1422 /* update this when WA for higher Gen are added */
1404 if (INTEL_INFO(engine->dev)->gen > 9) { 1423 if (INTEL_GEN(engine->i915) > 9) {
1405 DRM_ERROR("WA batch buffer is not initialized for Gen%d\n", 1424 DRM_ERROR("WA batch buffer is not initialized for Gen%d\n",
1406 INTEL_INFO(engine->dev)->gen); 1425 INTEL_GEN(engine->i915));
1407 return 0; 1426 return 0;
1408 } 1427 }
1409 1428
@@ -1423,7 +1442,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
1423 batch = kmap_atomic(page); 1442 batch = kmap_atomic(page);
1424 offset = 0; 1443 offset = 0;
1425 1444
1426 if (INTEL_INFO(engine->dev)->gen == 8) { 1445 if (IS_GEN8(engine->i915)) {
1427 ret = gen8_init_indirectctx_bb(engine, 1446 ret = gen8_init_indirectctx_bb(engine,
1428 &wa_ctx->indirect_ctx, 1447 &wa_ctx->indirect_ctx,
1429 batch, 1448 batch,
@@ -1437,7 +1456,7 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
1437 &offset); 1456 &offset);
1438 if (ret) 1457 if (ret)
1439 goto out; 1458 goto out;
1440 } else if (INTEL_INFO(engine->dev)->gen == 9) { 1459 } else if (IS_GEN9(engine->i915)) {
1441 ret = gen9_init_indirectctx_bb(engine, 1460 ret = gen9_init_indirectctx_bb(engine,
1442 &wa_ctx->indirect_ctx, 1461 &wa_ctx->indirect_ctx,
1443 batch, 1462 batch,
@@ -1463,7 +1482,7 @@ out:
1463 1482
1464static void lrc_init_hws(struct intel_engine_cs *engine) 1483static void lrc_init_hws(struct intel_engine_cs *engine)
1465{ 1484{
1466 struct drm_i915_private *dev_priv = engine->dev->dev_private; 1485 struct drm_i915_private *dev_priv = engine->i915;
1467 1486
1468 I915_WRITE(RING_HWS_PGA(engine->mmio_base), 1487 I915_WRITE(RING_HWS_PGA(engine->mmio_base),
1469 (u32)engine->status_page.gfx_addr); 1488 (u32)engine->status_page.gfx_addr);
@@ -1472,8 +1491,7 @@ static void lrc_init_hws(struct intel_engine_cs *engine)
1472 1491
1473static int gen8_init_common_ring(struct intel_engine_cs *engine) 1492static int gen8_init_common_ring(struct intel_engine_cs *engine)
1474{ 1493{
1475 struct drm_device *dev = engine->dev; 1494 struct drm_i915_private *dev_priv = engine->i915;
1476 struct drm_i915_private *dev_priv = dev->dev_private;
1477 unsigned int next_context_status_buffer_hw; 1495 unsigned int next_context_status_buffer_hw;
1478 1496
1479 lrc_init_hws(engine); 1497 lrc_init_hws(engine);
@@ -1520,8 +1538,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *engine)
1520 1538
1521static int gen8_init_render_ring(struct intel_engine_cs *engine) 1539static int gen8_init_render_ring(struct intel_engine_cs *engine)
1522{ 1540{
1523 struct drm_device *dev = engine->dev; 1541 struct drm_i915_private *dev_priv = engine->i915;
1524 struct drm_i915_private *dev_priv = dev->dev_private;
1525 int ret; 1542 int ret;
1526 1543
1527 ret = gen8_init_common_ring(engine); 1544 ret = gen8_init_common_ring(engine);
@@ -1598,7 +1615,7 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
1598 if (req->ctx->ppgtt && 1615 if (req->ctx->ppgtt &&
1599 (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) { 1616 (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings)) {
1600 if (!USES_FULL_48BIT_PPGTT(req->i915) && 1617 if (!USES_FULL_48BIT_PPGTT(req->i915) &&
1601 !intel_vgpu_active(req->i915->dev)) { 1618 !intel_vgpu_active(req->i915)) {
1602 ret = intel_logical_ring_emit_pdps(req); 1619 ret = intel_logical_ring_emit_pdps(req);
1603 if (ret) 1620 if (ret)
1604 return ret; 1621 return ret;
@@ -1624,38 +1641,18 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
1624 return 0; 1641 return 0;
1625} 1642}
1626 1643
1627static bool gen8_logical_ring_get_irq(struct intel_engine_cs *engine) 1644static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine)
1628{ 1645{
1629 struct drm_device *dev = engine->dev; 1646 struct drm_i915_private *dev_priv = engine->i915;
1630 struct drm_i915_private *dev_priv = dev->dev_private; 1647 I915_WRITE_IMR(engine,
1631 unsigned long flags; 1648 ~(engine->irq_enable_mask | engine->irq_keep_mask));
1632 1649 POSTING_READ_FW(RING_IMR(engine->mmio_base));
1633 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
1634 return false;
1635
1636 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1637 if (engine->irq_refcount++ == 0) {
1638 I915_WRITE_IMR(engine,
1639 ~(engine->irq_enable_mask | engine->irq_keep_mask));
1640 POSTING_READ(RING_IMR(engine->mmio_base));
1641 }
1642 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1643
1644 return true;
1645} 1650}
1646 1651
1647static void gen8_logical_ring_put_irq(struct intel_engine_cs *engine) 1652static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
1648{ 1653{
1649 struct drm_device *dev = engine->dev; 1654 struct drm_i915_private *dev_priv = engine->i915;
1650 struct drm_i915_private *dev_priv = dev->dev_private; 1655 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
1651 unsigned long flags;
1652
1653 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1654 if (--engine->irq_refcount == 0) {
1655 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
1656 POSTING_READ(RING_IMR(engine->mmio_base));
1657 }
1658 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1659} 1656}
1660 1657
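
The get/put pair, which reference-counted interrupt users and flipped IMR under dev_priv->irq_lock, becomes a plain enable/disable pair that only writes the mask; in this series the counting of waiters moves up into common code, so the engine hook no longer needs its own bookkeeping. A sketch of that caller-side counting, with an assumed structure that is not the driver's actual breadcrumbs implementation:

#include <linux/spinlock.h>

/* Assumed shape of the caller-side refcounting; illustrative only. */
struct toy_waiters {
	spinlock_t lock;
	unsigned int irq_count;
	void (*irq_enable)(struct toy_waiters *w);
	void (*irq_disable)(struct toy_waiters *w);
};

static void toy_get_irq(struct toy_waiters *w)
{
	spin_lock(&w->lock);
	if (w->irq_count++ == 0)	/* first waiter arms the interrupt */
		w->irq_enable(w);
	spin_unlock(&w->lock);
}

static void toy_put_irq(struct toy_waiters *w)
{
	spin_lock(&w->lock);
	if (--w->irq_count == 0)	/* last waiter disarms it again */
		w->irq_disable(w);
	spin_unlock(&w->lock);
}
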
1661static int gen8_emit_flush(struct drm_i915_gem_request *request, 1658static int gen8_emit_flush(struct drm_i915_gem_request *request,
@@ -1664,8 +1661,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
1664{ 1661{
1665 struct intel_ringbuffer *ringbuf = request->ringbuf; 1662 struct intel_ringbuffer *ringbuf = request->ringbuf;
1666 struct intel_engine_cs *engine = ringbuf->engine; 1663 struct intel_engine_cs *engine = ringbuf->engine;
1667 struct drm_device *dev = engine->dev; 1664 struct drm_i915_private *dev_priv = request->i915;
1668 struct drm_i915_private *dev_priv = dev->dev_private;
1669 uint32_t cmd; 1665 uint32_t cmd;
1670 int ret; 1666 int ret;
1671 1667
@@ -1734,7 +1730,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
1734 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL 1730 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
1735 * pipe control. 1731 * pipe control.
1736 */ 1732 */
1737 if (IS_GEN9(engine->dev)) 1733 if (IS_GEN9(request->i915))
1738 vf_flush_wa = true; 1734 vf_flush_wa = true;
1739 1735
1740 /* WaForGAMHang:kbl */ 1736 /* WaForGAMHang:kbl */
@@ -1793,16 +1789,6 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
1793 return 0; 1789 return 0;
1794} 1790}
1795 1791
1796static u32 gen8_get_seqno(struct intel_engine_cs *engine)
1797{
1798 return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
1799}
1800
1801static void gen8_set_seqno(struct intel_engine_cs *engine, u32 seqno)
1802{
1803 intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
1804}
1805
1806static void bxt_a_seqno_barrier(struct intel_engine_cs *engine) 1792static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
1807{ 1793{
1808 /* 1794 /*
@@ -1818,14 +1804,6 @@ static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
1818 intel_flush_status_page(engine, I915_GEM_HWS_INDEX); 1804 intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
1819} 1805}
1820 1806
1821static void bxt_a_set_seqno(struct intel_engine_cs *engine, u32 seqno)
1822{
1823 intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
1824
1825 /* See bxt_a_get_seqno() explaining the reason for the clflush. */
1826 intel_flush_status_page(engine, I915_GEM_HWS_INDEX);
1827}
1828
1829/* 1807/*
1830 * Reserve space for 2 NOOPs at the end of each request to be 1808 * Reserve space for 2 NOOPs at the end of each request to be
1831 * used as a workaround for not being allowed to do lite 1809 * used as a workaround for not being allowed to do lite
@@ -1833,11 +1811,6 @@ static void bxt_a_set_seqno(struct intel_engine_cs *engine, u32 seqno)
1833 */ 1811 */
1834#define WA_TAIL_DWORDS 2 1812#define WA_TAIL_DWORDS 2
1835 1813
1836static inline u32 hws_seqno_address(struct intel_engine_cs *engine)
1837{
1838 return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR;
1839}
1840
1841static int gen8_emit_request(struct drm_i915_gem_request *request) 1814static int gen8_emit_request(struct drm_i915_gem_request *request)
1842{ 1815{
1843 struct intel_ringbuffer *ringbuf = request->ringbuf; 1816 struct intel_ringbuffer *ringbuf = request->ringbuf;
@@ -1853,10 +1826,10 @@ static int gen8_emit_request(struct drm_i915_gem_request *request)
1853 intel_logical_ring_emit(ringbuf, 1826 intel_logical_ring_emit(ringbuf,
1854 (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW); 1827 (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
1855 intel_logical_ring_emit(ringbuf, 1828 intel_logical_ring_emit(ringbuf,
1856 hws_seqno_address(request->engine) | 1829 intel_hws_seqno_address(request->engine) |
1857 MI_FLUSH_DW_USE_GTT); 1830 MI_FLUSH_DW_USE_GTT);
1858 intel_logical_ring_emit(ringbuf, 0); 1831 intel_logical_ring_emit(ringbuf, 0);
1859 intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request)); 1832 intel_logical_ring_emit(ringbuf, request->seqno);
1860 intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT); 1833 intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
1861 intel_logical_ring_emit(ringbuf, MI_NOOP); 1834 intel_logical_ring_emit(ringbuf, MI_NOOP);
1862 return intel_logical_ring_advance_and_submit(request); 1835 return intel_logical_ring_advance_and_submit(request);
@@ -1883,7 +1856,8 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
1883 (PIPE_CONTROL_GLOBAL_GTT_IVB | 1856 (PIPE_CONTROL_GLOBAL_GTT_IVB |
1884 PIPE_CONTROL_CS_STALL | 1857 PIPE_CONTROL_CS_STALL |
1885 PIPE_CONTROL_QW_WRITE)); 1858 PIPE_CONTROL_QW_WRITE));
1886 intel_logical_ring_emit(ringbuf, hws_seqno_address(request->engine)); 1859 intel_logical_ring_emit(ringbuf,
1860 intel_hws_seqno_address(request->engine));
1887 intel_logical_ring_emit(ringbuf, 0); 1861 intel_logical_ring_emit(ringbuf, 0);
1888 intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request)); 1862 intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
1889 /* We're thrashing one dword of HWS. */ 1863 /* We're thrashing one dword of HWS. */
@@ -1945,7 +1919,7 @@ static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
1945/** 1919/**
1946 * intel_logical_ring_cleanup() - deallocate the Engine Command Streamer 1920 * intel_logical_ring_cleanup() - deallocate the Engine Command Streamer
1947 * 1921 *
1948 * @ring: Engine Command Streamer. 1922 * @engine: Engine Command Streamer.
1949 * 1923 *
1950 */ 1924 */
1951void intel_logical_ring_cleanup(struct intel_engine_cs *engine) 1925void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
@@ -1962,7 +1936,7 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
1962 if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state))) 1936 if (WARN_ON(test_bit(TASKLET_STATE_SCHED, &engine->irq_tasklet.state)))
1963 tasklet_kill(&engine->irq_tasklet); 1937 tasklet_kill(&engine->irq_tasklet);
1964 1938
1965 dev_priv = engine->dev->dev_private; 1939 dev_priv = engine->i915;
1966 1940
1967 if (engine->buffer) { 1941 if (engine->buffer) {
1968 intel_logical_ring_stop(engine); 1942 intel_logical_ring_stop(engine);
@@ -1975,36 +1949,34 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
1975 i915_cmd_parser_fini_ring(engine); 1949 i915_cmd_parser_fini_ring(engine);
1976 i915_gem_batch_pool_fini(&engine->batch_pool); 1950 i915_gem_batch_pool_fini(&engine->batch_pool);
1977 1951
1952 intel_engine_fini_breadcrumbs(engine);
1953
1978 if (engine->status_page.obj) { 1954 if (engine->status_page.obj) {
1979 i915_gem_object_unpin_map(engine->status_page.obj); 1955 i915_gem_object_unpin_map(engine->status_page.obj);
1980 engine->status_page.obj = NULL; 1956 engine->status_page.obj = NULL;
1981 } 1957 }
1958 intel_lr_context_unpin(dev_priv->kernel_context, engine);
1982 1959
1983 engine->idle_lite_restore_wa = 0; 1960 engine->idle_lite_restore_wa = 0;
1984 engine->disable_lite_restore_wa = false; 1961 engine->disable_lite_restore_wa = false;
1985 engine->ctx_desc_template = 0; 1962 engine->ctx_desc_template = 0;
1986 1963
1987 lrc_destroy_wa_ctx_obj(engine); 1964 lrc_destroy_wa_ctx_obj(engine);
1988 engine->dev = NULL; 1965 engine->i915 = NULL;
1989} 1966}
1990 1967
1991static void 1968static void
1992logical_ring_default_vfuncs(struct drm_device *dev, 1969logical_ring_default_vfuncs(struct intel_engine_cs *engine)
1993 struct intel_engine_cs *engine)
1994{ 1970{
1995 /* Default vfuncs which can be overriden by each engine. */ 1971 /* Default vfuncs which can be overriden by each engine. */
1996 engine->init_hw = gen8_init_common_ring; 1972 engine->init_hw = gen8_init_common_ring;
1997 engine->emit_request = gen8_emit_request; 1973 engine->emit_request = gen8_emit_request;
1998 engine->emit_flush = gen8_emit_flush; 1974 engine->emit_flush = gen8_emit_flush;
1999 engine->irq_get = gen8_logical_ring_get_irq; 1975 engine->irq_enable = gen8_logical_ring_enable_irq;
2000 engine->irq_put = gen8_logical_ring_put_irq; 1976 engine->irq_disable = gen8_logical_ring_disable_irq;
2001 engine->emit_bb_start = gen8_emit_bb_start; 1977 engine->emit_bb_start = gen8_emit_bb_start;
2002 engine->get_seqno = gen8_get_seqno; 1978 if (IS_BXT_REVID(engine->i915, 0, BXT_REVID_A1))
2003 engine->set_seqno = gen8_set_seqno;
2004 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) {
2005 engine->irq_seqno_barrier = bxt_a_seqno_barrier; 1979 engine->irq_seqno_barrier = bxt_a_seqno_barrier;
2006 engine->set_seqno = bxt_a_set_seqno;
2007 }
2008} 1980}
2009 1981
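
logical_ring_default_vfuncs() now takes only the engine (dev_priv is reachable through engine->i915), and the BXT A-stepping branch keeps just the seqno barrier because the get/set_seqno hooks are gone. The fill-defaults-then-override pattern it implements, stripped to its essentials with hypothetical hooks:

/* Defaults first, per-engine overrides after; hook names are made up. */
int common_init_hw(void);
int common_emit_flush(void);
int render_emit_flush(void);

struct toy_engine_ops {
	int (*init_hw)(void);
	int (*emit_flush)(void);
};

static void set_default_ops(struct toy_engine_ops *ops)
{
	ops->init_hw = common_init_hw;
	ops->emit_flush = common_emit_flush;
}

static void setup_render_ops(struct toy_engine_ops *ops)
{
	set_default_ops(ops);
	ops->emit_flush = render_emit_flush;	/* override a single hook */
}
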
2010static inline void 1982static inline void
@@ -2033,60 +2005,28 @@ lrc_setup_hws(struct intel_engine_cs *engine,
2033} 2005}
2034 2006
2035static int 2007static int
2036logical_ring_init(struct drm_device *dev, struct intel_engine_cs *engine) 2008logical_ring_init(struct intel_engine_cs *engine)
2037{ 2009{
2038 struct drm_i915_private *dev_priv = to_i915(dev); 2010 struct i915_gem_context *dctx = engine->i915->kernel_context;
2039 struct intel_context *dctx = dev_priv->kernel_context;
2040 enum forcewake_domains fw_domains;
2041 int ret; 2011 int ret;
2042 2012
2043 /* Intentionally left blank. */ 2013 ret = intel_engine_init_breadcrumbs(engine);
2044 engine->buffer = NULL; 2014 if (ret)
2045 2015 goto error;
2046 engine->dev = dev;
2047 INIT_LIST_HEAD(&engine->active_list);
2048 INIT_LIST_HEAD(&engine->request_list);
2049 i915_gem_batch_pool_init(dev, &engine->batch_pool);
2050 init_waitqueue_head(&engine->irq_queue);
2051
2052 INIT_LIST_HEAD(&engine->buffers);
2053 INIT_LIST_HEAD(&engine->execlist_queue);
2054 INIT_LIST_HEAD(&engine->execlist_retired_req_list);
2055 spin_lock_init(&engine->execlist_lock);
2056
2057 tasklet_init(&engine->irq_tasklet,
2058 intel_lrc_irq_handler, (unsigned long)engine);
2059
2060 logical_ring_init_platform_invariants(engine);
2061
2062 fw_domains = intel_uncore_forcewake_for_reg(dev_priv,
2063 RING_ELSP(engine),
2064 FW_REG_WRITE);
2065
2066 fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
2067 RING_CONTEXT_STATUS_PTR(engine),
2068 FW_REG_READ | FW_REG_WRITE);
2069
2070 fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
2071 RING_CONTEXT_STATUS_BUF_BASE(engine),
2072 FW_REG_READ);
2073
2074 engine->fw_domains = fw_domains;
2075 2016
2076 ret = i915_cmd_parser_init_ring(engine); 2017 ret = i915_cmd_parser_init_ring(engine);
2077 if (ret) 2018 if (ret)
2078 goto error; 2019 goto error;
2079 2020
2080 ret = intel_lr_context_deferred_alloc(dctx, engine); 2021 ret = execlists_context_deferred_alloc(dctx, engine);
2081 if (ret) 2022 if (ret)
2082 goto error; 2023 goto error;
2083 2024
2084 /* As this is the default context, always pin it */ 2025 /* As this is the default context, always pin it */
2085 ret = intel_lr_context_do_pin(dctx, engine); 2026 ret = intel_lr_context_pin(dctx, engine);
2086 if (ret) { 2027 if (ret) {
2087 DRM_ERROR( 2028 DRM_ERROR("Failed to pin context for %s: %d\n",
2088 "Failed to pin and map ringbuffer %s: %d\n", 2029 engine->name, ret);
2089 engine->name, ret);
2090 goto error; 2030 goto error;
2091 } 2031 }
2092 2032
@@ -2104,26 +2044,16 @@ error:
2104 return ret; 2044 return ret;
2105} 2045}
2106 2046
2107static int logical_render_ring_init(struct drm_device *dev) 2047static int logical_render_ring_init(struct intel_engine_cs *engine)
2108{ 2048{
2109 struct drm_i915_private *dev_priv = dev->dev_private; 2049 struct drm_i915_private *dev_priv = engine->i915;
2110 struct intel_engine_cs *engine = &dev_priv->engine[RCS];
2111 int ret; 2050 int ret;
2112 2051
2113 engine->name = "render ring"; 2052 if (HAS_L3_DPF(dev_priv))
2114 engine->id = RCS;
2115 engine->exec_id = I915_EXEC_RENDER;
2116 engine->guc_id = GUC_RENDER_ENGINE;
2117 engine->mmio_base = RENDER_RING_BASE;
2118
2119 logical_ring_default_irqs(engine, GEN8_RCS_IRQ_SHIFT);
2120 if (HAS_L3_DPF(dev))
2121 engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 2053 engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2122 2054
2123 logical_ring_default_vfuncs(dev, engine);
2124
2125 /* Override some for render ring. */ 2055 /* Override some for render ring. */
2126 if (INTEL_INFO(dev)->gen >= 9) 2056 if (INTEL_GEN(dev_priv) >= 9)
2127 engine->init_hw = gen9_init_render_ring; 2057 engine->init_hw = gen9_init_render_ring;
2128 else 2058 else
2129 engine->init_hw = gen8_init_render_ring; 2059 engine->init_hw = gen8_init_render_ring;
@@ -2132,9 +2062,7 @@ static int logical_render_ring_init(struct drm_device *dev)
2132 engine->emit_flush = gen8_emit_flush_render; 2062 engine->emit_flush = gen8_emit_flush_render;
2133 engine->emit_request = gen8_emit_request_render; 2063 engine->emit_request = gen8_emit_request_render;
2134 2064
2135 engine->dev = dev; 2065 ret = intel_init_pipe_control(engine, 4096);
2136
2137 ret = intel_init_pipe_control(engine);
2138 if (ret) 2066 if (ret)
2139 return ret; 2067 return ret;
2140 2068
@@ -2149,7 +2077,7 @@ static int logical_render_ring_init(struct drm_device *dev)
2149 ret); 2077 ret);
2150 } 2078 }
2151 2079
2152 ret = logical_ring_init(dev, engine); 2080 ret = logical_ring_init(engine);
2153 if (ret) { 2081 if (ret) {
2154 lrc_destroy_wa_ctx_obj(engine); 2082 lrc_destroy_wa_ctx_obj(engine);
2155 } 2083 }
@@ -2157,133 +2085,164 @@ static int logical_render_ring_init(struct drm_device *dev)
2157 return ret; 2085 return ret;
2158} 2086}
2159 2087
2160static int logical_bsd_ring_init(struct drm_device *dev) 2088static const struct logical_ring_info {
2161{ 2089 const char *name;
2162 struct drm_i915_private *dev_priv = dev->dev_private; 2090 unsigned exec_id;
2163 struct intel_engine_cs *engine = &dev_priv->engine[VCS]; 2091 unsigned guc_id;
2164 2092 u32 mmio_base;
2165 engine->name = "bsd ring"; 2093 unsigned irq_shift;
2166 engine->id = VCS; 2094 int (*init)(struct intel_engine_cs *engine);
2167 engine->exec_id = I915_EXEC_BSD; 2095} logical_rings[] = {
2168 engine->guc_id = GUC_VIDEO_ENGINE; 2096 [RCS] = {
2169 engine->mmio_base = GEN6_BSD_RING_BASE; 2097 .name = "render ring",
2170 2098 .exec_id = I915_EXEC_RENDER,
2171 logical_ring_default_irqs(engine, GEN8_VCS1_IRQ_SHIFT); 2099 .guc_id = GUC_RENDER_ENGINE,
2172 logical_ring_default_vfuncs(dev, engine); 2100 .mmio_base = RENDER_RING_BASE,
2173 2101 .irq_shift = GEN8_RCS_IRQ_SHIFT,
2174 return logical_ring_init(dev, engine); 2102 .init = logical_render_ring_init,
2175} 2103 },
2104 [BCS] = {
2105 .name = "blitter ring",
2106 .exec_id = I915_EXEC_BLT,
2107 .guc_id = GUC_BLITTER_ENGINE,
2108 .mmio_base = BLT_RING_BASE,
2109 .irq_shift = GEN8_BCS_IRQ_SHIFT,
2110 .init = logical_ring_init,
2111 },
2112 [VCS] = {
2113 .name = "bsd ring",
2114 .exec_id = I915_EXEC_BSD,
2115 .guc_id = GUC_VIDEO_ENGINE,
2116 .mmio_base = GEN6_BSD_RING_BASE,
2117 .irq_shift = GEN8_VCS1_IRQ_SHIFT,
2118 .init = logical_ring_init,
2119 },
2120 [VCS2] = {
2121 .name = "bsd2 ring",
2122 .exec_id = I915_EXEC_BSD,
2123 .guc_id = GUC_VIDEO_ENGINE2,
2124 .mmio_base = GEN8_BSD2_RING_BASE,
2125 .irq_shift = GEN8_VCS2_IRQ_SHIFT,
2126 .init = logical_ring_init,
2127 },
2128 [VECS] = {
2129 .name = "video enhancement ring",
2130 .exec_id = I915_EXEC_VEBOX,
2131 .guc_id = GUC_VIDEOENHANCE_ENGINE,
2132 .mmio_base = VEBOX_RING_BASE,
2133 .irq_shift = GEN8_VECS_IRQ_SHIFT,
2134 .init = logical_ring_init,
2135 },
2136};
2176 2137
2177static int logical_bsd2_ring_init(struct drm_device *dev) 2138static struct intel_engine_cs *
2139logical_ring_setup(struct drm_i915_private *dev_priv, enum intel_engine_id id)
2178{ 2140{
2179 struct drm_i915_private *dev_priv = dev->dev_private; 2141 const struct logical_ring_info *info = &logical_rings[id];
2180 struct intel_engine_cs *engine = &dev_priv->engine[VCS2]; 2142 struct intel_engine_cs *engine = &dev_priv->engine[id];
2143 enum forcewake_domains fw_domains;
2181 2144
2182 engine->name = "bsd2 ring"; 2145 engine->id = id;
2183 engine->id = VCS2; 2146 engine->name = info->name;
2184 engine->exec_id = I915_EXEC_BSD; 2147 engine->exec_id = info->exec_id;
2185 engine->guc_id = GUC_VIDEO_ENGINE2; 2148 engine->guc_id = info->guc_id;
2186 engine->mmio_base = GEN8_BSD2_RING_BASE; 2149 engine->mmio_base = info->mmio_base;
2187 2150
2188 logical_ring_default_irqs(engine, GEN8_VCS2_IRQ_SHIFT); 2151 engine->i915 = dev_priv;
2189 logical_ring_default_vfuncs(dev, engine);
2190 2152
2191 return logical_ring_init(dev, engine); 2153 /* Intentionally left blank. */
2192} 2154 engine->buffer = NULL;
2193 2155
2194static int logical_blt_ring_init(struct drm_device *dev) 2156 fw_domains = intel_uncore_forcewake_for_reg(dev_priv,
2195{ 2157 RING_ELSP(engine),
2196 struct drm_i915_private *dev_priv = dev->dev_private; 2158 FW_REG_WRITE);
2197 struct intel_engine_cs *engine = &dev_priv->engine[BCS];
2198 2159
2199 engine->name = "blitter ring"; 2160 fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
2200 engine->id = BCS; 2161 RING_CONTEXT_STATUS_PTR(engine),
2201 engine->exec_id = I915_EXEC_BLT; 2162 FW_REG_READ | FW_REG_WRITE);
2202 engine->guc_id = GUC_BLITTER_ENGINE;
2203 engine->mmio_base = BLT_RING_BASE;
2204 2163
2205 logical_ring_default_irqs(engine, GEN8_BCS_IRQ_SHIFT); 2164 fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
2206 logical_ring_default_vfuncs(dev, engine); 2165 RING_CONTEXT_STATUS_BUF_BASE(engine),
2166 FW_REG_READ);
2207 2167
2208 return logical_ring_init(dev, engine); 2168 engine->fw_domains = fw_domains;
2209}
2210 2169
2211static int logical_vebox_ring_init(struct drm_device *dev) 2170 INIT_LIST_HEAD(&engine->active_list);
2212{ 2171 INIT_LIST_HEAD(&engine->request_list);
2213 struct drm_i915_private *dev_priv = dev->dev_private; 2172 INIT_LIST_HEAD(&engine->buffers);
2214 struct intel_engine_cs *engine = &dev_priv->engine[VECS]; 2173 INIT_LIST_HEAD(&engine->execlist_queue);
2174 spin_lock_init(&engine->execlist_lock);
2215 2175
2216 engine->name = "video enhancement ring"; 2176 tasklet_init(&engine->irq_tasklet,
2217 engine->id = VECS; 2177 intel_lrc_irq_handler, (unsigned long)engine);
2218 engine->exec_id = I915_EXEC_VEBOX;
2219 engine->guc_id = GUC_VIDEOENHANCE_ENGINE;
2220 engine->mmio_base = VEBOX_RING_BASE;
2221 2178
2222 logical_ring_default_irqs(engine, GEN8_VECS_IRQ_SHIFT); 2179 logical_ring_init_platform_invariants(engine);
2223 logical_ring_default_vfuncs(dev, engine); 2180 logical_ring_default_vfuncs(engine);
2181 logical_ring_default_irqs(engine, info->irq_shift);
2182
2183 intel_engine_init_hangcheck(engine);
2184 i915_gem_batch_pool_init(&dev_priv->drm, &engine->batch_pool);
2224 2185
2225 return logical_ring_init(dev, engine); 2186 return engine;
2226} 2187}
2227 2188
2228/** 2189/**
2229 * intel_logical_rings_init() - allocate, populate and init the Engine Command Streamers 2190 * intel_logical_rings_init() - allocate, populate and init the Engine Command Streamers
2230 * @dev: DRM device. 2191 * @dev: DRM device.
2231 * 2192 *
2232 * This function inits the engines for an Execlists submission style (the equivalent in the 2193 * This function inits the engines for an Execlists submission style (the
2233 * legacy ringbuffer submission world would be i915_gem_init_engines). It does it only for 2194 * equivalent in the legacy ringbuffer submission world would be
2234 * those engines that are present in the hardware. 2195 * i915_gem_init_engines). It does it only for those engines that are present in
2196 * the hardware.
2235 * 2197 *
2236 * Return: non-zero if the initialization failed. 2198 * Return: non-zero if the initialization failed.
2237 */ 2199 */
2238int intel_logical_rings_init(struct drm_device *dev) 2200int intel_logical_rings_init(struct drm_device *dev)
2239{ 2201{
2240 struct drm_i915_private *dev_priv = dev->dev_private; 2202 struct drm_i915_private *dev_priv = to_i915(dev);
2203 unsigned int mask = 0;
2204 unsigned int i;
2241 int ret; 2205 int ret;
2242 2206
2243 ret = logical_render_ring_init(dev); 2207 WARN_ON(INTEL_INFO(dev_priv)->ring_mask &
2244 if (ret) 2208 GENMASK(sizeof(mask) * BITS_PER_BYTE - 1, I915_NUM_ENGINES));
2245 return ret;
2246 2209
2247 if (HAS_BSD(dev)) { 2210 for (i = 0; i < ARRAY_SIZE(logical_rings); i++) {
2248 ret = logical_bsd_ring_init(dev); 2211 if (!HAS_ENGINE(dev_priv, i))
2249 if (ret) 2212 continue;
2250 goto cleanup_render_ring;
2251 }
2252 2213
2253 if (HAS_BLT(dev)) { 2214 if (!logical_rings[i].init)
2254 ret = logical_blt_ring_init(dev); 2215 continue;
2255 if (ret)
2256 goto cleanup_bsd_ring;
2257 }
2258 2216
2259 if (HAS_VEBOX(dev)) { 2217 ret = logical_rings[i].init(logical_ring_setup(dev_priv, i));
2260 ret = logical_vebox_ring_init(dev);
2261 if (ret) 2218 if (ret)
2262 goto cleanup_blt_ring; 2219 goto cleanup;
2220
2221 mask |= ENGINE_MASK(i);
2263 } 2222 }
2264 2223
2265 if (HAS_BSD2(dev)) { 2224 /*
2266 ret = logical_bsd2_ring_init(dev); 2225 * Catch failures to update logical_rings table when the new engines
2267 if (ret) 2226 * are added to the driver by a warning and disabling the forgotten
2268 goto cleanup_vebox_ring; 2227 * engines.
2228 */
2229 if (WARN_ON(mask != INTEL_INFO(dev_priv)->ring_mask)) {
2230 struct intel_device_info *info =
2231 (struct intel_device_info *)&dev_priv->info;
2232 info->ring_mask = mask;
2269 } 2233 }
2270 2234
2271 return 0; 2235 return 0;
2272 2236
2273cleanup_vebox_ring: 2237cleanup:
2274 intel_logical_ring_cleanup(&dev_priv->engine[VECS]); 2238 for (i = 0; i < I915_NUM_ENGINES; i++)
2275cleanup_blt_ring: 2239 intel_logical_ring_cleanup(&dev_priv->engine[i]);
2276 intel_logical_ring_cleanup(&dev_priv->engine[BCS]);
2277cleanup_bsd_ring:
2278 intel_logical_ring_cleanup(&dev_priv->engine[VCS]);
2279cleanup_render_ring:
2280 intel_logical_ring_cleanup(&dev_priv->engine[RCS]);
2281 2240
2282 return ret; 2241 return ret;
2283} 2242}
2284 2243
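
The per-engine *_ring_init() helpers collapse into the static const logical_rings[] table, and intel_logical_rings_init() now loops over it, skipping engines the device does not report via HAS_ENGINE() and warning if the table and ring_mask ever disagree. The table-driven shape, reduced to a neutral example (the initialisers and mask handling are hypothetical):

#include <linux/bitops.h>
#include <linux/kernel.h>

int render_init(int id);	/* hypothetical per-engine initialisers */
int common_init(int id);

struct toy_engine_info {
	const char *name;
	int (*init)(int id);
};

static const struct toy_engine_info toy_engines[] = {
	{ "render", render_init },
	{ "bsd",    common_init },
	{ "blt",    common_init },
};

static int toy_init_engines(unsigned long present_mask)
{
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(toy_engines); i++) {
		if (!(present_mask & BIT(i)))	/* engine not on this device */
			continue;

		ret = toy_engines[i].init(i);
		if (ret)
			return ret;	/* the real code also cleans up */
	}

	return 0;
}
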
2285static u32 2244static u32
2286make_rpcs(struct drm_device *dev) 2245make_rpcs(struct drm_i915_private *dev_priv)
2287{ 2246{
2288 u32 rpcs = 0; 2247 u32 rpcs = 0;
2289 2248
@@ -2291,7 +2250,7 @@ make_rpcs(struct drm_device *dev)
2291 * No explicit RPCS request is needed to ensure full 2250 * No explicit RPCS request is needed to ensure full
2292 * slice/subslice/EU enablement prior to Gen9. 2251 * slice/subslice/EU enablement prior to Gen9.
2293 */ 2252 */
2294 if (INTEL_INFO(dev)->gen < 9) 2253 if (INTEL_GEN(dev_priv) < 9)
2295 return 0; 2254 return 0;
2296 2255
2297 /* 2256 /*
@@ -2300,24 +2259,24 @@ make_rpcs(struct drm_device *dev)
2300 * must make an explicit request through RPCS for full 2259 * must make an explicit request through RPCS for full
2301 * enablement. 2260 * enablement.
2302 */ 2261 */
2303 if (INTEL_INFO(dev)->has_slice_pg) { 2262 if (INTEL_INFO(dev_priv)->has_slice_pg) {
2304 rpcs |= GEN8_RPCS_S_CNT_ENABLE; 2263 rpcs |= GEN8_RPCS_S_CNT_ENABLE;
2305 rpcs |= INTEL_INFO(dev)->slice_total << 2264 rpcs |= INTEL_INFO(dev_priv)->slice_total <<
2306 GEN8_RPCS_S_CNT_SHIFT; 2265 GEN8_RPCS_S_CNT_SHIFT;
2307 rpcs |= GEN8_RPCS_ENABLE; 2266 rpcs |= GEN8_RPCS_ENABLE;
2308 } 2267 }
2309 2268
2310 if (INTEL_INFO(dev)->has_subslice_pg) { 2269 if (INTEL_INFO(dev_priv)->has_subslice_pg) {
2311 rpcs |= GEN8_RPCS_SS_CNT_ENABLE; 2270 rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
2312 rpcs |= INTEL_INFO(dev)->subslice_per_slice << 2271 rpcs |= INTEL_INFO(dev_priv)->subslice_per_slice <<
2313 GEN8_RPCS_SS_CNT_SHIFT; 2272 GEN8_RPCS_SS_CNT_SHIFT;
2314 rpcs |= GEN8_RPCS_ENABLE; 2273 rpcs |= GEN8_RPCS_ENABLE;
2315 } 2274 }
2316 2275
2317 if (INTEL_INFO(dev)->has_eu_pg) { 2276 if (INTEL_INFO(dev_priv)->has_eu_pg) {
2318 rpcs |= INTEL_INFO(dev)->eu_per_subslice << 2277 rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice <<
2319 GEN8_RPCS_EU_MIN_SHIFT; 2278 GEN8_RPCS_EU_MIN_SHIFT;
2320 rpcs |= INTEL_INFO(dev)->eu_per_subslice << 2279 rpcs |= INTEL_INFO(dev_priv)->eu_per_subslice <<
2321 GEN8_RPCS_EU_MAX_SHIFT; 2280 GEN8_RPCS_EU_MAX_SHIFT;
2322 rpcs |= GEN8_RPCS_ENABLE; 2281 rpcs |= GEN8_RPCS_ENABLE;
2323 } 2282 }
@@ -2329,9 +2288,9 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
2329{ 2288{
2330 u32 indirect_ctx_offset; 2289 u32 indirect_ctx_offset;
2331 2290
2332 switch (INTEL_INFO(engine->dev)->gen) { 2291 switch (INTEL_GEN(engine->i915)) {
2333 default: 2292 default:
2334 MISSING_CASE(INTEL_INFO(engine->dev)->gen); 2293 MISSING_CASE(INTEL_GEN(engine->i915));
2335 /* fall through */ 2294 /* fall through */
2336 case 9: 2295 case 9:
2337 indirect_ctx_offset = 2296 indirect_ctx_offset =
@@ -2347,13 +2306,12 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
2347} 2306}
2348 2307
2349static int 2308static int
2350populate_lr_context(struct intel_context *ctx, 2309populate_lr_context(struct i915_gem_context *ctx,
2351 struct drm_i915_gem_object *ctx_obj, 2310 struct drm_i915_gem_object *ctx_obj,
2352 struct intel_engine_cs *engine, 2311 struct intel_engine_cs *engine,
2353 struct intel_ringbuffer *ringbuf) 2312 struct intel_ringbuffer *ringbuf)
2354{ 2313{
2355 struct drm_device *dev = engine->dev; 2314 struct drm_i915_private *dev_priv = ctx->i915;
2356 struct drm_i915_private *dev_priv = dev->dev_private;
2357 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt; 2315 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
2358 void *vaddr; 2316 void *vaddr;
2359 u32 *reg_state; 2317 u32 *reg_state;
@@ -2391,7 +2349,7 @@ populate_lr_context(struct intel_context *ctx,
2391 RING_CONTEXT_CONTROL(engine), 2349 RING_CONTEXT_CONTROL(engine),
2392 _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH | 2350 _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
2393 CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | 2351 CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
2394 (HAS_RESOURCE_STREAMER(dev) ? 2352 (HAS_RESOURCE_STREAMER(dev_priv) ?
2395 CTX_CTRL_RS_CTX_ENABLE : 0))); 2353 CTX_CTRL_RS_CTX_ENABLE : 0)));
2396 ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base), 2354 ASSIGN_CTX_REG(reg_state, CTX_RING_HEAD, RING_HEAD(engine->mmio_base),
2397 0); 2355 0);
@@ -2480,7 +2438,7 @@ populate_lr_context(struct intel_context *ctx,
2480 if (engine->id == RCS) { 2438 if (engine->id == RCS) {
2481 reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1); 2439 reg_state[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
2482 ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, 2440 ASSIGN_CTX_REG(reg_state, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
2483 make_rpcs(dev)); 2441 make_rpcs(dev_priv));
2484 } 2442 }
2485 2443
2486 i915_gem_object_unpin_map(ctx_obj); 2444 i915_gem_object_unpin_map(ctx_obj);
@@ -2489,39 +2447,8 @@ populate_lr_context(struct intel_context *ctx,
2489} 2447}
2490 2448
2491/** 2449/**
2492 * intel_lr_context_free() - free the LRC specific bits of a context
2493 * @ctx: the LR context to free.
2494 *
2495 * The real context freeing is done in i915_gem_context_free: this only
2496 * takes care of the bits that are LRC related: the per-engine backing
2497 * objects and the logical ringbuffer.
2498 */
2499void intel_lr_context_free(struct intel_context *ctx)
2500{
2501 int i;
2502
2503 for (i = I915_NUM_ENGINES; --i >= 0; ) {
2504 struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
2505 struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
2506
2507 if (!ctx_obj)
2508 continue;
2509
2510 if (ctx == ctx->i915->kernel_context) {
2511 intel_unpin_ringbuffer_obj(ringbuf);
2512 i915_gem_object_ggtt_unpin(ctx_obj);
2513 i915_gem_object_unpin_map(ctx_obj);
2514 }
2515
2516 WARN_ON(ctx->engine[i].pin_count);
2517 intel_ringbuffer_free(ringbuf);
2518 drm_gem_object_unreference(&ctx_obj->base);
2519 }
2520}
2521
2522/**
2523 * intel_lr_context_size() - return the size of the context for an engine 2450 * intel_lr_context_size() - return the size of the context for an engine
2524 * @ring: which engine to find the context size for 2451 * @engine: which engine to find the context size for
2525 * 2452 *
2526 * Each engine may require a different amount of space for a context image, 2453 * Each engine may require a different amount of space for a context image,
2527 * so when allocating (or copying) an image, this function can be used to 2454 * so when allocating (or copying) an image, this function can be used to
@@ -2537,11 +2464,11 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
2537{ 2464{
2538 int ret = 0; 2465 int ret = 0;
2539 2466
2540 WARN_ON(INTEL_INFO(engine->dev)->gen < 8); 2467 WARN_ON(INTEL_GEN(engine->i915) < 8);
2541 2468
2542 switch (engine->id) { 2469 switch (engine->id) {
2543 case RCS: 2470 case RCS:
2544 if (INTEL_INFO(engine->dev)->gen >= 9) 2471 if (INTEL_GEN(engine->i915) >= 9)
2545 ret = GEN9_LR_CONTEXT_RENDER_SIZE; 2472 ret = GEN9_LR_CONTEXT_RENDER_SIZE;
2546 else 2473 else
2547 ret = GEN8_LR_CONTEXT_RENDER_SIZE; 2474 ret = GEN8_LR_CONTEXT_RENDER_SIZE;
@@ -2558,9 +2485,9 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
2558} 2485}
2559 2486
2560/** 2487/**
2561 * intel_lr_context_deferred_alloc() - create the LRC specific bits of a context 2488 * execlists_context_deferred_alloc() - create the LRC specific bits of a context
2562 * @ctx: LR context to create. 2489 * @ctx: LR context to create.
2563 * @ring: engine to be used with the context. 2490 * @engine: engine to be used with the context.
2564 * 2491 *
2565 * This function can be called more than once, with different engines, if we plan 2492 * This function can be called more than once, with different engines, if we plan
2566 * to use the context with them. The context backing objects and the ringbuffers 2493 * to use the context with them. The context backing objects and the ringbuffers
@@ -2570,31 +2497,29 @@ uint32_t intel_lr_context_size(struct intel_engine_cs *engine)
2570 * 2497 *
2571 * Return: non-zero on error. 2498 * Return: non-zero on error.
2572 */ 2499 */
2573 2500static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
2574int intel_lr_context_deferred_alloc(struct intel_context *ctx, 2501 struct intel_engine_cs *engine)
2575 struct intel_engine_cs *engine)
2576{ 2502{
2577 struct drm_device *dev = engine->dev;
2578 struct drm_i915_gem_object *ctx_obj; 2503 struct drm_i915_gem_object *ctx_obj;
2504 struct intel_context *ce = &ctx->engine[engine->id];
2579 uint32_t context_size; 2505 uint32_t context_size;
2580 struct intel_ringbuffer *ringbuf; 2506 struct intel_ringbuffer *ringbuf;
2581 int ret; 2507 int ret;
2582 2508
2583 WARN_ON(ctx->legacy_hw_ctx.rcs_state != NULL); 2509 WARN_ON(ce->state);
2584 WARN_ON(ctx->engine[engine->id].state);
2585 2510
2586 context_size = round_up(intel_lr_context_size(engine), 4096); 2511 context_size = round_up(intel_lr_context_size(engine), 4096);
2587 2512
2588 /* One extra page as the sharing data between driver and GuC */ 2513 /* One extra page as the sharing data between driver and GuC */
2589 context_size += PAGE_SIZE * LRC_PPHWSP_PN; 2514 context_size += PAGE_SIZE * LRC_PPHWSP_PN;
2590 2515
2591 ctx_obj = i915_gem_alloc_object(dev, context_size); 2516 ctx_obj = i915_gem_object_create(&ctx->i915->drm, context_size);
2592 if (!ctx_obj) { 2517 if (IS_ERR(ctx_obj)) {
2593 DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n"); 2518 DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
2594 return -ENOMEM; 2519 return PTR_ERR(ctx_obj);
2595 } 2520 }
2596 2521
2597 ringbuf = intel_engine_create_ringbuffer(engine, 4 * PAGE_SIZE); 2522 ringbuf = intel_engine_create_ringbuffer(engine, ctx->ring_size);
2598 if (IS_ERR(ringbuf)) { 2523 if (IS_ERR(ringbuf)) {
2599 ret = PTR_ERR(ringbuf); 2524 ret = PTR_ERR(ringbuf);
2600 goto error_deref_obj; 2525 goto error_deref_obj;
@@ -2606,48 +2531,29 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx,
2606 goto error_ringbuf; 2531 goto error_ringbuf;
2607 } 2532 }
2608 2533
2609 ctx->engine[engine->id].ringbuf = ringbuf; 2534 ce->ringbuf = ringbuf;
2610 ctx->engine[engine->id].state = ctx_obj; 2535 ce->state = ctx_obj;
2536 ce->initialised = engine->init_context == NULL;
2611 2537
2612 if (ctx != ctx->i915->kernel_context && engine->init_context) {
2613 struct drm_i915_gem_request *req;
2614
2615 req = i915_gem_request_alloc(engine, ctx);
2616 if (IS_ERR(req)) {
2617 ret = PTR_ERR(req);
2618 DRM_ERROR("ring create req: %d\n", ret);
2619 goto error_ringbuf;
2620 }
2621
2622 ret = engine->init_context(req);
2623 i915_add_request_no_flush(req);
2624 if (ret) {
2625 DRM_ERROR("ring init context: %d\n",
2626 ret);
2627 goto error_ringbuf;
2628 }
2629 }
2630 return 0; 2538 return 0;
2631 2539
2632error_ringbuf: 2540error_ringbuf:
2633 intel_ringbuffer_free(ringbuf); 2541 intel_ringbuffer_free(ringbuf);
2634error_deref_obj: 2542error_deref_obj:
2635 drm_gem_object_unreference(&ctx_obj->base); 2543 drm_gem_object_unreference(&ctx_obj->base);
2636 ctx->engine[engine->id].ringbuf = NULL; 2544 ce->ringbuf = NULL;
2637 ctx->engine[engine->id].state = NULL; 2545 ce->state = NULL;
2638 return ret; 2546 return ret;
2639} 2547}
2640 2548
2641void intel_lr_context_reset(struct drm_i915_private *dev_priv, 2549void intel_lr_context_reset(struct drm_i915_private *dev_priv,
2642 struct intel_context *ctx) 2550 struct i915_gem_context *ctx)
2643{ 2551{
2644 struct intel_engine_cs *engine; 2552 struct intel_engine_cs *engine;
2645 2553
2646 for_each_engine(engine, dev_priv) { 2554 for_each_engine(engine, dev_priv) {
2647 struct drm_i915_gem_object *ctx_obj = 2555 struct intel_context *ce = &ctx->engine[engine->id];
2648 ctx->engine[engine->id].state; 2556 struct drm_i915_gem_object *ctx_obj = ce->state;
2649 struct intel_ringbuffer *ringbuf =
2650 ctx->engine[engine->id].ringbuf;
2651 void *vaddr; 2557 void *vaddr;
2652 uint32_t *reg_state; 2558 uint32_t *reg_state;
2653 2559
@@ -2666,7 +2572,7 @@ void intel_lr_context_reset(struct drm_i915_private *dev_priv,
2666 2572
2667 i915_gem_object_unpin_map(ctx_obj); 2573 i915_gem_object_unpin_map(ctx_obj);
2668 2574
2669 ringbuf->head = 0; 2575 ce->ringbuf->head = 0;
2670 ringbuf->tail = 0; 2576 ce->ringbuf->tail = 0;
2671 } 2577 }
2672} 2578}
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 60a7385bc531..2b8255c19dcc 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -57,6 +57,11 @@
57#define GEN8_CSB_READ_PTR(csb_status) \ 57#define GEN8_CSB_READ_PTR(csb_status) \
58 (((csb_status) & GEN8_CSB_READ_PTR_MASK) >> 8) 58 (((csb_status) & GEN8_CSB_READ_PTR_MASK) >> 8)
59 59
60enum {
61 INTEL_CONTEXT_SCHEDULE_IN = 0,
62 INTEL_CONTEXT_SCHEDULE_OUT,
63};
64
60/* Logical Rings */ 65/* Logical Rings */
61int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request); 66int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request);
62int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request); 67int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request);
@@ -99,30 +104,27 @@ static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ringbuf,
99#define LRC_PPHWSP_PN (LRC_GUCSHR_PN + 1) 104#define LRC_PPHWSP_PN (LRC_GUCSHR_PN + 1)
100#define LRC_STATE_PN (LRC_PPHWSP_PN + 1) 105#define LRC_STATE_PN (LRC_PPHWSP_PN + 1)
101 106
102void intel_lr_context_free(struct intel_context *ctx); 107struct i915_gem_context;
108
103uint32_t intel_lr_context_size(struct intel_engine_cs *engine); 109uint32_t intel_lr_context_size(struct intel_engine_cs *engine);
104int intel_lr_context_deferred_alloc(struct intel_context *ctx, 110void intel_lr_context_unpin(struct i915_gem_context *ctx,
105 struct intel_engine_cs *engine);
106void intel_lr_context_unpin(struct intel_context *ctx,
107 struct intel_engine_cs *engine); 111 struct intel_engine_cs *engine);
108 112
109struct drm_i915_private; 113struct drm_i915_private;
110 114
111void intel_lr_context_reset(struct drm_i915_private *dev_priv, 115void intel_lr_context_reset(struct drm_i915_private *dev_priv,
112 struct intel_context *ctx); 116 struct i915_gem_context *ctx);
113uint64_t intel_lr_context_descriptor(struct intel_context *ctx, 117uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
114 struct intel_engine_cs *engine); 118 struct intel_engine_cs *engine);
115 119
116u32 intel_execlists_ctx_id(struct intel_context *ctx,
117 struct intel_engine_cs *engine);
118
119/* Execlists */ 120/* Execlists */
120int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists); 121int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv,
122 int enable_execlists);
121struct i915_execbuffer_params; 123struct i915_execbuffer_params;
122int intel_execlists_submission(struct i915_execbuffer_params *params, 124int intel_execlists_submission(struct i915_execbuffer_params *params,
123 struct drm_i915_gem_execbuffer2 *args, 125 struct drm_i915_gem_execbuffer2 *args,
124 struct list_head *vmas); 126 struct list_head *vmas);
125 127
126void intel_execlists_retire_requests(struct intel_engine_cs *engine); 128void intel_execlists_cancel_requests(struct intel_engine_cs *engine);
127 129
128#endif /* _INTEL_LRC_H_ */ 130#endif /* _INTEL_LRC_H_ */
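The header diff tracks the intel_context → i915_gem_context rename, drops the removed alloc/free/ctx_id entry points, has intel_sanitize_enable_execlists() take a drm_i915_private, and adds the INTEL_CONTEXT_SCHEDULE_IN/OUT pair. A tiny sketch of how such a schedule-status enum is typically consumed; the notifier name and caller are invented, not taken from the patch:

#include <stdio.h>

/* Hypothetical consumer of a schedule-in/out status enum. */
enum ctx_status { CONTEXT_SCHEDULE_IN = 0, CONTEXT_SCHEDULE_OUT };

static void context_status_notify(const char *name, enum ctx_status status)
{
        switch (status) {
        case CONTEXT_SCHEDULE_IN:
                printf("%s: scheduled in on the hardware\n", name);
                break;
        case CONTEXT_SCHEDULE_OUT:
                printf("%s: switched back out\n", name);
                break;
        }
}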
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 96281e628d2a..49550470483e 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -72,7 +72,7 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
72 enum pipe *pipe) 72 enum pipe *pipe)
73{ 73{
74 struct drm_device *dev = encoder->base.dev; 74 struct drm_device *dev = encoder->base.dev;
75 struct drm_i915_private *dev_priv = dev->dev_private; 75 struct drm_i915_private *dev_priv = to_i915(dev);
76 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); 76 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
77 enum intel_display_power_domain power_domain; 77 enum intel_display_power_domain power_domain;
78 u32 tmp; 78 u32 tmp;
@@ -106,7 +106,7 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
106 struct intel_crtc_state *pipe_config) 106 struct intel_crtc_state *pipe_config)
107{ 107{
108 struct drm_device *dev = encoder->base.dev; 108 struct drm_device *dev = encoder->base.dev;
109 struct drm_i915_private *dev_priv = dev->dev_private; 109 struct drm_i915_private *dev_priv = to_i915(dev);
110 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); 110 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
111 u32 tmp, flags = 0; 111 u32 tmp, flags = 0;
112 112
@@ -140,7 +140,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
140{ 140{
141 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); 141 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
142 struct drm_device *dev = encoder->base.dev; 142 struct drm_device *dev = encoder->base.dev;
143 struct drm_i915_private *dev_priv = dev->dev_private; 143 struct drm_i915_private *dev_priv = to_i915(dev);
144 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 144 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
145 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; 145 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
146 int pipe = crtc->pipe; 146 int pipe = crtc->pipe;
@@ -184,13 +184,13 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder)
184 * panels behave in the two modes. For now, let's just maintain the 184 * panels behave in the two modes. For now, let's just maintain the
185 * value we got from the BIOS. 185 * value we got from the BIOS.
186 */ 186 */
187 temp &= ~LVDS_A3_POWER_MASK; 187 temp &= ~LVDS_A3_POWER_MASK;
188 temp |= lvds_encoder->a3_power; 188 temp |= lvds_encoder->a3_power;
189 189
190 /* Set the dithering flag on LVDS as needed, note that there is no 190 /* Set the dithering flag on LVDS as needed, note that there is no
191 * special lvds dither control bit on pch-split platforms, dithering is 191 * special lvds dither control bit on pch-split platforms, dithering is
192 * only controlled through the PIPECONF reg. */ 192 * only controlled through the PIPECONF reg. */
193 if (INTEL_INFO(dev)->gen == 4) { 193 if (IS_GEN4(dev_priv)) {
194 /* Bspec wording suggests that LVDS port dithering only exists 194 /* Bspec wording suggests that LVDS port dithering only exists
195 * for 18bpp panels. */ 195 * for 18bpp panels. */
196 if (crtc->config->dither && crtc->config->pipe_bpp == 18) 196 if (crtc->config->dither && crtc->config->pipe_bpp == 18)
@@ -216,7 +216,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
216 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); 216 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
217 struct intel_connector *intel_connector = 217 struct intel_connector *intel_connector =
218 &lvds_encoder->attached_connector->base; 218 &lvds_encoder->attached_connector->base;
219 struct drm_i915_private *dev_priv = dev->dev_private; 219 struct drm_i915_private *dev_priv = to_i915(dev);
220 i915_reg_t ctl_reg, stat_reg; 220 i915_reg_t ctl_reg, stat_reg;
221 221
222 if (HAS_PCH_SPLIT(dev)) { 222 if (HAS_PCH_SPLIT(dev)) {
@@ -231,7 +231,7 @@ static void intel_enable_lvds(struct intel_encoder *encoder)
231 231
232 I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON); 232 I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
233 POSTING_READ(lvds_encoder->reg); 233 POSTING_READ(lvds_encoder->reg);
234 if (wait_for((I915_READ(stat_reg) & PP_ON) != 0, 1000)) 234 if (intel_wait_for_register(dev_priv, stat_reg, PP_ON, PP_ON, 1000))
235 DRM_ERROR("timed out waiting for panel to power on\n"); 235 DRM_ERROR("timed out waiting for panel to power on\n");
236 236
237 intel_panel_enable_backlight(intel_connector); 237 intel_panel_enable_backlight(intel_connector);
@@ -241,7 +241,7 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
241{ 241{
242 struct drm_device *dev = encoder->base.dev; 242 struct drm_device *dev = encoder->base.dev;
243 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base); 243 struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
244 struct drm_i915_private *dev_priv = dev->dev_private; 244 struct drm_i915_private *dev_priv = to_i915(dev);
245 i915_reg_t ctl_reg, stat_reg; 245 i915_reg_t ctl_reg, stat_reg;
246 246
247 if (HAS_PCH_SPLIT(dev)) { 247 if (HAS_PCH_SPLIT(dev)) {
@@ -253,7 +253,7 @@ static void intel_disable_lvds(struct intel_encoder *encoder)
253 } 253 }
254 254
255 I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON); 255 I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
256 if (wait_for((I915_READ(stat_reg) & PP_ON) == 0, 1000)) 256 if (intel_wait_for_register(dev_priv, stat_reg, PP_ON, 0, 1000))
257 DRM_ERROR("timed out waiting for panel to power off\n"); 257 DRM_ERROR("timed out waiting for panel to power off\n");
258 258
259 I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) & ~LVDS_PORT_EN); 259 I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) & ~LVDS_PORT_EN);
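Both the power-on and power-off paths above replace an open-coded wait_for((I915_READ(stat_reg) & PP_ON) == ..., 1000) with intel_wait_for_register(dev_priv, reg, mask, value, timeout_ms). The sketch below is a stand-alone analogue of that mask/value polling helper; the read callback, sleep step and names are assumptions, not the driver's implementation:

#include <stdint.h>
#include <unistd.h>

/* Poll read_reg() until (value & mask) == expected, or the timeout expires. */
static int wait_for_register(uint32_t (*read_reg)(void),
                             uint32_t mask, uint32_t expected,
                             unsigned int timeout_ms)
{
        unsigned int waited_us = 0;

        while ((read_reg() & mask) != expected) {
                if (waited_us >= timeout_ms * 1000)
                        return -1;      /* -ETIMEDOUT in kernel terms */
                usleep(1000);
                waited_us += 1000;
        }
        return 0;
}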
@@ -442,7 +442,7 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
442 container_of(nb, struct intel_lvds_connector, lid_notifier); 442 container_of(nb, struct intel_lvds_connector, lid_notifier);
443 struct drm_connector *connector = &lvds_connector->base.base; 443 struct drm_connector *connector = &lvds_connector->base.base;
444 struct drm_device *dev = connector->dev; 444 struct drm_device *dev = connector->dev;
445 struct drm_i915_private *dev_priv = dev->dev_private; 445 struct drm_i915_private *dev_priv = to_i915(dev);
446 446
447 if (dev->switch_power_state != DRM_SWITCH_POWER_ON) 447 if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
448 return NOTIFY_OK; 448 return NOTIFY_OK;
@@ -547,7 +547,6 @@ static int intel_lvds_set_property(struct drm_connector *connector,
547static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = { 547static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
548 .get_modes = intel_lvds_get_modes, 548 .get_modes = intel_lvds_get_modes,
549 .mode_valid = intel_lvds_mode_valid, 549 .mode_valid = intel_lvds_mode_valid,
550 .best_encoder = intel_best_encoder,
551}; 550};
552 551
553static const struct drm_connector_funcs intel_lvds_connector_funcs = { 552static const struct drm_connector_funcs intel_lvds_connector_funcs = {
@@ -556,6 +555,8 @@ static const struct drm_connector_funcs intel_lvds_connector_funcs = {
556 .fill_modes = drm_helper_probe_single_connector_modes, 555 .fill_modes = drm_helper_probe_single_connector_modes,
557 .set_property = intel_lvds_set_property, 556 .set_property = intel_lvds_set_property,
558 .atomic_get_property = intel_connector_atomic_get_property, 557 .atomic_get_property = intel_connector_atomic_get_property,
558 .late_register = intel_connector_register,
559 .early_unregister = intel_connector_unregister,
559 .destroy = intel_lvds_destroy, 560 .destroy = intel_lvds_destroy,
560 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 561 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
561 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, 562 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
@@ -810,27 +811,29 @@ static const struct dmi_system_id intel_dual_link_lvds[] = {
810 { } /* terminating entry */ 811 { } /* terminating entry */
811}; 812};
812 813
813bool intel_is_dual_link_lvds(struct drm_device *dev) 814struct intel_encoder *intel_get_lvds_encoder(struct drm_device *dev)
814{ 815{
815 struct intel_encoder *encoder; 816 struct intel_encoder *intel_encoder;
816 struct intel_lvds_encoder *lvds_encoder;
817 817
818 for_each_intel_encoder(dev, encoder) { 818 for_each_intel_encoder(dev, intel_encoder)
819 if (encoder->type == INTEL_OUTPUT_LVDS) { 819 if (intel_encoder->type == INTEL_OUTPUT_LVDS)
820 lvds_encoder = to_lvds_encoder(&encoder->base); 820 return intel_encoder;
821 821
822 return lvds_encoder->is_dual_link; 822 return NULL;
823 } 823}
824 }
825 824
826 return false; 825bool intel_is_dual_link_lvds(struct drm_device *dev)
826{
827 struct intel_encoder *encoder = intel_get_lvds_encoder(dev);
828
829 return encoder && to_lvds_encoder(&encoder->base)->is_dual_link;
827} 830}
828 831
829static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder) 832static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
830{ 833{
831 struct drm_device *dev = lvds_encoder->base.base.dev; 834 struct drm_device *dev = lvds_encoder->base.base.dev;
832 unsigned int val; 835 unsigned int val;
833 struct drm_i915_private *dev_priv = dev->dev_private; 836 struct drm_i915_private *dev_priv = to_i915(dev);
834 837
835 /* use the module option value if specified */ 838 /* use the module option value if specified */
836 if (i915.lvds_channel_mode > 0) 839 if (i915.lvds_channel_mode > 0)
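In the hunk above, intel_is_dual_link_lvds() shrinks to a one-liner once the encoder walk is factored into intel_get_lvds_encoder(). The same find-then-query shape as a generic C sketch over a plain array; the types and helper names are illustrative only:

#include <stdbool.h>
#include <stddef.h>

enum output_type { OUTPUT_LVDS, OUTPUT_HDMI };

struct encoder { enum output_type type; bool is_dual_link; };

static struct encoder *find_lvds(struct encoder *list, size_t n)
{
        for (size_t i = 0; i < n; i++)
                if (list[i].type == OUTPUT_LVDS)
                        return &list[i];
        return NULL;
}

static bool is_dual_link_lvds(struct encoder *list, size_t n)
{
        struct encoder *e = find_lvds(list, n);

        return e && e->is_dual_link;
}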
@@ -880,7 +883,7 @@ static bool intel_lvds_supported(struct drm_device *dev)
880 */ 883 */
881void intel_lvds_init(struct drm_device *dev) 884void intel_lvds_init(struct drm_device *dev)
882{ 885{
883 struct drm_i915_private *dev_priv = dev->dev_private; 886 struct drm_i915_private *dev_priv = to_i915(dev);
884 struct intel_lvds_encoder *lvds_encoder; 887 struct intel_lvds_encoder *lvds_encoder;
885 struct intel_encoder *intel_encoder; 888 struct intel_encoder *intel_encoder;
886 struct intel_lvds_connector *lvds_connector; 889 struct intel_lvds_connector *lvds_connector;
@@ -978,7 +981,7 @@ void intel_lvds_init(struct drm_device *dev)
978 DRM_MODE_CONNECTOR_LVDS); 981 DRM_MODE_CONNECTOR_LVDS);
979 982
980 drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs, 983 drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs,
981 DRM_MODE_ENCODER_LVDS, NULL); 984 DRM_MODE_ENCODER_LVDS, "LVDS");
982 985
983 intel_encoder->enable = intel_enable_lvds; 986 intel_encoder->enable = intel_enable_lvds;
984 intel_encoder->pre_enable = intel_pre_enable_lvds; 987 intel_encoder->pre_enable = intel_pre_enable_lvds;
@@ -992,7 +995,6 @@ void intel_lvds_init(struct drm_device *dev)
992 intel_encoder->get_hw_state = intel_lvds_get_hw_state; 995 intel_encoder->get_hw_state = intel_lvds_get_hw_state;
993 intel_encoder->get_config = intel_lvds_get_config; 996 intel_encoder->get_config = intel_lvds_get_config;
994 intel_connector->get_hw_state = intel_connector_get_hw_state; 997 intel_connector->get_hw_state = intel_connector_get_hw_state;
995 intel_connector->unregister = intel_connector_unregister;
996 998
997 intel_connector_attach_encoder(intel_connector, intel_encoder); 999 intel_connector_attach_encoder(intel_connector, intel_encoder);
998 intel_encoder->type = INTEL_OUTPUT_LVDS; 1000 intel_encoder->type = INTEL_OUTPUT_LVDS;
@@ -1119,6 +1121,7 @@ out:
1119 mutex_unlock(&dev->mode_config.mutex); 1121 mutex_unlock(&dev->mode_config.mutex);
1120 1122
1121 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode); 1123 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
1124 intel_panel_setup_backlight(connector, INVALID_PIPE);
1122 1125
1123 lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder); 1126 lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
1124 DRM_DEBUG_KMS("detected %s-link lvds configuration\n", 1127 DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
@@ -1131,9 +1134,6 @@ out:
1131 DRM_DEBUG_KMS("lid notifier registration failed\n"); 1134 DRM_DEBUG_KMS("lid notifier registration failed\n");
1132 lvds_connector->lid_notifier.notifier_call = NULL; 1135 lvds_connector->lid_notifier.notifier_call = NULL;
1133 } 1136 }
1134 drm_connector_register(connector);
1135
1136 intel_panel_setup_backlight(connector, INVALID_PIPE);
1137 1137
1138 return; 1138 return;
1139 1139
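The connector-lifecycle hunks in this file follow the register/unregister rework called out in the merge summary: the explicit drm_connector_register() call and the intel_connector->unregister hook go away, registration is driven through the connector funcs' .late_register/.early_unregister callbacks, and backlight setup moves ahead of that point. A minimal sketch of wiring such callbacks into a funcs table; the struct and function names here are invented for illustration:

struct connector;   /* opaque for the sketch */

struct connector_funcs {
        int  (*late_register)(struct connector *c);
        void (*early_unregister)(struct connector *c);
};

static int  my_connector_register(struct connector *c)   { (void)c; return 0; }
static void my_connector_unregister(struct connector *c) { (void)c; }

/* The core invokes these at register/unregister time; the driver no longer
 * calls a register function by hand after init. */
static const struct connector_funcs lvds_funcs = {
        .late_register    = my_connector_register,
        .early_unregister = my_connector_unregister,
};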
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index 6ba4bf7f2a89..927825f5b284 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -66,9 +66,10 @@ struct drm_i915_mocs_table {
66#define L3_WB 3 66#define L3_WB 3
67 67
68/* Target cache */ 68/* Target cache */
69#define ELLC 0 69#define LE_TC_PAGETABLE 0
70#define LLC 1 70#define LE_TC_LLC 1
71#define LLC_ELLC 2 71#define LE_TC_LLC_ELLC 2
72#define LE_TC_LLC_ELLC_ALT 3
72 73
73/* 74/*
74 * MOCS tables 75 * MOCS tables
@@ -96,34 +97,67 @@ struct drm_i915_mocs_table {
96 * end. 97 * end.
97 */ 98 */
98static const struct drm_i915_mocs_entry skylake_mocs_table[] = { 99static const struct drm_i915_mocs_entry skylake_mocs_table[] = {
99 /* { 0x00000009, 0x0010 } */ 100 { /* 0x00000009 */
100 { (LE_CACHEABILITY(LE_UC) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(0) | 101 .control_value = LE_CACHEABILITY(LE_UC) |
101 LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)), 102 LE_TGT_CACHE(LE_TC_LLC_ELLC) |
102 (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC)) }, 103 LE_LRUM(0) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) |
103 /* { 0x00000038, 0x0030 } */ 104 LE_PFM(0) | LE_SCF(0),
104 { (LE_CACHEABILITY(LE_PAGETABLE) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) | 105
105 LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)), 106 /* 0x0010 */
106 (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) }, 107 .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC),
107 /* { 0x0000003b, 0x0030 } */ 108 },
108 { (LE_CACHEABILITY(LE_WB) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) | 109 {
109 LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)), 110 /* 0x00000038 */
110 (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) } 111 .control_value = LE_CACHEABILITY(LE_PAGETABLE) |
112 LE_TGT_CACHE(LE_TC_LLC_ELLC) |
113 LE_LRUM(3) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) |
114 LE_PFM(0) | LE_SCF(0),
115 /* 0x0030 */
116 .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB),
117 },
118 {
119 /* 0x0000003b */
120 .control_value = LE_CACHEABILITY(LE_WB) |
121 LE_TGT_CACHE(LE_TC_LLC_ELLC) |
122 LE_LRUM(3) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) |
123 LE_PFM(0) | LE_SCF(0),
124 /* 0x0030 */
125 .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB),
126 },
111}; 127};
112 128
113/* NOTE: the LE_TGT_CACHE is not used on Broxton */ 129/* NOTE: the LE_TGT_CACHE is not used on Broxton */
114static const struct drm_i915_mocs_entry broxton_mocs_table[] = { 130static const struct drm_i915_mocs_entry broxton_mocs_table[] = {
115 /* { 0x00000009, 0x0010 } */ 131 {
116 { (LE_CACHEABILITY(LE_UC) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(0) | 132 /* 0x00000009 */
117 LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)), 133 .control_value = LE_CACHEABILITY(LE_UC) |
118 (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC)) }, 134 LE_TGT_CACHE(LE_TC_LLC_ELLC) |
119 /* { 0x00000038, 0x0030 } */ 135 LE_LRUM(0) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) |
120 { (LE_CACHEABILITY(LE_PAGETABLE) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) | 136 LE_PFM(0) | LE_SCF(0),
121 LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)), 137
122 (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) }, 138 /* 0x0010 */
123 /* { 0x0000003b, 0x0030 } */ 139 .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC),
124 { (LE_CACHEABILITY(LE_WB) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) | 140 },
125 LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)), 141 {
126 (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) } 142 /* 0x00000038 */
143 .control_value = LE_CACHEABILITY(LE_PAGETABLE) |
144 LE_TGT_CACHE(LE_TC_LLC_ELLC) |
145 LE_LRUM(3) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) |
146 LE_PFM(0) | LE_SCF(0),
147
148 /* 0x0030 */
149 .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB),
150 },
151 {
152 /* 0x00000039 */
153 .control_value = LE_CACHEABILITY(LE_UC) |
154 LE_TGT_CACHE(LE_TC_LLC_ELLC) |
155 LE_LRUM(3) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) |
156 LE_PFM(0) | LE_SCF(0),
157
158 /* 0x0030 */
159 .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB),
160 },
127}; 161};
128 162
129/** 163/**
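The MOCS tables above are rewritten with designated initializers (.control_value / .l3cc_value) and the target-cache names gain an LE_TC_ prefix, so each entry documents itself instead of leaning on a comment with the packed hex value; Broxton also gains a third (0x00000039) entry. A generic example of the same table style, using made-up field encodings:

#include <stdint.h>

struct cache_entry {
        uint32_t control_value;
        uint32_t l3cc_value;
};

#define TGT_CACHE(x)  ((x) << 2)   /* stand-in bit layout */
#define LRU_MGMT(x)   ((x) << 4)
#define L3_CACHE(x)   ((x) << 2)

static const struct cache_entry example_table[] = {
        { /* uncached */
                .control_value = TGT_CACHE(2) | LRU_MGMT(0),
                .l3cc_value    = L3_CACHE(1),
        },
        { /* write-back */
                .control_value = TGT_CACHE(2) | LRU_MGMT(3),
                .l3cc_value    = L3_CACHE(3),
        },
};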
@@ -156,6 +190,16 @@ static bool get_mocs_settings(struct drm_i915_private *dev_priv,
156 "Platform that should have a MOCS table does not.\n"); 190 "Platform that should have a MOCS table does not.\n");
157 } 191 }
158 192
193 /* WaDisableSkipCaching:skl,bxt,kbl */
194 if (IS_GEN9(dev_priv)) {
195 int i;
196
197 for (i = 0; i < table->size; i++)
198 if (WARN_ON(table->table[i].l3cc_value &
199 (L3_ESC(1) | L3_SCC(0x7))))
200 return false;
201 }
202
159 return result; 203 return result;
160} 204}
161 205
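The hunk above makes get_mocs_settings() reject (with a WARN) any gen9 entry that sets the L3 ESC bit or a non-zero SCC field, per the WaDisableSkipCaching workaround tagged skl/bxt/kbl. The same validate-the-table-before-use pattern as a small free-standing check; the bit positions and the warning are stand-ins:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define L3_ESC_BIT  (1u << 0)          /* stand-in bit positions */
#define L3_SCC_MASK (0x7u << 1)

struct mocs_entry { uint32_t l3cc_value; };

static bool table_is_valid(const struct mocs_entry *t, int n)
{
        for (int i = 0; i < n; i++) {
                if (t[i].l3cc_value & (L3_ESC_BIT | L3_SCC_MASK)) {
                        fprintf(stderr, "entry %d sets forbidden L3 bits\n", i);
                        return false;
                }
        }
        return true;
}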
@@ -189,7 +233,7 @@ static i915_reg_t mocs_register(enum intel_engine_id ring, int index)
189 */ 233 */
190int intel_mocs_init_engine(struct intel_engine_cs *engine) 234int intel_mocs_init_engine(struct intel_engine_cs *engine)
191{ 235{
192 struct drm_i915_private *dev_priv = to_i915(engine->dev); 236 struct drm_i915_private *dev_priv = engine->i915;
193 struct drm_i915_mocs_table table; 237 struct drm_i915_mocs_table table;
194 unsigned int index; 238 unsigned int index;
195 239
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index 38a4c8ce7e63..f2584d0a01ab 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -82,7 +82,7 @@ void
82intel_attach_force_audio_property(struct drm_connector *connector) 82intel_attach_force_audio_property(struct drm_connector *connector)
83{ 83{
84 struct drm_device *dev = connector->dev; 84 struct drm_device *dev = connector->dev;
85 struct drm_i915_private *dev_priv = dev->dev_private; 85 struct drm_i915_private *dev_priv = to_i915(dev);
86 struct drm_property *prop; 86 struct drm_property *prop;
87 87
88 prop = dev_priv->force_audio_property; 88 prop = dev_priv->force_audio_property;
@@ -109,7 +109,7 @@ void
109intel_attach_broadcast_rgb_property(struct drm_connector *connector) 109intel_attach_broadcast_rgb_property(struct drm_connector *connector)
110{ 110{
111 struct drm_device *dev = connector->dev; 111 struct drm_device *dev = connector->dev;
112 struct drm_i915_private *dev_priv = dev->dev_private; 112 struct drm_i915_private *dev_priv = to_i915(dev);
113 struct drm_property *prop; 113 struct drm_property *prop;
114 114
115 prop = dev_priv->broadcast_rgb_property; 115 prop = dev_priv->broadcast_rgb_property;
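Like most hunks in this pull, the changes here are part of the drm_device → drm_i915_private plumbing: dev->dev_private becomes to_i915(dev), and helpers increasingly take dev_priv directly. The opregion hunks below reach the drm_device back through dev_priv->drm, i.e. it is embedded in the private structure, so to_i915() amounts to a container_of(). A stripped-down sketch of that relationship, not the driver's actual definitions:

#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct drm_device { int dummy; };

struct i915_private {
        struct drm_device drm;   /* embedded, not separately allocated */
        int quirks;
};

static inline struct i915_private *to_i915(struct drm_device *dev)
{
        return container_of(dev, struct i915_private, drm);
}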
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 16e209d326b6..adca262d591a 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -232,18 +232,36 @@ struct opregion_asle_ext {
232#define SWSCI_SBCB_POST_VBE_PM SWSCI_FUNCTION_CODE(SWSCI_SBCB, 19) 232#define SWSCI_SBCB_POST_VBE_PM SWSCI_FUNCTION_CODE(SWSCI_SBCB, 19)
233#define SWSCI_SBCB_ENABLE_DISABLE_AUDIO SWSCI_FUNCTION_CODE(SWSCI_SBCB, 21) 233#define SWSCI_SBCB_ENABLE_DISABLE_AUDIO SWSCI_FUNCTION_CODE(SWSCI_SBCB, 21)
234 234
235#define ACPI_OTHER_OUTPUT (0<<8) 235/*
236#define ACPI_VGA_OUTPUT (1<<8) 236 * ACPI Specification, Revision 5.0, Appendix B.3.2 _DOD (Enumerate All Devices
237#define ACPI_TV_OUTPUT (2<<8) 237 * Attached to the Display Adapter).
238#define ACPI_DIGITAL_OUTPUT (3<<8) 238 */
239#define ACPI_LVDS_OUTPUT (4<<8) 239#define ACPI_DISPLAY_INDEX_SHIFT 0
240#define ACPI_DISPLAY_INDEX_MASK (0xf << 0)
241#define ACPI_DISPLAY_PORT_ATTACHMENT_SHIFT 4
242#define ACPI_DISPLAY_PORT_ATTACHMENT_MASK (0xf << 4)
243#define ACPI_DISPLAY_TYPE_SHIFT 8
244#define ACPI_DISPLAY_TYPE_MASK (0xf << 8)
245#define ACPI_DISPLAY_TYPE_OTHER (0 << 8)
246#define ACPI_DISPLAY_TYPE_VGA (1 << 8)
247#define ACPI_DISPLAY_TYPE_TV (2 << 8)
248#define ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL (3 << 8)
249#define ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL (4 << 8)
250#define ACPI_VENDOR_SPECIFIC_SHIFT 12
251#define ACPI_VENDOR_SPECIFIC_MASK (0xf << 12)
252#define ACPI_BIOS_CAN_DETECT (1 << 16)
253#define ACPI_DEPENDS_ON_VGA (1 << 17)
254#define ACPI_PIPE_ID_SHIFT 18
255#define ACPI_PIPE_ID_MASK (7 << 18)
256#define ACPI_DEVICE_ID_SCHEME (1 << 31)
240 257
241#define MAX_DSLP 1500 258#define MAX_DSLP 1500
242 259
243static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out) 260static int swsci(struct drm_i915_private *dev_priv,
261 u32 function, u32 parm, u32 *parm_out)
244{ 262{
245 struct drm_i915_private *dev_priv = dev->dev_private;
246 struct opregion_swsci *swsci = dev_priv->opregion.swsci; 263 struct opregion_swsci *swsci = dev_priv->opregion.swsci;
264 struct pci_dev *pdev = dev_priv->drm.pdev;
247 u32 main_function, sub_function, scic; 265 u32 main_function, sub_function, scic;
248 u16 swsci_val; 266 u16 swsci_val;
249 u32 dslp; 267 u32 dslp;
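The ad-hoc ACPI_*_OUTPUT values are replaced above by the full _DOD device-ID bitfield layout from ACPI 5.0 Appendix B.3.2, and swsci() now takes the drm_i915_private so it can reach the PCI device via dev_priv->drm.pdev. Mirroring the blind_set composition later in this file (bit 31 | display type | index), building a device ID for display index 2 of an internal digital panel would look roughly like this; the helper name is made up:

#include <stdint.h>

#define ACPI_DISPLAY_INDEX_SHIFT            0
#define ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL  (4 << 8)
#define ACPI_DEVICE_ID_SCHEME               (1u << 31)

static uint32_t make_dod_id(uint32_t index, uint32_t type)
{
        return ACPI_DEVICE_ID_SCHEME | type |
               (index << ACPI_DISPLAY_INDEX_SHIFT);
}

/* make_dod_id(2, ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL) == 0x80000402 */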
@@ -293,16 +311,16 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
293 swsci->scic = scic; 311 swsci->scic = scic;
294 312
295 /* Ensure SCI event is selected and event trigger is cleared. */ 313 /* Ensure SCI event is selected and event trigger is cleared. */
296 pci_read_config_word(dev->pdev, SWSCI, &swsci_val); 314 pci_read_config_word(pdev, SWSCI, &swsci_val);
297 if (!(swsci_val & SWSCI_SCISEL) || (swsci_val & SWSCI_GSSCIE)) { 315 if (!(swsci_val & SWSCI_SCISEL) || (swsci_val & SWSCI_GSSCIE)) {
298 swsci_val |= SWSCI_SCISEL; 316 swsci_val |= SWSCI_SCISEL;
299 swsci_val &= ~SWSCI_GSSCIE; 317 swsci_val &= ~SWSCI_GSSCIE;
300 pci_write_config_word(dev->pdev, SWSCI, swsci_val); 318 pci_write_config_word(pdev, SWSCI, swsci_val);
301 } 319 }
302 320
303 /* Use event trigger to tell bios to check the mail. */ 321 /* Use event trigger to tell bios to check the mail. */
304 swsci_val |= SWSCI_GSSCIE; 322 swsci_val |= SWSCI_GSSCIE;
305 pci_write_config_word(dev->pdev, SWSCI, swsci_val); 323 pci_write_config_word(pdev, SWSCI, swsci_val);
306 324
307 /* Poll for the result. */ 325 /* Poll for the result. */
308#define C (((scic = swsci->scic) & SWSCI_SCIC_INDICATOR) == 0) 326#define C (((scic = swsci->scic) & SWSCI_SCIC_INDICATOR) == 0)
@@ -336,13 +354,13 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
336int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, 354int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
337 bool enable) 355 bool enable)
338{ 356{
339 struct drm_device *dev = intel_encoder->base.dev; 357 struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
340 u32 parm = 0; 358 u32 parm = 0;
341 u32 type = 0; 359 u32 type = 0;
342 u32 port; 360 u32 port;
343 361
344 /* don't care about old stuff for now */ 362 /* don't care about old stuff for now */
345 if (!HAS_DDI(dev)) 363 if (!HAS_DDI(dev_priv))
346 return 0; 364 return 0;
347 365
348 if (intel_encoder->type == INTEL_OUTPUT_DSI) 366 if (intel_encoder->type == INTEL_OUTPUT_DSI)
@@ -365,7 +383,7 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
365 type = DISPLAY_TYPE_CRT; 383 type = DISPLAY_TYPE_CRT;
366 break; 384 break;
367 case INTEL_OUTPUT_UNKNOWN: 385 case INTEL_OUTPUT_UNKNOWN:
368 case INTEL_OUTPUT_DISPLAYPORT: 386 case INTEL_OUTPUT_DP:
369 case INTEL_OUTPUT_HDMI: 387 case INTEL_OUTPUT_HDMI:
370 case INTEL_OUTPUT_DP_MST: 388 case INTEL_OUTPUT_DP_MST:
371 type = DISPLAY_TYPE_EXTERNAL_FLAT_PANEL; 389 type = DISPLAY_TYPE_EXTERNAL_FLAT_PANEL;
@@ -382,7 +400,7 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
382 400
383 parm |= type << (16 + port * 3); 401 parm |= type << (16 + port * 3);
384 402
385 return swsci(dev, SWSCI_SBCB_DISPLAY_POWER_STATE, parm, NULL); 403 return swsci(dev_priv, SWSCI_SBCB_DISPLAY_POWER_STATE, parm, NULL);
386} 404}
387 405
388static const struct { 406static const struct {
@@ -396,27 +414,28 @@ static const struct {
396 { PCI_D3cold, 0x04 }, 414 { PCI_D3cold, 0x04 },
397}; 415};
398 416
399int intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state) 417int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv,
418 pci_power_t state)
400{ 419{
401 int i; 420 int i;
402 421
403 if (!HAS_DDI(dev)) 422 if (!HAS_DDI(dev_priv))
404 return 0; 423 return 0;
405 424
406 for (i = 0; i < ARRAY_SIZE(power_state_map); i++) { 425 for (i = 0; i < ARRAY_SIZE(power_state_map); i++) {
407 if (state == power_state_map[i].pci_power_state) 426 if (state == power_state_map[i].pci_power_state)
408 return swsci(dev, SWSCI_SBCB_ADAPTER_POWER_STATE, 427 return swsci(dev_priv, SWSCI_SBCB_ADAPTER_POWER_STATE,
409 power_state_map[i].parm, NULL); 428 power_state_map[i].parm, NULL);
410 } 429 }
411 430
412 return -EINVAL; 431 return -EINVAL;
413} 432}
414 433
415static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) 434static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp)
416{ 435{
417 struct drm_i915_private *dev_priv = dev->dev_private;
418 struct intel_connector *connector; 436 struct intel_connector *connector;
419 struct opregion_asle *asle = dev_priv->opregion.asle; 437 struct opregion_asle *asle = dev_priv->opregion.asle;
438 struct drm_device *dev = &dev_priv->drm;
420 439
421 DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp); 440 DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
422 441
@@ -449,7 +468,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
449 return 0; 468 return 0;
450} 469}
451 470
452static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi) 471static u32 asle_set_als_illum(struct drm_i915_private *dev_priv, u32 alsi)
453{ 472{
454 /* alsi is the current ALS reading in lux. 0 indicates below sensor 473 /* alsi is the current ALS reading in lux. 0 indicates below sensor
455 range, 0xffff indicates above sensor range. 1-0xfffe are valid */ 474 range, 0xffff indicates above sensor range. 1-0xfffe are valid */
@@ -457,13 +476,13 @@ static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
457 return ASLC_ALS_ILLUM_FAILED; 476 return ASLC_ALS_ILLUM_FAILED;
458} 477}
459 478
460static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb) 479static u32 asle_set_pwm_freq(struct drm_i915_private *dev_priv, u32 pfmb)
461{ 480{
462 DRM_DEBUG_DRIVER("PWM freq is not supported\n"); 481 DRM_DEBUG_DRIVER("PWM freq is not supported\n");
463 return ASLC_PWM_FREQ_FAILED; 482 return ASLC_PWM_FREQ_FAILED;
464} 483}
465 484
466static u32 asle_set_pfit(struct drm_device *dev, u32 pfit) 485static u32 asle_set_pfit(struct drm_i915_private *dev_priv, u32 pfit)
467{ 486{
468 /* Panel fitting is currently controlled by the X code, so this is a 487 /* Panel fitting is currently controlled by the X code, so this is a
469 noop until modesetting support works fully */ 488 noop until modesetting support works fully */
@@ -471,13 +490,13 @@ static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
471 return ASLC_PFIT_FAILED; 490 return ASLC_PFIT_FAILED;
472} 491}
473 492
474static u32 asle_set_supported_rotation_angles(struct drm_device *dev, u32 srot) 493static u32 asle_set_supported_rotation_angles(struct drm_i915_private *dev_priv, u32 srot)
475{ 494{
476 DRM_DEBUG_DRIVER("SROT is not supported\n"); 495 DRM_DEBUG_DRIVER("SROT is not supported\n");
477 return ASLC_ROTATION_ANGLES_FAILED; 496 return ASLC_ROTATION_ANGLES_FAILED;
478} 497}
479 498
480static u32 asle_set_button_array(struct drm_device *dev, u32 iuer) 499static u32 asle_set_button_array(struct drm_i915_private *dev_priv, u32 iuer)
481{ 500{
482 if (!iuer) 501 if (!iuer)
483 DRM_DEBUG_DRIVER("Button array event is not supported (nothing)\n"); 502 DRM_DEBUG_DRIVER("Button array event is not supported (nothing)\n");
@@ -495,7 +514,7 @@ static u32 asle_set_button_array(struct drm_device *dev, u32 iuer)
495 return ASLC_BUTTON_ARRAY_FAILED; 514 return ASLC_BUTTON_ARRAY_FAILED;
496} 515}
497 516
498static u32 asle_set_convertible(struct drm_device *dev, u32 iuer) 517static u32 asle_set_convertible(struct drm_i915_private *dev_priv, u32 iuer)
499{ 518{
500 if (iuer & ASLE_IUER_CONVERTIBLE) 519 if (iuer & ASLE_IUER_CONVERTIBLE)
501 DRM_DEBUG_DRIVER("Convertible is not supported (clamshell)\n"); 520 DRM_DEBUG_DRIVER("Convertible is not supported (clamshell)\n");
@@ -505,7 +524,7 @@ static u32 asle_set_convertible(struct drm_device *dev, u32 iuer)
505 return ASLC_CONVERTIBLE_FAILED; 524 return ASLC_CONVERTIBLE_FAILED;
506} 525}
507 526
508static u32 asle_set_docking(struct drm_device *dev, u32 iuer) 527static u32 asle_set_docking(struct drm_i915_private *dev_priv, u32 iuer)
509{ 528{
510 if (iuer & ASLE_IUER_DOCKING) 529 if (iuer & ASLE_IUER_DOCKING)
511 DRM_DEBUG_DRIVER("Docking is not supported (docked)\n"); 530 DRM_DEBUG_DRIVER("Docking is not supported (docked)\n");
@@ -515,7 +534,7 @@ static u32 asle_set_docking(struct drm_device *dev, u32 iuer)
515 return ASLC_DOCKING_FAILED; 534 return ASLC_DOCKING_FAILED;
516} 535}
517 536
518static u32 asle_isct_state(struct drm_device *dev) 537static u32 asle_isct_state(struct drm_i915_private *dev_priv)
519{ 538{
520 DRM_DEBUG_DRIVER("ISCT is not supported\n"); 539 DRM_DEBUG_DRIVER("ISCT is not supported\n");
521 return ASLC_ISCT_STATE_FAILED; 540 return ASLC_ISCT_STATE_FAILED;
@@ -527,7 +546,6 @@ static void asle_work(struct work_struct *work)
527 container_of(work, struct intel_opregion, asle_work); 546 container_of(work, struct intel_opregion, asle_work);
528 struct drm_i915_private *dev_priv = 547 struct drm_i915_private *dev_priv =
529 container_of(opregion, struct drm_i915_private, opregion); 548 container_of(opregion, struct drm_i915_private, opregion);
530 struct drm_device *dev = dev_priv->dev;
531 struct opregion_asle *asle = dev_priv->opregion.asle; 549 struct opregion_asle *asle = dev_priv->opregion.asle;
532 u32 aslc_stat = 0; 550 u32 aslc_stat = 0;
533 u32 aslc_req; 551 u32 aslc_req;
@@ -544,40 +562,38 @@ static void asle_work(struct work_struct *work)
544 } 562 }
545 563
546 if (aslc_req & ASLC_SET_ALS_ILLUM) 564 if (aslc_req & ASLC_SET_ALS_ILLUM)
547 aslc_stat |= asle_set_als_illum(dev, asle->alsi); 565 aslc_stat |= asle_set_als_illum(dev_priv, asle->alsi);
548 566
549 if (aslc_req & ASLC_SET_BACKLIGHT) 567 if (aslc_req & ASLC_SET_BACKLIGHT)
550 aslc_stat |= asle_set_backlight(dev, asle->bclp); 568 aslc_stat |= asle_set_backlight(dev_priv, asle->bclp);
551 569
552 if (aslc_req & ASLC_SET_PFIT) 570 if (aslc_req & ASLC_SET_PFIT)
553 aslc_stat |= asle_set_pfit(dev, asle->pfit); 571 aslc_stat |= asle_set_pfit(dev_priv, asle->pfit);
554 572
555 if (aslc_req & ASLC_SET_PWM_FREQ) 573 if (aslc_req & ASLC_SET_PWM_FREQ)
556 aslc_stat |= asle_set_pwm_freq(dev, asle->pfmb); 574 aslc_stat |= asle_set_pwm_freq(dev_priv, asle->pfmb);
557 575
558 if (aslc_req & ASLC_SUPPORTED_ROTATION_ANGLES) 576 if (aslc_req & ASLC_SUPPORTED_ROTATION_ANGLES)
559 aslc_stat |= asle_set_supported_rotation_angles(dev, 577 aslc_stat |= asle_set_supported_rotation_angles(dev_priv,
560 asle->srot); 578 asle->srot);
561 579
562 if (aslc_req & ASLC_BUTTON_ARRAY) 580 if (aslc_req & ASLC_BUTTON_ARRAY)
563 aslc_stat |= asle_set_button_array(dev, asle->iuer); 581 aslc_stat |= asle_set_button_array(dev_priv, asle->iuer);
564 582
565 if (aslc_req & ASLC_CONVERTIBLE_INDICATOR) 583 if (aslc_req & ASLC_CONVERTIBLE_INDICATOR)
566 aslc_stat |= asle_set_convertible(dev, asle->iuer); 584 aslc_stat |= asle_set_convertible(dev_priv, asle->iuer);
567 585
568 if (aslc_req & ASLC_DOCKING_INDICATOR) 586 if (aslc_req & ASLC_DOCKING_INDICATOR)
569 aslc_stat |= asle_set_docking(dev, asle->iuer); 587 aslc_stat |= asle_set_docking(dev_priv, asle->iuer);
570 588
571 if (aslc_req & ASLC_ISCT_STATE_CHANGE) 589 if (aslc_req & ASLC_ISCT_STATE_CHANGE)
572 aslc_stat |= asle_isct_state(dev); 590 aslc_stat |= asle_isct_state(dev_priv);
573 591
574 asle->aslc = aslc_stat; 592 asle->aslc = aslc_stat;
575} 593}
576 594
577void intel_opregion_asle_intr(struct drm_device *dev) 595void intel_opregion_asle_intr(struct drm_i915_private *dev_priv)
578{ 596{
579 struct drm_i915_private *dev_priv = dev->dev_private;
580
581 if (dev_priv->opregion.asle) 597 if (dev_priv->opregion.asle)
582 schedule_work(&dev_priv->opregion.asle_work); 598 schedule_work(&dev_priv->opregion.asle_work);
583} 599}
@@ -658,10 +674,51 @@ static void set_did(struct intel_opregion *opregion, int i, u32 val)
658 } 674 }
659} 675}
660 676
661static void intel_didl_outputs(struct drm_device *dev) 677static u32 acpi_display_type(struct drm_connector *connector)
678{
679 u32 display_type;
680
681 switch (connector->connector_type) {
682 case DRM_MODE_CONNECTOR_VGA:
683 case DRM_MODE_CONNECTOR_DVIA:
684 display_type = ACPI_DISPLAY_TYPE_VGA;
685 break;
686 case DRM_MODE_CONNECTOR_Composite:
687 case DRM_MODE_CONNECTOR_SVIDEO:
688 case DRM_MODE_CONNECTOR_Component:
689 case DRM_MODE_CONNECTOR_9PinDIN:
690 case DRM_MODE_CONNECTOR_TV:
691 display_type = ACPI_DISPLAY_TYPE_TV;
692 break;
693 case DRM_MODE_CONNECTOR_DVII:
694 case DRM_MODE_CONNECTOR_DVID:
695 case DRM_MODE_CONNECTOR_DisplayPort:
696 case DRM_MODE_CONNECTOR_HDMIA:
697 case DRM_MODE_CONNECTOR_HDMIB:
698 display_type = ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL;
699 break;
700 case DRM_MODE_CONNECTOR_LVDS:
701 case DRM_MODE_CONNECTOR_eDP:
702 case DRM_MODE_CONNECTOR_DSI:
703 display_type = ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL;
704 break;
705 case DRM_MODE_CONNECTOR_Unknown:
706 case DRM_MODE_CONNECTOR_VIRTUAL:
707 display_type = ACPI_DISPLAY_TYPE_OTHER;
708 break;
709 default:
710 MISSING_CASE(connector->connector_type);
711 display_type = ACPI_DISPLAY_TYPE_OTHER;
712 break;
713 }
714
715 return display_type;
716}
717
718static void intel_didl_outputs(struct drm_i915_private *dev_priv)
662{ 719{
663 struct drm_i915_private *dev_priv = dev->dev_private;
664 struct intel_opregion *opregion = &dev_priv->opregion; 720 struct intel_opregion *opregion = &dev_priv->opregion;
721 struct pci_dev *pdev = dev_priv->drm.pdev;
665 struct drm_connector *connector; 722 struct drm_connector *connector;
666 acpi_handle handle; 723 acpi_handle handle;
667 struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL; 724 struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL;
@@ -670,7 +727,7 @@ static void intel_didl_outputs(struct drm_device *dev)
670 u32 temp, max_outputs; 727 u32 temp, max_outputs;
671 int i = 0; 728 int i = 0;
672 729
673 handle = ACPI_HANDLE(&dev->pdev->dev); 730 handle = ACPI_HANDLE(&pdev->dev);
674 if (!handle || acpi_bus_get_device(handle, &acpi_dev)) 731 if (!handle || acpi_bus_get_device(handle, &acpi_dev))
675 return; 732 return;
676 733
@@ -725,45 +782,25 @@ end:
725 782
726blind_set: 783blind_set:
727 i = 0; 784 i = 0;
728 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 785 list_for_each_entry(connector,
729 int output_type = ACPI_OTHER_OUTPUT; 786 &dev_priv->drm.mode_config.connector_list, head) {
787 int display_type = acpi_display_type(connector);
788
730 if (i >= max_outputs) { 789 if (i >= max_outputs) {
731 DRM_DEBUG_KMS("More than %u outputs in connector list\n", 790 DRM_DEBUG_KMS("More than %u outputs in connector list\n",
732 max_outputs); 791 max_outputs);
733 return; 792 return;
734 } 793 }
735 switch (connector->connector_type) { 794
736 case DRM_MODE_CONNECTOR_VGA:
737 case DRM_MODE_CONNECTOR_DVIA:
738 output_type = ACPI_VGA_OUTPUT;
739 break;
740 case DRM_MODE_CONNECTOR_Composite:
741 case DRM_MODE_CONNECTOR_SVIDEO:
742 case DRM_MODE_CONNECTOR_Component:
743 case DRM_MODE_CONNECTOR_9PinDIN:
744 output_type = ACPI_TV_OUTPUT;
745 break;
746 case DRM_MODE_CONNECTOR_DVII:
747 case DRM_MODE_CONNECTOR_DVID:
748 case DRM_MODE_CONNECTOR_DisplayPort:
749 case DRM_MODE_CONNECTOR_HDMIA:
750 case DRM_MODE_CONNECTOR_HDMIB:
751 output_type = ACPI_DIGITAL_OUTPUT;
752 break;
753 case DRM_MODE_CONNECTOR_LVDS:
754 output_type = ACPI_LVDS_OUTPUT;
755 break;
756 }
757 temp = get_did(opregion, i); 795 temp = get_did(opregion, i);
758 set_did(opregion, i, temp | (1 << 31) | output_type | i); 796 set_did(opregion, i, temp | (1 << 31) | display_type | i);
759 i++; 797 i++;
760 } 798 }
761 goto end; 799 goto end;
762} 800}
763 801
764static void intel_setup_cadls(struct drm_device *dev) 802static void intel_setup_cadls(struct drm_i915_private *dev_priv)
765{ 803{
766 struct drm_i915_private *dev_priv = dev->dev_private;
767 struct intel_opregion *opregion = &dev_priv->opregion; 804 struct intel_opregion *opregion = &dev_priv->opregion;
768 int i = 0; 805 int i = 0;
769 u32 disp_id; 806 u32 disp_id;
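blind_set now calls the new acpi_display_type() helper instead of open-coding the connector-type switch, so unrecognised connector types are flagged by MISSING_CASE and fall back to ACPI_DISPLAY_TYPE_OTHER. A compact analogue of that mapping-with-fallback, written as a lookup table rather than a switch; the connector IDs are invented:

#include <stdint.h>
#include <stdio.h>

enum conn { CONN_VGA, CONN_TV, CONN_HDMI, CONN_LVDS, CONN_COUNT };

static const uint32_t display_type_for[CONN_COUNT] = {
        [CONN_VGA]  = 1 << 8,   /* VGA */
        [CONN_TV]   = 2 << 8,   /* TV */
        [CONN_HDMI] = 3 << 8,   /* external digital */
        [CONN_LVDS] = 4 << 8,   /* internal digital */
};

static uint32_t acpi_type(int conn)
{
        if (conn < 0 || conn >= CONN_COUNT) {
                fprintf(stderr, "missing case %d\n", conn); /* MISSING_CASE analogue */
                return 0;                                   /* "other" */
        }
        return display_type_for[conn];
}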
@@ -780,17 +817,16 @@ static void intel_setup_cadls(struct drm_device *dev)
780 } while (++i < 8 && disp_id != 0); 817 } while (++i < 8 && disp_id != 0);
781} 818}
782 819
783void intel_opregion_init(struct drm_device *dev) 820void intel_opregion_register(struct drm_i915_private *dev_priv)
784{ 821{
785 struct drm_i915_private *dev_priv = dev->dev_private;
786 struct intel_opregion *opregion = &dev_priv->opregion; 822 struct intel_opregion *opregion = &dev_priv->opregion;
787 823
788 if (!opregion->header) 824 if (!opregion->header)
789 return; 825 return;
790 826
791 if (opregion->acpi) { 827 if (opregion->acpi) {
792 intel_didl_outputs(dev); 828 intel_didl_outputs(dev_priv);
793 intel_setup_cadls(dev); 829 intel_setup_cadls(dev_priv);
794 830
795 /* Notify BIOS we are ready to handle ACPI video ext notifs. 831 /* Notify BIOS we are ready to handle ACPI video ext notifs.
796 * Right now, all the events are handled by the ACPI video module. 832 * Right now, all the events are handled by the ACPI video module.
@@ -808,9 +844,8 @@ void intel_opregion_init(struct drm_device *dev)
808 } 844 }
809} 845}
810 846
811void intel_opregion_fini(struct drm_device *dev) 847void intel_opregion_unregister(struct drm_i915_private *dev_priv)
812{ 848{
813 struct drm_i915_private *dev_priv = dev->dev_private;
814 struct intel_opregion *opregion = &dev_priv->opregion; 849 struct intel_opregion *opregion = &dev_priv->opregion;
815 850
816 if (!opregion->header) 851 if (!opregion->header)
@@ -842,9 +877,8 @@ void intel_opregion_fini(struct drm_device *dev)
842 opregion->lid_state = NULL; 877 opregion->lid_state = NULL;
843} 878}
844 879
845static void swsci_setup(struct drm_device *dev) 880static void swsci_setup(struct drm_i915_private *dev_priv)
846{ 881{
847 struct drm_i915_private *dev_priv = dev->dev_private;
848 struct intel_opregion *opregion = &dev_priv->opregion; 882 struct intel_opregion *opregion = &dev_priv->opregion;
849 bool requested_callbacks = false; 883 bool requested_callbacks = false;
850 u32 tmp; 884 u32 tmp;
@@ -854,7 +888,7 @@ static void swsci_setup(struct drm_device *dev)
854 opregion->swsci_sbcb_sub_functions = 1; 888 opregion->swsci_sbcb_sub_functions = 1;
855 889
856 /* We use GBDA to ask for supported GBDA calls. */ 890 /* We use GBDA to ask for supported GBDA calls. */
857 if (swsci(dev, SWSCI_GBDA_SUPPORTED_CALLS, 0, &tmp) == 0) { 891 if (swsci(dev_priv, SWSCI_GBDA_SUPPORTED_CALLS, 0, &tmp) == 0) {
858 /* make the bits match the sub-function codes */ 892 /* make the bits match the sub-function codes */
859 tmp <<= 1; 893 tmp <<= 1;
860 opregion->swsci_gbda_sub_functions |= tmp; 894 opregion->swsci_gbda_sub_functions |= tmp;
@@ -865,7 +899,7 @@ static void swsci_setup(struct drm_device *dev)
865 * must not call interfaces that are not specifically requested by the 899 * must not call interfaces that are not specifically requested by the
866 * bios. 900 * bios.
867 */ 901 */
868 if (swsci(dev, SWSCI_GBDA_REQUESTED_CALLBACKS, 0, &tmp) == 0) { 902 if (swsci(dev_priv, SWSCI_GBDA_REQUESTED_CALLBACKS, 0, &tmp) == 0) {
869 /* here, the bits already match sub-function codes */ 903 /* here, the bits already match sub-function codes */
870 opregion->swsci_sbcb_sub_functions |= tmp; 904 opregion->swsci_sbcb_sub_functions |= tmp;
871 requested_callbacks = true; 905 requested_callbacks = true;
@@ -876,7 +910,7 @@ static void swsci_setup(struct drm_device *dev)
876 * the callback is _requested_. But we still can't call interfaces that 910 * the callback is _requested_. But we still can't call interfaces that
877 * are not requested. 911 * are not requested.
878 */ 912 */
879 if (swsci(dev, SWSCI_SBCB_SUPPORTED_CALLBACKS, 0, &tmp) == 0) { 913 if (swsci(dev_priv, SWSCI_SBCB_SUPPORTED_CALLBACKS, 0, &tmp) == 0) {
880 /* make the bits match the sub-function codes */ 914 /* make the bits match the sub-function codes */
881 u32 low = tmp & 0x7ff; 915 u32 low = tmp & 0x7ff;
882 u32 high = tmp & ~0xfff; /* bit 11 is reserved */ 916 u32 high = tmp & ~0xfff; /* bit 11 is reserved */
@@ -918,10 +952,10 @@ static const struct dmi_system_id intel_no_opregion_vbt[] = {
918 { } 952 { }
919}; 953};
920 954
921int intel_opregion_setup(struct drm_device *dev) 955int intel_opregion_setup(struct drm_i915_private *dev_priv)
922{ 956{
923 struct drm_i915_private *dev_priv = dev->dev_private;
924 struct intel_opregion *opregion = &dev_priv->opregion; 957 struct intel_opregion *opregion = &dev_priv->opregion;
958 struct pci_dev *pdev = dev_priv->drm.pdev;
925 u32 asls, mboxes; 959 u32 asls, mboxes;
926 char buf[sizeof(OPREGION_SIGNATURE)]; 960 char buf[sizeof(OPREGION_SIGNATURE)];
927 int err = 0; 961 int err = 0;
@@ -933,7 +967,7 @@ int intel_opregion_setup(struct drm_device *dev)
933 BUILD_BUG_ON(sizeof(struct opregion_asle) != 0x100); 967 BUILD_BUG_ON(sizeof(struct opregion_asle) != 0x100);
934 BUILD_BUG_ON(sizeof(struct opregion_asle_ext) != 0x400); 968 BUILD_BUG_ON(sizeof(struct opregion_asle_ext) != 0x400);
935 969
936 pci_read_config_dword(dev->pdev, ASLS, &asls); 970 pci_read_config_dword(pdev, ASLS, &asls);
937 DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls); 971 DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls);
938 if (asls == 0) { 972 if (asls == 0) {
939 DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n"); 973 DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n");
@@ -965,7 +999,7 @@ int intel_opregion_setup(struct drm_device *dev)
965 if (mboxes & MBOX_SWSCI) { 999 if (mboxes & MBOX_SWSCI) {
966 DRM_DEBUG_DRIVER("SWSCI supported\n"); 1000 DRM_DEBUG_DRIVER("SWSCI supported\n");
967 opregion->swsci = base + OPREGION_SWSCI_OFFSET; 1001 opregion->swsci = base + OPREGION_SWSCI_OFFSET;
968 swsci_setup(dev); 1002 swsci_setup(dev_priv);
969 } 1003 }
970 1004
971 if (mboxes & MBOX_ASLE) { 1005 if (mboxes & MBOX_ASLE) {
@@ -1014,12 +1048,12 @@ err_out:
1014} 1048}
1015 1049
1016int 1050int
1017intel_opregion_get_panel_type(struct drm_device *dev) 1051intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
1018{ 1052{
1019 u32 panel_details; 1053 u32 panel_details;
1020 int ret; 1054 int ret;
1021 1055
1022 ret = swsci(dev, SWSCI_GBDA_PANEL_DETAILS, 0x0, &panel_details); 1056 ret = swsci(dev_priv, SWSCI_GBDA_PANEL_DETAILS, 0x0, &panel_details);
1023 if (ret) { 1057 if (ret) {
1024 DRM_DEBUG_KMS("Failed to get panel details from OpRegion (%d)\n", 1058 DRM_DEBUG_KMS("Failed to get panel details from OpRegion (%d)\n",
1025 ret); 1059 ret);
@@ -1044,7 +1078,7 @@ intel_opregion_get_panel_type(struct drm_device *dev)
1044 * vswing instead. Low vswing results in some display flickers, so 1078 * vswing instead. Low vswing results in some display flickers, so
1045 * let's simply ignore the OpRegion panel type on SKL for now. 1079 * let's simply ignore the OpRegion panel type on SKL for now.
1046 */ 1080 */
1047 if (IS_SKYLAKE(dev)) { 1081 if (IS_SKYLAKE(dev_priv)) {
1048 DRM_DEBUG_KMS("Ignoring OpRegion panel type (%d)\n", ret - 1); 1082 DRM_DEBUG_KMS("Ignoring OpRegion panel type (%d)\n", ret - 1);
1049 return -ENODEV; 1083 return -ENODEV;
1050 } 1084 }
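Across this file, intel_opregion_init/fini become intel_opregion_register/unregister and, along with setup, notify_adapter and get_panel_type, now take the drm_i915_private rather than a drm_device, in line with the register/unregister rework from the merge summary. A sketch of what call sites look like under the new signatures; the surrounding functions are invented, only the two prototypes come from the hunk:

struct drm_i915_private;

void intel_opregion_register(struct drm_i915_private *dev_priv);
void intel_opregion_unregister(struct drm_i915_private *dev_priv);

static int driver_register(struct drm_i915_private *dev_priv)
{
        intel_opregion_register(dev_priv);    /* was intel_opregion_init(dev) */
        return 0;
}

static void driver_unregister(struct drm_i915_private *dev_priv)
{
        intel_opregion_unregister(dev_priv);  /* was intel_opregion_fini(dev) */
}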
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index bd38e49f7334..3212d8806b5a 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -168,7 +168,7 @@ struct overlay_registers {
168}; 168};
169 169
170struct intel_overlay { 170struct intel_overlay {
171 struct drm_device *dev; 171 struct drm_i915_private *i915;
172 struct intel_crtc *crtc; 172 struct intel_crtc *crtc;
173 struct drm_i915_gem_object *vid_bo; 173 struct drm_i915_gem_object *vid_bo;
174 struct drm_i915_gem_object *old_vid_bo; 174 struct drm_i915_gem_object *old_vid_bo;
@@ -190,15 +190,15 @@ struct intel_overlay {
190static struct overlay_registers __iomem * 190static struct overlay_registers __iomem *
191intel_overlay_map_regs(struct intel_overlay *overlay) 191intel_overlay_map_regs(struct intel_overlay *overlay)
192{ 192{
193 struct drm_i915_private *dev_priv = to_i915(overlay->dev); 193 struct drm_i915_private *dev_priv = overlay->i915;
194 struct i915_ggtt *ggtt = &dev_priv->ggtt;
195 struct overlay_registers __iomem *regs; 194 struct overlay_registers __iomem *regs;
196 195
197 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) 196 if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
198 regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr; 197 regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
199 else 198 else
200 regs = io_mapping_map_wc(ggtt->mappable, 199 regs = io_mapping_map_wc(dev_priv->ggtt.mappable,
201 i915_gem_obj_ggtt_offset(overlay->reg_bo)); 200 overlay->flip_addr,
201 PAGE_SIZE);
202 202
203 return regs; 203 return regs;
204} 204}
@@ -206,7 +206,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
206static void intel_overlay_unmap_regs(struct intel_overlay *overlay, 206static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
207 struct overlay_registers __iomem *regs) 207 struct overlay_registers __iomem *regs)
208{ 208{
209 if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev)) 209 if (!OVERLAY_NEEDS_PHYSICAL(overlay->i915))
210 io_mapping_unmap(regs); 210 io_mapping_unmap(regs);
211} 211}
212 212
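The overlay now carries drm_i915_private (overlay->i915) instead of a drm_device, and the non-physical path maps its register page with io_mapping_map_wc(mappable, overlay->flip_addr, PAGE_SIZE), i.e. an explicit offset and length rather than recomputing the object's GGTT offset. A rough userspace analogue of that map-one-page-at-an-offset shape using mmap; it only mimics the idea, not the kernel's io-mapping API:

#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>

/* Map only the page that holds the registers, at a known offset. */
static void *map_regs_page(int fd, off_t flip_addr)
{
        long page = sysconf(_SC_PAGESIZE);

        return mmap(NULL, page, PROT_READ | PROT_WRITE, MAP_SHARED,
                    fd, flip_addr & ~(page - 1));
}

static void unmap_regs_page(void *regs)
{
        munmap(regs, sysconf(_SC_PAGESIZE));
}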
@@ -232,14 +232,13 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
232/* overlay needs to be disable in OCMD reg */ 232/* overlay needs to be disable in OCMD reg */
233static int intel_overlay_on(struct intel_overlay *overlay) 233static int intel_overlay_on(struct intel_overlay *overlay)
234{ 234{
235 struct drm_device *dev = overlay->dev; 235 struct drm_i915_private *dev_priv = overlay->i915;
236 struct drm_i915_private *dev_priv = dev->dev_private;
237 struct intel_engine_cs *engine = &dev_priv->engine[RCS]; 236 struct intel_engine_cs *engine = &dev_priv->engine[RCS];
238 struct drm_i915_gem_request *req; 237 struct drm_i915_gem_request *req;
239 int ret; 238 int ret;
240 239
241 WARN_ON(overlay->active); 240 WARN_ON(overlay->active);
242 WARN_ON(IS_I830(dev) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE)); 241 WARN_ON(IS_I830(dev_priv) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
243 242
244 req = i915_gem_request_alloc(engine, NULL); 243 req = i915_gem_request_alloc(engine, NULL);
245 if (IS_ERR(req)) 244 if (IS_ERR(req))
@@ -266,8 +265,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
266static int intel_overlay_continue(struct intel_overlay *overlay, 265static int intel_overlay_continue(struct intel_overlay *overlay,
267 bool load_polyphase_filter) 266 bool load_polyphase_filter)
268{ 267{
269 struct drm_device *dev = overlay->dev; 268 struct drm_i915_private *dev_priv = overlay->i915;
270 struct drm_i915_private *dev_priv = dev->dev_private;
271 struct intel_engine_cs *engine = &dev_priv->engine[RCS]; 269 struct intel_engine_cs *engine = &dev_priv->engine[RCS];
272 struct drm_i915_gem_request *req; 270 struct drm_i915_gem_request *req;
273 u32 flip_addr = overlay->flip_addr; 271 u32 flip_addr = overlay->flip_addr;
@@ -335,8 +333,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
335/* overlay needs to be disabled in OCMD reg */ 333/* overlay needs to be disabled in OCMD reg */
336static int intel_overlay_off(struct intel_overlay *overlay) 334static int intel_overlay_off(struct intel_overlay *overlay)
337{ 335{
338 struct drm_device *dev = overlay->dev; 336 struct drm_i915_private *dev_priv = overlay->i915;
339 struct drm_i915_private *dev_priv = dev->dev_private;
340 struct intel_engine_cs *engine = &dev_priv->engine[RCS]; 337 struct intel_engine_cs *engine = &dev_priv->engine[RCS];
341 struct drm_i915_gem_request *req; 338 struct drm_i915_gem_request *req;
342 u32 flip_addr = overlay->flip_addr; 339 u32 flip_addr = overlay->flip_addr;
@@ -365,7 +362,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
365 intel_ring_emit(engine, flip_addr); 362 intel_ring_emit(engine, flip_addr);
366 intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP); 363 intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
367 /* turn overlay off */ 364 /* turn overlay off */
368 if (IS_I830(dev)) { 365 if (IS_I830(dev_priv)) {
369 /* Workaround: Don't disable the overlay fully, since otherwise 366 /* Workaround: Don't disable the overlay fully, since otherwise
370 * it dies on the next OVERLAY_ON cmd. */ 367 * it dies on the next OVERLAY_ON cmd. */
371 intel_ring_emit(engine, MI_NOOP); 368 intel_ring_emit(engine, MI_NOOP);
@@ -408,12 +405,11 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
408 */ 405 */
409static int intel_overlay_release_old_vid(struct intel_overlay *overlay) 406static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
410{ 407{
411 struct drm_device *dev = overlay->dev; 408 struct drm_i915_private *dev_priv = overlay->i915;
412 struct drm_i915_private *dev_priv = dev->dev_private;
413 struct intel_engine_cs *engine = &dev_priv->engine[RCS]; 409 struct intel_engine_cs *engine = &dev_priv->engine[RCS];
414 int ret; 410 int ret;
415 411
416 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 412 lockdep_assert_held(&dev_priv->drm.struct_mutex);
417 413
418 /* Only wait if there is actually an old frame to release to 414 /* Only wait if there is actually an old frame to release to
419 * guarantee forward progress. 415 * guarantee forward progress.
@@ -537,10 +533,10 @@ static int uv_vsubsampling(u32 format)
537 } 533 }
538} 534}
539 535
540static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width) 536static u32 calc_swidthsw(struct drm_i915_private *dev_priv, u32 offset, u32 width)
541{ 537{
542 u32 mask, shift, ret; 538 u32 mask, shift, ret;
543 if (IS_GEN2(dev)) { 539 if (IS_GEN2(dev_priv)) {
544 mask = 0x1f; 540 mask = 0x1f;
545 shift = 5; 541 shift = 5;
546 } else { 542 } else {
@@ -548,7 +544,7 @@ static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width)
548 shift = 6; 544 shift = 6;
549 } 545 }
550 ret = ((offset + width + mask) >> shift) - (offset >> shift); 546 ret = ((offset + width + mask) >> shift) - (offset >> shift);
551 if (!IS_GEN2(dev)) 547 if (!IS_GEN2(dev_priv))
552 ret <<= 1; 548 ret <<= 1;
553 ret -= 1; 549 ret -= 1;
554 return ret << 2; 550 return ret << 2;
@@ -741,12 +737,12 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
741 int ret, tmp_width; 737 int ret, tmp_width;
742 struct overlay_registers __iomem *regs; 738 struct overlay_registers __iomem *regs;
743 bool scale_changed = false; 739 bool scale_changed = false;
744 struct drm_device *dev = overlay->dev; 740 struct drm_i915_private *dev_priv = overlay->i915;
745 u32 swidth, swidthsw, sheight, ostride; 741 u32 swidth, swidthsw, sheight, ostride;
746 enum pipe pipe = overlay->crtc->pipe; 742 enum pipe pipe = overlay->crtc->pipe;
747 743
748 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 744 lockdep_assert_held(&dev_priv->drm.struct_mutex);
749 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); 745 WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
750 746
751 ret = intel_overlay_release_old_vid(overlay); 747 ret = intel_overlay_release_old_vid(overlay);
752 if (ret != 0) 748 if (ret != 0)
@@ -769,7 +765,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
769 goto out_unpin; 765 goto out_unpin;
770 } 766 }
771 oconfig = OCONF_CC_OUT_8BIT; 767 oconfig = OCONF_CC_OUT_8BIT;
772 if (IS_GEN4(overlay->dev)) 768 if (IS_GEN4(dev_priv))
773 oconfig |= OCONF_CSC_MODE_BT709; 769 oconfig |= OCONF_CSC_MODE_BT709;
774 oconfig |= pipe == 0 ? 770 oconfig |= pipe == 0 ?
775 OCONF_PIPE_A : OCONF_PIPE_B; 771 OCONF_PIPE_A : OCONF_PIPE_B;
@@ -796,7 +792,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
796 tmp_width = params->src_w; 792 tmp_width = params->src_w;
797 793
798 swidth = params->src_w; 794 swidth = params->src_w;
799 swidthsw = calc_swidthsw(overlay->dev, params->offset_Y, tmp_width); 795 swidthsw = calc_swidthsw(dev_priv, params->offset_Y, tmp_width);
800 sheight = params->src_h; 796 sheight = params->src_h;
801 iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_Y, &regs->OBUF_0Y); 797 iowrite32(i915_gem_obj_ggtt_offset(new_bo) + params->offset_Y, &regs->OBUF_0Y);
802 ostride = params->stride_Y; 798 ostride = params->stride_Y;
@@ -806,9 +802,9 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
806 int uv_vscale = uv_vsubsampling(params->format); 802 int uv_vscale = uv_vsubsampling(params->format);
807 u32 tmp_U, tmp_V; 803 u32 tmp_U, tmp_V;
808 swidth |= (params->src_w/uv_hscale) << 16; 804 swidth |= (params->src_w/uv_hscale) << 16;
809 tmp_U = calc_swidthsw(overlay->dev, params->offset_U, 805 tmp_U = calc_swidthsw(dev_priv, params->offset_U,
810 params->src_w/uv_hscale); 806 params->src_w/uv_hscale);
811 tmp_V = calc_swidthsw(overlay->dev, params->offset_V, 807 tmp_V = calc_swidthsw(dev_priv, params->offset_V,
812 params->src_w/uv_hscale); 808 params->src_w/uv_hscale);
813 swidthsw |= max_t(u32, tmp_U, tmp_V) << 16; 809 swidthsw |= max_t(u32, tmp_U, tmp_V) << 16;
814 sheight |= (params->src_h/uv_vscale) << 16; 810 sheight |= (params->src_h/uv_vscale) << 16;
@@ -840,7 +836,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
840 overlay->old_vid_bo = overlay->vid_bo; 836 overlay->old_vid_bo = overlay->vid_bo;
841 overlay->vid_bo = new_bo; 837 overlay->vid_bo = new_bo;
842 838
843 intel_frontbuffer_flip(dev, 839 intel_frontbuffer_flip(&dev_priv->drm,
844 INTEL_FRONTBUFFER_OVERLAY(pipe)); 840 INTEL_FRONTBUFFER_OVERLAY(pipe));
845 841
846 return 0; 842 return 0;
@@ -852,12 +848,12 @@ out_unpin:
852 848
853int intel_overlay_switch_off(struct intel_overlay *overlay) 849int intel_overlay_switch_off(struct intel_overlay *overlay)
854{ 850{
851 struct drm_i915_private *dev_priv = overlay->i915;
855 struct overlay_registers __iomem *regs; 852 struct overlay_registers __iomem *regs;
856 struct drm_device *dev = overlay->dev;
857 int ret; 853 int ret;
858 854
859 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 855 lockdep_assert_held(&dev_priv->drm.struct_mutex);
860 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); 856 WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
861 857
862 ret = intel_overlay_recover_from_interrupt(overlay); 858 ret = intel_overlay_recover_from_interrupt(overlay);
863 if (ret != 0) 859 if (ret != 0)
@@ -897,15 +893,14 @@ static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
897 893
898static void update_pfit_vscale_ratio(struct intel_overlay *overlay) 894static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
899{ 895{
900 struct drm_device *dev = overlay->dev; 896 struct drm_i915_private *dev_priv = overlay->i915;
901 struct drm_i915_private *dev_priv = dev->dev_private;
902 u32 pfit_control = I915_READ(PFIT_CONTROL); 897 u32 pfit_control = I915_READ(PFIT_CONTROL);
903 u32 ratio; 898 u32 ratio;
904 899
905 /* XXX: This is not the same logic as in the xorg driver, but more in 900 /* XXX: This is not the same logic as in the xorg driver, but more in
906 * line with the intel documentation for the i965 901 * line with the intel documentation for the i965
907 */ 902 */
908 if (INTEL_INFO(dev)->gen >= 4) { 903 if (INTEL_GEN(dev_priv) >= 4) {
909 /* on i965 use the PGM reg to read out the autoscaler values */ 904 /* on i965 use the PGM reg to read out the autoscaler values */
910 ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965; 905 ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965;
911 } else { 906 } else {
@@ -948,7 +943,7 @@ static int check_overlay_scaling(struct put_image_params *rec)
948 return 0; 943 return 0;
949} 944}
950 945
951static int check_overlay_src(struct drm_device *dev, 946static int check_overlay_src(struct drm_i915_private *dev_priv,
952 struct drm_intel_overlay_put_image *rec, 947 struct drm_intel_overlay_put_image *rec,
953 struct drm_i915_gem_object *new_bo) 948 struct drm_i915_gem_object *new_bo)
954{ 949{
@@ -959,7 +954,7 @@ static int check_overlay_src(struct drm_device *dev,
959 u32 tmp; 954 u32 tmp;
960 955
961 /* check src dimensions */ 956 /* check src dimensions */
962 if (IS_845G(dev) || IS_I830(dev)) { 957 if (IS_845G(dev_priv) || IS_I830(dev_priv)) {
963 if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY || 958 if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY ||
964 rec->src_width > IMAGE_MAX_WIDTH_LEGACY) 959 rec->src_width > IMAGE_MAX_WIDTH_LEGACY)
965 return -EINVAL; 960 return -EINVAL;
@@ -1011,14 +1006,14 @@ static int check_overlay_src(struct drm_device *dev,
1011 return -EINVAL; 1006 return -EINVAL;
1012 1007
1013 /* stride checking */ 1008 /* stride checking */
1014 if (IS_I830(dev) || IS_845G(dev)) 1009 if (IS_I830(dev_priv) || IS_845G(dev_priv))
1015 stride_mask = 255; 1010 stride_mask = 255;
1016 else 1011 else
1017 stride_mask = 63; 1012 stride_mask = 63;
1018 1013
1019 if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask) 1014 if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
1020 return -EINVAL; 1015 return -EINVAL;
1021 if (IS_GEN4(dev) && rec->stride_Y < 512) 1016 if (IS_GEN4(dev_priv) && rec->stride_Y < 512)
1022 return -EINVAL; 1017 return -EINVAL;
1023 1018
1024 tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ? 1019 tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ?
@@ -1063,13 +1058,13 @@ static int check_overlay_src(struct drm_device *dev,
1063 * Return the pipe currently connected to the panel fitter, 1058 * Return the pipe currently connected to the panel fitter,
1064 * or -1 if the panel fitter is not present or not in use 1059 * or -1 if the panel fitter is not present or not in use
1065 */ 1060 */
1066static int intel_panel_fitter_pipe(struct drm_device *dev) 1061static int intel_panel_fitter_pipe(struct drm_i915_private *dev_priv)
1067{ 1062{
1068 struct drm_i915_private *dev_priv = dev->dev_private;
1069 u32 pfit_control; 1063 u32 pfit_control;
1070 1064
1071 /* i830 doesn't have a panel fitter */ 1065 /* i830 doesn't have a panel fitter */
1072 if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev))) 1066 if (INTEL_GEN(dev_priv) <= 3 &&
1067 (IS_I830(dev_priv) || !IS_MOBILE(dev_priv)))
1073 return -1; 1068 return -1;
1074 1069
1075 pfit_control = I915_READ(PFIT_CONTROL); 1070 pfit_control = I915_READ(PFIT_CONTROL);
@@ -1079,18 +1074,18 @@ static int intel_panel_fitter_pipe(struct drm_device *dev)
1079 return -1; 1074 return -1;
1080 1075
1081 /* 965 can place panel fitter on either pipe */ 1076 /* 965 can place panel fitter on either pipe */
1082 if (IS_GEN4(dev)) 1077 if (IS_GEN4(dev_priv))
1083 return (pfit_control >> 29) & 0x3; 1078 return (pfit_control >> 29) & 0x3;
1084 1079
1085 /* older chips can only use pipe 1 */ 1080 /* older chips can only use pipe 1 */
1086 return 1; 1081 return 1;
1087} 1082}
1088 1083
1089int intel_overlay_put_image(struct drm_device *dev, void *data, 1084int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
1090 struct drm_file *file_priv) 1085 struct drm_file *file_priv)
1091{ 1086{
1092 struct drm_intel_overlay_put_image *put_image_rec = data; 1087 struct drm_intel_overlay_put_image *put_image_rec = data;
1093 struct drm_i915_private *dev_priv = dev->dev_private; 1088 struct drm_i915_private *dev_priv = to_i915(dev);
1094 struct intel_overlay *overlay; 1089 struct intel_overlay *overlay;
1095 struct drm_crtc *drmmode_crtc; 1090 struct drm_crtc *drmmode_crtc;
1096 struct intel_crtc *crtc; 1091 struct intel_crtc *crtc;
@@ -1162,7 +1157,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
1162 1157
1163 /* line too wide, i.e. one-line-mode */ 1158 /* line too wide, i.e. one-line-mode */
1164 if (mode->hdisplay > 1024 && 1159 if (mode->hdisplay > 1024 &&
1165 intel_panel_fitter_pipe(dev) == crtc->pipe) { 1160 intel_panel_fitter_pipe(dev_priv) == crtc->pipe) {
1166 overlay->pfit_active = true; 1161 overlay->pfit_active = true;
1167 update_pfit_vscale_ratio(overlay); 1162 update_pfit_vscale_ratio(overlay);
1168 } else 1163 } else
@@ -1196,7 +1191,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
1196 goto out_unlock; 1191 goto out_unlock;
1197 } 1192 }
1198 1193
1199 ret = check_overlay_src(dev, put_image_rec, new_bo); 1194 ret = check_overlay_src(dev_priv, put_image_rec, new_bo);
1200 if (ret != 0) 1195 if (ret != 0)
1201 goto out_unlock; 1196 goto out_unlock;
1202 params->format = put_image_rec->flags & ~I915_OVERLAY_FLAGS_MASK; 1197 params->format = put_image_rec->flags & ~I915_OVERLAY_FLAGS_MASK;
@@ -1284,11 +1279,11 @@ static int check_gamma(struct drm_intel_overlay_attrs *attrs)
1284 return 0; 1279 return 0;
1285} 1280}
1286 1281
1287int intel_overlay_attrs(struct drm_device *dev, void *data, 1282int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
1288 struct drm_file *file_priv) 1283 struct drm_file *file_priv)
1289{ 1284{
1290 struct drm_intel_overlay_attrs *attrs = data; 1285 struct drm_intel_overlay_attrs *attrs = data;
1291 struct drm_i915_private *dev_priv = dev->dev_private; 1286 struct drm_i915_private *dev_priv = to_i915(dev);
1292 struct intel_overlay *overlay; 1287 struct intel_overlay *overlay;
1293 struct overlay_registers __iomem *regs; 1288 struct overlay_registers __iomem *regs;
1294 int ret; 1289 int ret;
@@ -1309,7 +1304,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
1309 attrs->contrast = overlay->contrast; 1304 attrs->contrast = overlay->contrast;
1310 attrs->saturation = overlay->saturation; 1305 attrs->saturation = overlay->saturation;
1311 1306
1312 if (!IS_GEN2(dev)) { 1307 if (!IS_GEN2(dev_priv)) {
1313 attrs->gamma0 = I915_READ(OGAMC0); 1308 attrs->gamma0 = I915_READ(OGAMC0);
1314 attrs->gamma1 = I915_READ(OGAMC1); 1309 attrs->gamma1 = I915_READ(OGAMC1);
1315 attrs->gamma2 = I915_READ(OGAMC2); 1310 attrs->gamma2 = I915_READ(OGAMC2);
@@ -1341,7 +1336,7 @@ int intel_overlay_attrs(struct drm_device *dev, void *data,
1341 intel_overlay_unmap_regs(overlay, regs); 1336 intel_overlay_unmap_regs(overlay, regs);
1342 1337
1343 if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) { 1338 if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
1344 if (IS_GEN2(dev)) 1339 if (IS_GEN2(dev_priv))
1345 goto out_unlock; 1340 goto out_unlock;
1346 1341
1347 if (overlay->active) { 1342 if (overlay->active) {
@@ -1371,37 +1366,37 @@ out_unlock:
1371 return ret; 1366 return ret;
1372} 1367}
1373 1368
1374void intel_setup_overlay(struct drm_device *dev) 1369void intel_setup_overlay(struct drm_i915_private *dev_priv)
1375{ 1370{
1376 struct drm_i915_private *dev_priv = dev->dev_private;
1377 struct intel_overlay *overlay; 1371 struct intel_overlay *overlay;
1378 struct drm_i915_gem_object *reg_bo; 1372 struct drm_i915_gem_object *reg_bo;
1379 struct overlay_registers __iomem *regs; 1373 struct overlay_registers __iomem *regs;
1380 int ret; 1374 int ret;
1381 1375
1382 if (!HAS_OVERLAY(dev)) 1376 if (!HAS_OVERLAY(dev_priv))
1383 return; 1377 return;
1384 1378
1385 overlay = kzalloc(sizeof(*overlay), GFP_KERNEL); 1379 overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
1386 if (!overlay) 1380 if (!overlay)
1387 return; 1381 return;
1388 1382
1389 mutex_lock(&dev->struct_mutex); 1383 mutex_lock(&dev_priv->drm.struct_mutex);
1390 if (WARN_ON(dev_priv->overlay)) 1384 if (WARN_ON(dev_priv->overlay))
1391 goto out_free; 1385 goto out_free;
1392 1386
1393 overlay->dev = dev; 1387 overlay->i915 = dev_priv;
1394 1388
1395 reg_bo = NULL; 1389 reg_bo = NULL;
1396 if (!OVERLAY_NEEDS_PHYSICAL(dev)) 1390 if (!OVERLAY_NEEDS_PHYSICAL(dev_priv))
1397 reg_bo = i915_gem_object_create_stolen(dev, PAGE_SIZE); 1391 reg_bo = i915_gem_object_create_stolen(&dev_priv->drm,
1398 if (reg_bo == NULL) 1392 PAGE_SIZE);
1399 reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
1400 if (reg_bo == NULL) 1393 if (reg_bo == NULL)
1394 reg_bo = i915_gem_object_create(&dev_priv->drm, PAGE_SIZE);
1395 if (IS_ERR(reg_bo))
1401 goto out_free; 1396 goto out_free;
1402 overlay->reg_bo = reg_bo; 1397 overlay->reg_bo = reg_bo;
1403 1398
1404 if (OVERLAY_NEEDS_PHYSICAL(dev)) { 1399 if (OVERLAY_NEEDS_PHYSICAL(dev_priv)) {
1405 ret = i915_gem_object_attach_phys(reg_bo, PAGE_SIZE); 1400 ret = i915_gem_object_attach_phys(reg_bo, PAGE_SIZE);
1406 if (ret) { 1401 if (ret) {
1407 DRM_ERROR("failed to attach phys overlay regs\n"); 1402 DRM_ERROR("failed to attach phys overlay regs\n");
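
Note the error-handling change folded into this hunk: the old i915_gem_alloc_object() signalled failure with a NULL pointer, whereas i915_gem_object_create() returns an ERR_PTR-encoded pointer, so the final check becomes IS_ERR(reg_bo). A userspace re-implementation of that convention, shown only to illustrate why "== NULL" no longer catches the failure:

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Stand-in allocator: on failure the errno is encoded in the pointer
 * value itself, so the result is non-NULL yet still an error. */
static void *create_object(int fail)
{
        return fail ? ERR_PTR(-ENOMEM) : (void *)0x1000;
}

int main(void)
{
        void *obj = create_object(1);

        if (obj == NULL)
                printf("never reached: the error value is not NULL\n");
        if (IS_ERR(obj))
                printf("allocation failed: %ld\n", PTR_ERR(obj));
        return 0;
}
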
@@ -1441,25 +1436,23 @@ void intel_setup_overlay(struct drm_device *dev)
1441 intel_overlay_unmap_regs(overlay, regs); 1436 intel_overlay_unmap_regs(overlay, regs);
1442 1437
1443 dev_priv->overlay = overlay; 1438 dev_priv->overlay = overlay;
1444 mutex_unlock(&dev->struct_mutex); 1439 mutex_unlock(&dev_priv->drm.struct_mutex);
1445 DRM_INFO("initialized overlay support\n"); 1440 DRM_INFO("initialized overlay support\n");
1446 return; 1441 return;
1447 1442
1448out_unpin_bo: 1443out_unpin_bo:
1449 if (!OVERLAY_NEEDS_PHYSICAL(dev)) 1444 if (!OVERLAY_NEEDS_PHYSICAL(dev_priv))
1450 i915_gem_object_ggtt_unpin(reg_bo); 1445 i915_gem_object_ggtt_unpin(reg_bo);
1451out_free_bo: 1446out_free_bo:
1452 drm_gem_object_unreference(&reg_bo->base); 1447 drm_gem_object_unreference(&reg_bo->base);
1453out_free: 1448out_free:
1454 mutex_unlock(&dev->struct_mutex); 1449 mutex_unlock(&dev_priv->drm.struct_mutex);
1455 kfree(overlay); 1450 kfree(overlay);
1456 return; 1451 return;
1457} 1452}
1458 1453
1459void intel_cleanup_overlay(struct drm_device *dev) 1454void intel_cleanup_overlay(struct drm_i915_private *dev_priv)
1460{ 1455{
1461 struct drm_i915_private *dev_priv = dev->dev_private;
1462
1463 if (!dev_priv->overlay) 1456 if (!dev_priv->overlay)
1464 return; 1457 return;
1465 1458
@@ -1482,18 +1475,17 @@ struct intel_overlay_error_state {
1482static struct overlay_registers __iomem * 1475static struct overlay_registers __iomem *
1483intel_overlay_map_regs_atomic(struct intel_overlay *overlay) 1476intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
1484{ 1477{
1485 struct drm_i915_private *dev_priv = to_i915(overlay->dev); 1478 struct drm_i915_private *dev_priv = overlay->i915;
1486 struct i915_ggtt *ggtt = &dev_priv->ggtt;
1487 struct overlay_registers __iomem *regs; 1479 struct overlay_registers __iomem *regs;
1488 1480
1489 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) 1481 if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
1490 /* Cast to make sparse happy, but it's wc memory anyway, so 1482 /* Cast to make sparse happy, but it's wc memory anyway, so
1491 * equivalent to the wc io mapping on X86. */ 1483 * equivalent to the wc io mapping on X86. */
1492 regs = (struct overlay_registers __iomem *) 1484 regs = (struct overlay_registers __iomem *)
1493 overlay->reg_bo->phys_handle->vaddr; 1485 overlay->reg_bo->phys_handle->vaddr;
1494 else 1486 else
1495 regs = io_mapping_map_atomic_wc(ggtt->mappable, 1487 regs = io_mapping_map_atomic_wc(dev_priv->ggtt.mappable,
1496 i915_gem_obj_ggtt_offset(overlay->reg_bo)); 1488 overlay->flip_addr);
1497 1489
1498 return regs; 1490 return regs;
1499} 1491}
@@ -1501,15 +1493,13 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
1501static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay, 1493static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
1502 struct overlay_registers __iomem *regs) 1494 struct overlay_registers __iomem *regs)
1503{ 1495{
1504 if (!OVERLAY_NEEDS_PHYSICAL(overlay->dev)) 1496 if (!OVERLAY_NEEDS_PHYSICAL(overlay->i915))
1505 io_mapping_unmap_atomic(regs); 1497 io_mapping_unmap_atomic(regs);
1506} 1498}
1507 1499
1508
1509struct intel_overlay_error_state * 1500struct intel_overlay_error_state *
1510intel_overlay_capture_error_state(struct drm_device *dev) 1501intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
1511{ 1502{
1512 struct drm_i915_private *dev_priv = dev->dev_private;
1513 struct intel_overlay *overlay = dev_priv->overlay; 1503 struct intel_overlay *overlay = dev_priv->overlay;
1514 struct intel_overlay_error_state *error; 1504 struct intel_overlay_error_state *error;
1515 struct overlay_registers __iomem *regs; 1505 struct overlay_registers __iomem *regs;
@@ -1523,10 +1513,7 @@ intel_overlay_capture_error_state(struct drm_device *dev)
1523 1513
1524 error->dovsta = I915_READ(DOVSTA); 1514 error->dovsta = I915_READ(DOVSTA);
1525 error->isr = I915_READ(ISR); 1515 error->isr = I915_READ(ISR);
1526 if (OVERLAY_NEEDS_PHYSICAL(overlay->dev)) 1516 error->base = overlay->flip_addr;
1527 error->base = (__force long)overlay->reg_bo->phys_handle->vaddr;
1528 else
1529 error->base = i915_gem_obj_ggtt_offset(overlay->reg_bo);
1530 1517
1531 regs = intel_overlay_map_regs_atomic(overlay); 1518 regs = intel_overlay_map_regs_atomic(overlay);
1532 if (!regs) 1519 if (!regs)
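
The pattern running through this file (and the two below) is one mechanical conversion: overlay code stores and passes struct drm_i915_private directly, dev->dev_private becomes to_i915(dev), and the drm_device is reached as &dev_priv->drm because it is embedded in the private structure. A sketch of that relationship, matching the usage above but not copied from the driver headers:

/* Sketch: with drm_device embedded in the driver-private structure,
 * conversion in either direction is pointer arithmetic rather than a
 * dev->dev_private dereference. */
struct drm_i915_private {
        struct drm_device drm;          /* embedded, not pointed to */
        /* ... the rest of the driver state ... */
};

static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
{
        return container_of(dev, struct drm_i915_private, drm);
}

/* Going the other way is just taking the member's address:
 *      struct drm_device *dev = &dev_priv->drm;
 */
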
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index aba94099886b..96c65d77e886 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -377,7 +377,7 @@ out:
377enum drm_connector_status 377enum drm_connector_status
378intel_panel_detect(struct drm_device *dev) 378intel_panel_detect(struct drm_device *dev)
379{ 379{
380 struct drm_i915_private *dev_priv = dev->dev_private; 380 struct drm_i915_private *dev_priv = to_i915(dev);
381 381
382 /* Assume that the BIOS does not lie through the OpRegion... */ 382 /* Assume that the BIOS does not lie through the OpRegion... */
383 if (!i915.panel_ignore_lid && dev_priv->opregion.lid_state) { 383 if (!i915.panel_ignore_lid && dev_priv->opregion.lid_state) {
@@ -504,7 +504,7 @@ static u32 i9xx_get_backlight(struct intel_connector *connector)
504 if (panel->backlight.combination_mode) { 504 if (panel->backlight.combination_mode) {
505 u8 lbpc; 505 u8 lbpc;
506 506
507 pci_read_config_byte(dev_priv->dev->pdev, LBPC, &lbpc); 507 pci_read_config_byte(dev_priv->drm.pdev, LBPC, &lbpc);
508 val *= lbpc; 508 val *= lbpc;
509 } 509 }
510 510
@@ -592,7 +592,7 @@ static void i9xx_set_backlight(struct intel_connector *connector, u32 level)
592 592
593 lbpc = level * 0xfe / panel->backlight.max + 1; 593 lbpc = level * 0xfe / panel->backlight.max + 1;
594 level /= lbpc; 594 level /= lbpc;
595 pci_write_config_byte(dev_priv->dev->pdev, LBPC, lbpc); 595 pci_write_config_byte(dev_priv->drm.pdev, LBPC, lbpc);
596 } 596 }
597 597
598 if (IS_GEN4(dev_priv)) { 598 if (IS_GEN4(dev_priv)) {
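
In combination mode the requested brightness is split into a coarse LBPC byte written to PCI config space and a residual PWM duty cycle, as the two lines above do. A worked example with made-up numbers; the combined maximum is assumed here to be the PWM maximum scaled by 0xff:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t pwm_max = 1000;            /* illustrative PWM period */
        uint32_t max = pwm_max * 0xff;      /* assumed combined maximum */
        uint32_t level = max / 2;           /* 50% brightness request */

        uint32_t lbpc = level * 0xfe / max + 1;   /* coarse factor, 1..255 */
        uint32_t pwm  = level / lbpc;             /* residual duty cycle */

        printf("lbpc=%u pwm=%u (of %u)\n", lbpc, pwm, pwm_max);
        return 0;
}
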
@@ -822,7 +822,7 @@ void intel_panel_disable_backlight(struct intel_connector *connector)
822 * backlight. This will leave the backlight on unnecessarily when 822 * backlight. This will leave the backlight on unnecessarily when
823 * another client is not activated. 823 * another client is not activated.
824 */ 824 */
825 if (dev_priv->dev->switch_power_state == DRM_SWITCH_POWER_CHANGING) { 825 if (dev_priv->drm.switch_power_state == DRM_SWITCH_POWER_CHANGING) {
826 DRM_DEBUG_DRIVER("Skipping backlight disable on vga switch\n"); 826 DRM_DEBUG_DRIVER("Skipping backlight disable on vga switch\n");
827 return; 827 return;
828 } 828 }
@@ -1142,7 +1142,7 @@ static int intel_backlight_device_get_brightness(struct backlight_device *bd)
1142{ 1142{
1143 struct intel_connector *connector = bl_get_data(bd); 1143 struct intel_connector *connector = bl_get_data(bd);
1144 struct drm_device *dev = connector->base.dev; 1144 struct drm_device *dev = connector->base.dev;
1145 struct drm_i915_private *dev_priv = dev->dev_private; 1145 struct drm_i915_private *dev_priv = to_i915(dev);
1146 u32 hw_level; 1146 u32 hw_level;
1147 int ret; 1147 int ret;
1148 1148
@@ -1163,7 +1163,7 @@ static const struct backlight_ops intel_backlight_device_ops = {
1163 .get_brightness = intel_backlight_device_get_brightness, 1163 .get_brightness = intel_backlight_device_get_brightness,
1164}; 1164};
1165 1165
1166static int intel_backlight_device_register(struct intel_connector *connector) 1166int intel_backlight_device_register(struct intel_connector *connector)
1167{ 1167{
1168 struct intel_panel *panel = &connector->panel; 1168 struct intel_panel *panel = &connector->panel;
1169 struct backlight_properties props; 1169 struct backlight_properties props;
@@ -1216,7 +1216,7 @@ static int intel_backlight_device_register(struct intel_connector *connector)
1216 return 0; 1216 return 0;
1217} 1217}
1218 1218
1219static void intel_backlight_device_unregister(struct intel_connector *connector) 1219void intel_backlight_device_unregister(struct intel_connector *connector)
1220{ 1220{
1221 struct intel_panel *panel = &connector->panel; 1221 struct intel_panel *panel = &connector->panel;
1222 1222
@@ -1225,14 +1225,6 @@ static void intel_backlight_device_unregister(struct intel_connector *connector)
1225 panel->backlight.device = NULL; 1225 panel->backlight.device = NULL;
1226 } 1226 }
1227} 1227}
1228#else /* CONFIG_BACKLIGHT_CLASS_DEVICE */
1229static int intel_backlight_device_register(struct intel_connector *connector)
1230{
1231 return 0;
1232}
1233static void intel_backlight_device_unregister(struct intel_connector *connector)
1234{
1235}
1236#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */ 1228#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */
1237 1229
1238/* 1230/*
@@ -1324,7 +1316,7 @@ static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
1324static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz) 1316static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
1325{ 1317{
1326 struct drm_device *dev = connector->base.dev; 1318 struct drm_device *dev = connector->base.dev;
1327 struct drm_i915_private *dev_priv = dev->dev_private; 1319 struct drm_i915_private *dev_priv = to_i915(dev);
1328 int clock; 1320 int clock;
1329 1321
1330 if (IS_G4X(dev_priv)) 1322 if (IS_G4X(dev_priv))
@@ -1724,6 +1716,14 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
1724 container_of(panel, struct intel_connector, panel); 1716 container_of(panel, struct intel_connector, panel);
1725 struct drm_i915_private *dev_priv = to_i915(connector->base.dev); 1717 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
1726 1718
1719 if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP &&
1720 intel_dp_aux_init_backlight_funcs(connector) == 0)
1721 return;
1722
1723 if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI &&
1724 intel_dsi_dcs_init_backlight_funcs(connector) == 0)
1725 return;
1726
1727 if (IS_BROXTON(dev_priv)) { 1727 if (IS_BROXTON(dev_priv)) {
1728 panel->backlight.setup = bxt_setup_backlight; 1728 panel->backlight.setup = bxt_setup_backlight;
1729 panel->backlight.enable = bxt_enable_backlight; 1729 panel->backlight.enable = bxt_enable_backlight;
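
The new hunk above gives connector-specific backlight backends first refusal: panels that expose backlight control over the eDP AUX channel or via DSI DCS commands claim the connector by returning 0, and only otherwise does the code fall through to the platform PWM setup below. A standalone sketch of that dispatch order; all names and fields here are illustrative:

#include <stdio.h>

struct connector { int is_edp; int has_aux_backlight; };

/* Returns 0 when this backend claims backlight control. */
static int aux_backlight_init(const struct connector *c)
{
        return (c->is_edp && c->has_aux_backlight) ? 0 : -1;
}

static void backlight_init(const struct connector *c)
{
        if (aux_backlight_init(c) == 0) {
                printf("using AUX/DPCD backlight control\n");
                return;
        }
        printf("falling back to platform PWM backlight\n");
}

int main(void)
{
        struct connector edp  = { .is_edp = 1, .has_aux_backlight = 1 };
        struct connector lvds = { 0 };

        backlight_init(&edp);
        backlight_init(&lvds);
        return 0;
}
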
@@ -1805,19 +1805,3 @@ void intel_panel_fini(struct intel_panel *panel)
1805 drm_mode_destroy(intel_connector->base.dev, 1805 drm_mode_destroy(intel_connector->base.dev,
1806 panel->downclock_mode); 1806 panel->downclock_mode);
1807} 1807}
1808
1809void intel_backlight_register(struct drm_device *dev)
1810{
1811 struct intel_connector *connector;
1812
1813 for_each_intel_connector(dev, connector)
1814 intel_backlight_device_register(connector);
1815}
1816
1817void intel_backlight_unregister(struct drm_device *dev)
1818{
1819 struct intel_connector *connector;
1820
1821 for_each_intel_connector(dev, connector)
1822 intel_backlight_device_unregister(connector);
1823}
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 2863b92c9da6..f4f3fcc8b3be 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -26,6 +26,7 @@
26 */ 26 */
27 27
28#include <linux/cpufreq.h> 28#include <linux/cpufreq.h>
29#include <drm/drm_plane_helper.h>
29#include "i915_drv.h" 30#include "i915_drv.h"
30#include "intel_drv.h" 31#include "intel_drv.h"
31#include "../../../platform/x86/intel_ips.h" 32#include "../../../platform/x86/intel_ips.h"
@@ -82,7 +83,7 @@ static void gen9_init_clock_gating(struct drm_device *dev)
82 83
83static void bxt_init_clock_gating(struct drm_device *dev) 84static void bxt_init_clock_gating(struct drm_device *dev)
84{ 85{
85 struct drm_i915_private *dev_priv = dev->dev_private; 86 struct drm_i915_private *dev_priv = to_i915(dev);
86 87
87 gen9_init_clock_gating(dev); 88 gen9_init_clock_gating(dev);
88 89
@@ -108,7 +109,7 @@ static void bxt_init_clock_gating(struct drm_device *dev)
108 109
109static void i915_pineview_get_mem_freq(struct drm_device *dev) 110static void i915_pineview_get_mem_freq(struct drm_device *dev)
110{ 111{
111 struct drm_i915_private *dev_priv = dev->dev_private; 112 struct drm_i915_private *dev_priv = to_i915(dev);
112 u32 tmp; 113 u32 tmp;
113 114
114 tmp = I915_READ(CLKCFG); 115 tmp = I915_READ(CLKCFG);
@@ -147,7 +148,7 @@ static void i915_pineview_get_mem_freq(struct drm_device *dev)
147 148
148static void i915_ironlake_get_mem_freq(struct drm_device *dev) 149static void i915_ironlake_get_mem_freq(struct drm_device *dev)
149{ 150{
150 struct drm_i915_private *dev_priv = dev->dev_private; 151 struct drm_i915_private *dev_priv = to_i915(dev);
151 u16 ddrpll, csipll; 152 u16 ddrpll, csipll;
152 153
153 ddrpll = I915_READ16(DDRMPLL1); 154 ddrpll = I915_READ16(DDRMPLL1);
@@ -318,7 +319,7 @@ static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable)
318 319
319void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable) 320void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable)
320{ 321{
321 struct drm_device *dev = dev_priv->dev; 322 struct drm_device *dev = &dev_priv->drm;
322 u32 val; 323 u32 val;
323 324
324 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { 325 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
@@ -374,7 +375,7 @@ static const int pessimal_latency_ns = 5000;
374static int vlv_get_fifo_size(struct drm_device *dev, 375static int vlv_get_fifo_size(struct drm_device *dev,
375 enum pipe pipe, int plane) 376 enum pipe pipe, int plane)
376{ 377{
377 struct drm_i915_private *dev_priv = dev->dev_private; 378 struct drm_i915_private *dev_priv = to_i915(dev);
378 int sprite0_start, sprite1_start, size; 379 int sprite0_start, sprite1_start, size;
379 380
380 switch (pipe) { 381 switch (pipe) {
@@ -425,7 +426,7 @@ static int vlv_get_fifo_size(struct drm_device *dev,
425 426
426static int i9xx_get_fifo_size(struct drm_device *dev, int plane) 427static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
427{ 428{
428 struct drm_i915_private *dev_priv = dev->dev_private; 429 struct drm_i915_private *dev_priv = to_i915(dev);
429 uint32_t dsparb = I915_READ(DSPARB); 430 uint32_t dsparb = I915_READ(DSPARB);
430 int size; 431 int size;
431 432
@@ -441,7 +442,7 @@ static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
441 442
442static int i830_get_fifo_size(struct drm_device *dev, int plane) 443static int i830_get_fifo_size(struct drm_device *dev, int plane)
443{ 444{
444 struct drm_i915_private *dev_priv = dev->dev_private; 445 struct drm_i915_private *dev_priv = to_i915(dev);
445 uint32_t dsparb = I915_READ(DSPARB); 446 uint32_t dsparb = I915_READ(DSPARB);
446 int size; 447 int size;
447 448
@@ -458,7 +459,7 @@ static int i830_get_fifo_size(struct drm_device *dev, int plane)
458 459
459static int i845_get_fifo_size(struct drm_device *dev, int plane) 460static int i845_get_fifo_size(struct drm_device *dev, int plane)
460{ 461{
461 struct drm_i915_private *dev_priv = dev->dev_private; 462 struct drm_i915_private *dev_priv = to_i915(dev);
462 uint32_t dsparb = I915_READ(DSPARB); 463 uint32_t dsparb = I915_READ(DSPARB);
463 int size; 464 int size;
464 465
@@ -636,7 +637,7 @@ static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
636static void pineview_update_wm(struct drm_crtc *unused_crtc) 637static void pineview_update_wm(struct drm_crtc *unused_crtc)
637{ 638{
638 struct drm_device *dev = unused_crtc->dev; 639 struct drm_device *dev = unused_crtc->dev;
639 struct drm_i915_private *dev_priv = dev->dev_private; 640 struct drm_i915_private *dev_priv = to_i915(dev);
640 struct drm_crtc *crtc; 641 struct drm_crtc *crtc;
641 const struct cxsr_latency *latency; 642 const struct cxsr_latency *latency;
642 u32 reg; 643 u32 reg;
@@ -933,7 +934,7 @@ static unsigned int vlv_wm_method2(unsigned int pixel_rate,
933 934
934static void vlv_setup_wm_latency(struct drm_device *dev) 935static void vlv_setup_wm_latency(struct drm_device *dev)
935{ 936{
936 struct drm_i915_private *dev_priv = dev->dev_private; 937 struct drm_i915_private *dev_priv = to_i915(dev);
937 938
938 /* all latencies in usec */ 939 /* all latencies in usec */
939 dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3; 940 dev_priv->wm.pri_latency[VLV_WM_LEVEL_PM2] = 3;
@@ -1324,7 +1325,7 @@ static void vlv_merge_wm(struct drm_device *dev,
1324static void vlv_update_wm(struct drm_crtc *crtc) 1325static void vlv_update_wm(struct drm_crtc *crtc)
1325{ 1326{
1326 struct drm_device *dev = crtc->dev; 1327 struct drm_device *dev = crtc->dev;
1327 struct drm_i915_private *dev_priv = dev->dev_private; 1328 struct drm_i915_private *dev_priv = to_i915(dev);
1328 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1329 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1329 enum pipe pipe = intel_crtc->pipe; 1330 enum pipe pipe = intel_crtc->pipe;
1330 struct vlv_wm_values wm = {}; 1331 struct vlv_wm_values wm = {};
@@ -1380,7 +1381,7 @@ static void g4x_update_wm(struct drm_crtc *crtc)
1380{ 1381{
1381 struct drm_device *dev = crtc->dev; 1382 struct drm_device *dev = crtc->dev;
1382 static const int sr_latency_ns = 12000; 1383 static const int sr_latency_ns = 12000;
1383 struct drm_i915_private *dev_priv = dev->dev_private; 1384 struct drm_i915_private *dev_priv = to_i915(dev);
1384 int planea_wm, planeb_wm, cursora_wm, cursorb_wm; 1385 int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
1385 int plane_sr, cursor_sr; 1386 int plane_sr, cursor_sr;
1386 unsigned int enabled = 0; 1387 unsigned int enabled = 0;
@@ -1437,7 +1438,7 @@ static void g4x_update_wm(struct drm_crtc *crtc)
1437static void i965_update_wm(struct drm_crtc *unused_crtc) 1438static void i965_update_wm(struct drm_crtc *unused_crtc)
1438{ 1439{
1439 struct drm_device *dev = unused_crtc->dev; 1440 struct drm_device *dev = unused_crtc->dev;
1440 struct drm_i915_private *dev_priv = dev->dev_private; 1441 struct drm_i915_private *dev_priv = to_i915(dev);
1441 struct drm_crtc *crtc; 1442 struct drm_crtc *crtc;
1442 int srwm = 1; 1443 int srwm = 1;
1443 int cursor_sr = 16; 1444 int cursor_sr = 16;
@@ -1511,7 +1512,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc)
1511static void i9xx_update_wm(struct drm_crtc *unused_crtc) 1512static void i9xx_update_wm(struct drm_crtc *unused_crtc)
1512{ 1513{
1513 struct drm_device *dev = unused_crtc->dev; 1514 struct drm_device *dev = unused_crtc->dev;
1514 struct drm_i915_private *dev_priv = dev->dev_private; 1515 struct drm_i915_private *dev_priv = to_i915(dev);
1515 const struct intel_watermark_params *wm_info; 1516 const struct intel_watermark_params *wm_info;
1516 uint32_t fwater_lo; 1517 uint32_t fwater_lo;
1517 uint32_t fwater_hi; 1518 uint32_t fwater_hi;
@@ -1641,7 +1642,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc)
1641static void i845_update_wm(struct drm_crtc *unused_crtc) 1642static void i845_update_wm(struct drm_crtc *unused_crtc)
1642{ 1643{
1643 struct drm_device *dev = unused_crtc->dev; 1644 struct drm_device *dev = unused_crtc->dev;
1644 struct drm_i915_private *dev_priv = dev->dev_private; 1645 struct drm_i915_private *dev_priv = to_i915(dev);
1645 struct drm_crtc *crtc; 1646 struct drm_crtc *crtc;
1646 const struct drm_display_mode *adjusted_mode; 1647 const struct drm_display_mode *adjusted_mode;
1647 uint32_t fwater_lo; 1648 uint32_t fwater_lo;
@@ -2040,10 +2041,10 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
2040} 2041}
2041 2042
2042static uint32_t 2043static uint32_t
2043hsw_compute_linetime_wm(struct drm_device *dev, 2044hsw_compute_linetime_wm(const struct intel_crtc_state *cstate)
2044 struct intel_crtc_state *cstate)
2045{ 2045{
2046 struct drm_i915_private *dev_priv = dev->dev_private; 2046 const struct intel_atomic_state *intel_state =
2047 to_intel_atomic_state(cstate->base.state);
2047 const struct drm_display_mode *adjusted_mode = 2048 const struct drm_display_mode *adjusted_mode =
2048 &cstate->base.adjusted_mode; 2049 &cstate->base.adjusted_mode;
2049 u32 linetime, ips_linetime; 2050 u32 linetime, ips_linetime;
@@ -2052,7 +2053,7 @@ hsw_compute_linetime_wm(struct drm_device *dev,
2052 return 0; 2053 return 0;
2053 if (WARN_ON(adjusted_mode->crtc_clock == 0)) 2054 if (WARN_ON(adjusted_mode->crtc_clock == 0))
2054 return 0; 2055 return 0;
2055 if (WARN_ON(dev_priv->cdclk_freq == 0)) 2056 if (WARN_ON(intel_state->cdclk == 0))
2056 return 0; 2057 return 0;
2057 2058
2058 /* The WM are computed with base on how long it takes to fill a single 2059 /* The WM are computed with base on how long it takes to fill a single
@@ -2061,7 +2062,7 @@ hsw_compute_linetime_wm(struct drm_device *dev,
2061 linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8, 2062 linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
2062 adjusted_mode->crtc_clock); 2063 adjusted_mode->crtc_clock);
2063 ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8, 2064 ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
2064 dev_priv->cdclk_freq); 2065 intel_state->cdclk);
2065 2066
2066 return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) | 2067 return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
2067 PIPE_WM_LINETIME_TIME(linetime); 2068 PIPE_WM_LINETIME_TIME(linetime);
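
With crtc_clock and cdclk both in kHz, htotal * 1000 * 8 / clock works out to the time needed to scan one line expressed in 1/8 microsecond units. The functional change in this hunk is only where cdclk comes from: the atomic state being checked rather than the currently programmed dev_priv->cdclk_freq. A worked example with illustrative timings:

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
        uint32_t htotal = 2200;          /* e.g. a 1080p60 timing */
        uint32_t crtc_clock = 148500;    /* pixel clock in kHz */
        uint32_t cdclk = 450000;         /* display core clock in kHz */

        uint32_t linetime = DIV_ROUND_CLOSEST(htotal * 1000 * 8, crtc_clock);
        uint32_t ips_linetime = DIV_ROUND_CLOSEST(htotal * 1000 * 8, cdclk);

        /* 2200 / 148.5 MHz = 14.8 us per line, i.e. 119 in 1/8 us units */
        printf("linetime=%u ips_linetime=%u\n", linetime, ips_linetime);
        return 0;
}
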
@@ -2069,7 +2070,7 @@ hsw_compute_linetime_wm(struct drm_device *dev,
2069 2070
2070static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8]) 2071static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
2071{ 2072{
2072 struct drm_i915_private *dev_priv = dev->dev_private; 2073 struct drm_i915_private *dev_priv = to_i915(dev);
2073 2074
2074 if (IS_GEN9(dev)) { 2075 if (IS_GEN9(dev)) {
2075 uint32_t val; 2076 uint32_t val;
@@ -2174,14 +2175,14 @@ static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
2174static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5]) 2175static void intel_fixup_spr_wm_latency(struct drm_device *dev, uint16_t wm[5])
2175{ 2176{
2176 /* ILK sprite LP0 latency is 1300 ns */ 2177 /* ILK sprite LP0 latency is 1300 ns */
2177 if (INTEL_INFO(dev)->gen == 5) 2178 if (IS_GEN5(dev))
2178 wm[0] = 13; 2179 wm[0] = 13;
2179} 2180}
2180 2181
2181static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5]) 2182static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
2182{ 2183{
2183 /* ILK cursor LP0 latency is 1300 ns */ 2184 /* ILK cursor LP0 latency is 1300 ns */
2184 if (INTEL_INFO(dev)->gen == 5) 2185 if (IS_GEN5(dev))
2185 wm[0] = 13; 2186 wm[0] = 13;
2186 2187
2187 /* WaDoubleCursorLP3Latency:ivb */ 2188 /* WaDoubleCursorLP3Latency:ivb */
@@ -2235,7 +2236,7 @@ static void intel_print_wm_latency(struct drm_device *dev,
2235static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv, 2236static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
2236 uint16_t wm[5], uint16_t min) 2237 uint16_t wm[5], uint16_t min)
2237{ 2238{
2238 int level, max_level = ilk_wm_max_level(dev_priv->dev); 2239 int level, max_level = ilk_wm_max_level(&dev_priv->drm);
2239 2240
2240 if (wm[0] >= min) 2241 if (wm[0] >= min)
2241 return false; 2242 return false;
@@ -2249,7 +2250,7 @@ static bool ilk_increase_wm_latency(struct drm_i915_private *dev_priv,
2249 2250
2250static void snb_wm_latency_quirk(struct drm_device *dev) 2251static void snb_wm_latency_quirk(struct drm_device *dev)
2251{ 2252{
2252 struct drm_i915_private *dev_priv = dev->dev_private; 2253 struct drm_i915_private *dev_priv = to_i915(dev);
2253 bool changed; 2254 bool changed;
2254 2255
2255 /* 2256 /*
@@ -2271,7 +2272,7 @@ static void snb_wm_latency_quirk(struct drm_device *dev)
2271 2272
2272static void ilk_setup_wm_latency(struct drm_device *dev) 2273static void ilk_setup_wm_latency(struct drm_device *dev)
2273{ 2274{
2274 struct drm_i915_private *dev_priv = dev->dev_private; 2275 struct drm_i915_private *dev_priv = to_i915(dev);
2275 2276
2276 intel_read_wm_latency(dev, dev_priv->wm.pri_latency); 2277 intel_read_wm_latency(dev, dev_priv->wm.pri_latency);
2277 2278
@@ -2293,7 +2294,7 @@ static void ilk_setup_wm_latency(struct drm_device *dev)
2293 2294
2294static void skl_setup_wm_latency(struct drm_device *dev) 2295static void skl_setup_wm_latency(struct drm_device *dev)
2295{ 2296{
2296 struct drm_i915_private *dev_priv = dev->dev_private; 2297 struct drm_i915_private *dev_priv = to_i915(dev);
2297 2298
2298 intel_read_wm_latency(dev, dev_priv->wm.skl_latency); 2299 intel_read_wm_latency(dev, dev_priv->wm.skl_latency);
2299 intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency); 2300 intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency);
@@ -2329,7 +2330,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
2329 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); 2330 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
2330 struct intel_pipe_wm *pipe_wm; 2331 struct intel_pipe_wm *pipe_wm;
2331 struct drm_device *dev = state->dev; 2332 struct drm_device *dev = state->dev;
2332 const struct drm_i915_private *dev_priv = dev->dev_private; 2333 const struct drm_i915_private *dev_priv = to_i915(dev);
2333 struct intel_plane *intel_plane; 2334 struct intel_plane *intel_plane;
2334 struct intel_plane_state *pristate = NULL; 2335 struct intel_plane_state *pristate = NULL;
2335 struct intel_plane_state *sprstate = NULL; 2336 struct intel_plane_state *sprstate = NULL;
@@ -2337,7 +2338,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
2337 int level, max_level = ilk_wm_max_level(dev), usable_level; 2338 int level, max_level = ilk_wm_max_level(dev), usable_level;
2338 struct ilk_wm_maximums max; 2339 struct ilk_wm_maximums max;
2339 2340
2340 pipe_wm = &cstate->wm.optimal.ilk; 2341 pipe_wm = &cstate->wm.ilk.optimal;
2341 2342
2342 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { 2343 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
2343 struct intel_plane_state *ps; 2344 struct intel_plane_state *ps;
@@ -2380,7 +2381,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
2380 pipe_wm->wm[0] = pipe_wm->raw_wm[0]; 2381 pipe_wm->wm[0] = pipe_wm->raw_wm[0];
2381 2382
2382 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 2383 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2383 pipe_wm->linetime = hsw_compute_linetime_wm(dev, cstate); 2384 pipe_wm->linetime = hsw_compute_linetime_wm(cstate);
2384 2385
2385 if (!ilk_validate_pipe_wm(dev, pipe_wm)) 2386 if (!ilk_validate_pipe_wm(dev, pipe_wm))
2386 return -EINVAL; 2387 return -EINVAL;
@@ -2419,7 +2420,7 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
2419 struct intel_crtc *intel_crtc, 2420 struct intel_crtc *intel_crtc,
2420 struct intel_crtc_state *newstate) 2421 struct intel_crtc_state *newstate)
2421{ 2422{
2422 struct intel_pipe_wm *a = &newstate->wm.intermediate; 2423 struct intel_pipe_wm *a = &newstate->wm.ilk.intermediate;
2423 struct intel_pipe_wm *b = &intel_crtc->wm.active.ilk; 2424 struct intel_pipe_wm *b = &intel_crtc->wm.active.ilk;
2424 int level, max_level = ilk_wm_max_level(dev); 2425 int level, max_level = ilk_wm_max_level(dev);
2425 2426
@@ -2428,7 +2429,7 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
2428 * currently active watermarks to get values that are safe both before 2429 * currently active watermarks to get values that are safe both before
2429 * and after the vblank. 2430 * and after the vblank.
2430 */ 2431 */
2431 *a = newstate->wm.optimal.ilk; 2432 *a = newstate->wm.ilk.optimal;
2432 a->pipe_enabled |= b->pipe_enabled; 2433 a->pipe_enabled |= b->pipe_enabled;
2433 a->sprites_enabled |= b->sprites_enabled; 2434 a->sprites_enabled |= b->sprites_enabled;
2434 a->sprites_scaled |= b->sprites_scaled; 2435 a->sprites_scaled |= b->sprites_scaled;
@@ -2457,7 +2458,7 @@ static int ilk_compute_intermediate_wm(struct drm_device *dev,
2457 * If our intermediate WM are identical to the final WM, then we can 2458 * If our intermediate WM are identical to the final WM, then we can
2458 * omit the post-vblank programming; only update if it's different. 2459 * omit the post-vblank programming; only update if it's different.
2459 */ 2460 */
2460 if (memcmp(a, &newstate->wm.optimal.ilk, sizeof(*a)) == 0) 2461 if (memcmp(a, &newstate->wm.ilk.optimal, sizeof(*a)) == 0)
2461 newstate->wm.need_postvbl_update = false; 2462 newstate->wm.need_postvbl_update = false;
2462 2463
2463 return 0; 2464 return 0;
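
The idea behind the intermediate watermarks in these hunks is that whatever gets programmed before the vblank has to be safe for both the old and the new plane configuration, so each field is merged toward the more conservative of the active and optimal values; if the merge ends up identical to the optimal values, the post-vblank write is skipped. A toy sketch of that merge, with a single level and made-up fields:

#include <stdio.h>

struct pipe_wm { unsigned int pri; unsigned int cur; int pipe_enabled; };

/* Conservative merge: take the larger (safer) value of each field. */
static struct pipe_wm intermediate_wm(struct pipe_wm active,
                                      struct pipe_wm optimal)
{
        struct pipe_wm mid = optimal;

        mid.pipe_enabled |= active.pipe_enabled;
        if (active.pri > mid.pri)
                mid.pri = active.pri;
        if (active.cur > mid.cur)
                mid.cur = active.cur;
        return mid;
}

int main(void)
{
        struct pipe_wm active  = { .pri = 12, .cur = 4, .pipe_enabled = 1 };
        struct pipe_wm optimal = { .pri = 8,  .cur = 6, .pipe_enabled = 1 };
        struct pipe_wm mid = intermediate_wm(active, optimal);

        printf("intermediate: pri=%u cur=%u\n", mid.pri, mid.cur);
        return 0;
}
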
@@ -2504,7 +2505,7 @@ static void ilk_wm_merge(struct drm_device *dev,
2504 const struct ilk_wm_maximums *max, 2505 const struct ilk_wm_maximums *max,
2505 struct intel_pipe_wm *merged) 2506 struct intel_pipe_wm *merged)
2506{ 2507{
2507 struct drm_i915_private *dev_priv = dev->dev_private; 2508 struct drm_i915_private *dev_priv = to_i915(dev);
2508 int level, max_level = ilk_wm_max_level(dev); 2509 int level, max_level = ilk_wm_max_level(dev);
2509 int last_enabled_level = max_level; 2510 int last_enabled_level = max_level;
2510 2511
@@ -2564,7 +2565,7 @@ static int ilk_wm_lp_to_level(int wm_lp, const struct intel_pipe_wm *pipe_wm)
2564/* The value we need to program into the WM_LPx latency field */ 2565/* The value we need to program into the WM_LPx latency field */
2565static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level) 2566static unsigned int ilk_wm_lp_latency(struct drm_device *dev, int level)
2566{ 2567{
2567 struct drm_i915_private *dev_priv = dev->dev_private; 2568 struct drm_i915_private *dev_priv = to_i915(dev);
2568 2569
2569 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 2570 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2570 return 2 * level; 2571 return 2 * level;
@@ -2764,7 +2765,7 @@ static bool _ilk_disable_lp_wm(struct drm_i915_private *dev_priv,
2764static void ilk_write_wm_values(struct drm_i915_private *dev_priv, 2765static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
2765 struct ilk_wm_values *results) 2766 struct ilk_wm_values *results)
2766{ 2767{
2767 struct drm_device *dev = dev_priv->dev; 2768 struct drm_device *dev = &dev_priv->drm;
2768 struct ilk_wm_values *previous = &dev_priv->wm.hw; 2769 struct ilk_wm_values *previous = &dev_priv->wm.hw;
2769 unsigned int dirty; 2770 unsigned int dirty;
2770 uint32_t val; 2771 uint32_t val;
@@ -2839,7 +2840,7 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
2839 2840
2840bool ilk_disable_lp_wm(struct drm_device *dev) 2841bool ilk_disable_lp_wm(struct drm_device *dev)
2841{ 2842{
2842 struct drm_i915_private *dev_priv = dev->dev_private; 2843 struct drm_i915_private *dev_priv = to_i915(dev);
2843 2844
2844 return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL); 2845 return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
2845} 2846}
@@ -2877,20 +2878,29 @@ skl_wm_plane_id(const struct intel_plane *plane)
2877static void 2878static void
2878skl_ddb_get_pipe_allocation_limits(struct drm_device *dev, 2879skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
2879 const struct intel_crtc_state *cstate, 2880 const struct intel_crtc_state *cstate,
2880 const struct intel_wm_config *config, 2881 struct skl_ddb_entry *alloc, /* out */
2881 struct skl_ddb_entry *alloc /* out */) 2882 int *num_active /* out */)
2882{ 2883{
2884 struct drm_atomic_state *state = cstate->base.state;
2885 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
2886 struct drm_i915_private *dev_priv = to_i915(dev);
2883 struct drm_crtc *for_crtc = cstate->base.crtc; 2887 struct drm_crtc *for_crtc = cstate->base.crtc;
2884 struct drm_crtc *crtc;
2885 unsigned int pipe_size, ddb_size; 2888 unsigned int pipe_size, ddb_size;
2886 int nth_active_pipe; 2889 int nth_active_pipe;
2890 int pipe = to_intel_crtc(for_crtc)->pipe;
2887 2891
2888 if (!cstate->base.active) { 2892 if (WARN_ON(!state) || !cstate->base.active) {
2889 alloc->start = 0; 2893 alloc->start = 0;
2890 alloc->end = 0; 2894 alloc->end = 0;
2895 *num_active = hweight32(dev_priv->active_crtcs);
2891 return; 2896 return;
2892 } 2897 }
2893 2898
2899 if (intel_state->active_pipe_changes)
2900 *num_active = hweight32(intel_state->active_crtcs);
2901 else
2902 *num_active = hweight32(dev_priv->active_crtcs);
2903
2894 if (IS_BROXTON(dev)) 2904 if (IS_BROXTON(dev))
2895 ddb_size = BXT_DDB_SIZE; 2905 ddb_size = BXT_DDB_SIZE;
2896 else 2906 else
@@ -2898,25 +2908,29 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
2898 2908
2899 ddb_size -= 4; /* 4 blocks for bypass path allocation */ 2909 ddb_size -= 4; /* 4 blocks for bypass path allocation */
2900 2910
2901 nth_active_pipe = 0; 2911 /*
2902 for_each_crtc(dev, crtc) { 2912 * If the state doesn't change the active CRTC's, then there's
2903 if (!to_intel_crtc(crtc)->active) 2913 * no need to recalculate; the existing pipe allocation limits
2904 continue; 2914 * should remain unchanged. Note that we're safe from racing
2905 2915 * commits since any racing commit that changes the active CRTC
2906 if (crtc == for_crtc) 2916 * list would need to grab _all_ crtc locks, including the one
2907 break; 2917 * we currently hold.
2908 2918 */
2909 nth_active_pipe++; 2919 if (!intel_state->active_pipe_changes) {
2920 *alloc = dev_priv->wm.skl_hw.ddb.pipe[pipe];
2921 return;
2910 } 2922 }
2911 2923
2912 pipe_size = ddb_size / config->num_pipes_active; 2924 nth_active_pipe = hweight32(intel_state->active_crtcs &
2913 alloc->start = nth_active_pipe * ddb_size / config->num_pipes_active; 2925 (drm_crtc_mask(for_crtc) - 1));
2926 pipe_size = ddb_size / hweight32(intel_state->active_crtcs);
2927 alloc->start = nth_active_pipe * ddb_size / *num_active;
2914 alloc->end = alloc->start + pipe_size; 2928 alloc->end = alloc->start + pipe_size;
2915} 2929}
2916 2930
2917static unsigned int skl_cursor_allocation(const struct intel_wm_config *config) 2931static unsigned int skl_cursor_allocation(int num_active)
2918{ 2932{
2919 if (config->num_pipes_active == 1) 2933 if (num_active == 1)
2920 return 32; 2934 return 32;
2921 2935
2922 return 8; 2936 return 8;
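
The rewritten allocation-limits helper no longer walks the CRTC list to find this pipe's position: active_crtcs is a bitmask, drm_crtc_mask() is the bit for this CRTC, so masking with (bit - 1) and counting the remaining set bits (hweight32) yields how many active pipes precede it. A standalone equivalent of that bit trick:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t active_crtcs = 0xb;      /* pipes A, B and D active */
        uint32_t this_crtc_bit = 1u << 3; /* this CRTC is pipe D */

        /* hweight32() in the kernel is a population count. */
        unsigned int nth_active_pipe =
                __builtin_popcount(active_crtcs & (this_crtc_bit - 1));
        unsigned int num_active = __builtin_popcount(active_crtcs);

        printf("pipe D is active pipe %u of %u\n", nth_active_pipe, num_active);
        return 0;
}
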
@@ -2960,6 +2974,46 @@ void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
2960 } 2974 }
2961} 2975}
2962 2976
2977/*
2978 * Determines the downscale amount of a plane for the purposes of watermark calculations.
2979 * The bspec defines downscale amount as:
2980 *
2981 * """
2982 * Horizontal down scale amount = maximum[1, Horizontal source size /
2983 * Horizontal destination size]
2984 * Vertical down scale amount = maximum[1, Vertical source size /
2985 * Vertical destination size]
2986 * Total down scale amount = Horizontal down scale amount *
2987 * Vertical down scale amount
2988 * """
2989 *
2990 * Return value is provided in 16.16 fixed point form to retain fractional part.
2991 * Caller should take care of dividing & rounding off the value.
2992 */
2993static uint32_t
2994skl_plane_downscale_amount(const struct intel_plane_state *pstate)
2995{
2996 uint32_t downscale_h, downscale_w;
2997 uint32_t src_w, src_h, dst_w, dst_h;
2998
2999 if (WARN_ON(!pstate->visible))
3000 return DRM_PLANE_HELPER_NO_SCALING;
3001
3002 /* n.b., src is 16.16 fixed point, dst is whole integer */
3003 src_w = drm_rect_width(&pstate->src);
3004 src_h = drm_rect_height(&pstate->src);
3005 dst_w = drm_rect_width(&pstate->dst);
3006 dst_h = drm_rect_height(&pstate->dst);
3007 if (intel_rotation_90_or_270(pstate->base.rotation))
3008 swap(dst_w, dst_h);
3009
3010 downscale_h = max(src_h / dst_h, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
3011 downscale_w = max(src_w / dst_w, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
3012
3013 /* Provide result in 16.16 fixed point */
3014 return (uint64_t)downscale_w * downscale_h >> 16;
3015}
3016
2963static unsigned int 3017static unsigned int
2964skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, 3018skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
2965 const struct drm_plane_state *pstate, 3019 const struct drm_plane_state *pstate,
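
The downscale helper added above works in 16.16 fixed point throughout: the drm_rect source sizes are already 16.16 while the destination sizes are whole pixels, so a plain division gives each axis ratio in 16.16, each ratio is clamped to at least 1.0, and the 32.32 product is shifted back down by 16. A worked example of that arithmetic with illustrative sizes:

#include <stdint.h>
#include <stdio.h>

#define FP_ONE (1u << 16)                 /* 1.0 in 16.16 fixed point */

int main(void)
{
        uint32_t src_w = 3840u << 16;     /* source, 16.16 */
        uint32_t src_h = 2160u << 16;
        uint32_t dst_w = 1920, dst_h = 1080;   /* destination, integer */

        uint32_t down_w = src_w / dst_w;  /* 16.16 / int = 16.16 ratio */
        uint32_t down_h = src_h / dst_h;
        if (down_w < FP_ONE) down_w = FP_ONE;  /* never below 1.0 */
        if (down_h < FP_ONE) down_h = FP_ONE;

        /* 16.16 * 16.16 = 32.32; shift by 16 to get back to 16.16 */
        uint32_t total = (uint64_t)down_w * down_h >> 16;

        printf("total downscale = %u.%02u\n",
               total >> 16, (total & 0xffff) * 100 / 65536);
        return 0;
}
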
@@ -2967,7 +3021,16 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
2967{ 3021{
2968 struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate); 3022 struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
2969 struct drm_framebuffer *fb = pstate->fb; 3023 struct drm_framebuffer *fb = pstate->fb;
3024 uint32_t down_scale_amount, data_rate;
2970 uint32_t width = 0, height = 0; 3025 uint32_t width = 0, height = 0;
3026 unsigned format = fb ? fb->pixel_format : DRM_FORMAT_XRGB8888;
3027
3028 if (!intel_pstate->visible)
3029 return 0;
3030 if (pstate->plane->type == DRM_PLANE_TYPE_CURSOR)
3031 return 0;
3032 if (y && format != DRM_FORMAT_NV12)
3033 return 0;
2971 3034
2972 width = drm_rect_width(&intel_pstate->src) >> 16; 3035 width = drm_rect_width(&intel_pstate->src) >> 16;
2973 height = drm_rect_height(&intel_pstate->src) >> 16; 3036 height = drm_rect_height(&intel_pstate->src) >> 16;
@@ -2976,17 +3039,21 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
2976 swap(width, height); 3039 swap(width, height);
2977 3040
2978 /* for planar format */ 3041 /* for planar format */
2979 if (fb->pixel_format == DRM_FORMAT_NV12) { 3042 if (format == DRM_FORMAT_NV12) {
2980 if (y) /* y-plane data rate */ 3043 if (y) /* y-plane data rate */
2981 return width * height * 3044 data_rate = width * height *
2982 drm_format_plane_cpp(fb->pixel_format, 0); 3045 drm_format_plane_cpp(format, 0);
2983 else /* uv-plane data rate */ 3046 else /* uv-plane data rate */
2984 return (width / 2) * (height / 2) * 3047 data_rate = (width / 2) * (height / 2) *
2985 drm_format_plane_cpp(fb->pixel_format, 1); 3048 drm_format_plane_cpp(format, 1);
3049 } else {
3050 /* for packed formats */
3051 data_rate = width * height * drm_format_plane_cpp(format, 0);
2986 } 3052 }
2987 3053
2988 /* for packed formats */ 3054 down_scale_amount = skl_plane_downscale_amount(intel_pstate);
2989 return width * height * drm_format_plane_cpp(fb->pixel_format, 0); 3055
3056 return (uint64_t)data_rate * down_scale_amount >> 16;
2990} 3057}
2991 3058
2992/* 3059/*
@@ -2995,86 +3062,188 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
2995 * 3 * 4096 * 8192 * 4 < 2^32 3062 * 3 * 4096 * 8192 * 4 < 2^32
2996 */ 3063 */
2997static unsigned int 3064static unsigned int
2998skl_get_total_relative_data_rate(const struct intel_crtc_state *cstate) 3065skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate)
2999{ 3066{
3000 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); 3067 struct drm_crtc_state *cstate = &intel_cstate->base;
3001 struct drm_device *dev = intel_crtc->base.dev; 3068 struct drm_atomic_state *state = cstate->state;
3069 struct drm_crtc *crtc = cstate->crtc;
3070 struct drm_device *dev = crtc->dev;
3071 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3072 const struct drm_plane *plane;
3002 const struct intel_plane *intel_plane; 3073 const struct intel_plane *intel_plane;
3003 unsigned int total_data_rate = 0; 3074 struct drm_plane_state *pstate;
3075 unsigned int rate, total_data_rate = 0;
3076 int id;
3077 int i;
3004 3078
3005 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { 3079 if (WARN_ON(!state))
3006 const struct drm_plane_state *pstate = intel_plane->base.state; 3080 return 0;
3007 3081
3008 if (pstate->fb == NULL) 3082 /* Calculate and cache data rate for each plane */
3009 continue; 3083 for_each_plane_in_state(state, plane, pstate, i) {
3084 id = skl_wm_plane_id(to_intel_plane(plane));
3085 intel_plane = to_intel_plane(plane);
3010 3086
3011 if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR) 3087 if (intel_plane->pipe != intel_crtc->pipe)
3012 continue; 3088 continue;
3013 3089
3014 /* packed/uv */ 3090 /* packed/uv */
3015 total_data_rate += skl_plane_relative_data_rate(cstate, 3091 rate = skl_plane_relative_data_rate(intel_cstate,
3016 pstate, 3092 pstate, 0);
3017 0); 3093 intel_cstate->wm.skl.plane_data_rate[id] = rate;
3094
3095 /* y-plane */
3096 rate = skl_plane_relative_data_rate(intel_cstate,
3097 pstate, 1);
3098 intel_cstate->wm.skl.plane_y_data_rate[id] = rate;
3099 }
3100
3101 /* Calculate CRTC's total data rate from cached values */
3102 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
3103 int id = skl_wm_plane_id(intel_plane);
3018 3104
3019 if (pstate->fb->pixel_format == DRM_FORMAT_NV12) 3105 /* packed/uv */
3020 /* y-plane */ 3106 total_data_rate += intel_cstate->wm.skl.plane_data_rate[id];
3021 total_data_rate += skl_plane_relative_data_rate(cstate, 3107 total_data_rate += intel_cstate->wm.skl.plane_y_data_rate[id];
3022 pstate,
3023 1);
3024 } 3108 }
3025 3109
3110 WARN_ON(cstate->plane_mask && total_data_rate == 0);
3111
3026 return total_data_rate; 3112 return total_data_rate;
3027} 3113}
3028 3114
3029static void 3115static uint16_t
3116skl_ddb_min_alloc(const struct drm_plane_state *pstate,
3117 const int y)
3118{
3119 struct drm_framebuffer *fb = pstate->fb;
3120 struct intel_plane_state *intel_pstate = to_intel_plane_state(pstate);
3121 uint32_t src_w, src_h;
3122 uint32_t min_scanlines = 8;
3123 uint8_t plane_bpp;
3124
3125 if (WARN_ON(!fb))
3126 return 0;
3127
3128 /* For packed formats, no y-plane, return 0 */
3129 if (y && fb->pixel_format != DRM_FORMAT_NV12)
3130 return 0;
3131
3132 /* For Non Y-tile return 8-blocks */
3133 if (fb->modifier[0] != I915_FORMAT_MOD_Y_TILED &&
3134 fb->modifier[0] != I915_FORMAT_MOD_Yf_TILED)
3135 return 8;
3136
3137 src_w = drm_rect_width(&intel_pstate->src) >> 16;
3138 src_h = drm_rect_height(&intel_pstate->src) >> 16;
3139
3140 if (intel_rotation_90_or_270(pstate->rotation))
3141 swap(src_w, src_h);
3142
3143 /* Halve UV plane width and height for NV12 */
3144 if (fb->pixel_format == DRM_FORMAT_NV12 && !y) {
3145 src_w /= 2;
3146 src_h /= 2;
3147 }
3148
3149 if (fb->pixel_format == DRM_FORMAT_NV12 && !y)
3150 plane_bpp = drm_format_plane_cpp(fb->pixel_format, 1);
3151 else
3152 plane_bpp = drm_format_plane_cpp(fb->pixel_format, 0);
3153
3154 if (intel_rotation_90_or_270(pstate->rotation)) {
3155 switch (plane_bpp) {
3156 case 1:
3157 min_scanlines = 32;
3158 break;
3159 case 2:
3160 min_scanlines = 16;
3161 break;
3162 case 4:
3163 min_scanlines = 8;
3164 break;
3165 case 8:
3166 min_scanlines = 4;
3167 break;
3168 default:
3169 WARN(1, "Unsupported pixel depth %u for rotation",
3170 plane_bpp);
3171 min_scanlines = 32;
3172 }
3173 }
3174
3175 return DIV_ROUND_UP((4 * src_w * plane_bpp), 512) * min_scanlines/4 + 3;
3176}
3177
3178static int
3030skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, 3179skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
3031 struct skl_ddb_allocation *ddb /* out */) 3180 struct skl_ddb_allocation *ddb /* out */)
3032{ 3181{
3182 struct drm_atomic_state *state = cstate->base.state;
3033 struct drm_crtc *crtc = cstate->base.crtc; 3183 struct drm_crtc *crtc = cstate->base.crtc;
3034 struct drm_device *dev = crtc->dev; 3184 struct drm_device *dev = crtc->dev;
3035 struct drm_i915_private *dev_priv = to_i915(dev);
3036 struct intel_wm_config *config = &dev_priv->wm.config;
3037 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3185 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3038 struct intel_plane *intel_plane; 3186 struct intel_plane *intel_plane;
3187 struct drm_plane *plane;
3188 struct drm_plane_state *pstate;
3039 enum pipe pipe = intel_crtc->pipe; 3189 enum pipe pipe = intel_crtc->pipe;
3040 struct skl_ddb_entry *alloc = &ddb->pipe[pipe]; 3190 struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
3041 uint16_t alloc_size, start, cursor_blocks; 3191 uint16_t alloc_size, start, cursor_blocks;
3042 uint16_t minimum[I915_MAX_PLANES]; 3192 uint16_t *minimum = cstate->wm.skl.minimum_blocks;
3043 uint16_t y_minimum[I915_MAX_PLANES]; 3193 uint16_t *y_minimum = cstate->wm.skl.minimum_y_blocks;
3044 unsigned int total_data_rate; 3194 unsigned int total_data_rate;
3195 int num_active;
3196 int id, i;
3045 3197
3046 skl_ddb_get_pipe_allocation_limits(dev, cstate, config, alloc); 3198 if (WARN_ON(!state))
3199 return 0;
3200
3201 if (!cstate->base.active) {
3202 ddb->pipe[pipe].start = ddb->pipe[pipe].end = 0;
3203 memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
3204 memset(ddb->y_plane[pipe], 0, sizeof(ddb->y_plane[pipe]));
3205 return 0;
3206 }
3207
3208 skl_ddb_get_pipe_allocation_limits(dev, cstate, alloc, &num_active);
3047 alloc_size = skl_ddb_entry_size(alloc); 3209 alloc_size = skl_ddb_entry_size(alloc);
3048 if (alloc_size == 0) { 3210 if (alloc_size == 0) {
3049 memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe])); 3211 memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
3050 memset(&ddb->plane[pipe][PLANE_CURSOR], 0, 3212 return 0;
3051 sizeof(ddb->plane[pipe][PLANE_CURSOR]));
3052 return;
3053 } 3213 }
3054 3214
3055 cursor_blocks = skl_cursor_allocation(config); 3215 cursor_blocks = skl_cursor_allocation(num_active);
3056 ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - cursor_blocks; 3216 ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - cursor_blocks;
3057 ddb->plane[pipe][PLANE_CURSOR].end = alloc->end; 3217 ddb->plane[pipe][PLANE_CURSOR].end = alloc->end;
3058 3218
3059 alloc_size -= cursor_blocks; 3219 alloc_size -= cursor_blocks;
3060 alloc->end -= cursor_blocks;
3061 3220
3062 /* 1. Allocate the mininum required blocks for each active plane */ 3221 /* 1. Allocate the mininum required blocks for each active plane */
3063 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { 3222 for_each_plane_in_state(state, plane, pstate, i) {
3064 struct drm_plane *plane = &intel_plane->base; 3223 intel_plane = to_intel_plane(plane);
3065 struct drm_framebuffer *fb = plane->state->fb; 3224 id = skl_wm_plane_id(intel_plane);
3066 int id = skl_wm_plane_id(intel_plane);
3067 3225
3068 if (!to_intel_plane_state(plane->state)->visible) 3226 if (intel_plane->pipe != pipe)
3069 continue; 3227 continue;
3070 3228
3071 if (plane->type == DRM_PLANE_TYPE_CURSOR) 3229 if (!to_intel_plane_state(pstate)->visible) {
3230 minimum[id] = 0;
3231 y_minimum[id] = 0;
3232 continue;
3233 }
3234 if (plane->type == DRM_PLANE_TYPE_CURSOR) {
3235 minimum[id] = 0;
3236 y_minimum[id] = 0;
3072 continue; 3237 continue;
3238 }
3239
3240 minimum[id] = skl_ddb_min_alloc(pstate, 0);
3241 y_minimum[id] = skl_ddb_min_alloc(pstate, 1);
3242 }
3073 3243
3074 minimum[id] = 8; 3244 for (i = 0; i < PLANE_CURSOR; i++) {
3075 alloc_size -= minimum[id]; 3245 alloc_size -= minimum[i];
3076 y_minimum[id] = (fb->pixel_format == DRM_FORMAT_NV12) ? 8 : 0; 3246 alloc_size -= y_minimum[i];
3077 alloc_size -= y_minimum[id];
3078 } 3247 }
3079 3248
3080 /* 3249 /*
@@ -3084,21 +3253,16 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
3084 * FIXME: we may not allocate every single block here. 3253 * FIXME: we may not allocate every single block here.
3085 */ 3254 */
3086 total_data_rate = skl_get_total_relative_data_rate(cstate); 3255 total_data_rate = skl_get_total_relative_data_rate(cstate);
3256 if (total_data_rate == 0)
3257 return 0;
3087 3258
3088 start = alloc->start; 3259 start = alloc->start;
3089 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { 3260 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
3090 struct drm_plane *plane = &intel_plane->base;
3091 struct drm_plane_state *pstate = intel_plane->base.state;
3092 unsigned int data_rate, y_data_rate; 3261 unsigned int data_rate, y_data_rate;
3093 uint16_t plane_blocks, y_plane_blocks = 0; 3262 uint16_t plane_blocks, y_plane_blocks = 0;
3094 int id = skl_wm_plane_id(intel_plane); 3263 int id = skl_wm_plane_id(intel_plane);
3095 3264
3096 if (!to_intel_plane_state(pstate)->visible) 3265 data_rate = cstate->wm.skl.plane_data_rate[id];
3097 continue;
3098 if (plane->type == DRM_PLANE_TYPE_CURSOR)
3099 continue;
3100
3101 data_rate = skl_plane_relative_data_rate(cstate, pstate, 0);
3102 3266
3103 /* 3267 /*
3104 * allocation for (packed formats) or (uv-plane part of planar format): 3268 * allocation for (packed formats) or (uv-plane part of planar format):
@@ -3109,30 +3273,32 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate,
3109 plane_blocks += div_u64((uint64_t)alloc_size * data_rate, 3273 plane_blocks += div_u64((uint64_t)alloc_size * data_rate,
3110 total_data_rate); 3274 total_data_rate);
3111 3275
3112 ddb->plane[pipe][id].start = start; 3276 /* Leave disabled planes at (0,0) */
3113 ddb->plane[pipe][id].end = start + plane_blocks; 3277 if (data_rate) {
3278 ddb->plane[pipe][id].start = start;
3279 ddb->plane[pipe][id].end = start + plane_blocks;
3280 }
3114 3281
3115 start += plane_blocks; 3282 start += plane_blocks;
3116 3283
3117 /* 3284 /*
3118 * allocation for y_plane part of planar format: 3285 * allocation for y_plane part of planar format:
3119 */ 3286 */
3120 if (pstate->fb->pixel_format == DRM_FORMAT_NV12) { 3287 y_data_rate = cstate->wm.skl.plane_y_data_rate[id];
3121 y_data_rate = skl_plane_relative_data_rate(cstate, 3288
3122 pstate, 3289 y_plane_blocks = y_minimum[id];
3123 1); 3290 y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
3124 y_plane_blocks = y_minimum[id]; 3291 total_data_rate);
3125 y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate,
3126 total_data_rate);
3127 3292
3293 if (y_data_rate) {
3128 ddb->y_plane[pipe][id].start = start; 3294 ddb->y_plane[pipe][id].start = start;
3129 ddb->y_plane[pipe][id].end = start + y_plane_blocks; 3295 ddb->y_plane[pipe][id].end = start + y_plane_blocks;
3130
3131 start += y_plane_blocks;
3132 } 3296 }
3133 3297
3298 start += y_plane_blocks;
3134 } 3299 }
3135 3300
3301 return 0;
3136} 3302}
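
The loop above hands out the DDB blocks left over after the per-plane minimums in proportion to each plane's relative data rate (plane_blocks = minimum[id] + alloc_size * data_rate / total_data_rate), packing consecutive planes back to back from alloc->start. A minimal standalone sketch of that proportional split, using made-up block counts and data rates rather than anything read from hardware:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t alloc_size = 400;             /* blocks left after minimums */
	uint32_t rate[3] = { 100, 50, 50 };    /* hypothetical relative data rates */
	uint32_t total_rate = 200;
	uint16_t start = 0;

	for (int i = 0; i < 3; i++) {
		/* same shape as plane_blocks += alloc_size * data_rate / total */
		uint16_t blocks = (uint64_t)alloc_size * rate[i] / total_rate;

		printf("plane %d: ddb [%u, %u)\n", i,
		       (unsigned)start, (unsigned)(start + blocks));
		start += blocks;
	}
	return 0;
}

With these example numbers the three planes get [0,200), [200,300) and [300,400), exhausting the pool in data-rate order.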
3137 3303
3138static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config) 3304static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_state *config)
@@ -3189,35 +3355,41 @@ static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
3189 return ret; 3355 return ret;
3190} 3356}
3191 3357
3192static bool skl_ddb_allocation_changed(const struct skl_ddb_allocation *new_ddb, 3358static uint32_t skl_adjusted_plane_pixel_rate(const struct intel_crtc_state *cstate,
3193 const struct intel_crtc *intel_crtc) 3359 struct intel_plane_state *pstate)
3194{ 3360{
3195 struct drm_device *dev = intel_crtc->base.dev; 3361 uint64_t adjusted_pixel_rate;
3196 struct drm_i915_private *dev_priv = dev->dev_private; 3362 uint64_t downscale_amount;
3197 const struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb; 3363 uint64_t pixel_rate;
3364
3365 /* Shouldn't reach here on disabled planes... */
3366 if (WARN_ON(!pstate->visible))
3367 return 0;
3198 3368
3199 /* 3369 /*
3200 * If ddb allocation of pipes changed, it may require recalculation of 3370 * Adjusted plane pixel rate is just the pipe's adjusted pixel rate
3201 * watermarks 3371 * with additional adjustments for plane-specific scaling.
3202 */ 3372 */
3203 if (memcmp(new_ddb->pipe, cur_ddb->pipe, sizeof(new_ddb->pipe))) 3373 adjusted_pixel_rate = skl_pipe_pixel_rate(cstate);
3204 return true; 3374 downscale_amount = skl_plane_downscale_amount(pstate);
3375
3376 pixel_rate = adjusted_pixel_rate * downscale_amount >> 16;
3377 WARN_ON(pixel_rate != clamp_t(uint32_t, pixel_rate, 0, ~0));
3205 3378
3206 return false; 3379 return pixel_rate;
3207} 3380}
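
skl_adjusted_plane_pixel_rate() scales the pipe's pixel rate by the plane's downscale amount, which is carried as a 16.16 fixed-point value; that is why the product is shifted right by 16 and then checked against a 32-bit clamp. A small sketch of that fixed-point step with an assumed 1.5x downscale (the rates here are example numbers only):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t pipe_pixel_rate = 148500;   /* example pipe rate, in kHz */
	uint64_t downscale = 3ULL << 15;     /* 1.5 in 16.16 fixed point */
	uint64_t plane_rate = pipe_pixel_rate * downscale >> 16;

	/* 148500 * 1.5 = 222750, still comfortably within 32 bits */
	printf("adjusted plane pixel rate: %llu kHz\n",
	       (unsigned long long)plane_rate);
	return 0;
}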
3208 3381
3209static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv, 3382static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
3210 struct intel_crtc_state *cstate, 3383 struct intel_crtc_state *cstate,
3211 struct intel_plane *intel_plane, 3384 struct intel_plane_state *intel_pstate,
3212 uint16_t ddb_allocation, 3385 uint16_t ddb_allocation,
3213 int level, 3386 int level,
3214 uint16_t *out_blocks, /* out */ 3387 uint16_t *out_blocks, /* out */
3215 uint8_t *out_lines /* out */) 3388 uint8_t *out_lines, /* out */
3389 bool *enabled /* out */)
3216{ 3390{
3217 struct drm_plane *plane = &intel_plane->base; 3391 struct drm_plane_state *pstate = &intel_pstate->base;
3218 struct drm_framebuffer *fb = plane->state->fb; 3392 struct drm_framebuffer *fb = pstate->fb;
3219 struct intel_plane_state *intel_pstate =
3220 to_intel_plane_state(plane->state);
3221 uint32_t latency = dev_priv->wm.skl_latency[level]; 3393 uint32_t latency = dev_priv->wm.skl_latency[level];
3222 uint32_t method1, method2; 3394 uint32_t method1, method2;
3223 uint32_t plane_bytes_per_line, plane_blocks_per_line; 3395 uint32_t plane_bytes_per_line, plane_blocks_per_line;
@@ -3225,20 +3397,24 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
3225 uint32_t selected_result; 3397 uint32_t selected_result;
3226 uint8_t cpp; 3398 uint8_t cpp;
3227 uint32_t width = 0, height = 0; 3399 uint32_t width = 0, height = 0;
3400 uint32_t plane_pixel_rate;
3228 3401
3229 if (latency == 0 || !cstate->base.active || !intel_pstate->visible) 3402 if (latency == 0 || !cstate->base.active || !intel_pstate->visible) {
3230 return false; 3403 *enabled = false;
3404 return 0;
3405 }
3231 3406
3232 width = drm_rect_width(&intel_pstate->src) >> 16; 3407 width = drm_rect_width(&intel_pstate->src) >> 16;
3233 height = drm_rect_height(&intel_pstate->src) >> 16; 3408 height = drm_rect_height(&intel_pstate->src) >> 16;
3234 3409
3235 if (intel_rotation_90_or_270(plane->state->rotation)) 3410 if (intel_rotation_90_or_270(pstate->rotation))
3236 swap(width, height); 3411 swap(width, height);
3237 3412
3238 cpp = drm_format_plane_cpp(fb->pixel_format, 0); 3413 cpp = drm_format_plane_cpp(fb->pixel_format, 0);
3239 method1 = skl_wm_method1(skl_pipe_pixel_rate(cstate), 3414 plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate);
3240 cpp, latency); 3415
3241 method2 = skl_wm_method2(skl_pipe_pixel_rate(cstate), 3416 method1 = skl_wm_method1(plane_pixel_rate, cpp, latency);
3417 method2 = skl_wm_method2(plane_pixel_rate,
3242 cstate->base.adjusted_mode.crtc_htotal, 3418 cstate->base.adjusted_mode.crtc_htotal,
3243 width, 3419 width,
3244 cpp, 3420 cpp,
@@ -3252,7 +3428,7 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
3252 fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) { 3428 fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) {
3253 uint32_t min_scanlines = 4; 3429 uint32_t min_scanlines = 4;
3254 uint32_t y_tile_minimum; 3430 uint32_t y_tile_minimum;
3255 if (intel_rotation_90_or_270(plane->state->rotation)) { 3431 if (intel_rotation_90_or_270(pstate->rotation)) {
3256 int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ? 3432 int cpp = (fb->pixel_format == DRM_FORMAT_NV12) ?
3257 drm_format_plane_cpp(fb->pixel_format, 1) : 3433 drm_format_plane_cpp(fb->pixel_format, 1) :
3258 drm_format_plane_cpp(fb->pixel_format, 0); 3434 drm_format_plane_cpp(fb->pixel_format, 0);
@@ -3288,40 +3464,100 @@ static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
3288 res_blocks++; 3464 res_blocks++;
3289 } 3465 }
3290 3466
3291 if (res_blocks >= ddb_allocation || res_lines > 31) 3467 if (res_blocks >= ddb_allocation || res_lines > 31) {
3292 return false; 3468 *enabled = false;
3469
3470 /*
3471 * If there are no valid level 0 watermarks, then we can't
3472 * support this display configuration.
3473 */
3474 if (level) {
3475 return 0;
3476 } else {
3477 DRM_DEBUG_KMS("Requested display configuration exceeds system watermark limitations\n");
3478 DRM_DEBUG_KMS("Plane %d.%d: blocks required = %u/%u, lines required = %u/31\n",
3479 to_intel_crtc(cstate->base.crtc)->pipe,
3480 skl_wm_plane_id(to_intel_plane(pstate->plane)),
3481 res_blocks, ddb_allocation, res_lines);
3482
3483 return -EINVAL;
3484 }
3485 }
3293 3486
3294 *out_blocks = res_blocks; 3487 *out_blocks = res_blocks;
3295 *out_lines = res_lines; 3488 *out_lines = res_lines;
3489 *enabled = true;
3296 3490
3297 return true; 3491 return 0;
3298} 3492}
3299 3493
3300static void skl_compute_wm_level(const struct drm_i915_private *dev_priv, 3494static int
3301 struct skl_ddb_allocation *ddb, 3495skl_compute_wm_level(const struct drm_i915_private *dev_priv,
3302 struct intel_crtc_state *cstate, 3496 struct skl_ddb_allocation *ddb,
3303 int level, 3497 struct intel_crtc_state *cstate,
3304 struct skl_wm_level *result) 3498 int level,
3499 struct skl_wm_level *result)
3305{ 3500{
3306 struct drm_device *dev = dev_priv->dev; 3501 struct drm_atomic_state *state = cstate->base.state;
3307 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); 3502 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
3503 struct drm_plane *plane;
3308 struct intel_plane *intel_plane; 3504 struct intel_plane *intel_plane;
3505 struct intel_plane_state *intel_pstate;
3309 uint16_t ddb_blocks; 3506 uint16_t ddb_blocks;
3310 enum pipe pipe = intel_crtc->pipe; 3507 enum pipe pipe = intel_crtc->pipe;
3508 int ret;
3311 3509
3312 for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { 3510 /*
3511 * We'll only calculate watermarks for planes that are actually
3512 * enabled, so make sure all other planes are set as disabled.
3513 */
3514 memset(result, 0, sizeof(*result));
3515
3516 for_each_intel_plane_mask(&dev_priv->drm,
3517 intel_plane,
3518 cstate->base.plane_mask) {
3313 int i = skl_wm_plane_id(intel_plane); 3519 int i = skl_wm_plane_id(intel_plane);
3314 3520
3521 plane = &intel_plane->base;
3522 intel_pstate = NULL;
3523 if (state)
3524 intel_pstate =
3525 intel_atomic_get_existing_plane_state(state,
3526 intel_plane);
3527
3528 /*
3529 * Note: If we start supporting multiple pending atomic commits
3530 * against the same planes/CRTC's in the future, plane->state
3531 * will no longer be the correct pre-state to use for the
3532 * calculations here and we'll need to change where we get the
3533 * 'unchanged' plane data from.
3534 *
3535 * For now this is fine because we only allow one queued commit
3536 * against a CRTC. Even if the plane isn't modified by this
3537 * transaction and we don't have a plane lock, we still have
3538 * the CRTC's lock, so we know that no other transactions are
3539 * racing with us to update it.
3540 */
3541 if (!intel_pstate)
3542 intel_pstate = to_intel_plane_state(plane->state);
3543
3544 WARN_ON(!intel_pstate->base.fb);
3545
3315 ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]); 3546 ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);
3316 3547
3317 result->plane_en[i] = skl_compute_plane_wm(dev_priv, 3548 ret = skl_compute_plane_wm(dev_priv,
3318 cstate, 3549 cstate,
3319 intel_plane, 3550 intel_pstate,
3320 ddb_blocks, 3551 ddb_blocks,
3321 level, 3552 level,
3322 &result->plane_res_b[i], 3553 &result->plane_res_b[i],
3323 &result->plane_res_l[i]); 3554 &result->plane_res_l[i],
3555 &result->plane_en[i]);
3556 if (ret)
3557 return ret;
3324 } 3558 }
3559
3560 return 0;
3325} 3561}
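
The long comment above reduces to a simple lookup rule: if this atomic transaction already carries a state for the plane, use it; otherwise the plane is untouched and its committed plane->state is still valid, because holding the CRTC lock prevents anyone else from changing it underneath us. A toy, self-contained version of that fallback (the structures below are stand-ins, not the real drm/i915 types):

#include <stddef.h>
#include <stdio.h>

struct plane_state { int visible; };

struct plane {
	struct plane_state committed;   /* like plane->state */
	struct plane_state *pending;    /* state in this transaction, or NULL */
};

static const struct plane_state *effective_state(const struct plane *p)
{
	/* Untouched planes keep their currently committed state. */
	return p->pending ? p->pending : &p->committed;
}

int main(void)
{
	struct plane_state new = { .visible = 0 };
	struct plane a = { .committed = { .visible = 1 }, .pending = &new };
	struct plane b = { .committed = { .visible = 1 }, .pending = NULL };

	printf("a visible=%d, b visible=%d\n",
	       effective_state(&a)->visible, effective_state(&b)->visible);
	return 0;
}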
3326 3562
3327static uint32_t 3563static uint32_t
@@ -3355,21 +3591,26 @@ static void skl_compute_transition_wm(struct intel_crtc_state *cstate,
3355 } 3591 }
3356} 3592}
3357 3593
3358static void skl_compute_pipe_wm(struct intel_crtc_state *cstate, 3594static int skl_build_pipe_wm(struct intel_crtc_state *cstate,
3359 struct skl_ddb_allocation *ddb, 3595 struct skl_ddb_allocation *ddb,
3360 struct skl_pipe_wm *pipe_wm) 3596 struct skl_pipe_wm *pipe_wm)
3361{ 3597{
3362 struct drm_device *dev = cstate->base.crtc->dev; 3598 struct drm_device *dev = cstate->base.crtc->dev;
3363 const struct drm_i915_private *dev_priv = dev->dev_private; 3599 const struct drm_i915_private *dev_priv = to_i915(dev);
3364 int level, max_level = ilk_wm_max_level(dev); 3600 int level, max_level = ilk_wm_max_level(dev);
3601 int ret;
3365 3602
3366 for (level = 0; level <= max_level; level++) { 3603 for (level = 0; level <= max_level; level++) {
3367 skl_compute_wm_level(dev_priv, ddb, cstate, 3604 ret = skl_compute_wm_level(dev_priv, ddb, cstate,
3368 level, &pipe_wm->wm[level]); 3605 level, &pipe_wm->wm[level]);
3606 if (ret)
3607 return ret;
3369 } 3608 }
3370 pipe_wm->linetime = skl_compute_linetime_wm(cstate); 3609 pipe_wm->linetime = skl_compute_linetime_wm(cstate);
3371 3610
3372 skl_compute_transition_wm(cstate, &pipe_wm->trans_wm); 3611 skl_compute_transition_wm(cstate, &pipe_wm->trans_wm);
3612
3613 return 0;
3373} 3614}
3374 3615
3375static void skl_compute_wm_results(struct drm_device *dev, 3616static void skl_compute_wm_results(struct drm_device *dev,
@@ -3442,14 +3683,16 @@ static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
3442static void skl_write_wm_values(struct drm_i915_private *dev_priv, 3683static void skl_write_wm_values(struct drm_i915_private *dev_priv,
3443 const struct skl_wm_values *new) 3684 const struct skl_wm_values *new)
3444{ 3685{
3445 struct drm_device *dev = dev_priv->dev; 3686 struct drm_device *dev = &dev_priv->drm;
3446 struct intel_crtc *crtc; 3687 struct intel_crtc *crtc;
3447 3688
3448 for_each_intel_crtc(dev, crtc) { 3689 for_each_intel_crtc(dev, crtc) {
3449 int i, level, max_level = ilk_wm_max_level(dev); 3690 int i, level, max_level = ilk_wm_max_level(dev);
3450 enum pipe pipe = crtc->pipe; 3691 enum pipe pipe = crtc->pipe;
3451 3692
3452 if (!new->dirty[pipe]) 3693 if ((new->dirty_pipes & drm_crtc_mask(&crtc->base)) == 0)
3694 continue;
3695 if (!crtc->active)
3453 continue; 3696 continue;
3454 3697
3455 I915_WRITE(PIPE_WM_LINETIME(pipe), new->wm_linetime[pipe]); 3698 I915_WRITE(PIPE_WM_LINETIME(pipe), new->wm_linetime[pipe]);
@@ -3537,7 +3780,7 @@ skl_ddb_allocation_included(const struct skl_ddb_allocation *old,
3537static void skl_flush_wm_values(struct drm_i915_private *dev_priv, 3780static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
3538 struct skl_wm_values *new_values) 3781 struct skl_wm_values *new_values)
3539{ 3782{
3540 struct drm_device *dev = dev_priv->dev; 3783 struct drm_device *dev = &dev_priv->drm;
3541 struct skl_ddb_allocation *cur_ddb, *new_ddb; 3784 struct skl_ddb_allocation *cur_ddb, *new_ddb;
3542 bool reallocated[I915_MAX_PIPES] = {}; 3785 bool reallocated[I915_MAX_PIPES] = {};
3543 struct intel_crtc *crtc; 3786 struct intel_crtc *crtc;
@@ -3616,116 +3859,182 @@ static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
3616 } 3859 }
3617} 3860}
3618 3861
3619static bool skl_update_pipe_wm(struct drm_crtc *crtc, 3862static int skl_update_pipe_wm(struct drm_crtc_state *cstate,
3620 struct skl_ddb_allocation *ddb, /* out */ 3863 struct skl_ddb_allocation *ddb, /* out */
3621 struct skl_pipe_wm *pipe_wm /* out */) 3864 struct skl_pipe_wm *pipe_wm, /* out */
3865 bool *changed /* out */)
3622{ 3866{
3623 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3867 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->crtc);
3624 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); 3868 struct intel_crtc_state *intel_cstate = to_intel_crtc_state(cstate);
3869 int ret;
3625 3870
3626 skl_allocate_pipe_ddb(cstate, ddb); 3871 ret = skl_build_pipe_wm(intel_cstate, ddb, pipe_wm);
3627 skl_compute_pipe_wm(cstate, ddb, pipe_wm); 3872 if (ret)
3873 return ret;
3628 3874
3629 if (!memcmp(&intel_crtc->wm.active.skl, pipe_wm, sizeof(*pipe_wm))) 3875 if (!memcmp(&intel_crtc->wm.active.skl, pipe_wm, sizeof(*pipe_wm)))
3630 return false; 3876 *changed = false;
3877 else
3878 *changed = true;
3631 3879
3632 intel_crtc->wm.active.skl = *pipe_wm; 3880 return 0;
3881}
3633 3882
3634 return true; 3883static uint32_t
3884pipes_modified(struct drm_atomic_state *state)
3885{
3886 struct drm_crtc *crtc;
3887 struct drm_crtc_state *cstate;
3888 uint32_t i, ret = 0;
3889
3890 for_each_crtc_in_state(state, crtc, cstate, i)
3891 ret |= drm_crtc_mask(crtc);
3892
3893 return ret;
3635} 3894}

3636 3895
3637static void skl_update_other_pipe_wm(struct drm_device *dev, 3896static int
3638 struct drm_crtc *crtc, 3897skl_compute_ddb(struct drm_atomic_state *state)
3639 struct skl_wm_values *r)
3640{ 3898{
3899 struct drm_device *dev = state->dev;
3900 struct drm_i915_private *dev_priv = to_i915(dev);
3901 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
3641 struct intel_crtc *intel_crtc; 3902 struct intel_crtc *intel_crtc;
3642 struct intel_crtc *this_crtc = to_intel_crtc(crtc); 3903 struct skl_ddb_allocation *ddb = &intel_state->wm_results.ddb;
3904 uint32_t realloc_pipes = pipes_modified(state);
3905 int ret;
3643 3906
3644 /* 3907 /*
3645 * If the WM update hasn't changed the allocation for this_crtc (the 3908 * If this is our first atomic update following hardware readout,
3646 * crtc we are currently computing the new WM values for), other 3909 * we can't trust the DDB that the BIOS programmed for us. Let's
3647 * enabled crtcs will keep the same allocation and we don't need to 3910 * pretend that all pipes switched active status so that we'll
3648 * recompute anything for them. 3911 * ensure a full DDB recompute.
3649 */ 3912 */
3650 if (!skl_ddb_allocation_changed(&r->ddb, this_crtc)) 3913 if (dev_priv->wm.distrust_bios_wm)
3651 return; 3914 intel_state->active_pipe_changes = ~0;
3652 3915
3653 /* 3916 /*
3654 * Otherwise, because of this_crtc being freshly enabled/disabled, the 3917 * If the modeset changes which CRTC's are active, we need to
3655 * other active pipes need new DDB allocation and WM values. 3918 * recompute the DDB allocation for *all* active pipes, even
3919 * those that weren't otherwise being modified in any way by this
3920 * atomic commit. Due to the shrinking of the per-pipe allocations
3921 * when new active CRTC's are added, it's possible for a pipe that
3922 * we were already using and aren't changing at all here to suddenly
3923 * become invalid if its DDB needs exceeds its new allocation.
3924 *
3925 * Note that if we wind up doing a full DDB recompute, we can't let
3926 * any other display updates race with this transaction, so we need
3927 * to grab the lock on *all* CRTC's.
3656 */ 3928 */
3657 for_each_intel_crtc(dev, intel_crtc) { 3929 if (intel_state->active_pipe_changes) {
3658 struct skl_pipe_wm pipe_wm = {}; 3930 realloc_pipes = ~0;
3659 bool wm_changed; 3931 intel_state->wm_results.dirty_pipes = ~0;
3660 3932 }
3661 if (this_crtc->pipe == intel_crtc->pipe)
3662 continue;
3663 3933
3664 if (!intel_crtc->active) 3934 for_each_intel_crtc_mask(dev, intel_crtc, realloc_pipes) {
3665 continue; 3935 struct intel_crtc_state *cstate;
3666 3936
3667 wm_changed = skl_update_pipe_wm(&intel_crtc->base, 3937 cstate = intel_atomic_get_crtc_state(state, intel_crtc);
3668 &r->ddb, &pipe_wm); 3938 if (IS_ERR(cstate))
3939 return PTR_ERR(cstate);
3669 3940
3670 /* 3941 ret = skl_allocate_pipe_ddb(cstate, ddb);
3671 * If we end up re-computing the other pipe WM values, it's 3942 if (ret)
3672 * because it was really needed, so we expect the WM values to 3943 return ret;
3673 * be different.
3674 */
3675 WARN_ON(!wm_changed);
3676
3677 skl_compute_wm_results(dev, &pipe_wm, r, intel_crtc);
3678 r->dirty[intel_crtc->pipe] = true;
3679 } 3944 }
3945
3946 return 0;
3680} 3947}
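
The reason the comment insists on recomputing every active pipe is that the DDB is one fixed pool: when another CRTC turns on, each pipe's share shrinks, so a pipe whose own configuration is untouched by this commit can still stop fitting. A rough worked example assuming, purely for illustration, an even split of a hypothetical 896-block pool (a simplification of how the driver actually apportions the DDB between pipes):

#include <stdio.h>

int main(void)
{
	const int ddb_size = 896;    /* hypothetical global DDB, in blocks */
	const int pipe_needs = 400;  /* blocks one pipe's planes require */

	for (int active = 1; active <= 3; active++) {
		int share = ddb_size / active;

		printf("%d active pipe(s): %d blocks/pipe -> %s\n",
		       active, share,
		       pipe_needs <= share ? "fits" : "needs full recompute");
	}
	return 0;
}

Going from two to three active pipes drops the per-pipe share from 448 to 298 blocks, which is exactly the situation the full-DDB recompute (and the lock on all CRTCs) is there to handle.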
3681 3948
3682static void skl_clear_wm(struct skl_wm_values *watermarks, enum pipe pipe) 3949static int
3950skl_compute_wm(struct drm_atomic_state *state)
3683{ 3951{
3684 watermarks->wm_linetime[pipe] = 0; 3952 struct drm_crtc *crtc;
3685 memset(watermarks->plane[pipe], 0, 3953 struct drm_crtc_state *cstate;
3686 sizeof(uint32_t) * 8 * I915_MAX_PLANES); 3954 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
3687 memset(watermarks->plane_trans[pipe], 3955 struct skl_wm_values *results = &intel_state->wm_results;
3688 0, sizeof(uint32_t) * I915_MAX_PLANES); 3956 struct skl_pipe_wm *pipe_wm;
3689 watermarks->plane_trans[pipe][PLANE_CURSOR] = 0; 3957 bool changed = false;
3958 int ret, i;
3959
3960 /*
3961 * If this transaction isn't actually touching any CRTC's, don't
3962 * bother with watermark calculation. Note that if we pass this
3963 * test, we're guaranteed to hold at least one CRTC state mutex,
3964 * which means we can safely use values like dev_priv->active_crtcs
3965 * since any racing commits that want to update them would need to
3966 * hold _all_ CRTC state mutexes.
3967 */
3968 for_each_crtc_in_state(state, crtc, cstate, i)
3969 changed = true;
3970 if (!changed)
3971 return 0;
3972
3973 /* Clear all dirty flags */
3974 results->dirty_pipes = 0;
3975
3976 ret = skl_compute_ddb(state);
3977 if (ret)
3978 return ret;
3979
3980 /*
3981 * Calculate WM's for all pipes that are part of this transaction.
3982 * Note that the DDB allocation above may have added more CRTC's that
3983 * weren't otherwise being modified (and set bits in dirty_pipes) if
3984 * pipe allocations had to change.
3985 *
3986 * FIXME: Now that we're doing this in the atomic check phase, we
3987 * should allow skl_update_pipe_wm() to return failure in cases where
3988 * no suitable watermark values can be found.
3989 */
3990 for_each_crtc_in_state(state, crtc, cstate, i) {
3991 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3992 struct intel_crtc_state *intel_cstate =
3993 to_intel_crtc_state(cstate);
3994
3995 pipe_wm = &intel_cstate->wm.skl.optimal;
3996 ret = skl_update_pipe_wm(cstate, &results->ddb, pipe_wm,
3997 &changed);
3998 if (ret)
3999 return ret;
3690 4000
3691 /* Clear ddb entries for pipe */ 4001 if (changed)
3692 memset(&watermarks->ddb.pipe[pipe], 0, sizeof(struct skl_ddb_entry)); 4002 results->dirty_pipes |= drm_crtc_mask(crtc);
3693 memset(&watermarks->ddb.plane[pipe], 0,
3694 sizeof(struct skl_ddb_entry) * I915_MAX_PLANES);
3695 memset(&watermarks->ddb.y_plane[pipe], 0,
3696 sizeof(struct skl_ddb_entry) * I915_MAX_PLANES);
3697 memset(&watermarks->ddb.plane[pipe][PLANE_CURSOR], 0,
3698 sizeof(struct skl_ddb_entry));
3699 4003
4004 if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
4005 /* This pipe's WM's did not change */
4006 continue;
4007
4008 intel_cstate->update_wm_pre = true;
4009 skl_compute_wm_results(crtc->dev, pipe_wm, results, intel_crtc);
4010 }
4011
4012 return 0;
3700} 4013}
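
dirty_pipes replaces the old per-pipe bool array with a bitmask keyed by drm_crtc_mask(): each CRTC whose watermarks actually changed sets its bit, and skl_write_wm_values()/skl_update_wm() later skip any CRTC whose bit is clear. The pattern, reduced to a few lines with invented per-pipe results:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t dirty_pipes = 0;
	int wm_changed[3] = { 1, 0, 1 };     /* hypothetical per-pipe outcome */

	for (int pipe = 0; pipe < 3; pipe++)
		if (wm_changed[pipe])
			dirty_pipes |= 1u << pipe;   /* akin to drm_crtc_mask() */

	for (int pipe = 0; pipe < 3; pipe++) {
		if (!(dirty_pipes & (1u << pipe)))
			continue;                    /* nothing to write */
		printf("writing WM registers for pipe %d\n", pipe);
	}
	return 0;
}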
3701 4014
3702static void skl_update_wm(struct drm_crtc *crtc) 4015static void skl_update_wm(struct drm_crtc *crtc)
3703{ 4016{
3704 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4017 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3705 struct drm_device *dev = crtc->dev; 4018 struct drm_device *dev = crtc->dev;
3706 struct drm_i915_private *dev_priv = dev->dev_private; 4019 struct drm_i915_private *dev_priv = to_i915(dev);
3707 struct skl_wm_values *results = &dev_priv->wm.skl_results; 4020 struct skl_wm_values *results = &dev_priv->wm.skl_results;
3708 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); 4021 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
3709 struct skl_pipe_wm *pipe_wm = &cstate->wm.optimal.skl; 4022 struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
3710 4023
3711 4024 if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
3712 /* Clear all dirty flags */
3713 memset(results->dirty, 0, sizeof(bool) * I915_MAX_PIPES);
3714
3715 skl_clear_wm(results, intel_crtc->pipe);
3716
3717 if (!skl_update_pipe_wm(crtc, &results->ddb, pipe_wm))
3718 return; 4025 return;
3719 4026
3720 skl_compute_wm_results(dev, pipe_wm, results, intel_crtc); 4027 intel_crtc->wm.active.skl = *pipe_wm;
3721 results->dirty[intel_crtc->pipe] = true; 4028
4029 mutex_lock(&dev_priv->wm.wm_mutex);
3722 4030
3723 skl_update_other_pipe_wm(dev, crtc, results);
3724 skl_write_wm_values(dev_priv, results); 4031 skl_write_wm_values(dev_priv, results);
3725 skl_flush_wm_values(dev_priv, results); 4032 skl_flush_wm_values(dev_priv, results);
3726 4033
3727 /* store the new configuration */ 4034 /* store the new configuration */
3728 dev_priv->wm.skl_hw = *results; 4035 dev_priv->wm.skl_hw = *results;
4036
4037 mutex_unlock(&dev_priv->wm.wm_mutex);
3729} 4038}
3730 4039
3731static void ilk_compute_wm_config(struct drm_device *dev, 4040static void ilk_compute_wm_config(struct drm_device *dev,
@@ -3748,7 +4057,7 @@ static void ilk_compute_wm_config(struct drm_device *dev,
3748 4057
3749static void ilk_program_watermarks(struct drm_i915_private *dev_priv) 4058static void ilk_program_watermarks(struct drm_i915_private *dev_priv)
3750{ 4059{
3751 struct drm_device *dev = dev_priv->dev; 4060 struct drm_device *dev = &dev_priv->drm;
3752 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm; 4061 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm;
3753 struct ilk_wm_maximums max; 4062 struct ilk_wm_maximums max;
3754 struct intel_wm_config config = {}; 4063 struct intel_wm_config config = {};
@@ -3785,7 +4094,7 @@ static void ilk_initial_watermarks(struct intel_crtc_state *cstate)
3785 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); 4094 struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc);
3786 4095
3787 mutex_lock(&dev_priv->wm.wm_mutex); 4096 mutex_lock(&dev_priv->wm.wm_mutex);
3788 intel_crtc->wm.active.ilk = cstate->wm.intermediate; 4097 intel_crtc->wm.active.ilk = cstate->wm.ilk.intermediate;
3789 ilk_program_watermarks(dev_priv); 4098 ilk_program_watermarks(dev_priv);
3790 mutex_unlock(&dev_priv->wm.wm_mutex); 4099 mutex_unlock(&dev_priv->wm.wm_mutex);
3791} 4100}
@@ -3797,7 +4106,7 @@ static void ilk_optimize_watermarks(struct intel_crtc_state *cstate)
3797 4106
3798 mutex_lock(&dev_priv->wm.wm_mutex); 4107 mutex_lock(&dev_priv->wm.wm_mutex);
3799 if (cstate->wm.need_postvbl_update) { 4108 if (cstate->wm.need_postvbl_update) {
3800 intel_crtc->wm.active.ilk = cstate->wm.optimal.ilk; 4109 intel_crtc->wm.active.ilk = cstate->wm.ilk.optimal;
3801 ilk_program_watermarks(dev_priv); 4110 ilk_program_watermarks(dev_priv);
3802 } 4111 }
3803 mutex_unlock(&dev_priv->wm.wm_mutex); 4112 mutex_unlock(&dev_priv->wm.wm_mutex);
@@ -3850,11 +4159,11 @@ static void skl_pipe_wm_active_state(uint32_t val,
3850static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc) 4159static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
3851{ 4160{
3852 struct drm_device *dev = crtc->dev; 4161 struct drm_device *dev = crtc->dev;
3853 struct drm_i915_private *dev_priv = dev->dev_private; 4162 struct drm_i915_private *dev_priv = to_i915(dev);
3854 struct skl_wm_values *hw = &dev_priv->wm.skl_hw; 4163 struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
3855 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4164 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3856 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); 4165 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
3857 struct skl_pipe_wm *active = &cstate->wm.optimal.skl; 4166 struct skl_pipe_wm *active = &cstate->wm.skl.optimal;
3858 enum pipe pipe = intel_crtc->pipe; 4167 enum pipe pipe = intel_crtc->pipe;
3859 int level, i, max_level; 4168 int level, i, max_level;
3860 uint32_t temp; 4169 uint32_t temp;
@@ -3877,7 +4186,7 @@ static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
3877 if (!intel_crtc->active) 4186 if (!intel_crtc->active)
3878 return; 4187 return;
3879 4188
3880 hw->dirty[pipe] = true; 4189 hw->dirty_pipes |= drm_crtc_mask(crtc);
3881 4190
3882 active->linetime = hw->wm_linetime[pipe]; 4191 active->linetime = hw->wm_linetime[pipe];
3883 4192
@@ -3904,23 +4213,31 @@ static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
3904 4213
3905void skl_wm_get_hw_state(struct drm_device *dev) 4214void skl_wm_get_hw_state(struct drm_device *dev)
3906{ 4215{
3907 struct drm_i915_private *dev_priv = dev->dev_private; 4216 struct drm_i915_private *dev_priv = to_i915(dev);
3908 struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb; 4217 struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
3909 struct drm_crtc *crtc; 4218 struct drm_crtc *crtc;
3910 4219
3911 skl_ddb_get_hw_state(dev_priv, ddb); 4220 skl_ddb_get_hw_state(dev_priv, ddb);
3912 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) 4221 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
3913 skl_pipe_wm_get_hw_state(crtc); 4222 skl_pipe_wm_get_hw_state(crtc);
4223
4224 if (dev_priv->active_crtcs) {
4225 /* Fully recompute DDB on first atomic commit */
4226 dev_priv->wm.distrust_bios_wm = true;
4227 } else {
4228 /* Easy/common case; just sanitize DDB now if everything off */
4229 memset(ddb, 0, sizeof(*ddb));
4230 }
3914} 4231}
3915 4232
3916static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc) 4233static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
3917{ 4234{
3918 struct drm_device *dev = crtc->dev; 4235 struct drm_device *dev = crtc->dev;
3919 struct drm_i915_private *dev_priv = dev->dev_private; 4236 struct drm_i915_private *dev_priv = to_i915(dev);
3920 struct ilk_wm_values *hw = &dev_priv->wm.hw; 4237 struct ilk_wm_values *hw = &dev_priv->wm.hw;
3921 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4238 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3922 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); 4239 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
3923 struct intel_pipe_wm *active = &cstate->wm.optimal.ilk; 4240 struct intel_pipe_wm *active = &cstate->wm.ilk.optimal;
3924 enum pipe pipe = intel_crtc->pipe; 4241 enum pipe pipe = intel_crtc->pipe;
3925 static const i915_reg_t wm0_pipe_reg[] = { 4242 static const i915_reg_t wm0_pipe_reg[] = {
3926 [PIPE_A] = WM0_PIPEA_ILK, 4243 [PIPE_A] = WM0_PIPEA_ILK,
@@ -4120,7 +4437,7 @@ void vlv_wm_get_hw_state(struct drm_device *dev)
4120 4437
4121void ilk_wm_get_hw_state(struct drm_device *dev) 4438void ilk_wm_get_hw_state(struct drm_device *dev)
4122{ 4439{
4123 struct drm_i915_private *dev_priv = dev->dev_private; 4440 struct drm_i915_private *dev_priv = to_i915(dev);
4124 struct ilk_wm_values *hw = &dev_priv->wm.hw; 4441 struct ilk_wm_values *hw = &dev_priv->wm.hw;
4125 struct drm_crtc *crtc; 4442 struct drm_crtc *crtc;
4126 4443
@@ -4182,7 +4499,7 @@ void ilk_wm_get_hw_state(struct drm_device *dev)
4182 */ 4499 */
4183void intel_update_watermarks(struct drm_crtc *crtc) 4500void intel_update_watermarks(struct drm_crtc *crtc)
4184{ 4501{
4185 struct drm_i915_private *dev_priv = crtc->dev->dev_private; 4502 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
4186 4503
4187 if (dev_priv->display.update_wm) 4504 if (dev_priv->display.update_wm)
4188 dev_priv->display.update_wm(crtc); 4505 dev_priv->display.update_wm(crtc);
@@ -4197,9 +4514,8 @@ DEFINE_SPINLOCK(mchdev_lock);
4197 * mchdev_lock. */ 4514 * mchdev_lock. */
4198static struct drm_i915_private *i915_mch_dev; 4515static struct drm_i915_private *i915_mch_dev;
4199 4516
4200bool ironlake_set_drps(struct drm_device *dev, u8 val) 4517bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val)
4201{ 4518{
4202 struct drm_i915_private *dev_priv = dev->dev_private;
4203 u16 rgvswctl; 4519 u16 rgvswctl;
4204 4520
4205 assert_spin_locked(&mchdev_lock); 4521 assert_spin_locked(&mchdev_lock);
@@ -4221,9 +4537,8 @@ bool ironlake_set_drps(struct drm_device *dev, u8 val)
4221 return true; 4537 return true;
4222} 4538}
4223 4539
4224static void ironlake_enable_drps(struct drm_device *dev) 4540static void ironlake_enable_drps(struct drm_i915_private *dev_priv)
4225{ 4541{
4226 struct drm_i915_private *dev_priv = dev->dev_private;
4227 u32 rgvmodectl; 4542 u32 rgvmodectl;
4228 u8 fmax, fmin, fstart, vstart; 4543 u8 fmax, fmin, fstart, vstart;
4229 4544
@@ -4280,7 +4595,7 @@ static void ironlake_enable_drps(struct drm_device *dev)
4280 DRM_ERROR("stuck trying to change perf mode\n"); 4595 DRM_ERROR("stuck trying to change perf mode\n");
4281 mdelay(1); 4596 mdelay(1);
4282 4597
4283 ironlake_set_drps(dev, fstart); 4598 ironlake_set_drps(dev_priv, fstart);
4284 4599
4285 dev_priv->ips.last_count1 = I915_READ(DMIEC) + 4600 dev_priv->ips.last_count1 = I915_READ(DMIEC) +
4286 I915_READ(DDREC) + I915_READ(CSIEC); 4601 I915_READ(DDREC) + I915_READ(CSIEC);
@@ -4291,9 +4606,8 @@ static void ironlake_enable_drps(struct drm_device *dev)
4291 spin_unlock_irq(&mchdev_lock); 4606 spin_unlock_irq(&mchdev_lock);
4292} 4607}
4293 4608
4294static void ironlake_disable_drps(struct drm_device *dev) 4609static void ironlake_disable_drps(struct drm_i915_private *dev_priv)
4295{ 4610{
4296 struct drm_i915_private *dev_priv = dev->dev_private;
4297 u16 rgvswctl; 4611 u16 rgvswctl;
4298 4612
4299 spin_lock_irq(&mchdev_lock); 4613 spin_lock_irq(&mchdev_lock);
@@ -4308,7 +4622,7 @@ static void ironlake_disable_drps(struct drm_device *dev)
4308 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT); 4622 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
4309 4623
4310 /* Go back to the starting frequency */ 4624 /* Go back to the starting frequency */
4311 ironlake_set_drps(dev, dev_priv->ips.fstart); 4625 ironlake_set_drps(dev_priv, dev_priv->ips.fstart);
4312 mdelay(1); 4626 mdelay(1);
4313 rgvswctl |= MEMCTL_CMD_STS; 4627 rgvswctl |= MEMCTL_CMD_STS;
4314 I915_WRITE(MEMSWCTL, rgvswctl); 4628 I915_WRITE(MEMSWCTL, rgvswctl);
@@ -4354,19 +4668,23 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
4354 new_power = dev_priv->rps.power; 4668 new_power = dev_priv->rps.power;
4355 switch (dev_priv->rps.power) { 4669 switch (dev_priv->rps.power) {
4356 case LOW_POWER: 4670 case LOW_POWER:
4357 if (val > dev_priv->rps.efficient_freq + 1 && val > dev_priv->rps.cur_freq) 4671 if (val > dev_priv->rps.efficient_freq + 1 &&
4672 val > dev_priv->rps.cur_freq)
4358 new_power = BETWEEN; 4673 new_power = BETWEEN;
4359 break; 4674 break;
4360 4675
4361 case BETWEEN: 4676 case BETWEEN:
4362 if (val <= dev_priv->rps.efficient_freq && val < dev_priv->rps.cur_freq) 4677 if (val <= dev_priv->rps.efficient_freq &&
4678 val < dev_priv->rps.cur_freq)
4363 new_power = LOW_POWER; 4679 new_power = LOW_POWER;
4364 else if (val >= dev_priv->rps.rp0_freq && val > dev_priv->rps.cur_freq) 4680 else if (val >= dev_priv->rps.rp0_freq &&
4681 val > dev_priv->rps.cur_freq)
4365 new_power = HIGH_POWER; 4682 new_power = HIGH_POWER;
4366 break; 4683 break;
4367 4684
4368 case HIGH_POWER: 4685 case HIGH_POWER:
4369 if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 && val < dev_priv->rps.cur_freq) 4686 if (val < (dev_priv->rps.rp1_freq + dev_priv->rps.rp0_freq) >> 1 &&
4687 val < dev_priv->rps.cur_freq)
4370 new_power = BETWEEN; 4688 new_power = BETWEEN;
4371 break; 4689 break;
4372 } 4690 }
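
The switch above is a three-state hysteresis: climbing out of LOW_POWER requires the request to clear the efficient frequency by a margin and to actually be an increase, while dropping back out of HIGH_POWER requires falling below the midpoint of RP1 and RP0, so a request hovering near a boundary does not keep re-programming the up/down thresholds. The same state machine, extracted with made-up frequency points:

#include <stdio.h>

enum rps_power { LOW_POWER, BETWEEN, HIGH_POWER };

/* Hypothetical frequency points, in arbitrary units. */
static const int efficient_freq = 6, rp1_freq = 8, rp0_freq = 12;

static enum rps_power next_power(enum rps_power cur, int val, int cur_freq)
{
	switch (cur) {
	case LOW_POWER:
		if (val > efficient_freq + 1 && val > cur_freq)
			return BETWEEN;
		break;
	case BETWEEN:
		if (val <= efficient_freq && val < cur_freq)
			return LOW_POWER;
		else if (val >= rp0_freq && val > cur_freq)
			return HIGH_POWER;
		break;
	case HIGH_POWER:
		if (val < (rp1_freq + rp0_freq) / 2 && val < cur_freq)
			return BETWEEN;
		break;
	}
	return cur;
}

int main(void)
{
	enum rps_power p = LOW_POWER;

	p = next_power(p, 9, 5);    /* past efficient_freq + 1 -> BETWEEN */
	p = next_power(p, 12, 9);   /* reached RP0 -> HIGH_POWER */
	p = next_power(p, 9, 12);   /* below (RP1 + RP0) / 2 -> BETWEEN */
	printf("final power level: %d\n", p);
	return 0;
}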
@@ -4412,22 +4730,24 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val)
4412 } 4730 }
4413 4731
4414 I915_WRITE(GEN6_RP_UP_EI, 4732 I915_WRITE(GEN6_RP_UP_EI,
4415 GT_INTERVAL_FROM_US(dev_priv, ei_up)); 4733 GT_INTERVAL_FROM_US(dev_priv, ei_up));
4416 I915_WRITE(GEN6_RP_UP_THRESHOLD, 4734 I915_WRITE(GEN6_RP_UP_THRESHOLD,
4417 GT_INTERVAL_FROM_US(dev_priv, (ei_up * threshold_up / 100))); 4735 GT_INTERVAL_FROM_US(dev_priv,
4736 ei_up * threshold_up / 100));
4418 4737
4419 I915_WRITE(GEN6_RP_DOWN_EI, 4738 I915_WRITE(GEN6_RP_DOWN_EI,
4420 GT_INTERVAL_FROM_US(dev_priv, ei_down)); 4739 GT_INTERVAL_FROM_US(dev_priv, ei_down));
4421 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 4740 I915_WRITE(GEN6_RP_DOWN_THRESHOLD,
4422 GT_INTERVAL_FROM_US(dev_priv, (ei_down * threshold_down / 100))); 4741 GT_INTERVAL_FROM_US(dev_priv,
4742 ei_down * threshold_down / 100));
4423 4743
4424 I915_WRITE(GEN6_RP_CONTROL, 4744 I915_WRITE(GEN6_RP_CONTROL,
4425 GEN6_RP_MEDIA_TURBO | 4745 GEN6_RP_MEDIA_TURBO |
4426 GEN6_RP_MEDIA_HW_NORMAL_MODE | 4746 GEN6_RP_MEDIA_HW_NORMAL_MODE |
4427 GEN6_RP_MEDIA_IS_GFX | 4747 GEN6_RP_MEDIA_IS_GFX |
4428 GEN6_RP_ENABLE | 4748 GEN6_RP_ENABLE |
4429 GEN6_RP_UP_BUSY_AVG | 4749 GEN6_RP_UP_BUSY_AVG |
4430 GEN6_RP_DOWN_IDLE_AVG); 4750 GEN6_RP_DOWN_IDLE_AVG);
4431 4751
4432 dev_priv->rps.power = new_power; 4752 dev_priv->rps.power = new_power;
4433 dev_priv->rps.up_threshold = threshold_up; 4753 dev_priv->rps.up_threshold = threshold_up;
@@ -4452,12 +4772,10 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
4452/* gen6_set_rps is called to update the frequency request, but should also be 4772/* gen6_set_rps is called to update the frequency request, but should also be
4453 * called when the range (min_delay and max_delay) is modified so that we can 4773 * called when the range (min_delay and max_delay) is modified so that we can
4454 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */ 4774 * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */
4455static void gen6_set_rps(struct drm_device *dev, u8 val) 4775static void gen6_set_rps(struct drm_i915_private *dev_priv, u8 val)
4456{ 4776{
4457 struct drm_i915_private *dev_priv = dev->dev_private;
4458
4459 /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */ 4777 /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
4460 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) 4778 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
4461 return; 4779 return;
4462 4780
4463 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 4781 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
@@ -4470,10 +4788,10 @@ static void gen6_set_rps(struct drm_device *dev, u8 val)
4470 if (val != dev_priv->rps.cur_freq) { 4788 if (val != dev_priv->rps.cur_freq) {
4471 gen6_set_rps_thresholds(dev_priv, val); 4789 gen6_set_rps_thresholds(dev_priv, val);
4472 4790
4473 if (IS_GEN9(dev)) 4791 if (IS_GEN9(dev_priv))
4474 I915_WRITE(GEN6_RPNSWREQ, 4792 I915_WRITE(GEN6_RPNSWREQ,
4475 GEN9_FREQUENCY(val)); 4793 GEN9_FREQUENCY(val));
4476 else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 4794 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
4477 I915_WRITE(GEN6_RPNSWREQ, 4795 I915_WRITE(GEN6_RPNSWREQ,
4478 HSW_FREQUENCY(val)); 4796 HSW_FREQUENCY(val));
4479 else 4797 else
@@ -4495,15 +4813,13 @@ static void gen6_set_rps(struct drm_device *dev, u8 val)
4495 trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val)); 4813 trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
4496} 4814}
4497 4815
4498static void valleyview_set_rps(struct drm_device *dev, u8 val) 4816static void valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val)
4499{ 4817{
4500 struct drm_i915_private *dev_priv = dev->dev_private;
4501
4502 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 4818 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
4503 WARN_ON(val > dev_priv->rps.max_freq); 4819 WARN_ON(val > dev_priv->rps.max_freq);
4504 WARN_ON(val < dev_priv->rps.min_freq); 4820 WARN_ON(val < dev_priv->rps.min_freq);
4505 4821
4506 if (WARN_ONCE(IS_CHERRYVIEW(dev) && (val & 1), 4822 if (WARN_ONCE(IS_CHERRYVIEW(dev_priv) && (val & 1),
4507 "Odd GPU freq value\n")) 4823 "Odd GPU freq value\n"))
4508 val &= ~1; 4824 val &= ~1;
4509 4825
@@ -4536,7 +4852,7 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
4536 /* Wake up the media well, as that takes a lot less 4852 /* Wake up the media well, as that takes a lot less
4537 * power than the Render well. */ 4853 * power than the Render well. */
4538 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA); 4854 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA);
4539 valleyview_set_rps(dev_priv->dev, val); 4855 valleyview_set_rps(dev_priv, val);
4540 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA); 4856 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA);
4541} 4857}
4542 4858
@@ -4548,20 +4864,33 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv)
4548 gen6_rps_reset_ei(dev_priv); 4864 gen6_rps_reset_ei(dev_priv);
4549 I915_WRITE(GEN6_PMINTRMSK, 4865 I915_WRITE(GEN6_PMINTRMSK,
4550 gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq)); 4866 gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
4867
4868 gen6_enable_rps_interrupts(dev_priv);
4869
4870 /* Ensure we start at the user's desired frequency */
4871 intel_set_rps(dev_priv,
4872 clamp(dev_priv->rps.cur_freq,
4873 dev_priv->rps.min_freq_softlimit,
4874 dev_priv->rps.max_freq_softlimit));
4551 } 4875 }
4552 mutex_unlock(&dev_priv->rps.hw_lock); 4876 mutex_unlock(&dev_priv->rps.hw_lock);
4553} 4877}
4554 4878
4555void gen6_rps_idle(struct drm_i915_private *dev_priv) 4879void gen6_rps_idle(struct drm_i915_private *dev_priv)
4556{ 4880{
4557 struct drm_device *dev = dev_priv->dev; 4881 /* Flush our bottom-half so that it does not race with us
4882 * setting the idle frequency and so that it is bounded by
4883 * our rpm wakeref. And then disable the interrupts to stop any
4884 * further RPS reclocking whilst we are asleep.
4885 */
4886 gen6_disable_rps_interrupts(dev_priv);
4558 4887
4559 mutex_lock(&dev_priv->rps.hw_lock); 4888 mutex_lock(&dev_priv->rps.hw_lock);
4560 if (dev_priv->rps.enabled) { 4889 if (dev_priv->rps.enabled) {
4561 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) 4890 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
4562 vlv_set_rps_idle(dev_priv); 4891 vlv_set_rps_idle(dev_priv);
4563 else 4892 else
4564 gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); 4893 gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
4565 dev_priv->rps.last_adj = 0; 4894 dev_priv->rps.last_adj = 0;
4566 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); 4895 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
4567 } 4896 }
@@ -4580,7 +4909,7 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv,
4580 /* This is intentionally racy! We peek at the state here, then 4909 /* This is intentionally racy! We peek at the state here, then
4581 * validate inside the RPS worker. 4910 * validate inside the RPS worker.
4582 */ 4911 */
4583 if (!(dev_priv->mm.busy && 4912 if (!(dev_priv->gt.awake &&
4584 dev_priv->rps.enabled && 4913 dev_priv->rps.enabled &&
4585 dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)) 4914 dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit))
4586 return; 4915 return;
@@ -4596,7 +4925,7 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv,
4596 spin_lock_irq(&dev_priv->irq_lock); 4925 spin_lock_irq(&dev_priv->irq_lock);
4597 if (dev_priv->rps.interrupts_enabled) { 4926 if (dev_priv->rps.interrupts_enabled) {
4598 dev_priv->rps.client_boost = true; 4927 dev_priv->rps.client_boost = true;
4599 queue_work(dev_priv->wq, &dev_priv->rps.work); 4928 schedule_work(&dev_priv->rps.work);
4600 } 4929 }
4601 spin_unlock_irq(&dev_priv->irq_lock); 4930 spin_unlock_irq(&dev_priv->irq_lock);
4602 4931
@@ -4609,49 +4938,39 @@ void gen6_rps_boost(struct drm_i915_private *dev_priv,
4609 spin_unlock(&dev_priv->rps.client_lock); 4938 spin_unlock(&dev_priv->rps.client_lock);
4610} 4939}
4611 4940
4612void intel_set_rps(struct drm_device *dev, u8 val) 4941void intel_set_rps(struct drm_i915_private *dev_priv, u8 val)
4613{ 4942{
4614 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) 4943 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
4615 valleyview_set_rps(dev, val); 4944 valleyview_set_rps(dev_priv, val);
4616 else 4945 else
4617 gen6_set_rps(dev, val); 4946 gen6_set_rps(dev_priv, val);
4618} 4947}
4619 4948
4620static void gen9_disable_rc6(struct drm_device *dev) 4949static void gen9_disable_rc6(struct drm_i915_private *dev_priv)
4621{ 4950{
4622 struct drm_i915_private *dev_priv = dev->dev_private;
4623
4624 I915_WRITE(GEN6_RC_CONTROL, 0); 4951 I915_WRITE(GEN6_RC_CONTROL, 0);
4625 I915_WRITE(GEN9_PG_ENABLE, 0); 4952 I915_WRITE(GEN9_PG_ENABLE, 0);
4626} 4953}
4627 4954
4628static void gen9_disable_rps(struct drm_device *dev) 4955static void gen9_disable_rps(struct drm_i915_private *dev_priv)
4629{ 4956{
4630 struct drm_i915_private *dev_priv = dev->dev_private;
4631
4632 I915_WRITE(GEN6_RP_CONTROL, 0); 4957 I915_WRITE(GEN6_RP_CONTROL, 0);
4633} 4958}
4634 4959
4635static void gen6_disable_rps(struct drm_device *dev) 4960static void gen6_disable_rps(struct drm_i915_private *dev_priv)
4636{ 4961{
4637 struct drm_i915_private *dev_priv = dev->dev_private;
4638
4639 I915_WRITE(GEN6_RC_CONTROL, 0); 4962 I915_WRITE(GEN6_RC_CONTROL, 0);
4640 I915_WRITE(GEN6_RPNSWREQ, 1 << 31); 4963 I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
4641 I915_WRITE(GEN6_RP_CONTROL, 0); 4964 I915_WRITE(GEN6_RP_CONTROL, 0);
4642} 4965}
4643 4966
4644static void cherryview_disable_rps(struct drm_device *dev) 4967static void cherryview_disable_rps(struct drm_i915_private *dev_priv)
4645{ 4968{
4646 struct drm_i915_private *dev_priv = dev->dev_private;
4647
4648 I915_WRITE(GEN6_RC_CONTROL, 0); 4969 I915_WRITE(GEN6_RC_CONTROL, 0);
4649} 4970}
4650 4971
4651static void valleyview_disable_rps(struct drm_device *dev) 4972static void valleyview_disable_rps(struct drm_i915_private *dev_priv)
4652{ 4973{
4653 struct drm_i915_private *dev_priv = dev->dev_private;
4654
4655 /* we're doing forcewake before Disabling RC6, 4974 /* we're doing forcewake before Disabling RC6,
4656 * This is what the BIOS expects when going into suspend */ 4975 * This is what the BIOS expects when going into suspend */
4657 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 4976 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
@@ -4661,34 +4980,45 @@ static void valleyview_disable_rps(struct drm_device *dev)
4661 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 4980 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4662} 4981}
4663 4982
4664static void intel_print_rc6_info(struct drm_device *dev, u32 mode) 4983static void intel_print_rc6_info(struct drm_i915_private *dev_priv, u32 mode)
4665{ 4984{
4666 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { 4985 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
4667 if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1))) 4986 if (mode & (GEN7_RC_CTL_TO_MODE | GEN6_RC_CTL_EI_MODE(1)))
4668 mode = GEN6_RC_CTL_RC6_ENABLE; 4987 mode = GEN6_RC_CTL_RC6_ENABLE;
4669 else 4988 else
4670 mode = 0; 4989 mode = 0;
4671 } 4990 }
4672 if (HAS_RC6p(dev)) 4991 if (HAS_RC6p(dev_priv))
4673 DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n", 4992 DRM_DEBUG_DRIVER("Enabling RC6 states: "
4674 onoff(mode & GEN6_RC_CTL_RC6_ENABLE), 4993 "RC6 %s RC6p %s RC6pp %s\n",
4675 onoff(mode & GEN6_RC_CTL_RC6p_ENABLE), 4994 onoff(mode & GEN6_RC_CTL_RC6_ENABLE),
4676 onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE)); 4995 onoff(mode & GEN6_RC_CTL_RC6p_ENABLE),
4996 onoff(mode & GEN6_RC_CTL_RC6pp_ENABLE));
4677 4997
4678 else 4998 else
4679 DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s\n", 4999 DRM_DEBUG_DRIVER("Enabling RC6 states: RC6 %s\n",
4680 onoff(mode & GEN6_RC_CTL_RC6_ENABLE)); 5000 onoff(mode & GEN6_RC_CTL_RC6_ENABLE));
4681} 5001}
4682 5002
4683static bool bxt_check_bios_rc6_setup(const struct drm_device *dev) 5003static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv)
4684{ 5004{
4685 struct drm_i915_private *dev_priv = to_i915(dev);
4686 struct i915_ggtt *ggtt = &dev_priv->ggtt; 5005 struct i915_ggtt *ggtt = &dev_priv->ggtt;
4687 bool enable_rc6 = true; 5006 bool enable_rc6 = true;
4688 unsigned long rc6_ctx_base; 5007 unsigned long rc6_ctx_base;
5008 u32 rc_ctl;
5009 int rc_sw_target;
5010
5011 rc_ctl = I915_READ(GEN6_RC_CONTROL);
5012 rc_sw_target = (I915_READ(GEN6_RC_STATE) & RC_SW_TARGET_STATE_MASK) >>
5013 RC_SW_TARGET_STATE_SHIFT;
5014 DRM_DEBUG_DRIVER("BIOS enabled RC states: "
5015 "HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n",
5016 onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE),
5017 onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE),
5018 rc_sw_target);
4689 5019
4690 if (!(I915_READ(RC6_LOCATION) & RC6_CTX_IN_DRAM)) { 5020 if (!(I915_READ(RC6_LOCATION) & RC6_CTX_IN_DRAM)) {
4691 DRM_DEBUG_KMS("RC6 Base location not set properly.\n"); 5021 DRM_DEBUG_DRIVER("RC6 Base location not set properly.\n");
4692 enable_rc6 = false; 5022 enable_rc6 = false;
4693 } 5023 }
4694 5024
@@ -4700,7 +5030,7 @@ static bool bxt_check_bios_rc6_setup(const struct drm_device *dev)
4700 if (!((rc6_ctx_base >= ggtt->stolen_reserved_base) && 5030 if (!((rc6_ctx_base >= ggtt->stolen_reserved_base) &&
4701 (rc6_ctx_base + PAGE_SIZE <= ggtt->stolen_reserved_base + 5031 (rc6_ctx_base + PAGE_SIZE <= ggtt->stolen_reserved_base +
4702 ggtt->stolen_reserved_size))) { 5032 ggtt->stolen_reserved_size))) {
4703 DRM_DEBUG_KMS("RC6 Base address not as expected.\n"); 5033 DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n");
4704 enable_rc6 = false; 5034 enable_rc6 = false;
4705 } 5035 }
4706 5036
@@ -4708,31 +5038,40 @@ static bool bxt_check_bios_rc6_setup(const struct drm_device *dev)
4708 ((I915_READ(PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1) && 5038 ((I915_READ(PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1) &&
4709 ((I915_READ(PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1) && 5039 ((I915_READ(PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1) &&
4710 ((I915_READ(PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1))) { 5040 ((I915_READ(PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1))) {
4711 DRM_DEBUG_KMS("Engine Idle wait time not set properly.\n"); 5041 DRM_DEBUG_DRIVER("Engine Idle wait time not set properly.\n");
5042 enable_rc6 = false;
5043 }
5044
5045 if (!I915_READ(GEN8_PUSHBUS_CONTROL) ||
5046 !I915_READ(GEN8_PUSHBUS_ENABLE) ||
5047 !I915_READ(GEN8_PUSHBUS_SHIFT)) {
5048 DRM_DEBUG_DRIVER("Pushbus not setup properly.\n");
5049 enable_rc6 = false;
5050 }
5051
5052 if (!I915_READ(GEN6_GFXPAUSE)) {
5053 DRM_DEBUG_DRIVER("GFX pause not setup properly.\n");
4712 enable_rc6 = false; 5054 enable_rc6 = false;
4713 } 5055 }
4714 5056
4715 if (!(I915_READ(GEN6_RC_CONTROL) & (GEN6_RC_CTL_RC6_ENABLE | 5057 if (!I915_READ(GEN8_MISC_CTRL0)) {
4716 GEN6_RC_CTL_HW_ENABLE)) && 5058 DRM_DEBUG_DRIVER("GPM control not setup properly.\n");
4717 ((I915_READ(GEN6_RC_CONTROL) & GEN6_RC_CTL_HW_ENABLE) ||
4718 !(I915_READ(GEN6_RC_STATE) & RC6_STATE))) {
4719 DRM_DEBUG_KMS("HW/SW RC6 is not enabled by BIOS.\n");
4720 enable_rc6 = false; 5059 enable_rc6 = false;
4721 } 5060 }
4722 5061
4723 return enable_rc6; 5062 return enable_rc6;
4724} 5063}
4725 5064
4726int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6) 5065int sanitize_rc6_option(struct drm_i915_private *dev_priv, int enable_rc6)
4727{ 5066{
4728 /* No RC6 before Ironlake and code is gone for ilk. */ 5067 /* No RC6 before Ironlake and code is gone for ilk. */
4729 if (INTEL_INFO(dev)->gen < 6) 5068 if (INTEL_INFO(dev_priv)->gen < 6)
4730 return 0; 5069 return 0;
4731 5070
4732 if (!enable_rc6) 5071 if (!enable_rc6)
4733 return 0; 5072 return 0;
4734 5073
4735 if (IS_BROXTON(dev) && !bxt_check_bios_rc6_setup(dev)) { 5074 if (IS_BROXTON(dev_priv) && !bxt_check_bios_rc6_setup(dev_priv)) {
4736 DRM_INFO("RC6 disabled by BIOS\n"); 5075 DRM_INFO("RC6 disabled by BIOS\n");
4737 return 0; 5076 return 0;
4738 } 5077 }
@@ -4741,33 +5080,28 @@ int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
4741 if (enable_rc6 >= 0) { 5080 if (enable_rc6 >= 0) {
4742 int mask; 5081 int mask;
4743 5082
4744 if (HAS_RC6p(dev)) 5083 if (HAS_RC6p(dev_priv))
4745 mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE | 5084 mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
4746 INTEL_RC6pp_ENABLE; 5085 INTEL_RC6pp_ENABLE;
4747 else 5086 else
4748 mask = INTEL_RC6_ENABLE; 5087 mask = INTEL_RC6_ENABLE;
4749 5088
4750 if ((enable_rc6 & mask) != enable_rc6) 5089 if ((enable_rc6 & mask) != enable_rc6)
4751 DRM_DEBUG_KMS("Adjusting RC6 mask to %d (requested %d, valid %d)\n", 5090 DRM_DEBUG_DRIVER("Adjusting RC6 mask to %d "
4752 enable_rc6 & mask, enable_rc6, mask); 5091 "(requested %d, valid %d)\n",
5092 enable_rc6 & mask, enable_rc6, mask);
4753 5093
4754 return enable_rc6 & mask; 5094 return enable_rc6 & mask;
4755 } 5095 }
4756 5096
4757 if (IS_IVYBRIDGE(dev)) 5097 if (IS_IVYBRIDGE(dev_priv))
4758 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE); 5098 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
4759 5099
4760 return INTEL_RC6_ENABLE; 5100 return INTEL_RC6_ENABLE;
4761} 5101}
4762 5102
4763int intel_enable_rc6(const struct drm_device *dev) 5103static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv)
4764{ 5104{
4765 return i915.enable_rc6;
4766}
4767
4768static void gen6_init_rps_frequencies(struct drm_device *dev)
4769{
4770 struct drm_i915_private *dev_priv = dev->dev_private;
4771 uint32_t rp_state_cap; 5105 uint32_t rp_state_cap;
4772 u32 ddcc_status = 0; 5106 u32 ddcc_status = 0;
4773 int ret; 5107 int ret;
@@ -4775,7 +5109,7 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
4775 /* All of these values are in units of 50MHz */ 5109 /* All of these values are in units of 50MHz */
4776 dev_priv->rps.cur_freq = 0; 5110 dev_priv->rps.cur_freq = 0;
4777 /* static values from HW: RP0 > RP1 > RPn (min_freq) */ 5111 /* static values from HW: RP0 > RP1 > RPn (min_freq) */
4778 if (IS_BROXTON(dev)) { 5112 if (IS_BROXTON(dev_priv)) {
4779 rp_state_cap = I915_READ(BXT_RP_STATE_CAP); 5113 rp_state_cap = I915_READ(BXT_RP_STATE_CAP);
4780 dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff; 5114 dev_priv->rps.rp0_freq = (rp_state_cap >> 16) & 0xff;
4781 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff; 5115 dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
@@ -4791,8 +5125,8 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
4791 dev_priv->rps.max_freq = dev_priv->rps.rp0_freq; 5125 dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
4792 5126
4793 dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq; 5127 dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
4794 if (IS_HASWELL(dev) || IS_BROADWELL(dev) || 5128 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) ||
4795 IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { 5129 IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
4796 ret = sandybridge_pcode_read(dev_priv, 5130 ret = sandybridge_pcode_read(dev_priv,
4797 HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL, 5131 HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
4798 &ddcc_status); 5132 &ddcc_status);
@@ -4804,7 +5138,7 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
4804 dev_priv->rps.max_freq); 5138 dev_priv->rps.max_freq);
4805 } 5139 }
4806 5140
4807 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { 5141 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
4808 /* Store the frequency values in 16.66 MHZ units, which is 5142 /* Store the frequency values in 16.66 MHZ units, which is
4809 the natural hardware unit for SKL */ 5143 the natural hardware unit for SKL */
4810 dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER; 5144 dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER;
@@ -4821,7 +5155,7 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
4821 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq; 5155 dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
4822 5156
4823 if (dev_priv->rps.min_freq_softlimit == 0) { 5157 if (dev_priv->rps.min_freq_softlimit == 0) {
4824 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 5158 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
4825 dev_priv->rps.min_freq_softlimit = 5159 dev_priv->rps.min_freq_softlimit =
4826 max_t(int, dev_priv->rps.efficient_freq, 5160 max_t(int, dev_priv->rps.efficient_freq,
4827 intel_freq_opcode(dev_priv, 450)); 5161 intel_freq_opcode(dev_priv, 450));
@@ -4832,16 +5166,14 @@ static void gen6_init_rps_frequencies(struct drm_device *dev)
4832} 5166}
4833 5167
4834/* See the Gen9_GT_PM_Programming_Guide doc for the below */ 5168/* See the Gen9_GT_PM_Programming_Guide doc for the below */
4835static void gen9_enable_rps(struct drm_device *dev) 5169static void gen9_enable_rps(struct drm_i915_private *dev_priv)
4836{ 5170{
4837 struct drm_i915_private *dev_priv = dev->dev_private;
4838
4839 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 5171 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4840 5172
4841 gen6_init_rps_frequencies(dev); 5173 gen6_init_rps_frequencies(dev_priv);
4842 5174
4843 /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */ 5175 /* WaGsvDisableTurbo: Workaround to disable turbo on BXT A* */
4844 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { 5176 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
4845 /* 5177 /*
4846 * BIOS could leave the Hw Turbo enabled, so need to explicitly 5178 * BIOS could leave the Hw Turbo enabled, so need to explicitly
4847 * clear out the Control register just to avoid inconsistency 5179 * clear out the Control register just to avoid inconsistency
@@ -4851,7 +5183,7 @@ static void gen9_enable_rps(struct drm_device *dev)
4851 * if the Turbo is left enabled in the Control register, as the 5183 * if the Turbo is left enabled in the Control register, as the
4852 * Up/Down interrupts would remain masked. 5184 * Up/Down interrupts would remain masked.
4853 */ 5185 */
4854 gen9_disable_rps(dev); 5186 gen9_disable_rps(dev_priv);
4855 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 5187 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4856 return; 5188 return;
4857 } 5189 }
@@ -4870,14 +5202,13 @@ static void gen9_enable_rps(struct drm_device *dev)
4870 * Up/Down EI & threshold registers, as well as the RP_CONTROL, 5202 * Up/Down EI & threshold registers, as well as the RP_CONTROL,
4871 * RP_INTERRUPT_LIMITS & RPNSWREQ registers */ 5203 * RP_INTERRUPT_LIMITS & RPNSWREQ registers */
4872 dev_priv->rps.power = HIGH_POWER; /* force a reset */ 5204 dev_priv->rps.power = HIGH_POWER; /* force a reset */
4873 gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); 5205 gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
4874 5206
4875 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 5207 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4876} 5208}
4877 5209
4878static void gen9_enable_rc6(struct drm_device *dev) 5210static void gen9_enable_rc6(struct drm_i915_private *dev_priv)
4879{ 5211{
4880 struct drm_i915_private *dev_priv = dev->dev_private;
4881 struct intel_engine_cs *engine; 5212 struct intel_engine_cs *engine;
4882 uint32_t rc6_mask = 0; 5213 uint32_t rc6_mask = 0;
4883 5214
@@ -4894,7 +5225,7 @@ static void gen9_enable_rc6(struct drm_device *dev)
4894 /* 2b: Program RC6 thresholds.*/ 5225 /* 2b: Program RC6 thresholds.*/
4895 5226
4896 /* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */ 5227 /* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */
4897 if (IS_SKYLAKE(dev)) 5228 if (IS_SKYLAKE(dev_priv))
4898 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16); 5229 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
4899 else 5230 else
4900 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16); 5231 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
@@ -4903,7 +5234,7 @@ static void gen9_enable_rc6(struct drm_device *dev)
4903 for_each_engine(engine, dev_priv) 5234 for_each_engine(engine, dev_priv)
4904 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); 5235 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
4905 5236
4906 if (HAS_GUC_UCODE(dev)) 5237 if (HAS_GUC(dev_priv))
4907 I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA); 5238 I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA);
4908 5239
4909 I915_WRITE(GEN6_RC_SLEEP, 0); 5240 I915_WRITE(GEN6_RC_SLEEP, 0);
@@ -4913,12 +5244,12 @@ static void gen9_enable_rc6(struct drm_device *dev)
4913 I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25); 5244 I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 25);
4914 5245
4915 /* 3a: Enable RC6 */ 5246 /* 3a: Enable RC6 */
4916 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) 5247 if (intel_enable_rc6() & INTEL_RC6_ENABLE)
4917 rc6_mask = GEN6_RC_CTL_RC6_ENABLE; 5248 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
4918 DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE)); 5249 DRM_INFO("RC6 %s\n", onoff(rc6_mask & GEN6_RC_CTL_RC6_ENABLE));
4919 /* WaRsUseTimeoutMode */ 5250 /* WaRsUseTimeoutMode */
4920 if (IS_SKL_REVID(dev, 0, SKL_REVID_D0) || 5251 if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_D0) ||
4921 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { 5252 IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
4922 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us */ 5253 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us */
4923 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE | 5254 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
4924 GEN7_RC_CTL_TO_MODE | 5255 GEN7_RC_CTL_TO_MODE |
@@ -4934,19 +5265,17 @@ static void gen9_enable_rc6(struct drm_device *dev)
4934 * 3b: Enable Coarse Power Gating only when RC6 is enabled. 5265 * 3b: Enable Coarse Power Gating only when RC6 is enabled.
4935 * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6. 5266 * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
4936 */ 5267 */
4937 if (NEEDS_WaRsDisableCoarsePowerGating(dev)) 5268 if (NEEDS_WaRsDisableCoarsePowerGating(dev_priv))
4938 I915_WRITE(GEN9_PG_ENABLE, 0); 5269 I915_WRITE(GEN9_PG_ENABLE, 0);
4939 else 5270 else
4940 I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? 5271 I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
4941 (GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE) : 0); 5272 (GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE) : 0);
4942 5273
4943 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 5274 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4944
4945} 5275}
4946 5276
4947static void gen8_enable_rps(struct drm_device *dev) 5277static void gen8_enable_rps(struct drm_i915_private *dev_priv)
4948{ 5278{
4949 struct drm_i915_private *dev_priv = dev->dev_private;
4950 struct intel_engine_cs *engine; 5279 struct intel_engine_cs *engine;
4951 uint32_t rc6_mask = 0; 5280 uint32_t rc6_mask = 0;
4952 5281
@@ -4961,7 +5290,7 @@ static void gen8_enable_rps(struct drm_device *dev)
4961 I915_WRITE(GEN6_RC_CONTROL, 0); 5290 I915_WRITE(GEN6_RC_CONTROL, 0);
4962 5291
4963 /* Initialize rps frequencies */ 5292 /* Initialize rps frequencies */
4964 gen6_init_rps_frequencies(dev); 5293 gen6_init_rps_frequencies(dev_priv);
4965 5294
4966 /* 2b: Program RC6 thresholds.*/ 5295 /* 2b: Program RC6 thresholds.*/
4967 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16); 5296 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
@@ -4970,16 +5299,16 @@ static void gen8_enable_rps(struct drm_device *dev)
4970 for_each_engine(engine, dev_priv) 5299 for_each_engine(engine, dev_priv)
4971 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); 5300 I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10);
4972 I915_WRITE(GEN6_RC_SLEEP, 0); 5301 I915_WRITE(GEN6_RC_SLEEP, 0);
4973 if (IS_BROADWELL(dev)) 5302 if (IS_BROADWELL(dev_priv))
4974 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */ 5303 I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */
4975 else 5304 else
4976 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */ 5305 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */
4977 5306
4978 /* 3: Enable RC6 */ 5307 /* 3: Enable RC6 */
4979 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) 5308 if (intel_enable_rc6() & INTEL_RC6_ENABLE)
4980 rc6_mask = GEN6_RC_CTL_RC6_ENABLE; 5309 rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
4981 intel_print_rc6_info(dev, rc6_mask); 5310 intel_print_rc6_info(dev_priv, rc6_mask);
4982 if (IS_BROADWELL(dev)) 5311 if (IS_BROADWELL(dev_priv))
4983 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE | 5312 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
4984 GEN7_RC_CTL_TO_MODE | 5313 GEN7_RC_CTL_TO_MODE |
4985 rc6_mask); 5314 rc6_mask);
@@ -5020,14 +5349,13 @@ static void gen8_enable_rps(struct drm_device *dev)
5020 /* 6: Ring frequency + overclocking (our driver does this later) */ 5349 /* 6: Ring frequency + overclocking (our driver does this later) */
5021 5350
5022 dev_priv->rps.power = HIGH_POWER; /* force a reset */ 5351 dev_priv->rps.power = HIGH_POWER; /* force a reset */
5023 gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); 5352 gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
5024 5353
5025 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 5354 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5026} 5355}
5027 5356
5028static void gen6_enable_rps(struct drm_device *dev) 5357static void gen6_enable_rps(struct drm_i915_private *dev_priv)
5029{ 5358{
5030 struct drm_i915_private *dev_priv = dev->dev_private;
5031 struct intel_engine_cs *engine; 5359 struct intel_engine_cs *engine;
5032 u32 rc6vids, pcu_mbox = 0, rc6_mask = 0; 5360 u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
5033 u32 gtfifodbg; 5361 u32 gtfifodbg;
@@ -5054,7 +5382,7 @@ static void gen6_enable_rps(struct drm_device *dev)
5054 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 5382 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5055 5383
5056 /* Initialize rps frequencies */ 5384 /* Initialize rps frequencies */
5057 gen6_init_rps_frequencies(dev); 5385 gen6_init_rps_frequencies(dev_priv);
5058 5386
5059 /* disable the counters and set deterministic thresholds */ 5387 /* disable the counters and set deterministic thresholds */
5060 I915_WRITE(GEN6_RC_CONTROL, 0); 5388 I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -5070,7 +5398,7 @@ static void gen6_enable_rps(struct drm_device *dev)
5070 5398
5071 I915_WRITE(GEN6_RC_SLEEP, 0); 5399 I915_WRITE(GEN6_RC_SLEEP, 0);
5072 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000); 5400 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
5073 if (IS_IVYBRIDGE(dev)) 5401 if (IS_IVYBRIDGE(dev_priv))
5074 I915_WRITE(GEN6_RC6_THRESHOLD, 125000); 5402 I915_WRITE(GEN6_RC6_THRESHOLD, 125000);
5075 else 5403 else
5076 I915_WRITE(GEN6_RC6_THRESHOLD, 50000); 5404 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
@@ -5078,12 +5406,12 @@ static void gen6_enable_rps(struct drm_device *dev)
5078 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */ 5406 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
5079 5407
5080 /* Check if we are enabling RC6 */ 5408 /* Check if we are enabling RC6 */
5081 rc6_mode = intel_enable_rc6(dev_priv->dev); 5409 rc6_mode = intel_enable_rc6();
5082 if (rc6_mode & INTEL_RC6_ENABLE) 5410 if (rc6_mode & INTEL_RC6_ENABLE)
5083 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE; 5411 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
5084 5412
5085 /* We don't use those on Haswell */ 5413 /* We don't use those on Haswell */
5086 if (!IS_HASWELL(dev)) { 5414 if (!IS_HASWELL(dev_priv)) {
5087 if (rc6_mode & INTEL_RC6p_ENABLE) 5415 if (rc6_mode & INTEL_RC6p_ENABLE)
5088 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE; 5416 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
5089 5417
@@ -5091,7 +5419,7 @@ static void gen6_enable_rps(struct drm_device *dev)
5091 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE; 5419 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
5092 } 5420 }
5093 5421
5094 intel_print_rc6_info(dev, rc6_mask); 5422 intel_print_rc6_info(dev_priv, rc6_mask);
5095 5423
5096 I915_WRITE(GEN6_RC_CONTROL, 5424 I915_WRITE(GEN6_RC_CONTROL,
5097 rc6_mask | 5425 rc6_mask |
@@ -5115,13 +5443,13 @@ static void gen6_enable_rps(struct drm_device *dev)
5115 } 5443 }
5116 5444
5117 dev_priv->rps.power = HIGH_POWER; /* force a reset */ 5445 dev_priv->rps.power = HIGH_POWER; /* force a reset */
5118 gen6_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); 5446 gen6_set_rps(dev_priv, dev_priv->rps.idle_freq);
5119 5447
5120 rc6vids = 0; 5448 rc6vids = 0;
5121 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); 5449 ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
5122 if (IS_GEN6(dev) && ret) { 5450 if (IS_GEN6(dev_priv) && ret) {
5123 DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n"); 5451 DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n");
5124 } else if (IS_GEN6(dev) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) { 5452 } else if (IS_GEN6(dev_priv) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) {
5125 DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n", 5453 DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n",
5126 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450); 5454 GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450);
5127 rc6vids &= 0xffff00; 5455 rc6vids &= 0xffff00;
@@ -5134,9 +5462,8 @@ static void gen6_enable_rps(struct drm_device *dev)
5134 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 5462 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5135} 5463}
5136 5464
5137static void __gen6_update_ring_freq(struct drm_device *dev) 5465static void __gen6_update_ring_freq(struct drm_i915_private *dev_priv)
5138{ 5466{
5139 struct drm_i915_private *dev_priv = dev->dev_private;
5140 int min_freq = 15; 5467 int min_freq = 15;
5141 unsigned int gpu_freq; 5468 unsigned int gpu_freq;
5142 unsigned int max_ia_freq, min_ring_freq; 5469 unsigned int max_ia_freq, min_ring_freq;
@@ -5165,7 +5492,7 @@ static void __gen6_update_ring_freq(struct drm_device *dev)
5165 /* convert DDR frequency from units of 266.6MHz to bandwidth */ 5492 /* convert DDR frequency from units of 266.6MHz to bandwidth */
5166 min_ring_freq = mult_frac(min_ring_freq, 8, 3); 5493 min_ring_freq = mult_frac(min_ring_freq, 8, 3);
5167 5494
5168 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { 5495 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
5169 /* Convert GT frequency to 50 HZ units */ 5496 /* Convert GT frequency to 50 HZ units */
5170 min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER; 5497 min_gpu_freq = dev_priv->rps.min_freq / GEN9_FREQ_SCALER;
5171 max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER; 5498 max_gpu_freq = dev_priv->rps.max_freq / GEN9_FREQ_SCALER;
@@ -5183,16 +5510,16 @@ static void __gen6_update_ring_freq(struct drm_device *dev)
5183 int diff = max_gpu_freq - gpu_freq; 5510 int diff = max_gpu_freq - gpu_freq;
5184 unsigned int ia_freq = 0, ring_freq = 0; 5511 unsigned int ia_freq = 0, ring_freq = 0;
5185 5512
5186 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) { 5513 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
5187 /* 5514 /*
5188 * ring_freq = 2 * GT. ring_freq is in 100MHz units 5515 * ring_freq = 2 * GT. ring_freq is in 100MHz units
5189 * No floor required for ring frequency on SKL. 5516 * No floor required for ring frequency on SKL.
5190 */ 5517 */
5191 ring_freq = gpu_freq; 5518 ring_freq = gpu_freq;
5192 } else if (INTEL_INFO(dev)->gen >= 8) { 5519 } else if (INTEL_INFO(dev_priv)->gen >= 8) {
5193 /* max(2 * GT, DDR). NB: GT is 50MHz units */ 5520 /* max(2 * GT, DDR). NB: GT is 50MHz units */
5194 ring_freq = max(min_ring_freq, gpu_freq); 5521 ring_freq = max(min_ring_freq, gpu_freq);
5195 } else if (IS_HASWELL(dev)) { 5522 } else if (IS_HASWELL(dev_priv)) {
5196 ring_freq = mult_frac(gpu_freq, 5, 4); 5523 ring_freq = mult_frac(gpu_freq, 5, 4);
5197 ring_freq = max(min_ring_freq, ring_freq); 5524 ring_freq = max(min_ring_freq, ring_freq);
5198 /* leave ia_freq as the default, chosen by cpufreq */ 5525 /* leave ia_freq as the default, chosen by cpufreq */
@@ -5219,26 +5546,23 @@ static void __gen6_update_ring_freq(struct drm_device *dev)
5219 } 5546 }
5220} 5547}
5221 5548
5222void gen6_update_ring_freq(struct drm_device *dev) 5549void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
5223{ 5550{
5224 struct drm_i915_private *dev_priv = dev->dev_private; 5551 if (!HAS_CORE_RING_FREQ(dev_priv))
5225
5226 if (!HAS_CORE_RING_FREQ(dev))
5227 return; 5552 return;
5228 5553
5229 mutex_lock(&dev_priv->rps.hw_lock); 5554 mutex_lock(&dev_priv->rps.hw_lock);
5230 __gen6_update_ring_freq(dev); 5555 __gen6_update_ring_freq(dev_priv);
5231 mutex_unlock(&dev_priv->rps.hw_lock); 5556 mutex_unlock(&dev_priv->rps.hw_lock);
5232} 5557}
5233 5558
5234static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv) 5559static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv)
5235{ 5560{
5236 struct drm_device *dev = dev_priv->dev;
5237 u32 val, rp0; 5561 u32 val, rp0;
5238 5562
5239 val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE); 5563 val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE);
5240 5564
5241 switch (INTEL_INFO(dev)->eu_total) { 5565 switch (INTEL_INFO(dev_priv)->eu_total) {
5242 case 8: 5566 case 8:
5243 /* (2 * 4) config */ 5567 /* (2 * 4) config */
5244 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT); 5568 rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT);
@@ -5349,9 +5673,8 @@ static void cherryview_check_pctx(struct drm_i915_private *dev_priv)
5349 WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0); 5673 WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0);
5350} 5674}
5351 5675
5352static void cherryview_setup_pctx(struct drm_device *dev) 5676static void cherryview_setup_pctx(struct drm_i915_private *dev_priv)
5353{ 5677{
5354 struct drm_i915_private *dev_priv = to_i915(dev);
5355 struct i915_ggtt *ggtt = &dev_priv->ggtt; 5678 struct i915_ggtt *ggtt = &dev_priv->ggtt;
5356 unsigned long pctx_paddr, paddr; 5679 unsigned long pctx_paddr, paddr;
5357 u32 pcbr; 5680 u32 pcbr;
@@ -5370,15 +5693,14 @@ static void cherryview_setup_pctx(struct drm_device *dev)
5370 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR)); 5693 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
5371} 5694}
5372 5695
5373static void valleyview_setup_pctx(struct drm_device *dev) 5696static void valleyview_setup_pctx(struct drm_i915_private *dev_priv)
5374{ 5697{
5375 struct drm_i915_private *dev_priv = dev->dev_private;
5376 struct drm_i915_gem_object *pctx; 5698 struct drm_i915_gem_object *pctx;
5377 unsigned long pctx_paddr; 5699 unsigned long pctx_paddr;
5378 u32 pcbr; 5700 u32 pcbr;
5379 int pctx_size = 24*1024; 5701 int pctx_size = 24*1024;
5380 5702
5381 mutex_lock(&dev->struct_mutex); 5703 mutex_lock(&dev_priv->drm.struct_mutex);
5382 5704
5383 pcbr = I915_READ(VLV_PCBR); 5705 pcbr = I915_READ(VLV_PCBR);
5384 if (pcbr) { 5706 if (pcbr) {
@@ -5386,7 +5708,7 @@ static void valleyview_setup_pctx(struct drm_device *dev)
5386 int pcbr_offset; 5708 int pcbr_offset;
5387 5709
5388 pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base; 5710 pcbr_offset = (pcbr & (~4095)) - dev_priv->mm.stolen_base;
5389 pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv->dev, 5711 pctx = i915_gem_object_create_stolen_for_preallocated(&dev_priv->drm,
5390 pcbr_offset, 5712 pcbr_offset,
5391 I915_GTT_OFFSET_NONE, 5713 I915_GTT_OFFSET_NONE,
5392 pctx_size); 5714 pctx_size);
@@ -5403,7 +5725,7 @@ static void valleyview_setup_pctx(struct drm_device *dev)
5403 * overlap with other ranges, such as the frame buffer, protected 5725 * overlap with other ranges, such as the frame buffer, protected
5404 * memory, or any other relevant ranges. 5726 * memory, or any other relevant ranges.
5405 */ 5727 */
5406 pctx = i915_gem_object_create_stolen(dev, pctx_size); 5728 pctx = i915_gem_object_create_stolen(&dev_priv->drm, pctx_size);
5407 if (!pctx) { 5729 if (!pctx) {
5408 DRM_DEBUG("not enough stolen space for PCTX, disabling\n"); 5730 DRM_DEBUG("not enough stolen space for PCTX, disabling\n");
5409 goto out; 5731 goto out;
@@ -5415,13 +5737,11 @@ static void valleyview_setup_pctx(struct drm_device *dev)
5415out: 5737out:
5416 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR)); 5738 DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
5417 dev_priv->vlv_pctx = pctx; 5739 dev_priv->vlv_pctx = pctx;
5418 mutex_unlock(&dev->struct_mutex); 5740 mutex_unlock(&dev_priv->drm.struct_mutex);
5419} 5741}
5420 5742
5421static void valleyview_cleanup_pctx(struct drm_device *dev) 5743static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv)
5422{ 5744{
5423 struct drm_i915_private *dev_priv = dev->dev_private;
5424
5425 if (WARN_ON(!dev_priv->vlv_pctx)) 5745 if (WARN_ON(!dev_priv->vlv_pctx))
5426 return; 5746 return;
5427 5747
@@ -5440,12 +5760,11 @@ static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv)
5440 dev_priv->rps.gpll_ref_freq); 5760 dev_priv->rps.gpll_ref_freq);
5441} 5761}
5442 5762
5443static void valleyview_init_gt_powersave(struct drm_device *dev) 5763static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv)
5444{ 5764{
5445 struct drm_i915_private *dev_priv = dev->dev_private;
5446 u32 val; 5765 u32 val;
5447 5766
5448 valleyview_setup_pctx(dev); 5767 valleyview_setup_pctx(dev_priv);
5449 5768
5450 vlv_init_gpll_ref_freq(dev_priv); 5769 vlv_init_gpll_ref_freq(dev_priv);
5451 5770
@@ -5499,12 +5818,11 @@ static void valleyview_init_gt_powersave(struct drm_device *dev)
5499 mutex_unlock(&dev_priv->rps.hw_lock); 5818 mutex_unlock(&dev_priv->rps.hw_lock);
5500} 5819}
5501 5820
5502static void cherryview_init_gt_powersave(struct drm_device *dev) 5821static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv)
5503{ 5822{
5504 struct drm_i915_private *dev_priv = dev->dev_private;
5505 u32 val; 5823 u32 val;
5506 5824
5507 cherryview_setup_pctx(dev); 5825 cherryview_setup_pctx(dev_priv);
5508 5826
5509 vlv_init_gpll_ref_freq(dev_priv); 5827 vlv_init_gpll_ref_freq(dev_priv);
5510 5828
@@ -5564,14 +5882,13 @@ static void cherryview_init_gt_powersave(struct drm_device *dev)
5564 mutex_unlock(&dev_priv->rps.hw_lock); 5882 mutex_unlock(&dev_priv->rps.hw_lock);
5565} 5883}
5566 5884
5567static void valleyview_cleanup_gt_powersave(struct drm_device *dev) 5885static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
5568{ 5886{
5569 valleyview_cleanup_pctx(dev); 5887 valleyview_cleanup_pctx(dev_priv);
5570} 5888}
5571 5889
5572static void cherryview_enable_rps(struct drm_device *dev) 5890static void cherryview_enable_rps(struct drm_i915_private *dev_priv)
5573{ 5891{
5574 struct drm_i915_private *dev_priv = dev->dev_private;
5575 struct intel_engine_cs *engine; 5892 struct intel_engine_cs *engine;
5576 u32 gtfifodbg, val, rc6_mode = 0, pcbr; 5893 u32 gtfifodbg, val, rc6_mode = 0, pcbr;
5577 5894
@@ -5616,8 +5933,8 @@ static void cherryview_enable_rps(struct drm_device *dev)
5616 pcbr = I915_READ(VLV_PCBR); 5933 pcbr = I915_READ(VLV_PCBR);
5617 5934
5618 /* 3: Enable RC6 */ 5935 /* 3: Enable RC6 */
5619 if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) && 5936 if ((intel_enable_rc6() & INTEL_RC6_ENABLE) &&
5620 (pcbr >> VLV_PCBR_ADDR_SHIFT)) 5937 (pcbr >> VLV_PCBR_ADDR_SHIFT))
5621 rc6_mode = GEN7_RC_CTL_TO_MODE; 5938 rc6_mode = GEN7_RC_CTL_TO_MODE;
5622 5939
5623 I915_WRITE(GEN6_RC_CONTROL, rc6_mode); 5940 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
@@ -5662,14 +5979,13 @@ static void cherryview_enable_rps(struct drm_device *dev)
5662 intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq), 5979 intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
5663 dev_priv->rps.idle_freq); 5980 dev_priv->rps.idle_freq);
5664 5981
5665 valleyview_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); 5982 valleyview_set_rps(dev_priv, dev_priv->rps.idle_freq);
5666 5983
5667 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 5984 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5668} 5985}
5669 5986
5670static void valleyview_enable_rps(struct drm_device *dev) 5987static void valleyview_enable_rps(struct drm_i915_private *dev_priv)
5671{ 5988{
5672 struct drm_i915_private *dev_priv = dev->dev_private;
5673 struct intel_engine_cs *engine; 5989 struct intel_engine_cs *engine;
5674 u32 gtfifodbg, val, rc6_mode = 0; 5990 u32 gtfifodbg, val, rc6_mode = 0;
5675 5991
@@ -5722,10 +6038,10 @@ static void valleyview_enable_rps(struct drm_device *dev)
5722 VLV_MEDIA_RC6_COUNT_EN | 6038 VLV_MEDIA_RC6_COUNT_EN |
5723 VLV_RENDER_RC6_COUNT_EN)); 6039 VLV_RENDER_RC6_COUNT_EN));
5724 6040
5725 if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE) 6041 if (intel_enable_rc6() & INTEL_RC6_ENABLE)
5726 rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL; 6042 rc6_mode = GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
5727 6043
5728 intel_print_rc6_info(dev, rc6_mode); 6044 intel_print_rc6_info(dev_priv, rc6_mode);
5729 6045
5730 I915_WRITE(GEN6_RC_CONTROL, rc6_mode); 6046 I915_WRITE(GEN6_RC_CONTROL, rc6_mode);
5731 6047
@@ -5752,7 +6068,7 @@ static void valleyview_enable_rps(struct drm_device *dev)
5752 intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq), 6068 intel_gpu_freq(dev_priv, dev_priv->rps.idle_freq),
5753 dev_priv->rps.idle_freq); 6069 dev_priv->rps.idle_freq);
5754 6070
5755 valleyview_set_rps(dev_priv->dev, dev_priv->rps.idle_freq); 6071 valleyview_set_rps(dev_priv, dev_priv->rps.idle_freq);
5756 6072
5757 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 6073 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5758} 6074}
@@ -5842,10 +6158,9 @@ static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv)
5842 6158
5843unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) 6159unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
5844{ 6160{
5845 struct drm_device *dev = dev_priv->dev;
5846 unsigned long val; 6161 unsigned long val;
5847 6162
5848 if (INTEL_INFO(dev)->gen != 5) 6163 if (INTEL_INFO(dev_priv)->gen != 5)
5849 return 0; 6164 return 0;
5850 6165
5851 spin_lock_irq(&mchdev_lock); 6166 spin_lock_irq(&mchdev_lock);
@@ -5885,11 +6200,10 @@ static int _pxvid_to_vd(u8 pxvid)
5885 6200
5886static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid) 6201static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
5887{ 6202{
5888 struct drm_device *dev = dev_priv->dev;
5889 const int vd = _pxvid_to_vd(pxvid); 6203 const int vd = _pxvid_to_vd(pxvid);
5890 const int vm = vd - 1125; 6204 const int vm = vd - 1125;
5891 6205
5892 if (INTEL_INFO(dev)->is_mobile) 6206 if (INTEL_INFO(dev_priv)->is_mobile)
5893 return vm > 0 ? vm : 0; 6207 return vm > 0 ? vm : 0;
5894 6208
5895 return vd; 6209 return vd;
@@ -5930,9 +6244,7 @@ static void __i915_update_gfx_val(struct drm_i915_private *dev_priv)
5930 6244
5931void i915_update_gfx_val(struct drm_i915_private *dev_priv) 6245void i915_update_gfx_val(struct drm_i915_private *dev_priv)
5932{ 6246{
5933 struct drm_device *dev = dev_priv->dev; 6247 if (INTEL_INFO(dev_priv)->gen != 5)
5934
5935 if (INTEL_INFO(dev)->gen != 5)
5936 return; 6248 return;
5937 6249
5938 spin_lock_irq(&mchdev_lock); 6250 spin_lock_irq(&mchdev_lock);
@@ -5981,10 +6293,9 @@ static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv)
5981 6293
5982unsigned long i915_gfx_val(struct drm_i915_private *dev_priv) 6294unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
5983{ 6295{
5984 struct drm_device *dev = dev_priv->dev;
5985 unsigned long val; 6296 unsigned long val;
5986 6297
5987 if (INTEL_INFO(dev)->gen != 5) 6298 if (INTEL_INFO(dev_priv)->gen != 5)
5988 return 0; 6299 return 0;
5989 6300
5990 spin_lock_irq(&mchdev_lock); 6301 spin_lock_irq(&mchdev_lock);
@@ -6125,7 +6436,7 @@ bool i915_gpu_turbo_disable(void)
6125 6436
6126 dev_priv->ips.max_delay = dev_priv->ips.fstart; 6437 dev_priv->ips.max_delay = dev_priv->ips.fstart;
6127 6438
6128 if (!ironlake_set_drps(dev_priv->dev, dev_priv->ips.fstart)) 6439 if (!ironlake_set_drps(dev_priv, dev_priv->ips.fstart))
6129 ret = false; 6440 ret = false;
6130 6441
6131out_unlock: 6442out_unlock:
@@ -6173,9 +6484,8 @@ void intel_gpu_ips_teardown(void)
6173 spin_unlock_irq(&mchdev_lock); 6484 spin_unlock_irq(&mchdev_lock);
6174} 6485}
6175 6486
6176static void intel_init_emon(struct drm_device *dev) 6487static void intel_init_emon(struct drm_i915_private *dev_priv)
6177{ 6488{
6178 struct drm_i915_private *dev_priv = dev->dev_private;
6179 u32 lcfuse; 6489 u32 lcfuse;
6180 u8 pxw[16]; 6490 u8 pxw[16];
6181 int i; 6491 int i;
@@ -6244,10 +6554,8 @@ static void intel_init_emon(struct drm_device *dev)
6244 dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK); 6554 dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK);
6245} 6555}
6246 6556
6247void intel_init_gt_powersave(struct drm_device *dev) 6557void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
6248{ 6558{
6249 struct drm_i915_private *dev_priv = dev->dev_private;
6250
6251 /* 6559 /*
6252 * RPM depends on RC6 to save restore the GT HW context, so make RC6 a 6560 * RPM depends on RC6 to save restore the GT HW context, so make RC6 a
6253 * requirement. 6561 * requirement.
@@ -6257,74 +6565,66 @@ void intel_init_gt_powersave(struct drm_device *dev)
6257 intel_runtime_pm_get(dev_priv); 6565 intel_runtime_pm_get(dev_priv);
6258 } 6566 }
6259 6567
6260 if (IS_CHERRYVIEW(dev)) 6568 if (IS_CHERRYVIEW(dev_priv))
6261 cherryview_init_gt_powersave(dev); 6569 cherryview_init_gt_powersave(dev_priv);
6262 else if (IS_VALLEYVIEW(dev)) 6570 else if (IS_VALLEYVIEW(dev_priv))
6263 valleyview_init_gt_powersave(dev); 6571 valleyview_init_gt_powersave(dev_priv);
6264} 6572}
6265 6573
6266void intel_cleanup_gt_powersave(struct drm_device *dev) 6574void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
6267{ 6575{
6268 struct drm_i915_private *dev_priv = dev->dev_private; 6576 if (IS_CHERRYVIEW(dev_priv))
6269
6270 if (IS_CHERRYVIEW(dev))
6271 return; 6577 return;
6272 else if (IS_VALLEYVIEW(dev)) 6578 else if (IS_VALLEYVIEW(dev_priv))
6273 valleyview_cleanup_gt_powersave(dev); 6579 valleyview_cleanup_gt_powersave(dev_priv);
6274 6580
6275 if (!i915.enable_rc6) 6581 if (!i915.enable_rc6)
6276 intel_runtime_pm_put(dev_priv); 6582 intel_runtime_pm_put(dev_priv);
6277} 6583}
6278 6584
6279static void gen6_suspend_rps(struct drm_device *dev) 6585static void gen6_suspend_rps(struct drm_i915_private *dev_priv)
6280{ 6586{
6281 struct drm_i915_private *dev_priv = dev->dev_private;
6282
6283 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 6587 flush_delayed_work(&dev_priv->rps.delayed_resume_work);
6284 6588
6285 gen6_disable_rps_interrupts(dev); 6589 gen6_disable_rps_interrupts(dev_priv);
6286} 6590}
6287 6591
6288/** 6592/**
6289 * intel_suspend_gt_powersave - suspend PM work and helper threads 6593 * intel_suspend_gt_powersave - suspend PM work and helper threads
6290 * @dev: drm device 6594 * @dev_priv: i915 device
6291 * 6595 *
6292 * We don't want to disable RC6 or other features here, we just want 6596 * We don't want to disable RC6 or other features here, we just want
6293 * to make sure any work we've queued has finished and won't bother 6597 * to make sure any work we've queued has finished and won't bother
6294 * us while we're suspended. 6598 * us while we're suspended.
6295 */ 6599 */
6296void intel_suspend_gt_powersave(struct drm_device *dev) 6600void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv)
6297{ 6601{
6298 struct drm_i915_private *dev_priv = dev->dev_private; 6602 if (INTEL_GEN(dev_priv) < 6)
6299
6300 if (INTEL_INFO(dev)->gen < 6)
6301 return; 6603 return;
6302 6604
6303 gen6_suspend_rps(dev); 6605 gen6_suspend_rps(dev_priv);
6304 6606
6305 /* Force GPU to min freq during suspend */ 6607 /* Force GPU to min freq during suspend */
6306 gen6_rps_idle(dev_priv); 6608 gen6_rps_idle(dev_priv);
6307} 6609}
6308 6610
6309void intel_disable_gt_powersave(struct drm_device *dev) 6611void intel_disable_gt_powersave(struct drm_i915_private *dev_priv)
6310{ 6612{
6311 struct drm_i915_private *dev_priv = dev->dev_private; 6613 if (IS_IRONLAKE_M(dev_priv)) {
6312 6614 ironlake_disable_drps(dev_priv);
6313 if (IS_IRONLAKE_M(dev)) { 6615 } else if (INTEL_INFO(dev_priv)->gen >= 6) {
6314 ironlake_disable_drps(dev); 6616 intel_suspend_gt_powersave(dev_priv);
6315 } else if (INTEL_INFO(dev)->gen >= 6) {
6316 intel_suspend_gt_powersave(dev);
6317 6617
6318 mutex_lock(&dev_priv->rps.hw_lock); 6618 mutex_lock(&dev_priv->rps.hw_lock);
6319 if (INTEL_INFO(dev)->gen >= 9) { 6619 if (INTEL_INFO(dev_priv)->gen >= 9) {
6320 gen9_disable_rc6(dev); 6620 gen9_disable_rc6(dev_priv);
6321 gen9_disable_rps(dev); 6621 gen9_disable_rps(dev_priv);
6322 } else if (IS_CHERRYVIEW(dev)) 6622 } else if (IS_CHERRYVIEW(dev_priv))
6323 cherryview_disable_rps(dev); 6623 cherryview_disable_rps(dev_priv);
6324 else if (IS_VALLEYVIEW(dev)) 6624 else if (IS_VALLEYVIEW(dev_priv))
6325 valleyview_disable_rps(dev); 6625 valleyview_disable_rps(dev_priv);
6326 else 6626 else
6327 gen6_disable_rps(dev); 6627 gen6_disable_rps(dev_priv);
6328 6628
6329 dev_priv->rps.enabled = false; 6629 dev_priv->rps.enabled = false;
6330 mutex_unlock(&dev_priv->rps.hw_lock); 6630 mutex_unlock(&dev_priv->rps.hw_lock);
@@ -6336,27 +6636,26 @@ static void intel_gen6_powersave_work(struct work_struct *work)
6336 struct drm_i915_private *dev_priv = 6636 struct drm_i915_private *dev_priv =
6337 container_of(work, struct drm_i915_private, 6637 container_of(work, struct drm_i915_private,
6338 rps.delayed_resume_work.work); 6638 rps.delayed_resume_work.work);
6339 struct drm_device *dev = dev_priv->dev;
6340 6639
6341 mutex_lock(&dev_priv->rps.hw_lock); 6640 mutex_lock(&dev_priv->rps.hw_lock);
6342 6641
6343 gen6_reset_rps_interrupts(dev); 6642 gen6_reset_rps_interrupts(dev_priv);
6344 6643
6345 if (IS_CHERRYVIEW(dev)) { 6644 if (IS_CHERRYVIEW(dev_priv)) {
6346 cherryview_enable_rps(dev); 6645 cherryview_enable_rps(dev_priv);
6347 } else if (IS_VALLEYVIEW(dev)) { 6646 } else if (IS_VALLEYVIEW(dev_priv)) {
6348 valleyview_enable_rps(dev); 6647 valleyview_enable_rps(dev_priv);
6349 } else if (INTEL_INFO(dev)->gen >= 9) { 6648 } else if (INTEL_INFO(dev_priv)->gen >= 9) {
6350 gen9_enable_rc6(dev); 6649 gen9_enable_rc6(dev_priv);
6351 gen9_enable_rps(dev); 6650 gen9_enable_rps(dev_priv);
6352 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) 6651 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
6353 __gen6_update_ring_freq(dev); 6652 __gen6_update_ring_freq(dev_priv);
6354 } else if (IS_BROADWELL(dev)) { 6653 } else if (IS_BROADWELL(dev_priv)) {
6355 gen8_enable_rps(dev); 6654 gen8_enable_rps(dev_priv);
6356 __gen6_update_ring_freq(dev); 6655 __gen6_update_ring_freq(dev_priv);
6357 } else { 6656 } else {
6358 gen6_enable_rps(dev); 6657 gen6_enable_rps(dev_priv);
6359 __gen6_update_ring_freq(dev); 6658 __gen6_update_ring_freq(dev_priv);
6360 } 6659 }
6361 6660
6362 WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq); 6661 WARN_ON(dev_priv->rps.max_freq < dev_priv->rps.min_freq);
@@ -6367,27 +6666,25 @@ static void intel_gen6_powersave_work(struct work_struct *work)
6367 6666
6368 dev_priv->rps.enabled = true; 6667 dev_priv->rps.enabled = true;
6369 6668
6370 gen6_enable_rps_interrupts(dev); 6669 gen6_enable_rps_interrupts(dev_priv);
6371 6670
6372 mutex_unlock(&dev_priv->rps.hw_lock); 6671 mutex_unlock(&dev_priv->rps.hw_lock);
6373 6672
6374 intel_runtime_pm_put(dev_priv); 6673 intel_runtime_pm_put(dev_priv);
6375} 6674}
6376 6675
6377void intel_enable_gt_powersave(struct drm_device *dev) 6676void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
6378{ 6677{
6379 struct drm_i915_private *dev_priv = dev->dev_private;
6380
6381 /* Powersaving is controlled by the host when inside a VM */ 6678 /* Powersaving is controlled by the host when inside a VM */
6382 if (intel_vgpu_active(dev)) 6679 if (intel_vgpu_active(dev_priv))
6383 return; 6680 return;
6384 6681
6385 if (IS_IRONLAKE_M(dev)) { 6682 if (IS_IRONLAKE_M(dev_priv)) {
6386 ironlake_enable_drps(dev); 6683 ironlake_enable_drps(dev_priv);
6387 mutex_lock(&dev->struct_mutex); 6684 mutex_lock(&dev_priv->drm.struct_mutex);
6388 intel_init_emon(dev); 6685 intel_init_emon(dev_priv);
6389 mutex_unlock(&dev->struct_mutex); 6686 mutex_unlock(&dev_priv->drm.struct_mutex);
6390 } else if (INTEL_INFO(dev)->gen >= 6) { 6687 } else if (INTEL_INFO(dev_priv)->gen >= 6) {
6391 /* 6688 /*
6392 * PCU communication is slow and this doesn't need to be 6689 * PCU communication is slow and this doesn't need to be
6393 * done at any specific time, so do this out of our fast path 6690 * done at any specific time, so do this out of our fast path
@@ -6406,20 +6703,18 @@ void intel_enable_gt_powersave(struct drm_device *dev)
6406 } 6703 }
6407} 6704}
6408 6705
6409void intel_reset_gt_powersave(struct drm_device *dev) 6706void intel_reset_gt_powersave(struct drm_i915_private *dev_priv)
6410{ 6707{
6411 struct drm_i915_private *dev_priv = dev->dev_private; 6708 if (INTEL_INFO(dev_priv)->gen < 6)
6412
6413 if (INTEL_INFO(dev)->gen < 6)
6414 return; 6709 return;
6415 6710
6416 gen6_suspend_rps(dev); 6711 gen6_suspend_rps(dev_priv);
6417 dev_priv->rps.enabled = false; 6712 dev_priv->rps.enabled = false;
6418} 6713}
6419 6714
6420static void ibx_init_clock_gating(struct drm_device *dev) 6715static void ibx_init_clock_gating(struct drm_device *dev)
6421{ 6716{
6422 struct drm_i915_private *dev_priv = dev->dev_private; 6717 struct drm_i915_private *dev_priv = to_i915(dev);
6423 6718
6424 /* 6719 /*
6425 * On Ibex Peak and Cougar Point, we need to disable clock 6720 * On Ibex Peak and Cougar Point, we need to disable clock
@@ -6431,7 +6726,7 @@ static void ibx_init_clock_gating(struct drm_device *dev)
6431 6726
6432static void g4x_disable_trickle_feed(struct drm_device *dev) 6727static void g4x_disable_trickle_feed(struct drm_device *dev)
6433{ 6728{
6434 struct drm_i915_private *dev_priv = dev->dev_private; 6729 struct drm_i915_private *dev_priv = to_i915(dev);
6435 enum pipe pipe; 6730 enum pipe pipe;
6436 6731
6437 for_each_pipe(dev_priv, pipe) { 6732 for_each_pipe(dev_priv, pipe) {
@@ -6446,7 +6741,7 @@ static void g4x_disable_trickle_feed(struct drm_device *dev)
6446 6741
6447static void ilk_init_lp_watermarks(struct drm_device *dev) 6742static void ilk_init_lp_watermarks(struct drm_device *dev)
6448{ 6743{
6449 struct drm_i915_private *dev_priv = dev->dev_private; 6744 struct drm_i915_private *dev_priv = to_i915(dev);
6450 6745
6451 I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN); 6746 I915_WRITE(WM3_LP_ILK, I915_READ(WM3_LP_ILK) & ~WM1_LP_SR_EN);
6452 I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN); 6747 I915_WRITE(WM2_LP_ILK, I915_READ(WM2_LP_ILK) & ~WM1_LP_SR_EN);
@@ -6460,7 +6755,7 @@ static void ilk_init_lp_watermarks(struct drm_device *dev)
6460 6755
6461static void ironlake_init_clock_gating(struct drm_device *dev) 6756static void ironlake_init_clock_gating(struct drm_device *dev)
6462{ 6757{
6463 struct drm_i915_private *dev_priv = dev->dev_private; 6758 struct drm_i915_private *dev_priv = to_i915(dev);
6464 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; 6759 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
6465 6760
6466 /* 6761 /*
@@ -6534,7 +6829,7 @@ static void ironlake_init_clock_gating(struct drm_device *dev)
6534 6829
6535static void cpt_init_clock_gating(struct drm_device *dev) 6830static void cpt_init_clock_gating(struct drm_device *dev)
6536{ 6831{
6537 struct drm_i915_private *dev_priv = dev->dev_private; 6832 struct drm_i915_private *dev_priv = to_i915(dev);
6538 int pipe; 6833 int pipe;
6539 uint32_t val; 6834 uint32_t val;
6540 6835
@@ -6571,7 +6866,7 @@ static void cpt_init_clock_gating(struct drm_device *dev)
6571 6866
6572static void gen6_check_mch_setup(struct drm_device *dev) 6867static void gen6_check_mch_setup(struct drm_device *dev)
6573{ 6868{
6574 struct drm_i915_private *dev_priv = dev->dev_private; 6869 struct drm_i915_private *dev_priv = to_i915(dev);
6575 uint32_t tmp; 6870 uint32_t tmp;
6576 6871
6577 tmp = I915_READ(MCH_SSKPD); 6872 tmp = I915_READ(MCH_SSKPD);
@@ -6582,7 +6877,7 @@ static void gen6_check_mch_setup(struct drm_device *dev)
6582 6877
6583static void gen6_init_clock_gating(struct drm_device *dev) 6878static void gen6_init_clock_gating(struct drm_device *dev)
6584{ 6879{
6585 struct drm_i915_private *dev_priv = dev->dev_private; 6880 struct drm_i915_private *dev_priv = to_i915(dev);
6586 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE; 6881 uint32_t dspclk_gate = ILK_VRHUNIT_CLOCK_GATE_DISABLE;
6587 6882
6588 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate); 6883 I915_WRITE(ILK_DSPCLK_GATE_D, dspclk_gate);
@@ -6697,7 +6992,7 @@ static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
6697 6992
6698static void lpt_init_clock_gating(struct drm_device *dev) 6993static void lpt_init_clock_gating(struct drm_device *dev)
6699{ 6994{
6700 struct drm_i915_private *dev_priv = dev->dev_private; 6995 struct drm_i915_private *dev_priv = to_i915(dev);
6701 6996
6702 /* 6997 /*
6703 * TODO: this bit should only be enabled when really needed, then 6998 * TODO: this bit should only be enabled when really needed, then
@@ -6716,7 +7011,7 @@ static void lpt_init_clock_gating(struct drm_device *dev)
6716 7011
6717static void lpt_suspend_hw(struct drm_device *dev) 7012static void lpt_suspend_hw(struct drm_device *dev)
6718{ 7013{
6719 struct drm_i915_private *dev_priv = dev->dev_private; 7014 struct drm_i915_private *dev_priv = to_i915(dev);
6720 7015
6721 if (HAS_PCH_LPT_LP(dev)) { 7016 if (HAS_PCH_LPT_LP(dev)) {
6722 uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D); 7017 uint32_t val = I915_READ(SOUTH_DSPCLK_GATE_D);
@@ -6726,6 +7021,29 @@ static void lpt_suspend_hw(struct drm_device *dev)
6726 } 7021 }
6727} 7022}
6728 7023
7024static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv,
7025 int general_prio_credits,
7026 int high_prio_credits)
7027{
7028 u32 misccpctl;
7029
7030 /* WaTempDisableDOPClkGating:bdw */
7031 misccpctl = I915_READ(GEN7_MISCCPCTL);
7032 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
7033
7034 I915_WRITE(GEN8_L3SQCREG1,
7035 L3_GENERAL_PRIO_CREDITS(general_prio_credits) |
7036 L3_HIGH_PRIO_CREDITS(high_prio_credits));
7037
7038 /*
7039 * Wait at least 100 clocks before re-enabling clock gating.
7040 * See the definition of L3SQCREG1 in BSpec.
7041 */
7042 POSTING_READ(GEN8_L3SQCREG1);
7043 udelay(1);
7044 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
7045}
7046
6729static void kabylake_init_clock_gating(struct drm_device *dev) 7047static void kabylake_init_clock_gating(struct drm_device *dev)
6730{ 7048{
6731 struct drm_i915_private *dev_priv = dev->dev_private; 7049 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -6753,6 +7071,10 @@ static void skylake_init_clock_gating(struct drm_device *dev)
6753 7071
6754 gen9_init_clock_gating(dev); 7072 gen9_init_clock_gating(dev);
6755 7073
7074 /* WAC6entrylatency:skl */
7075 I915_WRITE(FBC_LLC_READ_CTRL, I915_READ(FBC_LLC_READ_CTRL) |
7076 FBC_LLC_FULLY_OPEN);
7077
6756 /* WaFbcNukeOnHostModify:skl */ 7078 /* WaFbcNukeOnHostModify:skl */
6757 I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) | 7079 I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
6758 ILK_DPFC_NUKE_ON_ANY_MODIFICATION); 7080 ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
@@ -6760,9 +7082,8 @@ static void skylake_init_clock_gating(struct drm_device *dev)
6760 7082
6761static void broadwell_init_clock_gating(struct drm_device *dev) 7083static void broadwell_init_clock_gating(struct drm_device *dev)
6762{ 7084{
6763 struct drm_i915_private *dev_priv = dev->dev_private; 7085 struct drm_i915_private *dev_priv = to_i915(dev);
6764 enum pipe pipe; 7086 enum pipe pipe;
6765 uint32_t misccpctl;
6766 7087
6767 ilk_init_lp_watermarks(dev); 7088 ilk_init_lp_watermarks(dev);
6768 7089
@@ -6793,20 +7114,8 @@ static void broadwell_init_clock_gating(struct drm_device *dev)
6793 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | 7114 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
6794 GEN8_SDEUNIT_CLOCK_GATE_DISABLE); 7115 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
6795 7116
6796 /* 7117 /* WaProgramL3SqcReg1Default:bdw */
6797 * WaProgramL3SqcReg1Default:bdw 7118 gen8_set_l3sqc_credits(dev_priv, 30, 2);
6798 * WaTempDisableDOPClkGating:bdw
6799 */
6800 misccpctl = I915_READ(GEN7_MISCCPCTL);
6801 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
6802 I915_WRITE(GEN8_L3SQCREG1, BDW_WA_L3SQCREG1_DEFAULT);
6803 /*
6804 * Wait at least 100 clocks before re-enabling clock gating. See
6805 * the definition of L3SQCREG1 in BSpec.
6806 */
6807 POSTING_READ(GEN8_L3SQCREG1);
6808 udelay(1);
6809 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
6810 7119
6811 /* 7120 /*
6812 * WaGttCachingOffByDefault:bdw 7121 * WaGttCachingOffByDefault:bdw
@@ -6815,12 +7124,16 @@ static void broadwell_init_clock_gating(struct drm_device *dev)
6815 */ 7124 */
6816 I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL); 7125 I915_WRITE(HSW_GTT_CACHE_EN, GTT_CACHE_EN_ALL);
6817 7126
7127 /* WaKVMNotificationOnConfigChange:bdw */
7128 I915_WRITE(CHICKEN_PAR2_1, I915_READ(CHICKEN_PAR2_1)
7129 | KVM_CONFIG_CHANGE_NOTIFICATION_SELECT);
7130
6818 lpt_init_clock_gating(dev); 7131 lpt_init_clock_gating(dev);
6819} 7132}
6820 7133
6821static void haswell_init_clock_gating(struct drm_device *dev) 7134static void haswell_init_clock_gating(struct drm_device *dev)
6822{ 7135{
6823 struct drm_i915_private *dev_priv = dev->dev_private; 7136 struct drm_i915_private *dev_priv = to_i915(dev);
6824 7137
6825 ilk_init_lp_watermarks(dev); 7138 ilk_init_lp_watermarks(dev);
6826 7139
@@ -6876,7 +7189,7 @@ static void haswell_init_clock_gating(struct drm_device *dev)
6876 7189
6877static void ivybridge_init_clock_gating(struct drm_device *dev) 7190static void ivybridge_init_clock_gating(struct drm_device *dev)
6878{ 7191{
6879 struct drm_i915_private *dev_priv = dev->dev_private; 7192 struct drm_i915_private *dev_priv = to_i915(dev);
6880 uint32_t snpcr; 7193 uint32_t snpcr;
6881 7194
6882 ilk_init_lp_watermarks(dev); 7195 ilk_init_lp_watermarks(dev);
@@ -6974,7 +7287,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
6974 7287
6975static void valleyview_init_clock_gating(struct drm_device *dev) 7288static void valleyview_init_clock_gating(struct drm_device *dev)
6976{ 7289{
6977 struct drm_i915_private *dev_priv = dev->dev_private; 7290 struct drm_i915_private *dev_priv = to_i915(dev);
6978 7291
6979 /* WaDisableEarlyCull:vlv */ 7292 /* WaDisableEarlyCull:vlv */
6980 I915_WRITE(_3D_CHICKEN3, 7293 I915_WRITE(_3D_CHICKEN3,
@@ -7056,7 +7369,7 @@ static void valleyview_init_clock_gating(struct drm_device *dev)
7056 7369
7057static void cherryview_init_clock_gating(struct drm_device *dev) 7370static void cherryview_init_clock_gating(struct drm_device *dev)
7058{ 7371{
7059 struct drm_i915_private *dev_priv = dev->dev_private; 7372 struct drm_i915_private *dev_priv = to_i915(dev);
7060 7373
7061 /* WaVSRefCountFullforceMissDisable:chv */ 7374 /* WaVSRefCountFullforceMissDisable:chv */
7062 /* WaDSRefCountFullforceMissDisable:chv */ 7375 /* WaDSRefCountFullforceMissDisable:chv */
@@ -7077,6 +7390,13 @@ static void cherryview_init_clock_gating(struct drm_device *dev)
7077 GEN8_SDEUNIT_CLOCK_GATE_DISABLE); 7390 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
7078 7391
7079 /* 7392 /*
7393 * WaProgramL3SqcReg1Default:chv
7394 * See gfxspecs/Related Documents/Performance Guide/
7395 * LSQC Setting Recommendations.
7396 */
7397 gen8_set_l3sqc_credits(dev_priv, 38, 2);
7398
7399 /*
7080 * GTT cache may not work with big pages, so if those 7400 * GTT cache may not work with big pages, so if those
7081 * are ever enabled GTT cache may need to be disabled. 7401 * are ever enabled GTT cache may need to be disabled.
7082 */ 7402 */
@@ -7085,7 +7405,7 @@ static void cherryview_init_clock_gating(struct drm_device *dev)
7085 7405
7086static void g4x_init_clock_gating(struct drm_device *dev) 7406static void g4x_init_clock_gating(struct drm_device *dev)
7087{ 7407{
7088 struct drm_i915_private *dev_priv = dev->dev_private; 7408 struct drm_i915_private *dev_priv = to_i915(dev);
7089 uint32_t dspclk_gate; 7409 uint32_t dspclk_gate;
7090 7410
7091 I915_WRITE(RENCLK_GATE_D1, 0); 7411 I915_WRITE(RENCLK_GATE_D1, 0);
@@ -7112,7 +7432,7 @@ static void g4x_init_clock_gating(struct drm_device *dev)
7112 7432
7113static void crestline_init_clock_gating(struct drm_device *dev) 7433static void crestline_init_clock_gating(struct drm_device *dev)
7114{ 7434{
7115 struct drm_i915_private *dev_priv = dev->dev_private; 7435 struct drm_i915_private *dev_priv = to_i915(dev);
7116 7436
7117 I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE); 7437 I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
7118 I915_WRITE(RENCLK_GATE_D2, 0); 7438 I915_WRITE(RENCLK_GATE_D2, 0);
@@ -7128,7 +7448,7 @@ static void crestline_init_clock_gating(struct drm_device *dev)
7128 7448
7129static void broadwater_init_clock_gating(struct drm_device *dev) 7449static void broadwater_init_clock_gating(struct drm_device *dev)
7130{ 7450{
7131 struct drm_i915_private *dev_priv = dev->dev_private; 7451 struct drm_i915_private *dev_priv = to_i915(dev);
7132 7452
7133 I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE | 7453 I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
7134 I965_RCC_CLOCK_GATE_DISABLE | 7454 I965_RCC_CLOCK_GATE_DISABLE |
@@ -7145,7 +7465,7 @@ static void broadwater_init_clock_gating(struct drm_device *dev)
7145 7465
7146static void gen3_init_clock_gating(struct drm_device *dev) 7466static void gen3_init_clock_gating(struct drm_device *dev)
7147{ 7467{
7148 struct drm_i915_private *dev_priv = dev->dev_private; 7468 struct drm_i915_private *dev_priv = to_i915(dev);
7149 u32 dstate = I915_READ(D_STATE); 7469 u32 dstate = I915_READ(D_STATE);
7150 7470
7151 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING | 7471 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
@@ -7170,7 +7490,7 @@ static void gen3_init_clock_gating(struct drm_device *dev)
7170 7490
7171static void i85x_init_clock_gating(struct drm_device *dev) 7491static void i85x_init_clock_gating(struct drm_device *dev)
7172{ 7492{
7173 struct drm_i915_private *dev_priv = dev->dev_private; 7493 struct drm_i915_private *dev_priv = to_i915(dev);
7174 7494
7175 I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE); 7495 I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
7176 7496
@@ -7184,7 +7504,7 @@ static void i85x_init_clock_gating(struct drm_device *dev)
7184 7504
7185static void i830_init_clock_gating(struct drm_device *dev) 7505static void i830_init_clock_gating(struct drm_device *dev)
7186{ 7506{
7187 struct drm_i915_private *dev_priv = dev->dev_private; 7507 struct drm_i915_private *dev_priv = to_i915(dev);
7188 7508
7189 I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE); 7509 I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
7190 7510
@@ -7195,7 +7515,7 @@ static void i830_init_clock_gating(struct drm_device *dev)
7195 7515
7196void intel_init_clock_gating(struct drm_device *dev) 7516void intel_init_clock_gating(struct drm_device *dev)
7197{ 7517{
7198 struct drm_i915_private *dev_priv = dev->dev_private; 7518 struct drm_i915_private *dev_priv = to_i915(dev);
7199 7519
7200 dev_priv->display.init_clock_gating(dev); 7520 dev_priv->display.init_clock_gating(dev);
7201} 7521}
@@ -7263,7 +7583,7 @@ void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
7263/* Set up chip specific power management-related functions */ 7583/* Set up chip specific power management-related functions */
7264void intel_init_pm(struct drm_device *dev) 7584void intel_init_pm(struct drm_device *dev)
7265{ 7585{
7266 struct drm_i915_private *dev_priv = dev->dev_private; 7586 struct drm_i915_private *dev_priv = to_i915(dev);
7267 7587
7268 intel_fbc_init(dev_priv); 7588 intel_fbc_init(dev_priv);
7269 7589
@@ -7277,6 +7597,7 @@ void intel_init_pm(struct drm_device *dev)
7277 if (INTEL_INFO(dev)->gen >= 9) { 7597 if (INTEL_INFO(dev)->gen >= 9) {
7278 skl_setup_wm_latency(dev); 7598 skl_setup_wm_latency(dev);
7279 dev_priv->display.update_wm = skl_update_wm; 7599 dev_priv->display.update_wm = skl_update_wm;
7600 dev_priv->display.compute_global_watermarks = skl_compute_wm;
7280 } else if (HAS_PCH_SPLIT(dev)) { 7601 } else if (HAS_PCH_SPLIT(dev)) {
7281 ilk_setup_wm_latency(dev); 7602 ilk_setup_wm_latency(dev);
7282 7603
@@ -7340,46 +7661,59 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val
7340{ 7661{
7341 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 7662 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
7342 7663
7343 if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) { 7664 /* GEN6_PCODE_* are outside of the forcewake domain, we can
7665 * use the fw I915_READ variants to reduce the amount of work
7666 * required when reading/writing.
7667 */
7668
7669 if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
7344 DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n"); 7670 DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed\n");
7345 return -EAGAIN; 7671 return -EAGAIN;
7346 } 7672 }
7347 7673
7348 I915_WRITE(GEN6_PCODE_DATA, *val); 7674 I915_WRITE_FW(GEN6_PCODE_DATA, *val);
7349 I915_WRITE(GEN6_PCODE_DATA1, 0); 7675 I915_WRITE_FW(GEN6_PCODE_DATA1, 0);
7350 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox); 7676 I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
7351 7677
7352 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 7678 if (intel_wait_for_register_fw(dev_priv,
7353 500)) { 7679 GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
7680 500)) {
7354 DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox); 7681 DRM_ERROR("timeout waiting for pcode read (%d) to finish\n", mbox);
7355 return -ETIMEDOUT; 7682 return -ETIMEDOUT;
7356 } 7683 }
7357 7684
7358 *val = I915_READ(GEN6_PCODE_DATA); 7685 *val = I915_READ_FW(GEN6_PCODE_DATA);
7359 I915_WRITE(GEN6_PCODE_DATA, 0); 7686 I915_WRITE_FW(GEN6_PCODE_DATA, 0);
7360 7687
7361 return 0; 7688 return 0;
7362} 7689}
7363 7690
7364int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val) 7691int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
7692 u32 mbox, u32 val)
7365{ 7693{
7366 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 7694 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
7367 7695
7368 if (I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) { 7696 /* GEN6_PCODE_* are outside of the forcewake domain, we can
7697 * use the fw I915_READ variants to reduce the amount of work
7698 * required when reading/writing.
7699 */
7700
7701 if (I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) {
7369 DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n"); 7702 DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed\n");
7370 return -EAGAIN; 7703 return -EAGAIN;
7371 } 7704 }
7372 7705
7373 I915_WRITE(GEN6_PCODE_DATA, val); 7706 I915_WRITE_FW(GEN6_PCODE_DATA, val);
7374 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox); 7707 I915_WRITE_FW(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
7375 7708
7376 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 7709 if (intel_wait_for_register_fw(dev_priv,
7377 500)) { 7710 GEN6_PCODE_MAILBOX, GEN6_PCODE_READY, 0,
7711 500)) {
7378 DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox); 7712 DRM_ERROR("timeout waiting for pcode write (%d) to finish\n", mbox);
7379 return -ETIMEDOUT; 7713 return -ETIMEDOUT;
7380 } 7714 }
7381 7715
7382 I915_WRITE(GEN6_PCODE_DATA, 0); 7716 I915_WRITE_FW(GEN6_PCODE_DATA, 0);
7383 7717
7384 return 0; 7718 return 0;
7385} 7719}
@@ -7449,23 +7783,21 @@ static void __intel_rps_boost_work(struct work_struct *work)
7449 struct request_boost *boost = container_of(work, struct request_boost, work); 7783 struct request_boost *boost = container_of(work, struct request_boost, work);
7450 struct drm_i915_gem_request *req = boost->req; 7784 struct drm_i915_gem_request *req = boost->req;
7451 7785
7452 if (!i915_gem_request_completed(req, true)) 7786 if (!i915_gem_request_completed(req))
7453 gen6_rps_boost(to_i915(req->engine->dev), NULL, 7787 gen6_rps_boost(req->i915, NULL, req->emitted_jiffies);
7454 req->emitted_jiffies);
7455 7788
7456 i915_gem_request_unreference__unlocked(req); 7789 i915_gem_request_unreference(req);
7457 kfree(boost); 7790 kfree(boost);
7458} 7791}
7459 7792
7460void intel_queue_rps_boost_for_request(struct drm_device *dev, 7793void intel_queue_rps_boost_for_request(struct drm_i915_gem_request *req)
7461 struct drm_i915_gem_request *req)
7462{ 7794{
7463 struct request_boost *boost; 7795 struct request_boost *boost;
7464 7796
7465 if (req == NULL || INTEL_INFO(dev)->gen < 6) 7797 if (req == NULL || INTEL_GEN(req->i915) < 6)
7466 return; 7798 return;
7467 7799
7468 if (i915_gem_request_completed(req, true)) 7800 if (i915_gem_request_completed(req))
7469 return; 7801 return;
7470 7802
7471 boost = kmalloc(sizeof(*boost), GFP_ATOMIC); 7803 boost = kmalloc(sizeof(*boost), GFP_ATOMIC);
@@ -7476,12 +7808,12 @@ void intel_queue_rps_boost_for_request(struct drm_device *dev,
7476 boost->req = req; 7808 boost->req = req;
7477 7809
7478 INIT_WORK(&boost->work, __intel_rps_boost_work); 7810 INIT_WORK(&boost->work, __intel_rps_boost_work);
7479 queue_work(to_i915(dev)->wq, &boost->work); 7811 queue_work(req->i915->wq, &boost->work);
7480} 7812}
7481 7813
7482void intel_pm_setup(struct drm_device *dev) 7814void intel_pm_setup(struct drm_device *dev)
7483{ 7815{
7484 struct drm_i915_private *dev_priv = dev->dev_private; 7816 struct drm_i915_private *dev_priv = to_i915(dev);
7485 7817
7486 mutex_init(&dev_priv->rps.hw_lock); 7818 mutex_init(&dev_priv->rps.hw_lock);
7487 spin_lock_init(&dev_priv->rps.client_lock); 7819 spin_lock_init(&dev_priv->rps.client_lock);
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index a788d1e9589b..68bd0bb34817 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -63,7 +63,7 @@ static bool is_edp_psr(struct intel_dp *intel_dp)
63 63
64static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe) 64static bool vlv_is_psr_active_on_pipe(struct drm_device *dev, int pipe)
65{ 65{
66 struct drm_i915_private *dev_priv = dev->dev_private; 66 struct drm_i915_private *dev_priv = to_i915(dev);
67 uint32_t val; 67 uint32_t val;
68 68
69 val = I915_READ(VLV_PSRSTAT(pipe)) & 69 val = I915_READ(VLV_PSRSTAT(pipe)) &
@@ -77,7 +77,7 @@ static void intel_psr_write_vsc(struct intel_dp *intel_dp,
77{ 77{
78 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 78 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
79 struct drm_device *dev = dig_port->base.base.dev; 79 struct drm_device *dev = dig_port->base.base.dev;
80 struct drm_i915_private *dev_priv = dev->dev_private; 80 struct drm_i915_private *dev_priv = to_i915(dev);
81 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc); 81 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
82 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder; 82 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
83 i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder); 83 i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
@@ -107,7 +107,7 @@ static void vlv_psr_setup_vsc(struct intel_dp *intel_dp)
107{ 107{
108 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 108 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
109 struct drm_device *dev = intel_dig_port->base.base.dev; 109 struct drm_device *dev = intel_dig_port->base.base.dev;
110 struct drm_i915_private *dev_priv = dev->dev_private; 110 struct drm_i915_private *dev_priv = to_i915(dev);
111 struct drm_crtc *crtc = intel_dig_port->base.base.crtc; 111 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
112 enum pipe pipe = to_intel_crtc(crtc)->pipe; 112 enum pipe pipe = to_intel_crtc(crtc)->pipe;
113 uint32_t val; 113 uint32_t val;
@@ -173,10 +173,9 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
173{ 173{
174 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 174 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
175 struct drm_device *dev = dig_port->base.base.dev; 175 struct drm_device *dev = dig_port->base.base.dev;
176 struct drm_i915_private *dev_priv = dev->dev_private; 176 struct drm_i915_private *dev_priv = to_i915(dev);
177 uint32_t aux_clock_divider; 177 uint32_t aux_clock_divider;
178 i915_reg_t aux_ctl_reg; 178 i915_reg_t aux_ctl_reg;
179 int precharge = 0x3;
180 static const uint8_t aux_msg[] = { 179 static const uint8_t aux_msg[] = {
181 [0] = DP_AUX_NATIVE_WRITE << 4, 180 [0] = DP_AUX_NATIVE_WRITE << 4,
182 [1] = DP_SET_POWER >> 8, 181 [1] = DP_SET_POWER >> 8,
@@ -185,6 +184,7 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
185 [4] = DP_SET_POWER_D0, 184 [4] = DP_SET_POWER_D0,
186 }; 185 };
187 enum port port = dig_port->port; 186 enum port port = dig_port->port;
187 u32 aux_ctl;
188 int i; 188 int i;
189 189
190 BUILD_BUG_ON(sizeof(aux_msg) > 20); 190 BUILD_BUG_ON(sizeof(aux_msg) > 20);
@@ -197,6 +197,13 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
197 DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF, 197 DP_SINK_DEVICE_AUX_FRAME_SYNC_CONF,
198 DP_AUX_FRAME_SYNC_ENABLE); 198 DP_AUX_FRAME_SYNC_ENABLE);
199 199
200 if (dev_priv->psr.link_standby)
201 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
202 DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
203 else
204 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
205 DP_PSR_ENABLE);
206
200 aux_ctl_reg = psr_aux_ctl_reg(dev_priv, port); 207 aux_ctl_reg = psr_aux_ctl_reg(dev_priv, port);
201 208
202 /* Setup AUX registers */ 209 /* Setup AUX registers */
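The sink-side PSR enable is now written over the AUX channel before the source AUX registers are programmed. For context, that write uses the generic DisplayPort DPCD helper; a small sketch of the same operation in isolation (helper and register names from drm_dp_helper.h, the wrapper function name is made up for illustration):

	/* Enable PSR on the sink; optionally keep the main link active so
	 * the source does not have to retrain the link on every PSR exit.
	 * drm_dp_dpcd_writeb() issues a one-byte native AUX write and
	 * returns the number of bytes written or a negative errno. */
	static void example_psr_enable_sink(struct intel_dp *intel_dp, bool link_standby)
	{
		u8 val = DP_PSR_ENABLE;

		if (link_standby)
			val |= DP_PSR_MAIN_LINK_ACTIVE;

		drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, val);
	}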
@@ -204,40 +211,16 @@ static void hsw_psr_enable_sink(struct intel_dp *intel_dp)
204 I915_WRITE(psr_aux_data_reg(dev_priv, port, i >> 2), 211 I915_WRITE(psr_aux_data_reg(dev_priv, port, i >> 2),
205 intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i)); 212 intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
206 213
207 if (INTEL_INFO(dev)->gen >= 9) { 214 aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, 0, sizeof(aux_msg),
208 uint32_t val; 215 aux_clock_divider);
209 216 I915_WRITE(aux_ctl_reg, aux_ctl);
210 val = I915_READ(aux_ctl_reg);
211 val &= ~DP_AUX_CH_CTL_TIME_OUT_MASK;
212 val |= DP_AUX_CH_CTL_TIME_OUT_1600us;
213 val &= ~DP_AUX_CH_CTL_MESSAGE_SIZE_MASK;
214 val |= (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
215 /* Use hardcoded data values for PSR, frame sync and GTC */
216 val &= ~DP_AUX_CH_CTL_PSR_DATA_AUX_REG_SKL;
217 val &= ~DP_AUX_CH_CTL_FS_DATA_AUX_REG_SKL;
218 val &= ~DP_AUX_CH_CTL_GTC_DATA_AUX_REG_SKL;
219 I915_WRITE(aux_ctl_reg, val);
220 } else {
221 I915_WRITE(aux_ctl_reg,
222 DP_AUX_CH_CTL_TIME_OUT_400us |
223 (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
224 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
225 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
226 }
227
228 if (dev_priv->psr.link_standby)
229 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
230 DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
231 else
232 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
233 DP_PSR_ENABLE);
234} 217}
235 218
236static void vlv_psr_enable_source(struct intel_dp *intel_dp) 219static void vlv_psr_enable_source(struct intel_dp *intel_dp)
237{ 220{
238 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 221 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
239 struct drm_device *dev = dig_port->base.base.dev; 222 struct drm_device *dev = dig_port->base.base.dev;
240 struct drm_i915_private *dev_priv = dev->dev_private; 223 struct drm_i915_private *dev_priv = to_i915(dev);
241 struct drm_crtc *crtc = dig_port->base.base.crtc; 224 struct drm_crtc *crtc = dig_port->base.base.crtc;
242 enum pipe pipe = to_intel_crtc(crtc)->pipe; 225 enum pipe pipe = to_intel_crtc(crtc)->pipe;
243 226
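Much of the churn in this file is simply getting a drm_i915_private pointer named dev_priv into scope, because the register accessors are macros that expand against that local. A conceptual form only, assuming the usual mmio layout; the real macros in the driver add tracing and forcewake handling:

	/* 'dev_priv' must be a local for these to compile; that is what the
	 * repeated "dev_priv = to_i915(dev)" conversions provide. */
	#define I915_READ(reg)		readl(dev_priv->regs + i915_mmio_reg_offset(reg))
	#define I915_WRITE(reg, val)	writel((val), dev_priv->regs + i915_mmio_reg_offset(reg))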
@@ -252,7 +235,7 @@ static void vlv_psr_activate(struct intel_dp *intel_dp)
252{ 235{
253 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 236 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
254 struct drm_device *dev = dig_port->base.base.dev; 237 struct drm_device *dev = dig_port->base.base.dev;
255 struct drm_i915_private *dev_priv = dev->dev_private; 238 struct drm_i915_private *dev_priv = to_i915(dev);
256 struct drm_crtc *crtc = dig_port->base.base.crtc; 239 struct drm_crtc *crtc = dig_port->base.base.crtc;
257 enum pipe pipe = to_intel_crtc(crtc)->pipe; 240 enum pipe pipe = to_intel_crtc(crtc)->pipe;
258 241
@@ -269,17 +252,17 @@ static void hsw_psr_enable_source(struct intel_dp *intel_dp)
269{ 252{
270 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 253 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
271 struct drm_device *dev = dig_port->base.base.dev; 254 struct drm_device *dev = dig_port->base.base.dev;
272 struct drm_i915_private *dev_priv = dev->dev_private; 255 struct drm_i915_private *dev_priv = to_i915(dev);
273 256
274 uint32_t max_sleep_time = 0x1f; 257 uint32_t max_sleep_time = 0x1f;
275 /* 258 /* Lately it was identified that depending on panel idle frame count
276 * Let's respect VBT in case VBT asks a higher idle_frame value. 259 * calculated at HW can be off by 1. So let's use what came
277 * Let's use 6 as the minimum to cover all known cases including 260 * from VBT + 1.
278 * the off-by-one issue that HW has in some cases. Also there are 261 * There are also other cases where panel demands at least 4
279 * cases where sink should be able to train 262 * but VBT is not being set. To cover these 2 cases lets use
280 * with the 5 or 6 idle patterns. 263 * at least 5 when VBT isn't set to be on the safest side.
281 */ 264 */
282 uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames); 265 uint32_t idle_frames = dev_priv->vbt.psr.idle_frames + 1;
283 uint32_t val = EDP_PSR_ENABLE; 266 uint32_t val = EDP_PSR_ENABLE;
284 267
285 val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT; 268 val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
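The idle-frame policy changes from max(6, VBT) to VBT + 1, per the new comment about the hardware's off-by-one. The computed idle_frames still ends up packed into EDP_PSR_CTL later in the same function (not shown in this hunk); a rough sketch of that packing, where EDP_PSR_IDLE_FRAME_SHIFT is assumed from the i915 register definitions and other control bits are omitted:

	uint32_t val = EDP_PSR_ENABLE;

	val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
	val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
	I915_WRITE(EDP_PSR_CTL, val);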
@@ -341,7 +324,7 @@ static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
341{ 324{
342 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 325 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
343 struct drm_device *dev = dig_port->base.base.dev; 326 struct drm_device *dev = dig_port->base.base.dev;
344 struct drm_i915_private *dev_priv = dev->dev_private; 327 struct drm_i915_private *dev_priv = to_i915(dev);
345 struct drm_crtc *crtc = dig_port->base.base.crtc; 328 struct drm_crtc *crtc = dig_port->base.base.crtc;
346 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 329 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
347 330
@@ -395,7 +378,7 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
395{ 378{
396 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 379 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
397 struct drm_device *dev = intel_dig_port->base.base.dev; 380 struct drm_device *dev = intel_dig_port->base.base.dev;
398 struct drm_i915_private *dev_priv = dev->dev_private; 381 struct drm_i915_private *dev_priv = to_i915(dev);
399 382
400 WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE); 383 WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE);
401 WARN_ON(dev_priv->psr.active); 384 WARN_ON(dev_priv->psr.active);
@@ -424,7 +407,7 @@ void intel_psr_enable(struct intel_dp *intel_dp)
424{ 407{
425 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 408 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
426 struct drm_device *dev = intel_dig_port->base.base.dev; 409 struct drm_device *dev = intel_dig_port->base.base.dev;
427 struct drm_i915_private *dev_priv = dev->dev_private; 410 struct drm_i915_private *dev_priv = to_i915(dev);
428 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc); 411 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
429 412
430 if (!HAS_PSR(dev)) { 413 if (!HAS_PSR(dev)) {
@@ -511,15 +494,18 @@ static void vlv_psr_disable(struct intel_dp *intel_dp)
511{ 494{
512 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 495 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
513 struct drm_device *dev = intel_dig_port->base.base.dev; 496 struct drm_device *dev = intel_dig_port->base.base.dev;
514 struct drm_i915_private *dev_priv = dev->dev_private; 497 struct drm_i915_private *dev_priv = to_i915(dev);
515 struct intel_crtc *intel_crtc = 498 struct intel_crtc *intel_crtc =
516 to_intel_crtc(intel_dig_port->base.base.crtc); 499 to_intel_crtc(intel_dig_port->base.base.crtc);
517 uint32_t val; 500 uint32_t val;
518 501
519 if (dev_priv->psr.active) { 502 if (dev_priv->psr.active) {
520 /* Put VLV PSR back to PSR_state 0 that is PSR Disabled. */ 503 /* Put VLV PSR back to PSR_state 0 that is PSR Disabled. */
521 if (wait_for((I915_READ(VLV_PSRSTAT(intel_crtc->pipe)) & 504 if (intel_wait_for_register(dev_priv,
522 VLV_EDP_PSR_IN_TRANS) == 0, 1)) 505 VLV_PSRSTAT(intel_crtc->pipe),
506 VLV_EDP_PSR_IN_TRANS,
507 0,
508 1))
523 WARN(1, "PSR transition took longer than expected\n"); 509 WARN(1, "PSR transition took longer than expected\n");
524 510
525 val = I915_READ(VLV_PSRCTL(intel_crtc->pipe)); 511 val = I915_READ(VLV_PSRCTL(intel_crtc->pipe));
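The open-coded wait_for() polls are replaced by intel_wait_for_register(), which takes the register, a mask, the value expected under that mask, and a timeout in milliseconds. A simplified model of what such a helper does, assuming only the call signature visible in these hunks; the real implementation also hard-spins briefly before sleeping, so this is just a sketch:

	/* Poll until (read & mask) == value or the timeout expires.
	 * Returns 0 on success, -ETIMEDOUT otherwise. */
	static int example_wait_for_register(struct drm_i915_private *dev_priv,
					     i915_reg_t reg, u32 mask, u32 value,
					     unsigned int timeout_ms)
	{
		unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms) + 1;

		do {
			if ((I915_READ(reg) & mask) == value)
				return 0;
			usleep_range(10, 50);
		} while (time_before(jiffies, deadline));

		/* One last read in case we raced with the deadline. */
		return ((I915_READ(reg) & mask) == value) ? 0 : -ETIMEDOUT;
	}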
@@ -538,16 +524,18 @@ static void hsw_psr_disable(struct intel_dp *intel_dp)
538{ 524{
539 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 525 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
540 struct drm_device *dev = intel_dig_port->base.base.dev; 526 struct drm_device *dev = intel_dig_port->base.base.dev;
541 struct drm_i915_private *dev_priv = dev->dev_private; 527 struct drm_i915_private *dev_priv = to_i915(dev);
542 528
543 if (dev_priv->psr.active) { 529 if (dev_priv->psr.active) {
544 I915_WRITE(EDP_PSR_CTL, 530 I915_WRITE(EDP_PSR_CTL,
545 I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE); 531 I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
546 532
547 /* Wait till PSR is idle */ 533 /* Wait till PSR is idle */
548 if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL) & 534 if (intel_wait_for_register(dev_priv,
549 EDP_PSR_STATUS_STATE_MASK) == 0, 535 EDP_PSR_STATUS_CTL,
550 2 * USEC_PER_SEC, 10 * USEC_PER_MSEC)) 536 EDP_PSR_STATUS_STATE_MASK,
537 0,
538 2000))
551 DRM_ERROR("Timed out waiting for PSR Idle State\n"); 539 DRM_ERROR("Timed out waiting for PSR Idle State\n");
552 540
553 dev_priv->psr.active = false; 541 dev_priv->psr.active = false;
@@ -566,7 +554,7 @@ void intel_psr_disable(struct intel_dp *intel_dp)
566{ 554{
567 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 555 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
568 struct drm_device *dev = intel_dig_port->base.base.dev; 556 struct drm_device *dev = intel_dig_port->base.base.dev;
569 struct drm_i915_private *dev_priv = dev->dev_private; 557 struct drm_i915_private *dev_priv = to_i915(dev);
570 558
571 mutex_lock(&dev_priv->psr.lock); 559 mutex_lock(&dev_priv->psr.lock);
572 if (!dev_priv->psr.enabled) { 560 if (!dev_priv->psr.enabled) {
@@ -603,14 +591,20 @@ static void intel_psr_work(struct work_struct *work)
603 * and be ready for re-enable. 591 * and be ready for re-enable.
604 */ 592 */
605 if (HAS_DDI(dev_priv)) { 593 if (HAS_DDI(dev_priv)) {
606 if (wait_for((I915_READ(EDP_PSR_STATUS_CTL) & 594 if (intel_wait_for_register(dev_priv,
607 EDP_PSR_STATUS_STATE_MASK) == 0, 50)) { 595 EDP_PSR_STATUS_CTL,
596 EDP_PSR_STATUS_STATE_MASK,
597 0,
598 50)) {
608 DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n"); 599 DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
609 return; 600 return;
610 } 601 }
611 } else { 602 } else {
612 if (wait_for((I915_READ(VLV_PSRSTAT(pipe)) & 603 if (intel_wait_for_register(dev_priv,
613 VLV_EDP_PSR_IN_TRANS) == 0, 1)) { 604 VLV_PSRSTAT(pipe),
605 VLV_EDP_PSR_IN_TRANS,
606 0,
607 1)) {
614 DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n"); 608 DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
615 return; 609 return;
616 } 610 }
@@ -636,7 +630,7 @@ unlock:
636 630
637static void intel_psr_exit(struct drm_device *dev) 631static void intel_psr_exit(struct drm_device *dev)
638{ 632{
639 struct drm_i915_private *dev_priv = dev->dev_private; 633 struct drm_i915_private *dev_priv = to_i915(dev);
640 struct intel_dp *intel_dp = dev_priv->psr.enabled; 634 struct intel_dp *intel_dp = dev_priv->psr.enabled;
641 struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc; 635 struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
642 enum pipe pipe = to_intel_crtc(crtc)->pipe; 636 enum pipe pipe = to_intel_crtc(crtc)->pipe;
@@ -691,7 +685,7 @@ static void intel_psr_exit(struct drm_device *dev)
691void intel_psr_single_frame_update(struct drm_device *dev, 685void intel_psr_single_frame_update(struct drm_device *dev,
692 unsigned frontbuffer_bits) 686 unsigned frontbuffer_bits)
693{ 687{
694 struct drm_i915_private *dev_priv = dev->dev_private; 688 struct drm_i915_private *dev_priv = to_i915(dev);
695 struct drm_crtc *crtc; 689 struct drm_crtc *crtc;
696 enum pipe pipe; 690 enum pipe pipe;
697 u32 val; 691 u32 val;
@@ -739,7 +733,7 @@ void intel_psr_single_frame_update(struct drm_device *dev,
739void intel_psr_invalidate(struct drm_device *dev, 733void intel_psr_invalidate(struct drm_device *dev,
740 unsigned frontbuffer_bits) 734 unsigned frontbuffer_bits)
741{ 735{
742 struct drm_i915_private *dev_priv = dev->dev_private; 736 struct drm_i915_private *dev_priv = to_i915(dev);
743 struct drm_crtc *crtc; 737 struct drm_crtc *crtc;
744 enum pipe pipe; 738 enum pipe pipe;
745 739
@@ -777,7 +771,7 @@ void intel_psr_invalidate(struct drm_device *dev,
777void intel_psr_flush(struct drm_device *dev, 771void intel_psr_flush(struct drm_device *dev,
778 unsigned frontbuffer_bits, enum fb_op_origin origin) 772 unsigned frontbuffer_bits, enum fb_op_origin origin)
779{ 773{
780 struct drm_i915_private *dev_priv = dev->dev_private; 774 struct drm_i915_private *dev_priv = to_i915(dev);
781 struct drm_crtc *crtc; 775 struct drm_crtc *crtc;
782 enum pipe pipe; 776 enum pipe pipe;
783 777
@@ -813,7 +807,7 @@ void intel_psr_flush(struct drm_device *dev,
813 */ 807 */
814void intel_psr_init(struct drm_device *dev) 808void intel_psr_init(struct drm_device *dev)
815{ 809{
816 struct drm_i915_private *dev_priv = dev->dev_private; 810 struct drm_i915_private *dev_priv = to_i915(dev);
817 811
818 dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ? 812 dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
819 HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE; 813 HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 68c5af079ef8..cca7792f26d5 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -34,6 +34,11 @@
34#include "i915_trace.h" 34#include "i915_trace.h"
35#include "intel_drv.h" 35#include "intel_drv.h"
36 36
37/* Rough estimate of the typical request size, performing a flush,
38 * set-context and then emitting the batch.
39 */
40#define LEGACY_REQUEST_SIZE 200
41
37int __intel_ring_space(int head, int tail, int size) 42int __intel_ring_space(int head, int tail, int size)
38{ 43{
39 int space = head - tail; 44 int space = head - tail;
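LEGACY_REQUEST_SIZE is the headroom reserved per request so that emitting the flush, the context switch and the batch start never has to stall for ring space mid-request. The function this hunk cuts into computes free space in the circular ring buffer; its body continues roughly as follows (the reserve constant is the driver's existing definition):

	int __intel_ring_space(int head, int tail, int size)
	{
		int space = head - tail;

		/* head behind tail means the free region wraps past the
		 * end of the buffer. */
		if (space <= 0)
			space += size;

		/* Keep a small gap so head == tail always means "empty". */
		return space - I915_RING_FREE_SPACE;
	}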
@@ -53,18 +58,10 @@ void intel_ring_update_space(struct intel_ringbuffer *ringbuf)
53 ringbuf->tail, ringbuf->size); 58 ringbuf->tail, ringbuf->size);
54} 59}
55 60
56bool intel_engine_stopped(struct intel_engine_cs *engine)
57{
58 struct drm_i915_private *dev_priv = engine->dev->dev_private;
59 return dev_priv->gpu_error.stop_rings & intel_engine_flag(engine);
60}
61
62static void __intel_ring_advance(struct intel_engine_cs *engine) 61static void __intel_ring_advance(struct intel_engine_cs *engine)
63{ 62{
64 struct intel_ringbuffer *ringbuf = engine->buffer; 63 struct intel_ringbuffer *ringbuf = engine->buffer;
65 ringbuf->tail &= ringbuf->size - 1; 64 ringbuf->tail &= ringbuf->size - 1;
66 if (intel_engine_stopped(engine))
67 return;
68 engine->write_tail(engine, ringbuf->tail); 65 engine->write_tail(engine, ringbuf->tail);
69} 66}
70 67
@@ -101,7 +98,6 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
101 u32 flush_domains) 98 u32 flush_domains)
102{ 99{
103 struct intel_engine_cs *engine = req->engine; 100 struct intel_engine_cs *engine = req->engine;
104 struct drm_device *dev = engine->dev;
105 u32 cmd; 101 u32 cmd;
106 int ret; 102 int ret;
107 103
@@ -140,7 +136,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
140 cmd |= MI_EXE_FLUSH; 136 cmd |= MI_EXE_FLUSH;
141 137
142 if (invalidate_domains & I915_GEM_DOMAIN_COMMAND && 138 if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
143 (IS_G4X(dev) || IS_GEN5(dev))) 139 (IS_G4X(req->i915) || IS_GEN5(req->i915)))
144 cmd |= MI_INVALIDATE_ISP; 140 cmd |= MI_INVALIDATE_ISP;
145 141
146 ret = intel_ring_begin(req, 2); 142 ret = intel_ring_begin(req, 2);
@@ -426,19 +422,19 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req,
426static void ring_write_tail(struct intel_engine_cs *engine, 422static void ring_write_tail(struct intel_engine_cs *engine,
427 u32 value) 423 u32 value)
428{ 424{
429 struct drm_i915_private *dev_priv = engine->dev->dev_private; 425 struct drm_i915_private *dev_priv = engine->i915;
430 I915_WRITE_TAIL(engine, value); 426 I915_WRITE_TAIL(engine, value);
431} 427}
432 428
433u64 intel_ring_get_active_head(struct intel_engine_cs *engine) 429u64 intel_ring_get_active_head(struct intel_engine_cs *engine)
434{ 430{
435 struct drm_i915_private *dev_priv = engine->dev->dev_private; 431 struct drm_i915_private *dev_priv = engine->i915;
436 u64 acthd; 432 u64 acthd;
437 433
438 if (INTEL_INFO(engine->dev)->gen >= 8) 434 if (INTEL_GEN(dev_priv) >= 8)
439 acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base), 435 acthd = I915_READ64_2x32(RING_ACTHD(engine->mmio_base),
440 RING_ACTHD_UDW(engine->mmio_base)); 436 RING_ACTHD_UDW(engine->mmio_base));
441 else if (INTEL_INFO(engine->dev)->gen >= 4) 437 else if (INTEL_GEN(dev_priv) >= 4)
442 acthd = I915_READ(RING_ACTHD(engine->mmio_base)); 438 acthd = I915_READ(RING_ACTHD(engine->mmio_base));
443 else 439 else
444 acthd = I915_READ(ACTHD); 440 acthd = I915_READ(ACTHD);
@@ -448,25 +444,24 @@ u64 intel_ring_get_active_head(struct intel_engine_cs *engine)
448 444
449static void ring_setup_phys_status_page(struct intel_engine_cs *engine) 445static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
450{ 446{
451 struct drm_i915_private *dev_priv = engine->dev->dev_private; 447 struct drm_i915_private *dev_priv = engine->i915;
452 u32 addr; 448 u32 addr;
453 449
454 addr = dev_priv->status_page_dmah->busaddr; 450 addr = dev_priv->status_page_dmah->busaddr;
455 if (INTEL_INFO(engine->dev)->gen >= 4) 451 if (INTEL_GEN(dev_priv) >= 4)
456 addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0; 452 addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
457 I915_WRITE(HWS_PGA, addr); 453 I915_WRITE(HWS_PGA, addr);
458} 454}
459 455
460static void intel_ring_setup_status_page(struct intel_engine_cs *engine) 456static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
461{ 457{
462 struct drm_device *dev = engine->dev; 458 struct drm_i915_private *dev_priv = engine->i915;
463 struct drm_i915_private *dev_priv = engine->dev->dev_private;
464 i915_reg_t mmio; 459 i915_reg_t mmio;
465 460
466 /* The ring status page addresses are no longer next to the rest of 461 /* The ring status page addresses are no longer next to the rest of
467 * the ring registers as of gen7. 462 * the ring registers as of gen7.
468 */ 463 */
469 if (IS_GEN7(dev)) { 464 if (IS_GEN7(dev_priv)) {
470 switch (engine->id) { 465 switch (engine->id) {
471 case RCS: 466 case RCS:
472 mmio = RENDER_HWS_PGA_GEN7; 467 mmio = RENDER_HWS_PGA_GEN7;
@@ -486,7 +481,7 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
486 mmio = VEBOX_HWS_PGA_GEN7; 481 mmio = VEBOX_HWS_PGA_GEN7;
487 break; 482 break;
488 } 483 }
489 } else if (IS_GEN6(engine->dev)) { 484 } else if (IS_GEN6(dev_priv)) {
490 mmio = RING_HWS_PGA_GEN6(engine->mmio_base); 485 mmio = RING_HWS_PGA_GEN6(engine->mmio_base);
491 } else { 486 } else {
492 /* XXX: gen8 returns to sanity */ 487 /* XXX: gen8 returns to sanity */
@@ -503,7 +498,7 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
503 * arises: do we still need this and if so how should we go about 498 * arises: do we still need this and if so how should we go about
504 * invalidating the TLB? 499 * invalidating the TLB?
505 */ 500 */
506 if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) { 501 if (IS_GEN(dev_priv, 6, 7)) {
507 i915_reg_t reg = RING_INSTPM(engine->mmio_base); 502 i915_reg_t reg = RING_INSTPM(engine->mmio_base);
508 503
509 /* ring should be idle before issuing a sync flush*/ 504 /* ring should be idle before issuing a sync flush*/
@@ -512,8 +507,9 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
512 I915_WRITE(reg, 507 I915_WRITE(reg,
513 _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE | 508 _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
514 INSTPM_SYNC_FLUSH)); 509 INSTPM_SYNC_FLUSH));
515 if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0, 510 if (intel_wait_for_register(dev_priv,
516 1000)) 511 reg, INSTPM_SYNC_FLUSH, 0,
512 1000))
517 DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n", 513 DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
518 engine->name); 514 engine->name);
519 } 515 }
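The generation checks above move from open-coded comparisons such as INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8 to the inclusive-range form IS_GEN(dev_priv, 6, 7). Conceptually it is the check below; the real macro tests a precomputed generation bitmask, so the spelling here is only an equivalent formulation:

	#define EXAMPLE_IS_GEN(dev_priv, since, until) \
		(INTEL_GEN(dev_priv) >= (since) && INTEL_GEN(dev_priv) <= (until))

Both endpoints are inclusive, which is why "gen < 8" becomes "until = 7".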
@@ -521,11 +517,15 @@ static void intel_ring_setup_status_page(struct intel_engine_cs *engine)
521 517
522static bool stop_ring(struct intel_engine_cs *engine) 518static bool stop_ring(struct intel_engine_cs *engine)
523{ 519{
524 struct drm_i915_private *dev_priv = to_i915(engine->dev); 520 struct drm_i915_private *dev_priv = engine->i915;
525 521
526 if (!IS_GEN2(engine->dev)) { 522 if (!IS_GEN2(dev_priv)) {
527 I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING)); 523 I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
528 if (wait_for((I915_READ_MODE(engine) & MODE_IDLE) != 0, 1000)) { 524 if (intel_wait_for_register(dev_priv,
525 RING_MI_MODE(engine->mmio_base),
526 MODE_IDLE,
527 MODE_IDLE,
528 1000)) {
529 DRM_ERROR("%s : timed out trying to stop ring\n", 529 DRM_ERROR("%s : timed out trying to stop ring\n",
530 engine->name); 530 engine->name);
531 /* Sometimes we observe that the idle flag is not 531 /* Sometimes we observe that the idle flag is not
@@ -541,7 +541,7 @@ static bool stop_ring(struct intel_engine_cs *engine)
541 I915_WRITE_HEAD(engine, 0); 541 I915_WRITE_HEAD(engine, 0);
542 engine->write_tail(engine, 0); 542 engine->write_tail(engine, 0);
543 543
544 if (!IS_GEN2(engine->dev)) { 544 if (!IS_GEN2(dev_priv)) {
545 (void)I915_READ_CTL(engine); 545 (void)I915_READ_CTL(engine);
546 I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING)); 546 I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
547 } 547 }
@@ -556,8 +556,7 @@ void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
556 556
557static int init_ring_common(struct intel_engine_cs *engine) 557static int init_ring_common(struct intel_engine_cs *engine)
558{ 558{
559 struct drm_device *dev = engine->dev; 559 struct drm_i915_private *dev_priv = engine->i915;
560 struct drm_i915_private *dev_priv = dev->dev_private;
561 struct intel_ringbuffer *ringbuf = engine->buffer; 560 struct intel_ringbuffer *ringbuf = engine->buffer;
562 struct drm_i915_gem_object *obj = ringbuf->obj; 561 struct drm_i915_gem_object *obj = ringbuf->obj;
563 int ret = 0; 562 int ret = 0;
@@ -587,7 +586,7 @@ static int init_ring_common(struct intel_engine_cs *engine)
587 } 586 }
588 } 587 }
589 588
590 if (I915_NEED_GFX_HWS(dev)) 589 if (I915_NEED_GFX_HWS(dev_priv))
591 intel_ring_setup_status_page(engine); 590 intel_ring_setup_status_page(engine);
592 else 591 else
593 ring_setup_phys_status_page(engine); 592 ring_setup_phys_status_page(engine);
@@ -641,59 +640,42 @@ out:
641 return ret; 640 return ret;
642} 641}
643 642
644void 643void intel_fini_pipe_control(struct intel_engine_cs *engine)
645intel_fini_pipe_control(struct intel_engine_cs *engine)
646{ 644{
647 struct drm_device *dev = engine->dev;
648
649 if (engine->scratch.obj == NULL) 645 if (engine->scratch.obj == NULL)
650 return; 646 return;
651 647
652 if (INTEL_INFO(dev)->gen >= 5) { 648 i915_gem_object_ggtt_unpin(engine->scratch.obj);
653 kunmap(sg_page(engine->scratch.obj->pages->sgl));
654 i915_gem_object_ggtt_unpin(engine->scratch.obj);
655 }
656
657 drm_gem_object_unreference(&engine->scratch.obj->base); 649 drm_gem_object_unreference(&engine->scratch.obj->base);
658 engine->scratch.obj = NULL; 650 engine->scratch.obj = NULL;
659} 651}
660 652
661int 653int intel_init_pipe_control(struct intel_engine_cs *engine, int size)
662intel_init_pipe_control(struct intel_engine_cs *engine)
663{ 654{
655 struct drm_i915_gem_object *obj;
664 int ret; 656 int ret;
665 657
666 WARN_ON(engine->scratch.obj); 658 WARN_ON(engine->scratch.obj);
667 659
668 engine->scratch.obj = i915_gem_alloc_object(engine->dev, 4096); 660 obj = i915_gem_object_create_stolen(&engine->i915->drm, size);
669 if (engine->scratch.obj == NULL) { 661 if (!obj)
670 DRM_ERROR("Failed to allocate seqno page\n"); 662 obj = i915_gem_object_create(&engine->i915->drm, size);
671 ret = -ENOMEM; 663 if (IS_ERR(obj)) {
664 DRM_ERROR("Failed to allocate scratch page\n");
665 ret = PTR_ERR(obj);
672 goto err; 666 goto err;
673 } 667 }
674 668
675 ret = i915_gem_object_set_cache_level(engine->scratch.obj, 669 ret = i915_gem_obj_ggtt_pin(obj, 4096, PIN_HIGH);
676 I915_CACHE_LLC);
677 if (ret)
678 goto err_unref;
679
680 ret = i915_gem_obj_ggtt_pin(engine->scratch.obj, 4096, 0);
681 if (ret) 670 if (ret)
682 goto err_unref; 671 goto err_unref;
683 672
684 engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(engine->scratch.obj); 673 engine->scratch.obj = obj;
685 engine->scratch.cpu_page = kmap(sg_page(engine->scratch.obj->pages->sgl)); 674 engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
686 if (engine->scratch.cpu_page == NULL) {
687 ret = -ENOMEM;
688 goto err_unpin;
689 }
690
691 DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n", 675 DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
692 engine->name, engine->scratch.gtt_offset); 676 engine->name, engine->scratch.gtt_offset);
693 return 0; 677 return 0;
694 678
695err_unpin:
696 i915_gem_object_ggtt_unpin(engine->scratch.obj);
697err_unref: 679err_unref:
698 drm_gem_object_unreference(&engine->scratch.obj->base); 680 drm_gem_object_unreference(&engine->scratch.obj->base);
699err: 681err:
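intel_init_pipe_control() no longer kmaps a CPU page for the scratch object; it only needs a GGTT address. The new allocation pattern, pulled out into a self-contained form using exactly the calls shown in the hunk above (the wrapper name is illustrative):

	/* Prefer a stolen-memory object (no shmem backing, cheap to create),
	 * fall back to a regular GEM object, then pin it high in the global
	 * GTT so it stays clear of the mappable aperture. */
	static int example_alloc_scratch(struct intel_engine_cs *engine, int size)
	{
		struct drm_i915_gem_object *obj;
		int ret;

		obj = i915_gem_object_create_stolen(&engine->i915->drm, size);
		if (!obj)
			obj = i915_gem_object_create(&engine->i915->drm, size);
		if (IS_ERR(obj))
			return PTR_ERR(obj);

		ret = i915_gem_obj_ggtt_pin(obj, 4096, PIN_HIGH);
		if (ret) {
			drm_gem_object_unreference(&obj->base);
			return ret;
		}

		engine->scratch.obj = obj;
		engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
		return 0;
	}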
@@ -702,11 +684,9 @@ err:
702 684
703static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req) 685static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
704{ 686{
705 int ret, i;
706 struct intel_engine_cs *engine = req->engine; 687 struct intel_engine_cs *engine = req->engine;
707 struct drm_device *dev = engine->dev; 688 struct i915_workarounds *w = &req->i915->workarounds;
708 struct drm_i915_private *dev_priv = dev->dev_private; 689 int ret, i;
709 struct i915_workarounds *w = &dev_priv->workarounds;
710 690
711 if (w->count == 0) 691 if (w->count == 0)
712 return 0; 692 return 0;
@@ -795,7 +775,7 @@ static int wa_add(struct drm_i915_private *dev_priv,
795static int wa_ring_whitelist_reg(struct intel_engine_cs *engine, 775static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
796 i915_reg_t reg) 776 i915_reg_t reg)
797{ 777{
798 struct drm_i915_private *dev_priv = engine->dev->dev_private; 778 struct drm_i915_private *dev_priv = engine->i915;
799 struct i915_workarounds *wa = &dev_priv->workarounds; 779 struct i915_workarounds *wa = &dev_priv->workarounds;
800 const uint32_t index = wa->hw_whitelist_count[engine->id]; 780 const uint32_t index = wa->hw_whitelist_count[engine->id];
801 781
@@ -811,8 +791,7 @@ static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
811 791
812static int gen8_init_workarounds(struct intel_engine_cs *engine) 792static int gen8_init_workarounds(struct intel_engine_cs *engine)
813{ 793{
814 struct drm_device *dev = engine->dev; 794 struct drm_i915_private *dev_priv = engine->i915;
815 struct drm_i915_private *dev_priv = dev->dev_private;
816 795
817 WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING); 796 WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
818 797
@@ -863,9 +842,8 @@ static int gen8_init_workarounds(struct intel_engine_cs *engine)
863 842
864static int bdw_init_workarounds(struct intel_engine_cs *engine) 843static int bdw_init_workarounds(struct intel_engine_cs *engine)
865{ 844{
845 struct drm_i915_private *dev_priv = engine->i915;
866 int ret; 846 int ret;
867 struct drm_device *dev = engine->dev;
868 struct drm_i915_private *dev_priv = dev->dev_private;
869 847
870 ret = gen8_init_workarounds(engine); 848 ret = gen8_init_workarounds(engine);
871 if (ret) 849 if (ret)
@@ -885,16 +863,15 @@ static int bdw_init_workarounds(struct intel_engine_cs *engine)
885 /* WaForceContextSaveRestoreNonCoherent:bdw */ 863 /* WaForceContextSaveRestoreNonCoherent:bdw */
886 HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT | 864 HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
887 /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */ 865 /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
888 (IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0)); 866 (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
889 867
890 return 0; 868 return 0;
891} 869}
892 870
893static int chv_init_workarounds(struct intel_engine_cs *engine) 871static int chv_init_workarounds(struct intel_engine_cs *engine)
894{ 872{
873 struct drm_i915_private *dev_priv = engine->i915;
895 int ret; 874 int ret;
896 struct drm_device *dev = engine->dev;
897 struct drm_i915_private *dev_priv = dev->dev_private;
898 875
899 ret = gen8_init_workarounds(engine); 876 ret = gen8_init_workarounds(engine);
900 if (ret) 877 if (ret)
@@ -911,8 +888,7 @@ static int chv_init_workarounds(struct intel_engine_cs *engine)
911 888
912static int gen9_init_workarounds(struct intel_engine_cs *engine) 889static int gen9_init_workarounds(struct intel_engine_cs *engine)
913{ 890{
914 struct drm_device *dev = engine->dev; 891 struct drm_i915_private *dev_priv = engine->i915;
915 struct drm_i915_private *dev_priv = dev->dev_private;
916 int ret; 892 int ret;
917 893
918 /* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl */ 894 /* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl */
@@ -937,14 +913,14 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
937 GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC); 913 GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
938 914
939 /* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */ 915 /* WaDisableDgMirrorFixInHalfSliceChicken5:skl,bxt */
940 if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) || 916 if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
941 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) 917 IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
942 WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5, 918 WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
943 GEN9_DG_MIRROR_FIX_ENABLE); 919 GEN9_DG_MIRROR_FIX_ENABLE);
944 920
945 /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */ 921 /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:skl,bxt */
946 if (IS_SKL_REVID(dev, 0, SKL_REVID_B0) || 922 if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_B0) ||
947 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { 923 IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
948 WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1, 924 WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
949 GEN9_RHWO_OPTIMIZATION_DISABLE); 925 GEN9_RHWO_OPTIMIZATION_DISABLE);
950 /* 926 /*
@@ -970,8 +946,8 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
970 GEN9_CCS_TLB_PREFETCH_ENABLE); 946 GEN9_CCS_TLB_PREFETCH_ENABLE);
971 947
972 /* WaDisableMaskBasedCammingInRCC:skl,bxt */ 948 /* WaDisableMaskBasedCammingInRCC:skl,bxt */
973 if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_C0) || 949 if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_C0) ||
974 IS_BXT_REVID(dev, 0, BXT_REVID_A1)) 950 IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
975 WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0, 951 WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
976 PIXEL_MASK_CAMMING_DISABLE); 952 PIXEL_MASK_CAMMING_DISABLE);
977 953
@@ -1035,8 +1011,7 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
1035 1011
1036static int skl_tune_iz_hashing(struct intel_engine_cs *engine) 1012static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
1037{ 1013{
1038 struct drm_device *dev = engine->dev; 1014 struct drm_i915_private *dev_priv = engine->i915;
1039 struct drm_i915_private *dev_priv = dev->dev_private;
1040 u8 vals[3] = { 0, 0, 0 }; 1015 u8 vals[3] = { 0, 0, 0 };
1041 unsigned int i; 1016 unsigned int i;
1042 1017
@@ -1077,9 +1052,8 @@ static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
1077 1052
1078static int skl_init_workarounds(struct intel_engine_cs *engine) 1053static int skl_init_workarounds(struct intel_engine_cs *engine)
1079{ 1054{
1055 struct drm_i915_private *dev_priv = engine->i915;
1080 int ret; 1056 int ret;
1081 struct drm_device *dev = engine->dev;
1082 struct drm_i915_private *dev_priv = dev->dev_private;
1083 1057
1084 ret = gen9_init_workarounds(engine); 1058 ret = gen9_init_workarounds(engine);
1085 if (ret) 1059 if (ret)
@@ -1090,12 +1064,12 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
1090 * until D0 which is the default case so this is equivalent to 1064 * until D0 which is the default case so this is equivalent to
1091 * !WaDisablePerCtxtPreemptionGranularityControl:skl 1065 * !WaDisablePerCtxtPreemptionGranularityControl:skl
1092 */ 1066 */
1093 if (IS_SKL_REVID(dev, SKL_REVID_E0, REVID_FOREVER)) { 1067 if (IS_SKL_REVID(dev_priv, SKL_REVID_E0, REVID_FOREVER)) {
1094 I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1, 1068 I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
1095 _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL)); 1069 _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
1096 } 1070 }
1097 1071
1098 if (IS_SKL_REVID(dev, 0, SKL_REVID_D0)) { 1072 if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0)) {
1099 /* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */ 1073 /* WaDisableChickenBitTSGBarrierAckForFFSliceCS:skl */
1100 I915_WRITE(FF_SLICE_CS_CHICKEN2, 1074 I915_WRITE(FF_SLICE_CS_CHICKEN2,
1101 _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE)); 1075 _MASKED_BIT_ENABLE(GEN9_TSG_BARRIER_ACK_DISABLE));
@@ -1104,30 +1078,30 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
1104 /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes 1078 /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
1105 * involving this register should also be added to WA batch as required. 1079 * involving this register should also be added to WA batch as required.
1106 */ 1080 */
1107 if (IS_SKL_REVID(dev, 0, SKL_REVID_E0)) 1081 if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0))
1108 /* WaDisableLSQCROPERFforOCL:skl */ 1082 /* WaDisableLSQCROPERFforOCL:skl */
1109 I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) | 1083 I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
1110 GEN8_LQSC_RO_PERF_DIS); 1084 GEN8_LQSC_RO_PERF_DIS);
1111 1085
1112 /* WaEnableGapsTsvCreditFix:skl */ 1086 /* WaEnableGapsTsvCreditFix:skl */
1113 if (IS_SKL_REVID(dev, SKL_REVID_C0, REVID_FOREVER)) { 1087 if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, REVID_FOREVER)) {
1114 I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) | 1088 I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
1115 GEN9_GAPS_TSV_CREDIT_DISABLE)); 1089 GEN9_GAPS_TSV_CREDIT_DISABLE));
1116 } 1090 }
1117 1091
1118 /* WaDisablePowerCompilerClockGating:skl */ 1092 /* WaDisablePowerCompilerClockGating:skl */
1119 if (IS_SKL_REVID(dev, SKL_REVID_B0, SKL_REVID_B0)) 1093 if (IS_SKL_REVID(dev_priv, SKL_REVID_B0, SKL_REVID_B0))
1120 WA_SET_BIT_MASKED(HIZ_CHICKEN, 1094 WA_SET_BIT_MASKED(HIZ_CHICKEN,
1121 BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE); 1095 BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
1122 1096
1123 /* WaBarrierPerformanceFixDisable:skl */ 1097 /* WaBarrierPerformanceFixDisable:skl */
1124 if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_D0)) 1098 if (IS_SKL_REVID(dev_priv, SKL_REVID_C0, SKL_REVID_D0))
1125 WA_SET_BIT_MASKED(HDC_CHICKEN0, 1099 WA_SET_BIT_MASKED(HDC_CHICKEN0,
1126 HDC_FENCE_DEST_SLM_DISABLE | 1100 HDC_FENCE_DEST_SLM_DISABLE |
1127 HDC_BARRIER_PERFORMANCE_DISABLE); 1101 HDC_BARRIER_PERFORMANCE_DISABLE);
1128 1102
1129 /* WaDisableSbeCacheDispatchPortSharing:skl */ 1103 /* WaDisableSbeCacheDispatchPortSharing:skl */
1130 if (IS_SKL_REVID(dev, 0, SKL_REVID_F0)) 1104 if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0))
1131 WA_SET_BIT_MASKED( 1105 WA_SET_BIT_MASKED(
1132 GEN7_HALF_SLICE_CHICKEN1, 1106 GEN7_HALF_SLICE_CHICKEN1,
1133 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); 1107 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
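The workaround hunks repeatedly convert IS_SKL_REVID(dev, ...) and IS_BXT_REVID(dev, ...) to take dev_priv. Conceptually these are inclusive stepping-range tests on the PCI revision ID; the EXAMPLE_ names below are illustrative stand-ins, not the driver's own macros:

	#define EXAMPLE_IS_REVID(p, since, until) \
		(INTEL_REVID(p) >= (since) && INTEL_REVID(p) <= (until))
	#define EXAMPLE_IS_SKL_REVID(p, since, until) \
		(IS_SKYLAKE(p) && EXAMPLE_IS_REVID(p, since, until))

REVID_FOREVER as the upper bound means "this and every later stepping", which is how production-forever workarounds like WaInPlaceDecompressionHang are expressed.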
@@ -1135,6 +1109,11 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
1135 /* WaDisableGafsUnitClkGating:skl */ 1109 /* WaDisableGafsUnitClkGating:skl */
1136 WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); 1110 WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
1137 1111
1112 /* WaInPlaceDecompressionHang:skl */
1113 if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
1114 WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
1115 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
1116
1138 /* WaDisableLSQCROPERFforOCL:skl */ 1117 /* WaDisableLSQCROPERFforOCL:skl */
1139 ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4); 1118 ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
1140 if (ret) 1119 if (ret)
@@ -1145,9 +1124,8 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
1145 1124
1146static int bxt_init_workarounds(struct intel_engine_cs *engine) 1125static int bxt_init_workarounds(struct intel_engine_cs *engine)
1147{ 1126{
1127 struct drm_i915_private *dev_priv = engine->i915;
1148 int ret; 1128 int ret;
1149 struct drm_device *dev = engine->dev;
1150 struct drm_i915_private *dev_priv = dev->dev_private;
1151 1129
1152 ret = gen9_init_workarounds(engine); 1130 ret = gen9_init_workarounds(engine);
1153 if (ret) 1131 if (ret)
@@ -1155,11 +1133,11 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
1155 1133
1156 /* WaStoreMultiplePTEenable:bxt */ 1134 /* WaStoreMultiplePTEenable:bxt */
1157 /* This is a requirement according to Hardware specification */ 1135 /* This is a requirement according to Hardware specification */
1158 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) 1136 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
1159 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF); 1137 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
1160 1138
1161 /* WaSetClckGatingDisableMedia:bxt */ 1139 /* WaSetClckGatingDisableMedia:bxt */
1162 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { 1140 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
1163 I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) & 1141 I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
1164 ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE)); 1142 ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
1165 } 1143 }
@@ -1168,8 +1146,14 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
1168 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, 1146 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
1169 STALL_DOP_GATING_DISABLE); 1147 STALL_DOP_GATING_DISABLE);
1170 1148
1149 /* WaDisablePooledEuLoadBalancingFix:bxt */
1150 if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
1151 WA_SET_BIT_MASKED(FF_SLICE_CS_CHICKEN2,
1152 GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
1153 }
1154
1171 /* WaDisableSbeCacheDispatchPortSharing:bxt */ 1155 /* WaDisableSbeCacheDispatchPortSharing:bxt */
1172 if (IS_BXT_REVID(dev, 0, BXT_REVID_B0)) { 1156 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) {
1173 WA_SET_BIT_MASKED( 1157 WA_SET_BIT_MASKED(
1174 GEN7_HALF_SLICE_CHICKEN1, 1158 GEN7_HALF_SLICE_CHICKEN1,
1175 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); 1159 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
@@ -1179,7 +1163,7 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
1179 /* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */ 1163 /* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
1180 /* WaDisableObjectLevelPreemtionForInstanceId:bxt */ 1164 /* WaDisableObjectLevelPreemtionForInstanceId:bxt */
1181 /* WaDisableLSQCROPERFforOCL:bxt */ 1165 /* WaDisableLSQCROPERFforOCL:bxt */
1182 if (IS_BXT_REVID(dev, 0, BXT_REVID_A1)) { 1166 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
1183 ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1); 1167 ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
1184 if (ret) 1168 if (ret)
1185 return ret; 1169 return ret;
@@ -1189,17 +1173,27 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
1189 return ret; 1173 return ret;
1190 } 1174 }
1191 1175
1176 /* WaProgramL3SqcReg1DefaultForPerf:bxt */
1177 if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
1178 I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) |
1179 L3_HIGH_PRIO_CREDITS(2));
1180
1192 /* WaInsertDummyPushConstPs:bxt */ 1181 /* WaInsertDummyPushConstPs:bxt */
1193 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) 1182 if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
1194 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, 1183 WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
1195 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); 1184 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
1196 1185
1186 /* WaInPlaceDecompressionHang:bxt */
1187 if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
1188 WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
1189 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
1190
1197 return 0; 1191 return 0;
1198} 1192}
1199 1193
1200static int kbl_init_workarounds(struct intel_engine_cs *engine) 1194static int kbl_init_workarounds(struct intel_engine_cs *engine)
1201{ 1195{
1202 struct drm_i915_private *dev_priv = engine->dev->dev_private; 1196 struct drm_i915_private *dev_priv = engine->i915;
1203 int ret; 1197 int ret;
1204 1198
1205 ret = gen9_init_workarounds(engine); 1199 ret = gen9_init_workarounds(engine);
@@ -1241,6 +1235,10 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine)
1241 GEN7_HALF_SLICE_CHICKEN1, 1235 GEN7_HALF_SLICE_CHICKEN1,
1242 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); 1236 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
1243 1237
1238 /* WaInPlaceDecompressionHang:kbl */
1239 WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
1240 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
1241
1244 /* WaDisableLSQCROPERFforOCL:kbl */ 1242 /* WaDisableLSQCROPERFforOCL:kbl */
1245 ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4); 1243 ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
1246 if (ret) 1244 if (ret)
@@ -1251,24 +1249,23 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine)
1251 1249
1252int init_workarounds_ring(struct intel_engine_cs *engine) 1250int init_workarounds_ring(struct intel_engine_cs *engine)
1253{ 1251{
1254 struct drm_device *dev = engine->dev; 1252 struct drm_i915_private *dev_priv = engine->i915;
1255 struct drm_i915_private *dev_priv = dev->dev_private;
1256 1253
1257 WARN_ON(engine->id != RCS); 1254 WARN_ON(engine->id != RCS);
1258 1255
1259 dev_priv->workarounds.count = 0; 1256 dev_priv->workarounds.count = 0;
1260 dev_priv->workarounds.hw_whitelist_count[RCS] = 0; 1257 dev_priv->workarounds.hw_whitelist_count[RCS] = 0;
1261 1258
1262 if (IS_BROADWELL(dev)) 1259 if (IS_BROADWELL(dev_priv))
1263 return bdw_init_workarounds(engine); 1260 return bdw_init_workarounds(engine);
1264 1261
1265 if (IS_CHERRYVIEW(dev)) 1262 if (IS_CHERRYVIEW(dev_priv))
1266 return chv_init_workarounds(engine); 1263 return chv_init_workarounds(engine);
1267 1264
1268 if (IS_SKYLAKE(dev)) 1265 if (IS_SKYLAKE(dev_priv))
1269 return skl_init_workarounds(engine); 1266 return skl_init_workarounds(engine);
1270 1267
1271 if (IS_BROXTON(dev)) 1268 if (IS_BROXTON(dev_priv))
1272 return bxt_init_workarounds(engine); 1269 return bxt_init_workarounds(engine);
1273 1270
1274 if (IS_KABYLAKE(dev_priv)) 1271 if (IS_KABYLAKE(dev_priv))
@@ -1279,14 +1276,13 @@ int init_workarounds_ring(struct intel_engine_cs *engine)
1279 1276
1280static int init_render_ring(struct intel_engine_cs *engine) 1277static int init_render_ring(struct intel_engine_cs *engine)
1281{ 1278{
1282 struct drm_device *dev = engine->dev; 1279 struct drm_i915_private *dev_priv = engine->i915;
1283 struct drm_i915_private *dev_priv = dev->dev_private;
1284 int ret = init_ring_common(engine); 1280 int ret = init_ring_common(engine);
1285 if (ret) 1281 if (ret)
1286 return ret; 1282 return ret;
1287 1283
1288 /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */ 1284 /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
1289 if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7) 1285 if (IS_GEN(dev_priv, 4, 6))
1290 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH)); 1286 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
1291 1287
1292 /* We need to disable the AsyncFlip performance optimisations in order 1288 /* We need to disable the AsyncFlip performance optimisations in order
@@ -1295,22 +1291,22 @@ static int init_render_ring(struct intel_engine_cs *engine)
1295 * 1291 *
1296 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv 1292 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
1297 */ 1293 */
1298 if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) 1294 if (IS_GEN(dev_priv, 6, 7))
1299 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE)); 1295 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
1300 1296
1301 /* Required for the hardware to program scanline values for waiting */ 1297 /* Required for the hardware to program scanline values for waiting */
1302 /* WaEnableFlushTlbInvalidationMode:snb */ 1298 /* WaEnableFlushTlbInvalidationMode:snb */
1303 if (INTEL_INFO(dev)->gen == 6) 1299 if (IS_GEN6(dev_priv))
1304 I915_WRITE(GFX_MODE, 1300 I915_WRITE(GFX_MODE,
1305 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT)); 1301 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
1306 1302
1307 /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */ 1303 /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
1308 if (IS_GEN7(dev)) 1304 if (IS_GEN7(dev_priv))
1309 I915_WRITE(GFX_MODE_GEN7, 1305 I915_WRITE(GFX_MODE_GEN7,
1310 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) | 1306 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
1311 _MASKED_BIT_ENABLE(GFX_REPLAY_MODE)); 1307 _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
1312 1308
1313 if (IS_GEN6(dev)) { 1309 if (IS_GEN6(dev_priv)) {
1314 /* From the Sandybridge PRM, volume 1 part 3, page 24: 1310 /* From the Sandybridge PRM, volume 1 part 3, page 24:
1315 * "If this bit is set, STCunit will have LRA as replacement 1311 * "If this bit is set, STCunit will have LRA as replacement
1316 * policy. [...] This bit must be reset. LRA replacement 1312 * policy. [...] This bit must be reset. LRA replacement
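The MI_MODE, GFX_MODE and INSTPM writes in this function target "masked" registers: the upper 16 bits of the written value select which of the lower 16 bits take effect, so single bits can be set or cleared without a read-modify-write cycle. Simplified from the driver's register header; the real macros also add compile-time mask checks:

	#define _MASKED_FIELD(mask, value)	(((mask) << 16) | (value))
	#define _MASKED_BIT_ENABLE(a)		_MASKED_FIELD((a), (a))
	#define _MASKED_BIT_DISABLE(a)		_MASKED_FIELD((a), 0)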
@@ -1320,19 +1316,18 @@ static int init_render_ring(struct intel_engine_cs *engine)
1320 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB)); 1316 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
1321 } 1317 }
1322 1318
1323 if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) 1319 if (IS_GEN(dev_priv, 6, 7))
1324 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); 1320 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
1325 1321
1326 if (HAS_L3_DPF(dev)) 1322 if (INTEL_INFO(dev_priv)->gen >= 6)
1327 I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev)); 1323 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
1328 1324
1329 return init_workarounds_ring(engine); 1325 return init_workarounds_ring(engine);
1330} 1326}
1331 1327
1332static void render_ring_cleanup(struct intel_engine_cs *engine) 1328static void render_ring_cleanup(struct intel_engine_cs *engine)
1333{ 1329{
1334 struct drm_device *dev = engine->dev; 1330 struct drm_i915_private *dev_priv = engine->i915;
1335 struct drm_i915_private *dev_priv = dev->dev_private;
1336 1331
1337 if (dev_priv->semaphore_obj) { 1332 if (dev_priv->semaphore_obj) {
1338 i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj); 1333 i915_gem_object_ggtt_unpin(dev_priv->semaphore_obj);
@@ -1348,13 +1343,12 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
1348{ 1343{
1349#define MBOX_UPDATE_DWORDS 8 1344#define MBOX_UPDATE_DWORDS 8
1350 struct intel_engine_cs *signaller = signaller_req->engine; 1345 struct intel_engine_cs *signaller = signaller_req->engine;
1351 struct drm_device *dev = signaller->dev; 1346 struct drm_i915_private *dev_priv = signaller_req->i915;
1352 struct drm_i915_private *dev_priv = dev->dev_private;
1353 struct intel_engine_cs *waiter; 1347 struct intel_engine_cs *waiter;
1354 enum intel_engine_id id; 1348 enum intel_engine_id id;
1355 int ret, num_rings; 1349 int ret, num_rings;
1356 1350
1357 num_rings = hweight32(INTEL_INFO(dev)->ring_mask); 1351 num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
1358 num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS; 1352 num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
1359#undef MBOX_UPDATE_DWORDS 1353#undef MBOX_UPDATE_DWORDS
1360 1354
@@ -1363,19 +1357,17 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
1363 return ret; 1357 return ret;
1364 1358
1365 for_each_engine_id(waiter, dev_priv, id) { 1359 for_each_engine_id(waiter, dev_priv, id) {
1366 u32 seqno;
1367 u64 gtt_offset = signaller->semaphore.signal_ggtt[id]; 1360 u64 gtt_offset = signaller->semaphore.signal_ggtt[id];
1368 if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID) 1361 if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
1369 continue; 1362 continue;
1370 1363
1371 seqno = i915_gem_request_get_seqno(signaller_req);
1372 intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6)); 1364 intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
1373 intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB | 1365 intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
1374 PIPE_CONTROL_QW_WRITE | 1366 PIPE_CONTROL_QW_WRITE |
1375 PIPE_CONTROL_FLUSH_ENABLE); 1367 PIPE_CONTROL_CS_STALL);
1376 intel_ring_emit(signaller, lower_32_bits(gtt_offset)); 1368 intel_ring_emit(signaller, lower_32_bits(gtt_offset));
1377 intel_ring_emit(signaller, upper_32_bits(gtt_offset)); 1369 intel_ring_emit(signaller, upper_32_bits(gtt_offset));
1378 intel_ring_emit(signaller, seqno); 1370 intel_ring_emit(signaller, signaller_req->seqno);
1379 intel_ring_emit(signaller, 0); 1371 intel_ring_emit(signaller, 0);
1380 intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL | 1372 intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
1381 MI_SEMAPHORE_TARGET(waiter->hw_id)); 1373 MI_SEMAPHORE_TARGET(waiter->hw_id));
@@ -1390,13 +1382,12 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
1390{ 1382{
1391#define MBOX_UPDATE_DWORDS 6 1383#define MBOX_UPDATE_DWORDS 6
1392 struct intel_engine_cs *signaller = signaller_req->engine; 1384 struct intel_engine_cs *signaller = signaller_req->engine;
1393 struct drm_device *dev = signaller->dev; 1385 struct drm_i915_private *dev_priv = signaller_req->i915;
1394 struct drm_i915_private *dev_priv = dev->dev_private;
1395 struct intel_engine_cs *waiter; 1386 struct intel_engine_cs *waiter;
1396 enum intel_engine_id id; 1387 enum intel_engine_id id;
1397 int ret, num_rings; 1388 int ret, num_rings;
1398 1389
1399 num_rings = hweight32(INTEL_INFO(dev)->ring_mask); 1390 num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
1400 num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS; 1391 num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
1401#undef MBOX_UPDATE_DWORDS 1392#undef MBOX_UPDATE_DWORDS
1402 1393
@@ -1405,18 +1396,16 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
1405 return ret; 1396 return ret;
1406 1397
1407 for_each_engine_id(waiter, dev_priv, id) { 1398 for_each_engine_id(waiter, dev_priv, id) {
1408 u32 seqno;
1409 u64 gtt_offset = signaller->semaphore.signal_ggtt[id]; 1399 u64 gtt_offset = signaller->semaphore.signal_ggtt[id];
1410 if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID) 1400 if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
1411 continue; 1401 continue;
1412 1402
1413 seqno = i915_gem_request_get_seqno(signaller_req);
1414 intel_ring_emit(signaller, (MI_FLUSH_DW + 1) | 1403 intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
1415 MI_FLUSH_DW_OP_STOREDW); 1404 MI_FLUSH_DW_OP_STOREDW);
1416 intel_ring_emit(signaller, lower_32_bits(gtt_offset) | 1405 intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
1417 MI_FLUSH_DW_USE_GTT); 1406 MI_FLUSH_DW_USE_GTT);
1418 intel_ring_emit(signaller, upper_32_bits(gtt_offset)); 1407 intel_ring_emit(signaller, upper_32_bits(gtt_offset));
1419 intel_ring_emit(signaller, seqno); 1408 intel_ring_emit(signaller, signaller_req->seqno);
1420 intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL | 1409 intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
1421 MI_SEMAPHORE_TARGET(waiter->hw_id)); 1410 MI_SEMAPHORE_TARGET(waiter->hw_id));
1422 intel_ring_emit(signaller, 0); 1411 intel_ring_emit(signaller, 0);
@@ -1429,14 +1418,13 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
1429 unsigned int num_dwords) 1418 unsigned int num_dwords)
1430{ 1419{
1431 struct intel_engine_cs *signaller = signaller_req->engine; 1420 struct intel_engine_cs *signaller = signaller_req->engine;
1432 struct drm_device *dev = signaller->dev; 1421 struct drm_i915_private *dev_priv = signaller_req->i915;
1433 struct drm_i915_private *dev_priv = dev->dev_private;
1434 struct intel_engine_cs *useless; 1422 struct intel_engine_cs *useless;
1435 enum intel_engine_id id; 1423 enum intel_engine_id id;
1436 int ret, num_rings; 1424 int ret, num_rings;
1437 1425
1438#define MBOX_UPDATE_DWORDS 3 1426#define MBOX_UPDATE_DWORDS 3
1439 num_rings = hweight32(INTEL_INFO(dev)->ring_mask); 1427 num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
1440 num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2); 1428 num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
1441#undef MBOX_UPDATE_DWORDS 1429#undef MBOX_UPDATE_DWORDS
1442 1430
@@ -1448,11 +1436,9 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
1448 i915_reg_t mbox_reg = signaller->semaphore.mbox.signal[id]; 1436 i915_reg_t mbox_reg = signaller->semaphore.mbox.signal[id];
1449 1437
1450 if (i915_mmio_reg_valid(mbox_reg)) { 1438 if (i915_mmio_reg_valid(mbox_reg)) {
1451 u32 seqno = i915_gem_request_get_seqno(signaller_req);
1452
1453 intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1)); 1439 intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
1454 intel_ring_emit_reg(signaller, mbox_reg); 1440 intel_ring_emit_reg(signaller, mbox_reg);
1455 intel_ring_emit(signaller, seqno); 1441 intel_ring_emit(signaller, signaller_req->seqno);
1456 } 1442 }
1457 } 1443 }
1458 1444
@@ -1488,17 +1474,45 @@ gen6_add_request(struct drm_i915_gem_request *req)
1488 intel_ring_emit(engine, MI_STORE_DWORD_INDEX); 1474 intel_ring_emit(engine, MI_STORE_DWORD_INDEX);
1489 intel_ring_emit(engine, 1475 intel_ring_emit(engine,
1490 I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 1476 I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
1477 intel_ring_emit(engine, req->seqno);
1478 intel_ring_emit(engine, MI_USER_INTERRUPT);
1479 __intel_ring_advance(engine);
1480
1481 return 0;
1482}
1483
1484static int
1485gen8_render_add_request(struct drm_i915_gem_request *req)
1486{
1487 struct intel_engine_cs *engine = req->engine;
1488 int ret;
1489
1490 if (engine->semaphore.signal)
1491 ret = engine->semaphore.signal(req, 8);
1492 else
1493 ret = intel_ring_begin(req, 8);
1494 if (ret)
1495 return ret;
1496
1497 intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(6));
1498 intel_ring_emit(engine, (PIPE_CONTROL_GLOBAL_GTT_IVB |
1499 PIPE_CONTROL_CS_STALL |
1500 PIPE_CONTROL_QW_WRITE));
1501 intel_ring_emit(engine, intel_hws_seqno_address(req->engine));
1502 intel_ring_emit(engine, 0);
1491 intel_ring_emit(engine, i915_gem_request_get_seqno(req)); 1503 intel_ring_emit(engine, i915_gem_request_get_seqno(req));
1504 /* We're thrashing one dword of HWS. */
1505 intel_ring_emit(engine, 0);
1492 intel_ring_emit(engine, MI_USER_INTERRUPT); 1506 intel_ring_emit(engine, MI_USER_INTERRUPT);
1507 intel_ring_emit(engine, MI_NOOP);
1493 __intel_ring_advance(engine); 1508 __intel_ring_advance(engine);
1494 1509
1495 return 0; 1510 return 0;
1496} 1511}
1497 1512
1498static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev, 1513static inline bool i915_gem_has_seqno_wrapped(struct drm_i915_private *dev_priv,
1499 u32 seqno) 1514 u32 seqno)
1500{ 1515{
1501 struct drm_i915_private *dev_priv = dev->dev_private;
1502 return dev_priv->last_seqno < seqno; 1516 return dev_priv->last_seqno < seqno;
1503} 1517}
1504 1518
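For context on the wrap check above: seqnos are 32-bit and monotonically increasing, so ordering elsewhere in the driver is decided with wrap-safe signed arithmetic along these lines (illustrative name, same idiom as the existing i915 helper):

	static inline bool example_seqno_passed(u32 seq1, u32 seq2)
	{
		/* Signed subtraction tolerates a single 32-bit wrap. */
		return (s32)(seq1 - seq2) >= 0;
	}

i915_gem_has_seqno_wrapped() is the one place that wants the raw comparison instead, because it is explicitly asking whether the counter has wrapped since the last request.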
@@ -1516,7 +1530,9 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
1516 u32 seqno) 1530 u32 seqno)
1517{ 1531{
1518 struct intel_engine_cs *waiter = waiter_req->engine; 1532 struct intel_engine_cs *waiter = waiter_req->engine;
1519 struct drm_i915_private *dev_priv = waiter->dev->dev_private; 1533 struct drm_i915_private *dev_priv = waiter_req->i915;
1534 u64 offset = GEN8_WAIT_OFFSET(waiter, signaller->id);
1535 struct i915_hw_ppgtt *ppgtt;
1520 int ret; 1536 int ret;
1521 1537
1522 ret = intel_ring_begin(waiter_req, 4); 1538 ret = intel_ring_begin(waiter_req, 4);
@@ -1525,14 +1541,20 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
1525 1541
1526 intel_ring_emit(waiter, MI_SEMAPHORE_WAIT | 1542 intel_ring_emit(waiter, MI_SEMAPHORE_WAIT |
1527 MI_SEMAPHORE_GLOBAL_GTT | 1543 MI_SEMAPHORE_GLOBAL_GTT |
1528 MI_SEMAPHORE_POLL |
1529 MI_SEMAPHORE_SAD_GTE_SDD); 1544 MI_SEMAPHORE_SAD_GTE_SDD);
1530 intel_ring_emit(waiter, seqno); 1545 intel_ring_emit(waiter, seqno);
1531 intel_ring_emit(waiter, 1546 intel_ring_emit(waiter, lower_32_bits(offset));
1532 lower_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id))); 1547 intel_ring_emit(waiter, upper_32_bits(offset));
1533 intel_ring_emit(waiter,
1534 upper_32_bits(GEN8_WAIT_OFFSET(waiter, signaller->id)));
1535 intel_ring_advance(waiter); 1548 intel_ring_advance(waiter);
1549
1550 /* When the !RCS engines idle waiting upon a semaphore, they lose their
1551 * pagetables and we must reload them before executing the batch.
1552 * We do this on the i915_switch_context() following the wait and
1553 * before the dispatch.
1554 */
1555 ppgtt = waiter_req->ctx->ppgtt;
1556 if (ppgtt && waiter_req->engine->id != RCS)
1557 ppgtt->pd_dirty_rings |= intel_engine_flag(waiter_req->engine);
1536 return 0; 1558 return 0;
1537} 1559}
1538 1560
@@ -1561,7 +1583,7 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
1561 return ret; 1583 return ret;
1562 1584
1563 /* If seqno wrap happened, omit the wait with no-ops */ 1585 /* If seqno wrap happened, omit the wait with no-ops */
1564 if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) { 1586 if (likely(!i915_gem_has_seqno_wrapped(waiter_req->i915, seqno))) {
1565 intel_ring_emit(waiter, dw1 | wait_mbox); 1587 intel_ring_emit(waiter, dw1 | wait_mbox);
1566 intel_ring_emit(waiter, seqno); 1588 intel_ring_emit(waiter, seqno);
1567 intel_ring_emit(waiter, 0); 1589 intel_ring_emit(waiter, 0);
@@ -1577,72 +1599,28 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
1577 return 0; 1599 return 0;
1578} 1600}
1579 1601
1580#define PIPE_CONTROL_FLUSH(ring__, addr__) \ 1602static void
1581do { \ 1603gen5_seqno_barrier(struct intel_engine_cs *ring)
1582 intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \
1583 PIPE_CONTROL_DEPTH_STALL); \
1584 intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \
1585 intel_ring_emit(ring__, 0); \
1586 intel_ring_emit(ring__, 0); \
1587} while (0)
1588
1589static int
1590pc_render_add_request(struct drm_i915_gem_request *req)
1591{ 1604{
1592 struct intel_engine_cs *engine = req->engine; 1605 /* MI_STORE are internally buffered by the GPU and not flushed
1593 u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES; 1606 * either by MI_FLUSH or SyncFlush or any other combination of
1594 int ret; 1607 * MI commands.
1595
1596 /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
1597 * incoherent with writes to memory, i.e. completely fubar,
1598 * so we need to use PIPE_NOTIFY instead.
1599 * 1608 *
1600 * However, we also need to workaround the qword write 1609 * "Only the submission of the store operation is guaranteed.
1601 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to 1610 * The write result will be complete (coherent) some time later
1602 * memory before requesting an interrupt. 1611 * (this is practically a finite period but there is no guaranteed
1612 * latency)."
1613 *
1614 * Empirically, we observe that we need a delay of at least 75us to
1615 * be sure that the seqno write is visible by the CPU.
1603 */ 1616 */
1604 ret = intel_ring_begin(req, 32); 1617 usleep_range(125, 250);
1605 if (ret)
1606 return ret;
1607
1608 intel_ring_emit(engine,
1609 GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
1610 PIPE_CONTROL_WRITE_FLUSH |
1611 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
1612 intel_ring_emit(engine,
1613 engine->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
1614 intel_ring_emit(engine, i915_gem_request_get_seqno(req));
1615 intel_ring_emit(engine, 0);
1616 PIPE_CONTROL_FLUSH(engine, scratch_addr);
1617 scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
1618 PIPE_CONTROL_FLUSH(engine, scratch_addr);
1619 scratch_addr += 2 * CACHELINE_BYTES;
1620 PIPE_CONTROL_FLUSH(engine, scratch_addr);
1621 scratch_addr += 2 * CACHELINE_BYTES;
1622 PIPE_CONTROL_FLUSH(engine, scratch_addr);
1623 scratch_addr += 2 * CACHELINE_BYTES;
1624 PIPE_CONTROL_FLUSH(engine, scratch_addr);
1625 scratch_addr += 2 * CACHELINE_BYTES;
1626 PIPE_CONTROL_FLUSH(engine, scratch_addr);
1627
1628 intel_ring_emit(engine,
1629 GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
1630 PIPE_CONTROL_WRITE_FLUSH |
1631 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
1632 PIPE_CONTROL_NOTIFY);
1633 intel_ring_emit(engine,
1634 engine->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
1635 intel_ring_emit(engine, i915_gem_request_get_seqno(req));
1636 intel_ring_emit(engine, 0);
1637 __intel_ring_advance(engine);
1638
1639 return 0;
1640} 1618}
1641 1619
1642static void 1620static void
1643gen6_seqno_barrier(struct intel_engine_cs *engine) 1621gen6_seqno_barrier(struct intel_engine_cs *engine)
1644{ 1622{
1645 struct drm_i915_private *dev_priv = engine->dev->dev_private; 1623 struct drm_i915_private *dev_priv = engine->i915;
1646 1624
1647 /* Workaround to force correct ordering between irq and seqno writes on 1625 /* Workaround to force correct ordering between irq and seqno writes on
1648 * ivb (and maybe also on snb) by reading from a CS register (like 1626 * ivb (and maybe also on snb) by reading from a CS register (like
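The hunk above drops the reference-counted gen5/i9xx/i8xx *_ring_get_irq/put_irq helpers in favour of plain irq_enable/irq_disable hooks; the 0->1 and 1->0 counting they used to do under dev_priv->irq_lock is presumably taken over by a single shared caller (the breadcrumbs machinery initialised via intel_engine_init_breadcrumbs later in this diff). A minimal stand-alone sketch of the contract the new hooks rely on, with invented names and no locking, not driver code:

/* Hypothetical model: the enable/disable callbacks are only invoked on the
 * 0->1 and 1->0 refcount transitions, mirroring what the old
 * *_ring_get_irq/put_irq helpers did inline under the irq spinlock. */
#include <assert.h>
#include <stdio.h>

struct engine_irq {
	unsigned int refcount;	/* protected by a lock in real code */
	void (*irq_enable)(struct engine_irq *e);
	void (*irq_disable)(struct engine_irq *e);
};

static void fake_enable(struct engine_irq *e)  { (void)e; puts("unmask user interrupt"); }
static void fake_disable(struct engine_irq *e) { (void)e; puts("mask user interrupt"); }

static void engine_irq_get(struct engine_irq *e)
{
	if (e->refcount++ == 0)		/* first user arms the interrupt */
		e->irq_enable(e);
}

static void engine_irq_put(struct engine_irq *e)
{
	assert(e->refcount > 0);
	if (--e->refcount == 0)		/* last user disarms it again */
		e->irq_disable(e);
}

int main(void)
{
	struct engine_irq e = { 0, fake_enable, fake_disable };

	engine_irq_get(&e);	/* prints once */
	engine_irq_get(&e);	/* nested use: no hardware access */
	engine_irq_put(&e);
	engine_irq_put(&e);	/* prints once */
	return 0;
}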
@@ -1664,133 +1642,54 @@ gen6_seqno_barrier(struct intel_engine_cs *engine)
1664 spin_unlock_irq(&dev_priv->uncore.lock); 1642 spin_unlock_irq(&dev_priv->uncore.lock);
1665} 1643}
1666 1644
1667static u32
1668ring_get_seqno(struct intel_engine_cs *engine)
1669{
1670 return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
1671}
1672
1673static void 1645static void
1674ring_set_seqno(struct intel_engine_cs *engine, u32 seqno) 1646gen5_irq_enable(struct intel_engine_cs *engine)
1675{
1676 intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
1677}
1678
1679static u32
1680pc_render_get_seqno(struct intel_engine_cs *engine)
1681{ 1647{
1682 return engine->scratch.cpu_page[0]; 1648 gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
1683} 1649}
1684 1650
1685static void 1651static void
1686pc_render_set_seqno(struct intel_engine_cs *engine, u32 seqno) 1652gen5_irq_disable(struct intel_engine_cs *engine)
1687{
1688 engine->scratch.cpu_page[0] = seqno;
1689}
1690
1691static bool
1692gen5_ring_get_irq(struct intel_engine_cs *engine)
1693{ 1653{
1694 struct drm_device *dev = engine->dev; 1654 gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
1695 struct drm_i915_private *dev_priv = dev->dev_private;
1696 unsigned long flags;
1697
1698 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
1699 return false;
1700
1701 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1702 if (engine->irq_refcount++ == 0)
1703 gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
1704 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1705
1706 return true;
1707} 1655}
1708 1656
1709static void 1657static void
1710gen5_ring_put_irq(struct intel_engine_cs *engine) 1658i9xx_irq_enable(struct intel_engine_cs *engine)
1711{ 1659{
1712 struct drm_device *dev = engine->dev; 1660 struct drm_i915_private *dev_priv = engine->i915;
1713 struct drm_i915_private *dev_priv = dev->dev_private;
1714 unsigned long flags;
1715
1716 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1717 if (--engine->irq_refcount == 0)
1718 gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
1719 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1720}
1721
1722static bool
1723i9xx_ring_get_irq(struct intel_engine_cs *engine)
1724{
1725 struct drm_device *dev = engine->dev;
1726 struct drm_i915_private *dev_priv = dev->dev_private;
1727 unsigned long flags;
1728
1729 if (!intel_irqs_enabled(dev_priv))
1730 return false;
1731
1732 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1733 if (engine->irq_refcount++ == 0) {
1734 dev_priv->irq_mask &= ~engine->irq_enable_mask;
1735 I915_WRITE(IMR, dev_priv->irq_mask);
1736 POSTING_READ(IMR);
1737 }
1738 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1739 1661
1740 return true; 1662 dev_priv->irq_mask &= ~engine->irq_enable_mask;
1663 I915_WRITE(IMR, dev_priv->irq_mask);
1664 POSTING_READ_FW(RING_IMR(engine->mmio_base));
1741} 1665}
1742 1666
1743static void 1667static void
1744i9xx_ring_put_irq(struct intel_engine_cs *engine) 1668i9xx_irq_disable(struct intel_engine_cs *engine)
1745{ 1669{
1746 struct drm_device *dev = engine->dev; 1670 struct drm_i915_private *dev_priv = engine->i915;
1747 struct drm_i915_private *dev_priv = dev->dev_private;
1748 unsigned long flags;
1749 1671
1750 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1672 dev_priv->irq_mask |= engine->irq_enable_mask;
1751 if (--engine->irq_refcount == 0) { 1673 I915_WRITE(IMR, dev_priv->irq_mask);
1752 dev_priv->irq_mask |= engine->irq_enable_mask;
1753 I915_WRITE(IMR, dev_priv->irq_mask);
1754 POSTING_READ(IMR);
1755 }
1756 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1757} 1674}
1758 1675
1759static bool 1676static void
1760i8xx_ring_get_irq(struct intel_engine_cs *engine) 1677i8xx_irq_enable(struct intel_engine_cs *engine)
1761{ 1678{
1762 struct drm_device *dev = engine->dev; 1679 struct drm_i915_private *dev_priv = engine->i915;
1763 struct drm_i915_private *dev_priv = dev->dev_private;
1764 unsigned long flags;
1765
1766 if (!intel_irqs_enabled(dev_priv))
1767 return false;
1768
1769 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1770 if (engine->irq_refcount++ == 0) {
1771 dev_priv->irq_mask &= ~engine->irq_enable_mask;
1772 I915_WRITE16(IMR, dev_priv->irq_mask);
1773 POSTING_READ16(IMR);
1774 }
1775 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1776 1680
1777 return true; 1681 dev_priv->irq_mask &= ~engine->irq_enable_mask;
1682 I915_WRITE16(IMR, dev_priv->irq_mask);
1683 POSTING_READ16(RING_IMR(engine->mmio_base));
1778} 1684}
1779 1685
1780static void 1686static void
1781i8xx_ring_put_irq(struct intel_engine_cs *engine) 1687i8xx_irq_disable(struct intel_engine_cs *engine)
1782{ 1688{
1783 struct drm_device *dev = engine->dev; 1689 struct drm_i915_private *dev_priv = engine->i915;
1784 struct drm_i915_private *dev_priv = dev->dev_private;
1785 unsigned long flags;
1786 1690
1787 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1691 dev_priv->irq_mask |= engine->irq_enable_mask;
1788 if (--engine->irq_refcount == 0) { 1692 I915_WRITE16(IMR, dev_priv->irq_mask);
1789 dev_priv->irq_mask |= engine->irq_enable_mask;
1790 I915_WRITE16(IMR, dev_priv->irq_mask);
1791 POSTING_READ16(IMR);
1792 }
1793 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1794} 1693}
1795 1694
1796static int 1695static int
@@ -1824,135 +1723,68 @@ i9xx_add_request(struct drm_i915_gem_request *req)
1824 intel_ring_emit(engine, MI_STORE_DWORD_INDEX); 1723 intel_ring_emit(engine, MI_STORE_DWORD_INDEX);
1825 intel_ring_emit(engine, 1724 intel_ring_emit(engine,
1826 I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 1725 I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
1827 intel_ring_emit(engine, i915_gem_request_get_seqno(req)); 1726 intel_ring_emit(engine, req->seqno);
1828 intel_ring_emit(engine, MI_USER_INTERRUPT); 1727 intel_ring_emit(engine, MI_USER_INTERRUPT);
1829 __intel_ring_advance(engine); 1728 __intel_ring_advance(engine);
1830 1729
1831 return 0; 1730 return 0;
1832} 1731}
1833 1732
1834static bool 1733static void
1835gen6_ring_get_irq(struct intel_engine_cs *engine) 1734gen6_irq_enable(struct intel_engine_cs *engine)
1836{ 1735{
1837 struct drm_device *dev = engine->dev; 1736 struct drm_i915_private *dev_priv = engine->i915;
1838 struct drm_i915_private *dev_priv = dev->dev_private;
1839 unsigned long flags;
1840
1841 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
1842 return false;
1843
1844 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1845 if (engine->irq_refcount++ == 0) {
1846 if (HAS_L3_DPF(dev) && engine->id == RCS)
1847 I915_WRITE_IMR(engine,
1848 ~(engine->irq_enable_mask |
1849 GT_PARITY_ERROR(dev)));
1850 else
1851 I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
1852 gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
1853 }
1854 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1855 1737
1856 return true; 1738 I915_WRITE_IMR(engine,
1739 ~(engine->irq_enable_mask |
1740 engine->irq_keep_mask));
1741 gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
1857} 1742}
1858 1743
1859static void 1744static void
1860gen6_ring_put_irq(struct intel_engine_cs *engine) 1745gen6_irq_disable(struct intel_engine_cs *engine)
1861{ 1746{
1862 struct drm_device *dev = engine->dev; 1747 struct drm_i915_private *dev_priv = engine->i915;
1863 struct drm_i915_private *dev_priv = dev->dev_private;
1864 unsigned long flags;
1865 1748
1866 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1749 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
1867 if (--engine->irq_refcount == 0) { 1750 gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
1868 if (HAS_L3_DPF(dev) && engine->id == RCS)
1869 I915_WRITE_IMR(engine, ~GT_PARITY_ERROR(dev));
1870 else
1871 I915_WRITE_IMR(engine, ~0);
1872 gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
1873 }
1874 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1875} 1751}
1876 1752
1877static bool 1753static void
1878hsw_vebox_get_irq(struct intel_engine_cs *engine) 1754hsw_vebox_irq_enable(struct intel_engine_cs *engine)
1879{ 1755{
1880 struct drm_device *dev = engine->dev; 1756 struct drm_i915_private *dev_priv = engine->i915;
1881 struct drm_i915_private *dev_priv = dev->dev_private;
1882 unsigned long flags;
1883
1884 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
1885 return false;
1886 1757
1887 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1758 I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
1888 if (engine->irq_refcount++ == 0) { 1759 gen6_enable_pm_irq(dev_priv, engine->irq_enable_mask);
1889 I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
1890 gen6_enable_pm_irq(dev_priv, engine->irq_enable_mask);
1891 }
1892 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1893
1894 return true;
1895} 1760}
1896 1761
1897static void 1762static void
1898hsw_vebox_put_irq(struct intel_engine_cs *engine) 1763hsw_vebox_irq_disable(struct intel_engine_cs *engine)
1899{ 1764{
1900 struct drm_device *dev = engine->dev; 1765 struct drm_i915_private *dev_priv = engine->i915;
1901 struct drm_i915_private *dev_priv = dev->dev_private;
1902 unsigned long flags;
1903 1766
1904 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1767 I915_WRITE_IMR(engine, ~0);
1905 if (--engine->irq_refcount == 0) { 1768 gen6_disable_pm_irq(dev_priv, engine->irq_enable_mask);
1906 I915_WRITE_IMR(engine, ~0);
1907 gen6_disable_pm_irq(dev_priv, engine->irq_enable_mask);
1908 }
1909 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1910} 1769}
1911 1770
1912static bool 1771static void
1913gen8_ring_get_irq(struct intel_engine_cs *engine) 1772gen8_irq_enable(struct intel_engine_cs *engine)
1914{ 1773{
1915 struct drm_device *dev = engine->dev; 1774 struct drm_i915_private *dev_priv = engine->i915;
1916 struct drm_i915_private *dev_priv = dev->dev_private;
1917 unsigned long flags;
1918
1919 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
1920 return false;
1921 1775
1922 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1776 I915_WRITE_IMR(engine,
1923 if (engine->irq_refcount++ == 0) { 1777 ~(engine->irq_enable_mask |
1924 if (HAS_L3_DPF(dev) && engine->id == RCS) { 1778 engine->irq_keep_mask));
1925 I915_WRITE_IMR(engine, 1779 POSTING_READ_FW(RING_IMR(engine->mmio_base));
1926 ~(engine->irq_enable_mask |
1927 GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
1928 } else {
1929 I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
1930 }
1931 POSTING_READ(RING_IMR(engine->mmio_base));
1932 }
1933 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1934
1935 return true;
1936} 1780}
1937 1781
1938static void 1782static void
1939gen8_ring_put_irq(struct intel_engine_cs *engine) 1783gen8_irq_disable(struct intel_engine_cs *engine)
1940{ 1784{
1941 struct drm_device *dev = engine->dev; 1785 struct drm_i915_private *dev_priv = engine->i915;
1942 struct drm_i915_private *dev_priv = dev->dev_private;
1943 unsigned long flags;
1944 1786
1945 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1787 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
1946 if (--engine->irq_refcount == 0) {
1947 if (HAS_L3_DPF(dev) && engine->id == RCS) {
1948 I915_WRITE_IMR(engine,
1949 ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
1950 } else {
1951 I915_WRITE_IMR(engine, ~0);
1952 }
1953 POSTING_READ(RING_IMR(engine->mmio_base));
1954 }
1955 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
1956} 1788}
1957 1789
1958static int 1790static int
@@ -2066,12 +1898,12 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
2066 1898
2067static void cleanup_phys_status_page(struct intel_engine_cs *engine) 1899static void cleanup_phys_status_page(struct intel_engine_cs *engine)
2068{ 1900{
2069 struct drm_i915_private *dev_priv = to_i915(engine->dev); 1901 struct drm_i915_private *dev_priv = engine->i915;
2070 1902
2071 if (!dev_priv->status_page_dmah) 1903 if (!dev_priv->status_page_dmah)
2072 return; 1904 return;
2073 1905
2074 drm_pci_free(engine->dev, dev_priv->status_page_dmah); 1906 drm_pci_free(&dev_priv->drm, dev_priv->status_page_dmah);
2075 engine->status_page.page_addr = NULL; 1907 engine->status_page.page_addr = NULL;
2076} 1908}
2077 1909
@@ -2097,10 +1929,10 @@ static int init_status_page(struct intel_engine_cs *engine)
2097 unsigned flags; 1929 unsigned flags;
2098 int ret; 1930 int ret;
2099 1931
2100 obj = i915_gem_alloc_object(engine->dev, 4096); 1932 obj = i915_gem_object_create(&engine->i915->drm, 4096);
2101 if (obj == NULL) { 1933 if (IS_ERR(obj)) {
2102 DRM_ERROR("Failed to allocate status page\n"); 1934 DRM_ERROR("Failed to allocate status page\n");
2103 return -ENOMEM; 1935 return PTR_ERR(obj);
2104 } 1936 }
2105 1937
2106 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); 1938 ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
@@ -2108,7 +1940,7 @@ static int init_status_page(struct intel_engine_cs *engine)
2108 goto err_unref; 1940 goto err_unref;
2109 1941
2110 flags = 0; 1942 flags = 0;
2111 if (!HAS_LLC(engine->dev)) 1943 if (!HAS_LLC(engine->i915))
2112 /* On g33, we cannot place HWS above 256MiB, so 1944 /* On g33, we cannot place HWS above 256MiB, so
2113 * restrict its pinning to the low mappable arena. 1945 * restrict its pinning to the low mappable arena.
2114 * Though this restriction is not documented for 1946 * Though this restriction is not documented for
@@ -2142,11 +1974,11 @@ err_unref:
2142 1974
2143static int init_phys_status_page(struct intel_engine_cs *engine) 1975static int init_phys_status_page(struct intel_engine_cs *engine)
2144{ 1976{
2145 struct drm_i915_private *dev_priv = engine->dev->dev_private; 1977 struct drm_i915_private *dev_priv = engine->i915;
2146 1978
2147 if (!dev_priv->status_page_dmah) { 1979 if (!dev_priv->status_page_dmah) {
2148 dev_priv->status_page_dmah = 1980 dev_priv->status_page_dmah =
2149 drm_pci_alloc(engine->dev, PAGE_SIZE, PAGE_SIZE); 1981 drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
2150 if (!dev_priv->status_page_dmah) 1982 if (!dev_priv->status_page_dmah)
2151 return -ENOMEM; 1983 return -ENOMEM;
2152 } 1984 }
@@ -2159,20 +1991,22 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
2159 1991
2160void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf) 1992void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
2161{ 1993{
1994 GEM_BUG_ON(ringbuf->vma == NULL);
1995 GEM_BUG_ON(ringbuf->virtual_start == NULL);
1996
2162 if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen) 1997 if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen)
2163 i915_gem_object_unpin_map(ringbuf->obj); 1998 i915_gem_object_unpin_map(ringbuf->obj);
2164 else 1999 else
2165 iounmap(ringbuf->virtual_start); 2000 i915_vma_unpin_iomap(ringbuf->vma);
2166 ringbuf->virtual_start = NULL; 2001 ringbuf->virtual_start = NULL;
2167 ringbuf->vma = NULL; 2002
2168 i915_gem_object_ggtt_unpin(ringbuf->obj); 2003 i915_gem_object_ggtt_unpin(ringbuf->obj);
2004 ringbuf->vma = NULL;
2169} 2005}
2170 2006
2171int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev, 2007int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv,
2172 struct intel_ringbuffer *ringbuf) 2008 struct intel_ringbuffer *ringbuf)
2173{ 2009{
2174 struct drm_i915_private *dev_priv = to_i915(dev);
2175 struct i915_ggtt *ggtt = &dev_priv->ggtt;
2176 struct drm_i915_gem_object *obj = ringbuf->obj; 2010 struct drm_i915_gem_object *obj = ringbuf->obj;
2177 /* Ring wraparound at offset 0 sometimes hangs. No idea why. */ 2011 /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
2178 unsigned flags = PIN_OFFSET_BIAS | 4096; 2012 unsigned flags = PIN_OFFSET_BIAS | 4096;
@@ -2206,10 +2040,9 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
2206 /* Access through the GTT requires the device to be awake. */ 2040 /* Access through the GTT requires the device to be awake. */
2207 assert_rpm_wakelock_held(dev_priv); 2041 assert_rpm_wakelock_held(dev_priv);
2208 2042
2209 addr = ioremap_wc(ggtt->mappable_base + 2043 addr = i915_vma_pin_iomap(i915_gem_obj_to_ggtt(obj));
2210 i915_gem_obj_ggtt_offset(obj), ringbuf->size); 2044 if (IS_ERR(addr)) {
2211 if (addr == NULL) { 2045 ret = PTR_ERR(addr);
2212 ret = -ENOMEM;
2213 goto err_unpin; 2046 goto err_unpin;
2214 } 2047 }
2215 } 2048 }
@@ -2238,9 +2071,9 @@ static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
2238 if (!HAS_LLC(dev)) 2071 if (!HAS_LLC(dev))
2239 obj = i915_gem_object_create_stolen(dev, ringbuf->size); 2072 obj = i915_gem_object_create_stolen(dev, ringbuf->size);
2240 if (obj == NULL) 2073 if (obj == NULL)
2241 obj = i915_gem_alloc_object(dev, ringbuf->size); 2074 obj = i915_gem_object_create(dev, ringbuf->size);
2242 if (obj == NULL) 2075 if (IS_ERR(obj))
2243 return -ENOMEM; 2076 return PTR_ERR(obj);
2244 2077
2245 /* mark ring buffers as read-only from GPU side by default */ 2078 /* mark ring buffers as read-only from GPU side by default */
2246 obj->gt_ro = 1; 2079 obj->gt_ro = 1;
@@ -2272,13 +2105,13 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
2272 * of the buffer. 2105 * of the buffer.
2273 */ 2106 */
2274 ring->effective_size = size; 2107 ring->effective_size = size;
2275 if (IS_I830(engine->dev) || IS_845G(engine->dev)) 2108 if (IS_I830(engine->i915) || IS_845G(engine->i915))
2276 ring->effective_size -= 2 * CACHELINE_BYTES; 2109 ring->effective_size -= 2 * CACHELINE_BYTES;
2277 2110
2278 ring->last_retired_head = -1; 2111 ring->last_retired_head = -1;
2279 intel_ring_update_space(ring); 2112 intel_ring_update_space(ring);
2280 2113
2281 ret = intel_alloc_ringbuffer_obj(engine->dev, ring); 2114 ret = intel_alloc_ringbuffer_obj(&engine->i915->drm, ring);
2282 if (ret) { 2115 if (ret) {
2283 DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s: %d\n", 2116 DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s: %d\n",
2284 engine->name, ret); 2117 engine->name, ret);
@@ -2298,15 +2131,67 @@ intel_ringbuffer_free(struct intel_ringbuffer *ring)
2298 kfree(ring); 2131 kfree(ring);
2299} 2132}
2300 2133
2134static int intel_ring_context_pin(struct i915_gem_context *ctx,
2135 struct intel_engine_cs *engine)
2136{
2137 struct intel_context *ce = &ctx->engine[engine->id];
2138 int ret;
2139
2140 lockdep_assert_held(&ctx->i915->drm.struct_mutex);
2141
2142 if (ce->pin_count++)
2143 return 0;
2144
2145 if (ce->state) {
2146 ret = i915_gem_obj_ggtt_pin(ce->state, ctx->ggtt_alignment, 0);
2147 if (ret)
2148 goto error;
2149 }
2150
2151 /* The kernel context is only used as a placeholder for flushing the
2152 * active context. It is never used for submitting user rendering and
2153 * as such never requires the golden render context, and so we can skip
2154 * emitting it when we switch to the kernel context. This is required
2155 * as during eviction we cannot allocate and pin the renderstate in
2156 * order to initialise the context.
2157 */
2158 if (ctx == ctx->i915->kernel_context)
2159 ce->initialised = true;
2160
2161 i915_gem_context_reference(ctx);
2162 return 0;
2163
2164error:
2165 ce->pin_count = 0;
2166 return ret;
2167}
2168
2169static void intel_ring_context_unpin(struct i915_gem_context *ctx,
2170 struct intel_engine_cs *engine)
2171{
2172 struct intel_context *ce = &ctx->engine[engine->id];
2173
2174 lockdep_assert_held(&ctx->i915->drm.struct_mutex);
2175
2176 if (--ce->pin_count)
2177 return;
2178
2179 if (ce->state)
2180 i915_gem_object_ggtt_unpin(ce->state);
2181
2182 i915_gem_context_unreference(ctx);
2183}
2184
2301static int intel_init_ring_buffer(struct drm_device *dev, 2185static int intel_init_ring_buffer(struct drm_device *dev,
2302 struct intel_engine_cs *engine) 2186 struct intel_engine_cs *engine)
2303{ 2187{
2188 struct drm_i915_private *dev_priv = to_i915(dev);
2304 struct intel_ringbuffer *ringbuf; 2189 struct intel_ringbuffer *ringbuf;
2305 int ret; 2190 int ret;
2306 2191
2307 WARN_ON(engine->buffer); 2192 WARN_ON(engine->buffer);
2308 2193
2309 engine->dev = dev; 2194 engine->i915 = dev_priv;
2310 INIT_LIST_HEAD(&engine->active_list); 2195 INIT_LIST_HEAD(&engine->active_list);
2311 INIT_LIST_HEAD(&engine->request_list); 2196 INIT_LIST_HEAD(&engine->request_list);
2312 INIT_LIST_HEAD(&engine->execlist_queue); 2197 INIT_LIST_HEAD(&engine->execlist_queue);
@@ -2315,7 +2200,20 @@ static int intel_init_ring_buffer(struct drm_device *dev,
2315 memset(engine->semaphore.sync_seqno, 0, 2200 memset(engine->semaphore.sync_seqno, 0,
2316 sizeof(engine->semaphore.sync_seqno)); 2201 sizeof(engine->semaphore.sync_seqno));
2317 2202
2318 init_waitqueue_head(&engine->irq_queue); 2203 ret = intel_engine_init_breadcrumbs(engine);
2204 if (ret)
2205 goto error;
2206
2207 /* We may need to do things with the shrinker which
2208 * require us to immediately switch back to the default
2209 * context. This can cause a problem as pinning the
2210 * default context also requires GTT space which may not
2211 * be available. To avoid this we always pin the default
2212 * context.
2213 */
2214 ret = intel_ring_context_pin(dev_priv->kernel_context, engine);
2215 if (ret)
2216 goto error;
2319 2217
2320 ringbuf = intel_engine_create_ringbuffer(engine, 32 * PAGE_SIZE); 2218 ringbuf = intel_engine_create_ringbuffer(engine, 32 * PAGE_SIZE);
2321 if (IS_ERR(ringbuf)) { 2219 if (IS_ERR(ringbuf)) {
@@ -2324,7 +2222,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
2324 } 2222 }
2325 engine->buffer = ringbuf; 2223 engine->buffer = ringbuf;
2326 2224
2327 if (I915_NEED_GFX_HWS(dev)) { 2225 if (I915_NEED_GFX_HWS(dev_priv)) {
2328 ret = init_status_page(engine); 2226 ret = init_status_page(engine);
2329 if (ret) 2227 if (ret)
2330 goto error; 2228 goto error;
@@ -2335,7 +2233,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
2335 goto error; 2233 goto error;
2336 } 2234 }
2337 2235
2338 ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf); 2236 ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ringbuf);
2339 if (ret) { 2237 if (ret) {
2340 DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n", 2238 DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
2341 engine->name, ret); 2239 engine->name, ret);
@@ -2361,11 +2259,11 @@ void intel_cleanup_engine(struct intel_engine_cs *engine)
2361 if (!intel_engine_initialized(engine)) 2259 if (!intel_engine_initialized(engine))
2362 return; 2260 return;
2363 2261
2364 dev_priv = to_i915(engine->dev); 2262 dev_priv = engine->i915;
2365 2263
2366 if (engine->buffer) { 2264 if (engine->buffer) {
2367 intel_stop_engine(engine); 2265 intel_stop_engine(engine);
2368 WARN_ON(!IS_GEN2(engine->dev) && (I915_READ_MODE(engine) & MODE_IDLE) == 0); 2266 WARN_ON(!IS_GEN2(dev_priv) && (I915_READ_MODE(engine) & MODE_IDLE) == 0);
2369 2267
2370 intel_unpin_ringbuffer_obj(engine->buffer); 2268 intel_unpin_ringbuffer_obj(engine->buffer);
2371 intel_ringbuffer_free(engine->buffer); 2269 intel_ringbuffer_free(engine->buffer);
@@ -2375,7 +2273,7 @@ void intel_cleanup_engine(struct intel_engine_cs *engine)
2375 if (engine->cleanup) 2273 if (engine->cleanup)
2376 engine->cleanup(engine); 2274 engine->cleanup(engine);
2377 2275
2378 if (I915_NEED_GFX_HWS(engine->dev)) { 2276 if (I915_NEED_GFX_HWS(dev_priv)) {
2379 cleanup_status_page(engine); 2277 cleanup_status_page(engine);
2380 } else { 2278 } else {
2381 WARN_ON(engine->id != RCS); 2279 WARN_ON(engine->id != RCS);
@@ -2384,7 +2282,11 @@ void intel_cleanup_engine(struct intel_engine_cs *engine)
2384 2282
2385 i915_cmd_parser_fini_ring(engine); 2283 i915_cmd_parser_fini_ring(engine);
2386 i915_gem_batch_pool_fini(&engine->batch_pool); 2284 i915_gem_batch_pool_fini(&engine->batch_pool);
2387 engine->dev = NULL; 2285 intel_engine_fini_breadcrumbs(engine);
2286
2287 intel_ring_context_unpin(dev_priv->kernel_context, engine);
2288
2289 engine->i915 = NULL;
2388} 2290}
2389 2291
2390int intel_engine_idle(struct intel_engine_cs *engine) 2292int intel_engine_idle(struct intel_engine_cs *engine)
@@ -2407,46 +2309,22 @@ int intel_engine_idle(struct intel_engine_cs *engine)
2407 2309
2408int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request) 2310int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
2409{ 2311{
2410 request->ringbuf = request->engine->buffer; 2312 int ret;
2411 return 0;
2412}
2413 2313
2414int intel_ring_reserve_space(struct drm_i915_gem_request *request) 2314 /* Flush enough space to reduce the likelihood of waiting after
2415{ 2315 * we start building the request - in which case we will just
2416 /* 2316 * have to repeat work.
2417 * The first call merely notes the reserve request and is common for
2418 * all back ends. The subsequent localised _begin() call actually
2419 * ensures that the reservation is available. Without the begin, if
2420 * the request creator immediately submitted the request without
2421 * adding any commands to it then there might not actually be
2422 * sufficient room for the submission commands.
2423 */ 2317 */
2424 intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST); 2318 request->reserved_space += LEGACY_REQUEST_SIZE;
2425
2426 return intel_ring_begin(request, 0);
2427}
2428 2319
2429void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size) 2320 request->ringbuf = request->engine->buffer;
2430{
2431 GEM_BUG_ON(ringbuf->reserved_size);
2432 ringbuf->reserved_size = size;
2433}
2434
2435void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf)
2436{
2437 GEM_BUG_ON(!ringbuf->reserved_size);
2438 ringbuf->reserved_size = 0;
2439}
2440 2321
2441void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf) 2322 ret = intel_ring_begin(request, 0);
2442{ 2323 if (ret)
2443 GEM_BUG_ON(!ringbuf->reserved_size); 2324 return ret;
2444 ringbuf->reserved_size = 0;
2445}
2446 2325
2447void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf) 2326 request->reserved_space -= LEGACY_REQUEST_SIZE;
2448{ 2327 return 0;
2449 GEM_BUG_ON(ringbuf->reserved_size);
2450} 2328}
2451 2329
2452static int wait_for_space(struct drm_i915_gem_request *req, int bytes) 2330static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
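The reworked intel_ring_alloc_request_extras() above bumps request->reserved_space by LEGACY_REQUEST_SIZE and calls intel_ring_begin(request, 0) so that enough ring space is flushed out before the request body is built (per its comment, to reduce the likelihood of waiting part-way through), then drops the bump again. A toy, non-driver model of that reserve/flush/release ordering; the types, sizes and error handling below are invented, and LEGACY_REQUEST_SIZE is only borrowed as a name:

/* Toy model, not driver code: it only mirrors the ordering used by
 * intel_ring_alloc_request_extras(): reserve, flush space with a
 * zero-length begin, then release the reservation again. */
#include <stdio.h>

#define TOY_RING_SPACE       1024	/* dwords free, arbitrary */
#define LEGACY_REQUEST_SIZE  200	/* name borrowed from the driver */

struct toy_ring    { int space; };
struct toy_request { struct toy_ring *ring; int reserved_space; };

/* Ensure num_dwords plus the outstanding reservation fit in the ring. */
static int toy_ring_begin(struct toy_request *req, int num_dwords)
{
	int needed = num_dwords + req->reserved_space;

	if (req->ring->space < needed)
		return -1;	/* the real code waits for older requests here */
	req->ring->space -= num_dwords;
	return 0;
}

static int toy_alloc_request_extras(struct toy_request *req)
{
	req->reserved_space += LEGACY_REQUEST_SIZE;	/* typical request size */

	if (toy_ring_begin(req, 0))	/* pay any wait for space up front */
		return -1;

	req->reserved_space -= LEGACY_REQUEST_SIZE;	/* building may now use it */
	return 0;
}

int main(void)
{
	struct toy_ring ring = { TOY_RING_SPACE };
	struct toy_request req = { &ring, 0 };

	if (toy_alloc_request_extras(&req) == 0)
		printf("request ready, %d dwords free\n", ring.space);
	return 0;
}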
@@ -2468,7 +2346,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
2468 * 2346 *
2469 * See also i915_gem_request_alloc() and i915_add_request(). 2347 * See also i915_gem_request_alloc() and i915_add_request().
2470 */ 2348 */
2471 GEM_BUG_ON(!ringbuf->reserved_size); 2349 GEM_BUG_ON(!req->reserved_space);
2472 2350
2473 list_for_each_entry(target, &engine->request_list, list) { 2351 list_for_each_entry(target, &engine->request_list, list) {
2474 unsigned space; 2352 unsigned space;
@@ -2503,7 +2381,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
2503 int total_bytes, wait_bytes; 2381 int total_bytes, wait_bytes;
2504 bool need_wrap = false; 2382 bool need_wrap = false;
2505 2383
2506 total_bytes = bytes + ringbuf->reserved_size; 2384 total_bytes = bytes + req->reserved_space;
2507 2385
2508 if (unlikely(bytes > remain_usable)) { 2386 if (unlikely(bytes > remain_usable)) {
2509 /* 2387 /*
@@ -2519,7 +2397,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
2519 * and only need to effectively wait for the reserved 2397 * and only need to effectively wait for the reserved
2520 * size space from the start of ringbuffer. 2398 * size space from the start of ringbuffer.
2521 */ 2399 */
2522 wait_bytes = remain_actual + ringbuf->reserved_size; 2400 wait_bytes = remain_actual + req->reserved_space;
2523 } else { 2401 } else {
2524 /* No wrapping required, just waiting. */ 2402 /* No wrapping required, just waiting. */
2525 wait_bytes = total_bytes; 2403 wait_bytes = total_bytes;
@@ -2576,7 +2454,7 @@ int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
2576 2454
2577void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno) 2455void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
2578{ 2456{
2579 struct drm_i915_private *dev_priv = to_i915(engine->dev); 2457 struct drm_i915_private *dev_priv = engine->i915;
2580 2458
2581 /* Our semaphore implementation is strictly monotonic (i.e. we proceed 2459 /* Our semaphore implementation is strictly monotonic (i.e. we proceed
2582 * so long as the semaphore value in the register/page is greater 2460 * so long as the semaphore value in the register/page is greater
@@ -2586,7 +2464,7 @@ void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
2586 * the semaphore value, then when the seqno moves backwards all 2464 * the semaphore value, then when the seqno moves backwards all
2587 * future waits will complete instantly (causing rendering corruption). 2465 * future waits will complete instantly (causing rendering corruption).
2588 */ 2466 */
2589 if (INTEL_INFO(dev_priv)->gen == 6 || INTEL_INFO(dev_priv)->gen == 7) { 2467 if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv)) {
2590 I915_WRITE(RING_SYNC_0(engine->mmio_base), 0); 2468 I915_WRITE(RING_SYNC_0(engine->mmio_base), 0);
2591 I915_WRITE(RING_SYNC_1(engine->mmio_base), 0); 2469 I915_WRITE(RING_SYNC_1(engine->mmio_base), 0);
2592 if (HAS_VEBOX(dev_priv)) 2470 if (HAS_VEBOX(dev_priv))
@@ -2603,43 +2481,58 @@ void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
2603 memset(engine->semaphore.sync_seqno, 0, 2481 memset(engine->semaphore.sync_seqno, 0,
2604 sizeof(engine->semaphore.sync_seqno)); 2482 sizeof(engine->semaphore.sync_seqno));
2605 2483
2606 engine->set_seqno(engine, seqno); 2484 intel_write_status_page(engine, I915_GEM_HWS_INDEX, seqno);
2485 if (engine->irq_seqno_barrier)
2486 engine->irq_seqno_barrier(engine);
2607 engine->last_submitted_seqno = seqno; 2487 engine->last_submitted_seqno = seqno;
2608 2488
2609 engine->hangcheck.seqno = seqno; 2489 engine->hangcheck.seqno = seqno;
2490
2491 /* After manually advancing the seqno, fake the interrupt in case
2492 * there are any waiters for that seqno.
2493 */
2494 rcu_read_lock();
2495 intel_engine_wakeup(engine);
2496 rcu_read_unlock();
2610} 2497}
2611 2498
2612static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine, 2499static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
2613 u32 value) 2500 u32 value)
2614{ 2501{
2615 struct drm_i915_private *dev_priv = engine->dev->dev_private; 2502 struct drm_i915_private *dev_priv = engine->i915;
2503
2504 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
2616 2505
2617 /* Every tail move must follow the sequence below */ 2506 /* Every tail move must follow the sequence below */
2618 2507
2619 /* Disable notification that the ring is IDLE. The GT 2508 /* Disable notification that the ring is IDLE. The GT
2620 * will then assume that it is busy and bring it out of rc6. 2509 * will then assume that it is busy and bring it out of rc6.
2621 */ 2510 */
2622 I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, 2511 I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
2623 _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); 2512 _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
2624 2513
2625 /* Clear the context id. Here be magic! */ 2514 /* Clear the context id. Here be magic! */
2626 I915_WRITE64(GEN6_BSD_RNCID, 0x0); 2515 I915_WRITE64_FW(GEN6_BSD_RNCID, 0x0);
2627 2516
2628 /* Wait for the ring not to be idle, i.e. for it to wake up. */ 2517 /* Wait for the ring not to be idle, i.e. for it to wake up. */
2629 if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) & 2518 if (intel_wait_for_register_fw(dev_priv,
2630 GEN6_BSD_SLEEP_INDICATOR) == 0, 2519 GEN6_BSD_SLEEP_PSMI_CONTROL,
2631 50)) 2520 GEN6_BSD_SLEEP_INDICATOR,
2521 0,
2522 50))
2632 DRM_ERROR("timed out waiting for the BSD ring to wake up\n"); 2523 DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
2633 2524
2634 /* Now that the ring is fully powered up, update the tail */ 2525 /* Now that the ring is fully powered up, update the tail */
2635 I915_WRITE_TAIL(engine, value); 2526 I915_WRITE_FW(RING_TAIL(engine->mmio_base), value);
2636 POSTING_READ(RING_TAIL(engine->mmio_base)); 2527 POSTING_READ_FW(RING_TAIL(engine->mmio_base));
2637 2528
2638 /* Let the ring send IDLE messages to the GT again, 2529 /* Let the ring send IDLE messages to the GT again,
2639 * and so let it sleep to conserve power when idle. 2530 * and so let it sleep to conserve power when idle.
2640 */ 2531 */
2641 I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, 2532 I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
2642 _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); 2533 _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
2534
2535 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
2643} 2536}
2644 2537
2645static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, 2538static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
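The rewritten gen6_bsd_ring_write_tail() above takes forcewake explicitly once with intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL), uses the raw *_FW register accessors for the whole wake/write/sleep sequence, and polls via intel_wait_for_register_fw() rather than the open-coded wait_for(). A rough stand-alone sketch of that batching idea; every name below is invented and no claim is made about the real accessors beyond what the hunk shows:

/* Stand-alone sketch: grab "forcewake" once around a burst of register
 * writes instead of once per write, which is the shape of the
 * gen6_bsd_ring_write_tail() rewrite above. */
#include <assert.h>
#include <stdio.h>

static int forcewake_count;	/* models the uncore wake reference */

static void forcewake_get(void) { forcewake_count++; }
static void forcewake_put(void) { assert(forcewake_count > 0); forcewake_count--; }

/* raw accessor: only valid while forcewake is already held (the _FW flavour) */
static void write_fw(const char *reg, unsigned int val)
{
	assert(forcewake_count > 0);
	printf("write %s = %#x\n", reg, val);
}

/* convenience accessor: implicitly brackets each access (the plain flavour) */
static void write_reg(const char *reg, unsigned int val)
{
	forcewake_get();
	write_fw(reg, val);
	forcewake_put();
}

static void write_tail_batched(unsigned int tail)
{
	forcewake_get();		/* one wakeup for the whole sequence */
	write_fw("SLEEP_PSMI_CONTROL", 1);
	write_fw("RING_TAIL", tail);
	write_fw("SLEEP_PSMI_CONTROL", 0);
	forcewake_put();
}

int main(void)
{
	write_reg("RING_TAIL", 0x40);	/* one implicit get/put per access */
	write_tail_batched(0x80);	/* one explicit get/put for the burst */
	return 0;
}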
@@ -2654,7 +2547,7 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
2654 return ret; 2547 return ret;
2655 2548
2656 cmd = MI_FLUSH_DW; 2549 cmd = MI_FLUSH_DW;
2657 if (INTEL_INFO(engine->dev)->gen >= 8) 2550 if (INTEL_GEN(req->i915) >= 8)
2658 cmd += 1; 2551 cmd += 1;
2659 2552
2660 /* We always require a command barrier so that subsequent 2553 /* We always require a command barrier so that subsequent
@@ -2676,7 +2569,7 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
2676 intel_ring_emit(engine, cmd); 2569 intel_ring_emit(engine, cmd);
2677 intel_ring_emit(engine, 2570 intel_ring_emit(engine,
2678 I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); 2571 I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
2679 if (INTEL_INFO(engine->dev)->gen >= 8) { 2572 if (INTEL_GEN(req->i915) >= 8) {
2680 intel_ring_emit(engine, 0); /* upper addr */ 2573 intel_ring_emit(engine, 0); /* upper addr */
2681 intel_ring_emit(engine, 0); /* value */ 2574 intel_ring_emit(engine, 0); /* value */
2682 } else { 2575 } else {
@@ -2767,7 +2660,6 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
2767 u32 invalidate, u32 flush) 2660 u32 invalidate, u32 flush)
2768{ 2661{
2769 struct intel_engine_cs *engine = req->engine; 2662 struct intel_engine_cs *engine = req->engine;
2770 struct drm_device *dev = engine->dev;
2771 uint32_t cmd; 2663 uint32_t cmd;
2772 int ret; 2664 int ret;
2773 2665
@@ -2776,7 +2668,7 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
2776 return ret; 2668 return ret;
2777 2669
2778 cmd = MI_FLUSH_DW; 2670 cmd = MI_FLUSH_DW;
2779 if (INTEL_INFO(dev)->gen >= 8) 2671 if (INTEL_GEN(req->i915) >= 8)
2780 cmd += 1; 2672 cmd += 1;
2781 2673
2782 /* We always require a command barrier so that subsequent 2674 /* We always require a command barrier so that subsequent
@@ -2797,7 +2689,7 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
2797 intel_ring_emit(engine, cmd); 2689 intel_ring_emit(engine, cmd);
2798 intel_ring_emit(engine, 2690 intel_ring_emit(engine,
2799 I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); 2691 I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
2800 if (INTEL_INFO(dev)->gen >= 8) { 2692 if (INTEL_GEN(req->i915) >= 8) {
2801 intel_ring_emit(engine, 0); /* upper addr */ 2693 intel_ring_emit(engine, 0); /* upper addr */
2802 intel_ring_emit(engine, 0); /* value */ 2694 intel_ring_emit(engine, 0); /* value */
2803 } else { 2695 } else {
@@ -2809,11 +2701,159 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
2809 return 0; 2701 return 0;
2810} 2702}
2811 2703
2704static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
2705 struct intel_engine_cs *engine)
2706{
2707 struct drm_i915_gem_object *obj;
2708 int ret, i;
2709
2710 if (!i915_semaphore_is_enabled(dev_priv))
2711 return;
2712
2713 if (INTEL_GEN(dev_priv) >= 8 && !dev_priv->semaphore_obj) {
2714 obj = i915_gem_object_create(&dev_priv->drm, 4096);
2715 if (IS_ERR(obj)) {
2716 DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
2717 i915.semaphores = 0;
2718 } else {
2719 i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
2720 ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK);
2721 if (ret != 0) {
2722 drm_gem_object_unreference(&obj->base);
2723 DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n");
2724 i915.semaphores = 0;
2725 } else {
2726 dev_priv->semaphore_obj = obj;
2727 }
2728 }
2729 }
2730
2731 if (!i915_semaphore_is_enabled(dev_priv))
2732 return;
2733
2734 if (INTEL_GEN(dev_priv) >= 8) {
2735 u64 offset = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj);
2736
2737 engine->semaphore.sync_to = gen8_ring_sync;
2738 engine->semaphore.signal = gen8_xcs_signal;
2739
2740 for (i = 0; i < I915_NUM_ENGINES; i++) {
2741 u64 ring_offset;
2742
2743 if (i != engine->id)
2744 ring_offset = offset + GEN8_SEMAPHORE_OFFSET(engine->id, i);
2745 else
2746 ring_offset = MI_SEMAPHORE_SYNC_INVALID;
2747
2748 engine->semaphore.signal_ggtt[i] = ring_offset;
2749 }
2750 } else if (INTEL_GEN(dev_priv) >= 6) {
2751 engine->semaphore.sync_to = gen6_ring_sync;
2752 engine->semaphore.signal = gen6_signal;
2753
2754 /*
2755 * The current semaphore is only applied on pre-gen8
2756 * platform. And there is no VCS2 ring on the pre-gen8
2757 * platform. So the semaphore between RCS and VCS2 is
2758 * initialized as INVALID. Gen8 will initialize the
2759 * sema between VCS2 and RCS later.
2760 */
2761 for (i = 0; i < I915_NUM_ENGINES; i++) {
2762 static const struct {
2763 u32 wait_mbox;
2764 i915_reg_t mbox_reg;
2765 } sem_data[I915_NUM_ENGINES][I915_NUM_ENGINES] = {
2766 [RCS] = {
2767 [VCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_RV, .mbox_reg = GEN6_VRSYNC },
2768 [BCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_RB, .mbox_reg = GEN6_BRSYNC },
2769 [VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_RVE, .mbox_reg = GEN6_VERSYNC },
2770 },
2771 [VCS] = {
2772 [RCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VR, .mbox_reg = GEN6_RVSYNC },
2773 [BCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VB, .mbox_reg = GEN6_BVSYNC },
2774 [VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VVE, .mbox_reg = GEN6_VEVSYNC },
2775 },
2776 [BCS] = {
2777 [RCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_BR, .mbox_reg = GEN6_RBSYNC },
2778 [VCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_BV, .mbox_reg = GEN6_VBSYNC },
2779 [VECS] = { .wait_mbox = MI_SEMAPHORE_SYNC_BVE, .mbox_reg = GEN6_VEBSYNC },
2780 },
2781 [VECS] = {
2782 [RCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VER, .mbox_reg = GEN6_RVESYNC },
2783 [VCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEV, .mbox_reg = GEN6_VVESYNC },
2784 [BCS] = { .wait_mbox = MI_SEMAPHORE_SYNC_VEB, .mbox_reg = GEN6_BVESYNC },
2785 },
2786 };
2787 u32 wait_mbox;
2788 i915_reg_t mbox_reg;
2789
2790 if (i == engine->id || i == VCS2) {
2791 wait_mbox = MI_SEMAPHORE_SYNC_INVALID;
2792 mbox_reg = GEN6_NOSYNC;
2793 } else {
2794 wait_mbox = sem_data[engine->id][i].wait_mbox;
2795 mbox_reg = sem_data[engine->id][i].mbox_reg;
2796 }
2797
2798 engine->semaphore.mbox.wait[i] = wait_mbox;
2799 engine->semaphore.mbox.signal[i] = mbox_reg;
2800 }
2801 }
2802}
2803
2804static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
2805 struct intel_engine_cs *engine)
2806{
2807 if (INTEL_GEN(dev_priv) >= 8) {
2808 engine->irq_enable = gen8_irq_enable;
2809 engine->irq_disable = gen8_irq_disable;
2810 engine->irq_seqno_barrier = gen6_seqno_barrier;
2811 } else if (INTEL_GEN(dev_priv) >= 6) {
2812 engine->irq_enable = gen6_irq_enable;
2813 engine->irq_disable = gen6_irq_disable;
2814 engine->irq_seqno_barrier = gen6_seqno_barrier;
2815 } else if (INTEL_GEN(dev_priv) >= 5) {
2816 engine->irq_enable = gen5_irq_enable;
2817 engine->irq_disable = gen5_irq_disable;
2818 engine->irq_seqno_barrier = gen5_seqno_barrier;
2819 } else if (INTEL_GEN(dev_priv) >= 3) {
2820 engine->irq_enable = i9xx_irq_enable;
2821 engine->irq_disable = i9xx_irq_disable;
2822 } else {
2823 engine->irq_enable = i8xx_irq_enable;
2824 engine->irq_disable = i8xx_irq_disable;
2825 }
2826}
2827
2828static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
2829 struct intel_engine_cs *engine)
2830{
2831 engine->init_hw = init_ring_common;
2832 engine->write_tail = ring_write_tail;
2833
2834 engine->add_request = i9xx_add_request;
2835 if (INTEL_GEN(dev_priv) >= 6)
2836 engine->add_request = gen6_add_request;
2837
2838 if (INTEL_GEN(dev_priv) >= 8)
2839 engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
2840 else if (INTEL_GEN(dev_priv) >= 6)
2841 engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
2842 else if (INTEL_GEN(dev_priv) >= 4)
2843 engine->dispatch_execbuffer = i965_dispatch_execbuffer;
2844 else if (IS_I830(dev_priv) || IS_845G(dev_priv))
2845 engine->dispatch_execbuffer = i830_dispatch_execbuffer;
2846 else
2847 engine->dispatch_execbuffer = i915_dispatch_execbuffer;
2848
2849 intel_ring_init_irq(dev_priv, engine);
2850 intel_ring_init_semaphores(dev_priv, engine);
2851}
2852
2812int intel_init_render_ring_buffer(struct drm_device *dev) 2853int intel_init_render_ring_buffer(struct drm_device *dev)
2813{ 2854{
2814 struct drm_i915_private *dev_priv = dev->dev_private; 2855 struct drm_i915_private *dev_priv = to_i915(dev);
2815 struct intel_engine_cs *engine = &dev_priv->engine[RCS]; 2856 struct intel_engine_cs *engine = &dev_priv->engine[RCS];
2816 struct drm_i915_gem_object *obj;
2817 int ret; 2857 int ret;
2818 2858
2819 engine->name = "render ring"; 2859 engine->name = "render ring";
@@ -2822,140 +2862,49 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
2822 engine->hw_id = 0; 2862 engine->hw_id = 0;
2823 engine->mmio_base = RENDER_RING_BASE; 2863 engine->mmio_base = RENDER_RING_BASE;
2824 2864
2825 if (INTEL_INFO(dev)->gen >= 8) { 2865 intel_ring_default_vfuncs(dev_priv, engine);
2826 if (i915_semaphore_is_enabled(dev)) {
2827 obj = i915_gem_alloc_object(dev, 4096);
2828 if (obj == NULL) {
2829 DRM_ERROR("Failed to allocate semaphore bo. Disabling semaphores\n");
2830 i915.semaphores = 0;
2831 } else {
2832 i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
2833 ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK);
2834 if (ret != 0) {
2835 drm_gem_object_unreference(&obj->base);
2836 DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n");
2837 i915.semaphores = 0;
2838 } else
2839 dev_priv->semaphore_obj = obj;
2840 }
2841 }
2842 2866
2867 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
2868 if (HAS_L3_DPF(dev_priv))
2869 engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2870
2871 if (INTEL_GEN(dev_priv) >= 8) {
2843 engine->init_context = intel_rcs_ctx_init; 2872 engine->init_context = intel_rcs_ctx_init;
2844 engine->add_request = gen6_add_request; 2873 engine->add_request = gen8_render_add_request;
2845 engine->flush = gen8_render_ring_flush; 2874 engine->flush = gen8_render_ring_flush;
2846 engine->irq_get = gen8_ring_get_irq; 2875 if (i915_semaphore_is_enabled(dev_priv))
2847 engine->irq_put = gen8_ring_put_irq;
2848 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
2849 engine->irq_seqno_barrier = gen6_seqno_barrier;
2850 engine->get_seqno = ring_get_seqno;
2851 engine->set_seqno = ring_set_seqno;
2852 if (i915_semaphore_is_enabled(dev)) {
2853 WARN_ON(!dev_priv->semaphore_obj);
2854 engine->semaphore.sync_to = gen8_ring_sync;
2855 engine->semaphore.signal = gen8_rcs_signal; 2876 engine->semaphore.signal = gen8_rcs_signal;
2856 GEN8_RING_SEMAPHORE_INIT(engine); 2877 } else if (INTEL_GEN(dev_priv) >= 6) {
2857 }
2858 } else if (INTEL_INFO(dev)->gen >= 6) {
2859 engine->init_context = intel_rcs_ctx_init; 2878 engine->init_context = intel_rcs_ctx_init;
2860 engine->add_request = gen6_add_request;
2861 engine->flush = gen7_render_ring_flush; 2879 engine->flush = gen7_render_ring_flush;
2862 if (INTEL_INFO(dev)->gen == 6) 2880 if (IS_GEN6(dev_priv))
2863 engine->flush = gen6_render_ring_flush; 2881 engine->flush = gen6_render_ring_flush;
2864 engine->irq_get = gen6_ring_get_irq; 2882 } else if (IS_GEN5(dev_priv)) {
2865 engine->irq_put = gen6_ring_put_irq;
2866 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
2867 engine->irq_seqno_barrier = gen6_seqno_barrier;
2868 engine->get_seqno = ring_get_seqno;
2869 engine->set_seqno = ring_set_seqno;
2870 if (i915_semaphore_is_enabled(dev)) {
2871 engine->semaphore.sync_to = gen6_ring_sync;
2872 engine->semaphore.signal = gen6_signal;
2873 /*
2874 * The current semaphore is only applied on pre-gen8
2875 * platform. And there is no VCS2 ring on the pre-gen8
2876 * platform. So the semaphore between RCS and VCS2 is
2877 * initialized as INVALID. Gen8 will initialize the
2878 * sema between VCS2 and RCS later.
2879 */
2880 engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
2881 engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV;
2882 engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB;
2883 engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE;
2884 engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
2885 engine->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
2886 engine->semaphore.mbox.signal[VCS] = GEN6_VRSYNC;
2887 engine->semaphore.mbox.signal[BCS] = GEN6_BRSYNC;
2888 engine->semaphore.mbox.signal[VECS] = GEN6_VERSYNC;
2889 engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
2890 }
2891 } else if (IS_GEN5(dev)) {
2892 engine->add_request = pc_render_add_request;
2893 engine->flush = gen4_render_ring_flush; 2883 engine->flush = gen4_render_ring_flush;
2894 engine->get_seqno = pc_render_get_seqno;
2895 engine->set_seqno = pc_render_set_seqno;
2896 engine->irq_get = gen5_ring_get_irq;
2897 engine->irq_put = gen5_ring_put_irq;
2898 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT |
2899 GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
2900 } else { 2884 } else {
2901 engine->add_request = i9xx_add_request; 2885 if (INTEL_GEN(dev_priv) < 4)
2902 if (INTEL_INFO(dev)->gen < 4)
2903 engine->flush = gen2_render_ring_flush; 2886 engine->flush = gen2_render_ring_flush;
2904 else 2887 else
2905 engine->flush = gen4_render_ring_flush; 2888 engine->flush = gen4_render_ring_flush;
2906 engine->get_seqno = ring_get_seqno;
2907 engine->set_seqno = ring_set_seqno;
2908 if (IS_GEN2(dev)) {
2909 engine->irq_get = i8xx_ring_get_irq;
2910 engine->irq_put = i8xx_ring_put_irq;
2911 } else {
2912 engine->irq_get = i9xx_ring_get_irq;
2913 engine->irq_put = i9xx_ring_put_irq;
2914 }
2915 engine->irq_enable_mask = I915_USER_INTERRUPT; 2889 engine->irq_enable_mask = I915_USER_INTERRUPT;
2916 } 2890 }
2917 engine->write_tail = ring_write_tail;
2918 2891
2919 if (IS_HASWELL(dev)) 2892 if (IS_HASWELL(dev_priv))
2920 engine->dispatch_execbuffer = hsw_ring_dispatch_execbuffer; 2893 engine->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
2921 else if (IS_GEN8(dev)) 2894
2922 engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
2923 else if (INTEL_INFO(dev)->gen >= 6)
2924 engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
2925 else if (INTEL_INFO(dev)->gen >= 4)
2926 engine->dispatch_execbuffer = i965_dispatch_execbuffer;
2927 else if (IS_I830(dev) || IS_845G(dev))
2928 engine->dispatch_execbuffer = i830_dispatch_execbuffer;
2929 else
2930 engine->dispatch_execbuffer = i915_dispatch_execbuffer;
2931 engine->init_hw = init_render_ring; 2895 engine->init_hw = init_render_ring;
2932 engine->cleanup = render_ring_cleanup; 2896 engine->cleanup = render_ring_cleanup;
2933 2897
2934 /* Workaround batchbuffer to combat CS tlb bug. */
2935 if (HAS_BROKEN_CS_TLB(dev)) {
2936 obj = i915_gem_alloc_object(dev, I830_WA_SIZE);
2937 if (obj == NULL) {
2938 DRM_ERROR("Failed to allocate batch bo\n");
2939 return -ENOMEM;
2940 }
2941
2942 ret = i915_gem_obj_ggtt_pin(obj, 0, 0);
2943 if (ret != 0) {
2944 drm_gem_object_unreference(&obj->base);
2945 DRM_ERROR("Failed to ping batch bo\n");
2946 return ret;
2947 }
2948
2949 engine->scratch.obj = obj;
2950 engine->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
2951 }
2952
2953 ret = intel_init_ring_buffer(dev, engine); 2898 ret = intel_init_ring_buffer(dev, engine);
2954 if (ret) 2899 if (ret)
2955 return ret; 2900 return ret;
2956 2901
2957 if (INTEL_INFO(dev)->gen >= 5) { 2902 if (INTEL_GEN(dev_priv) >= 6) {
2958 ret = intel_init_pipe_control(engine); 2903 ret = intel_init_pipe_control(engine, 4096);
2904 if (ret)
2905 return ret;
2906 } else if (HAS_BROKEN_CS_TLB(dev_priv)) {
2907 ret = intel_init_pipe_control(engine, I830_WA_SIZE);
2959 if (ret) 2908 if (ret)
2960 return ret; 2909 return ret;
2961 } 2910 }
@@ -2965,7 +2914,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
2965 2914
2966int intel_init_bsd_ring_buffer(struct drm_device *dev) 2915int intel_init_bsd_ring_buffer(struct drm_device *dev)
2967{ 2916{
2968 struct drm_i915_private *dev_priv = dev->dev_private; 2917 struct drm_i915_private *dev_priv = to_i915(dev);
2969 struct intel_engine_cs *engine = &dev_priv->engine[VCS]; 2918 struct intel_engine_cs *engine = &dev_priv->engine[VCS];
2970 2919
2971 engine->name = "bsd ring"; 2920 engine->name = "bsd ring";
@@ -2973,68 +2922,27 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
2973 engine->exec_id = I915_EXEC_BSD; 2922 engine->exec_id = I915_EXEC_BSD;
2974 engine->hw_id = 1; 2923 engine->hw_id = 1;
2975 2924
2976 engine->write_tail = ring_write_tail; 2925 intel_ring_default_vfuncs(dev_priv, engine);
2977 if (INTEL_INFO(dev)->gen >= 6) { 2926
2927 if (INTEL_GEN(dev_priv) >= 6) {
2978 engine->mmio_base = GEN6_BSD_RING_BASE; 2928 engine->mmio_base = GEN6_BSD_RING_BASE;
2979 /* gen6 bsd needs a special wa for tail updates */ 2929 /* gen6 bsd needs a special wa for tail updates */
2980 if (IS_GEN6(dev)) 2930 if (IS_GEN6(dev_priv))
2981 engine->write_tail = gen6_bsd_ring_write_tail; 2931 engine->write_tail = gen6_bsd_ring_write_tail;
2982 engine->flush = gen6_bsd_ring_flush; 2932 engine->flush = gen6_bsd_ring_flush;
2983 engine->add_request = gen6_add_request; 2933 if (INTEL_GEN(dev_priv) >= 8)
2984 engine->irq_seqno_barrier = gen6_seqno_barrier;
2985 engine->get_seqno = ring_get_seqno;
2986 engine->set_seqno = ring_set_seqno;
2987 if (INTEL_INFO(dev)->gen >= 8) {
2988 engine->irq_enable_mask = 2934 engine->irq_enable_mask =
2989 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT; 2935 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
2990 engine->irq_get = gen8_ring_get_irq; 2936 else
2991 engine->irq_put = gen8_ring_put_irq;
2992 engine->dispatch_execbuffer =
2993 gen8_ring_dispatch_execbuffer;
2994 if (i915_semaphore_is_enabled(dev)) {
2995 engine->semaphore.sync_to = gen8_ring_sync;
2996 engine->semaphore.signal = gen8_xcs_signal;
2997 GEN8_RING_SEMAPHORE_INIT(engine);
2998 }
2999 } else {
3000 engine->irq_enable_mask = GT_BSD_USER_INTERRUPT; 2937 engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
3001 engine->irq_get = gen6_ring_get_irq;
3002 engine->irq_put = gen6_ring_put_irq;
3003 engine->dispatch_execbuffer =
3004 gen6_ring_dispatch_execbuffer;
3005 if (i915_semaphore_is_enabled(dev)) {
3006 engine->semaphore.sync_to = gen6_ring_sync;
3007 engine->semaphore.signal = gen6_signal;
3008 engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
3009 engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
3010 engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
3011 engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
3012 engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
3013 engine->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
3014 engine->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
3015 engine->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
3016 engine->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
3017 engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
3018 }
3019 }
3020 } else { 2938 } else {
3021 engine->mmio_base = BSD_RING_BASE; 2939 engine->mmio_base = BSD_RING_BASE;
3022 engine->flush = bsd_ring_flush; 2940 engine->flush = bsd_ring_flush;
3023 engine->add_request = i9xx_add_request; 2941 if (IS_GEN5(dev_priv))
3024 engine->get_seqno = ring_get_seqno;
3025 engine->set_seqno = ring_set_seqno;
3026 if (IS_GEN5(dev)) {
3027 engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT; 2942 engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
3028 engine->irq_get = gen5_ring_get_irq; 2943 else
3029 engine->irq_put = gen5_ring_put_irq;
3030 } else {
3031 engine->irq_enable_mask = I915_BSD_USER_INTERRUPT; 2944 engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
3032 engine->irq_get = i9xx_ring_get_irq;
3033 engine->irq_put = i9xx_ring_put_irq;
3034 }
3035 engine->dispatch_execbuffer = i965_dispatch_execbuffer;
3036 } 2945 }
3037 engine->init_hw = init_ring_common;
3038 2946
3039 return intel_init_ring_buffer(dev, engine); 2947 return intel_init_ring_buffer(dev, engine);
3040} 2948}
@@ -3044,147 +2952,70 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
3044 */ 2952 */
3045int intel_init_bsd2_ring_buffer(struct drm_device *dev) 2953int intel_init_bsd2_ring_buffer(struct drm_device *dev)
3046{ 2954{
3047 struct drm_i915_private *dev_priv = dev->dev_private; 2955 struct drm_i915_private *dev_priv = to_i915(dev);
3048 struct intel_engine_cs *engine = &dev_priv->engine[VCS2]; 2956 struct intel_engine_cs *engine = &dev_priv->engine[VCS2];
3049 2957
3050 engine->name = "bsd2 ring"; 2958 engine->name = "bsd2 ring";
3051 engine->id = VCS2; 2959 engine->id = VCS2;
3052 engine->exec_id = I915_EXEC_BSD; 2960 engine->exec_id = I915_EXEC_BSD;
3053 engine->hw_id = 4; 2961 engine->hw_id = 4;
3054
3055 engine->write_tail = ring_write_tail;
3056 engine->mmio_base = GEN8_BSD2_RING_BASE; 2962 engine->mmio_base = GEN8_BSD2_RING_BASE;
2963
2964 intel_ring_default_vfuncs(dev_priv, engine);
2965
3057 engine->flush = gen6_bsd_ring_flush; 2966 engine->flush = gen6_bsd_ring_flush;
3058 engine->add_request = gen6_add_request;
3059 engine->irq_seqno_barrier = gen6_seqno_barrier;
3060 engine->get_seqno = ring_get_seqno;
3061 engine->set_seqno = ring_set_seqno;
3062 engine->irq_enable_mask = 2967 engine->irq_enable_mask =
3063 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT; 2968 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
3064 engine->irq_get = gen8_ring_get_irq;
3065 engine->irq_put = gen8_ring_put_irq;
3066 engine->dispatch_execbuffer =
3067 gen8_ring_dispatch_execbuffer;
3068 if (i915_semaphore_is_enabled(dev)) {
3069 engine->semaphore.sync_to = gen8_ring_sync;
3070 engine->semaphore.signal = gen8_xcs_signal;
3071 GEN8_RING_SEMAPHORE_INIT(engine);
3072 }
3073 engine->init_hw = init_ring_common;
3074 2969
3075 return intel_init_ring_buffer(dev, engine); 2970 return intel_init_ring_buffer(dev, engine);
3076} 2971}
3077 2972
3078int intel_init_blt_ring_buffer(struct drm_device *dev) 2973int intel_init_blt_ring_buffer(struct drm_device *dev)
3079{ 2974{
3080 struct drm_i915_private *dev_priv = dev->dev_private; 2975 struct drm_i915_private *dev_priv = to_i915(dev);
3081 struct intel_engine_cs *engine = &dev_priv->engine[BCS]; 2976 struct intel_engine_cs *engine = &dev_priv->engine[BCS];
3082 2977
3083 engine->name = "blitter ring"; 2978 engine->name = "blitter ring";
3084 engine->id = BCS; 2979 engine->id = BCS;
3085 engine->exec_id = I915_EXEC_BLT; 2980 engine->exec_id = I915_EXEC_BLT;
3086 engine->hw_id = 2; 2981 engine->hw_id = 2;
3087
3088 engine->mmio_base = BLT_RING_BASE; 2982 engine->mmio_base = BLT_RING_BASE;
3089 engine->write_tail = ring_write_tail; 2983
2984 intel_ring_default_vfuncs(dev_priv, engine);
2985
3090 engine->flush = gen6_ring_flush; 2986 engine->flush = gen6_ring_flush;
3091 engine->add_request = gen6_add_request; 2987 if (INTEL_GEN(dev_priv) >= 8)
3092 engine->irq_seqno_barrier = gen6_seqno_barrier;
3093 engine->get_seqno = ring_get_seqno;
3094 engine->set_seqno = ring_set_seqno;
3095 if (INTEL_INFO(dev)->gen >= 8) {
3096 engine->irq_enable_mask = 2988 engine->irq_enable_mask =
3097 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT; 2989 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
3098 engine->irq_get = gen8_ring_get_irq; 2990 else
3099 engine->irq_put = gen8_ring_put_irq;
3100 engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
3101 if (i915_semaphore_is_enabled(dev)) {
3102 engine->semaphore.sync_to = gen8_ring_sync;
3103 engine->semaphore.signal = gen8_xcs_signal;
3104 GEN8_RING_SEMAPHORE_INIT(engine);
3105 }
3106 } else {
3107 engine->irq_enable_mask = GT_BLT_USER_INTERRUPT; 2991 engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
3108 engine->irq_get = gen6_ring_get_irq;
3109 engine->irq_put = gen6_ring_put_irq;
3110 engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
3111 if (i915_semaphore_is_enabled(dev)) {
3112 engine->semaphore.signal = gen6_signal;
3113 engine->semaphore.sync_to = gen6_ring_sync;
3114 /*
3115 * The current semaphore is only applied on pre-gen8
3116 * platform. And there is no VCS2 ring on the pre-gen8
3117 * platform. So the semaphore between BCS and VCS2 is
3118 * initialized as INVALID. Gen8 will initialize the
3119 * sema between BCS and VCS2 later.
3120 */
3121 engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
3122 engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
3123 engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
3124 engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
3125 engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
3126 engine->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
3127 engine->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
3128 engine->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
3129 engine->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
3130 engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
3131 }
3132 }
3133 engine->init_hw = init_ring_common;
3134 2992
3135 return intel_init_ring_buffer(dev, engine); 2993 return intel_init_ring_buffer(dev, engine);
3136} 2994}
3137 2995
3138int intel_init_vebox_ring_buffer(struct drm_device *dev) 2996int intel_init_vebox_ring_buffer(struct drm_device *dev)
3139{ 2997{
3140 struct drm_i915_private *dev_priv = dev->dev_private; 2998 struct drm_i915_private *dev_priv = to_i915(dev);
3141 struct intel_engine_cs *engine = &dev_priv->engine[VECS]; 2999 struct intel_engine_cs *engine = &dev_priv->engine[VECS];
3142 3000
3143 engine->name = "video enhancement ring"; 3001 engine->name = "video enhancement ring";
3144 engine->id = VECS; 3002 engine->id = VECS;
3145 engine->exec_id = I915_EXEC_VEBOX; 3003 engine->exec_id = I915_EXEC_VEBOX;
3146 engine->hw_id = 3; 3004 engine->hw_id = 3;
3147
3148 engine->mmio_base = VEBOX_RING_BASE; 3005 engine->mmio_base = VEBOX_RING_BASE;
3149 engine->write_tail = ring_write_tail; 3006
3007 intel_ring_default_vfuncs(dev_priv, engine);
3008
3150 engine->flush = gen6_ring_flush; 3009 engine->flush = gen6_ring_flush;
3151 engine->add_request = gen6_add_request;
3152 engine->irq_seqno_barrier = gen6_seqno_barrier;
3153 engine->get_seqno = ring_get_seqno;
3154 engine->set_seqno = ring_set_seqno;
3155 3010
3156 if (INTEL_INFO(dev)->gen >= 8) { 3011 if (INTEL_GEN(dev_priv) >= 8) {
3157 engine->irq_enable_mask = 3012 engine->irq_enable_mask =
3158 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT; 3013 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
3159 engine->irq_get = gen8_ring_get_irq;
3160 engine->irq_put = gen8_ring_put_irq;
3161 engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
3162 if (i915_semaphore_is_enabled(dev)) {
3163 engine->semaphore.sync_to = gen8_ring_sync;
3164 engine->semaphore.signal = gen8_xcs_signal;
3165 GEN8_RING_SEMAPHORE_INIT(engine);
3166 }
3167 } else { 3014 } else {
3168 engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT; 3015 engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
3169 engine->irq_get = hsw_vebox_get_irq; 3016 engine->irq_enable = hsw_vebox_irq_enable;
3170 engine->irq_put = hsw_vebox_put_irq; 3017 engine->irq_disable = hsw_vebox_irq_disable;
3171 engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
3172 if (i915_semaphore_is_enabled(dev)) {
3173 engine->semaphore.sync_to = gen6_ring_sync;
3174 engine->semaphore.signal = gen6_signal;
3175 engine->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
3176 engine->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
3177 engine->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
3178 engine->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
3179 engine->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
3180 engine->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
3181 engine->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
3182 engine->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
3183 engine->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
3184 engine->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
3185 }
3186 } 3018 }
3187 engine->init_hw = init_ring_common;
3188 3019
3189 return intel_init_ring_buffer(dev, engine); 3020 return intel_init_ring_buffer(dev, engine);
3190} 3021}
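
The engine-init hunks above all shrink the same way: vfunc assignments shared by every legacy ring (tail write, hardware init, seqno handling, the gen-dependent irq and dispatch hooks) move into intel_ring_default_vfuncs(), whose body is not part of this excerpt, and each intel_init_*_ring_buffer() keeps only the overrides that genuinely differ per engine. A minimal standalone sketch of that defaults-then-override pattern (invented names, not the driver's own code) looks like this:

#include <stdio.h>

struct engine {
	const char *name;
	void (*write_tail)(struct engine *e);
	void (*flush)(struct engine *e);
};

static void default_write_tail(struct engine *e) { printf("%s: default write_tail\n", e->name); }
static void default_flush(struct engine *e)      { printf("%s: default flush\n", e->name); }
static void bsd_flush(struct engine *e)          { printf("%s: bsd-specific flush\n", e->name); }

/* Plays the role intel_ring_default_vfuncs() plays above: set everything shared once. */
static void set_default_vfuncs(struct engine *e)
{
	e->write_tail = default_write_tail;
	e->flush = default_flush;
}

int main(void)
{
	struct engine bsd = { .name = "bsd ring" };

	set_default_vfuncs(&bsd);	/* shared defaults first ... */
	bsd.flush = bsd_flush;		/* ... then only what this engine overrides */

	bsd.write_tail(&bsd);
	bsd.flush(&bsd);
	return 0;
}

The structural gain visible in the diff is that an engine initialiser now only states what makes that engine different.
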
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index ff126485d398..12cb7ed90014 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -62,18 +62,6 @@ struct intel_hw_status_page {
62 (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \ 62 (i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
63 GEN8_SEMAPHORE_OFFSET(from, (__ring)->id)) 63 GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))
64 64
65#define GEN8_RING_SEMAPHORE_INIT(e) do { \
66 if (!dev_priv->semaphore_obj) { \
67 break; \
68 } \
69 (e)->semaphore.signal_ggtt[RCS] = GEN8_SIGNAL_OFFSET((e), RCS); \
70 (e)->semaphore.signal_ggtt[VCS] = GEN8_SIGNAL_OFFSET((e), VCS); \
71 (e)->semaphore.signal_ggtt[BCS] = GEN8_SIGNAL_OFFSET((e), BCS); \
72 (e)->semaphore.signal_ggtt[VECS] = GEN8_SIGNAL_OFFSET((e), VECS); \
73 (e)->semaphore.signal_ggtt[VCS2] = GEN8_SIGNAL_OFFSET((e), VCS2); \
74 (e)->semaphore.signal_ggtt[(e)->id] = MI_SEMAPHORE_SYNC_INVALID; \
75 } while(0)
76
77enum intel_ring_hangcheck_action { 65enum intel_ring_hangcheck_action {
78 HANGCHECK_IDLE = 0, 66 HANGCHECK_IDLE = 0,
79 HANGCHECK_WAIT, 67 HANGCHECK_WAIT,
@@ -86,8 +74,8 @@ enum intel_ring_hangcheck_action {
86 74
87struct intel_ring_hangcheck { 75struct intel_ring_hangcheck {
88 u64 acthd; 76 u64 acthd;
77 unsigned long user_interrupts;
89 u32 seqno; 78 u32 seqno;
90 unsigned user_interrupts;
91 int score; 79 int score;
92 enum intel_ring_hangcheck_action action; 80 enum intel_ring_hangcheck_action action;
93 int deadlock; 81 int deadlock;
@@ -107,7 +95,6 @@ struct intel_ringbuffer {
107 int space; 95 int space;
108 int size; 96 int size;
109 int effective_size; 97 int effective_size;
110 int reserved_size;
111 98
112 /** We track the position of the requests in the ring buffer, and 99 /** We track the position of the requests in the ring buffer, and
113 * when each is retired we increment last_retired_head as the GPU 100 * when each is retired we increment last_retired_head as the GPU
@@ -120,7 +107,7 @@ struct intel_ringbuffer {
120 u32 last_retired_head; 107 u32 last_retired_head;
121}; 108};
122 109
123struct intel_context; 110struct i915_gem_context;
124struct drm_i915_reg_table; 111struct drm_i915_reg_table;
125 112
126/* 113/*
@@ -142,7 +129,10 @@ struct i915_ctx_workarounds {
142 struct drm_i915_gem_object *obj; 129 struct drm_i915_gem_object *obj;
143}; 130};
144 131
145struct intel_engine_cs { 132struct drm_i915_gem_request;
133
134struct intel_engine_cs {
135 struct drm_i915_private *i915;
146 const char *name; 136 const char *name;
147 enum intel_engine_id { 137 enum intel_engine_id {
148 RCS = 0, 138 RCS = 0,
@@ -157,10 +147,42 @@ struct intel_engine_cs {
157 unsigned int hw_id; 147 unsigned int hw_id;
158 unsigned int guc_id; /* XXX same as hw_id? */ 148 unsigned int guc_id; /* XXX same as hw_id? */
159 u32 mmio_base; 149 u32 mmio_base;
160 struct drm_device *dev;
161 struct intel_ringbuffer *buffer; 150 struct intel_ringbuffer *buffer;
162 struct list_head buffers; 151 struct list_head buffers;
163 152
153 /* Rather than have every client wait upon all user interrupts,
154 * with the herd waking after every interrupt and each doing the
155 * heavyweight seqno dance, we delegate the task (of being the
156 * bottom-half of the user interrupt) to the first client. After
157 * every interrupt, we wake up one client, who does the heavyweight
158 * coherent seqno read and either goes back to sleep (if incomplete),
159 * or wakes up all the completed clients in parallel, before then
160 * transferring the bottom-half status to the next client in the queue.
161 *
162 * Compared to walking the entire list of waiters in a single dedicated
163 * bottom-half, we reduce the latency of the first waiter by avoiding
164 * a context switch, but incur additional coherent seqno reads when
165 * following the chain of request breadcrumbs. Since it is most likely
166 * that we have a single client waiting on each seqno, then reducing
167 * the overhead of waking that client is much preferred.
168 */
169 struct intel_breadcrumbs {
170 struct task_struct *irq_seqno_bh; /* bh for user interrupts */
171 unsigned long irq_wakeups;
172 bool irq_posted;
173
174 spinlock_t lock; /* protects the lists of requests */
175 struct rb_root waiters; /* sorted by retirement, priority */
176 struct rb_root signals; /* sorted by retirement */
177 struct intel_wait *first_wait; /* oldest waiter by retirement */
178 struct task_struct *signaler; /* used for fence signalling */
179 struct drm_i915_gem_request *first_signal;
180 struct timer_list fake_irq; /* used after a missed interrupt */
181
182 bool irq_enabled : 1;
183 bool rpm_wakelock : 1;
184 } breadcrumbs;
185
164 /* 186 /*
165 * A pool of objects to use as shadow copies of client batch buffers 187 * A pool of objects to use as shadow copies of client batch buffers
166 * when the command parser is enabled. Prevents the client from 188 * when the command parser is enabled. Prevents the client from
@@ -171,11 +193,10 @@ struct intel_engine_cs {
171 struct intel_hw_status_page status_page; 193 struct intel_hw_status_page status_page;
172 struct i915_ctx_workarounds wa_ctx; 194 struct i915_ctx_workarounds wa_ctx;
173 195
174 unsigned irq_refcount; /* protected by dev_priv->irq_lock */ 196 u32 irq_keep_mask; /* always keep these interrupts */
175 u32 irq_enable_mask; /* bitmask to enable ring interrupt */ 197 u32 irq_enable_mask; /* bitmask to enable ring interrupt */
176 struct drm_i915_gem_request *trace_irq_req; 198 void (*irq_enable)(struct intel_engine_cs *ring);
177 bool __must_check (*irq_get)(struct intel_engine_cs *ring); 199 void (*irq_disable)(struct intel_engine_cs *ring);
178 void (*irq_put)(struct intel_engine_cs *ring);
179 200
180 int (*init_hw)(struct intel_engine_cs *ring); 201 int (*init_hw)(struct intel_engine_cs *ring);
181 202
@@ -194,9 +215,6 @@ struct intel_engine_cs {
194 * monotonic, even if not coherent. 215 * monotonic, even if not coherent.
195 */ 216 */
196 void (*irq_seqno_barrier)(struct intel_engine_cs *ring); 217 void (*irq_seqno_barrier)(struct intel_engine_cs *ring);
197 u32 (*get_seqno)(struct intel_engine_cs *ring);
198 void (*set_seqno)(struct intel_engine_cs *ring,
199 u32 seqno);
200 int (*dispatch_execbuffer)(struct drm_i915_gem_request *req, 218 int (*dispatch_execbuffer)(struct drm_i915_gem_request *req,
201 u64 offset, u32 length, 219 u64 offset, u32 length,
202 unsigned dispatch_flags); 220 unsigned dispatch_flags);
@@ -268,13 +286,11 @@ struct intel_engine_cs {
268 struct tasklet_struct irq_tasklet; 286 struct tasklet_struct irq_tasklet;
269 spinlock_t execlist_lock; /* used inside tasklet, use spin_lock_bh */ 287 spinlock_t execlist_lock; /* used inside tasklet, use spin_lock_bh */
270 struct list_head execlist_queue; 288 struct list_head execlist_queue;
271 struct list_head execlist_retired_req_list;
272 unsigned int fw_domains; 289 unsigned int fw_domains;
273 unsigned int next_context_status_buffer; 290 unsigned int next_context_status_buffer;
274 unsigned int idle_lite_restore_wa; 291 unsigned int idle_lite_restore_wa;
275 bool disable_lite_restore_wa; 292 bool disable_lite_restore_wa;
276 u32 ctx_desc_template; 293 u32 ctx_desc_template;
277 u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
278 int (*emit_request)(struct drm_i915_gem_request *request); 294 int (*emit_request)(struct drm_i915_gem_request *request);
279 int (*emit_flush)(struct drm_i915_gem_request *request, 295 int (*emit_flush)(struct drm_i915_gem_request *request,
280 u32 invalidate_domains, 296 u32 invalidate_domains,
@@ -306,20 +322,16 @@ struct intel_engine_cs {
306 * inspecting request list. 322 * inspecting request list.
307 */ 323 */
308 u32 last_submitted_seqno; 324 u32 last_submitted_seqno;
309 unsigned user_interrupts;
310 325
311 bool gpu_caches_dirty; 326 bool gpu_caches_dirty;
312 327
313 wait_queue_head_t irq_queue; 328 struct i915_gem_context *last_context;
314
315 struct intel_context *last_context;
316 329
317 struct intel_ring_hangcheck hangcheck; 330 struct intel_ring_hangcheck hangcheck;
318 331
319 struct { 332 struct {
320 struct drm_i915_gem_object *obj; 333 struct drm_i915_gem_object *obj;
321 u32 gtt_offset; 334 u32 gtt_offset;
322 volatile u32 *cpu_page;
323 } scratch; 335 } scratch;
324 336
325 bool needs_cmd_parser; 337 bool needs_cmd_parser;
@@ -350,13 +362,13 @@ struct intel_engine_cs {
350}; 362};
351 363
352static inline bool 364static inline bool
353intel_engine_initialized(struct intel_engine_cs *engine) 365intel_engine_initialized(const struct intel_engine_cs *engine)
354{ 366{
355 return engine->dev != NULL; 367 return engine->i915 != NULL;
356} 368}
357 369
358static inline unsigned 370static inline unsigned
359intel_engine_flag(struct intel_engine_cs *engine) 371intel_engine_flag(const struct intel_engine_cs *engine)
360{ 372{
361 return 1 << engine->id; 373 return 1 << engine->id;
362} 374}
@@ -427,7 +439,7 @@ intel_write_status_page(struct intel_engine_cs *engine,
427 439
428struct intel_ringbuffer * 440struct intel_ringbuffer *
429intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size); 441intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size);
430int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev, 442int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv,
431 struct intel_ringbuffer *ringbuf); 443 struct intel_ringbuffer *ringbuf);
432void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf); 444void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
433void intel_ringbuffer_free(struct intel_ringbuffer *ring); 445void intel_ringbuffer_free(struct intel_ringbuffer *ring);
@@ -458,15 +470,14 @@ static inline void intel_ring_advance(struct intel_engine_cs *engine)
458} 470}
459int __intel_ring_space(int head, int tail, int size); 471int __intel_ring_space(int head, int tail, int size);
460void intel_ring_update_space(struct intel_ringbuffer *ringbuf); 472void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
461bool intel_engine_stopped(struct intel_engine_cs *engine);
462 473
463int __must_check intel_engine_idle(struct intel_engine_cs *engine); 474int __must_check intel_engine_idle(struct intel_engine_cs *engine);
464void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno); 475void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno);
465int intel_ring_flush_all_caches(struct drm_i915_gem_request *req); 476int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
466int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req); 477int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);
467 478
479int intel_init_pipe_control(struct intel_engine_cs *engine, int size);
468void intel_fini_pipe_control(struct intel_engine_cs *engine); 480void intel_fini_pipe_control(struct intel_engine_cs *engine);
469int intel_init_pipe_control(struct intel_engine_cs *engine);
470 481
471int intel_init_render_ring_buffer(struct drm_device *dev); 482int intel_init_render_ring_buffer(struct drm_device *dev);
472int intel_init_bsd_ring_buffer(struct drm_device *dev); 483int intel_init_bsd_ring_buffer(struct drm_device *dev);
@@ -475,6 +486,10 @@ int intel_init_blt_ring_buffer(struct drm_device *dev);
475int intel_init_vebox_ring_buffer(struct drm_device *dev); 486int intel_init_vebox_ring_buffer(struct drm_device *dev);
476 487
477u64 intel_ring_get_active_head(struct intel_engine_cs *engine); 488u64 intel_ring_get_active_head(struct intel_engine_cs *engine);
489static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
490{
491 return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
492}
478 493
479int init_workarounds_ring(struct intel_engine_cs *engine); 494int init_workarounds_ring(struct intel_engine_cs *engine);
480 495
@@ -486,26 +501,73 @@ static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
486/* 501/*
487 * Arbitrary size for largest possible 'add request' sequence. The code paths 502 * Arbitrary size for largest possible 'add request' sequence. The code paths
488 * are complex and variable. Empirical measurement shows that the worst case 503 * are complex and variable. Empirical measurement shows that the worst case
489 * is ILK at 136 words. Reserving too much is better than reserving too little 504 * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However,
490 * as that allows for corner cases that might have been missed. So the figure 505 * we need to allocate double the largest single packet within that emission
491 * has been rounded up to 160 words. 506 * to account for tail wraparound (so 6 + 6 + 72 dwords for BDW).
492 */ 507 */
493#define MIN_SPACE_FOR_ADD_REQUEST 160 508#define MIN_SPACE_FOR_ADD_REQUEST 336
494 509
495/* 510static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
496 * Reserve space in the ring to guarantee that the i915_add_request() call 511{
497 * will always have sufficient room to do its stuff. The request creation 512 return engine->status_page.gfx_addr + I915_GEM_HWS_INDEX_ADDR;
498 * code calls this automatically. 513}
499 */ 514
500void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size); 515/* intel_breadcrumbs.c -- user interrupt bottom-half for waiters */
501/* Cancel the reservation, e.g. because the request is being discarded. */ 516struct intel_wait {
502void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf); 517 struct rb_node node;
503/* Use the reserved space - for use by i915_add_request() only. */ 518 struct task_struct *tsk;
504void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf); 519 u32 seqno;
505/* Finish with the reserved space - for use by i915_add_request() only. */ 520};
506void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf); 521
507 522struct intel_signal_node {
508/* Legacy ringbuffer specific portion of reservation code: */ 523 struct rb_node node;
509int intel_ring_reserve_space(struct drm_i915_gem_request *request); 524 struct intel_wait wait;
525};
526
527int intel_engine_init_breadcrumbs(struct intel_engine_cs *engine);
528
529static inline void intel_wait_init(struct intel_wait *wait, u32 seqno)
530{
531 wait->tsk = current;
532 wait->seqno = seqno;
533}
534
535static inline bool intel_wait_complete(const struct intel_wait *wait)
536{
537 return RB_EMPTY_NODE(&wait->node);
538}
539
540bool intel_engine_add_wait(struct intel_engine_cs *engine,
541 struct intel_wait *wait);
542void intel_engine_remove_wait(struct intel_engine_cs *engine,
543 struct intel_wait *wait);
544void intel_engine_enable_signaling(struct drm_i915_gem_request *request);
545
546static inline bool intel_engine_has_waiter(struct intel_engine_cs *engine)
547{
548 return READ_ONCE(engine->breadcrumbs.irq_seqno_bh);
549}
550
551static inline bool intel_engine_wakeup(struct intel_engine_cs *engine)
552{
553 bool wakeup = false;
554 struct task_struct *tsk = READ_ONCE(engine->breadcrumbs.irq_seqno_bh);
555 /* Note that for this not to dangerously chase a dangling pointer,
 556 * the caller is responsible for ensuring that the task remains valid for
557 * wake_up_process() i.e. that the RCU grace period cannot expire.
558 *
559 * Also note that tsk is likely to be in !TASK_RUNNING state so an
560 * early test for tsk->state != TASK_RUNNING before wake_up_process()
561 * is unlikely to be beneficial.
562 */
563 if (tsk)
564 wakeup = wake_up_process(tsk);
565 return wakeup;
566}
567
568void intel_engine_enable_fake_irq(struct intel_engine_cs *engine);
569void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine);
570unsigned int intel_kick_waiters(struct drm_i915_private *i915);
571unsigned int intel_kick_signalers(struct drm_i915_private *i915);
510 572
511#endif /* _INTEL_RINGBUFFER_H_ */ 573#endif /* _INTEL_RINGBUFFER_H_ */
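
The intel_ringbuffer.h changes above swap the per-engine irq_queue wait queue for the breadcrumbs interface: a waiter records its task and target seqno in an intel_wait, adds itself to the engine's waiter tree, and sleeps until the currently designated bottom-half wakes it once the seqno has passed. The fragment below is only a rough reading of how those declarations fit together, based on the struct intel_breadcrumbs comment; the real wait loop elsewhere in the driver also deals with interrupt enabling, signals, timeouts and the missed-interrupt fallback, none of which is shown here.

/* Illustrative fragment, not driver code: wait for @seqno on @engine
 * using the breadcrumbs API declared above. */
static void example_wait_for_seqno(struct intel_engine_cs *engine, u32 seqno)
{
	struct intel_wait wait;

	intel_wait_init(&wait, seqno);		/* remember current task + target seqno */
	intel_engine_add_wait(engine, &wait);	/* join the engine's waiter tree */

	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno))
			break;
		schedule();			/* woken by the acting bottom-half waiter */
	}
	__set_current_state(TASK_RUNNING);

	intel_engine_remove_wait(engine, &wait);
}
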
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index 7fb1da4e7fc3..1c603bbe5784 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -65,6 +65,9 @@
65bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv, 65bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
66 int power_well_id); 66 int power_well_id);
67 67
68static struct i915_power_well *
69lookup_power_well(struct drm_i915_private *dev_priv, int power_well_id);
70
68const char * 71const char *
69intel_display_power_domain_str(enum intel_display_power_domain domain) 72intel_display_power_domain_str(enum intel_display_power_domain domain)
70{ 73{
@@ -151,6 +154,23 @@ static void intel_power_well_disable(struct drm_i915_private *dev_priv,
151 power_well->ops->disable(dev_priv, power_well); 154 power_well->ops->disable(dev_priv, power_well);
152} 155}
153 156
157static void intel_power_well_get(struct drm_i915_private *dev_priv,
158 struct i915_power_well *power_well)
159{
160 if (!power_well->count++)
161 intel_power_well_enable(dev_priv, power_well);
162}
163
164static void intel_power_well_put(struct drm_i915_private *dev_priv,
165 struct i915_power_well *power_well)
166{
167 WARN(!power_well->count, "Use count on power well %s is already zero",
168 power_well->name);
169
170 if (!--power_well->count)
171 intel_power_well_disable(dev_priv, power_well);
172}
173
154/* 174/*
155 * We should only use the power well if we explicitly asked the hardware to 175 * We should only use the power well if we explicitly asked the hardware to
156 * enable it, so check if it's enabled and also check if we've requested it to 176 * enable it, so check if it's enabled and also check if we've requested it to
@@ -267,7 +287,7 @@ void intel_display_set_init_power(struct drm_i915_private *dev_priv,
267 */ 287 */
268static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv) 288static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
269{ 289{
270 struct drm_device *dev = dev_priv->dev; 290 struct drm_device *dev = &dev_priv->drm;
271 291
272 /* 292 /*
273 * After we re-enable the power well, if we touch VGA register 0x3d5 293 * After we re-enable the power well, if we touch VGA register 0x3d5
@@ -298,7 +318,7 @@ static void hsw_power_well_pre_disable(struct drm_i915_private *dev_priv)
298static void skl_power_well_post_enable(struct drm_i915_private *dev_priv, 318static void skl_power_well_post_enable(struct drm_i915_private *dev_priv,
299 struct i915_power_well *power_well) 319 struct i915_power_well *power_well)
300{ 320{
301 struct drm_device *dev = dev_priv->dev; 321 struct drm_device *dev = &dev_priv->drm;
302 322
303 /* 323 /*
304 * After we re-enable the power well, if we touch VGA register 0x3d5 324 * After we re-enable the power well, if we touch VGA register 0x3d5
@@ -345,8 +365,11 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
345 365
346 if (!is_enabled) { 366 if (!is_enabled) {
347 DRM_DEBUG_KMS("Enabling power well\n"); 367 DRM_DEBUG_KMS("Enabling power well\n");
348 if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) & 368 if (intel_wait_for_register(dev_priv,
349 HSW_PWR_WELL_STATE_ENABLED), 20)) 369 HSW_PWR_WELL_DRIVER,
370 HSW_PWR_WELL_STATE_ENABLED,
371 HSW_PWR_WELL_STATE_ENABLED,
372 20))
350 DRM_ERROR("Timeout enabling power well\n"); 373 DRM_ERROR("Timeout enabling power well\n");
351 hsw_power_well_post_enable(dev_priv); 374 hsw_power_well_post_enable(dev_priv);
352 } 375 }
@@ -419,6 +442,16 @@ static void hsw_set_power_well(struct drm_i915_private *dev_priv,
419 BIT(POWER_DOMAIN_MODESET) | \ 442 BIT(POWER_DOMAIN_MODESET) | \
420 BIT(POWER_DOMAIN_AUX_A) | \ 443 BIT(POWER_DOMAIN_AUX_A) | \
421 BIT(POWER_DOMAIN_INIT)) 444 BIT(POWER_DOMAIN_INIT))
445#define BXT_DPIO_CMN_A_POWER_DOMAINS ( \
446 BIT(POWER_DOMAIN_PORT_DDI_A_LANES) | \
447 BIT(POWER_DOMAIN_AUX_A) | \
448 BIT(POWER_DOMAIN_INIT))
449#define BXT_DPIO_CMN_BC_POWER_DOMAINS ( \
450 BIT(POWER_DOMAIN_PORT_DDI_B_LANES) | \
451 BIT(POWER_DOMAIN_PORT_DDI_C_LANES) | \
452 BIT(POWER_DOMAIN_AUX_B) | \
453 BIT(POWER_DOMAIN_AUX_C) | \
454 BIT(POWER_DOMAIN_INIT))
422 455
423static void assert_can_enable_dc9(struct drm_i915_private *dev_priv) 456static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
424{ 457{
@@ -548,6 +581,7 @@ void bxt_enable_dc9(struct drm_i915_private *dev_priv)
548 581
549 DRM_DEBUG_KMS("Enabling DC9\n"); 582 DRM_DEBUG_KMS("Enabling DC9\n");
550 583
584 intel_power_sequencer_reset(dev_priv);
551 gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9); 585 gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
552} 586}
553 587
@@ -669,8 +703,11 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
669 703
670 switch (power_well->data) { 704 switch (power_well->data) {
671 case SKL_DISP_PW_1: 705 case SKL_DISP_PW_1:
672 if (wait_for((I915_READ(SKL_FUSE_STATUS) & 706 if (intel_wait_for_register(dev_priv,
673 SKL_FUSE_PG0_DIST_STATUS), 1)) { 707 SKL_FUSE_STATUS,
708 SKL_FUSE_PG0_DIST_STATUS,
709 SKL_FUSE_PG0_DIST_STATUS,
710 1)) {
674 DRM_ERROR("PG0 not enabled\n"); 711 DRM_ERROR("PG0 not enabled\n");
675 return; 712 return;
676 } 713 }
@@ -731,12 +768,18 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv,
731 768
732 if (check_fuse_status) { 769 if (check_fuse_status) {
733 if (power_well->data == SKL_DISP_PW_1) { 770 if (power_well->data == SKL_DISP_PW_1) {
734 if (wait_for((I915_READ(SKL_FUSE_STATUS) & 771 if (intel_wait_for_register(dev_priv,
735 SKL_FUSE_PG1_DIST_STATUS), 1)) 772 SKL_FUSE_STATUS,
773 SKL_FUSE_PG1_DIST_STATUS,
774 SKL_FUSE_PG1_DIST_STATUS,
775 1))
736 DRM_ERROR("PG1 distributing status timeout\n"); 776 DRM_ERROR("PG1 distributing status timeout\n");
737 } else if (power_well->data == SKL_DISP_PW_2) { 777 } else if (power_well->data == SKL_DISP_PW_2) {
738 if (wait_for((I915_READ(SKL_FUSE_STATUS) & 778 if (intel_wait_for_register(dev_priv,
739 SKL_FUSE_PG2_DIST_STATUS), 1)) 779 SKL_FUSE_STATUS,
780 SKL_FUSE_PG2_DIST_STATUS,
781 SKL_FUSE_PG2_DIST_STATUS,
782 1))
740 DRM_ERROR("PG2 distributing status timeout\n"); 783 DRM_ERROR("PG2 distributing status timeout\n");
741 } 784 }
742 } 785 }
@@ -800,21 +843,99 @@ static void skl_power_well_disable(struct drm_i915_private *dev_priv,
800 skl_set_power_well(dev_priv, power_well, false); 843 skl_set_power_well(dev_priv, power_well, false);
801} 844}
802 845
846static enum dpio_phy bxt_power_well_to_phy(struct i915_power_well *power_well)
847{
848 enum skl_disp_power_wells power_well_id = power_well->data;
849
850 return power_well_id == BXT_DPIO_CMN_A ? DPIO_PHY1 : DPIO_PHY0;
851}
852
853static void bxt_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
854 struct i915_power_well *power_well)
855{
856 enum skl_disp_power_wells power_well_id = power_well->data;
857 struct i915_power_well *cmn_a_well;
858
859 if (power_well_id == BXT_DPIO_CMN_BC) {
860 /*
861 * We need to copy the GRC calibration value from the eDP PHY,
862 * so make sure it's powered up.
863 */
864 cmn_a_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A);
865 intel_power_well_get(dev_priv, cmn_a_well);
866 }
867
868 bxt_ddi_phy_init(dev_priv, bxt_power_well_to_phy(power_well));
869
870 if (power_well_id == BXT_DPIO_CMN_BC)
871 intel_power_well_put(dev_priv, cmn_a_well);
872}
873
874static void bxt_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
875 struct i915_power_well *power_well)
876{
877 bxt_ddi_phy_uninit(dev_priv, bxt_power_well_to_phy(power_well));
878}
879
880static bool bxt_dpio_cmn_power_well_enabled(struct drm_i915_private *dev_priv,
881 struct i915_power_well *power_well)
882{
883 return bxt_ddi_phy_is_enabled(dev_priv,
884 bxt_power_well_to_phy(power_well));
885}
886
887static void bxt_dpio_cmn_power_well_sync_hw(struct drm_i915_private *dev_priv,
888 struct i915_power_well *power_well)
889{
890 if (power_well->count > 0)
891 bxt_dpio_cmn_power_well_enable(dev_priv, power_well);
892 else
893 bxt_dpio_cmn_power_well_disable(dev_priv, power_well);
894}
895
896
897static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
898{
899 struct i915_power_well *power_well;
900
901 power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_A);
902 if (power_well->count > 0)
903 bxt_ddi_phy_verify_state(dev_priv,
904 bxt_power_well_to_phy(power_well));
905
906 power_well = lookup_power_well(dev_priv, BXT_DPIO_CMN_BC);
907 if (power_well->count > 0)
908 bxt_ddi_phy_verify_state(dev_priv,
909 bxt_power_well_to_phy(power_well));
910}
911
803static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv, 912static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
804 struct i915_power_well *power_well) 913 struct i915_power_well *power_well)
805{ 914{
806 return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0; 915 return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0;
807} 916}
808 917
918static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
919{
920 u32 tmp = I915_READ(DBUF_CTL);
921
922 WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
923 (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
924 "Unexpected DBuf power power state (0x%08x)\n", tmp);
925}
926
809static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv, 927static void gen9_dc_off_power_well_enable(struct drm_i915_private *dev_priv,
810 struct i915_power_well *power_well) 928 struct i915_power_well *power_well)
811{ 929{
812 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 930 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
813 931
814 if (IS_BROXTON(dev_priv)) { 932 WARN_ON(dev_priv->cdclk_freq !=
815 broxton_cdclk_verify_state(dev_priv); 933 dev_priv->display.get_display_clock_speed(&dev_priv->drm));
816 broxton_ddi_phy_verify_state(dev_priv); 934
817 } 935 gen9_assert_dbuf_enabled(dev_priv);
936
937 if (IS_BROXTON(dev_priv))
938 bxt_verify_ddi_phy_power_wells(dev_priv);
818} 939}
819 940
820static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv, 941static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv,
@@ -948,10 +1069,16 @@ static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
948 */ 1069 */
949 I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE); 1070 I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
950 I915_WRITE(CBR1_VLV, 0); 1071 I915_WRITE(CBR1_VLV, 0);
1072
1073 WARN_ON(dev_priv->rawclk_freq == 0);
1074
1075 I915_WRITE(RAWCLK_FREQ_VLV,
1076 DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
951} 1077}
952 1078
953static void vlv_display_power_well_init(struct drm_i915_private *dev_priv) 1079static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
954{ 1080{
1081 struct intel_encoder *encoder;
955 enum pipe pipe; 1082 enum pipe pipe;
956 1083
957 /* 1084 /*
@@ -962,7 +1089,7 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
962 * 1089 *
963 * CHV DPLL B/C have some issues if VGA mode is enabled. 1090 * CHV DPLL B/C have some issues if VGA mode is enabled.
964 */ 1091 */
965 for_each_pipe(dev_priv->dev, pipe) { 1092 for_each_pipe(&dev_priv->drm, pipe) {
966 u32 val = I915_READ(DPLL(pipe)); 1093 u32 val = I915_READ(DPLL(pipe));
967 1094
968 val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; 1095 val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
@@ -987,7 +1114,13 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
987 1114
988 intel_hpd_init(dev_priv); 1115 intel_hpd_init(dev_priv);
989 1116
990 i915_redisable_vga_power_on(dev_priv->dev); 1117 /* Re-enable the ADPA, if we have one */
1118 for_each_intel_encoder(&dev_priv->drm, encoder) {
1119 if (encoder->type == INTEL_OUTPUT_ANALOG)
1120 intel_crt_reset(&encoder->base);
1121 }
1122
1123 i915_redisable_vga_power_on(&dev_priv->drm);
991} 1124}
992 1125
993static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv) 1126static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
@@ -997,9 +1130,11 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
997 spin_unlock_irq(&dev_priv->irq_lock); 1130 spin_unlock_irq(&dev_priv->irq_lock);
998 1131
999 /* make sure we're done processing display irqs */ 1132 /* make sure we're done processing display irqs */
1000 synchronize_irq(dev_priv->dev->irq); 1133 synchronize_irq(dev_priv->drm.irq);
1134
1135 intel_power_sequencer_reset(dev_priv);
1001 1136
1002 vlv_power_sequencer_reset(dev_priv); 1137 intel_hpd_poll_init(dev_priv);
1003} 1138}
1004 1139
1005static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv, 1140static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
@@ -1092,7 +1227,6 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
1092 u32 phy_control = dev_priv->chv_phy_control; 1227 u32 phy_control = dev_priv->chv_phy_control;
1093 u32 phy_status = 0; 1228 u32 phy_status = 0;
1094 u32 phy_status_mask = 0xffffffff; 1229 u32 phy_status_mask = 0xffffffff;
1095 u32 tmp;
1096 1230
1097 /* 1231 /*
1098	 * The BIOS can leave the PHY in some weird state 1232
@@ -1180,10 +1314,14 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
1180 * The PHY may be busy with some initial calibration and whatnot, 1314 * The PHY may be busy with some initial calibration and whatnot,
1181 * so the power state can take a while to actually change. 1315 * so the power state can take a while to actually change.
1182 */ 1316 */
1183 if (wait_for((tmp = I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask) == phy_status, 10)) 1317 if (intel_wait_for_register(dev_priv,
1184 WARN(phy_status != tmp, 1318 DISPLAY_PHY_STATUS,
1185 "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n", 1319 phy_status_mask,
1186 tmp, phy_status, dev_priv->chv_phy_control); 1320 phy_status,
1321 10))
1322 DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
1323 I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
1324 phy_status, dev_priv->chv_phy_control);
1187} 1325}
1188 1326
1189#undef BITS_SET 1327#undef BITS_SET
@@ -1211,7 +1349,11 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
1211 vlv_set_power_well(dev_priv, power_well, true); 1349 vlv_set_power_well(dev_priv, power_well, true);
1212 1350
1213 /* Poll for phypwrgood signal */ 1351 /* Poll for phypwrgood signal */
1214 if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1)) 1352 if (intel_wait_for_register(dev_priv,
1353 DISPLAY_PHY_STATUS,
1354 PHY_POWERGOOD(phy),
1355 PHY_POWERGOOD(phy),
1356 1))
1215 DRM_ERROR("Display PHY %d is not power up\n", phy); 1357 DRM_ERROR("Display PHY %d is not power up\n", phy);
1216 1358
1217 mutex_lock(&dev_priv->sb_lock); 1359 mutex_lock(&dev_priv->sb_lock);
@@ -1501,10 +1643,8 @@ __intel_display_power_get_domain(struct drm_i915_private *dev_priv,
1501 struct i915_power_well *power_well; 1643 struct i915_power_well *power_well;
1502 int i; 1644 int i;
1503 1645
1504 for_each_power_well(i, power_well, BIT(domain), power_domains) { 1646 for_each_power_well(i, power_well, BIT(domain), power_domains)
1505 if (!power_well->count++) 1647 intel_power_well_get(dev_priv, power_well);
1506 intel_power_well_enable(dev_priv, power_well);
1507 }
1508 1648
1509 power_domains->domain_use_count[domain]++; 1649 power_domains->domain_use_count[domain]++;
1510} 1650}
@@ -1598,14 +1738,8 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
1598 intel_display_power_domain_str(domain)); 1738 intel_display_power_domain_str(domain));
1599 power_domains->domain_use_count[domain]--; 1739 power_domains->domain_use_count[domain]--;
1600 1740
1601 for_each_power_well_rev(i, power_well, BIT(domain), power_domains) { 1741 for_each_power_well_rev(i, power_well, BIT(domain), power_domains)
1602 WARN(!power_well->count, 1742 intel_power_well_put(dev_priv, power_well);
1603 "Use count on power well %s is already zero",
1604 power_well->name);
1605
1606 if (!--power_well->count)
1607 intel_power_well_disable(dev_priv, power_well);
1608 }
1609 1743
1610 mutex_unlock(&power_domains->lock); 1744 mutex_unlock(&power_domains->lock);
1611 1745
@@ -1776,6 +1910,13 @@ static const struct i915_power_well_ops gen9_dc_off_power_well_ops = {
1776 .is_enabled = gen9_dc_off_power_well_enabled, 1910 .is_enabled = gen9_dc_off_power_well_enabled,
1777}; 1911};
1778 1912
1913static const struct i915_power_well_ops bxt_dpio_cmn_power_well_ops = {
1914 .sync_hw = bxt_dpio_cmn_power_well_sync_hw,
1915 .enable = bxt_dpio_cmn_power_well_enable,
1916 .disable = bxt_dpio_cmn_power_well_disable,
1917 .is_enabled = bxt_dpio_cmn_power_well_enabled,
1918};
1919
1779static struct i915_power_well hsw_power_wells[] = { 1920static struct i915_power_well hsw_power_wells[] = {
1780 { 1921 {
1781 .name = "always-on", 1922 .name = "always-on",
@@ -2012,6 +2153,18 @@ static struct i915_power_well bxt_power_wells[] = {
2012 .ops = &skl_power_well_ops, 2153 .ops = &skl_power_well_ops,
2013 .data = SKL_DISP_PW_2, 2154 .data = SKL_DISP_PW_2,
2014 }, 2155 },
2156 {
2157 .name = "dpio-common-a",
2158 .domains = BXT_DPIO_CMN_A_POWER_DOMAINS,
2159 .ops = &bxt_dpio_cmn_power_well_ops,
2160 .data = BXT_DPIO_CMN_A,
2161 },
2162 {
2163 .name = "dpio-common-bc",
2164 .domains = BXT_DPIO_CMN_BC_POWER_DOMAINS,
2165 .ops = &bxt_dpio_cmn_power_well_ops,
2166 .data = BXT_DPIO_CMN_BC,
2167 },
2015}; 2168};
2016 2169
2017static int 2170static int
@@ -2131,7 +2284,7 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv)
2131 */ 2284 */
2132void intel_power_domains_fini(struct drm_i915_private *dev_priv) 2285void intel_power_domains_fini(struct drm_i915_private *dev_priv)
2133{ 2286{
2134 struct device *device = &dev_priv->dev->pdev->dev; 2287 struct device *device = &dev_priv->drm.pdev->dev;
2135 2288
2136 /* 2289 /*
2137 * The i915.ko module is still not prepared to be loaded when 2290 * The i915.ko module is still not prepared to be loaded when
@@ -2171,6 +2324,28 @@ static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv)
2171 mutex_unlock(&power_domains->lock); 2324 mutex_unlock(&power_domains->lock);
2172} 2325}
2173 2326
2327static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
2328{
2329 I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
2330 POSTING_READ(DBUF_CTL);
2331
2332 udelay(10);
2333
2334 if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
2335 DRM_ERROR("DBuf power enable timeout\n");
2336}
2337
2338static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
2339{
2340 I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
2341 POSTING_READ(DBUF_CTL);
2342
2343 udelay(10);
2344
2345 if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
2346 DRM_ERROR("DBuf power disable timeout!\n");
2347}
2348
2174static void skl_display_core_init(struct drm_i915_private *dev_priv, 2349static void skl_display_core_init(struct drm_i915_private *dev_priv,
2175 bool resume) 2350 bool resume)
2176{ 2351{
@@ -2195,12 +2370,11 @@ static void skl_display_core_init(struct drm_i915_private *dev_priv,
2195 2370
2196 mutex_unlock(&power_domains->lock); 2371 mutex_unlock(&power_domains->lock);
2197 2372
2198 if (!resume)
2199 return;
2200
2201 skl_init_cdclk(dev_priv); 2373 skl_init_cdclk(dev_priv);
2202 2374
2203 if (dev_priv->csr.dmc_payload) 2375 gen9_dbuf_enable(dev_priv);
2376
2377 if (resume && dev_priv->csr.dmc_payload)
2204 intel_csr_load_program(dev_priv); 2378 intel_csr_load_program(dev_priv);
2205} 2379}
2206 2380
@@ -2211,6 +2385,8 @@ static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
2211 2385
2212 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 2386 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
2213 2387
2388 gen9_dbuf_disable(dev_priv);
2389
2214 skl_uninit_cdclk(dev_priv); 2390 skl_uninit_cdclk(dev_priv);
2215 2391
2216 /* The spec doesn't call for removing the reset handshake flag */ 2392 /* The spec doesn't call for removing the reset handshake flag */
@@ -2254,11 +2430,9 @@ void bxt_display_core_init(struct drm_i915_private *dev_priv,
2254 2430
2255 mutex_unlock(&power_domains->lock); 2431 mutex_unlock(&power_domains->lock);
2256 2432
2257 broxton_init_cdclk(dev_priv); 2433 bxt_init_cdclk(dev_priv);
2258 broxton_ddi_phy_init(dev_priv);
2259 2434
2260 broxton_cdclk_verify_state(dev_priv); 2435 gen9_dbuf_enable(dev_priv);
2261 broxton_ddi_phy_verify_state(dev_priv);
2262 2436
2263 if (resume && dev_priv->csr.dmc_payload) 2437 if (resume && dev_priv->csr.dmc_payload)
2264 intel_csr_load_program(dev_priv); 2438 intel_csr_load_program(dev_priv);
@@ -2271,8 +2445,9 @@ void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
2271 2445
2272 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 2446 gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
2273 2447
2274 broxton_ddi_phy_uninit(dev_priv); 2448 gen9_dbuf_disable(dev_priv);
2275 broxton_uninit_cdclk(dev_priv); 2449
2450 bxt_uninit_cdclk(dev_priv);
2276 2451
2277 /* The spec doesn't call for removing the reset handshake flag */ 2452 /* The spec doesn't call for removing the reset handshake flag */
2278 2453
@@ -2403,13 +2578,14 @@ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
2403/** 2578/**
2404 * intel_power_domains_init_hw - initialize hardware power domain state 2579 * intel_power_domains_init_hw - initialize hardware power domain state
2405 * @dev_priv: i915 device instance 2580 * @dev_priv: i915 device instance
2581 * @resume: Called from resume code paths or not
2406 * 2582 *
2407 * This function initializes the hardware power domain state and enables all 2583 * This function initializes the hardware power domain state and enables all
2408 * power domains using intel_display_set_init_power(). 2584 * power domains using intel_display_set_init_power().
2409 */ 2585 */
2410void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume) 2586void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
2411{ 2587{
2412 struct drm_device *dev = dev_priv->dev; 2588 struct drm_device *dev = &dev_priv->drm;
2413 struct i915_power_domains *power_domains = &dev_priv->power_domains; 2589 struct i915_power_domains *power_domains = &dev_priv->power_domains;
2414 2590
2415 power_domains->initializing = true; 2591 power_domains->initializing = true;
@@ -2471,7 +2647,7 @@ void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
2471 */ 2647 */
2472void intel_runtime_pm_get(struct drm_i915_private *dev_priv) 2648void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
2473{ 2649{
2474 struct drm_device *dev = dev_priv->dev; 2650 struct drm_device *dev = &dev_priv->drm;
2475 struct device *device = &dev->pdev->dev; 2651 struct device *device = &dev->pdev->dev;
2476 2652
2477 pm_runtime_get_sync(device); 2653 pm_runtime_get_sync(device);
@@ -2492,7 +2668,7 @@ void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
2492 */ 2668 */
2493bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv) 2669bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
2494{ 2670{
2495 struct drm_device *dev = dev_priv->dev; 2671 struct drm_device *dev = &dev_priv->drm;
2496 struct device *device = &dev->pdev->dev; 2672 struct device *device = &dev->pdev->dev;
2497 2673
2498 if (IS_ENABLED(CONFIG_PM)) { 2674 if (IS_ENABLED(CONFIG_PM)) {
@@ -2534,7 +2710,7 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
2534 */ 2710 */
2535void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv) 2711void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
2536{ 2712{
2537 struct drm_device *dev = dev_priv->dev; 2713 struct drm_device *dev = &dev_priv->drm;
2538 struct device *device = &dev->pdev->dev; 2714 struct device *device = &dev->pdev->dev;
2539 2715
2540 assert_rpm_wakelock_held(dev_priv); 2716 assert_rpm_wakelock_held(dev_priv);
@@ -2553,7 +2729,7 @@ void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
2553 */ 2729 */
2554void intel_runtime_pm_put(struct drm_i915_private *dev_priv) 2730void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
2555{ 2731{
2556 struct drm_device *dev = dev_priv->dev; 2732 struct drm_device *dev = &dev_priv->drm;
2557 struct device *device = &dev->pdev->dev; 2733 struct device *device = &dev->pdev->dev;
2558 2734
2559 assert_rpm_wakelock_held(dev_priv); 2735 assert_rpm_wakelock_held(dev_priv);
@@ -2576,7 +2752,7 @@ void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
2576 */ 2752 */
2577void intel_runtime_pm_enable(struct drm_i915_private *dev_priv) 2753void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
2578{ 2754{
2579 struct drm_device *dev = dev_priv->dev; 2755 struct drm_device *dev = &dev_priv->drm;
2580 struct device *device = &dev->pdev->dev; 2756 struct device *device = &dev->pdev->dev;
2581 2757
2582 pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */ 2758 pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
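
A conversion that repeats throughout the intel_runtime_pm.c hunks above is from open-coded wait_for(I915_READ(reg) & mask, timeout) polls to intel_wait_for_register(dev_priv, reg, mask, value, timeout), which succeeds once the masked read of the register equals the expected value and reports a timeout otherwise. The toy program below (invented names, a fake register file, and counting instead of real sleeping) only illustrates that contract, not the upstream helper:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_mmio[16];			/* stand-in for the register file */

static uint32_t example_read(unsigned int reg)
{
	return fake_mmio[reg];
}

/* Poll until (read(reg) & mask) == value, or give up after timeout_ms tries. */
static int example_wait_for_register(unsigned int reg, uint32_t mask,
				     uint32_t value, unsigned int timeout_ms)
{
	unsigned int waited;

	for (waited = 0; waited <= timeout_ms; waited++) {
		if ((example_read(reg) & mask) == value)
			return 0;
		/* a real helper would sleep roughly 1ms between polls */
	}
	return -110;				/* -ETIMEDOUT */
}

int main(void)
{
	fake_mmio[3] = 0x80000000u;		/* pretend the power well reports "enabled" */

	if (example_wait_for_register(3, 0x80000000u, 0x80000000u, 20))
		fprintf(stderr, "Timeout enabling power well\n");
	else
		printf("power well enabled\n");
	return 0;
}
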
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 2128fae5687d..e378f35365a2 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -240,7 +240,7 @@ intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
240static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val) 240static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
241{ 241{
242 struct drm_device *dev = intel_sdvo->base.base.dev; 242 struct drm_device *dev = intel_sdvo->base.base.dev;
243 struct drm_i915_private *dev_priv = dev->dev_private; 243 struct drm_i915_private *dev_priv = to_i915(dev);
244 u32 bval = val, cval = val; 244 u32 bval = val, cval = val;
245 int i; 245 int i;
246 246
@@ -1195,7 +1195,7 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
1195static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder) 1195static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder)
1196{ 1196{
1197 struct drm_device *dev = intel_encoder->base.dev; 1197 struct drm_device *dev = intel_encoder->base.dev;
1198 struct drm_i915_private *dev_priv = dev->dev_private; 1198 struct drm_i915_private *dev_priv = to_i915(dev);
1199 struct intel_crtc *crtc = to_intel_crtc(intel_encoder->base.crtc); 1199 struct intel_crtc *crtc = to_intel_crtc(intel_encoder->base.crtc);
1200 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; 1200 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
1201 struct drm_display_mode *mode = &crtc->config->base.mode; 1201 struct drm_display_mode *mode = &crtc->config->base.mode;
@@ -1330,7 +1330,7 @@ static bool intel_sdvo_get_hw_state(struct intel_encoder *encoder,
1330 enum pipe *pipe) 1330 enum pipe *pipe)
1331{ 1331{
1332 struct drm_device *dev = encoder->base.dev; 1332 struct drm_device *dev = encoder->base.dev;
1333 struct drm_i915_private *dev_priv = dev->dev_private; 1333 struct drm_i915_private *dev_priv = to_i915(dev);
1334 struct intel_sdvo *intel_sdvo = to_sdvo(encoder); 1334 struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
1335 u16 active_outputs = 0; 1335 u16 active_outputs = 0;
1336 u32 tmp; 1336 u32 tmp;
@@ -1353,7 +1353,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
1353 struct intel_crtc_state *pipe_config) 1353 struct intel_crtc_state *pipe_config)
1354{ 1354{
1355 struct drm_device *dev = encoder->base.dev; 1355 struct drm_device *dev = encoder->base.dev;
1356 struct drm_i915_private *dev_priv = dev->dev_private; 1356 struct drm_i915_private *dev_priv = to_i915(dev);
1357 struct intel_sdvo *intel_sdvo = to_sdvo(encoder); 1357 struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
1358 struct intel_sdvo_dtd dtd; 1358 struct intel_sdvo_dtd dtd;
1359 int encoder_pixel_multiplier = 0; 1359 int encoder_pixel_multiplier = 0;
@@ -1436,7 +1436,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
1436 1436
1437static void intel_disable_sdvo(struct intel_encoder *encoder) 1437static void intel_disable_sdvo(struct intel_encoder *encoder)
1438{ 1438{
1439 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 1439 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1440 struct intel_sdvo *intel_sdvo = to_sdvo(encoder); 1440 struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
1441 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 1441 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
1442 u32 temp; 1442 u32 temp;
@@ -1471,7 +1471,7 @@ static void intel_disable_sdvo(struct intel_encoder *encoder)
1471 temp &= ~SDVO_ENABLE; 1471 temp &= ~SDVO_ENABLE;
1472 intel_sdvo_write_sdvox(intel_sdvo, temp); 1472 intel_sdvo_write_sdvox(intel_sdvo, temp);
1473 1473
1474 intel_wait_for_vblank_if_active(dev_priv->dev, PIPE_A); 1474 intel_wait_for_vblank_if_active(&dev_priv->drm, PIPE_A);
1475 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true); 1475 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
1476 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true); 1476 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
1477 } 1477 }
@@ -1489,7 +1489,7 @@ static void pch_post_disable_sdvo(struct intel_encoder *encoder)
1489static void intel_enable_sdvo(struct intel_encoder *encoder) 1489static void intel_enable_sdvo(struct intel_encoder *encoder)
1490{ 1490{
1491 struct drm_device *dev = encoder->base.dev; 1491 struct drm_device *dev = encoder->base.dev;
1492 struct drm_i915_private *dev_priv = dev->dev_private; 1492 struct drm_i915_private *dev_priv = to_i915(dev);
1493 struct intel_sdvo *intel_sdvo = to_sdvo(encoder); 1493 struct intel_sdvo *intel_sdvo = to_sdvo(encoder);
1494 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); 1494 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
1495 u32 temp; 1495 u32 temp;
@@ -1633,7 +1633,7 @@ intel_sdvo_get_edid(struct drm_connector *connector)
1633static struct edid * 1633static struct edid *
1634intel_sdvo_get_analog_edid(struct drm_connector *connector) 1634intel_sdvo_get_analog_edid(struct drm_connector *connector)
1635{ 1635{
1636 struct drm_i915_private *dev_priv = connector->dev->dev_private; 1636 struct drm_i915_private *dev_priv = to_i915(connector->dev);
1637 1637
1638 return drm_get_edid(connector, 1638 return drm_get_edid(connector,
1639 intel_gmbus_get_adapter(dev_priv, 1639 intel_gmbus_get_adapter(dev_priv,
@@ -1916,7 +1916,7 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
1916static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) 1916static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
1917{ 1917{
1918 struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); 1918 struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
1919 struct drm_i915_private *dev_priv = connector->dev->dev_private; 1919 struct drm_i915_private *dev_priv = to_i915(connector->dev);
1920 struct drm_display_mode *newmode; 1920 struct drm_display_mode *newmode;
1921 1921
1922 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", 1922 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
@@ -2001,7 +2001,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
2001{ 2001{
2002 struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); 2002 struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
2003 struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); 2003 struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
2004 struct drm_i915_private *dev_priv = connector->dev->dev_private; 2004 struct drm_i915_private *dev_priv = to_i915(connector->dev);
2005 uint16_t temp_value; 2005 uint16_t temp_value;
2006 uint8_t cmd; 2006 uint8_t cmd;
2007 int ret; 2007 int ret;
@@ -2177,12 +2177,39 @@ done:
2177#undef CHECK_PROPERTY 2177#undef CHECK_PROPERTY
2178} 2178}
2179 2179
2180static int
2181intel_sdvo_connector_register(struct drm_connector *connector)
2182{
2183 struct intel_sdvo *sdvo = intel_attached_sdvo(connector);
2184 int ret;
2185
2186 ret = intel_connector_register(connector);
2187 if (ret)
2188 return ret;
2189
2190 return sysfs_create_link(&connector->kdev->kobj,
2191 &sdvo->ddc.dev.kobj,
2192 sdvo->ddc.dev.kobj.name);
2193}
2194
2195static void
2196intel_sdvo_connector_unregister(struct drm_connector *connector)
2197{
2198 struct intel_sdvo *sdvo = intel_attached_sdvo(connector);
2199
2200 sysfs_remove_link(&connector->kdev->kobj,
2201 sdvo->ddc.dev.kobj.name);
2202 intel_connector_unregister(connector);
2203}
2204
2180static const struct drm_connector_funcs intel_sdvo_connector_funcs = { 2205static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
2181 .dpms = drm_atomic_helper_connector_dpms, 2206 .dpms = drm_atomic_helper_connector_dpms,
2182 .detect = intel_sdvo_detect, 2207 .detect = intel_sdvo_detect,
2183 .fill_modes = drm_helper_probe_single_connector_modes, 2208 .fill_modes = drm_helper_probe_single_connector_modes,
2184 .set_property = intel_sdvo_set_property, 2209 .set_property = intel_sdvo_set_property,
2185 .atomic_get_property = intel_connector_atomic_get_property, 2210 .atomic_get_property = intel_connector_atomic_get_property,
2211 .late_register = intel_sdvo_connector_register,
2212 .early_unregister = intel_sdvo_connector_unregister,
2186 .destroy = intel_sdvo_destroy, 2213 .destroy = intel_sdvo_destroy,
2187 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 2214 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
2188 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, 2215 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
@@ -2191,7 +2218,6 @@ static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
2191static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = { 2218static const struct drm_connector_helper_funcs intel_sdvo_connector_helper_funcs = {
2192 .get_modes = intel_sdvo_get_modes, 2219 .get_modes = intel_sdvo_get_modes,
2193 .mode_valid = intel_sdvo_mode_valid, 2220 .mode_valid = intel_sdvo_mode_valid,
2194 .best_encoder = intel_best_encoder,
2195}; 2221};
2196 2222
2197static void intel_sdvo_enc_destroy(struct drm_encoder *encoder) 2223static void intel_sdvo_enc_destroy(struct drm_encoder *encoder)
@@ -2312,7 +2338,7 @@ intel_sdvo_is_hdmi_connector(struct intel_sdvo *intel_sdvo, int device)
2312static u8 2338static u8
2313intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo) 2339intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo)
2314{ 2340{
2315 struct drm_i915_private *dev_priv = dev->dev_private; 2341 struct drm_i915_private *dev_priv = to_i915(dev);
2316 struct sdvo_device_mapping *my_mapping, *other_mapping; 2342 struct sdvo_device_mapping *my_mapping, *other_mapping;
2317 2343
2318 if (sdvo->port == PORT_B) { 2344 if (sdvo->port == PORT_B) {
@@ -2346,20 +2372,6 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, struct intel_sdvo *sdvo)
2346 return 0x72; 2372 return 0x72;
2347} 2373}
2348 2374
2349static void
2350intel_sdvo_connector_unregister(struct intel_connector *intel_connector)
2351{
2352 struct drm_connector *drm_connector;
2353 struct intel_sdvo *sdvo_encoder;
2354
2355 drm_connector = &intel_connector->base;
2356 sdvo_encoder = intel_attached_sdvo(&intel_connector->base);
2357
2358 sysfs_remove_link(&drm_connector->kdev->kobj,
2359 sdvo_encoder->ddc.dev.kobj.name);
2360 intel_connector_unregister(intel_connector);
2361}
2362
2363static int 2375static int
2364intel_sdvo_connector_init(struct intel_sdvo_connector *connector, 2376intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
2365 struct intel_sdvo *encoder) 2377 struct intel_sdvo *encoder)
@@ -2382,27 +2394,10 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
2382 connector->base.base.doublescan_allowed = 0; 2394 connector->base.base.doublescan_allowed = 0;
2383 connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB; 2395 connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
2384 connector->base.get_hw_state = intel_sdvo_connector_get_hw_state; 2396 connector->base.get_hw_state = intel_sdvo_connector_get_hw_state;
2385 connector->base.unregister = intel_sdvo_connector_unregister;
2386 2397
2387 intel_connector_attach_encoder(&connector->base, &encoder->base); 2398 intel_connector_attach_encoder(&connector->base, &encoder->base);
2388 ret = drm_connector_register(drm_connector);
2389 if (ret < 0)
2390 goto err1;
2391
2392 ret = sysfs_create_link(&drm_connector->kdev->kobj,
2393 &encoder->ddc.dev.kobj,
2394 encoder->ddc.dev.kobj.name);
2395 if (ret < 0)
2396 goto err2;
2397 2399
2398 return 0; 2400 return 0;
2399
2400err2:
2401 drm_connector_unregister(drm_connector);
2402err1:
2403 drm_connector_cleanup(drm_connector);
2404
2405 return ret;
2406} 2401}
2407 2402
2408static void 2403static void
@@ -2529,7 +2524,6 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type)
2529 return true; 2524 return true;
2530 2525
2531err: 2526err:
2532 drm_connector_unregister(connector);
2533 intel_sdvo_destroy(connector); 2527 intel_sdvo_destroy(connector);
2534 return false; 2528 return false;
2535} 2529}
@@ -2608,7 +2602,6 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device)
2608 return true; 2602 return true;
2609 2603
2610err: 2604err:
2611 drm_connector_unregister(connector);
2612 intel_sdvo_destroy(connector); 2605 intel_sdvo_destroy(connector);
2613 return false; 2606 return false;
2614} 2607}
@@ -2959,7 +2952,7 @@ static void assert_sdvo_port_valid(const struct drm_i915_private *dev_priv,
2959bool intel_sdvo_init(struct drm_device *dev, 2952bool intel_sdvo_init(struct drm_device *dev,
2960 i915_reg_t sdvo_reg, enum port port) 2953 i915_reg_t sdvo_reg, enum port port)
2961{ 2954{
2962 struct drm_i915_private *dev_priv = dev->dev_private; 2955 struct drm_i915_private *dev_priv = to_i915(dev);
2963 struct intel_encoder *intel_encoder; 2956 struct intel_encoder *intel_encoder;
2964 struct intel_sdvo *intel_sdvo; 2957 struct intel_sdvo *intel_sdvo;
2965 int i; 2958 int i;
@@ -2981,7 +2974,7 @@ bool intel_sdvo_init(struct drm_device *dev,
2981 intel_encoder = &intel_sdvo->base; 2974 intel_encoder = &intel_sdvo->base;
2982 intel_encoder->type = INTEL_OUTPUT_SDVO; 2975 intel_encoder->type = INTEL_OUTPUT_SDVO;
2983 drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0, 2976 drm_encoder_init(dev, &intel_encoder->base, &intel_sdvo_enc_funcs, 0,
2984 NULL); 2977 "SDVO %c", port_name(port));
2985 2978
2986 /* Read the regs to test if we can talk to the device */ 2979 /* Read the regs to test if we can talk to the device */
2987 for (i = 0; i < 0x40; i++) { 2980 for (i = 0; i < 0x40; i++) {
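The SDVO hunks above illustrate the connector register/unregister rework in this series: the DDC sysfs symlink moves out of intel_sdvo_connector_init() into the new .late_register/.early_unregister hooks, and the explicit drm_connector_register()/drm_connector_unregister() calls disappear from the init and error paths. A minimal sketch of the resulting pattern, with my_connector_register/my_connector_unregister as illustrative names; the DRM core is expected to call .late_register from drm_connector_register() once the connector's sysfs node exists, and .early_unregister from drm_connector_unregister() before it goes away:

static int my_connector_register(struct drm_connector *connector)
{
	int ret;

	/* common i915 registration first ... */
	ret = intel_connector_register(connector);
	if (ret)
		return ret;

	/* ... then per-connector extras such as the DDC symlink */
	return 0;
}

static void my_connector_unregister(struct drm_connector *connector)
{
	/* tear down the extras first, common part last */
	intel_connector_unregister(connector);
}

static const struct drm_connector_funcs my_connector_funcs = {
	/* .detect, .fill_modes, .destroy, atomic state hooks ... */
	.late_register = my_connector_register,
	.early_unregister = my_connector_unregister,
};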
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index c3998188cf35..1a840bf92eea 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -51,7 +51,9 @@ static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
51 51
52 WARN_ON(!mutex_is_locked(&dev_priv->sb_lock)); 52 WARN_ON(!mutex_is_locked(&dev_priv->sb_lock));
53 53
54 if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0, 5)) { 54 if (intel_wait_for_register(dev_priv,
55 VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
56 5)) {
55 DRM_DEBUG_DRIVER("IOSF sideband idle wait (%s) timed out\n", 57 DRM_DEBUG_DRIVER("IOSF sideband idle wait (%s) timed out\n",
56 is_read ? "read" : "write"); 58 is_read ? "read" : "write");
57 return -EAGAIN; 59 return -EAGAIN;
@@ -62,7 +64,9 @@ static int vlv_sideband_rw(struct drm_i915_private *dev_priv, u32 devfn,
62 I915_WRITE(VLV_IOSF_DATA, *val); 64 I915_WRITE(VLV_IOSF_DATA, *val);
63 I915_WRITE(VLV_IOSF_DOORBELL_REQ, cmd); 65 I915_WRITE(VLV_IOSF_DOORBELL_REQ, cmd);
64 66
65 if (wait_for((I915_READ(VLV_IOSF_DOORBELL_REQ) & IOSF_SB_BUSY) == 0, 5)) { 67 if (intel_wait_for_register(dev_priv,
68 VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
69 5)) {
66 DRM_DEBUG_DRIVER("IOSF sideband finish wait (%s) timed out\n", 70 DRM_DEBUG_DRIVER("IOSF sideband finish wait (%s) timed out\n",
67 is_read ? "read" : "write"); 71 is_read ? "read" : "write");
68 return -ETIMEDOUT; 72 return -ETIMEDOUT;
@@ -202,8 +206,9 @@ u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
202 u32 value = 0; 206 u32 value = 0;
203 WARN_ON(!mutex_is_locked(&dev_priv->sb_lock)); 207 WARN_ON(!mutex_is_locked(&dev_priv->sb_lock));
204 208
205 if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 209 if (intel_wait_for_register(dev_priv,
206 100)) { 210 SBI_CTL_STAT, SBI_BUSY, 0,
211 100)) {
207 DRM_ERROR("timeout waiting for SBI to become ready\n"); 212 DRM_ERROR("timeout waiting for SBI to become ready\n");
208 return 0; 213 return 0;
209 } 214 }
@@ -216,8 +221,11 @@ u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
216 value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD; 221 value = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
217 I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY); 222 I915_WRITE(SBI_CTL_STAT, value | SBI_BUSY);
218 223
219 if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0, 224 if (intel_wait_for_register(dev_priv,
220 100)) { 225 SBI_CTL_STAT,
226 SBI_BUSY | SBI_RESPONSE_FAIL,
227 0,
228 100)) {
221 DRM_ERROR("timeout waiting for SBI to complete read transaction\n"); 229 DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
222 return 0; 230 return 0;
223 } 231 }
@@ -232,8 +240,9 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
232 240
233 WARN_ON(!mutex_is_locked(&dev_priv->sb_lock)); 241 WARN_ON(!mutex_is_locked(&dev_priv->sb_lock));
234 242
235 if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0, 243 if (intel_wait_for_register(dev_priv,
236 100)) { 244 SBI_CTL_STAT, SBI_BUSY, 0,
245 100)) {
237 DRM_ERROR("timeout waiting for SBI to become ready\n"); 246 DRM_ERROR("timeout waiting for SBI to become ready\n");
238 return; 247 return;
239 } 248 }
@@ -247,8 +256,11 @@ void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
247 tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR; 256 tmp = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IOWR;
248 I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp); 257 I915_WRITE(SBI_CTL_STAT, SBI_BUSY | tmp);
249 258
250 if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0, 259 if (intel_wait_for_register(dev_priv,
251 100)) { 260 SBI_CTL_STAT,
261 SBI_BUSY | SBI_RESPONSE_FAIL,
262 0,
263 100)) {
252 DRM_ERROR("timeout waiting for SBI to complete write transaction\n"); 264 DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
253 return; 265 return;
254 } 266 }
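The sideband conversions above all follow the same shape: an open-coded wait_for((I915_READ(reg) & mask) == 0, ms) poll becomes a call to intel_wait_for_register(), the helper added later in this series (see the intel_uncore.c hunks below). A rough before/after sketch, using the made-up EXAMPLE_STATUS_REG/EXAMPLE_BUSY names rather than a real register:

/* before: open-coded poll until the busy bit clears */
if (wait_for((I915_READ(EXAMPLE_STATUS_REG) & EXAMPLE_BUSY) == 0, 100))
	DRM_ERROR("timeout waiting for example unit to go idle\n");

/* after: the helper expresses "wait until (read & mask) == value,
 * or fail after timeout_ms milliseconds" in one call */
if (intel_wait_for_register(dev_priv,
			    EXAMPLE_STATUS_REG, EXAMPLE_BUSY, 0,
			    100))
	DRM_ERROR("timeout waiting for example unit to go idle\n");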
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 0f3e2303e0e9..0de935ad01c2 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -80,9 +80,7 @@ static int usecs_to_scanlines(const struct drm_display_mode *adjusted_mode,
80 */ 80 */
81void intel_pipe_update_start(struct intel_crtc *crtc) 81void intel_pipe_update_start(struct intel_crtc *crtc)
82{ 82{
83 struct drm_device *dev = crtc->base.dev;
84 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; 83 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
85 enum pipe pipe = crtc->pipe;
86 long timeout = msecs_to_jiffies_timeout(1); 84 long timeout = msecs_to_jiffies_timeout(1);
87 int scanline, min, max, vblank_start; 85 int scanline, min, max, vblank_start;
88 wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base); 86 wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
@@ -139,8 +137,7 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
139 137
140 crtc->debug.scanline_start = scanline; 138 crtc->debug.scanline_start = scanline;
141 crtc->debug.start_vbl_time = ktime_get(); 139 crtc->debug.start_vbl_time = ktime_get();
142 crtc->debug.start_vbl_count = 140 crtc->debug.start_vbl_count = intel_crtc_get_vblank_counter(crtc);
143 dev->driver->get_vblank_counter(dev, pipe);
144 141
145 trace_i915_pipe_update_vblank_evaded(crtc); 142 trace_i915_pipe_update_vblank_evaded(crtc);
146} 143}
@@ -154,16 +151,35 @@ void intel_pipe_update_start(struct intel_crtc *crtc)
154 * re-enables interrupts and verifies the update was actually completed 151 * re-enables interrupts and verifies the update was actually completed
155 * before a vblank using the value of @start_vbl_count. 152 * before a vblank using the value of @start_vbl_count.
156 */ 153 */
157void intel_pipe_update_end(struct intel_crtc *crtc) 154void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work)
158{ 155{
159 struct drm_device *dev = crtc->base.dev;
160 enum pipe pipe = crtc->pipe; 156 enum pipe pipe = crtc->pipe;
161 int scanline_end = intel_get_crtc_scanline(crtc); 157 int scanline_end = intel_get_crtc_scanline(crtc);
162 u32 end_vbl_count = dev->driver->get_vblank_counter(dev, pipe); 158 u32 end_vbl_count = intel_crtc_get_vblank_counter(crtc);
163 ktime_t end_vbl_time = ktime_get(); 159 ktime_t end_vbl_time = ktime_get();
164 160
161 if (work) {
162 work->flip_queued_vblank = end_vbl_count;
163 smp_mb__before_atomic();
164 atomic_set(&work->pending, 1);
165 }
166
165 trace_i915_pipe_update_end(crtc, end_vbl_count, scanline_end); 167 trace_i915_pipe_update_end(crtc, end_vbl_count, scanline_end);
166 168
169 /* We're still in the vblank-evade critical section, this can't race.
170 * Would be slightly nice to just grab the vblank count and arm the
171 * event outside of the critical section - the spinlock might spin for a
172 * while ... */
173 if (crtc->base.state->event) {
174 WARN_ON(drm_crtc_vblank_get(&crtc->base) != 0);
175
176 spin_lock(&crtc->base.dev->event_lock);
177 drm_crtc_arm_vblank_event(&crtc->base, crtc->base.state->event);
178 spin_unlock(&crtc->base.dev->event_lock);
179
180 crtc->base.state->event = NULL;
181 }
182
167 local_irq_enable(); 183 local_irq_enable();
168 184
169 if (crtc->debug.start_vbl_count && 185 if (crtc->debug.start_vbl_count &&
@@ -183,7 +199,7 @@ skl_update_plane(struct drm_plane *drm_plane,
183 const struct intel_plane_state *plane_state) 199 const struct intel_plane_state *plane_state)
184{ 200{
185 struct drm_device *dev = drm_plane->dev; 201 struct drm_device *dev = drm_plane->dev;
186 struct drm_i915_private *dev_priv = dev->dev_private; 202 struct drm_i915_private *dev_priv = to_i915(dev);
187 struct intel_plane *intel_plane = to_intel_plane(drm_plane); 203 struct intel_plane *intel_plane = to_intel_plane(drm_plane);
188 struct drm_framebuffer *fb = plane_state->base.fb; 204 struct drm_framebuffer *fb = plane_state->base.fb;
189 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 205 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
@@ -203,8 +219,6 @@ skl_update_plane(struct drm_plane *drm_plane,
203 uint32_t y = plane_state->src.y1 >> 16; 219 uint32_t y = plane_state->src.y1 >> 16;
204 uint32_t src_w = drm_rect_width(&plane_state->src) >> 16; 220 uint32_t src_w = drm_rect_width(&plane_state->src) >> 16;
205 uint32_t src_h = drm_rect_height(&plane_state->src) >> 16; 221 uint32_t src_h = drm_rect_height(&plane_state->src) >> 16;
206 const struct intel_scaler *scaler =
207 &crtc_state->scaler_state.scalers[plane_state->scaler_id];
208 222
209 plane_ctl = PLANE_CTL_ENABLE | 223 plane_ctl = PLANE_CTL_ENABLE |
210 PLANE_CTL_PIPE_GAMMA_ENABLE | 224 PLANE_CTL_PIPE_GAMMA_ENABLE |
@@ -260,13 +274,16 @@ skl_update_plane(struct drm_plane *drm_plane,
260 274
261 /* program plane scaler */ 275 /* program plane scaler */
262 if (plane_state->scaler_id >= 0) { 276 if (plane_state->scaler_id >= 0) {
263 uint32_t ps_ctrl = 0;
264 int scaler_id = plane_state->scaler_id; 277 int scaler_id = plane_state->scaler_id;
278 const struct intel_scaler *scaler;
265 279
266 DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n", plane, 280 DRM_DEBUG_KMS("plane = %d PS_PLANE_SEL(plane) = 0x%x\n", plane,
267 PS_PLANE_SEL(plane)); 281 PS_PLANE_SEL(plane));
268 ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(plane) | scaler->mode; 282
269 I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl); 283 scaler = &crtc_state->scaler_state.scalers[scaler_id];
284
285 I915_WRITE(SKL_PS_CTRL(pipe, scaler_id),
286 PS_SCALER_EN | PS_PLANE_SEL(plane) | scaler->mode);
270 I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0); 287 I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
271 I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y); 288 I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
272 I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id), 289 I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id),
@@ -286,7 +303,7 @@ static void
286skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc) 303skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
287{ 304{
288 struct drm_device *dev = dplane->dev; 305 struct drm_device *dev = dplane->dev;
289 struct drm_i915_private *dev_priv = dev->dev_private; 306 struct drm_i915_private *dev_priv = to_i915(dev);
290 struct intel_plane *intel_plane = to_intel_plane(dplane); 307 struct intel_plane *intel_plane = to_intel_plane(dplane);
291 const int pipe = intel_plane->pipe; 308 const int pipe = intel_plane->pipe;
292 const int plane = intel_plane->plane + 1; 309 const int plane = intel_plane->plane + 1;
@@ -300,7 +317,7 @@ skl_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
300static void 317static void
301chv_update_csc(struct intel_plane *intel_plane, uint32_t format) 318chv_update_csc(struct intel_plane *intel_plane, uint32_t format)
302{ 319{
303 struct drm_i915_private *dev_priv = intel_plane->base.dev->dev_private; 320 struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
304 int plane = intel_plane->plane; 321 int plane = intel_plane->plane;
305 322
306 /* Seems RGB data bypasses the CSC always */ 323 /* Seems RGB data bypasses the CSC always */
@@ -342,7 +359,7 @@ vlv_update_plane(struct drm_plane *dplane,
342 const struct intel_plane_state *plane_state) 359 const struct intel_plane_state *plane_state)
343{ 360{
344 struct drm_device *dev = dplane->dev; 361 struct drm_device *dev = dplane->dev;
345 struct drm_i915_private *dev_priv = dev->dev_private; 362 struct drm_i915_private *dev_priv = to_i915(dev);
346 struct intel_plane *intel_plane = to_intel_plane(dplane); 363 struct intel_plane *intel_plane = to_intel_plane(dplane);
347 struct drm_framebuffer *fb = plane_state->base.fb; 364 struct drm_framebuffer *fb = plane_state->base.fb;
348 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 365 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
@@ -468,7 +485,7 @@ static void
468vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc) 485vlv_disable_plane(struct drm_plane *dplane, struct drm_crtc *crtc)
469{ 486{
470 struct drm_device *dev = dplane->dev; 487 struct drm_device *dev = dplane->dev;
471 struct drm_i915_private *dev_priv = dev->dev_private; 488 struct drm_i915_private *dev_priv = to_i915(dev);
472 struct intel_plane *intel_plane = to_intel_plane(dplane); 489 struct intel_plane *intel_plane = to_intel_plane(dplane);
473 int pipe = intel_plane->pipe; 490 int pipe = intel_plane->pipe;
474 int plane = intel_plane->plane; 491 int plane = intel_plane->plane;
@@ -485,7 +502,7 @@ ivb_update_plane(struct drm_plane *plane,
485 const struct intel_plane_state *plane_state) 502 const struct intel_plane_state *plane_state)
486{ 503{
487 struct drm_device *dev = plane->dev; 504 struct drm_device *dev = plane->dev;
488 struct drm_i915_private *dev_priv = dev->dev_private; 505 struct drm_i915_private *dev_priv = to_i915(dev);
489 struct intel_plane *intel_plane = to_intel_plane(plane); 506 struct intel_plane *intel_plane = to_intel_plane(plane);
490 struct drm_framebuffer *fb = plane_state->base.fb; 507 struct drm_framebuffer *fb = plane_state->base.fb;
491 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 508 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
@@ -607,7 +624,7 @@ static void
607ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc) 624ivb_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
608{ 625{
609 struct drm_device *dev = plane->dev; 626 struct drm_device *dev = plane->dev;
610 struct drm_i915_private *dev_priv = dev->dev_private; 627 struct drm_i915_private *dev_priv = to_i915(dev);
611 struct intel_plane *intel_plane = to_intel_plane(plane); 628 struct intel_plane *intel_plane = to_intel_plane(plane);
612 int pipe = intel_plane->pipe; 629 int pipe = intel_plane->pipe;
613 630
@@ -626,7 +643,7 @@ ilk_update_plane(struct drm_plane *plane,
626 const struct intel_plane_state *plane_state) 643 const struct intel_plane_state *plane_state)
627{ 644{
628 struct drm_device *dev = plane->dev; 645 struct drm_device *dev = plane->dev;
629 struct drm_i915_private *dev_priv = dev->dev_private; 646 struct drm_i915_private *dev_priv = to_i915(dev);
630 struct intel_plane *intel_plane = to_intel_plane(plane); 647 struct intel_plane *intel_plane = to_intel_plane(plane);
631 struct drm_framebuffer *fb = plane_state->base.fb; 648 struct drm_framebuffer *fb = plane_state->base.fb;
632 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 649 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
@@ -736,7 +753,7 @@ static void
736ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc) 753ilk_disable_plane(struct drm_plane *plane, struct drm_crtc *crtc)
737{ 754{
738 struct drm_device *dev = plane->dev; 755 struct drm_device *dev = plane->dev;
739 struct drm_i915_private *dev_priv = dev->dev_private; 756 struct drm_i915_private *dev_priv = to_i915(dev);
740 struct intel_plane *intel_plane = to_intel_plane(plane); 757 struct intel_plane *intel_plane = to_intel_plane(plane);
741 int pipe = intel_plane->pipe; 758 int pipe = intel_plane->pipe;
742 759
@@ -1111,10 +1128,18 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
1111 1128
1112 possible_crtcs = (1 << pipe); 1129 possible_crtcs = (1 << pipe);
1113 1130
1114 ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs, 1131 if (INTEL_INFO(dev)->gen >= 9)
1115 &intel_plane_funcs, 1132 ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
1116 plane_formats, num_plane_formats, 1133 &intel_plane_funcs,
1117 DRM_PLANE_TYPE_OVERLAY, NULL); 1134 plane_formats, num_plane_formats,
1135 DRM_PLANE_TYPE_OVERLAY,
1136 "plane %d%c", plane + 2, pipe_name(pipe));
1137 else
1138 ret = drm_universal_plane_init(dev, &intel_plane->base, possible_crtcs,
1139 &intel_plane_funcs,
1140 plane_formats, num_plane_formats,
1141 DRM_PLANE_TYPE_OVERLAY,
1142 "sprite %c", sprite_name(pipe, plane));
1118 if (ret) 1143 if (ret)
1119 goto fail; 1144 goto fail;
1120 1145
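Besides the to_i915() and vblank-counter cleanups, the last sprite hunk starts passing a printf-style name to drm_universal_plane_init(), so gen9+ universal planes get "plane 2A"-style names while older parts keep the familiar "sprite A" naming in logs and debugfs. A sketch of the call shape (my_plane_funcs and my_formats are placeholders, not from the patch):

/* The trailing format string and arguments become the plane's name;
 * everything before it matches the pre-existing arguments. */
ret = drm_universal_plane_init(dev, &intel_plane->base, 1 << pipe,
			       &my_plane_funcs,
			       my_formats, ARRAY_SIZE(my_formats),
			       DRM_PLANE_TYPE_OVERLAY,
			       "sprite %c", sprite_name(pipe, plane));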
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 223129d3c765..49136ad5473e 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -826,7 +826,7 @@ static bool
826intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe) 826intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe)
827{ 827{
828 struct drm_device *dev = encoder->base.dev; 828 struct drm_device *dev = encoder->base.dev;
829 struct drm_i915_private *dev_priv = dev->dev_private; 829 struct drm_i915_private *dev_priv = to_i915(dev);
830 u32 tmp = I915_READ(TV_CTL); 830 u32 tmp = I915_READ(TV_CTL);
831 831
832 if (!(tmp & TV_ENC_ENABLE)) 832 if (!(tmp & TV_ENC_ENABLE))
@@ -841,7 +841,7 @@ static void
841intel_enable_tv(struct intel_encoder *encoder) 841intel_enable_tv(struct intel_encoder *encoder)
842{ 842{
843 struct drm_device *dev = encoder->base.dev; 843 struct drm_device *dev = encoder->base.dev;
844 struct drm_i915_private *dev_priv = dev->dev_private; 844 struct drm_i915_private *dev_priv = to_i915(dev);
845 845
846 /* Prevents vblank waits from timing out in intel_tv_detect_type() */ 846 /* Prevents vblank waits from timing out in intel_tv_detect_type() */
847 intel_wait_for_vblank(encoder->base.dev, 847 intel_wait_for_vblank(encoder->base.dev,
@@ -854,7 +854,7 @@ static void
854intel_disable_tv(struct intel_encoder *encoder) 854intel_disable_tv(struct intel_encoder *encoder)
855{ 855{
856 struct drm_device *dev = encoder->base.dev; 856 struct drm_device *dev = encoder->base.dev;
857 struct drm_i915_private *dev_priv = dev->dev_private; 857 struct drm_i915_private *dev_priv = to_i915(dev);
858 858
859 I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE); 859 I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE);
860} 860}
@@ -1013,7 +1013,7 @@ static void set_color_conversion(struct drm_i915_private *dev_priv,
1013static void intel_tv_pre_enable(struct intel_encoder *encoder) 1013static void intel_tv_pre_enable(struct intel_encoder *encoder)
1014{ 1014{
1015 struct drm_device *dev = encoder->base.dev; 1015 struct drm_device *dev = encoder->base.dev;
1016 struct drm_i915_private *dev_priv = dev->dev_private; 1016 struct drm_i915_private *dev_priv = to_i915(dev);
1017 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); 1017 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
1018 struct intel_tv *intel_tv = enc_to_tv(encoder); 1018 struct intel_tv *intel_tv = enc_to_tv(encoder);
1019 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv); 1019 const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
@@ -1173,7 +1173,7 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
1173 struct drm_crtc *crtc = connector->state->crtc; 1173 struct drm_crtc *crtc = connector->state->crtc;
1174 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1174 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1175 struct drm_device *dev = connector->dev; 1175 struct drm_device *dev = connector->dev;
1176 struct drm_i915_private *dev_priv = dev->dev_private; 1176 struct drm_i915_private *dev_priv = to_i915(dev);
1177 u32 tv_ctl, save_tv_ctl; 1177 u32 tv_ctl, save_tv_ctl;
1178 u32 tv_dac, save_tv_dac; 1178 u32 tv_dac, save_tv_dac;
1179 int type; 1179 int type;
@@ -1501,6 +1501,8 @@ out:
1501static const struct drm_connector_funcs intel_tv_connector_funcs = { 1501static const struct drm_connector_funcs intel_tv_connector_funcs = {
1502 .dpms = drm_atomic_helper_connector_dpms, 1502 .dpms = drm_atomic_helper_connector_dpms,
1503 .detect = intel_tv_detect, 1503 .detect = intel_tv_detect,
1504 .late_register = intel_connector_register,
1505 .early_unregister = intel_connector_unregister,
1504 .destroy = intel_tv_destroy, 1506 .destroy = intel_tv_destroy,
1505 .set_property = intel_tv_set_property, 1507 .set_property = intel_tv_set_property,
1506 .atomic_get_property = intel_connector_atomic_get_property, 1508 .atomic_get_property = intel_connector_atomic_get_property,
@@ -1512,7 +1514,6 @@ static const struct drm_connector_funcs intel_tv_connector_funcs = {
1512static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = { 1514static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = {
1513 .mode_valid = intel_tv_mode_valid, 1515 .mode_valid = intel_tv_mode_valid,
1514 .get_modes = intel_tv_get_modes, 1516 .get_modes = intel_tv_get_modes,
1515 .best_encoder = intel_best_encoder,
1516}; 1517};
1517 1518
1518static const struct drm_encoder_funcs intel_tv_enc_funcs = { 1519static const struct drm_encoder_funcs intel_tv_enc_funcs = {
@@ -1522,7 +1523,7 @@ static const struct drm_encoder_funcs intel_tv_enc_funcs = {
1522void 1523void
1523intel_tv_init(struct drm_device *dev) 1524intel_tv_init(struct drm_device *dev)
1524{ 1525{
1525 struct drm_i915_private *dev_priv = dev->dev_private; 1526 struct drm_i915_private *dev_priv = to_i915(dev);
1526 struct drm_connector *connector; 1527 struct drm_connector *connector;
1527 struct intel_tv *intel_tv; 1528 struct intel_tv *intel_tv;
1528 struct intel_encoder *intel_encoder; 1529 struct intel_encoder *intel_encoder;
@@ -1591,7 +1592,7 @@ intel_tv_init(struct drm_device *dev)
1591 DRM_MODE_CONNECTOR_SVIDEO); 1592 DRM_MODE_CONNECTOR_SVIDEO);
1592 1593
1593 drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs, 1594 drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs,
1594 DRM_MODE_ENCODER_TVDAC, NULL); 1595 DRM_MODE_ENCODER_TVDAC, "TV");
1595 1596
1596 intel_encoder->compute_config = intel_tv_compute_config; 1597 intel_encoder->compute_config = intel_tv_compute_config;
1597 intel_encoder->get_config = intel_tv_get_config; 1598 intel_encoder->get_config = intel_tv_get_config;
@@ -1600,7 +1601,6 @@ intel_tv_init(struct drm_device *dev)
1600 intel_encoder->disable = intel_disable_tv; 1601 intel_encoder->disable = intel_disable_tv;
1601 intel_encoder->get_hw_state = intel_tv_get_hw_state; 1602 intel_encoder->get_hw_state = intel_tv_get_hw_state;
1602 intel_connector->get_hw_state = intel_connector_get_hw_state; 1603 intel_connector->get_hw_state = intel_connector_get_hw_state;
1603 intel_connector->unregister = intel_connector_unregister;
1604 1604
1605 intel_connector_attach_encoder(intel_connector, intel_encoder); 1605 intel_connector_attach_encoder(intel_connector, intel_encoder);
1606 intel_encoder->type = INTEL_OUTPUT_TVOUT; 1606 intel_encoder->type = INTEL_OUTPUT_TVOUT;
@@ -1642,5 +1642,4 @@ intel_tv_init(struct drm_device *dev)
1642 drm_object_attach_property(&connector->base, 1642 drm_object_attach_property(&connector->base,
1643 dev->mode_config.tv_bottom_margin_property, 1643 dev->mode_config.tv_bottom_margin_property,
1644 intel_tv->margin[TV_MARGIN_BOTTOM]); 1644 intel_tv->margin[TV_MARGIN_BOTTOM]);
1645 drm_connector_register(connector);
1646} 1645}
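Both the SDVO and TV connectors also drop their .best_encoder helper here. With the atomic helpers, a NULL .best_encoder (and NULL .atomic_best_encoder) is expected to fall back to drm_atomic_helper_best_encoder(), which simply picks the connector's one attached encoder, so a fixed 1:1 connector/encoder mapping no longer needs the hook at all. Roughly (my_get_modes/my_mode_valid are placeholders):

static const struct drm_connector_helper_funcs my_connector_helper_funcs = {
	.get_modes = my_get_modes,
	.mode_valid = my_mode_valid,
	/* no .best_encoder: the atomic helpers use the only attached encoder */
};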
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 4f1dfe616856..ff80a81b1a84 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -248,9 +248,9 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
248 return HRTIMER_NORESTART; 248 return HRTIMER_NORESTART;
249} 249}
250 250
251void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore) 251void intel_uncore_forcewake_reset(struct drm_i915_private *dev_priv,
252 bool restore)
252{ 253{
253 struct drm_i915_private *dev_priv = dev->dev_private;
254 unsigned long irqflags; 254 unsigned long irqflags;
255 struct intel_uncore_forcewake_domain *domain; 255 struct intel_uncore_forcewake_domain *domain;
256 int retry_count = 100; 256 int retry_count = 100;
@@ -304,7 +304,7 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
304 if (fw) 304 if (fw)
305 dev_priv->uncore.funcs.force_wake_get(dev_priv, fw); 305 dev_priv->uncore.funcs.force_wake_get(dev_priv, fw);
306 306
307 if (IS_GEN6(dev) || IS_GEN7(dev)) 307 if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
308 dev_priv->uncore.fifo_count = 308 dev_priv->uncore.fifo_count =
309 fifo_free_entries(dev_priv); 309 fifo_free_entries(dev_priv);
310 } 310 }
@@ -400,43 +400,42 @@ check_for_unclaimed_mmio(struct drm_i915_private *dev_priv)
400 return false; 400 return false;
401} 401}
402 402
403static void __intel_uncore_early_sanitize(struct drm_device *dev, 403static void __intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
404 bool restore_forcewake) 404 bool restore_forcewake)
405{ 405{
406 struct drm_i915_private *dev_priv = dev->dev_private;
407
408 /* clear out unclaimed reg detection bit */ 406 /* clear out unclaimed reg detection bit */
409 if (check_for_unclaimed_mmio(dev_priv)) 407 if (check_for_unclaimed_mmio(dev_priv))
410 DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n"); 408 DRM_DEBUG("unclaimed mmio detected on uncore init, clearing\n");
411 409
412 /* clear out old GT FIFO errors */ 410 /* clear out old GT FIFO errors */
413 if (IS_GEN6(dev) || IS_GEN7(dev)) 411 if (IS_GEN6(dev_priv) || IS_GEN7(dev_priv))
414 __raw_i915_write32(dev_priv, GTFIFODBG, 412 __raw_i915_write32(dev_priv, GTFIFODBG,
415 __raw_i915_read32(dev_priv, GTFIFODBG)); 413 __raw_i915_read32(dev_priv, GTFIFODBG));
416 414
417 /* WaDisableShadowRegForCpd:chv */ 415 /* WaDisableShadowRegForCpd:chv */
418 if (IS_CHERRYVIEW(dev)) { 416 if (IS_CHERRYVIEW(dev_priv)) {
419 __raw_i915_write32(dev_priv, GTFIFOCTL, 417 __raw_i915_write32(dev_priv, GTFIFOCTL,
420 __raw_i915_read32(dev_priv, GTFIFOCTL) | 418 __raw_i915_read32(dev_priv, GTFIFOCTL) |
421 GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL | 419 GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
422 GT_FIFO_CTL_RC6_POLICY_STALL); 420 GT_FIFO_CTL_RC6_POLICY_STALL);
423 } 421 }
424 422
425 intel_uncore_forcewake_reset(dev, restore_forcewake); 423 intel_uncore_forcewake_reset(dev_priv, restore_forcewake);
426} 424}
427 425
428void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake) 426void intel_uncore_early_sanitize(struct drm_i915_private *dev_priv,
427 bool restore_forcewake)
429{ 428{
430 __intel_uncore_early_sanitize(dev, restore_forcewake); 429 __intel_uncore_early_sanitize(dev_priv, restore_forcewake);
431 i915_check_and_clear_faults(dev); 430 i915_check_and_clear_faults(dev_priv);
432} 431}
433 432
434void intel_uncore_sanitize(struct drm_device *dev) 433void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
435{ 434{
436 i915.enable_rc6 = sanitize_rc6_option(dev, i915.enable_rc6); 435 i915.enable_rc6 = sanitize_rc6_option(dev_priv, i915.enable_rc6);
437 436
438 /* BIOS often leaves RC6 enabled, but disable it for hw init */ 437 /* BIOS often leaves RC6 enabled, but disable it for hw init */
439 intel_disable_gt_powersave(dev); 438 intel_disable_gt_powersave(dev_priv);
440} 439}
441 440
442static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv, 441static void __intel_uncore_forcewake_get(struct drm_i915_private *dev_priv,
@@ -1233,14 +1232,12 @@ static void fw_domain_init(struct drm_i915_private *dev_priv,
1233 fw_domain_reset(d); 1232 fw_domain_reset(d);
1234} 1233}
1235 1234
1236static void intel_uncore_fw_domains_init(struct drm_device *dev) 1235static void intel_uncore_fw_domains_init(struct drm_i915_private *dev_priv)
1237{ 1236{
1238 struct drm_i915_private *dev_priv = dev->dev_private;
1239
1240 if (INTEL_INFO(dev_priv)->gen <= 5) 1237 if (INTEL_INFO(dev_priv)->gen <= 5)
1241 return; 1238 return;
1242 1239
1243 if (IS_GEN9(dev)) { 1240 if (IS_GEN9(dev_priv)) {
1244 dev_priv->uncore.funcs.force_wake_get = fw_domains_get; 1241 dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
1245 dev_priv->uncore.funcs.force_wake_put = fw_domains_put; 1242 dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1246 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, 1243 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
@@ -1251,9 +1248,9 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
1251 FORCEWAKE_ACK_BLITTER_GEN9); 1248 FORCEWAKE_ACK_BLITTER_GEN9);
1252 fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA, 1249 fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
1253 FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9); 1250 FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
1254 } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) { 1251 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
1255 dev_priv->uncore.funcs.force_wake_get = fw_domains_get; 1252 dev_priv->uncore.funcs.force_wake_get = fw_domains_get;
1256 if (!IS_CHERRYVIEW(dev)) 1253 if (!IS_CHERRYVIEW(dev_priv))
1257 dev_priv->uncore.funcs.force_wake_put = 1254 dev_priv->uncore.funcs.force_wake_put =
1258 fw_domains_put_with_fifo; 1255 fw_domains_put_with_fifo;
1259 else 1256 else
@@ -1262,17 +1259,17 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
1262 FORCEWAKE_VLV, FORCEWAKE_ACK_VLV); 1259 FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
1263 fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA, 1260 fw_domain_init(dev_priv, FW_DOMAIN_ID_MEDIA,
1264 FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV); 1261 FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
1265 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { 1262 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
1266 dev_priv->uncore.funcs.force_wake_get = 1263 dev_priv->uncore.funcs.force_wake_get =
1267 fw_domains_get_with_thread_status; 1264 fw_domains_get_with_thread_status;
1268 if (IS_HASWELL(dev)) 1265 if (IS_HASWELL(dev_priv))
1269 dev_priv->uncore.funcs.force_wake_put = 1266 dev_priv->uncore.funcs.force_wake_put =
1270 fw_domains_put_with_fifo; 1267 fw_domains_put_with_fifo;
1271 else 1268 else
1272 dev_priv->uncore.funcs.force_wake_put = fw_domains_put; 1269 dev_priv->uncore.funcs.force_wake_put = fw_domains_put;
1273 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, 1270 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1274 FORCEWAKE_MT, FORCEWAKE_ACK_HSW); 1271 FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
1275 } else if (IS_IVYBRIDGE(dev)) { 1272 } else if (IS_IVYBRIDGE(dev_priv)) {
1276 u32 ecobus; 1273 u32 ecobus;
1277 1274
1278 /* IVB configs may use multi-threaded forcewake */ 1275 /* IVB configs may use multi-threaded forcewake */
@@ -1302,11 +1299,11 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
1302 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, 1299 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1303 FORCEWAKE_MT, FORCEWAKE_MT_ACK); 1300 FORCEWAKE_MT, FORCEWAKE_MT_ACK);
1304 1301
1305 mutex_lock(&dev->struct_mutex); 1302 spin_lock_irq(&dev_priv->uncore.lock);
1306 fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL); 1303 fw_domains_get_with_thread_status(dev_priv, FORCEWAKE_ALL);
1307 ecobus = __raw_i915_read32(dev_priv, ECOBUS); 1304 ecobus = __raw_i915_read32(dev_priv, ECOBUS);
1308 fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL); 1305 fw_domains_put_with_fifo(dev_priv, FORCEWAKE_ALL);
1309 mutex_unlock(&dev->struct_mutex); 1306 spin_unlock_irq(&dev_priv->uncore.lock);
1310 1307
1311 if (!(ecobus & FORCEWAKE_MT_ENABLE)) { 1308 if (!(ecobus & FORCEWAKE_MT_ENABLE)) {
1312 DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n"); 1309 DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
@@ -1314,7 +1311,7 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
1314 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, 1311 fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER,
1315 FORCEWAKE, FORCEWAKE_ACK); 1312 FORCEWAKE, FORCEWAKE_ACK);
1316 } 1313 }
1317 } else if (IS_GEN6(dev)) { 1314 } else if (IS_GEN6(dev_priv)) {
1318 dev_priv->uncore.funcs.force_wake_get = 1315 dev_priv->uncore.funcs.force_wake_get =
1319 fw_domains_get_with_thread_status; 1316 fw_domains_get_with_thread_status;
1320 dev_priv->uncore.funcs.force_wake_put = 1317 dev_priv->uncore.funcs.force_wake_put =
@@ -1327,26 +1324,24 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev)
1327 WARN_ON(dev_priv->uncore.fw_domains == 0); 1324 WARN_ON(dev_priv->uncore.fw_domains == 0);
1328} 1325}
1329 1326
1330void intel_uncore_init(struct drm_device *dev) 1327void intel_uncore_init(struct drm_i915_private *dev_priv)
1331{ 1328{
1332 struct drm_i915_private *dev_priv = dev->dev_private; 1329 i915_check_vgpu(dev_priv);
1333
1334 i915_check_vgpu(dev);
1335 1330
1336 intel_uncore_edram_detect(dev_priv); 1331 intel_uncore_edram_detect(dev_priv);
1337 intel_uncore_fw_domains_init(dev); 1332 intel_uncore_fw_domains_init(dev_priv);
1338 __intel_uncore_early_sanitize(dev, false); 1333 __intel_uncore_early_sanitize(dev_priv, false);
1339 1334
1340 dev_priv->uncore.unclaimed_mmio_check = 1; 1335 dev_priv->uncore.unclaimed_mmio_check = 1;
1341 1336
1342 switch (INTEL_INFO(dev)->gen) { 1337 switch (INTEL_INFO(dev_priv)->gen) {
1343 default: 1338 default:
1344 case 9: 1339 case 9:
1345 ASSIGN_WRITE_MMIO_VFUNCS(gen9); 1340 ASSIGN_WRITE_MMIO_VFUNCS(gen9);
1346 ASSIGN_READ_MMIO_VFUNCS(gen9); 1341 ASSIGN_READ_MMIO_VFUNCS(gen9);
1347 break; 1342 break;
1348 case 8: 1343 case 8:
1349 if (IS_CHERRYVIEW(dev)) { 1344 if (IS_CHERRYVIEW(dev_priv)) {
1350 ASSIGN_WRITE_MMIO_VFUNCS(chv); 1345 ASSIGN_WRITE_MMIO_VFUNCS(chv);
1351 ASSIGN_READ_MMIO_VFUNCS(chv); 1346 ASSIGN_READ_MMIO_VFUNCS(chv);
1352 1347
@@ -1357,13 +1352,13 @@ void intel_uncore_init(struct drm_device *dev)
1357 break; 1352 break;
1358 case 7: 1353 case 7:
1359 case 6: 1354 case 6:
1360 if (IS_HASWELL(dev)) { 1355 if (IS_HASWELL(dev_priv)) {
1361 ASSIGN_WRITE_MMIO_VFUNCS(hsw); 1356 ASSIGN_WRITE_MMIO_VFUNCS(hsw);
1362 } else { 1357 } else {
1363 ASSIGN_WRITE_MMIO_VFUNCS(gen6); 1358 ASSIGN_WRITE_MMIO_VFUNCS(gen6);
1364 } 1359 }
1365 1360
1366 if (IS_VALLEYVIEW(dev)) { 1361 if (IS_VALLEYVIEW(dev_priv)) {
1367 ASSIGN_READ_MMIO_VFUNCS(vlv); 1362 ASSIGN_READ_MMIO_VFUNCS(vlv);
1368 } else { 1363 } else {
1369 ASSIGN_READ_MMIO_VFUNCS(gen6); 1364 ASSIGN_READ_MMIO_VFUNCS(gen6);
@@ -1381,24 +1376,24 @@ void intel_uncore_init(struct drm_device *dev)
1381 break; 1376 break;
1382 } 1377 }
1383 1378
1384 if (intel_vgpu_active(dev)) { 1379 if (intel_vgpu_active(dev_priv)) {
1385 ASSIGN_WRITE_MMIO_VFUNCS(vgpu); 1380 ASSIGN_WRITE_MMIO_VFUNCS(vgpu);
1386 ASSIGN_READ_MMIO_VFUNCS(vgpu); 1381 ASSIGN_READ_MMIO_VFUNCS(vgpu);
1387 } 1382 }
1388 1383
1389 i915_check_and_clear_faults(dev); 1384 i915_check_and_clear_faults(dev_priv);
1390} 1385}
1391#undef ASSIGN_WRITE_MMIO_VFUNCS 1386#undef ASSIGN_WRITE_MMIO_VFUNCS
1392#undef ASSIGN_READ_MMIO_VFUNCS 1387#undef ASSIGN_READ_MMIO_VFUNCS
1393 1388
1394void intel_uncore_fini(struct drm_device *dev) 1389void intel_uncore_fini(struct drm_i915_private *dev_priv)
1395{ 1390{
1396 /* Paranoia: make sure we have disabled everything before we exit. */ 1391 /* Paranoia: make sure we have disabled everything before we exit. */
1397 intel_uncore_sanitize(dev); 1392 intel_uncore_sanitize(dev_priv);
1398 intel_uncore_forcewake_reset(dev, false); 1393 intel_uncore_forcewake_reset(dev_priv, false);
1399} 1394}
1400 1395
1401#define GEN_RANGE(l, h) GENMASK(h, l) 1396#define GEN_RANGE(l, h) GENMASK((h) - 1, (l) - 1)
1402 1397
1403static const struct register_whitelist { 1398static const struct register_whitelist {
1404 i915_reg_t offset_ldw, offset_udw; 1399 i915_reg_t offset_ldw, offset_udw;
@@ -1414,7 +1409,7 @@ static const struct register_whitelist {
1414int i915_reg_read_ioctl(struct drm_device *dev, 1409int i915_reg_read_ioctl(struct drm_device *dev,
1415 void *data, struct drm_file *file) 1410 void *data, struct drm_file *file)
1416{ 1411{
1417 struct drm_i915_private *dev_priv = dev->dev_private; 1412 struct drm_i915_private *dev_priv = to_i915(dev);
1418 struct drm_i915_reg_read *reg = data; 1413 struct drm_i915_reg_read *reg = data;
1419 struct register_whitelist const *entry = whitelist; 1414 struct register_whitelist const *entry = whitelist;
1420 unsigned size; 1415 unsigned size;
@@ -1423,7 +1418,7 @@ int i915_reg_read_ioctl(struct drm_device *dev,
1423 1418
1424 for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) { 1419 for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
1425 if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) && 1420 if (i915_mmio_reg_offset(entry->offset_ldw) == (reg->offset & -entry->size) &&
1426 (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask)) 1421 (INTEL_INFO(dev)->gen_mask & entry->gen_bitmask))
1427 break; 1422 break;
1428 } 1423 }
1429 1424
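The whitelist lookup above now tests the precomputed INTEL_INFO(dev)->gen_mask instead of shifting 1 << gen on every call, and the earlier hunk rebases GEN_RANGE() from GENMASK(h, l) to GENMASK((h) - 1, (l) - 1) to match. That only works if gen_mask has bit (gen - 1) set, which is the assumption behind the quick arithmetic check below (values chosen purely for illustration):

/* Sketch: old vs. new whitelist test for a gen6 device against an
 * entry covering gens 4..9, assuming gen_mask == BIT(gen - 1).
 *
 *   old: (1 << 6)   & GENMASK(9, 4) = 0x40 & 0x3f0  -> non-zero, match
 *   new: BIT(6 - 1) & GENMASK(8, 3) = 0x20 & 0x1f8  -> non-zero, match
 *
 * The whole bitmap just shifted down by one position.
 */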
@@ -1467,83 +1462,47 @@ out:
1467 return ret; 1462 return ret;
1468} 1463}
1469 1464
1470int i915_get_reset_stats_ioctl(struct drm_device *dev, 1465static int i915_reset_complete(struct pci_dev *pdev)
1471 void *data, struct drm_file *file)
1472{
1473 struct drm_i915_private *dev_priv = dev->dev_private;
1474 struct drm_i915_reset_stats *args = data;
1475 struct i915_ctx_hang_stats *hs;
1476 struct intel_context *ctx;
1477 int ret;
1478
1479 if (args->flags || args->pad)
1480 return -EINVAL;
1481
1482 if (args->ctx_id == DEFAULT_CONTEXT_HANDLE && !capable(CAP_SYS_ADMIN))
1483 return -EPERM;
1484
1485 ret = mutex_lock_interruptible(&dev->struct_mutex);
1486 if (ret)
1487 return ret;
1488
1489 ctx = i915_gem_context_get(file->driver_priv, args->ctx_id);
1490 if (IS_ERR(ctx)) {
1491 mutex_unlock(&dev->struct_mutex);
1492 return PTR_ERR(ctx);
1493 }
1494 hs = &ctx->hang_stats;
1495
1496 if (capable(CAP_SYS_ADMIN))
1497 args->reset_count = i915_reset_count(&dev_priv->gpu_error);
1498 else
1499 args->reset_count = 0;
1500
1501 args->batch_active = hs->batch_active;
1502 args->batch_pending = hs->batch_pending;
1503
1504 mutex_unlock(&dev->struct_mutex);
1505
1506 return 0;
1507}
1508
1509static int i915_reset_complete(struct drm_device *dev)
1510{ 1466{
1511 u8 gdrst; 1467 u8 gdrst;
1512 pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst); 1468 pci_read_config_byte(pdev, I915_GDRST, &gdrst);
1513 return (gdrst & GRDOM_RESET_STATUS) == 0; 1469 return (gdrst & GRDOM_RESET_STATUS) == 0;
1514} 1470}
1515 1471
1516static int i915_do_reset(struct drm_device *dev, unsigned engine_mask) 1472static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
1517{ 1473{
1474 struct pci_dev *pdev = dev_priv->drm.pdev;
1475
1518 /* assert reset for at least 20 usec */ 1476 /* assert reset for at least 20 usec */
1519 pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE); 1477 pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
1520 udelay(20); 1478 udelay(20);
1521 pci_write_config_byte(dev->pdev, I915_GDRST, 0); 1479 pci_write_config_byte(pdev, I915_GDRST, 0);
1522 1480
1523 return wait_for(i915_reset_complete(dev), 500); 1481 return wait_for(i915_reset_complete(pdev), 500);
1524} 1482}
1525 1483
1526static int g4x_reset_complete(struct drm_device *dev) 1484static int g4x_reset_complete(struct pci_dev *pdev)
1527{ 1485{
1528 u8 gdrst; 1486 u8 gdrst;
1529 pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst); 1487 pci_read_config_byte(pdev, I915_GDRST, &gdrst);
1530 return (gdrst & GRDOM_RESET_ENABLE) == 0; 1488 return (gdrst & GRDOM_RESET_ENABLE) == 0;
1531} 1489}
1532 1490
1533static int g33_do_reset(struct drm_device *dev, unsigned engine_mask) 1491static int g33_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
1534{ 1492{
1535 pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE); 1493 struct pci_dev *pdev = dev_priv->drm.pdev;
1536 return wait_for(g4x_reset_complete(dev), 500); 1494 pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE);
1495 return wait_for(g4x_reset_complete(pdev), 500);
1537} 1496}
1538 1497
1539static int g4x_do_reset(struct drm_device *dev, unsigned engine_mask) 1498static int g4x_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
1540{ 1499{
1541 struct drm_i915_private *dev_priv = dev->dev_private; 1500 struct pci_dev *pdev = dev_priv->drm.pdev;
1542 int ret; 1501 int ret;
1543 1502
1544 pci_write_config_byte(dev->pdev, I915_GDRST, 1503 pci_write_config_byte(pdev, I915_GDRST,
1545 GRDOM_RENDER | GRDOM_RESET_ENABLE); 1504 GRDOM_RENDER | GRDOM_RESET_ENABLE);
1546 ret = wait_for(g4x_reset_complete(dev), 500); 1505 ret = wait_for(g4x_reset_complete(pdev), 500);
1547 if (ret) 1506 if (ret)
1548 return ret; 1507 return ret;
1549 1508
@@ -1551,9 +1510,9 @@ static int g4x_do_reset(struct drm_device *dev, unsigned engine_mask)
1551 I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE); 1510 I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
1552 POSTING_READ(VDECCLK_GATE_D); 1511 POSTING_READ(VDECCLK_GATE_D);
1553 1512
1554 pci_write_config_byte(dev->pdev, I915_GDRST, 1513 pci_write_config_byte(pdev, I915_GDRST,
1555 GRDOM_MEDIA | GRDOM_RESET_ENABLE); 1514 GRDOM_MEDIA | GRDOM_RESET_ENABLE);
1556 ret = wait_for(g4x_reset_complete(dev), 500); 1515 ret = wait_for(g4x_reset_complete(pdev), 500);
1557 if (ret) 1516 if (ret)
1558 return ret; 1517 return ret;
1559 1518
@@ -1561,27 +1520,29 @@ static int g4x_do_reset(struct drm_device *dev, unsigned engine_mask)
1561 I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE); 1520 I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
1562 POSTING_READ(VDECCLK_GATE_D); 1521 POSTING_READ(VDECCLK_GATE_D);
1563 1522
1564 pci_write_config_byte(dev->pdev, I915_GDRST, 0); 1523 pci_write_config_byte(pdev, I915_GDRST, 0);
1565 1524
1566 return 0; 1525 return 0;
1567} 1526}
1568 1527
1569static int ironlake_do_reset(struct drm_device *dev, unsigned engine_mask) 1528static int ironlake_do_reset(struct drm_i915_private *dev_priv,
1529 unsigned engine_mask)
1570{ 1530{
1571 struct drm_i915_private *dev_priv = dev->dev_private;
1572 int ret; 1531 int ret;
1573 1532
1574 I915_WRITE(ILK_GDSR, 1533 I915_WRITE(ILK_GDSR,
1575 ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE); 1534 ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE);
1576 ret = wait_for((I915_READ(ILK_GDSR) & 1535 ret = intel_wait_for_register(dev_priv,
1577 ILK_GRDOM_RESET_ENABLE) == 0, 500); 1536 ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
1537 500);
1578 if (ret) 1538 if (ret)
1579 return ret; 1539 return ret;
1580 1540
1581 I915_WRITE(ILK_GDSR, 1541 I915_WRITE(ILK_GDSR,
1582 ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE); 1542 ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE);
1583 ret = wait_for((I915_READ(ILK_GDSR) & 1543 ret = intel_wait_for_register(dev_priv,
1584 ILK_GRDOM_RESET_ENABLE) == 0, 500); 1544 ILK_GDSR, ILK_GRDOM_RESET_ENABLE, 0,
1545 500);
1585 if (ret) 1546 if (ret)
1586 return ret; 1547 return ret;
1587 1548
@@ -1594,25 +1555,21 @@ static int ironlake_do_reset(struct drm_device *dev, unsigned engine_mask)
1594static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv, 1555static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
1595 u32 hw_domain_mask) 1556 u32 hw_domain_mask)
1596{ 1557{
1597 int ret;
1598
1599 /* GEN6_GDRST is not in the gt power well, no need to check 1558 /* GEN6_GDRST is not in the gt power well, no need to check
1600 * for fifo space for the write or forcewake the chip for 1559 * for fifo space for the write or forcewake the chip for
1601 * the read 1560 * the read
1602 */ 1561 */
1603 __raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask); 1562 __raw_i915_write32(dev_priv, GEN6_GDRST, hw_domain_mask);
1604 1563
1605#define ACKED ((__raw_i915_read32(dev_priv, GEN6_GDRST) & hw_domain_mask) == 0)
1606 /* Spin waiting for the device to ack the reset requests */ 1564 /* Spin waiting for the device to ack the reset requests */
1607 ret = wait_for(ACKED, 500); 1565 return intel_wait_for_register_fw(dev_priv,
1608#undef ACKED 1566 GEN6_GDRST, hw_domain_mask, 0,
1609 1567 500);
1610 return ret;
1611} 1568}
1612 1569
1613/** 1570/**
1614 * gen6_reset_engines - reset individual engines 1571 * gen6_reset_engines - reset individual engines
1615 * @dev: DRM device 1572 * @dev_priv: i915 device
1616 * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset 1573 * @engine_mask: mask of intel_ring_flag() engines or ALL_ENGINES for full reset
1617 * 1574 *
1618 * This function will reset the individual engines that are set in engine_mask. 1575 * This function will reset the individual engines that are set in engine_mask.
@@ -1623,9 +1580,9 @@ static int gen6_hw_domain_reset(struct drm_i915_private *dev_priv,
1623 * 1580 *
1624 * Returns 0 on success, nonzero on error. 1581 * Returns 0 on success, nonzero on error.
1625 */ 1582 */
1626static int gen6_reset_engines(struct drm_device *dev, unsigned engine_mask) 1583static int gen6_reset_engines(struct drm_i915_private *dev_priv,
1584 unsigned engine_mask)
1627{ 1585{
1628 struct drm_i915_private *dev_priv = dev->dev_private;
1629 struct intel_engine_cs *engine; 1586 struct intel_engine_cs *engine;
1630 const u32 hw_engine_mask[I915_NUM_ENGINES] = { 1587 const u32 hw_engine_mask[I915_NUM_ENGINES] = {
1631 [RCS] = GEN6_GRDOM_RENDER, 1588 [RCS] = GEN6_GRDOM_RENDER,
@@ -1647,33 +1604,94 @@ static int gen6_reset_engines(struct drm_device *dev, unsigned engine_mask)
1647 1604
1648 ret = gen6_hw_domain_reset(dev_priv, hw_mask); 1605 ret = gen6_hw_domain_reset(dev_priv, hw_mask);
1649 1606
1650 intel_uncore_forcewake_reset(dev, true); 1607 intel_uncore_forcewake_reset(dev_priv, true);
1651 1608
1652 return ret; 1609 return ret;
1653} 1610}
1654 1611
1655static int wait_for_register_fw(struct drm_i915_private *dev_priv, 1612/**
1656 i915_reg_t reg, 1613 * intel_wait_for_register_fw - wait until register matches expected state
1657 const u32 mask, 1614 * @dev_priv: the i915 device
1658 const u32 value, 1615 * @reg: the register to read
1659 const unsigned long timeout_ms) 1616 * @mask: mask to apply to register value
1617 * @value: expected value
1618 * @timeout_ms: timeout in milliseconds
1619 *
1620 * This routine waits until the target register @reg contains the expected
1621 * @value after applying the @mask, i.e. it waits until
1622 * (I915_READ_FW(@reg) & @mask) == @value
1623 * Otherwise, the wait will timeout after @timeout_ms milliseconds.
1624 *
1625 * Note that this routine assumes the caller holds forcewake asserted, it is
1626 * not suitable for very long waits. See intel_wait_for_register() if you
1627 * wish to wait without holding forcewake for the duration (i.e. you expect
1628 * the wait to be slow).
1629 *
1630 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
1631 */
1632int intel_wait_for_register_fw(struct drm_i915_private *dev_priv,
1633 i915_reg_t reg,
1634 const u32 mask,
1635 const u32 value,
1636 const unsigned long timeout_ms)
1637{
1638#define done ((I915_READ_FW(reg) & mask) == value)
1639 int ret = wait_for_us(done, 2);
1640 if (ret)
1641 ret = wait_for(done, timeout_ms);
1642 return ret;
1643#undef done
1644}
1645
1646/**
1647 * intel_wait_for_register - wait until register matches expected state
1648 * @dev_priv: the i915 device
1649 * @reg: the register to read
1650 * @mask: mask to apply to register value
1651 * @value: expected value
1652 * @timeout_ms: timeout in milliseconds
1653 *
1654 * This routine waits until the target register @reg contains the expected
1655 * @value after applying the @mask, i.e. it waits until
1656 * (I915_READ(@reg) & @mask) == @value
1657 * Otherwise, the wait will timeout after @timeout_ms milliseconds.
1658 *
1659 * Returns 0 if the register matches the desired condition, or -ETIMEDOUT.
1660 */
1661int intel_wait_for_register(struct drm_i915_private *dev_priv,
1662 i915_reg_t reg,
1663 const u32 mask,
1664 const u32 value,
1665 const unsigned long timeout_ms)
1660{ 1666{
1661 return wait_for((I915_READ_FW(reg) & mask) == value, timeout_ms); 1667
1668 unsigned fw =
1669 intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ);
1670 int ret;
1671
1672 intel_uncore_forcewake_get(dev_priv, fw);
1673 ret = wait_for_us((I915_READ_FW(reg) & mask) == value, 2);
1674 intel_uncore_forcewake_put(dev_priv, fw);
1675 if (ret)
1676 ret = wait_for((I915_READ_NOTRACE(reg) & mask) == value,
1677 timeout_ms);
1678
1679 return ret;
1662} 1680}
1663 1681
1664static int gen8_request_engine_reset(struct intel_engine_cs *engine) 1682static int gen8_request_engine_reset(struct intel_engine_cs *engine)
1665{ 1683{
1684 struct drm_i915_private *dev_priv = engine->i915;
1666 int ret; 1685 int ret;
1667 struct drm_i915_private *dev_priv = engine->dev->dev_private;
1668 1686
1669 I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base), 1687 I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
1670 _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET)); 1688 _MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET));
1671 1689
1672 ret = wait_for_register_fw(dev_priv, 1690 ret = intel_wait_for_register_fw(dev_priv,
1673 RING_RESET_CTL(engine->mmio_base), 1691 RING_RESET_CTL(engine->mmio_base),
1674 RESET_CTL_READY_TO_RESET, 1692 RESET_CTL_READY_TO_RESET,
1675 RESET_CTL_READY_TO_RESET, 1693 RESET_CTL_READY_TO_RESET,
1676 700); 1694 700);
1677 if (ret) 1695 if (ret)
1678 DRM_ERROR("%s: reset request timeout\n", engine->name); 1696 DRM_ERROR("%s: reset request timeout\n", engine->name);
1679 1697
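The two helpers added above differ only in who owns forcewake for the duration of the wait: intel_wait_for_register_fw() spins on I915_READ_FW() and assumes the caller already holds the relevant forcewake domains (as the engine-reset paths below do), while intel_wait_for_register() grabs forcewake itself for the initial 2 us spin and then falls back to a sleeping wait without forcewake for the rest of the timeout. A usage sketch with made-up EXAMPLE_REG/EXAMPLE_DONE names:

/* caller already holds forcewake, short wait expected */
err = intel_wait_for_register_fw(dev_priv,
				 EXAMPLE_REG, EXAMPLE_DONE, EXAMPLE_DONE,
				 10);

/* helper manages forcewake; fine for waits that may take a while */
err = intel_wait_for_register(dev_priv,
			      EXAMPLE_REG, EXAMPLE_DONE, EXAMPLE_DONE,
			      500);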
@@ -1682,22 +1700,22 @@ static int gen8_request_engine_reset(struct intel_engine_cs *engine)
1682 1700
1683static void gen8_unrequest_engine_reset(struct intel_engine_cs *engine) 1701static void gen8_unrequest_engine_reset(struct intel_engine_cs *engine)
1684{ 1702{
1685 struct drm_i915_private *dev_priv = engine->dev->dev_private; 1703 struct drm_i915_private *dev_priv = engine->i915;
1686 1704
1687 I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base), 1705 I915_WRITE_FW(RING_RESET_CTL(engine->mmio_base),
1688 _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET)); 1706 _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
1689} 1707}
1690 1708
1691static int gen8_reset_engines(struct drm_device *dev, unsigned engine_mask) 1709static int gen8_reset_engines(struct drm_i915_private *dev_priv,
1710 unsigned engine_mask)
1692{ 1711{
1693 struct drm_i915_private *dev_priv = dev->dev_private;
1694 struct intel_engine_cs *engine; 1712 struct intel_engine_cs *engine;
1695 1713
1696 for_each_engine_masked(engine, dev_priv, engine_mask) 1714 for_each_engine_masked(engine, dev_priv, engine_mask)
1697 if (gen8_request_engine_reset(engine)) 1715 if (gen8_request_engine_reset(engine))
1698 goto not_ready; 1716 goto not_ready;
1699 1717
1700 return gen6_reset_engines(dev, engine_mask); 1718 return gen6_reset_engines(dev_priv, engine_mask);
1701 1719
1702not_ready: 1720not_ready:
1703 for_each_engine_masked(engine, dev_priv, engine_mask) 1721 for_each_engine_masked(engine, dev_priv, engine_mask)
@@ -1706,35 +1724,35 @@ not_ready:
1706 return -EIO; 1724 return -EIO;
1707} 1725}
1708 1726
1709static int (*intel_get_gpu_reset(struct drm_device *dev))(struct drm_device *, 1727typedef int (*reset_func)(struct drm_i915_private *, unsigned engine_mask);
1710 unsigned engine_mask) 1728
1729static reset_func intel_get_gpu_reset(struct drm_i915_private *dev_priv)
1711{ 1730{
1712 if (!i915.reset) 1731 if (!i915.reset)
1713 return NULL; 1732 return NULL;
1714 1733
1715 if (INTEL_INFO(dev)->gen >= 8) 1734 if (INTEL_INFO(dev_priv)->gen >= 8)
1716 return gen8_reset_engines; 1735 return gen8_reset_engines;
1717 else if (INTEL_INFO(dev)->gen >= 6) 1736 else if (INTEL_INFO(dev_priv)->gen >= 6)
1718 return gen6_reset_engines; 1737 return gen6_reset_engines;
1719 else if (IS_GEN5(dev)) 1738 else if (IS_GEN5(dev_priv))
1720 return ironlake_do_reset; 1739 return ironlake_do_reset;
1721 else if (IS_G4X(dev)) 1740 else if (IS_G4X(dev_priv))
1722 return g4x_do_reset; 1741 return g4x_do_reset;
1723 else if (IS_G33(dev)) 1742 else if (IS_G33(dev_priv))
1724 return g33_do_reset; 1743 return g33_do_reset;
1725 else if (INTEL_INFO(dev)->gen >= 3) 1744 else if (INTEL_INFO(dev_priv)->gen >= 3)
1726 return i915_do_reset; 1745 return i915_do_reset;
1727 else 1746 else
1728 return NULL; 1747 return NULL;
1729} 1748}
1730 1749
1731int intel_gpu_reset(struct drm_device *dev, unsigned engine_mask) 1750int intel_gpu_reset(struct drm_i915_private *dev_priv, unsigned engine_mask)
1732{ 1751{
1733 struct drm_i915_private *dev_priv = to_i915(dev); 1752 reset_func reset;
1734 int (*reset)(struct drm_device *, unsigned);
1735 int ret; 1753 int ret;
1736 1754
1737 reset = intel_get_gpu_reset(dev); 1755 reset = intel_get_gpu_reset(dev_priv);
1738 if (reset == NULL) 1756 if (reset == NULL)
1739 return -ENODEV; 1757 return -ENODEV;
1740 1758
@@ -1742,15 +1760,15 @@ int intel_gpu_reset(struct drm_device *dev, unsigned engine_mask)
1742 * request may be dropped and never completes (causing -EIO). 1760 * request may be dropped and never completes (causing -EIO).
1743 */ 1761 */
1744 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 1762 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
1745 ret = reset(dev, engine_mask); 1763 ret = reset(dev_priv, engine_mask);
1746 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 1764 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
1747 1765
1748 return ret; 1766 return ret;
1749} 1767}
1750 1768
1751bool intel_has_gpu_reset(struct drm_device *dev) 1769bool intel_has_gpu_reset(struct drm_i915_private *dev_priv)
1752{ 1770{
1753 return intel_get_gpu_reset(dev) != NULL; 1771 return intel_get_gpu_reset(dev_priv) != NULL;
1754} 1772}
1755 1773
1756int intel_guc_reset(struct drm_i915_private *dev_priv) 1774int intel_guc_reset(struct drm_i915_private *dev_priv)
@@ -1758,7 +1776,7 @@ int intel_guc_reset(struct drm_i915_private *dev_priv)
1758 int ret; 1776 int ret;
1759 unsigned long irqflags; 1777 unsigned long irqflags;
1760 1778
1761 if (!i915.enable_guc_submission) 1779 if (!HAS_GUC(dev_priv))
1762 return -EINVAL; 1780 return -EINVAL;
1763 1781
1764 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); 1782 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
@@ -1802,10 +1820,10 @@ intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
1802{ 1820{
1803 enum forcewake_domains fw_domains; 1821 enum forcewake_domains fw_domains;
1804 1822
1805 if (intel_vgpu_active(dev_priv->dev)) 1823 if (intel_vgpu_active(dev_priv))
1806 return 0; 1824 return 0;
1807 1825
1808 switch (INTEL_INFO(dev_priv)->gen) { 1826 switch (INTEL_GEN(dev_priv)) {
1809 case 9: 1827 case 9:
1810 fw_domains = __gen9_reg_read_fw_domains(i915_mmio_reg_offset(reg)); 1828 fw_domains = __gen9_reg_read_fw_domains(i915_mmio_reg_offset(reg));
1811 break; 1829 break;
@@ -1842,10 +1860,10 @@ intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
1842{ 1860{
1843 enum forcewake_domains fw_domains; 1861 enum forcewake_domains fw_domains;
1844 1862
1845 if (intel_vgpu_active(dev_priv->dev)) 1863 if (intel_vgpu_active(dev_priv))
1846 return 0; 1864 return 0;
1847 1865
1848 switch (INTEL_INFO(dev_priv)->gen) { 1866 switch (INTEL_GEN(dev_priv)) {
1849 case 9: 1867 case 9:
1850 fw_domains = __gen9_reg_write_fw_domains(i915_mmio_reg_offset(reg)); 1868 fw_domains = __gen9_reg_write_fw_domains(i915_mmio_reg_offset(reg));
1851 break; 1869 break;
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h
index 44fb0b35eed3..68db9621f1f0 100644
--- a/drivers/gpu/drm/i915/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/intel_vbt_defs.h
@@ -447,10 +447,16 @@ struct bdb_lfp_backlight_data_entry {
447 u8 obsolete3; 447 u8 obsolete3;
448} __packed; 448} __packed;
449 449
450struct bdb_lfp_backlight_control_method {
451 u8 type:4;
452 u8 controller:4;
453} __packed;
454
450struct bdb_lfp_backlight_data { 455struct bdb_lfp_backlight_data {
451 u8 entry_size; 456 u8 entry_size;
452 struct bdb_lfp_backlight_data_entry data[16]; 457 struct bdb_lfp_backlight_data_entry data[16];
453 u8 level[16]; 458 u8 level[16];
459 struct bdb_lfp_backlight_control_method backlight_control[16];
454} __packed; 460} __packed;
455 461
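
The new per-panel backlight_control entries are meant to be consumed by the VBT parser; a minimal sketch, assuming backlight_data points at an already-validated struct bdb_lfp_backlight_data and panel_type is the active panel index (both hypothetical locals, not part of this patch):

	/* Illustrative only: look up the backlight control method for the panel. */
	const struct bdb_lfp_backlight_control_method *method =
			&backlight_data->backlight_control[panel_type];
	u8 bl_type = method->type;		/* control type, low 4 bits */
	u8 bl_controller = method->controller;	/* controller index, high 4 bits */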
456struct aimdb_header { 462struct aimdb_header {
diff --git a/drivers/gpu/drm/imx/Kconfig b/drivers/gpu/drm/imx/Kconfig
index a1844b50546c..f2c9ae822149 100644
--- a/drivers/gpu/drm/imx/Kconfig
+++ b/drivers/gpu/drm/imx/Kconfig
@@ -1,7 +1,6 @@
1config DRM_IMX 1config DRM_IMX
2 tristate "DRM Support for Freescale i.MX" 2 tristate "DRM Support for Freescale i.MX"
3 select DRM_KMS_HELPER 3 select DRM_KMS_HELPER
4 select DRM_KMS_FB_HELPER
5 select VIDEOMODE_HELPERS 4 select VIDEOMODE_HELPERS
6 select DRM_GEM_CMA_HELPER 5 select DRM_GEM_CMA_HELPER
7 select DRM_KMS_CMA_HELPER 6 select DRM_KMS_CMA_HELPER
diff --git a/drivers/gpu/drm/imx/dw_hdmi-imx.c b/drivers/gpu/drm/imx/dw_hdmi-imx.c
index a24631fdf4ad..359cd2765552 100644
--- a/drivers/gpu/drm/imx/dw_hdmi-imx.c
+++ b/drivers/gpu/drm/imx/dw_hdmi-imx.c
@@ -28,6 +28,11 @@ struct imx_hdmi {
28 struct regmap *regmap; 28 struct regmap *regmap;
29}; 29};
30 30
31static inline struct imx_hdmi *enc_to_imx_hdmi(struct drm_encoder *e)
32{
33 return container_of(e, struct imx_hdmi, encoder);
34}
35
31static const struct dw_hdmi_mpll_config imx_mpll_cfg[] = { 36static const struct dw_hdmi_mpll_config imx_mpll_cfg[] = {
32 { 37 {
33 45250000, { 38 45250000, {
@@ -109,15 +114,9 @@ static void dw_hdmi_imx_encoder_disable(struct drm_encoder *encoder)
109{ 114{
110} 115}
111 116
112static void dw_hdmi_imx_encoder_mode_set(struct drm_encoder *encoder, 117static void dw_hdmi_imx_encoder_enable(struct drm_encoder *encoder)
113 struct drm_display_mode *mode,
114 struct drm_display_mode *adj_mode)
115{ 118{
116} 119 struct imx_hdmi *hdmi = enc_to_imx_hdmi(encoder);
117
118static void dw_hdmi_imx_encoder_commit(struct drm_encoder *encoder)
119{
120 struct imx_hdmi *hdmi = container_of(encoder, struct imx_hdmi, encoder);
121 int mux = drm_of_encoder_active_port_id(hdmi->dev->of_node, encoder); 120 int mux = drm_of_encoder_active_port_id(hdmi->dev->of_node, encoder);
122 121
123 regmap_update_bits(hdmi->regmap, IOMUXC_GPR3, 122 regmap_update_bits(hdmi->regmap, IOMUXC_GPR3,
@@ -125,16 +124,23 @@ static void dw_hdmi_imx_encoder_commit(struct drm_encoder *encoder)
125 mux << IMX6Q_GPR3_HDMI_MUX_CTL_SHIFT); 124 mux << IMX6Q_GPR3_HDMI_MUX_CTL_SHIFT);
126} 125}
127 126
128static void dw_hdmi_imx_encoder_prepare(struct drm_encoder *encoder) 127static int dw_hdmi_imx_atomic_check(struct drm_encoder *encoder,
128 struct drm_crtc_state *crtc_state,
129 struct drm_connector_state *conn_state)
129{ 130{
130 imx_drm_set_bus_format(encoder, MEDIA_BUS_FMT_RGB888_1X24); 131 struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state);
132
133 imx_crtc_state->bus_format = MEDIA_BUS_FMT_RGB888_1X24;
134 imx_crtc_state->di_hsync_pin = 2;
135 imx_crtc_state->di_vsync_pin = 3;
136
137 return 0;
131} 138}
132 139
133static const struct drm_encoder_helper_funcs dw_hdmi_imx_encoder_helper_funcs = { 140static const struct drm_encoder_helper_funcs dw_hdmi_imx_encoder_helper_funcs = {
134 .mode_set = dw_hdmi_imx_encoder_mode_set, 141 .enable = dw_hdmi_imx_encoder_enable,
135 .prepare = dw_hdmi_imx_encoder_prepare,
136 .commit = dw_hdmi_imx_encoder_commit,
137 .disable = dw_hdmi_imx_encoder_disable, 142 .disable = dw_hdmi_imx_encoder_disable,
143 .atomic_check = dw_hdmi_imx_atomic_check,
138}; 144};
139 145
140static const struct drm_encoder_funcs dw_hdmi_imx_encoder_funcs = { 146static const struct drm_encoder_funcs dw_hdmi_imx_encoder_funcs = {
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 82656654fb21..9f7dafce3a4c 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -15,10 +15,14 @@
15 */ 15 */
16#include <linux/component.h> 16#include <linux/component.h>
17#include <linux/device.h> 17#include <linux/device.h>
18#include <linux/dma-buf.h>
18#include <linux/fb.h> 19#include <linux/fb.h>
19#include <linux/module.h> 20#include <linux/module.h>
20#include <linux/platform_device.h> 21#include <linux/platform_device.h>
22#include <linux/reservation.h>
21#include <drm/drmP.h> 23#include <drm/drmP.h>
24#include <drm/drm_atomic.h>
25#include <drm/drm_atomic_helper.h>
22#include <drm/drm_fb_helper.h> 26#include <drm/drm_fb_helper.h>
23#include <drm/drm_crtc_helper.h> 27#include <drm/drm_crtc_helper.h>
24#include <drm/drm_gem_cma_helper.h> 28#include <drm/drm_gem_cma_helper.h>
@@ -41,6 +45,7 @@ struct imx_drm_device {
41 struct imx_drm_crtc *crtc[MAX_CRTC]; 45 struct imx_drm_crtc *crtc[MAX_CRTC];
42 unsigned int pipes; 46 unsigned int pipes;
43 struct drm_fbdev_cma *fbhelper; 47 struct drm_fbdev_cma *fbhelper;
48 struct drm_atomic_state *state;
44}; 49};
45 50
46struct imx_drm_crtc { 51struct imx_drm_crtc {
@@ -85,45 +90,6 @@ static int imx_drm_driver_unload(struct drm_device *drm)
85 return 0; 90 return 0;
86} 91}
87 92
88static struct imx_drm_crtc *imx_drm_find_crtc(struct drm_crtc *crtc)
89{
90 struct imx_drm_device *imxdrm = crtc->dev->dev_private;
91 unsigned i;
92
93 for (i = 0; i < MAX_CRTC; i++)
94 if (imxdrm->crtc[i] && imxdrm->crtc[i]->crtc == crtc)
95 return imxdrm->crtc[i];
96
97 return NULL;
98}
99
100int imx_drm_set_bus_config(struct drm_encoder *encoder, u32 bus_format,
101 int hsync_pin, int vsync_pin, u32 bus_flags)
102{
103 struct imx_drm_crtc_helper_funcs *helper;
104 struct imx_drm_crtc *imx_crtc;
105
106 imx_crtc = imx_drm_find_crtc(encoder->crtc);
107 if (!imx_crtc)
108 return -EINVAL;
109
110 helper = &imx_crtc->imx_drm_helper_funcs;
111 if (helper->set_interface_pix_fmt)
112 return helper->set_interface_pix_fmt(encoder->crtc,
113 bus_format, hsync_pin, vsync_pin,
114 bus_flags);
115 return 0;
116}
117EXPORT_SYMBOL_GPL(imx_drm_set_bus_config);
118
119int imx_drm_set_bus_format(struct drm_encoder *encoder, u32 bus_format)
120{
121 return imx_drm_set_bus_config(encoder, bus_format, 2, 3,
122 DRM_BUS_FLAG_DE_HIGH |
123 DRM_BUS_FLAG_PIXDATA_NEGEDGE);
124}
125EXPORT_SYMBOL_GPL(imx_drm_set_bus_format);
126
127int imx_drm_crtc_vblank_get(struct imx_drm_crtc *imx_drm_crtc) 93int imx_drm_crtc_vblank_get(struct imx_drm_crtc *imx_drm_crtc)
128{ 94{
129 return drm_crtc_vblank_get(imx_drm_crtc->crtc); 95 return drm_crtc_vblank_get(imx_drm_crtc->crtc);
@@ -208,6 +174,63 @@ static void imx_drm_output_poll_changed(struct drm_device *drm)
208static const struct drm_mode_config_funcs imx_drm_mode_config_funcs = { 174static const struct drm_mode_config_funcs imx_drm_mode_config_funcs = {
209 .fb_create = drm_fb_cma_create, 175 .fb_create = drm_fb_cma_create,
210 .output_poll_changed = imx_drm_output_poll_changed, 176 .output_poll_changed = imx_drm_output_poll_changed,
177 .atomic_check = drm_atomic_helper_check,
178 .atomic_commit = drm_atomic_helper_commit,
179};
180
181static void imx_drm_atomic_commit_tail(struct drm_atomic_state *state)
182{
183 struct drm_device *dev = state->dev;
184 struct drm_crtc *crtc;
185 struct drm_crtc_state *crtc_state;
186 struct drm_plane_state *plane_state;
187 struct drm_gem_cma_object *cma_obj;
188 struct fence *excl;
189 unsigned shared_count;
190 struct fence **shared;
191 unsigned int i, j;
192 int ret;
193
194 /* Wait for fences. */
195 for_each_crtc_in_state(state, crtc, crtc_state, i) {
196 plane_state = crtc->primary->state;
197 if (plane_state->fb) {
198 cma_obj = drm_fb_cma_get_gem_obj(plane_state->fb, 0);
199 if (cma_obj->base.dma_buf) {
200 ret = reservation_object_get_fences_rcu(
201 cma_obj->base.dma_buf->resv, &excl,
202 &shared_count, &shared);
203 if (unlikely(ret))
204 DRM_ERROR("failed to get fences "
205 "for buffer\n");
206
207 if (excl) {
208 fence_wait(excl, false);
209 fence_put(excl);
210 }
 211 for (j = 0; j < shared_count; j++) {
212 fence_wait(shared[j], false);
213 fence_put(shared[j]);
214 }
215 }
216 }
217 }
218
219 drm_atomic_helper_commit_modeset_disables(dev, state);
220
221 drm_atomic_helper_commit_planes(dev, state, true);
222
223 drm_atomic_helper_commit_modeset_enables(dev, state);
224
225 drm_atomic_helper_commit_hw_done(state);
226
227 drm_atomic_helper_wait_for_vblanks(dev, state);
228
229 drm_atomic_helper_cleanup_planes(dev, state);
230}
231
232static struct drm_mode_config_helper_funcs imx_drm_mode_config_helpers = {
233 .atomic_commit_tail = imx_drm_atomic_commit_tail,
211}; 234};
212 235
213/* 236/*
@@ -249,6 +272,7 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
249 drm->mode_config.max_width = 4096; 272 drm->mode_config.max_width = 4096;
250 drm->mode_config.max_height = 4096; 273 drm->mode_config.max_height = 4096;
251 drm->mode_config.funcs = &imx_drm_mode_config_funcs; 274 drm->mode_config.funcs = &imx_drm_mode_config_funcs;
275 drm->mode_config.helper_private = &imx_drm_mode_config_helpers;
252 276
253 drm_mode_config_init(drm); 277 drm_mode_config_init(drm);
254 278
@@ -279,6 +303,8 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
279 } 303 }
280 } 304 }
281 305
306 drm_mode_config_reset(drm);
307
282 /* 308 /*
283 * All components are now initialised, so setup the fb helper. 309 * All components are now initialised, so setup the fb helper.
284 * The fb helper takes copies of key hardware information, so the 310 * The fb helper takes copies of key hardware information, so the
@@ -289,7 +315,6 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
289 dev_warn(drm->dev, "Invalid legacyfb_depth. Defaulting to 16bpp\n"); 315 dev_warn(drm->dev, "Invalid legacyfb_depth. Defaulting to 16bpp\n");
290 legacyfb_depth = 16; 316 legacyfb_depth = 16;
291 } 317 }
292 drm_helper_disable_unused_functions(drm);
293 imxdrm->fbhelper = drm_fbdev_cma_init(drm, legacyfb_depth, 318 imxdrm->fbhelper = drm_fbdev_cma_init(drm, legacyfb_depth,
294 drm->mode_config.num_crtc, MAX_CRTC); 319 drm->mode_config.num_crtc, MAX_CRTC);
295 if (IS_ERR(imxdrm->fbhelper)) { 320 if (IS_ERR(imxdrm->fbhelper)) {
@@ -403,11 +428,11 @@ static const struct drm_ioctl_desc imx_drm_ioctls[] = {
403}; 428};
404 429
405static struct drm_driver imx_drm_driver = { 430static struct drm_driver imx_drm_driver = {
406 .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME, 431 .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
432 DRIVER_ATOMIC,
407 .load = imx_drm_driver_load, 433 .load = imx_drm_driver_load,
408 .unload = imx_drm_driver_unload, 434 .unload = imx_drm_driver_unload,
409 .lastclose = imx_drm_driver_lastclose, 435 .lastclose = imx_drm_driver_lastclose,
410 .set_busid = drm_platform_set_busid,
411 .gem_free_object_unlocked = drm_gem_cma_free_object, 436 .gem_free_object_unlocked = drm_gem_cma_free_object,
412 .gem_vm_ops = &drm_gem_cma_vm_ops, 437 .gem_vm_ops = &drm_gem_cma_vm_ops,
413 .dumb_create = drm_gem_cma_dumb_create, 438 .dumb_create = drm_gem_cma_dumb_create,
@@ -492,6 +517,7 @@ static int imx_drm_platform_remove(struct platform_device *pdev)
492static int imx_drm_suspend(struct device *dev) 517static int imx_drm_suspend(struct device *dev)
493{ 518{
494 struct drm_device *drm_dev = dev_get_drvdata(dev); 519 struct drm_device *drm_dev = dev_get_drvdata(dev);
520 struct imx_drm_device *imxdrm;
495 521
496 /* The drm_dev is NULL before .load hook is called */ 522 /* The drm_dev is NULL before .load hook is called */
497 if (drm_dev == NULL) 523 if (drm_dev == NULL)
@@ -499,17 +525,26 @@ static int imx_drm_suspend(struct device *dev)
499 525
500 drm_kms_helper_poll_disable(drm_dev); 526 drm_kms_helper_poll_disable(drm_dev);
501 527
528 imxdrm = drm_dev->dev_private;
529 imxdrm->state = drm_atomic_helper_suspend(drm_dev);
530 if (IS_ERR(imxdrm->state)) {
531 drm_kms_helper_poll_enable(drm_dev);
532 return PTR_ERR(imxdrm->state);
533 }
534
502 return 0; 535 return 0;
503} 536}
504 537
505static int imx_drm_resume(struct device *dev) 538static int imx_drm_resume(struct device *dev)
506{ 539{
507 struct drm_device *drm_dev = dev_get_drvdata(dev); 540 struct drm_device *drm_dev = dev_get_drvdata(dev);
541 struct imx_drm_device *imx_drm;
508 542
509 if (drm_dev == NULL) 543 if (drm_dev == NULL)
510 return 0; 544 return 0;
511 545
512 drm_helper_resume_force_mode(drm_dev); 546 imx_drm = drm_dev->dev_private;
547 drm_atomic_helper_resume(drm_dev, imx_drm->state);
513 drm_kms_helper_poll_enable(drm_dev); 548 drm_kms_helper_poll_enable(drm_dev);
514 549
515 return 0; 550 return 0;
diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h
index 74320a1723b7..07d33e45f90f 100644
--- a/drivers/gpu/drm/imx/imx-drm.h
+++ b/drivers/gpu/drm/imx/imx-drm.h
@@ -15,12 +15,22 @@ struct platform_device;
15 15
16unsigned int imx_drm_crtc_id(struct imx_drm_crtc *crtc); 16unsigned int imx_drm_crtc_id(struct imx_drm_crtc *crtc);
17 17
18struct imx_crtc_state {
19 struct drm_crtc_state base;
20 u32 bus_format;
21 u32 bus_flags;
22 int di_hsync_pin;
23 int di_vsync_pin;
24};
25
26static inline struct imx_crtc_state *to_imx_crtc_state(struct drm_crtc_state *s)
27{
28 return container_of(s, struct imx_crtc_state, base);
29}
30
18struct imx_drm_crtc_helper_funcs { 31struct imx_drm_crtc_helper_funcs {
19 int (*enable_vblank)(struct drm_crtc *crtc); 32 int (*enable_vblank)(struct drm_crtc *crtc);
20 void (*disable_vblank)(struct drm_crtc *crtc); 33 void (*disable_vblank)(struct drm_crtc *crtc);
21 int (*set_interface_pix_fmt)(struct drm_crtc *crtc,
22 u32 bus_format, int hsync_pin, int vsync_pin,
23 u32 bus_flags);
24 const struct drm_crtc_helper_funcs *crtc_helper_funcs; 34 const struct drm_crtc_helper_funcs *crtc_helper_funcs;
25 const struct drm_crtc_funcs *crtc_funcs; 35 const struct drm_crtc_funcs *crtc_funcs;
26}; 36};
@@ -42,11 +52,6 @@ void imx_drm_mode_config_init(struct drm_device *drm);
42 52
43struct drm_gem_cma_object *imx_drm_fb_get_obj(struct drm_framebuffer *fb); 53struct drm_gem_cma_object *imx_drm_fb_get_obj(struct drm_framebuffer *fb);
44 54
45int imx_drm_set_bus_config(struct drm_encoder *encoder, u32 bus_format,
46 int hsync_pin, int vsync_pin, u32 bus_flags);
47int imx_drm_set_bus_format(struct drm_encoder *encoder,
48 u32 bus_format);
49
50int imx_drm_encoder_parse_of(struct drm_device *drm, 55int imx_drm_encoder_parse_of(struct drm_device *drm,
51 struct drm_encoder *encoder, struct device_node *np); 56 struct drm_encoder *encoder, struct device_node *np);
52 57
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index beff793bb717..b03919ed60ba 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -17,6 +17,8 @@
17#include <linux/clk.h> 17#include <linux/clk.h>
18#include <linux/component.h> 18#include <linux/component.h>
19#include <drm/drmP.h> 19#include <drm/drmP.h>
20#include <drm/drm_atomic.h>
21#include <drm/drm_atomic_helper.h>
20#include <drm/drm_fb_helper.h> 22#include <drm/drm_fb_helper.h>
21#include <drm/drm_crtc_helper.h> 23#include <drm/drm_crtc_helper.h>
22#include <drm/drm_of.h> 24#include <drm/drm_of.h>
@@ -49,9 +51,6 @@
49#define LDB_DI1_VS_POL_ACT_LOW (1 << 10) 51#define LDB_DI1_VS_POL_ACT_LOW (1 << 10)
50#define LDB_BGREF_RMODE_INT (1 << 15) 52#define LDB_BGREF_RMODE_INT (1 << 15)
51 53
52#define con_to_imx_ldb_ch(x) container_of(x, struct imx_ldb_channel, connector)
53#define enc_to_imx_ldb_ch(x) container_of(x, struct imx_ldb_channel, encoder)
54
55struct imx_ldb; 54struct imx_ldb;
56 55
57struct imx_ldb_channel { 56struct imx_ldb_channel {
@@ -66,9 +65,19 @@ struct imx_ldb_channel {
66 int edid_len; 65 int edid_len;
67 struct drm_display_mode mode; 66 struct drm_display_mode mode;
68 int mode_valid; 67 int mode_valid;
69 int bus_format; 68 u32 bus_format;
70}; 69};
71 70
71static inline struct imx_ldb_channel *con_to_imx_ldb_ch(struct drm_connector *c)
72{
73 return container_of(c, struct imx_ldb_channel, connector);
74}
75
76static inline struct imx_ldb_channel *enc_to_imx_ldb_ch(struct drm_encoder *e)
77{
78 return container_of(e, struct imx_ldb_channel, encoder);
79}
80
72struct bus_mux { 81struct bus_mux {
73 int reg; 82 int reg;
74 int shift; 83 int shift;
@@ -93,6 +102,32 @@ static enum drm_connector_status imx_ldb_connector_detect(
93 return connector_status_connected; 102 return connector_status_connected;
94} 103}
95 104
105static void imx_ldb_ch_set_bus_format(struct imx_ldb_channel *imx_ldb_ch,
106 u32 bus_format)
107{
108 struct imx_ldb *ldb = imx_ldb_ch->ldb;
109 int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
110
111 switch (bus_format) {
112 case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
113 break;
114 case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
115 if (imx_ldb_ch->chno == 0 || dual)
116 ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH0_24;
117 if (imx_ldb_ch->chno == 1 || dual)
118 ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH1_24;
119 break;
120 case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
121 if (imx_ldb_ch->chno == 0 || dual)
122 ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH0_24 |
123 LDB_BIT_MAP_CH0_JEIDA;
124 if (imx_ldb_ch->chno == 1 || dual)
125 ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH1_24 |
126 LDB_BIT_MAP_CH1_JEIDA;
127 break;
128 }
129}
130
96static int imx_ldb_connector_get_modes(struct drm_connector *connector) 131static int imx_ldb_connector_get_modes(struct drm_connector *connector)
97{ 132{
98 struct imx_ldb_channel *imx_ldb_ch = con_to_imx_ldb_ch(connector); 133 struct imx_ldb_channel *imx_ldb_ch = con_to_imx_ldb_ch(connector);
@@ -100,11 +135,7 @@ static int imx_ldb_connector_get_modes(struct drm_connector *connector)
100 135
101 if (imx_ldb_ch->panel && imx_ldb_ch->panel->funcs && 136 if (imx_ldb_ch->panel && imx_ldb_ch->panel->funcs &&
102 imx_ldb_ch->panel->funcs->get_modes) { 137 imx_ldb_ch->panel->funcs->get_modes) {
103 struct drm_display_info *di = &connector->display_info;
104
105 num_modes = imx_ldb_ch->panel->funcs->get_modes(imx_ldb_ch->panel); 138 num_modes = imx_ldb_ch->panel->funcs->get_modes(imx_ldb_ch->panel);
106 if (!imx_ldb_ch->bus_format && di->num_bus_formats)
107 imx_ldb_ch->bus_format = di->bus_formats[0];
108 if (num_modes > 0) 139 if (num_modes > 0)
109 return num_modes; 140 return num_modes;
110 } 141 }
@@ -141,10 +172,6 @@ static struct drm_encoder *imx_ldb_connector_best_encoder(
141 return &imx_ldb_ch->encoder; 172 return &imx_ldb_ch->encoder;
142} 173}
143 174
144static void imx_ldb_encoder_dpms(struct drm_encoder *encoder, int mode)
145{
146}
147
148static void imx_ldb_set_clock(struct imx_ldb *ldb, int mux, int chno, 175static void imx_ldb_set_clock(struct imx_ldb *ldb, int mux, int chno,
149 unsigned long serial_clk, unsigned long di_clk) 176 unsigned long serial_clk, unsigned long di_clk)
150{ 177{
@@ -173,43 +200,7 @@ static void imx_ldb_set_clock(struct imx_ldb *ldb, int mux, int chno,
173 chno); 200 chno);
174} 201}
175 202
176static void imx_ldb_encoder_prepare(struct drm_encoder *encoder) 203static void imx_ldb_encoder_enable(struct drm_encoder *encoder)
177{
178 struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
179 struct imx_ldb *ldb = imx_ldb_ch->ldb;
180 int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
181 u32 bus_format;
182
183 switch (imx_ldb_ch->bus_format) {
184 default:
185 dev_warn(ldb->dev,
186 "could not determine data mapping, default to 18-bit \"spwg\"\n");
187 /* fallthrough */
188 case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
189 bus_format = MEDIA_BUS_FMT_RGB666_1X18;
190 break;
191 case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
192 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
193 if (imx_ldb_ch->chno == 0 || dual)
194 ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH0_24;
195 if (imx_ldb_ch->chno == 1 || dual)
196 ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH1_24;
197 break;
198 case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
199 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
200 if (imx_ldb_ch->chno == 0 || dual)
201 ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH0_24 |
202 LDB_BIT_MAP_CH0_JEIDA;
203 if (imx_ldb_ch->chno == 1 || dual)
204 ldb->ldb_ctrl |= LDB_DATA_WIDTH_CH1_24 |
205 LDB_BIT_MAP_CH1_JEIDA;
206 break;
207 }
208
209 imx_drm_set_bus_format(encoder, bus_format);
210}
211
212static void imx_ldb_encoder_commit(struct drm_encoder *encoder)
213{ 204{
214 struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder); 205 struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
215 struct imx_ldb *ldb = imx_ldb_ch->ldb; 206 struct imx_ldb *ldb = imx_ldb_ch->ldb;
@@ -219,8 +210,13 @@ static void imx_ldb_encoder_commit(struct drm_encoder *encoder)
219 drm_panel_prepare(imx_ldb_ch->panel); 210 drm_panel_prepare(imx_ldb_ch->panel);
220 211
221 if (dual) { 212 if (dual) {
213 clk_set_parent(ldb->clk_sel[mux], ldb->clk[0]);
214 clk_set_parent(ldb->clk_sel[mux], ldb->clk[1]);
215
222 clk_prepare_enable(ldb->clk[0]); 216 clk_prepare_enable(ldb->clk[0]);
223 clk_prepare_enable(ldb->clk[1]); 217 clk_prepare_enable(ldb->clk[1]);
218 } else {
219 clk_set_parent(ldb->clk_sel[mux], ldb->clk[imx_ldb_ch->chno]);
224 } 220 }
225 221
226 if (imx_ldb_ch == &ldb->channel[0] || dual) { 222 if (imx_ldb_ch == &ldb->channel[0] || dual) {
@@ -265,6 +261,7 @@ static void imx_ldb_encoder_mode_set(struct drm_encoder *encoder,
265 unsigned long serial_clk; 261 unsigned long serial_clk;
266 unsigned long di_clk = mode->clock * 1000; 262 unsigned long di_clk = mode->clock * 1000;
267 int mux = drm_of_encoder_active_port_id(imx_ldb_ch->child, encoder); 263 int mux = drm_of_encoder_active_port_id(imx_ldb_ch->child, encoder);
264 u32 bus_format = imx_ldb_ch->bus_format;
268 265
269 if (mode->clock > 170000) { 266 if (mode->clock > 170000) {
270 dev_warn(ldb->dev, 267 dev_warn(ldb->dev,
@@ -286,18 +283,33 @@ static void imx_ldb_encoder_mode_set(struct drm_encoder *encoder,
286 } 283 }
287 284
288 /* FIXME - assumes straight connections DI0 --> CH0, DI1 --> CH1 */ 285 /* FIXME - assumes straight connections DI0 --> CH0, DI1 --> CH1 */
289 if (imx_ldb_ch == &ldb->channel[0]) { 286 if (imx_ldb_ch == &ldb->channel[0] || dual) {
290 if (mode->flags & DRM_MODE_FLAG_NVSYNC) 287 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
291 ldb->ldb_ctrl |= LDB_DI0_VS_POL_ACT_LOW; 288 ldb->ldb_ctrl |= LDB_DI0_VS_POL_ACT_LOW;
292 else if (mode->flags & DRM_MODE_FLAG_PVSYNC) 289 else if (mode->flags & DRM_MODE_FLAG_PVSYNC)
293 ldb->ldb_ctrl &= ~LDB_DI0_VS_POL_ACT_LOW; 290 ldb->ldb_ctrl &= ~LDB_DI0_VS_POL_ACT_LOW;
294 } 291 }
295 if (imx_ldb_ch == &ldb->channel[1]) { 292 if (imx_ldb_ch == &ldb->channel[1] || dual) {
296 if (mode->flags & DRM_MODE_FLAG_NVSYNC) 293 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
297 ldb->ldb_ctrl |= LDB_DI1_VS_POL_ACT_LOW; 294 ldb->ldb_ctrl |= LDB_DI1_VS_POL_ACT_LOW;
298 else if (mode->flags & DRM_MODE_FLAG_PVSYNC) 295 else if (mode->flags & DRM_MODE_FLAG_PVSYNC)
299 ldb->ldb_ctrl &= ~LDB_DI1_VS_POL_ACT_LOW; 296 ldb->ldb_ctrl &= ~LDB_DI1_VS_POL_ACT_LOW;
300 } 297 }
298
299 if (!bus_format) {
300 struct drm_connector *connector;
301
302 drm_for_each_connector(connector, encoder->dev) {
303 struct drm_display_info *di = &connector->display_info;
304
305 if (connector->encoder == encoder &&
306 di->num_bus_formats) {
307 bus_format = di->bus_formats[0];
308 break;
309 }
310 }
311 }
312 imx_ldb_ch_set_bus_format(imx_ldb_ch, bus_format);
301} 313}
302 314
303static void imx_ldb_encoder_disable(struct drm_encoder *encoder) 315static void imx_ldb_encoder_disable(struct drm_encoder *encoder)
@@ -357,11 +369,45 @@ static void imx_ldb_encoder_disable(struct drm_encoder *encoder)
357 drm_panel_unprepare(imx_ldb_ch->panel); 369 drm_panel_unprepare(imx_ldb_ch->panel);
358} 370}
359 371
372static int imx_ldb_encoder_atomic_check(struct drm_encoder *encoder,
373 struct drm_crtc_state *crtc_state,
374 struct drm_connector_state *conn_state)
375{
376 struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state);
377 struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
378 struct drm_display_info *di = &conn_state->connector->display_info;
379 u32 bus_format = imx_ldb_ch->bus_format;
380
381 /* Bus format description in DT overrides connector display info. */
382 if (!bus_format && di->num_bus_formats)
383 bus_format = di->bus_formats[0];
384 switch (bus_format) {
385 case MEDIA_BUS_FMT_RGB666_1X7X3_SPWG:
386 imx_crtc_state->bus_format = MEDIA_BUS_FMT_RGB666_1X18;
387 break;
388 case MEDIA_BUS_FMT_RGB888_1X7X4_SPWG:
389 case MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA:
390 imx_crtc_state->bus_format = MEDIA_BUS_FMT_RGB888_1X24;
391 break;
392 default:
393 return -EINVAL;
394 }
395
396 imx_crtc_state->di_hsync_pin = 2;
397 imx_crtc_state->di_vsync_pin = 3;
398
399 return 0;
400}
401
402
360static const struct drm_connector_funcs imx_ldb_connector_funcs = { 403static const struct drm_connector_funcs imx_ldb_connector_funcs = {
361 .dpms = drm_helper_connector_dpms, 404 .dpms = drm_atomic_helper_connector_dpms,
362 .fill_modes = drm_helper_probe_single_connector_modes, 405 .fill_modes = drm_helper_probe_single_connector_modes,
363 .detect = imx_ldb_connector_detect, 406 .detect = imx_ldb_connector_detect,
364 .destroy = imx_drm_connector_destroy, 407 .destroy = imx_drm_connector_destroy,
408 .reset = drm_atomic_helper_connector_reset,
409 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
410 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
365}; 411};
366 412
367static const struct drm_connector_helper_funcs imx_ldb_connector_helper_funcs = { 413static const struct drm_connector_helper_funcs imx_ldb_connector_helper_funcs = {
@@ -374,11 +420,10 @@ static const struct drm_encoder_funcs imx_ldb_encoder_funcs = {
374}; 420};
375 421
376static const struct drm_encoder_helper_funcs imx_ldb_encoder_helper_funcs = { 422static const struct drm_encoder_helper_funcs imx_ldb_encoder_helper_funcs = {
377 .dpms = imx_ldb_encoder_dpms,
378 .prepare = imx_ldb_encoder_prepare,
379 .commit = imx_ldb_encoder_commit,
380 .mode_set = imx_ldb_encoder_mode_set, 423 .mode_set = imx_ldb_encoder_mode_set,
424 .enable = imx_ldb_encoder_enable,
381 .disable = imx_ldb_encoder_disable, 425 .disable = imx_ldb_encoder_disable,
426 .atomic_check = imx_ldb_encoder_atomic_check,
382}; 427};
383 428
384static int imx_ldb_get_clk(struct imx_ldb *ldb, int chno) 429static int imx_ldb_get_clk(struct imx_ldb *ldb, int chno)
@@ -400,10 +445,10 @@ static int imx_ldb_register(struct drm_device *drm,
400 struct imx_ldb_channel *imx_ldb_ch) 445 struct imx_ldb_channel *imx_ldb_ch)
401{ 446{
402 struct imx_ldb *ldb = imx_ldb_ch->ldb; 447 struct imx_ldb *ldb = imx_ldb_ch->ldb;
448 struct drm_encoder *encoder = &imx_ldb_ch->encoder;
403 int ret; 449 int ret;
404 450
405 ret = imx_drm_encoder_parse_of(drm, &imx_ldb_ch->encoder, 451 ret = imx_drm_encoder_parse_of(drm, encoder, imx_ldb_ch->child);
406 imx_ldb_ch->child);
407 if (ret) 452 if (ret)
408 return ret; 453 return ret;
409 454
@@ -417,9 +462,8 @@ static int imx_ldb_register(struct drm_device *drm,
417 return ret; 462 return ret;
418 } 463 }
419 464
420 drm_encoder_helper_add(&imx_ldb_ch->encoder, 465 drm_encoder_helper_add(encoder, &imx_ldb_encoder_helper_funcs);
421 &imx_ldb_encoder_helper_funcs); 466 drm_encoder_init(drm, encoder, &imx_ldb_encoder_funcs,
422 drm_encoder_init(drm, &imx_ldb_ch->encoder, &imx_ldb_encoder_funcs,
423 DRM_MODE_ENCODER_LVDS, NULL); 467 DRM_MODE_ENCODER_LVDS, NULL);
424 468
425 drm_connector_helper_add(&imx_ldb_ch->connector, 469 drm_connector_helper_add(&imx_ldb_ch->connector,
@@ -427,11 +471,14 @@ static int imx_ldb_register(struct drm_device *drm,
427 drm_connector_init(drm, &imx_ldb_ch->connector, 471 drm_connector_init(drm, &imx_ldb_ch->connector,
428 &imx_ldb_connector_funcs, DRM_MODE_CONNECTOR_LVDS); 472 &imx_ldb_connector_funcs, DRM_MODE_CONNECTOR_LVDS);
429 473
430 if (imx_ldb_ch->panel) 474 if (imx_ldb_ch->panel) {
431 drm_panel_attach(imx_ldb_ch->panel, &imx_ldb_ch->connector); 475 ret = drm_panel_attach(imx_ldb_ch->panel,
476 &imx_ldb_ch->connector);
477 if (ret)
478 return ret;
479 }
432 480
433 drm_mode_connector_attach_encoder(&imx_ldb_ch->connector, 481 drm_mode_connector_attach_encoder(&imx_ldb_ch->connector, encoder);
434 &imx_ldb_ch->encoder);
435 482
436 return 0; 483 return 0;
437} 484}
@@ -560,6 +607,7 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
560 struct imx_ldb_channel *channel; 607 struct imx_ldb_channel *channel;
561 struct device_node *ddc_node; 608 struct device_node *ddc_node;
562 struct device_node *ep; 609 struct device_node *ep;
610 int bus_format;
563 611
564 ret = of_property_read_u32(child, "reg", &i); 612 ret = of_property_read_u32(child, "reg", &i);
565 if (ret || i < 0 || i > 1) 613 if (ret || i < 0 || i > 1)
@@ -632,21 +680,22 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
632 } 680 }
633 } 681 }
634 682
635 channel->bus_format = of_get_bus_format(dev, child); 683 bus_format = of_get_bus_format(dev, child);
636 if (channel->bus_format == -EINVAL) { 684 if (bus_format == -EINVAL) {
637 /* 685 /*
638 * If no bus format was specified in the device tree, 686 * If no bus format was specified in the device tree,
639 * we can still get it from the connected panel later. 687 * we can still get it from the connected panel later.
640 */ 688 */
641 if (channel->panel && channel->panel->funcs && 689 if (channel->panel && channel->panel->funcs &&
642 channel->panel->funcs->get_modes) 690 channel->panel->funcs->get_modes)
643 channel->bus_format = 0; 691 bus_format = 0;
644 } 692 }
645 if (channel->bus_format < 0) { 693 if (bus_format < 0) {
646 dev_err(dev, "could not determine data mapping: %d\n", 694 dev_err(dev, "could not determine data mapping: %d\n",
647 channel->bus_format); 695 bus_format);
648 return channel->bus_format; 696 return bus_format;
649 } 697 }
698 channel->bus_format = bus_format;
650 699
651 ret = imx_ldb_register(drm, channel); 700 ret = imx_ldb_register(drm, channel);
652 if (ret) 701 if (ret)
diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index baf788121287..5e875944ffa2 100644
--- a/drivers/gpu/drm/imx/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
@@ -23,6 +23,7 @@
23#include <linux/spinlock.h> 23#include <linux/spinlock.h>
24#include <linux/videodev2.h> 24#include <linux/videodev2.h>
25#include <drm/drmP.h> 25#include <drm/drmP.h>
26#include <drm/drm_atomic_helper.h>
26#include <drm/drm_fb_helper.h> 27#include <drm/drm_fb_helper.h>
27#include <drm/drm_crtc_helper.h> 28#include <drm/drm_crtc_helper.h>
28#include <video/imx-ipu-v3.h> 29#include <video/imx-ipu-v3.h>
@@ -97,9 +98,6 @@
97/* TVE_TST_MODE_REG */ 98/* TVE_TST_MODE_REG */
98#define TVE_TVDAC_TEST_MODE_MASK (0x7 << 0) 99#define TVE_TVDAC_TEST_MODE_MASK (0x7 << 0)
99 100
100#define con_to_tve(x) container_of(x, struct imx_tve, connector)
101#define enc_to_tve(x) container_of(x, struct imx_tve, encoder)
102
103enum { 101enum {
104 TVE_MODE_TVOUT, 102 TVE_MODE_TVOUT,
105 TVE_MODE_VGA, 103 TVE_MODE_VGA,
@@ -112,6 +110,8 @@ struct imx_tve {
112 spinlock_t lock; /* register lock */ 110 spinlock_t lock; /* register lock */
113 bool enabled; 111 bool enabled;
114 int mode; 112 int mode;
113 int di_hsync_pin;
114 int di_vsync_pin;
115 115
116 struct regmap *regmap; 116 struct regmap *regmap;
117 struct regulator *dac_reg; 117 struct regulator *dac_reg;
@@ -120,10 +120,18 @@ struct imx_tve {
120 struct clk *di_sel_clk; 120 struct clk *di_sel_clk;
121 struct clk_hw clk_hw_di; 121 struct clk_hw clk_hw_di;
122 struct clk *di_clk; 122 struct clk *di_clk;
123 int vsync_pin;
124 int hsync_pin;
125}; 123};
126 124
125static inline struct imx_tve *con_to_tve(struct drm_connector *c)
126{
127 return container_of(c, struct imx_tve, connector);
128}
129
130static inline struct imx_tve *enc_to_tve(struct drm_encoder *e)
131{
132 return container_of(e, struct imx_tve, encoder);
133}
134
127static void tve_lock(void *__tve) 135static void tve_lock(void *__tve)
128__acquires(&tve->lock) 136__acquires(&tve->lock)
129{ 137{
@@ -148,8 +156,7 @@ static void tve_enable(struct imx_tve *tve)
148 tve->enabled = true; 156 tve->enabled = true;
149 clk_prepare_enable(tve->clk); 157 clk_prepare_enable(tve->clk);
150 ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG, 158 ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG,
151 TVE_IPU_CLK_EN | TVE_EN, 159 TVE_EN, TVE_EN);
152 TVE_IPU_CLK_EN | TVE_EN);
153 } 160 }
154 161
155 /* clear interrupt status register */ 162 /* clear interrupt status register */
@@ -172,7 +179,7 @@ static void tve_disable(struct imx_tve *tve)
172 if (tve->enabled) { 179 if (tve->enabled) {
173 tve->enabled = false; 180 tve->enabled = false;
174 ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG, 181 ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG,
175 TVE_IPU_CLK_EN | TVE_EN, 0); 182 TVE_EN, 0);
176 clk_disable_unprepare(tve->clk); 183 clk_disable_unprepare(tve->clk);
177 } 184 }
178} 185}
@@ -275,36 +282,6 @@ static struct drm_encoder *imx_tve_connector_best_encoder(
275 return &tve->encoder; 282 return &tve->encoder;
276} 283}
277 284
278static void imx_tve_encoder_dpms(struct drm_encoder *encoder, int mode)
279{
280 struct imx_tve *tve = enc_to_tve(encoder);
281 int ret;
282
283 ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG,
284 TVE_TV_OUT_MODE_MASK, TVE_TV_OUT_DISABLE);
285 if (ret < 0)
286 dev_err(tve->dev, "failed to disable TVOUT: %d\n", ret);
287}
288
289static void imx_tve_encoder_prepare(struct drm_encoder *encoder)
290{
291 struct imx_tve *tve = enc_to_tve(encoder);
292
293 tve_disable(tve);
294
295 switch (tve->mode) {
296 case TVE_MODE_VGA:
297 imx_drm_set_bus_config(encoder, MEDIA_BUS_FMT_GBR888_1X24,
298 tve->hsync_pin, tve->vsync_pin,
299 DRM_BUS_FLAG_DE_HIGH |
300 DRM_BUS_FLAG_PIXDATA_NEGEDGE);
301 break;
302 case TVE_MODE_TVOUT:
303 imx_drm_set_bus_format(encoder, MEDIA_BUS_FMT_YUV8_1X24);
304 break;
305 }
306}
307
308static void imx_tve_encoder_mode_set(struct drm_encoder *encoder, 285static void imx_tve_encoder_mode_set(struct drm_encoder *encoder,
309 struct drm_display_mode *orig_mode, 286 struct drm_display_mode *orig_mode,
310 struct drm_display_mode *mode) 287 struct drm_display_mode *mode)
@@ -333,6 +310,9 @@ static void imx_tve_encoder_mode_set(struct drm_encoder *encoder,
333 ret); 310 ret);
334 } 311 }
335 312
313 regmap_update_bits(tve->regmap, TVE_COM_CONF_REG,
314 TVE_IPU_CLK_EN, TVE_IPU_CLK_EN);
315
336 if (tve->mode == TVE_MODE_VGA) 316 if (tve->mode == TVE_MODE_VGA)
337 ret = tve_setup_vga(tve); 317 ret = tve_setup_vga(tve);
338 else 318 else
@@ -341,7 +321,7 @@ static void imx_tve_encoder_mode_set(struct drm_encoder *encoder,
341 dev_err(tve->dev, "failed to set configuration: %d\n", ret); 321 dev_err(tve->dev, "failed to set configuration: %d\n", ret);
342} 322}
343 323
344static void imx_tve_encoder_commit(struct drm_encoder *encoder) 324static void imx_tve_encoder_enable(struct drm_encoder *encoder)
345{ 325{
346 struct imx_tve *tve = enc_to_tve(encoder); 326 struct imx_tve *tve = enc_to_tve(encoder);
347 327
@@ -355,11 +335,28 @@ static void imx_tve_encoder_disable(struct drm_encoder *encoder)
355 tve_disable(tve); 335 tve_disable(tve);
356} 336}
357 337
338static int imx_tve_atomic_check(struct drm_encoder *encoder,
339 struct drm_crtc_state *crtc_state,
340 struct drm_connector_state *conn_state)
341{
342 struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state);
343 struct imx_tve *tve = enc_to_tve(encoder);
344
345 imx_crtc_state->bus_format = MEDIA_BUS_FMT_GBR888_1X24;
346 imx_crtc_state->di_hsync_pin = tve->di_hsync_pin;
347 imx_crtc_state->di_vsync_pin = tve->di_vsync_pin;
348
349 return 0;
350}
351
358static const struct drm_connector_funcs imx_tve_connector_funcs = { 352static const struct drm_connector_funcs imx_tve_connector_funcs = {
359 .dpms = drm_helper_connector_dpms, 353 .dpms = drm_atomic_helper_connector_dpms,
360 .fill_modes = drm_helper_probe_single_connector_modes, 354 .fill_modes = drm_helper_probe_single_connector_modes,
361 .detect = imx_tve_connector_detect, 355 .detect = imx_tve_connector_detect,
362 .destroy = imx_drm_connector_destroy, 356 .destroy = imx_drm_connector_destroy,
357 .reset = drm_atomic_helper_connector_reset,
358 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
359 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
363}; 360};
364 361
365static const struct drm_connector_helper_funcs imx_tve_connector_helper_funcs = { 362static const struct drm_connector_helper_funcs imx_tve_connector_helper_funcs = {
@@ -373,11 +370,10 @@ static const struct drm_encoder_funcs imx_tve_encoder_funcs = {
373}; 370};
374 371
375static const struct drm_encoder_helper_funcs imx_tve_encoder_helper_funcs = { 372static const struct drm_encoder_helper_funcs imx_tve_encoder_helper_funcs = {
376 .dpms = imx_tve_encoder_dpms,
377 .prepare = imx_tve_encoder_prepare,
378 .mode_set = imx_tve_encoder_mode_set, 373 .mode_set = imx_tve_encoder_mode_set,
379 .commit = imx_tve_encoder_commit, 374 .enable = imx_tve_encoder_enable,
380 .disable = imx_tve_encoder_disable, 375 .disable = imx_tve_encoder_disable,
376 .atomic_check = imx_tve_atomic_check,
381}; 377};
382 378
383static irqreturn_t imx_tve_irq_handler(int irq, void *data) 379static irqreturn_t imx_tve_irq_handler(int irq, void *data)
@@ -495,8 +491,7 @@ static int imx_tve_register(struct drm_device *drm, struct imx_tve *tve)
495 encoder_type = tve->mode == TVE_MODE_VGA ? 491 encoder_type = tve->mode == TVE_MODE_VGA ?
496 DRM_MODE_ENCODER_DAC : DRM_MODE_ENCODER_TVDAC; 492 DRM_MODE_ENCODER_DAC : DRM_MODE_ENCODER_TVDAC;
497 493
498 ret = imx_drm_encoder_parse_of(drm, &tve->encoder, 494 ret = imx_drm_encoder_parse_of(drm, &tve->encoder, tve->dev->of_node);
499 tve->dev->of_node);
500 if (ret) 495 if (ret)
501 return ret; 496 return ret;
502 497
@@ -587,15 +582,15 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data)
587 582
588 if (tve->mode == TVE_MODE_VGA) { 583 if (tve->mode == TVE_MODE_VGA) {
589 ret = of_property_read_u32(np, "fsl,hsync-pin", 584 ret = of_property_read_u32(np, "fsl,hsync-pin",
590 &tve->hsync_pin); 585 &tve->di_hsync_pin);
591 586
592 if (ret < 0) { 587 if (ret < 0) {
593 dev_err(dev, "failed to get vsync pin\n"); 588 dev_err(dev, "failed to get hsync pin\n");
594 return ret; 589 return ret;
595 } 590 }
596 591
597 ret |= of_property_read_u32(np, "fsl,vsync-pin", 592 ret = of_property_read_u32(np, "fsl,vsync-pin",
598 &tve->vsync_pin); 593 &tve->di_vsync_pin);
599 594
600 if (ret < 0) { 595 if (ret < 0) {
601 dev_err(dev, "failed to get vsync pin\n"); 596 dev_err(dev, "failed to get vsync pin\n");
@@ -633,7 +628,9 @@ static int imx_tve_bind(struct device *dev, struct device *master, void *data)
633 628
634 tve->dac_reg = devm_regulator_get(dev, "dac"); 629 tve->dac_reg = devm_regulator_get(dev, "dac");
635 if (!IS_ERR(tve->dac_reg)) { 630 if (!IS_ERR(tve->dac_reg)) {
636 regulator_set_voltage(tve->dac_reg, 2750000, 2750000); 631 ret = regulator_set_voltage(tve->dac_reg, 2750000, 2750000);
632 if (ret)
633 return ret;
637 ret = regulator_enable(tve->dac_reg); 634 ret = regulator_enable(tve->dac_reg);
638 if (ret) 635 if (ret)
639 return ret; 636 return ret;
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index fc040417e1e8..08e188bc10fc 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -18,12 +18,12 @@
18#include <linux/device.h> 18#include <linux/device.h>
19#include <linux/platform_device.h> 19#include <linux/platform_device.h>
20#include <drm/drmP.h> 20#include <drm/drmP.h>
21#include <drm/drm_atomic.h>
22#include <drm/drm_atomic_helper.h>
21#include <drm/drm_crtc_helper.h> 23#include <drm/drm_crtc_helper.h>
22#include <linux/fb.h> 24#include <linux/fb.h>
23#include <linux/clk.h> 25#include <linux/clk.h>
24#include <linux/errno.h> 26#include <linux/errno.h>
25#include <linux/reservation.h>
26#include <linux/dma-buf.h>
27#include <drm/drm_gem_cma_helper.h> 27#include <drm/drm_gem_cma_helper.h>
28#include <drm/drm_fb_cma_helper.h> 28#include <drm/drm_fb_cma_helper.h>
29 29
@@ -33,23 +33,6 @@
33 33
34#define DRIVER_DESC "i.MX IPUv3 Graphics" 34#define DRIVER_DESC "i.MX IPUv3 Graphics"
35 35
36enum ipu_flip_status {
37 IPU_FLIP_NONE,
38 IPU_FLIP_PENDING,
39 IPU_FLIP_SUBMITTED,
40};
41
42struct ipu_flip_work {
43 struct work_struct unref_work;
44 struct drm_gem_object *bo;
45 struct drm_pending_vblank_event *page_flip_event;
46 struct work_struct fence_work;
47 struct ipu_crtc *crtc;
48 struct fence *excl;
49 unsigned shared_count;
50 struct fence **shared;
51};
52
53struct ipu_crtc { 36struct ipu_crtc {
54 struct device *dev; 37 struct device *dev;
55 struct drm_crtc base; 38 struct drm_crtc base;
@@ -60,201 +43,166 @@ struct ipu_crtc {
60 43
61 struct ipu_dc *dc; 44 struct ipu_dc *dc;
62 struct ipu_di *di; 45 struct ipu_di *di;
63 int enabled;
64 enum ipu_flip_status flip_state;
65 struct workqueue_struct *flip_queue;
66 struct ipu_flip_work *flip_work;
67 int irq; 46 int irq;
68 u32 bus_format;
69 u32 bus_flags;
70 int di_hsync_pin;
71 int di_vsync_pin;
72}; 47};
73 48
74#define to_ipu_crtc(x) container_of(x, struct ipu_crtc, base) 49static inline struct ipu_crtc *to_ipu_crtc(struct drm_crtc *crtc)
50{
51 return container_of(crtc, struct ipu_crtc, base);
52}
75 53
76static void ipu_fb_enable(struct ipu_crtc *ipu_crtc) 54static void ipu_crtc_enable(struct drm_crtc *crtc)
77{ 55{
56 struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
78 struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent); 57 struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
79 58
80 if (ipu_crtc->enabled)
81 return;
82
83 ipu_dc_enable(ipu); 59 ipu_dc_enable(ipu);
84 ipu_plane_enable(ipu_crtc->plane[0]);
85 /* Start DC channel and DI after IDMAC */
86 ipu_dc_enable_channel(ipu_crtc->dc); 60 ipu_dc_enable_channel(ipu_crtc->dc);
87 ipu_di_enable(ipu_crtc->di); 61 ipu_di_enable(ipu_crtc->di);
88 drm_crtc_vblank_on(&ipu_crtc->base);
89
90 ipu_crtc->enabled = 1;
91} 62}
92 63
93static void ipu_fb_disable(struct ipu_crtc *ipu_crtc) 64static void ipu_crtc_disable(struct drm_crtc *crtc)
94{ 65{
66 struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
95 struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent); 67 struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent);
96 68
97 if (!ipu_crtc->enabled)
98 return;
99
100 /* Stop DC channel and DI before IDMAC */
101 ipu_dc_disable_channel(ipu_crtc->dc); 69 ipu_dc_disable_channel(ipu_crtc->dc);
102 ipu_di_disable(ipu_crtc->di); 70 ipu_di_disable(ipu_crtc->di);
103 ipu_plane_disable(ipu_crtc->plane[0]);
104 ipu_dc_disable(ipu); 71 ipu_dc_disable(ipu);
105 drm_crtc_vblank_off(&ipu_crtc->base);
106 72
107 ipu_crtc->enabled = 0; 73 spin_lock_irq(&crtc->dev->event_lock);
74 if (crtc->state->event) {
75 drm_crtc_send_vblank_event(crtc, crtc->state->event);
76 crtc->state->event = NULL;
77 }
78 spin_unlock_irq(&crtc->dev->event_lock);
108} 79}
109 80
110static void ipu_crtc_dpms(struct drm_crtc *crtc, int mode) 81static void imx_drm_crtc_reset(struct drm_crtc *crtc)
111{ 82{
112 struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); 83 struct imx_crtc_state *state;
113 84
114 dev_dbg(ipu_crtc->dev, "%s mode: %d\n", __func__, mode); 85 if (crtc->state) {
115 86 if (crtc->state->mode_blob)
116 switch (mode) { 87 drm_property_unreference_blob(crtc->state->mode_blob);
117 case DRM_MODE_DPMS_ON: 88
118 ipu_fb_enable(ipu_crtc); 89 state = to_imx_crtc_state(crtc->state);
119 break; 90 memset(state, 0, sizeof(*state));
120 case DRM_MODE_DPMS_STANDBY: 91 } else {
121 case DRM_MODE_DPMS_SUSPEND: 92 state = kzalloc(sizeof(*state), GFP_KERNEL);
122 case DRM_MODE_DPMS_OFF: 93 if (!state)
123 ipu_fb_disable(ipu_crtc); 94 return;
124 break; 95 crtc->state = &state->base;
125 } 96 }
97
98 state->base.crtc = crtc;
126} 99}
127 100
128static void ipu_flip_unref_work_func(struct work_struct *__work) 101static struct drm_crtc_state *imx_drm_crtc_duplicate_state(struct drm_crtc *crtc)
129{ 102{
130 struct ipu_flip_work *work = 103 struct imx_crtc_state *state;
131 container_of(__work, struct ipu_flip_work, unref_work); 104
105 state = kzalloc(sizeof(*state), GFP_KERNEL);
106 if (!state)
107 return NULL;
132 108
133 drm_gem_object_unreference_unlocked(work->bo); 109 __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
134 kfree(work); 110
111 WARN_ON(state->base.crtc != crtc);
112 state->base.crtc = crtc;
113
114 return &state->base;
135} 115}
136 116
137static void ipu_flip_fence_work_func(struct work_struct *__work) 117static void imx_drm_crtc_destroy_state(struct drm_crtc *crtc,
118 struct drm_crtc_state *state)
138{ 119{
139 struct ipu_flip_work *work = 120 __drm_atomic_helper_crtc_destroy_state(state);
140 container_of(__work, struct ipu_flip_work, fence_work); 121 kfree(to_imx_crtc_state(state));
141 int i; 122}
142 123
143 /* wait for all fences attached to the FB obj to signal */ 124static const struct drm_crtc_funcs ipu_crtc_funcs = {
144 if (work->excl) { 125 .set_config = drm_atomic_helper_set_config,
145 fence_wait(work->excl, false); 126 .destroy = drm_crtc_cleanup,
146 fence_put(work->excl); 127 .page_flip = drm_atomic_helper_page_flip,
147 } 128 .reset = imx_drm_crtc_reset,
148 for (i = 0; i < work->shared_count; i++) { 129 .atomic_duplicate_state = imx_drm_crtc_duplicate_state,
149 fence_wait(work->shared[i], false); 130 .atomic_destroy_state = imx_drm_crtc_destroy_state,
150 fence_put(work->shared[i]); 131};
151 }
152 132
153 work->crtc->flip_state = IPU_FLIP_SUBMITTED; 133static irqreturn_t ipu_irq_handler(int irq, void *dev_id)
134{
135 struct ipu_crtc *ipu_crtc = dev_id;
136
137 imx_drm_handle_vblank(ipu_crtc->imx_crtc);
138
139 return IRQ_HANDLED;
154} 140}
155 141
156static int ipu_page_flip(struct drm_crtc *crtc, 142static bool ipu_crtc_mode_fixup(struct drm_crtc *crtc,
157 struct drm_framebuffer *fb, 143 const struct drm_display_mode *mode,
158 struct drm_pending_vblank_event *event, 144 struct drm_display_mode *adjusted_mode)
159 uint32_t page_flip_flags)
160{ 145{
161 struct drm_gem_cma_object *cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
162 struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); 146 struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
163 struct ipu_flip_work *flip_work; 147 struct videomode vm;
164 int ret; 148 int ret;
165 149
166 if (ipu_crtc->flip_state != IPU_FLIP_NONE) 150 drm_display_mode_to_videomode(adjusted_mode, &vm);
167 return -EBUSY;
168
169 ret = imx_drm_crtc_vblank_get(ipu_crtc->imx_crtc);
170 if (ret) {
171 dev_dbg(ipu_crtc->dev, "failed to acquire vblank counter\n");
172 list_del(&event->base.link);
173
174 return ret;
175 }
176 151
177 flip_work = kzalloc(sizeof *flip_work, GFP_KERNEL); 152 ret = ipu_di_adjust_videomode(ipu_crtc->di, &vm);
178 if (!flip_work) { 153 if (ret)
179 ret = -ENOMEM; 154 return false;
180 goto put_vblank;
181 }
182 INIT_WORK(&flip_work->unref_work, ipu_flip_unref_work_func);
183 flip_work->page_flip_event = event;
184 155
185 /* get BO backing the old framebuffer and take a reference */ 156 if ((vm.vsync_len == 0) || (vm.hsync_len == 0))
186 flip_work->bo = &drm_fb_cma_get_gem_obj(crtc->primary->fb, 0)->base; 157 return false;
187 drm_gem_object_reference(flip_work->bo);
188 158
189 ipu_crtc->flip_work = flip_work; 159 drm_display_mode_from_videomode(&vm, adjusted_mode);
190 /*
191 * If the object has a DMABUF attached, we need to wait on its fences
192 * if there are any.
193 */
194 if (cma_obj->base.dma_buf) {
195 INIT_WORK(&flip_work->fence_work, ipu_flip_fence_work_func);
196 flip_work->crtc = ipu_crtc;
197 160
198 ret = reservation_object_get_fences_rcu( 161 return true;
199 cma_obj->base.dma_buf->resv, &flip_work->excl, 162}
200 &flip_work->shared_count, &flip_work->shared);
201 163
202 if (unlikely(ret)) { 164static int ipu_crtc_atomic_check(struct drm_crtc *crtc,
203 DRM_ERROR("failed to get fences for buffer\n"); 165 struct drm_crtc_state *state)
204 goto free_flip_work; 166{
205 } 167 u32 primary_plane_mask = 1 << drm_plane_index(crtc->primary);
206 168
207 /* No need to queue the worker if the are no fences */ 169 if (state->active && (primary_plane_mask & state->plane_mask) == 0)
208 if (!flip_work->excl && !flip_work->shared_count) { 170 return -EINVAL;
209 ipu_crtc->flip_state = IPU_FLIP_SUBMITTED;
210 } else {
211 ipu_crtc->flip_state = IPU_FLIP_PENDING;
212 queue_work(ipu_crtc->flip_queue,
213 &flip_work->fence_work);
214 }
215 } else {
216 ipu_crtc->flip_state = IPU_FLIP_SUBMITTED;
217 }
218 171
219 return 0; 172 return 0;
220
221free_flip_work:
222 drm_gem_object_unreference_unlocked(flip_work->bo);
223 kfree(flip_work);
224 ipu_crtc->flip_work = NULL;
225put_vblank:
226 imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc);
227
228 return ret;
229} 173}
230 174
231static const struct drm_crtc_funcs ipu_crtc_funcs = { 175static void ipu_crtc_atomic_begin(struct drm_crtc *crtc,
232 .set_config = drm_crtc_helper_set_config, 176 struct drm_crtc_state *old_crtc_state)
233 .destroy = drm_crtc_cleanup, 177{
234 .page_flip = ipu_page_flip, 178 spin_lock_irq(&crtc->dev->event_lock);
235}; 179 if (crtc->state->event) {
180 WARN_ON(drm_crtc_vblank_get(crtc));
181 drm_crtc_arm_vblank_event(crtc, crtc->state->event);
182 crtc->state->event = NULL;
183 }
184 spin_unlock_irq(&crtc->dev->event_lock);
185}
236 186
237static int ipu_crtc_mode_set(struct drm_crtc *crtc, 187static void ipu_crtc_mode_set_nofb(struct drm_crtc *crtc)
238 struct drm_display_mode *orig_mode,
239 struct drm_display_mode *mode,
240 int x, int y,
241 struct drm_framebuffer *old_fb)
242{ 188{
243 struct drm_device *dev = crtc->dev; 189 struct drm_device *dev = crtc->dev;
244 struct drm_encoder *encoder; 190 struct drm_encoder *encoder;
245 struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc); 191 struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
192 struct drm_display_mode *mode = &crtc->state->adjusted_mode;
193 struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc->state);
246 struct ipu_di_signal_cfg sig_cfg = {}; 194 struct ipu_di_signal_cfg sig_cfg = {};
247 unsigned long encoder_types = 0; 195 unsigned long encoder_types = 0;
248 int ret;
249 196
250 dev_dbg(ipu_crtc->dev, "%s: mode->hdisplay: %d\n", __func__, 197 dev_dbg(ipu_crtc->dev, "%s: mode->hdisplay: %d\n", __func__,
251 mode->hdisplay); 198 mode->hdisplay);
252 dev_dbg(ipu_crtc->dev, "%s: mode->vdisplay: %d\n", __func__, 199 dev_dbg(ipu_crtc->dev, "%s: mode->vdisplay: %d\n", __func__,
253 mode->vdisplay); 200 mode->vdisplay);
254 201
255 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) 202 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
256 if (encoder->crtc == crtc) 203 if (encoder->crtc == crtc)
257 encoder_types |= BIT(encoder->encoder_type); 204 encoder_types |= BIT(encoder->encoder_type);
205 }
258 206
259 dev_dbg(ipu_crtc->dev, "%s: attached to encoder types 0x%lx\n", 207 dev_dbg(ipu_crtc->dev, "%s: attached to encoder types 0x%lx\n",
260 __func__, encoder_types); 208 __func__, encoder_types);
@@ -272,114 +220,30 @@ static int ipu_crtc_mode_set(struct drm_crtc *crtc,
272 else 220 else
273 sig_cfg.clkflags = 0; 221 sig_cfg.clkflags = 0;
274 222
275 sig_cfg.enable_pol = !(ipu_crtc->bus_flags & DRM_BUS_FLAG_DE_LOW); 223 sig_cfg.enable_pol = !(imx_crtc_state->bus_flags & DRM_BUS_FLAG_DE_LOW);
276 /* Default to driving pixel data on negative clock edges */ 224 /* Default to driving pixel data on negative clock edges */
277 sig_cfg.clk_pol = !!(ipu_crtc->bus_flags & 225 sig_cfg.clk_pol = !!(imx_crtc_state->bus_flags &
278 DRM_BUS_FLAG_PIXDATA_POSEDGE); 226 DRM_BUS_FLAG_PIXDATA_POSEDGE);
279 sig_cfg.bus_format = ipu_crtc->bus_format; 227 sig_cfg.bus_format = imx_crtc_state->bus_format;
280 sig_cfg.v_to_h_sync = 0; 228 sig_cfg.v_to_h_sync = 0;
281 sig_cfg.hsync_pin = ipu_crtc->di_hsync_pin; 229 sig_cfg.hsync_pin = imx_crtc_state->di_hsync_pin;
282 sig_cfg.vsync_pin = ipu_crtc->di_vsync_pin; 230 sig_cfg.vsync_pin = imx_crtc_state->di_vsync_pin;
283 231
284 drm_display_mode_to_videomode(mode, &sig_cfg.mode); 232 drm_display_mode_to_videomode(mode, &sig_cfg.mode);
285 233
286 ret = ipu_dc_init_sync(ipu_crtc->dc, ipu_crtc->di, 234 ipu_dc_init_sync(ipu_crtc->dc, ipu_crtc->di,
287 mode->flags & DRM_MODE_FLAG_INTERLACE, 235 mode->flags & DRM_MODE_FLAG_INTERLACE,
288 ipu_crtc->bus_format, mode->hdisplay); 236 imx_crtc_state->bus_format, mode->hdisplay);
289 if (ret) { 237 ipu_di_init_sync_panel(ipu_crtc->di, &sig_cfg);
290 dev_err(ipu_crtc->dev,
291 "initializing display controller failed with %d\n",
292 ret);
293 return ret;
294 }
295
296 ret = ipu_di_init_sync_panel(ipu_crtc->di, &sig_cfg);
297 if (ret) {
298 dev_err(ipu_crtc->dev,
299 "initializing panel failed with %d\n", ret);
300 return ret;
301 }
302
303 return ipu_plane_mode_set(ipu_crtc->plane[0], crtc, mode,
304 crtc->primary->fb,
305 0, 0, mode->hdisplay, mode->vdisplay,
306 x, y, mode->hdisplay, mode->vdisplay,
307 mode->flags & DRM_MODE_FLAG_INTERLACE);
308}
309
310static void ipu_crtc_handle_pageflip(struct ipu_crtc *ipu_crtc)
311{
312 unsigned long flags;
313 struct drm_device *drm = ipu_crtc->base.dev;
314 struct ipu_flip_work *work = ipu_crtc->flip_work;
315
316 spin_lock_irqsave(&drm->event_lock, flags);
317 if (work->page_flip_event)
318 drm_crtc_send_vblank_event(&ipu_crtc->base,
319 work->page_flip_event);
320 imx_drm_crtc_vblank_put(ipu_crtc->imx_crtc);
321 spin_unlock_irqrestore(&drm->event_lock, flags);
322}
323
324static irqreturn_t ipu_irq_handler(int irq, void *dev_id)
325{
326 struct ipu_crtc *ipu_crtc = dev_id;
327
328 imx_drm_handle_vblank(ipu_crtc->imx_crtc);
329
330 if (ipu_crtc->flip_state == IPU_FLIP_SUBMITTED) {
331 struct ipu_plane *plane = ipu_crtc->plane[0];
332
333 ipu_plane_set_base(plane, ipu_crtc->base.primary->fb,
334 plane->x, plane->y);
335 ipu_crtc_handle_pageflip(ipu_crtc);
336 queue_work(ipu_crtc->flip_queue,
337 &ipu_crtc->flip_work->unref_work);
338 ipu_crtc->flip_state = IPU_FLIP_NONE;
339 }
340
341 return IRQ_HANDLED;
342}
343
344static bool ipu_crtc_mode_fixup(struct drm_crtc *crtc,
345 const struct drm_display_mode *mode,
346 struct drm_display_mode *adjusted_mode)
347{
348 struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
349 struct videomode vm;
350 int ret;
351
352 drm_display_mode_to_videomode(adjusted_mode, &vm);
353
354 ret = ipu_di_adjust_videomode(ipu_crtc->di, &vm);
355 if (ret)
356 return false;
357
358 drm_display_mode_from_videomode(&vm, adjusted_mode);
359
360 return true;
361}
362
363static void ipu_crtc_prepare(struct drm_crtc *crtc)
364{
365 struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
366
367 ipu_fb_disable(ipu_crtc);
368}
369
370static void ipu_crtc_commit(struct drm_crtc *crtc)
371{
372 struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
373
374 ipu_fb_enable(ipu_crtc);
375} 238}
376 239
377static const struct drm_crtc_helper_funcs ipu_helper_funcs = {
378 	.dpms = ipu_crtc_dpms,
379 	.mode_fixup = ipu_crtc_mode_fixup,
380 	.mode_set = ipu_crtc_mode_set,
381 	.prepare = ipu_crtc_prepare,
382 	.commit = ipu_crtc_commit,
383};
240static const struct drm_crtc_helper_funcs ipu_helper_funcs = {
241 	.mode_fixup = ipu_crtc_mode_fixup,
242 	.mode_set_nofb = ipu_crtc_mode_set_nofb,
243 	.atomic_check = ipu_crtc_atomic_check,
244 	.atomic_begin = ipu_crtc_atomic_begin,
245 	.disable = ipu_crtc_disable,
246 	.enable = ipu_crtc_enable,
247};
384 248
385static int ipu_enable_vblank(struct drm_crtc *crtc) 249static int ipu_enable_vblank(struct drm_crtc *crtc)
@@ -398,23 +262,9 @@ static void ipu_disable_vblank(struct drm_crtc *crtc)
398 disable_irq_nosync(ipu_crtc->irq); 262 disable_irq_nosync(ipu_crtc->irq);
399} 263}
400 264
401static int ipu_set_interface_pix_fmt(struct drm_crtc *crtc,
402 u32 bus_format, int hsync_pin, int vsync_pin, u32 bus_flags)
403{
404 struct ipu_crtc *ipu_crtc = to_ipu_crtc(crtc);
405
406 ipu_crtc->bus_format = bus_format;
407 ipu_crtc->bus_flags = bus_flags;
408 ipu_crtc->di_hsync_pin = hsync_pin;
409 ipu_crtc->di_vsync_pin = vsync_pin;
410
411 return 0;
412}
413
414static const struct imx_drm_crtc_helper_funcs ipu_crtc_helper_funcs = { 265static const struct imx_drm_crtc_helper_funcs ipu_crtc_helper_funcs = {
415 .enable_vblank = ipu_enable_vblank, 266 .enable_vblank = ipu_enable_vblank,
416 .disable_vblank = ipu_disable_vblank, 267 .disable_vblank = ipu_disable_vblank,
417 .set_interface_pix_fmt = ipu_set_interface_pix_fmt,
418 .crtc_funcs = &ipu_crtc_funcs, 268 .crtc_funcs = &ipu_crtc_funcs,
419 .crtc_helper_funcs = &ipu_helper_funcs, 269 .crtc_helper_funcs = &ipu_helper_funcs,
420}; 270};
@@ -496,8 +346,16 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
496 IPU_DP_FLOW_SYNC_FG, 346 IPU_DP_FLOW_SYNC_FG,
497 drm_crtc_mask(&ipu_crtc->base), 347 drm_crtc_mask(&ipu_crtc->base),
498 DRM_PLANE_TYPE_OVERLAY); 348 DRM_PLANE_TYPE_OVERLAY);
499 if (IS_ERR(ipu_crtc->plane[1])) 349 if (IS_ERR(ipu_crtc->plane[1])) {
500 ipu_crtc->plane[1] = NULL; 350 ipu_crtc->plane[1] = NULL;
351 } else {
352 ret = ipu_plane_get_resources(ipu_crtc->plane[1]);
353 if (ret) {
354 dev_err(ipu_crtc->dev, "getting plane 1 "
355 "resources failed with %d.\n", ret);
356 goto err_put_plane0_res;
357 }
358 }
501 } 359 }
502 360
503 ipu_crtc->irq = ipu_plane_irq(ipu_crtc->plane[0]); 361 ipu_crtc->irq = ipu_plane_irq(ipu_crtc->plane[0]);
@@ -505,16 +363,17 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
505 "imx_drm", ipu_crtc); 363 "imx_drm", ipu_crtc);
506 if (ret < 0) { 364 if (ret < 0) {
507 dev_err(ipu_crtc->dev, "irq request failed with %d.\n", ret); 365 dev_err(ipu_crtc->dev, "irq request failed with %d.\n", ret);
508 goto err_put_plane_res; 366 goto err_put_plane1_res;
509 } 367 }
510 /* Only enable IRQ when we actually need it to trigger work. */ 368 /* Only enable IRQ when we actually need it to trigger work. */
511 disable_irq(ipu_crtc->irq); 369 disable_irq(ipu_crtc->irq);
512 370
513 ipu_crtc->flip_queue = create_singlethread_workqueue("ipu-crtc-flip");
514
515 return 0; 371 return 0;
516 372
517err_put_plane_res: 373err_put_plane1_res:
374 if (ipu_crtc->plane[1])
375 ipu_plane_put_resources(ipu_crtc->plane[1]);
376err_put_plane0_res:
518 ipu_plane_put_resources(ipu_crtc->plane[0]); 377 ipu_plane_put_resources(ipu_crtc->plane[0]);
519err_remove_crtc: 378err_remove_crtc:
520 imx_drm_remove_crtc(ipu_crtc->imx_crtc); 379 imx_drm_remove_crtc(ipu_crtc->imx_crtc);
@@ -553,9 +412,10 @@ static void ipu_drm_unbind(struct device *dev, struct device *master,
553 412
554 imx_drm_remove_crtc(ipu_crtc->imx_crtc); 413 imx_drm_remove_crtc(ipu_crtc->imx_crtc);
555 414
556 destroy_workqueue(ipu_crtc->flip_queue);
557 ipu_plane_put_resources(ipu_crtc->plane[0]);
558 ipu_put_resources(ipu_crtc); 415 ipu_put_resources(ipu_crtc);
416 if (ipu_crtc->plane[1])
417 ipu_plane_put_resources(ipu_crtc->plane[1]);
418 ipu_plane_put_resources(ipu_crtc->plane[0]);
559} 419}
560 420
561static const struct component_ops ipu_crtc_ops = { 421static const struct component_ops ipu_crtc_ops = {
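
The CRTC side of this conversion stops caching bus_format, bus_flags and the DI sync pins in struct ipu_crtc and instead reads them from a driver-private CRTC state (imx_crtc_state), which the encoder fills in during its ->atomic_check() (see the parallel-display changes below). The state definition itself is outside this hunk, presumably in the shared imx-drm header; a minimal sketch of the usual drm_crtc_state subclassing pattern, using the field names seen above and assumed types, looks like this:

struct imx_crtc_state {
	struct drm_crtc_state	base;		/* must stay first for container_of */
	u32			bus_format;
	u32			bus_flags;
	int			di_hsync_pin;
	int			di_vsync_pin;
};

static inline struct imx_crtc_state *to_imx_crtc_state(struct drm_crtc_state *s)
{
	return container_of(s, struct imx_crtc_state, base);
}

With the encoder writing these fields in atomic_check and the CRTC consuming them in mode_set_nofb(), the old out-of-band set_interface_pix_fmt() hook is no longer needed, which is why it is deleted above.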
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index a4bb44118d33..4ad67d015ec7 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -14,13 +14,19 @@
14 */ 14 */
15 15
16#include <drm/drmP.h> 16#include <drm/drmP.h>
17#include <drm/drm_atomic.h>
18#include <drm/drm_atomic_helper.h>
17#include <drm/drm_fb_cma_helper.h> 19#include <drm/drm_fb_cma_helper.h>
18#include <drm/drm_gem_cma_helper.h> 20#include <drm/drm_gem_cma_helper.h>
21#include <drm/drm_plane_helper.h>
19 22
20#include "video/imx-ipu-v3.h" 23#include "video/imx-ipu-v3.h"
21#include "ipuv3-plane.h" 24#include "ipuv3-plane.h"
22 25
23#define to_ipu_plane(x) container_of(x, struct ipu_plane, base) 26static inline struct ipu_plane *to_ipu_plane(struct drm_plane *p)
27{
28 return container_of(p, struct ipu_plane, base);
29}
24 30
25static const uint32_t ipu_plane_formats[] = { 31static const uint32_t ipu_plane_formats[] = {
26 DRM_FORMAT_ARGB1555, 32 DRM_FORMAT_ARGB1555,
@@ -53,62 +59,67 @@ int ipu_plane_irq(struct ipu_plane *ipu_plane)
53 IPU_IRQ_EOF); 59 IPU_IRQ_EOF);
54} 60}
55 61
56static int calc_vref(struct drm_display_mode *mode) 62static inline unsigned long
63drm_plane_state_to_eba(struct drm_plane_state *state)
57{ 64{
58 unsigned long htotal, vtotal; 65 struct drm_framebuffer *fb = state->fb;
66 struct drm_gem_cma_object *cma_obj;
59 67
60 htotal = mode->htotal; 68 cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
61 vtotal = mode->vtotal; 69 BUG_ON(!cma_obj);
62 70
63 if (!htotal || !vtotal) 71 return cma_obj->paddr + fb->offsets[0] +
64 return 60; 72 fb->pitches[0] * (state->src_y >> 16) +
65 73 (fb->bits_per_pixel >> 3) * (state->src_x >> 16);
66 return DIV_ROUND_UP(mode->clock * 1000, vtotal * htotal);
67} 74}
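
The address helpers above work straight on atomic plane state; src_x and src_y are 16.16 fixed point, hence the shift by 16 before multiplying by the pitch. A worked example with made-up values (single-planar XRGB8888) shows what the alignment checks further down are validating:

/*
 * Hypothetical numbers, only to illustrate the arithmetic in
 * drm_plane_state_to_eba():
 *
 *   cma_obj->paddr   = 0x33000000
 *   fb->offsets[0]   = 0
 *   fb->pitches[0]   = 7680          (1920 pixels * 4 bytes)
 *   state->src_x     = 16 << 16      (16.16 fixed point)
 *   state->src_y     = 10 << 16
 *   bits_per_pixel   = 32  ->  4 bytes per pixel
 *
 *   eba = 0x33000000 + 0 + 7680 * 10 + 4 * 16 = 0x33012c40
 *
 * 0x33012c40 is a multiple of 8, so the (eba & 0x7) check in
 * ipu_plane_atomic_check() below would accept it.
 */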
68 75
69static inline int calc_bandwidth(int width, int height, unsigned int vref) 76static inline unsigned long
77drm_plane_state_to_ubo(struct drm_plane_state *state)
70{ 78{
71 return width * height * vref; 79 struct drm_framebuffer *fb = state->fb;
72} 80 struct drm_gem_cma_object *cma_obj;
81 unsigned long eba = drm_plane_state_to_eba(state);
73 82
74int ipu_plane_set_base(struct ipu_plane *ipu_plane, struct drm_framebuffer *fb, 83 cma_obj = drm_fb_cma_get_gem_obj(fb, 1);
75 int x, int y) 84 BUG_ON(!cma_obj);
76{
77 struct drm_gem_cma_object *cma_obj[3];
78 unsigned long eba, ubo, vbo;
79 int active, i;
80 85
81 for (i = 0; i < drm_format_num_planes(fb->pixel_format); i++) { 86 return cma_obj->paddr + fb->offsets[1] +
82 cma_obj[i] = drm_fb_cma_get_gem_obj(fb, i); 87 fb->pitches[1] * (state->src_y >> 16) / 2 +
83 if (!cma_obj[i]) { 88 (state->src_x >> 16) / 2 - eba;
84 DRM_DEBUG_KMS("plane %d entry is null.\n", i); 89}
85 return -EFAULT;
86 }
87 }
88 90
89 eba = cma_obj[0]->paddr + fb->offsets[0] + 91static inline unsigned long
90 fb->pitches[0] * y + (fb->bits_per_pixel >> 3) * x; 92drm_plane_state_to_vbo(struct drm_plane_state *state)
93{
94 struct drm_framebuffer *fb = state->fb;
95 struct drm_gem_cma_object *cma_obj;
96 unsigned long eba = drm_plane_state_to_eba(state);
91 97
92 if (eba & 0x7) { 98 cma_obj = drm_fb_cma_get_gem_obj(fb, 2);
93 DRM_DEBUG_KMS("base address must be a multiple of 8.\n"); 99 BUG_ON(!cma_obj);
94 return -EINVAL;
95 }
96 100
97 if (fb->pitches[0] < 1 || fb->pitches[0] > 16384) { 101 return cma_obj->paddr + fb->offsets[2] +
98 DRM_DEBUG_KMS("pitches out of range.\n"); 102 fb->pitches[2] * (state->src_y >> 16) / 2 +
99 return -EINVAL; 103 (state->src_x >> 16) / 2 - eba;
100 } 104}
101 105
102 if (ipu_plane->enabled && fb->pitches[0] != ipu_plane->stride[0]) { 106static void ipu_plane_atomic_set_base(struct ipu_plane *ipu_plane,
103 DRM_DEBUG_KMS("pitches must not change while plane is enabled.\n"); 107 struct drm_plane_state *old_state)
104 return -EINVAL; 108{
105 } 109 struct drm_plane *plane = &ipu_plane->base;
110 struct drm_plane_state *state = plane->state;
111 struct drm_framebuffer *fb = state->fb;
112 unsigned long eba, ubo, vbo;
113 int active;
106 114
107 ipu_plane->stride[0] = fb->pitches[0]; 115 eba = drm_plane_state_to_eba(state);
108 116
109 switch (fb->pixel_format) { 117 switch (fb->pixel_format) {
110 case DRM_FORMAT_YUV420: 118 case DRM_FORMAT_YUV420:
111 case DRM_FORMAT_YVU420: 119 case DRM_FORMAT_YVU420:
120 if (old_state->fb)
121 break;
122
112 /* 123 /*
113 * Multiplanar formats have to meet the following restrictions: 124 * Multiplanar formats have to meet the following restrictions:
114 * - The (up to) three plane addresses are EBA, EBA+UBO, EBA+VBO 125 * - The (up to) three plane addresses are EBA, EBA+UBO, EBA+VBO
@@ -117,59 +128,28 @@ int ipu_plane_set_base(struct ipu_plane *ipu_plane, struct drm_framebuffer *fb,
117 * - Only EBA may be changed while scanout is active 128 * - Only EBA may be changed while scanout is active
118 * - The strides of U and V planes must be identical. 129 * - The strides of U and V planes must be identical.
119 */ 130 */
120 ubo = cma_obj[1]->paddr + fb->offsets[1] + 131 ubo = drm_plane_state_to_ubo(state);
121 fb->pitches[1] * y / 2 + x / 2 - eba; 132 vbo = drm_plane_state_to_vbo(state);
122 vbo = cma_obj[2]->paddr + fb->offsets[2] +
123 fb->pitches[2] * y / 2 + x / 2 - eba;
124 133
125 if ((ubo & 0x7) || (vbo & 0x7)) { 134 if (fb->pixel_format == DRM_FORMAT_YUV420)
126 DRM_DEBUG_KMS("U/V buffer offsets must be a multiple of 8.\n"); 135 ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
127 return -EINVAL; 136 fb->pitches[1], ubo, vbo);
128 } 137 else
129 138 ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
130 if ((ubo > 0xfffff8) || (vbo > 0xfffff8)) { 139 fb->pitches[1], vbo, ubo);
131 DRM_DEBUG_KMS("U/V buffer offsets must be positive and not larger than 0xfffff8.\n");
132 return -EINVAL;
133 }
134
135 if (ipu_plane->enabled && ((ipu_plane->u_offset != ubo) ||
136 (ipu_plane->v_offset != vbo))) {
137 DRM_DEBUG_KMS("U/V buffer offsets must not change while plane is enabled.\n");
138 return -EINVAL;
139 }
140
141 if (fb->pitches[1] != fb->pitches[2]) {
142 DRM_DEBUG_KMS("U/V pitches must be identical.\n");
143 return -EINVAL;
144 }
145
146 if (fb->pitches[1] < 1 || fb->pitches[1] > 16384) {
147 DRM_DEBUG_KMS("U/V pitches out of range.\n");
148 return -EINVAL;
149 }
150
151 if (ipu_plane->enabled &&
152 (ipu_plane->stride[1] != fb->pitches[1])) {
153 DRM_DEBUG_KMS("U/V pitches must not change while plane is enabled.\n");
154 return -EINVAL;
155 }
156
157 ipu_plane->u_offset = ubo;
158 ipu_plane->v_offset = vbo;
159 ipu_plane->stride[1] = fb->pitches[1];
160 140
161 dev_dbg(ipu_plane->base.dev->dev, 141 dev_dbg(ipu_plane->base.dev->dev,
162 "phys = %pad %pad %pad, x = %d, y = %d", 142 "phy = %lu %lu %lu, x = %d, y = %d", eba, ubo, vbo,
163 &cma_obj[0]->paddr, &cma_obj[1]->paddr, 143 state->src_x >> 16, state->src_y >> 16);
164 &cma_obj[2]->paddr, x, y);
165 break; 144 break;
166 default: 145 default:
167 dev_dbg(ipu_plane->base.dev->dev, "phys = %pad, x = %d, y = %d", 146 dev_dbg(ipu_plane->base.dev->dev, "phys = %lu, x = %d, y = %d",
168 &cma_obj[0]->paddr, x, y); 147 eba, state->src_x >> 16, state->src_y >> 16);
148
169 break; 149 break;
170 } 150 }
171 151
172 if (ipu_plane->enabled) { 152 if (old_state->fb) {
173 active = ipu_idmac_get_current_buffer(ipu_plane->ipu_ch); 153 active = ipu_idmac_get_current_buffer(ipu_plane->ipu_ch);
174 ipu_cpmem_set_buffer(ipu_plane->ipu_ch, !active, eba); 154 ipu_cpmem_set_buffer(ipu_plane->ipu_ch, !active, eba);
175 ipu_idmac_select_buffer(ipu_plane->ipu_ch, !active); 155 ipu_idmac_select_buffer(ipu_plane->ipu_ch, !active);
@@ -177,155 +157,6 @@ int ipu_plane_set_base(struct ipu_plane *ipu_plane, struct drm_framebuffer *fb,
177 ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 0, eba); 157 ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 0, eba);
178 ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 1, eba); 158 ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 1, eba);
179 } 159 }
180
181 /* cache offsets for subsequent pageflips */
182 ipu_plane->x = x;
183 ipu_plane->y = y;
184
185 return 0;
186}
187
188int ipu_plane_mode_set(struct ipu_plane *ipu_plane, struct drm_crtc *crtc,
189 struct drm_display_mode *mode,
190 struct drm_framebuffer *fb, int crtc_x, int crtc_y,
191 unsigned int crtc_w, unsigned int crtc_h,
192 uint32_t src_x, uint32_t src_y,
193 uint32_t src_w, uint32_t src_h, bool interlaced)
194{
195 struct device *dev = ipu_plane->base.dev->dev;
196 int ret;
197
198 /* no scaling */
199 if (src_w != crtc_w || src_h != crtc_h)
200 return -EINVAL;
201
202 /* clip to crtc bounds */
203 if (crtc_x < 0) {
204 if (-crtc_x > crtc_w)
205 return -EINVAL;
206 src_x += -crtc_x;
207 src_w -= -crtc_x;
208 crtc_w -= -crtc_x;
209 crtc_x = 0;
210 }
211 if (crtc_y < 0) {
212 if (-crtc_y > crtc_h)
213 return -EINVAL;
214 src_y += -crtc_y;
215 src_h -= -crtc_y;
216 crtc_h -= -crtc_y;
217 crtc_y = 0;
218 }
219 if (crtc_x + crtc_w > mode->hdisplay) {
220 if (crtc_x > mode->hdisplay)
221 return -EINVAL;
222 crtc_w = mode->hdisplay - crtc_x;
223 src_w = crtc_w;
224 }
225 if (crtc_y + crtc_h > mode->vdisplay) {
226 if (crtc_y > mode->vdisplay)
227 return -EINVAL;
228 crtc_h = mode->vdisplay - crtc_y;
229 src_h = crtc_h;
230 }
231 /* full plane minimum width is 13 pixels */
232 if (crtc_w < 13 && (ipu_plane->dp_flow != IPU_DP_FLOW_SYNC_FG))
233 return -EINVAL;
234 if (crtc_h < 2)
235 return -EINVAL;
236
237 /*
238 * since we cannot touch active IDMAC channels, we do not support
239 * resizing the enabled plane or changing its format
240 */
241 if (ipu_plane->enabled) {
242 if (src_w != ipu_plane->w || src_h != ipu_plane->h ||
243 fb->pixel_format != ipu_plane->base.fb->pixel_format)
244 return -EINVAL;
245
246 return ipu_plane_set_base(ipu_plane, fb, src_x, src_y);
247 }
248
249 switch (ipu_plane->dp_flow) {
250 case IPU_DP_FLOW_SYNC_BG:
251 ret = ipu_dp_setup_channel(ipu_plane->dp,
252 IPUV3_COLORSPACE_RGB,
253 IPUV3_COLORSPACE_RGB);
254 if (ret) {
255 dev_err(dev,
256 "initializing display processor failed with %d\n",
257 ret);
258 return ret;
259 }
260 ipu_dp_set_global_alpha(ipu_plane->dp, true, 0, true);
261 break;
262 case IPU_DP_FLOW_SYNC_FG:
263 ipu_dp_setup_channel(ipu_plane->dp,
264 ipu_drm_fourcc_to_colorspace(fb->pixel_format),
265 IPUV3_COLORSPACE_UNKNOWN);
266 ipu_dp_set_window_pos(ipu_plane->dp, crtc_x, crtc_y);
267 /* Enable local alpha on partial plane */
268 switch (fb->pixel_format) {
269 case DRM_FORMAT_ARGB1555:
270 case DRM_FORMAT_ABGR1555:
271 case DRM_FORMAT_RGBA5551:
272 case DRM_FORMAT_BGRA5551:
273 case DRM_FORMAT_ARGB4444:
274 case DRM_FORMAT_ARGB8888:
275 case DRM_FORMAT_ABGR8888:
276 case DRM_FORMAT_RGBA8888:
277 case DRM_FORMAT_BGRA8888:
278 ipu_dp_set_global_alpha(ipu_plane->dp, false, 0, false);
279 break;
280 default:
281 break;
282 }
283 }
284
285 ret = ipu_dmfc_alloc_bandwidth(ipu_plane->dmfc,
286 calc_bandwidth(crtc_w, crtc_h,
287 calc_vref(mode)), 64);
288 if (ret) {
289 dev_err(dev, "allocating dmfc bandwidth failed with %d\n", ret);
290 return ret;
291 }
292
293 ipu_dmfc_config_wait4eot(ipu_plane->dmfc, crtc_w);
294
295 ipu_cpmem_zero(ipu_plane->ipu_ch);
296 ipu_cpmem_set_resolution(ipu_plane->ipu_ch, src_w, src_h);
297 ret = ipu_cpmem_set_fmt(ipu_plane->ipu_ch, fb->pixel_format);
298 if (ret < 0) {
299 dev_err(dev, "unsupported pixel format 0x%08x\n",
300 fb->pixel_format);
301 return ret;
302 }
303 ipu_cpmem_set_high_priority(ipu_plane->ipu_ch);
304 ipu_idmac_set_double_buffer(ipu_plane->ipu_ch, 1);
305 ipu_cpmem_set_stride(ipu_plane->ipu_ch, fb->pitches[0]);
306
307 ret = ipu_plane_set_base(ipu_plane, fb, src_x, src_y);
308 if (ret < 0)
309 return ret;
310 if (interlaced)
311 ipu_cpmem_interlaced_scan(ipu_plane->ipu_ch, fb->pitches[0]);
312
313 if (fb->pixel_format == DRM_FORMAT_YUV420) {
314 ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
315 ipu_plane->stride[1],
316 ipu_plane->u_offset,
317 ipu_plane->v_offset);
318 } else if (fb->pixel_format == DRM_FORMAT_YVU420) {
319 ipu_cpmem_set_yuv_planar_full(ipu_plane->ipu_ch,
320 ipu_plane->stride[1],
321 ipu_plane->v_offset,
322 ipu_plane->u_offset);
323 }
324
325 ipu_plane->w = src_w;
326 ipu_plane->h = src_h;
327
328 return 0;
329} 160}
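
For the planar YUV cases handled above, the CPMEM stores only two offsets relative to the base address, and DRM_FORMAT_YVU420 simply swaps which chroma plane lands in which slot, so both fourccs share one code path. The layout the code assumes, restating the restrictions spelled out in the comment above, is roughly:

/*
 * Assumed buffer layout for DRM_FORMAT_YUV420 (YVU420 swaps Cb and Cr):
 *
 *   EBA        -> Y  plane, fb->pitches[0] bytes per line
 *   EBA + UBO  -> Cb plane, fb->pitches[1] bytes per line, half resolution
 *   EBA + VBO  -> Cr plane, fb->pitches[2] bytes per line, half resolution
 *
 * UBO and VBO are byte offsets from EBA, which is why the
 * drm_plane_state_to_ubo()/_to_vbo() helpers subtract eba, and why
 * atomic_check insists that pitches[1] == pitches[2] and that the
 * offsets do not change while scanout is active.
 */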
330 161
331void ipu_plane_put_resources(struct ipu_plane *ipu_plane) 162void ipu_plane_put_resources(struct ipu_plane *ipu_plane)
@@ -372,7 +203,7 @@ err_out:
372 return ret; 203 return ret;
373} 204}
374 205
375void ipu_plane_enable(struct ipu_plane *ipu_plane) 206static void ipu_plane_enable(struct ipu_plane *ipu_plane)
376{ 207{
377 if (ipu_plane->dp) 208 if (ipu_plane->dp)
378 ipu_dp_enable(ipu_plane->ipu); 209 ipu_dp_enable(ipu_plane->ipu);
@@ -380,14 +211,10 @@ void ipu_plane_enable(struct ipu_plane *ipu_plane)
380 ipu_idmac_enable_channel(ipu_plane->ipu_ch); 211 ipu_idmac_enable_channel(ipu_plane->ipu_ch);
381 if (ipu_plane->dp) 212 if (ipu_plane->dp)
382 ipu_dp_enable_channel(ipu_plane->dp); 213 ipu_dp_enable_channel(ipu_plane->dp);
383
384 ipu_plane->enabled = true;
385} 214}
386 215
387void ipu_plane_disable(struct ipu_plane *ipu_plane) 216static void ipu_plane_disable(struct ipu_plane *ipu_plane)
388{ 217{
389 ipu_plane->enabled = false;
390
391 ipu_idmac_wait_busy(ipu_plane->ipu_ch, 50); 218 ipu_idmac_wait_busy(ipu_plane->ipu_ch, 50);
392 219
393 if (ipu_plane->dp) 220 if (ipu_plane->dp)
@@ -398,74 +225,225 @@ void ipu_plane_disable(struct ipu_plane *ipu_plane)
398 ipu_dp_disable(ipu_plane->ipu); 225 ipu_dp_disable(ipu_plane->ipu);
399} 226}
400 227
401/* 228static int ipu_disable_plane(struct drm_plane *plane)
402 * drm_plane API
403 */
404
405static int ipu_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
406 struct drm_framebuffer *fb, int crtc_x, int crtc_y,
407 unsigned int crtc_w, unsigned int crtc_h,
408 uint32_t src_x, uint32_t src_y,
409 uint32_t src_w, uint32_t src_h)
410{ 229{
411 struct ipu_plane *ipu_plane = to_ipu_plane(plane); 230 struct ipu_plane *ipu_plane = to_ipu_plane(plane);
412 int ret = 0;
413
414 DRM_DEBUG_KMS("plane - %p\n", plane);
415
416 if (!ipu_plane->enabled)
417 ret = ipu_plane_get_resources(ipu_plane);
418 if (ret < 0)
419 return ret;
420
421 ret = ipu_plane_mode_set(ipu_plane, crtc, &crtc->hwmode, fb,
422 crtc_x, crtc_y, crtc_w, crtc_h,
423 src_x >> 16, src_y >> 16, src_w >> 16, src_h >> 16,
424 false);
425 if (ret < 0) {
426 ipu_plane_put_resources(ipu_plane);
427 return ret;
428 }
429 231
430 if (crtc != plane->crtc) 232 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
431 dev_dbg(plane->dev->dev, "crtc change: %p -> %p\n",
432 plane->crtc, crtc);
433 233
434 if (!ipu_plane->enabled) 234 ipu_plane_disable(ipu_plane);
435 ipu_plane_enable(ipu_plane);
436 235
437 return 0; 236 return 0;
438} 237}
439 238
440static int ipu_disable_plane(struct drm_plane *plane) 239static void ipu_plane_destroy(struct drm_plane *plane)
441{ 240{
442 struct ipu_plane *ipu_plane = to_ipu_plane(plane); 241 struct ipu_plane *ipu_plane = to_ipu_plane(plane);
443 242
444 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); 243 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
445 244
446 if (ipu_plane->enabled) 245 ipu_disable_plane(plane);
447 ipu_plane_disable(ipu_plane); 246 drm_plane_cleanup(plane);
247 kfree(ipu_plane);
248}
448 249
449 ipu_plane_put_resources(ipu_plane); 250static const struct drm_plane_funcs ipu_plane_funcs = {
251 .update_plane = drm_atomic_helper_update_plane,
252 .disable_plane = drm_atomic_helper_disable_plane,
253 .destroy = ipu_plane_destroy,
254 .reset = drm_atomic_helper_plane_reset,
255 .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
256 .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
257};
258
259static int ipu_plane_atomic_check(struct drm_plane *plane,
260 struct drm_plane_state *state)
261{
262 struct drm_plane_state *old_state = plane->state;
263 struct drm_crtc_state *crtc_state;
264 struct device *dev = plane->dev->dev;
265 struct drm_framebuffer *fb = state->fb;
266 struct drm_framebuffer *old_fb = old_state->fb;
267 unsigned long eba, ubo, vbo, old_ubo, old_vbo;
268
269 /* Ok to disable */
270 if (!fb)
271 return 0;
272
273 if (!state->crtc)
274 return -EINVAL;
275
276 crtc_state =
277 drm_atomic_get_existing_crtc_state(state->state, state->crtc);
278 if (WARN_ON(!crtc_state))
279 return -EINVAL;
280
281 /* CRTC should be enabled */
282 if (!crtc_state->enable)
283 return -EINVAL;
284
285 /* no scaling */
286 if (state->src_w >> 16 != state->crtc_w ||
287 state->src_h >> 16 != state->crtc_h)
288 return -EINVAL;
289
290 switch (plane->type) {
291 case DRM_PLANE_TYPE_PRIMARY:
292 /* full plane doesn't support partial off screen */
293 if (state->crtc_x || state->crtc_y ||
294 state->crtc_w != crtc_state->adjusted_mode.hdisplay ||
295 state->crtc_h != crtc_state->adjusted_mode.vdisplay)
296 return -EINVAL;
297
298 /* full plane minimum width is 13 pixels */
299 if (state->crtc_w < 13)
300 return -EINVAL;
301 break;
302 case DRM_PLANE_TYPE_OVERLAY:
303 if (state->crtc_x < 0 || state->crtc_y < 0)
304 return -EINVAL;
305
306 if (state->crtc_x + state->crtc_w >
307 crtc_state->adjusted_mode.hdisplay)
308 return -EINVAL;
309 if (state->crtc_y + state->crtc_h >
310 crtc_state->adjusted_mode.vdisplay)
311 return -EINVAL;
312 break;
313 default:
314 dev_warn(dev, "Unsupported plane type\n");
315 return -EINVAL;
316 }
317
318 if (state->crtc_h < 2)
319 return -EINVAL;
320
321 /*
322 * since we cannot touch active IDMAC channels, we do not support
323 * resizing the enabled plane or changing its format
324 */
325 if (old_fb && (state->src_w != old_state->src_w ||
326 state->src_h != old_state->src_h ||
327 fb->pixel_format != old_fb->pixel_format))
328 return -EINVAL;
329
330 eba = drm_plane_state_to_eba(state);
331
332 if (eba & 0x7)
333 return -EINVAL;
334
335 if (fb->pitches[0] < 1 || fb->pitches[0] > 16384)
336 return -EINVAL;
337
338 if (old_fb && fb->pitches[0] != old_fb->pitches[0])
339 return -EINVAL;
340
341 switch (fb->pixel_format) {
342 case DRM_FORMAT_YUV420:
343 case DRM_FORMAT_YVU420:
344 /*
345 * Multiplanar formats have to meet the following restrictions:
346 * - The (up to) three plane addresses are EBA, EBA+UBO, EBA+VBO
347 * - EBA, UBO and VBO are a multiple of 8
348 * - UBO and VBO are unsigned and not larger than 0xfffff8
349 * - Only EBA may be changed while scanout is active
350 * - The strides of U and V planes must be identical.
351 */
352 ubo = drm_plane_state_to_ubo(state);
353 vbo = drm_plane_state_to_vbo(state);
354
355 if ((ubo & 0x7) || (vbo & 0x7))
356 return -EINVAL;
357
358 if ((ubo > 0xfffff8) || (vbo > 0xfffff8))
359 return -EINVAL;
360
361 if (old_fb) {
362 old_ubo = drm_plane_state_to_ubo(old_state);
363 old_vbo = drm_plane_state_to_vbo(old_state);
364 if (ubo != old_ubo || vbo != old_vbo)
365 return -EINVAL;
366 }
367
368 if (fb->pitches[1] != fb->pitches[2])
369 return -EINVAL;
370
371 if (fb->pitches[1] < 1 || fb->pitches[1] > 16384)
372 return -EINVAL;
373
374 if (old_fb && old_fb->pitches[1] != fb->pitches[1])
375 return -EINVAL;
376 }
450 377
451 return 0; 378 return 0;
452} 379}
453 380
454static void ipu_plane_destroy(struct drm_plane *plane) 381static void ipu_plane_atomic_disable(struct drm_plane *plane,
382 struct drm_plane_state *old_state)
383{
384 ipu_disable_plane(plane);
385}
386
387static void ipu_plane_atomic_update(struct drm_plane *plane,
388 struct drm_plane_state *old_state)
455{ 389{
456 struct ipu_plane *ipu_plane = to_ipu_plane(plane); 390 struct ipu_plane *ipu_plane = to_ipu_plane(plane);
391 struct drm_plane_state *state = plane->state;
392 enum ipu_color_space ics;
457 393
458 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); 394 if (old_state->fb) {
395 ipu_plane_atomic_set_base(ipu_plane, old_state);
396 return;
397 }
459 398
460 ipu_disable_plane(plane); 399 switch (ipu_plane->dp_flow) {
461 drm_plane_cleanup(plane); 400 case IPU_DP_FLOW_SYNC_BG:
462 kfree(ipu_plane); 401 ipu_dp_setup_channel(ipu_plane->dp,
402 IPUV3_COLORSPACE_RGB,
403 IPUV3_COLORSPACE_RGB);
404 ipu_dp_set_global_alpha(ipu_plane->dp, true, 0, true);
405 break;
406 case IPU_DP_FLOW_SYNC_FG:
407 ics = ipu_drm_fourcc_to_colorspace(state->fb->pixel_format);
408 ipu_dp_setup_channel(ipu_plane->dp, ics,
409 IPUV3_COLORSPACE_UNKNOWN);
410 ipu_dp_set_window_pos(ipu_plane->dp, state->crtc_x,
411 state->crtc_y);
412 /* Enable local alpha on partial plane */
413 switch (state->fb->pixel_format) {
414 case DRM_FORMAT_ARGB1555:
415 case DRM_FORMAT_ABGR1555:
416 case DRM_FORMAT_RGBA5551:
417 case DRM_FORMAT_BGRA5551:
418 case DRM_FORMAT_ARGB4444:
419 case DRM_FORMAT_ARGB8888:
420 case DRM_FORMAT_ABGR8888:
421 case DRM_FORMAT_RGBA8888:
422 case DRM_FORMAT_BGRA8888:
423 ipu_dp_set_global_alpha(ipu_plane->dp, false, 0, false);
424 break;
425 default:
426 break;
427 }
428 }
429
430 ipu_dmfc_config_wait4eot(ipu_plane->dmfc, state->crtc_w);
431
432 ipu_cpmem_zero(ipu_plane->ipu_ch);
433 ipu_cpmem_set_resolution(ipu_plane->ipu_ch, state->src_w >> 16,
434 state->src_h >> 16);
435 ipu_cpmem_set_fmt(ipu_plane->ipu_ch, state->fb->pixel_format);
436 ipu_cpmem_set_high_priority(ipu_plane->ipu_ch);
437 ipu_idmac_set_double_buffer(ipu_plane->ipu_ch, 1);
438 ipu_cpmem_set_stride(ipu_plane->ipu_ch, state->fb->pitches[0]);
439 ipu_plane_atomic_set_base(ipu_plane, old_state);
440 ipu_plane_enable(ipu_plane);
463} 441}
464 442
465static const struct drm_plane_funcs ipu_plane_funcs = { 443static const struct drm_plane_helper_funcs ipu_plane_helper_funcs = {
466 .update_plane = ipu_update_plane, 444 .atomic_check = ipu_plane_atomic_check,
467 .disable_plane = ipu_disable_plane, 445 .atomic_disable = ipu_plane_atomic_disable,
468 .destroy = ipu_plane_destroy, 446 .atomic_update = ipu_plane_atomic_update,
469}; 447};
470 448
471struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu, 449struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
@@ -498,5 +476,7 @@ struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
498 return ERR_PTR(ret); 476 return ERR_PTR(ret);
499 } 477 }
500 478
479 drm_plane_helper_add(&ipu_plane->base, &ipu_plane_helper_funcs);
480
501 return ipu_plane; 481 return ipu_plane;
502} 482}
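
With the legacy update_plane/disable_plane entry points gone, everything is routed through the plane helper funcs registered by the drm_plane_helper_add() call added above. The rest of ipu_plane_init() is not shown in this hunk; the sketch below only illustrates the general registration pattern such a driver follows (the allocation and setup here are placeholders, not the actual imx code):

static struct drm_plane *example_plane_register(struct drm_device *dev,
						unsigned long possible_crtcs)
{
	struct drm_plane *plane;
	int ret;

	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
	if (!plane)
		return ERR_PTR(-ENOMEM);

	/* ipu_plane_formats[] is the table at the top of this file. */
	ret = drm_universal_plane_init(dev, plane, possible_crtcs,
				       &ipu_plane_funcs, ipu_plane_formats,
				       ARRAY_SIZE(ipu_plane_formats),
				       DRM_PLANE_TYPE_PRIMARY, NULL);
	if (ret) {
		kfree(plane);
		return ERR_PTR(ret);
	}

	/* The atomic helpers call back into atomic_check/update/disable. */
	drm_plane_helper_add(plane, &ipu_plane_helper_funcs);

	return plane;
}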
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.h b/drivers/gpu/drm/imx/ipuv3-plane.h
index 4448fd4ad4eb..338b88a74eb6 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.h
+++ b/drivers/gpu/drm/imx/ipuv3-plane.h
@@ -23,17 +23,6 @@ struct ipu_plane {
23 23
24 int dma; 24 int dma;
25 int dp_flow; 25 int dp_flow;
26
27 int x;
28 int y;
29 int w;
30 int h;
31
32 unsigned int u_offset;
33 unsigned int v_offset;
34 unsigned int stride[2];
35
36 bool enabled;
37}; 26};
38 27
39struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu, 28struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
@@ -48,11 +37,6 @@ int ipu_plane_mode_set(struct ipu_plane *plane, struct drm_crtc *crtc,
48 uint32_t src_x, uint32_t src_y, uint32_t src_w, 37 uint32_t src_x, uint32_t src_y, uint32_t src_w,
49 uint32_t src_h, bool interlaced); 38 uint32_t src_h, bool interlaced);
50 39
51void ipu_plane_enable(struct ipu_plane *plane);
52void ipu_plane_disable(struct ipu_plane *plane);
53int ipu_plane_set_base(struct ipu_plane *plane, struct drm_framebuffer *fb,
54 int x, int y);
55
56int ipu_plane_get_resources(struct ipu_plane *plane); 40int ipu_plane_get_resources(struct ipu_plane *plane);
57void ipu_plane_put_resources(struct ipu_plane *plane); 41void ipu_plane_put_resources(struct ipu_plane *plane);
58 42
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 2d1fd02cd3d6..1dad297b01fd 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -16,6 +16,7 @@
16#include <linux/component.h> 16#include <linux/component.h>
17#include <linux/module.h> 17#include <linux/module.h>
18#include <drm/drmP.h> 18#include <drm/drmP.h>
19#include <drm/drm_atomic_helper.h>
19#include <drm/drm_fb_helper.h> 20#include <drm/drm_fb_helper.h>
20#include <drm/drm_crtc_helper.h> 21#include <drm/drm_crtc_helper.h>
21#include <drm/drm_panel.h> 22#include <drm/drm_panel.h>
@@ -25,9 +26,6 @@
25 26
26#include "imx-drm.h" 27#include "imx-drm.h"
27 28
28#define con_to_imxpd(x) container_of(x, struct imx_parallel_display, connector)
29#define enc_to_imxpd(x) container_of(x, struct imx_parallel_display, encoder)
30
31struct imx_parallel_display { 29struct imx_parallel_display {
32 struct drm_connector connector; 30 struct drm_connector connector;
33 struct drm_encoder encoder; 31 struct drm_encoder encoder;
@@ -37,8 +35,19 @@ struct imx_parallel_display {
37 u32 bus_format; 35 u32 bus_format;
38 struct drm_display_mode mode; 36 struct drm_display_mode mode;
39 struct drm_panel *panel; 37 struct drm_panel *panel;
38 struct drm_bridge *bridge;
40}; 39};
41 40
41static inline struct imx_parallel_display *con_to_imxpd(struct drm_connector *c)
42{
43 return container_of(c, struct imx_parallel_display, connector);
44}
45
46static inline struct imx_parallel_display *enc_to_imxpd(struct drm_encoder *e)
47{
48 return container_of(e, struct imx_parallel_display, encoder);
49}
50
42static enum drm_connector_status imx_pd_connector_detect( 51static enum drm_connector_status imx_pd_connector_detect(
43 struct drm_connector *connector, bool force) 52 struct drm_connector *connector, bool force)
44{ 53{
@@ -53,11 +62,7 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
53 62
54 if (imxpd->panel && imxpd->panel->funcs && 63 if (imxpd->panel && imxpd->panel->funcs &&
55 imxpd->panel->funcs->get_modes) { 64 imxpd->panel->funcs->get_modes) {
56 struct drm_display_info *di = &connector->display_info;
57
58 num_modes = imxpd->panel->funcs->get_modes(imxpd->panel); 65 num_modes = imxpd->panel->funcs->get_modes(imxpd->panel);
59 if (!imxpd->bus_format && di->num_bus_formats)
60 imxpd->bus_format = di->bus_formats[0];
61 if (num_modes > 0) 66 if (num_modes > 0)
62 return num_modes; 67 return num_modes;
63 } 68 }
@@ -69,10 +74,16 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
69 74
70 if (np) { 75 if (np) {
71 struct drm_display_mode *mode = drm_mode_create(connector->dev); 76 struct drm_display_mode *mode = drm_mode_create(connector->dev);
77 int ret;
72 78
73 if (!mode) 79 if (!mode)
74 return -EINVAL; 80 return -EINVAL;
75 of_get_drm_display_mode(np, &imxpd->mode, OF_USE_NATIVE_MODE); 81
82 ret = of_get_drm_display_mode(np, &imxpd->mode,
83 OF_USE_NATIVE_MODE);
84 if (ret)
85 return ret;
86
76 drm_mode_copy(mode, &imxpd->mode); 87 drm_mode_copy(mode, &imxpd->mode);
77 mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, 88 mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
78 drm_mode_probed_add(connector, mode); 89 drm_mode_probed_add(connector, mode);
@@ -90,24 +101,7 @@ static struct drm_encoder *imx_pd_connector_best_encoder(
90 return &imxpd->encoder; 101 return &imxpd->encoder;
91} 102}
92 103
93static void imx_pd_encoder_dpms(struct drm_encoder *encoder, int mode) 104static void imx_pd_encoder_enable(struct drm_encoder *encoder)
94{
95 struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
96
97 if (mode != DRM_MODE_DPMS_ON)
98 drm_panel_disable(imxpd->panel);
99 else
100 drm_panel_enable(imxpd->panel);
101}
102
103static void imx_pd_encoder_prepare(struct drm_encoder *encoder)
104{
105 struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
106 imx_drm_set_bus_config(encoder, imxpd->bus_format, 2, 3,
107 imxpd->connector.display_info.bus_flags);
108}
109
110static void imx_pd_encoder_commit(struct drm_encoder *encoder)
111{ 105{
112 struct imx_parallel_display *imxpd = enc_to_imxpd(encoder); 106 struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
113 107
@@ -115,12 +109,6 @@ static void imx_pd_encoder_commit(struct drm_encoder *encoder)
115 drm_panel_enable(imxpd->panel); 109 drm_panel_enable(imxpd->panel);
116} 110}
117 111
118static void imx_pd_encoder_mode_set(struct drm_encoder *encoder,
119 struct drm_display_mode *orig_mode,
120 struct drm_display_mode *mode)
121{
122}
123
124static void imx_pd_encoder_disable(struct drm_encoder *encoder) 112static void imx_pd_encoder_disable(struct drm_encoder *encoder)
125{ 113{
126 struct imx_parallel_display *imxpd = enc_to_imxpd(encoder); 114 struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
@@ -129,11 +117,33 @@ static void imx_pd_encoder_disable(struct drm_encoder *encoder)
129 drm_panel_unprepare(imxpd->panel); 117 drm_panel_unprepare(imxpd->panel);
130} 118}
131 119
120static int imx_pd_encoder_atomic_check(struct drm_encoder *encoder,
121 struct drm_crtc_state *crtc_state,
122 struct drm_connector_state *conn_state)
123{
124 struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state);
125 struct drm_display_info *di = &conn_state->connector->display_info;
126 struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
127
128 imx_crtc_state->bus_flags = di->bus_flags;
129 if (!imxpd->bus_format && di->num_bus_formats)
130 imx_crtc_state->bus_format = di->bus_formats[0];
131 else
132 imx_crtc_state->bus_format = imxpd->bus_format;
133 imx_crtc_state->di_hsync_pin = 2;
134 imx_crtc_state->di_vsync_pin = 3;
135
136 return 0;
137}
138
132static const struct drm_connector_funcs imx_pd_connector_funcs = { 139static const struct drm_connector_funcs imx_pd_connector_funcs = {
133 .dpms = drm_helper_connector_dpms, 140 .dpms = drm_atomic_helper_connector_dpms,
134 .fill_modes = drm_helper_probe_single_connector_modes, 141 .fill_modes = drm_helper_probe_single_connector_modes,
135 .detect = imx_pd_connector_detect, 142 .detect = imx_pd_connector_detect,
136 .destroy = imx_drm_connector_destroy, 143 .destroy = imx_drm_connector_destroy,
144 .reset = drm_atomic_helper_connector_reset,
145 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
146 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
137}; 147};
138 148
139static const struct drm_connector_helper_funcs imx_pd_connector_helper_funcs = { 149static const struct drm_connector_helper_funcs imx_pd_connector_helper_funcs = {
@@ -146,20 +156,18 @@ static const struct drm_encoder_funcs imx_pd_encoder_funcs = {
146}; 156};
147 157
148static const struct drm_encoder_helper_funcs imx_pd_encoder_helper_funcs = { 158static const struct drm_encoder_helper_funcs imx_pd_encoder_helper_funcs = {
149 .dpms = imx_pd_encoder_dpms, 159 .enable = imx_pd_encoder_enable,
150 .prepare = imx_pd_encoder_prepare,
151 .commit = imx_pd_encoder_commit,
152 .mode_set = imx_pd_encoder_mode_set,
153 .disable = imx_pd_encoder_disable, 160 .disable = imx_pd_encoder_disable,
161 .atomic_check = imx_pd_encoder_atomic_check,
154}; 162};
155 163
156static int imx_pd_register(struct drm_device *drm, 164static int imx_pd_register(struct drm_device *drm,
157 struct imx_parallel_display *imxpd) 165 struct imx_parallel_display *imxpd)
158{ 166{
167 struct drm_encoder *encoder = &imxpd->encoder;
159 int ret; 168 int ret;
160 169
161 ret = imx_drm_encoder_parse_of(drm, &imxpd->encoder, 170 ret = imx_drm_encoder_parse_of(drm, encoder, imxpd->dev->of_node);
162 imxpd->dev->of_node);
163 if (ret) 171 if (ret)
164 return ret; 172 return ret;
165 173
@@ -170,19 +178,33 @@ static int imx_pd_register(struct drm_device *drm,
170 */ 178 */
171 imxpd->connector.dpms = DRM_MODE_DPMS_OFF; 179 imxpd->connector.dpms = DRM_MODE_DPMS_OFF;
172 180
173 drm_encoder_helper_add(&imxpd->encoder, &imx_pd_encoder_helper_funcs); 181 drm_encoder_helper_add(encoder, &imx_pd_encoder_helper_funcs);
174 drm_encoder_init(drm, &imxpd->encoder, &imx_pd_encoder_funcs, 182 drm_encoder_init(drm, encoder, &imx_pd_encoder_funcs,
175 DRM_MODE_ENCODER_NONE, NULL); 183 DRM_MODE_ENCODER_NONE, NULL);
176 184
177 drm_connector_helper_add(&imxpd->connector, 185 if (!imxpd->bridge) {
178 &imx_pd_connector_helper_funcs); 186 drm_connector_helper_add(&imxpd->connector,
179 drm_connector_init(drm, &imxpd->connector, &imx_pd_connector_funcs, 187 &imx_pd_connector_helper_funcs);
180 DRM_MODE_CONNECTOR_VGA); 188 drm_connector_init(drm, &imxpd->connector,
189 &imx_pd_connector_funcs,
190 DRM_MODE_CONNECTOR_VGA);
191 }
181 192
182 if (imxpd->panel) 193 if (imxpd->panel)
183 drm_panel_attach(imxpd->panel, &imxpd->connector); 194 drm_panel_attach(imxpd->panel, &imxpd->connector);
184 195
185 drm_mode_connector_attach_encoder(&imxpd->connector, &imxpd->encoder); 196 if (imxpd->bridge) {
197 imxpd->bridge->encoder = encoder;
198 encoder->bridge = imxpd->bridge;
199 ret = drm_bridge_attach(drm, imxpd->bridge);
200 if (ret < 0) {
201 dev_err(imxpd->dev, "failed to attach bridge: %d\n",
202 ret);
203 return ret;
204 }
205 } else {
206 drm_mode_connector_attach_encoder(&imxpd->connector, encoder);
207 }
186 208
187 return 0; 209 return 0;
188} 210}
@@ -195,6 +217,7 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
195 const u8 *edidp; 217 const u8 *edidp;
196 struct imx_parallel_display *imxpd; 218 struct imx_parallel_display *imxpd;
197 int ret; 219 int ret;
220 u32 bus_format = 0;
198 const char *fmt; 221 const char *fmt;
199 222
200 imxpd = devm_kzalloc(dev, sizeof(*imxpd), GFP_KERNEL); 223 imxpd = devm_kzalloc(dev, sizeof(*imxpd), GFP_KERNEL);
@@ -208,14 +231,15 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
208 ret = of_property_read_string(np, "interface-pix-fmt", &fmt); 231 ret = of_property_read_string(np, "interface-pix-fmt", &fmt);
209 if (!ret) { 232 if (!ret) {
210 if (!strcmp(fmt, "rgb24")) 233 if (!strcmp(fmt, "rgb24"))
211 imxpd->bus_format = MEDIA_BUS_FMT_RGB888_1X24; 234 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
212 else if (!strcmp(fmt, "rgb565")) 235 else if (!strcmp(fmt, "rgb565"))
213 imxpd->bus_format = MEDIA_BUS_FMT_RGB565_1X16; 236 bus_format = MEDIA_BUS_FMT_RGB565_1X16;
214 else if (!strcmp(fmt, "bgr666")) 237 else if (!strcmp(fmt, "bgr666"))
215 imxpd->bus_format = MEDIA_BUS_FMT_RGB666_1X18; 238 bus_format = MEDIA_BUS_FMT_RGB666_1X18;
216 else if (!strcmp(fmt, "lvds666")) 239 else if (!strcmp(fmt, "lvds666"))
217 imxpd->bus_format = MEDIA_BUS_FMT_RGB666_1X24_CPADHI; 240 bus_format = MEDIA_BUS_FMT_RGB666_1X24_CPADHI;
218 } 241 }
242 imxpd->bus_format = bus_format;
219 243
220 /* port@1 is the output port */ 244 /* port@1 is the output port */
221 ep = of_graph_get_endpoint_by_regs(np, 1, -1); 245 ep = of_graph_get_endpoint_by_regs(np, 1, -1);
@@ -223,13 +247,30 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
223 struct device_node *remote; 247 struct device_node *remote;
224 248
225 remote = of_graph_get_remote_port_parent(ep); 249 remote = of_graph_get_remote_port_parent(ep);
250 if (!remote) {
251 dev_warn(dev, "endpoint %s not connected\n",
252 ep->full_name);
253 of_node_put(ep);
254 return -ENODEV;
255 }
226 of_node_put(ep); 256 of_node_put(ep);
227 if (remote) { 257
228 imxpd->panel = of_drm_find_panel(remote); 258 imxpd->panel = of_drm_find_panel(remote);
229 of_node_put(remote); 259 if (imxpd->panel) {
260 dev_dbg(dev, "found panel %s\n", remote->full_name);
261 } else {
262 imxpd->bridge = of_drm_find_bridge(remote);
263 if (imxpd->bridge)
264 dev_dbg(dev, "found bridge %s\n",
265 remote->full_name);
230 } 266 }
231 if (!imxpd->panel) 267 if (!imxpd->panel && !imxpd->bridge) {
268 dev_dbg(dev, "waiting for panel or bridge %s\n",
269 remote->full_name);
270 of_node_put(remote);
232 return -EPROBE_DEFER; 271 return -EPROBE_DEFER;
272 }
273 of_node_put(remote);
233 } 274 }
234 275
235 imxpd->dev = dev; 276 imxpd->dev = dev;
diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig
index eeefc971801a..23ac8041c562 100644
--- a/drivers/gpu/drm/mediatek/Kconfig
+++ b/drivers/gpu/drm/mediatek/Kconfig
@@ -6,7 +6,6 @@ config DRM_MEDIATEK
6 select DRM_KMS_HELPER 6 select DRM_KMS_HELPER
7 select DRM_MIPI_DSI 7 select DRM_MIPI_DSI
8 select DRM_PANEL 8 select DRM_PANEL
9 select IOMMU_DMA
10 select MEMORY 9 select MEMORY
11 select MTK_SMI 10 select MTK_SMI
12 help 11 help
@@ -14,3 +13,11 @@ config DRM_MEDIATEK
14 The module will be called mediatek-drm 13 The module will be called mediatek-drm
15 This driver provides kernel mode setting and 14 This driver provides kernel mode setting and
16 buffer management to userspace. 15 buffer management to userspace.
16
17config DRM_MEDIATEK_HDMI
18 tristate "DRM HDMI Support for Mediatek SoCs"
19 depends on DRM_MEDIATEK
20 select SND_SOC_HDMI_CODEC if SND_SOC
21 select GENERIC_PHY
22 help
23 DRM/KMS HDMI driver for Mediatek SoCs
diff --git a/drivers/gpu/drm/mediatek/Makefile b/drivers/gpu/drm/mediatek/Makefile
index 5fcf58e87786..bf2e5be1ab30 100644
--- a/drivers/gpu/drm/mediatek/Makefile
+++ b/drivers/gpu/drm/mediatek/Makefile
@@ -12,3 +12,10 @@ mediatek-drm-y := mtk_disp_ovl.o \
12 mtk_dpi.o 12 mtk_dpi.o
13 13
14obj-$(CONFIG_DRM_MEDIATEK) += mediatek-drm.o 14obj-$(CONFIG_DRM_MEDIATEK) += mediatek-drm.o
15
16mediatek-drm-hdmi-objs := mtk_cec.o \
17 mtk_hdmi.o \
18 mtk_hdmi_ddc.o \
19 mtk_mt8173_hdmi_phy.o
20
21obj-$(CONFIG_DRM_MEDIATEK_HDMI) += mediatek-drm-hdmi.o
diff --git a/drivers/gpu/drm/mediatek/mtk_cec.c b/drivers/gpu/drm/mediatek/mtk_cec.c
new file mode 100644
index 000000000000..7a3eb8c17ef9
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_cec.c
@@ -0,0 +1,265 @@
1/*
2 * Copyright (c) 2014 MediaTek Inc.
3 * Author: Jie Qiu <jie.qiu@mediatek.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14#include <linux/clk.h>
15#include <linux/delay.h>
16#include <linux/io.h>
17#include <linux/interrupt.h>
18#include <linux/platform_device.h>
19
20#include "mtk_cec.h"
21
22#define TR_CONFIG 0x00
23#define CLEAR_CEC_IRQ BIT(15)
24
25#define CEC_CKGEN 0x04
26#define CEC_32K_PDN BIT(19)
27#define PDN BIT(16)
28
29#define RX_EVENT 0x54
30#define HDMI_PORD BIT(25)
31#define HDMI_HTPLG BIT(24)
32#define HDMI_PORD_INT_EN BIT(9)
33#define HDMI_HTPLG_INT_EN BIT(8)
34
35#define RX_GEN_WD 0x58
36#define HDMI_PORD_INT_32K_STATUS BIT(26)
37#define RX_RISC_INT_32K_STATUS BIT(25)
38#define HDMI_HTPLG_INT_32K_STATUS BIT(24)
39#define HDMI_PORD_INT_32K_CLR BIT(18)
40#define RX_INT_32K_CLR BIT(17)
41#define HDMI_HTPLG_INT_32K_CLR BIT(16)
42#define HDMI_PORD_INT_32K_STA_MASK BIT(10)
43#define RX_RISC_INT_32K_STA_MASK BIT(9)
44#define HDMI_HTPLG_INT_32K_STA_MASK BIT(8)
45#define HDMI_PORD_INT_32K_EN BIT(2)
46#define RX_INT_32K_EN BIT(1)
47#define HDMI_HTPLG_INT_32K_EN BIT(0)
48
49#define NORMAL_INT_CTRL 0x5C
50#define HDMI_HTPLG_INT_STA BIT(0)
51#define HDMI_PORD_INT_STA BIT(1)
52#define HDMI_HTPLG_INT_CLR BIT(16)
53#define HDMI_PORD_INT_CLR BIT(17)
54#define HDMI_FULL_INT_CLR BIT(20)
55
56struct mtk_cec {
57 void __iomem *regs;
58 struct clk *clk;
59 int irq;
60 bool hpd;
61 void (*hpd_event)(bool hpd, struct device *dev);
62 struct device *hdmi_dev;
63 spinlock_t lock;
64};
65
66static void mtk_cec_clear_bits(struct mtk_cec *cec, unsigned int offset,
67 unsigned int bits)
68{
69 void __iomem *reg = cec->regs + offset;
70 u32 tmp;
71
72 tmp = readl(reg);
73 tmp &= ~bits;
74 writel(tmp, reg);
75}
76
77static void mtk_cec_set_bits(struct mtk_cec *cec, unsigned int offset,
78 unsigned int bits)
79{
80 void __iomem *reg = cec->regs + offset;
81 u32 tmp;
82
83 tmp = readl(reg);
84 tmp |= bits;
85 writel(tmp, reg);
86}
87
88static void mtk_cec_mask(struct mtk_cec *cec, unsigned int offset,
89 unsigned int val, unsigned int mask)
90{
91 u32 tmp = readl(cec->regs + offset) & ~mask;
92
93 tmp |= val & mask;
94 writel(val, cec->regs + offset);
95}
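
One detail worth noting in mtk_cec_mask() above: the masked value is accumulated in tmp, but the final writel() stores val, so the register bits outside mask that were just read back are discarded. A conventional read-modify-write helper would presumably write the merged value instead, along the lines of this sketch:

static void example_cec_mask(struct mtk_cec *cec, unsigned int offset,
			     unsigned int val, unsigned int mask)
{
	u32 tmp = readl(cec->regs + offset) & ~mask;	/* keep bits outside mask */

	tmp |= val & mask;				/* merge in the new bits */
	writel(tmp, cec->regs + offset);		/* write the merged value */
}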
96
97void mtk_cec_set_hpd_event(struct device *dev,
98 void (*hpd_event)(bool hpd, struct device *dev),
99 struct device *hdmi_dev)
100{
101 struct mtk_cec *cec = dev_get_drvdata(dev);
102 unsigned long flags;
103
104 spin_lock_irqsave(&cec->lock, flags);
105 cec->hdmi_dev = hdmi_dev;
106 cec->hpd_event = hpd_event;
107 spin_unlock_irqrestore(&cec->lock, flags);
108}
109
110bool mtk_cec_hpd_high(struct device *dev)
111{
112 struct mtk_cec *cec = dev_get_drvdata(dev);
113 unsigned int status;
114
115 status = readl(cec->regs + RX_EVENT);
116
117 return (status & (HDMI_PORD | HDMI_HTPLG)) == (HDMI_PORD | HDMI_HTPLG);
118}
119
120static void mtk_cec_htplg_irq_init(struct mtk_cec *cec)
121{
122 mtk_cec_mask(cec, CEC_CKGEN, 0 | CEC_32K_PDN, PDN | CEC_32K_PDN);
123 mtk_cec_set_bits(cec, RX_GEN_WD, HDMI_PORD_INT_32K_CLR |
124 RX_INT_32K_CLR | HDMI_HTPLG_INT_32K_CLR);
125 mtk_cec_mask(cec, RX_GEN_WD, 0, HDMI_PORD_INT_32K_CLR | RX_INT_32K_CLR |
126 HDMI_HTPLG_INT_32K_CLR | HDMI_PORD_INT_32K_EN |
127 RX_INT_32K_EN | HDMI_HTPLG_INT_32K_EN);
128}
129
130static void mtk_cec_htplg_irq_enable(struct mtk_cec *cec)
131{
132 mtk_cec_set_bits(cec, RX_EVENT, HDMI_PORD_INT_EN | HDMI_HTPLG_INT_EN);
133}
134
135static void mtk_cec_htplg_irq_disable(struct mtk_cec *cec)
136{
137 mtk_cec_clear_bits(cec, RX_EVENT, HDMI_PORD_INT_EN | HDMI_HTPLG_INT_EN);
138}
139
140static void mtk_cec_clear_htplg_irq(struct mtk_cec *cec)
141{
142 mtk_cec_set_bits(cec, TR_CONFIG, CLEAR_CEC_IRQ);
143 mtk_cec_set_bits(cec, NORMAL_INT_CTRL, HDMI_HTPLG_INT_CLR |
144 HDMI_PORD_INT_CLR | HDMI_FULL_INT_CLR);
145 mtk_cec_set_bits(cec, RX_GEN_WD, HDMI_PORD_INT_32K_CLR |
146 RX_INT_32K_CLR | HDMI_HTPLG_INT_32K_CLR);
147 usleep_range(5, 10);
148 mtk_cec_clear_bits(cec, NORMAL_INT_CTRL, HDMI_HTPLG_INT_CLR |
149 HDMI_PORD_INT_CLR | HDMI_FULL_INT_CLR);
150 mtk_cec_clear_bits(cec, TR_CONFIG, CLEAR_CEC_IRQ);
151 mtk_cec_clear_bits(cec, RX_GEN_WD, HDMI_PORD_INT_32K_CLR |
152 RX_INT_32K_CLR | HDMI_HTPLG_INT_32K_CLR);
153}
154
155static void mtk_cec_hpd_event(struct mtk_cec *cec, bool hpd)
156{
157 void (*hpd_event)(bool hpd, struct device *dev);
158 struct device *hdmi_dev;
159 unsigned long flags;
160
161 spin_lock_irqsave(&cec->lock, flags);
162 hpd_event = cec->hpd_event;
163 hdmi_dev = cec->hdmi_dev;
164 spin_unlock_irqrestore(&cec->lock, flags);
165
166 if (hpd_event)
167 hpd_event(hpd, hdmi_dev);
168}
169
170static irqreturn_t mtk_cec_htplg_isr_thread(int irq, void *arg)
171{
172 struct device *dev = arg;
173 struct mtk_cec *cec = dev_get_drvdata(dev);
174 bool hpd;
175
176 mtk_cec_clear_htplg_irq(cec);
177 hpd = mtk_cec_hpd_high(dev);
178
179 if (cec->hpd != hpd) {
180 dev_dbg(dev, "hotplug event! cur hpd = %d, hpd = %d\n",
181 cec->hpd, hpd);
182 cec->hpd = hpd;
183 mtk_cec_hpd_event(cec, hpd);
184 }
185 return IRQ_HANDLED;
186}
187
188static int mtk_cec_probe(struct platform_device *pdev)
189{
190 struct device *dev = &pdev->dev;
191 struct mtk_cec *cec;
192 struct resource *res;
193 int ret;
194
195 cec = devm_kzalloc(dev, sizeof(*cec), GFP_KERNEL);
196 if (!cec)
197 return -ENOMEM;
198
199 platform_set_drvdata(pdev, cec);
200 spin_lock_init(&cec->lock);
201
202 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
203 cec->regs = devm_ioremap_resource(dev, res);
204 if (IS_ERR(cec->regs)) {
205 ret = PTR_ERR(cec->regs);
206 dev_err(dev, "Failed to ioremap cec: %d\n", ret);
207 return ret;
208 }
209
210 cec->clk = devm_clk_get(dev, NULL);
211 if (IS_ERR(cec->clk)) {
212 ret = PTR_ERR(cec->clk);
213 dev_err(dev, "Failed to get cec clock: %d\n", ret);
214 return ret;
215 }
216
217 cec->irq = platform_get_irq(pdev, 0);
218 if (cec->irq < 0) {
219 dev_err(dev, "Failed to get cec irq: %d\n", cec->irq);
220 return cec->irq;
221 }
222
223 ret = devm_request_threaded_irq(dev, cec->irq, NULL,
224 mtk_cec_htplg_isr_thread,
225 IRQF_SHARED | IRQF_TRIGGER_LOW |
226 IRQF_ONESHOT, "hdmi hpd", dev);
227 if (ret) {
228 dev_err(dev, "Failed to register cec irq: %d\n", ret);
229 return ret;
230 }
231
232 ret = clk_prepare_enable(cec->clk);
233 if (ret) {
234 dev_err(dev, "Failed to enable cec clock: %d\n", ret);
235 return ret;
236 }
237
238 mtk_cec_htplg_irq_init(cec);
239 mtk_cec_htplg_irq_enable(cec);
240
241 return 0;
242}
243
244static int mtk_cec_remove(struct platform_device *pdev)
245{
246 struct mtk_cec *cec = platform_get_drvdata(pdev);
247
248 mtk_cec_htplg_irq_disable(cec);
249 clk_disable_unprepare(cec->clk);
250 return 0;
251}
252
253static const struct of_device_id mtk_cec_of_ids[] = {
254 { .compatible = "mediatek,mt8173-cec", },
255 {}
256};
257
258struct platform_driver mtk_cec_driver = {
259 .probe = mtk_cec_probe,
260 .remove = mtk_cec_remove,
261 .driver = {
262 .name = "mediatek-cec",
263 .of_match_table = mtk_cec_of_ids,
264 },
265};
diff --git a/drivers/gpu/drm/mediatek/mtk_cec.h b/drivers/gpu/drm/mediatek/mtk_cec.h
new file mode 100644
index 000000000000..10057b7eabec
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_cec.h
@@ -0,0 +1,26 @@
1/*
2 * Copyright (c) 2014 MediaTek Inc.
3 * Author: Jie Qiu <jie.qiu@mediatek.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14#ifndef _MTK_CEC_H
15#define _MTK_CEC_H
16
17#include <linux/types.h>
18
19struct device;
20
21void mtk_cec_set_hpd_event(struct device *dev,
22 void (*hotplug_event)(bool hpd, struct device *dev),
23 struct device *hdmi_dev);
24bool mtk_cec_hpd_high(struct device *dev);
25
26#endif /* _MTK_CEC_H */
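
The CEC block exposes hotplug detection to the HDMI driver only through these two functions. A rough sketch of how a consumer would wire them up follows; the lookup and function names are placeholders, not the actual mtk_hdmi code added later in this series:

/* Placeholder consumer of the mtk_cec HPD API declared above. */
static void example_hpd_event(bool hpd, struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);	/* assumed wiring */

	if (drm)
		drm_helper_hpd_irq_event(drm);		/* kick connector reprobe */
}

static bool example_hdmi_wire_up_hpd(struct device *hdmi_dev,
				     struct device *cec_dev)
{
	/* Ask the CEC block to call us back on hotplug changes... */
	mtk_cec_set_hpd_event(cec_dev, example_hpd_event, hdmi_dev);

	/* ...and sample the current pin state once at bind time. */
	return mtk_cec_hpd_high(cec_dev);
}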
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index b1223d54d0ab..eebb7d881c2b 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -91,7 +91,7 @@ static int mtk_atomic_commit(struct drm_device *drm,
91 mutex_lock(&private->commit.lock); 91 mutex_lock(&private->commit.lock);
92 flush_work(&private->commit.work); 92 flush_work(&private->commit.work);
93 93
94 drm_atomic_helper_swap_state(drm, state); 94 drm_atomic_helper_swap_state(state, true);
95 95
96 if (async) 96 if (async)
97 mtk_atomic_schedule(private, state); 97 mtk_atomic_schedule(private, state);
@@ -243,7 +243,7 @@ static struct drm_driver mtk_drm_driver = {
243 .enable_vblank = mtk_drm_crtc_enable_vblank, 243 .enable_vblank = mtk_drm_crtc_enable_vblank,
244 .disable_vblank = mtk_drm_crtc_disable_vblank, 244 .disable_vblank = mtk_drm_crtc_disable_vblank,
245 245
246 .gem_free_object = mtk_drm_gem_free_object, 246 .gem_free_object_unlocked = mtk_drm_gem_free_object,
247 .gem_vm_ops = &drm_gem_cma_vm_ops, 247 .gem_vm_ops = &drm_gem_cma_vm_ops,
248 .dumb_create = mtk_drm_gem_dumb_create, 248 .dumb_create = mtk_drm_gem_dumb_create,
249 .dumb_map_offset = mtk_drm_gem_dumb_map_offset, 249 .dumb_map_offset = mtk_drm_gem_dumb_map_offset,
@@ -280,8 +280,6 @@ static int mtk_drm_bind(struct device *dev)
280 if (!drm) 280 if (!drm)
281 return -ENOMEM; 281 return -ENOMEM;
282 282
283 drm_dev_set_unique(drm, dev_name(dev));
284
285 drm->dev_private = private; 283 drm->dev_private = private;
286 private->drm = drm; 284 private->drm = drm;
287 285
@@ -293,14 +291,8 @@ static int mtk_drm_bind(struct device *dev)
293 if (ret < 0) 291 if (ret < 0)
294 goto err_deinit; 292 goto err_deinit;
295 293
296 ret = drm_connector_register_all(drm);
297 if (ret < 0)
298 goto err_unregister;
299
300 return 0; 294 return 0;
301 295
302err_unregister:
303 drm_dev_unregister(drm);
304err_deinit: 296err_deinit:
305 mtk_drm_kms_deinit(drm); 297 mtk_drm_kms_deinit(drm);
306err_free: 298err_free:
@@ -455,7 +447,6 @@ static int mtk_drm_remove(struct platform_device *pdev)
455 struct drm_device *drm = private->drm; 447 struct drm_device *drm = private->drm;
456 int i; 448 int i;
457 449
458 drm_connector_unregister_all(drm);
459 drm_dev_unregister(drm); 450 drm_dev_unregister(drm);
460 mtk_drm_kms_deinit(drm); 451 mtk_drm_kms_deinit(drm);
461 drm_dev_unref(drm); 452 drm_dev_unref(drm);
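
The only functional change to the commit path above is the new drm_atomic_helper_swap_state() signature: it now takes the state plus a stall flag rather than the old (dev, state) pair. A minimal sketch of a custom atomic_commit built around the new call is below; example_commit_tail() is a placeholder for the driver's own hardware-commit step (mtk keeps its worker-based flow for async commits, as the surrounding code shows):

static int example_atomic_commit(struct drm_device *drm,
				 struct drm_atomic_state *state, bool async)
{
	int ret;

	ret = drm_atomic_helper_prepare_planes(drm, state);
	if (ret)
		return ret;

	/* 'true' stalls until preceding commits have finished swapping. */
	drm_atomic_helper_swap_state(state, true);

	/*
	 * From here the swapped state is committed to hardware, either
	 * directly or from a worker when 'async' is set, much like
	 * mtk_atomic_schedule() does in this driver.
	 */
	example_commit_tail(drm, state, async);	/* placeholder */

	return 0;
}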
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
index 51bc8988fc26..3995765a90dc 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
@@ -170,6 +170,7 @@ static int mtk_plane_atomic_check(struct drm_plane *plane,
170 170
171 return drm_plane_helper_check_update(plane, state->crtc, fb, 171 return drm_plane_helper_check_update(plane, state->crtc, fb,
172 &src, &dest, &clip, 172 &src, &dest, &clip,
173 state->rotation,
173 DRM_PLANE_HELPER_NO_SCALING, 174 DRM_PLANE_HELPER_NO_SCALING,
174 DRM_PLANE_HELPER_NO_SCALING, 175 DRM_PLANE_HELPER_NO_SCALING,
175 true, true, &visible); 176 true, true, &visible);
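
The mtk_plane_atomic_check() change above only adds the rotation argument that drm_plane_helper_check_update() grew in this release. For reference, the prototype as reconstructed from this call site (parameter names are approximations, not copied from the header):

int drm_plane_helper_check_update(struct drm_plane *plane,
				  struct drm_crtc *crtc,
				  struct drm_framebuffer *fb,
				  struct drm_rect *src,
				  struct drm_rect *dest,
				  const struct drm_rect *clip,
				  unsigned int rotation,	/* new parameter */
				  int min_scale,
				  int max_scale,
				  bool can_position,
				  bool can_update_disabled,
				  bool *visible);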
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 769559124562..28b2044ed9f2 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -575,14 +575,6 @@ static int mtk_dsi_connector_get_modes(struct drm_connector *connector)
575 return drm_panel_get_modes(dsi->panel); 575 return drm_panel_get_modes(dsi->panel);
576} 576}
577 577
578static struct drm_encoder *mtk_dsi_connector_best_encoder(
579 struct drm_connector *connector)
580{
581 struct mtk_dsi *dsi = connector_to_dsi(connector);
582
583 return &dsi->encoder;
584}
585
586static const struct drm_encoder_helper_funcs mtk_dsi_encoder_helper_funcs = { 578static const struct drm_encoder_helper_funcs mtk_dsi_encoder_helper_funcs = {
587 .mode_fixup = mtk_dsi_encoder_mode_fixup, 579 .mode_fixup = mtk_dsi_encoder_mode_fixup,
588 .mode_set = mtk_dsi_encoder_mode_set, 580 .mode_set = mtk_dsi_encoder_mode_set,
@@ -603,7 +595,6 @@ static const struct drm_connector_funcs mtk_dsi_connector_funcs = {
603static const struct drm_connector_helper_funcs 595static const struct drm_connector_helper_funcs
604 mtk_dsi_connector_helper_funcs = { 596 mtk_dsi_connector_helper_funcs = {
605 .get_modes = mtk_dsi_connector_get_modes, 597 .get_modes = mtk_dsi_connector_get_modes,
606 .best_encoder = mtk_dsi_connector_best_encoder,
607}; 598};
608 599
609static int mtk_drm_attach_bridge(struct drm_bridge *bridge, 600static int mtk_drm_attach_bridge(struct drm_bridge *bridge,
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
new file mode 100644
index 000000000000..334562d06731
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -0,0 +1,1828 @@
1/*
2 * Copyright (c) 2014 MediaTek Inc.
3 * Author: Jie Qiu <jie.qiu@mediatek.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14#include <drm/drmP.h>
15#include <drm/drm_atomic_helper.h>
16#include <drm/drm_crtc.h>
17#include <drm/drm_crtc_helper.h>
18#include <drm/drm_edid.h>
19#include <linux/arm-smccc.h>
20#include <linux/clk.h>
21#include <linux/delay.h>
22#include <linux/hdmi.h>
23#include <linux/i2c.h>
24#include <linux/io.h>
25#include <linux/kernel.h>
26#include <linux/mfd/syscon.h>
27#include <linux/of_platform.h>
28#include <linux/of.h>
29#include <linux/of_gpio.h>
30#include <linux/of_graph.h>
31#include <linux/phy/phy.h>
32#include <linux/platform_device.h>
33#include <linux/regmap.h>
34#include <sound/hdmi-codec.h>
35#include "mtk_cec.h"
36#include "mtk_hdmi.h"
37#include "mtk_hdmi_regs.h"
38
39#define NCTS_BYTES 7
40
41enum mtk_hdmi_clk_id {
42 MTK_HDMI_CLK_HDMI_PIXEL,
43 MTK_HDMI_CLK_HDMI_PLL,
44 MTK_HDMI_CLK_AUD_BCLK,
45 MTK_HDMI_CLK_AUD_SPDIF,
46 MTK_HDMI_CLK_COUNT
47};
48
49enum hdmi_aud_input_type {
50 HDMI_AUD_INPUT_I2S = 0,
51 HDMI_AUD_INPUT_SPDIF,
52};
53
54enum hdmi_aud_i2s_fmt {
55 HDMI_I2S_MODE_RJT_24BIT = 0,
56 HDMI_I2S_MODE_RJT_16BIT,
57 HDMI_I2S_MODE_LJT_24BIT,
58 HDMI_I2S_MODE_LJT_16BIT,
59 HDMI_I2S_MODE_I2S_24BIT,
60 HDMI_I2S_MODE_I2S_16BIT
61};
62
63enum hdmi_aud_mclk {
64 HDMI_AUD_MCLK_128FS,
65 HDMI_AUD_MCLK_192FS,
66 HDMI_AUD_MCLK_256FS,
67 HDMI_AUD_MCLK_384FS,
68 HDMI_AUD_MCLK_512FS,
69 HDMI_AUD_MCLK_768FS,
70 HDMI_AUD_MCLK_1152FS,
71};
72
73enum hdmi_aud_channel_type {
74 HDMI_AUD_CHAN_TYPE_1_0 = 0,
75 HDMI_AUD_CHAN_TYPE_1_1,
76 HDMI_AUD_CHAN_TYPE_2_0,
77 HDMI_AUD_CHAN_TYPE_2_1,
78 HDMI_AUD_CHAN_TYPE_3_0,
79 HDMI_AUD_CHAN_TYPE_3_1,
80 HDMI_AUD_CHAN_TYPE_4_0,
81 HDMI_AUD_CHAN_TYPE_4_1,
82 HDMI_AUD_CHAN_TYPE_5_0,
83 HDMI_AUD_CHAN_TYPE_5_1,
84 HDMI_AUD_CHAN_TYPE_6_0,
85 HDMI_AUD_CHAN_TYPE_6_1,
86 HDMI_AUD_CHAN_TYPE_7_0,
87 HDMI_AUD_CHAN_TYPE_7_1,
88 HDMI_AUD_CHAN_TYPE_3_0_LRS,
89 HDMI_AUD_CHAN_TYPE_3_1_LRS,
90 HDMI_AUD_CHAN_TYPE_4_0_CLRS,
91 HDMI_AUD_CHAN_TYPE_4_1_CLRS,
92 HDMI_AUD_CHAN_TYPE_6_1_CS,
93 HDMI_AUD_CHAN_TYPE_6_1_CH,
94 HDMI_AUD_CHAN_TYPE_6_1_OH,
95 HDMI_AUD_CHAN_TYPE_6_1_CHR,
96 HDMI_AUD_CHAN_TYPE_7_1_LH_RH,
97 HDMI_AUD_CHAN_TYPE_7_1_LSR_RSR,
98 HDMI_AUD_CHAN_TYPE_7_1_LC_RC,
99 HDMI_AUD_CHAN_TYPE_7_1_LW_RW,
100 HDMI_AUD_CHAN_TYPE_7_1_LSD_RSD,
101 HDMI_AUD_CHAN_TYPE_7_1_LSS_RSS,
102 HDMI_AUD_CHAN_TYPE_7_1_LHS_RHS,
103 HDMI_AUD_CHAN_TYPE_7_1_CS_CH,
104 HDMI_AUD_CHAN_TYPE_7_1_CS_OH,
105 HDMI_AUD_CHAN_TYPE_7_1_CS_CHR,
106 HDMI_AUD_CHAN_TYPE_7_1_CH_OH,
107 HDMI_AUD_CHAN_TYPE_7_1_CH_CHR,
108 HDMI_AUD_CHAN_TYPE_7_1_OH_CHR,
109 HDMI_AUD_CHAN_TYPE_7_1_LSS_RSS_LSR_RSR,
110 HDMI_AUD_CHAN_TYPE_6_0_CS,
111 HDMI_AUD_CHAN_TYPE_6_0_CH,
112 HDMI_AUD_CHAN_TYPE_6_0_OH,
113 HDMI_AUD_CHAN_TYPE_6_0_CHR,
114 HDMI_AUD_CHAN_TYPE_7_0_LH_RH,
115 HDMI_AUD_CHAN_TYPE_7_0_LSR_RSR,
116 HDMI_AUD_CHAN_TYPE_7_0_LC_RC,
117 HDMI_AUD_CHAN_TYPE_7_0_LW_RW,
118 HDMI_AUD_CHAN_TYPE_7_0_LSD_RSD,
119 HDMI_AUD_CHAN_TYPE_7_0_LSS_RSS,
120 HDMI_AUD_CHAN_TYPE_7_0_LHS_RHS,
121 HDMI_AUD_CHAN_TYPE_7_0_CS_CH,
122 HDMI_AUD_CHAN_TYPE_7_0_CS_OH,
123 HDMI_AUD_CHAN_TYPE_7_0_CS_CHR,
124 HDMI_AUD_CHAN_TYPE_7_0_CH_OH,
125 HDMI_AUD_CHAN_TYPE_7_0_CH_CHR,
126 HDMI_AUD_CHAN_TYPE_7_0_OH_CHR,
127 HDMI_AUD_CHAN_TYPE_7_0_LSS_RSS_LSR_RSR,
128 HDMI_AUD_CHAN_TYPE_8_0_LH_RH_CS,
129 HDMI_AUD_CHAN_TYPE_UNKNOWN = 0xFF
130};
131
132enum hdmi_aud_channel_swap_type {
133 HDMI_AUD_SWAP_LR,
134 HDMI_AUD_SWAP_LFE_CC,
135 HDMI_AUD_SWAP_LSRS,
136 HDMI_AUD_SWAP_RLS_RRS,
137 HDMI_AUD_SWAP_LR_STATUS,
138};
139
140struct hdmi_audio_param {
141 enum hdmi_audio_coding_type aud_codec;
142 enum hdmi_audio_sample_size aud_sampe_size;
143 enum hdmi_aud_input_type aud_input_type;
144 enum hdmi_aud_i2s_fmt aud_i2s_fmt;
145 enum hdmi_aud_mclk aud_mclk;
146 enum hdmi_aud_channel_type aud_input_chan_type;
147 struct hdmi_codec_params codec_params;
148};
149
150struct mtk_hdmi {
151 struct drm_bridge bridge;
152 struct drm_connector conn;
153 struct device *dev;
154 struct phy *phy;
155 struct device *cec_dev;
156 struct i2c_adapter *ddc_adpt;
157 struct clk *clk[MTK_HDMI_CLK_COUNT];
158 struct drm_display_mode mode;
159 bool dvi_mode;
160 u32 min_clock;
161 u32 max_clock;
162 u32 max_hdisplay;
163 u32 max_vdisplay;
164 u32 ibias;
165 u32 ibias_up;
166 struct regmap *sys_regmap;
167 unsigned int sys_offset;
168 void __iomem *regs;
169 enum hdmi_colorspace csp;
170 struct hdmi_audio_param aud_param;
171 bool audio_enable;
172 bool powered;
173 bool enabled;
174};
175
176static inline struct mtk_hdmi *hdmi_ctx_from_bridge(struct drm_bridge *b)
177{
178 return container_of(b, struct mtk_hdmi, bridge);
179}
180
181static inline struct mtk_hdmi *hdmi_ctx_from_conn(struct drm_connector *c)
182{
183 return container_of(c, struct mtk_hdmi, conn);
184}
185
186static u32 mtk_hdmi_read(struct mtk_hdmi *hdmi, u32 offset)
187{
188 return readl(hdmi->regs + offset);
189}
190
191static void mtk_hdmi_write(struct mtk_hdmi *hdmi, u32 offset, u32 val)
192{
193 writel(val, hdmi->regs + offset);
194}
195
196static void mtk_hdmi_clear_bits(struct mtk_hdmi *hdmi, u32 offset, u32 bits)
197{
198 void __iomem *reg = hdmi->regs + offset;
199 u32 tmp;
200
201 tmp = readl(reg);
202 tmp &= ~bits;
203 writel(tmp, reg);
204}
205
206static void mtk_hdmi_set_bits(struct mtk_hdmi *hdmi, u32 offset, u32 bits)
207{
208 void __iomem *reg = hdmi->regs + offset;
209 u32 tmp;
210
211 tmp = readl(reg);
212 tmp |= bits;
213 writel(tmp, reg);
214}
215
216static void mtk_hdmi_mask(struct mtk_hdmi *hdmi, u32 offset, u32 val, u32 mask)
217{
218 void __iomem *reg = hdmi->regs + offset;
219 u32 tmp;
220
221 tmp = readl(reg);
222 tmp = (tmp & ~mask) | (val & mask);
223 writel(tmp, reg);
224}
225
226static void mtk_hdmi_hw_vid_black(struct mtk_hdmi *hdmi, bool black)
227{
228 mtk_hdmi_mask(hdmi, VIDEO_CFG_4, black ? GEN_RGB : NORMAL_PATH,
229 VIDEO_SOURCE_SEL);
230}
231
232static void mtk_hdmi_hw_make_reg_writable(struct mtk_hdmi *hdmi, bool enable)
233{
234 struct arm_smccc_res res;
235
236 /*
237 * MT8173 HDMI hardware has an output control bit to enable/disable HDMI
238 * output. This bit can only be controlled in ARM supervisor mode.
239 * The ARM trusted firmware provides an API for the HDMI driver to set
240 * this control bit to enable HDMI output in supervisor mode.
241 */
242 arm_smccc_smc(MTK_SIP_SET_AUTHORIZED_SECURE_REG, 0x14000904, 0x80000000,
243 0, 0, 0, 0, 0, &res);
244
245 regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG20,
246 HDMI_PCLK_FREE_RUN, enable ? HDMI_PCLK_FREE_RUN : 0);
247 regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG1C,
248 HDMI_ON | ANLG_ON, enable ? (HDMI_ON | ANLG_ON) : 0);
249}
250
251static void mtk_hdmi_hw_1p4_version_enable(struct mtk_hdmi *hdmi, bool enable)
252{
253 regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG20,
254 HDMI2P0_EN, enable ? 0 : HDMI2P0_EN);
255}
256
257static void mtk_hdmi_hw_aud_mute(struct mtk_hdmi *hdmi)
258{
259 mtk_hdmi_set_bits(hdmi, GRL_AUDIO_CFG, AUDIO_ZERO);
260}
261
262static void mtk_hdmi_hw_aud_unmute(struct mtk_hdmi *hdmi)
263{
264 mtk_hdmi_clear_bits(hdmi, GRL_AUDIO_CFG, AUDIO_ZERO);
265}
266
267static void mtk_hdmi_hw_reset(struct mtk_hdmi *hdmi)
268{
269 regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG1C,
270 HDMI_RST, HDMI_RST);
271 regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG1C,
272 HDMI_RST, 0);
273 mtk_hdmi_clear_bits(hdmi, GRL_CFG3, CFG3_CONTROL_PACKET_DELAY);
274 regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG1C,
275 ANLG_ON, ANLG_ON);
276}
277
278static void mtk_hdmi_hw_enable_notice(struct mtk_hdmi *hdmi, bool enable_notice)
279{
280 mtk_hdmi_mask(hdmi, GRL_CFG2, enable_notice ? CFG2_NOTICE_EN : 0,
281 CFG2_NOTICE_EN);
282}
283
284static void mtk_hdmi_hw_write_int_mask(struct mtk_hdmi *hdmi, u32 int_mask)
285{
286 mtk_hdmi_write(hdmi, GRL_INT_MASK, int_mask);
287}
288
289static void mtk_hdmi_hw_enable_dvi_mode(struct mtk_hdmi *hdmi, bool enable)
290{
291 mtk_hdmi_mask(hdmi, GRL_CFG1, enable ? CFG1_DVI : 0, CFG1_DVI);
292}
293
294static void mtk_hdmi_hw_send_info_frame(struct mtk_hdmi *hdmi, u8 *buffer,
295 u8 len)
296{
297 u32 ctrl_reg = GRL_CTRL;
298 int i;
299 u8 *frame_data;
300 enum hdmi_infoframe_type frame_type;
301 u8 frame_ver;
302 u8 frame_len;
303 u8 checksum;
304 int ctrl_frame_en = 0;
305
306 frame_type = *buffer;
307 buffer += 1;
308 frame_ver = *buffer;
309 buffer += 1;
310 frame_len = *buffer;
311 buffer += 1;
312 checksum = *buffer;
313 buffer += 1;
314 frame_data = buffer;
315
316 dev_dbg(hdmi->dev,
317 "frame_type:0x%x,frame_ver:0x%x,frame_len:0x%x,checksum:0x%x\n",
318 frame_type, frame_ver, frame_len, checksum);
319
320 switch (frame_type) {
321 case HDMI_INFOFRAME_TYPE_AVI:
322 ctrl_frame_en = CTRL_AVI_EN;
323 ctrl_reg = GRL_CTRL;
324 break;
325 case HDMI_INFOFRAME_TYPE_SPD:
326 ctrl_frame_en = CTRL_SPD_EN;
327 ctrl_reg = GRL_CTRL;
328 break;
329 case HDMI_INFOFRAME_TYPE_AUDIO:
330 ctrl_frame_en = CTRL_AUDIO_EN;
331 ctrl_reg = GRL_CTRL;
332 break;
333 case HDMI_INFOFRAME_TYPE_VENDOR:
334 ctrl_frame_en = VS_EN;
335 ctrl_reg = GRL_ACP_ISRC_CTRL;
336 break;
337 }
338 mtk_hdmi_clear_bits(hdmi, ctrl_reg, ctrl_frame_en);
339 mtk_hdmi_write(hdmi, GRL_INFOFRM_TYPE, frame_type);
340 mtk_hdmi_write(hdmi, GRL_INFOFRM_VER, frame_ver);
341 mtk_hdmi_write(hdmi, GRL_INFOFRM_LNG, frame_len);
342
343 mtk_hdmi_write(hdmi, GRL_IFM_PORT, checksum);
344 for (i = 0; i < frame_len; i++)
345 mtk_hdmi_write(hdmi, GRL_IFM_PORT, frame_data[i]);
346
347 mtk_hdmi_set_bits(hdmi, ctrl_reg, ctrl_frame_en);
348}
349
350static void mtk_hdmi_hw_send_aud_packet(struct mtk_hdmi *hdmi, bool enable)
351{
352 mtk_hdmi_mask(hdmi, GRL_SHIFT_R2, enable ? 0 : AUDIO_PACKET_OFF,
353 AUDIO_PACKET_OFF);
354}
355
356static void mtk_hdmi_hw_config_sys(struct mtk_hdmi *hdmi)
357{
358 regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG20,
359 HDMI_OUT_FIFO_EN | MHL_MODE_ON, 0);
360 usleep_range(2000, 4000);
361 regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG20,
362 HDMI_OUT_FIFO_EN | MHL_MODE_ON, HDMI_OUT_FIFO_EN);
363}
364
365static void mtk_hdmi_hw_set_deep_color_mode(struct mtk_hdmi *hdmi)
366{
367 regmap_update_bits(hdmi->sys_regmap, hdmi->sys_offset + HDMI_SYS_CFG20,
368 DEEP_COLOR_MODE_MASK | DEEP_COLOR_EN,
369 COLOR_8BIT_MODE);
370}
371
372static void mtk_hdmi_hw_send_av_mute(struct mtk_hdmi *hdmi)
373{
374 mtk_hdmi_clear_bits(hdmi, GRL_CFG4, CTRL_AVMUTE);
375 usleep_range(2000, 4000);
376 mtk_hdmi_set_bits(hdmi, GRL_CFG4, CTRL_AVMUTE);
377}
378
379static void mtk_hdmi_hw_send_av_unmute(struct mtk_hdmi *hdmi)
380{
381 mtk_hdmi_mask(hdmi, GRL_CFG4, CFG4_AV_UNMUTE_EN,
382 CFG4_AV_UNMUTE_EN | CFG4_AV_UNMUTE_SET);
383 usleep_range(2000, 4000);
384 mtk_hdmi_mask(hdmi, GRL_CFG4, CFG4_AV_UNMUTE_SET,
385 CFG4_AV_UNMUTE_EN | CFG4_AV_UNMUTE_SET);
386}
387
388static void mtk_hdmi_hw_ncts_enable(struct mtk_hdmi *hdmi, bool on)
389{
390 mtk_hdmi_mask(hdmi, GRL_CTS_CTRL, on ? 0 : CTS_CTRL_SOFT,
391 CTS_CTRL_SOFT);
392}
393
394static void mtk_hdmi_hw_ncts_auto_write_enable(struct mtk_hdmi *hdmi,
395 bool enable)
396{
397 mtk_hdmi_mask(hdmi, GRL_CTS_CTRL, enable ? NCTS_WRI_ANYTIME : 0,
398 NCTS_WRI_ANYTIME);
399}
400
401static void mtk_hdmi_hw_msic_setting(struct mtk_hdmi *hdmi,
402 struct drm_display_mode *mode)
403{
404 mtk_hdmi_clear_bits(hdmi, GRL_CFG4, CFG4_MHL_MODE);
405
406 if (mode->flags & DRM_MODE_FLAG_INTERLACE &&
407 mode->clock == 74250 &&
408 mode->vdisplay == 1080)
409 mtk_hdmi_clear_bits(hdmi, GRL_CFG2, CFG2_MHL_DE_SEL);
410 else
411 mtk_hdmi_set_bits(hdmi, GRL_CFG2, CFG2_MHL_DE_SEL);
412}
413
414static void mtk_hdmi_hw_aud_set_channel_swap(struct mtk_hdmi *hdmi,
415 enum hdmi_aud_channel_swap_type swap)
416{
417 u8 swap_bit;
418
419 switch (swap) {
420 case HDMI_AUD_SWAP_LR:
421 swap_bit = LR_SWAP;
422 break;
423 case HDMI_AUD_SWAP_LFE_CC:
424 swap_bit = LFE_CC_SWAP;
425 break;
426 case HDMI_AUD_SWAP_LSRS:
427 swap_bit = LSRS_SWAP;
428 break;
429 case HDMI_AUD_SWAP_RLS_RRS:
430 swap_bit = RLS_RRS_SWAP;
431 break;
432 case HDMI_AUD_SWAP_LR_STATUS:
433 swap_bit = LR_STATUS_SWAP;
434 break;
435 default:
436 swap_bit = LFE_CC_SWAP;
437 break;
438 }
439 mtk_hdmi_mask(hdmi, GRL_CH_SWAP, swap_bit, 0xff);
440}
441
442static void mtk_hdmi_hw_aud_set_bit_num(struct mtk_hdmi *hdmi,
443 enum hdmi_audio_sample_size bit_num)
444{
445 u32 val;
446
447 switch (bit_num) {
448 case HDMI_AUDIO_SAMPLE_SIZE_16:
449 val = AOUT_16BIT;
450 break;
451 case HDMI_AUDIO_SAMPLE_SIZE_20:
452 val = AOUT_20BIT;
453 break;
454 case HDMI_AUDIO_SAMPLE_SIZE_24:
455 case HDMI_AUDIO_SAMPLE_SIZE_STREAM:
456 val = AOUT_24BIT;
457 break;
458 }
459
460 mtk_hdmi_mask(hdmi, GRL_AOUT_CFG, val, AOUT_BNUM_SEL_MASK);
461}
462
463static void mtk_hdmi_hw_aud_set_i2s_fmt(struct mtk_hdmi *hdmi,
464 enum hdmi_aud_i2s_fmt i2s_fmt)
465{
466 u32 val;
467
468 val = mtk_hdmi_read(hdmi, GRL_CFG0);
469 val &= ~(CFG0_W_LENGTH_MASK | CFG0_I2S_MODE_MASK);
470
471 switch (i2s_fmt) {
472 case HDMI_I2S_MODE_RJT_24BIT:
473 val |= CFG0_I2S_MODE_RTJ | CFG0_W_LENGTH_24BIT;
474 break;
475 case HDMI_I2S_MODE_RJT_16BIT:
476 val |= CFG0_I2S_MODE_RTJ | CFG0_W_LENGTH_16BIT;
477 break;
478 case HDMI_I2S_MODE_LJT_24BIT:
479 default:
480 val |= CFG0_I2S_MODE_LTJ | CFG0_W_LENGTH_24BIT;
481 break;
482 case HDMI_I2S_MODE_LJT_16BIT:
483 val |= CFG0_I2S_MODE_LTJ | CFG0_W_LENGTH_16BIT;
484 break;
485 case HDMI_I2S_MODE_I2S_24BIT:
486 val |= CFG0_I2S_MODE_I2S | CFG0_W_LENGTH_24BIT;
487 break;
488 case HDMI_I2S_MODE_I2S_16BIT:
489 val |= CFG0_I2S_MODE_I2S | CFG0_W_LENGTH_16BIT;
490 break;
491 }
492 mtk_hdmi_write(hdmi, GRL_CFG0, val);
493}
494
495static void mtk_hdmi_hw_audio_config(struct mtk_hdmi *hdmi, bool dst)
496{
497 const u8 mask = HIGH_BIT_RATE | DST_NORMAL_DOUBLE | SACD_DST | DSD_SEL;
498 u8 val;
499
500 /* Disable high bitrate, set DST packet normal/double */
501 mtk_hdmi_clear_bits(hdmi, GRL_AOUT_CFG, HIGH_BIT_RATE_PACKET_ALIGN);
502
503 if (dst)
504 val = DST_NORMAL_DOUBLE | SACD_DST;
505 else
506 val = 0;
507
508 mtk_hdmi_mask(hdmi, GRL_AUDIO_CFG, val, mask);
509}
510
511static void mtk_hdmi_hw_aud_set_i2s_chan_num(struct mtk_hdmi *hdmi,
512 enum hdmi_aud_channel_type channel_type,
513 u8 channel_count)
514{
515 unsigned int ch_switch;
516 u8 i2s_uv;
517
518 ch_switch = CH_SWITCH(7, 7) | CH_SWITCH(6, 6) |
519 CH_SWITCH(5, 5) | CH_SWITCH(4, 4) |
520 CH_SWITCH(3, 3) | CH_SWITCH(1, 2) |
521 CH_SWITCH(2, 1) | CH_SWITCH(0, 0);
522
523 if (channel_count == 2) {
524 i2s_uv = I2S_UV_CH_EN(0);
525 } else if (channel_count == 3 || channel_count == 4) {
526 if (channel_count == 4 &&
527 (channel_type == HDMI_AUD_CHAN_TYPE_3_0_LRS ||
528 channel_type == HDMI_AUD_CHAN_TYPE_4_0))
529 i2s_uv = I2S_UV_CH_EN(2) | I2S_UV_CH_EN(0);
530 else
531 i2s_uv = I2S_UV_CH_EN(3) | I2S_UV_CH_EN(2);
532 } else if (channel_count == 6 || channel_count == 5) {
533 if (channel_count == 6 &&
534 channel_type != HDMI_AUD_CHAN_TYPE_5_1 &&
535 channel_type != HDMI_AUD_CHAN_TYPE_4_1_CLRS) {
536 i2s_uv = I2S_UV_CH_EN(3) | I2S_UV_CH_EN(2) |
537 I2S_UV_CH_EN(1) | I2S_UV_CH_EN(0);
538 } else {
539 i2s_uv = I2S_UV_CH_EN(2) | I2S_UV_CH_EN(1) |
540 I2S_UV_CH_EN(0);
541 }
542 } else if (channel_count == 8 || channel_count == 7) {
543 i2s_uv = I2S_UV_CH_EN(3) | I2S_UV_CH_EN(2) |
544 I2S_UV_CH_EN(1) | I2S_UV_CH_EN(0);
545 } else {
546 i2s_uv = I2S_UV_CH_EN(0);
547 }
548
549 mtk_hdmi_write(hdmi, GRL_CH_SW0, ch_switch & 0xff);
550 mtk_hdmi_write(hdmi, GRL_CH_SW1, (ch_switch >> 8) & 0xff);
551 mtk_hdmi_write(hdmi, GRL_CH_SW2, (ch_switch >> 16) & 0xff);
552 mtk_hdmi_write(hdmi, GRL_I2S_UV, i2s_uv);
553}
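/*
 * For example, a 5.1 stream (channel_count == 6, HDMI_AUD_CHAN_TYPE_5_1)
 * makes mtk_hdmi_hw_aud_set_i2s_chan_num() enable three I2S lanes,
 * i2s_uv = I2S_UV_CH_EN(2) | I2S_UV_CH_EN(1) | I2S_UV_CH_EN(0),
 * i.e. three stereo pairs carrying the six channels.
 */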
554
555static void mtk_hdmi_hw_aud_set_input_type(struct mtk_hdmi *hdmi,
556 enum hdmi_aud_input_type input_type)
557{
558 u32 val;
559
560 val = mtk_hdmi_read(hdmi, GRL_CFG1);
561 if (input_type == HDMI_AUD_INPUT_I2S &&
562 (val & CFG1_SPDIF) == CFG1_SPDIF) {
563 val &= ~CFG1_SPDIF;
564 } else if (input_type == HDMI_AUD_INPUT_SPDIF &&
565 (val & CFG1_SPDIF) == 0) {
566 val |= CFG1_SPDIF;
567 }
568 mtk_hdmi_write(hdmi, GRL_CFG1, val);
569}
570
571static void mtk_hdmi_hw_aud_set_channel_status(struct mtk_hdmi *hdmi,
572 u8 *channel_status)
573{
574 int i;
575
576 for (i = 0; i < 5; i++) {
577 mtk_hdmi_write(hdmi, GRL_I2S_C_STA0 + i * 4, channel_status[i]);
578 mtk_hdmi_write(hdmi, GRL_L_STATUS_0 + i * 4, channel_status[i]);
579 mtk_hdmi_write(hdmi, GRL_R_STATUS_0 + i * 4, channel_status[i]);
580 }
581 for (; i < 24; i++) {
582 mtk_hdmi_write(hdmi, GRL_L_STATUS_0 + i * 4, 0);
583 mtk_hdmi_write(hdmi, GRL_R_STATUS_0 + i * 4, 0);
584 }
585}
586
587static void mtk_hdmi_hw_aud_src_reenable(struct mtk_hdmi *hdmi)
588{
589 u32 val;
590
591 val = mtk_hdmi_read(hdmi, GRL_MIX_CTRL);
592 if (val & MIX_CTRL_SRC_EN) {
593 val &= ~MIX_CTRL_SRC_EN;
594 mtk_hdmi_write(hdmi, GRL_MIX_CTRL, val);
595 usleep_range(255, 512);
596 val |= MIX_CTRL_SRC_EN;
597 mtk_hdmi_write(hdmi, GRL_MIX_CTRL, val);
598 }
599}
600
601static void mtk_hdmi_hw_aud_src_disable(struct mtk_hdmi *hdmi)
602{
603 u32 val;
604
605 val = mtk_hdmi_read(hdmi, GRL_MIX_CTRL);
606 val &= ~MIX_CTRL_SRC_EN;
607 mtk_hdmi_write(hdmi, GRL_MIX_CTRL, val);
608 mtk_hdmi_write(hdmi, GRL_SHIFT_L1, 0x00);
609}
610
611static void mtk_hdmi_hw_aud_set_mclk(struct mtk_hdmi *hdmi,
612 enum hdmi_aud_mclk mclk)
613{
614 u32 val;
615
616 val = mtk_hdmi_read(hdmi, GRL_CFG5);
617 val &= CFG5_CD_RATIO_MASK;
618
619 switch (mclk) {
620 case HDMI_AUD_MCLK_128FS:
621 val |= CFG5_FS128;
622 break;
623 case HDMI_AUD_MCLK_256FS:
624 val |= CFG5_FS256;
625 break;
626 case HDMI_AUD_MCLK_384FS:
627 val |= CFG5_FS384;
628 break;
629 case HDMI_AUD_MCLK_512FS:
630 val |= CFG5_FS512;
631 break;
632 case HDMI_AUD_MCLK_768FS:
633 val |= CFG5_FS768;
634 break;
635 default:
636 val |= CFG5_FS256;
637 break;
638 }
639 mtk_hdmi_write(hdmi, GRL_CFG5, val);
640}
641
642struct hdmi_acr_n {
643 unsigned int clock;
644 unsigned int n[3];
645};
646
647/* Recommended N values from HDMI specification, tables 7-1 to 7-3 */
648static const struct hdmi_acr_n hdmi_rec_n_table[] = {
649 /* Clock, N: 32kHz 44.1kHz 48kHz */
650 { 25175, { 4576, 7007, 6864 } },
651 { 74176, { 11648, 17836, 11648 } },
652 { 148352, { 11648, 8918, 5824 } },
653 { 296703, { 5824, 4459, 5824 } },
654 { 297000, { 3072, 4704, 5120 } },
655 { 0, { 4096, 6272, 6144 } }, /* all other TMDS clocks */
656};
657
658/**
659 * hdmi_recommended_n() - Return N value recommended by HDMI specification
660 * @freq: audio sample rate in Hz
661 * @clock: rounded TMDS clock in kHz
662 */
663static unsigned int hdmi_recommended_n(unsigned int freq, unsigned int clock)
664{
665 const struct hdmi_acr_n *recommended;
666 unsigned int i;
667
668 for (i = 0; i < ARRAY_SIZE(hdmi_rec_n_table) - 1; i++) {
669 if (clock == hdmi_rec_n_table[i].clock)
670 break;
671 }
672 recommended = hdmi_rec_n_table + i;
673
674 switch (freq) {
675 case 32000:
676 return recommended->n[0];
677 case 44100:
678 return recommended->n[1];
679 case 48000:
680 return recommended->n[2];
681 case 88200:
682 return recommended->n[1] * 2;
683 case 96000:
684 return recommended->n[2] * 2;
685 case 176400:
686 return recommended->n[1] * 4;
687 case 192000:
688 return recommended->n[2] * 4;
689 default:
690 return (128 * freq) / 1000;
691 }
692}
693
694static unsigned int hdmi_mode_clock_to_hz(unsigned int clock)
695{
696 switch (clock) {
697 case 25175:
698 return 25174825; /* 25.2/1.001 MHz */
699 case 74176:
700 return 74175824; /* 74.25/1.001 MHz */
701 case 148352:
702 return 148351648; /* 148.5/1.001 MHz */
703 case 296703:
704 return 296703297; /* 297/1.001 MHz */
705 default:
706 return clock * 1000;
707 }
708}
709
710static unsigned int hdmi_expected_cts(unsigned int audio_sample_rate,
711 unsigned int tmds_clock, unsigned int n)
712{
713 return DIV_ROUND_CLOSEST_ULL((u64)hdmi_mode_clock_to_hz(tmds_clock) * n,
714 128 * audio_sample_rate);
715}
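/*
 * For example, 48 kHz audio on a 148.352 MHz (148500/1.001 kHz) TMDS clock
 * takes N = 5824 from hdmi_rec_n_table above, so the expected CTS is
 * 148351648 * 5824 / (128 * 48000), which rounds to 140625.
 */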
716
717static void do_hdmi_hw_aud_set_ncts(struct mtk_hdmi *hdmi, unsigned int n,
718 unsigned int cts)
719{
720 unsigned char val[NCTS_BYTES];
721 int i;
722
723 mtk_hdmi_write(hdmi, GRL_NCTS, 0);
724 mtk_hdmi_write(hdmi, GRL_NCTS, 0);
725 mtk_hdmi_write(hdmi, GRL_NCTS, 0);
726 memset(val, 0, sizeof(val));
727
728 val[0] = (cts >> 24) & 0xff;
729 val[1] = (cts >> 16) & 0xff;
730 val[2] = (cts >> 8) & 0xff;
731 val[3] = cts & 0xff;
732
733 val[4] = (n >> 16) & 0xff;
734 val[5] = (n >> 8) & 0xff;
735 val[6] = n & 0xff;
736
737 for (i = 0; i < NCTS_BYTES; i++)
738 mtk_hdmi_write(hdmi, GRL_NCTS, val[i]);
739}
740
741static void mtk_hdmi_hw_aud_set_ncts(struct mtk_hdmi *hdmi,
742 unsigned int sample_rate,
743 unsigned int clock)
744{
745 unsigned int n, cts;
746
747 n = hdmi_recommended_n(sample_rate, clock);
748 cts = hdmi_expected_cts(sample_rate, clock, n);
749
750 dev_dbg(hdmi->dev, "%s: sample_rate=%u, clock=%d, cts=%u, n=%u\n",
751 __func__, sample_rate, clock, n, cts);
752
753 mtk_hdmi_mask(hdmi, DUMMY_304, AUDIO_I2S_NCTS_SEL_64,
754 AUDIO_I2S_NCTS_SEL);
755 do_hdmi_hw_aud_set_ncts(hdmi, n, cts);
756}
757
758static u8 mtk_hdmi_aud_get_chnl_count(enum hdmi_aud_channel_type channel_type)
759{
760 switch (channel_type) {
761 case HDMI_AUD_CHAN_TYPE_1_0:
762 case HDMI_AUD_CHAN_TYPE_1_1:
763 case HDMI_AUD_CHAN_TYPE_2_0:
764 return 2;
765 case HDMI_AUD_CHAN_TYPE_2_1:
766 case HDMI_AUD_CHAN_TYPE_3_0:
767 return 3;
768 case HDMI_AUD_CHAN_TYPE_3_1:
769 case HDMI_AUD_CHAN_TYPE_4_0:
770 case HDMI_AUD_CHAN_TYPE_3_0_LRS:
771 return 4;
772 case HDMI_AUD_CHAN_TYPE_4_1:
773 case HDMI_AUD_CHAN_TYPE_5_0:
774 case HDMI_AUD_CHAN_TYPE_3_1_LRS:
775 case HDMI_AUD_CHAN_TYPE_4_0_CLRS:
776 return 5;
777 case HDMI_AUD_CHAN_TYPE_5_1:
778 case HDMI_AUD_CHAN_TYPE_6_0:
779 case HDMI_AUD_CHAN_TYPE_4_1_CLRS:
780 case HDMI_AUD_CHAN_TYPE_6_0_CS:
781 case HDMI_AUD_CHAN_TYPE_6_0_CH:
782 case HDMI_AUD_CHAN_TYPE_6_0_OH:
783 case HDMI_AUD_CHAN_TYPE_6_0_CHR:
784 return 6;
785 case HDMI_AUD_CHAN_TYPE_6_1:
786 case HDMI_AUD_CHAN_TYPE_6_1_CS:
787 case HDMI_AUD_CHAN_TYPE_6_1_CH:
788 case HDMI_AUD_CHAN_TYPE_6_1_OH:
789 case HDMI_AUD_CHAN_TYPE_6_1_CHR:
790 case HDMI_AUD_CHAN_TYPE_7_0:
791 case HDMI_AUD_CHAN_TYPE_7_0_LH_RH:
792 case HDMI_AUD_CHAN_TYPE_7_0_LSR_RSR:
793 case HDMI_AUD_CHAN_TYPE_7_0_LC_RC:
794 case HDMI_AUD_CHAN_TYPE_7_0_LW_RW:
795 case HDMI_AUD_CHAN_TYPE_7_0_LSD_RSD:
796 case HDMI_AUD_CHAN_TYPE_7_0_LSS_RSS:
797 case HDMI_AUD_CHAN_TYPE_7_0_LHS_RHS:
798 case HDMI_AUD_CHAN_TYPE_7_0_CS_CH:
799 case HDMI_AUD_CHAN_TYPE_7_0_CS_OH:
800 case HDMI_AUD_CHAN_TYPE_7_0_CS_CHR:
801 case HDMI_AUD_CHAN_TYPE_7_0_CH_OH:
802 case HDMI_AUD_CHAN_TYPE_7_0_CH_CHR:
803 case HDMI_AUD_CHAN_TYPE_7_0_OH_CHR:
804 case HDMI_AUD_CHAN_TYPE_7_0_LSS_RSS_LSR_RSR:
805 case HDMI_AUD_CHAN_TYPE_8_0_LH_RH_CS:
806 return 7;
807 case HDMI_AUD_CHAN_TYPE_7_1:
808 case HDMI_AUD_CHAN_TYPE_7_1_LH_RH:
809 case HDMI_AUD_CHAN_TYPE_7_1_LSR_RSR:
810 case HDMI_AUD_CHAN_TYPE_7_1_LC_RC:
811 case HDMI_AUD_CHAN_TYPE_7_1_LW_RW:
812 case HDMI_AUD_CHAN_TYPE_7_1_LSD_RSD:
813 case HDMI_AUD_CHAN_TYPE_7_1_LSS_RSS:
814 case HDMI_AUD_CHAN_TYPE_7_1_LHS_RHS:
815 case HDMI_AUD_CHAN_TYPE_7_1_CS_CH:
816 case HDMI_AUD_CHAN_TYPE_7_1_CS_OH:
817 case HDMI_AUD_CHAN_TYPE_7_1_CS_CHR:
818 case HDMI_AUD_CHAN_TYPE_7_1_CH_OH:
819 case HDMI_AUD_CHAN_TYPE_7_1_CH_CHR:
820 case HDMI_AUD_CHAN_TYPE_7_1_OH_CHR:
821 case HDMI_AUD_CHAN_TYPE_7_1_LSS_RSS_LSR_RSR:
822 return 8;
823 default:
824 return 2;
825 }
826}
827
828static int mtk_hdmi_video_change_vpll(struct mtk_hdmi *hdmi, u32 clock)
829{
830 unsigned long rate;
831 int ret;
832
833	/* The DPI driver should already have set TVDPLL to the correct rate */
834 ret = clk_set_rate(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL], clock);
835 if (ret) {
836 dev_err(hdmi->dev, "Failed to set PLL to %u Hz: %d\n", clock,
837 ret);
838 return ret;
839 }
840
841 rate = clk_get_rate(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL]);
842
843 if (DIV_ROUND_CLOSEST(rate, 1000) != DIV_ROUND_CLOSEST(clock, 1000))
844 dev_warn(hdmi->dev, "Want PLL %u Hz, got %lu Hz\n", clock,
845 rate);
846 else
847 dev_dbg(hdmi->dev, "Want PLL %u Hz, got %lu Hz\n", clock, rate);
848
849 mtk_hdmi_hw_config_sys(hdmi);
850 mtk_hdmi_hw_set_deep_color_mode(hdmi);
851 return 0;
852}
853
854static void mtk_hdmi_video_set_display_mode(struct mtk_hdmi *hdmi,
855 struct drm_display_mode *mode)
856{
857 mtk_hdmi_hw_reset(hdmi);
858 mtk_hdmi_hw_enable_notice(hdmi, true);
859 mtk_hdmi_hw_write_int_mask(hdmi, 0xff);
860 mtk_hdmi_hw_enable_dvi_mode(hdmi, hdmi->dvi_mode);
861 mtk_hdmi_hw_ncts_auto_write_enable(hdmi, true);
862
863 mtk_hdmi_hw_msic_setting(hdmi, mode);
864}
865
866static int mtk_hdmi_aud_enable_packet(struct mtk_hdmi *hdmi, bool enable)
867{
868 mtk_hdmi_hw_send_aud_packet(hdmi, enable);
869 return 0;
870}
871
872static int mtk_hdmi_aud_on_off_hw_ncts(struct mtk_hdmi *hdmi, bool on)
873{
874 mtk_hdmi_hw_ncts_enable(hdmi, on);
875 return 0;
876}
877
878static int mtk_hdmi_aud_set_input(struct mtk_hdmi *hdmi)
879{
880 enum hdmi_aud_channel_type chan_type;
881 u8 chan_count;
882 bool dst;
883
884 mtk_hdmi_hw_aud_set_channel_swap(hdmi, HDMI_AUD_SWAP_LFE_CC);
885 mtk_hdmi_set_bits(hdmi, GRL_MIX_CTRL, MIX_CTRL_FLAT);
886
887 if (hdmi->aud_param.aud_input_type == HDMI_AUD_INPUT_SPDIF &&
888 hdmi->aud_param.aud_codec == HDMI_AUDIO_CODING_TYPE_DST) {
889 mtk_hdmi_hw_aud_set_bit_num(hdmi, HDMI_AUDIO_SAMPLE_SIZE_24);
890 } else if (hdmi->aud_param.aud_i2s_fmt == HDMI_I2S_MODE_LJT_24BIT) {
891 hdmi->aud_param.aud_i2s_fmt = HDMI_I2S_MODE_LJT_16BIT;
892 }
893
894 mtk_hdmi_hw_aud_set_i2s_fmt(hdmi, hdmi->aud_param.aud_i2s_fmt);
895 mtk_hdmi_hw_aud_set_bit_num(hdmi, HDMI_AUDIO_SAMPLE_SIZE_24);
896
897 dst = ((hdmi->aud_param.aud_input_type == HDMI_AUD_INPUT_SPDIF) &&
898 (hdmi->aud_param.aud_codec == HDMI_AUDIO_CODING_TYPE_DST));
899 mtk_hdmi_hw_audio_config(hdmi, dst);
900
901 if (hdmi->aud_param.aud_input_type == HDMI_AUD_INPUT_SPDIF)
902 chan_type = HDMI_AUD_CHAN_TYPE_2_0;
903 else
904 chan_type = hdmi->aud_param.aud_input_chan_type;
905 chan_count = mtk_hdmi_aud_get_chnl_count(chan_type);
906 mtk_hdmi_hw_aud_set_i2s_chan_num(hdmi, chan_type, chan_count);
907 mtk_hdmi_hw_aud_set_input_type(hdmi, hdmi->aud_param.aud_input_type);
908
909 return 0;
910}
911
912static int mtk_hdmi_aud_set_src(struct mtk_hdmi *hdmi,
913 struct drm_display_mode *display_mode)
914{
915 unsigned int sample_rate = hdmi->aud_param.codec_params.sample_rate;
916
917 mtk_hdmi_aud_on_off_hw_ncts(hdmi, false);
918 mtk_hdmi_hw_aud_src_disable(hdmi);
919 mtk_hdmi_clear_bits(hdmi, GRL_CFG2, CFG2_ACLK_INV);
920
921 if (hdmi->aud_param.aud_input_type == HDMI_AUD_INPUT_I2S) {
922 switch (sample_rate) {
923 case 32000:
924 case 44100:
925 case 48000:
926 case 88200:
927 case 96000:
928 break;
929 default:
930 return -EINVAL;
931 }
932 mtk_hdmi_hw_aud_set_mclk(hdmi, hdmi->aud_param.aud_mclk);
933 } else {
934 switch (sample_rate) {
935 case 32000:
936 case 44100:
937 case 48000:
938 break;
939 default:
940 return -EINVAL;
941 }
942 mtk_hdmi_hw_aud_set_mclk(hdmi, HDMI_AUD_MCLK_128FS);
943 }
944
945 mtk_hdmi_hw_aud_set_ncts(hdmi, sample_rate, display_mode->clock);
946
947 mtk_hdmi_hw_aud_src_reenable(hdmi);
948 return 0;
949}
950
951static int mtk_hdmi_aud_output_config(struct mtk_hdmi *hdmi,
952 struct drm_display_mode *display_mode)
953{
954 mtk_hdmi_hw_aud_mute(hdmi);
955 mtk_hdmi_aud_enable_packet(hdmi, false);
956
957 mtk_hdmi_aud_set_input(hdmi);
958 mtk_hdmi_aud_set_src(hdmi, display_mode);
959 mtk_hdmi_hw_aud_set_channel_status(hdmi,
960 hdmi->aud_param.codec_params.iec.status);
961
962 usleep_range(50, 100);
963
964 mtk_hdmi_aud_on_off_hw_ncts(hdmi, true);
965 mtk_hdmi_aud_enable_packet(hdmi, true);
966 mtk_hdmi_hw_aud_unmute(hdmi);
967 return 0;
968}
969
970static int mtk_hdmi_setup_avi_infoframe(struct mtk_hdmi *hdmi,
971 struct drm_display_mode *mode)
972{
973 struct hdmi_avi_infoframe frame;
974 u8 buffer[17];
975 ssize_t err;
976
977 err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
978 if (err < 0) {
979 dev_err(hdmi->dev,
980 "Failed to get AVI infoframe from mode: %zd\n", err);
981 return err;
982 }
983
984 err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer));
985 if (err < 0) {
986 dev_err(hdmi->dev, "Failed to pack AVI infoframe: %zd\n", err);
987 return err;
988 }
989
990 mtk_hdmi_hw_send_info_frame(hdmi, buffer, sizeof(buffer));
991 return 0;
992}
993
994static int mtk_hdmi_setup_spd_infoframe(struct mtk_hdmi *hdmi,
995 const char *vendor,
996 const char *product)
997{
998 struct hdmi_spd_infoframe frame;
999 u8 buffer[29];
1000 ssize_t err;
1001
1002 err = hdmi_spd_infoframe_init(&frame, vendor, product);
1003 if (err < 0) {
1004 dev_err(hdmi->dev, "Failed to initialize SPD infoframe: %zd\n",
1005 err);
1006 return err;
1007 }
1008
1009 err = hdmi_spd_infoframe_pack(&frame, buffer, sizeof(buffer));
1010 if (err < 0) {
1011		dev_err(hdmi->dev, "Failed to pack SPD infoframe: %zd\n", err);
1012 return err;
1013 }
1014
1015 mtk_hdmi_hw_send_info_frame(hdmi, buffer, sizeof(buffer));
1016 return 0;
1017}
1018
1019static int mtk_hdmi_setup_audio_infoframe(struct mtk_hdmi *hdmi)
1020{
1021 struct hdmi_audio_infoframe frame;
1022 u8 buffer[14];
1023 ssize_t err;
1024
1025 err = hdmi_audio_infoframe_init(&frame);
1026 if (err < 0) {
1027 dev_err(hdmi->dev, "Failed to setup audio infoframe: %zd\n",
1028 err);
1029 return err;
1030 }
1031
1032 frame.coding_type = HDMI_AUDIO_CODING_TYPE_STREAM;
1033 frame.sample_frequency = HDMI_AUDIO_SAMPLE_FREQUENCY_STREAM;
1034 frame.sample_size = HDMI_AUDIO_SAMPLE_SIZE_STREAM;
1035 frame.channels = mtk_hdmi_aud_get_chnl_count(
1036 hdmi->aud_param.aud_input_chan_type);
1037
1038 err = hdmi_audio_infoframe_pack(&frame, buffer, sizeof(buffer));
1039 if (err < 0) {
1040 dev_err(hdmi->dev, "Failed to pack audio infoframe: %zd\n",
1041 err);
1042 return err;
1043 }
1044
1045 mtk_hdmi_hw_send_info_frame(hdmi, buffer, sizeof(buffer));
1046 return 0;
1047}
1048
1049static int mtk_hdmi_setup_vendor_specific_infoframe(struct mtk_hdmi *hdmi,
1050 struct drm_display_mode *mode)
1051{
1052 struct hdmi_vendor_infoframe frame;
1053 u8 buffer[10];
1054 ssize_t err;
1055
1056 err = drm_hdmi_vendor_infoframe_from_display_mode(&frame, mode);
1057 if (err) {
1058 dev_err(hdmi->dev,
1059 "Failed to get vendor infoframe from mode: %zd\n", err);
1060 return err;
1061 }
1062
1063 err = hdmi_vendor_infoframe_pack(&frame, buffer, sizeof(buffer));
1064 if (err) {
1065 dev_err(hdmi->dev, "Failed to pack vendor infoframe: %zd\n",
1066 err);
1067 return err;
1068 }
1069
1070 mtk_hdmi_hw_send_info_frame(hdmi, buffer, sizeof(buffer));
1071 return 0;
1072}
1073
1074static int mtk_hdmi_output_init(struct mtk_hdmi *hdmi)
1075{
1076 struct hdmi_audio_param *aud_param = &hdmi->aud_param;
1077
1078 hdmi->csp = HDMI_COLORSPACE_RGB;
1079 aud_param->aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
1080 aud_param->aud_sampe_size = HDMI_AUDIO_SAMPLE_SIZE_16;
1081 aud_param->aud_input_type = HDMI_AUD_INPUT_I2S;
1082 aud_param->aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT;
1083 aud_param->aud_mclk = HDMI_AUD_MCLK_128FS;
1084 aud_param->aud_input_chan_type = HDMI_AUD_CHAN_TYPE_2_0;
1085
1086 return 0;
1087}
1088
1089void mtk_hdmi_audio_enable(struct mtk_hdmi *hdmi)
1090{
1091 mtk_hdmi_aud_enable_packet(hdmi, true);
1092 hdmi->audio_enable = true;
1093}
1094
1095void mtk_hdmi_audio_disable(struct mtk_hdmi *hdmi)
1096{
1097 mtk_hdmi_aud_enable_packet(hdmi, false);
1098 hdmi->audio_enable = false;
1099}
1100
1101int mtk_hdmi_audio_set_param(struct mtk_hdmi *hdmi,
1102 struct hdmi_audio_param *param)
1103{
1104 if (!hdmi->audio_enable) {
1105		dev_err(hdmi->dev, "HDMI audio is disabled!\n");
1106 return -EINVAL;
1107 }
1108 dev_dbg(hdmi->dev, "codec:%d, input:%d, channel:%d, fs:%d\n",
1109 param->aud_codec, param->aud_input_type,
1110 param->aud_input_chan_type, param->codec_params.sample_rate);
1111 memcpy(&hdmi->aud_param, param, sizeof(*param));
1112 return mtk_hdmi_aud_output_config(hdmi, &hdmi->mode);
1113}
1114
1115static int mtk_hdmi_output_set_display_mode(struct mtk_hdmi *hdmi,
1116 struct drm_display_mode *mode)
1117{
1118 int ret;
1119
1120 mtk_hdmi_hw_vid_black(hdmi, true);
1121 mtk_hdmi_hw_aud_mute(hdmi);
1122 mtk_hdmi_hw_send_av_mute(hdmi);
1123 phy_power_off(hdmi->phy);
1124
1125 ret = mtk_hdmi_video_change_vpll(hdmi,
1126 mode->clock * 1000);
1127 if (ret) {
1128 dev_err(hdmi->dev, "Failed to set vpll: %d\n", ret);
1129 return ret;
1130 }
1131 mtk_hdmi_video_set_display_mode(hdmi, mode);
1132
1133 phy_power_on(hdmi->phy);
1134 mtk_hdmi_aud_output_config(hdmi, mode);
1135
1136 mtk_hdmi_setup_audio_infoframe(hdmi);
1137 mtk_hdmi_setup_avi_infoframe(hdmi, mode);
1138 mtk_hdmi_setup_spd_infoframe(hdmi, "mediatek", "On-chip HDMI");
1139 if (mode->flags & DRM_MODE_FLAG_3D_MASK)
1140 mtk_hdmi_setup_vendor_specific_infoframe(hdmi, mode);
1141
1142 mtk_hdmi_hw_vid_black(hdmi, false);
1143 mtk_hdmi_hw_aud_unmute(hdmi);
1144 mtk_hdmi_hw_send_av_unmute(hdmi);
1145
1146 return 0;
1147}
1148
1149static const char * const mtk_hdmi_clk_names[MTK_HDMI_CLK_COUNT] = {
1150 [MTK_HDMI_CLK_HDMI_PIXEL] = "pixel",
1151 [MTK_HDMI_CLK_HDMI_PLL] = "pll",
1152 [MTK_HDMI_CLK_AUD_BCLK] = "bclk",
1153 [MTK_HDMI_CLK_AUD_SPDIF] = "spdif",
1154};
1155
1156static int mtk_hdmi_get_all_clk(struct mtk_hdmi *hdmi,
1157 struct device_node *np)
1158{
1159 int i;
1160
1161 for (i = 0; i < ARRAY_SIZE(mtk_hdmi_clk_names); i++) {
1162 hdmi->clk[i] = of_clk_get_by_name(np,
1163 mtk_hdmi_clk_names[i]);
1164 if (IS_ERR(hdmi->clk[i]))
1165 return PTR_ERR(hdmi->clk[i]);
1166 }
1167 return 0;
1168}
1169
1170static int mtk_hdmi_clk_enable_audio(struct mtk_hdmi *hdmi)
1171{
1172 int ret;
1173
1174 ret = clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_AUD_BCLK]);
1175 if (ret)
1176 return ret;
1177
1178 ret = clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_AUD_SPDIF]);
1179 if (ret)
1180 goto err;
1181
1182 return 0;
1183err:
1184 clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_AUD_BCLK]);
1185 return ret;
1186}
1187
1188static void mtk_hdmi_clk_disable_audio(struct mtk_hdmi *hdmi)
1189{
1190 clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_AUD_BCLK]);
1191 clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_AUD_SPDIF]);
1192}
1193
1194static enum drm_connector_status hdmi_conn_detect(struct drm_connector *conn,
1195 bool force)
1196{
1197 struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
1198
1199 return mtk_cec_hpd_high(hdmi->cec_dev) ?
1200 connector_status_connected : connector_status_disconnected;
1201}
1202
1203static void hdmi_conn_destroy(struct drm_connector *conn)
1204{
1205 struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
1206
1207 mtk_cec_set_hpd_event(hdmi->cec_dev, NULL, NULL);
1208
1209 drm_connector_cleanup(conn);
1210}
1211
1212static int mtk_hdmi_conn_get_modes(struct drm_connector *conn)
1213{
1214 struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
1215 struct edid *edid;
1216 int ret;
1217
1218 if (!hdmi->ddc_adpt)
1219 return -ENODEV;
1220
1221 edid = drm_get_edid(conn, hdmi->ddc_adpt);
1222 if (!edid)
1223 return -ENODEV;
1224
1225 hdmi->dvi_mode = !drm_detect_monitor_audio(edid);
1226
1227 drm_mode_connector_update_edid_property(conn, edid);
1228
1229 ret = drm_add_edid_modes(conn, edid);
1230 drm_edid_to_eld(conn, edid);
1231 kfree(edid);
1232 return ret;
1233}
1234
1235static int mtk_hdmi_conn_mode_valid(struct drm_connector *conn,
1236 struct drm_display_mode *mode)
1237{
1238 struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
1239
1240 dev_dbg(hdmi->dev, "xres=%d, yres=%d, refresh=%d, intl=%d clock=%d\n",
1241 mode->hdisplay, mode->vdisplay, mode->vrefresh,
1242 !!(mode->flags & DRM_MODE_FLAG_INTERLACE), mode->clock * 1000);
1243
1244 if (hdmi->bridge.next) {
1245 struct drm_display_mode adjusted_mode;
1246
1247 drm_mode_copy(&adjusted_mode, mode);
1248 if (!drm_bridge_mode_fixup(hdmi->bridge.next, mode,
1249 &adjusted_mode))
1250 return MODE_BAD;
1251 }
1252
1253 if (mode->clock < 27000)
1254 return MODE_CLOCK_LOW;
1255 if (mode->clock > 297000)
1256 return MODE_CLOCK_HIGH;
1257
1258 return drm_mode_validate_size(mode, 0x1fff, 0x1fff);
1259}
1260
1261static struct drm_encoder *mtk_hdmi_conn_best_enc(struct drm_connector *conn)
1262{
1263 struct mtk_hdmi *hdmi = hdmi_ctx_from_conn(conn);
1264
1265 return hdmi->bridge.encoder;
1266}
1267
1268static const struct drm_connector_funcs mtk_hdmi_connector_funcs = {
1269 .dpms = drm_atomic_helper_connector_dpms,
1270 .detect = hdmi_conn_detect,
1271 .fill_modes = drm_helper_probe_single_connector_modes,
1272 .destroy = hdmi_conn_destroy,
1273 .reset = drm_atomic_helper_connector_reset,
1274 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
1275 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
1276};
1277
1278static const struct drm_connector_helper_funcs
1279 mtk_hdmi_connector_helper_funcs = {
1280 .get_modes = mtk_hdmi_conn_get_modes,
1281 .mode_valid = mtk_hdmi_conn_mode_valid,
1282 .best_encoder = mtk_hdmi_conn_best_enc,
1283};
1284
1285static void mtk_hdmi_hpd_event(bool hpd, struct device *dev)
1286{
1287 struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
1288
1289 if (hdmi && hdmi->bridge.encoder && hdmi->bridge.encoder->dev)
1290 drm_helper_hpd_irq_event(hdmi->bridge.encoder->dev);
1291}
1292
1293/*
1294 * Bridge callbacks
1295 */
1296
1297static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge)
1298{
1299 struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
1300 int ret;
1301
1302 ret = drm_connector_init(bridge->encoder->dev, &hdmi->conn,
1303 &mtk_hdmi_connector_funcs,
1304 DRM_MODE_CONNECTOR_HDMIA);
1305 if (ret) {
1306 dev_err(hdmi->dev, "Failed to initialize connector: %d\n", ret);
1307 return ret;
1308 }
1309 drm_connector_helper_add(&hdmi->conn, &mtk_hdmi_connector_helper_funcs);
1310
1311 hdmi->conn.polled = DRM_CONNECTOR_POLL_HPD;
1312 hdmi->conn.interlace_allowed = true;
1313 hdmi->conn.doublescan_allowed = false;
1314
1315 ret = drm_mode_connector_attach_encoder(&hdmi->conn,
1316 bridge->encoder);
1317 if (ret) {
1318 dev_err(hdmi->dev,
1319 "Failed to attach connector to encoder: %d\n", ret);
1320 return ret;
1321 }
1322
1323 if (bridge->next) {
1324 bridge->next->encoder = bridge->encoder;
1325 ret = drm_bridge_attach(bridge->encoder->dev, bridge->next);
1326 if (ret) {
1327 dev_err(hdmi->dev,
1328 "Failed to attach external bridge: %d\n", ret);
1329 return ret;
1330 }
1331 }
1332
1333 mtk_cec_set_hpd_event(hdmi->cec_dev, mtk_hdmi_hpd_event, hdmi->dev);
1334
1335 return 0;
1336}
1337
1338static bool mtk_hdmi_bridge_mode_fixup(struct drm_bridge *bridge,
1339 const struct drm_display_mode *mode,
1340 struct drm_display_mode *adjusted_mode)
1341{
1342 return true;
1343}
1344
1345static void mtk_hdmi_bridge_disable(struct drm_bridge *bridge)
1346{
1347 struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
1348
1349 if (!hdmi->enabled)
1350 return;
1351
1352 phy_power_off(hdmi->phy);
1353 clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_HDMI_PIXEL]);
1354 clk_disable_unprepare(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL]);
1355
1356 hdmi->enabled = false;
1357}
1358
1359static void mtk_hdmi_bridge_post_disable(struct drm_bridge *bridge)
1360{
1361 struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
1362
1363 if (!hdmi->powered)
1364 return;
1365
1366 mtk_hdmi_hw_1p4_version_enable(hdmi, true);
1367 mtk_hdmi_hw_make_reg_writable(hdmi, false);
1368
1369 hdmi->powered = false;
1370}
1371
1372static void mtk_hdmi_bridge_mode_set(struct drm_bridge *bridge,
1373 struct drm_display_mode *mode,
1374 struct drm_display_mode *adjusted_mode)
1375{
1376 struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
1377
1378 dev_dbg(hdmi->dev, "cur info: name:%s, hdisplay:%d\n",
1379 adjusted_mode->name, adjusted_mode->hdisplay);
1380 dev_dbg(hdmi->dev, "hsync_start:%d,hsync_end:%d, htotal:%d",
1381 adjusted_mode->hsync_start, adjusted_mode->hsync_end,
1382 adjusted_mode->htotal);
1383 dev_dbg(hdmi->dev, "hskew:%d, vdisplay:%d\n",
1384 adjusted_mode->hskew, adjusted_mode->vdisplay);
1385 dev_dbg(hdmi->dev, "vsync_start:%d, vsync_end:%d, vtotal:%d",
1386 adjusted_mode->vsync_start, adjusted_mode->vsync_end,
1387 adjusted_mode->vtotal);
1388 dev_dbg(hdmi->dev, "vscan:%d, flag:%d\n",
1389 adjusted_mode->vscan, adjusted_mode->flags);
1390
1391 drm_mode_copy(&hdmi->mode, adjusted_mode);
1392}
1393
1394static void mtk_hdmi_bridge_pre_enable(struct drm_bridge *bridge)
1395{
1396 struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
1397
1398 mtk_hdmi_hw_make_reg_writable(hdmi, true);
1399 mtk_hdmi_hw_1p4_version_enable(hdmi, true);
1400
1401 hdmi->powered = true;
1402}
1403
1404static void mtk_hdmi_bridge_enable(struct drm_bridge *bridge)
1405{
1406 struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
1407
1408 mtk_hdmi_output_set_display_mode(hdmi, &hdmi->mode);
1409 clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL]);
1410 clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_HDMI_PIXEL]);
1411 phy_power_on(hdmi->phy);
1412
1413 hdmi->enabled = true;
1414}
1415
1416static const struct drm_bridge_funcs mtk_hdmi_bridge_funcs = {
1417 .attach = mtk_hdmi_bridge_attach,
1418 .mode_fixup = mtk_hdmi_bridge_mode_fixup,
1419 .disable = mtk_hdmi_bridge_disable,
1420 .post_disable = mtk_hdmi_bridge_post_disable,
1421 .mode_set = mtk_hdmi_bridge_mode_set,
1422 .pre_enable = mtk_hdmi_bridge_pre_enable,
1423 .enable = mtk_hdmi_bridge_enable,
1424};
1425
1426static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi,
1427 struct platform_device *pdev)
1428{
1429 struct device *dev = &pdev->dev;
1430 struct device_node *np = dev->of_node;
1431 struct device_node *cec_np, *port, *ep, *remote, *i2c_np;
1432 struct platform_device *cec_pdev;
1433 struct regmap *regmap;
1434 struct resource *mem;
1435 int ret;
1436
1437 ret = mtk_hdmi_get_all_clk(hdmi, np);
1438 if (ret) {
1439 dev_err(dev, "Failed to get clocks: %d\n", ret);
1440 return ret;
1441 }
1442
1443 /* The CEC module handles HDMI hotplug detection */
1444 cec_np = of_find_compatible_node(np->parent, NULL,
1445 "mediatek,mt8173-cec");
1446 if (!cec_np) {
1447 dev_err(dev, "Failed to find CEC node\n");
1448 return -EINVAL;
1449 }
1450
1451 cec_pdev = of_find_device_by_node(cec_np);
1452 if (!cec_pdev) {
1453 dev_err(hdmi->dev, "Waiting for CEC device %s\n",
1454 cec_np->full_name);
1455 return -EPROBE_DEFER;
1456 }
1457 hdmi->cec_dev = &cec_pdev->dev;
1458
1459 /*
1460 * The mediatek,syscon-hdmi property contains a phandle link to the
1461 * MMSYS_CONFIG device and the register offset of the HDMI_SYS_CFG
1462 * registers it contains.
1463 */
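	/*
	 * An illustrative device tree fragment (the phandle target and the
	 * 0x900 offset are examples only, not taken from this patch) would be:
	 *   mediatek,syscon-hdmi = <&mmsys 0x900>;
	 * The second cell is read into hdmi->sys_offset below.
	 */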
1464 regmap = syscon_regmap_lookup_by_phandle(np, "mediatek,syscon-hdmi");
1465 ret = of_property_read_u32_index(np, "mediatek,syscon-hdmi", 1,
1466 &hdmi->sys_offset);
1467 if (IS_ERR(regmap))
1468 ret = PTR_ERR(regmap);
1469 if (ret) {
1471 dev_err(dev,
1472 "Failed to get system configuration registers: %d\n",
1473 ret);
1474 return ret;
1475 }
1476 hdmi->sys_regmap = regmap;
1477
1478 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1479 hdmi->regs = devm_ioremap_resource(dev, mem);
1480 if (IS_ERR(hdmi->regs))
1481 return PTR_ERR(hdmi->regs);
1482
1483 port = of_graph_get_port_by_id(np, 1);
1484 if (!port) {
1485 dev_err(dev, "Missing output port node\n");
1486 return -EINVAL;
1487 }
1488
1489 ep = of_get_child_by_name(port, "endpoint");
1490 if (!ep) {
1491 dev_err(dev, "Missing endpoint node in port %s\n",
1492 port->full_name);
1493 of_node_put(port);
1494 return -EINVAL;
1495 }
1496 of_node_put(port);
1497
1498 remote = of_graph_get_remote_port_parent(ep);
1499 if (!remote) {
1500 dev_err(dev, "Missing connector/bridge node for endpoint %s\n",
1501 ep->full_name);
1502 of_node_put(ep);
1503 return -EINVAL;
1504 }
1505 of_node_put(ep);
1506
1507 if (!of_device_is_compatible(remote, "hdmi-connector")) {
1508 hdmi->bridge.next = of_drm_find_bridge(remote);
1509 if (!hdmi->bridge.next) {
1510 dev_err(dev, "Waiting for external bridge\n");
1511 of_node_put(remote);
1512 return -EPROBE_DEFER;
1513 }
1514 }
1515
1516 i2c_np = of_parse_phandle(remote, "ddc-i2c-bus", 0);
1517 if (!i2c_np) {
1518 dev_err(dev, "Failed to find ddc-i2c-bus node in %s\n",
1519 remote->full_name);
1520 of_node_put(remote);
1521 return -EINVAL;
1522 }
1523 of_node_put(remote);
1524
1525 hdmi->ddc_adpt = of_find_i2c_adapter_by_node(i2c_np);
1526 if (!hdmi->ddc_adpt) {
1527 dev_err(dev, "Failed to get ddc i2c adapter by node\n");
1528 return -EINVAL;
1529 }
1530
1531 return 0;
1532}
1533
1534/*
1535 * HDMI audio codec callbacks
1536 */
1537
1538static int mtk_hdmi_audio_hw_params(struct device *dev, void *data,
1539 struct hdmi_codec_daifmt *daifmt,
1540 struct hdmi_codec_params *params)
1541{
1542 struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
1543 struct hdmi_audio_param hdmi_params;
1544 unsigned int chan = params->cea.channels;
1545
1546 dev_dbg(hdmi->dev, "%s: %u Hz, %d bit, %d channels\n", __func__,
1547 params->sample_rate, params->sample_width, chan);
1548
1549 if (!hdmi->bridge.encoder)
1550 return -ENODEV;
1551
1552 switch (chan) {
1553 case 2:
1554 hdmi_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_2_0;
1555 break;
1556 case 4:
1557 hdmi_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_4_0;
1558 break;
1559 case 6:
1560 hdmi_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_5_1;
1561 break;
1562 case 8:
1563 hdmi_params.aud_input_chan_type = HDMI_AUD_CHAN_TYPE_7_1;
1564 break;
1565 default:
1566 dev_err(hdmi->dev, "channel[%d] not supported!\n", chan);
1567 return -EINVAL;
1568 }
1569
1570 switch (params->sample_rate) {
1571 case 32000:
1572 case 44100:
1573 case 48000:
1574 case 88200:
1575 case 96000:
1576 case 176400:
1577 case 192000:
1578 break;
1579 default:
1580 dev_err(hdmi->dev, "rate[%d] not supported!\n",
1581 params->sample_rate);
1582 return -EINVAL;
1583 }
1584
1585 switch (daifmt->fmt) {
1586 case HDMI_I2S:
1587 hdmi_params.aud_codec = HDMI_AUDIO_CODING_TYPE_PCM;
1588 hdmi_params.aud_sampe_size = HDMI_AUDIO_SAMPLE_SIZE_16;
1589 hdmi_params.aud_input_type = HDMI_AUD_INPUT_I2S;
1590 hdmi_params.aud_i2s_fmt = HDMI_I2S_MODE_I2S_24BIT;
1591 hdmi_params.aud_mclk = HDMI_AUD_MCLK_128FS;
1592 break;
1593 default:
1594 dev_err(hdmi->dev, "%s: Invalid DAI format %d\n", __func__,
1595 daifmt->fmt);
1596 return -EINVAL;
1597 }
1598
1599 memcpy(&hdmi_params.codec_params, params,
1600 sizeof(hdmi_params.codec_params));
1601
1602 mtk_hdmi_audio_set_param(hdmi, &hdmi_params);
1603
1604 return 0;
1605}
1606
1607static int mtk_hdmi_audio_startup(struct device *dev, void *data)
1608{
1609 struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
1610
1611 dev_dbg(dev, "%s\n", __func__);
1612
1613 mtk_hdmi_audio_enable(hdmi);
1614
1615 return 0;
1616}
1617
1618static void mtk_hdmi_audio_shutdown(struct device *dev, void *data)
1619{
1620 struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
1621
1622 dev_dbg(dev, "%s\n", __func__);
1623
1624 mtk_hdmi_audio_disable(hdmi);
1625}
1626
1627int mtk_hdmi_audio_digital_mute(struct device *dev, void *data, bool enable)
1628{
1629 struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
1630
1631 dev_dbg(dev, "%s(%d)\n", __func__, enable);
1632
1633 if (enable)
1634 mtk_hdmi_hw_aud_mute(hdmi);
1635 else
1636 mtk_hdmi_hw_aud_unmute(hdmi);
1637
1638 return 0;
1639}
1640
1641static int mtk_hdmi_audio_get_eld(struct device *dev, void *data,
				      uint8_t *buf, size_t len)
1642{
1643 struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
1644
1645 dev_dbg(dev, "%s\n", __func__);
1646
1647 memcpy(buf, hdmi->conn.eld, min(sizeof(hdmi->conn.eld), len));
1648
1649 return 0;
1650}
1651
1652static const struct hdmi_codec_ops mtk_hdmi_audio_codec_ops = {
1653 .hw_params = mtk_hdmi_audio_hw_params,
1654 .audio_startup = mtk_hdmi_audio_startup,
1655 .audio_shutdown = mtk_hdmi_audio_shutdown,
1656 .digital_mute = mtk_hdmi_audio_digital_mute,
1657 .get_eld = mtk_hdmi_audio_get_eld,
1658};
1659
1660static void mtk_hdmi_register_audio_driver(struct device *dev)
1661{
1662 struct hdmi_codec_pdata codec_data = {
1663 .ops = &mtk_hdmi_audio_codec_ops,
1664 .max_i2s_channels = 2,
1665 .i2s = 1,
1666 };
1667 struct platform_device *pdev;
1668
1669 pdev = platform_device_register_data(dev, HDMI_CODEC_DRV_NAME,
1670 PLATFORM_DEVID_AUTO, &codec_data,
1671 sizeof(codec_data));
1672 if (IS_ERR(pdev))
1673 return;
1674
1675 DRM_INFO("%s driver bound to HDMI\n", HDMI_CODEC_DRV_NAME);
1676}
1677
1678static int mtk_drm_hdmi_probe(struct platform_device *pdev)
1679{
1680 struct mtk_hdmi *hdmi;
1681 struct device *dev = &pdev->dev;
1682 int ret;
1683
1684 hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
1685 if (!hdmi)
1686 return -ENOMEM;
1687
1688 hdmi->dev = dev;
1689
1690 ret = mtk_hdmi_dt_parse_pdata(hdmi, pdev);
1691 if (ret)
1692 return ret;
1693
1694 hdmi->phy = devm_phy_get(dev, "hdmi");
1695 if (IS_ERR(hdmi->phy)) {
1696 ret = PTR_ERR(hdmi->phy);
1697 dev_err(dev, "Failed to get HDMI PHY: %d\n", ret);
1698 return ret;
1699 }
1700
1701 platform_set_drvdata(pdev, hdmi);
1702
1703 ret = mtk_hdmi_output_init(hdmi);
1704 if (ret) {
1705 dev_err(dev, "Failed to initialize hdmi output\n");
1706 return ret;
1707 }
1708
1709 mtk_hdmi_register_audio_driver(dev);
1710
1711 hdmi->bridge.funcs = &mtk_hdmi_bridge_funcs;
1712 hdmi->bridge.of_node = pdev->dev.of_node;
1713 ret = drm_bridge_add(&hdmi->bridge);
1714 if (ret) {
1715 dev_err(dev, "failed to add bridge, ret = %d\n", ret);
1716 return ret;
1717 }
1718
1719 ret = mtk_hdmi_clk_enable_audio(hdmi);
1720 if (ret) {
1721 dev_err(dev, "Failed to enable audio clocks: %d\n", ret);
1722 goto err_bridge_remove;
1723 }
1724
1725 dev_dbg(dev, "mediatek hdmi probe success\n");
1726 return 0;
1727
1728err_bridge_remove:
1729 drm_bridge_remove(&hdmi->bridge);
1730 return ret;
1731}
1732
1733static int mtk_drm_hdmi_remove(struct platform_device *pdev)
1734{
1735 struct mtk_hdmi *hdmi = platform_get_drvdata(pdev);
1736
1737 drm_bridge_remove(&hdmi->bridge);
1738 mtk_hdmi_clk_disable_audio(hdmi);
1739 return 0;
1740}
1741
1742#ifdef CONFIG_PM_SLEEP
1743static int mtk_hdmi_suspend(struct device *dev)
1744{
1745 struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
1746
1747 mtk_hdmi_clk_disable_audio(hdmi);
1748 dev_dbg(dev, "hdmi suspend success!\n");
1749 return 0;
1750}
1751
1752static int mtk_hdmi_resume(struct device *dev)
1753{
1754 struct mtk_hdmi *hdmi = dev_get_drvdata(dev);
1755 int ret = 0;
1756
1757 ret = mtk_hdmi_clk_enable_audio(hdmi);
1758 if (ret) {
1759 dev_err(dev, "hdmi resume failed!\n");
1760 return ret;
1761 }
1762
1763 dev_dbg(dev, "hdmi resume success!\n");
1764 return 0;
1765}
1766#endif
1767static SIMPLE_DEV_PM_OPS(mtk_hdmi_pm_ops,
1768 mtk_hdmi_suspend, mtk_hdmi_resume);
1769
1770static const struct of_device_id mtk_drm_hdmi_of_ids[] = {
1771 { .compatible = "mediatek,mt8173-hdmi", },
1772 {}
1773};
1774
1775static struct platform_driver mtk_hdmi_driver = {
1776 .probe = mtk_drm_hdmi_probe,
1777 .remove = mtk_drm_hdmi_remove,
1778 .driver = {
1779 .name = "mediatek-drm-hdmi",
1780 .of_match_table = mtk_drm_hdmi_of_ids,
1781 .pm = &mtk_hdmi_pm_ops,
1782 },
1783};
1784
1785static struct platform_driver * const mtk_hdmi_drivers[] = {
1786 &mtk_hdmi_phy_driver,
1787 &mtk_hdmi_ddc_driver,
1788 &mtk_cec_driver,
1789 &mtk_hdmi_driver,
1790};
1791
1792static int __init mtk_hdmitx_init(void)
1793{
1794 int ret;
1795 int i;
1796
1797 for (i = 0; i < ARRAY_SIZE(mtk_hdmi_drivers); i++) {
1798 ret = platform_driver_register(mtk_hdmi_drivers[i]);
1799 if (ret < 0) {
1800 pr_err("Failed to register %s driver: %d\n",
1801 mtk_hdmi_drivers[i]->driver.name, ret);
1802 goto err;
1803 }
1804 }
1805
1806 return 0;
1807
1808err:
1809 while (--i >= 0)
1810 platform_driver_unregister(mtk_hdmi_drivers[i]);
1811
1812 return ret;
1813}
1814
1815static void __exit mtk_hdmitx_exit(void)
1816{
1817 int i;
1818
1819 for (i = ARRAY_SIZE(mtk_hdmi_drivers) - 1; i >= 0; i--)
1820 platform_driver_unregister(mtk_hdmi_drivers[i]);
1821}
1822
1823module_init(mtk_hdmitx_init);
1824module_exit(mtk_hdmitx_exit);
1825
1826MODULE_AUTHOR("Jie Qiu <jie.qiu@mediatek.com>");
1827MODULE_DESCRIPTION("MediaTek HDMI Driver");
1828MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.h b/drivers/gpu/drm/mediatek/mtk_hdmi.h
new file mode 100644
index 000000000000..6371b3de1ff6
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.h
@@ -0,0 +1,23 @@
1/*
2 * Copyright (c) 2014 MediaTek Inc.
3 * Author: Jie Qiu <jie.qiu@mediatek.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14#ifndef _MTK_HDMI_CTRL_H
15#define _MTK_HDMI_CTRL_H
16
17struct platform_driver;
18
19extern struct platform_driver mtk_cec_driver;
20extern struct platform_driver mtk_hdmi_ddc_driver;
21extern struct platform_driver mtk_hdmi_phy_driver;
22
23#endif /* _MTK_HDMI_CTRL_H */
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c
new file mode 100644
index 000000000000..33c9e1bdb114
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi_ddc.c
@@ -0,0 +1,358 @@
1/*
2 * Copyright (c) 2014 MediaTek Inc.
3 * Author: Jie Qiu <jie.qiu@mediatek.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/i2c.h>
17#include <linux/time.h>
18#include <linux/delay.h>
19#include <linux/errno.h>
20#include <linux/err.h>
21#include <linux/platform_device.h>
22#include <linux/clk.h>
23#include <linux/slab.h>
24#include <linux/io.h>
25#include <linux/iopoll.h>
26#include <linux/of_address.h>
27#include <linux/of_irq.h>
28#include <linux/of_platform.h>
29
30#define SIF1_CLOK (288)
31#define DDC_DDCMCTL0 (0x0)
32#define DDCM_ODRAIN BIT(31)
33#define DDCM_CLK_DIV_OFFSET (16)
34#define DDCM_CLK_DIV_MASK (0xfff << 16)
35#define DDCM_CS_STATUS BIT(4)
36#define DDCM_SCL_STATE BIT(3)
37#define DDCM_SDA_STATE BIT(2)
38#define DDCM_SM0EN BIT(1)
39#define DDCM_SCL_STRECH BIT(0)
40#define DDC_DDCMCTL1 (0x4)
41#define DDCM_ACK_OFFSET (16)
42#define DDCM_ACK_MASK (0xff << 16)
43#define DDCM_PGLEN_OFFSET (8)
44#define DDCM_PGLEN_MASK (0x7 << 8)
45#define DDCM_SIF_MODE_OFFSET (4)
46#define DDCM_SIF_MODE_MASK (0x7 << 4)
47#define DDCM_START (0x1)
48#define DDCM_WRITE_DATA (0x2)
49#define DDCM_STOP (0x3)
50#define DDCM_READ_DATA_NO_ACK (0x4)
51#define DDCM_READ_DATA_ACK (0x5)
52#define DDCM_TRI BIT(0)
53#define DDC_DDCMD0 (0x8)
54#define DDCM_DATA3 (0xff << 24)
55#define DDCM_DATA2 (0xff << 16)
56#define DDCM_DATA1 (0xff << 8)
57#define DDCM_DATA0 (0xff << 0)
58#define DDC_DDCMD1 (0xc)
59#define DDCM_DATA7 (0xff << 24)
60#define DDCM_DATA6 (0xff << 16)
61#define DDCM_DATA5 (0xff << 8)
62#define DDCM_DATA4 (0xff << 0)
63
64struct mtk_hdmi_ddc {
65 struct i2c_adapter adap;
66 struct clk *clk;
67 void __iomem *regs;
68};
69
70static inline void sif_set_bit(struct mtk_hdmi_ddc *ddc, unsigned int offset,
71 unsigned int val)
72{
73 writel(readl(ddc->regs + offset) | val, ddc->regs + offset);
74}
75
76static inline void sif_clr_bit(struct mtk_hdmi_ddc *ddc, unsigned int offset,
77 unsigned int val)
78{
79 writel(readl(ddc->regs + offset) & ~val, ddc->regs + offset);
80}
81
82static inline bool sif_bit_is_set(struct mtk_hdmi_ddc *ddc, unsigned int offset,
83 unsigned int val)
84{
85 return (readl(ddc->regs + offset) & val) == val;
86}
87
88static inline void sif_write_mask(struct mtk_hdmi_ddc *ddc, unsigned int offset,
89 unsigned int mask, unsigned int shift,
90 unsigned int val)
91{
92 unsigned int tmp;
93
94 tmp = readl(ddc->regs + offset);
95 tmp &= ~mask;
96 tmp |= (val << shift) & mask;
97 writel(tmp, ddc->regs + offset);
98}
99
100static inline unsigned int sif_read_mask(struct mtk_hdmi_ddc *ddc,
101 unsigned int offset, unsigned int mask,
102 unsigned int shift)
103{
104 return (readl(ddc->regs + offset) & mask) >> shift;
105}
106
107static void ddcm_trigger_mode(struct mtk_hdmi_ddc *ddc, int mode)
108{
109 u32 val;
110
111 sif_write_mask(ddc, DDC_DDCMCTL1, DDCM_SIF_MODE_MASK,
112 DDCM_SIF_MODE_OFFSET, mode);
113 sif_set_bit(ddc, DDC_DDCMCTL1, DDCM_TRI);
114 readl_poll_timeout(ddc->regs + DDC_DDCMCTL1, val,
115 (val & DDCM_TRI) != DDCM_TRI, 4, 20000);
116}
117
118static int mtk_hdmi_ddc_read_msg(struct mtk_hdmi_ddc *ddc, struct i2c_msg *msg)
119{
120 struct device *dev = ddc->adap.dev.parent;
121 u32 remain_count, ack_count, ack_final, read_count, temp_count;
122 u32 index = 0;
123 u32 ack;
124 int i;
125
126 ddcm_trigger_mode(ddc, DDCM_START);
127 sif_write_mask(ddc, DDC_DDCMD0, 0xff, 0, (msg->addr << 1) | 0x01);
128 sif_write_mask(ddc, DDC_DDCMCTL1, DDCM_PGLEN_MASK, DDCM_PGLEN_OFFSET,
129 0x00);
130 ddcm_trigger_mode(ddc, DDCM_WRITE_DATA);
131 ack = sif_read_mask(ddc, DDC_DDCMCTL1, DDCM_ACK_MASK, DDCM_ACK_OFFSET);
132 dev_dbg(dev, "ack = 0x%x\n", ack);
133 if (ack != 0x01) {
134 dev_err(dev, "i2c ack err!\n");
135 return -ENXIO;
136 }
137
138 remain_count = msg->len;
139 ack_count = (msg->len - 1) / 8;
140 ack_final = 0;
141
142 while (remain_count > 0) {
143 if (ack_count > 0) {
144 read_count = 8;
145 ack_final = 0;
146 ack_count--;
147 } else {
148 read_count = remain_count;
149 ack_final = 1;
150 }
151
152 sif_write_mask(ddc, DDC_DDCMCTL1, DDCM_PGLEN_MASK,
153 DDCM_PGLEN_OFFSET, read_count - 1);
154 ddcm_trigger_mode(ddc, (ack_final == 1) ?
155 DDCM_READ_DATA_NO_ACK :
156 DDCM_READ_DATA_ACK);
157
158 ack = sif_read_mask(ddc, DDC_DDCMCTL1, DDCM_ACK_MASK,
159 DDCM_ACK_OFFSET);
160 temp_count = 0;
161 while (((ack & (1 << temp_count)) != 0) && (temp_count < 8))
162 temp_count++;
163 if (((ack_final == 1) && (temp_count != (read_count - 1))) ||
164 ((ack_final == 0) && (temp_count != read_count))) {
165 dev_err(dev, "Address NACK! ACK(0x%x)\n", ack);
166 break;
167 }
168
169 for (i = read_count; i >= 1; i--) {
170 int shift;
171 int offset;
172
173 if (i > 4) {
174 offset = DDC_DDCMD1;
175 shift = (i - 5) * 8;
176 } else {
177 offset = DDC_DDCMD0;
178 shift = (i - 1) * 8;
179 }
180
181 msg->buf[index + i - 1] = sif_read_mask(ddc, offset,
182 0xff << shift,
183 shift);
184 }
185
186 remain_count -= read_count;
187 index += read_count;
188 }
189
190 return 0;
191}
192
193static int mtk_hdmi_ddc_write_msg(struct mtk_hdmi_ddc *ddc, struct i2c_msg *msg)
194{
195 struct device *dev = ddc->adap.dev.parent;
196 u32 ack;
197
198 ddcm_trigger_mode(ddc, DDCM_START);
199 sif_write_mask(ddc, DDC_DDCMD0, DDCM_DATA0, 0, msg->addr << 1);
200 sif_write_mask(ddc, DDC_DDCMD0, DDCM_DATA1, 8, msg->buf[0]);
201 sif_write_mask(ddc, DDC_DDCMCTL1, DDCM_PGLEN_MASK, DDCM_PGLEN_OFFSET,
202 0x1);
203 ddcm_trigger_mode(ddc, DDCM_WRITE_DATA);
204
205 ack = sif_read_mask(ddc, DDC_DDCMCTL1, DDCM_ACK_MASK, DDCM_ACK_OFFSET);
206 dev_dbg(dev, "ack = %d\n", ack);
207
208 if (ack != 0x03) {
209 dev_err(dev, "i2c ack err!\n");
210 return -EIO;
211 }
212
213 return 0;
214}
215
216static int mtk_hdmi_ddc_xfer(struct i2c_adapter *adapter,
217 struct i2c_msg *msgs, int num)
218{
219 struct mtk_hdmi_ddc *ddc = adapter->algo_data;
220 struct device *dev = adapter->dev.parent;
221 int ret;
222 int i;
223
224 if (!ddc) {
225 dev_err(dev, "invalid arguments\n");
226 return -EINVAL;
227 }
228
229 sif_set_bit(ddc, DDC_DDCMCTL0, DDCM_SCL_STRECH);
230 sif_set_bit(ddc, DDC_DDCMCTL0, DDCM_SM0EN);
231 sif_clr_bit(ddc, DDC_DDCMCTL0, DDCM_ODRAIN);
232
233 if (sif_bit_is_set(ddc, DDC_DDCMCTL1, DDCM_TRI)) {
234 dev_err(dev, "ddc line is busy!\n");
235 return -EBUSY;
236 }
237
238 sif_write_mask(ddc, DDC_DDCMCTL0, DDCM_CLK_DIV_MASK,
239 DDCM_CLK_DIV_OFFSET, SIF1_CLOK);
240
241 for (i = 0; i < num; i++) {
242 struct i2c_msg *msg = &msgs[i];
243
244 dev_dbg(dev, "i2c msg, adr:0x%x, flags:%d, len :0x%x\n",
245 msg->addr, msg->flags, msg->len);
246
247 if (msg->flags & I2C_M_RD)
248 ret = mtk_hdmi_ddc_read_msg(ddc, msg);
249 else
250 ret = mtk_hdmi_ddc_write_msg(ddc, msg);
251 if (ret < 0)
252 goto xfer_end;
253 }
254
255 ddcm_trigger_mode(ddc, DDCM_STOP);
256
257 return i;
258
259xfer_end:
260 ddcm_trigger_mode(ddc, DDCM_STOP);
261 dev_err(dev, "ddc failed!\n");
262 return ret;
263}
264
265static u32 mtk_hdmi_ddc_func(struct i2c_adapter *adapter)
266{
267 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
268}
269
270static const struct i2c_algorithm mtk_hdmi_ddc_algorithm = {
271 .master_xfer = mtk_hdmi_ddc_xfer,
272 .functionality = mtk_hdmi_ddc_func,
273};
274
275static int mtk_hdmi_ddc_probe(struct platform_device *pdev)
276{
277 struct device *dev = &pdev->dev;
278 struct mtk_hdmi_ddc *ddc;
279 struct resource *mem;
280 int ret;
281
282 ddc = devm_kzalloc(dev, sizeof(struct mtk_hdmi_ddc), GFP_KERNEL);
283 if (!ddc)
284 return -ENOMEM;
285
286 ddc->clk = devm_clk_get(dev, "ddc-i2c");
287 if (IS_ERR(ddc->clk)) {
288 dev_err(dev, "get ddc_clk failed: %p ,\n", ddc->clk);
289 return PTR_ERR(ddc->clk);
290 }
291
292 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
293 ddc->regs = devm_ioremap_resource(&pdev->dev, mem);
294 if (IS_ERR(ddc->regs))
295 return PTR_ERR(ddc->regs);
296
297 ret = clk_prepare_enable(ddc->clk);
298 if (ret) {
299 dev_err(dev, "enable ddc clk failed!\n");
300 return ret;
301 }
302
303 strlcpy(ddc->adap.name, "mediatek-hdmi-ddc", sizeof(ddc->adap.name));
304 ddc->adap.owner = THIS_MODULE;
305 ddc->adap.class = I2C_CLASS_DDC;
306 ddc->adap.algo = &mtk_hdmi_ddc_algorithm;
307 ddc->adap.retries = 3;
308 ddc->adap.dev.of_node = dev->of_node;
309 ddc->adap.algo_data = ddc;
310 ddc->adap.dev.parent = &pdev->dev;
311
312 ret = i2c_add_adapter(&ddc->adap);
313 if (ret < 0) {
314 dev_err(dev, "failed to add bus to i2c core\n");
315 goto err_clk_disable;
316 }
317
318 platform_set_drvdata(pdev, ddc);
319
320 dev_dbg(dev, "ddc->adap: %p\n", &ddc->adap);
321 dev_dbg(dev, "ddc->clk: %p\n", ddc->clk);
322 dev_dbg(dev, "physical adr: %pa, end: %pa\n", &mem->start,
323 &mem->end);
324
325 return 0;
326
327err_clk_disable:
328 clk_disable_unprepare(ddc->clk);
329 return ret;
330}
331
332static int mtk_hdmi_ddc_remove(struct platform_device *pdev)
333{
334 struct mtk_hdmi_ddc *ddc = platform_get_drvdata(pdev);
335
336 i2c_del_adapter(&ddc->adap);
337 clk_disable_unprepare(ddc->clk);
338
339 return 0;
340}
341
342static const struct of_device_id mtk_hdmi_ddc_match[] = {
343 { .compatible = "mediatek,mt8173-hdmi-ddc", },
344 {},
345};
346
347struct platform_driver mtk_hdmi_ddc_driver = {
348 .probe = mtk_hdmi_ddc_probe,
349 .remove = mtk_hdmi_ddc_remove,
350 .driver = {
351 .name = "mediatek-hdmi-ddc",
352 .of_match_table = mtk_hdmi_ddc_match,
353 },
354};
355
356MODULE_AUTHOR("Jie Qiu <jie.qiu@mediatek.com>");
357MODULE_DESCRIPTION("MediaTek HDMI DDC Driver");
358MODULE_LICENSE("GPL v2");
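
[Editor's note] mtk_hdmi_ddc_read_msg() above pages a read transfer: (len - 1) / 8 full eight-byte pages are read with ACK, and the final page is read with the last byte left un-ACKed to terminate the transaction. The standalone C sketch below mirrors only that page schedule; it is not part of the merged driver, and the 128-byte length is just the size of one EDID block, used for illustration.

#include <stdio.h>

int main(void)
{
	unsigned int remain = 128;	/* one EDID block, for illustration */
	unsigned int ack_count = (remain - 1) / 8;	/* same split as the driver */
	unsigned int page = 0;

	while (remain > 0) {
		unsigned int count;
		int last;

		if (ack_count > 0) {
			count = 8;
			last = 0;
			ack_count--;
		} else {
			count = remain;
			last = 1;
		}
		printf("page %2u: %u bytes, %s\n", page++, count,
		       last ? "last byte not ACKed" : "every byte ACKed");
		remain -= count;
	}
	return 0;
}

For a 128-byte read this prints fifteen fully ACKed pages followed by one final page that ends without an ACK, matching the ack_count/ack_final handling in the loop above.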
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi_regs.h b/drivers/gpu/drm/mediatek/mtk_hdmi_regs.h
new file mode 100644
index 000000000000..a5cb07d12c9c
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi_regs.h
@@ -0,0 +1,238 @@
1/*
2 * Copyright (c) 2014 MediaTek Inc.
3 * Author: Jie Qiu <jie.qiu@mediatek.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14#ifndef _MTK_HDMI_REGS_H
15#define _MTK_HDMI_REGS_H
16
17#define GRL_INT_MASK 0x18
18#define GRL_IFM_PORT 0x188
19#define GRL_CH_SWAP 0x198
20#define LR_SWAP BIT(0)
21#define LFE_CC_SWAP BIT(1)
22#define LSRS_SWAP BIT(2)
23#define RLS_RRS_SWAP BIT(3)
24#define LR_STATUS_SWAP BIT(4)
25#define GRL_I2S_C_STA0 0x140
26#define GRL_I2S_C_STA1 0x144
27#define GRL_I2S_C_STA2 0x148
28#define GRL_I2S_C_STA3 0x14C
29#define GRL_I2S_C_STA4 0x150
30#define GRL_I2S_UV 0x154
31#define I2S_UV_V BIT(0)
32#define I2S_UV_U BIT(1)
33#define I2S_UV_CH_EN_MASK 0x3c
34#define I2S_UV_CH_EN(x) BIT((x) + 2)
35#define I2S_UV_TMDS_DEBUG BIT(6)
36#define I2S_UV_NORMAL_INFO_INV BIT(7)
37#define GRL_ACP_ISRC_CTRL 0x158
38#define VS_EN BIT(0)
39#define ACP_EN BIT(1)
40#define ISRC1_EN BIT(2)
41#define ISRC2_EN BIT(3)
42#define GAMUT_EN BIT(4)
43#define GRL_CTS_CTRL 0x160
44#define CTS_CTRL_SOFT BIT(0)
45#define GRL_INT 0x14
46#define INT_MDI BIT(0)
47#define INT_HDCP BIT(1)
48#define INT_FIFO_O BIT(2)
49#define INT_FIFO_U BIT(3)
50#define INT_IFM_ERR BIT(4)
51#define INT_INF_DONE BIT(5)
52#define INT_NCTS_DONE BIT(6)
53#define INT_CTRL_PKT_DONE BIT(7)
54#define GRL_INT_MASK 0x18
55#define GRL_CTRL 0x1C
56#define CTRL_GEN_EN BIT(2)
57#define CTRL_SPD_EN BIT(3)
58#define CTRL_MPEG_EN BIT(4)
59#define CTRL_AUDIO_EN BIT(5)
60#define CTRL_AVI_EN BIT(6)
61#define CTRL_AVMUTE BIT(7)
62#define GRL_STATUS 0x20
63#define STATUS_HTPLG BIT(0)
64#define STATUS_PORD BIT(1)
65#define GRL_DIVN 0x170
66#define NCTS_WRI_ANYTIME BIT(6)
67#define GRL_AUDIO_CFG 0x17C
68#define AUDIO_ZERO BIT(0)
69#define HIGH_BIT_RATE BIT(1)
70#define SACD_DST BIT(2)
71#define DST_NORMAL_DOUBLE BIT(3)
72#define DSD_INV BIT(4)
73#define LR_INV BIT(5)
74#define LR_MIX BIT(6)
75#define DSD_SEL BIT(7)
76#define GRL_NCTS 0x184
77#define GRL_CH_SW0 0x18C
78#define GRL_CH_SW1 0x190
79#define GRL_CH_SW2 0x194
80#define CH_SWITCH(from, to) ((from) << ((to) * 3))
81#define GRL_INFOFRM_VER 0x19C
82#define GRL_INFOFRM_TYPE 0x1A0
83#define GRL_INFOFRM_LNG 0x1A4
84#define GRL_MIX_CTRL 0x1B4
85#define MIX_CTRL_SRC_EN BIT(0)
86#define BYPASS_VOLUME BIT(1)
87#define MIX_CTRL_FLAT BIT(7)
88#define GRL_AOUT_CFG 0x1C4
89#define AOUT_BNUM_SEL_MASK 0x03
90#define AOUT_24BIT 0x00
91#define AOUT_20BIT 0x02
92#define AOUT_16BIT 0x03
93#define AOUT_FIFO_ADAP_CTRL BIT(6)
94#define AOUT_BURST_PREAMBLE_EN BIT(7)
95#define HIGH_BIT_RATE_PACKET_ALIGN (AOUT_BURST_PREAMBLE_EN | \
96 AOUT_FIFO_ADAP_CTRL)
97#define GRL_SHIFT_L1 0x1C0
98#define GRL_SHIFT_R2 0x1B0
99#define AUDIO_PACKET_OFF BIT(6)
100#define GRL_CFG0 0x24
101#define CFG0_I2S_MODE_MASK 0x3
102#define CFG0_I2S_MODE_RTJ 0x1
103#define CFG0_I2S_MODE_LTJ 0x0
104#define CFG0_I2S_MODE_I2S 0x2
105#define CFG0_W_LENGTH_MASK 0x30
106#define CFG0_W_LENGTH_24BIT 0x00
107#define CFG0_W_LENGTH_16BIT 0x10
108#define GRL_CFG1 0x28
109#define CFG1_EDG_SEL BIT(0)
110#define CFG1_SPDIF BIT(1)
111#define CFG1_DVI BIT(2)
112#define CFG1_HDCP_DEBUG BIT(3)
113#define GRL_CFG2 0x2c
114#define CFG2_MHL_DE_SEL BIT(3)
115#define CFG2_MHL_FAKE_DE_SEL BIT(4)
116#define CFG2_MHL_DATA_REMAP BIT(5)
117#define CFG2_NOTICE_EN BIT(6)
118#define CFG2_ACLK_INV BIT(7)
119#define GRL_CFG3 0x30
120#define CFG3_AES_KEY_INDEX_MASK 0x3f
121#define CFG3_CONTROL_PACKET_DELAY BIT(6)
122#define CFG3_KSV_LOAD_START BIT(7)
123#define GRL_CFG4 0x34
124#define CFG4_AES_KEY_LOAD BIT(4)
125#define CFG4_AV_UNMUTE_EN BIT(5)
126#define CFG4_AV_UNMUTE_SET BIT(6)
127#define CFG4_MHL_MODE BIT(7)
128#define GRL_CFG5 0x38
129#define CFG5_CD_RATIO_MASK 0x8F
130#define CFG5_FS128 (0x1 << 4)
131#define CFG5_FS256 (0x2 << 4)
132#define CFG5_FS384 (0x3 << 4)
133#define CFG5_FS512 (0x4 << 4)
134#define CFG5_FS768 (0x6 << 4)
135#define DUMMY_304 0x304
136#define CHMO_SEL (0x3 << 2)
137#define CHM1_SEL (0x3 << 4)
138#define CHM2_SEL (0x3 << 6)
139#define AUDIO_I2S_NCTS_SEL BIT(1)
140#define AUDIO_I2S_NCTS_SEL_64 (1 << 1)
141#define AUDIO_I2S_NCTS_SEL_128 (0 << 1)
142#define NEW_GCP_CTRL BIT(0)
143#define NEW_GCP_CTRL_MERGE BIT(0)
144#define GRL_L_STATUS_0 0x200
145#define GRL_L_STATUS_1 0x204
146#define GRL_L_STATUS_2 0x208
147#define GRL_L_STATUS_3 0x20c
148#define GRL_L_STATUS_4 0x210
149#define GRL_L_STATUS_5 0x214
150#define GRL_L_STATUS_6 0x218
151#define GRL_L_STATUS_7 0x21c
152#define GRL_L_STATUS_8 0x220
153#define GRL_L_STATUS_9 0x224
154#define GRL_L_STATUS_10 0x228
155#define GRL_L_STATUS_11 0x22c
156#define GRL_L_STATUS_12 0x230
157#define GRL_L_STATUS_13 0x234
158#define GRL_L_STATUS_14 0x238
159#define GRL_L_STATUS_15 0x23c
160#define GRL_L_STATUS_16 0x240
161#define GRL_L_STATUS_17 0x244
162#define GRL_L_STATUS_18 0x248
163#define GRL_L_STATUS_19 0x24c
164#define GRL_L_STATUS_20 0x250
165#define GRL_L_STATUS_21 0x254
166#define GRL_L_STATUS_22 0x258
167#define GRL_L_STATUS_23 0x25c
168#define GRL_R_STATUS_0 0x260
169#define GRL_R_STATUS_1 0x264
170#define GRL_R_STATUS_2 0x268
171#define GRL_R_STATUS_3 0x26c
172#define GRL_R_STATUS_4 0x270
173#define GRL_R_STATUS_5 0x274
174#define GRL_R_STATUS_6 0x278
175#define GRL_R_STATUS_7 0x27c
176#define GRL_R_STATUS_8 0x280
177#define GRL_R_STATUS_9 0x284
178#define GRL_R_STATUS_10 0x288
179#define GRL_R_STATUS_11 0x28c
180#define GRL_R_STATUS_12 0x290
181#define GRL_R_STATUS_13 0x294
182#define GRL_R_STATUS_14 0x298
183#define GRL_R_STATUS_15 0x29c
184#define GRL_R_STATUS_16 0x2a0
185#define GRL_R_STATUS_17 0x2a4
186#define GRL_R_STATUS_18 0x2a8
187#define GRL_R_STATUS_19 0x2ac
188#define GRL_R_STATUS_20 0x2b0
189#define GRL_R_STATUS_21 0x2b4
190#define GRL_R_STATUS_22 0x2b8
191#define GRL_R_STATUS_23 0x2bc
192#define GRL_ABIST_CTRL0 0x2D4
193#define GRL_ABIST_CTRL1 0x2D8
194#define ABIST_EN BIT(7)
195#define ABIST_DATA_FMT (0x7 << 0)
196#define VIDEO_CFG_0 0x380
197#define VIDEO_CFG_1 0x384
198#define VIDEO_CFG_2 0x388
199#define VIDEO_CFG_3 0x38c
200#define VIDEO_CFG_4 0x390
201#define VIDEO_SOURCE_SEL BIT(7)
202#define NORMAL_PATH (1 << 7)
203#define GEN_RGB (0 << 7)
204
205#define HDMI_SYS_CFG1C 0x000
206#define HDMI_ON BIT(0)
207#define HDMI_RST BIT(1)
208#define ANLG_ON BIT(2)
209#define CFG10_DVI BIT(3)
210#define HDMI_TST BIT(3)
211#define SYS_KEYMASK1 (0xff << 8)
212#define SYS_KEYMASK2 (0xff << 16)
213#define AUD_OUTSYNC_EN BIT(24)
214#define AUD_OUTSYNC_PRE_EN BIT(25)
215#define I2CM_ON BIT(26)
216#define E2PROM_TYPE_8BIT BIT(27)
217#define MCM_E2PROM_ON BIT(28)
218#define EXT_E2PROM_ON BIT(29)
219#define HTPLG_PIN_SEL_OFF BIT(30)
220#define AES_EFUSE_ENABLE BIT(31)
221#define HDMI_SYS_CFG20 0x004
222#define DEEP_COLOR_MODE_MASK (3 << 1)
223#define COLOR_8BIT_MODE (0 << 1)
224#define COLOR_10BIT_MODE (1 << 1)
225#define COLOR_12BIT_MODE (2 << 1)
226#define COLOR_16BIT_MODE (3 << 1)
227#define DEEP_COLOR_EN BIT(0)
228#define HDMI_AUDIO_TEST_SEL BIT(8)
229#define HDMI2P0_EN BIT(11)
230#define HDMI_OUT_FIFO_EN BIT(16)
231#define HDMI_OUT_FIFO_CLK_INV BIT(17)
232#define MHL_MODE_ON BIT(28)
233#define MHL_PP_MODE BIT(29)
234#define MHL_SYNC_AUTO_EN BIT(30)
235#define HDMI_PCLK_FREE_RUN BIT(31)
236
237#define MTK_SIP_SET_AUTHORIZED_SECURE_REG 0x82000001
238#endif
diff --git a/drivers/gpu/drm/mediatek/mtk_mipi_tx.c b/drivers/gpu/drm/mediatek/mtk_mipi_tx.c
index cf8f38d39e10..1c366f8cb2d0 100644
--- a/drivers/gpu/drm/mediatek/mtk_mipi_tx.c
+++ b/drivers/gpu/drm/mediatek/mtk_mipi_tx.c
@@ -431,7 +431,7 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev)
 	phy_set_drvdata(phy, mipi_tx);
 
 	phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
-	if (IS_ERR(phy)) {
+	if (IS_ERR(phy_provider)) {
 		ret = PTR_ERR(phy_provider);
 		return ret;
 	}
diff --git a/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c b/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
new file mode 100644
index 000000000000..8a24754b440f
--- /dev/null
+++ b/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c
@@ -0,0 +1,515 @@
1/*
2 * Copyright (c) 2014 MediaTek Inc.
3 * Author: Jie Qiu <jie.qiu@mediatek.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#include <linux/clk.h>
16#include <linux/clk-provider.h>
17#include <linux/delay.h>
18#include <linux/io.h>
19#include <linux/mfd/syscon.h>
20#include <linux/module.h>
21#include <linux/phy/phy.h>
22#include <linux/platform_device.h>
23#include <linux/types.h>
24
25#define HDMI_CON0 0x00
26#define RG_HDMITX_PLL_EN BIT(31)
27#define RG_HDMITX_PLL_FBKDIV (0x7f << 24)
28#define PLL_FBKDIV_SHIFT 24
29#define RG_HDMITX_PLL_FBKSEL (0x3 << 22)
30#define PLL_FBKSEL_SHIFT 22
31#define RG_HDMITX_PLL_PREDIV (0x3 << 20)
32#define PREDIV_SHIFT 20
33#define RG_HDMITX_PLL_POSDIV (0x3 << 18)
34#define POSDIV_SHIFT 18
35#define RG_HDMITX_PLL_RST_DLY (0x3 << 16)
36#define RG_HDMITX_PLL_IR (0xf << 12)
37#define PLL_IR_SHIFT 12
38#define RG_HDMITX_PLL_IC (0xf << 8)
39#define PLL_IC_SHIFT 8
40#define RG_HDMITX_PLL_BP (0xf << 4)
41#define PLL_BP_SHIFT 4
42#define RG_HDMITX_PLL_BR (0x3 << 2)
43#define PLL_BR_SHIFT 2
44#define RG_HDMITX_PLL_BC (0x3 << 0)
45#define PLL_BC_SHIFT 0
46#define HDMI_CON1 0x04
47#define RG_HDMITX_PLL_DIVEN (0x7 << 29)
48#define PLL_DIVEN_SHIFT 29
49#define RG_HDMITX_PLL_AUTOK_EN BIT(28)
50#define RG_HDMITX_PLL_AUTOK_KF (0x3 << 26)
51#define RG_HDMITX_PLL_AUTOK_KS (0x3 << 24)
52#define RG_HDMITX_PLL_AUTOK_LOAD BIT(23)
53#define RG_HDMITX_PLL_BAND (0x3f << 16)
54#define RG_HDMITX_PLL_REF_SEL BIT(15)
55#define RG_HDMITX_PLL_BIAS_EN BIT(14)
56#define RG_HDMITX_PLL_BIAS_LPF_EN BIT(13)
57#define RG_HDMITX_PLL_TXDIV_EN BIT(12)
58#define RG_HDMITX_PLL_TXDIV (0x3 << 10)
59#define PLL_TXDIV_SHIFT 10
60#define RG_HDMITX_PLL_LVROD_EN BIT(9)
61#define RG_HDMITX_PLL_MONVC_EN BIT(8)
62#define RG_HDMITX_PLL_MONCK_EN BIT(7)
63#define RG_HDMITX_PLL_MONREF_EN BIT(6)
64#define RG_HDMITX_PLL_TST_EN BIT(5)
65#define RG_HDMITX_PLL_TST_CK_EN BIT(4)
66#define RG_HDMITX_PLL_TST_SEL (0xf << 0)
67#define HDMI_CON2 0x08
68#define RGS_HDMITX_PLL_AUTOK_BAND (0x7f << 8)
69#define RGS_HDMITX_PLL_AUTOK_FAIL BIT(1)
70#define RG_HDMITX_EN_TX_CKLDO BIT(0)
71#define HDMI_CON3 0x0c
72#define RG_HDMITX_SER_EN (0xf << 28)
73#define RG_HDMITX_PRD_EN (0xf << 24)
74#define RG_HDMITX_PRD_IMP_EN (0xf << 20)
75#define RG_HDMITX_DRV_EN (0xf << 16)
76#define RG_HDMITX_DRV_IMP_EN (0xf << 12)
77#define DRV_IMP_EN_SHIFT 12
78#define RG_HDMITX_MHLCK_FORCE BIT(10)
79#define RG_HDMITX_MHLCK_PPIX_EN BIT(9)
80#define RG_HDMITX_MHLCK_EN BIT(8)
81#define RG_HDMITX_SER_DIN_SEL (0xf << 4)
82#define RG_HDMITX_SER_5T1_BIST_EN BIT(3)
83#define RG_HDMITX_SER_BIST_TOG BIT(2)
84#define RG_HDMITX_SER_DIN_TOG BIT(1)
85#define RG_HDMITX_SER_CLKDIG_INV BIT(0)
86#define HDMI_CON4 0x10
87#define RG_HDMITX_PRD_IBIAS_CLK (0xf << 24)
88#define RG_HDMITX_PRD_IBIAS_D2 (0xf << 16)
89#define RG_HDMITX_PRD_IBIAS_D1 (0xf << 8)
90#define RG_HDMITX_PRD_IBIAS_D0 (0xf << 0)
91#define PRD_IBIAS_CLK_SHIFT 24
92#define PRD_IBIAS_D2_SHIFT 16
93#define PRD_IBIAS_D1_SHIFT 8
94#define PRD_IBIAS_D0_SHIFT 0
95#define HDMI_CON5 0x14
96#define RG_HDMITX_DRV_IBIAS_CLK (0x3f << 24)
97#define RG_HDMITX_DRV_IBIAS_D2 (0x3f << 16)
98#define RG_HDMITX_DRV_IBIAS_D1 (0x3f << 8)
99#define RG_HDMITX_DRV_IBIAS_D0 (0x3f << 0)
100#define DRV_IBIAS_CLK_SHIFT 24
101#define DRV_IBIAS_D2_SHIFT 16
102#define DRV_IBIAS_D1_SHIFT 8
103#define DRV_IBIAS_D0_SHIFT 0
104#define HDMI_CON6 0x18
105#define RG_HDMITX_DRV_IMP_CLK (0x3f << 24)
106#define RG_HDMITX_DRV_IMP_D2 (0x3f << 16)
107#define RG_HDMITX_DRV_IMP_D1 (0x3f << 8)
108#define RG_HDMITX_DRV_IMP_D0 (0x3f << 0)
109#define DRV_IMP_CLK_SHIFT 24
110#define DRV_IMP_D2_SHIFT 16
111#define DRV_IMP_D1_SHIFT 8
112#define DRV_IMP_D0_SHIFT 0
113#define HDMI_CON7 0x1c
114#define RG_HDMITX_MHLCK_DRV_IBIAS (0x1f << 27)
115#define RG_HDMITX_SER_DIN (0x3ff << 16)
116#define RG_HDMITX_CHLDC_TST (0xf << 12)
117#define RG_HDMITX_CHLCK_TST (0xf << 8)
118#define RG_HDMITX_RESERVE (0xff << 0)
119#define HDMI_CON8 0x20
120#define RGS_HDMITX_2T1_LEV (0xf << 16)
121#define RGS_HDMITX_2T1_EDG (0xf << 12)
122#define RGS_HDMITX_5T1_LEV (0xf << 8)
123#define RGS_HDMITX_5T1_EDG (0xf << 4)
124#define RGS_HDMITX_PLUG_TST BIT(0)
125
126struct mtk_hdmi_phy {
127 void __iomem *regs;
128 struct device *dev;
129 struct clk *pll;
130 struct clk_hw pll_hw;
131 unsigned long pll_rate;
132 u8 drv_imp_clk;
133 u8 drv_imp_d2;
134 u8 drv_imp_d1;
135 u8 drv_imp_d0;
136 u32 ibias;
137 u32 ibias_up;
138};
139
140static const u8 PREDIV[3][4] = {
141 {0x0, 0x0, 0x0, 0x0}, /* 27Mhz */
142 {0x1, 0x1, 0x1, 0x1}, /* 74Mhz */
143 {0x1, 0x1, 0x1, 0x1} /* 148Mhz */
144};
145
146static const u8 TXDIV[3][4] = {
147 {0x3, 0x3, 0x3, 0x2}, /* 27Mhz */
148 {0x2, 0x1, 0x1, 0x1}, /* 74Mhz */
149 {0x1, 0x0, 0x0, 0x0} /* 148Mhz */
150};
151
152static const u8 FBKSEL[3][4] = {
153 {0x1, 0x1, 0x1, 0x1}, /* 27Mhz */
154 {0x1, 0x0, 0x1, 0x1}, /* 74Mhz */
155 {0x1, 0x0, 0x1, 0x1} /* 148Mhz */
156};
157
158static const u8 FBKDIV[3][4] = {
159 {19, 24, 29, 19}, /* 27Mhz */
160 {19, 24, 14, 19}, /* 74Mhz */
161 {19, 24, 14, 19} /* 148Mhz */
162};
163
164static const u8 DIVEN[3][4] = {
165 {0x2, 0x1, 0x1, 0x2}, /* 27Mhz */
166 {0x2, 0x2, 0x2, 0x2}, /* 74Mhz */
167 {0x2, 0x2, 0x2, 0x2} /* 148Mhz */
168};
169
170static const u8 HTPLLBP[3][4] = {
171 {0xc, 0xc, 0x8, 0xc}, /* 27Mhz */
172 {0xc, 0xf, 0xf, 0xc}, /* 74Mhz */
173 {0xc, 0xf, 0xf, 0xc} /* 148Mhz */
174};
175
176static const u8 HTPLLBC[3][4] = {
177 {0x2, 0x3, 0x3, 0x2}, /* 27Mhz */
178 {0x2, 0x3, 0x3, 0x2}, /* 74Mhz */
179 {0x2, 0x3, 0x3, 0x2} /* 148Mhz */
180};
181
182static const u8 HTPLLBR[3][4] = {
183 {0x1, 0x1, 0x0, 0x1}, /* 27Mhz */
184 {0x1, 0x2, 0x2, 0x1}, /* 74Mhz */
185 {0x1, 0x2, 0x2, 0x1} /* 148Mhz */
186};
187
188static void mtk_hdmi_phy_clear_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
189 u32 bits)
190{
191 void __iomem *reg = hdmi_phy->regs + offset;
192 u32 tmp;
193
194 tmp = readl(reg);
195 tmp &= ~bits;
196 writel(tmp, reg);
197}
198
199static void mtk_hdmi_phy_set_bits(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
200 u32 bits)
201{
202 void __iomem *reg = hdmi_phy->regs + offset;
203 u32 tmp;
204
205 tmp = readl(reg);
206 tmp |= bits;
207 writel(tmp, reg);
208}
209
210static void mtk_hdmi_phy_mask(struct mtk_hdmi_phy *hdmi_phy, u32 offset,
211 u32 val, u32 mask)
212{
213 void __iomem *reg = hdmi_phy->regs + offset;
214 u32 tmp;
215
216 tmp = readl(reg);
217 tmp = (tmp & ~mask) | (val & mask);
218 writel(tmp, reg);
219}
220
221static inline struct mtk_hdmi_phy *to_mtk_hdmi_phy(struct clk_hw *hw)
222{
223 return container_of(hw, struct mtk_hdmi_phy, pll_hw);
224}
225
226static int mtk_hdmi_pll_prepare(struct clk_hw *hw)
227{
228 struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
229
230 dev_dbg(hdmi_phy->dev, "%s\n", __func__);
231
232 mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_AUTOK_EN);
233 mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_POSDIV);
234 mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3, RG_HDMITX_MHLCK_EN);
235 mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_BIAS_EN);
236 usleep_range(100, 150);
237 mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_EN);
238 usleep_range(100, 150);
239 mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_BIAS_LPF_EN);
240 mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_TXDIV_EN);
241
242 return 0;
243}
244
245static void mtk_hdmi_pll_unprepare(struct clk_hw *hw)
246{
247 struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
248
249 dev_dbg(hdmi_phy->dev, "%s\n", __func__);
250
251 mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_TXDIV_EN);
252 mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_BIAS_LPF_EN);
253 usleep_range(100, 150);
254 mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_EN);
255 usleep_range(100, 150);
256 mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_BIAS_EN);
257 mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_POSDIV);
258 mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON1, RG_HDMITX_PLL_AUTOK_EN);
259 usleep_range(100, 150);
260}
261
262static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate,
263 unsigned long parent_rate)
264{
265 struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
266 unsigned int pre_div;
267 unsigned int div;
268
269 dev_dbg(hdmi_phy->dev, "%s: %lu Hz, parent: %lu Hz\n", __func__,
270 rate, parent_rate);
271
272 if (rate <= 27000000) {
273 pre_div = 0;
274 div = 3;
275 } else if (rate <= 74250000) {
276 pre_div = 1;
277 div = 2;
278 } else {
279 pre_div = 1;
280 div = 1;
281 }
282
283 mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON0,
284 (pre_div << PREDIV_SHIFT), RG_HDMITX_PLL_PREDIV);
285 mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON0, RG_HDMITX_PLL_POSDIV);
286 mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON0,
287 (0x1 << PLL_IC_SHIFT) | (0x1 << PLL_IR_SHIFT),
288 RG_HDMITX_PLL_IC | RG_HDMITX_PLL_IR);
289 mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON1,
290 (div << PLL_TXDIV_SHIFT), RG_HDMITX_PLL_TXDIV);
291 mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON0,
292 (0x1 << PLL_FBKSEL_SHIFT) | (19 << PLL_FBKDIV_SHIFT),
293 RG_HDMITX_PLL_FBKSEL | RG_HDMITX_PLL_FBKDIV);
294 mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON1,
295 (0x2 << PLL_DIVEN_SHIFT), RG_HDMITX_PLL_DIVEN);
296 mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON0,
297 (0xc << PLL_BP_SHIFT) | (0x2 << PLL_BC_SHIFT) |
298 (0x1 << PLL_BR_SHIFT),
299 RG_HDMITX_PLL_BP | RG_HDMITX_PLL_BC |
300 RG_HDMITX_PLL_BR);
301 mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3, RG_HDMITX_PRD_IMP_EN);
302 mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON4,
303 (0x3 << PRD_IBIAS_CLK_SHIFT) |
304 (0x3 << PRD_IBIAS_D2_SHIFT) |
305 (0x3 << PRD_IBIAS_D1_SHIFT) |
306 (0x3 << PRD_IBIAS_D0_SHIFT),
307 RG_HDMITX_PRD_IBIAS_CLK |
308 RG_HDMITX_PRD_IBIAS_D2 |
309 RG_HDMITX_PRD_IBIAS_D1 |
310 RG_HDMITX_PRD_IBIAS_D0);
311 mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON3,
312 (0x0 << DRV_IMP_EN_SHIFT), RG_HDMITX_DRV_IMP_EN);
313 mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6,
314 (hdmi_phy->drv_imp_clk << DRV_IMP_CLK_SHIFT) |
315 (hdmi_phy->drv_imp_d2 << DRV_IMP_D2_SHIFT) |
316 (hdmi_phy->drv_imp_d1 << DRV_IMP_D1_SHIFT) |
317 (hdmi_phy->drv_imp_d0 << DRV_IMP_D0_SHIFT),
318 RG_HDMITX_DRV_IMP_CLK | RG_HDMITX_DRV_IMP_D2 |
319 RG_HDMITX_DRV_IMP_D1 | RG_HDMITX_DRV_IMP_D0);
320 mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON5,
321 (hdmi_phy->ibias << DRV_IBIAS_CLK_SHIFT) |
322 (hdmi_phy->ibias << DRV_IBIAS_D2_SHIFT) |
323 (hdmi_phy->ibias << DRV_IBIAS_D1_SHIFT) |
324 (hdmi_phy->ibias << DRV_IBIAS_D0_SHIFT),
325 RG_HDMITX_DRV_IBIAS_CLK | RG_HDMITX_DRV_IBIAS_D2 |
326 RG_HDMITX_DRV_IBIAS_D1 | RG_HDMITX_DRV_IBIAS_D0);
327 return 0;
328}
329
330static long mtk_hdmi_pll_round_rate(struct clk_hw *hw, unsigned long rate,
331 unsigned long *parent_rate)
332{
333 struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
334
335 hdmi_phy->pll_rate = rate;
336 if (rate <= 74250000)
337 *parent_rate = rate;
338 else
339 *parent_rate = rate / 2;
340
341 return rate;
342}
343
344static unsigned long mtk_hdmi_pll_recalc_rate(struct clk_hw *hw,
345 unsigned long parent_rate)
346{
347 struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw);
348
349 return hdmi_phy->pll_rate;
350}
351
352static const struct clk_ops mtk_hdmi_pll_ops = {
353 .prepare = mtk_hdmi_pll_prepare,
354 .unprepare = mtk_hdmi_pll_unprepare,
355 .set_rate = mtk_hdmi_pll_set_rate,
356 .round_rate = mtk_hdmi_pll_round_rate,
357 .recalc_rate = mtk_hdmi_pll_recalc_rate,
358};
359
360static void mtk_hdmi_phy_enable_tmds(struct mtk_hdmi_phy *hdmi_phy)
361{
362 mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON3,
363 RG_HDMITX_SER_EN | RG_HDMITX_PRD_EN |
364 RG_HDMITX_DRV_EN);
365 usleep_range(100, 150);
366}
367
368static void mtk_hdmi_phy_disable_tmds(struct mtk_hdmi_phy *hdmi_phy)
369{
370 mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3,
371 RG_HDMITX_DRV_EN | RG_HDMITX_PRD_EN |
372 RG_HDMITX_SER_EN);
373}
374
375static int mtk_hdmi_phy_power_on(struct phy *phy)
376{
377 struct mtk_hdmi_phy *hdmi_phy = phy_get_drvdata(phy);
378 int ret;
379
380 ret = clk_prepare_enable(hdmi_phy->pll);
381 if (ret < 0)
382 return ret;
383
384 mtk_hdmi_phy_enable_tmds(hdmi_phy);
385
386 return 0;
387}
388
389static int mtk_hdmi_phy_power_off(struct phy *phy)
390{
391 struct mtk_hdmi_phy *hdmi_phy = phy_get_drvdata(phy);
392
393 mtk_hdmi_phy_disable_tmds(hdmi_phy);
394 clk_disable_unprepare(hdmi_phy->pll);
395
396 return 0;
397}
398
399static const struct phy_ops mtk_hdmi_phy_ops = {
400 .power_on = mtk_hdmi_phy_power_on,
401 .power_off = mtk_hdmi_phy_power_off,
402 .owner = THIS_MODULE,
403};
404
405static int mtk_hdmi_phy_probe(struct platform_device *pdev)
406{
407 struct device *dev = &pdev->dev;
408 struct mtk_hdmi_phy *hdmi_phy;
409 struct resource *mem;
410 struct clk *ref_clk;
411 const char *ref_clk_name;
412 struct clk_init_data clk_init = {
413 .ops = &mtk_hdmi_pll_ops,
414 .num_parents = 1,
415 .parent_names = (const char * const *)&ref_clk_name,
416 .flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_GATE,
417 };
418 struct phy *phy;
419 struct phy_provider *phy_provider;
420 int ret;
421
422 hdmi_phy = devm_kzalloc(dev, sizeof(*hdmi_phy), GFP_KERNEL);
423 if (!hdmi_phy)
424 return -ENOMEM;
425
426 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
427 hdmi_phy->regs = devm_ioremap_resource(dev, mem);
428 if (IS_ERR(hdmi_phy->regs)) {
429 ret = PTR_ERR(hdmi_phy->regs);
430 dev_err(dev, "Failed to get memory resource: %d\n", ret);
431 return ret;
432 }
433
434 ref_clk = devm_clk_get(dev, "pll_ref");
435 if (IS_ERR(ref_clk)) {
436 ret = PTR_ERR(ref_clk);
437 dev_err(&pdev->dev, "Failed to get PLL reference clock: %d\n",
438 ret);
439 return ret;
440 }
441 ref_clk_name = __clk_get_name(ref_clk);
442
443 ret = of_property_read_string(dev->of_node, "clock-output-names",
444 &clk_init.name);
445 if (ret < 0) {
446 dev_err(dev, "Failed to read clock-output-names: %d\n", ret);
447 return ret;
448 }
449
450 hdmi_phy->pll_hw.init = &clk_init;
451 hdmi_phy->pll = devm_clk_register(dev, &hdmi_phy->pll_hw);
452 if (IS_ERR(hdmi_phy->pll)) {
453 ret = PTR_ERR(hdmi_phy->pll);
454 dev_err(dev, "Failed to register PLL: %d\n", ret);
455 return ret;
456 }
457
458 ret = of_property_read_u32(dev->of_node, "mediatek,ibias",
459 &hdmi_phy->ibias);
460 if (ret < 0) {
461 dev_err(&pdev->dev, "Failed to get ibias: %d\n", ret);
462 return ret;
463 }
464
465 ret = of_property_read_u32(dev->of_node, "mediatek,ibias_up",
466 &hdmi_phy->ibias_up);
467 if (ret < 0) {
468 dev_err(&pdev->dev, "Failed to get ibias up: %d\n", ret);
469 return ret;
470 }
471
472 dev_info(dev, "Using default TX DRV impedance: 4.2k/36\n");
473 hdmi_phy->drv_imp_clk = 0x30;
474 hdmi_phy->drv_imp_d2 = 0x30;
475 hdmi_phy->drv_imp_d1 = 0x30;
476 hdmi_phy->drv_imp_d0 = 0x30;
477
478 phy = devm_phy_create(dev, NULL, &mtk_hdmi_phy_ops);
479 if (IS_ERR(phy)) {
480 dev_err(dev, "Failed to create HDMI PHY\n");
481 return PTR_ERR(phy);
482 }
483 phy_set_drvdata(phy, hdmi_phy);
484
485 phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
486 if (IS_ERR(phy_provider))
487 return PTR_ERR(phy_provider);
488
489 hdmi_phy->dev = dev;
490 return of_clk_add_provider(dev->of_node, of_clk_src_simple_get,
491 hdmi_phy->pll);
492}
493
494static int mtk_hdmi_phy_remove(struct platform_device *pdev)
495{
496 return 0;
497}
498
499static const struct of_device_id mtk_hdmi_phy_match[] = {
500 { .compatible = "mediatek,mt8173-hdmi-phy", },
501 {},
502};
503
504struct platform_driver mtk_hdmi_phy_driver = {
505 .probe = mtk_hdmi_phy_probe,
506 .remove = mtk_hdmi_phy_remove,
507 .driver = {
508 .name = "mediatek-hdmi-phy",
509 .of_match_table = mtk_hdmi_phy_match,
510 },
511};
512
513MODULE_AUTHOR("Jie Qiu <jie.qiu@mediatek.com>");
514MODULE_DESCRIPTION("MediaTek MT8173 HDMI PHY Driver");
515MODULE_LICENSE("GPL v2");
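
[Editor's note] mtk_hdmi_pll_set_rate() above selects the PLL pre-divider and TMDS output divider purely from two rate thresholds (27 MHz and 74.25 MHz). The sketch below is a minimal, self-contained restatement of only that threshold logic; the helper name and the sample rates in main() are illustrative and not taken from the merged code.

#include <stdio.h>

/* Mirrors the threshold logic in mtk_hdmi_pll_set_rate() above. */
static void pick_dividers(unsigned long rate, unsigned int *pre_div,
			  unsigned int *txdiv)
{
	if (rate <= 27000000) {
		*pre_div = 0;
		*txdiv = 3;
	} else if (rate <= 74250000) {
		*pre_div = 1;
		*txdiv = 2;
	} else {
		*pre_div = 1;
		*txdiv = 1;
	}
}

int main(void)
{
	const unsigned long rates[] = { 25175000, 27000000, 74250000, 148500000 };
	unsigned int i;

	for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++) {
		unsigned int pre_div, txdiv;

		pick_dividers(rates[i], &pre_div, &txdiv);
		printf("%9lu Hz -> PREDIV field %u, TXDIV field %u\n",
		       rates[i], pre_div, txdiv);
	}
	return 0;
}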
diff --git a/drivers/gpu/drm/mgag200/Kconfig b/drivers/gpu/drm/mgag200/Kconfig
index 3a1c5fbae54a..520e5e668d6c 100644
--- a/drivers/gpu/drm/mgag200/Kconfig
+++ b/drivers/gpu/drm/mgag200/Kconfig
@@ -1,11 +1,7 @@
 config DRM_MGAG200
 	tristate "Kernel modesetting driver for MGA G200 server engines"
 	depends on DRM && PCI
-	select FB_SYS_FILLRECT
-	select FB_SYS_COPYAREA
-	select FB_SYS_IMAGEBLIT
 	select DRM_KMS_HELPER
-	select DRM_KMS_FB_HELPER
 	select DRM_TTM
 	help
 	  This is a KMS driver for the MGA G200 server chips, it
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index ebb470ff7200..2b4b125eebc3 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -101,7 +101,7 @@ static struct drm_driver driver = {
 	.minor = DRIVER_MINOR,
 	.patchlevel = DRIVER_PATCHLEVEL,
 
-	.gem_free_object = mgag200_gem_free_object,
+	.gem_free_object_unlocked = mgag200_gem_free_object,
 	.dumb_create = mgag200_dumb_create,
 	.dumb_map_offset = mgag200_dumb_mmap_offset,
 	.dumb_destroy = drm_gem_dumb_destroy,
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index 615cbb08ba29..13798b3e6beb 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -17,8 +17,8 @@
 static void mga_user_framebuffer_destroy(struct drm_framebuffer *fb)
 {
 	struct mga_framebuffer *mga_fb = to_mga_framebuffer(fb);
-	if (mga_fb->obj)
-		drm_gem_object_unreference_unlocked(mga_fb->obj);
+
+	drm_gem_object_unreference_unlocked(mga_fb->obj);
 	drm_framebuffer_cleanup(fb);
 	kfree(fb);
 }
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index d347dca17267..6b21cb27e1cc 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1352,19 +1352,20 @@ static void mga_crtc_commit(struct drm_crtc *crtc)
  * use this for 8-bit mode so can't perform smooth fades on deeper modes,
  * but it's a requirement that we provide the function
  */
-static void mga_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
-			       u16 *blue, uint32_t start, uint32_t size)
+static int mga_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+			      u16 *blue, uint32_t size)
 {
 	struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
-	int end = (start + size > MGAG200_LUT_SIZE) ? MGAG200_LUT_SIZE : start + size;
 	int i;
 
-	for (i = start; i < end; i++) {
+	for (i = 0; i < size; i++) {
 		mga_crtc->lut_r[i] = red[i] >> 8;
 		mga_crtc->lut_g[i] = green[i] >> 8;
 		mga_crtc->lut_b[i] = blue[i] >> 8;
 	}
 	mga_crtc_load_lut(crtc);
+
+	return 0;
 }
 
 /* Simple cleanup function */
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 9d5083d0f1ee..68268e55d595 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -186,17 +186,6 @@ static void mgag200_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_r
 {
 }
 
-static int mgag200_bo_move(struct ttm_buffer_object *bo,
-			   bool evict, bool interruptible,
-			   bool no_wait_gpu,
-			   struct ttm_mem_reg *new_mem)
-{
-	int r;
-	r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
-	return r;
-}
-
-
 static void mgag200_ttm_backend_destroy(struct ttm_tt *tt)
 {
 	ttm_tt_fini(tt);
@@ -241,7 +230,7 @@ struct ttm_bo_driver mgag200_bo_driver = {
 	.ttm_tt_unpopulate = mgag200_ttm_tt_unpopulate,
 	.init_mem_type = mgag200_bo_init_mem_type,
 	.evict_flags = mgag200_bo_evict_flags,
-	.move = mgag200_bo_move,
+	.move = NULL,
 	.verify_access = mgag200_bo_verify_access,
 	.io_mem_reserve = &mgag200_ttm_io_mem_reserve,
 	.io_mem_free = &mgag200_ttm_io_mem_free,
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 167a4971f47c..7c7a0314a756 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -10,6 +10,7 @@ config DRM_MSM
 	select SHMEM
 	select TMPFS
 	select QCOM_SCM
+	select SND_SOC_HDMI_CODEC if SND_SOC
 	default y
 	help
 	  DRM/KMS driver for MSM/snapdragon.
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 60cb02624dc0..4e2806cf778c 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -35,6 +35,7 @@ msm-y := \
 	mdp/mdp5/mdp5_crtc.o \
 	mdp/mdp5/mdp5_encoder.o \
 	mdp/mdp5/mdp5_irq.o \
+	mdp/mdp5/mdp5_mdss.o \
 	mdp/mdp5/mdp5_kms.o \
 	mdp/mdp5/mdp5_plane.o \
 	mdp/mdp5/mdp5_smp.o \
@@ -45,6 +46,7 @@ msm-y := \
 	msm_fence.o \
 	msm_gem.o \
 	msm_gem_prime.o \
+	msm_gem_shrinker.o \
 	msm_gem_submit.o \
 	msm_gpu.o \
 	msm_iommu.o \
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 2aec27dbb5bb..f386f463278d 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -139,7 +139,7 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 	struct msm_drm_private *priv = gpu->dev->dev_private;
 	struct msm_ringbuffer *ring = gpu->rb;
-	unsigned i, ibs = 0;
+	unsigned i;
 
 	for (i = 0; i < submit->nr_cmds; i++) {
 		switch (submit->cmd[i].type) {
@@ -155,18 +155,11 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 				CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
 			OUT_RING(ring, submit->cmd[i].iova);
 			OUT_RING(ring, submit->cmd[i].size);
-			ibs++;
+			OUT_PKT2(ring);
 			break;
 		}
 	}
 
-	/* on a320, at least, we seem to need to pad things out to an
-	 * even number of qwords to avoid issue w/ CP hanging on wrap-
-	 * around:
-	 */
-	if (ibs % 2)
-		OUT_PKT2(ring);
-
 	OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
 	OUT_RING(ring, submit->fence->seqno);
 
@@ -407,7 +400,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 		return ret;
 	}
 
-	adreno_gpu->memptrs = msm_gem_vaddr(adreno_gpu->memptrs_bo);
+	adreno_gpu->memptrs = msm_gem_get_vaddr(adreno_gpu->memptrs_bo);
 	if (IS_ERR(adreno_gpu->memptrs)) {
 		dev_err(drm->dev, "could not vmap memptrs\n");
 		return -ENOMEM;
@@ -426,8 +419,12 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 void adreno_gpu_cleanup(struct adreno_gpu *gpu)
 {
 	if (gpu->memptrs_bo) {
+		if (gpu->memptrs)
+			msm_gem_put_vaddr(gpu->memptrs_bo);
+
 		if (gpu->memptrs_iova)
 			msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
+
 		drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
 	}
 	release_firmware(gpu->pm4);
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index 6edcd6f57e70..ec572f8389ed 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -29,7 +29,7 @@ static int dsi_get_phy(struct msm_dsi *msm_dsi)
 	struct platform_device *phy_pdev;
 	struct device_node *phy_node;
 
-	phy_node = of_parse_phandle(pdev->dev.of_node, "qcom,dsi-phy", 0);
+	phy_node = of_parse_phandle(pdev->dev.of_node, "phys", 0);
 	if (!phy_node) {
 		dev_err(&pdev->dev, "cannot find phy device\n");
 		return -ENXIO;
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
index 93c1ee094eac..63436d8ee470 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -29,6 +29,8 @@ static const struct msm_dsi_config apq8064_dsi_cfg = {
 	},
 	.bus_clk_names = dsi_v2_bus_clk_names,
 	.num_bus_clks = ARRAY_SIZE(dsi_v2_bus_clk_names),
+	.io_start = { 0x4700000, 0x5800000 },
+	.num_dsi = 2,
 };
 
 static const char * const dsi_6g_bus_clk_names[] = {
@@ -48,6 +50,8 @@ static const struct msm_dsi_config msm8974_apq8084_dsi_cfg = {
 	},
 	.bus_clk_names = dsi_6g_bus_clk_names,
 	.num_bus_clks = ARRAY_SIZE(dsi_6g_bus_clk_names),
+	.io_start = { 0xfd922800, 0xfd922b00 },
+	.num_dsi = 2,
 };
 
 static const char * const dsi_8916_bus_clk_names[] = {
@@ -66,6 +70,8 @@ static const struct msm_dsi_config msm8916_dsi_cfg = {
 	},
 	.bus_clk_names = dsi_8916_bus_clk_names,
 	.num_bus_clks = ARRAY_SIZE(dsi_8916_bus_clk_names),
+	.io_start = { 0x1a98000 },
+	.num_dsi = 1,
 };
 
 static const struct msm_dsi_config msm8994_dsi_cfg = {
@@ -84,6 +90,8 @@ static const struct msm_dsi_config msm8994_dsi_cfg = {
 	},
 	.bus_clk_names = dsi_6g_bus_clk_names,
 	.num_bus_clks = ARRAY_SIZE(dsi_6g_bus_clk_names),
+	.io_start = { 0xfd998000, 0xfd9a0000 },
+	.num_dsi = 2,
 };
 
 static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
index a68c836744a3..eeacc3232494 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
@@ -34,6 +34,8 @@ struct msm_dsi_config {
 	struct dsi_reg_config reg_cfg;
 	const char * const *bus_clk_names;
 	const int num_bus_clks;
+	const resource_size_t io_start[DSI_MAX];
+	const int num_dsi;
 };
 
 struct msm_dsi_cfg_handler {
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index a3e47ad83eb3..f05ed0e1f3d6 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -1066,7 +1066,7 @@ static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host,
 	}
 
 	if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
-		data = msm_gem_vaddr(msm_host->tx_gem_obj);
+		data = msm_gem_get_vaddr(msm_host->tx_gem_obj);
 		if (IS_ERR(data)) {
 			ret = PTR_ERR(data);
 			pr_err("%s: get vaddr failed, %d\n", __func__, ret);
@@ -1094,6 +1094,9 @@ static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host,
 	if (packet.size < len)
 		memset(data + packet.size, 0xff, len - packet.size);
 
+	if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G)
+		msm_gem_put_vaddr(msm_host->tx_gem_obj);
+
 	return len;
 }
 
@@ -1543,7 +1546,7 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
 	u32 lane_map[4];
 	int ret, i, len, num_lanes;
 
-	prop = of_find_property(ep, "qcom,data-lane-map", &len);
+	prop = of_find_property(ep, "data-lanes", &len);
 	if (!prop) {
 		dev_dbg(dev, "failed to find data lane mapping\n");
 		return -EINVAL;
@@ -1558,7 +1561,7 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
 
 	msm_host->num_data_lanes = num_lanes;
 
-	ret = of_property_read_u32_array(ep, "qcom,data-lane-map", lane_map,
+	ret = of_property_read_u32_array(ep, "data-lanes", lane_map,
 					 num_lanes);
 	if (ret) {
 		dev_err(dev, "failed to read lane data\n");
@@ -1573,8 +1576,19 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
 		const int *swap = supported_data_lane_swaps[i];
 		int j;
 
+		/*
+		 * the data-lanes array we get from DT has a logical->physical
+		 * mapping. The "data lane swap" register field represents
+		 * supported configurations in a physical->logical mapping.
+		 * Translate the DT mapping to what we understand and find a
+		 * configuration that works.
+		 */
 		for (j = 0; j < num_lanes; j++) {
-			if (swap[j] != lane_map[j])
+			if (lane_map[j] < 0 || lane_map[j] > 3)
+				dev_err(dev, "bad physical lane entry %u\n",
+					lane_map[j]);
+
+			if (swap[lane_map[j]] != j)
 				break;
 		}
 
@@ -1594,20 +1608,13 @@ static int dsi_host_parse_dt(struct msm_dsi_host *msm_host)
 	struct device_node *endpoint, *device_node;
 	int ret;
 
-	ret = of_property_read_u32(np, "qcom,dsi-host-index", &msm_host->id);
-	if (ret) {
-		dev_err(dev, "%s: host index not specified, ret=%d\n",
-			__func__, ret);
-		return ret;
-	}
-
 	/*
-	 * Get the first endpoint node. In our case, dsi has one output port
-	 * to which the panel is connected. Don't return an error if a port
-	 * isn't defined. It's possible that there is nothing connected to
-	 * the dsi output.
+	 * Get the endpoint of the output port of the DSI host. In our case,
+	 * this is mapped to port number with reg = 1. Don't return an error if
+	 * the remote endpoint isn't defined. It's possible that there is
+	 * nothing connected to the dsi output.
 	 */
-	endpoint = of_graph_get_next_endpoint(np, NULL);
+	endpoint = of_graph_get_endpoint_by_regs(np, 1, -1);
 	if (!endpoint) {
 		dev_dbg(dev, "%s: no endpoint\n", __func__);
 		return 0;
@@ -1648,6 +1655,25 @@ err:
 	return ret;
 }
 
+static int dsi_host_get_id(struct msm_dsi_host *msm_host)
+{
+	struct platform_device *pdev = msm_host->pdev;
+	const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
+	struct resource *res;
+	int i;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dsi_ctrl");
+	if (!res)
+		return -EINVAL;
+
+	for (i = 0; i < cfg->num_dsi; i++) {
+		if (cfg->io_start[i] == res->start)
+			return i;
+	}
+
+	return -EINVAL;
+}
+
 int msm_dsi_host_init(struct msm_dsi *msm_dsi)
 {
 	struct msm_dsi_host *msm_host = NULL;
@@ -1684,6 +1710,13 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
 		goto fail;
 	}
 
+	msm_host->id = dsi_host_get_id(msm_host);
+	if (msm_host->id < 0) {
+		ret = msm_host->id;
+		pr_err("%s: unable to identify DSI host index\n", __func__);
+		goto fail;
+	}
+
 	/* fixup base address by io offset */
 	msm_host->ctrl_base += msm_host->cfg_hnd->cfg->io_offset;
 
@@ -2245,9 +2278,9 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
 	}
 
 	msm_host->mode = drm_mode_duplicate(msm_host->dev, mode);
-	if (IS_ERR(msm_host->mode)) {
+	if (!msm_host->mode) {
 		pr_err("%s: cannot duplicate mode\n", __func__);
-		return PTR_ERR(msm_host->mode);
+		return -ENOMEM;
 	}
 
 	return 0;
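
[Editor's note] The lane-mapping hunk above checks the DT's logical-to-physical "data-lanes" array against each candidate physical-to-logical swap table and accepts a candidate when swap[lane_map[j]] == j for every logical lane j. The standalone sketch below demonstrates just that test; the candidate table here is hypothetical, since supported_data_lane_swaps[] itself is not shown in this diff.

#include <stdio.h>

/* Hypothetical candidate: physical lane -> logical lane (fully reversed). */
static const int swap_reversed[4] = { 3, 2, 1, 0 };

/* DT "data-lanes": logical lane j is wired to physical lane lane_map[j]. */
static const unsigned int lane_map[4] = { 3, 2, 1, 0 };

int main(void)
{
	int j, match = 1;

	for (j = 0; j < 4; j++) {
		if (lane_map[j] > 3) {
			printf("bad physical lane entry %u\n", lane_map[j]);
			match = 0;
			break;
		}
		/* Same acceptance test as dsi_host_parse_lane_data() above. */
		if (swap_reversed[lane_map[j]] != j) {
			match = 0;
			break;
		}
	}
	printf("candidate %s the DT mapping\n",
	       match ? "matches" : "does not match");
	return 0;
}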
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index e2f42d8ea294..f39386ed75e4 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -271,6 +271,30 @@ static const struct of_device_id dsi_phy_dt_match[] = {
 	{}
 };
 
+/*
+ * Currently, we only support one SoC for each PHY type. When we have multiple
+ * SoCs for the same PHY, we can try to make the index searching a bit more
+ * clever.
+ */
+static int dsi_phy_get_id(struct msm_dsi_phy *phy)
+{
+	struct platform_device *pdev = phy->pdev;
+	const struct msm_dsi_phy_cfg *cfg = phy->cfg;
+	struct resource *res;
+	int i;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dsi_phy");
+	if (!res)
+		return -EINVAL;
+
+	for (i = 0; i < cfg->num_dsi_phy; i++) {
+		if (cfg->io_start[i] == res->start)
+			return i;
+	}
+
+	return -EINVAL;
+}
+
 static int dsi_phy_driver_probe(struct platform_device *pdev)
 {
 	struct msm_dsi_phy *phy;
@@ -289,10 +313,10 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
 	phy->cfg = match->data;
 	phy->pdev = pdev;
 
-	ret = of_property_read_u32(dev->of_node,
-				"qcom,dsi-phy-index", &phy->id);
-	if (ret) {
-		dev_err(dev, "%s: PHY index not specified, %d\n",
+	phy->id = dsi_phy_get_id(phy);
+	if (phy->id < 0) {
+		ret = phy->id;
+		dev_err(dev, "%s: couldn't identify PHY index, %d\n",
 			__func__, ret);
 		goto fail;
 	}
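
[Editor's note] Both dsi_host_get_id() and dsi_phy_get_id() in the hunks above replace the old "qcom,dsi-host-index"/"qcom,dsi-phy-index" DT properties with a lookup that matches the block's MMIO base address against a per-SoC io_start[] table. The sketch below shows that lookup in isolation; the addresses are the dsi_phy_28nm_hpm_cfgs values from this diff, while the helper name and table layout are illustrative only.

#include <stdio.h>

/* Per-SoC table, modeled on io_start[] in msm_dsi_phy_cfg above. */
static const unsigned long io_start[] = { 0xfd922b00, 0xfd923100 };

/* Same idea as dsi_phy_get_id()/dsi_host_get_id(): base address -> index. */
static int get_id(unsigned long reg_base)
{
	unsigned int i;

	for (i = 0; i < sizeof(io_start) / sizeof(io_start[0]); i++) {
		if (io_start[i] == reg_base)
			return (int)i;
	}
	return -1;
}

int main(void)
{
	printf("0xfd923100 -> id %d\n", get_id(0xfd923100));	/* prints 1 */
	printf("0xdeadbeef -> id %d\n", get_id(0xdeadbeef));	/* prints -1 */
	return 0;
}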
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
index 0d54ed00386d..f24a85439b94 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -38,6 +38,8 @@ struct msm_dsi_phy_cfg {
 	 * Fill default H/W values in illegal cells, eg. cell {0, 1}.
 	 */
 	bool src_pll_truthtable[DSI_MAX][DSI_MAX];
+	const resource_size_t io_start[DSI_MAX];
+	const int num_dsi_phy;
 };
 
 extern const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs;
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
index f4bc11af849a..c757e2070cac 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_20nm.c
@@ -145,6 +145,8 @@ const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs = {
 	.ops = {
 		.enable = dsi_20nm_phy_enable,
 		.disable = dsi_20nm_phy_disable,
-	}
+	},
+	.io_start = { 0xfd998300, 0xfd9a0300 },
+	.num_dsi_phy = 2,
 };
 
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
index 96d1852af418..63d7fba31380 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm.c
@@ -145,6 +145,8 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_hpm_cfgs = {
 		.enable = dsi_28nm_phy_enable,
 		.disable = dsi_28nm_phy_disable,
 	},
+	.io_start = { 0xfd922b00, 0xfd923100 },
+	.num_dsi_phy = 2,
 };
 
 const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = {
@@ -160,5 +162,7 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_lp_cfgs = {
 		.enable = dsi_28nm_phy_enable,
 		.disable = dsi_28nm_phy_disable,
 	},
+	.io_start = { 0x1a98500 },
+	.num_dsi_phy = 1,
 };
 
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
index 213355a3e767..7bdb9de54968 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_28nm_8960.c
@@ -192,4 +192,6 @@ const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs = {
 		.enable = dsi_28nm_phy_enable,
 		.disable = dsi_28nm_phy_disable,
 	},
+	.io_start = { 0x4700300, 0x5800300 },
+	.num_dsi_phy = 2,
 };
diff --git a/drivers/gpu/drm/msm/edp/edp_connector.c b/drivers/gpu/drm/msm/edp/edp_connector.c
index 72360cd038c0..5960628ceb93 100644
--- a/drivers/gpu/drm/msm/edp/edp_connector.c
+++ b/drivers/gpu/drm/msm/edp/edp_connector.c
@@ -91,15 +91,6 @@ static int edp_connector_mode_valid(struct drm_connector *connector,
 	return MODE_OK;
 }
 
-static struct drm_encoder *
-edp_connector_best_encoder(struct drm_connector *connector)
-{
-	struct edp_connector *edp_connector = to_edp_connector(connector);
-
-	DBG("");
-	return edp_connector->edp->encoder;
-}
-
 static const struct drm_connector_funcs edp_connector_funcs = {
 	.dpms = drm_atomic_helper_connector_dpms,
 	.detect = edp_connector_detect,
@@ -113,7 +104,6 @@ static const struct drm_connector_funcs edp_connector_funcs = {
113static const struct drm_connector_helper_funcs edp_connector_helper_funcs = { 104static const struct drm_connector_helper_funcs edp_connector_helper_funcs = {
114 .get_modes = edp_connector_get_modes, 105 .get_modes = edp_connector_get_modes,
115 .mode_valid = edp_connector_mode_valid, 106 .mode_valid = edp_connector_mode_valid,
116 .best_encoder = edp_connector_best_encoder,
117}; 107};
118 108
119/* initialize connector */ 109/* initialize connector */
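
The driver-specific best_encoder callback is dropped here (and for the HDMI and LVDS connectors below) because, for atomic drivers, the probe helpers fall back to picking the connector's single attached encoder when .best_encoder is NULL. A rough sketch of that default, assuming exactly one encoder per connector as in these drivers (the function below is illustrative; the in-tree fallback is drm_atomic_helper_best_encoder()):

/* illustrative: what the helper fallback effectively does for a 1:1 connector */
static struct drm_encoder *only_encoder(struct drm_connector *connector)
{
	/* assumes a single attached encoder, as for the eDP/HDMI/LVDS connectors here */
	return drm_encoder_find(connector->dev, connector->encoder_ids[0]);
}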
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 51b9ea552f97..973720792236 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -19,6 +19,7 @@
19#include <linux/of_irq.h> 19#include <linux/of_irq.h>
20#include <linux/of_gpio.h> 20#include <linux/of_gpio.h>
21 21
22#include <sound/hdmi-codec.h>
22#include "hdmi.h" 23#include "hdmi.h"
23 24
24void msm_hdmi_set_mode(struct hdmi *hdmi, bool power_on) 25void msm_hdmi_set_mode(struct hdmi *hdmi, bool power_on)
@@ -434,6 +435,111 @@ static int msm_hdmi_get_gpio(struct device_node *of_node, const char *name)
434 return gpio; 435 return gpio;
435} 436}
436 437
438/*
439 * HDMI audio codec callbacks
440 */
441static int msm_hdmi_audio_hw_params(struct device *dev, void *data,
442 struct hdmi_codec_daifmt *daifmt,
443 struct hdmi_codec_params *params)
444{
445 struct hdmi *hdmi = dev_get_drvdata(dev);
446 unsigned int chan;
447 unsigned int channel_allocation = 0;
448 unsigned int rate;
449 unsigned int level_shift = 0; /* 0dB */
450 bool down_mix = false;
451
452 dev_dbg(dev, "%u Hz, %d bit, %d channels\n", params->sample_rate,
453 params->sample_width, params->cea.channels);
454
455 switch (params->cea.channels) {
456 case 2:
457 /* FR and FL speakers */
458 channel_allocation = 0;
459 chan = MSM_HDMI_AUDIO_CHANNEL_2;
460 break;
461 case 4:
462 /* FC, LFE, FR and FL speakers */
463 channel_allocation = 0x3;
464 chan = MSM_HDMI_AUDIO_CHANNEL_4;
465 break;
466 case 6:
467 /* RR, RL, FC, LFE, FR and FL speakers */
468 channel_allocation = 0x0B;
469 chan = MSM_HDMI_AUDIO_CHANNEL_6;
470 break;
471 case 8:
472 /* FRC, FLC, RR, RL, FC, LFE, FR and FL speakers */
473 channel_allocation = 0x1F;
474 chan = MSM_HDMI_AUDIO_CHANNEL_8;
475 break;
476 default:
477 return -EINVAL;
478 }
479
480 switch (params->sample_rate) {
481 case 32000:
482 rate = HDMI_SAMPLE_RATE_32KHZ;
483 break;
484 case 44100:
485 rate = HDMI_SAMPLE_RATE_44_1KHZ;
486 break;
487 case 48000:
488 rate = HDMI_SAMPLE_RATE_48KHZ;
489 break;
490 case 88200:
491 rate = HDMI_SAMPLE_RATE_88_2KHZ;
492 break;
493 case 96000:
494 rate = HDMI_SAMPLE_RATE_96KHZ;
495 break;
496 case 176400:
497 rate = HDMI_SAMPLE_RATE_176_4KHZ;
498 break;
499 case 192000:
500 rate = HDMI_SAMPLE_RATE_192KHZ;
501 break;
502 default:
503 dev_err(dev, "rate[%d] not supported!\n",
504 params->sample_rate);
505 return -EINVAL;
506 }
507
508 msm_hdmi_audio_set_sample_rate(hdmi, rate);
509 msm_hdmi_audio_info_setup(hdmi, 1, chan, channel_allocation,
510 level_shift, down_mix);
511
512 return 0;
513}
514
515static void msm_hdmi_audio_shutdown(struct device *dev, void *data)
516{
517 struct hdmi *hdmi = dev_get_drvdata(dev);
518
519 msm_hdmi_audio_info_setup(hdmi, 0, 0, 0, 0, 0);
520}
521
522static const struct hdmi_codec_ops msm_hdmi_audio_codec_ops = {
523 .hw_params = msm_hdmi_audio_hw_params,
524 .audio_shutdown = msm_hdmi_audio_shutdown,
525};
526
527static struct hdmi_codec_pdata codec_data = {
528 .ops = &msm_hdmi_audio_codec_ops,
529 .max_i2s_channels = 8,
530 .i2s = 1,
531};
532
533static int msm_hdmi_register_audio_driver(struct hdmi *hdmi, struct device *dev)
534{
535 hdmi->audio_pdev = platform_device_register_data(dev,
536 HDMI_CODEC_DRV_NAME,
537 PLATFORM_DEVID_AUTO,
538 &codec_data,
539 sizeof(codec_data));
540 return PTR_ERR_OR_ZERO(hdmi->audio_pdev);
541}
542
437static int msm_hdmi_bind(struct device *dev, struct device *master, void *data) 543static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
438{ 544{
439 struct drm_device *drm = dev_get_drvdata(master); 545 struct drm_device *drm = dev_get_drvdata(master);
@@ -441,7 +547,7 @@ static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
441 static struct hdmi_platform_config *hdmi_cfg; 547 static struct hdmi_platform_config *hdmi_cfg;
442 struct hdmi *hdmi; 548 struct hdmi *hdmi;
443 struct device_node *of_node = dev->of_node; 549 struct device_node *of_node = dev->of_node;
444 int i; 550 int i, err;
445 551
446 hdmi_cfg = (struct hdmi_platform_config *) 552 hdmi_cfg = (struct hdmi_platform_config *)
447 of_device_get_match_data(dev); 553 of_device_get_match_data(dev);
@@ -468,6 +574,12 @@ static int msm_hdmi_bind(struct device *dev, struct device *master, void *data)
468 return PTR_ERR(hdmi); 574 return PTR_ERR(hdmi);
469 priv->hdmi = hdmi; 575 priv->hdmi = hdmi;
470 576
577 err = msm_hdmi_register_audio_driver(hdmi, dev);
578 if (err) {
579 DRM_ERROR("Failed to attach an audio codec %d\n", err);
580 hdmi->audio_pdev = NULL;
581 }
582
471 return 0; 583 return 0;
472} 584}
473 585
@@ -477,6 +589,9 @@ static void msm_hdmi_unbind(struct device *dev, struct device *master,
477 struct drm_device *drm = dev_get_drvdata(master); 589 struct drm_device *drm = dev_get_drvdata(master);
478 struct msm_drm_private *priv = drm->dev_private; 590 struct msm_drm_private *priv = drm->dev_private;
479 if (priv->hdmi) { 591 if (priv->hdmi) {
592 if (priv->hdmi->audio_pdev)
593 platform_device_unregister(priv->hdmi->audio_pdev);
594
480 msm_hdmi_destroy(priv->hdmi); 595 msm_hdmi_destroy(priv->hdmi);
481 priv->hdmi = NULL; 596 priv->hdmi = NULL;
482 } 597 }
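
msm_hdmi_audio_hw_params() above translates CEA channel counts and sample rates into the MSM_HDMI_AUDIO_CHANNEL_* and HDMI_SAMPLE_RATE_* codes added to hdmi.h below. As an illustrative alternative to the rate switch (not part of the patch), the same mapping can be written as a table using the values shown above:

/* illustrative lookup table; values mirror the switch in msm_hdmi_audio_hw_params() */
static const struct { unsigned int hz; unsigned int code; } msm_hdmi_rate_map[] = {
	{  32000, HDMI_SAMPLE_RATE_32KHZ },
	{  44100, HDMI_SAMPLE_RATE_44_1KHZ },
	{  48000, HDMI_SAMPLE_RATE_48KHZ },
	{  88200, HDMI_SAMPLE_RATE_88_2KHZ },
	{  96000, HDMI_SAMPLE_RATE_96KHZ },
	{ 176400, HDMI_SAMPLE_RATE_176_4KHZ },
	{ 192000, HDMI_SAMPLE_RATE_192KHZ },
};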
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index bc7ba0bdee07..accc9a61611d 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -50,6 +50,7 @@ struct hdmi_hdcp_ctrl;
50struct hdmi { 50struct hdmi {
51 struct drm_device *dev; 51 struct drm_device *dev;
52 struct platform_device *pdev; 52 struct platform_device *pdev;
53 struct platform_device *audio_pdev;
53 54
54 const struct hdmi_platform_config *config; 55 const struct hdmi_platform_config *config;
55 56
@@ -210,6 +211,19 @@ static inline int msm_hdmi_pll_8996_init(struct platform_device *pdev)
210/* 211/*
211 * audio: 212 * audio:
212 */ 213 */
214/* Supported HDMI Audio channels and rates */
215#define MSM_HDMI_AUDIO_CHANNEL_2 0
216#define MSM_HDMI_AUDIO_CHANNEL_4 1
217#define MSM_HDMI_AUDIO_CHANNEL_6 2
218#define MSM_HDMI_AUDIO_CHANNEL_8 3
219
220#define HDMI_SAMPLE_RATE_32KHZ 0
221#define HDMI_SAMPLE_RATE_44_1KHZ 1
222#define HDMI_SAMPLE_RATE_48KHZ 2
223#define HDMI_SAMPLE_RATE_88_2KHZ 3
224#define HDMI_SAMPLE_RATE_96KHZ 4
225#define HDMI_SAMPLE_RATE_176_4KHZ 5
226#define HDMI_SAMPLE_RATE_192KHZ 6
213 227
214int msm_hdmi_audio_update(struct hdmi *hdmi); 228int msm_hdmi_audio_update(struct hdmi *hdmi);
215int msm_hdmi_audio_info_setup(struct hdmi *hdmi, bool enabled, 229int msm_hdmi_audio_info_setup(struct hdmi *hdmi, bool enabled,
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index b15d72683112..a2515b466ce5 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -406,13 +406,6 @@ static int msm_hdmi_connector_mode_valid(struct drm_connector *connector,
406 return 0; 406 return 0;
407} 407}
408 408
409static struct drm_encoder *
410msm_hdmi_connector_best_encoder(struct drm_connector *connector)
411{
412 struct hdmi_connector *hdmi_connector = to_hdmi_connector(connector);
413 return hdmi_connector->hdmi->encoder;
414}
415
416static const struct drm_connector_funcs hdmi_connector_funcs = { 409static const struct drm_connector_funcs hdmi_connector_funcs = {
417 .dpms = drm_atomic_helper_connector_dpms, 410 .dpms = drm_atomic_helper_connector_dpms,
418 .detect = hdmi_connector_detect, 411 .detect = hdmi_connector_detect,
@@ -426,7 +419,6 @@ static const struct drm_connector_funcs hdmi_connector_funcs = {
426static const struct drm_connector_helper_funcs msm_hdmi_connector_helper_funcs = { 419static const struct drm_connector_helper_funcs msm_hdmi_connector_helper_funcs = {
427 .get_modes = msm_hdmi_connector_get_modes, 420 .get_modes = msm_hdmi_connector_get_modes,
428 .mode_valid = msm_hdmi_connector_mode_valid, 421 .mode_valid = msm_hdmi_connector_mode_valid,
429 .best_encoder = msm_hdmi_connector_best_encoder,
430}; 422};
431 423
432/* initialize connector */ 424/* initialize connector */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c b/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
index 0baaaaabd002..6e767979aab3 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
@@ -1430,7 +1430,7 @@ struct hdmi_hdcp_ctrl *msm_hdmi_hdcp_init(struct hdmi *hdmi)
1430 1430
1431void msm_hdmi_hdcp_destroy(struct hdmi *hdmi) 1431void msm_hdmi_hdcp_destroy(struct hdmi *hdmi)
1432{ 1432{
1433 if (hdmi && hdmi->hdcp_ctrl) { 1433 if (hdmi) {
1434 kfree(hdmi->hdcp_ctrl); 1434 kfree(hdmi->hdcp_ctrl);
1435 hdmi->hdcp_ctrl = NULL; 1435 hdmi->hdcp_ctrl = NULL;
1436 } 1436 }
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
index 35ad78a1dc1c..24258e3025e3 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_dtv_encoder.c
@@ -23,7 +23,6 @@
23 23
24struct mdp4_dtv_encoder { 24struct mdp4_dtv_encoder {
25 struct drm_encoder base; 25 struct drm_encoder base;
26 struct clk *src_clk;
27 struct clk *hdmi_clk; 26 struct clk *hdmi_clk;
28 struct clk *mdp_clk; 27 struct clk *mdp_clk;
29 unsigned long int pixclock; 28 unsigned long int pixclock;
@@ -179,7 +178,6 @@ static void mdp4_dtv_encoder_disable(struct drm_encoder *encoder)
179 */ 178 */
180 mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_EXTERNAL_VSYNC); 179 mdp_irq_wait(&mdp4_kms->base, MDP4_IRQ_EXTERNAL_VSYNC);
181 180
182 clk_disable_unprepare(mdp4_dtv_encoder->src_clk);
183 clk_disable_unprepare(mdp4_dtv_encoder->hdmi_clk); 181 clk_disable_unprepare(mdp4_dtv_encoder->hdmi_clk);
184 clk_disable_unprepare(mdp4_dtv_encoder->mdp_clk); 182 clk_disable_unprepare(mdp4_dtv_encoder->mdp_clk);
185 183
@@ -208,19 +206,21 @@ static void mdp4_dtv_encoder_enable(struct drm_encoder *encoder)
208 206
209 bs_set(mdp4_dtv_encoder, 1); 207 bs_set(mdp4_dtv_encoder, 1);
210 208
211 DBG("setting src_clk=%lu", pc); 209 DBG("setting mdp_clk=%lu", pc);
212 210
213 ret = clk_set_rate(mdp4_dtv_encoder->src_clk, pc); 211 ret = clk_set_rate(mdp4_dtv_encoder->mdp_clk, pc);
214 if (ret) 212 if (ret)
215 dev_err(dev->dev, "failed to set src_clk to %lu: %d\n", pc, ret); 213 dev_err(dev->dev, "failed to set mdp_clk to %lu: %d\n",
216 clk_prepare_enable(mdp4_dtv_encoder->src_clk); 214 pc, ret);
217 ret = clk_prepare_enable(mdp4_dtv_encoder->hdmi_clk); 215
218 if (ret)
219 dev_err(dev->dev, "failed to enable hdmi_clk: %d\n", ret);
220 ret = clk_prepare_enable(mdp4_dtv_encoder->mdp_clk); 216 ret = clk_prepare_enable(mdp4_dtv_encoder->mdp_clk);
221 if (ret) 217 if (ret)
222 dev_err(dev->dev, "failed to enabled mdp_clk: %d\n", ret); 218 dev_err(dev->dev, "failed to enabled mdp_clk: %d\n", ret);
223 219
220 ret = clk_prepare_enable(mdp4_dtv_encoder->hdmi_clk);
221 if (ret)
222 dev_err(dev->dev, "failed to enable hdmi_clk: %d\n", ret);
223
224 mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 1); 224 mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 1);
225 225
226 mdp4_dtv_encoder->enabled = true; 226 mdp4_dtv_encoder->enabled = true;
@@ -235,7 +235,7 @@ static const struct drm_encoder_helper_funcs mdp4_dtv_encoder_helper_funcs = {
235long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate) 235long mdp4_dtv_round_pixclk(struct drm_encoder *encoder, unsigned long rate)
236{ 236{
237 struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder); 237 struct mdp4_dtv_encoder *mdp4_dtv_encoder = to_mdp4_dtv_encoder(encoder);
238 return clk_round_rate(mdp4_dtv_encoder->src_clk, rate); 238 return clk_round_rate(mdp4_dtv_encoder->mdp_clk, rate);
239} 239}
240 240
241/* initialize encoder */ 241/* initialize encoder */
@@ -257,13 +257,6 @@ struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev)
257 DRM_MODE_ENCODER_TMDS, NULL); 257 DRM_MODE_ENCODER_TMDS, NULL);
258 drm_encoder_helper_add(encoder, &mdp4_dtv_encoder_helper_funcs); 258 drm_encoder_helper_add(encoder, &mdp4_dtv_encoder_helper_funcs);
259 259
260 mdp4_dtv_encoder->src_clk = devm_clk_get(dev->dev, "src_clk");
261 if (IS_ERR(mdp4_dtv_encoder->src_clk)) {
262 dev_err(dev->dev, "failed to get src_clk\n");
263 ret = PTR_ERR(mdp4_dtv_encoder->src_clk);
264 goto fail;
265 }
266
267 mdp4_dtv_encoder->hdmi_clk = devm_clk_get(dev->dev, "hdmi_clk"); 260 mdp4_dtv_encoder->hdmi_clk = devm_clk_get(dev->dev, "hdmi_clk");
268 if (IS_ERR(mdp4_dtv_encoder->hdmi_clk)) { 261 if (IS_ERR(mdp4_dtv_encoder->hdmi_clk)) {
269 dev_err(dev->dev, "failed to get hdmi_clk\n"); 262 dev_err(dev->dev, "failed to get hdmi_clk\n");
@@ -271,9 +264,9 @@ struct drm_encoder *mdp4_dtv_encoder_init(struct drm_device *dev)
271 goto fail; 264 goto fail;
272 } 265 }
273 266
274 mdp4_dtv_encoder->mdp_clk = devm_clk_get(dev->dev, "mdp_clk"); 267 mdp4_dtv_encoder->mdp_clk = devm_clk_get(dev->dev, "tv_clk");
275 if (IS_ERR(mdp4_dtv_encoder->mdp_clk)) { 268 if (IS_ERR(mdp4_dtv_encoder->mdp_clk)) {
276 dev_err(dev->dev, "failed to get mdp_clk\n"); 269 dev_err(dev->dev, "failed to get tv_clk\n");
277 ret = PTR_ERR(mdp4_dtv_encoder->mdp_clk); 270 ret = PTR_ERR(mdp4_dtv_encoder->mdp_clk);
278 goto fail; 271 goto fail;
279 } 272 }
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 67442d50a6c2..7b39e89fbc2b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -106,31 +106,27 @@ out:
106static void mdp4_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state) 106static void mdp4_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *state)
107{ 107{
108 struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); 108 struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
109 int i, ncrtcs = state->dev->mode_config.num_crtc; 109 int i;
110 struct drm_crtc *crtc;
111 struct drm_crtc_state *crtc_state;
110 112
111 mdp4_enable(mdp4_kms); 113 mdp4_enable(mdp4_kms);
112 114
113 /* see 119ecb7fd */ 115 /* see 119ecb7fd */
114 for (i = 0; i < ncrtcs; i++) { 116 for_each_crtc_in_state(state, crtc, crtc_state, i)
115 struct drm_crtc *crtc = state->crtcs[i];
116 if (!crtc)
117 continue;
118 drm_crtc_vblank_get(crtc); 117 drm_crtc_vblank_get(crtc);
119 }
120} 118}
121 119
122static void mdp4_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state) 120static void mdp4_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
123{ 121{
124 struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); 122 struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
125 int i, ncrtcs = state->dev->mode_config.num_crtc; 123 int i;
124 struct drm_crtc *crtc;
125 struct drm_crtc_state *crtc_state;
126 126
127 /* see 119ecb7fd */ 127 /* see 119ecb7fd */
128 for (i = 0; i < ncrtcs; i++) { 128 for_each_crtc_in_state(state, crtc, crtc_state, i)
129 struct drm_crtc *crtc = state->crtcs[i];
130 if (!crtc)
131 continue;
132 drm_crtc_vblank_put(crtc); 129 drm_crtc_vblank_put(crtc);
133 }
134 130
135 mdp4_disable(mdp4_kms); 131 mdp4_disable(mdp4_kms);
136} 132}
@@ -162,6 +158,7 @@ static const char * const iommu_ports[] = {
162static void mdp4_destroy(struct msm_kms *kms) 158static void mdp4_destroy(struct msm_kms *kms)
163{ 159{
164 struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); 160 struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
161 struct device *dev = mdp4_kms->dev->dev;
165 struct msm_mmu *mmu = mdp4_kms->mmu; 162 struct msm_mmu *mmu = mdp4_kms->mmu;
166 163
167 if (mmu) { 164 if (mmu) {
@@ -171,8 +168,11 @@ static void mdp4_destroy(struct msm_kms *kms)
171 168
172 if (mdp4_kms->blank_cursor_iova) 169 if (mdp4_kms->blank_cursor_iova)
173 msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id); 170 msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id);
174 if (mdp4_kms->blank_cursor_bo) 171 drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo);
175 drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo); 172
173 if (mdp4_kms->rpm_enabled)
174 pm_runtime_disable(dev);
175
176 kfree(mdp4_kms); 176 kfree(mdp4_kms);
177} 177}
178 178
@@ -440,7 +440,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
440 struct mdp4_kms *mdp4_kms; 440 struct mdp4_kms *mdp4_kms;
441 struct msm_kms *kms = NULL; 441 struct msm_kms *kms = NULL;
442 struct msm_mmu *mmu; 442 struct msm_mmu *mmu;
443 int ret; 443 int irq, ret;
444 444
445 mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL); 445 mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
446 if (!mdp4_kms) { 446 if (!mdp4_kms) {
@@ -461,6 +461,15 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
461 goto fail; 461 goto fail;
462 } 462 }
463 463
464 irq = platform_get_irq(pdev, 0);
465 if (irq < 0) {
466 ret = irq;
467 dev_err(dev->dev, "failed to get irq: %d\n", ret);
468 goto fail;
469 }
470
471 kms->irq = irq;
472
464 /* NOTE: driver for this regulator still missing upstream.. use 473 /* NOTE: driver for this regulator still missing upstream.. use
465 * _get_exclusive() and ignore the error if it does not exist 474 * _get_exclusive() and ignore the error if it does not exist
466 * (and hope that the bootloader left it on for us) 475 * (and hope that the bootloader left it on for us)
@@ -496,7 +505,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
496 goto fail; 505 goto fail;
497 } 506 }
498 507
499 mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "mdp_axi_clk"); 508 mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "bus_clk");
500 if (IS_ERR(mdp4_kms->axi_clk)) { 509 if (IS_ERR(mdp4_kms->axi_clk)) {
501 dev_err(dev->dev, "failed to get axi_clk\n"); 510 dev_err(dev->dev, "failed to get axi_clk\n");
502 ret = PTR_ERR(mdp4_kms->axi_clk); 511 ret = PTR_ERR(mdp4_kms->axi_clk);
@@ -506,6 +515,9 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
506 clk_set_rate(mdp4_kms->clk, config->max_clk); 515 clk_set_rate(mdp4_kms->clk, config->max_clk);
507 clk_set_rate(mdp4_kms->lut_clk, config->max_clk); 516 clk_set_rate(mdp4_kms->lut_clk, config->max_clk);
508 517
518 pm_runtime_enable(dev->dev);
519 mdp4_kms->rpm_enabled = true;
520
509 /* make sure things are off before attaching iommu (bootloader could 521 /* make sure things are off before attaching iommu (bootloader could
510 * have left things on, in which case we'll start getting faults if 522 * have left things on, in which case we'll start getting faults if
511 * we don't disable): 523 * we don't disable):
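
mdp4_prepare_commit()/mdp4_complete_commit() above switch from walking state->crtcs[] by hand to the for_each_crtc_in_state() iterator, which only visits CRTCs that are actually part of the atomic state, so the manual NULL checks disappear. A minimal sketch of the pattern, assuming the 4.8-era macro from <drm/drm_atomic.h> (the wrapper function itself is illustrative):

/* illustrative wrapper around the iteration pattern used above */
static void hold_vblank_refs(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i;

	for_each_crtc_in_state(state, crtc, crtc_state, i)
		drm_crtc_vblank_get(crtc);	/* matching _put() runs in complete_commit */
}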
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index c5d045d5680d..25fb83997119 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -47,6 +47,8 @@ struct mdp4_kms {
47 47
48 struct mdp_irq error_handler; 48 struct mdp_irq error_handler;
49 49
50 bool rpm_enabled;
51
50 /* empty/blank cursor bo to use when cursor is "disabled" */ 52 /* empty/blank cursor bo to use when cursor is "disabled" */
51 struct drm_gem_object *blank_cursor_bo; 53 struct drm_gem_object *blank_cursor_bo;
52 uint32_t blank_cursor_iova; 54 uint32_t blank_cursor_iova;
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
index 2648cd7631ef..353429b05733 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
@@ -90,14 +90,6 @@ static int mdp4_lvds_connector_mode_valid(struct drm_connector *connector,
90 return MODE_OK; 90 return MODE_OK;
91} 91}
92 92
93static struct drm_encoder *
94mdp4_lvds_connector_best_encoder(struct drm_connector *connector)
95{
96 struct mdp4_lvds_connector *mdp4_lvds_connector =
97 to_mdp4_lvds_connector(connector);
98 return mdp4_lvds_connector->encoder;
99}
100
101static const struct drm_connector_funcs mdp4_lvds_connector_funcs = { 93static const struct drm_connector_funcs mdp4_lvds_connector_funcs = {
102 .dpms = drm_atomic_helper_connector_dpms, 94 .dpms = drm_atomic_helper_connector_dpms,
103 .detect = mdp4_lvds_connector_detect, 95 .detect = mdp4_lvds_connector_detect,
@@ -111,7 +103,6 @@ static const struct drm_connector_funcs mdp4_lvds_connector_funcs = {
111static const struct drm_connector_helper_funcs mdp4_lvds_connector_helper_funcs = { 103static const struct drm_connector_helper_funcs mdp4_lvds_connector_helper_funcs = {
112 .get_modes = mdp4_lvds_connector_get_modes, 104 .get_modes = mdp4_lvds_connector_get_modes,
113 .mode_valid = mdp4_lvds_connector_mode_valid, 105 .mode_valid = mdp4_lvds_connector_mode_valid,
114 .best_encoder = mdp4_lvds_connector_best_encoder,
115}; 106};
116 107
117/* initialize connector */ 108/* initialize connector */
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
index b275ce11b24b..ca6ca30650a0 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
@@ -8,19 +8,11 @@ http://github.com/freedreno/envytools/
8git clone https://github.com/freedreno/envytools.git 8git clone https://github.com/freedreno/envytools.git
9 9
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14) 11- /local/mnt/workspace/source_trees/envytools/rnndb/../rnndb/mdp/mdp5.xml ( 36965 bytes, from 2016-05-10 05:06:30)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21) 12- /local/mnt/workspace/source_trees/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-05-09 06:32:54)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14) 13- /local/mnt/workspace/source_trees/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2016-01-07 08:45:55)
14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28) 14
15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28) 15Copyright (C) 2013-2016 by the following authors:
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 27887 bytes, from 2015-10-22 16:34:52)
17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18)
21- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
22
23Copyright (C) 2013-2015 by the following authors:
24- Rob Clark <robdclark@gmail.com> (robclark) 16- Rob Clark <robdclark@gmail.com> (robclark)
25- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin) 17- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
26 18
@@ -198,118 +190,109 @@ static inline uint32_t MDSS_HW_VERSION_MAJOR(uint32_t val)
198#define MDSS_HW_INTR_STATUS_INTR_HDMI 0x00000100 190#define MDSS_HW_INTR_STATUS_INTR_HDMI 0x00000100
199#define MDSS_HW_INTR_STATUS_INTR_EDP 0x00001000 191#define MDSS_HW_INTR_STATUS_INTR_EDP 0x00001000
200 192
201static inline uint32_t __offset_MDP(uint32_t idx) 193#define REG_MDP5_HW_VERSION 0x00000000
202{ 194#define MDP5_HW_VERSION_STEP__MASK 0x0000ffff
203 switch (idx) { 195#define MDP5_HW_VERSION_STEP__SHIFT 0
204 case 0: return (mdp5_cfg->mdp.base[0]); 196static inline uint32_t MDP5_HW_VERSION_STEP(uint32_t val)
205 default: return INVALID_IDX(idx);
206 }
207}
208static inline uint32_t REG_MDP5_MDP(uint32_t i0) { return 0x00000000 + __offset_MDP(i0); }
209
210static inline uint32_t REG_MDP5_MDP_HW_VERSION(uint32_t i0) { return 0x00000000 + __offset_MDP(i0); }
211#define MDP5_MDP_HW_VERSION_STEP__MASK 0x0000ffff
212#define MDP5_MDP_HW_VERSION_STEP__SHIFT 0
213static inline uint32_t MDP5_MDP_HW_VERSION_STEP(uint32_t val)
214{ 197{
215 return ((val) << MDP5_MDP_HW_VERSION_STEP__SHIFT) & MDP5_MDP_HW_VERSION_STEP__MASK; 198 return ((val) << MDP5_HW_VERSION_STEP__SHIFT) & MDP5_HW_VERSION_STEP__MASK;
216} 199}
217#define MDP5_MDP_HW_VERSION_MINOR__MASK 0x0fff0000 200#define MDP5_HW_VERSION_MINOR__MASK 0x0fff0000
218#define MDP5_MDP_HW_VERSION_MINOR__SHIFT 16 201#define MDP5_HW_VERSION_MINOR__SHIFT 16
219static inline uint32_t MDP5_MDP_HW_VERSION_MINOR(uint32_t val) 202static inline uint32_t MDP5_HW_VERSION_MINOR(uint32_t val)
220{ 203{
221 return ((val) << MDP5_MDP_HW_VERSION_MINOR__SHIFT) & MDP5_MDP_HW_VERSION_MINOR__MASK; 204 return ((val) << MDP5_HW_VERSION_MINOR__SHIFT) & MDP5_HW_VERSION_MINOR__MASK;
222} 205}
223#define MDP5_MDP_HW_VERSION_MAJOR__MASK 0xf0000000 206#define MDP5_HW_VERSION_MAJOR__MASK 0xf0000000
224#define MDP5_MDP_HW_VERSION_MAJOR__SHIFT 28 207#define MDP5_HW_VERSION_MAJOR__SHIFT 28
225static inline uint32_t MDP5_MDP_HW_VERSION_MAJOR(uint32_t val) 208static inline uint32_t MDP5_HW_VERSION_MAJOR(uint32_t val)
226{ 209{
227 return ((val) << MDP5_MDP_HW_VERSION_MAJOR__SHIFT) & MDP5_MDP_HW_VERSION_MAJOR__MASK; 210 return ((val) << MDP5_HW_VERSION_MAJOR__SHIFT) & MDP5_HW_VERSION_MAJOR__MASK;
228} 211}
229 212
230static inline uint32_t REG_MDP5_MDP_DISP_INTF_SEL(uint32_t i0) { return 0x00000004 + __offset_MDP(i0); } 213#define REG_MDP5_DISP_INTF_SEL 0x00000004
231#define MDP5_MDP_DISP_INTF_SEL_INTF0__MASK 0x000000ff 214#define MDP5_DISP_INTF_SEL_INTF0__MASK 0x000000ff
232#define MDP5_MDP_DISP_INTF_SEL_INTF0__SHIFT 0 215#define MDP5_DISP_INTF_SEL_INTF0__SHIFT 0
233static inline uint32_t MDP5_MDP_DISP_INTF_SEL_INTF0(enum mdp5_intf_type val) 216static inline uint32_t MDP5_DISP_INTF_SEL_INTF0(enum mdp5_intf_type val)
234{ 217{
235 return ((val) << MDP5_MDP_DISP_INTF_SEL_INTF0__SHIFT) & MDP5_MDP_DISP_INTF_SEL_INTF0__MASK; 218 return ((val) << MDP5_DISP_INTF_SEL_INTF0__SHIFT) & MDP5_DISP_INTF_SEL_INTF0__MASK;
236} 219}
237#define MDP5_MDP_DISP_INTF_SEL_INTF1__MASK 0x0000ff00 220#define MDP5_DISP_INTF_SEL_INTF1__MASK 0x0000ff00
238#define MDP5_MDP_DISP_INTF_SEL_INTF1__SHIFT 8 221#define MDP5_DISP_INTF_SEL_INTF1__SHIFT 8
239static inline uint32_t MDP5_MDP_DISP_INTF_SEL_INTF1(enum mdp5_intf_type val) 222static inline uint32_t MDP5_DISP_INTF_SEL_INTF1(enum mdp5_intf_type val)
240{ 223{
241 return ((val) << MDP5_MDP_DISP_INTF_SEL_INTF1__SHIFT) & MDP5_MDP_DISP_INTF_SEL_INTF1__MASK; 224 return ((val) << MDP5_DISP_INTF_SEL_INTF1__SHIFT) & MDP5_DISP_INTF_SEL_INTF1__MASK;
242} 225}
243#define MDP5_MDP_DISP_INTF_SEL_INTF2__MASK 0x00ff0000 226#define MDP5_DISP_INTF_SEL_INTF2__MASK 0x00ff0000
244#define MDP5_MDP_DISP_INTF_SEL_INTF2__SHIFT 16 227#define MDP5_DISP_INTF_SEL_INTF2__SHIFT 16
245static inline uint32_t MDP5_MDP_DISP_INTF_SEL_INTF2(enum mdp5_intf_type val) 228static inline uint32_t MDP5_DISP_INTF_SEL_INTF2(enum mdp5_intf_type val)
246{ 229{
247 return ((val) << MDP5_MDP_DISP_INTF_SEL_INTF2__SHIFT) & MDP5_MDP_DISP_INTF_SEL_INTF2__MASK; 230 return ((val) << MDP5_DISP_INTF_SEL_INTF2__SHIFT) & MDP5_DISP_INTF_SEL_INTF2__MASK;
248} 231}
249#define MDP5_MDP_DISP_INTF_SEL_INTF3__MASK 0xff000000 232#define MDP5_DISP_INTF_SEL_INTF3__MASK 0xff000000
250#define MDP5_MDP_DISP_INTF_SEL_INTF3__SHIFT 24 233#define MDP5_DISP_INTF_SEL_INTF3__SHIFT 24
251static inline uint32_t MDP5_MDP_DISP_INTF_SEL_INTF3(enum mdp5_intf_type val) 234static inline uint32_t MDP5_DISP_INTF_SEL_INTF3(enum mdp5_intf_type val)
252{ 235{
253 return ((val) << MDP5_MDP_DISP_INTF_SEL_INTF3__SHIFT) & MDP5_MDP_DISP_INTF_SEL_INTF3__MASK; 236 return ((val) << MDP5_DISP_INTF_SEL_INTF3__SHIFT) & MDP5_DISP_INTF_SEL_INTF3__MASK;
254} 237}
255 238
256static inline uint32_t REG_MDP5_MDP_INTR_EN(uint32_t i0) { return 0x00000010 + __offset_MDP(i0); } 239#define REG_MDP5_INTR_EN 0x00000010
257 240
258static inline uint32_t REG_MDP5_MDP_INTR_STATUS(uint32_t i0) { return 0x00000014 + __offset_MDP(i0); } 241#define REG_MDP5_INTR_STATUS 0x00000014
259 242
260static inline uint32_t REG_MDP5_MDP_INTR_CLEAR(uint32_t i0) { return 0x00000018 + __offset_MDP(i0); } 243#define REG_MDP5_INTR_CLEAR 0x00000018
261 244
262static inline uint32_t REG_MDP5_MDP_HIST_INTR_EN(uint32_t i0) { return 0x0000001c + __offset_MDP(i0); } 245#define REG_MDP5_HIST_INTR_EN 0x0000001c
263 246
264static inline uint32_t REG_MDP5_MDP_HIST_INTR_STATUS(uint32_t i0) { return 0x00000020 + __offset_MDP(i0); } 247#define REG_MDP5_HIST_INTR_STATUS 0x00000020
265 248
266static inline uint32_t REG_MDP5_MDP_HIST_INTR_CLEAR(uint32_t i0) { return 0x00000024 + __offset_MDP(i0); } 249#define REG_MDP5_HIST_INTR_CLEAR 0x00000024
267 250
268static inline uint32_t REG_MDP5_MDP_SPARE_0(uint32_t i0) { return 0x00000028 + __offset_MDP(i0); } 251#define REG_MDP5_SPARE_0 0x00000028
269#define MDP5_MDP_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN 0x00000001 252#define MDP5_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN 0x00000001
270 253
271static inline uint32_t REG_MDP5_MDP_SMP_ALLOC_W(uint32_t i0, uint32_t i1) { return 0x00000080 + __offset_MDP(i0) + 0x4*i1; } 254static inline uint32_t REG_MDP5_SMP_ALLOC_W(uint32_t i0) { return 0x00000080 + 0x4*i0; }
272 255
273static inline uint32_t REG_MDP5_MDP_SMP_ALLOC_W_REG(uint32_t i0, uint32_t i1) { return 0x00000080 + __offset_MDP(i0) + 0x4*i1; } 256static inline uint32_t REG_MDP5_SMP_ALLOC_W_REG(uint32_t i0) { return 0x00000080 + 0x4*i0; }
274#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__MASK 0x000000ff 257#define MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK 0x000000ff
275#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__SHIFT 0 258#define MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT 0
276static inline uint32_t MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0(uint32_t val) 259static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT0(uint32_t val)
277{ 260{
278 return ((val) << MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__SHIFT) & MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__MASK; 261 return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK;
279} 262}
280#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__MASK 0x0000ff00 263#define MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK 0x0000ff00
281#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__SHIFT 8 264#define MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT 8
282static inline uint32_t MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1(uint32_t val) 265static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT1(uint32_t val)
283{ 266{
284 return ((val) << MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__SHIFT) & MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__MASK; 267 return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK;
285} 268}
286#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__MASK 0x00ff0000 269#define MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK 0x00ff0000
287#define MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__SHIFT 16 270#define MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT 16
288static inline uint32_t MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2(uint32_t val) 271static inline uint32_t MDP5_SMP_ALLOC_W_REG_CLIENT2(uint32_t val)
289{ 272{
290 return ((val) << MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__SHIFT) & MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__MASK; 273 return ((val) << MDP5_SMP_ALLOC_W_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK;
291} 274}
292 275
293static inline uint32_t REG_MDP5_MDP_SMP_ALLOC_R(uint32_t i0, uint32_t i1) { return 0x00000130 + __offset_MDP(i0) + 0x4*i1; } 276static inline uint32_t REG_MDP5_SMP_ALLOC_R(uint32_t i0) { return 0x00000130 + 0x4*i0; }
294 277
295static inline uint32_t REG_MDP5_MDP_SMP_ALLOC_R_REG(uint32_t i0, uint32_t i1) { return 0x00000130 + __offset_MDP(i0) + 0x4*i1; } 278static inline uint32_t REG_MDP5_SMP_ALLOC_R_REG(uint32_t i0) { return 0x00000130 + 0x4*i0; }
296#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0__MASK 0x000000ff 279#define MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK 0x000000ff
297#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0__SHIFT 0 280#define MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT 0
298static inline uint32_t MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0(uint32_t val) 281static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT0(uint32_t val)
299{ 282{
300 return ((val) << MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0__SHIFT) & MDP5_MDP_SMP_ALLOC_R_REG_CLIENT0__MASK; 283 return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT0__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT0__MASK;
301} 284}
302#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1__MASK 0x0000ff00 285#define MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK 0x0000ff00
303#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1__SHIFT 8 286#define MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT 8
304static inline uint32_t MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1(uint32_t val) 287static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT1(uint32_t val)
305{ 288{
306 return ((val) << MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1__SHIFT) & MDP5_MDP_SMP_ALLOC_R_REG_CLIENT1__MASK; 289 return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT1__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT1__MASK;
307} 290}
308#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2__MASK 0x00ff0000 291#define MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK 0x00ff0000
309#define MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2__SHIFT 16 292#define MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT 16
310static inline uint32_t MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2(uint32_t val) 293static inline uint32_t MDP5_SMP_ALLOC_R_REG_CLIENT2(uint32_t val)
311{ 294{
312 return ((val) << MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2__SHIFT) & MDP5_MDP_SMP_ALLOC_R_REG_CLIENT2__MASK; 295 return ((val) << MDP5_SMP_ALLOC_R_REG_CLIENT2__SHIFT) & MDP5_SMP_ALLOC_R_REG_CLIENT2__MASK;
313} 296}
314 297
315static inline uint32_t __offset_IGC(enum mdp5_igc_type idx) 298static inline uint32_t __offset_IGC(enum mdp5_igc_type idx)
@@ -322,35 +305,35 @@ static inline uint32_t __offset_IGC(enum mdp5_igc_type idx)
322 default: return INVALID_IDX(idx); 305 default: return INVALID_IDX(idx);
323 } 306 }
324} 307}
325static inline uint32_t REG_MDP5_MDP_IGC(uint32_t i0, enum mdp5_igc_type i1) { return 0x00000000 + __offset_MDP(i0) + __offset_IGC(i1); } 308static inline uint32_t REG_MDP5_IGC(enum mdp5_igc_type i0) { return 0x00000000 + __offset_IGC(i0); }
326 309
327static inline uint32_t REG_MDP5_MDP_IGC_LUT(uint32_t i0, enum mdp5_igc_type i1, uint32_t i2) { return 0x00000000 + __offset_MDP(i0) + __offset_IGC(i1) + 0x4*i2; } 310static inline uint32_t REG_MDP5_IGC_LUT(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; }
328 311
329static inline uint32_t REG_MDP5_MDP_IGC_LUT_REG(uint32_t i0, enum mdp5_igc_type i1, uint32_t i2) { return 0x00000000 + __offset_MDP(i0) + __offset_IGC(i1) + 0x4*i2; } 312static inline uint32_t REG_MDP5_IGC_LUT_REG(enum mdp5_igc_type i0, uint32_t i1) { return 0x00000000 + __offset_IGC(i0) + 0x4*i1; }
330#define MDP5_MDP_IGC_LUT_REG_VAL__MASK 0x00000fff 313#define MDP5_IGC_LUT_REG_VAL__MASK 0x00000fff
331#define MDP5_MDP_IGC_LUT_REG_VAL__SHIFT 0 314#define MDP5_IGC_LUT_REG_VAL__SHIFT 0
332static inline uint32_t MDP5_MDP_IGC_LUT_REG_VAL(uint32_t val) 315static inline uint32_t MDP5_IGC_LUT_REG_VAL(uint32_t val)
333{ 316{
334 return ((val) << MDP5_MDP_IGC_LUT_REG_VAL__SHIFT) & MDP5_MDP_IGC_LUT_REG_VAL__MASK; 317 return ((val) << MDP5_IGC_LUT_REG_VAL__SHIFT) & MDP5_IGC_LUT_REG_VAL__MASK;
335} 318}
336#define MDP5_MDP_IGC_LUT_REG_INDEX_UPDATE 0x02000000 319#define MDP5_IGC_LUT_REG_INDEX_UPDATE 0x02000000
337#define MDP5_MDP_IGC_LUT_REG_DISABLE_PIPE_0 0x10000000 320#define MDP5_IGC_LUT_REG_DISABLE_PIPE_0 0x10000000
338#define MDP5_MDP_IGC_LUT_REG_DISABLE_PIPE_1 0x20000000 321#define MDP5_IGC_LUT_REG_DISABLE_PIPE_1 0x20000000
339#define MDP5_MDP_IGC_LUT_REG_DISABLE_PIPE_2 0x40000000 322#define MDP5_IGC_LUT_REG_DISABLE_PIPE_2 0x40000000
340 323
341static inline uint32_t REG_MDP5_MDP_SPLIT_DPL_EN(uint32_t i0) { return 0x000002f4 + __offset_MDP(i0); } 324#define REG_MDP5_SPLIT_DPL_EN 0x000002f4
342 325
343static inline uint32_t REG_MDP5_MDP_SPLIT_DPL_UPPER(uint32_t i0) { return 0x000002f8 + __offset_MDP(i0); } 326#define REG_MDP5_SPLIT_DPL_UPPER 0x000002f8
344#define MDP5_MDP_SPLIT_DPL_UPPER_SMART_PANEL 0x00000002 327#define MDP5_SPLIT_DPL_UPPER_SMART_PANEL 0x00000002
345#define MDP5_MDP_SPLIT_DPL_UPPER_SMART_PANEL_FREE_RUN 0x00000004 328#define MDP5_SPLIT_DPL_UPPER_SMART_PANEL_FREE_RUN 0x00000004
346#define MDP5_MDP_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX 0x00000010 329#define MDP5_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX 0x00000010
347#define MDP5_MDP_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX 0x00000100 330#define MDP5_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX 0x00000100
348 331
349static inline uint32_t REG_MDP5_MDP_SPLIT_DPL_LOWER(uint32_t i0) { return 0x000003f0 + __offset_MDP(i0); } 332#define REG_MDP5_SPLIT_DPL_LOWER 0x000003f0
350#define MDP5_MDP_SPLIT_DPL_LOWER_SMART_PANEL 0x00000002 333#define MDP5_SPLIT_DPL_LOWER_SMART_PANEL 0x00000002
351#define MDP5_MDP_SPLIT_DPL_LOWER_SMART_PANEL_FREE_RUN 0x00000004 334#define MDP5_SPLIT_DPL_LOWER_SMART_PANEL_FREE_RUN 0x00000004
352#define MDP5_MDP_SPLIT_DPL_LOWER_INTF1_TG_SYNC 0x00000010 335#define MDP5_SPLIT_DPL_LOWER_INTF1_TG_SYNC 0x00000010
353#define MDP5_MDP_SPLIT_DPL_LOWER_INTF2_TG_SYNC 0x00000100 336#define MDP5_SPLIT_DPL_LOWER_INTF2_TG_SYNC 0x00000100
354 337
355static inline uint32_t __offset_CTL(uint32_t idx) 338static inline uint32_t __offset_CTL(uint32_t idx)
356{ 339{
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
index 57f73f0c120d..ac9e4cde1380 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
@@ -26,7 +26,6 @@ const struct mdp5_cfg_hw msm8x74v1_config = {
26 .name = "msm8x74v1", 26 .name = "msm8x74v1",
27 .mdp = { 27 .mdp = {
28 .count = 1, 28 .count = 1,
29 .base = { 0x00100 },
30 .caps = MDP_CAP_SMP | 29 .caps = MDP_CAP_SMP |
31 0, 30 0,
32 }, 31 },
@@ -41,12 +40,12 @@ const struct mdp5_cfg_hw msm8x74v1_config = {
41 }, 40 },
42 .ctl = { 41 .ctl = {
43 .count = 5, 42 .count = 5,
44 .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 }, 43 .base = { 0x00500, 0x00600, 0x00700, 0x00800, 0x00900 },
45 .flush_hw_mask = 0x0003ffff, 44 .flush_hw_mask = 0x0003ffff,
46 }, 45 },
47 .pipe_vig = { 46 .pipe_vig = {
48 .count = 3, 47 .count = 3,
49 .base = { 0x01200, 0x01600, 0x01a00 }, 48 .base = { 0x01100, 0x01500, 0x01900 },
50 .caps = MDP_PIPE_CAP_HFLIP | 49 .caps = MDP_PIPE_CAP_HFLIP |
51 MDP_PIPE_CAP_VFLIP | 50 MDP_PIPE_CAP_VFLIP |
52 MDP_PIPE_CAP_SCALE | 51 MDP_PIPE_CAP_SCALE |
@@ -55,7 +54,7 @@ const struct mdp5_cfg_hw msm8x74v1_config = {
55 }, 54 },
56 .pipe_rgb = { 55 .pipe_rgb = {
57 .count = 3, 56 .count = 3,
58 .base = { 0x01e00, 0x02200, 0x02600 }, 57 .base = { 0x01d00, 0x02100, 0x02500 },
59 .caps = MDP_PIPE_CAP_HFLIP | 58 .caps = MDP_PIPE_CAP_HFLIP |
60 MDP_PIPE_CAP_VFLIP | 59 MDP_PIPE_CAP_VFLIP |
61 MDP_PIPE_CAP_SCALE | 60 MDP_PIPE_CAP_SCALE |
@@ -63,26 +62,26 @@ const struct mdp5_cfg_hw msm8x74v1_config = {
63 }, 62 },
64 .pipe_dma = { 63 .pipe_dma = {
65 .count = 2, 64 .count = 2,
66 .base = { 0x02a00, 0x02e00 }, 65 .base = { 0x02900, 0x02d00 },
67 .caps = MDP_PIPE_CAP_HFLIP | 66 .caps = MDP_PIPE_CAP_HFLIP |
68 MDP_PIPE_CAP_VFLIP | 67 MDP_PIPE_CAP_VFLIP |
69 0, 68 0,
70 }, 69 },
71 .lm = { 70 .lm = {
72 .count = 5, 71 .count = 5,
73 .base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 }, 72 .base = { 0x03100, 0x03500, 0x03900, 0x03d00, 0x04100 },
74 .nb_stages = 5, 73 .nb_stages = 5,
75 }, 74 },
76 .dspp = { 75 .dspp = {
77 .count = 3, 76 .count = 3,
78 .base = { 0x04600, 0x04a00, 0x04e00 }, 77 .base = { 0x04500, 0x04900, 0x04d00 },
79 }, 78 },
80 .pp = { 79 .pp = {
81 .count = 3, 80 .count = 3,
82 .base = { 0x21b00, 0x21c00, 0x21d00 }, 81 .base = { 0x21a00, 0x21b00, 0x21c00 },
83 }, 82 },
84 .intf = { 83 .intf = {
85 .base = { 0x21100, 0x21300, 0x21500, 0x21700 }, 84 .base = { 0x21000, 0x21200, 0x21400, 0x21600 },
86 .connect = { 85 .connect = {
87 [0] = INTF_eDP, 86 [0] = INTF_eDP,
88 [1] = INTF_DSI, 87 [1] = INTF_DSI,
@@ -97,7 +96,6 @@ const struct mdp5_cfg_hw msm8x74v2_config = {
97 .name = "msm8x74", 96 .name = "msm8x74",
98 .mdp = { 97 .mdp = {
99 .count = 1, 98 .count = 1,
100 .base = { 0x00100 },
101 .caps = MDP_CAP_SMP | 99 .caps = MDP_CAP_SMP |
102 0, 100 0,
103 }, 101 },
@@ -112,48 +110,48 @@ const struct mdp5_cfg_hw msm8x74v2_config = {
112 }, 110 },
113 .ctl = { 111 .ctl = {
114 .count = 5, 112 .count = 5,
115 .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 }, 113 .base = { 0x00500, 0x00600, 0x00700, 0x00800, 0x00900 },
116 .flush_hw_mask = 0x0003ffff, 114 .flush_hw_mask = 0x0003ffff,
117 }, 115 },
118 .pipe_vig = { 116 .pipe_vig = {
119 .count = 3, 117 .count = 3,
120 .base = { 0x01200, 0x01600, 0x01a00 }, 118 .base = { 0x01100, 0x01500, 0x01900 },
121 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | 119 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
122 MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC | 120 MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
123 MDP_PIPE_CAP_DECIMATION, 121 MDP_PIPE_CAP_DECIMATION,
124 }, 122 },
125 .pipe_rgb = { 123 .pipe_rgb = {
126 .count = 3, 124 .count = 3,
127 .base = { 0x01e00, 0x02200, 0x02600 }, 125 .base = { 0x01d00, 0x02100, 0x02500 },
128 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | 126 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
129 MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION, 127 MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
130 }, 128 },
131 .pipe_dma = { 129 .pipe_dma = {
132 .count = 2, 130 .count = 2,
133 .base = { 0x02a00, 0x02e00 }, 131 .base = { 0x02900, 0x02d00 },
134 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP, 132 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
135 }, 133 },
136 .lm = { 134 .lm = {
137 .count = 5, 135 .count = 5,
138 .base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 }, 136 .base = { 0x03100, 0x03500, 0x03900, 0x03d00, 0x04100 },
139 .nb_stages = 5, 137 .nb_stages = 5,
140 .max_width = 2048, 138 .max_width = 2048,
141 .max_height = 0xFFFF, 139 .max_height = 0xFFFF,
142 }, 140 },
143 .dspp = { 141 .dspp = {
144 .count = 3, 142 .count = 3,
145 .base = { 0x04600, 0x04a00, 0x04e00 }, 143 .base = { 0x04500, 0x04900, 0x04d00 },
146 }, 144 },
147 .ad = { 145 .ad = {
148 .count = 2, 146 .count = 2,
149 .base = { 0x13100, 0x13300 }, 147 .base = { 0x13000, 0x13200 },
150 }, 148 },
151 .pp = { 149 .pp = {
152 .count = 3, 150 .count = 3,
153 .base = { 0x12d00, 0x12e00, 0x12f00 }, 151 .base = { 0x12c00, 0x12d00, 0x12e00 },
154 }, 152 },
155 .intf = { 153 .intf = {
156 .base = { 0x12500, 0x12700, 0x12900, 0x12b00 }, 154 .base = { 0x12400, 0x12600, 0x12800, 0x12a00 },
157 .connect = { 155 .connect = {
158 [0] = INTF_eDP, 156 [0] = INTF_eDP,
159 [1] = INTF_DSI, 157 [1] = INTF_DSI,
@@ -168,7 +166,6 @@ const struct mdp5_cfg_hw apq8084_config = {
168 .name = "apq8084", 166 .name = "apq8084",
169 .mdp = { 167 .mdp = {
170 .count = 1, 168 .count = 1,
171 .base = { 0x00100 },
172 .caps = MDP_CAP_SMP | 169 .caps = MDP_CAP_SMP |
173 0, 170 0,
174 }, 171 },
@@ -190,49 +187,49 @@ const struct mdp5_cfg_hw apq8084_config = {
190 }, 187 },
191 .ctl = { 188 .ctl = {
192 .count = 5, 189 .count = 5,
193 .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 }, 190 .base = { 0x00500, 0x00600, 0x00700, 0x00800, 0x00900 },
194 .flush_hw_mask = 0x003fffff, 191 .flush_hw_mask = 0x003fffff,
195 }, 192 },
196 .pipe_vig = { 193 .pipe_vig = {
197 .count = 4, 194 .count = 4,
198 .base = { 0x01200, 0x01600, 0x01a00, 0x01e00 }, 195 .base = { 0x01100, 0x01500, 0x01900, 0x01d00 },
199 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | 196 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
200 MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC | 197 MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
201 MDP_PIPE_CAP_DECIMATION, 198 MDP_PIPE_CAP_DECIMATION,
202 }, 199 },
203 .pipe_rgb = { 200 .pipe_rgb = {
204 .count = 4, 201 .count = 4,
205 .base = { 0x02200, 0x02600, 0x02a00, 0x02e00 }, 202 .base = { 0x02100, 0x02500, 0x02900, 0x02d00 },
206 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | 203 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
207 MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION, 204 MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
208 }, 205 },
209 .pipe_dma = { 206 .pipe_dma = {
210 .count = 2, 207 .count = 2,
211 .base = { 0x03200, 0x03600 }, 208 .base = { 0x03100, 0x03500 },
212 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP, 209 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
213 }, 210 },
214 .lm = { 211 .lm = {
215 .count = 6, 212 .count = 6,
216 .base = { 0x03a00, 0x03e00, 0x04200, 0x04600, 0x04a00, 0x04e00 }, 213 .base = { 0x03900, 0x03d00, 0x04100, 0x04500, 0x04900, 0x04d00 },
217 .nb_stages = 5, 214 .nb_stages = 5,
218 .max_width = 2048, 215 .max_width = 2048,
219 .max_height = 0xFFFF, 216 .max_height = 0xFFFF,
220 }, 217 },
221 .dspp = { 218 .dspp = {
222 .count = 4, 219 .count = 4,
223 .base = { 0x05200, 0x05600, 0x05a00, 0x05e00 }, 220 .base = { 0x05100, 0x05500, 0x05900, 0x05d00 },
224 221
225 }, 222 },
226 .ad = { 223 .ad = {
227 .count = 3, 224 .count = 3,
228 .base = { 0x13500, 0x13700, 0x13900 }, 225 .base = { 0x13400, 0x13600, 0x13800 },
229 }, 226 },
230 .pp = { 227 .pp = {
231 .count = 4, 228 .count = 4,
232 .base = { 0x12f00, 0x13000, 0x13100, 0x13200 }, 229 .base = { 0x12e00, 0x12f00, 0x13000, 0x13100 },
233 }, 230 },
234 .intf = { 231 .intf = {
235 .base = { 0x12500, 0x12700, 0x12900, 0x12b00, 0x12d00 }, 232 .base = { 0x12400, 0x12600, 0x12800, 0x12a00, 0x12c00 },
236 .connect = { 233 .connect = {
237 [0] = INTF_eDP, 234 [0] = INTF_eDP,
238 [1] = INTF_DSI, 235 [1] = INTF_DSI,
@@ -247,7 +244,7 @@ const struct mdp5_cfg_hw msm8x16_config = {
247 .name = "msm8x16", 244 .name = "msm8x16",
248 .mdp = { 245 .mdp = {
249 .count = 1, 246 .count = 1,
250 .base = { 0x01000 }, 247 .base = { 0x0 },
251 .caps = MDP_CAP_SMP | 248 .caps = MDP_CAP_SMP |
252 0, 249 0,
253 }, 250 },
@@ -261,41 +258,41 @@ const struct mdp5_cfg_hw msm8x16_config = {
261 }, 258 },
262 .ctl = { 259 .ctl = {
263 .count = 5, 260 .count = 5,
264 .base = { 0x02000, 0x02200, 0x02400, 0x02600, 0x02800 }, 261 .base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 },
265 .flush_hw_mask = 0x4003ffff, 262 .flush_hw_mask = 0x4003ffff,
266 }, 263 },
267 .pipe_vig = { 264 .pipe_vig = {
268 .count = 1, 265 .count = 1,
269 .base = { 0x05000 }, 266 .base = { 0x04000 },
270 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | 267 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
271 MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC | 268 MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
272 MDP_PIPE_CAP_DECIMATION, 269 MDP_PIPE_CAP_DECIMATION,
273 }, 270 },
274 .pipe_rgb = { 271 .pipe_rgb = {
275 .count = 2, 272 .count = 2,
276 .base = { 0x15000, 0x17000 }, 273 .base = { 0x14000, 0x16000 },
277 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | 274 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
278 MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION, 275 MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
279 }, 276 },
280 .pipe_dma = { 277 .pipe_dma = {
281 .count = 1, 278 .count = 1,
282 .base = { 0x25000 }, 279 .base = { 0x24000 },
283 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP, 280 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
284 }, 281 },
285 .lm = { 282 .lm = {
286 .count = 2, /* LM0 and LM3 */ 283 .count = 2, /* LM0 and LM3 */
287 .base = { 0x45000, 0x48000 }, 284 .base = { 0x44000, 0x47000 },
288 .nb_stages = 5, 285 .nb_stages = 5,
289 .max_width = 2048, 286 .max_width = 2048,
290 .max_height = 0xFFFF, 287 .max_height = 0xFFFF,
291 }, 288 },
292 .dspp = { 289 .dspp = {
293 .count = 1, 290 .count = 1,
294 .base = { 0x55000 }, 291 .base = { 0x54000 },
295 292
296 }, 293 },
297 .intf = { 294 .intf = {
298 .base = { 0x00000, 0x6b800 }, 295 .base = { 0x00000, 0x6a800 },
299 .connect = { 296 .connect = {
300 [0] = INTF_DISABLED, 297 [0] = INTF_DISABLED,
301 [1] = INTF_DSI, 298 [1] = INTF_DSI,
@@ -308,7 +305,6 @@ const struct mdp5_cfg_hw msm8x94_config = {
308 .name = "msm8x94", 305 .name = "msm8x94",
309 .mdp = { 306 .mdp = {
310 .count = 1, 307 .count = 1,
311 .base = { 0x01000 },
312 .caps = MDP_CAP_SMP | 308 .caps = MDP_CAP_SMP |
313 0, 309 0,
314 }, 310 },
@@ -330,49 +326,49 @@ const struct mdp5_cfg_hw msm8x94_config = {
330 }, 326 },
331 .ctl = { 327 .ctl = {
332 .count = 5, 328 .count = 5,
333 .base = { 0x02000, 0x02200, 0x02400, 0x02600, 0x02800 }, 329 .base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 },
334 .flush_hw_mask = 0xf0ffffff, 330 .flush_hw_mask = 0xf0ffffff,
335 }, 331 },
336 .pipe_vig = { 332 .pipe_vig = {
337 .count = 4, 333 .count = 4,
338 .base = { 0x05000, 0x07000, 0x09000, 0x0b000 }, 334 .base = { 0x04000, 0x06000, 0x08000, 0x0a000 },
339 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | 335 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
340 MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC | 336 MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_CSC |
341 MDP_PIPE_CAP_DECIMATION, 337 MDP_PIPE_CAP_DECIMATION,
342 }, 338 },
343 .pipe_rgb = { 339 .pipe_rgb = {
344 .count = 4, 340 .count = 4,
345 .base = { 0x15000, 0x17000, 0x19000, 0x1b000 }, 341 .base = { 0x14000, 0x16000, 0x18000, 0x1a000 },
346 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP | 342 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP |
347 MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION, 343 MDP_PIPE_CAP_SCALE | MDP_PIPE_CAP_DECIMATION,
348 }, 344 },
349 .pipe_dma = { 345 .pipe_dma = {
350 .count = 2, 346 .count = 2,
351 .base = { 0x25000, 0x27000 }, 347 .base = { 0x24000, 0x26000 },
352 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP, 348 .caps = MDP_PIPE_CAP_HFLIP | MDP_PIPE_CAP_VFLIP,
353 }, 349 },
354 .lm = { 350 .lm = {
355 .count = 6, 351 .count = 6,
356 .base = { 0x45000, 0x46000, 0x47000, 0x48000, 0x49000, 0x4a000 }, 352 .base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 },
357 .nb_stages = 8, 353 .nb_stages = 8,
358 .max_width = 2048, 354 .max_width = 2048,
359 .max_height = 0xFFFF, 355 .max_height = 0xFFFF,
360 }, 356 },
361 .dspp = { 357 .dspp = {
362 .count = 4, 358 .count = 4,
363 .base = { 0x55000, 0x57000, 0x59000, 0x5b000 }, 359 .base = { 0x54000, 0x56000, 0x58000, 0x5a000 },
364 360
365 }, 361 },
366 .ad = { 362 .ad = {
367 .count = 3, 363 .count = 3,
368 .base = { 0x79000, 0x79800, 0x7a000 }, 364 .base = { 0x78000, 0x78800, 0x79000 },
369 }, 365 },
370 .pp = { 366 .pp = {
371 .count = 4, 367 .count = 4,
372 .base = { 0x71000, 0x71800, 0x72000, 0x72800 }, 368 .base = { 0x70000, 0x70800, 0x71000, 0x71800 },
373 }, 369 },
374 .intf = { 370 .intf = {
375 .base = { 0x6b000, 0x6b800, 0x6c000, 0x6c800, 0x6d000 }, 371 .base = { 0x6a000, 0x6a800, 0x6b000, 0x6b800, 0x6c000 },
376 .connect = { 372 .connect = {
377 [0] = INTF_DISABLED, 373 [0] = INTF_DISABLED,
378 [1] = INTF_DSI, 374 [1] = INTF_DSI,
@@ -387,19 +383,18 @@ const struct mdp5_cfg_hw msm8x96_config = {
387 .name = "msm8x96", 383 .name = "msm8x96",
388 .mdp = { 384 .mdp = {
389 .count = 1, 385 .count = 1,
390 .base = { 0x01000 },
391 .caps = MDP_CAP_DSC | 386 .caps = MDP_CAP_DSC |
392 MDP_CAP_CDM | 387 MDP_CAP_CDM |
393 0, 388 0,
394 }, 389 },
395 .ctl = { 390 .ctl = {
396 .count = 5, 391 .count = 5,
397 .base = { 0x02000, 0x02200, 0x02400, 0x02600, 0x02800 }, 392 .base = { 0x01000, 0x01200, 0x01400, 0x01600, 0x01800 },
398 .flush_hw_mask = 0xf4ffffff, 393 .flush_hw_mask = 0xf4ffffff,
399 }, 394 },
400 .pipe_vig = { 395 .pipe_vig = {
401 .count = 4, 396 .count = 4,
402 .base = { 0x05000, 0x07000, 0x09000, 0x0b000 }, 397 .base = { 0x04000, 0x06000, 0x08000, 0x0a000 },
403 .caps = MDP_PIPE_CAP_HFLIP | 398 .caps = MDP_PIPE_CAP_HFLIP |
404 MDP_PIPE_CAP_VFLIP | 399 MDP_PIPE_CAP_VFLIP |
405 MDP_PIPE_CAP_SCALE | 400 MDP_PIPE_CAP_SCALE |
@@ -410,7 +405,7 @@ const struct mdp5_cfg_hw msm8x96_config = {
410 }, 405 },
411 .pipe_rgb = { 406 .pipe_rgb = {
412 .count = 4, 407 .count = 4,
413 .base = { 0x15000, 0x17000, 0x19000, 0x1b000 }, 408 .base = { 0x14000, 0x16000, 0x18000, 0x1a000 },
414 .caps = MDP_PIPE_CAP_HFLIP | 409 .caps = MDP_PIPE_CAP_HFLIP |
415 MDP_PIPE_CAP_VFLIP | 410 MDP_PIPE_CAP_VFLIP |
416 MDP_PIPE_CAP_SCALE | 411 MDP_PIPE_CAP_SCALE |
@@ -420,7 +415,7 @@ const struct mdp5_cfg_hw msm8x96_config = {
420 }, 415 },
421 .pipe_dma = { 416 .pipe_dma = {
422 .count = 2, 417 .count = 2,
423 .base = { 0x25000, 0x27000 }, 418 .base = { 0x24000, 0x26000 },
424 .caps = MDP_PIPE_CAP_HFLIP | 419 .caps = MDP_PIPE_CAP_HFLIP |
425 MDP_PIPE_CAP_VFLIP | 420 MDP_PIPE_CAP_VFLIP |
426 MDP_PIPE_CAP_SW_PIX_EXT | 421 MDP_PIPE_CAP_SW_PIX_EXT |
@@ -428,33 +423,33 @@ const struct mdp5_cfg_hw msm8x96_config = {
428 }, 423 },
429 .lm = { 424 .lm = {
430 .count = 6, 425 .count = 6,
431 .base = { 0x45000, 0x46000, 0x47000, 0x48000, 0x49000, 0x4a000 }, 426 .base = { 0x44000, 0x45000, 0x46000, 0x47000, 0x48000, 0x49000 },
432 .nb_stages = 8, 427 .nb_stages = 8,
433 .max_width = 2560, 428 .max_width = 2560,
434 .max_height = 0xFFFF, 429 .max_height = 0xFFFF,
435 }, 430 },
436 .dspp = { 431 .dspp = {
437 .count = 2, 432 .count = 2,
438 .base = { 0x55000, 0x57000 }, 433 .base = { 0x54000, 0x56000 },
439 }, 434 },
440 .ad = { 435 .ad = {
441 .count = 3, 436 .count = 3,
442 .base = { 0x79000, 0x79800, 0x7a000 }, 437 .base = { 0x78000, 0x78800, 0x79000 },
443 }, 438 },
444 .pp = { 439 .pp = {
445 .count = 4, 440 .count = 4,
446 .base = { 0x71000, 0x71800, 0x72000, 0x72800 }, 441 .base = { 0x70000, 0x70800, 0x71000, 0x71800 },
447 }, 442 },
448 .cdm = { 443 .cdm = {
449 .count = 1, 444 .count = 1,
450 .base = { 0x7a200 }, 445 .base = { 0x79200 },
451 }, 446 },
452 .dsc = { 447 .dsc = {
453 .count = 2, 448 .count = 2,
454 .base = { 0x81000, 0x81400 }, 449 .base = { 0x80000, 0x80400 },
455 }, 450 },
456 .intf = { 451 .intf = {
457 .base = { 0x6b000, 0x6b800, 0x6c000, 0x6c800, 0x6d000 }, 452 .base = { 0x6a000, 0x6a800, 0x6b000, 0x6b800, 0x6c000 },
458 .connect = { 453 .connect = {
459 [0] = INTF_DISABLED, 454 [0] = INTF_DISABLED,
460 [1] = INTF_DSI, 455 [1] = INTF_DSI,
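
The header regeneration above drops the __offset_MDP()/REG_MDP5_MDP_*(i0) indirection, turning the MDP5 top-level registers into plain offsets, and every block base in mdp5_cfg.c shrinks by the old .mdp.base value (0x100 or 0x1000) that the indirection used to add. A before/after sketch of a register read under that assumption (mdp5_read() as used elsewhere in this series; the wrapper function is illustrative):

/* illustrative: the same HW_VERSION read before and after the header change */
static u32 read_mdp5_hw_version(struct mdp5_kms *mdp5_kms)
{
	/* old: mdp5_read(mdp5_kms, REG_MDP5_MDP_HW_VERSION(0)); */
	return mdp5_read(mdp5_kms, REG_MDP5_HW_VERSION);
}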
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
index 69094cb28103..c627ab6d0061 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cmd_encoder.c
@@ -272,22 +272,22 @@ int mdp5_cmd_encoder_set_split_display(struct drm_encoder *encoder,
272 * start signal for the slave encoder 272 * start signal for the slave encoder
273 */ 273 */
274 if (intf_num == 1) 274 if (intf_num == 1)
275 data |= MDP5_MDP_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX; 275 data |= MDP5_SPLIT_DPL_UPPER_INTF2_SW_TRG_MUX;
276 else if (intf_num == 2) 276 else if (intf_num == 2)
277 data |= MDP5_MDP_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX; 277 data |= MDP5_SPLIT_DPL_UPPER_INTF1_SW_TRG_MUX;
278 else 278 else
279 return -EINVAL; 279 return -EINVAL;
280 280
281 /* Smart Panel, Sync mode */ 281 /* Smart Panel, Sync mode */
282 data |= MDP5_MDP_SPLIT_DPL_UPPER_SMART_PANEL; 282 data |= MDP5_SPLIT_DPL_UPPER_SMART_PANEL;
283 283
284 /* Make sure clocks are on when connectors calling this function. */ 284 /* Make sure clocks are on when connectors calling this function. */
285 mdp5_enable(mdp5_kms); 285 mdp5_enable(mdp5_kms);
286 mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_UPPER(0), data); 286 mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, data);
287 287
288 mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_LOWER(0), 288 mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER,
289 MDP5_MDP_SPLIT_DPL_LOWER_SMART_PANEL); 289 MDP5_SPLIT_DPL_LOWER_SMART_PANEL);
290 mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_EN(0), 1); 290 mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1);
291 mdp5_disable(mdp5_kms); 291 mdp5_disable(mdp5_kms);
292 292
293 return 0; 293 return 0;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 88fe256c1931..fa2be7ce9468 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -374,6 +374,7 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
374 struct drm_device *dev = crtc->dev; 374 struct drm_device *dev = crtc->dev;
375 struct plane_state pstates[STAGE_MAX + 1]; 375 struct plane_state pstates[STAGE_MAX + 1];
376 const struct mdp5_cfg_hw *hw_cfg; 376 const struct mdp5_cfg_hw *hw_cfg;
377 const struct drm_plane_state *pstate;
377 int cnt = 0, i; 378 int cnt = 0, i;
378 379
379 DBG("%s: check", mdp5_crtc->name); 380 DBG("%s: check", mdp5_crtc->name);
@@ -382,20 +383,13 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
382 * and that we don't have conflicting mixer stages: 383 * and that we don't have conflicting mixer stages:
383 */ 384 */
384 hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); 385 hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
385 drm_atomic_crtc_state_for_each_plane(plane, state) { 386 drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
386 struct drm_plane_state *pstate;
387 if (cnt >= (hw_cfg->lm.nb_stages)) { 387 if (cnt >= (hw_cfg->lm.nb_stages)) {
388 dev_err(dev->dev, "too many planes!\n"); 388 dev_err(dev->dev, "too many planes!\n");
389 return -EINVAL; 389 return -EINVAL;
390 } 390 }
391 391
392 pstate = state->state->plane_states[drm_plane_index(plane)];
393 392
394 /* plane might not have changed, in which case take
395 * current state:
396 */
397 if (!pstate)
398 pstate = plane->state;
399 pstates[cnt].plane = plane; 393 pstates[cnt].plane = plane;
400 pstates[cnt].state = to_mdp5_plane_state(pstate); 394 pstates[cnt].state = to_mdp5_plane_state(pstate);
401 395
@@ -496,8 +490,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
496 struct mdp5_kms *mdp5_kms = get_kms(crtc); 490 struct mdp5_kms *mdp5_kms = get_kms(crtc);
497 struct drm_gem_object *cursor_bo, *old_bo = NULL; 491 struct drm_gem_object *cursor_bo, *old_bo = NULL;
498 uint32_t blendcfg, cursor_addr, stride; 492 uint32_t blendcfg, cursor_addr, stride;
499 int ret, bpp, lm; 493 int ret, lm;
500 unsigned int depth;
501 enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL; 494 enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
502 uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0); 495 uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
503 uint32_t roi_w, roi_h; 496 uint32_t roi_w, roi_h;
@@ -527,8 +520,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
527 return -EINVAL; 520 return -EINVAL;
528 521
529 lm = mdp5_crtc->lm; 522 lm = mdp5_crtc->lm;
530 drm_fb_get_bpp_depth(DRM_FORMAT_ARGB8888, &depth, &bpp); 523 stride = width * drm_format_plane_cpp(DRM_FORMAT_ARGB8888, 0);
531 stride = width * (bpp >> 3);
532 524
533 spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags); 525 spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
534 old_bo = mdp5_crtc->cursor.scanout_bo; 526 old_bo = mdp5_crtc->cursor.scanout_bo;
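
The atomic_check conversion above swaps the open-coded plane_states[] lookup (with its fallback to plane->state) for drm_atomic_crtc_state_for_each_plane_state(), which resolves exactly the same thing: the plane state carried in the atomic update if the plane is part of it, otherwise the plane's current state. The cursor path likewise replaces the bpp/depth query with drm_format_plane_cpp(), which returns the bytes per pixel of plane 0 (4 for ARGB8888). A hedged sketch of the iterator pattern, with a hypothetical stage limit standing in for hw_cfg->lm.nb_stages:

    static int example_count_stages(struct drm_crtc_state *crtc_state,
                                    unsigned int max_stages)
    {
            const struct drm_plane_state *pstate;
            struct drm_plane *plane;
            unsigned int cnt = 0;

            drm_atomic_crtc_state_for_each_plane_state(plane, pstate, crtc_state) {
                    if (++cnt > max_stages)
                            return -EINVAL; /* too many planes for the mixer */
            }

            return cnt;
    }
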
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
index 4e81ca4f964a..d021edc3b307 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
@@ -118,31 +118,31 @@ static void set_display_intf(struct mdp5_kms *mdp5_kms,
118 u32 intf_sel; 118 u32 intf_sel;
119 119
120 spin_lock_irqsave(&mdp5_kms->resource_lock, flags); 120 spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
121 intf_sel = mdp5_read(mdp5_kms, REG_MDP5_MDP_DISP_INTF_SEL(0)); 121 intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);
122 122
123 switch (intf->num) { 123 switch (intf->num) {
124 case 0: 124 case 0:
125 intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF0__MASK; 125 intf_sel &= ~MDP5_DISP_INTF_SEL_INTF0__MASK;
126 intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF0(intf->type); 126 intf_sel |= MDP5_DISP_INTF_SEL_INTF0(intf->type);
127 break; 127 break;
128 case 1: 128 case 1:
129 intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF1__MASK; 129 intf_sel &= ~MDP5_DISP_INTF_SEL_INTF1__MASK;
130 intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF1(intf->type); 130 intf_sel |= MDP5_DISP_INTF_SEL_INTF1(intf->type);
131 break; 131 break;
132 case 2: 132 case 2:
133 intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF2__MASK; 133 intf_sel &= ~MDP5_DISP_INTF_SEL_INTF2__MASK;
134 intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF2(intf->type); 134 intf_sel |= MDP5_DISP_INTF_SEL_INTF2(intf->type);
135 break; 135 break;
136 case 3: 136 case 3:
137 intf_sel &= ~MDP5_MDP_DISP_INTF_SEL_INTF3__MASK; 137 intf_sel &= ~MDP5_DISP_INTF_SEL_INTF3__MASK;
138 intf_sel |= MDP5_MDP_DISP_INTF_SEL_INTF3(intf->type); 138 intf_sel |= MDP5_DISP_INTF_SEL_INTF3(intf->type);
139 break; 139 break;
140 default: 140 default:
141 BUG(); 141 BUG();
142 break; 142 break;
143 } 143 }
144 144
145 mdp5_write(mdp5_kms, REG_MDP5_MDP_DISP_INTF_SEL(0), intf_sel); 145 mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
146 spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags); 146 spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
147} 147}
148 148
@@ -557,7 +557,7 @@ int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable)
557 if (!enable) { 557 if (!enable) {
558 ctlx->pair = NULL; 558 ctlx->pair = NULL;
559 ctly->pair = NULL; 559 ctly->pair = NULL;
560 mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0), 0); 560 mdp5_write(mdp5_kms, REG_MDP5_SPARE_0, 0);
561 return 0; 561 return 0;
562 } else if ((ctlx->pair != NULL) || (ctly->pair != NULL)) { 562 } else if ((ctlx->pair != NULL) || (ctly->pair != NULL)) {
563 dev_err(ctl_mgr->dev->dev, "CTLs already paired\n"); 563 dev_err(ctl_mgr->dev->dev, "CTLs already paired\n");
@@ -570,8 +570,8 @@ int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable)
570 ctlx->pair = ctly; 570 ctlx->pair = ctly;
571 ctly->pair = ctlx; 571 ctly->pair = ctlx;
572 572
573 mdp5_write(mdp5_kms, REG_MDP5_MDP_SPARE_0(0), 573 mdp5_write(mdp5_kms, REG_MDP5_SPARE_0,
574 MDP5_MDP_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN); 574 MDP5_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN);
575 575
576 return 0; 576 return 0;
577} 577}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index 1d95f9fd9dc7..fe0c22230883 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -322,18 +322,18 @@ int mdp5_encoder_set_split_display(struct drm_encoder *encoder,
322 * to use the master's enable signal for the slave encoder. 322 * to use the master's enable signal for the slave encoder.
323 */ 323 */
324 if (intf_num == 1) 324 if (intf_num == 1)
325 data |= MDP5_MDP_SPLIT_DPL_LOWER_INTF2_TG_SYNC; 325 data |= MDP5_SPLIT_DPL_LOWER_INTF2_TG_SYNC;
326 else if (intf_num == 2) 326 else if (intf_num == 2)
327 data |= MDP5_MDP_SPLIT_DPL_LOWER_INTF1_TG_SYNC; 327 data |= MDP5_SPLIT_DPL_LOWER_INTF1_TG_SYNC;
328 else 328 else
329 return -EINVAL; 329 return -EINVAL;
330 330
331 /* Make sure clocks are on when connectors calling this function. */ 331 /* Make sure clocks are on when connectors calling this function. */
332 mdp5_enable(mdp5_kms); 332 mdp5_enable(mdp5_kms);
333 /* Dumb Panel, Sync mode */ 333 /* Dumb Panel, Sync mode */
334 mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_UPPER(0), 0); 334 mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_UPPER, 0);
335 mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_LOWER(0), data); 335 mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_LOWER, data);
336 mdp5_write(mdp5_kms, REG_MDP5_MDP_SPLIT_DPL_EN(0), 1); 336 mdp5_write(mdp5_kms, REG_MDP5_SPLIT_DPL_EN, 1);
337 337
338 mdp5_ctl_pair(mdp5_encoder->ctl, mdp5_slave_enc->ctl, true); 338 mdp5_ctl_pair(mdp5_encoder->ctl, mdp5_slave_enc->ctl, true);
339 339
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
index 73bc3e312fd4..d53e5510fd7c 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
@@ -15,7 +15,6 @@
15 * this program. If not, see <http://www.gnu.org/licenses/>. 15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */ 16 */
17 17
18#include <linux/irqdomain.h>
19#include <linux/irq.h> 18#include <linux/irq.h>
20 19
21#include "msm_drv.h" 20#include "msm_drv.h"
@@ -24,9 +23,9 @@
24void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask, 23void mdp5_set_irqmask(struct mdp_kms *mdp_kms, uint32_t irqmask,
25 uint32_t old_irqmask) 24 uint32_t old_irqmask)
26{ 25{
27 mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_MDP_INTR_CLEAR(0), 26 mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_INTR_CLEAR,
28 irqmask ^ (irqmask & old_irqmask)); 27 irqmask ^ (irqmask & old_irqmask));
29 mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_MDP_INTR_EN(0), irqmask); 28 mdp5_write(to_mdp5_kms(mdp_kms), REG_MDP5_INTR_EN, irqmask);
30} 29}
31 30
32static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus) 31static void mdp5_irq_error_handler(struct mdp_irq *irq, uint32_t irqstatus)
@@ -38,8 +37,8 @@ void mdp5_irq_preinstall(struct msm_kms *kms)
38{ 37{
39 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); 38 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
40 mdp5_enable(mdp5_kms); 39 mdp5_enable(mdp5_kms);
41 mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_CLEAR(0), 0xffffffff); 40 mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, 0xffffffff);
42 mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_EN(0), 0x00000000); 41 mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
43 mdp5_disable(mdp5_kms); 42 mdp5_disable(mdp5_kms);
44} 43}
45 44
@@ -55,7 +54,9 @@ int mdp5_irq_postinstall(struct msm_kms *kms)
55 MDP5_IRQ_INTF2_UNDER_RUN | 54 MDP5_IRQ_INTF2_UNDER_RUN |
56 MDP5_IRQ_INTF3_UNDER_RUN; 55 MDP5_IRQ_INTF3_UNDER_RUN;
57 56
57 mdp5_enable(mdp5_kms);
58 mdp_irq_register(mdp_kms, error_handler); 58 mdp_irq_register(mdp_kms, error_handler);
59 mdp5_disable(mdp5_kms);
59 60
60 return 0; 61 return 0;
61} 62}
@@ -64,21 +65,22 @@ void mdp5_irq_uninstall(struct msm_kms *kms)
64{ 65{
65 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); 66 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
66 mdp5_enable(mdp5_kms); 67 mdp5_enable(mdp5_kms);
67 mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_EN(0), 0x00000000); 68 mdp5_write(mdp5_kms, REG_MDP5_INTR_EN, 0x00000000);
68 mdp5_disable(mdp5_kms); 69 mdp5_disable(mdp5_kms);
69} 70}
70 71
71static void mdp5_irq_mdp(struct mdp_kms *mdp_kms) 72irqreturn_t mdp5_irq(struct msm_kms *kms)
72{ 73{
74 struct mdp_kms *mdp_kms = to_mdp_kms(kms);
73 struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms); 75 struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
74 struct drm_device *dev = mdp5_kms->dev; 76 struct drm_device *dev = mdp5_kms->dev;
75 struct msm_drm_private *priv = dev->dev_private; 77 struct msm_drm_private *priv = dev->dev_private;
76 unsigned int id; 78 unsigned int id;
77 uint32_t status, enable; 79 uint32_t status, enable;
78 80
79 enable = mdp5_read(mdp5_kms, REG_MDP5_MDP_INTR_EN(0)); 81 enable = mdp5_read(mdp5_kms, REG_MDP5_INTR_EN);
80 status = mdp5_read(mdp5_kms, REG_MDP5_MDP_INTR_STATUS(0)) & enable; 82 status = mdp5_read(mdp5_kms, REG_MDP5_INTR_STATUS) & enable;
81 mdp5_write(mdp5_kms, REG_MDP5_MDP_INTR_CLEAR(0), status); 83 mdp5_write(mdp5_kms, REG_MDP5_INTR_CLEAR, status);
82 84
83 VERB("status=%08x", status); 85 VERB("status=%08x", status);
84 86
@@ -87,29 +89,6 @@ static void mdp5_irq_mdp(struct mdp_kms *mdp_kms)
87 for (id = 0; id < priv->num_crtcs; id++) 89 for (id = 0; id < priv->num_crtcs; id++)
88 if (status & mdp5_crtc_vblank(priv->crtcs[id])) 90 if (status & mdp5_crtc_vblank(priv->crtcs[id]))
89 drm_handle_vblank(dev, id); 91 drm_handle_vblank(dev, id);
90}
91
92irqreturn_t mdp5_irq(struct msm_kms *kms)
93{
94 struct mdp_kms *mdp_kms = to_mdp_kms(kms);
95 struct mdp5_kms *mdp5_kms = to_mdp5_kms(mdp_kms);
96 uint32_t intr;
97
98 intr = mdp5_read(mdp5_kms, REG_MDSS_HW_INTR_STATUS);
99
100 VERB("intr=%08x", intr);
101
102 if (intr & MDSS_HW_INTR_STATUS_INTR_MDP) {
103 mdp5_irq_mdp(mdp_kms);
104 intr &= ~MDSS_HW_INTR_STATUS_INTR_MDP;
105 }
106
107 while (intr) {
108 irq_hw_number_t hwirq = fls(intr) - 1;
109 generic_handle_irq(irq_find_mapping(
110 mdp5_kms->irqcontroller.domain, hwirq));
111 intr &= ~(1 << hwirq);
112 }
113 92
114 return IRQ_HANDLED; 93 return IRQ_HANDLED;
115} 94}
@@ -135,81 +114,3 @@ void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
135 mdp5_crtc_vblank(crtc), false); 114 mdp5_crtc_vblank(crtc), false);
136 mdp5_disable(mdp5_kms); 115 mdp5_disable(mdp5_kms);
137} 116}
138
139/*
140 * interrupt-controller implementation, so sub-blocks (hdmi/eDP/dsi/etc)
141 * can register to get their irq's delivered
142 */
143
144#define VALID_IRQS (MDSS_HW_INTR_STATUS_INTR_DSI0 | \
145 MDSS_HW_INTR_STATUS_INTR_DSI1 | \
146 MDSS_HW_INTR_STATUS_INTR_HDMI | \
147 MDSS_HW_INTR_STATUS_INTR_EDP)
148
149static void mdp5_hw_mask_irq(struct irq_data *irqd)
150{
151 struct mdp5_kms *mdp5_kms = irq_data_get_irq_chip_data(irqd);
152 smp_mb__before_atomic();
153 clear_bit(irqd->hwirq, &mdp5_kms->irqcontroller.enabled_mask);
154 smp_mb__after_atomic();
155}
156
157static void mdp5_hw_unmask_irq(struct irq_data *irqd)
158{
159 struct mdp5_kms *mdp5_kms = irq_data_get_irq_chip_data(irqd);
160 smp_mb__before_atomic();
161 set_bit(irqd->hwirq, &mdp5_kms->irqcontroller.enabled_mask);
162 smp_mb__after_atomic();
163}
164
165static struct irq_chip mdp5_hw_irq_chip = {
166 .name = "mdp5",
167 .irq_mask = mdp5_hw_mask_irq,
168 .irq_unmask = mdp5_hw_unmask_irq,
169};
170
171static int mdp5_hw_irqdomain_map(struct irq_domain *d,
172 unsigned int irq, irq_hw_number_t hwirq)
173{
174 struct mdp5_kms *mdp5_kms = d->host_data;
175
176 if (!(VALID_IRQS & (1 << hwirq)))
177 return -EPERM;
178
179 irq_set_chip_and_handler(irq, &mdp5_hw_irq_chip, handle_level_irq);
180 irq_set_chip_data(irq, mdp5_kms);
181
182 return 0;
183}
184
185static struct irq_domain_ops mdp5_hw_irqdomain_ops = {
186 .map = mdp5_hw_irqdomain_map,
187 .xlate = irq_domain_xlate_onecell,
188};
189
190
191int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms)
192{
193 struct device *dev = mdp5_kms->dev->dev;
194 struct irq_domain *d;
195
196 d = irq_domain_add_linear(dev->of_node, 32,
197 &mdp5_hw_irqdomain_ops, mdp5_kms);
198 if (!d) {
199 dev_err(dev, "mdp5 irq domain add failed\n");
200 return -ENXIO;
201 }
202
203 mdp5_kms->irqcontroller.enabled_mask = 0;
204 mdp5_kms->irqcontroller.domain = d;
205
206 return 0;
207}
208
209void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms)
210{
211 if (mdp5_kms->irqcontroller.domain) {
212 irq_domain_remove(mdp5_kms->irqcontroller.domain);
213 mdp5_kms->irqcontroller.domain = NULL;
214 }
215}
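
With the interrupt-controller code removed here (it reappears in the new mdp5_mdss.c further down), mdp5_irq() services only the MDP-internal status register; the MDSS-level demux now happens one level up. Note also that mdp_irq_register() in postinstall is now bracketed by mdp5_enable()/mdp5_disable(), since updating the interrupt mask writes MDP registers and therefore needs the clocks on. A rough sketch of the resulting interrupt path, assuming the device tree routes the MDP5 interrupt through the new MDSS irq domain:

    mdss_irq()                          /* MDSS wrapper ISR (mdp5_mdss.c)  */
      -> generic_handle_irq(MDP hwirq)  /* dispatched via the MDSS domain  */
         -> msm_irq() -> mdp5_irq()     /* installed by drm_irq_install()  */
            -> read/clear REG_MDP5_INTR_STATUS, mdp_dispatch_irqs(),
               drm_handle_vblank()
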
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 484b4d15e71d..ed7143d35b25 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -16,6 +16,7 @@
16 * this program. If not, see <http://www.gnu.org/licenses/>. 16 * this program. If not, see <http://www.gnu.org/licenses/>.
17 */ 17 */
18 18
19#include <linux/of_irq.h>
19 20
20#include "msm_drv.h" 21#include "msm_drv.h"
21#include "msm_mmu.h" 22#include "msm_mmu.h"
@@ -28,10 +29,11 @@ static const char *iommu_ports[] = {
28static int mdp5_hw_init(struct msm_kms *kms) 29static int mdp5_hw_init(struct msm_kms *kms)
29{ 30{
30 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); 31 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
31 struct drm_device *dev = mdp5_kms->dev; 32 struct platform_device *pdev = mdp5_kms->pdev;
32 unsigned long flags; 33 unsigned long flags;
33 34
34 pm_runtime_get_sync(dev->dev); 35 pm_runtime_get_sync(&pdev->dev);
36 mdp5_enable(mdp5_kms);
35 37
36 /* Magic unknown register writes: 38 /* Magic unknown register writes:
37 * 39 *
@@ -58,12 +60,13 @@ static int mdp5_hw_init(struct msm_kms *kms)
58 */ 60 */
59 61
60 spin_lock_irqsave(&mdp5_kms->resource_lock, flags); 62 spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
61 mdp5_write(mdp5_kms, REG_MDP5_MDP_DISP_INTF_SEL(0), 0); 63 mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0);
62 spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags); 64 spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
63 65
64 mdp5_ctlm_hw_reset(mdp5_kms->ctlm); 66 mdp5_ctlm_hw_reset(mdp5_kms->ctlm);
65 67
66 pm_runtime_put_sync(dev->dev); 68 mdp5_disable(mdp5_kms);
69 pm_runtime_put_sync(&pdev->dev);
67 70
68 return 0; 71 return 0;
69} 72}
@@ -78,17 +81,11 @@ static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *s
78{ 81{
79 int i; 82 int i;
80 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); 83 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
81 int nplanes = mdp5_kms->dev->mode_config.num_total_plane; 84 struct drm_plane *plane;
82 85 struct drm_plane_state *plane_state;
83 for (i = 0; i < nplanes; i++) {
84 struct drm_plane *plane = state->planes[i];
85 struct drm_plane_state *plane_state = state->plane_states[i];
86
87 if (!plane)
88 continue;
89 86
87 for_each_plane_in_state(state, plane, plane_state, i)
90 mdp5_plane_complete_commit(plane, plane_state); 88 mdp5_plane_complete_commit(plane, plane_state);
91 }
92 89
93 mdp5_disable(mdp5_kms); 90 mdp5_disable(mdp5_kms);
94} 91}
@@ -117,26 +114,15 @@ static int mdp5_set_split_display(struct msm_kms *kms,
117 return mdp5_encoder_set_split_display(encoder, slave_encoder); 114 return mdp5_encoder_set_split_display(encoder, slave_encoder);
118} 115}
119 116
120static void mdp5_destroy(struct msm_kms *kms) 117static void mdp5_kms_destroy(struct msm_kms *kms)
121{ 118{
122 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); 119 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
123 struct msm_mmu *mmu = mdp5_kms->mmu; 120 struct msm_mmu *mmu = mdp5_kms->mmu;
124 121
125 mdp5_irq_domain_fini(mdp5_kms);
126
127 if (mmu) { 122 if (mmu) {
128 mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports)); 123 mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
129 mmu->funcs->destroy(mmu); 124 mmu->funcs->destroy(mmu);
130 } 125 }
131
132 if (mdp5_kms->ctlm)
133 mdp5_ctlm_destroy(mdp5_kms->ctlm);
134 if (mdp5_kms->smp)
135 mdp5_smp_destroy(mdp5_kms->smp);
136 if (mdp5_kms->cfg)
137 mdp5_cfg_destroy(mdp5_kms->cfg);
138
139 kfree(mdp5_kms);
140} 126}
141 127
142static const struct mdp_kms_funcs kms_funcs = { 128static const struct mdp_kms_funcs kms_funcs = {
@@ -154,7 +140,7 @@ static const struct mdp_kms_funcs kms_funcs = {
154 .get_format = mdp_get_format, 140 .get_format = mdp_get_format,
155 .round_pixclk = mdp5_round_pixclk, 141 .round_pixclk = mdp5_round_pixclk,
156 .set_split_display = mdp5_set_split_display, 142 .set_split_display = mdp5_set_split_display,
157 .destroy = mdp5_destroy, 143 .destroy = mdp5_kms_destroy,
158 }, 144 },
159 .set_irqmask = mdp5_set_irqmask, 145 .set_irqmask = mdp5_set_irqmask,
160}; 146};
@@ -351,13 +337,6 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
351 337
352 hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg); 338 hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
353 339
354 /* register our interrupt-controller for hdmi/eDP/dsi/etc
355 * to use for irqs routed through mdp:
356 */
357 ret = mdp5_irq_domain_init(mdp5_kms);
358 if (ret)
359 goto fail;
360
361 /* construct CRTCs and their private planes: */ 340 /* construct CRTCs and their private planes: */
362 for (i = 0; i < hw_cfg->pipe_rgb.count; i++) { 341 for (i = 0; i < hw_cfg->pipe_rgb.count; i++) {
363 struct drm_plane *plane; 342 struct drm_plane *plane;
@@ -425,17 +404,17 @@ fail:
425 return ret; 404 return ret;
426} 405}
427 406
428static void read_hw_revision(struct mdp5_kms *mdp5_kms, 407static void read_mdp_hw_revision(struct mdp5_kms *mdp5_kms,
429 uint32_t *major, uint32_t *minor) 408 u32 *major, u32 *minor)
430{ 409{
431 uint32_t version; 410 u32 version;
432 411
433 mdp5_enable(mdp5_kms); 412 mdp5_enable(mdp5_kms);
434 version = mdp5_read(mdp5_kms, REG_MDSS_HW_VERSION); 413 version = mdp5_read(mdp5_kms, REG_MDP5_HW_VERSION);
435 mdp5_disable(mdp5_kms); 414 mdp5_disable(mdp5_kms);
436 415
437 *major = FIELD(version, MDSS_HW_VERSION_MAJOR); 416 *major = FIELD(version, MDP5_HW_VERSION_MAJOR);
438 *minor = FIELD(version, MDSS_HW_VERSION_MINOR); 417 *minor = FIELD(version, MDP5_HW_VERSION_MINOR);
439 418
440 DBG("MDP5 version v%d.%d", *major, *minor); 419 DBG("MDP5 version v%d.%d", *major, *minor);
441} 420}
@@ -580,51 +559,146 @@ static u32 mdp5_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
580 559
581struct msm_kms *mdp5_kms_init(struct drm_device *dev) 560struct msm_kms *mdp5_kms_init(struct drm_device *dev)
582{ 561{
583 struct platform_device *pdev = dev->platformdev; 562 struct msm_drm_private *priv = dev->dev_private;
584 struct mdp5_cfg *config; 563 struct platform_device *pdev;
585 struct mdp5_kms *mdp5_kms; 564 struct mdp5_kms *mdp5_kms;
586 struct msm_kms *kms = NULL; 565 struct mdp5_cfg *config;
566 struct msm_kms *kms;
587 struct msm_mmu *mmu; 567 struct msm_mmu *mmu;
588 uint32_t major, minor; 568 int irq, i, ret;
589 int i, ret;
590 569
591 mdp5_kms = kzalloc(sizeof(*mdp5_kms), GFP_KERNEL); 570 /* priv->kms would have been populated by the MDP5 driver */
592 if (!mdp5_kms) { 571 kms = priv->kms;
593 dev_err(dev->dev, "failed to allocate kms\n"); 572 if (!kms)
594 ret = -ENOMEM; 573 return NULL;
574
575 mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
576
577 mdp_kms_init(&mdp5_kms->base, &kms_funcs);
578
579 pdev = mdp5_kms->pdev;
580
581 irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
582 if (irq < 0) {
583 ret = irq;
584 dev_err(&pdev->dev, "failed to get irq: %d\n", ret);
595 goto fail; 585 goto fail;
596 } 586 }
597 587
598 spin_lock_init(&mdp5_kms->resource_lock); 588 kms->irq = irq;
599 589
600 mdp_kms_init(&mdp5_kms->base, &kms_funcs); 590 config = mdp5_cfg_get_config(mdp5_kms->cfg);
601 591
602 kms = &mdp5_kms->base.base; 592 /* make sure things are off before attaching iommu (bootloader could
593 * have left things on, in which case we'll start getting faults if
594 * we don't disable):
595 */
596 mdp5_enable(mdp5_kms);
597 for (i = 0; i < MDP5_INTF_NUM_MAX; i++) {
598 if (mdp5_cfg_intf_is_virtual(config->hw->intf.connect[i]) ||
599 !config->hw->intf.base[i])
600 continue;
601 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);
603 602
604 mdp5_kms->dev = dev; 603 mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(i), 0x3);
604 }
605 mdp5_disable(mdp5_kms);
606 mdelay(16);
605 607
606 /* mdp5_kms->mmio actually represents the MDSS base address */ 608 if (config->platform.iommu) {
607 mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5"); 609 mmu = msm_iommu_new(&pdev->dev, config->platform.iommu);
608 if (IS_ERR(mdp5_kms->mmio)) { 610 if (IS_ERR(mmu)) {
609 ret = PTR_ERR(mdp5_kms->mmio); 611 ret = PTR_ERR(mmu);
612 dev_err(&pdev->dev, "failed to init iommu: %d\n", ret);
613 iommu_domain_free(config->platform.iommu);
614 goto fail;
615 }
616
617 ret = mmu->funcs->attach(mmu, iommu_ports,
618 ARRAY_SIZE(iommu_ports));
619 if (ret) {
620 dev_err(&pdev->dev, "failed to attach iommu: %d\n",
621 ret);
622 mmu->funcs->destroy(mmu);
623 goto fail;
624 }
625 } else {
626 dev_info(&pdev->dev,
627 "no iommu, fallback to phys contig buffers for scanout\n");
628 mmu = NULL;
629 }
630 mdp5_kms->mmu = mmu;
631
632 mdp5_kms->id = msm_register_mmu(dev, mmu);
633 if (mdp5_kms->id < 0) {
634 ret = mdp5_kms->id;
635 dev_err(&pdev->dev, "failed to register mdp5 iommu: %d\n", ret);
610 goto fail; 636 goto fail;
611 } 637 }
612 638
613 mdp5_kms->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF"); 639 ret = modeset_init(mdp5_kms);
614 if (IS_ERR(mdp5_kms->vbif)) { 640 if (ret) {
615 ret = PTR_ERR(mdp5_kms->vbif); 641 dev_err(&pdev->dev, "modeset_init failed: %d\n", ret);
616 goto fail; 642 goto fail;
617 } 643 }
618 644
619 mdp5_kms->vdd = devm_regulator_get(&pdev->dev, "vdd"); 645 dev->mode_config.min_width = 0;
620 if (IS_ERR(mdp5_kms->vdd)) { 646 dev->mode_config.min_height = 0;
621 ret = PTR_ERR(mdp5_kms->vdd); 647 dev->mode_config.max_width = config->hw->lm.max_width;
648 dev->mode_config.max_height = config->hw->lm.max_height;
649
650 dev->driver->get_vblank_timestamp = mdp5_get_vblank_timestamp;
651 dev->driver->get_scanout_position = mdp5_get_scanoutpos;
652 dev->driver->get_vblank_counter = mdp5_get_vblank_counter;
653 dev->max_vblank_count = 0xffffffff;
654 dev->vblank_disable_immediate = true;
655
656 return kms;
657fail:
658 if (kms)
659 mdp5_kms_destroy(kms);
660 return ERR_PTR(ret);
661}
662
663static void mdp5_destroy(struct platform_device *pdev)
664{
665 struct mdp5_kms *mdp5_kms = platform_get_drvdata(pdev);
666
667 if (mdp5_kms->ctlm)
668 mdp5_ctlm_destroy(mdp5_kms->ctlm);
669 if (mdp5_kms->smp)
670 mdp5_smp_destroy(mdp5_kms->smp);
671 if (mdp5_kms->cfg)
672 mdp5_cfg_destroy(mdp5_kms->cfg);
673
674 if (mdp5_kms->rpm_enabled)
675 pm_runtime_disable(&pdev->dev);
676}
677
678static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
679{
680 struct msm_drm_private *priv = dev->dev_private;
681 struct mdp5_kms *mdp5_kms;
682 struct mdp5_cfg *config;
683 u32 major, minor;
684 int ret;
685
686 mdp5_kms = devm_kzalloc(&pdev->dev, sizeof(*mdp5_kms), GFP_KERNEL);
687 if (!mdp5_kms) {
688 ret = -ENOMEM;
622 goto fail; 689 goto fail;
623 } 690 }
624 691
625 ret = regulator_enable(mdp5_kms->vdd); 692 platform_set_drvdata(pdev, mdp5_kms);
626 if (ret) { 693
627 dev_err(dev->dev, "failed to enable regulator vdd: %d\n", ret); 694 spin_lock_init(&mdp5_kms->resource_lock);
695
696 mdp5_kms->dev = dev;
697 mdp5_kms->pdev = pdev;
698
699 mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5");
700 if (IS_ERR(mdp5_kms->mmio)) {
701 ret = PTR_ERR(mdp5_kms->mmio);
628 goto fail; 702 goto fail;
629 } 703 }
630 704
@@ -635,9 +709,6 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
635 ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface_clk", true); 709 ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface_clk", true);
636 if (ret) 710 if (ret)
637 goto fail; 711 goto fail;
638 ret = get_clk(pdev, &mdp5_kms->src_clk, "core_clk_src", true);
639 if (ret)
640 goto fail;
641 ret = get_clk(pdev, &mdp5_kms->core_clk, "core_clk", true); 712 ret = get_clk(pdev, &mdp5_kms->core_clk, "core_clk", true);
642 if (ret) 713 if (ret)
643 goto fail; 714 goto fail;
@@ -652,9 +723,12 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
652 * rate first, then figure out hw revision, and then set a 723 * rate first, then figure out hw revision, and then set a
653 * more optimal rate: 724 * more optimal rate:
654 */ 725 */
655 clk_set_rate(mdp5_kms->src_clk, 200000000); 726 clk_set_rate(mdp5_kms->core_clk, 200000000);
656 727
657 read_hw_revision(mdp5_kms, &major, &minor); 728 pm_runtime_enable(&pdev->dev);
729 mdp5_kms->rpm_enabled = true;
730
731 read_mdp_hw_revision(mdp5_kms, &major, &minor);
658 732
659 mdp5_kms->cfg = mdp5_cfg_init(mdp5_kms, major, minor); 733 mdp5_kms->cfg = mdp5_cfg_init(mdp5_kms, major, minor);
660 if (IS_ERR(mdp5_kms->cfg)) { 734 if (IS_ERR(mdp5_kms->cfg)) {
@@ -667,7 +741,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
667 mdp5_kms->caps = config->hw->mdp.caps; 741 mdp5_kms->caps = config->hw->mdp.caps;
668 742
669 /* TODO: compute core clock rate at runtime */ 743 /* TODO: compute core clock rate at runtime */
670 clk_set_rate(mdp5_kms->src_clk, config->hw->max_clk); 744 clk_set_rate(mdp5_kms->core_clk, config->hw->max_clk);
671 745
672 /* 746 /*
673 * Some chipsets have a Shared Memory Pool (SMP), while others 747 * Some chipsets have a Shared Memory Pool (SMP), while others
@@ -690,73 +764,76 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
690 goto fail; 764 goto fail;
691 } 765 }
692 766
693 /* make sure things are off before attaching iommu (bootloader could 767 /* set uninit-ed kms */
694 * have left things on, in which case we'll start getting faults if 768 priv->kms = &mdp5_kms->base.base;
695 * we don't disable):
696 */
697 mdp5_enable(mdp5_kms);
698 for (i = 0; i < MDP5_INTF_NUM_MAX; i++) {
699 if (mdp5_cfg_intf_is_virtual(config->hw->intf.connect[i]) ||
700 !config->hw->intf.base[i])
701 continue;
702 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);
703 769
704 mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(i), 0x3); 770 return 0;
705 } 771fail:
706 mdp5_disable(mdp5_kms); 772 mdp5_destroy(pdev);
707 mdelay(16); 773 return ret;
774}
708 775
709 if (config->platform.iommu) { 776static int mdp5_bind(struct device *dev, struct device *master, void *data)
710 mmu = msm_iommu_new(&pdev->dev, config->platform.iommu); 777{
711 if (IS_ERR(mmu)) { 778 struct drm_device *ddev = dev_get_drvdata(master);
712 ret = PTR_ERR(mmu); 779 struct platform_device *pdev = to_platform_device(dev);
713 dev_err(dev->dev, "failed to init iommu: %d\n", ret);
714 iommu_domain_free(config->platform.iommu);
715 goto fail;
716 }
717 780
718 ret = mmu->funcs->attach(mmu, iommu_ports, 781 DBG("");
719 ARRAY_SIZE(iommu_ports));
720 if (ret) {
721 dev_err(dev->dev, "failed to attach iommu: %d\n", ret);
722 mmu->funcs->destroy(mmu);
723 goto fail;
724 }
725 } else {
726 dev_info(dev->dev, "no iommu, fallback to phys "
727 "contig buffers for scanout\n");
728 mmu = NULL;
729 }
730 mdp5_kms->mmu = mmu;
731 782
732 mdp5_kms->id = msm_register_mmu(dev, mmu); 783 return mdp5_init(pdev, ddev);
733 if (mdp5_kms->id < 0) { 784}
734 ret = mdp5_kms->id;
735 dev_err(dev->dev, "failed to register mdp5 iommu: %d\n", ret);
736 goto fail;
737 }
738 785
739 ret = modeset_init(mdp5_kms); 786static void mdp5_unbind(struct device *dev, struct device *master,
740 if (ret) { 787 void *data)
741 dev_err(dev->dev, "modeset_init failed: %d\n", ret); 788{
742 goto fail; 789 struct platform_device *pdev = to_platform_device(dev);
743 }
744 790
745 dev->mode_config.min_width = 0; 791 mdp5_destroy(pdev);
746 dev->mode_config.min_height = 0; 792}
747 dev->mode_config.max_width = config->hw->lm.max_width;
748 dev->mode_config.max_height = config->hw->lm.max_height;
749 793
750 dev->driver->get_vblank_timestamp = mdp5_get_vblank_timestamp; 794static const struct component_ops mdp5_ops = {
751 dev->driver->get_scanout_position = mdp5_get_scanoutpos; 795 .bind = mdp5_bind,
752 dev->driver->get_vblank_counter = mdp5_get_vblank_counter; 796 .unbind = mdp5_unbind,
753 dev->max_vblank_count = 0xffffffff; 797};
754 dev->vblank_disable_immediate = true;
755 798
756 return kms; 799static int mdp5_dev_probe(struct platform_device *pdev)
800{
801 DBG("");
802 return component_add(&pdev->dev, &mdp5_ops);
803}
757 804
758fail: 805static int mdp5_dev_remove(struct platform_device *pdev)
759 if (kms) 806{
760 mdp5_destroy(kms); 807 DBG("");
761 return ERR_PTR(ret); 808 component_del(&pdev->dev, &mdp5_ops);
809 return 0;
810}
811
812static const struct of_device_id mdp5_dt_match[] = {
813 { .compatible = "qcom,mdp5", },
814 /* to support downstream DT files */
815 { .compatible = "qcom,mdss_mdp", },
816 {}
817};
818MODULE_DEVICE_TABLE(of, mdp5_dt_match);
819
820static struct platform_driver mdp5_driver = {
821 .probe = mdp5_dev_probe,
822 .remove = mdp5_dev_remove,
823 .driver = {
824 .name = "msm_mdp",
825 .of_match_table = mdp5_dt_match,
826 },
827};
828
829void __init msm_mdp_register(void)
830{
831 DBG("");
832 platform_driver_register(&mdp5_driver);
833}
834
835void __exit msm_mdp_unregister(void)
836{
837 DBG("");
838 platform_driver_unregister(&mdp5_driver);
762} 839}
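
The mdp5_kms.c rework makes MDP5 a platform driver bound through the component framework and splits bring-up in two: mdp5_init() runs at bind time (mmio, clocks, hw revision, cfg, SMP and CTL managers) and publishes the partially initialised kms via priv->kms, while mdp5_kms_init() finishes the job later (irq lookup, iommu attach, modeset_init(), mode_config limits). A condensed call-order sketch, assuming the msm_drv.c hunks further down:

    msm_pdev_probe()                      /* MDSS master device                 */
      component_master_add_with_match()
        msm_drm_init()
          msm_mdss_init()                 /* new MDSS wrapper, mdp5_mdss.c      */
          component_bind_all()
            mdp5_bind() -> mdp5_init()    /* allocates mdp5_kms, sets priv->kms */
          mdp5_kms_init()                 /* irq, iommu, modeset_init()         */
          kms->funcs->hw_init(), drm_irq_install(ddev, kms->irq)
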
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 9a25898239d3..03738927be10 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -31,6 +31,8 @@ struct mdp5_kms {
31 31
32 struct drm_device *dev; 32 struct drm_device *dev;
33 33
34 struct platform_device *pdev;
35
34 struct mdp5_cfg_handler *cfg; 36 struct mdp5_cfg_handler *cfg;
35 uint32_t caps; /* MDP capabilities (MDP_CAP_XXX bits) */ 37 uint32_t caps; /* MDP capabilities (MDP_CAP_XXX bits) */
36 38
@@ -43,29 +45,23 @@ struct mdp5_kms {
43 struct mdp5_ctl_manager *ctlm; 45 struct mdp5_ctl_manager *ctlm;
44 46
45 /* io/register spaces: */ 47 /* io/register spaces: */
46 void __iomem *mmio, *vbif; 48 void __iomem *mmio;
47
48 struct regulator *vdd;
49 49
50 struct clk *axi_clk; 50 struct clk *axi_clk;
51 struct clk *ahb_clk; 51 struct clk *ahb_clk;
52 struct clk *src_clk;
53 struct clk *core_clk; 52 struct clk *core_clk;
54 struct clk *lut_clk; 53 struct clk *lut_clk;
55 struct clk *vsync_clk; 54 struct clk *vsync_clk;
56 55
57 /* 56 /*
58 * lock to protect access to global resources: ie., following register: 57 * lock to protect access to global resources: ie., following register:
59 * - REG_MDP5_MDP_DISP_INTF_SEL 58 * - REG_MDP5_DISP_INTF_SEL
60 */ 59 */
61 spinlock_t resource_lock; 60 spinlock_t resource_lock;
62 61
63 struct mdp_irq error_handler; 62 bool rpm_enabled;
64 63
65 struct { 64 struct mdp_irq error_handler;
66 volatile unsigned long enabled_mask;
67 struct irq_domain *domain;
68 } irqcontroller;
69}; 65};
70#define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base) 66#define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base)
71 67
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c
new file mode 100644
index 000000000000..d444a6901fff
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_mdss.c
@@ -0,0 +1,235 @@
1/*
2 * Copyright (c) 2016, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License version 2 as published by
6 * the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program. If not, see <http://www.gnu.org/licenses/>.
15 */
16
17#include <linux/irqdomain.h>
18#include <linux/irq.h>
19
20#include "msm_drv.h"
21#include "mdp5_kms.h"
22
23/*
24 * If needed, this can become more specific: something like struct mdp5_mdss,
25 * which contains a 'struct msm_mdss base' member.
26 */
27struct msm_mdss {
28 struct drm_device *dev;
29
30 void __iomem *mmio, *vbif;
31
32 struct regulator *vdd;
33
34 struct {
35 volatile unsigned long enabled_mask;
36 struct irq_domain *domain;
37 } irqcontroller;
38};
39
40static inline void mdss_write(struct msm_mdss *mdss, u32 reg, u32 data)
41{
42 msm_writel(data, mdss->mmio + reg);
43}
44
45static inline u32 mdss_read(struct msm_mdss *mdss, u32 reg)
46{
47 return msm_readl(mdss->mmio + reg);
48}
49
50static irqreturn_t mdss_irq(int irq, void *arg)
51{
52 struct msm_mdss *mdss = arg;
53 u32 intr;
54
55 intr = mdss_read(mdss, REG_MDSS_HW_INTR_STATUS);
56
57 VERB("intr=%08x", intr);
58
59 while (intr) {
60 irq_hw_number_t hwirq = fls(intr) - 1;
61
62 generic_handle_irq(irq_find_mapping(
63 mdss->irqcontroller.domain, hwirq));
64 intr &= ~(1 << hwirq);
65 }
66
67 return IRQ_HANDLED;
68}
69
70/*
71 * interrupt-controller implementation, so sub-blocks (MDP/HDMI/eDP/DSI/etc)
72 * can register to get their irq's delivered
73 */
74
75#define VALID_IRQS (MDSS_HW_INTR_STATUS_INTR_MDP | \
76 MDSS_HW_INTR_STATUS_INTR_DSI0 | \
77 MDSS_HW_INTR_STATUS_INTR_DSI1 | \
78 MDSS_HW_INTR_STATUS_INTR_HDMI | \
79 MDSS_HW_INTR_STATUS_INTR_EDP)
80
81static void mdss_hw_mask_irq(struct irq_data *irqd)
82{
83 struct msm_mdss *mdss = irq_data_get_irq_chip_data(irqd);
84
85 smp_mb__before_atomic();
86 clear_bit(irqd->hwirq, &mdss->irqcontroller.enabled_mask);
87 smp_mb__after_atomic();
88}
89
90static void mdss_hw_unmask_irq(struct irq_data *irqd)
91{
92 struct msm_mdss *mdss = irq_data_get_irq_chip_data(irqd);
93
94 smp_mb__before_atomic();
95 set_bit(irqd->hwirq, &mdss->irqcontroller.enabled_mask);
96 smp_mb__after_atomic();
97}
98
99static struct irq_chip mdss_hw_irq_chip = {
100 .name = "mdss",
101 .irq_mask = mdss_hw_mask_irq,
102 .irq_unmask = mdss_hw_unmask_irq,
103};
104
105static int mdss_hw_irqdomain_map(struct irq_domain *d, unsigned int irq,
106 irq_hw_number_t hwirq)
107{
108 struct msm_mdss *mdss = d->host_data;
109
110 if (!(VALID_IRQS & (1 << hwirq)))
111 return -EPERM;
112
113 irq_set_chip_and_handler(irq, &mdss_hw_irq_chip, handle_level_irq);
114 irq_set_chip_data(irq, mdss);
115
116 return 0;
117}
118
119static struct irq_domain_ops mdss_hw_irqdomain_ops = {
120 .map = mdss_hw_irqdomain_map,
121 .xlate = irq_domain_xlate_onecell,
122};
123
124
125static int mdss_irq_domain_init(struct msm_mdss *mdss)
126{
127 struct device *dev = mdss->dev->dev;
128 struct irq_domain *d;
129
130 d = irq_domain_add_linear(dev->of_node, 32, &mdss_hw_irqdomain_ops,
131 mdss);
132 if (!d) {
133 dev_err(dev, "mdss irq domain add failed\n");
134 return -ENXIO;
135 }
136
137 mdss->irqcontroller.enabled_mask = 0;
138 mdss->irqcontroller.domain = d;
139
140 return 0;
141}
142
143void msm_mdss_destroy(struct drm_device *dev)
144{
145 struct msm_drm_private *priv = dev->dev_private;
146 struct msm_mdss *mdss = priv->mdss;
147
148 if (!mdss)
149 return;
150
151 irq_domain_remove(mdss->irqcontroller.domain);
152 mdss->irqcontroller.domain = NULL;
153
154 regulator_disable(mdss->vdd);
155
156 pm_runtime_put_sync(dev->dev);
157
158 pm_runtime_disable(dev->dev);
159}
160
161int msm_mdss_init(struct drm_device *dev)
162{
163 struct platform_device *pdev = dev->platformdev;
164 struct msm_drm_private *priv = dev->dev_private;
165 struct msm_mdss *mdss;
166 int ret;
167
168 DBG("");
169
170 if (!of_device_is_compatible(dev->dev->of_node, "qcom,mdss"))
171 return 0;
172
173 mdss = devm_kzalloc(dev->dev, sizeof(*mdss), GFP_KERNEL);
174 if (!mdss) {
175 ret = -ENOMEM;
176 goto fail;
177 }
178
179 mdss->dev = dev;
180
181 mdss->mmio = msm_ioremap(pdev, "mdss_phys", "MDSS");
182 if (IS_ERR(mdss->mmio)) {
183 ret = PTR_ERR(mdss->mmio);
184 goto fail;
185 }
186
187 mdss->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF");
188 if (IS_ERR(mdss->vbif)) {
189 ret = PTR_ERR(mdss->vbif);
190 goto fail;
191 }
192
193 /* Regulator to enable GDSCs in downstream kernels */
194 mdss->vdd = devm_regulator_get(dev->dev, "vdd");
195 if (IS_ERR(mdss->vdd)) {
196 ret = PTR_ERR(mdss->vdd);
197 goto fail;
198 }
199
200 ret = regulator_enable(mdss->vdd);
201 if (ret) {
202 dev_err(dev->dev, "failed to enable regulator vdd: %d\n",
203 ret);
204 goto fail;
205 }
206
207 ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0),
208 mdss_irq, 0, "mdss_isr", mdss);
209 if (ret) {
210 dev_err(dev->dev, "failed to init irq: %d\n", ret);
211 goto fail_irq;
212 }
213
214 ret = mdss_irq_domain_init(mdss);
215 if (ret) {
216 dev_err(dev->dev, "failed to init sub-block irqs: %d\n", ret);
217 goto fail_irq;
218 }
219
220 priv->mdss = mdss;
221
222 pm_runtime_enable(dev->dev);
223
224 /*
225 * TODO: This is needed as the MDSS GDSC is only tied to MDSS's power
226 * domain. Remove this once runtime PM is adapted for all the devices.
227 */
228 pm_runtime_get_sync(dev->dev);
229
230 return 0;
231fail_irq:
232 regulator_disable(mdss->vdd);
233fail:
234 return ret;
235}
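
The new MDSS wrapper owns both the top-level ISR and the irq domain that sub-blocks (now including MDP itself, per VALID_IRQS above) hang their interrupts off. A small worked example of the dispatch loop in mdss_irq(): fls() delivers the highest set bit first, so a status word with two bits set results in two generic_handle_irq() calls. Bit positions here are illustrative, not the real MDSS_HW_INTR_STATUS_* values:

    static void example_dispatch(u32 intr)
    {
            while (intr) {
                    irq_hw_number_t hwirq = fls(intr) - 1;

                    /* generic_handle_irq(irq_find_mapping(domain, hwirq)); */
                    intr &= ~(1 << hwirq);
            }
    }

    /* e.g. intr = 0x11: first hwirq = fls(0x11) - 1 = 4, then hwirq = 0 */
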
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
index 6f425c25d9fe..27d7b55b52c9 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
@@ -42,7 +42,7 @@
42 * 42 *
43 * configured: 43 * configured:
44 * The block is allocated to some client, and assigned to that 44 * The block is allocated to some client, and assigned to that
45 * client in MDP5_MDP_SMP_ALLOC registers. 45 * client in MDP5_SMP_ALLOC registers.
46 * 46 *
47 * inuse: 47 * inuse:
48 * The block is being actively used by a client. 48 * The block is being actively used by a client.
@@ -59,7 +59,7 @@
59 * mdp5_smp_commit. 59 * mdp5_smp_commit.
60 * 60 *
61 * 2) mdp5_smp_configure(): 61 * 2) mdp5_smp_configure():
62 * As hw is programmed, before FLUSH, MDP5_MDP_SMP_ALLOC registers 62 * As hw is programmed, before FLUSH, MDP5_SMP_ALLOC registers
63 * are configured for the union(pending, inuse) 63 * are configured for the union(pending, inuse)
64 * Current pending is copied to configured. 64 * Current pending is copied to configured.
65 * It is assumed that mdp5_smp_request and mdp5_smp_configure not run 65 * It is assumed that mdp5_smp_request and mdp5_smp_configure not run
@@ -311,25 +311,25 @@ static void update_smp_state(struct mdp5_smp *smp,
311 int idx = blk / 3; 311 int idx = blk / 3;
312 int fld = blk % 3; 312 int fld = blk % 3;
313 313
314 val = mdp5_read(mdp5_kms, REG_MDP5_MDP_SMP_ALLOC_W_REG(0, idx)); 314 val = mdp5_read(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx));
315 315
316 switch (fld) { 316 switch (fld) {
317 case 0: 317 case 0:
318 val &= ~MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0__MASK; 318 val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT0__MASK;
319 val |= MDP5_MDP_SMP_ALLOC_W_REG_CLIENT0(cid); 319 val |= MDP5_SMP_ALLOC_W_REG_CLIENT0(cid);
320 break; 320 break;
321 case 1: 321 case 1:
322 val &= ~MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1__MASK; 322 val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT1__MASK;
323 val |= MDP5_MDP_SMP_ALLOC_W_REG_CLIENT1(cid); 323 val |= MDP5_SMP_ALLOC_W_REG_CLIENT1(cid);
324 break; 324 break;
325 case 2: 325 case 2:
326 val &= ~MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2__MASK; 326 val &= ~MDP5_SMP_ALLOC_W_REG_CLIENT2__MASK;
327 val |= MDP5_MDP_SMP_ALLOC_W_REG_CLIENT2(cid); 327 val |= MDP5_SMP_ALLOC_W_REG_CLIENT2(cid);
328 break; 328 break;
329 } 329 }
330 330
331 mdp5_write(mdp5_kms, REG_MDP5_MDP_SMP_ALLOC_W_REG(0, idx), val); 331 mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_W_REG(idx), val);
332 mdp5_write(mdp5_kms, REG_MDP5_MDP_SMP_ALLOC_R_REG(0, idx), val); 332 mdp5_write(mdp5_kms, REG_MDP5_SMP_ALLOC_R_REG(idx), val);
333 } 333 }
334} 334}
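
The SMP_ALLOC rename aside, update_smp_state() above still packs three client ids into each ALLOC register, selecting the register and the field from the block number. A helper sketch of that mapping (e.g. block 7 lands in register 2, field CLIENT1):

    static void smp_alloc_pos(u32 blk, int *idx, int *fld)
    {
            *idx = blk / 3;         /* which SMP_ALLOC_W/R register */
            *fld = blk % 3;         /* CLIENT0/1/2 field inside it  */
    }
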
335 335
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index e3892c263f27..4a8a6f1f1151 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -84,17 +84,12 @@ static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
84 struct drm_atomic_state *old_state) 84 struct drm_atomic_state *old_state)
85{ 85{
86 struct drm_crtc *crtc; 86 struct drm_crtc *crtc;
87 struct drm_crtc_state *crtc_state;
87 struct msm_drm_private *priv = old_state->dev->dev_private; 88 struct msm_drm_private *priv = old_state->dev->dev_private;
88 struct msm_kms *kms = priv->kms; 89 struct msm_kms *kms = priv->kms;
89 int ncrtcs = old_state->dev->mode_config.num_crtc;
90 int i; 90 int i;
91 91
92 for (i = 0; i < ncrtcs; i++) { 92 for_each_crtc_in_state(old_state, crtc, crtc_state, i) {
93 crtc = old_state->crtcs[i];
94
95 if (!crtc)
96 continue;
97
98 if (!crtc->state->enable) 93 if (!crtc->state->enable)
99 continue; 94 continue;
100 95
@@ -192,9 +187,11 @@ int msm_atomic_commit(struct drm_device *dev,
192 struct drm_atomic_state *state, bool nonblock) 187 struct drm_atomic_state *state, bool nonblock)
193{ 188{
194 struct msm_drm_private *priv = dev->dev_private; 189 struct msm_drm_private *priv = dev->dev_private;
195 int nplanes = dev->mode_config.num_total_plane;
196 int ncrtcs = dev->mode_config.num_crtc;
197 struct msm_commit *c; 190 struct msm_commit *c;
191 struct drm_crtc *crtc;
192 struct drm_crtc_state *crtc_state;
193 struct drm_plane *plane;
194 struct drm_plane_state *plane_state;
198 int i, ret; 195 int i, ret;
199 196
200 ret = drm_atomic_helper_prepare_planes(dev, state); 197 ret = drm_atomic_helper_prepare_planes(dev, state);
@@ -210,28 +207,18 @@ int msm_atomic_commit(struct drm_device *dev,
210 /* 207 /*
211 * Figure out what crtcs we have: 208 * Figure out what crtcs we have:
212 */ 209 */
213 for (i = 0; i < ncrtcs; i++) { 210 for_each_crtc_in_state(state, crtc, crtc_state, i)
214 struct drm_crtc *crtc = state->crtcs[i]; 211 c->crtc_mask |= drm_crtc_mask(crtc);
215 if (!crtc)
216 continue;
217 c->crtc_mask |= (1 << drm_crtc_index(crtc));
218 }
219 212
220 /* 213 /*
221 * Figure out what fence to wait for: 214 * Figure out what fence to wait for:
222 */ 215 */
223 for (i = 0; i < nplanes; i++) { 216 for_each_plane_in_state(state, plane, plane_state, i) {
224 struct drm_plane *plane = state->planes[i]; 217 if ((plane->state->fb != plane_state->fb) && plane_state->fb) {
225 struct drm_plane_state *new_state = state->plane_states[i]; 218 struct drm_gem_object *obj = msm_framebuffer_bo(plane_state->fb, 0);
226
227 if (!plane)
228 continue;
229
230 if ((plane->state->fb != new_state->fb) && new_state->fb) {
231 struct drm_gem_object *obj = msm_framebuffer_bo(new_state->fb, 0);
232 struct msm_gem_object *msm_obj = to_msm_bo(obj); 219 struct msm_gem_object *msm_obj = to_msm_bo(obj);
233 220
234 new_state->fence = reservation_object_get_excl_rcu(msm_obj->resv); 221 plane_state->fence = reservation_object_get_excl_rcu(msm_obj->resv);
235 } 222 }
236 } 223 }
237 224
@@ -251,7 +238,7 @@ int msm_atomic_commit(struct drm_device *dev,
251 * the software side now. 238 * the software side now.
252 */ 239 */
253 240
254 drm_atomic_helper_swap_state(dev, state); 241 drm_atomic_helper_swap_state(state, true);
255 242
256 /* 243 /*
257 * Everything below can be run asynchronously without the need to grab 244 * Everything below can be run asynchronously without the need to grab
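
msm_atomic.c moves to the v4.8 state iterators; plane_state/crtc_state obtained this way are the new states carried in the atomic update, while plane->state and crtc->state keep pointing at the old ones until drm_atomic_helper_swap_state(state, true) runs (its signature also changed from taking dev to taking a stall flag). A minimal sketch of the crtc-mask computation from the hunk above:

    static uint32_t example_crtc_mask(struct drm_atomic_state *state)
    {
            struct drm_crtc *crtc;
            struct drm_crtc_state *crtc_state;
            uint32_t mask = 0;
            int i;

            for_each_crtc_in_state(state, crtc, crtc_state, i)
                    mask |= drm_crtc_mask(crtc);

            return mask;
    }
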
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 9c654092ef78..26f859ec24b3 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -21,6 +21,16 @@
21#include "msm_gpu.h" 21#include "msm_gpu.h"
22#include "msm_kms.h" 22#include "msm_kms.h"
23 23
24
25/*
26 * MSM driver version:
27 * - 1.0.0 - initial interface
28 * - 1.1.0 - adds madvise, and support for submits with > 4 cmd buffers
29 */
30#define MSM_VERSION_MAJOR 1
31#define MSM_VERSION_MINOR 1
32#define MSM_VERSION_PATCHLEVEL 0
33
24static void msm_fb_output_poll_changed(struct drm_device *dev) 34static void msm_fb_output_poll_changed(struct drm_device *dev)
25{ 35{
26 struct msm_drm_private *priv = dev->dev_private; 36 struct msm_drm_private *priv = dev->dev_private;
@@ -195,9 +205,9 @@ static int msm_drm_uninit(struct device *dev)
195 kfree(vbl_ev); 205 kfree(vbl_ev);
196 } 206 }
197 207
198 drm_kms_helper_poll_fini(ddev); 208 msm_gem_shrinker_cleanup(ddev);
199 209
200 drm_connector_unregister_all(ddev); 210 drm_kms_helper_poll_fini(ddev);
201 211
202 drm_dev_unregister(ddev); 212 drm_dev_unregister(ddev);
203 213
@@ -217,10 +227,8 @@ static int msm_drm_uninit(struct device *dev)
217 flush_workqueue(priv->atomic_wq); 227 flush_workqueue(priv->atomic_wq);
218 destroy_workqueue(priv->atomic_wq); 228 destroy_workqueue(priv->atomic_wq);
219 229
220 if (kms) { 230 if (kms)
221 pm_runtime_disable(dev);
222 kms->funcs->destroy(kms); 231 kms->funcs->destroy(kms);
223 }
224 232
225 if (gpu) { 233 if (gpu) {
226 mutex_lock(&ddev->struct_mutex); 234 mutex_lock(&ddev->struct_mutex);
@@ -239,6 +247,8 @@ static int msm_drm_uninit(struct device *dev)
239 247
240 component_unbind_all(dev, ddev); 248 component_unbind_all(dev, ddev);
241 249
250 msm_mdss_destroy(ddev);
251
242 ddev->dev_private = NULL; 252 ddev->dev_private = NULL;
243 drm_dev_unref(ddev); 253 drm_dev_unref(ddev);
244 254
@@ -284,6 +294,7 @@ static int msm_init_vram(struct drm_device *dev)
284 if (node) { 294 if (node) {
285 struct resource r; 295 struct resource r;
286 ret = of_address_to_resource(node, 0, &r); 296 ret = of_address_to_resource(node, 0, &r);
297 of_node_put(node);
287 if (ret) 298 if (ret)
288 return ret; 299 return ret;
289 size = r.end - r.start; 300 size = r.end - r.start;
@@ -352,6 +363,14 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
352 } 363 }
353 364
354 ddev->dev_private = priv; 365 ddev->dev_private = priv;
366 priv->dev = ddev;
367
368 ret = msm_mdss_init(ddev);
369 if (ret) {
370 kfree(priv);
371 drm_dev_unref(ddev);
372 return ret;
373 }
355 374
356 priv->wq = alloc_ordered_workqueue("msm", 0); 375 priv->wq = alloc_ordered_workqueue("msm", 0);
357 priv->atomic_wq = alloc_ordered_workqueue("msm:atomic", 0); 376 priv->atomic_wq = alloc_ordered_workqueue("msm:atomic", 0);
@@ -367,6 +386,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
367 /* Bind all our sub-components: */ 386 /* Bind all our sub-components: */
368 ret = component_bind_all(dev, ddev); 387 ret = component_bind_all(dev, ddev);
369 if (ret) { 388 if (ret) {
389 msm_mdss_destroy(ddev);
370 kfree(priv); 390 kfree(priv);
371 drm_dev_unref(ddev); 391 drm_dev_unref(ddev);
372 return ret; 392 return ret;
@@ -376,9 +396,12 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
376 if (ret) 396 if (ret)
377 goto fail; 397 goto fail;
378 398
399 msm_gem_shrinker_init(ddev);
400
379 switch (get_mdp_ver(pdev)) { 401 switch (get_mdp_ver(pdev)) {
380 case 4: 402 case 4:
381 kms = mdp4_kms_init(ddev); 403 kms = mdp4_kms_init(ddev);
404 priv->kms = kms;
382 break; 405 break;
383 case 5: 406 case 5:
384 kms = mdp5_kms_init(ddev); 407 kms = mdp5_kms_init(ddev);
@@ -400,10 +423,7 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
400 goto fail; 423 goto fail;
401 } 424 }
402 425
403 priv->kms = kms;
404
405 if (kms) { 426 if (kms) {
406 pm_runtime_enable(dev);
407 ret = kms->funcs->hw_init(kms); 427 ret = kms->funcs->hw_init(kms);
408 if (ret) { 428 if (ret) {
409 dev_err(dev, "kms hw init failed: %d\n", ret); 429 dev_err(dev, "kms hw init failed: %d\n", ret);
@@ -419,24 +439,20 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
419 goto fail; 439 goto fail;
420 } 440 }
421 441
422 pm_runtime_get_sync(dev); 442 if (kms) {
423 ret = drm_irq_install(ddev, platform_get_irq(pdev, 0)); 443 pm_runtime_get_sync(dev);
424 pm_runtime_put_sync(dev); 444 ret = drm_irq_install(ddev, kms->irq);
425 if (ret < 0) { 445 pm_runtime_put_sync(dev);
426 dev_err(dev, "failed to install IRQ handler\n"); 446 if (ret < 0) {
427 goto fail; 447 dev_err(dev, "failed to install IRQ handler\n");
448 goto fail;
449 }
428 } 450 }
429 451
430 ret = drm_dev_register(ddev, 0); 452 ret = drm_dev_register(ddev, 0);
431 if (ret) 453 if (ret)
432 goto fail; 454 goto fail;
433 455
434 ret = drm_connector_register_all(ddev);
435 if (ret) {
436 dev_err(dev, "failed to register connectors\n");
437 goto fail;
438 }
439
440 drm_mode_config_reset(ddev); 456 drm_mode_config_reset(ddev);
441 457
442#ifdef CONFIG_DRM_FBDEV_EMULATION 458#ifdef CONFIG_DRM_FBDEV_EMULATION
@@ -690,6 +706,44 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
690 return msm_wait_fence(priv->gpu->fctx, args->fence, &timeout, true); 706 return msm_wait_fence(priv->gpu->fctx, args->fence, &timeout, true);
691} 707}
692 708
709static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
710 struct drm_file *file)
711{
712 struct drm_msm_gem_madvise *args = data;
713 struct drm_gem_object *obj;
714 int ret;
715
716 switch (args->madv) {
717 case MSM_MADV_DONTNEED:
718 case MSM_MADV_WILLNEED:
719 break;
720 default:
721 return -EINVAL;
722 }
723
724 ret = mutex_lock_interruptible(&dev->struct_mutex);
725 if (ret)
726 return ret;
727
728 obj = drm_gem_object_lookup(file, args->handle);
729 if (!obj) {
730 ret = -ENOENT;
731 goto unlock;
732 }
733
734 ret = msm_gem_madvise(obj, args->madv);
735 if (ret >= 0) {
736 args->retained = ret;
737 ret = 0;
738 }
739
740 drm_gem_object_unreference(obj);
741
742unlock:
743 mutex_unlock(&dev->struct_mutex);
744 return ret;
745}
746
693static const struct drm_ioctl_desc msm_ioctls[] = { 747static const struct drm_ioctl_desc msm_ioctls[] = {
694 DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_AUTH|DRM_RENDER_ALLOW), 748 DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_AUTH|DRM_RENDER_ALLOW),
695 DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_AUTH|DRM_RENDER_ALLOW), 749 DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_AUTH|DRM_RENDER_ALLOW),
@@ -698,6 +752,7 @@ static const struct drm_ioctl_desc msm_ioctls[] = {
698 DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW), 752 DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
699 DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_AUTH|DRM_RENDER_ALLOW), 753 DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_AUTH|DRM_RENDER_ALLOW),
700 DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_AUTH|DRM_RENDER_ALLOW), 754 DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_AUTH|DRM_RENDER_ALLOW),
755 DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE, msm_ioctl_gem_madvise, DRM_AUTH|DRM_RENDER_ALLOW),
701}; 756};
702 757
703static const struct vm_operations_struct vm_ops = { 758static const struct vm_operations_struct vm_ops = {
@@ -730,7 +785,6 @@ static struct drm_driver msm_driver = {
730 .open = msm_open, 785 .open = msm_open,
731 .preclose = msm_preclose, 786 .preclose = msm_preclose,
732 .lastclose = msm_lastclose, 787 .lastclose = msm_lastclose,
733 .set_busid = drm_platform_set_busid,
734 .irq_handler = msm_irq, 788 .irq_handler = msm_irq,
735 .irq_preinstall = msm_irq_preinstall, 789 .irq_preinstall = msm_irq_preinstall,
736 .irq_postinstall = msm_irq_postinstall, 790 .irq_postinstall = msm_irq_postinstall,
@@ -764,8 +818,9 @@ static struct drm_driver msm_driver = {
764 .name = "msm", 818 .name = "msm",
765 .desc = "MSM Snapdragon DRM", 819 .desc = "MSM Snapdragon DRM",
766 .date = "20130625", 820 .date = "20130625",
767 .major = 1, 821 .major = MSM_VERSION_MAJOR,
768 .minor = 0, 822 .minor = MSM_VERSION_MINOR,
823 .patchlevel = MSM_VERSION_PATCHLEVEL,
769}; 824};
770 825
771#ifdef CONFIG_PM_SLEEP 826#ifdef CONFIG_PM_SLEEP
@@ -805,22 +860,146 @@ static int compare_of(struct device *dev, void *data)
805 return dev->of_node == data; 860 return dev->of_node == data;
806} 861}
807 862
808static int add_components(struct device *dev, struct component_match **matchptr, 863/*
809 const char *name) 864 * Identify what components need to be added by parsing what remote-endpoints
865 * our MDP output ports are connected to. In the case of LVDS on MDP4, there
866 * is no external component that we need to add since LVDS is within MDP4
867 * itself.
868 */
869static int add_components_mdp(struct device *mdp_dev,
870 struct component_match **matchptr)
810{ 871{
811 struct device_node *np = dev->of_node; 872 struct device_node *np = mdp_dev->of_node;
812 unsigned i; 873 struct device_node *ep_node;
874 struct device *master_dev;
875
876 /*
877 * on MDP4 based platforms, the MDP platform device is the component
878 * master that adds other display interface components to itself.
879 *
880 * on MDP5 based platforms, the MDSS platform device is the component
881 * master that adds MDP5 and other display interface components to
882 * itself.
883 */
884 if (of_device_is_compatible(np, "qcom,mdp4"))
885 master_dev = mdp_dev;
886 else
887 master_dev = mdp_dev->parent;
813 888
814 for (i = 0; ; i++) { 889 for_each_endpoint_of_node(np, ep_node) {
815 struct device_node *node; 890 struct device_node *intf;
891 struct of_endpoint ep;
892 int ret;
816 893
817 node = of_parse_phandle(np, name, i); 894 ret = of_graph_parse_endpoint(ep_node, &ep);
818 if (!node) 895 if (ret) {
819 break; 896 dev_err(mdp_dev, "unable to parse port endpoint\n");
897 of_node_put(ep_node);
898 return ret;
899 }
900
901 /*
 902	 * The LCDC/LVDS port on MDP4 is a special case where the
903 * remote-endpoint isn't a component that we need to add
904 */
905 if (of_device_is_compatible(np, "qcom,mdp4") &&
906 ep.port == 0) {
907 of_node_put(ep_node);
908 continue;
909 }
910
911 /*
912 * It's okay if some of the ports don't have a remote endpoint
913 * specified. It just means that the port isn't connected to
914 * any external interface.
915 */
916 intf = of_graph_get_remote_port_parent(ep_node);
917 if (!intf) {
918 of_node_put(ep_node);
919 continue;
920 }
921
922 component_match_add(master_dev, matchptr, compare_of, intf);
923
924 of_node_put(intf);
925 of_node_put(ep_node);
926 }
927
928 return 0;
929}
930
931static int compare_name_mdp(struct device *dev, void *data)
932{
933 return (strstr(dev_name(dev), "mdp") != NULL);
934}
935
936static int add_display_components(struct device *dev,
937 struct component_match **matchptr)
938{
939 struct device *mdp_dev;
940 int ret;
941
942 /*
943 * MDP5 based devices don't have a flat hierarchy. There is a top level
944 * parent: MDSS, and children: MDP5, DSI, HDMI, eDP etc. Populate the
945 * children devices, find the MDP5 node, and then add the interfaces
946 * to our components list.
947 */
948 if (of_device_is_compatible(dev->of_node, "qcom,mdss")) {
949 ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
950 if (ret) {
951 dev_err(dev, "failed to populate children devices\n");
952 return ret;
953 }
954
955 mdp_dev = device_find_child(dev, NULL, compare_name_mdp);
956 if (!mdp_dev) {
957 dev_err(dev, "failed to find MDSS MDP node\n");
958 of_platform_depopulate(dev);
959 return -ENODEV;
960 }
961
962 put_device(mdp_dev);
820 963
821 component_match_add(dev, matchptr, compare_of, node); 964 /* add the MDP component itself */
965 component_match_add(dev, matchptr, compare_of,
966 mdp_dev->of_node);
967 } else {
968 /* MDP4 */
969 mdp_dev = dev;
822 } 970 }
823 971
972 ret = add_components_mdp(mdp_dev, matchptr);
973 if (ret)
974 of_platform_depopulate(dev);
975
976 return ret;
977}
978
979/*
980 * We don't know what's the best binding to link the gpu with the drm device.
 981 * For now, we just hunt for all the possible gpus that we support, and add them
982 * as components.
983 */
984static const struct of_device_id msm_gpu_match[] = {
985 { .compatible = "qcom,adreno-3xx" },
986 { .compatible = "qcom,kgsl-3d0" },
987 { },
988};
989
990static int add_gpu_components(struct device *dev,
991 struct component_match **matchptr)
992{
993 struct device_node *np;
994
995 np = of_find_matching_node(NULL, msm_gpu_match);
996 if (!np)
997 return 0;
998
999 component_match_add(dev, matchptr, compare_of, np);
1000
1001 of_node_put(np);
1002
824 return 0; 1003 return 0;
825} 1004}
826 1005
@@ -846,9 +1025,15 @@ static const struct component_master_ops msm_drm_ops = {
846static int msm_pdev_probe(struct platform_device *pdev) 1025static int msm_pdev_probe(struct platform_device *pdev)
847{ 1026{
848 struct component_match *match = NULL; 1027 struct component_match *match = NULL;
1028 int ret;
849 1029
850 add_components(&pdev->dev, &match, "connectors"); 1030 ret = add_display_components(&pdev->dev, &match);
851 add_components(&pdev->dev, &match, "gpus"); 1031 if (ret)
1032 return ret;
1033
1034 ret = add_gpu_components(&pdev->dev, &match);
1035 if (ret)
1036 return ret;
852 1037
853 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); 1038 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
854 return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match); 1039 return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
@@ -857,20 +1042,14 @@ static int msm_pdev_probe(struct platform_device *pdev)
857static int msm_pdev_remove(struct platform_device *pdev) 1042static int msm_pdev_remove(struct platform_device *pdev)
858{ 1043{
859 component_master_del(&pdev->dev, &msm_drm_ops); 1044 component_master_del(&pdev->dev, &msm_drm_ops);
1045 of_platform_depopulate(&pdev->dev);
860 1046
861 return 0; 1047 return 0;
862} 1048}
863 1049
864static const struct platform_device_id msm_id[] = {
865 { "mdp", 0 },
866 { }
867};
868
869static const struct of_device_id dt_match[] = { 1050static const struct of_device_id dt_match[] = {
870 { .compatible = "qcom,mdp4", .data = (void *) 4 }, /* mdp4 */ 1051 { .compatible = "qcom,mdp4", .data = (void *)4 }, /* MDP4 */
871 { .compatible = "qcom,mdp5", .data = (void *) 5 }, /* mdp5 */ 1052 { .compatible = "qcom,mdss", .data = (void *)5 }, /* MDP5 MDSS */
872 /* to support downstream DT files */
873 { .compatible = "qcom,mdss_mdp", .data = (void *) 5 }, /* mdp5 */
874 {} 1053 {}
875}; 1054};
876MODULE_DEVICE_TABLE(of, dt_match); 1055MODULE_DEVICE_TABLE(of, dt_match);
@@ -883,12 +1062,12 @@ static struct platform_driver msm_platform_driver = {
883 .of_match_table = dt_match, 1062 .of_match_table = dt_match,
884 .pm = &msm_pm_ops, 1063 .pm = &msm_pm_ops,
885 }, 1064 },
886 .id_table = msm_id,
887}; 1065};
888 1066
889static int __init msm_drm_register(void) 1067static int __init msm_drm_register(void)
890{ 1068{
891 DBG("init"); 1069 DBG("init");
1070 msm_mdp_register();
892 msm_dsi_register(); 1071 msm_dsi_register();
893 msm_edp_register(); 1072 msm_edp_register();
894 msm_hdmi_register(); 1073 msm_hdmi_register();
@@ -904,6 +1083,7 @@ static void __exit msm_drm_unregister(void)
904 adreno_unregister(); 1083 adreno_unregister();
905 msm_edp_unregister(); 1084 msm_edp_unregister();
906 msm_dsi_unregister(); 1085 msm_dsi_unregister();
1086 msm_mdp_unregister();
907} 1087}
908 1088
909module_init(msm_drm_register); 1089module_init(msm_drm_register);
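The probe path above no longer carries a platform_device id table; everything is driven by the two helpers that build a component_match (display components walked from the DT graph, GPU components from msm_gpu_match) before registering the aggregate driver. A minimal sketch of that pattern, reusing only calls visible in this diff; compare_of is the driver's own match callback, while example_probe and example_master_ops are illustrative names and not part of the patch:

	static int example_probe(struct platform_device *pdev)
	{
		struct component_match *match = NULL;
		struct device_node *np;

		/* queue a match entry for each sub-device node we expect to bind */
		for_each_available_child_of_node(pdev->dev.of_node, np)
			component_match_add(&pdev->dev, &match, compare_of, np);

		/* the master's bind() only runs once every queued match has probed */
		return component_master_add_with_match(&pdev->dev, &example_master_ops,
						       match);
	}

Building the full match list up front is what lets probe defer cleanly until late-probing pieces such as the GPU, DSI or HDMI blocks appear.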
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 5b2963f32291..b4bc7f1ef717 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -46,6 +46,7 @@
46struct msm_kms; 46struct msm_kms;
47struct msm_gpu; 47struct msm_gpu;
48struct msm_mmu; 48struct msm_mmu;
49struct msm_mdss;
49struct msm_rd_state; 50struct msm_rd_state;
50struct msm_perf_state; 51struct msm_perf_state;
51struct msm_gem_submit; 52struct msm_gem_submit;
@@ -77,11 +78,16 @@ struct msm_vblank_ctrl {
77 78
78struct msm_drm_private { 79struct msm_drm_private {
79 80
81 struct drm_device *dev;
82
80 struct msm_kms *kms; 83 struct msm_kms *kms;
81 84
82 /* subordinate devices, if present: */ 85 /* subordinate devices, if present: */
83 struct platform_device *gpu_pdev; 86 struct platform_device *gpu_pdev;
84 87
88 /* top level MDSS wrapper device (for MDP5 only) */
89 struct msm_mdss *mdss;
90
85 /* possibly this should be in the kms component, but it is 91 /* possibly this should be in the kms component, but it is
86 * shared by both mdp4 and mdp5.. 92 * shared by both mdp4 and mdp5..
87 */ 93 */
@@ -147,6 +153,9 @@ struct msm_drm_private {
147 struct drm_mm mm; 153 struct drm_mm mm;
148 } vram; 154 } vram;
149 155
156 struct notifier_block vmap_notifier;
157 struct shrinker shrinker;
158
150 struct msm_vblank_ctrl vblank_ctrl; 159 struct msm_vblank_ctrl vblank_ctrl;
151}; 160};
152 161
@@ -165,6 +174,9 @@ void msm_gem_submit_free(struct msm_gem_submit *submit);
165int msm_ioctl_gem_submit(struct drm_device *dev, void *data, 174int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
166 struct drm_file *file); 175 struct drm_file *file);
167 176
177void msm_gem_shrinker_init(struct drm_device *dev);
178void msm_gem_shrinker_cleanup(struct drm_device *dev);
179
168int msm_gem_mmap_obj(struct drm_gem_object *obj, 180int msm_gem_mmap_obj(struct drm_gem_object *obj,
169 struct vm_area_struct *vma); 181 struct vm_area_struct *vma);
170int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma); 182int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
@@ -189,8 +201,13 @@ struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
189 struct dma_buf_attachment *attach, struct sg_table *sg); 201 struct dma_buf_attachment *attach, struct sg_table *sg);
190int msm_gem_prime_pin(struct drm_gem_object *obj); 202int msm_gem_prime_pin(struct drm_gem_object *obj);
191void msm_gem_prime_unpin(struct drm_gem_object *obj); 203void msm_gem_prime_unpin(struct drm_gem_object *obj);
192void *msm_gem_vaddr_locked(struct drm_gem_object *obj); 204void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj);
193void *msm_gem_vaddr(struct drm_gem_object *obj); 205void *msm_gem_get_vaddr(struct drm_gem_object *obj);
206void msm_gem_put_vaddr_locked(struct drm_gem_object *obj);
207void msm_gem_put_vaddr(struct drm_gem_object *obj);
208int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv);
209void msm_gem_purge(struct drm_gem_object *obj);
210void msm_gem_vunmap(struct drm_gem_object *obj);
194int msm_gem_sync_object(struct drm_gem_object *obj, 211int msm_gem_sync_object(struct drm_gem_object *obj,
195 struct msm_fence_context *fctx, bool exclusive); 212 struct msm_fence_context *fctx, bool exclusive);
196void msm_gem_move_to_active(struct drm_gem_object *obj, 213void msm_gem_move_to_active(struct drm_gem_object *obj,
@@ -257,6 +274,9 @@ static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi,
257} 274}
258#endif 275#endif
259 276
277void __init msm_mdp_register(void);
278void __exit msm_mdp_unregister(void);
279
260#ifdef CONFIG_DEBUG_FS 280#ifdef CONFIG_DEBUG_FS
261void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m); 281void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
262void msm_gem_describe_objects(struct list_head *list, struct seq_file *m); 282void msm_gem_describe_objects(struct list_head *list, struct seq_file *m);
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 461dc8b873f0..95cf8fe72ee5 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -49,24 +49,16 @@ static void msm_framebuffer_destroy(struct drm_framebuffer *fb)
49 49
50 for (i = 0; i < n; i++) { 50 for (i = 0; i < n; i++) {
51 struct drm_gem_object *bo = msm_fb->planes[i]; 51 struct drm_gem_object *bo = msm_fb->planes[i];
52 if (bo) 52
53 drm_gem_object_unreference_unlocked(bo); 53 drm_gem_object_unreference_unlocked(bo);
54 } 54 }
55 55
56 kfree(msm_fb); 56 kfree(msm_fb);
57} 57}
58 58
59static int msm_framebuffer_dirty(struct drm_framebuffer *fb,
60 struct drm_file *file_priv, unsigned flags, unsigned color,
61 struct drm_clip_rect *clips, unsigned num_clips)
62{
63 return 0;
64}
65
66static const struct drm_framebuffer_funcs msm_framebuffer_funcs = { 59static const struct drm_framebuffer_funcs msm_framebuffer_funcs = {
67 .create_handle = msm_framebuffer_create_handle, 60 .create_handle = msm_framebuffer_create_handle,
68 .destroy = msm_framebuffer_destroy, 61 .destroy = msm_framebuffer_destroy,
69 .dirty = msm_framebuffer_dirty,
70}; 62};
71 63
72#ifdef CONFIG_DEBUG_FS 64#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index c6cf837c5193..ffd4a338ca12 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -158,7 +158,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
158 158
159 dev->mode_config.fb_base = paddr; 159 dev->mode_config.fb_base = paddr;
160 160
161 fbi->screen_base = msm_gem_vaddr_locked(fbdev->bo); 161 fbi->screen_base = msm_gem_get_vaddr_locked(fbdev->bo);
162 if (IS_ERR(fbi->screen_base)) { 162 if (IS_ERR(fbi->screen_base)) {
163 ret = PTR_ERR(fbi->screen_base); 163 ret = PTR_ERR(fbi->screen_base);
164 goto fail_unlock; 164 goto fail_unlock;
@@ -188,21 +188,7 @@ fail:
188 return ret; 188 return ret;
189} 189}
190 190
191static void msm_crtc_fb_gamma_set(struct drm_crtc *crtc,
192 u16 red, u16 green, u16 blue, int regno)
193{
194 DBG("fbdev: set gamma");
195}
196
197static void msm_crtc_fb_gamma_get(struct drm_crtc *crtc,
198 u16 *red, u16 *green, u16 *blue, int regno)
199{
200 DBG("fbdev: get gamma");
201}
202
203static const struct drm_fb_helper_funcs msm_fb_helper_funcs = { 191static const struct drm_fb_helper_funcs msm_fb_helper_funcs = {
204 .gamma_set = msm_crtc_fb_gamma_set,
205 .gamma_get = msm_crtc_fb_gamma_get,
206 .fb_probe = msm_fbdev_create, 192 .fb_probe = msm_fbdev_create,
207}; 193};
208 194
@@ -265,6 +251,7 @@ void msm_fbdev_free(struct drm_device *dev)
265 251
266 /* this will free the backing object */ 252 /* this will free the backing object */
267 if (fbdev->fb) { 253 if (fbdev->fb) {
254 msm_gem_put_vaddr(fbdev->bo);
268 drm_framebuffer_unregister_private(fbdev->fb); 255 drm_framebuffer_unregister_private(fbdev->fb);
269 drm_framebuffer_remove(fbdev->fb); 256 drm_framebuffer_remove(fbdev->fb);
270 } 257 }
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 69836f5685b1..6cd4af443139 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -276,6 +276,26 @@ uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
276 return offset; 276 return offset;
277} 277}
278 278
279static void
280put_iova(struct drm_gem_object *obj)
281{
282 struct drm_device *dev = obj->dev;
283 struct msm_drm_private *priv = obj->dev->dev_private;
284 struct msm_gem_object *msm_obj = to_msm_bo(obj);
285 int id;
286
287 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
288
289 for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
290 struct msm_mmu *mmu = priv->mmus[id];
291 if (mmu && msm_obj->domain[id].iova) {
292 uint32_t offset = msm_obj->domain[id].iova;
293 mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
294 msm_obj->domain[id].iova = 0;
295 }
296 }
297}
298
279/* should be called under struct_mutex.. although it can be called 299/* should be called under struct_mutex.. although it can be called
280 * from atomic context without struct_mutex to acquire an extra 300 * from atomic context without struct_mutex to acquire an extra
281 * iova ref if you know one is already held. 301 * iova ref if you know one is already held.
@@ -388,7 +408,7 @@ fail:
388 return ret; 408 return ret;
389} 409}
390 410
391void *msm_gem_vaddr_locked(struct drm_gem_object *obj) 411void *msm_gem_get_vaddr_locked(struct drm_gem_object *obj)
392{ 412{
393 struct msm_gem_object *msm_obj = to_msm_bo(obj); 413 struct msm_gem_object *msm_obj = to_msm_bo(obj);
394 WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); 414 WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
@@ -401,18 +421,91 @@ void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
401 if (msm_obj->vaddr == NULL) 421 if (msm_obj->vaddr == NULL)
402 return ERR_PTR(-ENOMEM); 422 return ERR_PTR(-ENOMEM);
403 } 423 }
424 msm_obj->vmap_count++;
404 return msm_obj->vaddr; 425 return msm_obj->vaddr;
405} 426}
406 427
407void *msm_gem_vaddr(struct drm_gem_object *obj) 428void *msm_gem_get_vaddr(struct drm_gem_object *obj)
408{ 429{
409 void *ret; 430 void *ret;
410 mutex_lock(&obj->dev->struct_mutex); 431 mutex_lock(&obj->dev->struct_mutex);
411 ret = msm_gem_vaddr_locked(obj); 432 ret = msm_gem_get_vaddr_locked(obj);
412 mutex_unlock(&obj->dev->struct_mutex); 433 mutex_unlock(&obj->dev->struct_mutex);
413 return ret; 434 return ret;
414} 435}
415 436
437void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
438{
439 struct msm_gem_object *msm_obj = to_msm_bo(obj);
440 WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
441 WARN_ON(msm_obj->vmap_count < 1);
442 msm_obj->vmap_count--;
443}
444
445void msm_gem_put_vaddr(struct drm_gem_object *obj)
446{
447 mutex_lock(&obj->dev->struct_mutex);
448 msm_gem_put_vaddr_locked(obj);
449 mutex_unlock(&obj->dev->struct_mutex);
450}
451
452/* Update madvise status, returns true if not purged, else
453 * false or -errno.
454 */
455int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv)
456{
457 struct msm_gem_object *msm_obj = to_msm_bo(obj);
458
459 WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
460
461 if (msm_obj->madv != __MSM_MADV_PURGED)
462 msm_obj->madv = madv;
463
464 return (msm_obj->madv != __MSM_MADV_PURGED);
465}
466
467void msm_gem_purge(struct drm_gem_object *obj)
468{
469 struct drm_device *dev = obj->dev;
470 struct msm_gem_object *msm_obj = to_msm_bo(obj);
471
472 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
473 WARN_ON(!is_purgeable(msm_obj));
474 WARN_ON(obj->import_attach);
475
476 put_iova(obj);
477
478 msm_gem_vunmap(obj);
479
480 put_pages(obj);
481
482 msm_obj->madv = __MSM_MADV_PURGED;
483
484 drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);
485 drm_gem_free_mmap_offset(obj);
486
 487 /* Our goal here is to return as much of the memory as
 488 * possible back to the system, as we are called from OOM.
489 * To do this we must instruct the shmfs to drop all of its
490 * backing pages, *now*.
491 */
492 shmem_truncate_range(file_inode(obj->filp), 0, (loff_t)-1);
493
494 invalidate_mapping_pages(file_inode(obj->filp)->i_mapping,
495 0, (loff_t)-1);
496}
497
498void msm_gem_vunmap(struct drm_gem_object *obj)
499{
500 struct msm_gem_object *msm_obj = to_msm_bo(obj);
501
502 if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
503 return;
504
505 vunmap(msm_obj->vaddr);
506 msm_obj->vaddr = NULL;
507}
508
416/* must be called before _move_to_active().. */ 509/* must be called before _move_to_active().. */
417int msm_gem_sync_object(struct drm_gem_object *obj, 510int msm_gem_sync_object(struct drm_gem_object *obj,
418 struct msm_fence_context *fctx, bool exclusive) 511 struct msm_fence_context *fctx, bool exclusive)
@@ -464,6 +557,7 @@ void msm_gem_move_to_active(struct drm_gem_object *obj,
464 struct msm_gpu *gpu, bool exclusive, struct fence *fence) 557 struct msm_gpu *gpu, bool exclusive, struct fence *fence)
465{ 558{
466 struct msm_gem_object *msm_obj = to_msm_bo(obj); 559 struct msm_gem_object *msm_obj = to_msm_bo(obj);
560 WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED);
467 msm_obj->gpu = gpu; 561 msm_obj->gpu = gpu;
468 if (exclusive) 562 if (exclusive)
469 reservation_object_add_excl_fence(msm_obj->resv, fence); 563 reservation_object_add_excl_fence(msm_obj->resv, fence);
@@ -532,13 +626,27 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
532 struct reservation_object_list *fobj; 626 struct reservation_object_list *fobj;
533 struct fence *fence; 627 struct fence *fence;
534 uint64_t off = drm_vma_node_start(&obj->vma_node); 628 uint64_t off = drm_vma_node_start(&obj->vma_node);
629 const char *madv;
535 630
536 WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex)); 631 WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
537 632
538 seq_printf(m, "%08x: %c %2d (%2d) %08llx %p %zu\n", 633 switch (msm_obj->madv) {
634 case __MSM_MADV_PURGED:
635 madv = " purged";
636 break;
637 case MSM_MADV_DONTNEED:
638 madv = " purgeable";
639 break;
640 case MSM_MADV_WILLNEED:
641 default:
642 madv = "";
643 break;
644 }
645
646 seq_printf(m, "%08x: %c %2d (%2d) %08llx %p %zu%s\n",
539 msm_obj->flags, is_active(msm_obj) ? 'A' : 'I', 647 msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
540 obj->name, obj->refcount.refcount.counter, 648 obj->name, obj->refcount.refcount.counter,
541 off, msm_obj->vaddr, obj->size); 649 off, msm_obj->vaddr, obj->size, madv);
542 650
543 rcu_read_lock(); 651 rcu_read_lock();
544 fobj = rcu_dereference(robj->fence); 652 fobj = rcu_dereference(robj->fence);
@@ -578,9 +686,7 @@ void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
578void msm_gem_free_object(struct drm_gem_object *obj) 686void msm_gem_free_object(struct drm_gem_object *obj)
579{ 687{
580 struct drm_device *dev = obj->dev; 688 struct drm_device *dev = obj->dev;
581 struct msm_drm_private *priv = obj->dev->dev_private;
582 struct msm_gem_object *msm_obj = to_msm_bo(obj); 689 struct msm_gem_object *msm_obj = to_msm_bo(obj);
583 int id;
584 690
585 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); 691 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
586 692
@@ -589,13 +695,7 @@ void msm_gem_free_object(struct drm_gem_object *obj)
589 695
590 list_del(&msm_obj->mm_list); 696 list_del(&msm_obj->mm_list);
591 697
592 for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) { 698 put_iova(obj);
593 struct msm_mmu *mmu = priv->mmus[id];
594 if (mmu && msm_obj->domain[id].iova) {
595 uint32_t offset = msm_obj->domain[id].iova;
596 mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
597 }
598 }
599 699
600 if (obj->import_attach) { 700 if (obj->import_attach) {
601 if (msm_obj->vaddr) 701 if (msm_obj->vaddr)
@@ -609,7 +709,7 @@ void msm_gem_free_object(struct drm_gem_object *obj)
609 709
610 drm_prime_gem_destroy(obj, msm_obj->sgt); 710 drm_prime_gem_destroy(obj, msm_obj->sgt);
611 } else { 711 } else {
612 vunmap(msm_obj->vaddr); 712 msm_gem_vunmap(obj);
613 put_pages(obj); 713 put_pages(obj);
614 } 714 }
615 715
@@ -688,6 +788,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
688 msm_obj->vram_node = (void *)&msm_obj[1]; 788 msm_obj->vram_node = (void *)&msm_obj[1];
689 789
690 msm_obj->flags = flags; 790 msm_obj->flags = flags;
791 msm_obj->madv = MSM_MADV_WILLNEED;
691 792
692 if (resv) { 793 if (resv) {
693 msm_obj->resv = resv; 794 msm_obj->resv = resv;
@@ -729,9 +830,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
729 return obj; 830 return obj;
730 831
731fail: 832fail:
732 if (obj) 833 drm_gem_object_unreference(obj);
733 drm_gem_object_unreference(obj);
734
735 return ERR_PTR(ret); 834 return ERR_PTR(ret);
736} 835}
737 836
@@ -774,8 +873,6 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
774 return obj; 873 return obj;
775 874
776fail: 875fail:
777 if (obj) 876 drm_gem_object_unreference_unlocked(obj);
778 drm_gem_object_unreference_unlocked(obj);
779
780 return ERR_PTR(ret); 877 return ERR_PTR(ret);
781} 878}
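The kernel-mapping interface is renamed to a get/put pair and reference counted via vmap_count, so the vmap-purge notifier introduced later in this series can tell when a mapping is actually idle. A hedged sketch of the expected calling pattern; the memset stands in for whatever CPU access the caller needs and is not from the patch:

	static int example_cpu_fill(struct drm_gem_object *obj)
	{
		void *vaddr = msm_gem_get_vaddr(obj);	/* takes struct_mutex, bumps vmap_count */

		if (IS_ERR(vaddr))
			return PTR_ERR(vaddr);

		memset(vaddr, 0, obj->size);		/* CPU access while the reference is held */

		msm_gem_put_vaddr(obj);			/* drop the reference; msm_gem_vunmap()
							 * may now reclaim the mapping under
							 * memory pressure */
		return 0;
	}

Every former msm_gem_vaddr()/msm_gem_vaddr_locked() caller in this series gains the matching put (fbdev, ringbuffer, submit relocs, rd snapshotting), which is what makes is_vunmapable() meaningful.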
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 9facd4b6ffd9..b2f13cfe945e 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -29,6 +29,16 @@ struct msm_gem_object {
29 29
30 uint32_t flags; 30 uint32_t flags;
31 31
32 /**
33 * Advice: are the backing pages purgeable?
34 */
35 uint8_t madv;
36
37 /**
38 * count of active vmap'ing
39 */
40 uint8_t vmap_count;
41
 32 /* An object is either: 42 /* An object is either:
33 * inactive - on priv->inactive_list 43 * inactive - on priv->inactive_list
 34 * active - on one of the gpu's active_list.. well, at 44 * active - on one of the gpu's active_list.. well, at
@@ -72,7 +82,16 @@ static inline bool is_active(struct msm_gem_object *msm_obj)
72 return msm_obj->gpu != NULL; 82 return msm_obj->gpu != NULL;
73} 83}
74 84
75#define MAX_CMDS 4 85static inline bool is_purgeable(struct msm_gem_object *msm_obj)
86{
87 return (msm_obj->madv == MSM_MADV_DONTNEED) && msm_obj->sgt &&
88 !msm_obj->base.dma_buf && !msm_obj->base.import_attach;
89}
90
91static inline bool is_vunmapable(struct msm_gem_object *msm_obj)
92{
93 return (msm_obj->vmap_count == 0) && msm_obj->vaddr;
94}
76 95
77/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc, 96/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
78 * associated with the cmdstream submission for synchronization (and 97 * associated with the cmdstream submission for synchronization (and
@@ -95,7 +114,7 @@ struct msm_gem_submit {
95 uint32_t size; /* in dwords */ 114 uint32_t size; /* in dwords */
96 uint32_t iova; 115 uint32_t iova;
97 uint32_t idx; /* cmdstream buffer idx in bos[] */ 116 uint32_t idx; /* cmdstream buffer idx in bos[] */
98 } cmd[MAX_CMDS]; 117 } *cmd; /* array of size nr_cmds */
99 struct { 118 struct {
100 uint32_t flags; 119 uint32_t flags;
101 struct msm_gem_object *obj; 120 struct msm_gem_object *obj;
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index 6b90890faffe..60bb290700ce 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -33,12 +33,12 @@ struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
33 33
34void *msm_gem_prime_vmap(struct drm_gem_object *obj) 34void *msm_gem_prime_vmap(struct drm_gem_object *obj)
35{ 35{
36 return msm_gem_vaddr(obj); 36 return msm_gem_get_vaddr(obj);
37} 37}
38 38
39void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) 39void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
40{ 40{
41 /* TODO msm_gem_vunmap() */ 41 msm_gem_put_vaddr(obj);
42} 42}
43 43
44int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) 44int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
new file mode 100644
index 000000000000..283d2841ba58
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -0,0 +1,168 @@
1/*
2 * Copyright (C) 2016 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "msm_drv.h"
19#include "msm_gem.h"
20
21static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
22{
23 if (!mutex_is_locked(mutex))
24 return false;
25
26#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
27 return mutex->owner == task;
28#else
29 /* Since UP may be pre-empted, we cannot assume that we own the lock */
30 return false;
31#endif
32}
33
34static bool msm_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
35{
36 if (!mutex_trylock(&dev->struct_mutex)) {
37 if (!mutex_is_locked_by(&dev->struct_mutex, current))
38 return false;
39 *unlock = false;
40 } else {
41 *unlock = true;
42 }
43
44 return true;
45}
46
47
48static unsigned long
49msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
50{
51 struct msm_drm_private *priv =
52 container_of(shrinker, struct msm_drm_private, shrinker);
53 struct drm_device *dev = priv->dev;
54 struct msm_gem_object *msm_obj;
55 unsigned long count = 0;
56 bool unlock;
57
58 if (!msm_gem_shrinker_lock(dev, &unlock))
59 return 0;
60
61 list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
62 if (is_purgeable(msm_obj))
63 count += msm_obj->base.size >> PAGE_SHIFT;
64 }
65
66 if (unlock)
67 mutex_unlock(&dev->struct_mutex);
68
69 return count;
70}
71
72static unsigned long
73msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
74{
75 struct msm_drm_private *priv =
76 container_of(shrinker, struct msm_drm_private, shrinker);
77 struct drm_device *dev = priv->dev;
78 struct msm_gem_object *msm_obj;
79 unsigned long freed = 0;
80 bool unlock;
81
82 if (!msm_gem_shrinker_lock(dev, &unlock))
83 return SHRINK_STOP;
84
85 list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
86 if (freed >= sc->nr_to_scan)
87 break;
88 if (is_purgeable(msm_obj)) {
89 msm_gem_purge(&msm_obj->base);
90 freed += msm_obj->base.size >> PAGE_SHIFT;
91 }
92 }
93
94 if (unlock)
95 mutex_unlock(&dev->struct_mutex);
96
97 if (freed > 0)
98 pr_info_ratelimited("Purging %lu bytes\n", freed << PAGE_SHIFT);
99
100 return freed;
101}
102
103static int
104msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
105{
106 struct msm_drm_private *priv =
107 container_of(nb, struct msm_drm_private, vmap_notifier);
108 struct drm_device *dev = priv->dev;
109 struct msm_gem_object *msm_obj;
110 unsigned unmapped = 0;
111 bool unlock;
112
113 if (!msm_gem_shrinker_lock(dev, &unlock))
114 return NOTIFY_DONE;
115
116 list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
117 if (is_vunmapable(msm_obj)) {
118 msm_gem_vunmap(&msm_obj->base);
 119 /* since we don't know any better, let's bail after a few
120 * and if necessary the shrinker will be invoked again.
121 * Seems better than unmapping *everything*
122 */
123 if (++unmapped >= 15)
124 break;
125 }
126 }
127
128 if (unlock)
129 mutex_unlock(&dev->struct_mutex);
130
131 *(unsigned long *)ptr += unmapped;
132
133 if (unmapped > 0)
134 pr_info_ratelimited("Purging %u vmaps\n", unmapped);
135
136 return NOTIFY_DONE;
137}
138
139/**
140 * msm_gem_shrinker_init - Initialize msm shrinker
 141 * @dev: drm device
142 *
143 * This function registers and sets up the msm shrinker.
144 */
145void msm_gem_shrinker_init(struct drm_device *dev)
146{
147 struct msm_drm_private *priv = dev->dev_private;
148 priv->shrinker.count_objects = msm_gem_shrinker_count;
149 priv->shrinker.scan_objects = msm_gem_shrinker_scan;
150 priv->shrinker.seeks = DEFAULT_SEEKS;
151 WARN_ON(register_shrinker(&priv->shrinker));
152
153 priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
154 WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
155}
156
157/**
158 * msm_gem_shrinker_cleanup - Clean up msm shrinker
 159 * @dev: drm device
160 *
161 * This function unregisters the msm shrinker.
162 */
163void msm_gem_shrinker_cleanup(struct drm_device *dev)
164{
165 struct msm_drm_private *priv = dev->dev_private;
166 WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
167 unregister_shrinker(&priv->shrinker);
168}
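The new shrinker file only defines the callbacks; the registration points are the msm_gem_shrinker_init()/msm_gem_shrinker_cleanup() pair declared in msm_drv.h above. The intended pairing is sketched below; the actual msm_drv.c call sites are outside this section, so the function names here are an assumption used purely for illustration:

	/* Assumed wiring: register once dev->dev_private is populated,
	 * unregister before it is torn down.
	 */
	static int example_kms_load(struct drm_device *ddev)
	{
		msm_gem_shrinker_init(ddev);	/* registers shrinker + vmap purge notifier */
		return 0;
	}

	static void example_kms_unload(struct drm_device *ddev)
	{
		msm_gem_shrinker_cleanup(ddev);	/* must precede freeing msm_drm_private */
	}

Note the trylock in msm_gem_shrinker_lock(): direct reclaim can recurse out of an allocation made while struct_mutex is already held, so the shrinker either proceeds without re-locking (when the current task owns the mutex) or bails, instead of deadlocking.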
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index eb4bb8b2f3a5..9766f9ae4b7d 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -29,10 +29,11 @@
29#define BO_PINNED 0x2000 29#define BO_PINNED 0x2000
30 30
31static struct msm_gem_submit *submit_create(struct drm_device *dev, 31static struct msm_gem_submit *submit_create(struct drm_device *dev,
32 struct msm_gpu *gpu, int nr) 32 struct msm_gpu *gpu, int nr_bos, int nr_cmds)
33{ 33{
34 struct msm_gem_submit *submit; 34 struct msm_gem_submit *submit;
35 int sz = sizeof(*submit) + (nr * sizeof(submit->bos[0])); 35 int sz = sizeof(*submit) + (nr_bos * sizeof(submit->bos[0])) +
36 (nr_cmds * sizeof(*submit->cmd));
36 37
37 submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY); 38 submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
38 if (!submit) 39 if (!submit)
@@ -42,6 +43,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
42 submit->gpu = gpu; 43 submit->gpu = gpu;
43 submit->fence = NULL; 44 submit->fence = NULL;
44 submit->pid = get_pid(task_pid(current)); 45 submit->pid = get_pid(task_pid(current));
46 submit->cmd = (void *)&submit->bos[nr_bos];
45 47
46 /* initially, until copy_from_user() and bo lookup succeeds: */ 48 /* initially, until copy_from_user() and bo lookup succeeds: */
47 submit->nr_bos = 0; 49 submit->nr_bos = 0;
@@ -279,7 +281,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
 279 /* For now, just map the entire thing. Eventually we probably want 281 /* For now, just map the entire thing. Eventually we probably want
280 * to do it page-by-page, w/ kmap() if not vmap()d.. 282 * to do it page-by-page, w/ kmap() if not vmap()d..
281 */ 283 */
282 ptr = msm_gem_vaddr_locked(&obj->base); 284 ptr = msm_gem_get_vaddr_locked(&obj->base);
283 285
284 if (IS_ERR(ptr)) { 286 if (IS_ERR(ptr)) {
285 ret = PTR_ERR(ptr); 287 ret = PTR_ERR(ptr);
@@ -332,6 +334,8 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
332 last_offset = off; 334 last_offset = off;
333 } 335 }
334 336
337 msm_gem_put_vaddr_locked(&obj->base);
338
335 return 0; 339 return 0;
336} 340}
337 341
@@ -369,14 +373,15 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
369 if (args->pipe != MSM_PIPE_3D0) 373 if (args->pipe != MSM_PIPE_3D0)
370 return -EINVAL; 374 return -EINVAL;
371 375
372 if (args->nr_cmds > MAX_CMDS) 376 ret = mutex_lock_interruptible(&dev->struct_mutex);
373 return -EINVAL; 377 if (ret)
374 378 return ret;
375 submit = submit_create(dev, gpu, args->nr_bos);
376 if (!submit)
377 return -ENOMEM;
378 379
379 mutex_lock(&dev->struct_mutex); 380 submit = submit_create(dev, gpu, args->nr_bos, args->nr_cmds);
381 if (!submit) {
382 ret = -ENOMEM;
383 goto out_unlock;
384 }
380 385
381 ret = submit_lookup_objects(submit, args, file); 386 ret = submit_lookup_objects(submit, args, file);
382 if (ret) 387 if (ret)
@@ -462,6 +467,7 @@ out:
462 submit_cleanup(submit); 467 submit_cleanup(submit);
463 if (ret) 468 if (ret)
464 msm_gem_submit_free(submit); 469 msm_gem_submit_free(submit);
470out_unlock:
465 mutex_unlock(&dev->struct_mutex); 471 mutex_unlock(&dev->struct_mutex);
466 return ret; 472 return ret;
467} 473}
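With MAX_CMDS gone, submit_create() sizes a single allocation for the struct plus both trailing arrays and points submit->cmd just past bos[nr_bos]. A standalone userspace C sketch of that layout trick, purely illustrative and not the driver's code:

	#include <stdio.h>
	#include <stdlib.h>

	struct bo_entry  { unsigned int flags; unsigned int iova; };
	struct cmd_entry { unsigned int type, size, iova, idx; };

	struct submit {
		unsigned int nr_bos, nr_cmds;
		struct cmd_entry *cmd;		/* carved out of the same allocation */
		struct bo_entry bos[];		/* flexible array member, nr_bos entries */
	};

	static struct submit *submit_create(unsigned int nr_bos, unsigned int nr_cmds)
	{
		size_t sz = sizeof(struct submit) +
			    (size_t)nr_bos  * sizeof(struct bo_entry) +
			    (size_t)nr_cmds * sizeof(struct cmd_entry);
		struct submit *s = calloc(1, sz);

		if (!s)
			return NULL;
		s->nr_bos = nr_bos;
		s->nr_cmds = nr_cmds;
		s->cmd = (void *)&s->bos[nr_bos];	/* cmd[] starts right after bos[] */
		return s;
	}

	int main(void)
	{
		struct submit *s = submit_create(4, 2);

		if (!s)
			return 1;
		printf("bos[] at %p, cmd[] at %p\n", (void *)s->bos, (void *)s->cmd);
		free(s);
		return 0;
	}

One allocation keeps the submit path cheap, at the cost of needing care that nr_bos/nr_cmds cannot overflow the size computation.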
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index a7a0b6d9b057..3a294d0da3a0 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -59,10 +59,10 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
59 return -EINVAL; 59 return -EINVAL;
60 60
61 for_each_sg(sgt->sgl, sg, sgt->nents, i) { 61 for_each_sg(sgt->sgl, sg, sgt->nents, i) {
62 u32 pa = sg_phys(sg) - sg->offset; 62 dma_addr_t pa = sg_phys(sg) - sg->offset;
63 size_t bytes = sg->length + sg->offset; 63 size_t bytes = sg->length + sg->offset;
64 64
65 VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes); 65 VERB("map[%d]: %08x %08lx(%zx)", i, da, (unsigned long)pa, bytes);
66 66
67 ret = iommu_map(domain, da, pa, bytes, prot); 67 ret = iommu_map(domain, da, pa, bytes, prot);
68 if (ret) 68 if (ret)
@@ -101,7 +101,7 @@ static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova,
101 if (unmapped < bytes) 101 if (unmapped < bytes)
102 return unmapped; 102 return unmapped;
103 103
104 VERB("unmap[%d]: %08x(%zx)", i, iova, bytes); 104 VERB("unmap[%d]: %08x(%zx)", i, da, bytes);
105 105
106 BUG_ON(!PAGE_ALIGNED(bytes)); 106 BUG_ON(!PAGE_ALIGNED(bytes));
107 107
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index e32222c3d44f..40e41e5cdbc6 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -61,10 +61,8 @@ struct msm_kms_funcs {
61struct msm_kms { 61struct msm_kms {
62 const struct msm_kms_funcs *funcs; 62 const struct msm_kms_funcs *funcs;
63 63
64 /* irq handling: */ 64 /* irq number to be passed on to drm_irq_install */
65 bool in_irq; 65 int irq;
66 struct list_head irq_list; /* list of mdp4_irq */
67 uint32_t vblank_mask; /* irq bits set for userspace vblank */
68}; 66};
69 67
70static inline void msm_kms_init(struct msm_kms *kms, 68static inline void msm_kms_init(struct msm_kms *kms,
@@ -75,5 +73,7 @@ static inline void msm_kms_init(struct msm_kms *kms,
75 73
76struct msm_kms *mdp4_kms_init(struct drm_device *dev); 74struct msm_kms *mdp4_kms_init(struct drm_device *dev);
77struct msm_kms *mdp5_kms_init(struct drm_device *dev); 75struct msm_kms *mdp5_kms_init(struct drm_device *dev);
76int msm_mdss_init(struct drm_device *dev);
77void msm_mdss_destroy(struct drm_device *dev);
78 78
79#endif /* __MSM_KMS_H__ */ 79#endif /* __MSM_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/msm_perf.c b/drivers/gpu/drm/msm/msm_perf.c
index 830857c47c86..17fe4e53e0d1 100644
--- a/drivers/gpu/drm/msm/msm_perf.c
+++ b/drivers/gpu/drm/msm/msm_perf.c
@@ -132,7 +132,7 @@ static ssize_t perf_read(struct file *file, char __user *buf,
132 size_t sz, loff_t *ppos) 132 size_t sz, loff_t *ppos)
133{ 133{
134 struct msm_perf_state *perf = file->private_data; 134 struct msm_perf_state *perf = file->private_data;
135 int n = 0, ret; 135 int n = 0, ret = 0;
136 136
137 mutex_lock(&perf->read_lock); 137 mutex_lock(&perf->read_lock);
138 138
@@ -143,9 +143,10 @@ static ssize_t perf_read(struct file *file, char __user *buf,
143 } 143 }
144 144
145 n = min((int)sz, perf->buftot - perf->bufpos); 145 n = min((int)sz, perf->buftot - perf->bufpos);
146 ret = copy_to_user(buf, &perf->buf[perf->bufpos], n); 146 if (copy_to_user(buf, &perf->buf[perf->bufpos], n)) {
147 if (ret) 147 ret = -EFAULT;
148 goto out; 148 goto out;
149 }
149 150
150 perf->bufpos += n; 151 perf->bufpos += n;
151 *ppos += n; 152 *ppos += n;
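The perf_read() fix above is about return-value semantics, not just style: copy_to_user() returns the number of bytes it could not copy, which is not a valid value to hand back to read() as an error code. A hedged fragment of the convention the fix adopts; copy_out() is an illustrative helper, not part of the patch:

	/* copy_to_user() returns the uncopied byte count; translate any
	 * shortfall into -EFAULT rather than returning that count.
	 */
	static ssize_t copy_out(char __user *ubuf, const void *kbuf, size_t n)
	{
		if (copy_to_user(ubuf, kbuf, n))
			return -EFAULT;
		return n;
	}

The same pattern is applied to rd_read() in the msm_rd.c hunk below.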
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index 0857710c2ff2..3a5fdfcd67ae 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -27,6 +27,11 @@
27 * This bypasses drm_debugfs_create_files() mainly because we need to use 27 * This bypasses drm_debugfs_create_files() mainly because we need to use
28 * our own fops for a bit more control. In particular, we don't want to 28 * our own fops for a bit more control. In particular, we don't want to
29 * do anything if userspace doesn't have the debugfs file open. 29 * do anything if userspace doesn't have the debugfs file open.
30 *
31 * The module-param "rd_full", which defaults to false, enables snapshotting
32 * all (non-written) buffers in the submit, rather than just cmdstream bo's.
33 * This is useful to capture the contents of (for example) vbo's or textures,
34 * or shader programs (if not emitted inline in cmdstream).
30 */ 35 */
31 36
32#ifdef CONFIG_DEBUG_FS 37#ifdef CONFIG_DEBUG_FS
@@ -40,6 +45,10 @@
40#include "msm_gpu.h" 45#include "msm_gpu.h"
41#include "msm_gem.h" 46#include "msm_gem.h"
42 47
48static bool rd_full = false;
49MODULE_PARM_DESC(rd_full, "If true, $debugfs/.../rd will snapshot all buffer contents");
50module_param_named(rd_full, rd_full, bool, 0600);
51
43enum rd_sect_type { 52enum rd_sect_type {
44 RD_NONE, 53 RD_NONE,
45 RD_TEST, /* ascii text */ 54 RD_TEST, /* ascii text */
@@ -140,9 +149,10 @@ static ssize_t rd_read(struct file *file, char __user *buf,
140 goto out; 149 goto out;
141 150
142 n = min_t(int, sz, circ_count_to_end(&rd->fifo)); 151 n = min_t(int, sz, circ_count_to_end(&rd->fifo));
143 ret = copy_to_user(buf, fptr, n); 152 if (copy_to_user(buf, fptr, n)) {
144 if (ret) 153 ret = -EFAULT;
145 goto out; 154 goto out;
155 }
146 156
147 fifo->tail = (fifo->tail + n) & (BUF_SZ - 1); 157 fifo->tail = (fifo->tail + n) & (BUF_SZ - 1);
148 *ppos += n; 158 *ppos += n;
@@ -277,6 +287,31 @@ void msm_rd_debugfs_cleanup(struct drm_minor *minor)
277 kfree(rd); 287 kfree(rd);
278} 288}
279 289
290static void snapshot_buf(struct msm_rd_state *rd,
291 struct msm_gem_submit *submit, int idx,
292 uint32_t iova, uint32_t size)
293{
294 struct msm_gem_object *obj = submit->bos[idx].obj;
295 const char *buf;
296
297 buf = msm_gem_get_vaddr_locked(&obj->base);
298 if (IS_ERR(buf))
299 return;
300
301 if (iova) {
302 buf += iova - submit->bos[idx].iova;
303 } else {
304 iova = submit->bos[idx].iova;
305 size = obj->base.size;
306 }
307
308 rd_write_section(rd, RD_GPUADDR,
309 (uint32_t[2]){ iova, size }, 8);
310 rd_write_section(rd, RD_BUFFER_CONTENTS, buf, size);
311
312 msm_gem_put_vaddr_locked(&obj->base);
313}
314
280/* called under struct_mutex */ 315/* called under struct_mutex */
281void msm_rd_dump_submit(struct msm_gem_submit *submit) 316void msm_rd_dump_submit(struct msm_gem_submit *submit)
282{ 317{
@@ -300,27 +335,27 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit)
300 335
301 rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4)); 336 rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));
302 337
303 /* could be nice to have an option (module-param?) to snapshot 338 if (rd_full) {
304 * all the bo's associated with the submit. Handy to see vtx 339 for (i = 0; i < submit->nr_bos; i++) {
305 * buffers, etc. For now just the cmdstream bo's is enough. 340 /* buffers that are written to probably don't start out
306 */ 341 * with anything interesting:
342 */
343 if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
344 continue;
345
346 snapshot_buf(rd, submit, i, 0, 0);
347 }
348 }
307 349
308 for (i = 0; i < submit->nr_cmds; i++) { 350 for (i = 0; i < submit->nr_cmds; i++) {
309 uint32_t idx = submit->cmd[i].idx;
310 uint32_t iova = submit->cmd[i].iova; 351 uint32_t iova = submit->cmd[i].iova;
311 uint32_t szd = submit->cmd[i].size; /* in dwords */ 352 uint32_t szd = submit->cmd[i].size; /* in dwords */
312 struct msm_gem_object *obj = submit->bos[idx].obj;
313 const char *buf = msm_gem_vaddr_locked(&obj->base);
314
315 if (IS_ERR(buf))
316 continue;
317 353
318 buf += iova - submit->bos[idx].iova; 354 /* snapshot cmdstream bo's (if we haven't already): */
319 355 if (!rd_full) {
320 rd_write_section(rd, RD_GPUADDR, 356 snapshot_buf(rd, submit, submit->cmd[i].idx,
321 (uint32_t[2]){ iova, szd * 4 }, 8); 357 submit->cmd[i].iova, szd * 4);
322 rd_write_section(rd, RD_BUFFER_CONTENTS, 358 }
323 buf, szd * 4);
324 359
325 switch (submit->cmd[i].type) { 360 switch (submit->cmd[i].type) {
326 case MSM_SUBMIT_CMD_IB_TARGET_BUF: 361 case MSM_SUBMIT_CMD_IB_TARGET_BUF:
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 42f5359cf988..f326cf6a32e6 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -39,7 +39,7 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
39 goto fail; 39 goto fail;
40 } 40 }
41 41
42 ring->start = msm_gem_vaddr_locked(ring->bo); 42 ring->start = msm_gem_get_vaddr_locked(ring->bo);
43 if (IS_ERR(ring->start)) { 43 if (IS_ERR(ring->start)) {
44 ret = PTR_ERR(ring->start); 44 ret = PTR_ERR(ring->start);
45 goto fail; 45 goto fail;
@@ -59,7 +59,9 @@ fail:
59 59
60void msm_ringbuffer_destroy(struct msm_ringbuffer *ring) 60void msm_ringbuffer_destroy(struct msm_ringbuffer *ring)
61{ 61{
62 if (ring->bo) 62 if (ring->bo) {
63 msm_gem_put_vaddr(ring->bo);
63 drm_gem_object_unreference_unlocked(ring->bo); 64 drm_gem_object_unreference_unlocked(ring->bo);
65 }
64 kfree(ring); 66 kfree(ring);
65} 67}
diff --git a/drivers/gpu/drm/nouveau/Kconfig b/drivers/gpu/drm/nouveau/Kconfig
index 5ab13e7939db..2922a82cba8e 100644
--- a/drivers/gpu/drm/nouveau/Kconfig
+++ b/drivers/gpu/drm/nouveau/Kconfig
@@ -3,13 +3,7 @@ config DRM_NOUVEAU
3 depends on DRM && PCI 3 depends on DRM && PCI
4 select FW_LOADER 4 select FW_LOADER
5 select DRM_KMS_HELPER 5 select DRM_KMS_HELPER
6 select DRM_KMS_FB_HELPER
7 select DRM_TTM 6 select DRM_TTM
8 select FB_CFB_FILLRECT
9 select FB_CFB_COPYAREA
10 select FB_CFB_IMAGEBLIT
11 select FB
12 select FRAMEBUFFER_CONSOLE if !EXPERT
13 select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT 7 select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT
14 select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && INPUT 8 select ACPI_VIDEO if ACPI && X86 && BACKLIGHT_CLASS_DEVICE && INPUT
15 select X86_PLATFORM_DEVICES if ACPI && X86 9 select X86_PLATFORM_DEVICES if ACPI && X86
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 6f318c54da33..0cb7a18cde26 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -785,14 +785,14 @@ nv_crtc_disable(struct drm_crtc *crtc)
785 nouveau_bo_ref(NULL, &disp->image[nv_crtc->index]); 785 nouveau_bo_ref(NULL, &disp->image[nv_crtc->index]);
786} 786}
787 787
788static void 788static int
789nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start, 789nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
790 uint32_t size) 790 uint32_t size)
791{ 791{
792 int end = (start + size > 256) ? 256 : start + size, i;
793 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 792 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
793 int i;
794 794
795 for (i = start; i < end; i++) { 795 for (i = 0; i < size; i++) {
796 nv_crtc->lut.r[i] = r[i]; 796 nv_crtc->lut.r[i] = r[i];
797 nv_crtc->lut.g[i] = g[i]; 797 nv_crtc->lut.g[i] = g[i];
798 nv_crtc->lut.b[i] = b[i]; 798 nv_crtc->lut.b[i] = b[i];
@@ -805,10 +805,12 @@ nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start,
805 */ 805 */
806 if (!nv_crtc->base.primary->fb) { 806 if (!nv_crtc->base.primary->fb) {
807 nv_crtc->lut.depth = 0; 807 nv_crtc->lut.depth = 0;
808 return; 808 return 0;
809 } 809 }
810 810
811 nv_crtc_gamma_load(crtc); 811 nv_crtc_gamma_load(crtc);
812
813 return 0;
812} 814}
813 815
814static int 816static int
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index aea81a547e85..34c0f2f67548 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -125,18 +125,8 @@ nv04_display_destroy(struct drm_device *dev)
125 struct nv04_display *disp = nv04_display(dev); 125 struct nv04_display *disp = nv04_display(dev);
126 struct nouveau_drm *drm = nouveau_drm(dev); 126 struct nouveau_drm *drm = nouveau_drm(dev);
127 struct nouveau_encoder *encoder; 127 struct nouveau_encoder *encoder;
128 struct drm_crtc *crtc;
129 struct nouveau_crtc *nv_crtc; 128 struct nouveau_crtc *nv_crtc;
130 129
131 /* Turn every CRTC off. */
132 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
133 struct drm_mode_set modeset = {
134 .crtc = crtc,
135 };
136
137 drm_mode_set_config_internal(&modeset);
138 }
139
140 /* Restore state */ 130 /* Restore state */
141 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.base.head) 131 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.base.head)
142 encoder->enc_restore(&encoder->base.base); 132 encoder->enc_restore(&encoder->base.base);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
index a665b78b2af5..434d1e29f279 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
@@ -749,13 +749,8 @@ static int nv17_tv_set_property(struct drm_encoder *encoder,
749 749
750 /* Disable the crtc to ensure a full modeset is 750 /* Disable the crtc to ensure a full modeset is
751 * performed whenever it's turned on again. */ 751 * performed whenever it's turned on again. */
752 if (crtc) { 752 if (crtc)
753 struct drm_mode_set modeset = { 753 drm_crtc_force_disable(crtc);
754 .crtc = crtc,
755 };
756
757 drm_mode_set_config_internal(&modeset);
758 }
759 } 754 }
760 755
761 return 0; 756 return 0;
diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
index 331620a52afa..287a7d6fa480 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h
@@ -29,6 +29,7 @@ struct nv_device_info_v0 {
29#define NV_DEVICE_INFO_V0_FERMI 0x07 29#define NV_DEVICE_INFO_V0_FERMI 0x07
30#define NV_DEVICE_INFO_V0_KEPLER 0x08 30#define NV_DEVICE_INFO_V0_KEPLER 0x08
31#define NV_DEVICE_INFO_V0_MAXWELL 0x09 31#define NV_DEVICE_INFO_V0_MAXWELL 0x09
32#define NV_DEVICE_INFO_V0_PASCAL 0x0a
32 __u8 family; 33 __u8 family;
33 __u8 pad06[2]; 34 __u8 pad06[2];
34 __u64 ram_size; 35 __u64 ram_size;
diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h
index 982aad8fa645..e6e9537537cf 100644
--- a/drivers/gpu/drm/nouveau/include/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/include/nvif/class.h
@@ -39,6 +39,7 @@
39#define KEPLER_CHANNEL_GPFIFO_A /* cla06f.h */ 0x0000a06f 39#define KEPLER_CHANNEL_GPFIFO_A /* cla06f.h */ 0x0000a06f
40#define KEPLER_CHANNEL_GPFIFO_B /* cla06f.h */ 0x0000a16f 40#define KEPLER_CHANNEL_GPFIFO_B /* cla06f.h */ 0x0000a16f
41#define MAXWELL_CHANNEL_GPFIFO_A /* cla06f.h */ 0x0000b06f 41#define MAXWELL_CHANNEL_GPFIFO_A /* cla06f.h */ 0x0000b06f
42#define PASCAL_CHANNEL_GPFIFO_A /* cla06f.h */ 0x0000c06f
42 43
43#define NV50_DISP /* cl5070.h */ 0x00005070 44#define NV50_DISP /* cl5070.h */ 0x00005070
44#define G82_DISP /* cl5070.h */ 0x00008270 45#define G82_DISP /* cl5070.h */ 0x00008270
@@ -50,6 +51,8 @@
50#define GK110_DISP /* cl5070.h */ 0x00009270 51#define GK110_DISP /* cl5070.h */ 0x00009270
51#define GM107_DISP /* cl5070.h */ 0x00009470 52#define GM107_DISP /* cl5070.h */ 0x00009470
52#define GM200_DISP /* cl5070.h */ 0x00009570 53#define GM200_DISP /* cl5070.h */ 0x00009570
54#define GP100_DISP /* cl5070.h */ 0x00009770
55#define GP104_DISP /* cl5070.h */ 0x00009870
53 56
54#define NV31_MPEG 0x00003174 57#define NV31_MPEG 0x00003174
55#define G82_MPEG 0x00008274 58#define G82_MPEG 0x00008274
@@ -86,6 +89,8 @@
86#define GK110_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000927d 89#define GK110_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000927d
87#define GM107_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000947d 90#define GM107_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000947d
88#define GM200_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000957d 91#define GM200_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000957d
92#define GP100_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000977d
93#define GP104_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000987d
89 94
90#define NV50_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000507e 95#define NV50_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000507e
91#define G82_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000827e 96#define G82_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000827e
@@ -105,6 +110,8 @@
105#define MAXWELL_A /* cl9097.h */ 0x0000b097 110#define MAXWELL_A /* cl9097.h */ 0x0000b097
106#define MAXWELL_B /* cl9097.h */ 0x0000b197 111#define MAXWELL_B /* cl9097.h */ 0x0000b197
107 112
113#define PASCAL_A /* cl9097.h */ 0x0000c097
114
108#define NV74_BSP 0x000074b0 115#define NV74_BSP 0x000074b0
109 116
110#define GT212_MSVLD 0x000085b1 117#define GT212_MSVLD 0x000085b1
@@ -128,6 +135,8 @@
128#define FERMI_DMA 0x000090b5 135#define FERMI_DMA 0x000090b5
129#define KEPLER_DMA_COPY_A 0x0000a0b5 136#define KEPLER_DMA_COPY_A 0x0000a0b5
130#define MAXWELL_DMA_COPY_A 0x0000b0b5 137#define MAXWELL_DMA_COPY_A 0x0000b0b5
138#define PASCAL_DMA_COPY_A 0x0000c0b5
139#define PASCAL_DMA_COPY_B 0x0000c1b5
131 140
132#define FERMI_DECOMPRESS 0x000090b8 141#define FERMI_DECOMPRESS 0x000090b8
133 142
@@ -137,6 +146,7 @@
137#define KEPLER_COMPUTE_B 0x0000a1c0 146#define KEPLER_COMPUTE_B 0x0000a1c0
138#define MAXWELL_COMPUTE_A 0x0000b0c0 147#define MAXWELL_COMPUTE_A 0x0000b0c0
139#define MAXWELL_COMPUTE_B 0x0000b1c0 148#define MAXWELL_COMPUTE_B 0x0000b1c0
149#define PASCAL_COMPUTE_A 0x0000c0c0
140 150
141#define NV74_CIPHER 0x000074c1 151#define NV74_CIPHER 0x000074c1
142#endif 152#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
index 126a85cc81bc..7ea8aa7ca408 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h
@@ -33,7 +33,10 @@ enum nvkm_devidx {
33 NVKM_ENGINE_CE0, 33 NVKM_ENGINE_CE0,
34 NVKM_ENGINE_CE1, 34 NVKM_ENGINE_CE1,
35 NVKM_ENGINE_CE2, 35 NVKM_ENGINE_CE2,
36 NVKM_ENGINE_CE_LAST = NVKM_ENGINE_CE2, 36 NVKM_ENGINE_CE3,
37 NVKM_ENGINE_CE4,
38 NVKM_ENGINE_CE5,
39 NVKM_ENGINE_CE_LAST = NVKM_ENGINE_CE5,
37 40
38 NVKM_ENGINE_CIPHER, 41 NVKM_ENGINE_CIPHER,
39 NVKM_ENGINE_DISP, 42 NVKM_ENGINE_DISP,
@@ -50,7 +53,8 @@ enum nvkm_devidx {
50 53
51 NVKM_ENGINE_NVENC0, 54 NVKM_ENGINE_NVENC0,
52 NVKM_ENGINE_NVENC1, 55 NVKM_ENGINE_NVENC1,
53 NVKM_ENGINE_NVENC_LAST = NVKM_ENGINE_NVENC1, 56 NVKM_ENGINE_NVENC2,
57 NVKM_ENGINE_NVENC_LAST = NVKM_ENGINE_NVENC2,
54 58
55 NVKM_ENGINE_NVDEC, 59 NVKM_ENGINE_NVDEC,
56 NVKM_ENGINE_PM, 60 NVKM_ENGINE_PM,
@@ -102,6 +106,7 @@ struct nvkm_device {
102 NV_C0 = 0xc0, 106 NV_C0 = 0xc0,
103 NV_E0 = 0xe0, 107 NV_E0 = 0xe0,
104 GM100 = 0x110, 108 GM100 = 0x110,
109 GP100 = 0x130,
105 } card_type; 110 } card_type;
106 u32 chipset; 111 u32 chipset;
107 u8 chiprev; 112 u8 chiprev;
@@ -136,7 +141,7 @@ struct nvkm_device {
136 struct nvkm_volt *volt; 141 struct nvkm_volt *volt;
137 142
138 struct nvkm_engine *bsp; 143 struct nvkm_engine *bsp;
139 struct nvkm_engine *ce[3]; 144 struct nvkm_engine *ce[6];
140 struct nvkm_engine *cipher; 145 struct nvkm_engine *cipher;
141 struct nvkm_disp *disp; 146 struct nvkm_disp *disp;
142 struct nvkm_dma *dma; 147 struct nvkm_dma *dma;
@@ -149,7 +154,7 @@ struct nvkm_device {
149 struct nvkm_engine *mspdec; 154 struct nvkm_engine *mspdec;
150 struct nvkm_engine *msppp; 155 struct nvkm_engine *msppp;
151 struct nvkm_engine *msvld; 156 struct nvkm_engine *msvld;
152 struct nvkm_engine *nvenc[2]; 157 struct nvkm_engine *nvenc[3];
153 struct nvkm_engine *nvdec; 158 struct nvkm_engine *nvdec;
154 struct nvkm_pm *pm; 159 struct nvkm_pm *pm;
155 struct nvkm_engine *sec; 160 struct nvkm_engine *sec;
@@ -170,7 +175,6 @@ struct nvkm_device_func {
170 void (*fini)(struct nvkm_device *, bool suspend); 175 void (*fini)(struct nvkm_device *, bool suspend);
171 resource_size_t (*resource_addr)(struct nvkm_device *, unsigned bar); 176 resource_size_t (*resource_addr)(struct nvkm_device *, unsigned bar);
172 resource_size_t (*resource_size)(struct nvkm_device *, unsigned bar); 177 resource_size_t (*resource_size)(struct nvkm_device *, unsigned bar);
173 bool cpu_coherent;
174}; 178};
175 179
176struct nvkm_device_quirk { 180struct nvkm_device_quirk {
@@ -206,7 +210,7 @@ struct nvkm_device_chip {
206 int (*volt )(struct nvkm_device *, int idx, struct nvkm_volt **); 210 int (*volt )(struct nvkm_device *, int idx, struct nvkm_volt **);
207 211
208 int (*bsp )(struct nvkm_device *, int idx, struct nvkm_engine **); 212 int (*bsp )(struct nvkm_device *, int idx, struct nvkm_engine **);
209 int (*ce[3] )(struct nvkm_device *, int idx, struct nvkm_engine **); 213 int (*ce[6] )(struct nvkm_device *, int idx, struct nvkm_engine **);
210 int (*cipher )(struct nvkm_device *, int idx, struct nvkm_engine **); 214 int (*cipher )(struct nvkm_device *, int idx, struct nvkm_engine **);
211 int (*disp )(struct nvkm_device *, int idx, struct nvkm_disp **); 215 int (*disp )(struct nvkm_device *, int idx, struct nvkm_disp **);
212 int (*dma )(struct nvkm_device *, int idx, struct nvkm_dma **); 216 int (*dma )(struct nvkm_device *, int idx, struct nvkm_dma **);
@@ -219,7 +223,7 @@ struct nvkm_device_chip {
219 int (*mspdec )(struct nvkm_device *, int idx, struct nvkm_engine **); 223 int (*mspdec )(struct nvkm_device *, int idx, struct nvkm_engine **);
220 int (*msppp )(struct nvkm_device *, int idx, struct nvkm_engine **); 224 int (*msppp )(struct nvkm_device *, int idx, struct nvkm_engine **);
221 int (*msvld )(struct nvkm_device *, int idx, struct nvkm_engine **); 225 int (*msvld )(struct nvkm_device *, int idx, struct nvkm_engine **);
222 int (*nvenc[2])(struct nvkm_device *, int idx, struct nvkm_engine **); 226 int (*nvenc[3])(struct nvkm_device *, int idx, struct nvkm_engine **);
223 int (*nvdec )(struct nvkm_device *, int idx, struct nvkm_engine **); 227 int (*nvdec )(struct nvkm_device *, int idx, struct nvkm_engine **);
224 int (*pm )(struct nvkm_device *, int idx, struct nvkm_pm **); 228 int (*pm )(struct nvkm_device *, int idx, struct nvkm_pm **);
225 int (*sec )(struct nvkm_device *, int idx, struct nvkm_engine **); 229 int (*sec )(struct nvkm_device *, int idx, struct nvkm_engine **);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
index b5370cb56e3c..e5c9b6268dcc 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h
@@ -28,6 +28,7 @@ struct nvkm_device_tegra {
28 } iommu; 28 } iommu;
29 29
30 int gpu_speedo; 30 int gpu_speedo;
31 int gpu_speedo_id;
31}; 32};
32 33
33struct nvkm_device_tegra_func { 34struct nvkm_device_tegra_func {
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
index 594d719ba41e..d3d26a1e215d 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h
@@ -7,4 +7,6 @@ int gf100_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
7int gk104_ce_new(struct nvkm_device *, int, struct nvkm_engine **); 7int gk104_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
8int gm107_ce_new(struct nvkm_device *, int, struct nvkm_engine **); 8int gm107_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
9int gm200_ce_new(struct nvkm_device *, int, struct nvkm_engine **); 9int gm200_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
10int gp100_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
11int gp104_ce_new(struct nvkm_device *, int, struct nvkm_engine **);
10#endif 12#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
index d4fdce27b297..e82049667ce4 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h
@@ -32,4 +32,6 @@ int gk104_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
32int gk110_disp_new(struct nvkm_device *, int, struct nvkm_disp **); 32int gk110_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
33int gm107_disp_new(struct nvkm_device *, int, struct nvkm_disp **); 33int gm107_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
34int gm200_disp_new(struct nvkm_device *, int, struct nvkm_disp **); 34int gm200_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
35int gp100_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
36int gp104_disp_new(struct nvkm_device *, int, struct nvkm_disp **);
35#endif 37#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
index 15ddfcf5e8db..ed92fec5292c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h
@@ -66,4 +66,5 @@ int gk20a_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
66int gm107_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **); 66int gm107_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
67int gm200_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **); 67int gm200_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
68int gm20b_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **); 68int gm20b_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
69int gp100_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **);
69#endif 70#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
index 6515f5810a26..89cf99307828 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h
@@ -42,4 +42,5 @@ int gk20a_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
42int gm107_gr_new(struct nvkm_device *, int, struct nvkm_gr **); 42int gm107_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
43int gm200_gr_new(struct nvkm_device *, int, struct nvkm_gr **); 43int gm200_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
44int gm20b_gr_new(struct nvkm_device *, int, struct nvkm_gr **); 44int gm20b_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
45int gp100_gr_new(struct nvkm_device *, int, struct nvkm_gr **);
45#endif 46#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h
index e39a1fea930b..a72f3290528a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h
@@ -7,6 +7,9 @@ struct nvkm_bios {
7 u32 size; 7 u32 size;
8 u8 *data; 8 u8 *data;
9 9
10 u32 image0_size;
11 u32 imaged_addr;
12
10 u32 bmp_offset; 13 u32 bmp_offset;
11 u32 bit_offset; 14 u32 bit_offset;
12 15
@@ -22,10 +25,9 @@ struct nvkm_bios {
22u8 nvbios_checksum(const u8 *data, int size); 25u8 nvbios_checksum(const u8 *data, int size);
23u16 nvbios_findstr(const u8 *data, int size, const char *str, int len); 26u16 nvbios_findstr(const u8 *data, int size, const char *str, int len);
24int nvbios_memcmp(struct nvkm_bios *, u32 addr, const char *, u32 len); 27int nvbios_memcmp(struct nvkm_bios *, u32 addr, const char *, u32 len);
25 28u8 nvbios_rd08(struct nvkm_bios *, u32 addr);
26#define nvbios_rd08(b,o) (b)->data[(o)] 29u16 nvbios_rd16(struct nvkm_bios *, u32 addr);
27#define nvbios_rd16(b,o) get_unaligned_le16(&(b)->data[(o)]) 30u32 nvbios_rd32(struct nvkm_bios *, u32 addr);
28#define nvbios_rd32(b,o) get_unaligned_le32(&(b)->data[(o)])
29 31
30int nvkm_bios_new(struct nvkm_device *, int, struct nvkm_bios **); 32int nvkm_bios_new(struct nvkm_device *, int, struct nvkm_bios **);
31#endif 33#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
index 0a734fd06acf..3a410275fa71 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h
@@ -56,6 +56,8 @@ struct nvkm_fb {
56 int regions; 56 int regions;
57 } tile; 57 } tile;
58 58
59 u8 page;
60
59 struct nvkm_memory *mmu_rd; 61 struct nvkm_memory *mmu_rd;
60 struct nvkm_memory *mmu_wr; 62 struct nvkm_memory *mmu_wr;
61}; 63};
@@ -91,6 +93,8 @@ int gk104_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
91int gk20a_fb_new(struct nvkm_device *, int, struct nvkm_fb **); 93int gk20a_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
92int gm107_fb_new(struct nvkm_device *, int, struct nvkm_fb **); 94int gm107_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
93int gm200_fb_new(struct nvkm_device *, int, struct nvkm_fb **); 95int gm200_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
96int gp100_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
97int gp104_fb_new(struct nvkm_device *, int, struct nvkm_fb **);
94 98
95#include <subdev/bios.h> 99#include <subdev/bios.h>
96#include <subdev/bios/ramcfg.h> 100#include <subdev/bios/ramcfg.h>
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
index c6b90b6543b3..cd755baf9cab 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h
@@ -38,4 +38,5 @@ int gk104_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
38int gk20a_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **); 38int gk20a_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
39int gm107_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **); 39int gm107_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
40int gm200_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **); 40int gm200_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
41int gp100_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **);
41#endif 42#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
index 2e80682b2da1..27d25b18d85c 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h
@@ -7,11 +7,14 @@ struct nvkm_mc {
7 struct nvkm_subdev subdev; 7 struct nvkm_subdev subdev;
8}; 8};
9 9
10void nvkm_mc_intr(struct nvkm_mc *, bool *handled); 10void nvkm_mc_enable(struct nvkm_device *, enum nvkm_devidx);
11void nvkm_mc_intr_unarm(struct nvkm_mc *); 11void nvkm_mc_disable(struct nvkm_device *, enum nvkm_devidx);
12void nvkm_mc_intr_rearm(struct nvkm_mc *); 12void nvkm_mc_reset(struct nvkm_device *, enum nvkm_devidx);
13void nvkm_mc_reset(struct nvkm_mc *, enum nvkm_devidx); 13void nvkm_mc_intr(struct nvkm_device *, bool *handled);
14void nvkm_mc_unk260(struct nvkm_mc *, u32 data); 14void nvkm_mc_intr_unarm(struct nvkm_device *);
15void nvkm_mc_intr_rearm(struct nvkm_device *);
16void nvkm_mc_intr_mask(struct nvkm_device *, enum nvkm_devidx, bool enable);
17void nvkm_mc_unk260(struct nvkm_device *, u32 data);
15 18
16int nv04_mc_new(struct nvkm_device *, int, struct nvkm_mc **); 19int nv04_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
17int nv11_mc_new(struct nvkm_device *, int, struct nvkm_mc **); 20int nv11_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
@@ -24,4 +27,5 @@ int gt215_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
24int gf100_mc_new(struct nvkm_device *, int, struct nvkm_mc **); 27int gf100_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
25int gk104_mc_new(struct nvkm_device *, int, struct nvkm_mc **); 28int gk104_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
26int gk20a_mc_new(struct nvkm_device *, int, struct nvkm_mc **); 29int gk20a_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
30int gp100_mc_new(struct nvkm_device *, int, struct nvkm_mc **);
27#endif 31#endif
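With the mc.h change above, the MC helpers are keyed off struct nvkm_device rather than struct nvkm_mc. A minimal sketch of an IRQ handler using the new entry points, modelled on the Tegra handler updated later in this series; example_intr and the include paths are illustrative only:

#include <linux/interrupt.h>
#include <core/device.h>
#include <subdev/mc.h>

static irqreturn_t example_intr(int irq, void *arg)
{
	struct nvkm_device *device = arg;
	bool handled = false;

	nvkm_mc_intr_unarm(device);	/* mask further interrupts */
	nvkm_mc_intr(device, &handled);	/* dispatch to subdevs/engines */
	nvkm_mc_intr_rearm(device);	/* unmask again */
	return handled ? IRQ_HANDLED : IRQ_NONE;
}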
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
index ddb913889d7e..e6523e2cea9f 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h
@@ -47,6 +47,7 @@ int g94_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
47int gf100_pci_new(struct nvkm_device *, int, struct nvkm_pci **); 47int gf100_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
48int gf106_pci_new(struct nvkm_device *, int, struct nvkm_pci **); 48int gf106_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
49int gk104_pci_new(struct nvkm_device *, int, struct nvkm_pci **); 49int gk104_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
50int gp100_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
50 51
51/* pcie functions */ 52/* pcie functions */
52int nvkm_pcie_set_link(struct nvkm_pci *, enum nvkm_pcie_speed, u8 width); 53int nvkm_pcie_set_link(struct nvkm_pci *, enum nvkm_pcie_speed, u8 width);
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h
index c6edd95a5b69..b04c38c07761 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h
@@ -43,9 +43,8 @@ struct nvkm_secboot {
43 const struct nvkm_secboot_func *func; 43 const struct nvkm_secboot_func *func;
44 struct nvkm_subdev subdev; 44 struct nvkm_subdev subdev;
45 45
46 enum nvkm_devidx devidx;
46 u32 base; 47 u32 base;
47 u32 irq_mask;
48 u32 enable_mask;
49}; 48};
50#define nvkm_secboot(p) container_of((p), struct nvkm_secboot, subdev) 49#define nvkm_secboot(p) container_of((p), struct nvkm_secboot, subdev)
51 50
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h
index 8fb575a92c48..71ebbfd4484f 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h
@@ -8,10 +8,11 @@ struct nvkm_top {
8 struct list_head device; 8 struct list_head device;
9}; 9};
10 10
11u32 nvkm_top_reset(struct nvkm_top *, enum nvkm_devidx); 11u32 nvkm_top_reset(struct nvkm_device *, enum nvkm_devidx);
12u32 nvkm_top_intr(struct nvkm_top *, u32 intr, u64 *subdevs); 12u32 nvkm_top_intr(struct nvkm_device *, u32 intr, u64 *subdevs);
13enum nvkm_devidx nvkm_top_fault(struct nvkm_top *, int fault); 13u32 nvkm_top_intr_mask(struct nvkm_device *, enum nvkm_devidx);
14enum nvkm_devidx nvkm_top_engine(struct nvkm_top *, int, int *runl, int *engn); 14enum nvkm_devidx nvkm_top_fault(struct nvkm_device *, int fault);
15enum nvkm_devidx nvkm_top_engine(struct nvkm_device *, int, int *runl, int *engn);
15 16
16int gk104_top_new(struct nvkm_device *, int, struct nvkm_top **); 17int gk104_top_new(struct nvkm_device *, int, struct nvkm_top **);
17#endif 18#endif
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
index feff55cff05b..b765f4ffcde6 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h
@@ -12,6 +12,9 @@ struct nvkm_volt {
12 u32 uv; 12 u32 uv;
13 u8 vid; 13 u8 vid;
14 } vid[256]; 14 } vid[256];
15
16 u32 max_uv;
17 u32 min_uv;
15}; 18};
16 19
17int nvkm_volt_get(struct nvkm_volt *); 20int nvkm_volt_get(struct nvkm_volt *);
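The new max_uv/min_uv fields describe the valid voltage range for the board. A hypothetical helper, not part of the patch, showing how a caller could clamp a requested microvolt value against those limits (zero means "no limit known"):

#include <linux/kernel.h>
#include <subdev/volt.h>

static u32 example_volt_clamp(struct nvkm_volt *volt, u32 uv)
{
	/* Clamp only when the limit has been populated from the VBIOS. */
	if (volt->min_uv)
		uv = max(uv, volt->min_uv);
	if (volt->max_uv)
		uv = min(uv, volt->max_uv);
	return uv;
}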
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index eb7de487a2b3..7bd4683216d0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -100,6 +100,7 @@ nouveau_abi16_swclass(struct nouveau_drm *drm)
100 case NV_DEVICE_INFO_V0_FERMI: 100 case NV_DEVICE_INFO_V0_FERMI:
101 case NV_DEVICE_INFO_V0_KEPLER: 101 case NV_DEVICE_INFO_V0_KEPLER:
102 case NV_DEVICE_INFO_V0_MAXWELL: 102 case NV_DEVICE_INFO_V0_MAXWELL:
103 case NV_DEVICE_INFO_V0_PASCAL:
103 return NVIF_CLASS_SW_GF100; 104 return NVIF_CLASS_SW_GF100;
104 } 105 }
105 106
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index db76b94e6e26..f2ad17aa33f0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -45,6 +45,8 @@
45static struct nouveau_dsm_priv { 45static struct nouveau_dsm_priv {
46 bool dsm_detected; 46 bool dsm_detected;
47 bool optimus_detected; 47 bool optimus_detected;
48 bool optimus_flags_detected;
49 bool optimus_skip_dsm;
48 acpi_handle dhandle; 50 acpi_handle dhandle;
49 acpi_handle rom_handle; 51 acpi_handle rom_handle;
50} nouveau_dsm_priv; 52} nouveau_dsm_priv;
@@ -57,9 +59,6 @@ bool nouveau_is_v1_dsm(void) {
57 return nouveau_dsm_priv.dsm_detected; 59 return nouveau_dsm_priv.dsm_detected;
58} 60}
59 61
60#define NOUVEAU_DSM_HAS_MUX 0x1
61#define NOUVEAU_DSM_HAS_OPT 0x2
62
63#ifdef CONFIG_VGA_SWITCHEROO 62#ifdef CONFIG_VGA_SWITCHEROO
64static const char nouveau_dsm_muid[] = { 63static const char nouveau_dsm_muid[] = {
65 0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D, 64 0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D,
@@ -110,7 +109,7 @@ static int nouveau_optimus_dsm(acpi_handle handle, int func, int arg, uint32_t *
110 * requirements on the fourth parameter, so a private implementation 109 * requirements on the fourth parameter, so a private implementation
111 * instead of using acpi_check_dsm(). 110 * instead of using acpi_check_dsm().
112 */ 111 */
113static int nouveau_check_optimus_dsm(acpi_handle handle) 112static int nouveau_dsm_get_optimus_functions(acpi_handle handle)
114{ 113{
115 int result; 114 int result;
116 115
@@ -125,7 +124,9 @@ static int nouveau_check_optimus_dsm(acpi_handle handle)
125 * ACPI Spec v4 9.14.1: if bit 0 is zero, no function is supported. 124 * ACPI Spec v4 9.14.1: if bit 0 is zero, no function is supported.
126 * If the n-th bit is enabled, function n is supported 125 * If the n-th bit is enabled, function n is supported
127 */ 126 */
128 return result & 1 && result & (1 << NOUVEAU_DSM_OPTIMUS_CAPS); 127 if (result & 1 && result & (1 << NOUVEAU_DSM_OPTIMUS_CAPS))
128 return result;
129 return 0;
129} 130}
130 131
131static int nouveau_dsm(acpi_handle handle, int func, int arg) 132static int nouveau_dsm(acpi_handle handle, int func, int arg)
@@ -212,26 +213,55 @@ static const struct vga_switcheroo_handler nouveau_dsm_handler = {
212 .get_client_id = nouveau_dsm_get_client_id, 213 .get_client_id = nouveau_dsm_get_client_id,
213}; 214};
214 215
215static int nouveau_dsm_pci_probe(struct pci_dev *pdev) 216/*
 217 * Firmware supporting Windows 8 or later does not use _DSM to put the device into
 218 * D3cold; it instead relies on disabling power resources on the parent.
219 */
220static bool nouveau_pr3_present(struct pci_dev *pdev)
221{
222 struct pci_dev *parent_pdev = pci_upstream_bridge(pdev);
223 struct acpi_device *parent_adev;
224
225 if (!parent_pdev)
226 return false;
227
228 parent_adev = ACPI_COMPANION(&parent_pdev->dev);
229 if (!parent_adev)
230 return false;
231
232 return acpi_has_method(parent_adev->handle, "_PR3");
233}
234
235static void nouveau_dsm_pci_probe(struct pci_dev *pdev, acpi_handle *dhandle_out,
236 bool *has_mux, bool *has_opt,
237 bool *has_opt_flags, bool *has_pr3)
216{ 238{
217 acpi_handle dhandle; 239 acpi_handle dhandle;
218 int retval = 0; 240 bool supports_mux;
241 int optimus_funcs;
219 242
220 dhandle = ACPI_HANDLE(&pdev->dev); 243 dhandle = ACPI_HANDLE(&pdev->dev);
221 if (!dhandle) 244 if (!dhandle)
222 return false; 245 return;
223 246
224 if (!acpi_has_method(dhandle, "_DSM")) 247 if (!acpi_has_method(dhandle, "_DSM"))
225 return false; 248 return;
249
250 supports_mux = acpi_check_dsm(dhandle, nouveau_dsm_muid, 0x00000102,
251 1 << NOUVEAU_DSM_POWER);
252 optimus_funcs = nouveau_dsm_get_optimus_functions(dhandle);
226 253
 227 if (acpi_check_dsm(dhandle, nouveau_dsm_muid, 0x00000102, 254 /* Does not look like an Nvidia device. */
228 1 << NOUVEAU_DSM_POWER)) 255 if (!supports_mux && !optimus_funcs)
229 retval |= NOUVEAU_DSM_HAS_MUX; 256 return;
230 257
231 if (nouveau_check_optimus_dsm(dhandle)) 258 *dhandle_out = dhandle;
232 retval |= NOUVEAU_DSM_HAS_OPT; 259 *has_mux = supports_mux;
260 *has_opt = !!optimus_funcs;
261 *has_opt_flags = optimus_funcs & (1 << NOUVEAU_DSM_OPTIMUS_FLAGS);
262 *has_pr3 = false;
233 263
234 if (retval & NOUVEAU_DSM_HAS_OPT) { 264 if (optimus_funcs) {
235 uint32_t result; 265 uint32_t result;
236 nouveau_optimus_dsm(dhandle, NOUVEAU_DSM_OPTIMUS_CAPS, 0, 266 nouveau_optimus_dsm(dhandle, NOUVEAU_DSM_OPTIMUS_CAPS, 0,
237 &result); 267 &result);
@@ -239,11 +269,9 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
239 (result & OPTIMUS_ENABLED) ? "enabled" : "disabled", 269 (result & OPTIMUS_ENABLED) ? "enabled" : "disabled",
240 (result & OPTIMUS_DYNAMIC_PWR_CAP) ? "dynamic power, " : "", 270 (result & OPTIMUS_DYNAMIC_PWR_CAP) ? "dynamic power, " : "",
241 (result & OPTIMUS_HDA_CODEC_MASK) ? "hda bios codec supported" : ""); 271 (result & OPTIMUS_HDA_CODEC_MASK) ? "hda bios codec supported" : "");
242 }
243 if (retval)
244 nouveau_dsm_priv.dhandle = dhandle;
245 272
246 return retval; 273 *has_pr3 = nouveau_pr3_present(pdev);
274 }
247} 275}
248 276
249static bool nouveau_dsm_detect(void) 277static bool nouveau_dsm_detect(void)
@@ -251,11 +279,13 @@ static bool nouveau_dsm_detect(void)
251 char acpi_method_name[255] = { 0 }; 279 char acpi_method_name[255] = { 0 };
252 struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name}; 280 struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
253 struct pci_dev *pdev = NULL; 281 struct pci_dev *pdev = NULL;
254 int has_dsm = 0; 282 acpi_handle dhandle = NULL;
255 int has_optimus = 0; 283 bool has_mux = false;
284 bool has_optimus = false;
285 bool has_optimus_flags = false;
286 bool has_power_resources = false;
256 int vga_count = 0; 287 int vga_count = 0;
257 bool guid_valid; 288 bool guid_valid;
258 int retval;
259 bool ret = false; 289 bool ret = false;
260 290
261 /* lookup the MXM GUID */ 291 /* lookup the MXM GUID */
@@ -268,32 +298,32 @@ static bool nouveau_dsm_detect(void)
268 while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { 298 while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
269 vga_count++; 299 vga_count++;
270 300
271 retval = nouveau_dsm_pci_probe(pdev); 301 nouveau_dsm_pci_probe(pdev, &dhandle, &has_mux, &has_optimus,
272 if (retval & NOUVEAU_DSM_HAS_MUX) 302 &has_optimus_flags, &has_power_resources);
273 has_dsm |= 1;
274 if (retval & NOUVEAU_DSM_HAS_OPT)
275 has_optimus = 1;
276 } 303 }
277 304
278 while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_3D << 8, pdev)) != NULL) { 305 while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_3D << 8, pdev)) != NULL) {
279 vga_count++; 306 vga_count++;
280 307
281 retval = nouveau_dsm_pci_probe(pdev); 308 nouveau_dsm_pci_probe(pdev, &dhandle, &has_mux, &has_optimus,
282 if (retval & NOUVEAU_DSM_HAS_MUX) 309 &has_optimus_flags, &has_power_resources);
283 has_dsm |= 1;
284 if (retval & NOUVEAU_DSM_HAS_OPT)
285 has_optimus = 1;
286 } 310 }
287 311
288 /* find the optimus DSM or the old v1 DSM */ 312 /* find the optimus DSM or the old v1 DSM */
289 if (has_optimus == 1) { 313 if (has_optimus) {
314 nouveau_dsm_priv.dhandle = dhandle;
290 acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME, 315 acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME,
291 &buffer); 316 &buffer);
292 printk(KERN_INFO "VGA switcheroo: detected Optimus DSM method %s handle\n", 317 printk(KERN_INFO "VGA switcheroo: detected Optimus DSM method %s handle\n",
293 acpi_method_name); 318 acpi_method_name);
319 if (has_power_resources)
320 pr_info("nouveau: detected PR support, will not use DSM\n");
294 nouveau_dsm_priv.optimus_detected = true; 321 nouveau_dsm_priv.optimus_detected = true;
322 nouveau_dsm_priv.optimus_flags_detected = has_optimus_flags;
323 nouveau_dsm_priv.optimus_skip_dsm = has_power_resources;
295 ret = true; 324 ret = true;
296 } else if (vga_count == 2 && has_dsm && guid_valid) { 325 } else if (vga_count == 2 && has_mux && guid_valid) {
326 nouveau_dsm_priv.dhandle = dhandle;
297 acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME, 327 acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME,
298 &buffer); 328 &buffer);
299 printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n", 329 printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
@@ -321,11 +351,12 @@ void nouveau_register_dsm_handler(void)
321void nouveau_switcheroo_optimus_dsm(void) 351void nouveau_switcheroo_optimus_dsm(void)
322{ 352{
323 u32 result = 0; 353 u32 result = 0;
324 if (!nouveau_dsm_priv.optimus_detected) 354 if (!nouveau_dsm_priv.optimus_detected || nouveau_dsm_priv.optimus_skip_dsm)
325 return; 355 return;
326 356
327 nouveau_optimus_dsm(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_OPTIMUS_FLAGS, 357 if (nouveau_dsm_priv.optimus_flags_detected)
328 0x3, &result); 358 nouveau_optimus_dsm(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_OPTIMUS_FLAGS,
359 0x3, &result);
329 360
330 nouveau_optimus_dsm(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_OPTIMUS_CAPS, 361 nouveau_optimus_dsm(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_OPTIMUS_CAPS,
331 NOUVEAU_DSM_OPTIMUS_SET_POWERDOWN, &result); 362 NOUVEAU_DSM_OPTIMUS_SET_POWERDOWN, &result);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 5e3f3e826476..528bdeffb339 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -209,8 +209,7 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
209 nvbo->tile_flags = tile_flags; 209 nvbo->tile_flags = tile_flags;
210 nvbo->bo.bdev = &drm->ttm.bdev; 210 nvbo->bo.bdev = &drm->ttm.bdev;
211 211
212 if (!nvxx_device(&drm->device)->func->cpu_coherent) 212 nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;
213 nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;
214 213
215 nvbo->page_shift = 12; 214 nvbo->page_shift = 12;
216 if (drm->client.vm) { 215 if (drm->client.vm) {
@@ -424,13 +423,7 @@ nouveau_bo_map(struct nouveau_bo *nvbo)
424 if (ret) 423 if (ret)
425 return ret; 424 return ret;
426 425
427 /* 426 ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
428 * TTM buffers allocated using the DMA API already have a mapping, let's
429 * use it instead.
430 */
431 if (!nvbo->force_coherent)
432 ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
433 &nvbo->kmap);
434 427
435 ttm_bo_unreserve(&nvbo->bo); 428 ttm_bo_unreserve(&nvbo->bo);
436 return ret; 429 return ret;
@@ -442,12 +435,7 @@ nouveau_bo_unmap(struct nouveau_bo *nvbo)
442 if (!nvbo) 435 if (!nvbo)
443 return; 436 return;
444 437
445 /* 438 ttm_bo_kunmap(&nvbo->kmap);
446 * TTM buffers allocated using the DMA API already had a coherent
447 * mapping which we used, no need to unmap.
448 */
449 if (!nvbo->force_coherent)
450 ttm_bo_kunmap(&nvbo->kmap);
451} 439}
452 440
453void 441void
@@ -506,35 +494,13 @@ nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
506 return 0; 494 return 0;
507} 495}
508 496
509static inline void *
510_nouveau_bo_mem_index(struct nouveau_bo *nvbo, unsigned index, void *mem, u8 sz)
511{
512 struct ttm_dma_tt *dma_tt;
513 u8 *m = mem;
514
515 index *= sz;
516
517 if (m) {
518 /* kmap'd address, return the corresponding offset */
519 m += index;
520 } else {
521 /* DMA-API mapping, lookup the right address */
522 dma_tt = (struct ttm_dma_tt *)nvbo->bo.ttm;
523 m = dma_tt->cpu_address[index / PAGE_SIZE];
524 m += index % PAGE_SIZE;
525 }
526
527 return m;
528}
529#define nouveau_bo_mem_index(o, i, m) _nouveau_bo_mem_index(o, i, m, sizeof(*m))
530
531void 497void
532nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val) 498nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
533{ 499{
534 bool is_iomem; 500 bool is_iomem;
535 u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem); 501 u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
536 502
537 mem = nouveau_bo_mem_index(nvbo, index, mem); 503 mem += index;
538 504
539 if (is_iomem) 505 if (is_iomem)
540 iowrite16_native(val, (void __force __iomem *)mem); 506 iowrite16_native(val, (void __force __iomem *)mem);
@@ -548,7 +514,7 @@ nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
548 bool is_iomem; 514 bool is_iomem;
549 u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem); 515 u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
550 516
551 mem = nouveau_bo_mem_index(nvbo, index, mem); 517 mem += index;
552 518
553 if (is_iomem) 519 if (is_iomem)
554 return ioread32_native((void __force __iomem *)mem); 520 return ioread32_native((void __force __iomem *)mem);
@@ -562,7 +528,7 @@ nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
562 bool is_iomem; 528 bool is_iomem;
563 u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem); 529 u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
564 530
565 mem = nouveau_bo_mem_index(nvbo, index, mem); 531 mem += index;
566 532
567 if (is_iomem) 533 if (is_iomem)
568 iowrite32_native(val, (void __force __iomem *)mem); 534 iowrite32_native(val, (void __force __iomem *)mem);
@@ -1082,7 +1048,6 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
1082 ret = ttm_bo_move_accel_cleanup(bo, 1048 ret = ttm_bo_move_accel_cleanup(bo,
1083 &fence->base, 1049 &fence->base,
1084 evict, 1050 evict,
1085 no_wait_gpu,
1086 new_mem); 1051 new_mem);
1087 nouveau_fence_unref(&fence); 1052 nouveau_fence_unref(&fence);
1088 } 1053 }
@@ -1104,6 +1069,10 @@ nouveau_bo_move_init(struct nouveau_drm *drm)
1104 struct ttm_mem_reg *, struct ttm_mem_reg *); 1069 struct ttm_mem_reg *, struct ttm_mem_reg *);
1105 int (*init)(struct nouveau_channel *, u32 handle); 1070 int (*init)(struct nouveau_channel *, u32 handle);
1106 } _methods[] = { 1071 } _methods[] = {
1072 { "COPY", 4, 0xc1b5, nve0_bo_move_copy, nve0_bo_move_init },
1073 { "GRCE", 0, 0xc1b5, nve0_bo_move_copy, nvc0_bo_move_init },
1074 { "COPY", 4, 0xc0b5, nve0_bo_move_copy, nve0_bo_move_init },
1075 { "GRCE", 0, 0xc0b5, nve0_bo_move_copy, nvc0_bo_move_init },
1107 { "COPY", 4, 0xb0b5, nve0_bo_move_copy, nve0_bo_move_init }, 1076 { "COPY", 4, 0xb0b5, nve0_bo_move_copy, nve0_bo_move_init },
1108 { "GRCE", 0, 0xb0b5, nve0_bo_move_copy, nvc0_bo_move_init }, 1077 { "GRCE", 0, 0xb0b5, nve0_bo_move_copy, nvc0_bo_move_init },
1109 { "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init }, 1078 { "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
@@ -1289,6 +1258,10 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
1289 struct nouveau_drm_tile *new_tile = NULL; 1258 struct nouveau_drm_tile *new_tile = NULL;
1290 int ret = 0; 1259 int ret = 0;
1291 1260
1261 ret = ttm_bo_wait(bo, intr, no_wait_gpu);
1262 if (ret)
1263 return ret;
1264
1292 if (nvbo->pin_refcnt) 1265 if (nvbo->pin_refcnt)
1293 NV_WARN(drm, "Moving pinned object %p!\n", nvbo); 1266 NV_WARN(drm, "Moving pinned object %p!\n", nvbo);
1294 1267
@@ -1324,7 +1297,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
1324 /* Fallback to software copy. */ 1297 /* Fallback to software copy. */
1325 ret = ttm_bo_wait(bo, intr, no_wait_gpu); 1298 ret = ttm_bo_wait(bo, intr, no_wait_gpu);
1326 if (ret == 0) 1299 if (ret == 0)
1327 ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); 1300 ret = ttm_bo_move_memcpy(bo, evict, intr, no_wait_gpu, new_mem);
1328 1301
1329out: 1302out:
1330 if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) { 1303 if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
@@ -1488,14 +1461,6 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
1488 dev = drm->dev; 1461 dev = drm->dev;
1489 pdev = device->dev; 1462 pdev = device->dev;
1490 1463
1491 /*
1492 * Objects matching this condition have been marked as force_coherent,
1493 * so use the DMA API for them.
1494 */
1495 if (!nvxx_device(&drm->device)->func->cpu_coherent &&
1496 ttm->caching_state == tt_uncached)
1497 return ttm_dma_populate(ttm_dma, dev->dev);
1498
1499#if IS_ENABLED(CONFIG_AGP) 1464#if IS_ENABLED(CONFIG_AGP)
1500 if (drm->agp.bridge) { 1465 if (drm->agp.bridge) {
1501 return ttm_agp_tt_populate(ttm); 1466 return ttm_agp_tt_populate(ttm);
@@ -1553,16 +1518,6 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
1553 dev = drm->dev; 1518 dev = drm->dev;
1554 pdev = device->dev; 1519 pdev = device->dev;
1555 1520
1556 /*
1557 * Objects matching this condition have been marked as force_coherent,
1558 * so use the DMA API for them.
1559 */
1560 if (!nvxx_device(&drm->device)->func->cpu_coherent &&
1561 ttm->caching_state == tt_uncached) {
1562 ttm_dma_unpopulate(ttm_dma, dev->dev);
1563 return;
1564 }
1565
1566#if IS_ENABLED(CONFIG_AGP) 1521#if IS_ENABLED(CONFIG_AGP)
1567 if (drm->agp.bridge) { 1522 if (drm->agp.bridge) {
1568 ttm_agp_tt_unpopulate(ttm); 1523 ttm_agp_tt_unpopulate(ttm);
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index b1d2527c5625..f9b3c811187e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -191,7 +191,8 @@ static int
191nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device, 191nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device,
192 u32 engine, struct nouveau_channel **pchan) 192 u32 engine, struct nouveau_channel **pchan)
193{ 193{
194 static const u16 oclasses[] = { MAXWELL_CHANNEL_GPFIFO_A, 194 static const u16 oclasses[] = { PASCAL_CHANNEL_GPFIFO_A,
195 MAXWELL_CHANNEL_GPFIFO_A,
195 KEPLER_CHANNEL_GPFIFO_B, 196 KEPLER_CHANNEL_GPFIFO_B,
196 KEPLER_CHANNEL_GPFIFO_A, 197 KEPLER_CHANNEL_GPFIFO_A,
197 FERMI_CHANNEL_GPFIFO, 198 FERMI_CHANNEL_GPFIFO,
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 7c77f960c8b8..afbf557b23d4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -47,7 +47,7 @@ nouveau_display_vblank_handler(struct nvif_notify *notify)
47{ 47{
48 struct nouveau_crtc *nv_crtc = 48 struct nouveau_crtc *nv_crtc =
49 container_of(notify, typeof(*nv_crtc), vblank); 49 container_of(notify, typeof(*nv_crtc), vblank);
50 drm_handle_vblank(nv_crtc->base.dev, nv_crtc->index); 50 drm_crtc_handle_vblank(&nv_crtc->base);
51 return NVIF_NOTIFY_KEEP; 51 return NVIF_NOTIFY_KEEP;
52} 52}
53 53
@@ -495,6 +495,8 @@ nouveau_display_create(struct drm_device *dev)
495 495
496 if (nouveau_modeset != 2 && drm->vbios.dcb.entries) { 496 if (nouveau_modeset != 2 && drm->vbios.dcb.entries) {
497 static const u16 oclass[] = { 497 static const u16 oclass[] = {
498 GP104_DISP,
499 GP100_DISP,
498 GM200_DISP, 500 GM200_DISP,
499 GM107_DISP, 501 GM107_DISP,
500 GK110_DISP, 502 GK110_DISP,
@@ -554,6 +556,7 @@ nouveau_display_destroy(struct drm_device *dev)
554 nouveau_display_vblank_fini(dev); 556 nouveau_display_vblank_fini(dev);
555 557
556 drm_kms_helper_poll_fini(dev); 558 drm_kms_helper_poll_fini(dev);
559 drm_crtc_force_disable_all(dev);
557 drm_mode_config_cleanup(dev); 560 drm_mode_config_cleanup(dev);
558 561
559 if (disp->dtor) 562 if (disp->dtor)
@@ -760,12 +763,11 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
760 763
761 /* Initialize a page flip struct */ 764 /* Initialize a page flip struct */
762 *s = (struct nouveau_page_flip_state) 765 *s = (struct nouveau_page_flip_state)
763 { { }, event, nouveau_crtc(crtc)->index, 766 { { }, event, crtc, fb->bits_per_pixel, fb->pitches[0],
764 fb->bits_per_pixel, fb->pitches[0], crtc->x, crtc->y,
765 new_bo->bo.offset }; 767 new_bo->bo.offset };
766 768
767 /* Keep vblanks on during flip, for the target crtc of this flip */ 769 /* Keep vblanks on during flip, for the target crtc of this flip */
768 drm_vblank_get(dev, nouveau_crtc(crtc)->index); 770 drm_crtc_vblank_get(crtc);
769 771
770 /* Emit a page flip */ 772 /* Emit a page flip */
771 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) { 773 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
@@ -810,7 +812,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
810 return 0; 812 return 0;
811 813
812fail_unreserve: 814fail_unreserve:
813 drm_vblank_put(dev, nouveau_crtc(crtc)->index); 815 drm_crtc_vblank_put(crtc);
814 ttm_bo_unreserve(&old_bo->bo); 816 ttm_bo_unreserve(&old_bo->bo);
815fail_unpin: 817fail_unpin:
816 mutex_unlock(&cli->mutex); 818 mutex_unlock(&cli->mutex);
@@ -842,17 +844,17 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
842 s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head); 844 s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head);
843 if (s->event) { 845 if (s->event) {
844 if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) { 846 if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
845 drm_arm_vblank_event(dev, s->crtc, s->event); 847 drm_crtc_arm_vblank_event(s->crtc, s->event);
846 } else { 848 } else {
847 drm_send_vblank_event(dev, s->crtc, s->event); 849 drm_crtc_send_vblank_event(s->crtc, s->event);
848 850
849 /* Give up ownership of vblank for page-flipped crtc */ 851 /* Give up ownership of vblank for page-flipped crtc */
850 drm_vblank_put(dev, s->crtc); 852 drm_crtc_vblank_put(s->crtc);
851 } 853 }
852 } 854 }
853 else { 855 else {
854 /* Give up ownership of vblank for page-flipped crtc */ 856 /* Give up ownership of vblank for page-flipped crtc */
855 drm_vblank_put(dev, s->crtc); 857 drm_crtc_vblank_put(s->crtc);
856 } 858 }
857 859
858 list_del(&s->head); 860 list_del(&s->head);
@@ -873,9 +875,10 @@ nouveau_flip_complete(struct nvif_notify *notify)
873 875
874 if (!nouveau_finish_page_flip(chan, &state)) { 876 if (!nouveau_finish_page_flip(chan, &state)) {
875 if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) { 877 if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
876 nv_set_crtc_base(drm->dev, state.crtc, state.offset + 878 nv_set_crtc_base(drm->dev, drm_crtc_index(state.crtc),
877 state.y * state.pitch + 879 state.offset + state.crtc->y *
878 state.x * state.bpp / 8); 880 state.pitch + state.crtc->x *
881 state.bpp / 8);
879 } 882 }
880 } 883 }
881 884
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 24273bacd885..0420ee861ea4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -28,7 +28,8 @@ int nouveau_framebuffer_init(struct drm_device *, struct nouveau_framebuffer *,
28struct nouveau_page_flip_state { 28struct nouveau_page_flip_state {
29 struct list_head head; 29 struct list_head head;
30 struct drm_pending_vblank_event *event; 30 struct drm_pending_vblank_event *event;
31 int crtc, bpp, pitch, x, y; 31 struct drm_crtc *crtc;
32 int bpp, pitch;
32 u64 offset; 33 u64 offset;
33}; 34};
34 35
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 11f8dd9c0edb..66c1280c0f1f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -22,13 +22,11 @@
22 * Authors: Ben Skeggs 22 * Authors: Ben Skeggs
23 */ 23 */
24 24
25#include <linux/apple-gmux.h>
26#include <linux/console.h> 25#include <linux/console.h>
27#include <linux/delay.h> 26#include <linux/delay.h>
28#include <linux/module.h> 27#include <linux/module.h>
29#include <linux/pci.h> 28#include <linux/pci.h>
30#include <linux/pm_runtime.h> 29#include <linux/pm_runtime.h>
31#include <linux/vgaarb.h>
32#include <linux/vga_switcheroo.h> 30#include <linux/vga_switcheroo.h>
33 31
34#include "drmP.h" 32#include "drmP.h"
@@ -200,6 +198,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
200 case KEPLER_CHANNEL_GPFIFO_A: 198 case KEPLER_CHANNEL_GPFIFO_A:
201 case KEPLER_CHANNEL_GPFIFO_B: 199 case KEPLER_CHANNEL_GPFIFO_B:
202 case MAXWELL_CHANNEL_GPFIFO_A: 200 case MAXWELL_CHANNEL_GPFIFO_A:
201 case PASCAL_CHANNEL_GPFIFO_A:
203 ret = nvc0_fence_create(drm); 202 ret = nvc0_fence_create(drm);
204 break; 203 break;
205 default: 204 default:
@@ -315,16 +314,19 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
315 bool boot = false; 314 bool boot = false;
316 int ret; 315 int ret;
317 316
318 /* 317 if (vga_switcheroo_client_probe_defer(pdev))
319 * apple-gmux is needed on dual GPU MacBook Pro
320 * to probe the panel if we're the inactive GPU.
321 */
322 if (IS_ENABLED(CONFIG_VGA_ARB) && IS_ENABLED(CONFIG_VGA_SWITCHEROO) &&
323 apple_gmux_present() && pdev != vga_default_device() &&
324 !vga_switcheroo_handler_flags())
325 return -EPROBE_DEFER; 318 return -EPROBE_DEFER;
326 319
327 /* remove conflicting drivers (vesafb, efifb etc) */ 320 /* We need to check that the chipset is supported before booting
321 * fbdev off the hardware, as there's no way to put it back.
322 */
323 ret = nvkm_device_pci_new(pdev, NULL, "error", true, false, 0, &device);
324 if (ret)
325 return ret;
326
327 nvkm_device_del(&device);
328
329 /* Remove conflicting drivers (vesafb, efifb etc). */
328 aper = alloc_apertures(3); 330 aper = alloc_apertures(3);
329 if (!aper) 331 if (!aper)
330 return -ENOMEM; 332 return -ENOMEM;
@@ -438,6 +440,11 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
438 nouveau_vga_init(drm); 440 nouveau_vga_init(drm);
439 441
440 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) { 442 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
443 if (!nvxx_device(&drm->device)->mmu) {
444 ret = -ENOSYS;
445 goto fail_device;
446 }
447
441 ret = nvkm_vm_new(nvxx_device(&drm->device), 0, (1ULL << 40), 448 ret = nvkm_vm_new(nvxx_device(&drm->device), 0, (1ULL << 40),
442 0x1000, NULL, &drm->client.vm); 449 0x1000, NULL, &drm->client.vm);
443 if (ret) 450 if (ret)
@@ -498,7 +505,11 @@ nouveau_drm_unload(struct drm_device *dev)
498{ 505{
499 struct nouveau_drm *drm = nouveau_drm(dev); 506 struct nouveau_drm *drm = nouveau_drm(dev);
500 507
501 pm_runtime_get_sync(dev->dev); 508 if (nouveau_runtime_pm != 0) {
509 pm_runtime_get_sync(dev->dev);
510 pm_runtime_forbid(dev->dev);
511 }
512
502 nouveau_fbcon_fini(dev); 513 nouveau_fbcon_fini(dev);
503 nouveau_accel_fini(drm); 514 nouveau_accel_fini(drm);
504 nouveau_hwmon_fini(dev); 515 nouveau_hwmon_fini(dev);
@@ -970,7 +981,7 @@ driver_stub = {
970 .gem_prime_vmap = nouveau_gem_prime_vmap, 981 .gem_prime_vmap = nouveau_gem_prime_vmap,
971 .gem_prime_vunmap = nouveau_gem_prime_vunmap, 982 .gem_prime_vunmap = nouveau_gem_prime_vunmap,
972 983
973 .gem_free_object = nouveau_gem_object_del, 984 .gem_free_object_unlocked = nouveau_gem_object_del,
974 .gem_open_object = nouveau_gem_object_open, 985 .gem_open_object = nouveau_gem_object_open,
975 .gem_close_object = nouveau_gem_object_close, 986 .gem_close_object = nouveau_gem_object_close,
976 987
@@ -1078,7 +1089,6 @@ nouveau_drm_init(void)
1078 driver_pci = driver_stub; 1089 driver_pci = driver_stub;
1079 driver_pci.set_busid = drm_pci_set_busid; 1090 driver_pci.set_busid = drm_pci_set_busid;
1080 driver_platform = driver_stub; 1091 driver_platform = driver_stub;
1081 driver_platform.set_busid = drm_platform_set_busid;
1082 1092
1083 nouveau_display_options(); 1093 nouveau_display_options();
1084 1094
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 2e3a62d38fe9..64c4ce7115ad 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -57,7 +57,8 @@ struct nouveau_fence_priv {
57 int (*context_new)(struct nouveau_channel *); 57 int (*context_new)(struct nouveau_channel *);
58 void (*context_del)(struct nouveau_channel *); 58 void (*context_del)(struct nouveau_channel *);
59 59
60 u32 contexts, context_base; 60 u32 contexts;
61 u64 context_base;
61 bool uevent; 62 bool uevent;
62}; 63};
63 64
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
index 1ff4166af26e..71f764bf4cc6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
@@ -535,6 +535,40 @@ static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO,
535 nouveau_hwmon_get_in0_input, NULL, 0); 535 nouveau_hwmon_get_in0_input, NULL, 0);
536 536
537static ssize_t 537static ssize_t
538nouveau_hwmon_get_in0_min(struct device *d,
539 struct device_attribute *a, char *buf)
540{
541 struct drm_device *dev = dev_get_drvdata(d);
542 struct nouveau_drm *drm = nouveau_drm(dev);
543 struct nvkm_volt *volt = nvxx_volt(&drm->device);
544
545 if (!volt || !volt->min_uv)
546 return -ENODEV;
547
548 return sprintf(buf, "%i\n", volt->min_uv / 1000);
549}
550
551static SENSOR_DEVICE_ATTR(in0_min, S_IRUGO,
552 nouveau_hwmon_get_in0_min, NULL, 0);
553
554static ssize_t
555nouveau_hwmon_get_in0_max(struct device *d,
556 struct device_attribute *a, char *buf)
557{
558 struct drm_device *dev = dev_get_drvdata(d);
559 struct nouveau_drm *drm = nouveau_drm(dev);
560 struct nvkm_volt *volt = nvxx_volt(&drm->device);
561
562 if (!volt || !volt->max_uv)
563 return -ENODEV;
564
565 return sprintf(buf, "%i\n", volt->max_uv / 1000);
566}
567
568static SENSOR_DEVICE_ATTR(in0_max, S_IRUGO,
569 nouveau_hwmon_get_in0_max, NULL, 0);
570
571static ssize_t
538nouveau_hwmon_get_in0_label(struct device *d, 572nouveau_hwmon_get_in0_label(struct device *d,
539 struct device_attribute *a, char *buf) 573 struct device_attribute *a, char *buf)
540{ 574{
@@ -594,6 +628,8 @@ static struct attribute *hwmon_pwm_fan_attributes[] = {
594 628
595static struct attribute *hwmon_in0_attributes[] = { 629static struct attribute *hwmon_in0_attributes[] = {
596 &sensor_dev_attr_in0_input.dev_attr.attr, 630 &sensor_dev_attr_in0_input.dev_attr.attr,
631 &sensor_dev_attr_in0_min.dev_attr.attr,
632 &sensor_dev_attr_in0_max.dev_attr.attr,
597 &sensor_dev_attr_in0_label.dev_attr.attr, 633 &sensor_dev_attr_in0_label.dev_attr.attr,
598 NULL 634 NULL
599}; 635};
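The two new hwmon attributes expose those voltage limits to userspace in millivolts. A small userspace sketch reading them back; the hwmon0 index and sysfs path are assumptions and vary per system:

#include <stdio.h>

int main(void)
{
	const char *files[] = {
		"/sys/class/hwmon/hwmon0/in0_min",	/* assumed hwmon index */
		"/sys/class/hwmon/hwmon0/in0_max",
	};

	for (int i = 0; i < 2; i++) {
		FILE *f = fopen(files[i], "r");
		int mv;

		if (f && fscanf(f, "%d", &mv) == 1)
			printf("%s: %d mV\n", files[i], mv);
		if (f)
			fclose(f);
	}
	return 0;
}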
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index bcee91497eb9..1825dbc33192 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -164,6 +164,7 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
164 case NV_DEVICE_INFO_V0_FERMI: 164 case NV_DEVICE_INFO_V0_FERMI:
165 case NV_DEVICE_INFO_V0_KEPLER: 165 case NV_DEVICE_INFO_V0_KEPLER:
166 case NV_DEVICE_INFO_V0_MAXWELL: 166 case NV_DEVICE_INFO_V0_MAXWELL:
167 case NV_DEVICE_INFO_V0_PASCAL:
167 node->memtype = (nvbo->tile_flags & 0xff00) >> 8; 168 node->memtype = (nvbo->tile_flags & 0xff00) >> 8;
168 break; 169 break;
169 default: 170 default:
diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c
index 675e9e077a95..08f9c6fa0f7f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_usif.c
+++ b/drivers/gpu/drm/nouveau/nouveau_usif.c
@@ -212,7 +212,6 @@ usif_notify_get(struct drm_file *f, void *data, u32 size, void *argv, u32 argc)
212 ntfy->p->base.event = &ntfy->p->e.base; 212 ntfy->p->base.event = &ntfy->p->e.base;
213 ntfy->p->base.file_priv = f; 213 ntfy->p->base.file_priv = f;
214 ntfy->p->base.pid = current->pid; 214 ntfy->p->base.pid = current->pid;
215 ntfy->p->base.destroy =(void(*)(struct drm_pending_event *))kfree;
216 ntfy->p->e.base.type = DRM_NOUVEAU_EVENT_NVIF; 215 ntfy->p->e.base.type = DRM_NOUVEAU_EVENT_NVIF;
217 ntfy->p->e.base.length = sizeof(ntfy->p->e.base) + ntfy->reply; 216 ntfy->p->e.base.length = sizeof(ntfy->p->e.base) + ntfy->reply;
218 217
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 7d9248b8c664..da8fd5ff9d0f 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -107,11 +107,11 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
107 ((image->dx + image->width) & 0xffff)); 107 ((image->dx + image->width) & 0xffff));
108 OUT_RING(chan, bg); 108 OUT_RING(chan, bg);
109 OUT_RING(chan, fg); 109 OUT_RING(chan, fg);
110 OUT_RING(chan, (image->height << 16) | image->width); 110 OUT_RING(chan, (image->height << 16) | ALIGN(image->width, 8));
111 OUT_RING(chan, (image->height << 16) | image->width); 111 OUT_RING(chan, (image->height << 16) | image->width);
112 OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff)); 112 OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
113 113
114 dsize = ALIGN(image->width * image->height, 32) >> 5; 114 dsize = ALIGN(ALIGN(image->width, 8) * image->height, 32) >> 5;
115 while (dsize) { 115 while (dsize) {
116 int iter_len = dsize > 128 ? 128 : dsize; 116 int iter_len = dsize > 128 ? 128 : dsize;
117 117
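The imageblit fix above (repeated for nv50 and nvc0 below) pads the glyph width to a multiple of 8 before counting how many dwords of bitmap data to push. A worked example with a hypothetical 12x22 glyph, assuming the usual kernel ALIGN() semantics:

/* Standalone illustration of the padded-width dword count. */
#define EXAMPLE_ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

static unsigned int example_dsize(unsigned int width, unsigned int height)
{
	/* pad each scanline to a multiple of 8 pixels before counting dwords */
	return EXAMPLE_ALIGN(EXAMPLE_ALIGN(width, 8) * height, 32) >> 5;
}

/* example_dsize(12, 22) == 11 dwords, whereas the old
 * ALIGN(12 * 22, 32) >> 5 == 9 dwords, so glyph widths that are not a
 * multiple of 8 previously counted fewer dwords than the padded bitmap
 * occupies. */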
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 3ffc2b0057bf..7d0edcbcfca7 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -297,6 +297,8 @@ nv50_core_create(struct nvif_device *device, struct nvif_object *disp,
297 .pushbuf = 0xb0007d00, 297 .pushbuf = 0xb0007d00,
298 }; 298 };
299 static const s32 oclass[] = { 299 static const s32 oclass[] = {
300 GP104_DISP_CORE_CHANNEL_DMA,
301 GP100_DISP_CORE_CHANNEL_DMA,
300 GM200_DISP_CORE_CHANNEL_DMA, 302 GM200_DISP_CORE_CHANNEL_DMA,
301 GM107_DISP_CORE_CHANNEL_DMA, 303 GM107_DISP_CORE_CHANNEL_DMA,
302 GK110_DISP_CORE_CHANNEL_DMA, 304 GK110_DISP_CORE_CHANNEL_DMA,
@@ -1346,21 +1348,22 @@ nv50_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
1346 return 0; 1348 return 0;
1347} 1349}
1348 1350
1349static void 1351static int
1350nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, 1352nv50_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
1351 uint32_t start, uint32_t size) 1353 uint32_t size)
1352{ 1354{
1353 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 1355 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
1354 u32 end = min_t(u32, start + size, 256);
1355 u32 i; 1356 u32 i;
1356 1357
1357 for (i = start; i < end; i++) { 1358 for (i = 0; i < size; i++) {
1358 nv_crtc->lut.r[i] = r[i]; 1359 nv_crtc->lut.r[i] = r[i];
1359 nv_crtc->lut.g[i] = g[i]; 1360 nv_crtc->lut.g[i] = g[i];
1360 nv_crtc->lut.b[i] = b[i]; 1361 nv_crtc->lut.b[i] = b[i];
1361 } 1362 }
1362 1363
1363 nv50_crtc_lut_load(crtc); 1364 nv50_crtc_lut_load(crtc);
1365
1366 return 0;
1364} 1367}
1365 1368
1366static void 1369static void
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 1aeb698e9707..af3d3c49411a 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -125,7 +125,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
125 OUT_RING(chan, 0); 125 OUT_RING(chan, 0);
126 OUT_RING(chan, image->dy); 126 OUT_RING(chan, image->dy);
127 127
128 dwords = ALIGN(image->width * image->height, 32) >> 5; 128 dwords = ALIGN(ALIGN(image->width, 8) * image->height, 32) >> 5;
129 while (dwords) { 129 while (dwords) {
130 int push = dwords > 2047 ? 2047 : dwords; 130 int push = dwords > 2047 ? 2047 : dwords;
131 131
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
index 839f4c8c1805..054b6a056d99 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -125,7 +125,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
125 OUT_RING (chan, 0); 125 OUT_RING (chan, 0);
126 OUT_RING (chan, image->dy); 126 OUT_RING (chan, image->dy);
127 127
128 dwords = ALIGN(image->width * image->height, 32) >> 5; 128 dwords = ALIGN(ALIGN(image->width, 8) * image->height, 32) >> 5;
129 while (dwords) { 129 while (dwords) {
130 int push = dwords > 2047 ? 2047 : dwords; 130 int push = dwords > 2047 ? 2047 : dwords;
131 131
diff --git a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
index b18557858f19..19044aba265e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
+++ b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c
@@ -57,6 +57,9 @@ nvkm_subdev_name[NVKM_SUBDEV_NR] = {
57 [NVKM_ENGINE_CE0 ] = "ce0", 57 [NVKM_ENGINE_CE0 ] = "ce0",
58 [NVKM_ENGINE_CE1 ] = "ce1", 58 [NVKM_ENGINE_CE1 ] = "ce1",
59 [NVKM_ENGINE_CE2 ] = "ce2", 59 [NVKM_ENGINE_CE2 ] = "ce2",
60 [NVKM_ENGINE_CE3 ] = "ce3",
61 [NVKM_ENGINE_CE4 ] = "ce4",
62 [NVKM_ENGINE_CE5 ] = "ce5",
60 [NVKM_ENGINE_CIPHER ] = "cipher", 63 [NVKM_ENGINE_CIPHER ] = "cipher",
61 [NVKM_ENGINE_DISP ] = "disp", 64 [NVKM_ENGINE_DISP ] = "disp",
62 [NVKM_ENGINE_DMAOBJ ] = "dma", 65 [NVKM_ENGINE_DMAOBJ ] = "dma",
@@ -71,6 +74,7 @@ nvkm_subdev_name[NVKM_SUBDEV_NR] = {
71 [NVKM_ENGINE_MSVLD ] = "msvld", 74 [NVKM_ENGINE_MSVLD ] = "msvld",
72 [NVKM_ENGINE_NVENC0 ] = "nvenc0", 75 [NVKM_ENGINE_NVENC0 ] = "nvenc0",
73 [NVKM_ENGINE_NVENC1 ] = "nvenc1", 76 [NVKM_ENGINE_NVENC1 ] = "nvenc1",
77 [NVKM_ENGINE_NVENC2 ] = "nvenc2",
74 [NVKM_ENGINE_NVDEC ] = "nvdec", 78 [NVKM_ENGINE_NVDEC ] = "nvdec",
75 [NVKM_ENGINE_PM ] = "pm", 79 [NVKM_ENGINE_PM ] = "pm",
76 [NVKM_ENGINE_SEC ] = "sec", 80 [NVKM_ENGINE_SEC ] = "sec",
@@ -105,7 +109,7 @@ nvkm_subdev_fini(struct nvkm_subdev *subdev, bool suspend)
105 } 109 }
106 } 110 }
107 111
108 nvkm_mc_reset(device->mc, subdev->index); 112 nvkm_mc_reset(device, subdev->index);
109 113
110 time = ktime_to_us(ktime_get()) - time; 114 time = ktime_to_us(ktime_get()) - time;
111 nvkm_trace(subdev, "%s completed in %lldus\n", action, time); 115 nvkm_trace(subdev, "%s completed in %lldus\n", action, time);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
index 9c19d59b47df..a4458a8eb30a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild
@@ -3,3 +3,5 @@ nvkm-y += nvkm/engine/ce/gf100.o
3nvkm-y += nvkm/engine/ce/gk104.o 3nvkm-y += nvkm/engine/ce/gk104.o
4nvkm-y += nvkm/engine/ce/gm107.o 4nvkm-y += nvkm/engine/ce/gm107.o
5nvkm-y += nvkm/engine/ce/gm200.o 5nvkm-y += nvkm/engine/ce/gm200.o
6nvkm-y += nvkm/engine/ce/gp100.o
7nvkm-y += nvkm/engine/ce/gp104.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp100.c
new file mode 100644
index 000000000000..c7710456bc30
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp100.c
@@ -0,0 +1,102 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "priv.h"
25#include <core/enum.h>
26
27#include <nvif/class.h>
28
29static const struct nvkm_enum
30gp100_ce_launcherr_report[] = {
31 { 0x0, "NO_ERR" },
32 { 0x1, "2D_LAYER_EXCEEDS_DEPTH" },
33 { 0x2, "INVALID_ALIGNMENT" },
34 { 0x3, "MEM2MEM_RECT_OUT_OF_BOUNDS" },
35 { 0x4, "SRC_LINE_EXCEEDS_PITCH" },
36 { 0x5, "SRC_LINE_EXCEEDS_NEG_PITCH" },
37 { 0x6, "DST_LINE_EXCEEDS_PITCH" },
38 { 0x7, "DST_LINE_EXCEEDS_NEG_PITCH" },
39 { 0x8, "BAD_SRC_PIXEL_COMP_REF" },
40 { 0x9, "INVALID_VALUE" },
41 { 0xa, "UNUSED_FIELD" },
42 { 0xb, "INVALID_OPERATION" },
43 { 0xc, "NO_RESOURCES" },
44 { 0xd, "INVALID_CONFIG" },
45 {}
46};
47
48static void
49gp100_ce_intr_launcherr(struct nvkm_engine *ce, const u32 base)
50{
51 struct nvkm_subdev *subdev = &ce->subdev;
52 struct nvkm_device *device = subdev->device;
53 u32 stat = nvkm_rd32(device, 0x104418 + base);
54 const struct nvkm_enum *en =
55 nvkm_enum_find(gp100_ce_launcherr_report, stat & 0x0000000f);
56 nvkm_warn(subdev, "LAUNCHERR %08x [%s]\n", stat, en ? en->name : "");
57}
58
59void
60gp100_ce_intr(struct nvkm_engine *ce)
61{
62 const u32 base = (ce->subdev.index - NVKM_ENGINE_CE0) * 0x80;
63 struct nvkm_subdev *subdev = &ce->subdev;
64 struct nvkm_device *device = subdev->device;
65 u32 mask = nvkm_rd32(device, 0x10440c + base);
66 u32 intr = nvkm_rd32(device, 0x104410 + base) & mask;
67 if (intr & 0x00000001) { //XXX: guess
68 nvkm_warn(subdev, "BLOCKPIPE\n");
69 nvkm_wr32(device, 0x104410 + base, 0x00000001);
70 intr &= ~0x00000001;
71 }
72 if (intr & 0x00000002) { //XXX: guess
73 nvkm_warn(subdev, "NONBLOCKPIPE\n");
74 nvkm_wr32(device, 0x104410 + base, 0x00000002);
75 intr &= ~0x00000002;
76 }
77 if (intr & 0x00000004) {
78 gp100_ce_intr_launcherr(ce, base);
79 nvkm_wr32(device, 0x104410 + base, 0x00000004);
80 intr &= ~0x00000004;
81 }
82 if (intr) {
83 nvkm_warn(subdev, "intr %08x\n", intr);
84 nvkm_wr32(device, 0x104410 + base, intr);
85 }
86}
87
88static const struct nvkm_engine_func
89gp100_ce = {
90 .intr = gp100_ce_intr,
91 .sclass = {
92 { -1, -1, PASCAL_DMA_COPY_A },
93 {}
94 }
95};
96
97int
98gp100_ce_new(struct nvkm_device *device, int index,
99 struct nvkm_engine **pengine)
100{
101 return nvkm_engine_new_(&gp100_ce, device, index, true, pengine);
102}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp104.c
new file mode 100644
index 000000000000..20e019788a53
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp104.c
@@ -0,0 +1,44 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "priv.h"
25#include <core/enum.h>
26
27#include <nvif/class.h>
28
29static const struct nvkm_engine_func
30gp104_ce = {
31 .intr = gp100_ce_intr,
32 .sclass = {
33 { -1, -1, PASCAL_DMA_COPY_B },
34 { -1, -1, PASCAL_DMA_COPY_A },
35 {}
36 }
37};
38
39int
40gp104_ce_new(struct nvkm_device *device, int index,
41 struct nvkm_engine **pengine)
42{
43 return nvkm_engine_new_(&gp104_ce, device, index, true, pengine);
44}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
index e2fa8b161943..2dce405976ad 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h
@@ -4,4 +4,5 @@
4 4
5void gt215_ce_intr(struct nvkm_falcon *, struct nvkm_fifo_chan *); 5void gt215_ce_intr(struct nvkm_falcon *, struct nvkm_fifo_chan *);
6void gk104_ce_intr(struct nvkm_engine *); 6void gk104_ce_intr(struct nvkm_engine *);
7void gp100_ce_intr(struct nvkm_engine *);
7#endif 8#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index 4572debcb0c9..7218a067a6c5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2148,6 +2148,67 @@ nv12b_chipset = {
2148 .sw = gf100_sw_new, 2148 .sw = gf100_sw_new,
2149}; 2149};
2150 2150
2151static const struct nvkm_device_chip
2152nv130_chipset = {
2153 .name = "GP100",
2154 .bar = gf100_bar_new,
2155 .bios = nvkm_bios_new,
2156 .bus = gf100_bus_new,
2157 .devinit = gm200_devinit_new,
2158 .fb = gp100_fb_new,
2159 .fuse = gm107_fuse_new,
2160 .gpio = gk104_gpio_new,
2161 .i2c = gm200_i2c_new,
2162 .ibus = gm200_ibus_new,
2163 .imem = nv50_instmem_new,
2164 .ltc = gp100_ltc_new,
2165 .mc = gp100_mc_new,
2166 .mmu = gf100_mmu_new,
2167 .secboot = gm200_secboot_new,
2168 .pci = gp100_pci_new,
2169 .timer = gk20a_timer_new,
2170 .top = gk104_top_new,
2171 .ce[0] = gp100_ce_new,
2172 .ce[1] = gp100_ce_new,
2173 .ce[2] = gp100_ce_new,
2174 .ce[3] = gp100_ce_new,
2175 .ce[4] = gp100_ce_new,
2176 .ce[5] = gp100_ce_new,
2177 .dma = gf119_dma_new,
2178 .disp = gp100_disp_new,
2179 .fifo = gp100_fifo_new,
2180 .gr = gp100_gr_new,
2181 .sw = gf100_sw_new,
2182};
2183
2184static const struct nvkm_device_chip
2185nv134_chipset = {
2186 .name = "GP104",
2187 .bar = gf100_bar_new,
2188 .bios = nvkm_bios_new,
2189 .bus = gf100_bus_new,
2190 .devinit = gm200_devinit_new,
2191 .fb = gp104_fb_new,
2192 .fuse = gm107_fuse_new,
2193 .gpio = gk104_gpio_new,
2194 .i2c = gm200_i2c_new,
2195 .ibus = gm200_ibus_new,
2196 .imem = nv50_instmem_new,
2197 .ltc = gp100_ltc_new,
2198 .mc = gp100_mc_new,
2199 .mmu = gf100_mmu_new,
2200 .pci = gp100_pci_new,
2201 .timer = gk20a_timer_new,
2202 .top = gk104_top_new,
2203 .ce[0] = gp104_ce_new,
2204 .ce[1] = gp104_ce_new,
2205 .ce[2] = gp104_ce_new,
2206 .ce[3] = gp104_ce_new,
2207 .disp = gp104_disp_new,
2208 .dma = gf119_dma_new,
2209 .fifo = gp100_fifo_new,
2210};
2211
2151static int 2212static int
2152nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size, 2213nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size,
2153 struct nvkm_notify *notify) 2214 struct nvkm_notify *notify)
@@ -2221,6 +2282,9 @@ nvkm_device_engine(struct nvkm_device *device, int index)
2221 _(CE0 , device->ce[0] , device->ce[0]); 2282 _(CE0 , device->ce[0] , device->ce[0]);
2222 _(CE1 , device->ce[1] , device->ce[1]); 2283 _(CE1 , device->ce[1] , device->ce[1]);
2223 _(CE2 , device->ce[2] , device->ce[2]); 2284 _(CE2 , device->ce[2] , device->ce[2]);
2285 _(CE3 , device->ce[3] , device->ce[3]);
2286 _(CE4 , device->ce[4] , device->ce[4]);
2287 _(CE5 , device->ce[5] , device->ce[5]);
2224 _(CIPHER , device->cipher , device->cipher); 2288 _(CIPHER , device->cipher , device->cipher);
2225 _(DISP , device->disp , &device->disp->engine); 2289 _(DISP , device->disp , &device->disp->engine);
2226 _(DMAOBJ , device->dma , &device->dma->engine); 2290 _(DMAOBJ , device->dma , &device->dma->engine);
@@ -2235,6 +2299,7 @@ nvkm_device_engine(struct nvkm_device *device, int index)
2235 _(MSVLD , device->msvld , device->msvld); 2299 _(MSVLD , device->msvld , device->msvld);
2236 _(NVENC0 , device->nvenc[0], device->nvenc[0]); 2300 _(NVENC0 , device->nvenc[0], device->nvenc[0]);
2237 _(NVENC1 , device->nvenc[1], device->nvenc[1]); 2301 _(NVENC1 , device->nvenc[1], device->nvenc[1]);
2302 _(NVENC2 , device->nvenc[2], device->nvenc[2]);
2238 _(NVDEC , device->nvdec , device->nvdec); 2303 _(NVDEC , device->nvdec , device->nvdec);
2239 _(PM , device->pm , &device->pm->engine); 2304 _(PM , device->pm , &device->pm->engine);
2240 _(SEC , device->sec , device->sec); 2305 _(SEC , device->sec , device->sec);
@@ -2492,6 +2557,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
2492 case 0x100: device->card_type = NV_E0; break; 2557 case 0x100: device->card_type = NV_E0; break;
2493 case 0x110: 2558 case 0x110:
2494 case 0x120: device->card_type = GM100; break; 2559 case 0x120: device->card_type = GM100; break;
2560 case 0x130: device->card_type = GP100; break;
2495 default: 2561 default:
2496 break; 2562 break;
2497 } 2563 }
@@ -2576,6 +2642,8 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
2576 case 0x124: device->chip = &nv124_chipset; break; 2642 case 0x124: device->chip = &nv124_chipset; break;
2577 case 0x126: device->chip = &nv126_chipset; break; 2643 case 0x126: device->chip = &nv126_chipset; break;
2578 case 0x12b: device->chip = &nv12b_chipset; break; 2644 case 0x12b: device->chip = &nv12b_chipset; break;
2645 case 0x130: device->chip = &nv130_chipset; break;
2646 case 0x134: device->chip = &nv134_chipset; break;
2579 default: 2647 default:
2580 nvdev_error(device, "unknown chipset (%08x)\n", boot0); 2648 nvdev_error(device, "unknown chipset (%08x)\n", boot0);
2581 goto done; 2649 goto done;
@@ -2659,6 +2727,9 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
2659 _(NVKM_ENGINE_CE0 , ce[0]); 2727 _(NVKM_ENGINE_CE0 , ce[0]);
2660 _(NVKM_ENGINE_CE1 , ce[1]); 2728 _(NVKM_ENGINE_CE1 , ce[1]);
2661 _(NVKM_ENGINE_CE2 , ce[2]); 2729 _(NVKM_ENGINE_CE2 , ce[2]);
2730 _(NVKM_ENGINE_CE3 , ce[3]);
2731 _(NVKM_ENGINE_CE4 , ce[4]);
2732 _(NVKM_ENGINE_CE5 , ce[5]);
2662 _(NVKM_ENGINE_CIPHER , cipher); 2733 _(NVKM_ENGINE_CIPHER , cipher);
2663 _(NVKM_ENGINE_DISP , disp); 2734 _(NVKM_ENGINE_DISP , disp);
2664 _(NVKM_ENGINE_DMAOBJ , dma); 2735 _(NVKM_ENGINE_DMAOBJ , dma);
@@ -2673,6 +2744,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
2673 _(NVKM_ENGINE_MSVLD , msvld); 2744 _(NVKM_ENGINE_MSVLD , msvld);
2674 _(NVKM_ENGINE_NVENC0 , nvenc[0]); 2745 _(NVKM_ENGINE_NVENC0 , nvenc[0]);
2675 _(NVKM_ENGINE_NVENC1 , nvenc[1]); 2746 _(NVKM_ENGINE_NVENC1 , nvenc[1]);
2747 _(NVKM_ENGINE_NVENC2 , nvenc[2]);
2676 _(NVKM_ENGINE_NVDEC , nvdec); 2748 _(NVKM_ENGINE_NVDEC , nvdec);
2677 _(NVKM_ENGINE_PM , pm); 2749 _(NVKM_ENGINE_PM , pm);
2678 _(NVKM_ENGINE_SEC , sec); 2750 _(NVKM_ENGINE_SEC , sec);
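The added _(CE3, ...), _(CE4, ...), _(CE5, ...) and _(NVENC2, ...) lines extend the locally defined _() helper macros in both hunks, each of which expands an entry into one case of a switch over the engine index. A tiny standalone sketch of that macro-generated-switch technique, using an invented device layout and index numbering:

#include <stdio.h>

struct demo_device {
    void *ce[6];
    void *nvenc[3];
};

/* _() expands each listed entry into one case of the switch. */
static void *demo_engine(struct demo_device *device, int index)
{
    switch (index) {
#define _(n, m) case n: return (m)
    _(0, device->ce[3]);
    _(1, device->ce[4]);
    _(2, device->ce[5]);
    _(3, device->nvenc[2]);
#undef _
    default:
        return NULL;
    }
}

int main(void)
{
    struct demo_device dev = { 0 };

    dev.ce[4] = &dev;    /* pretend only this engine was constructed */
    printf("index 1 -> %p\n", demo_engine(&dev, 1));
    printf("index 3 -> %p\n", demo_engine(&dev, 3));
    return 0;
}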
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
index 62ad0300cfa5..b1b693219db3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
@@ -1614,7 +1614,6 @@ nvkm_device_pci_func = {
1614 .fini = nvkm_device_pci_fini, 1614 .fini = nvkm_device_pci_fini,
1615 .resource_addr = nvkm_device_pci_resource_addr, 1615 .resource_addr = nvkm_device_pci_resource_addr,
1616 .resource_size = nvkm_device_pci_resource_size, 1616 .resource_size = nvkm_device_pci_resource_size,
1617 .cpu_coherent = !IS_ENABLED(CONFIG_ARM),
1618}; 1617};
1619 1618
1620int 1619int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index ec12efb4689a..939682f18788 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -191,13 +191,11 @@ static irqreturn_t
191nvkm_device_tegra_intr(int irq, void *arg) 191nvkm_device_tegra_intr(int irq, void *arg)
192{ 192{
193 struct nvkm_device_tegra *tdev = arg; 193 struct nvkm_device_tegra *tdev = arg;
-194 	struct nvkm_mc *mc = tdev->device.mc;
+194 	struct nvkm_device *device = &tdev->device;
 195 	bool handled = false;
-196 	if (likely(mc)) {
-197 		nvkm_mc_intr_unarm(mc);
-198 		nvkm_mc_intr(mc, &handled);
-199 		nvkm_mc_intr_rearm(mc);
-200 	}
+196 	nvkm_mc_intr_unarm(device);
+197 	nvkm_mc_intr(device, &handled);
+198 	nvkm_mc_intr_rearm(device);
201 return handled ? IRQ_HANDLED : IRQ_NONE; 199 return handled ? IRQ_HANDLED : IRQ_NONE;
202} 200}
203 201
@@ -247,7 +245,6 @@ nvkm_device_tegra_func = {
247 .fini = nvkm_device_tegra_fini, 245 .fini = nvkm_device_tegra_fini,
248 .resource_addr = nvkm_device_tegra_resource_addr, 246 .resource_addr = nvkm_device_tegra_resource_addr,
249 .resource_size = nvkm_device_tegra_resource_size, 247 .resource_size = nvkm_device_tegra_resource_size,
250 .cpu_coherent = false,
251}; 248};
252 249
253int 250int
@@ -313,6 +310,7 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func,
313 goto remove; 310 goto remove;
314 311
315 tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value; 312 tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value;
313 tdev->gpu_speedo_id = tegra_sku_info.gpu_speedo_id;
316 ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev, 314 ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
317 NVKM_DEVICE_TEGRA, pdev->id, NULL, 315 NVKM_DEVICE_TEGRA, pdev->id, NULL,
318 cfg, dbg, detect, mmio, subdev_mask, 316 cfg, dbg, detect, mmio, subdev_mask,
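The interrupt-handler hunk above drops the caller-side check for a missing mc subdev and passes the device to the nvkm_mc helpers instead, which suggests the reworked entry points cope with that case themselves (not visible in this hunk). A userspace sketch of the resulting unarm, dispatch, rearm flow, with invented stand-in types:

#include <stdbool.h>
#include <stdio.h>

struct demo_mc { bool pending; };
struct demo_device { struct demo_mc *mc; };

/* The stand-in helpers take the device and check for the subdev
 * themselves, instead of making every interrupt handler do it. */
static void demo_mc_intr_unarm(struct demo_device *d)
{
    if (d->mc)
        printf("unarm\n");
}

static void demo_mc_intr_rearm(struct demo_device *d)
{
    if (d->mc)
        printf("rearm\n");
}

static void demo_mc_intr(struct demo_device *d, bool *handled)
{
    if (d->mc && d->mc->pending) {
        d->mc->pending = false;
        *handled = true;
    }
}

static int demo_isr(struct demo_device *d)
{
    bool handled = false;

    demo_mc_intr_unarm(d);
    demo_mc_intr(d, &handled);
    demo_mc_intr_rearm(d);
    return handled ? 1 : 0;    /* stands in for IRQ_HANDLED / IRQ_NONE */
}

int main(void)
{
    struct demo_mc mc = { .pending = true };
    struct demo_device dev = { .mc = &mc };

    printf("handled=%d\n", demo_isr(&dev));
    return 0;
}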
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
index 137066426ed7..79a8f71cf788 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c
@@ -102,6 +102,7 @@ nvkm_udevice_info(struct nvkm_udevice *udev, void *data, u32 size)
102 case NV_C0: args->v0.family = NV_DEVICE_INFO_V0_FERMI; break; 102 case NV_C0: args->v0.family = NV_DEVICE_INFO_V0_FERMI; break;
103 case NV_E0: args->v0.family = NV_DEVICE_INFO_V0_KEPLER; break; 103 case NV_E0: args->v0.family = NV_DEVICE_INFO_V0_KEPLER; break;
104 case GM100: args->v0.family = NV_DEVICE_INFO_V0_MAXWELL; break; 104 case GM100: args->v0.family = NV_DEVICE_INFO_V0_MAXWELL; break;
105 case GP100: args->v0.family = NV_DEVICE_INFO_V0_PASCAL; break;
105 default: 106 default:
106 args->v0.family = 0; 107 args->v0.family = 0;
107 break; 108 break;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
index e2a64ed14b22..77a52b54a31e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild
@@ -10,6 +10,8 @@ nvkm-y += nvkm/engine/disp/gk104.o
10nvkm-y += nvkm/engine/disp/gk110.o 10nvkm-y += nvkm/engine/disp/gk110.o
11nvkm-y += nvkm/engine/disp/gm107.o 11nvkm-y += nvkm/engine/disp/gm107.o
12nvkm-y += nvkm/engine/disp/gm200.o 12nvkm-y += nvkm/engine/disp/gm200.o
13nvkm-y += nvkm/engine/disp/gp100.o
14nvkm-y += nvkm/engine/disp/gp104.o
13 15
14nvkm-y += nvkm/engine/disp/outp.o 16nvkm-y += nvkm/engine/disp/outp.o
15nvkm-y += nvkm/engine/disp/outpdp.o 17nvkm-y += nvkm/engine/disp/outpdp.o
@@ -45,12 +47,15 @@ nvkm-y += nvkm/engine/disp/rootgk104.o
45nvkm-y += nvkm/engine/disp/rootgk110.o 47nvkm-y += nvkm/engine/disp/rootgk110.o
46nvkm-y += nvkm/engine/disp/rootgm107.o 48nvkm-y += nvkm/engine/disp/rootgm107.o
47nvkm-y += nvkm/engine/disp/rootgm200.o 49nvkm-y += nvkm/engine/disp/rootgm200.o
50nvkm-y += nvkm/engine/disp/rootgp100.o
51nvkm-y += nvkm/engine/disp/rootgp104.o
48 52
49nvkm-y += nvkm/engine/disp/channv50.o 53nvkm-y += nvkm/engine/disp/channv50.o
50nvkm-y += nvkm/engine/disp/changf119.o 54nvkm-y += nvkm/engine/disp/changf119.o
51 55
52nvkm-y += nvkm/engine/disp/dmacnv50.o 56nvkm-y += nvkm/engine/disp/dmacnv50.o
53nvkm-y += nvkm/engine/disp/dmacgf119.o 57nvkm-y += nvkm/engine/disp/dmacgf119.o
58nvkm-y += nvkm/engine/disp/dmacgp104.o
54 59
55nvkm-y += nvkm/engine/disp/basenv50.o 60nvkm-y += nvkm/engine/disp/basenv50.o
56nvkm-y += nvkm/engine/disp/baseg84.o 61nvkm-y += nvkm/engine/disp/baseg84.o
@@ -59,6 +64,7 @@ nvkm-y += nvkm/engine/disp/basegt215.o
59nvkm-y += nvkm/engine/disp/basegf119.o 64nvkm-y += nvkm/engine/disp/basegf119.o
60nvkm-y += nvkm/engine/disp/basegk104.o 65nvkm-y += nvkm/engine/disp/basegk104.o
61nvkm-y += nvkm/engine/disp/basegk110.o 66nvkm-y += nvkm/engine/disp/basegk110.o
67nvkm-y += nvkm/engine/disp/basegp104.o
62 68
63nvkm-y += nvkm/engine/disp/corenv50.o 69nvkm-y += nvkm/engine/disp/corenv50.o
64nvkm-y += nvkm/engine/disp/coreg84.o 70nvkm-y += nvkm/engine/disp/coreg84.o
@@ -70,6 +76,8 @@ nvkm-y += nvkm/engine/disp/coregk104.o
70nvkm-y += nvkm/engine/disp/coregk110.o 76nvkm-y += nvkm/engine/disp/coregk110.o
71nvkm-y += nvkm/engine/disp/coregm107.o 77nvkm-y += nvkm/engine/disp/coregm107.o
72nvkm-y += nvkm/engine/disp/coregm200.o 78nvkm-y += nvkm/engine/disp/coregm200.o
79nvkm-y += nvkm/engine/disp/coregp100.o
80nvkm-y += nvkm/engine/disp/coregp104.o
73 81
74nvkm-y += nvkm/engine/disp/ovlynv50.o 82nvkm-y += nvkm/engine/disp/ovlynv50.o
75nvkm-y += nvkm/engine/disp/ovlyg84.o 83nvkm-y += nvkm/engine/disp/ovlyg84.o
@@ -77,6 +85,7 @@ nvkm-y += nvkm/engine/disp/ovlygt200.o
77nvkm-y += nvkm/engine/disp/ovlygt215.o 85nvkm-y += nvkm/engine/disp/ovlygt215.o
78nvkm-y += nvkm/engine/disp/ovlygf119.o 86nvkm-y += nvkm/engine/disp/ovlygf119.o
79nvkm-y += nvkm/engine/disp/ovlygk104.o 87nvkm-y += nvkm/engine/disp/ovlygk104.o
88nvkm-y += nvkm/engine/disp/ovlygp104.o
80 89
81nvkm-y += nvkm/engine/disp/piocnv50.o 90nvkm-y += nvkm/engine/disp/piocnv50.o
82nvkm-y += nvkm/engine/disp/piocgf119.o 91nvkm-y += nvkm/engine/disp/piocgf119.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp104.c
new file mode 100644
index 000000000000..51688e37c54e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp104.c
@@ -0,0 +1,38 @@
1/*
2 * Copyright 2016 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "dmacnv50.h"
25#include "rootnv50.h"
26
27#include <nvif/class.h>
28
29const struct nv50_disp_dmac_oclass
30gp104_disp_base_oclass = {
31 .base.oclass = GK110_DISP_BASE_CHANNEL_DMA,
32 .base.minver = 0,
33 .base.maxver = 0,
34 .ctor = nv50_disp_base_new,
35 .func = &gp104_disp_dmac_func,
36 .mthd = &gf119_disp_base_chan_mthd,
37 .chid = 1,
38};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
index aee374884c96..f5f683d9fd20 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h
@@ -85,6 +85,7 @@ extern const struct nv50_disp_mthd_list gf119_disp_core_mthd_pior;
85extern const struct nv50_disp_chan_mthd gf119_disp_base_chan_mthd; 85extern const struct nv50_disp_chan_mthd gf119_disp_base_chan_mthd;
86 86
87extern const struct nv50_disp_chan_mthd gk104_disp_core_chan_mthd; 87extern const struct nv50_disp_chan_mthd gk104_disp_core_chan_mthd;
88extern const struct nv50_disp_chan_mthd gk104_disp_ovly_chan_mthd;
88 89
89struct nv50_disp_pioc_oclass { 90struct nv50_disp_pioc_oclass {
90 int (*ctor)(const struct nv50_disp_chan_func *, 91 int (*ctor)(const struct nv50_disp_chan_func *,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c
index 6b1dc703dac7..21fbf89b6319 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c
@@ -171,7 +171,7 @@ gf119_disp_core_chan_mthd = {
171 } 171 }
172}; 172};
173 173
-174 static void
+174 void
175gf119_disp_core_fini(struct nv50_disp_dmac *chan) 175gf119_disp_core_fini(struct nv50_disp_dmac *chan)
176{ 176{
177 struct nv50_disp *disp = chan->base.root->disp; 177 struct nv50_disp *disp = chan->base.root->disp;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp100.c
new file mode 100644
index 000000000000..d5dff6619d4d
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp100.c
@@ -0,0 +1,38 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "dmacnv50.h"
25#include "rootnv50.h"
26
27#include <nvif/class.h>
28
29const struct nv50_disp_dmac_oclass
30gp100_disp_core_oclass = {
31 .base.oclass = GP100_DISP_CORE_CHANNEL_DMA,
32 .base.minver = 0,
33 .base.maxver = 0,
34 .ctor = nv50_disp_core_new,
35 .func = &gf119_disp_core_func,
36 .mthd = &gk104_disp_core_chan_mthd,
37 .chid = 0,
38};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp104.c
new file mode 100644
index 000000000000..6922f4007b61
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp104.c
@@ -0,0 +1,78 @@
1/*
2 * Copyright 2016 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "dmacnv50.h"
25#include "rootnv50.h"
26
27#include <subdev/timer.h>
28
29#include <nvif/class.h>
30
31static int
32gp104_disp_core_init(struct nv50_disp_dmac *chan)
33{
34 struct nv50_disp *disp = chan->base.root->disp;
35 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
36 struct nvkm_device *device = subdev->device;
37
38 /* enable error reporting */
39 nvkm_mask(device, 0x6100a0, 0x00000001, 0x00000001);
40
41 /* initialise channel for dma command submission */
42 nvkm_wr32(device, 0x611494, chan->push);
43 nvkm_wr32(device, 0x611498, 0x00010000);
44 nvkm_wr32(device, 0x61149c, 0x00000001);
45 nvkm_mask(device, 0x610490, 0x00000010, 0x00000010);
46 nvkm_wr32(device, 0x640000, 0x00000000);
47 nvkm_wr32(device, 0x610490, 0x01000013);
48
49 /* wait for it to go inactive */
50 if (nvkm_msec(device, 2000,
51 if (!(nvkm_rd32(device, 0x610490) & 0x80000000))
52 break;
53 ) < 0) {
54 nvkm_error(subdev, "core init: %08x\n",
55 nvkm_rd32(device, 0x610490));
56 return -EBUSY;
57 }
58
59 return 0;
60}
61
62const struct nv50_disp_dmac_func
63gp104_disp_core_func = {
64 .init = gp104_disp_core_init,
65 .fini = gf119_disp_core_fini,
66 .bind = gf119_disp_dmac_bind,
67};
68
69const struct nv50_disp_dmac_oclass
70gp104_disp_core_oclass = {
71 .base.oclass = GP104_DISP_CORE_CHANNEL_DMA,
72 .base.minver = 0,
73 .base.maxver = 0,
74 .ctor = nv50_disp_core_new,
75 .func = &gp104_disp_core_func,
76 .mthd = &gk104_disp_core_chan_mthd,
77 .chid = 0,
78};
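gp104_disp_core_init() above programs the push buffer and then spins on a status word until the busy bit clears, giving up after two milliseconds via nvkm_msec(). The same bounded-poll idiom, written as a self-contained userspace sketch with a simulated register read:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static unsigned fake_status = 0x80000000;

/* Simulated register read: the "hardware" goes idle after a few polls. */
static unsigned read_status(void)
{
    static int polls;

    if (++polls > 3)
        fake_status &= ~0x80000000u;
    return fake_status;
}

/* Spin until the busy bit clears or the deadline passes, the shape of
 * the nvkm_msec() block in gp104_disp_core_init(). */
static bool wait_idle_msec(long timeout_ms)
{
    struct timespec start, now;

    clock_gettime(CLOCK_MONOTONIC, &start);
    for (;;) {
        if (!(read_status() & 0x80000000))
            return true;                 /* idle: success */
        clock_gettime(CLOCK_MONOTONIC, &now);
        long elapsed_ms = (now.tv_sec - start.tv_sec) * 1000 +
                          (now.tv_nsec - start.tv_nsec) / 1000000;
        if (elapsed_ms > timeout_ms)
            return false;                /* the driver returns -EBUSY here */
    }
}

int main(void)
{
    printf("went idle: %s\n", wait_idle_msec(2000) ? "yes" : "no");
    return 0;
}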
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
index 876b14549a58..a57f7cef307a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c
@@ -36,7 +36,7 @@ gf119_disp_dmac_bind(struct nv50_disp_dmac *chan,
36 chan->base.chid << 27 | 0x00000001); 36 chan->base.chid << 27 | 0x00000001);
37} 37}
38 38
-39 static void
+39 void
40gf119_disp_dmac_fini(struct nv50_disp_dmac *chan) 40gf119_disp_dmac_fini(struct nv50_disp_dmac *chan)
41{ 41{
42 struct nv50_disp *disp = chan->base.root->disp; 42 struct nv50_disp *disp = chan->base.root->disp;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c
new file mode 100644
index 000000000000..ad24c2c57696
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c
@@ -0,0 +1,66 @@
1/*
2 * Copyright 2016 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "dmacnv50.h"
25#include "rootnv50.h"
26
27#include <subdev/timer.h>
28
29static int
30gp104_disp_dmac_init(struct nv50_disp_dmac *chan)
31{
32 struct nv50_disp *disp = chan->base.root->disp;
33 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
34 struct nvkm_device *device = subdev->device;
35 int chid = chan->base.chid;
36
37 /* enable error reporting */
38 nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid);
39
40 /* initialise channel for dma command submission */
41 nvkm_wr32(device, 0x611494 + (chid * 0x0010), chan->push);
42 nvkm_wr32(device, 0x611498 + (chid * 0x0010), 0x00010000);
43 nvkm_wr32(device, 0x61149c + (chid * 0x0010), 0x00000001);
44 nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010);
45 nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000);
46 nvkm_wr32(device, 0x610490 + (chid * 0x0010), 0x00000013);
47
48 /* wait for it to go inactive */
49 if (nvkm_msec(device, 2000,
50 if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x80000000))
51 break;
52 ) < 0) {
53 nvkm_error(subdev, "ch %d init: %08x\n", chid,
54 nvkm_rd32(device, 0x610490 + (chid * 0x10)));
55 return -EBUSY;
56 }
57
58 return 0;
59}
60
61const struct nv50_disp_dmac_func
62gp104_disp_dmac_func = {
63 .init = gp104_disp_dmac_init,
64 .fini = gf119_disp_dmac_fini,
65 .bind = gf119_disp_dmac_bind,
66};
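gp104_disp_dmac_init() above addresses its registers per channel: the control words move at a 0x10-byte stride from 0x611494/0x611498/0x61149c/0x610490, while the user area moves at a 0x1000 stride from 0x640000. A small sketch that only recomputes those offsets; the helper names are invented and nothing here touches hardware.

#include <stdio.h>

static unsigned ctrl_reg(unsigned base, int chid)
{
    return base + chid * 0x0010;
}

static unsigned user_reg(unsigned base, int chid)
{
    return base + chid * 0x1000;
}

int main(void)
{
    for (int chid = 0; chid < 3; chid++)
        printf("chid %d: push %#x mode %#x ctrl %#x user %#x\n",
               chid,
               ctrl_reg(0x611494, chid),
               ctrl_reg(0x611498, chid),
               ctrl_reg(0x610490, chid),
               user_reg(0x640000, chid));
    return 0;
}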
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
index fc84eb8b5c45..43ac05857853 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h
@@ -25,8 +25,12 @@ int nv50_disp_dmac_bind(struct nv50_disp_dmac *, struct nvkm_object *, u32);
25extern const struct nv50_disp_dmac_func nv50_disp_core_func; 25extern const struct nv50_disp_dmac_func nv50_disp_core_func;
26 26
27extern const struct nv50_disp_dmac_func gf119_disp_dmac_func; 27extern const struct nv50_disp_dmac_func gf119_disp_dmac_func;
28void gf119_disp_dmac_fini(struct nv50_disp_dmac *);
28int gf119_disp_dmac_bind(struct nv50_disp_dmac *, struct nvkm_object *, u32); 29int gf119_disp_dmac_bind(struct nv50_disp_dmac *, struct nvkm_object *, u32);
29extern const struct nv50_disp_dmac_func gf119_disp_core_func; 30extern const struct nv50_disp_dmac_func gf119_disp_core_func;
31void gf119_disp_core_fini(struct nv50_disp_dmac *);
32
33extern const struct nv50_disp_dmac_func gp104_disp_dmac_func;
30 34
31struct nv50_disp_dmac_oclass { 35struct nv50_disp_dmac_oclass {
32 int (*ctor)(const struct nv50_disp_dmac_func *, 36 int (*ctor)(const struct nv50_disp_dmac_func *,
@@ -88,4 +92,10 @@ extern const struct nv50_disp_dmac_oclass gk110_disp_base_oclass;
88extern const struct nv50_disp_dmac_oclass gm107_disp_core_oclass; 92extern const struct nv50_disp_dmac_oclass gm107_disp_core_oclass;
89 93
90extern const struct nv50_disp_dmac_oclass gm200_disp_core_oclass; 94extern const struct nv50_disp_dmac_oclass gm200_disp_core_oclass;
95
96extern const struct nv50_disp_dmac_oclass gp100_disp_core_oclass;
97
98extern const struct nv50_disp_dmac_oclass gp104_disp_core_oclass;
99extern const struct nv50_disp_dmac_oclass gp104_disp_base_oclass;
100extern const struct nv50_disp_dmac_oclass gp104_disp_ovly_oclass;
91#endif 101#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
index 5dd34382f55a..29e84b241cca 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c
@@ -76,12 +76,10 @@ exec_lookup(struct nv50_disp *disp, int head, int or, u32 ctrl,
76 mask |= 0x0001 << or; 76 mask |= 0x0001 << or;
77 mask |= 0x0100 << head; 77 mask |= 0x0100 << head;
78 78
79
80 list_for_each_entry(outp, &disp->base.outp, head) { 79 list_for_each_entry(outp, &disp->base.outp, head) {
81 if ((outp->info.hasht & 0xff) == type && 80 if ((outp->info.hasht & 0xff) == type &&
82 (outp->info.hashm & mask) == mask) { 81 (outp->info.hashm & mask) == mask) {
-83 			*data = nvbios_outp_match(bios, outp->info.hasht,
-84 						  outp->info.hashm,
-85 						  ver, hdr, cnt, len, info);
+82 			*data = nvbios_outp_match(bios, outp->info.hasht, mask,
+83 						  ver, hdr, cnt, len, info);
86 if (!*data) 84 if (!*data)
87 return NULL; 85 return NULL;
@@ -415,7 +413,7 @@ gf119_disp_intr_supervisor(struct work_struct *work)
415 nvkm_wr32(device, 0x6101d0, 0x80000000); 413 nvkm_wr32(device, 0x6101d0, 0x80000000);
416} 414}
417 415
-418 static void
+416 void
419gf119_disp_intr_error(struct nv50_disp *disp, int chid) 417gf119_disp_intr_error(struct nv50_disp *disp, int chid)
420{ 418{
421 struct nvkm_subdev *subdev = &disp->base.engine.subdev; 419 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
@@ -463,7 +461,7 @@ gf119_disp_intr(struct nv50_disp *disp)
463 u32 stat = nvkm_rd32(device, 0x61009c); 461 u32 stat = nvkm_rd32(device, 0x61009c);
464 int chid = ffs(stat) - 1; 462 int chid = ffs(stat) - 1;
465 if (chid >= 0) 463 if (chid >= 0)
-466 			gf119_disp_intr_error(disp, chid);
+464 			disp->func->intr_error(disp, chid);
467 intr &= ~0x00000002; 465 intr &= ~0x00000002;
468 } 466 }
469 467
@@ -507,6 +505,7 @@ gf119_disp_new_(const struct nv50_disp_func *func, struct nvkm_device *device,
507static const struct nv50_disp_func 505static const struct nv50_disp_func
508gf119_disp = { 506gf119_disp = {
509 .intr = gf119_disp_intr, 507 .intr = gf119_disp_intr,
508 .intr_error = gf119_disp_intr_error,
510 .uevent = &gf119_disp_chan_uevent, 509 .uevent = &gf119_disp_chan_uevent,
511 .super = gf119_disp_intr_supervisor, 510 .super = gf119_disp_intr_supervisor,
512 .root = &gf119_disp_root_oclass, 511 .root = &gf119_disp_root_oclass,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
index a86384b8e388..37f145cf30d7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c
@@ -27,6 +27,7 @@
27static const struct nv50_disp_func 27static const struct nv50_disp_func
28gk104_disp = { 28gk104_disp = {
29 .intr = gf119_disp_intr, 29 .intr = gf119_disp_intr,
30 .intr_error = gf119_disp_intr_error,
30 .uevent = &gf119_disp_chan_uevent, 31 .uevent = &gf119_disp_chan_uevent,
31 .super = gf119_disp_intr_supervisor, 32 .super = gf119_disp_intr_supervisor,
32 .root = &gk104_disp_root_oclass, 33 .root = &gk104_disp_root_oclass,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c
index 0d574c7e594a..e14ac946608c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c
@@ -27,6 +27,7 @@
27static const struct nv50_disp_func 27static const struct nv50_disp_func
28gk110_disp = { 28gk110_disp = {
29 .intr = gf119_disp_intr, 29 .intr = gf119_disp_intr,
30 .intr_error = gf119_disp_intr_error,
30 .uevent = &gf119_disp_chan_uevent, 31 .uevent = &gf119_disp_chan_uevent,
31 .super = gf119_disp_intr_supervisor, 32 .super = gf119_disp_intr_supervisor,
32 .root = &gk110_disp_root_oclass, 33 .root = &gk110_disp_root_oclass,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
index f4b9cf8574be..2f2437cc5891 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c
@@ -27,6 +27,7 @@
27static const struct nv50_disp_func 27static const struct nv50_disp_func
28gm107_disp = { 28gm107_disp = {
29 .intr = gf119_disp_intr, 29 .intr = gf119_disp_intr,
30 .intr_error = gf119_disp_intr_error,
30 .uevent = &gf119_disp_chan_uevent, 31 .uevent = &gf119_disp_chan_uevent,
31 .super = gf119_disp_intr_supervisor, 32 .super = gf119_disp_intr_supervisor,
32 .root = &gm107_disp_root_oclass, 33 .root = &gm107_disp_root_oclass,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c
index 67eec8620719..9f368d4ee61e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c
@@ -27,6 +27,7 @@
27static const struct nv50_disp_func 27static const struct nv50_disp_func
28gm200_disp = { 28gm200_disp = {
29 .intr = gf119_disp_intr, 29 .intr = gf119_disp_intr,
30 .intr_error = gf119_disp_intr_error,
30 .uevent = &gf119_disp_chan_uevent, 31 .uevent = &gf119_disp_chan_uevent,
31 .super = gf119_disp_intr_supervisor, 32 .super = gf119_disp_intr_supervisor,
32 .root = &gm200_disp_root_oclass, 33 .root = &gm200_disp_root_oclass,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c
new file mode 100644
index 000000000000..4f81bf31435e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c
@@ -0,0 +1,55 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "nv50.h"
25#include "rootnv50.h"
26
27static const struct nv50_disp_func
28gp100_disp = {
29 .intr = gf119_disp_intr,
30 .intr_error = gf119_disp_intr_error,
31 .uevent = &gf119_disp_chan_uevent,
32 .super = gf119_disp_intr_supervisor,
33 .root = &gp100_disp_root_oclass,
34 .head.vblank_init = gf119_disp_vblank_init,
35 .head.vblank_fini = gf119_disp_vblank_fini,
36 .head.scanoutpos = gf119_disp_root_scanoutpos,
37 .outp.internal.crt = nv50_dac_output_new,
38 .outp.internal.tmds = nv50_sor_output_new,
39 .outp.internal.lvds = nv50_sor_output_new,
40 .outp.internal.dp = gm200_sor_dp_new,
41 .dac.nr = 3,
42 .dac.power = nv50_dac_power,
43 .dac.sense = nv50_dac_sense,
44 .sor.nr = 4,
45 .sor.power = nv50_sor_power,
46 .sor.hda_eld = gf119_hda_eld,
47 .sor.hdmi = gk104_hdmi_ctrl,
48 .sor.magic = gm200_sor_magic,
49};
50
51int
52gp100_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
53{
54 return gf119_disp_new_(&gp100_disp, device, index, pdisp);
55}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp104.c
new file mode 100644
index 000000000000..3bf3380336e4
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp104.c
@@ -0,0 +1,81 @@
1/*
2 * Copyright 2016 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "nv50.h"
25#include "rootnv50.h"
26
27static void
28gp104_disp_intr_error(struct nv50_disp *disp, int chid)
29{
30 struct nvkm_subdev *subdev = &disp->base.engine.subdev;
31 struct nvkm_device *device = subdev->device;
32 u32 mthd = nvkm_rd32(device, 0x6111f0 + (chid * 12));
33 u32 data = nvkm_rd32(device, 0x6111f4 + (chid * 12));
34 u32 unkn = nvkm_rd32(device, 0x6111f8 + (chid * 12));
35
36 nvkm_error(subdev, "chid %d mthd %04x data %08x %08x %08x\n",
37 chid, (mthd & 0x0000ffc), data, mthd, unkn);
38
39 if (chid < ARRAY_SIZE(disp->chan)) {
40 switch (mthd & 0xffc) {
41 case 0x0080:
42 nv50_disp_chan_mthd(disp->chan[chid], NV_DBG_ERROR);
43 break;
44 default:
45 break;
46 }
47 }
48
49 nvkm_wr32(device, 0x61009c, (1 << chid));
50 nvkm_wr32(device, 0x6111f0 + (chid * 12), 0x90000000);
51}
52
53static const struct nv50_disp_func
54gp104_disp = {
55 .intr = gf119_disp_intr,
56 .intr_error = gp104_disp_intr_error,
57 .uevent = &gf119_disp_chan_uevent,
58 .super = gf119_disp_intr_supervisor,
59 .root = &gp104_disp_root_oclass,
60 .head.vblank_init = gf119_disp_vblank_init,
61 .head.vblank_fini = gf119_disp_vblank_fini,
62 .head.scanoutpos = gf119_disp_root_scanoutpos,
63 .outp.internal.crt = nv50_dac_output_new,
64 .outp.internal.tmds = nv50_sor_output_new,
65 .outp.internal.lvds = nv50_sor_output_new,
66 .outp.internal.dp = gm200_sor_dp_new,
67 .dac.nr = 3,
68 .dac.power = nv50_dac_power,
69 .dac.sense = nv50_dac_sense,
70 .sor.nr = 4,
71 .sor.power = nv50_sor_power,
72 .sor.hda_eld = gf119_hda_eld,
73 .sor.hdmi = gk104_hdmi_ctrl,
74 .sor.magic = gm200_sor_magic,
75};
76
77int
78gp104_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp)
79{
80 return gf119_disp_new_(&gp104_disp, device, index, pdisp);
81}
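gp104_disp_intr_error() above becomes the GP104 implementation of the .intr_error hook this series adds to nv50_disp_func, so the shared gf119 interrupt path can dispatch through a per-chipset pointer instead of a fixed helper. A minimal sketch of that dispatch, with invented stand-ins for the disp structures:

#include <stdio.h>

struct demo_disp;

struct demo_disp_func {
    void (*intr_error)(struct demo_disp *, int chid);
};

struct demo_disp {
    const struct demo_disp_func *func;
};

static void gf119_style_error(struct demo_disp *disp, int chid)
{
    (void)disp;
    printf("older error decode, chid %d\n", chid);
}

static void gp104_style_error(struct demo_disp *disp, int chid)
{
    (void)disp;
    printf("gp104-style error decode, chid %d\n", chid);
}

/* Shared interrupt path: it no longer names a specific decoder, it
 * calls through whatever the chipset installed. */
static void shared_intr(struct demo_disp *disp, int chid)
{
    disp->func->intr_error(disp, chid);
}

int main(void)
{
    const struct demo_disp_func older = { .intr_error = gf119_style_error };
    const struct demo_disp_func newer = { .intr_error = gp104_style_error };
    struct demo_disp a = { .func = &older };
    struct demo_disp b = { .func = &newer };

    shared_intr(&a, 1);
    shared_intr(&b, 1);
    return 0;
}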
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
index fcb1b0c46d64..fbb8c7dc18fd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c
@@ -32,6 +32,7 @@
32#include <subdev/bios/init.h> 32#include <subdev/bios/init.h>
33#include <subdev/bios/pll.h> 33#include <subdev/bios/pll.h>
34#include <subdev/devinit.h> 34#include <subdev/devinit.h>
35#include <subdev/timer.h>
35 36
36static const struct nvkm_disp_oclass * 37static const struct nvkm_disp_oclass *
37nv50_disp_root_(struct nvkm_disp *base) 38nv50_disp_root_(struct nvkm_disp *base)
@@ -269,8 +270,7 @@ exec_lookup(struct nv50_disp *disp, int head, int or, u32 ctrl,
269 list_for_each_entry(outp, &disp->base.outp, head) { 270 list_for_each_entry(outp, &disp->base.outp, head) {
270 if ((outp->info.hasht & 0xff) == type && 271 if ((outp->info.hasht & 0xff) == type &&
271 (outp->info.hashm & mask) == mask) { 272 (outp->info.hashm & mask) == mask) {
-272 			*data = nvbios_outp_match(bios, outp->info.hasht,
-273 						  outp->info.hashm,
-274 						  ver, hdr, cnt, len, info);
+273 			*data = nvbios_outp_match(bios, outp->info.hasht, mask,
+274 						  ver, hdr, cnt, len, info);
275 if (!*data) 275 if (!*data)
276 return NULL; 276 return NULL;
@@ -426,6 +426,134 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf)
426 return outp; 426 return outp;
427} 427}
428 428
429static bool
430nv50_disp_dptmds_war(struct nvkm_device *device)
431{
432 switch (device->chipset) {
433 case 0x94:
434 case 0x96:
435 case 0x98:
436 case 0xaa:
437 case 0xac:
438 return true;
439 default:
440 break;
441 }
442 return false;
443}
444
445static bool
446nv50_disp_dptmds_war_needed(struct nv50_disp *disp, struct dcb_output *outp)
447{
448 struct nvkm_device *device = disp->base.engine.subdev.device;
449 const u32 soff = __ffs(outp->or) * 0x800;
450 if (nv50_disp_dptmds_war(device) && outp->type == DCB_OUTPUT_TMDS) {
451 switch (nvkm_rd32(device, 0x614300 + soff) & 0x00030000) {
452 case 0x00000000:
453 case 0x00030000:
454 return true;
455 default:
456 break;
457 }
458 }
459 return false;
460
461}
462
463static void
464nv50_disp_dptmds_war_2(struct nv50_disp *disp, struct dcb_output *outp)
465{
466 struct nvkm_device *device = disp->base.engine.subdev.device;
467 const u32 soff = __ffs(outp->or) * 0x800;
468
469 if (!nv50_disp_dptmds_war_needed(disp, outp))
470 return;
471
472 nvkm_mask(device, 0x00e840, 0x80000000, 0x80000000);
473 nvkm_mask(device, 0x614300 + soff, 0x03000000, 0x03000000);
474 nvkm_mask(device, 0x61c10c + soff, 0x00000001, 0x00000001);
475
476 nvkm_mask(device, 0x61c00c + soff, 0x0f000000, 0x00000000);
477 nvkm_mask(device, 0x61c008 + soff, 0xff000000, 0x14000000);
478 nvkm_usec(device, 400, NVKM_DELAY);
479 nvkm_mask(device, 0x61c008 + soff, 0xff000000, 0x00000000);
480 nvkm_mask(device, 0x61c00c + soff, 0x0f000000, 0x01000000);
481
482 if (nvkm_rd32(device, 0x61c004 + soff) & 0x00000001) {
483 u32 seqctl = nvkm_rd32(device, 0x61c030 + soff);
484 u32 pu_pc = seqctl & 0x0000000f;
485 nvkm_wr32(device, 0x61c040 + soff + pu_pc * 4, 0x1f008000);
486 }
487}
488
489static void
490nv50_disp_dptmds_war_3(struct nv50_disp *disp, struct dcb_output *outp)
491{
492 struct nvkm_device *device = disp->base.engine.subdev.device;
493 const u32 soff = __ffs(outp->or) * 0x800;
494 u32 sorpwr;
495
496 if (!nv50_disp_dptmds_war_needed(disp, outp))
497 return;
498
499 sorpwr = nvkm_rd32(device, 0x61c004 + soff);
500 if (sorpwr & 0x00000001) {
501 u32 seqctl = nvkm_rd32(device, 0x61c030 + soff);
502 u32 pd_pc = (seqctl & 0x00000f00) >> 8;
503 u32 pu_pc = seqctl & 0x0000000f;
504
505 nvkm_wr32(device, 0x61c040 + soff + pd_pc * 4, 0x1f008000);
506
507 nvkm_msec(device, 2000,
508 if (!(nvkm_rd32(device, 0x61c030 + soff) & 0x10000000))
509 break;
510 );
511 nvkm_mask(device, 0x61c004 + soff, 0x80000001, 0x80000000);
512 nvkm_msec(device, 2000,
513 if (!(nvkm_rd32(device, 0x61c030 + soff) & 0x10000000))
514 break;
515 );
516
517 nvkm_wr32(device, 0x61c040 + soff + pd_pc * 4, 0x00002000);
518 nvkm_wr32(device, 0x61c040 + soff + pu_pc * 4, 0x1f000000);
519 }
520
521 nvkm_mask(device, 0x61c10c + soff, 0x00000001, 0x00000000);
522 nvkm_mask(device, 0x614300 + soff, 0x03000000, 0x00000000);
523
524 if (sorpwr & 0x00000001) {
525 nvkm_mask(device, 0x61c004 + soff, 0x80000001, 0x80000001);
526 }
527}
528
529static void
530nv50_disp_update_sppll1(struct nv50_disp *disp)
531{
532 struct nvkm_device *device = disp->base.engine.subdev.device;
533 bool used = false;
534 int sor;
535
536 if (!nv50_disp_dptmds_war(device))
537 return;
538
539 for (sor = 0; sor < disp->func->sor.nr; sor++) {
540 u32 clksor = nvkm_rd32(device, 0x614300 + (sor * 0x800));
541 switch (clksor & 0x03000000) {
542 case 0x02000000:
543 case 0x03000000:
544 used = true;
545 break;
546 default:
547 break;
548 }
549 }
550
551 if (used)
552 return;
553
554 nvkm_mask(device, 0x00e840, 0x80000000, 0x00000000);
555}
556
429static void 557static void
430nv50_disp_intr_unk10_0(struct nv50_disp *disp, int head) 558nv50_disp_intr_unk10_0(struct nv50_disp *disp, int head)
431{ 559{
@@ -679,6 +807,8 @@ nv50_disp_intr_unk20_2(struct nv50_disp *disp, int head)
679 807
680 nvkm_mask(device, hreg, 0x0000000f, hval); 808 nvkm_mask(device, hreg, 0x0000000f, hval);
681 nvkm_mask(device, oreg, mask, oval); 809 nvkm_mask(device, oreg, mask, oval);
810
811 nv50_disp_dptmds_war_2(disp, &outp->info);
682} 812}
683 813
684/* If programming a TMDS output on a SOR that can also be configured for 814/* If programming a TMDS output on a SOR that can also be configured for
@@ -720,6 +850,7 @@ nv50_disp_intr_unk40_0(struct nv50_disp *disp, int head)
720 850
721 if (outp->info.location == 0 && outp->info.type == DCB_OUTPUT_TMDS) 851 if (outp->info.location == 0 && outp->info.type == DCB_OUTPUT_TMDS)
722 nv50_disp_intr_unk40_0_tmds(disp, &outp->info); 852 nv50_disp_intr_unk40_0_tmds(disp, &outp->info);
853 nv50_disp_dptmds_war_3(disp, &outp->info);
723} 854}
724 855
725void 856void
@@ -767,6 +898,7 @@ nv50_disp_intr_supervisor(struct work_struct *work)
767 continue; 898 continue;
768 nv50_disp_intr_unk40_0(disp, head); 899 nv50_disp_intr_unk40_0(disp, head);
769 } 900 }
901 nv50_disp_update_sppll1(disp);
770 } 902 }
771 903
772 nvkm_wr32(device, 0x610030, 0x80000000); 904 nvkm_wr32(device, 0x610030, 0x80000000);
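nv50_disp_update_sppll1() above scans every SOR and only clears the 0x00e840 bit when none of them still selects the SPPLL1 clock source. The same last-user check in a self-contained sketch, where the array read stands in for the per-SOR 0x614300 register:

#include <stdbool.h>
#include <stdio.h>

#define NR_SOR 4

/* Tests the 0x03000000 field the workaround above looks at. */
static bool sor_uses_sppll1(const unsigned *clksor, int sor)
{
    switch (clksor[sor] & 0x03000000) {
    case 0x02000000:
    case 0x03000000:
        return true;
    default:
        return false;
    }
}

int main(void)
{
    unsigned clksor[NR_SOR] = { 0x00000000, 0x02000000, 0x00000000, 0x00000000 };
    bool used = false;

    for (int sor = 0; sor < NR_SOR; sor++)
        used |= sor_uses_sppll1(clksor, sor);

    if (used)
        printf("a SOR still clocks from SPPLL1, leave it alone\n");
    else
        printf("no user left, safe to clear the enable bit\n");
    return 0;
}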
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
index aecebd8717e5..1e1de6bfe85a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
@@ -68,6 +68,7 @@ struct nv50_disp_func_outp {
68 68
69struct nv50_disp_func { 69struct nv50_disp_func {
70 void (*intr)(struct nv50_disp *); 70 void (*intr)(struct nv50_disp *);
71 void (*intr_error)(struct nv50_disp *, int chid);
71 72
72 const struct nvkm_event_func *uevent; 73 const struct nvkm_event_func *uevent;
73 void (*super)(struct work_struct *); 74 void (*super)(struct work_struct *);
@@ -114,4 +115,5 @@ void gf119_disp_vblank_init(struct nv50_disp *, int);
114void gf119_disp_vblank_fini(struct nv50_disp *, int); 115void gf119_disp_vblank_fini(struct nv50_disp *, int);
115void gf119_disp_intr(struct nv50_disp *); 116void gf119_disp_intr(struct nv50_disp *);
116void gf119_disp_intr_supervisor(struct work_struct *); 117void gf119_disp_intr_supervisor(struct work_struct *);
118void gf119_disp_intr_error(struct nv50_disp *, int);
117#endif 119#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c
index 2e2dc0641ef2..2f0220b39f34 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c
@@ -80,7 +80,7 @@ gk104_disp_ovly_mthd_base = {
80 } 80 }
81}; 81};
82 82
-83 static const struct nv50_disp_chan_mthd
+83 const struct nv50_disp_chan_mthd
84gk104_disp_ovly_chan_mthd = { 84gk104_disp_ovly_chan_mthd = {
85 .name = "Overlay", 85 .name = "Overlay",
86 .addr = 0x001000, 86 .addr = 0x001000,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp104.c
new file mode 100644
index 000000000000..97e2dd2d908e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp104.c
@@ -0,0 +1,38 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "dmacnv50.h"
25#include "rootnv50.h"
26
27#include <nvif/class.h>
28
29const struct nv50_disp_dmac_oclass
30gp104_disp_ovly_oclass = {
31 .base.oclass = GK104_DISP_OVERLAY_CONTROL_DMA,
32 .base.minver = 0,
33 .base.maxver = 0,
34 .ctor = nv50_disp_ovly_new,
35 .func = &gp104_disp_dmac_func,
36 .mthd = &gk104_disp_ovly_chan_mthd,
37 .chid = 5,
38};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c
new file mode 100644
index 000000000000..ac8fdd728ec6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c
@@ -0,0 +1,58 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "rootnv50.h"
25#include "dmacnv50.h"
26
27#include <nvif/class.h>
28
29static const struct nv50_disp_root_func
30gp100_disp_root = {
31 .init = gf119_disp_root_init,
32 .fini = gf119_disp_root_fini,
33 .dmac = {
34 &gp100_disp_core_oclass,
35 &gk110_disp_base_oclass,
36 &gk104_disp_ovly_oclass,
37 },
38 .pioc = {
39 &gk104_disp_oimm_oclass,
40 &gk104_disp_curs_oclass,
41 },
42};
43
44static int
45gp100_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
46 void *data, u32 size, struct nvkm_object **pobject)
47{
48 return nv50_disp_root_new_(&gp100_disp_root, disp, oclass,
49 data, size, pobject);
50}
51
52const struct nvkm_disp_oclass
53gp100_disp_root_oclass = {
54 .base.oclass = GP100_DISP,
55 .base.minver = -1,
56 .base.maxver = -1,
57 .ctor = gp100_disp_root_new,
58};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c
new file mode 100644
index 000000000000..8443e04dc626
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c
@@ -0,0 +1,58 @@
1/*
2 * Copyright 2016 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "rootnv50.h"
25#include "dmacnv50.h"
26
27#include <nvif/class.h>
28
29static const struct nv50_disp_root_func
30gp104_disp_root = {
31 .init = gf119_disp_root_init,
32 .fini = gf119_disp_root_fini,
33 .dmac = {
34 &gp104_disp_core_oclass,
35 &gp104_disp_base_oclass,
36 &gp104_disp_ovly_oclass,
37 },
38 .pioc = {
39 &gk104_disp_oimm_oclass,
40 &gk104_disp_curs_oclass,
41 },
42};
43
44static int
45gp104_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
46 void *data, u32 size, struct nvkm_object **pobject)
47{
48 return nv50_disp_root_new_(&gp104_disp_root, disp, oclass,
49 data, size, pobject);
50}
51
52const struct nvkm_disp_oclass
53gp104_disp_root_oclass = {
54 .base.oclass = GP104_DISP,
55 .base.minver = -1,
56 .base.maxver = -1,
57 .ctor = gp104_disp_root_new,
58};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
index cb449ed8d92c..ad00f1724b72 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h
@@ -40,4 +40,6 @@ extern const struct nvkm_disp_oclass gk104_disp_root_oclass;
40extern const struct nvkm_disp_oclass gk110_disp_root_oclass; 40extern const struct nvkm_disp_oclass gk110_disp_root_oclass;
41extern const struct nvkm_disp_oclass gm107_disp_root_oclass; 41extern const struct nvkm_disp_oclass gm107_disp_root_oclass;
42extern const struct nvkm_disp_oclass gm200_disp_root_oclass; 42extern const struct nvkm_disp_oclass gm200_disp_root_oclass;
43extern const struct nvkm_disp_oclass gp100_disp_root_oclass;
44extern const struct nvkm_disp_oclass gp104_disp_root_oclass;
43#endif 45#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
index 65e5d291ecda..98651a43bc12 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild
@@ -13,6 +13,7 @@ nvkm-y += nvkm/engine/fifo/gk20a.o
13nvkm-y += nvkm/engine/fifo/gm107.o 13nvkm-y += nvkm/engine/fifo/gm107.o
14nvkm-y += nvkm/engine/fifo/gm200.o 14nvkm-y += nvkm/engine/fifo/gm200.o
15nvkm-y += nvkm/engine/fifo/gm20b.o 15nvkm-y += nvkm/engine/fifo/gm20b.o
16nvkm-y += nvkm/engine/fifo/gp100.o
16 17
17nvkm-y += nvkm/engine/fifo/chan.o 18nvkm-y += nvkm/engine/fifo/chan.o
18nvkm-y += nvkm/engine/fifo/channv50.o 19nvkm-y += nvkm/engine/fifo/channv50.o
@@ -31,3 +32,4 @@ nvkm-y += nvkm/engine/fifo/gpfifogf100.o
31nvkm-y += nvkm/engine/fifo/gpfifogk104.o 32nvkm-y += nvkm/engine/fifo/gpfifogk104.o
32nvkm-y += nvkm/engine/fifo/gpfifogk110.o 33nvkm-y += nvkm/engine/fifo/gpfifogk110.o
33nvkm-y += nvkm/engine/fifo/gpfifogm200.o 34nvkm-y += nvkm/engine/fifo/gpfifogm200.o
35nvkm-y += nvkm/engine/fifo/gpfifogp100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
index e06f4d46f802..230f64e5f731 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h
@@ -27,4 +27,5 @@ int gk104_fifo_gpfifo_new(struct nvkm_fifo *, const struct nvkm_oclass *,
27extern const struct nvkm_fifo_chan_oclass gk104_fifo_gpfifo_oclass; 27extern const struct nvkm_fifo_chan_oclass gk104_fifo_gpfifo_oclass;
28extern const struct nvkm_fifo_chan_oclass gk110_fifo_gpfifo_oclass; 28extern const struct nvkm_fifo_chan_oclass gk110_fifo_gpfifo_oclass;
29extern const struct nvkm_fifo_chan_oclass gm200_fifo_gpfifo_oclass; 29extern const struct nvkm_fifo_chan_oclass gm200_fifo_gpfifo_oclass;
30extern const struct nvkm_fifo_chan_oclass gp100_fifo_gpfifo_oclass;
30#endif 31#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
index 743f3a189f28..103c0afaaa6d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c
@@ -329,7 +329,7 @@ gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
329 } 329 }
330 330
331 if (eu == NULL) { 331 if (eu == NULL) {
-332 		enum nvkm_devidx engidx = nvkm_top_fault(device->top, unit);
+332 		enum nvkm_devidx engidx = nvkm_top_fault(device, unit);
333 if (engidx < NVKM_SUBDEV_NR) { 333 if (engidx < NVKM_SUBDEV_NR) {
334 const char *src = nvkm_subdev_name[engidx]; 334 const char *src = nvkm_subdev_name[engidx];
335 char *dst = en; 335 char *dst = en;
@@ -589,7 +589,6 @@ gk104_fifo_oneinit(struct nvkm_fifo *base)
589 struct gk104_fifo *fifo = gk104_fifo(base); 589 struct gk104_fifo *fifo = gk104_fifo(base);
590 struct nvkm_subdev *subdev = &fifo->base.engine.subdev; 590 struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
591 struct nvkm_device *device = subdev->device; 591 struct nvkm_device *device = subdev->device;
592 struct nvkm_top *top = device->top;
593 int engn, runl, pbid, ret, i, j; 592 int engn, runl, pbid, ret, i, j;
594 enum nvkm_devidx engidx; 593 enum nvkm_devidx engidx;
595 u32 *map; 594 u32 *map;
@@ -608,7 +607,7 @@ gk104_fifo_oneinit(struct nvkm_fifo *base)
608 607
609 /* Determine runlist configuration from topology device info. */ 608 /* Determine runlist configuration from topology device info. */
610 i = 0; 609 i = 0;
-611 	while ((int)(engidx = nvkm_top_engine(top, i++, &runl, &engn)) >= 0) {
+610 	while ((int)(engidx = nvkm_top_engine(device, i++, &runl, &engn)) >= 0) {
612 /* Determine which PBDMA handles requests for this engine. */ 611 /* Determine which PBDMA handles requests for this engine. */
613 for (j = 0, pbid = -1; j < fifo->pbdma_nr; j++) { 612 for (j = 0, pbid = -1; j < fifo->pbdma_nr; j++) {
614 if (map[j] & (1 << runl)) { 613 if (map[j] & (1 << runl)) {
@@ -617,8 +616,8 @@ gk104_fifo_oneinit(struct nvkm_fifo *base)
617 } 616 }
618 } 617 }
619 618
-620 		nvkm_debug(subdev, "engine %2d: runlist %2d pbdma %2d\n",
-621 			   engn, runl, pbid);
+619 		nvkm_debug(subdev, "engine %2d: runlist %2d pbdma %2d (%s)\n",
+620 			   engn, runl, pbid, nvkm_subdev_name[engidx]);
622 621
623 fifo->engine[engn].engine = nvkm_device_engine(device, engidx); 622 fifo->engine[engn].engine = nvkm_device_engine(device, engidx);
624 fifo->engine[engn].runl = runl; 623 fifo->engine[engn].runl = runl;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
new file mode 100644
index 000000000000..eff83f7fb705
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c
@@ -0,0 +1,67 @@
1/*
2 * Copyright 2016 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "gk104.h"
25#include "changk104.h"
26
27static const struct nvkm_enum
28gp100_fifo_fault_engine[] = {
29 { 0x01, "DISPLAY" },
30 { 0x03, "IFB", NULL, NVKM_ENGINE_IFB },
31 { 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR },
32 { 0x05, "BAR2", NULL, NVKM_SUBDEV_INSTMEM },
33 { 0x06, "HOST0" },
34 { 0x07, "HOST1" },
35 { 0x08, "HOST2" },
36 { 0x09, "HOST3" },
37 { 0x0a, "HOST4" },
38 { 0x0b, "HOST5" },
39 { 0x0c, "HOST6" },
40 { 0x0d, "HOST7" },
41 { 0x0e, "HOST8" },
42 { 0x0f, "HOST9" },
43 { 0x10, "HOST10" },
44 { 0x13, "PERF" },
45 { 0x17, "PMU" },
46 { 0x18, "PTP" },
47 { 0x1f, "PHYSICAL" },
48 {}
49};
50
51static const struct gk104_fifo_func
52gp100_fifo = {
53 .fault.engine = gp100_fifo_fault_engine,
54 .fault.reason = gk104_fifo_fault_reason,
55 .fault.hubclient = gk104_fifo_fault_hubclient,
56 .fault.gpcclient = gk104_fifo_fault_gpcclient,
57 .chan = {
58 &gp100_fifo_gpfifo_oclass,
59 NULL
60 },
61};
62
63int
64gp100_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo)
65{
66 return gk104_fifo_new_(&gp100_fifo, device, index, 4096, pfifo);
67}
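gp100_fifo_fault_engine[] above is a { value, name } table that feeds the fault decoder visible earlier in this diff (gk104_fifo_intr_fault()), which resolves a raw unit id into a printable name. A standalone sketch of that lookup, with a few entries copied from the table and an invented terminator and helper:

#include <stdio.h>

struct demo_enum {
    unsigned value;
    const char *name;
};

static const struct demo_enum fault_engine[] = {
    { 0x01, "DISPLAY" },
    { 0x04, "BAR1" },
    { 0x05, "BAR2" },
    { 0x06, "HOST0" },
    { 0x1f, "PHYSICAL" },
    { 0x00, NULL }
};

static const char *fault_name(unsigned unit)
{
    for (const struct demo_enum *en = fault_engine; en->name; en++)
        if (en->value == unit)
            return en->name;
    return NULL;
}

int main(void)
{
    unsigned unit = 0x05;
    const char *name = fault_name(unit);

    if (name)
        printf("fault on %s (unit %02x)\n", name, unit);
    else
        printf("fault on unknown unit %02x\n", unit);
    return 0;
}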
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogp100.c
new file mode 100644
index 000000000000..1530a9217aea
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogp100.c
@@ -0,0 +1,34 @@
1/*
2 * Copyright 2016 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "changk104.h"
25
26#include <nvif/class.h>
27
28const struct nvkm_fifo_chan_oclass
29gp100_fifo_gpfifo_oclass = {
30 .base.oclass = PASCAL_CHANNEL_GPFIFO_A,
31 .base.minver = 0,
32 .base.maxver = 0,
33 .ctor = gk104_fifo_gpfifo_new,
34};
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
index 290ed0db8047..f1c494182248 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild
@@ -31,6 +31,7 @@ nvkm-y += nvkm/engine/gr/gk20a.o
31nvkm-y += nvkm/engine/gr/gm107.o 31nvkm-y += nvkm/engine/gr/gm107.o
32nvkm-y += nvkm/engine/gr/gm200.o 32nvkm-y += nvkm/engine/gr/gm200.o
33nvkm-y += nvkm/engine/gr/gm20b.o 33nvkm-y += nvkm/engine/gr/gm20b.o
34nvkm-y += nvkm/engine/gr/gp100.o
34 35
35nvkm-y += nvkm/engine/gr/ctxnv40.o 36nvkm-y += nvkm/engine/gr/ctxnv40.o
36nvkm-y += nvkm/engine/gr/ctxnv50.o 37nvkm-y += nvkm/engine/gr/ctxnv50.o
@@ -48,3 +49,4 @@ nvkm-y += nvkm/engine/gr/ctxgk20a.o
48nvkm-y += nvkm/engine/gr/ctxgm107.o 49nvkm-y += nvkm/engine/gr/ctxgm107.o
49nvkm-y += nvkm/engine/gr/ctxgm200.o 50nvkm-y += nvkm/engine/gr/ctxgm200.o
50nvkm-y += nvkm/engine/gr/ctxgm20b.o 51nvkm-y += nvkm/engine/gr/ctxgm20b.o
52nvkm-y += nvkm/engine/gr/ctxgp100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
index b02d8f50ea6a..bc77eea351a5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c
@@ -1240,7 +1240,7 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
1240 const struct gf100_grctx_func *grctx = gr->func->grctx; 1240 const struct gf100_grctx_func *grctx = gr->func->grctx;
1241 u32 idle_timeout; 1241 u32 idle_timeout;
1242 1242
1243 nvkm_mc_unk260(device->mc, 0); 1243 nvkm_mc_unk260(device, 0);
1244 1244
1245 gf100_gr_mmio(gr, grctx->hub); 1245 gf100_gr_mmio(gr, grctx->hub);
1246 gf100_gr_mmio(gr, grctx->gpc); 1246 gf100_gr_mmio(gr, grctx->gpc);
@@ -1264,7 +1264,7 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
1264 gf100_gr_icmd(gr, grctx->icmd); 1264 gf100_gr_icmd(gr, grctx->icmd);
1265 nvkm_wr32(device, 0x404154, idle_timeout); 1265 nvkm_wr32(device, 0x404154, idle_timeout);
1266 gf100_gr_mthd(gr, grctx->mthd); 1266 gf100_gr_mthd(gr, grctx->mthd);
1267 nvkm_mc_unk260(device->mc, 1); 1267 nvkm_mc_unk260(device, 1);
1268} 1268}
1269 1269
1270int 1270int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
index ac895edce164..52048b5a5274 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h
@@ -101,6 +101,8 @@ void gm200_grctx_generate_405b60(struct gf100_gr *);
101 101
102extern const struct gf100_grctx_func gm20b_grctx; 102extern const struct gf100_grctx_func gm20b_grctx;
103 103
104extern const struct gf100_grctx_func gp100_grctx;
105
104/* context init value lists */ 106/* context init value lists */
105 107
106extern const struct gf100_gr_pack gf100_grctx_pack_icmd[]; 108extern const struct gf100_gr_pack gf100_grctx_pack_icmd[];
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
index f521de11a299..c925ade5880e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c
@@ -226,7 +226,7 @@ gf117_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
226 u32 idle_timeout; 226 u32 idle_timeout;
227 int i; 227 int i;
228 228
229 nvkm_mc_unk260(device->mc, 0); 229 nvkm_mc_unk260(device, 0);
230 230
231 gf100_gr_mmio(gr, grctx->hub); 231 gf100_gr_mmio(gr, grctx->hub);
232 gf100_gr_mmio(gr, grctx->gpc); 232 gf100_gr_mmio(gr, grctx->gpc);
@@ -253,7 +253,7 @@ gf117_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
253 gf100_gr_icmd(gr, grctx->icmd); 253 gf100_gr_icmd(gr, grctx->icmd);
254 nvkm_wr32(device, 0x404154, idle_timeout); 254 nvkm_wr32(device, 0x404154, idle_timeout);
255 gf100_gr_mthd(gr, grctx->mthd); 255 gf100_gr_mthd(gr, grctx->mthd);
256 nvkm_mc_unk260(device->mc, 1); 256 nvkm_mc_unk260(device, 1);
257} 257}
258 258
259const struct gf100_grctx_func 259const struct gf100_grctx_func
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
index 9ba337778ef5..c46b3fdf7203 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c
@@ -950,7 +950,7 @@ gk104_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
950 u32 idle_timeout; 950 u32 idle_timeout;
951 int i; 951 int i;
952 952
953 nvkm_mc_unk260(device->mc, 0); 953 nvkm_mc_unk260(device, 0);
954 954
955 gf100_gr_mmio(gr, grctx->hub); 955 gf100_gr_mmio(gr, grctx->hub);
956 gf100_gr_mmio(gr, grctx->gpc); 956 gf100_gr_mmio(gr, grctx->gpc);
@@ -979,7 +979,7 @@ gk104_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
979 gf100_gr_icmd(gr, grctx->icmd); 979 gf100_gr_icmd(gr, grctx->icmd);
980 nvkm_wr32(device, 0x404154, idle_timeout); 980 nvkm_wr32(device, 0x404154, idle_timeout);
981 gf100_gr_mthd(gr, grctx->mthd); 981 gf100_gr_mthd(gr, grctx->mthd);
982 nvkm_mc_unk260(device->mc, 1); 982 nvkm_mc_unk260(device, 1);
983 983
984 nvkm_mask(device, 0x418800, 0x00200000, 0x00200000); 984 nvkm_mask(device, 0x418800, 0x00200000, 0x00200000);
985 nvkm_mask(device, 0x41be10, 0x00800000, 0x00800000); 985 nvkm_mask(device, 0x41be10, 0x00800000, 0x00800000);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
new file mode 100644
index 000000000000..3d1ae7ddf7dd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c
@@ -0,0 +1,179 @@
1/*
2 * Copyright 2016 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "ctxgf100.h"
25
26#include <subdev/fb.h>
27
28/*******************************************************************************
29 * PGRAPH context implementation
30 ******************************************************************************/
31
32static void
33gp100_grctx_generate_pagepool(struct gf100_grctx *info)
34{
35 const struct gf100_grctx_func *grctx = info->gr->func->grctx;
36 const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS;
37 const int s = 8;
38 const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), access);
39 mmio_refn(info, 0x40800c, 0x00000000, s, b);
40 mmio_wr32(info, 0x408010, 0x80000000);
41 mmio_refn(info, 0x419004, 0x00000000, s, b);
42 mmio_wr32(info, 0x419008, 0x00000000);
43}
44
45static void
46gp100_grctx_generate_attrib(struct gf100_grctx *info)
47{
48 struct gf100_gr *gr = info->gr;
49 const struct gf100_grctx_func *grctx = gr->func->grctx;
50 const u32 alpha = grctx->alpha_nr;
51 const u32 attrib = grctx->attrib_nr;
52 const u32 pertpc = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max);
53 const u32 size = roundup(gr->tpc_total * pertpc, 0x80);
54 const u32 access = NV_MEM_ACCESS_RW;
55 const int s = 12;
56 const int b = mmio_vram(info, size, (1 << s), access);
57 const int max_batches = 0xffff;
58 u32 ao = 0;
59 u32 bo = ao + grctx->alpha_nr_max * gr->tpc_total;
60 int gpc, ppc, n = 0;
61
62 mmio_refn(info, 0x418810, 0x80000000, s, b);
63 mmio_refn(info, 0x419848, 0x10000000, s, b);
64 mmio_refn(info, 0x419c2c, 0x10000000, s, b);
65 mmio_refn(info, 0x419b00, 0x00000000, s, b);
66 mmio_wr32(info, 0x419b04, 0x80000000 | size >> 7);
67 mmio_wr32(info, 0x405830, attrib);
68 mmio_wr32(info, 0x40585c, alpha);
69 mmio_wr32(info, 0x4064c4, ((alpha / 4) << 16) | max_batches);
70
71 for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
72 for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++, n++) {
73 const u32 as = alpha * gr->ppc_tpc_nr[gpc][ppc];
74 const u32 bs = attrib * gr->ppc_tpc_nr[gpc][ppc];
75 const u32 u = 0x418ea0 + (n * 0x04);
76 const u32 o = PPC_UNIT(gpc, ppc, 0);
77 if (!(gr->ppc_mask[gpc] & (1 << ppc)))
78 continue;
79 mmio_wr32(info, o + 0xc0, bs);
80 mmio_wr32(info, o + 0xf4, bo);
81 mmio_wr32(info, o + 0xf0, bs);
82 bo += grctx->attrib_nr_max * gr->ppc_tpc_nr[gpc][ppc];
83 mmio_wr32(info, o + 0xe4, as);
84 mmio_wr32(info, o + 0xf8, ao);
85 ao += grctx->alpha_nr_max * gr->ppc_tpc_nr[gpc][ppc];
86 mmio_wr32(info, u, bs);
87 }
88 }
89
90 mmio_wr32(info, 0x418eec, 0x00000000);
91 mmio_wr32(info, 0x41befc, 0x00000000);
92}
93
94static void
95gp100_grctx_generate_405b60(struct gf100_gr *gr)
96{
97 struct nvkm_device *device = gr->base.engine.subdev.device;
98 const u32 dist_nr = DIV_ROUND_UP(gr->tpc_total, 4);
99 u32 dist[TPC_MAX / 4] = {};
100 u32 gpcs[GPC_MAX * 2] = {};
101 u8 tpcnr[GPC_MAX];
102 int tpc, gpc, i;
103
104 memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
105
 106	/* This won't result in the same distribution as the binary driver,
 107	 * where some of the GPCs have more TPCs than others, but it will do
 108	 * for the moment.  The code for earlier GPUs has this issue too.
 109	 */
110 for (gpc = -1, i = 0; i < gr->tpc_total; i++) {
111 do {
112 gpc = (gpc + 1) % gr->gpc_nr;
113 } while(!tpcnr[gpc]);
114 tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
115
116 dist[i / 4] |= ((gpc << 4) | tpc) << ((i % 4) * 8);
117 gpcs[gpc + (gr->gpc_nr * (tpc / 4))] |= i << (tpc * 8);
118 }
119
120 for (i = 0; i < dist_nr; i++)
121 nvkm_wr32(device, 0x405b60 + (i * 4), dist[i]);
122 for (i = 0; i < gr->gpc_nr * 2; i++)
123 nvkm_wr32(device, 0x405ba0 + (i * 4), gpcs[i]);
124}
125
126static void
127gp100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info)
128{
129 struct nvkm_device *device = gr->base.engine.subdev.device;
130 const struct gf100_grctx_func *grctx = gr->func->grctx;
131 u32 idle_timeout, tmp;
132 int i;
133
134 gf100_gr_mmio(gr, gr->fuc_sw_ctx);
135
136 idle_timeout = nvkm_mask(device, 0x404154, 0xffffffff, 0x00000000);
137
138 grctx->pagepool(info);
139 grctx->bundle(info);
140 grctx->attrib(info);
141 grctx->unkn(gr);
142
143 gm200_grctx_generate_tpcid(gr);
144 gf100_grctx_generate_r406028(gr);
145 gk104_grctx_generate_r418bb8(gr);
146
147 for (i = 0; i < 8; i++)
148 nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000);
149 nvkm_wr32(device, 0x406500, 0x00000000);
150
151 nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr);
152
153 for (tmp = 0, i = 0; i < gr->gpc_nr; i++)
154 tmp |= ((1 << gr->tpc_nr[i]) - 1) << (i * 5);
155 nvkm_wr32(device, 0x4041c4, tmp);
156
157 gp100_grctx_generate_405b60(gr);
158
159 gf100_gr_icmd(gr, gr->fuc_bundle);
160 nvkm_wr32(device, 0x404154, idle_timeout);
161 gf100_gr_mthd(gr, gr->fuc_method);
162}
163
164const struct gf100_grctx_func
165gp100_grctx = {
166 .main = gp100_grctx_generate_main,
167 .unkn = gk104_grctx_generate_unkn,
168 .bundle = gm107_grctx_generate_bundle,
169 .bundle_size = 0x3000,
170 .bundle_min_gpm_fifo_depth = 0x180,
171 .bundle_token_limit = 0x1080,
172 .pagepool = gp100_grctx_generate_pagepool,
173 .pagepool_size = 0x20000,
174 .attrib = gp100_grctx_generate_attrib,
175 .attrib_nr_max = 0x660,
176 .attrib_nr = 0x440,
177 .alpha_nr_max = 0xc00,
178 .alpha_nr = 0x800,
179};
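
gp100_grctx_generate_405b60() above spreads TPCs across GPCs round-robin and packs one byte per TPC, high nibble GPC and low nibble TPC index, into the 32-bit words written at 0x405b60. A minimal standalone sketch of that packing, with placeholder limits and caller-supplied counts, is:

#include <stdint.h>
#include <string.h>

#define MAX_GPC 32                      /* placeholder limit */

static void
pack_tpc_dist(int gpc_nr, const uint8_t *tpc_nr, int tpc_total,
              uint32_t *dist /* tpc_total/4 words, zeroed by the caller */)
{
        uint8_t left[MAX_GPC];
        int gpc = -1, tpc, i;

        memcpy(left, tpc_nr, gpc_nr);
        for (i = 0; i < tpc_total; i++) {
                do {                    /* next GPC with TPCs left to place */
                        gpc = (gpc + 1) % gpc_nr;
                } while (!left[gpc]);
                tpc = tpc_nr[gpc] - left[gpc]--;

                /* one byte per TPC: high nibble = GPC, low nibble = TPC index */
                dist[i / 4] |= (uint32_t)((gpc << 4) | tpc) << ((i % 4) * 8);
        }
}
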
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index ae9ab5b1ab97..157919c788e6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -1457,24 +1457,30 @@ gf100_gr_init_ctxctl(struct gf100_gr *gr)
1457 struct nvkm_device *device = subdev->device; 1457 struct nvkm_device *device = subdev->device;
1458 struct nvkm_secboot *sb = device->secboot; 1458 struct nvkm_secboot *sb = device->secboot;
1459 int i; 1459 int i;
1460 int ret = 0;
1460 1461
1461 if (gr->firmware) { 1462 if (gr->firmware) {
1462 /* load fuc microcode */ 1463 /* load fuc microcode */
1463 nvkm_mc_unk260(device->mc, 0); 1464 nvkm_mc_unk260(device, 0);
1464 1465
1465 /* securely-managed falcons must be reset using secure boot */ 1466 /* securely-managed falcons must be reset using secure boot */
1466 if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_FECS)) 1467 if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_FECS))
1467 nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_FECS); 1468 ret = nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_FECS);
1468 else 1469 else
1469 gf100_gr_init_fw(gr, 0x409000, &gr->fuc409c, 1470 gf100_gr_init_fw(gr, 0x409000, &gr->fuc409c,
1470 &gr->fuc409d); 1471 &gr->fuc409d);
1472 if (ret)
1473 return ret;
1474
1471 if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_GPCCS)) 1475 if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_GPCCS))
1472 nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_GPCCS); 1476 ret = nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_GPCCS);
1473 else 1477 else
1474 gf100_gr_init_fw(gr, 0x41a000, &gr->fuc41ac, 1478 gf100_gr_init_fw(gr, 0x41a000, &gr->fuc41ac,
1475 &gr->fuc41ad); 1479 &gr->fuc41ad);
1480 if (ret)
1481 return ret;
1476 1482
1477 nvkm_mc_unk260(device->mc, 1); 1483 nvkm_mc_unk260(device, 1);
1478 1484
1479 /* start both of them running */ 1485 /* start both of them running */
1480 nvkm_wr32(device, 0x409840, 0xffffffff); 1486 nvkm_wr32(device, 0x409840, 0xffffffff);
@@ -1576,7 +1582,7 @@ gf100_gr_init_ctxctl(struct gf100_gr *gr)
1576 } 1582 }
1577 1583
1578 /* load HUB microcode */ 1584 /* load HUB microcode */
1579 nvkm_mc_unk260(device->mc, 0); 1585 nvkm_mc_unk260(device, 0);
1580 nvkm_wr32(device, 0x4091c0, 0x01000000); 1586 nvkm_wr32(device, 0x4091c0, 0x01000000);
1581 for (i = 0; i < gr->func->fecs.ucode->data.size / 4; i++) 1587 for (i = 0; i < gr->func->fecs.ucode->data.size / 4; i++)
1582 nvkm_wr32(device, 0x4091c4, gr->func->fecs.ucode->data.data[i]); 1588 nvkm_wr32(device, 0x4091c4, gr->func->fecs.ucode->data.data[i]);
@@ -1599,7 +1605,7 @@ gf100_gr_init_ctxctl(struct gf100_gr *gr)
1599 nvkm_wr32(device, 0x41a188, i >> 6); 1605 nvkm_wr32(device, 0x41a188, i >> 6);
1600 nvkm_wr32(device, 0x41a184, gr->func->gpccs.ucode->code.data[i]); 1606 nvkm_wr32(device, 0x41a184, gr->func->gpccs.ucode->code.data[i]);
1601 } 1607 }
1602 nvkm_mc_unk260(device->mc, 1); 1608 nvkm_mc_unk260(device, 1);
1603 1609
1604 /* load register lists */ 1610 /* load register lists */
1605 gf100_gr_init_csdata(gr, grctx->hub, 0x409000, 0x000, 0x000000); 1611 gf100_gr_init_csdata(gr, grctx->hub, 0x409000, 0x000, 0x000000);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index 2b98abdb9270..268b8d60ff73 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -292,4 +292,6 @@ extern const struct gf100_gr_init gm107_gr_init_l1c_0[];
292extern const struct gf100_gr_init gm107_gr_init_wwdx_0[]; 292extern const struct gf100_gr_init gm107_gr_init_wwdx_0[];
293extern const struct gf100_gr_init gm107_gr_init_cbm_0[]; 293extern const struct gf100_gr_init gm107_gr_init_cbm_0[];
294void gm107_gr_init_bios(struct gf100_gr *); 294void gm107_gr_init_bios(struct gf100_gr *);
295
296void gm200_gr_init_gpc_mmu(struct gf100_gr *);
295#endif 297#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
index 4ca8ed15191c..de8b806b88fd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c
@@ -361,6 +361,5 @@ gk20a_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
361 if (ret) 361 if (ret)
362 return ret; 362 return ret;
363 363
364
365 return 0; 364 return 0;
366} 365}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
index 4dfa4513bb6c..6435f1257572 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c
@@ -38,7 +38,7 @@ gm200_gr_rops(struct gf100_gr *gr)
38 return nvkm_rd32(gr->base.engine.subdev.device, 0x12006c); 38 return nvkm_rd32(gr->base.engine.subdev.device, 0x12006c);
39} 39}
40 40
41static void 41void
42gm200_gr_init_gpc_mmu(struct gf100_gr *gr) 42gm200_gr_init_gpc_mmu(struct gf100_gr *gr)
43{ 43{
44 struct nvkm_device *device = gr->base.engine.subdev.device; 44 struct nvkm_device *device = gr->base.engine.subdev.device;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
new file mode 100644
index 000000000000..26ad79def0ff
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c
@@ -0,0 +1,171 @@
1/*
2 * Copyright 2016 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "gf100.h"
25#include "ctxgf100.h"
26
27#include <nvif/class.h>
28
29/*******************************************************************************
30 * PGRAPH engine/subdev functions
31 ******************************************************************************/
32
33static void
34gp100_gr_init_rop_active_fbps(struct gf100_gr *gr)
35{
36 struct nvkm_device *device = gr->base.engine.subdev.device;
 37	/*XXX: otherwise identical to gm200 aside from the mask... do everywhere? */
38 const u32 fbp_count = nvkm_rd32(device, 0x12006c) & 0x0000000f;
39 nvkm_mask(device, 0x408850, 0x0000000f, fbp_count); /* zrop */
40 nvkm_mask(device, 0x408958, 0x0000000f, fbp_count); /* crop */
41}
42
43static int
44gp100_gr_init(struct gf100_gr *gr)
45{
46 struct nvkm_device *device = gr->base.engine.subdev.device;
47 const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total);
48 u32 data[TPC_MAX / 8] = {};
49 u8 tpcnr[GPC_MAX];
50 int gpc, tpc, rop;
51 int i;
52
53 gr->func->init_gpc_mmu(gr);
54
55 gf100_gr_mmio(gr, gr->fuc_sw_nonctx);
56
57 nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001);
58
59 memset(data, 0x00, sizeof(data));
60 memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
61 for (i = 0, gpc = -1; i < gr->tpc_total; i++) {
62 do {
63 gpc = (gpc + 1) % gr->gpc_nr;
64 } while (!tpcnr[gpc]);
65 tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
66
67 data[i / 8] |= tpc << ((i % 8) * 4);
68 }
69
70 nvkm_wr32(device, GPC_BCAST(0x0980), data[0]);
71 nvkm_wr32(device, GPC_BCAST(0x0984), data[1]);
72 nvkm_wr32(device, GPC_BCAST(0x0988), data[2]);
73 nvkm_wr32(device, GPC_BCAST(0x098c), data[3]);
74
75 for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
76 nvkm_wr32(device, GPC_UNIT(gpc, 0x0914),
77 gr->screen_tile_row_offset << 8 | gr->tpc_nr[gpc]);
78 nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 |
79 gr->tpc_total);
80 nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918);
81 }
82
83 nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918);
84 nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800));
85 nvkm_wr32(device, GPC_BCAST(0x033c), nvkm_rd32(device, 0x100804));
86
87 gr->func->init_rop_active_fbps(gr);
88
89 nvkm_wr32(device, 0x400500, 0x00010001);
90 nvkm_wr32(device, 0x400100, 0xffffffff);
91 nvkm_wr32(device, 0x40013c, 0xffffffff);
92 nvkm_wr32(device, 0x400124, 0x00000002);
93 nvkm_wr32(device, 0x409c24, 0x000f0002);
94 nvkm_wr32(device, 0x405848, 0xc0000000);
95 nvkm_mask(device, 0x40584c, 0x00000000, 0x00000001);
96 nvkm_wr32(device, 0x404000, 0xc0000000);
97 nvkm_wr32(device, 0x404600, 0xc0000000);
98 nvkm_wr32(device, 0x408030, 0xc0000000);
99 nvkm_wr32(device, 0x404490, 0xc0000000);
100 nvkm_wr32(device, 0x406018, 0xc0000000);
101 nvkm_wr32(device, 0x407020, 0x40000000);
102 nvkm_wr32(device, 0x405840, 0xc0000000);
103 nvkm_wr32(device, 0x405844, 0x00ffffff);
104 nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008);
105
106 nvkm_mask(device, 0x419c9c, 0x00010000, 0x00010000);
107 nvkm_mask(device, 0x419c9c, 0x00020000, 0x00020000);
108
109 gr->func->init_ppc_exceptions(gr);
110
111 for (gpc = 0; gpc < gr->gpc_nr; gpc++) {
112 nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000);
113 nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000);
114 nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000);
115 nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000);
116 for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) {
117 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
118 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
119 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
120 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
121 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
122 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000);
123 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe);
124 nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x00000105);
125 }
126 nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
127 nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
128 }
129
130 for (rop = 0; rop < gr->rop_nr; rop++) {
131 nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0x40000000);
132 nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0x40000000);
133 nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff);
134 nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff);
135 }
136
137 nvkm_wr32(device, 0x400108, 0xffffffff);
138 nvkm_wr32(device, 0x400138, 0xffffffff);
139 nvkm_wr32(device, 0x400118, 0xffffffff);
140 nvkm_wr32(device, 0x400130, 0xffffffff);
141 nvkm_wr32(device, 0x40011c, 0xffffffff);
142 nvkm_wr32(device, 0x400134, 0xffffffff);
143
144 gf100_gr_zbc_init(gr);
145
146 return gf100_gr_init_ctxctl(gr);
147}
148
149static const struct gf100_gr_func
150gp100_gr = {
151 .init = gp100_gr_init,
152 .init_gpc_mmu = gm200_gr_init_gpc_mmu,
153 .init_rop_active_fbps = gp100_gr_init_rop_active_fbps,
154 .init_ppc_exceptions = gk104_gr_init_ppc_exceptions,
155 .rops = gm200_gr_rops,
156 .ppc_nr = 2,
157 .grctx = &gp100_grctx,
158 .sclass = {
159 { -1, -1, FERMI_TWOD_A },
160 { -1, -1, KEPLER_INLINE_TO_MEMORY_B },
161 { -1, -1, PASCAL_A, &gf100_fermi },
162 { -1, -1, PASCAL_COMPUTE_A },
163 {}
164 }
165};
166
167int
168gp100_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr)
169{
170 return gm200_gr_new_(&gp100_gr, device, index, pgr);
171}
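
gp100_gr_init() above builds a similar table: each slot gets a 4-bit TPC index, eight slots per 32-bit word, broadcast via GPC_BCAST(0x0980..0x098c), and magicgpc918 is simply 0x00800000 divided by the TPC count, rounded up. A small self-contained example of the packing and the division, using hypothetical counts rather than real GP100 topology, is:

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        /* hypothetical per-slot TPC indices, as produced by the round-robin */
        const uint8_t tpc_for_slot[] = { 0, 1, 2, 0, 1, 2, 3, 4, 0, 1 };
        const int tpc_total = sizeof(tpc_for_slot);
        uint32_t data[(sizeof(tpc_for_slot) + 7) / 8] = { 0 };
        int i;

        for (i = 0; i < tpc_total; i++)         /* 4 bits per TPC, 8 per word */
                data[i / 8] |= (uint32_t)tpc_for_slot[i] << ((i % 8) * 4);

        printf("word0=%08x word1=%08x magic=%u\n",
               (unsigned)data[0], (unsigned)data[1],
               (unsigned)DIV_ROUND_UP(0x00800000, tpc_total));
        return 0;
}
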
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
index 69de8c6259fe..f1e15a4d4f64 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv30.c
@@ -76,8 +76,8 @@ nv30_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
76 nvkm_wo32(chan->inst, i, 0x00040004); 76 nvkm_wo32(chan->inst, i, 0x00040004);
77 for (i = 0x1f18; i <= 0x3088 ; i += 16) { 77 for (i = 0x1f18; i <= 0x3088 ; i += 16) {
78 nvkm_wo32(chan->inst, i + 0, 0x10700ff9); 78 nvkm_wo32(chan->inst, i + 0, 0x10700ff9);
79 nvkm_wo32(chan->inst, i + 1, 0x0436086c); 79 nvkm_wo32(chan->inst, i + 4, 0x0436086c);
80 nvkm_wo32(chan->inst, i + 2, 0x000c001b); 80 nvkm_wo32(chan->inst, i + 8, 0x000c001b);
81 } 81 }
82 for (i = 0x30b8; i < 0x30c8; i += 4) 82 for (i = 0x30b8; i < 0x30c8; i += 4)
83 nvkm_wo32(chan->inst, i, 0x0000ffff); 83 nvkm_wo32(chan->inst, i, 0x0000ffff);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
index 2207dac23981..300f5ed5de0b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/nv34.c
@@ -75,8 +75,8 @@ nv34_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
75 nvkm_wo32(chan->inst, i, 0x00040004); 75 nvkm_wo32(chan->inst, i, 0x00040004);
76 for (i = 0x15ac; i <= 0x271c ; i += 16) { 76 for (i = 0x15ac; i <= 0x271c ; i += 16) {
77 nvkm_wo32(chan->inst, i + 0, 0x10700ff9); 77 nvkm_wo32(chan->inst, i + 0, 0x10700ff9);
78 nvkm_wo32(chan->inst, i + 1, 0x0436086c); 78 nvkm_wo32(chan->inst, i + 4, 0x0436086c);
79 nvkm_wo32(chan->inst, i + 2, 0x000c001b); 79 nvkm_wo32(chan->inst, i + 8, 0x000c001b);
80 } 80 }
81 for (i = 0x274c; i < 0x275c; i += 4) 81 for (i = 0x274c; i < 0x275c; i += 4)
82 nvkm_wo32(chan->inst, i, 0x0000ffff); 82 nvkm_wo32(chan->inst, i, 0x0000ffff);
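
The nv30/nv34 hunks above fix the context-setup loops: the write helper takes a byte offset, so three consecutive 32-bit words belong at i + 0, i + 4 and i + 8, while the old i + 1 / i + 2 offsets overlapped the word just written. A standalone sketch of that arithmetic, using a plain little-endian store in place of the nvkm accessor, is:

#include <stdint.h>

static void wo32(uint8_t *obj, uint32_t byte_off, uint32_t val)
{
        obj[byte_off + 0] = val & 0xff;          /* little-endian store */
        obj[byte_off + 1] = (val >> 8) & 0xff;
        obj[byte_off + 2] = (val >> 16) & 0xff;
        obj[byte_off + 3] = (val >> 24) & 0xff;
}

static void init_entry(uint8_t *obj, uint32_t i)
{
        wo32(obj, i + 0, 0x10700ff9);
        wo32(obj, i + 4, 0x0436086c);   /* was i + 1: overlapped the first word */
        wo32(obj, i + 8, 0x000c001b);   /* was i + 2: overlapped both words */
}
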
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
index e15b9627b07e..f3c30b2a788e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c
@@ -26,6 +26,49 @@
26#include <subdev/bios.h> 26#include <subdev/bios.h>
27#include <subdev/bios/bmp.h> 27#include <subdev/bios/bmp.h>
28#include <subdev/bios/bit.h> 28#include <subdev/bios/bit.h>
29#include <subdev/bios/image.h>
30
31static bool
32nvbios_addr(struct nvkm_bios *bios, u32 *addr, u8 size)
33{
34 u32 p = *addr;
35
36 if (*addr > bios->image0_size && bios->imaged_addr) {
37 *addr -= bios->image0_size;
38 *addr += bios->imaged_addr;
39 }
40
41 if (unlikely(*addr + size >= bios->size)) {
42 nvkm_error(&bios->subdev, "OOB %d %08x %08x\n", size, p, *addr);
43 return false;
44 }
45
46 return true;
47}
48
49u8
50nvbios_rd08(struct nvkm_bios *bios, u32 addr)
51{
52 if (likely(nvbios_addr(bios, &addr, 1)))
53 return bios->data[addr];
54 return 0x00;
55}
56
57u16
58nvbios_rd16(struct nvkm_bios *bios, u32 addr)
59{
60 if (likely(nvbios_addr(bios, &addr, 2)))
61 return get_unaligned_le16(&bios->data[addr]);
62 return 0x0000;
63}
64
65u32
66nvbios_rd32(struct nvkm_bios *bios, u32 addr)
67{
68 if (likely(nvbios_addr(bios, &addr, 4)))
69 return get_unaligned_le32(&bios->data[addr]);
70 return 0x00000000;
71}
29 72
30u8 73u8
31nvbios_checksum(const u8 *data, int size) 74nvbios_checksum(const u8 *data, int size)
@@ -100,8 +143,9 @@ int
100nvkm_bios_new(struct nvkm_device *device, int index, struct nvkm_bios **pbios) 143nvkm_bios_new(struct nvkm_device *device, int index, struct nvkm_bios **pbios)
101{ 144{
102 struct nvkm_bios *bios; 145 struct nvkm_bios *bios;
146 struct nvbios_image image;
103 struct bit_entry bit_i; 147 struct bit_entry bit_i;
104 int ret; 148 int ret, idx = 0;
105 149
106 if (!(bios = *pbios = kzalloc(sizeof(*bios), GFP_KERNEL))) 150 if (!(bios = *pbios = kzalloc(sizeof(*bios), GFP_KERNEL)))
107 return -ENOMEM; 151 return -ENOMEM;
@@ -111,6 +155,19 @@ nvkm_bios_new(struct nvkm_device *device, int index, struct nvkm_bios **pbios)
111 if (ret) 155 if (ret)
112 return ret; 156 return ret;
113 157
158 /* Some tables have weird pointers that need adjustment before
159 * they're dereferenced. I'm not entirely sure why...
160 */
161 if (nvbios_image(bios, idx++, &image)) {
162 bios->image0_size = image.size;
163 while (nvbios_image(bios, idx++, &image)) {
164 if (image.type == 0xe0) {
165 bios->imaged_addr = image.base;
166 break;
167 }
168 }
169 }
170
114 /* detect type of vbios we're dealing with */ 171 /* detect type of vbios we're dealing with */
115 bios->bmp_offset = nvbios_findstr(bios->data, bios->size, 172 bios->bmp_offset = nvbios_findstr(bios->data, bios->size,
116 "\xff\x7f""NV\0", 5); 173 "\xff\x7f""NV\0", 5);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
index 05332476354a..d89e78c4e689 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
@@ -40,6 +40,7 @@ nvbios_dp_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
40 case 0x30: 40 case 0x30:
41 case 0x40: 41 case 0x40:
42 case 0x41: 42 case 0x41:
43 case 0x42:
43 *hdr = nvbios_rd08(bios, data + 0x01); 44 *hdr = nvbios_rd08(bios, data + 0x01);
44 *len = nvbios_rd08(bios, data + 0x02); 45 *len = nvbios_rd08(bios, data + 0x02);
45 *cnt = nvbios_rd08(bios, data + 0x03); 46 *cnt = nvbios_rd08(bios, data + 0x03);
@@ -70,6 +71,7 @@ nvbios_dpout_entry(struct nvkm_bios *bios, u8 idx,
70 break; 71 break;
71 case 0x40: 72 case 0x40:
72 case 0x41: 73 case 0x41:
74 case 0x42:
73 *hdr = nvbios_rd08(bios, data + 0x04); 75 *hdr = nvbios_rd08(bios, data + 0x04);
74 *cnt = 0; 76 *cnt = 0;
75 *len = 0; 77 *len = 0;
@@ -109,6 +111,7 @@ nvbios_dpout_parse(struct nvkm_bios *bios, u8 idx,
109 break; 111 break;
110 case 0x40: 112 case 0x40:
111 case 0x41: 113 case 0x41:
114 case 0x42:
112 info->flags = nvbios_rd08(bios, data + 0x04); 115 info->flags = nvbios_rd08(bios, data + 0x04);
113 info->script[0] = nvbios_rd16(bios, data + 0x05); 116 info->script[0] = nvbios_rd16(bios, data + 0x05);
114 info->script[1] = nvbios_rd16(bios, data + 0x07); 117 info->script[1] = nvbios_rd16(bios, data + 0x07);
@@ -180,6 +183,11 @@ nvbios_dpcfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx,
180 info->pe = nvbios_rd08(bios, data + 0x02); 183 info->pe = nvbios_rd08(bios, data + 0x02);
181 info->tx_pu = nvbios_rd08(bios, data + 0x03); 184 info->tx_pu = nvbios_rd08(bios, data + 0x03);
182 break; 185 break;
186 case 0x42:
187 info->dc = nvbios_rd08(bios, data + 0x00);
188 info->pe = nvbios_rd08(bios, data + 0x01);
189 info->tx_pu = nvbios_rd08(bios, data + 0x02);
190 break;
183 default: 191 default:
184 data = 0x0000; 192 data = 0x0000;
185 break; 193 break;
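
The dp.c hunks accept DP table version 0x42 and, for the drive-settings entries, read the dc/pe/tx_pu fields one byte earlier in the entry than previous versions did. A tiny sketch of version-dependent field offsets, with a made-up record layout and names, is:

#include <stdint.h>

struct dp_drive { uint8_t dc, pe, tx_pu; };

/* parse one entry at 'data'; 0x42 entries start one byte earlier per the hunk */
static void parse_drive(const uint8_t *data, uint8_t ver, struct dp_drive *out)
{
        int base = (ver >= 0x42) ? 0x00 : 0x01;

        out->dc    = data[base + 0];
        out->pe    = data[base + 1];
        out->tx_pu = data[base + 2];
}
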
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/image.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/image.c
index 74b14cf09308..1dbff7aeafec 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/image.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/image.c
@@ -68,11 +68,16 @@ nvbios_imagen(struct nvkm_bios *bios, struct nvbios_image *image)
68bool 68bool
69nvbios_image(struct nvkm_bios *bios, int idx, struct nvbios_image *image) 69nvbios_image(struct nvkm_bios *bios, int idx, struct nvbios_image *image)
70{ 70{
71 u32 imaged_addr = bios->imaged_addr;
71 memset(image, 0x00, sizeof(*image)); 72 memset(image, 0x00, sizeof(*image));
73 bios->imaged_addr = 0;
72 do { 74 do {
73 image->base += image->size; 75 image->base += image->size;
74 if (image->last || !nvbios_imagen(bios, image)) 76 if (image->last || !nvbios_imagen(bios, image)) {
77 bios->imaged_addr = imaged_addr;
75 return false; 78 return false;
79 }
76 } while(idx--); 80 } while(idx--);
81 bios->imaged_addr = imaged_addr;
77 return true; 82 return true;
78} 83}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
index 91a7dc56e406..2ca23a9157ab 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c
@@ -77,15 +77,17 @@ g84_pll_mapping[] = {
77 {} 77 {}
78}; 78};
79 79
80static u16 80static u32
81pll_limits_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len) 81pll_limits_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
82{ 82{
83 struct bit_entry bit_C; 83 struct bit_entry bit_C;
84 u16 data = 0x0000; 84 u32 data = 0x0000;
85 85
86 if (!bit_entry(bios, 'C', &bit_C)) { 86 if (!bit_entry(bios, 'C', &bit_C)) {
87 if (bit_C.version == 1 && bit_C.length >= 10) 87 if (bit_C.version == 1 && bit_C.length >= 10)
88 data = nvbios_rd16(bios, bit_C.offset + 8); 88 data = nvbios_rd16(bios, bit_C.offset + 8);
89 if (bit_C.version == 2 && bit_C.length >= 4)
90 data = nvbios_rd32(bios, bit_C.offset + 0);
89 if (data) { 91 if (data) {
90 *ver = nvbios_rd08(bios, data + 0); 92 *ver = nvbios_rd08(bios, data + 0);
91 *hdr = nvbios_rd08(bios, data + 1); 93 *hdr = nvbios_rd08(bios, data + 1);
@@ -137,12 +139,12 @@ pll_map(struct nvkm_bios *bios)
137 } 139 }
138} 140}
139 141
140static u16 142static u32
141pll_map_reg(struct nvkm_bios *bios, u32 reg, u32 *type, u8 *ver, u8 *len) 143pll_map_reg(struct nvkm_bios *bios, u32 reg, u32 *type, u8 *ver, u8 *len)
142{ 144{
143 struct pll_mapping *map; 145 struct pll_mapping *map;
144 u8 hdr, cnt; 146 u8 hdr, cnt;
145 u16 data; 147 u32 data;
146 148
147 data = pll_limits_table(bios, ver, &hdr, &cnt, len); 149 data = pll_limits_table(bios, ver, &hdr, &cnt, len);
148 if (data && *ver >= 0x30) { 150 if (data && *ver >= 0x30) {
@@ -160,7 +162,7 @@ pll_map_reg(struct nvkm_bios *bios, u32 reg, u32 *type, u8 *ver, u8 *len)
160 map = pll_map(bios); 162 map = pll_map(bios);
161 while (map && map->reg) { 163 while (map && map->reg) {
162 if (map->reg == reg && *ver >= 0x20) { 164 if (map->reg == reg && *ver >= 0x20) {
163 u16 addr = (data += hdr); 165 u32 addr = (data += hdr);
164 *type = map->type; 166 *type = map->type;
165 while (cnt--) { 167 while (cnt--) {
166 if (nvbios_rd32(bios, data) == map->reg) 168 if (nvbios_rd32(bios, data) == map->reg)
@@ -179,12 +181,12 @@ pll_map_reg(struct nvkm_bios *bios, u32 reg, u32 *type, u8 *ver, u8 *len)
179 return 0x0000; 181 return 0x0000;
180} 182}
181 183
182static u16 184static u32
183pll_map_type(struct nvkm_bios *bios, u8 type, u32 *reg, u8 *ver, u8 *len) 185pll_map_type(struct nvkm_bios *bios, u8 type, u32 *reg, u8 *ver, u8 *len)
184{ 186{
185 struct pll_mapping *map; 187 struct pll_mapping *map;
186 u8 hdr, cnt; 188 u8 hdr, cnt;
187 u16 data; 189 u32 data;
188 190
189 data = pll_limits_table(bios, ver, &hdr, &cnt, len); 191 data = pll_limits_table(bios, ver, &hdr, &cnt, len);
190 if (data && *ver >= 0x30) { 192 if (data && *ver >= 0x30) {
@@ -202,7 +204,7 @@ pll_map_type(struct nvkm_bios *bios, u8 type, u32 *reg, u8 *ver, u8 *len)
202 map = pll_map(bios); 204 map = pll_map(bios);
203 while (map && map->reg) { 205 while (map && map->reg) {
204 if (map->type == type && *ver >= 0x20) { 206 if (map->type == type && *ver >= 0x20) {
205 u16 addr = (data += hdr); 207 u32 addr = (data += hdr);
206 *reg = map->reg; 208 *reg = map->reg;
207 while (cnt--) { 209 while (cnt--) {
208 if (nvbios_rd32(bios, data) == map->reg) 210 if (nvbios_rd32(bios, data) == map->reg)
@@ -228,7 +230,7 @@ nvbios_pll_parse(struct nvkm_bios *bios, u32 type, struct nvbios_pll *info)
228 struct nvkm_device *device = subdev->device; 230 struct nvkm_device *device = subdev->device;
229 u8 ver, len; 231 u8 ver, len;
230 u32 reg = type; 232 u32 reg = type;
231 u16 data; 233 u32 data;
232 234
233 if (type > PLL_MAX) { 235 if (type > PLL_MAX) {
234 reg = type; 236 reg = type;
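
The u16-to-u32 widening through pll.c matters because, with the image remapping above, table offsets can now land beyond 64KiB, where a 16-bit variable would silently truncate them. A tiny illustration with a hypothetical offset:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t table  = 0x12f40;      /* hypothetical offset past 64KiB */
        uint16_t before = table;        /* silently truncates to 0x2f40 */
        uint32_t after  = table;        /* preserved */

        printf("u16: 0x%04x, u32: 0x%08x\n", (unsigned)before, (unsigned)after);
        return 0;
}
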
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c
index c268e5afe852..b4a308f3cf7b 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c
@@ -26,21 +26,6 @@
26#include <subdev/bios/image.h> 26#include <subdev/bios/image.h>
27#include <subdev/bios/pmu.h> 27#include <subdev/bios/pmu.h>
28 28
29static u32
30weirdo_pointer(struct nvkm_bios *bios, u32 data)
31{
32 struct nvbios_image image;
33 int idx = 0;
34 if (nvbios_image(bios, idx++, &image)) {
35 data -= image.size;
36 while (nvbios_image(bios, idx++, &image)) {
37 if (image.type == 0xe0)
38 return image.base + data;
39 }
40 }
41 return 0;
42}
43
44u32 29u32
45nvbios_pmuTe(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len) 30nvbios_pmuTe(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
46{ 31{
@@ -50,7 +35,7 @@ nvbios_pmuTe(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
50 if (!bit_entry(bios, 'p', &bit_p)) { 35 if (!bit_entry(bios, 'p', &bit_p)) {
51 if (bit_p.version == 2 && bit_p.length >= 4) 36 if (bit_p.version == 2 && bit_p.length >= 4)
52 data = nvbios_rd32(bios, bit_p.offset + 0x00); 37 data = nvbios_rd32(bios, bit_p.offset + 0x00);
53 if ((data = weirdo_pointer(bios, data))) { 38 if (data) {
54 *ver = nvbios_rd08(bios, data + 0x00); /* maybe? */ 39 *ver = nvbios_rd08(bios, data + 0x00); /* maybe? */
55 *hdr = nvbios_rd08(bios, data + 0x01); 40 *hdr = nvbios_rd08(bios, data + 0x01);
56 *len = nvbios_rd08(bios, data + 0x02); 41 *len = nvbios_rd08(bios, data + 0x02);
@@ -97,8 +82,7 @@ nvbios_pmuRm(struct nvkm_bios *bios, u8 type, struct nvbios_pmuR *info)
97 u32 data; 82 u32 data;
98 memset(info, 0x00, sizeof(*info)); 83 memset(info, 0x00, sizeof(*info));
99 while ((data = nvbios_pmuEp(bios, idx++, &ver, &hdr, &pmuE))) { 84 while ((data = nvbios_pmuEp(bios, idx++, &ver, &hdr, &pmuE))) {
100 if ( pmuE.type == type && 85 if (pmuE.type == type && (data = pmuE.data)) {
101 (data = weirdo_pointer(bios, pmuE.data))) {
102 info->init_addr_pmu = nvbios_rd32(bios, data + 0x08); 86 info->init_addr_pmu = nvbios_rd32(bios, data + 0x08);
103 info->args_addr_pmu = nvbios_rd32(bios, data + 0x0c); 87 info->args_addr_pmu = nvbios_rd32(bios, data + 0x0c);
104 info->boot_addr = data + 0x30; 88 info->boot_addr = data + 0x30;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c
index d0ae7454764e..b57c370c725d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c
@@ -30,11 +30,11 @@ nvbios_rammapTe(struct nvkm_bios *bios, u8 *ver, u8 *hdr,
30 u8 *cnt, u8 *len, u8 *snr, u8 *ssz) 30 u8 *cnt, u8 *len, u8 *snr, u8 *ssz)
31{ 31{
32 struct bit_entry bit_P; 32 struct bit_entry bit_P;
33 u16 rammap = 0x0000; 33 u32 rammap = 0x0000;
34 34
35 if (!bit_entry(bios, 'P', &bit_P)) { 35 if (!bit_entry(bios, 'P', &bit_P)) {
36 if (bit_P.version == 2) 36 if (bit_P.version == 2)
37 rammap = nvbios_rd16(bios, bit_P.offset + 4); 37 rammap = nvbios_rd32(bios, bit_P.offset + 4);
38 38
39 if (rammap) { 39 if (rammap) {
40 *ver = nvbios_rd08(bios, rammap + 0); 40 *ver = nvbios_rd08(bios, rammap + 0);
@@ -61,7 +61,7 @@ nvbios_rammapEe(struct nvkm_bios *bios, int idx,
61 u8 *ver, u8 *hdr, u8 *cnt, u8 *len) 61 u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
62{ 62{
63 u8 snr, ssz; 63 u8 snr, ssz;
64 u16 rammap = nvbios_rammapTe(bios, ver, hdr, cnt, len, &snr, &ssz); 64 u32 rammap = nvbios_rammapTe(bios, ver, hdr, cnt, len, &snr, &ssz);
65 if (rammap && idx < *cnt) { 65 if (rammap && idx < *cnt) {
66 rammap = rammap + *hdr + (idx * (*len + (snr * ssz))); 66 rammap = rammap + *hdr + (idx * (*len + (snr * ssz)));
67 *hdr = *len; 67 *hdr = *len;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
index 78c449b417b7..89d5543118cf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c
@@ -99,7 +99,7 @@ read_div(struct gf100_clk *clk, int doff, u32 dsrc, u32 dctl)
99{ 99{
100 struct nvkm_device *device = clk->base.subdev.device; 100 struct nvkm_device *device = clk->base.subdev.device;
101 u32 ssrc = nvkm_rd32(device, dsrc + (doff * 4)); 101 u32 ssrc = nvkm_rd32(device, dsrc + (doff * 4));
102 u32 sctl = nvkm_rd32(device, dctl + (doff * 4)); 102 u32 sclk, sctl, sdiv = 2;
103 103
104 switch (ssrc & 0x00000003) { 104 switch (ssrc & 0x00000003) {
105 case 0: 105 case 0:
@@ -109,13 +109,21 @@ read_div(struct gf100_clk *clk, int doff, u32 dsrc, u32 dctl)
109 case 2: 109 case 2:
110 return 100000; 110 return 100000;
111 case 3: 111 case 3:
112 if (sctl & 0x80000000) { 112 sclk = read_vco(clk, dsrc + (doff * 4));
113 u32 sclk = read_vco(clk, dsrc + (doff * 4)); 113
114 u32 sdiv = (sctl & 0x0000003f) + 2; 114 /* Memclk has doff of 0 despite its alt. location */
115 return (sclk * 2) / sdiv; 115 if (doff <= 2) {
116 sctl = nvkm_rd32(device, dctl + (doff * 4));
117
118 if (sctl & 0x80000000) {
119 if (ssrc & 0x100)
120 sctl >>= 8;
121
122 sdiv = (sctl & 0x3f) + 2;
123 }
116 } 124 }
117 125
118 return read_vco(clk, dsrc + (doff * 4)); 126 return (sclk * 2) / sdiv;
119 default: 127 default:
120 return 0; 128 return 0;
121 } 129 }
@@ -366,11 +374,17 @@ gf100_clk_prog_2(struct gf100_clk *clk, int idx)
366 if (info->coef) { 374 if (info->coef) {
367 nvkm_wr32(device, addr + 0x04, info->coef); 375 nvkm_wr32(device, addr + 0x04, info->coef);
368 nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000001); 376 nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000001);
377
378 /* Test PLL lock */
379 nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000000);
369 nvkm_msec(device, 2000, 380 nvkm_msec(device, 2000,
370 if (nvkm_rd32(device, addr + 0x00) & 0x00020000) 381 if (nvkm_rd32(device, addr + 0x00) & 0x00020000)
371 break; 382 break;
372 ); 383 );
373 nvkm_mask(device, addr + 0x00, 0x00020004, 0x00000004); 384 nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000010);
385
386 /* Enable sync mode */
387 nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000004);
374 } 388 }
375 } 389 }
376} 390}
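
The clk change for gf100 (and identically for gk104 in the next hunk) splits PLL lock handling out of the old combined mask write: a control bit is toggled around the 2ms poll for the lock status bit, and sync mode is then enabled as its own step. A standalone sketch of the poll-then-enable ordering, with placeholder bit definitions rather than the real register layout, is:

#include <stdbool.h>
#include <stdint.h>

#define PLL_LOCKED    0x00020000u       /* placeholder for the lock status bit */
#define PLL_SYNC_MODE 0x00000004u       /* placeholder for the sync-mode bit */

static bool pll_wait_lock(volatile uint32_t *ctrl, unsigned int timeout_us)
{
        while (timeout_us--) {
                if (*ctrl & PLL_LOCKED)
                        return true;
        }
        return false;
}

static void pll_finish(volatile uint32_t *ctrl)
{
        (void)pll_wait_lock(ctrl, 2000);        /* give the lock up to 2ms */
        *ctrl |= PLL_SYNC_MODE;                 /* then switch to sync mode */
}
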
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
index 975c401bccab..06bc0d2d6ae1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c
@@ -393,11 +393,17 @@ gk104_clk_prog_2(struct gk104_clk *clk, int idx)
393 if (info->coef) { 393 if (info->coef) {
394 nvkm_wr32(device, addr + 0x04, info->coef); 394 nvkm_wr32(device, addr + 0x04, info->coef);
395 nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000001); 395 nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000001);
396
397 /* Test PLL lock */
398 nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000000);
396 nvkm_msec(device, 2000, 399 nvkm_msec(device, 2000,
397 if (nvkm_rd32(device, addr + 0x00) & 0x00020000) 400 if (nvkm_rd32(device, addr + 0x00) & 0x00020000)
398 break; 401 break;
399 ); 402 );
400 nvkm_mask(device, addr + 0x00, 0x00020004, 0x00000004); 403 nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000010);
404
405 /* Enable sync mode */
406 nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000004);
401 } 407 }
402} 408}
403 409
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
index 5f0ee24e31b8..218893e3e5f9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c
@@ -28,69 +28,6 @@
28#include <core/tegra.h> 28#include <core/tegra.h>
29#include <subdev/timer.h> 29#include <subdev/timer.h>
30 30
31#define KHZ (1000)
32#define MHZ (KHZ * 1000)
33
34#define MASK(w) ((1 << w) - 1)
35
36#define GPCPLL_CFG (SYS_GPCPLL_CFG_BASE + 0)
37#define GPCPLL_CFG_ENABLE BIT(0)
38#define GPCPLL_CFG_IDDQ BIT(1)
39#define GPCPLL_CFG_LOCK_DET_OFF BIT(4)
40#define GPCPLL_CFG_LOCK BIT(17)
41
42#define GPCPLL_COEFF (SYS_GPCPLL_CFG_BASE + 4)
43#define GPCPLL_COEFF_M_SHIFT 0
44#define GPCPLL_COEFF_M_WIDTH 8
45#define GPCPLL_COEFF_N_SHIFT 8
46#define GPCPLL_COEFF_N_WIDTH 8
47#define GPCPLL_COEFF_P_SHIFT 16
48#define GPCPLL_COEFF_P_WIDTH 6
49
50#define GPCPLL_CFG2 (SYS_GPCPLL_CFG_BASE + 0xc)
51#define GPCPLL_CFG2_SETUP2_SHIFT 16
52#define GPCPLL_CFG2_PLL_STEPA_SHIFT 24
53
54#define GPCPLL_CFG3 (SYS_GPCPLL_CFG_BASE + 0x18)
55#define GPCPLL_CFG3_PLL_STEPB_SHIFT 16
56
57#define GPC_BCASE_GPCPLL_CFG_BASE 0x00132800
58#define GPCPLL_NDIV_SLOWDOWN (SYS_GPCPLL_CFG_BASE + 0x1c)
59#define GPCPLL_NDIV_SLOWDOWN_NDIV_LO_SHIFT 0
60#define GPCPLL_NDIV_SLOWDOWN_NDIV_MID_SHIFT 8
61#define GPCPLL_NDIV_SLOWDOWN_STEP_SIZE_LO2MID_SHIFT 16
62#define GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT 22
63#define GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT 31
64
65#define SEL_VCO (SYS_GPCPLL_CFG_BASE + 0x100)
66#define SEL_VCO_GPC2CLK_OUT_SHIFT 0
67
68#define GPC2CLK_OUT (SYS_GPCPLL_CFG_BASE + 0x250)
69#define GPC2CLK_OUT_SDIV14_INDIV4_WIDTH 1
70#define GPC2CLK_OUT_SDIV14_INDIV4_SHIFT 31
71#define GPC2CLK_OUT_SDIV14_INDIV4_MODE 1
72#define GPC2CLK_OUT_VCODIV_WIDTH 6
73#define GPC2CLK_OUT_VCODIV_SHIFT 8
74#define GPC2CLK_OUT_VCODIV1 0
75#define GPC2CLK_OUT_VCODIV_MASK (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << \
76 GPC2CLK_OUT_VCODIV_SHIFT)
77#define GPC2CLK_OUT_BYPDIV_WIDTH 6
78#define GPC2CLK_OUT_BYPDIV_SHIFT 0
79#define GPC2CLK_OUT_BYPDIV31 0x3c
80#define GPC2CLK_OUT_INIT_MASK ((MASK(GPC2CLK_OUT_SDIV14_INDIV4_WIDTH) << \
81 GPC2CLK_OUT_SDIV14_INDIV4_SHIFT)\
82 | (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << GPC2CLK_OUT_VCODIV_SHIFT)\
83 | (MASK(GPC2CLK_OUT_BYPDIV_WIDTH) << GPC2CLK_OUT_BYPDIV_SHIFT))
84#define GPC2CLK_OUT_INIT_VAL ((GPC2CLK_OUT_SDIV14_INDIV4_MODE << \
85 GPC2CLK_OUT_SDIV14_INDIV4_SHIFT) \
86 | (GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT) \
87 | (GPC2CLK_OUT_BYPDIV31 << GPC2CLK_OUT_BYPDIV_SHIFT))
88
89#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG (GPC_BCASE_GPCPLL_CFG_BASE + 0xa0)
90#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT 24
91#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK \
92 (0x1 << GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT)
93
94static const u8 _pl_to_div[] = { 31static const u8 _pl_to_div[] = {
95/* PL: 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 */ 32/* PL: 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 */
96/* p: */ 1, 2, 3, 4, 5, 6, 8, 10, 12, 16, 12, 16, 20, 24, 32, 33/* p: */ 1, 2, 3, 4, 5, 6, 8, 10, 12, 16, 12, 16, 20, 24, 32,
@@ -124,7 +61,7 @@ static const struct gk20a_clk_pllg_params gk20a_pllg_params = {
124 .min_pl = 1, .max_pl = 32, 61 .min_pl = 1, .max_pl = 32,
125}; 62};
126 63
127static void 64void
128gk20a_pllg_read_mnp(struct gk20a_clk *clk, struct gk20a_pll *pll) 65gk20a_pllg_read_mnp(struct gk20a_clk *clk, struct gk20a_pll *pll)
129{ 66{
130 struct nvkm_device *device = clk->base.subdev.device; 67 struct nvkm_device *device = clk->base.subdev.device;
@@ -136,20 +73,33 @@ gk20a_pllg_read_mnp(struct gk20a_clk *clk, struct gk20a_pll *pll)
136 pll->pl = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH); 73 pll->pl = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH);
137} 74}
138 75
139static u32 76void
140gk20a_pllg_calc_rate(struct gk20a_clk *clk) 77gk20a_pllg_write_mnp(struct gk20a_clk *clk, const struct gk20a_pll *pll)
78{
79 struct nvkm_device *device = clk->base.subdev.device;
80 u32 val;
81
82 val = (pll->m & MASK(GPCPLL_COEFF_M_WIDTH)) << GPCPLL_COEFF_M_SHIFT;
83 val |= (pll->n & MASK(GPCPLL_COEFF_N_WIDTH)) << GPCPLL_COEFF_N_SHIFT;
84 val |= (pll->pl & MASK(GPCPLL_COEFF_P_WIDTH)) << GPCPLL_COEFF_P_SHIFT;
85 nvkm_wr32(device, GPCPLL_COEFF, val);
86}
87
88u32
89gk20a_pllg_calc_rate(struct gk20a_clk *clk, struct gk20a_pll *pll)
141{ 90{
142 u32 rate; 91 u32 rate;
143 u32 divider; 92 u32 divider;
144 93
145 rate = clk->parent_rate * clk->pll.n; 94 rate = clk->parent_rate * pll->n;
146 divider = clk->pll.m * clk->pl_to_div(clk->pll.pl); 95 divider = pll->m * clk->pl_to_div(pll->pl);
147 96
148 return rate / divider / 2; 97 return rate / divider / 2;
149} 98}
150 99
151static int 100int
152gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate) 101gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate,
102 struct gk20a_pll *pll)
153{ 103{
154 struct nvkm_subdev *subdev = &clk->base.subdev; 104 struct nvkm_subdev *subdev = &clk->base.subdev;
155 u32 target_clk_f, ref_clk_f, target_freq; 105 u32 target_clk_f, ref_clk_f, target_freq;
@@ -163,16 +113,13 @@ gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate)
163 target_clk_f = rate * 2 / KHZ; 113 target_clk_f = rate * 2 / KHZ;
164 ref_clk_f = clk->parent_rate / KHZ; 114 ref_clk_f = clk->parent_rate / KHZ;
165 115
166 max_vco_f = clk->params->max_vco; 116 target_vco_f = target_clk_f + target_clk_f / 50;
117 max_vco_f = max(clk->params->max_vco, target_vco_f);
167 min_vco_f = clk->params->min_vco; 118 min_vco_f = clk->params->min_vco;
168 best_m = clk->params->max_m; 119 best_m = clk->params->max_m;
169 best_n = clk->params->min_n; 120 best_n = clk->params->min_n;
170 best_pl = clk->params->min_pl; 121 best_pl = clk->params->min_pl;
171 122
172 target_vco_f = target_clk_f + target_clk_f / 50;
173 if (max_vco_f < target_vco_f)
174 max_vco_f = target_vco_f;
175
176 /* min_pl <= high_pl <= max_pl */ 123 /* min_pl <= high_pl <= max_pl */
177 high_pl = (max_vco_f + target_vco_f - 1) / target_vco_f; 124 high_pl = (max_vco_f + target_vco_f - 1) / target_vco_f;
178 high_pl = min(high_pl, clk->params->max_pl); 125 high_pl = min(high_pl, clk->params->max_pl);
@@ -195,9 +142,7 @@ gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate)
195 target_vco_f = target_clk_f * clk->pl_to_div(pl); 142 target_vco_f = target_clk_f * clk->pl_to_div(pl);
196 143
197 for (m = clk->params->min_m; m <= clk->params->max_m; m++) { 144 for (m = clk->params->min_m; m <= clk->params->max_m; m++) {
198 u32 u_f, vco_f; 145 u32 u_f = ref_clk_f / m;
199
200 u_f = ref_clk_f / m;
201 146
202 if (u_f < clk->params->min_u) 147 if (u_f < clk->params->min_u)
203 break; 148 break;
@@ -211,6 +156,8 @@ gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate)
211 break; 156 break;
212 157
213 for (; n <= n2; n++) { 158 for (; n <= n2; n++) {
159 u32 vco_f;
160
214 if (n < clk->params->min_n) 161 if (n < clk->params->min_n)
215 continue; 162 continue;
216 if (n > clk->params->max_n) 163 if (n > clk->params->max_n)
@@ -247,16 +194,16 @@ found_match:
247 "no best match for target @ %dMHz on gpc_pll", 194 "no best match for target @ %dMHz on gpc_pll",
248 target_clk_f / KHZ); 195 target_clk_f / KHZ);
249 196
250 clk->pll.m = best_m; 197 pll->m = best_m;
251 clk->pll.n = best_n; 198 pll->n = best_n;
252 clk->pll.pl = best_pl; 199 pll->pl = best_pl;
253 200
254 target_freq = gk20a_pllg_calc_rate(clk); 201 target_freq = gk20a_pllg_calc_rate(clk, pll);
255 202
256 nvkm_debug(subdev, 203 nvkm_debug(subdev,
257 "actual target freq %d MHz, M %d, N %d, PL %d(div%d)\n", 204 "actual target freq %d KHz, M %d, N %d, PL %d(div%d)\n",
258 target_freq / MHZ, clk->pll.m, clk->pll.n, clk->pll.pl, 205 target_freq / KHZ, pll->m, pll->n, pll->pl,
259 clk->pl_to_div(clk->pll.pl)); 206 clk->pl_to_div(pll->pl));
260 return 0; 207 return 0;
261} 208}
262 209
@@ -265,45 +212,36 @@ gk20a_pllg_slide(struct gk20a_clk *clk, u32 n)
265{ 212{
266 struct nvkm_subdev *subdev = &clk->base.subdev; 213 struct nvkm_subdev *subdev = &clk->base.subdev;
267 struct nvkm_device *device = subdev->device; 214 struct nvkm_device *device = subdev->device;
268 u32 val; 215 struct gk20a_pll pll;
269 int ramp_timeout; 216 int ret = 0;
270 217
271 /* get old coefficients */ 218 /* get old coefficients */
272 val = nvkm_rd32(device, GPCPLL_COEFF); 219 gk20a_pllg_read_mnp(clk, &pll);
273 /* do nothing if NDIV is the same */ 220 /* do nothing if NDIV is the same */
274 if (n == ((val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH))) 221 if (n == pll.n)
275 return 0; 222 return 0;
276 223
277 /* setup */
278 nvkm_mask(device, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT,
279 0x2b << GPCPLL_CFG2_PLL_STEPA_SHIFT);
280 nvkm_mask(device, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT,
281 0xb << GPCPLL_CFG3_PLL_STEPB_SHIFT);
282
283 /* pll slowdown mode */ 224 /* pll slowdown mode */
284 nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN, 225 nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
285 BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT), 226 BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT),
286 BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT)); 227 BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT));
287 228
288 /* new ndiv ready for ramp */ 229 /* new ndiv ready for ramp */
289 val = nvkm_rd32(device, GPCPLL_COEFF); 230 pll.n = n;
290 val &= ~(MASK(GPCPLL_COEFF_N_WIDTH) << GPCPLL_COEFF_N_SHIFT);
291 val |= (n & MASK(GPCPLL_COEFF_N_WIDTH)) << GPCPLL_COEFF_N_SHIFT;
292 udelay(1); 231 udelay(1);
293 nvkm_wr32(device, GPCPLL_COEFF, val); 232 gk20a_pllg_write_mnp(clk, &pll);
294 233
295 /* dynamic ramp to new ndiv */ 234 /* dynamic ramp to new ndiv */
296 val = nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);
297 val |= 0x1 << GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT;
298 udelay(1); 235 udelay(1);
299 nvkm_wr32(device, GPCPLL_NDIV_SLOWDOWN, val); 236 nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
237 BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT),
238 BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT));
300 239
301 for (ramp_timeout = 500; ramp_timeout > 0; ramp_timeout--) { 240 /* wait for ramping to complete */
302 udelay(1); 241 if (nvkm_wait_usec(device, 500, GPC_BCAST_NDIV_SLOWDOWN_DEBUG,
303 val = nvkm_rd32(device, GPC_BCAST_NDIV_SLOWDOWN_DEBUG); 242 GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK,
304 if (val & GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK) 243 GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK) < 0)
305 break; 244 ret = -ETIMEDOUT;
306 }
307 245
308 /* exit slowdown mode */ 246 /* exit slowdown mode */
309 nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN, 247 nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
@@ -311,21 +249,35 @@ gk20a_pllg_slide(struct gk20a_clk *clk, u32 n)
311 BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), 0); 249 BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), 0);
312 nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN); 250 nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);
313 251
314 if (ramp_timeout <= 0) { 252 return ret;
315 nvkm_error(subdev, "gpcpll dynamic ramp timeout\n");
316 return -ETIMEDOUT;
317 }
318
319 return 0;
320} 253}
321 254
322static void 255static int
323gk20a_pllg_enable(struct gk20a_clk *clk) 256gk20a_pllg_enable(struct gk20a_clk *clk)
324{ 257{
325 struct nvkm_device *device = clk->base.subdev.device; 258 struct nvkm_device *device = clk->base.subdev.device;
259 u32 val;
326 260
327 nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE); 261 nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE);
328 nvkm_rd32(device, GPCPLL_CFG); 262 nvkm_rd32(device, GPCPLL_CFG);
263
264 /* enable lock detection */
265 val = nvkm_rd32(device, GPCPLL_CFG);
266 if (val & GPCPLL_CFG_LOCK_DET_OFF) {
267 val &= ~GPCPLL_CFG_LOCK_DET_OFF;
268 nvkm_wr32(device, GPCPLL_CFG, val);
269 }
270
271 /* wait for lock */
272 if (nvkm_wait_usec(device, 300, GPCPLL_CFG, GPCPLL_CFG_LOCK,
273 GPCPLL_CFG_LOCK) < 0)
274 return -ETIMEDOUT;
275
276 /* switch to VCO mode */
277 nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT),
278 BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
279
280 return 0;
329} 281}
330 282
331static void 283static void
@@ -333,117 +285,81 @@ gk20a_pllg_disable(struct gk20a_clk *clk)
333{ 285{
334 struct nvkm_device *device = clk->base.subdev.device; 286 struct nvkm_device *device = clk->base.subdev.device;
335 287
288 /* put PLL in bypass before disabling it */
289 nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);
290
336 nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0); 291 nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0);
337 nvkm_rd32(device, GPCPLL_CFG); 292 nvkm_rd32(device, GPCPLL_CFG);
338} 293}
339 294
340static int 295static int
341_gk20a_pllg_program_mnp(struct gk20a_clk *clk, bool allow_slide) 296gk20a_pllg_program_mnp(struct gk20a_clk *clk, const struct gk20a_pll *pll)
342{ 297{
343 struct nvkm_subdev *subdev = &clk->base.subdev; 298 struct nvkm_subdev *subdev = &clk->base.subdev;
344 struct nvkm_device *device = subdev->device; 299 struct nvkm_device *device = subdev->device;
345 u32 val, cfg; 300 struct gk20a_pll cur_pll;
346 struct gk20a_pll old_pll; 301 int ret;
347 u32 n_lo;
348
349 /* get old coefficients */
350 gk20a_pllg_read_mnp(clk, &old_pll);
351
352 /* do NDIV slide if there is no change in M and PL */
353 cfg = nvkm_rd32(device, GPCPLL_CFG);
354 if (allow_slide && clk->pll.m == old_pll.m &&
355 clk->pll.pl == old_pll.pl && (cfg & GPCPLL_CFG_ENABLE)) {
356 return gk20a_pllg_slide(clk, clk->pll.n);
357 }
358
359 /* slide down to NDIV_LO */
360 if (allow_slide && (cfg & GPCPLL_CFG_ENABLE)) {
361 int ret;
362
363 n_lo = DIV_ROUND_UP(old_pll.m * clk->params->min_vco,
364 clk->parent_rate / KHZ);
365 ret = gk20a_pllg_slide(clk, n_lo);
366 302
367 if (ret) 303 gk20a_pllg_read_mnp(clk, &cur_pll);
368 return ret;
369 }
370 304
371 /* split FO-to-bypass jump in halfs by setting out divider 1:2 */ 305 /* split VCO-to-bypass jump in half by setting out divider 1:2 */
372 nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK, 306 nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
373 0x2 << GPC2CLK_OUT_VCODIV_SHIFT); 307 GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT);
374 308 /* Intentional 2nd write to assure linear divider operation */
375 /* put PLL in bypass before programming it */ 309 nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
376 val = nvkm_rd32(device, SEL_VCO); 310 GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT);
377 val &= ~(BIT(SEL_VCO_GPC2CLK_OUT_SHIFT)); 311 nvkm_rd32(device, GPC2CLK_OUT);
378 udelay(2); 312 udelay(2);
379 nvkm_wr32(device, SEL_VCO, val);
380
381 /* get out from IDDQ */
382 val = nvkm_rd32(device, GPCPLL_CFG);
383 if (val & GPCPLL_CFG_IDDQ) {
384 val &= ~GPCPLL_CFG_IDDQ;
385 nvkm_wr32(device, GPCPLL_CFG, val);
386 nvkm_rd32(device, GPCPLL_CFG);
387 udelay(2);
388 }
389 313
390 gk20a_pllg_disable(clk); 314 gk20a_pllg_disable(clk);
391 315
392 nvkm_debug(subdev, "%s: m=%d n=%d pl=%d\n", __func__, 316 gk20a_pllg_write_mnp(clk, pll);
393 clk->pll.m, clk->pll.n, clk->pll.pl);
394
395 n_lo = DIV_ROUND_UP(clk->pll.m * clk->params->min_vco,
396 clk->parent_rate / KHZ);
397 val = clk->pll.m << GPCPLL_COEFF_M_SHIFT;
398 val |= (allow_slide ? n_lo : clk->pll.n) << GPCPLL_COEFF_N_SHIFT;
399 val |= clk->pll.pl << GPCPLL_COEFF_P_SHIFT;
400 nvkm_wr32(device, GPCPLL_COEFF, val);
401 317
402 gk20a_pllg_enable(clk); 318 ret = gk20a_pllg_enable(clk);
403 319 if (ret)
404 val = nvkm_rd32(device, GPCPLL_CFG); 320 return ret;
405 if (val & GPCPLL_CFG_LOCK_DET_OFF) {
406 val &= ~GPCPLL_CFG_LOCK_DET_OFF;
407 nvkm_wr32(device, GPCPLL_CFG, val);
408 }
409
410 if (nvkm_usec(device, 300,
411 if (nvkm_rd32(device, GPCPLL_CFG) & GPCPLL_CFG_LOCK)
412 break;
413 ) < 0)
414 return -ETIMEDOUT;
415
416 /* switch to VCO mode */
417 nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT),
418 BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
419 321
420 /* restore out divider 1:1 */ 322 /* restore out divider 1:1 */
421 val = nvkm_rd32(device, GPC2CLK_OUT); 323 udelay(2);
422 if ((val & GPC2CLK_OUT_VCODIV_MASK) != 324 nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
423 (GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT)) { 325 GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT);
424 val &= ~GPC2CLK_OUT_VCODIV_MASK; 326 /* Intentional 2nd write to assure linear divider operation */
425 val |= GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT; 327 nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
426 udelay(2); 328 GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT);
427 nvkm_wr32(device, GPC2CLK_OUT, val); 329 nvkm_rd32(device, GPC2CLK_OUT);
428 /* Intentional 2nd write to assure linear divider operation */
429 nvkm_wr32(device, GPC2CLK_OUT, val);
430 nvkm_rd32(device, GPC2CLK_OUT);
431 }
432 330
433 /* slide up to new NDIV */ 331 return 0;
434 return allow_slide ? gk20a_pllg_slide(clk, clk->pll.n) : 0;
435} 332}
436 333
437static int 334static int
438gk20a_pllg_program_mnp(struct gk20a_clk *clk) 335gk20a_pllg_program_mnp_slide(struct gk20a_clk *clk, const struct gk20a_pll *pll)
439{ 336{
440 int err; 337 struct gk20a_pll cur_pll;
338 int ret;
441 339
442 err = _gk20a_pllg_program_mnp(clk, true); 340 if (gk20a_pllg_is_enabled(clk)) {
443 if (err) 341 gk20a_pllg_read_mnp(clk, &cur_pll);
444 err = _gk20a_pllg_program_mnp(clk, false); 342
343 /* just do NDIV slide if there is no change to M and PL */
344 if (pll->m == cur_pll.m && pll->pl == cur_pll.pl)
345 return gk20a_pllg_slide(clk, pll->n);
346
347 /* slide down to current NDIV_LO */
348 cur_pll.n = gk20a_pllg_n_lo(clk, &cur_pll);
349 ret = gk20a_pllg_slide(clk, cur_pll.n);
350 if (ret)
351 return ret;
352 }
353
354 /* program MNP with the new clock parameters and new NDIV_LO */
355 cur_pll = *pll;
356 cur_pll.n = gk20a_pllg_n_lo(clk, &cur_pll);
357 ret = gk20a_pllg_program_mnp(clk, &cur_pll);
358 if (ret)
359 return ret;
445 360
446 return err; 361 /* slide up to new NDIV */
362 return gk20a_pllg_slide(clk, pll->n);
447} 363}
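To make the slide sequence above concrete (numbers illustrative, not taken from the driver): moving from N = 60 to N = 70 with M and PL unchanged is a single gk20a_pllg_slide() call; if PL also changes, the PLL is first slid down to the current NDIV_LO (say 34), reprogrammed with the new M/PL and the new NDIV_LO while briefly disabled, and only then slid up to the final N = 70.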
448 364
449static struct nvkm_pstate 365static struct nvkm_pstate
@@ -546,13 +462,14 @@ gk20a_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
546 struct gk20a_clk *clk = gk20a_clk(base); 462 struct gk20a_clk *clk = gk20a_clk(base);
547 struct nvkm_subdev *subdev = &clk->base.subdev; 463 struct nvkm_subdev *subdev = &clk->base.subdev;
548 struct nvkm_device *device = subdev->device; 464 struct nvkm_device *device = subdev->device;
465 struct gk20a_pll pll;
549 466
550 switch (src) { 467 switch (src) {
551 case nv_clk_src_crystal: 468 case nv_clk_src_crystal:
552 return device->crystal; 469 return device->crystal;
553 case nv_clk_src_gpc: 470 case nv_clk_src_gpc:
554 gk20a_pllg_read_mnp(clk, &clk->pll); 471 gk20a_pllg_read_mnp(clk, &pll);
555 return gk20a_pllg_calc_rate(clk) / GK20A_CLK_GPC_MDIV; 472 return gk20a_pllg_calc_rate(clk, &pll) / GK20A_CLK_GPC_MDIV;
556 default: 473 default:
557 nvkm_error(subdev, "invalid clock source %d\n", src); 474 nvkm_error(subdev, "invalid clock source %d\n", src);
558 return -EINVAL; 475 return -EINVAL;
@@ -565,15 +482,20 @@ gk20a_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
565 struct gk20a_clk *clk = gk20a_clk(base); 482 struct gk20a_clk *clk = gk20a_clk(base);
566 483
567 return gk20a_pllg_calc_mnp(clk, cstate->domain[nv_clk_src_gpc] * 484 return gk20a_pllg_calc_mnp(clk, cstate->domain[nv_clk_src_gpc] *
568 GK20A_CLK_GPC_MDIV); 485 GK20A_CLK_GPC_MDIV, &clk->pll);
569} 486}
570 487
571int 488int
572gk20a_clk_prog(struct nvkm_clk *base) 489gk20a_clk_prog(struct nvkm_clk *base)
573{ 490{
574 struct gk20a_clk *clk = gk20a_clk(base); 491 struct gk20a_clk *clk = gk20a_clk(base);
492 int ret;
493
494 ret = gk20a_pllg_program_mnp_slide(clk, &clk->pll);
495 if (ret)
496 ret = gk20a_pllg_program_mnp(clk, &clk->pll);
575 497
576 return gk20a_pllg_program_mnp(clk); 498 return ret;
577} 499}
578 500
579void 501void
@@ -581,29 +503,62 @@ gk20a_clk_tidy(struct nvkm_clk *base)
581{ 503{
582} 504}
583 505
506int
507gk20a_clk_setup_slide(struct gk20a_clk *clk)
508{
509 struct nvkm_subdev *subdev = &clk->base.subdev;
510 struct nvkm_device *device = subdev->device;
511 u32 step_a, step_b;
512
513 switch (clk->parent_rate) {
514 case 12000000:
515 case 12800000:
516 case 13000000:
517 step_a = 0x2b;
518 step_b = 0x0b;
519 break;
520 case 19200000:
521 step_a = 0x12;
522 step_b = 0x08;
523 break;
524 case 38400000:
525 step_a = 0x04;
526 step_b = 0x05;
527 break;
528 default:
529 nvkm_error(subdev, "invalid parent clock rate %u KHz",
530 clk->parent_rate / KHZ);
531 return -EINVAL;
532 }
533
534 nvkm_mask(device, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT,
535 step_a << GPCPLL_CFG2_PLL_STEPA_SHIFT);
536 nvkm_mask(device, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT,
537 step_b << GPCPLL_CFG3_PLL_STEPB_SHIFT);
538
539 return 0;
540}
541
584void 542void
585gk20a_clk_fini(struct nvkm_clk *base) 543gk20a_clk_fini(struct nvkm_clk *base)
586{ 544{
587 struct nvkm_device *device = base->subdev.device; 545 struct nvkm_device *device = base->subdev.device;
588 struct gk20a_clk *clk = gk20a_clk(base); 546 struct gk20a_clk *clk = gk20a_clk(base);
589 u32 val;
590 547
591 /* slide to VCO min */ 548 /* slide to VCO min */
592 val = nvkm_rd32(device, GPCPLL_CFG); 549 if (gk20a_pllg_is_enabled(clk)) {
593 if (val & GPCPLL_CFG_ENABLE) {
594 struct gk20a_pll pll; 550 struct gk20a_pll pll;
595 u32 n_lo; 551 u32 n_lo;
596 552
597 gk20a_pllg_read_mnp(clk, &pll); 553 gk20a_pllg_read_mnp(clk, &pll);
598 n_lo = DIV_ROUND_UP(pll.m * clk->params->min_vco, 554 n_lo = gk20a_pllg_n_lo(clk, &pll);
599 clk->parent_rate / KHZ);
600 gk20a_pllg_slide(clk, n_lo); 555 gk20a_pllg_slide(clk, n_lo);
601 } 556 }
602 557
603 /* put PLL in bypass before disabling it */
604 nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);
605
606 gk20a_pllg_disable(clk); 558 gk20a_pllg_disable(clk);
559
560 /* set IDDQ */
561 nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 1);
607} 562}
608 563
609static int 564static int
@@ -614,9 +569,18 @@ gk20a_clk_init(struct nvkm_clk *base)
614 struct nvkm_device *device = subdev->device; 569 struct nvkm_device *device = subdev->device;
615 int ret; 570 int ret;
616 571
572 /* get out from IDDQ */
573 nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 0);
574 nvkm_rd32(device, GPCPLL_CFG);
575 udelay(5);
576
617 nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK, 577 nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK,
618 GPC2CLK_OUT_INIT_VAL); 578 GPC2CLK_OUT_INIT_VAL);
619 579
580 ret = gk20a_clk_setup_slide(clk);
581 if (ret)
582 return ret;
583
620 /* Start with lowest frequency */ 584 /* Start with lowest frequency */
621 base->func->calc(base, &base->func->pstates[0].base); 585 base->func->calc(base, &base->func->pstates[0].base);
622 ret = base->func->prog(&clk->base); 586 ret = base->func->prog(&clk->base);
@@ -646,7 +610,7 @@ gk20a_clk = {
646}; 610};
647 611
648int 612int
649_gk20a_clk_ctor(struct nvkm_device *device, int index, 613gk20a_clk_ctor(struct nvkm_device *device, int index,
650 const struct nvkm_clk_func *func, 614 const struct nvkm_clk_func *func,
651 const struct gk20a_clk_pllg_params *params, 615 const struct gk20a_clk_pllg_params *params,
652 struct gk20a_clk *clk) 616 struct gk20a_clk *clk)
@@ -685,7 +649,7 @@ gk20a_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
685 return -ENOMEM; 649 return -ENOMEM;
686 *pclk = &clk->base; 650 *pclk = &clk->base;
687 651
688 ret = _gk20a_clk_ctor(device, index, &gk20a_clk, &gk20a_pllg_params, 652 ret = gk20a_clk_ctor(device, index, &gk20a_clk, &gk20a_pllg_params,
689 clk); 653 clk);
690 654
691 clk->pl_to_div = pl_to_div; 655 clk->pl_to_div = pl_to_div;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h
index 13c46740197d..0d1450972162 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h
@@ -24,9 +24,79 @@
24#ifndef __NVKM_CLK_GK20A_H__ 24#ifndef __NVKM_CLK_GK20A_H__
25#define __NVKM_CLK_GK20A_H__ 25#define __NVKM_CLK_GK20A_H__
26 26
27#define KHZ (1000)
28#define MHZ (KHZ * 1000)
29
30#define MASK(w) ((1 << (w)) - 1)
31
27#define GK20A_CLK_GPC_MDIV 1000 32#define GK20A_CLK_GPC_MDIV 1000
28 33
29#define SYS_GPCPLL_CFG_BASE 0x00137000 34#define SYS_GPCPLL_CFG_BASE 0x00137000
35#define GPCPLL_CFG (SYS_GPCPLL_CFG_BASE + 0)
36#define GPCPLL_CFG_ENABLE BIT(0)
37#define GPCPLL_CFG_IDDQ BIT(1)
38#define GPCPLL_CFG_LOCK_DET_OFF BIT(4)
39#define GPCPLL_CFG_LOCK BIT(17)
40
41#define GPCPLL_CFG2 (SYS_GPCPLL_CFG_BASE + 0xc)
42#define GPCPLL_CFG2_SETUP2_SHIFT 16
43#define GPCPLL_CFG2_PLL_STEPA_SHIFT 24
44
45#define GPCPLL_CFG3 (SYS_GPCPLL_CFG_BASE + 0x18)
46#define GPCPLL_CFG3_VCO_CTRL_SHIFT 0
47#define GPCPLL_CFG3_VCO_CTRL_WIDTH 9
48#define GPCPLL_CFG3_VCO_CTRL_MASK \
49 (MASK(GPCPLL_CFG3_VCO_CTRL_WIDTH) << GPCPLL_CFG3_VCO_CTRL_SHIFT)
50#define GPCPLL_CFG3_PLL_STEPB_SHIFT 16
51#define GPCPLL_CFG3_PLL_STEPB_WIDTH 8
52
53#define GPCPLL_COEFF (SYS_GPCPLL_CFG_BASE + 4)
54#define GPCPLL_COEFF_M_SHIFT 0
55#define GPCPLL_COEFF_M_WIDTH 8
56#define GPCPLL_COEFF_N_SHIFT 8
57#define GPCPLL_COEFF_N_WIDTH 8
58#define GPCPLL_COEFF_N_MASK \
59 (MASK(GPCPLL_COEFF_N_WIDTH) << GPCPLL_COEFF_N_SHIFT)
60#define GPCPLL_COEFF_P_SHIFT 16
61#define GPCPLL_COEFF_P_WIDTH 6
62
63#define GPCPLL_NDIV_SLOWDOWN (SYS_GPCPLL_CFG_BASE + 0x1c)
64#define GPCPLL_NDIV_SLOWDOWN_NDIV_LO_SHIFT 0
65#define GPCPLL_NDIV_SLOWDOWN_NDIV_MID_SHIFT 8
66#define GPCPLL_NDIV_SLOWDOWN_STEP_SIZE_LO2MID_SHIFT 16
67#define GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT 22
68#define GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT 31
69
70#define GPC_BCAST_GPCPLL_CFG_BASE 0x00132800
71#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG (GPC_BCAST_GPCPLL_CFG_BASE + 0xa0)
72#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT 24
73#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK \
74 (0x1 << GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT)
75
76#define SEL_VCO (SYS_GPCPLL_CFG_BASE + 0x100)
77#define SEL_VCO_GPC2CLK_OUT_SHIFT 0
78
79#define GPC2CLK_OUT (SYS_GPCPLL_CFG_BASE + 0x250)
80#define GPC2CLK_OUT_SDIV14_INDIV4_WIDTH 1
81#define GPC2CLK_OUT_SDIV14_INDIV4_SHIFT 31
82#define GPC2CLK_OUT_SDIV14_INDIV4_MODE 1
83#define GPC2CLK_OUT_VCODIV_WIDTH 6
84#define GPC2CLK_OUT_VCODIV_SHIFT 8
85#define GPC2CLK_OUT_VCODIV1 0
86#define GPC2CLK_OUT_VCODIV2 2
87#define GPC2CLK_OUT_VCODIV_MASK (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << \
88 GPC2CLK_OUT_VCODIV_SHIFT)
89#define GPC2CLK_OUT_BYPDIV_WIDTH 6
90#define GPC2CLK_OUT_BYPDIV_SHIFT 0
91#define GPC2CLK_OUT_BYPDIV31 0x3c
92#define GPC2CLK_OUT_INIT_MASK ((MASK(GPC2CLK_OUT_SDIV14_INDIV4_WIDTH) << \
93 GPC2CLK_OUT_SDIV14_INDIV4_SHIFT)\
94 | (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << GPC2CLK_OUT_VCODIV_SHIFT)\
95 | (MASK(GPC2CLK_OUT_BYPDIV_WIDTH) << GPC2CLK_OUT_BYPDIV_SHIFT))
96#define GPC2CLK_OUT_INIT_VAL ((GPC2CLK_OUT_SDIV14_INDIV4_MODE << \
97 GPC2CLK_OUT_SDIV14_INDIV4_SHIFT) \
98 | (GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT) \
99 | (GPC2CLK_OUT_BYPDIV31 << GPC2CLK_OUT_BYPDIV_SHIFT))
30 100
31/* All frequencies in Khz */ 101/* All frequencies in Khz */
32struct gk20a_clk_pllg_params { 102struct gk20a_clk_pllg_params {
@@ -54,7 +124,29 @@ struct gk20a_clk {
54}; 124};
55#define gk20a_clk(p) container_of((p), struct gk20a_clk, base) 125#define gk20a_clk(p) container_of((p), struct gk20a_clk, base)
56 126
57int _gk20a_clk_ctor(struct nvkm_device *, int, const struct nvkm_clk_func *, 127u32 gk20a_pllg_calc_rate(struct gk20a_clk *, struct gk20a_pll *);
128int gk20a_pllg_calc_mnp(struct gk20a_clk *, unsigned long, struct gk20a_pll *);
129void gk20a_pllg_read_mnp(struct gk20a_clk *, struct gk20a_pll *);
130void gk20a_pllg_write_mnp(struct gk20a_clk *, const struct gk20a_pll *);
131
132static inline bool
133gk20a_pllg_is_enabled(struct gk20a_clk *clk)
134{
135 struct nvkm_device *device = clk->base.subdev.device;
136 u32 val;
137
138 val = nvkm_rd32(device, GPCPLL_CFG);
139 return val & GPCPLL_CFG_ENABLE;
140}
141
142static inline u32
143gk20a_pllg_n_lo(struct gk20a_clk *clk, struct gk20a_pll *pll)
144{
145 return DIV_ROUND_UP(pll->m * clk->params->min_vco,
146 clk->parent_rate / KHZ);
147}
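As a quick illustration of gk20a_pllg_n_lo() (values hypothetical): with min_vco = 1,300,000 kHz, a 38.4 MHz parent clock (38,400 kHz) and M = 1, the helper returns DIV_ROUND_UP(1 * 1300000, 38400) = 34, i.e. the smallest NDIV that keeps the VCO at or above its minimum rate.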
148
149int gk20a_clk_ctor(struct nvkm_device *, int, const struct nvkm_clk_func *,
58 const struct gk20a_clk_pllg_params *, struct gk20a_clk *); 150 const struct gk20a_clk_pllg_params *, struct gk20a_clk *);
59void gk20a_clk_fini(struct nvkm_clk *); 151void gk20a_clk_fini(struct nvkm_clk *);
60int gk20a_clk_read(struct nvkm_clk *, enum nv_clk_src); 152int gk20a_clk_read(struct nvkm_clk *, enum nv_clk_src);
@@ -62,4 +154,6 @@ int gk20a_clk_calc(struct nvkm_clk *, struct nvkm_cstate *);
62int gk20a_clk_prog(struct nvkm_clk *); 154int gk20a_clk_prog(struct nvkm_clk *);
63void gk20a_clk_tidy(struct nvkm_clk *); 155void gk20a_clk_tidy(struct nvkm_clk *);
64 156
157int gk20a_clk_setup_slide(struct gk20a_clk *);
158
65#endif 159#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c
index 71b2bbb61973..b284e949f732 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c
@@ -21,20 +21,123 @@
21 */ 21 */
22 22
23#include <subdev/clk.h> 23#include <subdev/clk.h>
24#include <subdev/volt.h>
25#include <subdev/timer.h>
24#include <core/device.h> 26#include <core/device.h>
27#include <core/tegra.h>
25 28
26#include "priv.h" 29#include "priv.h"
27#include "gk20a.h" 30#include "gk20a.h"
28 31
29#define KHZ (1000) 32#define GPCPLL_CFG_SYNC_MODE BIT(2)
30#define MHZ (KHZ * 1000)
31
32#define MASK(w) ((1 << w) - 1)
33 33
34#define BYPASSCTRL_SYS (SYS_GPCPLL_CFG_BASE + 0x340) 34#define BYPASSCTRL_SYS (SYS_GPCPLL_CFG_BASE + 0x340)
35#define BYPASSCTRL_SYS_GPCPLL_SHIFT 0 35#define BYPASSCTRL_SYS_GPCPLL_SHIFT 0
36#define BYPASSCTRL_SYS_GPCPLL_WIDTH 1 36#define BYPASSCTRL_SYS_GPCPLL_WIDTH 1
37 37
38#define GPCPLL_CFG2_SDM_DIN_SHIFT 0
39#define GPCPLL_CFG2_SDM_DIN_WIDTH 8
40#define GPCPLL_CFG2_SDM_DIN_MASK \
41 (MASK(GPCPLL_CFG2_SDM_DIN_WIDTH) << GPCPLL_CFG2_SDM_DIN_SHIFT)
42#define GPCPLL_CFG2_SDM_DIN_NEW_SHIFT 8
43#define GPCPLL_CFG2_SDM_DIN_NEW_WIDTH 15
44#define GPCPLL_CFG2_SDM_DIN_NEW_MASK \
45 (MASK(GPCPLL_CFG2_SDM_DIN_NEW_WIDTH) << GPCPLL_CFG2_SDM_DIN_NEW_SHIFT)
46#define GPCPLL_CFG2_SETUP2_SHIFT 16
47#define GPCPLL_CFG2_PLL_STEPA_SHIFT 24
48
49#define GPCPLL_DVFS0 (SYS_GPCPLL_CFG_BASE + 0x10)
50#define GPCPLL_DVFS0_DFS_COEFF_SHIFT 0
51#define GPCPLL_DVFS0_DFS_COEFF_WIDTH 7
52#define GPCPLL_DVFS0_DFS_COEFF_MASK \
53 (MASK(GPCPLL_DVFS0_DFS_COEFF_WIDTH) << GPCPLL_DVFS0_DFS_COEFF_SHIFT)
54#define GPCPLL_DVFS0_DFS_DET_MAX_SHIFT 8
55#define GPCPLL_DVFS0_DFS_DET_MAX_WIDTH 7
56#define GPCPLL_DVFS0_DFS_DET_MAX_MASK \
57 (MASK(GPCPLL_DVFS0_DFS_DET_MAX_WIDTH) << GPCPLL_DVFS0_DFS_DET_MAX_SHIFT)
58
59#define GPCPLL_DVFS1 (SYS_GPCPLL_CFG_BASE + 0x14)
60#define GPCPLL_DVFS1_DFS_EXT_DET_SHIFT 0
61#define GPCPLL_DVFS1_DFS_EXT_DET_WIDTH 7
62#define GPCPLL_DVFS1_DFS_EXT_STRB_SHIFT 7
63#define GPCPLL_DVFS1_DFS_EXT_STRB_WIDTH 1
64#define GPCPLL_DVFS1_DFS_EXT_CAL_SHIFT 8
65#define GPCPLL_DVFS1_DFS_EXT_CAL_WIDTH 7
66#define GPCPLL_DVFS1_DFS_EXT_SEL_SHIFT 15
67#define GPCPLL_DVFS1_DFS_EXT_SEL_WIDTH 1
68#define GPCPLL_DVFS1_DFS_CTRL_SHIFT 16
69#define GPCPLL_DVFS1_DFS_CTRL_WIDTH 12
70#define GPCPLL_DVFS1_EN_SDM_SHIFT 28
71#define GPCPLL_DVFS1_EN_SDM_WIDTH 1
72#define GPCPLL_DVFS1_EN_SDM_BIT BIT(28)
73#define GPCPLL_DVFS1_EN_DFS_SHIFT 29
74#define GPCPLL_DVFS1_EN_DFS_WIDTH 1
75#define GPCPLL_DVFS1_EN_DFS_BIT BIT(29)
76#define GPCPLL_DVFS1_EN_DFS_CAL_SHIFT 30
77#define GPCPLL_DVFS1_EN_DFS_CAL_WIDTH 1
78#define GPCPLL_DVFS1_EN_DFS_CAL_BIT BIT(30)
79#define GPCPLL_DVFS1_DFS_CAL_DONE_SHIFT 31
80#define GPCPLL_DVFS1_DFS_CAL_DONE_WIDTH 1
81#define GPCPLL_DVFS1_DFS_CAL_DONE_BIT BIT(31)
82
83#define GPC_BCAST_GPCPLL_DVFS2 (GPC_BCAST_GPCPLL_CFG_BASE + 0x20)
84#define GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT BIT(16)
85
86#define GPCPLL_CFG3_PLL_DFS_TESTOUT_SHIFT 24
87#define GPCPLL_CFG3_PLL_DFS_TESTOUT_WIDTH 7
88
89#define DFS_DET_RANGE 6 /* -2^6 ... 2^6-1 */
90#define SDM_DIN_RANGE 12 /* -2^12 ... 2^12-1 */
91
92struct gm20b_clk_dvfs_params {
93 s32 coeff_slope;
94 s32 coeff_offs;
95 u32 vco_ctrl;
96};
97
98static const struct gm20b_clk_dvfs_params gm20b_dvfs_params = {
99 .coeff_slope = -165230,
100 .coeff_offs = 214007,
101 .vco_ctrl = 0x7 << 3,
102};
103
104/*
105 * base.n is now the *integer* part of the N factor.
106 * sdm_din contains n's decimal part.
107 */
108struct gm20b_pll {
109 struct gk20a_pll base;
110 u32 sdm_din;
111};
112
113struct gm20b_clk_dvfs {
114 u32 dfs_coeff;
115 s32 dfs_det_max;
116 s32 dfs_ext_cal;
117};
118
119struct gm20b_clk {
120 /* currently applied parameters */
121 struct gk20a_clk base;
122 struct gm20b_clk_dvfs dvfs;
123 u32 uv;
124
125 /* new parameters to apply */
126 struct gk20a_pll new_pll;
127 struct gm20b_clk_dvfs new_dvfs;
128 u32 new_uv;
129
130 const struct gm20b_clk_dvfs_params *dvfs_params;
131
132 /* fused parameters */
133 s32 uvdet_slope;
134 s32 uvdet_offs;
135
136 /* safe frequency we can use at minimum voltage */
137 u32 safe_fmax_vmin;
138};
139#define gm20b_clk(p) container_of((gk20a_clk(p)), struct gm20b_clk, base)
140
38static u32 pl_to_div(u32 pl) 141static u32 pl_to_div(u32 pl)
39{ 142{
40 return pl; 143 return pl;
@@ -53,6 +156,484 @@ static const struct gk20a_clk_pllg_params gm20b_pllg_params = {
53 .min_pl = 1, .max_pl = 31, 156 .min_pl = 1, .max_pl = 31,
54}; 157};
55 158
159static void
160gm20b_pllg_read_mnp(struct gm20b_clk *clk, struct gm20b_pll *pll)
161{
162 struct nvkm_subdev *subdev = &clk->base.base.subdev;
163 struct nvkm_device *device = subdev->device;
164 u32 val;
165
166 gk20a_pllg_read_mnp(&clk->base, &pll->base);
167 val = nvkm_rd32(device, GPCPLL_CFG2);
168 pll->sdm_din = (val >> GPCPLL_CFG2_SDM_DIN_SHIFT) &
169 MASK(GPCPLL_CFG2_SDM_DIN_WIDTH);
170}
171
172static void
173gm20b_pllg_write_mnp(struct gm20b_clk *clk, const struct gm20b_pll *pll)
174{
175 struct nvkm_device *device = clk->base.base.subdev.device;
176
177 nvkm_mask(device, GPCPLL_CFG2, GPCPLL_CFG2_SDM_DIN_MASK,
178 pll->sdm_din << GPCPLL_CFG2_SDM_DIN_SHIFT);
179 gk20a_pllg_write_mnp(&clk->base, &pll->base);
180}
181
182/*
183 * Determine DFS_COEFF for the requested voltage. Always select external
184 * calibration override equal to the voltage, and set maximum detection
185 * limit "0" (to make sure that PLL output remains under F/V curve when
186 * voltage increases).
187 */
188static void
189gm20b_dvfs_calc_det_coeff(struct gm20b_clk *clk, s32 uv,
190 struct gm20b_clk_dvfs *dvfs)
191{
192 struct nvkm_subdev *subdev = &clk->base.base.subdev;
193 const struct gm20b_clk_dvfs_params *p = clk->dvfs_params;
194 u32 coeff;
195 /* Work with mv as uv would likely trigger an overflow */
196 s32 mv = DIV_ROUND_CLOSEST(uv, 1000);
197
198 /* coeff = slope * voltage + offset */
199 coeff = DIV_ROUND_CLOSEST(mv * p->coeff_slope, 1000) + p->coeff_offs;
200 coeff = DIV_ROUND_CLOSEST(coeff, 1000);
201 dvfs->dfs_coeff = min_t(u32, coeff, MASK(GPCPLL_DVFS0_DFS_COEFF_WIDTH));
202
203 dvfs->dfs_ext_cal = DIV_ROUND_CLOSEST(uv - clk->uvdet_offs,
204 clk->uvdet_slope);
205 /* should never happen */
206 if (abs(dvfs->dfs_ext_cal) >= BIT(DFS_DET_RANGE))
207 nvkm_error(subdev, "dfs_ext_cal overflow!\n");
208
209 dvfs->dfs_det_max = 0;
210
211 nvkm_debug(subdev, "%s uv: %d coeff: %x, ext_cal: %d, det_max: %d\n",
212 __func__, uv, dvfs->dfs_coeff, dvfs->dfs_ext_cal,
213 dvfs->dfs_det_max);
214}
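For a rough feel of the coefficient formula described above (coeff = slope * voltage + offset, computed in millivolts, rescaled and clamped to the 7-bit DFS_COEFF field), a minimal sketch follows; the helper name is made up and plain truncating division stands in for DIV_ROUND_CLOSEST, so it is illustrative only, not driver code:

	/* illustrative sketch, not driver code */
	static u32 dfs_coeff_for_uv(s32 uv, s32 slope, s32 offs)
	{
		s32 mv = uv / 1000;                     /* microvolts -> millivolts */
		s32 coeff = (mv * slope) / 1000 + offs; /* result is 1000x register units */

		coeff /= 1000;                          /* back to register units */
		if (coeff < 0)
			coeff = 0;
		return min_t(u32, coeff, MASK(GPCPLL_DVFS0_DFS_COEFF_WIDTH));
	}

With the gm20b_dvfs_params above (slope -165230, offset 214007) and uv = 900000, this gives mv = 900 and (900 * -165230) / 1000 + 214007 = 65300, i.e. a coefficient of 65 after the final scaling, comfortably within the 7-bit field.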
215
216/*
217 * Solve equation for integer and fractional part of the effective NDIV:
218 *
219 * n_eff = n_int + 1/2 + (SDM_DIN / 2^(SDM_DIN_RANGE + 1)) +
220 * (DVFS_COEFF * DVFS_DET_DELTA) / 2^DFS_DET_RANGE
221 *
222 * The SDM_DIN LSB is finally shifted out, since it is not accessible by sw.
223 */
224static void
225gm20b_dvfs_calc_ndiv(struct gm20b_clk *clk, u32 n_eff, u32 *n_int, u32 *sdm_din)
226{
227 struct nvkm_subdev *subdev = &clk->base.base.subdev;
228 const struct gk20a_clk_pllg_params *p = clk->base.params;
229 u32 n;
230 s32 det_delta;
231 u32 rem, rem_range;
232
233 /* calculate current ext_cal and subtract previous one */
234 det_delta = DIV_ROUND_CLOSEST(((s32)clk->uv) - clk->uvdet_offs,
235 clk->uvdet_slope);
236 det_delta -= clk->dvfs.dfs_ext_cal;
237 det_delta = min(det_delta, clk->dvfs.dfs_det_max);
238 det_delta *= clk->dvfs.dfs_coeff;
239
240 /* integer part of n */
241 n = (n_eff << DFS_DET_RANGE) - det_delta;
242 /* should never happen! */
243 if (n <= 0) {
244 nvkm_error(subdev, "ndiv <= 0 - setting to 1...\n");
245 n = 1 << DFS_DET_RANGE;
246 }
247 if (n >> DFS_DET_RANGE > p->max_n) {
248 nvkm_error(subdev, "ndiv > max_n - setting to max_n...\n");
249 n = p->max_n << DFS_DET_RANGE;
250 }
251 *n_int = n >> DFS_DET_RANGE;
252
253 /* fractional part of n */
254 rem = ((u32)n) & MASK(DFS_DET_RANGE);
255 rem_range = SDM_DIN_RANGE + 1 - DFS_DET_RANGE;
256 /* subtract 2^SDM_DIN_RANGE to account for the 1/2 of the equation */
257 rem = (rem << rem_range) - BIT(SDM_DIN_RANGE);
258 /* lose 8 LSB and clip - sdm_din only keeps the most significant byte */
259 *sdm_din = (rem >> BITS_PER_BYTE) & MASK(GPCPLL_CFG2_SDM_DIN_WIDTH);
260
261 nvkm_debug(subdev, "%s n_eff: %d, n_int: %d, sdm_din: %d\n", __func__,
262 n_eff, *n_int, *sdm_din);
263}
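A worked instance of the fixed-point split above (numbers purely illustrative): with n_eff = 100, a det_delta of 20 and the ranges defined earlier (DFS_DET_RANGE = 6, SDM_DIN_RANGE = 12), n = (100 << 6) - 20 = 6380, so n_int = 6380 >> 6 = 99 and the remainder is 6380 & 0x3f = 44; rescaling gives (44 << 7) - 4096 = 1536, and keeping the most significant byte yields sdm_din = 1536 >> 8 = 6.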
264
265static int
266gm20b_pllg_slide(struct gm20b_clk *clk, u32 n)
267{
268 struct nvkm_subdev *subdev = &clk->base.base.subdev;
269 struct nvkm_device *device = subdev->device;
270 struct gm20b_pll pll;
271 u32 n_int, sdm_din;
272 int ret = 0;
273
274 /* calculate the new n_int/sdm_din for this n/uv */
275 gm20b_dvfs_calc_ndiv(clk, n, &n_int, &sdm_din);
276
277 /* get old coefficients */
278 gm20b_pllg_read_mnp(clk, &pll);
279 /* do nothing if NDIV is the same */
280 if (n_int == pll.base.n && sdm_din == pll.sdm_din)
281 return 0;
282
283 /* pll slowdown mode */
284 nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
285 BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT),
286 BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT));
287
288 /* new ndiv ready for ramp */
289 /* in DVFS mode SDM is updated via "new" field */
290 nvkm_mask(device, GPCPLL_CFG2, GPCPLL_CFG2_SDM_DIN_NEW_MASK,
291 sdm_din << GPCPLL_CFG2_SDM_DIN_NEW_SHIFT);
292 pll.base.n = n_int;
293 udelay(1);
294 gk20a_pllg_write_mnp(&clk->base, &pll.base);
295
296 /* dynamic ramp to new ndiv */
297 udelay(1);
298 nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
299 BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT),
300 BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT));
301
302 /* wait for ramping to complete */
303 if (nvkm_wait_usec(device, 500, GPC_BCAST_NDIV_SLOWDOWN_DEBUG,
304 GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK,
305 GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK) < 0)
306 ret = -ETIMEDOUT;
307
308 /* in DVFS mode complete SDM update */
309 nvkm_mask(device, GPCPLL_CFG2, GPCPLL_CFG2_SDM_DIN_MASK,
310 sdm_din << GPCPLL_CFG2_SDM_DIN_SHIFT);
311
312 /* exit slowdown mode */
313 nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
314 BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT) |
315 BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), 0);
316 nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);
317
318 return ret;
319}
320
321static int
322gm20b_pllg_enable(struct gm20b_clk *clk)
323{
324 struct nvkm_device *device = clk->base.base.subdev.device;
325
326 nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE);
327 nvkm_rd32(device, GPCPLL_CFG);
328
329 /* In DVFS mode lock cannot be used - so just delay */
330 udelay(40);
331
332 /* set SYNC_MODE for glitchless switch out of bypass */
333 nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_SYNC_MODE,
334 GPCPLL_CFG_SYNC_MODE);
335 nvkm_rd32(device, GPCPLL_CFG);
336
337 /* switch to VCO mode */
338 nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT),
339 BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));
340
341 return 0;
342}
343
344static void
345gm20b_pllg_disable(struct gm20b_clk *clk)
346{
347 struct nvkm_device *device = clk->base.base.subdev.device;
348
349 /* put PLL in bypass before disabling it */
350 nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);
351
352 /* clear SYNC_MODE before disabling PLL */
353 nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_SYNC_MODE, 0);
354
355 nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0);
356 nvkm_rd32(device, GPCPLL_CFG);
357}
358
359static int
360gm20b_pllg_program_mnp(struct gm20b_clk *clk, const struct gk20a_pll *pll)
361{
362 struct nvkm_subdev *subdev = &clk->base.base.subdev;
363 struct nvkm_device *device = subdev->device;
364 struct gm20b_pll cur_pll;
365 u32 n_int, sdm_din;
366 /* if we only change pdiv, we can do a glitchless transition */
367 bool pdiv_only;
368 int ret;
369
370 gm20b_dvfs_calc_ndiv(clk, pll->n, &n_int, &sdm_din);
371 gm20b_pllg_read_mnp(clk, &cur_pll);
372 pdiv_only = cur_pll.base.n == n_int && cur_pll.sdm_din == sdm_din &&
373 cur_pll.base.m == pll->m;
374
375 /* need full sequence if clock not enabled yet */
376 if (!gk20a_pllg_is_enabled(&clk->base))
377 pdiv_only = false;
378
379 /* split VCO-to-bypass jump in half by setting out divider 1:2 */
380 nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
381 GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT);
382 /* Intentional 2nd write to assure linear divider operation */
383 nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
384 GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT);
385 nvkm_rd32(device, GPC2CLK_OUT);
386 udelay(2);
387
388 if (pdiv_only) {
389 u32 old = cur_pll.base.pl;
390 u32 new = pll->pl;
391
392 /*
393 * we can do a glitchless transition only if the old and new PL
394 * parameters share at least one bit set to 1. If this is not
395 * the case, calculate and program an interim PL that will allow
396 * us to respect that rule.
397 */
398 if ((old & new) == 0) {
399 cur_pll.base.pl = min(old | BIT(ffs(new) - 1),
400 new | BIT(ffs(old) - 1));
401 gk20a_pllg_write_mnp(&clk->base, &cur_pll.base);
402 }
403
404 cur_pll.base.pl = new;
405 gk20a_pllg_write_mnp(&clk->base, &cur_pll.base);
406 } else {
407 /* disable before programming if more than pdiv changes */
408 gm20b_pllg_disable(clk);
409
410 cur_pll.base = *pll;
411 cur_pll.base.n = n_int;
412 cur_pll.sdm_din = sdm_din;
413 gm20b_pllg_write_mnp(clk, &cur_pll);
414
415 ret = gm20b_pllg_enable(clk);
416 if (ret)
417 return ret;
418 }
419
420 /* restore out divider 1:1 */
421 udelay(2);
422 nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
423 GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT);
424 /* Intentional 2nd write to assure linear divider operation */
425 nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
426 GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT);
427 nvkm_rd32(device, GPC2CLK_OUT);
428
429 return 0;
430}
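For instance (hypothetical values), going from PL = 0x4 (0b100) to PL = 0x3 (0b011) shares no set bit, so the code above first programs the interim value min(0x4 | BIT(ffs(0x3) - 1), 0x3 | BIT(ffs(0x4) - 1)) = min(0x5, 0x7) = 0x5; the divider then walks 0x4 -> 0x5 -> 0x3 and every step shares at least one bit with its neighbour, which is what keeps the transition glitchless.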
431
432static int
433gm20b_pllg_program_mnp_slide(struct gm20b_clk *clk, const struct gk20a_pll *pll)
434{
435 struct gk20a_pll cur_pll;
436 int ret;
437
438 if (gk20a_pllg_is_enabled(&clk->base)) {
439 gk20a_pllg_read_mnp(&clk->base, &cur_pll);
440
441 /* just do NDIV slide if there is no change to M and PL */
442 if (pll->m == cur_pll.m && pll->pl == cur_pll.pl)
443 return gm20b_pllg_slide(clk, pll->n);
444
445 /* slide down to current NDIV_LO */
446 cur_pll.n = gk20a_pllg_n_lo(&clk->base, &cur_pll);
447 ret = gm20b_pllg_slide(clk, cur_pll.n);
448 if (ret)
449 return ret;
450 }
451
452 /* program MNP with the new clock parameters and new NDIV_LO */
453 cur_pll = *pll;
454 cur_pll.n = gk20a_pllg_n_lo(&clk->base, &cur_pll);
455 ret = gm20b_pllg_program_mnp(clk, &cur_pll);
456 if (ret)
457 return ret;
458
459 /* slide up to new NDIV */
460 return gm20b_pllg_slide(clk, pll->n);
461}
462
463static int
464gm20b_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
465{
466 struct gm20b_clk *clk = gm20b_clk(base);
467 struct nvkm_subdev *subdev = &base->subdev;
468 struct nvkm_volt *volt = base->subdev.device->volt;
469 int ret;
470
471 ret = gk20a_pllg_calc_mnp(&clk->base, cstate->domain[nv_clk_src_gpc] *
472 GK20A_CLK_GPC_MDIV, &clk->new_pll);
473 if (ret)
474 return ret;
475
476 clk->new_uv = volt->vid[cstate->voltage].uv;
477 gm20b_dvfs_calc_det_coeff(clk, clk->new_uv, &clk->new_dvfs);
478
479 nvkm_debug(subdev, "%s uv: %d uv\n", __func__, clk->new_uv);
480
481 return 0;
482}
483
484/*
485 * Compute PLL parameters that are always safe for the current voltage
486 */
487static void
488gm20b_dvfs_calc_safe_pll(struct gm20b_clk *clk, struct gk20a_pll *pll)
489{
490 u32 rate = gk20a_pllg_calc_rate(&clk->base, pll) / KHZ;
491 u32 parent_rate = clk->base.parent_rate / KHZ;
492 u32 nmin, nsafe;
493
494 /* remove a safe margin of 10% */
495 if (rate > clk->safe_fmax_vmin)
496 rate = rate * (100 - 10) / 100;
497
498 /* gpc2clk */
499 rate *= 2;
500
501 nmin = DIV_ROUND_UP(pll->m * clk->base.params->min_vco, parent_rate);
502 nsafe = pll->m * rate / (clk->base.parent_rate);
503
504 if (nsafe < nmin) {
505 pll->pl = DIV_ROUND_UP(nmin * parent_rate, pll->m * rate);
506 nsafe = nmin;
507 }
508
509 pll->n = nsafe;
510}
511
512static void
513gm20b_dvfs_program_coeff(struct gm20b_clk *clk, u32 coeff)
514{
515 struct nvkm_device *device = clk->base.base.subdev.device;
516
517 /* strobe to read external DFS coefficient */
518 nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2,
519 GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT,
520 GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT);
521
522 nvkm_mask(device, GPCPLL_DVFS0, GPCPLL_DVFS0_DFS_COEFF_MASK,
523 coeff << GPCPLL_DVFS0_DFS_COEFF_SHIFT);
524
525 udelay(1);
526 nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2,
527 GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT, 0);
528}
529
530static void
531gm20b_dvfs_program_ext_cal(struct gm20b_clk *clk, u32 dfs_det_cal)
532{
533 struct nvkm_device *device = clk->base.base.subdev.device;
534 u32 val;
535
536 nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2, MASK(DFS_DET_RANGE + 1),
537 dfs_det_cal);
538 udelay(1);
539
540 val = nvkm_rd32(device, GPCPLL_DVFS1);
541 if (!(val & BIT(25))) {
542 /* Use external value to overwrite calibration value */
543 val |= BIT(25) | BIT(16);
544 nvkm_wr32(device, GPCPLL_DVFS1, val);
545 }
546}
547
548static void
549gm20b_dvfs_program_dfs_detection(struct gm20b_clk *clk,
550 struct gm20b_clk_dvfs *dvfs)
551{
552 struct nvkm_device *device = clk->base.base.subdev.device;
553
554 /* strobe to read external DFS coefficient */
555 nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2,
556 GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT,
557 GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT);
558
559 nvkm_mask(device, GPCPLL_DVFS0,
560 GPCPLL_DVFS0_DFS_COEFF_MASK | GPCPLL_DVFS0_DFS_DET_MAX_MASK,
561 dvfs->dfs_coeff << GPCPLL_DVFS0_DFS_COEFF_SHIFT |
562 dvfs->dfs_det_max << GPCPLL_DVFS0_DFS_DET_MAX_SHIFT);
563
564 udelay(1);
565 nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2,
566 GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT, 0);
567
568 gm20b_dvfs_program_ext_cal(clk, dvfs->dfs_ext_cal);
569}
570
571static int
572gm20b_clk_prog(struct nvkm_clk *base)
573{
574 struct gm20b_clk *clk = gm20b_clk(base);
575 u32 cur_freq;
576 int ret;
577
578 /* No change in DVFS settings? */
579 if (clk->uv == clk->new_uv)
580 goto prog;
581
582 /*
583 * Interim step for changing DVFS detection settings: low enough
584 * frequency to be safe at DVFS coeff = 0.
585 *
586 * 1. If voltage is increasing:
587 * - safe frequency target matches the lowest - old - frequency
588 * - DVFS settings are still old
589 * - Voltage already increased to new level by volt, but maximum
590 * detection limit assures PLL output remains under F/V curve
591 *
592 * 2. If voltage is decreasing:
593 * - safe frequency target matches the lowest - new - frequency
594 * - DVFS settings are still old
595 * - Voltage is also old, it will be lowered by volt afterwards
596 *
597 * Interim step can be skipped if old frequency is below safe minimum,
598 * i.e., it is low enough to be safe at any voltage in operating range
599 * with zero DVFS coefficient.
600 */
601 cur_freq = nvkm_clk_read(&clk->base.base, nv_clk_src_gpc);
602 if (cur_freq > clk->safe_fmax_vmin) {
603 struct gk20a_pll pll_safe;
604
605 if (clk->uv < clk->new_uv)
606 /* voltage will raise: safe frequency is current one */
607 pll_safe = clk->base.pll;
608 else
609 /* voltage will drop: safe frequency is new one */
610 pll_safe = clk->new_pll;
611
612 gm20b_dvfs_calc_safe_pll(clk, &pll_safe);
613 ret = gm20b_pllg_program_mnp_slide(clk, &pll_safe);
614 if (ret)
615 return ret;
616 }
617
618 /*
619 * DVFS detection settings transition:
620 * - Set DVFS coefficient zero
621 * - Set calibration level to new voltage
622 * - Set DVFS coefficient to match new voltage
623 */
624 gm20b_dvfs_program_coeff(clk, 0);
625 gm20b_dvfs_program_ext_cal(clk, clk->new_dvfs.dfs_ext_cal);
626 gm20b_dvfs_program_coeff(clk, clk->new_dvfs.dfs_coeff);
627 gm20b_dvfs_program_dfs_detection(clk, &clk->new_dvfs);
628
629prog:
630 clk->uv = clk->new_uv;
631 clk->dvfs = clk->new_dvfs;
632 clk->base.pll = clk->new_pll;
633
634 return gm20b_pllg_program_mnp_slide(clk, &clk->base.pll);
635}
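As a concrete, purely illustrative walk-through of the sequencing above: with safe_fmax_vmin around 870 MHz, moving from 614 MHz at the low voltage to 998 MHz at the high voltage skips the interim slide (614 MHz is already below the safe limit), reprograms the coefficient and calibration for the new voltage, and then slides up to the final target; going back down, 998 MHz exceeds the limit, so the clock is first slid to the new, lower target (derated by 10% if that target is itself above the limit) while the old voltage is still applied, and only then are the DVFS registers and the final PLL parameters programmed.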
636
56static struct nvkm_pstate 637static struct nvkm_pstate
57gm20b_pstates[] = { 638gm20b_pstates[] = {
58 { 639 {
@@ -133,9 +714,99 @@ gm20b_pstates[] = {
133 .voltage = 12, 714 .voltage = 12,
134 }, 715 },
135 }, 716 },
136
137}; 717};
138 718
719static void
720gm20b_clk_fini(struct nvkm_clk *base)
721{
722 struct nvkm_device *device = base->subdev.device;
723 struct gm20b_clk *clk = gm20b_clk(base);
724
725 /* slide to VCO min */
726 if (gk20a_pllg_is_enabled(&clk->base)) {
727 struct gk20a_pll pll;
728 u32 n_lo;
729
730 gk20a_pllg_read_mnp(&clk->base, &pll);
731 n_lo = gk20a_pllg_n_lo(&clk->base, &pll);
732 gm20b_pllg_slide(clk, n_lo);
733 }
734
735 gm20b_pllg_disable(clk);
736
737 /* set IDDQ */
738 nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 1);
739}
740
741static int
742gm20b_clk_init_dvfs(struct gm20b_clk *clk)
743{
744 struct nvkm_subdev *subdev = &clk->base.base.subdev;
745 struct nvkm_device *device = subdev->device;
746 bool fused = clk->uvdet_offs && clk->uvdet_slope;
747 static const s32 ADC_SLOPE_UV = 10000; /* default ADC detection slope */
748 u32 data;
749 int ret;
750
751 /* Enable NA DVFS */
752 nvkm_mask(device, GPCPLL_DVFS1, GPCPLL_DVFS1_EN_DFS_BIT,
753 GPCPLL_DVFS1_EN_DFS_BIT);
754
755 /* Set VCO_CTRL */
756 if (clk->dvfs_params->vco_ctrl)
757 nvkm_mask(device, GPCPLL_CFG3, GPCPLL_CFG3_VCO_CTRL_MASK,
758 clk->dvfs_params->vco_ctrl << GPCPLL_CFG3_VCO_CTRL_SHIFT);
759
760 if (fused) {
761 /* Start internal calibration, but ignore results */
762 nvkm_mask(device, GPCPLL_DVFS1, GPCPLL_DVFS1_EN_DFS_CAL_BIT,
763 GPCPLL_DVFS1_EN_DFS_CAL_BIT);
764
765 /* got uvdet parameters from fuse, skip calibration */
766 goto calibrated;
767 }
768
769 /*
770 * If calibration parameters are not fused, start internal calibration,
771 * wait for completion, and use results along with default slope to
772 * calculate ADC offset during boot.
773 */
774 nvkm_mask(device, GPCPLL_DVFS1, GPCPLL_DVFS1_EN_DFS_CAL_BIT,
775 GPCPLL_DVFS1_EN_DFS_CAL_BIT);
776
777 /* Wait for internal calibration done (spec < 2us). */
778 ret = nvkm_wait_usec(device, 10, GPCPLL_DVFS1,
779 GPCPLL_DVFS1_DFS_CAL_DONE_BIT,
780 GPCPLL_DVFS1_DFS_CAL_DONE_BIT);
781 if (ret < 0) {
782 nvkm_error(subdev, "GPCPLL calibration timeout\n");
783 return -ETIMEDOUT;
784 }
785
786 data = nvkm_rd32(device, GPCPLL_CFG3) >>
787 GPCPLL_CFG3_PLL_DFS_TESTOUT_SHIFT;
788 data &= MASK(GPCPLL_CFG3_PLL_DFS_TESTOUT_WIDTH);
789
790 clk->uvdet_slope = ADC_SLOPE_UV;
791 clk->uvdet_offs = ((s32)clk->uv) - data * ADC_SLOPE_UV;
792
793 nvkm_debug(subdev, "calibrated DVFS parameters: offs %d, slope %d\n",
794 clk->uvdet_offs, clk->uvdet_slope);
795
796calibrated:
797 /* Compute and apply initial DVFS parameters */
798 gm20b_dvfs_calc_det_coeff(clk, clk->uv, &clk->dvfs);
799 gm20b_dvfs_program_coeff(clk, 0);
800 gm20b_dvfs_program_ext_cal(clk, clk->dvfs.dfs_ext_cal);
801 gm20b_dvfs_program_coeff(clk, clk->dvfs.dfs_coeff);
802 gm20b_dvfs_program_dfs_detection(clk, &clk->new_dvfs);
803
804 return 0;
805}
806
807/* Forward declaration to detect speedo >=1 in gm20b_clk_init() */
808static const struct nvkm_clk_func gm20b_clk;
809
139static int 810static int
140gm20b_clk_init(struct nvkm_clk *base) 811gm20b_clk_init(struct nvkm_clk *base)
141{ 812{
@@ -143,15 +814,56 @@ gm20b_clk_init(struct nvkm_clk *base)
143 struct nvkm_subdev *subdev = &clk->base.subdev; 814 struct nvkm_subdev *subdev = &clk->base.subdev;
144 struct nvkm_device *device = subdev->device; 815 struct nvkm_device *device = subdev->device;
145 int ret; 816 int ret;
817 u32 data;
818
819 /* get out from IDDQ */
820 nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 0);
821 nvkm_rd32(device, GPCPLL_CFG);
822 udelay(5);
823
824 nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK,
825 GPC2CLK_OUT_INIT_VAL);
146 826
147 /* Set the global bypass control to VCO */ 827 /* Set the global bypass control to VCO */
148 nvkm_mask(device, BYPASSCTRL_SYS, 828 nvkm_mask(device, BYPASSCTRL_SYS,
149 MASK(BYPASSCTRL_SYS_GPCPLL_WIDTH) << BYPASSCTRL_SYS_GPCPLL_SHIFT, 829 MASK(BYPASSCTRL_SYS_GPCPLL_WIDTH) << BYPASSCTRL_SYS_GPCPLL_SHIFT,
150 0); 830 0);
151 831
832 ret = gk20a_clk_setup_slide(clk);
833 if (ret)
834 return ret;
835
836 /* If not fused, set RAM SVOP PDP data 0x2, and enable fuse override */
837 data = nvkm_rd32(device, 0x021944);
838 if (!(data & 0x3)) {
839 data |= 0x2;
840 nvkm_wr32(device, 0x021944, data);
841
842 data = nvkm_rd32(device, 0x021948);
843 data |= 0x1;
844 nvkm_wr32(device, 0x021948, data);
845 }
846
847 /* Disable idle slow down */
848 nvkm_mask(device, 0x20160, 0x003f0000, 0x0);
849
850 /* speedo >= 1? */
851 if (clk->base.func == &gm20b_clk) {
852 struct gm20b_clk *_clk = gm20b_clk(base);
853 struct nvkm_volt *volt = device->volt;
854
855 /* Get current voltage */
856 _clk->uv = nvkm_volt_get(volt);
857
858 /* Initialize DVFS */
859 ret = gm20b_clk_init_dvfs(_clk);
860 if (ret)
861 return ret;
862 }
863
152 /* Start with lowest frequency */ 864 /* Start with lowest frequency */
153 base->func->calc(base, &base->func->pstates[0].base); 865 base->func->calc(base, &base->func->pstates[0].base);
154 ret = base->func->prog(&clk->base); 866 ret = base->func->prog(base);
155 if (ret) { 867 if (ret) {
156 nvkm_error(subdev, "cannot initialize clock\n"); 868 nvkm_error(subdev, "cannot initialize clock\n");
157 return ret; 869 return ret;
@@ -169,6 +881,7 @@ gm20b_clk_speedo0 = {
169 .prog = gk20a_clk_prog, 881 .prog = gk20a_clk_prog,
170 .tidy = gk20a_clk_tidy, 882 .tidy = gk20a_clk_tidy,
171 .pstates = gm20b_pstates, 883 .pstates = gm20b_pstates,
884 /* Speedo 0 only supports 12 voltages */
172 .nr_pstates = ARRAY_SIZE(gm20b_pstates) - 1, 885 .nr_pstates = ARRAY_SIZE(gm20b_pstates) - 1,
173 .domains = { 886 .domains = {
174 { nv_clk_src_crystal, 0xff }, 887 { nv_clk_src_crystal, 0xff },
@@ -177,8 +890,26 @@ gm20b_clk_speedo0 = {
177 }, 890 },
178}; 891};
179 892
180int 893static const struct nvkm_clk_func
181gm20b_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk) 894gm20b_clk = {
895 .init = gm20b_clk_init,
896 .fini = gm20b_clk_fini,
897 .read = gk20a_clk_read,
898 .calc = gm20b_clk_calc,
899 .prog = gm20b_clk_prog,
900 .tidy = gk20a_clk_tidy,
901 .pstates = gm20b_pstates,
902 .nr_pstates = ARRAY_SIZE(gm20b_pstates),
903 .domains = {
904 { nv_clk_src_crystal, 0xff },
905 { nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV },
906 { nv_clk_src_max },
907 },
908};
909
910static int
911gm20b_clk_new_speedo0(struct nvkm_device *device, int index,
912 struct nvkm_clk **pclk)
182{ 913{
183 struct gk20a_clk *clk; 914 struct gk20a_clk *clk;
184 int ret; 915 int ret;
@@ -188,11 +919,156 @@ gm20b_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
188 return -ENOMEM; 919 return -ENOMEM;
189 *pclk = &clk->base; 920 *pclk = &clk->base;
190 921
191 ret = _gk20a_clk_ctor(device, index, &gm20b_clk_speedo0, 922 ret = gk20a_clk_ctor(device, index, &gm20b_clk_speedo0,
192 &gm20b_pllg_params, clk); 923 &gm20b_pllg_params, clk);
193 924
194 clk->pl_to_div = pl_to_div; 925 clk->pl_to_div = pl_to_div;
195 clk->div_to_pl = div_to_pl; 926 clk->div_to_pl = div_to_pl;
196 927
197 return ret; 928 return ret;
198} 929}
930
931/* FUSE register */
932#define FUSE_RESERVED_CALIB0 0x204
933#define FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_SHIFT 0
934#define FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_WIDTH 4
935#define FUSE_RESERVED_CALIB0_INTERCEPT_INT_SHIFT 4
936#define FUSE_RESERVED_CALIB0_INTERCEPT_INT_WIDTH 10
937#define FUSE_RESERVED_CALIB0_SLOPE_FRAC_SHIFT 14
938#define FUSE_RESERVED_CALIB0_SLOPE_FRAC_WIDTH 10
939#define FUSE_RESERVED_CALIB0_SLOPE_INT_SHIFT 24
940#define FUSE_RESERVED_CALIB0_SLOPE_INT_WIDTH 6
941#define FUSE_RESERVED_CALIB0_FUSE_REV_SHIFT 30
942#define FUSE_RESERVED_CALIB0_FUSE_REV_WIDTH 2
943
944static int
945gm20b_clk_init_fused_params(struct gm20b_clk *clk)
946{
947 struct nvkm_subdev *subdev = &clk->base.base.subdev;
948 u32 val = 0;
949 u32 rev = 0;
950
951#if IS_ENABLED(CONFIG_ARCH_TEGRA)
952 tegra_fuse_readl(FUSE_RESERVED_CALIB0, &val);
953 rev = (val >> FUSE_RESERVED_CALIB0_FUSE_REV_SHIFT) &
954 MASK(FUSE_RESERVED_CALIB0_FUSE_REV_WIDTH);
955#endif
956
957 /* No fused parameters, we will calibrate later */
958 if (rev == 0)
959 return -EINVAL;
960
961 /* Integer part in mV + fractional part in uV */
962 clk->uvdet_slope = ((val >> FUSE_RESERVED_CALIB0_SLOPE_INT_SHIFT) &
963 MASK(FUSE_RESERVED_CALIB0_SLOPE_INT_WIDTH)) * 1000 +
964 ((val >> FUSE_RESERVED_CALIB0_SLOPE_FRAC_SHIFT) &
965 MASK(FUSE_RESERVED_CALIB0_SLOPE_FRAC_WIDTH));
966
967 /* Integer part in mV + fractional part in 100uV */
968 clk->uvdet_offs = ((val >> FUSE_RESERVED_CALIB0_INTERCEPT_INT_SHIFT) &
969 MASK(FUSE_RESERVED_CALIB0_INTERCEPT_INT_WIDTH)) * 1000 +
970 ((val >> FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_SHIFT) &
971 MASK(FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_WIDTH)) * 100;
972
973 nvkm_debug(subdev, "fused calibration data: slope %d, offs %d\n",
974 clk->uvdet_slope, clk->uvdet_offs);
975 return 0;
976}
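A decode example with made-up fuse fields: SLOPE_INT = 10 and SLOPE_FRAC = 500 give uvdet_slope = 10 * 1000 + 500 = 10500 uV per ADC step, while INTERCEPT_INT = 600 and INTERCEPT_FRAC = 5 give uvdet_offs = 600 * 1000 + 5 * 100 = 600500 uV, matching the mV-plus-fraction encoding described in the comments above.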
977
978static int
979gm20b_clk_init_safe_fmax(struct gm20b_clk *clk)
980{
981 struct nvkm_subdev *subdev = &clk->base.base.subdev;
982 struct nvkm_volt *volt = subdev->device->volt;
983 struct nvkm_pstate *pstates = clk->base.base.func->pstates;
984 int nr_pstates = clk->base.base.func->nr_pstates;
985 int vmin, id = 0;
986 u32 fmax = 0;
987 int i;
988
989 /* find lowest voltage we can use */
990 vmin = volt->vid[0].uv;
991 for (i = 1; i < volt->vid_nr; i++) {
992 if (volt->vid[i].uv <= vmin) {
993 vmin = volt->vid[i].uv;
994 id = volt->vid[i].vid;
995 }
996 }
997
998 /* find max frequency at this voltage */
999 for (i = 0; i < nr_pstates; i++)
1000 if (pstates[i].base.voltage == id)
1001 fmax = max(fmax,
1002 pstates[i].base.domain[nv_clk_src_gpc]);
1003
1004 if (!fmax) {
1005 nvkm_error(subdev, "failed to evaluate safe fmax\n");
1006 return -EINVAL;
1007 }
1008
1009 /* we are safe at 90% of the max frequency */
1010 clk->safe_fmax_vmin = fmax * (100 - 10) / 100;
1011 nvkm_debug(subdev, "safe fmax @ vmin = %u Khz\n", clk->safe_fmax_vmin);
1012
1013 return 0;
1014}
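For example (numbers illustrative): if the lowest usable vid corresponds to 810 mV and the fastest pstate at that vid runs the GPC clock at 614400 KHz, safe_fmax_vmin becomes 614400 * 90 / 100 = 552960 KHz, i.e. roughly 553 MHz.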
1015
1016int
1017gm20b_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
1018{
1019 struct nvkm_device_tegra *tdev = device->func->tegra(device);
1020 struct gm20b_clk *clk;
1021 struct nvkm_subdev *subdev;
1022 struct gk20a_clk_pllg_params *clk_params;
1023 int ret;
1024
1025 /* Speedo 0 GPUs cannot use noise-aware PLL */
1026 if (tdev->gpu_speedo_id == 0)
1027 return gm20b_clk_new_speedo0(device, index, pclk);
1028
1029 /* Speedo >= 1, use NAPLL */
1030 clk = kzalloc(sizeof(*clk) + sizeof(*clk_params), GFP_KERNEL);
1031 if (!clk)
1032 return -ENOMEM;
1033 *pclk = &clk->base.base;
1034 subdev = &clk->base.base.subdev;
1035
1036 /* duplicate the clock parameters since we will patch them below */
1037 clk_params = (void *) (clk + 1);
1038 *clk_params = gm20b_pllg_params;
1039 ret = gk20a_clk_ctor(device, index, &gm20b_clk, clk_params,
1040 &clk->base);
1041 if (ret)
1042 return ret;
1043
1044 /*
1045 * NAPLL can only work with max_u, clamp the m range so
1046 * gk20a_pllg_calc_mnp always uses it
1047 */
1048 clk_params->max_m = clk_params->min_m = DIV_ROUND_UP(clk_params->max_u,
1049 (clk->base.parent_rate / KHZ));
1050 if (clk_params->max_m == 0) {
1051 nvkm_warn(subdev, "cannot use NAPLL, using legacy clock...\n");
1052 kfree(clk);
1053 return gm20b_clk_new_speedo0(device, index, pclk);
1054 }
1055
1056 clk->base.pl_to_div = pl_to_div;
1057 clk->base.div_to_pl = div_to_pl;
1058
1059 clk->dvfs_params = &gm20b_dvfs_params;
1060
1061 ret = gm20b_clk_init_fused_params(clk);
1062 /*
1063 * we will calibrate during init - should never happen on
1064 * prod parts
1065 */
1066 if (ret)
1067 nvkm_warn(subdev, "no fused calibration parameters\n");
1068
1069 ret = gm20b_clk_init_safe_fmax(clk);
1070 if (ret)
1071 return ret;
1072
1073 return 0;
1074}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
index 842d5de96d73..edcc157e6ac8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild
@@ -24,6 +24,8 @@ nvkm-y += nvkm/subdev/fb/gk104.o
24nvkm-y += nvkm/subdev/fb/gk20a.o 24nvkm-y += nvkm/subdev/fb/gk20a.o
25nvkm-y += nvkm/subdev/fb/gm107.o 25nvkm-y += nvkm/subdev/fb/gm107.o
26nvkm-y += nvkm/subdev/fb/gm200.o 26nvkm-y += nvkm/subdev/fb/gm200.o
27nvkm-y += nvkm/subdev/fb/gp100.o
28nvkm-y += nvkm/subdev/fb/gp104.o
27 29
28nvkm-y += nvkm/subdev/fb/ram.o 30nvkm-y += nvkm/subdev/fb/ram.o
29nvkm-y += nvkm/subdev/fb/ramnv04.o 31nvkm-y += nvkm/subdev/fb/ramnv04.o
@@ -41,6 +43,7 @@ nvkm-y += nvkm/subdev/fb/rammcp77.o
41nvkm-y += nvkm/subdev/fb/ramgf100.o 43nvkm-y += nvkm/subdev/fb/ramgf100.o
42nvkm-y += nvkm/subdev/fb/ramgk104.o 44nvkm-y += nvkm/subdev/fb/ramgk104.o
43nvkm-y += nvkm/subdev/fb/ramgm107.o 45nvkm-y += nvkm/subdev/fb/ramgm107.o
46nvkm-y += nvkm/subdev/fb/ramgp100.o
44nvkm-y += nvkm/subdev/fb/sddr2.o 47nvkm-y += nvkm/subdev/fb/sddr2.o
45nvkm-y += nvkm/subdev/fb/sddr3.o 48nvkm-y += nvkm/subdev/fb/sddr3.o
46nvkm-y += nvkm/subdev/fb/gddr3.o 49nvkm-y += nvkm/subdev/fb/gddr3.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
index ce90242b8cce..a7049c041594 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
@@ -25,6 +25,7 @@
25#include "ram.h" 25#include "ram.h"
26 26
27#include <core/memory.h> 27#include <core/memory.h>
28#include <core/option.h>
28#include <subdev/bios.h> 29#include <subdev/bios.h>
29#include <subdev/bios/M0203.h> 30#include <subdev/bios/M0203.h>
30#include <engine/gr.h> 31#include <engine/gr.h>
@@ -134,6 +135,10 @@ nvkm_fb_init(struct nvkm_subdev *subdev)
134 135
135 if (fb->func->init) 136 if (fb->func->init)
136 fb->func->init(fb); 137 fb->func->init(fb);
138 if (fb->func->init_page)
139 fb->func->init_page(fb);
140 if (fb->func->init_unkn)
141 fb->func->init_unkn(fb);
137 return 0; 142 return 0;
138} 143}
139 144
@@ -171,6 +176,7 @@ nvkm_fb_ctor(const struct nvkm_fb_func *func, struct nvkm_device *device,
171 nvkm_subdev_ctor(&nvkm_fb, device, index, &fb->subdev); 176 nvkm_subdev_ctor(&nvkm_fb, device, index, &fb->subdev);
172 fb->func = func; 177 fb->func = func;
173 fb->tile.regions = fb->func->tile.regions; 178 fb->tile.regions = fb->func->tile.regions;
179 fb->page = nvkm_longopt(device->cfgopt, "NvFbBigPage", 0);
174} 180}
175 181
176int 182int
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
index e649ead5ccfc..76433cc66fff 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c
@@ -72,6 +72,22 @@ gf100_fb_oneinit(struct nvkm_fb *fb)
72} 72}
73 73
74void 74void
75gf100_fb_init_page(struct nvkm_fb *fb)
76{
77 struct nvkm_device *device = fb->subdev.device;
78 switch (fb->page) {
79 case 16:
80 nvkm_mask(device, 0x100c80, 0x00000001, 0x00000001);
81 break;
82 case 17:
83 default:
84 nvkm_mask(device, 0x100c80, 0x00000001, 0x00000000);
85 fb->page = 17;
86 break;
87 }
88}
89
90void
75gf100_fb_init(struct nvkm_fb *base) 91gf100_fb_init(struct nvkm_fb *base)
76{ 92{
77 struct gf100_fb *fb = gf100_fb(base); 93 struct gf100_fb *fb = gf100_fb(base);
@@ -79,8 +95,6 @@ gf100_fb_init(struct nvkm_fb *base)
79 95
80 if (fb->r100c10_page) 96 if (fb->r100c10_page)
81 nvkm_wr32(device, 0x100c10, fb->r100c10 >> 8); 97 nvkm_wr32(device, 0x100c10, fb->r100c10 >> 8);
82
83 nvkm_mask(device, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
84} 98}
85 99
86void * 100void *
@@ -125,6 +139,7 @@ gf100_fb = {
125 .dtor = gf100_fb_dtor, 139 .dtor = gf100_fb_dtor,
126 .oneinit = gf100_fb_oneinit, 140 .oneinit = gf100_fb_oneinit,
127 .init = gf100_fb_init, 141 .init = gf100_fb_init,
142 .init_page = gf100_fb_init_page,
128 .intr = gf100_fb_intr, 143 .intr = gf100_fb_intr,
129 .ram_new = gf100_ram_new, 144 .ram_new = gf100_ram_new,
130 .memtype_valid = gf100_fb_memtype_valid, 145 .memtype_valid = gf100_fb_memtype_valid,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
index 2160e5a39c9a..449f431644b3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h
@@ -14,4 +14,6 @@ int gf100_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *,
14void *gf100_fb_dtor(struct nvkm_fb *); 14void *gf100_fb_dtor(struct nvkm_fb *);
15void gf100_fb_init(struct nvkm_fb *); 15void gf100_fb_init(struct nvkm_fb *);
16void gf100_fb_intr(struct nvkm_fb *); 16void gf100_fb_intr(struct nvkm_fb *);
17
18void gp100_fb_init(struct nvkm_fb *);
17#endif 19#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
index b41f0f70038c..4245e2e6e604 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c
@@ -29,6 +29,7 @@ gk104_fb = {
29 .dtor = gf100_fb_dtor, 29 .dtor = gf100_fb_dtor,
30 .oneinit = gf100_fb_oneinit, 30 .oneinit = gf100_fb_oneinit,
31 .init = gf100_fb_init, 31 .init = gf100_fb_init,
32 .init_page = gf100_fb_init_page,
32 .intr = gf100_fb_intr, 33 .intr = gf100_fb_intr,
33 .ram_new = gk104_ram_new, 34 .ram_new = gk104_ram_new,
34 .memtype_valid = gf100_fb_memtype_valid, 35 .memtype_valid = gf100_fb_memtype_valid,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
index 7306f7dfc3b9..f815fe2bbf08 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c
@@ -27,7 +27,6 @@ static void
27gk20a_fb_init(struct nvkm_fb *fb) 27gk20a_fb_init(struct nvkm_fb *fb)
28{ 28{
29 struct nvkm_device *device = fb->subdev.device; 29 struct nvkm_device *device = fb->subdev.device;
30 nvkm_mask(device, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
31 nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(fb->mmu_wr) >> 8); 30 nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(fb->mmu_wr) >> 8);
32 nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(fb->mmu_rd) >> 8); 31 nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(fb->mmu_rd) >> 8);
33} 32}
@@ -36,6 +35,7 @@ static const struct nvkm_fb_func
36gk20a_fb = { 35gk20a_fb = {
37 .oneinit = gf100_fb_oneinit, 36 .oneinit = gf100_fb_oneinit,
38 .init = gk20a_fb_init, 37 .init = gk20a_fb_init,
38 .init_page = gf100_fb_init_page,
39 .memtype_valid = gf100_fb_memtype_valid, 39 .memtype_valid = gf100_fb_memtype_valid,
40}; 40};
41 41
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
index 4869fdb753c9..db699025f546 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c
@@ -29,6 +29,7 @@ gm107_fb = {
29 .dtor = gf100_fb_dtor, 29 .dtor = gf100_fb_dtor,
30 .oneinit = gf100_fb_oneinit, 30 .oneinit = gf100_fb_oneinit,
31 .init = gf100_fb_init, 31 .init = gf100_fb_init,
32 .init_page = gf100_fb_init_page,
32 .intr = gf100_fb_intr, 33 .intr = gf100_fb_intr,
33 .ram_new = gm107_ram_new, 34 .ram_new = gm107_ram_new,
34 .memtype_valid = gf100_fb_memtype_valid, 35 .memtype_valid = gf100_fb_memtype_valid,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
index 44f5716f64d8..62f653240be3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c
@@ -26,6 +26,24 @@
26 26
27#include <core/memory.h> 27#include <core/memory.h>
28 28
29void
30gm200_fb_init_page(struct nvkm_fb *fb)
31{
32 struct nvkm_device *device = fb->subdev.device;
33 switch (fb->page) {
34 case 16:
35 nvkm_mask(device, 0x100c80, 0x00000801, 0x00000001);
36 break;
37 case 17:
38 nvkm_mask(device, 0x100c80, 0x00000801, 0x00000000);
39 break;
40 default:
41 nvkm_mask(device, 0x100c80, 0x00000800, 0x00000800);
42 fb->page = 0;
43 break;
44 }
45}
46
29static void 47static void
30gm200_fb_init(struct nvkm_fb *base) 48gm200_fb_init(struct nvkm_fb *base)
31{ 49{
@@ -48,6 +66,7 @@ gm200_fb = {
48 .dtor = gf100_fb_dtor, 66 .dtor = gf100_fb_dtor,
49 .oneinit = gf100_fb_oneinit, 67 .oneinit = gf100_fb_oneinit,
50 .init = gm200_fb_init, 68 .init = gm200_fb_init,
69 .init_page = gm200_fb_init_page,
51 .intr = gf100_fb_intr, 70 .intr = gf100_fb_intr,
52 .ram_new = gm107_ram_new, 71 .ram_new = gm107_ram_new,
53 .memtype_valid = gf100_fb_memtype_valid, 72 .memtype_valid = gf100_fb_memtype_valid,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c
new file mode 100644
index 000000000000..98474aec1921
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c
@@ -0,0 +1,69 @@
1/*
2 * Copyright 2016 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "gf100.h"
25#include "ram.h"
26
27#include <core/memory.h>
28
29static void
30gp100_fb_init_unkn(struct nvkm_fb *base)
31{
32 struct nvkm_device *device = gf100_fb(base)->base.subdev.device;
33 nvkm_wr32(device, 0x1fac80, nvkm_rd32(device, 0x100c80));
34 nvkm_wr32(device, 0x1facc4, nvkm_rd32(device, 0x100cc4));
35 nvkm_wr32(device, 0x1facc8, nvkm_rd32(device, 0x100cc8));
36 nvkm_wr32(device, 0x1faccc, nvkm_rd32(device, 0x100ccc));
37}
38
39void
40gp100_fb_init(struct nvkm_fb *base)
41{
42 struct gf100_fb *fb = gf100_fb(base);
43 struct nvkm_device *device = fb->base.subdev.device;
44
45 if (fb->r100c10_page)
46 nvkm_wr32(device, 0x100c10, fb->r100c10 >> 8);
47
48 nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(fb->base.mmu_wr) >> 8);
49 nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(fb->base.mmu_rd) >> 8);
50 nvkm_mask(device, 0x100cc4, 0x00060000,
51 max(nvkm_memory_size(fb->base.mmu_rd) >> 16, (u64)2) << 17);
52}
53
54static const struct nvkm_fb_func
55gp100_fb = {
56 .dtor = gf100_fb_dtor,
57 .oneinit = gf100_fb_oneinit,
58 .init = gp100_fb_init,
59 .init_page = gm200_fb_init_page,
60 .init_unkn = gp100_fb_init_unkn,
61 .ram_new = gp100_ram_new,
62 .memtype_valid = gf100_fb_memtype_valid,
63};
64
65int
66gp100_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
67{
68 return gf100_fb_new_(&gp100_fb, device, index, pfb);
69}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp104.c
new file mode 100644
index 000000000000..92cb71861bec
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp104.c
@@ -0,0 +1,43 @@
1/*
2 * Copyright 2016 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "gf100.h"
25#include "ram.h"
26
27#include <core/memory.h>
28
29static const struct nvkm_fb_func
30gp104_fb = {
31 .dtor = gf100_fb_dtor,
32 .oneinit = gf100_fb_oneinit,
33 .init = gp100_fb_init,
34 .init_page = gm200_fb_init_page,
35 .ram_new = gp100_ram_new,
36 .memtype_valid = gf100_fb_memtype_valid,
37};
38
39int
40gp104_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb)
41{
42 return gf100_fb_new_(&gp104_fb, device, index, pfb);
43}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
index d97d640e60a0..e905d44fa1d5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
@@ -8,6 +8,8 @@ struct nvkm_fb_func {
8 void *(*dtor)(struct nvkm_fb *); 8 void *(*dtor)(struct nvkm_fb *);
9 int (*oneinit)(struct nvkm_fb *); 9 int (*oneinit)(struct nvkm_fb *);
10 void (*init)(struct nvkm_fb *); 10 void (*init)(struct nvkm_fb *);
11 void (*init_page)(struct nvkm_fb *);
12 void (*init_unkn)(struct nvkm_fb *);
11 void (*intr)(struct nvkm_fb *); 13 void (*intr)(struct nvkm_fb *);
12 14
13 struct { 15 struct {
@@ -60,5 +62,8 @@ void nv46_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size,
60 u32 pitch, u32 flags, struct nvkm_fb_tile *); 62 u32 pitch, u32 flags, struct nvkm_fb_tile *);
61 63
62int gf100_fb_oneinit(struct nvkm_fb *); 64int gf100_fb_oneinit(struct nvkm_fb *);
65void gf100_fb_init_page(struct nvkm_fb *);
63bool gf100_fb_memtype_valid(struct nvkm_fb *, u32); 66bool gf100_fb_memtype_valid(struct nvkm_fb *, u32);
67
68void gm200_fb_init_page(struct nvkm_fb *);
64#endif 69#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
index f816cbf2ced3..b9ec0ae6723a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h
@@ -47,4 +47,5 @@ int mcp77_ram_new(struct nvkm_fb *, struct nvkm_ram **);
47int gf100_ram_new(struct nvkm_fb *, struct nvkm_ram **); 47int gf100_ram_new(struct nvkm_fb *, struct nvkm_ram **);
48int gk104_ram_new(struct nvkm_fb *, struct nvkm_ram **); 48int gk104_ram_new(struct nvkm_fb *, struct nvkm_ram **);
49int gm107_ram_new(struct nvkm_fb *, struct nvkm_ram **); 49int gm107_ram_new(struct nvkm_fb *, struct nvkm_ram **);
50int gp100_ram_new(struct nvkm_fb *, struct nvkm_ram **);
50#endif 51#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c
new file mode 100644
index 000000000000..f3be408b5e5e
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c
@@ -0,0 +1,146 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "ram.h"
25
26#include <subdev/bios.h>
27#include <subdev/bios/init.h>
28#include <subdev/bios/rammap.h>
29
30static int
31gp100_ram_init(struct nvkm_ram *ram)
32{
33 struct nvkm_subdev *subdev = &ram->fb->subdev;
34 struct nvkm_device *device = subdev->device;
35 struct nvkm_bios *bios = device->bios;
36 u8 ver, hdr, cnt, len, snr, ssz;
37 u32 data;
38 int i;
39
40 /* run a bunch of tables from rammap table. there's actually
41 * individual pointers for each rammap entry too, but, nvidia
42 * seem to just run the last two entries' scripts early on in
43 * their init, and never again.. we'll just run 'em all once
44 * for now.
45 *
46 * i strongly suspect that each script is for a separate mode
47 * (likely selected by 0x9a065c's lower bits?), and the
48 * binary driver skips the one that's already been setup by
49 * the init tables.
50 */
51 data = nvbios_rammapTe(bios, &ver, &hdr, &cnt, &len, &snr, &ssz);
52 if (!data || hdr < 0x15)
53 return -EINVAL;
54
55 cnt = nvbios_rd08(bios, data + 0x14); /* guess at count */
56 data = nvbios_rd32(bios, data + 0x10); /* guess u32... */
57 if (cnt) {
58 u32 save = nvkm_rd32(device, 0x9a065c) & 0x000000f0;
59 for (i = 0; i < cnt; i++, data += 4) {
60 if (i != save >> 4) {
61 nvkm_mask(device, 0x9a065c, 0x000000f0, i << 4);
62 nvbios_exec(&(struct nvbios_init) {
63 .subdev = subdev,
64 .bios = bios,
65 .offset = nvbios_rd32(bios, data),
66 .execute = 1,
67 });
68 }
69 }
70 nvkm_mask(device, 0x9a065c, 0x000000f0, save);
71 }
72
73 nvkm_mask(device, 0x9a0584, 0x11000000, 0x00000000);
74 nvkm_wr32(device, 0x10ecc0, 0xffffffff);
75 nvkm_mask(device, 0x9a0160, 0x00000010, 0x00000010);
76 return 0;
77}
78
79static const struct nvkm_ram_func
80gp100_ram_func = {
81 .init = gp100_ram_init,
82 .get = gf100_ram_get,
83 .put = gf100_ram_put,
84};
85
86int
87gp100_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram)
88{
89 struct nvkm_ram *ram;
90 struct nvkm_subdev *subdev = &fb->subdev;
91 struct nvkm_device *device = subdev->device;
92 enum nvkm_ram_type type = nvkm_fb_bios_memtype(device->bios);
93 const u32 rsvd_head = ( 256 * 1024); /* vga memory */
94 const u32 rsvd_tail = (1024 * 1024); /* vbios etc */
95 u32 fbpa_num = nvkm_rd32(device, 0x022438), fbpa;
96 u32 fbio_opt = nvkm_rd32(device, 0x021c14);
97 u64 part, size = 0, comm = ~0ULL;
98 bool mixed = false;
99 int ret;
100
101 nvkm_debug(subdev, "022438: %08x\n", fbpa_num);
102 nvkm_debug(subdev, "021c14: %08x\n", fbio_opt);
103 for (fbpa = 0; fbpa < fbpa_num; fbpa++) {
104 if (!(fbio_opt & (1 << fbpa))) {
105 part = nvkm_rd32(device, 0x90020c + (fbpa * 0x4000));
106 nvkm_debug(subdev, "fbpa %02x: %lld MiB\n", fbpa, part);
107 part = part << 20;
108 if (part != comm) {
109 if (comm != ~0ULL)
110 mixed = true;
111 comm = min(comm, part);
112 }
113 size = size + part;
114 }
115 }
116
117 ret = nvkm_ram_new_(&gp100_ram_func, fb, type, size, 0, &ram);
118 *pram = ram;
119 if (ret)
120 return ret;
121
122 nvkm_mm_fini(&ram->vram);
123
124 if (mixed) {
125 ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
126 ((comm * fbpa_num) - rsvd_head) >>
127 NVKM_RAM_MM_SHIFT, 1);
128 if (ret)
129 return ret;
130
131 ret = nvkm_mm_init(&ram->vram, (0x1000000000ULL + comm) >>
132 NVKM_RAM_MM_SHIFT,
133 (size - (comm * fbpa_num) - rsvd_tail) >>
134 NVKM_RAM_MM_SHIFT, 1);
135 if (ret)
136 return ret;
137 } else {
138 ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT,
139 (size - rsvd_head - rsvd_tail) >>
140 NVKM_RAM_MM_SHIFT, 1);
141 if (ret)
142 return ret;
143 }
144
145 return 0;
146}
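
gp100_ram_new() sizes VRAM per FBPA: units whose bit is set in 0x021c14 are treated as floorswept and skipped, each remaining FBPA reports its size in MiB at 0x90020c + fbpa * 0x4000, the smallest size becomes the common amount, and if any FBPA differs the layout is "mixed" and split into two mm regions (the upper one starting at 0x1000000000 + comm). A self-contained sketch of just that arithmetic, with made-up example sizes:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static void
    size_vram(const uint64_t *fbpa_mib, uint32_t fbio_opt, int fbpa_num)
    {
            uint64_t size = 0, comm = ~0ULL, part;
            bool mixed = false;
            int fbpa;

            for (fbpa = 0; fbpa < fbpa_num; fbpa++) {
                    if (fbio_opt & (1u << fbpa))    /* FBPA floorswept */
                            continue;
                    part = fbpa_mib[fbpa] << 20;    /* MiB -> bytes */
                    if (part != comm) {
                            if (comm != ~0ULL)
                                    mixed = true;   /* sizes differ */
                            if (part < comm)
                                    comm = part;    /* comm = min(comm, part) */
                    }
                    size += part;
            }

            printf("total %llu MiB, common %llu MiB per FBPA, %s\n",
                   (unsigned long long)(size >> 20),
                   (unsigned long long)(comm >> 20),
                   mixed ? "mixed (two regions)" : "uniform (one region)");
    }

    int
    main(void)
    {
            /* example sizes only, not real board data */
            const uint64_t mib[] = { 2048, 2048, 1024, 2048 };

            size_vram(mib, 0x0, 4);
            return 0;
    }
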
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild
index 932b366598aa..12d6f4f102cb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild
@@ -3,3 +3,4 @@ nvkm-y += nvkm/subdev/ltc/gf100.o
3nvkm-y += nvkm/subdev/ltc/gk104.o 3nvkm-y += nvkm/subdev/ltc/gk104.o
4nvkm-y += nvkm/subdev/ltc/gm107.o 4nvkm-y += nvkm/subdev/ltc/gm107.o
5nvkm-y += nvkm/subdev/ltc/gm200.o 5nvkm-y += nvkm/subdev/ltc/gm200.o
6nvkm-y += nvkm/subdev/ltc/gp100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
index c9eb677967a8..4a0fa0a9b802 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c
@@ -23,7 +23,6 @@
23 */ 23 */
24#include "priv.h" 24#include "priv.h"
25 25
26#include <core/enum.h>
27#include <subdev/fb.h> 26#include <subdev/fb.h>
28#include <subdev/timer.h> 27#include <subdev/timer.h>
29 28
@@ -71,7 +70,7 @@ gf100_ltc_zbc_clear_depth(struct nvkm_ltc *ltc, int i, const u32 depth)
71 nvkm_wr32(device, 0x17ea58, depth); 70 nvkm_wr32(device, 0x17ea58, depth);
72} 71}
73 72
74static const struct nvkm_bitfield 73const struct nvkm_bitfield
75gf100_ltc_lts_intr_name[] = { 74gf100_ltc_lts_intr_name[] = {
76 { 0x00000001, "IDLE_ERROR_IQ" }, 75 { 0x00000001, "IDLE_ERROR_IQ" },
77 { 0x00000002, "IDLE_ERROR_CBC" }, 76 { 0x00000002, "IDLE_ERROR_CBC" },
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
index 389fb13a1998..ec0a3844b2d1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c
@@ -68,18 +68,22 @@ gm107_ltc_zbc_clear_depth(struct nvkm_ltc *ltc, int i, const u32 depth)
68 nvkm_wr32(device, 0x17e34c, depth); 68 nvkm_wr32(device, 0x17e34c, depth);
69} 69}
70 70
71static void 71void
72gm107_ltc_intr_lts(struct nvkm_ltc *ltc, int c, int s) 72gm107_ltc_intr_lts(struct nvkm_ltc *ltc, int c, int s)
73{ 73{
74 struct nvkm_subdev *subdev = &ltc->subdev; 74 struct nvkm_subdev *subdev = &ltc->subdev;
75 struct nvkm_device *device = subdev->device; 75 struct nvkm_device *device = subdev->device;
76 u32 base = 0x140400 + (c * 0x2000) + (s * 0x200); 76 u32 base = 0x140400 + (c * 0x2000) + (s * 0x200);
77 u32 stat = nvkm_rd32(device, base + 0x00c); 77 u32 intr = nvkm_rd32(device, base + 0x00c);
78 u16 stat = intr & 0x0000ffff;
79 char msg[128];
78 80
79 if (stat) { 81 if (stat) {
80 nvkm_error(subdev, "LTC%d_LTS%d: %08x\n", c, s, stat); 82 nvkm_snprintbf(msg, sizeof(msg), gf100_ltc_lts_intr_name, stat);
81 nvkm_wr32(device, base + 0x00c, stat); 83 nvkm_error(subdev, "LTC%d_LTS%d: %08x [%s]\n", c, s, intr, msg);
82 } 84 }
85
86 nvkm_wr32(device, base + 0x00c, intr);
83} 87}
84 88
85void 89void
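
gm107_ltc_intr_lts() now keeps the full register value for the log message, decodes its low 16 status bits into names with nvkm_snprintbf() and the gf100 table this series exports, and acknowledges the interrupt unconditionally instead of only when a status bit was set. A standalone sketch of the name decoding, using only the two table entries visible in this hunk (the real table has more) and an approximation of what nvkm_snprintbf() produces:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct bitfield {
            uint32_t mask;
            const char *name;
    };

    /* first two entries of gf100_ltc_lts_intr_name; the rest are elided */
    static const struct bitfield lts_intr_names[] = {
            { 0x00000001, "IDLE_ERROR_IQ" },
            { 0x00000002, "IDLE_ERROR_CBC" },
            { 0, NULL },
    };

    /* append the name of every set bit, roughly like nvkm_snprintbf() */
    static void
    snprintbf(char *buf, size_t len, const struct bitfield *bf, uint32_t val)
    {
            buf[0] = '\0';
            for (; bf->mask; bf++) {
                    if (!(val & bf->mask))
                            continue;
                    if (buf[0])
                            strncat(buf, " ", len - strlen(buf) - 1);
                    strncat(buf, bf->name, len - strlen(buf) - 1);
            }
    }

    int
    main(void)
    {
            char msg[128];
            uint32_t intr = 0x00010003;     /* made-up interrupt value */

            snprintbf(msg, sizeof(msg), lts_intr_names, intr & 0x0000ffff);
            printf("LTC0_LTS0: %08x [%s]\n", intr, msg);
            return 0;
    }
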
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c
new file mode 100644
index 000000000000..0bdfb2f40266
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c
@@ -0,0 +1,75 @@
1/*
2 * Copyright 2016 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#include "priv.h"
25
26static void
27gp100_ltc_intr(struct nvkm_ltc *ltc)
28{
29 struct nvkm_device *device = ltc->subdev.device;
30 u32 mask;
31
32 mask = nvkm_rd32(device, 0x0001c0);
33 while (mask) {
34 u32 s, c = __ffs(mask);
35 for (s = 0; s < ltc->lts_nr; s++)
36 gm107_ltc_intr_lts(ltc, c, s);
37 mask &= ~(1 << c);
38 }
39}
40
41static int
42gp100_ltc_oneinit(struct nvkm_ltc *ltc)
43{
44 struct nvkm_device *device = ltc->subdev.device;
45 ltc->ltc_nr = nvkm_rd32(device, 0x12006c);
46 ltc->lts_nr = nvkm_rd32(device, 0x17e280) >> 28;
47 /*XXX: tagram allocation - TBD */
48 return nvkm_mm_init(&ltc->tags, 0, 0, 1);
49}
50
51static void
52gp100_ltc_init(struct nvkm_ltc *ltc)
53{
54 /*XXX: PMU LS call to setup tagram address */
55}
56
57static const struct nvkm_ltc_func
58gp100_ltc = {
59 .oneinit = gp100_ltc_oneinit,
60 .init = gp100_ltc_init,
61 .intr = gp100_ltc_intr,
62 .cbc_clear = gm107_ltc_cbc_clear,
63 .cbc_wait = gm107_ltc_cbc_wait,
64 .zbc = 16,
65 .zbc_clear_color = gm107_ltc_zbc_clear_color,
66 .zbc_clear_depth = gm107_ltc_zbc_clear_depth,
67 .invalidate = gf100_ltc_invalidate,
68 .flush = gf100_ltc_flush,
69};
70
71int
72gp100_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc)
73{
74 return nvkm_ltc_new_(&gp100_ltc, device, index, pltc);
75}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
index 6d81c695ed0d..8b95f96e3ffa 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h
@@ -2,6 +2,7 @@
2#define __NVKM_LTC_PRIV_H__ 2#define __NVKM_LTC_PRIV_H__
3#define nvkm_ltc(p) container_of((p), struct nvkm_ltc, subdev) 3#define nvkm_ltc(p) container_of((p), struct nvkm_ltc, subdev)
4#include <subdev/ltc.h> 4#include <subdev/ltc.h>
5#include <core/enum.h>
5 6
6int nvkm_ltc_new_(const struct nvkm_ltc_func *, struct nvkm_device *, 7int nvkm_ltc_new_(const struct nvkm_ltc_func *, struct nvkm_device *,
7 int index, struct nvkm_ltc **); 8 int index, struct nvkm_ltc **);
@@ -31,8 +32,10 @@ void gf100_ltc_zbc_clear_color(struct nvkm_ltc *, int, const u32[4]);
31void gf100_ltc_zbc_clear_depth(struct nvkm_ltc *, int, const u32); 32void gf100_ltc_zbc_clear_depth(struct nvkm_ltc *, int, const u32);
32void gf100_ltc_invalidate(struct nvkm_ltc *); 33void gf100_ltc_invalidate(struct nvkm_ltc *);
33void gf100_ltc_flush(struct nvkm_ltc *); 34void gf100_ltc_flush(struct nvkm_ltc *);
35extern const struct nvkm_bitfield gf100_ltc_lts_intr_name[];
34 36
35void gm107_ltc_intr(struct nvkm_ltc *); 37void gm107_ltc_intr(struct nvkm_ltc *);
38void gm107_ltc_intr_lts(struct nvkm_ltc *, int ltc, int lts);
36void gm107_ltc_cbc_clear(struct nvkm_ltc *, u32, u32); 39void gm107_ltc_cbc_clear(struct nvkm_ltc *, u32, u32);
37void gm107_ltc_cbc_wait(struct nvkm_ltc *); 40void gm107_ltc_cbc_wait(struct nvkm_ltc *);
38void gm107_ltc_zbc_clear_color(struct nvkm_ltc *, int, const u32[4]); 41void gm107_ltc_zbc_clear_color(struct nvkm_ltc *, int, const u32[4]);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
index 49695ac7be2e..12943f92c206 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild
@@ -10,3 +10,4 @@ nvkm-y += nvkm/subdev/mc/gt215.o
10nvkm-y += nvkm/subdev/mc/gf100.o 10nvkm-y += nvkm/subdev/mc/gf100.o
11nvkm-y += nvkm/subdev/mc/gk104.o 11nvkm-y += nvkm/subdev/mc/gk104.o
12nvkm-y += nvkm/subdev/mc/gk20a.o 12nvkm-y += nvkm/subdev/mc/gk20a.o
13nvkm-y += nvkm/subdev/mc/gp100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
index 350a8caa84c8..6b25e25f9eba 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c
@@ -27,43 +27,67 @@
27#include <subdev/top.h> 27#include <subdev/top.h>
28 28
29void 29void
30nvkm_mc_unk260(struct nvkm_mc *mc, u32 data) 30nvkm_mc_unk260(struct nvkm_device *device, u32 data)
31{ 31{
32 if (mc->func->unk260) 32 struct nvkm_mc *mc = device->mc;
33 if (likely(mc) && mc->func->unk260)
33 mc->func->unk260(mc, data); 34 mc->func->unk260(mc, data);
34} 35}
35 36
36void 37void
37nvkm_mc_intr_unarm(struct nvkm_mc *mc) 38nvkm_mc_intr_mask(struct nvkm_device *device, enum nvkm_devidx devidx, bool en)
38{ 39{
39 return mc->func->intr_unarm(mc); 40 struct nvkm_mc *mc = device->mc;
41 const struct nvkm_mc_map *map;
42 if (likely(mc) && mc->func->intr_mask) {
43 u32 mask = nvkm_top_intr_mask(device, devidx);
44 for (map = mc->func->intr; !mask && map->stat; map++) {
45 if (map->unit == devidx)
46 mask = map->stat;
47 }
48 mc->func->intr_mask(mc, mask, en ? mask : 0);
49 }
50}
51
52void
53nvkm_mc_intr_unarm(struct nvkm_device *device)
54{
55 struct nvkm_mc *mc = device->mc;
56 if (likely(mc))
57 mc->func->intr_unarm(mc);
40} 58}
41 59
42void 60void
43nvkm_mc_intr_rearm(struct nvkm_mc *mc) 61nvkm_mc_intr_rearm(struct nvkm_device *device)
44{ 62{
45 return mc->func->intr_rearm(mc); 63 struct nvkm_mc *mc = device->mc;
64 if (likely(mc))
65 mc->func->intr_rearm(mc);
46} 66}
47 67
48static u32 68static u32
49nvkm_mc_intr_mask(struct nvkm_mc *mc) 69nvkm_mc_intr_stat(struct nvkm_mc *mc)
50{ 70{
51 u32 intr = mc->func->intr_mask(mc); 71 u32 intr = mc->func->intr_stat(mc);
52 if (WARN_ON_ONCE(intr == 0xffffffff)) 72 if (WARN_ON_ONCE(intr == 0xffffffff))
53 intr = 0; /* likely fallen off the bus */ 73 intr = 0; /* likely fallen off the bus */
54 return intr; 74 return intr;
55} 75}
56 76
57void 77void
58nvkm_mc_intr(struct nvkm_mc *mc, bool *handled) 78nvkm_mc_intr(struct nvkm_device *device, bool *handled)
59{ 79{
60 struct nvkm_device *device = mc->subdev.device; 80 struct nvkm_mc *mc = device->mc;
61 struct nvkm_subdev *subdev; 81 struct nvkm_subdev *subdev;
62 const struct nvkm_mc_map *map = mc->func->intr; 82 const struct nvkm_mc_map *map;
63 u32 stat, intr = nvkm_mc_intr_mask(mc); 83 u32 stat, intr;
64 u64 subdevs; 84 u64 subdevs;
65 85
66 stat = nvkm_top_intr(device->top, intr, &subdevs); 86 if (unlikely(!mc))
87 return;
88
89 intr = nvkm_mc_intr_stat(mc);
90 stat = nvkm_top_intr(device, intr, &subdevs);
67 while (subdevs) { 91 while (subdevs) {
68 enum nvkm_devidx subidx = __ffs64(subdevs); 92 enum nvkm_devidx subidx = __ffs64(subdevs);
69 subdev = nvkm_device_subdev(device, subidx); 93 subdev = nvkm_device_subdev(device, subidx);
@@ -72,14 +96,13 @@ nvkm_mc_intr(struct nvkm_mc *mc, bool *handled)
72 subdevs &= ~BIT_ULL(subidx); 96 subdevs &= ~BIT_ULL(subidx);
73 } 97 }
74 98
75 while (map->stat) { 99 for (map = mc->func->intr; map->stat; map++) {
76 if (intr & map->stat) { 100 if (intr & map->stat) {
77 subdev = nvkm_device_subdev(device, map->unit); 101 subdev = nvkm_device_subdev(device, map->unit);
78 if (subdev) 102 if (subdev)
79 nvkm_subdev_intr(subdev); 103 nvkm_subdev_intr(subdev);
80 stat &= ~map->stat; 104 stat &= ~map->stat;
81 } 105 }
82 map++;
83 } 106 }
84 107
85 if (stat) 108 if (stat)
@@ -87,22 +110,32 @@ nvkm_mc_intr(struct nvkm_mc *mc, bool *handled)
87 *handled = intr != 0; 110 *handled = intr != 0;
88} 111}
89 112
90static void 113static u32
91nvkm_mc_reset_(struct nvkm_mc *mc, enum nvkm_devidx devidx) 114nvkm_mc_reset_mask(struct nvkm_device *device, bool isauto,
115 enum nvkm_devidx devidx)
92{ 116{
93 struct nvkm_device *device = mc->subdev.device; 117 struct nvkm_mc *mc = device->mc;
94 const struct nvkm_mc_map *map; 118 const struct nvkm_mc_map *map;
95 u64 pmc_enable; 119 u64 pmc_enable = 0;
96 120 if (likely(mc)) {
97 if (!(pmc_enable = nvkm_top_reset(device->top, devidx))) { 121 if (!(pmc_enable = nvkm_top_reset(device, devidx))) {
98 for (map = mc->func->reset; map && map->stat; map++) { 122 for (map = mc->func->reset; map && map->stat; map++) {
99 if (map->unit == devidx) { 123 if (!isauto || !map->noauto) {
100 pmc_enable = map->stat; 124 if (map->unit == devidx) {
101 break; 125 pmc_enable = map->stat;
126 break;
127 }
128 }
102 } 129 }
103 } 130 }
104 } 131 }
132 return pmc_enable;
133}
105 134
135void
136nvkm_mc_reset(struct nvkm_device *device, enum nvkm_devidx devidx)
137{
138 u64 pmc_enable = nvkm_mc_reset_mask(device, true, devidx);
106 if (pmc_enable) { 139 if (pmc_enable) {
107 nvkm_mask(device, 0x000200, pmc_enable, 0x00000000); 140 nvkm_mask(device, 0x000200, pmc_enable, 0x00000000);
108 nvkm_mask(device, 0x000200, pmc_enable, pmc_enable); 141 nvkm_mask(device, 0x000200, pmc_enable, pmc_enable);
@@ -111,17 +144,27 @@ nvkm_mc_reset_(struct nvkm_mc *mc, enum nvkm_devidx devidx)
111} 144}
112 145
113void 146void
114nvkm_mc_reset(struct nvkm_mc *mc, enum nvkm_devidx devidx) 147nvkm_mc_disable(struct nvkm_device *device, enum nvkm_devidx devidx)
115{ 148{
116 if (likely(mc)) 149 u64 pmc_enable = nvkm_mc_reset_mask(device, false, devidx);
117 nvkm_mc_reset_(mc, devidx); 150 if (pmc_enable)
151 nvkm_mask(device, 0x000200, pmc_enable, 0x00000000);
152}
153
154void
155nvkm_mc_enable(struct nvkm_device *device, enum nvkm_devidx devidx)
156{
157 u64 pmc_enable = nvkm_mc_reset_mask(device, false, devidx);
158 if (pmc_enable) {
159 nvkm_mask(device, 0x000200, pmc_enable, pmc_enable);
160 nvkm_rd32(device, 0x000200);
161 }
118} 162}
119 163
120static int 164static int
121nvkm_mc_fini(struct nvkm_subdev *subdev, bool suspend) 165nvkm_mc_fini(struct nvkm_subdev *subdev, bool suspend)
122{ 166{
123 struct nvkm_mc *mc = nvkm_mc(subdev); 167 nvkm_mc_intr_unarm(subdev->device);
124 nvkm_mc_intr_unarm(mc);
125 return 0; 168 return 0;
126} 169}
127 170
@@ -131,7 +174,7 @@ nvkm_mc_init(struct nvkm_subdev *subdev)
131 struct nvkm_mc *mc = nvkm_mc(subdev); 174 struct nvkm_mc *mc = nvkm_mc(subdev);
132 if (mc->func->init) 175 if (mc->func->init)
133 mc->func->init(mc); 176 mc->func->init(mc);
134 nvkm_mc_intr_rearm(mc); 177 nvkm_mc_intr_rearm(subdev->device);
135 return 0; 178 return 0;
136} 179}
137 180
@@ -148,16 +191,21 @@ nvkm_mc = {
148 .fini = nvkm_mc_fini, 191 .fini = nvkm_mc_fini,
149}; 192};
150 193
194void
195nvkm_mc_ctor(const struct nvkm_mc_func *func, struct nvkm_device *device,
196 int index, struct nvkm_mc *mc)
197{
198 nvkm_subdev_ctor(&nvkm_mc, device, index, &mc->subdev);
199 mc->func = func;
200}
201
151int 202int
152nvkm_mc_new_(const struct nvkm_mc_func *func, struct nvkm_device *device, 203nvkm_mc_new_(const struct nvkm_mc_func *func, struct nvkm_device *device,
153 int index, struct nvkm_mc **pmc) 204 int index, struct nvkm_mc **pmc)
154{ 205{
155 struct nvkm_mc *mc; 206 struct nvkm_mc *mc;
156
157 if (!(mc = *pmc = kzalloc(sizeof(*mc), GFP_KERNEL))) 207 if (!(mc = *pmc = kzalloc(sizeof(*mc), GFP_KERNEL)))
158 return -ENOMEM; 208 return -ENOMEM;
159 209 nvkm_mc_ctor(func, device, index, *pmc);
160 nvkm_subdev_ctor(&nvkm_mc, device, index, &mc->subdev);
161 mc->func = func;
162 return 0; 210 return 0;
163} 211}
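
The MC entry points switch from taking a struct nvkm_mc (which every caller had to NULL-check) to taking the struct nvkm_device and tolerating a missing mc internally. The old intr_mask hook, which returned the pending-interrupt word, is renamed intr_stat; intr_mask now means (un)masking delivery of a specific unit's interrupt, with the bit looked up from the TOP tables or, failing that, the per-chip intr map. Two new helpers, nvkm_mc_enable() and nvkm_mc_disable(), flip a unit's PMC_ENABLE bit without the full reset pulse. A sketch of how a subdev uses the new interface, mirroring the secboot conversion further down in this series (the PMU index is simply the unit secboot happens to drive):

    /* illustrative only: these are the same calls secboot switches to */
    static void
    example_falcon_power_on(struct nvkm_device *device)
    {
            nvkm_mc_enable(device, NVKM_SUBDEV_PMU);          /* set PMC_ENABLE bit */
            nvkm_mc_intr_mask(device, NVKM_SUBDEV_PMU, true); /* unmask its intr */
    }

    static void
    example_falcon_power_off(struct nvkm_device *device)
    {
            nvkm_mc_intr_mask(device, NVKM_SUBDEV_PMU, false);
            nvkm_mc_disable(device, NVKM_SUBDEV_PMU);
    }
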
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c
index 5c85b47f071d..c3d66ef5dc12 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c
@@ -57,7 +57,7 @@ g84_mc = {
57 .intr = g84_mc_intr, 57 .intr = g84_mc_intr,
58 .intr_unarm = nv04_mc_intr_unarm, 58 .intr_unarm = nv04_mc_intr_unarm,
59 .intr_rearm = nv04_mc_intr_rearm, 59 .intr_rearm = nv04_mc_intr_rearm,
60 .intr_mask = nv04_mc_intr_mask, 60 .intr_stat = nv04_mc_intr_stat,
61 .reset = g84_mc_reset, 61 .reset = g84_mc_reset,
62}; 62};
63 63
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c
index 0280b43cc10c..93ad4982ce5f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c
@@ -57,7 +57,7 @@ g98_mc = {
57 .intr = g98_mc_intr, 57 .intr = g98_mc_intr,
58 .intr_unarm = nv04_mc_intr_unarm, 58 .intr_unarm = nv04_mc_intr_unarm,
59 .intr_rearm = nv04_mc_intr_rearm, 59 .intr_rearm = nv04_mc_intr_rearm,
60 .intr_mask = nv04_mc_intr_mask, 60 .intr_stat = nv04_mc_intr_stat,
61 .reset = g98_mc_reset, 61 .reset = g98_mc_reset,
62}; 62};
63 63
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c
index 8397e223bd43..d2c4d6033abb 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c
@@ -76,7 +76,7 @@ gf100_mc_intr_rearm(struct nvkm_mc *mc)
76} 76}
77 77
78u32 78u32
79gf100_mc_intr_mask(struct nvkm_mc *mc) 79gf100_mc_intr_stat(struct nvkm_mc *mc)
80{ 80{
81 struct nvkm_device *device = mc->subdev.device; 81 struct nvkm_device *device = mc->subdev.device;
82 u32 intr0 = nvkm_rd32(device, 0x000100); 82 u32 intr0 = nvkm_rd32(device, 0x000100);
@@ -85,6 +85,14 @@ gf100_mc_intr_mask(struct nvkm_mc *mc)
85} 85}
86 86
87void 87void
88gf100_mc_intr_mask(struct nvkm_mc *mc, u32 mask, u32 stat)
89{
90 struct nvkm_device *device = mc->subdev.device;
91 nvkm_mask(device, 0x000640, mask, stat);
92 nvkm_mask(device, 0x000644, mask, stat);
93}
94
95void
88gf100_mc_unk260(struct nvkm_mc *mc, u32 data) 96gf100_mc_unk260(struct nvkm_mc *mc, u32 data)
89{ 97{
90 nvkm_wr32(mc->subdev.device, 0x000260, data); 98 nvkm_wr32(mc->subdev.device, 0x000260, data);
@@ -97,6 +105,7 @@ gf100_mc = {
97 .intr_unarm = gf100_mc_intr_unarm, 105 .intr_unarm = gf100_mc_intr_unarm,
98 .intr_rearm = gf100_mc_intr_rearm, 106 .intr_rearm = gf100_mc_intr_rearm,
99 .intr_mask = gf100_mc_intr_mask, 107 .intr_mask = gf100_mc_intr_mask,
108 .intr_stat = gf100_mc_intr_stat,
100 .reset = gf100_mc_reset, 109 .reset = gf100_mc_reset,
101 .unk260 = gf100_mc_unk260, 110 .unk260 = gf100_mc_unk260,
102}; 111};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk104.c
index 317464212c7d..7b8c6ecad1a5 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk104.c
@@ -26,6 +26,7 @@
26const struct nvkm_mc_map 26const struct nvkm_mc_map
27gk104_mc_reset[] = { 27gk104_mc_reset[] = {
28 { 0x00000100, NVKM_ENGINE_FIFO }, 28 { 0x00000100, NVKM_ENGINE_FIFO },
29 { 0x00002000, NVKM_SUBDEV_PMU, true },
29 {} 30 {}
30}; 31};
31 32
@@ -53,6 +54,7 @@ gk104_mc = {
53 .intr_unarm = gf100_mc_intr_unarm, 54 .intr_unarm = gf100_mc_intr_unarm,
54 .intr_rearm = gf100_mc_intr_rearm, 55 .intr_rearm = gf100_mc_intr_rearm,
55 .intr_mask = gf100_mc_intr_mask, 56 .intr_mask = gf100_mc_intr_mask,
57 .intr_stat = gf100_mc_intr_stat,
56 .reset = gk104_mc_reset, 58 .reset = gk104_mc_reset,
57 .unk260 = gf100_mc_unk260, 59 .unk260 = gf100_mc_unk260,
58}; 60};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c
index 60b044f517ed..ca1bf3279dbe 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c
@@ -30,6 +30,7 @@ gk20a_mc = {
30 .intr_unarm = gf100_mc_intr_unarm, 30 .intr_unarm = gf100_mc_intr_unarm,
31 .intr_rearm = gf100_mc_intr_rearm, 31 .intr_rearm = gf100_mc_intr_rearm,
32 .intr_mask = gf100_mc_intr_mask, 32 .intr_mask = gf100_mc_intr_mask,
33 .intr_stat = gf100_mc_intr_stat,
33 .reset = gk104_mc_reset, 34 .reset = gk104_mc_reset,
34}; 35};
35 36
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c
new file mode 100644
index 000000000000..4d22f4abd6de
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c
@@ -0,0 +1,103 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24#define gp100_mc(p) container_of((p), struct gp100_mc, base)
25#include "priv.h"
26
27struct gp100_mc {
28 struct nvkm_mc base;
29 spinlock_t lock;
30 bool intr;
31 u32 mask;
32};
33
34static void
35gp100_mc_intr_update(struct gp100_mc *mc)
36{
37 struct nvkm_device *device = mc->base.subdev.device;
38 u32 mask = mc->intr ? mc->mask : 0, i;
39 for (i = 0; i < 2; i++) {
40 nvkm_wr32(device, 0x000180 + (i * 0x04), ~mask);
41 nvkm_wr32(device, 0x000160 + (i * 0x04), mask);
42 }
43}
44
45static void
46gp100_mc_intr_unarm(struct nvkm_mc *base)
47{
48 struct gp100_mc *mc = gp100_mc(base);
49 unsigned long flags;
50 spin_lock_irqsave(&mc->lock, flags);
51 mc->intr = false;
52 gp100_mc_intr_update(mc);
53 spin_unlock_irqrestore(&mc->lock, flags);
54}
55
56static void
57gp100_mc_intr_rearm(struct nvkm_mc *base)
58{
59 struct gp100_mc *mc = gp100_mc(base);
60 unsigned long flags;
61 spin_lock_irqsave(&mc->lock, flags);
62 mc->intr = true;
63 gp100_mc_intr_update(mc);
64 spin_unlock_irqrestore(&mc->lock, flags);
65}
66
67static void
68gp100_mc_intr_mask(struct nvkm_mc *base, u32 mask, u32 intr)
69{
70 struct gp100_mc *mc = gp100_mc(base);
71 unsigned long flags;
72 spin_lock_irqsave(&mc->lock, flags);
73 mc->mask = (mc->mask & ~mask) | intr;
74 gp100_mc_intr_update(mc);
75 spin_unlock_irqrestore(&mc->lock, flags);
76}
77
78static const struct nvkm_mc_func
79gp100_mc = {
80 .init = nv50_mc_init,
81 .intr = gk104_mc_intr,
82 .intr_unarm = gp100_mc_intr_unarm,
83 .intr_rearm = gp100_mc_intr_rearm,
84 .intr_mask = gp100_mc_intr_mask,
85 .intr_stat = gf100_mc_intr_stat,
86 .reset = gk104_mc_reset,
87};
88
89int
90gp100_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc)
91{
92 struct gp100_mc *mc;
93
94 if (!(mc = kzalloc(sizeof(*mc), GFP_KERNEL)))
95 return -ENOMEM;
96 nvkm_mc_ctor(&gp100_mc, device, index, &mc->base);
97 *pmc = &mc->base;
98
99 spin_lock_init(&mc->lock);
100 mc->intr = false;
101 mc->mask = 0x7fffffff;
102 return 0;
103}
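
The GP100 implementation keeps the interrupt mask in software, protected by a spinlock: unarm, rearm and per-unit mask changes all funnel through gp100_mc_intr_update(), which writes mc->intr ? mc->mask : 0 to both the clear (0x180/0x184) and set (0x160/0x164) registers. The practical consequence is that mask changes made while the top-level handler has interrupts unarmed only touch the cached mask and take effect on rearm. A tiny standalone model of that interaction:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct model {
            bool intr;      /* armed? */
            uint32_t mask;  /* software copy of the mask */
    };

    /* what gp100_mc_intr_update() would program into the hardware */
    static uint32_t
    hw_mask(const struct model *m)
    {
            return m->intr ? m->mask : 0;
    }

    int
    main(void)
    {
            struct model m = { .intr = false, .mask = 0x7fffffff };

            printf("unarmed:        %08x\n", hw_mask(&m));
            m.mask &= ~(1u << 24);  /* e.g. mask one unit while unarmed */
            printf("still unarmed:  %08x\n", hw_mask(&m));
            m.intr = true;          /* rearm */
            printf("rearmed:        %08x\n", hw_mask(&m));  /* 7effffff */
            return 0;
    }
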
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gt215.c
index aad0ba95bf18..99d50a3d956f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gt215.c
@@ -53,13 +53,20 @@ gt215_mc_intr[] = {
53 {}, 53 {},
54}; 54};
55 55
56static void
57gt215_mc_intr_mask(struct nvkm_mc *mc, u32 mask, u32 stat)
58{
59 nvkm_mask(mc->subdev.device, 0x000640, mask, stat);
60}
61
56static const struct nvkm_mc_func 62static const struct nvkm_mc_func
57gt215_mc = { 63gt215_mc = {
58 .init = nv50_mc_init, 64 .init = nv50_mc_init,
59 .intr = gt215_mc_intr, 65 .intr = gt215_mc_intr,
60 .intr_unarm = nv04_mc_intr_unarm, 66 .intr_unarm = nv04_mc_intr_unarm,
61 .intr_rearm = nv04_mc_intr_rearm, 67 .intr_rearm = nv04_mc_intr_rearm,
62 .intr_mask = nv04_mc_intr_mask, 68 .intr_mask = gt215_mc_intr_mask,
69 .intr_stat = nv04_mc_intr_stat,
63 .reset = gt215_mc_reset, 70 .reset = gt215_mc_reset,
64}; 71};
65 72
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c
index a062624e906b..6509defd1460 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c
@@ -56,7 +56,7 @@ nv04_mc_intr_rearm(struct nvkm_mc *mc)
56} 56}
57 57
58u32 58u32
59nv04_mc_intr_mask(struct nvkm_mc *mc) 59nv04_mc_intr_stat(struct nvkm_mc *mc)
60{ 60{
61 return nvkm_rd32(mc->subdev.device, 0x000100); 61 return nvkm_rd32(mc->subdev.device, 0x000100);
62} 62}
@@ -75,7 +75,7 @@ nv04_mc = {
75 .intr = nv04_mc_intr, 75 .intr = nv04_mc_intr,
76 .intr_unarm = nv04_mc_intr_unarm, 76 .intr_unarm = nv04_mc_intr_unarm,
77 .intr_rearm = nv04_mc_intr_rearm, 77 .intr_rearm = nv04_mc_intr_rearm,
78 .intr_mask = nv04_mc_intr_mask, 78 .intr_stat = nv04_mc_intr_stat,
79 .reset = nv04_mc_reset, 79 .reset = nv04_mc_reset,
80}; 80};
81 81
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv11.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv11.c
index 55f0b9166b52..9213107901e6 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv11.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv11.c
@@ -39,7 +39,7 @@ nv11_mc = {
39 .intr = nv11_mc_intr, 39 .intr = nv11_mc_intr,
40 .intr_unarm = nv04_mc_intr_unarm, 40 .intr_unarm = nv04_mc_intr_unarm,
41 .intr_rearm = nv04_mc_intr_rearm, 41 .intr_rearm = nv04_mc_intr_rearm,
42 .intr_mask = nv04_mc_intr_mask, 42 .intr_stat = nv04_mc_intr_stat,
43 .reset = nv04_mc_reset, 43 .reset = nv04_mc_reset,
44}; 44};
45 45
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv17.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv17.c
index c40fa67f79a5..64bf5bbf8146 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv17.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv17.c
@@ -48,7 +48,7 @@ nv17_mc = {
48 .intr = nv17_mc_intr, 48 .intr = nv17_mc_intr,
49 .intr_unarm = nv04_mc_intr_unarm, 49 .intr_unarm = nv04_mc_intr_unarm,
50 .intr_rearm = nv04_mc_intr_rearm, 50 .intr_rearm = nv04_mc_intr_rearm,
51 .intr_mask = nv04_mc_intr_mask, 51 .intr_stat = nv04_mc_intr_stat,
52 .reset = nv17_mc_reset, 52 .reset = nv17_mc_reset,
53}; 53};
54 54
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c
index cc56271db564..65fa44a64b98 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c
@@ -43,7 +43,7 @@ nv44_mc = {
43 .intr = nv17_mc_intr, 43 .intr = nv17_mc_intr,
44 .intr_unarm = nv04_mc_intr_unarm, 44 .intr_unarm = nv04_mc_intr_unarm,
45 .intr_rearm = nv04_mc_intr_rearm, 45 .intr_rearm = nv04_mc_intr_rearm,
46 .intr_mask = nv04_mc_intr_mask, 46 .intr_stat = nv04_mc_intr_stat,
47 .reset = nv17_mc_reset, 47 .reset = nv17_mc_reset,
48}; 48};
49 49
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c
index 343b6078580d..fe93b4fd7100 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c
@@ -50,7 +50,7 @@ nv50_mc = {
50 .intr = nv50_mc_intr, 50 .intr = nv50_mc_intr,
51 .intr_unarm = nv04_mc_intr_unarm, 51 .intr_unarm = nv04_mc_intr_unarm,
52 .intr_rearm = nv04_mc_intr_rearm, 52 .intr_rearm = nv04_mc_intr_rearm,
53 .intr_mask = nv04_mc_intr_mask, 53 .intr_stat = nv04_mc_intr_stat,
54 .reset = nv17_mc_reset, 54 .reset = nv17_mc_reset,
55}; 55};
56 56
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
index a12038118512..4f0576a06d24 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h
@@ -3,12 +3,15 @@
3#define nvkm_mc(p) container_of((p), struct nvkm_mc, subdev) 3#define nvkm_mc(p) container_of((p), struct nvkm_mc, subdev)
4#include <subdev/mc.h> 4#include <subdev/mc.h>
5 5
6void nvkm_mc_ctor(const struct nvkm_mc_func *, struct nvkm_device *,
7 int index, struct nvkm_mc *);
6int nvkm_mc_new_(const struct nvkm_mc_func *, struct nvkm_device *, 8int nvkm_mc_new_(const struct nvkm_mc_func *, struct nvkm_device *,
7 int index, struct nvkm_mc **); 9 int index, struct nvkm_mc **);
8 10
9struct nvkm_mc_map { 11struct nvkm_mc_map {
10 u32 stat; 12 u32 stat;
11 u32 unit; 13 u32 unit;
14 bool noauto;
12}; 15};
13 16
14struct nvkm_mc_func { 17struct nvkm_mc_func {
@@ -18,8 +21,10 @@ struct nvkm_mc_func {
18 void (*intr_unarm)(struct nvkm_mc *); 21 void (*intr_unarm)(struct nvkm_mc *);
19 /* enable reporting of interrupts to host */ 22 /* enable reporting of interrupts to host */
20 void (*intr_rearm)(struct nvkm_mc *); 23 void (*intr_rearm)(struct nvkm_mc *);
24 /* (un)mask delivery of specific interrupts */
25 void (*intr_mask)(struct nvkm_mc *, u32 mask, u32 stat);
21 /* retrieve pending interrupt mask (NV_PMC_INTR) */ 26 /* retrieve pending interrupt mask (NV_PMC_INTR) */
22 u32 (*intr_mask)(struct nvkm_mc *); 27 u32 (*intr_stat)(struct nvkm_mc *);
23 const struct nvkm_mc_map *reset; 28 const struct nvkm_mc_map *reset;
24 void (*unk260)(struct nvkm_mc *, u32); 29 void (*unk260)(struct nvkm_mc *, u32);
25}; 30};
@@ -27,7 +32,7 @@ struct nvkm_mc_func {
27void nv04_mc_init(struct nvkm_mc *); 32void nv04_mc_init(struct nvkm_mc *);
28void nv04_mc_intr_unarm(struct nvkm_mc *); 33void nv04_mc_intr_unarm(struct nvkm_mc *);
29void nv04_mc_intr_rearm(struct nvkm_mc *); 34void nv04_mc_intr_rearm(struct nvkm_mc *);
30u32 nv04_mc_intr_mask(struct nvkm_mc *); 35u32 nv04_mc_intr_stat(struct nvkm_mc *);
31extern const struct nvkm_mc_map nv04_mc_reset[]; 36extern const struct nvkm_mc_map nv04_mc_reset[];
32 37
33extern const struct nvkm_mc_map nv17_mc_intr[]; 38extern const struct nvkm_mc_map nv17_mc_intr[];
@@ -39,7 +44,8 @@ void nv50_mc_init(struct nvkm_mc *);
39 44
40void gf100_mc_intr_unarm(struct nvkm_mc *); 45void gf100_mc_intr_unarm(struct nvkm_mc *);
41void gf100_mc_intr_rearm(struct nvkm_mc *); 46void gf100_mc_intr_rearm(struct nvkm_mc *);
42u32 gf100_mc_intr_mask(struct nvkm_mc *); 47void gf100_mc_intr_mask(struct nvkm_mc *, u32, u32);
48u32 gf100_mc_intr_stat(struct nvkm_mc *);
43void gf100_mc_unk260(struct nvkm_mc *, u32); 49void gf100_mc_unk260(struct nvkm_mc *, u32);
44 50
45extern const struct nvkm_mc_map gk104_mc_intr[]; 51extern const struct nvkm_mc_map gk104_mc_intr[];
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild
index 3c2519fdeb81..2a31b7d66a6d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild
@@ -10,3 +10,4 @@ nvkm-y += nvkm/subdev/pci/g94.o
10nvkm-y += nvkm/subdev/pci/gf100.o 10nvkm-y += nvkm/subdev/pci/gf100.o
11nvkm-y += nvkm/subdev/pci/gf106.o 11nvkm-y += nvkm/subdev/pci/gf106.o
12nvkm-y += nvkm/subdev/pci/gk104.o 12nvkm-y += nvkm/subdev/pci/gk104.o
13nvkm-y += nvkm/subdev/pci/gp100.o
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
index 6b0328bd7eed..eb9b278198b2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
@@ -69,15 +69,13 @@ static irqreturn_t
69nvkm_pci_intr(int irq, void *arg) 69nvkm_pci_intr(int irq, void *arg)
70{ 70{
71 struct nvkm_pci *pci = arg; 71 struct nvkm_pci *pci = arg;
72 struct nvkm_mc *mc = pci->subdev.device->mc; 72 struct nvkm_device *device = pci->subdev.device;
73 bool handled = false; 73 bool handled = false;
74 if (likely(mc)) { 74 nvkm_mc_intr_unarm(device);
75 nvkm_mc_intr_unarm(mc); 75 if (pci->msi)
76 if (pci->msi) 76 pci->func->msi_rearm(pci);
77 pci->func->msi_rearm(pci); 77 nvkm_mc_intr(device, &handled);
78 nvkm_mc_intr(mc, &handled); 78 nvkm_mc_intr_rearm(device);
79 nvkm_mc_intr_rearm(mc);
80 }
81 return handled ? IRQ_HANDLED : IRQ_NONE; 79 return handled ? IRQ_HANDLED : IRQ_NONE;
82} 80}
83 81
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c
new file mode 100644
index 000000000000..82c5234a06ff
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c
@@ -0,0 +1,44 @@
1/*
2 * Copyright 2015 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24#include "priv.h"
25
26static void
27gp100_pci_msi_rearm(struct nvkm_pci *pci)
28{
29 nvkm_pci_wr32(pci, 0x0704, 0x00000000);
30}
31
32static const struct nvkm_pci_func
33gp100_pci_func = {
34 .rd32 = nv40_pci_rd32,
35 .wr08 = nv40_pci_wr08,
36 .wr32 = nv40_pci_wr32,
37 .msi_rearm = gp100_pci_msi_rearm,
38};
39
40int
41gp100_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci)
42{
43 return nvkm_pci_new_(&gp100_pci_func, device, index, ppci);
44}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c
index 213fdba6cfa0..314be2192b7d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c
@@ -19,8 +19,9 @@
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE. 20 * DEALINGS IN THE SOFTWARE.
21 */ 21 */
22
23#include "priv.h" 22#include "priv.h"
23
24#include <subdev/mc.h>
24#include <subdev/timer.h> 25#include <subdev/timer.h>
25 26
26static const char * 27static const char *
@@ -70,12 +71,11 @@ nvkm_secboot_falcon_enable(struct nvkm_secboot *sb)
70 int ret; 71 int ret;
71 72
72 /* enable engine */ 73 /* enable engine */
73 nvkm_mask(device, 0x200, sb->enable_mask, sb->enable_mask); 74 nvkm_mc_enable(device, sb->devidx);
74 nvkm_rd32(device, 0x200);
75 ret = nvkm_wait_msec(device, 10, sb->base + 0x10c, 0x6, 0x0); 75 ret = nvkm_wait_msec(device, 10, sb->base + 0x10c, 0x6, 0x0);
76 if (ret < 0) { 76 if (ret < 0) {
77 nvkm_mask(device, 0x200, sb->enable_mask, 0x0);
78 nvkm_error(&sb->subdev, "Falcon mem scrubbing timeout\n"); 77 nvkm_error(&sb->subdev, "Falcon mem scrubbing timeout\n");
78 nvkm_mc_disable(device, sb->devidx);
79 return ret; 79 return ret;
80 } 80 }
81 81
@@ -85,8 +85,7 @@ nvkm_secboot_falcon_enable(struct nvkm_secboot *sb)
85 85
86 /* enable IRQs */ 86 /* enable IRQs */
87 nvkm_wr32(device, sb->base + 0x010, 0xff); 87 nvkm_wr32(device, sb->base + 0x010, 0xff);
88 nvkm_mask(device, 0x640, sb->irq_mask, sb->irq_mask); 88 nvkm_mc_intr_mask(device, sb->devidx, true);
89 nvkm_mask(device, 0x644, sb->irq_mask, sb->irq_mask);
90 89
91 return 0; 90 return 0;
92} 91}
@@ -97,14 +96,13 @@ nvkm_secboot_falcon_disable(struct nvkm_secboot *sb)
97 struct nvkm_device *device = sb->subdev.device; 96 struct nvkm_device *device = sb->subdev.device;
98 97
99 /* disable IRQs and wait for any previous code to complete */ 98 /* disable IRQs and wait for any previous code to complete */
100 nvkm_mask(device, 0x644, sb->irq_mask, 0x0); 99 nvkm_mc_intr_mask(device, sb->devidx, false);
101 nvkm_mask(device, 0x640, sb->irq_mask, 0x0);
102 nvkm_wr32(device, sb->base + 0x014, 0xff); 100 nvkm_wr32(device, sb->base + 0x014, 0xff);
103 101
104 falcon_wait_idle(device, sb->base); 102 falcon_wait_idle(device, sb->base);
105 103
106 /* disable engine */ 104 /* disable engine */
107 nvkm_mask(device, 0x200, sb->enable_mask, 0x0); 105 nvkm_mc_disable(device, sb->devidx);
108 106
109 return 0; 107 return 0;
110} 108}
@@ -216,14 +214,7 @@ nvkm_secboot_oneinit(struct nvkm_subdev *subdev)
216 return ret; 214 return ret;
217 } 215 }
218 216
219 /* 217 return 0;
220 * Build all blobs - the same blobs can be used to perform secure boot
221 * multiple times
222 */
223 if (sb->func->prepare_blobs)
224 ret = sb->func->prepare_blobs(sb);
225
226 return ret;
227} 218}
228 219
229static int 220static int
@@ -270,9 +261,8 @@ nvkm_secboot_ctor(const struct nvkm_secboot_func *func,
270 /* setup the performing falcon's base address and masks */ 261 /* setup the performing falcon's base address and masks */
271 switch (func->boot_falcon) { 262 switch (func->boot_falcon) {
272 case NVKM_SECBOOT_FALCON_PMU: 263 case NVKM_SECBOOT_FALCON_PMU:
264 sb->devidx = NVKM_SUBDEV_PMU;
273 sb->base = 0x10a000; 265 sb->base = 0x10a000;
274 sb->irq_mask = 0x1000000;
275 sb->enable_mask = 0x2000;
276 break; 266 break;
277 default: 267 default:
278 nvkm_error(&sb->subdev, "invalid secure boot falcon\n"); 268 nvkm_error(&sb->subdev, "invalid secure boot falcon\n");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
index cc100dc940ea..f1e2dc914366 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c
@@ -860,6 +860,8 @@ gm200_secboot_prepare_ls_blob(struct gm200_secboot *gsb)
860 860
861 /* Write LS blob */ 861 /* Write LS blob */
862 ret = ls_ucode_mgr_write_wpr(gsb, &mgr, gsb->ls_blob); 862 ret = ls_ucode_mgr_write_wpr(gsb, &mgr, gsb->ls_blob);
863 if (ret)
864 nvkm_gpuobj_del(&gsb->ls_blob);
863 865
864cleanup: 866cleanup:
865 ls_ucode_mgr_cleanup(&mgr); 867 ls_ucode_mgr_cleanup(&mgr);
@@ -1023,29 +1025,34 @@ gm20x_secboot_prepare_blobs(struct gm200_secboot *gsb)
1023 int ret; 1025 int ret;
1024 1026
1025 /* Load and prepare the managed falcon's firmwares */ 1027 /* Load and prepare the managed falcon's firmwares */
1026 ret = gm200_secboot_prepare_ls_blob(gsb); 1028 if (!gsb->ls_blob) {
1027 if (ret) 1029 ret = gm200_secboot_prepare_ls_blob(gsb);
1028 return ret; 1030 if (ret)
1031 return ret;
1032 }
1029 1033
1030 /* Load the HS firmware that will load the LS firmwares */ 1034 /* Load the HS firmware that will load the LS firmwares */
1031 ret = gm200_secboot_prepare_hs_blob(gsb, "acr/ucode_load", 1035 if (!gsb->acr_load_blob) {
1032 &gsb->acr_load_blob, 1036 ret = gm200_secboot_prepare_hs_blob(gsb, "acr/ucode_load",
1033 &gsb->acr_load_bl_desc, true); 1037 &gsb->acr_load_blob,
1034 if (ret) 1038 &gsb->acr_load_bl_desc, true);
1035 return ret; 1039 if (ret)
1040 return ret;
1041 }
1036 1042
1037 /* Load the HS firmware bootloader */ 1043 /* Load the HS firmware bootloader */
1038 ret = gm200_secboot_prepare_hsbl_blob(gsb); 1044 if (!gsb->hsbl_blob) {
1039 if (ret) 1045 ret = gm200_secboot_prepare_hsbl_blob(gsb);
1040 return ret; 1046 if (ret)
1047 return ret;
1048 }
1041 1049
1042 return 0; 1050 return 0;
1043} 1051}
1044 1052
1045static int 1053static int
1046gm200_secboot_prepare_blobs(struct nvkm_secboot *sb) 1054gm200_secboot_prepare_blobs(struct gm200_secboot *gsb)
1047{ 1055{
1048 struct gm200_secboot *gsb = gm200_secboot(sb);
1049 int ret; 1056 int ret;
1050 1057
1051 ret = gm20x_secboot_prepare_blobs(gsb); 1058 ret = gm20x_secboot_prepare_blobs(gsb);
@@ -1053,15 +1060,37 @@ gm200_secboot_prepare_blobs(struct nvkm_secboot *sb)
1053 return ret; 1060 return ret;
1054 1061
1055 /* dGPU only: load the HS firmware that unprotects the WPR region */ 1062 /* dGPU only: load the HS firmware that unprotects the WPR region */
1056 ret = gm200_secboot_prepare_hs_blob(gsb, "acr/ucode_unload", 1063 if (!gsb->acr_unload_blob) {
1057 &gsb->acr_unload_blob, 1064 ret = gm200_secboot_prepare_hs_blob(gsb, "acr/ucode_unload",
1058 &gsb->acr_unload_bl_desc, false); 1065 &gsb->acr_unload_blob,
1059 if (ret) 1066 &gsb->acr_unload_bl_desc, false);
1060 return ret; 1067 if (ret)
1068 return ret;
1069 }
1061 1070
1062 return 0; 1071 return 0;
1063} 1072}
1064 1073
1074static int
1075gm200_secboot_blobs_ready(struct gm200_secboot *gsb)
1076{
1077 struct nvkm_subdev *subdev = &gsb->base.subdev;
1078 int ret;
1079
1080 /* firmware already loaded, nothing to do... */
1081 if (gsb->firmware_ok)
1082 return 0;
1083
1084 ret = gsb->func->prepare_blobs(gsb);
1085 if (ret) {
1086 nvkm_error(subdev, "failed to load secure firmware\n");
1087 return ret;
1088 }
1089
1090 gsb->firmware_ok = true;
1091
1092 return 0;
1093}
1065 1094
1066 1095
1067/* 1096/*
@@ -1234,6 +1263,11 @@ gm200_secboot_reset(struct nvkm_secboot *sb, enum nvkm_secboot_falcon falcon)
1234 struct gm200_secboot *gsb = gm200_secboot(sb); 1263 struct gm200_secboot *gsb = gm200_secboot(sb);
1235 int ret; 1264 int ret;
1236 1265
1266 /* Make sure all blobs are ready */
1267 ret = gm200_secboot_blobs_ready(gsb);
1268 if (ret)
1269 return ret;
1270
1237 /* 1271 /*
1238 * Dummy GM200 implementation: perform secure boot each time we are 1272 * Dummy GM200 implementation: perform secure boot each time we are
1239 * called on FECS. Since only FECS and GPCCS are managed and started 1273 * called on FECS. Since only FECS and GPCCS are managed and started
@@ -1373,7 +1407,6 @@ gm200_secboot = {
1373 .dtor = gm200_secboot_dtor, 1407 .dtor = gm200_secboot_dtor,
1374 .init = gm200_secboot_init, 1408 .init = gm200_secboot_init,
1375 .fini = gm200_secboot_fini, 1409 .fini = gm200_secboot_fini,
1376 .prepare_blobs = gm200_secboot_prepare_blobs,
1377 .reset = gm200_secboot_reset, 1410 .reset = gm200_secboot_reset,
1378 .start = gm200_secboot_start, 1411 .start = gm200_secboot_start,
1379 .managed_falcons = BIT(NVKM_SECBOOT_FALCON_FECS) | 1412 .managed_falcons = BIT(NVKM_SECBOOT_FALCON_FECS) |
@@ -1415,6 +1448,7 @@ gm200_secboot_func = {
1415 .bl_desc_size = sizeof(struct gm200_flcn_bl_desc), 1448 .bl_desc_size = sizeof(struct gm200_flcn_bl_desc),
1416 .fixup_bl_desc = gm200_secboot_fixup_bl_desc, 1449 .fixup_bl_desc = gm200_secboot_fixup_bl_desc,
1417 .fixup_hs_desc = gm200_secboot_fixup_hs_desc, 1450 .fixup_hs_desc = gm200_secboot_fixup_hs_desc,
1451 .prepare_blobs = gm200_secboot_prepare_blobs,
1418}; 1452};
1419 1453
1420int 1454int
@@ -1487,3 +1521,19 @@ MODULE_FIRMWARE("nvidia/gm206/gr/sw_ctx.bin");
1487MODULE_FIRMWARE("nvidia/gm206/gr/sw_nonctx.bin"); 1521MODULE_FIRMWARE("nvidia/gm206/gr/sw_nonctx.bin");
1488MODULE_FIRMWARE("nvidia/gm206/gr/sw_bundle_init.bin"); 1522MODULE_FIRMWARE("nvidia/gm206/gr/sw_bundle_init.bin");
1489MODULE_FIRMWARE("nvidia/gm206/gr/sw_method_init.bin"); 1523MODULE_FIRMWARE("nvidia/gm206/gr/sw_method_init.bin");
1524
1525MODULE_FIRMWARE("nvidia/gp100/acr/bl.bin");
1526MODULE_FIRMWARE("nvidia/gp100/acr/ucode_load.bin");
1527MODULE_FIRMWARE("nvidia/gp100/acr/ucode_unload.bin");
1528MODULE_FIRMWARE("nvidia/gp100/gr/fecs_bl.bin");
1529MODULE_FIRMWARE("nvidia/gp100/gr/fecs_inst.bin");
1530MODULE_FIRMWARE("nvidia/gp100/gr/fecs_data.bin");
1531MODULE_FIRMWARE("nvidia/gp100/gr/fecs_sig.bin");
1532MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_bl.bin");
1533MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_inst.bin");
1534MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_data.bin");
1535MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_sig.bin");
1536MODULE_FIRMWARE("nvidia/gp100/gr/sw_ctx.bin");
1537MODULE_FIRMWARE("nvidia/gp100/gr/sw_nonctx.bin");
1538MODULE_FIRMWARE("nvidia/gp100/gr/sw_bundle_init.bin");
1539MODULE_FIRMWARE("nvidia/gp100/gr/sw_method_init.bin");
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
index 684320484b70..d5395ebfe8d3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c
@@ -42,6 +42,32 @@ struct gm20b_flcn_bl_desc {
42 u32 data_size; 42 u32 data_size;
43}; 43};
44 44
45static int
46gm20b_secboot_prepare_blobs(struct gm200_secboot *gsb)
47{
48 struct nvkm_subdev *subdev = &gsb->base.subdev;
49 int acr_size;
50 int ret;
51
52 ret = gm20x_secboot_prepare_blobs(gsb);
53 if (ret)
54 return ret;
55
56 acr_size = gsb->acr_load_blob->size;
57 /*
58 * On Tegra the WPR region is set by the bootloader. It is illegal for
59 * the HS blob to be larger than this region.
60 */
61 if (acr_size > gsb->wpr_size) {
62 nvkm_error(subdev, "WPR region too small for FW blob!\n");
63 nvkm_error(subdev, "required: %dB\n", acr_size);
64 nvkm_error(subdev, "WPR size: %dB\n", gsb->wpr_size);
65 return -ENOSPC;
66 }
67
68 return 0;
69}
70
45/** 71/**
46 * gm20b_secboot_fixup_bl_desc - adapt BL descriptor to format used by GM20B FW 72 * gm20b_secboot_fixup_bl_desc - adapt BL descriptor to format used by GM20B FW
47 * 73 *
@@ -88,6 +114,7 @@ gm20b_secboot_func = {
88 .bl_desc_size = sizeof(struct gm20b_flcn_bl_desc), 114 .bl_desc_size = sizeof(struct gm20b_flcn_bl_desc),
89 .fixup_bl_desc = gm20b_secboot_fixup_bl_desc, 115 .fixup_bl_desc = gm20b_secboot_fixup_bl_desc,
90 .fixup_hs_desc = gm20b_secboot_fixup_hs_desc, 116 .fixup_hs_desc = gm20b_secboot_fixup_hs_desc,
117 .prepare_blobs = gm20b_secboot_prepare_blobs,
91}; 118};
92 119
93 120
@@ -147,32 +174,6 @@ gm20b_tegra_read_wpr(struct gm200_secboot *gsb)
147#endif 174#endif
148 175
149static int 176static int
150gm20b_secboot_prepare_blobs(struct nvkm_secboot *sb)
151{
152 struct gm200_secboot *gsb = gm200_secboot(sb);
153 int acr_size;
154 int ret;
155
156 ret = gm20x_secboot_prepare_blobs(gsb);
157 if (ret)
158 return ret;
159
160 acr_size = gsb->acr_load_blob->size;
161 /*
162 * On Tegra the WPR region is set by the bootloader. It is illegal for
163 * the HS blob to be larger than this region.
164 */
165 if (acr_size > gsb->wpr_size) {
166 nvkm_error(&sb->subdev, "WPR region too small for FW blob!\n");
167 nvkm_error(&sb->subdev, "required: %dB\n", acr_size);
168 nvkm_error(&sb->subdev, "WPR size: %dB\n", gsb->wpr_size);
169 return -ENOSPC;
170 }
171
172 return 0;
173}
174
175static int
176gm20b_secboot_init(struct nvkm_secboot *sb) 177gm20b_secboot_init(struct nvkm_secboot *sb)
177{ 178{
178 struct gm200_secboot *gsb = gm200_secboot(sb); 179 struct gm200_secboot *gsb = gm200_secboot(sb);
@@ -189,7 +190,6 @@ static const struct nvkm_secboot_func
189gm20b_secboot = { 190gm20b_secboot = {
190 .dtor = gm200_secboot_dtor, 191 .dtor = gm200_secboot_dtor,
191 .init = gm20b_secboot_init, 192 .init = gm20b_secboot_init,
192 .prepare_blobs = gm20b_secboot_prepare_blobs,
193 .reset = gm200_secboot_reset, 193 .reset = gm200_secboot_reset,
194 .start = gm200_secboot_start, 194 .start = gm200_secboot_start,
195 .managed_falcons = BIT(NVKM_SECBOOT_FALCON_FECS), 195 .managed_falcons = BIT(NVKM_SECBOOT_FALCON_FECS),
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
index f2b09dee7c5d..a9a8a0e1017e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h
@@ -30,7 +30,6 @@ struct nvkm_secboot_func {
30 int (*init)(struct nvkm_secboot *); 30 int (*init)(struct nvkm_secboot *);
31 int (*fini)(struct nvkm_secboot *, bool suspend); 31 int (*fini)(struct nvkm_secboot *, bool suspend);
32 void *(*dtor)(struct nvkm_secboot *); 32 void *(*dtor)(struct nvkm_secboot *);
33 int (*prepare_blobs)(struct nvkm_secboot *);
34 int (*reset)(struct nvkm_secboot *, enum nvkm_secboot_falcon); 33 int (*reset)(struct nvkm_secboot *, enum nvkm_secboot_falcon);
35 int (*start)(struct nvkm_secboot *, enum nvkm_secboot_falcon); 34 int (*start)(struct nvkm_secboot *, enum nvkm_secboot_falcon);
36 35
@@ -147,10 +146,8 @@ struct hsflcn_acr_desc {
147 * @inst: instance block for HS falcon 146 * @inst: instance block for HS falcon
148 * @pgd: page directory for the HS falcon 147 * @pgd: page directory for the HS falcon
149 * @vm: address space used by the HS falcon 148 * @vm: address space used by the HS falcon
150 * @bl_desc_size: size of the BL descriptor used by this chip. 149 * @falcon_state: current state of the managed falcons
151 * @fixup_bl_desc: hook that generates the proper BL descriptor format from 150 * @firmware_ok: whether the firmware blobs have been created
152 * the generic GM200 format into a data array of size
153 * bl_desc_size
154 */ 151 */
155struct gm200_secboot { 152struct gm200_secboot {
156 struct nvkm_secboot base; 153 struct nvkm_secboot base;
@@ -196,9 +193,19 @@ struct gm200_secboot {
196 RUNNING, 193 RUNNING,
197 } falcon_state[NVKM_SECBOOT_FALCON_END]; 194 } falcon_state[NVKM_SECBOOT_FALCON_END];
198 195
196 bool firmware_ok;
199}; 197};
200#define gm200_secboot(sb) container_of(sb, struct gm200_secboot, base) 198#define gm200_secboot(sb) container_of(sb, struct gm200_secboot, base)
201 199
200/**
201 * Contains functions we wish to abstract between GM200-like implementations
202 * @bl_desc_size: size of the BL descriptor used by this chip.
203 * @fixup_bl_desc: hook that generates the proper BL descriptor format from
204 * the generic GM200 format into a data array of size
205 * bl_desc_size
206 * @fixup_hs_desc: hook that twiddles the HS descriptor before it is used
207 * @prepare_blobs: prepares the various blobs needed for secure booting
208 */
202struct gm200_secboot_func { 209struct gm200_secboot_func {
203 /* 210 /*
204 * Size of the bootloader descriptor for this chip. A block of this 211 * Size of the bootloader descriptor for this chip. A block of this
@@ -214,6 +221,7 @@ struct gm200_secboot_func {
214 * we want the HS FW to set up. 221 * we want the HS FW to set up.
215 */ 222 */
216 void (*fixup_hs_desc)(struct gm200_secboot *, struct hsflcn_acr_desc *); 223 void (*fixup_hs_desc)(struct gm200_secboot *, struct hsflcn_acr_desc *);
224 int (*prepare_blobs)(struct gm200_secboot *);
217}; 225};
218 226
219int gm200_secboot_init(struct nvkm_secboot *); 227int gm200_secboot_init(struct nvkm_secboot *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c
index a1b264664aad..fe063d5728e2 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c
@@ -41,8 +41,9 @@ nvkm_top_device_new(struct nvkm_top *top)
41} 41}
42 42
43u32 43u32
44nvkm_top_reset(struct nvkm_top *top, enum nvkm_devidx index) 44nvkm_top_reset(struct nvkm_device *device, enum nvkm_devidx index)
45{ 45{
46 struct nvkm_top *top = device->top;
46 struct nvkm_top_device *info; 47 struct nvkm_top_device *info;
47 48
48 if (top) { 49 if (top) {
@@ -56,8 +57,25 @@ nvkm_top_reset(struct nvkm_top *top, enum nvkm_devidx index)
56} 57}
57 58
58u32 59u32
59nvkm_top_intr(struct nvkm_top *top, u32 intr, u64 *psubdevs) 60nvkm_top_intr_mask(struct nvkm_device *device, enum nvkm_devidx devidx)
60{ 61{
62 struct nvkm_top *top = device->top;
63 struct nvkm_top_device *info;
64
65 if (top) {
66 list_for_each_entry(info, &top->device, head) {
67 if (info->index == devidx && info->intr >= 0)
68 return BIT(info->intr);
69 }
70 }
71
72 return 0;
73}
74
75u32
76nvkm_top_intr(struct nvkm_device *device, u32 intr, u64 *psubdevs)
77{
78 struct nvkm_top *top = device->top;
61 struct nvkm_top_device *info; 79 struct nvkm_top_device *info;
62 u64 subdevs = 0; 80 u64 subdevs = 0;
63 u32 handled = 0; 81 u32 handled = 0;
@@ -78,8 +96,9 @@ nvkm_top_intr(struct nvkm_top *top, u32 intr, u64 *psubdevs)
78} 96}
79 97
80enum nvkm_devidx 98enum nvkm_devidx
81nvkm_top_fault(struct nvkm_top *top, int fault) 99nvkm_top_fault(struct nvkm_device *device, int fault)
82{ 100{
101 struct nvkm_top *top = device->top;
83 struct nvkm_top_device *info; 102 struct nvkm_top_device *info;
84 103
85 list_for_each_entry(info, &top->device, head) { 104 list_for_each_entry(info, &top->device, head) {
@@ -91,8 +110,9 @@ nvkm_top_fault(struct nvkm_top *top, int fault)
91} 110}
92 111
93enum nvkm_devidx 112enum nvkm_devidx
94nvkm_top_engine(struct nvkm_top *top, int index, int *runl, int *engn) 113nvkm_top_engine(struct nvkm_device *device, int index, int *runl, int *engn)
95{ 114{
115 struct nvkm_top *top = device->top;
96 struct nvkm_top_device *info; 116 struct nvkm_top_device *info;
97 int n = 0; 117 int n = 0;
98 118
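
The new nvkm_top_intr_mask() walks the parsed PTOP device list and returns the interrupt bit for a given device index, or 0 if the device has no interrupt line. A minimal standalone C sketch of that lookup pattern, with a fixed array and made-up indices standing in for the kernel's topology list:

#include <stdint.h>
#include <stdio.h>

struct top_device {
	int index;   /* device/engine identifier */
	int intr;    /* interrupt line, or -1 if none */
};

/* Return the interrupt mask bit for @index, or 0 if it has none. */
static uint32_t top_intr_mask(const struct top_device *devs, int ndevs, int index)
{
	for (int i = 0; i < ndevs; i++) {
		if (devs[i].index == index && devs[i].intr >= 0)
			return 1u << devs[i].intr;
	}
	return 0;
}

int main(void)
{
	/* hypothetical topology: two engines, one without an interrupt line */
	const struct top_device devs[] = {
		{ .index = 1, .intr = 12 },
		{ .index = 2, .intr = -1 },
	};

	printf("mask for index 1: 0x%08x\n", (unsigned)top_intr_mask(devs, 2, 1)); /* 0x00001000 */
	printf("mask for index 2: 0x%08x\n", (unsigned)top_intr_mask(devs, 2, 2)); /* 0x00000000 */
	return 0;
}
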
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
index e06acc340e99..efac3402f9dd 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c
@@ -29,7 +29,7 @@ gk104_top_oneinit(struct nvkm_top *top)
29 struct nvkm_subdev *subdev = &top->subdev; 29 struct nvkm_subdev *subdev = &top->subdev;
30 struct nvkm_device *device = subdev->device; 30 struct nvkm_device *device = subdev->device;
31 struct nvkm_top_device *info = NULL; 31 struct nvkm_top_device *info = NULL;
32 u32 data, type; 32 u32 data, type, inst;
33 int i; 33 int i;
34 34
35 for (i = 0; i < 64; i++) { 35 for (i = 0; i < 64; i++) {
@@ -37,6 +37,7 @@ gk104_top_oneinit(struct nvkm_top *top)
37 if (!(info = nvkm_top_device_new(top))) 37 if (!(info = nvkm_top_device_new(top)))
38 return -ENOMEM; 38 return -ENOMEM;
39 type = ~0; 39 type = ~0;
40 inst = 0;
40 } 41 }
41 42
42 data = nvkm_rd32(device, 0x022700 + (i * 0x04)); 43 data = nvkm_rd32(device, 0x022700 + (i * 0x04));
@@ -45,6 +46,7 @@ gk104_top_oneinit(struct nvkm_top *top)
45 case 0x00000000: /* NOT_VALID */ 46 case 0x00000000: /* NOT_VALID */
46 continue; 47 continue;
47 case 0x00000001: /* DATA */ 48 case 0x00000001: /* DATA */
49 inst = (data & 0x3c000000) >> 26;
48 info->addr = (data & 0x00fff000); 50 info->addr = (data & 0x00fff000);
49 info->fault = (data & 0x000000f8) >> 3; 51 info->fault = (data & 0x000000f8) >> 3;
50 break; 52 break;
@@ -67,27 +69,32 @@ gk104_top_oneinit(struct nvkm_top *top)
67 continue; 69 continue;
68 70
69 /* Translate engine type to NVKM engine identifier. */ 71 /* Translate engine type to NVKM engine identifier. */
72#define A_(A) if (inst == 0) info->index = NVKM_ENGINE_##A
73#define B_(A) if (inst + NVKM_ENGINE_##A##0 < NVKM_ENGINE_##A##_LAST + 1) \
74 info->index = NVKM_ENGINE_##A##0 + inst
70 switch (type) { 75 switch (type) {
71 case 0x00000000: info->index = NVKM_ENGINE_GR; break; 76 case 0x00000000: A_(GR ); break;
72 case 0x00000001: info->index = NVKM_ENGINE_CE0; break; 77 case 0x00000001: A_(CE0 ); break;
73 case 0x00000002: info->index = NVKM_ENGINE_CE1; break; 78 case 0x00000002: A_(CE1 ); break;
74 case 0x00000003: info->index = NVKM_ENGINE_CE2; break; 79 case 0x00000003: A_(CE2 ); break;
75 case 0x00000008: info->index = NVKM_ENGINE_MSPDEC; break; 80 case 0x00000008: A_(MSPDEC); break;
76 case 0x00000009: info->index = NVKM_ENGINE_MSPPP; break; 81 case 0x00000009: A_(MSPPP ); break;
77 case 0x0000000a: info->index = NVKM_ENGINE_MSVLD; break; 82 case 0x0000000a: A_(MSVLD ); break;
78 case 0x0000000b: info->index = NVKM_ENGINE_MSENC; break; 83 case 0x0000000b: A_(MSENC ); break;
79 case 0x0000000c: info->index = NVKM_ENGINE_VIC; break; 84 case 0x0000000c: A_(VIC ); break;
80 case 0x0000000d: info->index = NVKM_ENGINE_SEC; break; 85 case 0x0000000d: A_(SEC ); break;
81 case 0x0000000e: info->index = NVKM_ENGINE_NVENC0; break; 86 case 0x0000000e: B_(NVENC ); break;
82 case 0x0000000f: info->index = NVKM_ENGINE_NVENC1; break; 87 case 0x0000000f: A_(NVENC1); break;
83 case 0x00000010: info->index = NVKM_ENGINE_NVDEC; break; 88 case 0x00000010: A_(NVDEC ); break;
89 case 0x00000013: B_(CE ); break;
84 break; 90 break;
85 default: 91 default:
86 break; 92 break;
87 } 93 }
88 94
89 nvkm_debug(subdev, "%02x (%8s): addr %06x fault %2d engine %2d " 95 nvkm_debug(subdev, "%02x.%d (%8s): addr %06x fault %2d "
90 "runlist %2d intr %2d reset %2d\n", type, 96 "engine %2d runlist %2d intr %2d "
97 "reset %2d\n", type, inst,
91 info->index == NVKM_SUBDEV_NR ? NULL : 98 info->index == NVKM_SUBDEV_NR ? NULL :
92 nvkm_subdev_name[info->index], 99 nvkm_subdev_name[info->index],
93 info->addr, info->fault, info->engine, info->runlist, 100 info->addr, info->fault, info->engine, info->runlist,
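
The A_()/B_() helpers map a PTOP entry's (type, instance) pair onto an engine index: A_() only matches instance 0, while B_() offsets into a multi-instance engine range after a bounds check. A simplified standalone sketch of that mapping, using invented engine indices rather than the real enum nvkm_devidx:

#include <stdio.h>

/* Illustrative engine indices; the real enum nvkm_devidx differs. */
enum engine {
	ENGINE_GR,
	ENGINE_CE0, ENGINE_CE1, ENGINE_CE2, ENGINE_CE_LAST = ENGINE_CE2,
	ENGINE_NR,
};

/*
 * Map a (type, inst) pair read from PTOP to an engine index.
 * Single-instance types only accept inst == 0; multi-instance types
 * take the base index plus the instance, if it stays in range.
 */
static int top_to_engine(unsigned type, unsigned inst)
{
	int index = ENGINE_NR; /* "unknown" */

	switch (type) {
	case 0x00: /* GR: single instance */
		if (inst == 0)
			index = ENGINE_GR;
		break;
	case 0x13: /* CE: instanced copy engines */
		if (ENGINE_CE0 + inst <= ENGINE_CE_LAST)
			index = ENGINE_CE0 + inst;
		break;
	default:
		break;
	}
	return index;
}

int main(void)
{
	printf("GR inst 0 -> %d\n", top_to_engine(0x00, 0));
	printf("CE inst 2 -> %d\n", top_to_engine(0x13, 2));
	printf("CE inst 9 -> %d (out of range, unknown)\n", top_to_engine(0x13, 9));
	return 0;
}
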
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
index 6b2d7531a7ff..1c3d23b0e84a 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c
@@ -120,6 +120,8 @@ nvkm_volt_parse_bios(struct nvkm_bios *bios, struct nvkm_volt *volt)
120 120
121 data = nvbios_volt_parse(bios, &ver, &hdr, &cnt, &len, &info); 121 data = nvbios_volt_parse(bios, &ver, &hdr, &cnt, &len, &info);
122 if (data && info.vidmask && info.base && info.step) { 122 if (data && info.vidmask && info.base && info.step) {
123 volt->min_uv = info.min;
124 volt->max_uv = info.max;
123 for (i = 0; i < info.vidmask + 1; i++) { 125 for (i = 0; i < info.vidmask + 1; i++) {
124 if (info.base >= info.min && 126 if (info.base >= info.min &&
125 info.base <= info.max) { 127 info.base <= info.max) {
@@ -131,6 +133,8 @@ nvkm_volt_parse_bios(struct nvkm_bios *bios, struct nvkm_volt *volt)
131 } 133 }
132 volt->vid_mask = info.vidmask; 134 volt->vid_mask = info.vidmask;
133 } else if (data && info.vidmask) { 135 } else if (data && info.vidmask) {
136 volt->min_uv = 0xffffffff;
137 volt->max_uv = 0;
134 for (i = 0; i < cnt; i++) { 138 for (i = 0; i < cnt; i++) {
135 data = nvbios_volt_entry_parse(bios, i, &ver, &hdr, 139 data = nvbios_volt_entry_parse(bios, i, &ver, &hdr,
136 &ivid); 140 &ivid);
@@ -138,9 +142,14 @@ nvkm_volt_parse_bios(struct nvkm_bios *bios, struct nvkm_volt *volt)
138 volt->vid[volt->vid_nr].uv = ivid.voltage; 142 volt->vid[volt->vid_nr].uv = ivid.voltage;
139 volt->vid[volt->vid_nr].vid = ivid.vid; 143 volt->vid[volt->vid_nr].vid = ivid.vid;
140 volt->vid_nr++; 144 volt->vid_nr++;
145 volt->min_uv = min(volt->min_uv, ivid.voltage);
146 volt->max_uv = max(volt->max_uv, ivid.voltage);
141 } 147 }
142 } 148 }
143 volt->vid_mask = info.vidmask; 149 volt->vid_mask = info.vidmask;
150 } else if (data && info.type == NVBIOS_VOLT_PWM) {
151 volt->min_uv = info.base;
152 volt->max_uv = info.base + info.pwm_range;
144 } 153 }
145} 154}
146 155
@@ -181,8 +190,11 @@ nvkm_volt_ctor(const struct nvkm_volt_func *func, struct nvkm_device *device,
181 volt->func = func; 190 volt->func = func;
182 191
183 /* Assuming the non-bios device should build the voltage table later */ 192 /* Assuming the non-bios device should build the voltage table later */
184 if (bios) 193 if (bios) {
185 nvkm_volt_parse_bios(bios, volt); 194 nvkm_volt_parse_bios(bios, volt);
195 nvkm_debug(&volt->subdev, "min: %iuv max: %iuv\n",
196 volt->min_uv, volt->max_uv);
197 }
186 198
187 if (volt->vid_nr) { 199 if (volt->vid_nr) {
188 for (i = 0; i < volt->vid_nr; i++) { 200 for (i = 0; i < volt->vid_nr; i++) {
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c
index d554455326da..ce5d83cdc7cf 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c
@@ -77,18 +77,19 @@ gk20a_volt_get_cvb_t_voltage(int speedo, int temp, int s_scale, int t_scale,
77 return mv; 77 return mv;
78} 78}
79 79
80int 80static int
81gk20a_volt_calc_voltage(const struct cvb_coef *coef, int speedo) 81gk20a_volt_calc_voltage(const struct cvb_coef *coef, int speedo)
82{ 82{
83 static const int v_scale = 1000;
83 int mv; 84 int mv;
84 85
85 mv = gk20a_volt_get_cvb_t_voltage(speedo, -10, 100, 10, coef); 86 mv = gk20a_volt_get_cvb_t_voltage(speedo, -10, 100, 10, coef);
86 mv = DIV_ROUND_UP(mv, 1000); 87 mv = DIV_ROUND_UP(mv, v_scale);
87 88
88 return mv * 1000; 89 return mv * 1000;
89} 90}
90 91
91int 92static int
92gk20a_volt_vid_get(struct nvkm_volt *base) 93gk20a_volt_vid_get(struct nvkm_volt *base)
93{ 94{
94 struct gk20a_volt *volt = gk20a_volt(base); 95 struct gk20a_volt *volt = gk20a_volt(base);
@@ -103,7 +104,7 @@ gk20a_volt_vid_get(struct nvkm_volt *base)
103 return -EINVAL; 104 return -EINVAL;
104} 105}
105 106
106int 107static int
107gk20a_volt_vid_set(struct nvkm_volt *base, u8 vid) 108gk20a_volt_vid_set(struct nvkm_volt *base, u8 vid)
108{ 109{
109 struct gk20a_volt *volt = gk20a_volt(base); 110 struct gk20a_volt *volt = gk20a_volt(base);
@@ -113,7 +114,7 @@ gk20a_volt_vid_set(struct nvkm_volt *base, u8 vid)
113 return regulator_set_voltage(volt->vdd, volt->base.vid[vid].uv, 1200000); 114 return regulator_set_voltage(volt->vdd, volt->base.vid[vid].uv, 1200000);
114} 115}
115 116
116int 117static int
117gk20a_volt_set_id(struct nvkm_volt *base, u8 id, int condition) 118gk20a_volt_set_id(struct nvkm_volt *base, u8 id, int condition)
118{ 119{
119 struct gk20a_volt *volt = gk20a_volt(base); 120 struct gk20a_volt *volt = gk20a_volt(base);
@@ -143,9 +144,9 @@ gk20a_volt = {
143}; 144};
144 145
145int 146int
146_gk20a_volt_ctor(struct nvkm_device *device, int index, 147gk20a_volt_ctor(struct nvkm_device *device, int index,
147 const struct cvb_coef *coefs, int nb_coefs, 148 const struct cvb_coef *coefs, int nb_coefs,
148 struct gk20a_volt *volt) 149 int vmin, struct gk20a_volt *volt)
149{ 150{
150 struct nvkm_device_tegra *tdev = device->func->tegra(device); 151 struct nvkm_device_tegra *tdev = device->func->tegra(device);
151 int i, uv; 152 int i, uv;
@@ -160,9 +161,9 @@ _gk20a_volt_ctor(struct nvkm_device *device, int index,
160 volt->base.vid_nr = nb_coefs; 161 volt->base.vid_nr = nb_coefs;
161 for (i = 0; i < volt->base.vid_nr; i++) { 162 for (i = 0; i < volt->base.vid_nr; i++) {
162 volt->base.vid[i].vid = i; 163 volt->base.vid[i].vid = i;
163 volt->base.vid[i].uv = 164 volt->base.vid[i].uv = max(
164 gk20a_volt_calc_voltage(&coefs[i], 165 gk20a_volt_calc_voltage(&coefs[i], tdev->gpu_speedo),
165 tdev->gpu_speedo); 166 vmin);
166 nvkm_debug(&volt->base.subdev, "%2d: vid=%d, uv=%d\n", i, 167 nvkm_debug(&volt->base.subdev, "%2d: vid=%d, uv=%d\n", i,
167 volt->base.vid[i].vid, volt->base.vid[i].uv); 168 volt->base.vid[i].vid, volt->base.vid[i].uv);
168 } 169 }
@@ -180,6 +181,6 @@ gk20a_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
180 return -ENOMEM; 181 return -ENOMEM;
181 *pvolt = &volt->base; 182 *pvolt = &volt->base;
182 183
183 return _gk20a_volt_ctor(device, index, gk20a_cvb_coef, 184 return gk20a_volt_ctor(device, index, gk20a_cvb_coef,
184 ARRAY_SIZE(gk20a_cvb_coef), volt); 185 ARRAY_SIZE(gk20a_cvb_coef), 0, volt);
185} 186}
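
gk20a_volt_calc_voltage() rounds the CVB result up to whole millivolts before converting back to microvolts, and the constructor now clamps each table entry to a per-speedo minimum voltage. A standalone sketch of that rounding and clamping, with invented input values rather than the real CVB coefficients:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/*
 * Round a raw CVB result (in microvolts) up to whole millivolts,
 * then express it in microvolts again, as the gk20a code does.
 */
static int round_to_mv_in_uv(int raw_uv)
{
	int mv = DIV_ROUND_UP(raw_uv, 1000);
	return mv * 1000;
}

static int max_int(int a, int b) { return a > b ? a : b; }

int main(void)
{
	int raw_uv = 812345;	/* hypothetical CVB output */
	int vmin_uv = 840000;	/* hypothetical per-speedo minimum */

	int uv = round_to_mv_in_uv(raw_uv);	/* 813000 */
	int clamped = max_int(uv, vmin_uv);	/* 840000 */

	printf("rounded: %d uV, after vmin clamp: %d uV\n", uv, clamped);
	return 0;
}
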
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.h b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.h
index 0fa3b502bcf8..6a6c97f9684e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.h
@@ -37,13 +37,8 @@ struct gk20a_volt {
37 struct regulator *vdd; 37 struct regulator *vdd;
38}; 38};
39 39
40int _gk20a_volt_ctor(struct nvkm_device *device, int index, 40int gk20a_volt_ctor(struct nvkm_device *device, int index,
41 const struct cvb_coef *coefs, int nb_coefs, 41 const struct cvb_coef *coefs, int nb_coefs,
42 struct gk20a_volt *volt); 42 int vmin, struct gk20a_volt *volt);
43
44int gk20a_volt_calc_voltage(const struct cvb_coef *coef, int speedo);
45int gk20a_volt_vid_get(struct nvkm_volt *volt);
46int gk20a_volt_vid_set(struct nvkm_volt *volt, u8 vid);
47int gk20a_volt_set_id(struct nvkm_volt *volt, u8 id, int condition);
48 43
49#endif 44#endif
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c
index 49b5ecb701e4..74db4d28930f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c
@@ -41,16 +41,52 @@ const struct cvb_coef gm20b_cvb_coef[] = {
41 /* 921600 */ { 2647676, -106455, 1632 }, 41 /* 921600 */ { 2647676, -106455, 1632 },
42}; 42};
43 43
44static const struct cvb_coef gm20b_na_cvb_coef[] = {
45 /* KHz, c0, c1, c2, c3, c4, c5 */
46 /* 76800 */ { 814294, 8144, -940, 808, -21583, 226 },
47 /* 153600 */ { 856185, 8144, -940, 808, -21583, 226 },
48 /* 230400 */ { 898077, 8144, -940, 808, -21583, 226 },
49 /* 307200 */ { 939968, 8144, -940, 808, -21583, 226 },
50 /* 384000 */ { 981860, 8144, -940, 808, -21583, 226 },
51 /* 460800 */ { 1023751, 8144, -940, 808, -21583, 226 },
52 /* 537600 */ { 1065642, 8144, -940, 808, -21583, 226 },
53 /* 614400 */ { 1107534, 8144, -940, 808, -21583, 226 },
54 /* 691200 */ { 1149425, 8144, -940, 808, -21583, 226 },
55 /* 768000 */ { 1191317, 8144, -940, 808, -21583, 226 },
56 /* 844800 */ { 1233208, 8144, -940, 808, -21583, 226 },
57 /* 921600 */ { 1275100, 8144, -940, 808, -21583, 226 },
58 /* 998400 */ { 1316991, 8144, -940, 808, -21583, 226 },
59};
60
61const u32 speedo_to_vmin[] = {
62 /* 0, 1, 2, 3, 4, */
63 950000, 840000, 818750, 840000, 810000,
64};
65
44int 66int
45gm20b_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt) 67gm20b_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
46{ 68{
69 struct nvkm_device_tegra *tdev = device->func->tegra(device);
47 struct gk20a_volt *volt; 70 struct gk20a_volt *volt;
71 u32 vmin;
72
73 if (tdev->gpu_speedo_id >= ARRAY_SIZE(speedo_to_vmin)) {
74 nvdev_error(device, "unsupported speedo %d\n",
75 tdev->gpu_speedo_id);
76 return -EINVAL;
77 }
48 78
49 volt = kzalloc(sizeof(*volt), GFP_KERNEL); 79 volt = kzalloc(sizeof(*volt), GFP_KERNEL);
50 if (!volt) 80 if (!volt)
51 return -ENOMEM; 81 return -ENOMEM;
52 *pvolt = &volt->base; 82 *pvolt = &volt->base;
53 83
54 return _gk20a_volt_ctor(device, index, gm20b_cvb_coef, 84 vmin = speedo_to_vmin[tdev->gpu_speedo_id];
55 ARRAY_SIZE(gm20b_cvb_coef), volt); 85
86 if (tdev->gpu_speedo_id >= 1)
87 return gk20a_volt_ctor(device, index, gm20b_na_cvb_coef,
88 ARRAY_SIZE(gm20b_na_cvb_coef), vmin, volt);
89 else
90 return gk20a_volt_ctor(device, index, gm20b_cvb_coef,
91 ARRAY_SIZE(gm20b_cvb_coef), vmin, volt);
56} 92}
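
gm20b_volt_new() first validates the SoC speedo ID against the vmin table, then selects the NA coefficient set for speedo IDs of 1 and above. A simplified standalone sketch of that selection logic; the table values match the diff above, but the helper and IDs are illustrative only:

#include <stdio.h>

static const unsigned speedo_to_vmin_uv[] = {
	/* 0, 1, 2, 3, 4 */
	950000, 840000, 818750, 840000, 810000,
};

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

/* Pick the minimum voltage and coefficient set for a speedo ID. */
static int pick_vmin(int speedo_id, unsigned *vmin_uv, int *use_na_coefs)
{
	if (speedo_id < 0 || (unsigned)speedo_id >= ARRAY_SIZE(speedo_to_vmin_uv))
		return -1;	/* unsupported speedo, mirrors the -EINVAL path */

	*vmin_uv = speedo_to_vmin_uv[speedo_id];
	*use_na_coefs = speedo_id >= 1;	/* NA coefficients for newer bins */
	return 0;
}

int main(void)
{
	unsigned vmin;
	int na;

	if (pick_vmin(2, &vmin, &na) == 0)
		printf("speedo 2: vmin=%u uV, NA coefs=%d\n", vmin, na);
	if (pick_vmin(9, &vmin, &na) != 0)
		printf("speedo 9: unsupported\n");
	return 0;
}
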
diff --git a/drivers/gpu/drm/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig
index 336ad4de9981..556f81f6b2c7 100644
--- a/drivers/gpu/drm/omapdrm/Kconfig
+++ b/drivers/gpu/drm/omapdrm/Kconfig
@@ -4,11 +4,6 @@ config DRM_OMAP
4 depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM 4 depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM
5 select OMAP2_DSS 5 select OMAP2_DSS
6 select DRM_KMS_HELPER 6 select DRM_KMS_HELPER
7 select DRM_KMS_FB_HELPER
8 select FB_SYS_FILLRECT
9 select FB_SYS_COPYAREA
10 select FB_SYS_IMAGEBLIT
11 select FB_SYS_FOPS
12 default n 7 default n
13 help 8 help
14 DRM display driver for OMAP2/3/4 based boards. 9 DRM display driver for OMAP2/3/4 based boards.
diff --git a/drivers/gpu/drm/omapdrm/displays/Kconfig b/drivers/gpu/drm/omapdrm/displays/Kconfig
index 2a618afe0f53..c226da145fb3 100644
--- a/drivers/gpu/drm/omapdrm/displays/Kconfig
+++ b/drivers/gpu/drm/omapdrm/displays/Kconfig
@@ -1,80 +1,80 @@
1menu "OMAPDRM External Display Device Drivers" 1menu "OMAPDRM External Display Device Drivers"
2 2
3config DISPLAY_ENCODER_OPA362 3config DRM_OMAP_ENCODER_OPA362
4 tristate "OPA362 external analog amplifier" 4 tristate "OPA362 external analog amplifier"
5 help 5 help
6 Driver for OPA362 external analog TV amplifier controlled 6 Driver for OPA362 external analog TV amplifier controlled
7 through a GPIO. 7 through a GPIO.
8 8
9config DISPLAY_ENCODER_TFP410 9config DRM_OMAP_ENCODER_TFP410
10 tristate "TFP410 DPI to DVI Encoder" 10 tristate "TFP410 DPI to DVI Encoder"
11 help 11 help
12 Driver for TFP410 DPI to DVI encoder. 12 Driver for TFP410 DPI to DVI encoder.
13 13
14config DISPLAY_ENCODER_TPD12S015 14config DRM_OMAP_ENCODER_TPD12S015
15 tristate "TPD12S015 HDMI ESD protection and level shifter" 15 tristate "TPD12S015 HDMI ESD protection and level shifter"
16 help 16 help
17 Driver for TPD12S015, which offers HDMI ESD protection and level 17 Driver for TPD12S015, which offers HDMI ESD protection and level
18 shifting. 18 shifting.
19 19
20config DISPLAY_CONNECTOR_DVI 20config DRM_OMAP_CONNECTOR_DVI
21 tristate "DVI Connector" 21 tristate "DVI Connector"
22 depends on I2C 22 depends on I2C
23 help 23 help
24 Driver for a generic DVI connector. 24 Driver for a generic DVI connector.
25 25
26config DISPLAY_CONNECTOR_HDMI 26config DRM_OMAP_CONNECTOR_HDMI
27 tristate "HDMI Connector" 27 tristate "HDMI Connector"
28 help 28 help
29 Driver for a generic HDMI connector. 29 Driver for a generic HDMI connector.
30 30
31config DISPLAY_CONNECTOR_ANALOG_TV 31config DRM_OMAP_CONNECTOR_ANALOG_TV
32 tristate "Analog TV Connector" 32 tristate "Analog TV Connector"
33 help 33 help
34 Driver for a generic analog TV connector. 34 Driver for a generic analog TV connector.
35 35
36config DISPLAY_PANEL_DPI 36config DRM_OMAP_PANEL_DPI
37 tristate "Generic DPI panel" 37 tristate "Generic DPI panel"
38 help 38 help
39 Driver for generic DPI panels. 39 Driver for generic DPI panels.
40 40
41config DISPLAY_PANEL_DSI_CM 41config DRM_OMAP_PANEL_DSI_CM
42 tristate "Generic DSI Command Mode Panel" 42 tristate "Generic DSI Command Mode Panel"
43 depends on BACKLIGHT_CLASS_DEVICE 43 depends on BACKLIGHT_CLASS_DEVICE
44 help 44 help
45 Driver for generic DSI command mode panels. 45 Driver for generic DSI command mode panels.
46 46
47config DISPLAY_PANEL_SONY_ACX565AKM 47config DRM_OMAP_PANEL_SONY_ACX565AKM
48 tristate "ACX565AKM Panel" 48 tristate "ACX565AKM Panel"
49 depends on SPI && BACKLIGHT_CLASS_DEVICE 49 depends on SPI && BACKLIGHT_CLASS_DEVICE
50 help 50 help
51 This is the LCD panel used on Nokia N900 51 This is the LCD panel used on Nokia N900
52 52
53config DISPLAY_PANEL_LGPHILIPS_LB035Q02 53config DRM_OMAP_PANEL_LGPHILIPS_LB035Q02
54 tristate "LG.Philips LB035Q02 LCD Panel" 54 tristate "LG.Philips LB035Q02 LCD Panel"
55 depends on SPI 55 depends on SPI
56 help 56 help
57 LCD Panel used on the Gumstix Overo Palo35 57 LCD Panel used on the Gumstix Overo Palo35
58 58
59config DISPLAY_PANEL_SHARP_LS037V7DW01 59config DRM_OMAP_PANEL_SHARP_LS037V7DW01
60 tristate "Sharp LS037V7DW01 LCD Panel" 60 tristate "Sharp LS037V7DW01 LCD Panel"
61 depends on BACKLIGHT_CLASS_DEVICE 61 depends on BACKLIGHT_CLASS_DEVICE
62 help 62 help
63 LCD Panel used in TI's SDP3430 and EVM boards 63 LCD Panel used in TI's SDP3430 and EVM boards
64 64
65config DISPLAY_PANEL_TPO_TD028TTEC1 65config DRM_OMAP_PANEL_TPO_TD028TTEC1
66 tristate "TPO TD028TTEC1 LCD Panel" 66 tristate "TPO TD028TTEC1 LCD Panel"
67 depends on SPI 67 depends on SPI
68 help 68 help
69 LCD panel used in Openmoko. 69 LCD panel used in Openmoko.
70 70
71config DISPLAY_PANEL_TPO_TD043MTEA1 71config DRM_OMAP_PANEL_TPO_TD043MTEA1
72 tristate "TPO TD043MTEA1 LCD Panel" 72 tristate "TPO TD043MTEA1 LCD Panel"
73 depends on SPI 73 depends on SPI
74 help 74 help
75 LCD Panel used in OMAP3 Pandora 75 LCD Panel used in OMAP3 Pandora
76 76
77config DISPLAY_PANEL_NEC_NL8048HL11 77config DRM_OMAP_PANEL_NEC_NL8048HL11
78 tristate "NEC NL8048HL11 Panel" 78 tristate "NEC NL8048HL11 Panel"
79 depends on SPI 79 depends on SPI
80 depends on BACKLIGHT_CLASS_DEVICE 80 depends on BACKLIGHT_CLASS_DEVICE
diff --git a/drivers/gpu/drm/omapdrm/displays/Makefile b/drivers/gpu/drm/omapdrm/displays/Makefile
index 9aa176bfbf2e..46baafb1a83e 100644
--- a/drivers/gpu/drm/omapdrm/displays/Makefile
+++ b/drivers/gpu/drm/omapdrm/displays/Makefile
@@ -1,14 +1,14 @@
1obj-$(CONFIG_DISPLAY_ENCODER_OPA362) += encoder-opa362.o 1obj-$(CONFIG_DRM_OMAP_ENCODER_OPA362) += encoder-opa362.o
2obj-$(CONFIG_DISPLAY_ENCODER_TFP410) += encoder-tfp410.o 2obj-$(CONFIG_DRM_OMAP_ENCODER_TFP410) += encoder-tfp410.o
3obj-$(CONFIG_DISPLAY_ENCODER_TPD12S015) += encoder-tpd12s015.o 3obj-$(CONFIG_DRM_OMAP_ENCODER_TPD12S015) += encoder-tpd12s015.o
4obj-$(CONFIG_DISPLAY_CONNECTOR_DVI) += connector-dvi.o 4obj-$(CONFIG_DRM_OMAP_CONNECTOR_DVI) += connector-dvi.o
5obj-$(CONFIG_DISPLAY_CONNECTOR_HDMI) += connector-hdmi.o 5obj-$(CONFIG_DRM_OMAP_CONNECTOR_HDMI) += connector-hdmi.o
6obj-$(CONFIG_DISPLAY_CONNECTOR_ANALOG_TV) += connector-analog-tv.o 6obj-$(CONFIG_DRM_OMAP_CONNECTOR_ANALOG_TV) += connector-analog-tv.o
7obj-$(CONFIG_DISPLAY_PANEL_DPI) += panel-dpi.o 7obj-$(CONFIG_DRM_OMAP_PANEL_DPI) += panel-dpi.o
8obj-$(CONFIG_DISPLAY_PANEL_DSI_CM) += panel-dsi-cm.o 8obj-$(CONFIG_DRM_OMAP_PANEL_DSI_CM) += panel-dsi-cm.o
9obj-$(CONFIG_DISPLAY_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o 9obj-$(CONFIG_DRM_OMAP_PANEL_SONY_ACX565AKM) += panel-sony-acx565akm.o
10obj-$(CONFIG_DISPLAY_PANEL_LGPHILIPS_LB035Q02) += panel-lgphilips-lb035q02.o 10obj-$(CONFIG_DRM_OMAP_PANEL_LGPHILIPS_LB035Q02) += panel-lgphilips-lb035q02.o
11obj-$(CONFIG_DISPLAY_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o 11obj-$(CONFIG_DRM_OMAP_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o
12obj-$(CONFIG_DISPLAY_PANEL_TPO_TD028TTEC1) += panel-tpo-td028ttec1.o 12obj-$(CONFIG_DRM_OMAP_PANEL_TPO_TD028TTEC1) += panel-tpo-td028ttec1.o
13obj-$(CONFIG_DISPLAY_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o 13obj-$(CONFIG_DRM_OMAP_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o
14obj-$(CONFIG_DISPLAY_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o 14obj-$(CONFIG_DRM_OMAP_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
index 8511c648a15c..3485d1ecd655 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
@@ -14,9 +14,10 @@
14#include <linux/platform_device.h> 14#include <linux/platform_device.h>
15#include <linux/of.h> 15#include <linux/of.h>
16 16
17#include <video/omapdss.h>
18#include <video/omap-panel-data.h> 17#include <video/omap-panel-data.h>
19 18
19#include "../dss/omapdss.h"
20
20struct panel_drv_data { 21struct panel_drv_data {
21 struct omap_dss_device dssdev; 22 struct omap_dss_device dssdev;
22 struct omap_dss_device *in; 23 struct omap_dss_device *in;
@@ -25,7 +26,6 @@ struct panel_drv_data {
25 26
26 struct omap_video_timings timings; 27 struct omap_video_timings timings;
27 28
28 enum omap_dss_venc_type connector_type;
29 bool invert_polarity; 29 bool invert_polarity;
30}; 30};
31 31
@@ -45,10 +45,6 @@ static const struct omap_video_timings tvc_pal_timings = {
45 45
46static const struct of_device_id tvc_of_match[]; 46static const struct of_device_id tvc_of_match[];
47 47
48struct tvc_of_data {
49 enum omap_dss_venc_type connector_type;
50};
51
52#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev) 48#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
53 49
54static int tvc_connect(struct omap_dss_device *dssdev) 50static int tvc_connect(struct omap_dss_device *dssdev)
@@ -99,7 +95,7 @@ static int tvc_enable(struct omap_dss_device *dssdev)
99 in->ops.atv->set_timings(in, &ddata->timings); 95 in->ops.atv->set_timings(in, &ddata->timings);
100 96
101 if (!ddata->dev->of_node) { 97 if (!ddata->dev->of_node) {
102 in->ops.atv->set_type(in, ddata->connector_type); 98 in->ops.atv->set_type(in, OMAP_DSS_VENC_TYPE_COMPOSITE);
103 99
104 in->ops.atv->invert_vid_out_polarity(in, 100 in->ops.atv->invert_vid_out_polarity(in,
105 ddata->invert_polarity); 101 ddata->invert_polarity);
@@ -207,7 +203,6 @@ static int tvc_probe_pdata(struct platform_device *pdev)
207 203
208 ddata->in = in; 204 ddata->in = in;
209 205
210 ddata->connector_type = pdata->connector_type;
211 ddata->invert_polarity = pdata->invert_polarity; 206 ddata->invert_polarity = pdata->invert_polarity;
212 207
213 dssdev = &ddata->dssdev; 208 dssdev = &ddata->dssdev;
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
index 747f26a55e43..684b7aeda411 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
@@ -15,10 +15,10 @@
15#include <linux/slab.h> 15#include <linux/slab.h>
16 16
17#include <drm/drm_edid.h> 17#include <drm/drm_edid.h>
18
19#include <video/omapdss.h>
20#include <video/omap-panel-data.h> 18#include <video/omap-panel-data.h>
21 19
20#include "../dss/omapdss.h"
21
22static const struct omap_video_timings dvic_default_timings = { 22static const struct omap_video_timings dvic_default_timings = {
23 .x_res = 640, 23 .x_res = 640,
24 .y_res = 480, 24 .y_res = 480,
@@ -255,6 +255,7 @@ static int dvic_probe_of(struct platform_device *pdev)
255 adapter_node = of_parse_phandle(node, "ddc-i2c-bus", 0); 255 adapter_node = of_parse_phandle(node, "ddc-i2c-bus", 0);
256 if (adapter_node) { 256 if (adapter_node) {
257 adapter = of_get_i2c_adapter_by_node(adapter_node); 257 adapter = of_get_i2c_adapter_by_node(adapter_node);
258 of_node_put(adapter_node);
258 if (adapter == NULL) { 259 if (adapter == NULL) {
259 dev_err(&pdev->dev, "failed to parse ddc-i2c-bus\n"); 260 dev_err(&pdev->dev, "failed to parse ddc-i2c-bus\n");
260 omap_dss_put_device(ddata->in); 261 omap_dss_put_device(ddata->in);
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
index 667ca4a24ece..7bdf83af9797 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
@@ -17,10 +17,10 @@
17#include <linux/of_gpio.h> 17#include <linux/of_gpio.h>
18 18
19#include <drm/drm_edid.h> 19#include <drm/drm_edid.h>
20
21#include <video/omapdss.h>
22#include <video/omap-panel-data.h> 20#include <video/omap-panel-data.h>
23 21
22#include "../dss/omapdss.h"
23
24static const struct omap_video_timings hdmic_default_timings = { 24static const struct omap_video_timings hdmic_default_timings = {
25 .x_res = 640, 25 .x_res = 640,
26 .y_res = 480, 26 .y_res = 480,
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
index 9594ff7a2b0c..fe4e7ec3bab0 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
@@ -18,9 +18,8 @@
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/platform_device.h> 19#include <linux/platform_device.h>
20#include <linux/slab.h> 20#include <linux/slab.h>
21#include <linux/of_gpio.h>
22 21
23#include <video/omapdss.h> 22#include "../dss/omapdss.h"
24 23
25struct panel_drv_data { 24struct panel_drv_data {
26 struct omap_dss_device dssdev; 25 struct omap_dss_device dssdev;
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
index 671806ca7d6a..d768217cefe0 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
@@ -15,8 +15,7 @@
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/of_gpio.h> 16#include <linux/of_gpio.h>
17 17
18#include <video/omapdss.h> 18#include "../dss/omapdss.h"
19#include <video/omap-panel-data.h>
20 19
21struct panel_drv_data { 20struct panel_drv_data {
22 struct omap_dss_device dssdev; 21 struct omap_dss_device dssdev;
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
index 916a89978387..46855c8f5cbf 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
@@ -16,8 +16,7 @@
16#include <linux/platform_device.h> 16#include <linux/platform_device.h>
17#include <linux/gpio/consumer.h> 17#include <linux/gpio/consumer.h>
18 18
19#include <video/omapdss.h> 19#include "../dss/omapdss.h"
20#include <video/omap-panel-data.h>
21 20
22struct panel_drv_data { 21struct panel_drv_data {
23 struct omap_dss_device dssdev; 22 struct omap_dss_device dssdev;
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
index 7c2331be8d15..7f16f985ab22 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
@@ -15,11 +15,13 @@
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/of.h> 16#include <linux/of.h>
17#include <linux/of_gpio.h> 17#include <linux/of_gpio.h>
18#include <linux/regulator/consumer.h>
18 19
19#include <video/omapdss.h>
20#include <video/omap-panel-data.h> 20#include <video/omap-panel-data.h>
21#include <video/of_display_timing.h> 21#include <video/of_display_timing.h>
22 22
23#include "../dss/omapdss.h"
24
23struct panel_drv_data { 25struct panel_drv_data {
24 struct omap_dss_device dssdev; 26 struct omap_dss_device dssdev;
25 struct omap_dss_device *in; 27 struct omap_dss_device *in;
@@ -32,6 +34,7 @@ struct panel_drv_data {
32 int backlight_gpio; 34 int backlight_gpio;
33 35
34 struct gpio_desc *enable_gpio; 36 struct gpio_desc *enable_gpio;
37 struct regulator *vcc_supply;
35}; 38};
36 39
37#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev) 40#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
@@ -83,6 +86,12 @@ static int panel_dpi_enable(struct omap_dss_device *dssdev)
83 if (r) 86 if (r)
84 return r; 87 return r;
85 88
89 r = regulator_enable(ddata->vcc_supply);
90 if (r) {
91 in->ops.dpi->disable(in);
92 return r;
93 }
94
86 gpiod_set_value_cansleep(ddata->enable_gpio, 1); 95 gpiod_set_value_cansleep(ddata->enable_gpio, 1);
87 96
88 if (gpio_is_valid(ddata->backlight_gpio)) 97 if (gpio_is_valid(ddata->backlight_gpio))
@@ -105,6 +114,7 @@ static void panel_dpi_disable(struct omap_dss_device *dssdev)
105 gpio_set_value_cansleep(ddata->backlight_gpio, 0); 114 gpio_set_value_cansleep(ddata->backlight_gpio, 0);
106 115
107 gpiod_set_value_cansleep(ddata->enable_gpio, 0); 116 gpiod_set_value_cansleep(ddata->enable_gpio, 0);
117 regulator_disable(ddata->vcc_supply);
108 118
109 in->ops.dpi->disable(in); 119 in->ops.dpi->disable(in);
110 120
@@ -213,6 +223,20 @@ static int panel_dpi_probe_of(struct platform_device *pdev)
213 223
214 ddata->enable_gpio = gpio; 224 ddata->enable_gpio = gpio;
215 225
 226	/*
 227	 * Many different panels are supported by this driver, and their reset
 228	 * pins likely have very different timing and ordering requirements
 229	 * relative to the enable gpio. So for now, just ensure that the reset
 230	 * line isn't active.
 231	 */
232 gpio = devm_gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW);
233 if (IS_ERR(gpio))
234 return PTR_ERR(gpio);
235
236 ddata->vcc_supply = devm_regulator_get(&pdev->dev, "vcc");
237 if (IS_ERR(ddata->vcc_supply))
238 return PTR_ERR(ddata->vcc_supply);
239
216 ddata->backlight_gpio = -ENOENT; 240 ddata->backlight_gpio = -ENOENT;
217 241
218 r = of_get_display_timing(node, "panel-timing", &timing); 242 r = of_get_display_timing(node, "panel-timing", &timing);
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
index 2b118071b5a1..0eae8afaed90 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
@@ -25,10 +25,10 @@
25#include <linux/of_device.h> 25#include <linux/of_device.h>
26#include <linux/of_gpio.h> 26#include <linux/of_gpio.h>
27 27
28#include <video/omapdss.h>
29#include <video/omap-panel-data.h>
30#include <video/mipi_display.h> 28#include <video/mipi_display.h>
31 29
30#include "../dss/omapdss.h"
31
32/* DSI Virtual channel. Hardcoded for now. */ 32/* DSI Virtual channel. Hardcoded for now. */
33#define TCH 0 33#define TCH 0
34 34
@@ -1284,8 +1284,7 @@ static int dsicm_probe(struct platform_device *pdev)
1284 return 0; 1284 return 0;
1285 1285
1286err_sysfs_create: 1286err_sysfs_create:
1287 if (bldev != NULL) 1287 backlight_device_unregister(bldev);
1288 backlight_device_unregister(bldev);
1289err_bl: 1288err_bl:
1290 destroy_workqueue(ddata->workqueue); 1289 destroy_workqueue(ddata->workqueue);
1291err_reg: 1290err_reg:
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
index ac680e1de603..6dfb96cea293 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
@@ -17,8 +17,7 @@
17#include <linux/gpio.h> 17#include <linux/gpio.h>
18#include <linux/gpio/consumer.h> 18#include <linux/gpio/consumer.h>
19 19
20#include <video/omapdss.h> 20#include "../dss/omapdss.h"
21#include <video/omap-panel-data.h>
22 21
23static struct omap_video_timings lb035q02_timings = { 22static struct omap_video_timings lb035q02_timings = {
24 .x_res = 320, 23 .x_res = 320,
@@ -51,9 +50,6 @@ struct panel_drv_data {
51 50
52 struct omap_video_timings videomode; 51 struct omap_video_timings videomode;
53 52
54 /* used for non-DT boot, to be removed */
55 int backlight_gpio;
56
57 struct gpio_desc *enable_gpio; 53 struct gpio_desc *enable_gpio;
58}; 54};
59 55
@@ -171,9 +167,6 @@ static int lb035q02_enable(struct omap_dss_device *dssdev)
171 if (ddata->enable_gpio) 167 if (ddata->enable_gpio)
172 gpiod_set_value_cansleep(ddata->enable_gpio, 1); 168 gpiod_set_value_cansleep(ddata->enable_gpio, 1);
173 169
174 if (gpio_is_valid(ddata->backlight_gpio))
175 gpio_set_value_cansleep(ddata->backlight_gpio, 1);
176
177 dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; 170 dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
178 171
179 return 0; 172 return 0;
@@ -190,9 +183,6 @@ static void lb035q02_disable(struct omap_dss_device *dssdev)
190 if (ddata->enable_gpio) 183 if (ddata->enable_gpio)
191 gpiod_set_value_cansleep(ddata->enable_gpio, 0); 184 gpiod_set_value_cansleep(ddata->enable_gpio, 0);
192 185
193 if (gpio_is_valid(ddata->backlight_gpio))
194 gpio_set_value_cansleep(ddata->backlight_gpio, 0);
195
196 in->ops.dpi->disable(in); 186 in->ops.dpi->disable(in);
197 187
198 dssdev->state = OMAP_DSS_DISPLAY_DISABLED; 188 dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
@@ -256,8 +246,6 @@ static int lb035q02_probe_of(struct spi_device *spi)
256 246
257 ddata->enable_gpio = gpio; 247 ddata->enable_gpio = gpio;
258 248
259 ddata->backlight_gpio = -ENOENT;
260
261 in = omapdss_of_find_source_for_first_ep(node); 249 in = omapdss_of_find_source_for_first_ep(node);
262 if (IS_ERR(in)) { 250 if (IS_ERR(in)) {
263 dev_err(&spi->dev, "failed to find video source\n"); 251 dev_err(&spi->dev, "failed to find video source\n");
@@ -290,13 +278,6 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi)
290 if (r) 278 if (r)
291 return r; 279 return r;
292 280
293 if (gpio_is_valid(ddata->backlight_gpio)) {
294 r = devm_gpio_request_one(&spi->dev, ddata->backlight_gpio,
295 GPIOF_OUT_INIT_LOW, "panel backlight");
296 if (r)
297 goto err_gpio;
298 }
299
300 ddata->videomode = lb035q02_timings; 281 ddata->videomode = lb035q02_timings;
301 282
302 dssdev = &ddata->dssdev; 283 dssdev = &ddata->dssdev;
@@ -316,7 +297,6 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi)
316 return 0; 297 return 0;
317 298
318err_reg: 299err_reg:
319err_gpio:
320 omap_dss_put_device(ddata->in); 300 omap_dss_put_device(ddata->in);
321 return r; 301 return r;
322} 302}
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
index 38d2920a95e6..fc4c238c9583 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
@@ -18,7 +18,7 @@
18#include <linux/gpio/consumer.h> 18#include <linux/gpio/consumer.h>
19#include <linux/of_gpio.h> 19#include <linux/of_gpio.h>
20 20
21#include <video/omapdss.h> 21#include "../dss/omapdss.h"
22 22
23struct panel_drv_data { 23struct panel_drv_data {
24 struct omap_dss_device dssdev; 24 struct omap_dss_device dssdev;
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
index 4363fffc87e3..3d3efc561ea9 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
@@ -13,11 +13,11 @@
13#include <linux/gpio/consumer.h> 13#include <linux/gpio/consumer.h>
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/of.h> 15#include <linux/of.h>
16#include <linux/of_gpio.h>
17#include <linux/platform_device.h> 16#include <linux/platform_device.h>
18#include <linux/slab.h> 17#include <linux/slab.h>
19#include <linux/regulator/consumer.h> 18#include <linux/regulator/consumer.h>
20#include <video/omapdss.h> 19
20#include "../dss/omapdss.h"
21 21
22struct panel_drv_data { 22struct panel_drv_data {
23 struct omap_dss_device dssdev; 23 struct omap_dss_device dssdev;
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
index deb416736aad..157c512205d1 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
@@ -33,9 +33,10 @@
33#include <linux/of.h> 33#include <linux/of.h>
34#include <linux/of_gpio.h> 34#include <linux/of_gpio.h>
35 35
36#include <video/omapdss.h>
37#include <video/omap-panel-data.h> 36#include <video/omap-panel-data.h>
38 37
38#include "../dss/omapdss.h"
39
39#define MIPID_CMD_READ_DISP_ID 0x04 40#define MIPID_CMD_READ_DISP_ID 0x04
40#define MIPID_CMD_READ_RED 0x06 41#define MIPID_CMD_READ_RED 0x06
41#define MIPID_CMD_READ_GREEN 0x07 42#define MIPID_CMD_READ_GREEN 0x07
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
index bd8d85041926..e859b3f893f7 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
@@ -28,7 +28,8 @@
28#include <linux/delay.h> 28#include <linux/delay.h>
29#include <linux/spi/spi.h> 29#include <linux/spi/spi.h>
30#include <linux/gpio.h> 30#include <linux/gpio.h>
31#include <video/omapdss.h> 31
32#include "../dss/omapdss.h"
32 33
33struct panel_drv_data { 34struct panel_drv_data {
34 struct omap_dss_device dssdev; 35 struct omap_dss_device dssdev;
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
index d93175b03a12..66c6bbe6472b 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
@@ -19,7 +19,7 @@
19#include <linux/slab.h> 19#include <linux/slab.h>
20#include <linux/of_gpio.h> 20#include <linux/of_gpio.h>
21 21
22#include <video/omapdss.h> 22#include "../dss/omapdss.h"
23 23
24#define TPO_R02_MODE(x) ((x) & 7) 24#define TPO_R02_MODE(x) ((x) & 7)
25#define TPO_R02_MODE_800x480 7 25#define TPO_R02_MODE_800x480 7
diff --git a/drivers/gpu/drm/omapdrm/dss/core.c b/drivers/gpu/drm/omapdrm/dss/core.c
index 7e4e5bebabbe..6a3ebfcd7223 100644
--- a/drivers/gpu/drm/omapdrm/dss/core.c
+++ b/drivers/gpu/drm/omapdrm/dss/core.c
@@ -35,8 +35,7 @@
35#include <linux/suspend.h> 35#include <linux/suspend.h>
36#include <linux/slab.h> 36#include <linux/slab.h>
37 37
38#include <video/omapdss.h> 38#include "omapdss.h"
39
40#include "dss.h" 39#include "dss.h"
41#include "dss_features.h" 40#include "dss_features.h"
42 41
@@ -196,8 +195,6 @@ static int __init omap_dss_probe(struct platform_device *pdev)
196 core.default_display_name = def_disp_name; 195 core.default_display_name = def_disp_name;
197 else if (pdata->default_display_name) 196 else if (pdata->default_display_name)
198 core.default_display_name = pdata->default_display_name; 197 core.default_display_name = pdata->default_display_name;
199 else if (pdata->default_device)
200 core.default_display_name = pdata->default_device->name;
201 198
202 return 0; 199 return 0;
203 200
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index f83608b69e68..535240fba671 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -41,8 +41,7 @@
41#include <linux/of.h> 41#include <linux/of.h>
42#include <linux/component.h> 42#include <linux/component.h>
43 43
44#include <video/omapdss.h> 44#include "omapdss.h"
45
46#include "dss.h" 45#include "dss.h"
47#include "dss_features.h" 46#include "dss_features.h"
48#include "dispc.h" 47#include "dispc.h"
@@ -113,9 +112,14 @@ struct dispc_features {
113 * never both, we can just use this flag for now. 112 * never both, we can just use this flag for now.
114 */ 113 */
115 bool reverse_ilace_field_order:1; 114 bool reverse_ilace_field_order:1;
115
116 bool has_gamma_table:1;
117
118 bool has_gamma_i734_bug:1;
116}; 119};
117 120
118#define DISPC_MAX_NR_FIFOS 5 121#define DISPC_MAX_NR_FIFOS 5
122#define DISPC_MAX_CHANNEL_GAMMA 4
119 123
120static struct { 124static struct {
121 struct platform_device *pdev; 125 struct platform_device *pdev;
@@ -135,6 +139,8 @@ static struct {
135 bool ctx_valid; 139 bool ctx_valid;
136 u32 ctx[DISPC_SZ_REGS / sizeof(u32)]; 140 u32 ctx[DISPC_SZ_REGS / sizeof(u32)];
137 141
142 u32 *gamma_table[DISPC_MAX_CHANNEL_GAMMA];
143
138 const struct dispc_features *feat; 144 const struct dispc_features *feat;
139 145
140 bool is_enabled; 146 bool is_enabled;
@@ -178,11 +184,19 @@ struct dispc_reg_field {
178 u8 low; 184 u8 low;
179}; 185};
180 186
187struct dispc_gamma_desc {
188 u32 len;
189 u32 bits;
190 u16 reg;
191 bool has_index;
192};
193
181static const struct { 194static const struct {
182 const char *name; 195 const char *name;
183 u32 vsync_irq; 196 u32 vsync_irq;
184 u32 framedone_irq; 197 u32 framedone_irq;
185 u32 sync_lost_irq; 198 u32 sync_lost_irq;
199 struct dispc_gamma_desc gamma;
186 struct dispc_reg_field reg_desc[DISPC_MGR_FLD_NUM]; 200 struct dispc_reg_field reg_desc[DISPC_MGR_FLD_NUM];
187} mgr_desc[] = { 201} mgr_desc[] = {
188 [OMAP_DSS_CHANNEL_LCD] = { 202 [OMAP_DSS_CHANNEL_LCD] = {
@@ -190,6 +204,12 @@ static const struct {
190 .vsync_irq = DISPC_IRQ_VSYNC, 204 .vsync_irq = DISPC_IRQ_VSYNC,
191 .framedone_irq = DISPC_IRQ_FRAMEDONE, 205 .framedone_irq = DISPC_IRQ_FRAMEDONE,
192 .sync_lost_irq = DISPC_IRQ_SYNC_LOST, 206 .sync_lost_irq = DISPC_IRQ_SYNC_LOST,
207 .gamma = {
208 .len = 256,
209 .bits = 8,
210 .reg = DISPC_GAMMA_TABLE0,
211 .has_index = true,
212 },
193 .reg_desc = { 213 .reg_desc = {
194 [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL, 0, 0 }, 214 [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL, 0, 0 },
195 [DISPC_MGR_FLD_STNTFT] = { DISPC_CONTROL, 3, 3 }, 215 [DISPC_MGR_FLD_STNTFT] = { DISPC_CONTROL, 3, 3 },
@@ -207,6 +227,12 @@ static const struct {
207 .vsync_irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN, 227 .vsync_irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN,
208 .framedone_irq = DISPC_IRQ_FRAMEDONETV, 228 .framedone_irq = DISPC_IRQ_FRAMEDONETV,
209 .sync_lost_irq = DISPC_IRQ_SYNC_LOST_DIGIT, 229 .sync_lost_irq = DISPC_IRQ_SYNC_LOST_DIGIT,
230 .gamma = {
231 .len = 1024,
232 .bits = 10,
233 .reg = DISPC_GAMMA_TABLE2,
234 .has_index = false,
235 },
210 .reg_desc = { 236 .reg_desc = {
211 [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL, 1, 1 }, 237 [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL, 1, 1 },
212 [DISPC_MGR_FLD_STNTFT] = { }, 238 [DISPC_MGR_FLD_STNTFT] = { },
@@ -224,6 +250,12 @@ static const struct {
224 .vsync_irq = DISPC_IRQ_VSYNC2, 250 .vsync_irq = DISPC_IRQ_VSYNC2,
225 .framedone_irq = DISPC_IRQ_FRAMEDONE2, 251 .framedone_irq = DISPC_IRQ_FRAMEDONE2,
226 .sync_lost_irq = DISPC_IRQ_SYNC_LOST2, 252 .sync_lost_irq = DISPC_IRQ_SYNC_LOST2,
253 .gamma = {
254 .len = 256,
255 .bits = 8,
256 .reg = DISPC_GAMMA_TABLE1,
257 .has_index = true,
258 },
227 .reg_desc = { 259 .reg_desc = {
228 [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL2, 0, 0 }, 260 [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL2, 0, 0 },
229 [DISPC_MGR_FLD_STNTFT] = { DISPC_CONTROL2, 3, 3 }, 261 [DISPC_MGR_FLD_STNTFT] = { DISPC_CONTROL2, 3, 3 },
@@ -241,6 +273,12 @@ static const struct {
241 .vsync_irq = DISPC_IRQ_VSYNC3, 273 .vsync_irq = DISPC_IRQ_VSYNC3,
242 .framedone_irq = DISPC_IRQ_FRAMEDONE3, 274 .framedone_irq = DISPC_IRQ_FRAMEDONE3,
243 .sync_lost_irq = DISPC_IRQ_SYNC_LOST3, 275 .sync_lost_irq = DISPC_IRQ_SYNC_LOST3,
276 .gamma = {
277 .len = 256,
278 .bits = 8,
279 .reg = DISPC_GAMMA_TABLE3,
280 .has_index = true,
281 },
244 .reg_desc = { 282 .reg_desc = {
245 [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL3, 0, 0 }, 283 [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL3, 0, 0 },
246 [DISPC_MGR_FLD_STNTFT] = { DISPC_CONTROL3, 3, 3 }, 284 [DISPC_MGR_FLD_STNTFT] = { DISPC_CONTROL3, 3, 3 },
@@ -1084,20 +1122,6 @@ static u32 dispc_ovl_get_burst_size(enum omap_plane plane)
1084 return unit * 8; 1122 return unit * 8;
1085} 1123}
1086 1124
1087void dispc_enable_gamma_table(bool enable)
1088{
1089 /*
1090 * This is partially implemented to support only disabling of
1091 * the gamma table.
1092 */
1093 if (enable) {
1094 DSSWARN("Gamma table enabling for TV not yet supported");
1095 return;
1096 }
1097
1098 REG_FLD_MOD(DISPC_CONFIG, enable, 9, 9);
1099}
1100
1101static void dispc_mgr_enable_cpr(enum omap_channel channel, bool enable) 1125static void dispc_mgr_enable_cpr(enum omap_channel channel, bool enable)
1102{ 1126{
1103 if (channel == OMAP_DSS_CHANNEL_DIGIT) 1127 if (channel == OMAP_DSS_CHANNEL_DIGIT)
@@ -3299,30 +3323,21 @@ static void dispc_mgr_get_lcd_divisor(enum omap_channel channel, int *lck_div,
3299 3323
3300static unsigned long dispc_fclk_rate(void) 3324static unsigned long dispc_fclk_rate(void)
3301{ 3325{
3302 struct dss_pll *pll; 3326 unsigned long r;
3303 unsigned long r = 0; 3327 enum dss_clk_source src;
3328
3329 src = dss_get_dispc_clk_source();
3304 3330
3305 switch (dss_get_dispc_clk_source()) { 3331 if (src == DSS_CLK_SRC_FCK) {
3306 case OMAP_DSS_CLK_SRC_FCK:
3307 r = dss_get_dispc_clk_rate(); 3332 r = dss_get_dispc_clk_rate();
3308 break; 3333 } else {
3309 case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: 3334 struct dss_pll *pll;
3310 pll = dss_pll_find("dsi0"); 3335 unsigned clkout_idx;
3311 if (!pll)
3312 pll = dss_pll_find("video0");
3313 3336
3314 r = pll->cinfo.clkout[0]; 3337 pll = dss_pll_find_by_src(src);
3315 break; 3338 clkout_idx = dss_pll_get_clkout_idx_for_src(src);
3316 case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC:
3317 pll = dss_pll_find("dsi1");
3318 if (!pll)
3319 pll = dss_pll_find("video1");
3320 3339
3321 r = pll->cinfo.clkout[0]; 3340 r = pll->cinfo.clkout[clkout_idx];
3322 break;
3323 default:
3324 BUG();
3325 return 0;
3326 } 3341 }
3327 3342
3328 return r; 3343 return r;
@@ -3330,43 +3345,31 @@ static unsigned long dispc_fclk_rate(void)
3330 3345
3331static unsigned long dispc_mgr_lclk_rate(enum omap_channel channel) 3346static unsigned long dispc_mgr_lclk_rate(enum omap_channel channel)
3332{ 3347{
3333 struct dss_pll *pll;
3334 int lcd; 3348 int lcd;
3335 unsigned long r; 3349 unsigned long r;
3336 u32 l; 3350 enum dss_clk_source src;
3337
3338 if (dss_mgr_is_lcd(channel)) {
3339 l = dispc_read_reg(DISPC_DIVISORo(channel));
3340 3351
3341 lcd = FLD_GET(l, 23, 16); 3352 /* for TV, LCLK rate is the FCLK rate */
3353 if (!dss_mgr_is_lcd(channel))
3354 return dispc_fclk_rate();
3342 3355
3343 switch (dss_get_lcd_clk_source(channel)) { 3356 src = dss_get_lcd_clk_source(channel);
3344 case OMAP_DSS_CLK_SRC_FCK:
3345 r = dss_get_dispc_clk_rate();
3346 break;
3347 case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
3348 pll = dss_pll_find("dsi0");
3349 if (!pll)
3350 pll = dss_pll_find("video0");
3351 3357
3352 r = pll->cinfo.clkout[0]; 3358 if (src == DSS_CLK_SRC_FCK) {
3353 break; 3359 r = dss_get_dispc_clk_rate();
3354 case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC: 3360 } else {
3355 pll = dss_pll_find("dsi1"); 3361 struct dss_pll *pll;
3356 if (!pll) 3362 unsigned clkout_idx;
3357 pll = dss_pll_find("video1");
3358 3363
3359 r = pll->cinfo.clkout[0]; 3364 pll = dss_pll_find_by_src(src);
3360 break; 3365 clkout_idx = dss_pll_get_clkout_idx_for_src(src);
3361 default:
3362 BUG();
3363 return 0;
3364 }
3365 3366
3366 return r / lcd; 3367 r = pll->cinfo.clkout[clkout_idx];
3367 } else {
3368 return dispc_fclk_rate();
3369 } 3368 }
3369
3370 lcd = REG_GET(DISPC_DIVISORo(channel), 23, 16);
3371
3372 return r / lcd;
3370} 3373}
3371 3374
3372static unsigned long dispc_mgr_pclk_rate(enum omap_channel channel) 3375static unsigned long dispc_mgr_pclk_rate(enum omap_channel channel)
@@ -3426,15 +3429,14 @@ static unsigned long dispc_plane_lclk_rate(enum omap_plane plane)
3426static void dispc_dump_clocks_channel(struct seq_file *s, enum omap_channel channel) 3429static void dispc_dump_clocks_channel(struct seq_file *s, enum omap_channel channel)
3427{ 3430{
3428 int lcd, pcd; 3431 int lcd, pcd;
3429 enum omap_dss_clk_source lcd_clk_src; 3432 enum dss_clk_source lcd_clk_src;
3430 3433
3431 seq_printf(s, "- %s -\n", mgr_desc[channel].name); 3434 seq_printf(s, "- %s -\n", mgr_desc[channel].name);
3432 3435
3433 lcd_clk_src = dss_get_lcd_clk_source(channel); 3436 lcd_clk_src = dss_get_lcd_clk_source(channel);
3434 3437
3435 seq_printf(s, "%s clk source = %s (%s)\n", mgr_desc[channel].name, 3438 seq_printf(s, "%s clk source = %s\n", mgr_desc[channel].name,
3436 dss_get_generic_clk_source_name(lcd_clk_src), 3439 dss_get_clk_source_name(lcd_clk_src));
3437 dss_feat_get_clk_source_name(lcd_clk_src));
3438 3440
3439 dispc_mgr_get_lcd_divisor(channel, &lcd, &pcd); 3441 dispc_mgr_get_lcd_divisor(channel, &lcd, &pcd);
3440 3442
@@ -3448,16 +3450,15 @@ void dispc_dump_clocks(struct seq_file *s)
3448{ 3450{
3449 int lcd; 3451 int lcd;
3450 u32 l; 3452 u32 l;
3451 enum omap_dss_clk_source dispc_clk_src = dss_get_dispc_clk_source(); 3453 enum dss_clk_source dispc_clk_src = dss_get_dispc_clk_source();
3452 3454
3453 if (dispc_runtime_get()) 3455 if (dispc_runtime_get())
3454 return; 3456 return;
3455 3457
3456 seq_printf(s, "- DISPC -\n"); 3458 seq_printf(s, "- DISPC -\n");
3457 3459
3458 seq_printf(s, "dispc fclk source = %s (%s)\n", 3460 seq_printf(s, "dispc fclk source = %s\n",
3459 dss_get_generic_clk_source_name(dispc_clk_src), 3461 dss_get_clk_source_name(dispc_clk_src));
3460 dss_feat_get_clk_source_name(dispc_clk_src));
3461 3462
3462 seq_printf(s, "fck\t\t%-16lu\n", dispc_fclk_rate()); 3463 seq_printf(s, "fck\t\t%-16lu\n", dispc_fclk_rate());
3463 3464
@@ -3814,6 +3815,139 @@ void dispc_disable_sidle(void)
3814 REG_FLD_MOD(DISPC_SYSCONFIG, 1, 4, 3); /* SIDLEMODE: no idle */ 3815 REG_FLD_MOD(DISPC_SYSCONFIG, 1, 4, 3); /* SIDLEMODE: no idle */
3815} 3816}
3816 3817
3818u32 dispc_mgr_gamma_size(enum omap_channel channel)
3819{
3820 const struct dispc_gamma_desc *gdesc = &mgr_desc[channel].gamma;
3821
3822 if (!dispc.feat->has_gamma_table)
3823 return 0;
3824
3825 return gdesc->len;
3826}
3827EXPORT_SYMBOL(dispc_mgr_gamma_size);
3828
3829static void dispc_mgr_write_gamma_table(enum omap_channel channel)
3830{
3831 const struct dispc_gamma_desc *gdesc = &mgr_desc[channel].gamma;
3832 u32 *table = dispc.gamma_table[channel];
3833 unsigned int i;
3834
3835 DSSDBG("%s: channel %d\n", __func__, channel);
3836
3837 for (i = 0; i < gdesc->len; ++i) {
3838 u32 v = table[i];
3839
3840 if (gdesc->has_index)
3841 v |= i << 24;
3842 else if (i == 0)
3843 v |= 1 << 31;
3844
3845 dispc_write_reg(gdesc->reg, v);
3846 }
3847}
3848
3849static void dispc_restore_gamma_tables(void)
3850{
3851 DSSDBG("%s()\n", __func__);
3852
3853 if (!dispc.feat->has_gamma_table)
3854 return;
3855
3856 dispc_mgr_write_gamma_table(OMAP_DSS_CHANNEL_LCD);
3857
3858 dispc_mgr_write_gamma_table(OMAP_DSS_CHANNEL_DIGIT);
3859
3860 if (dss_has_feature(FEAT_MGR_LCD2))
3861 dispc_mgr_write_gamma_table(OMAP_DSS_CHANNEL_LCD2);
3862
3863 if (dss_has_feature(FEAT_MGR_LCD3))
3864 dispc_mgr_write_gamma_table(OMAP_DSS_CHANNEL_LCD3);
3865}
3866
3867static const struct drm_color_lut dispc_mgr_gamma_default_lut[] = {
3868 { .red = 0, .green = 0, .blue = 0, },
3869 { .red = U16_MAX, .green = U16_MAX, .blue = U16_MAX, },
3870};
3871
3872void dispc_mgr_set_gamma(enum omap_channel channel,
3873 const struct drm_color_lut *lut,
3874 unsigned int length)
3875{
3876 const struct dispc_gamma_desc *gdesc = &mgr_desc[channel].gamma;
3877 u32 *table = dispc.gamma_table[channel];
3878 uint i;
3879
3880 DSSDBG("%s: channel %d, lut len %u, hw len %u\n", __func__,
3881 channel, length, gdesc->len);
3882
3883 if (!dispc.feat->has_gamma_table)
3884 return;
3885
3886 if (lut == NULL || length < 2) {
3887 lut = dispc_mgr_gamma_default_lut;
3888 length = ARRAY_SIZE(dispc_mgr_gamma_default_lut);
3889 }
3890
3891 for (i = 0; i < length - 1; ++i) {
3892 uint first = i * (gdesc->len - 1) / (length - 1);
3893 uint last = (i + 1) * (gdesc->len - 1) / (length - 1);
3894 uint w = last - first;
3895 u16 r, g, b;
3896 uint j;
3897
3898 if (w == 0)
3899 continue;
3900
3901 for (j = 0; j <= w; j++) {
3902 r = (lut[i].red * (w - j) + lut[i+1].red * j) / w;
3903 g = (lut[i].green * (w - j) + lut[i+1].green * j) / w;
3904 b = (lut[i].blue * (w - j) + lut[i+1].blue * j) / w;
3905
3906 r >>= 16 - gdesc->bits;
3907 g >>= 16 - gdesc->bits;
3908 b >>= 16 - gdesc->bits;
3909
3910 table[first + j] = (r << (gdesc->bits * 2)) |
3911 (g << gdesc->bits) | b;
3912 }
3913 }
3914
3915 if (dispc.is_enabled)
3916 dispc_mgr_write_gamma_table(channel);
3917}
3918EXPORT_SYMBOL(dispc_mgr_set_gamma);
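The loop above expands an arbitrary-length DRM color LUT to the fixed hardware table size by piecewise-linear interpolation between consecutive LUT entries. A standalone, single-channel sketch of the same expansion (illustrative only, not from the patch; the helper name and parameters are made up):

	#include <stddef.h>
	#include <stdint.h>

	/* Expand a short 16-bit LUT into a hw_len-entry table, scaling each
	 * interpolated value down to 'bits' bits, as dispc_mgr_set_gamma()
	 * does per color component. */
	static void expand_lut(const uint16_t *lut, size_t lut_len,
			       uint32_t *table, size_t hw_len, unsigned int bits)
	{
		size_t i, j;

		for (i = 0; i < lut_len - 1; i++) {
			size_t first = i * (hw_len - 1) / (lut_len - 1);
			size_t last = (i + 1) * (hw_len - 1) / (lut_len - 1);
			size_t w = last - first;

			if (w == 0)
				continue;

			for (j = 0; j <= w; j++) {
				/* linear blend of entries i and i+1 */
				uint32_t v = (lut[i] * (w - j) + lut[i + 1] * j) / w;

				table[first + j] = v >> (16 - bits);
			}
		}
	}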
3919
3920static int dispc_init_gamma_tables(void)
3921{
3922 int channel;
3923
3924 if (!dispc.feat->has_gamma_table)
3925 return 0;
3926
3927 for (channel = 0; channel < ARRAY_SIZE(dispc.gamma_table); channel++) {
3928 const struct dispc_gamma_desc *gdesc = &mgr_desc[channel].gamma;
3929 u32 *gt;
3930
3931 if (channel == OMAP_DSS_CHANNEL_LCD2 &&
3932 !dss_has_feature(FEAT_MGR_LCD2))
3933 continue;
3934
3935 if (channel == OMAP_DSS_CHANNEL_LCD3 &&
3936 !dss_has_feature(FEAT_MGR_LCD3))
3937 continue;
3938
3939 gt = devm_kmalloc_array(&dispc.pdev->dev, gdesc->len,
3940 sizeof(u32), GFP_KERNEL);
3941 if (!gt)
3942 return -ENOMEM;
3943
3944 dispc.gamma_table[channel] = gt;
3945
3946 dispc_mgr_set_gamma(channel, NULL, 0);
3947 }
3948 return 0;
3949}
3950
3817static void _omap_dispc_initial_config(void) 3951static void _omap_dispc_initial_config(void)
3818{ 3952{
3819 u32 l; 3953 u32 l;
@@ -3829,8 +3963,15 @@ static void _omap_dispc_initial_config(void)
3829 dispc.core_clk_rate = dispc_fclk_rate(); 3963 dispc.core_clk_rate = dispc_fclk_rate();
3830 } 3964 }
3831 3965
3832 /* FUNCGATED */ 3966 /* Use gamma table mode, instead of palette mode */
3833 if (dss_has_feature(FEAT_FUNCGATED)) 3967 if (dispc.feat->has_gamma_table)
3968 REG_FLD_MOD(DISPC_CONFIG, 1, 3, 3);
3969
3970 /* For older DSS versions (FEAT_FUNCGATED) this enables
3971 * func-clock auto-gating. For newer versions
3972 * (dispc.feat->has_gamma_table) this enables tv-out gamma tables.
3973 */
3974 if (dss_has_feature(FEAT_FUNCGATED) || dispc.feat->has_gamma_table)
3834 REG_FLD_MOD(DISPC_CONFIG, 1, 9, 9); 3975 REG_FLD_MOD(DISPC_CONFIG, 1, 9, 9);
3835 3976
3836 dispc_setup_color_conv_coef(); 3977 dispc_setup_color_conv_coef();
@@ -3934,6 +4075,8 @@ static const struct dispc_features omap44xx_dispc_feats = {
3934 .has_writeback = true, 4075 .has_writeback = true,
3935 .supports_double_pixel = true, 4076 .supports_double_pixel = true,
3936 .reverse_ilace_field_order = true, 4077 .reverse_ilace_field_order = true,
4078 .has_gamma_table = true,
4079 .has_gamma_i734_bug = true,
3937}; 4080};
3938 4081
3939static const struct dispc_features omap54xx_dispc_feats = { 4082static const struct dispc_features omap54xx_dispc_feats = {
@@ -3959,6 +4102,8 @@ static const struct dispc_features omap54xx_dispc_feats = {
3959 .has_writeback = true, 4102 .has_writeback = true,
3960 .supports_double_pixel = true, 4103 .supports_double_pixel = true,
3961 .reverse_ilace_field_order = true, 4104 .reverse_ilace_field_order = true,
4105 .has_gamma_table = true,
4106 .has_gamma_i734_bug = true,
3962}; 4107};
3963 4108
3964static int dispc_init_features(struct platform_device *pdev) 4109static int dispc_init_features(struct platform_device *pdev)
@@ -4050,6 +4195,168 @@ void dispc_free_irq(void *dev_id)
4050} 4195}
4051EXPORT_SYMBOL(dispc_free_irq); 4196EXPORT_SYMBOL(dispc_free_irq);
4052 4197
4198/*
4199 * Workaround for errata i734 in DSS dispc
4200 * - LCD1 Gamma Correction Is Not Working When GFX Pipe Is Disabled
4201 *
4202 * For gamma tables to work on LCD1 the GFX plane has to be used at
4203 * least once after DSS HW has come out of reset. The workaround
4204 * sets up a minimal LCD setup with GFX plane and waits for one
4205 * vertical sync irq before disabling the setup and continuing with
4206 * the context restore. The physical outputs are gated during the
4207 * operation. This workaround requires that gamma table's LOADMODE
4208 * is set to 0x2 in DISPC_CONTROL1 register.
4209 *
4210 * For details see:
4211 * OMAP543x Multimedia Device Silicon Revision 2.0 Silicon Errata
4212 * Literature Number: SWPZ037E
4213 * Or some other relevant errata document for the DSS IP version.
4214 */
4215
4216static const struct dispc_errata_i734_data {
4217 struct omap_video_timings timings;
4218 struct omap_overlay_info ovli;
4219 struct omap_overlay_manager_info mgri;
4220 struct dss_lcd_mgr_config lcd_conf;
4221} i734 = {
4222 .timings = {
4223 .x_res = 8, .y_res = 1,
4224 .pixelclock = 16000000,
4225 .hsw = 8, .hfp = 4, .hbp = 4,
4226 .vsw = 1, .vfp = 1, .vbp = 1,
4227 .vsync_level = OMAPDSS_SIG_ACTIVE_LOW,
4228 .hsync_level = OMAPDSS_SIG_ACTIVE_LOW,
4229 .interlace = false,
4230 .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
4231 .de_level = OMAPDSS_SIG_ACTIVE_HIGH,
4232 .sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE,
4233 .double_pixel = false,
4234 },
4235 .ovli = {
4236 .screen_width = 1,
4237 .width = 1, .height = 1,
4238 .color_mode = OMAP_DSS_COLOR_RGB24U,
4239 .rotation = OMAP_DSS_ROT_0,
4240 .rotation_type = OMAP_DSS_ROT_DMA,
4241 .mirror = 0,
4242 .pos_x = 0, .pos_y = 0,
4243 .out_width = 0, .out_height = 0,
4244 .global_alpha = 0xff,
4245 .pre_mult_alpha = 0,
4246 .zorder = 0,
4247 },
4248 .mgri = {
4249 .default_color = 0,
4250 .trans_enabled = false,
4251 .partial_alpha_enabled = false,
4252 .cpr_enable = false,
4253 },
4254 .lcd_conf = {
4255 .io_pad_mode = DSS_IO_PAD_MODE_BYPASS,
4256 .stallmode = false,
4257 .fifohandcheck = false,
4258 .clock_info = {
4259 .lck_div = 1,
4260 .pck_div = 2,
4261 },
4262 .video_port_width = 24,
4263 .lcden_sig_polarity = 0,
4264 },
4265};
4266
4267static struct i734_buf {
4268 size_t size;
4269 dma_addr_t paddr;
4270 void *vaddr;
4271} i734_buf;
4272
4273static int dispc_errata_i734_wa_init(void)
4274{
4275 if (!dispc.feat->has_gamma_i734_bug)
4276 return 0;
4277
4278 i734_buf.size = i734.ovli.width * i734.ovli.height *
4279 color_mode_to_bpp(i734.ovli.color_mode) / 8;
4280
4281 i734_buf.vaddr = dma_alloc_writecombine(&dispc.pdev->dev, i734_buf.size,
4282 &i734_buf.paddr, GFP_KERNEL);
4283 if (!i734_buf.vaddr) {
4284 dev_err(&dispc.pdev->dev, "%s: dma_alloc_writecombine failed",
4285 __func__);
4286 return -ENOMEM;
4287 }
4288
4289 return 0;
4290}
4291
4292static void dispc_errata_i734_wa_fini(void)
4293{
4294 if (!dispc.feat->has_gamma_i734_bug)
4295 return;
4296
4297 dma_free_writecombine(&dispc.pdev->dev, i734_buf.size, i734_buf.vaddr,
4298 i734_buf.paddr);
4299}
4300
4301static void dispc_errata_i734_wa(void)
4302{
4303 u32 framedone_irq = dispc_mgr_get_framedone_irq(OMAP_DSS_CHANNEL_LCD);
4304 struct omap_overlay_info ovli;
4305 struct dss_lcd_mgr_config lcd_conf;
4306 u32 gatestate;
4307 unsigned int count;
4308
4309 if (!dispc.feat->has_gamma_i734_bug)
4310 return;
4311
4312 gatestate = REG_GET(DISPC_CONFIG, 8, 4);
4313
4314 ovli = i734.ovli;
4315 ovli.paddr = i734_buf.paddr;
4316 lcd_conf = i734.lcd_conf;
4317
4318 /* Gate all LCD1 outputs */
4319 REG_FLD_MOD(DISPC_CONFIG, 0x1f, 8, 4);
4320
4321 /* Setup and enable GFX plane */
4322 dispc_ovl_set_channel_out(OMAP_DSS_GFX, OMAP_DSS_CHANNEL_LCD);
4323 dispc_ovl_setup(OMAP_DSS_GFX, &ovli, false, &i734.timings, false);
4324 dispc_ovl_enable(OMAP_DSS_GFX, true);
4325
4326 /* Set up and enable display manager for LCD1 */
4327 dispc_mgr_setup(OMAP_DSS_CHANNEL_LCD, &i734.mgri);
4328 dispc_calc_clock_rates(dss_get_dispc_clk_rate(),
4329 &lcd_conf.clock_info);
4330 dispc_mgr_set_lcd_config(OMAP_DSS_CHANNEL_LCD, &lcd_conf);
4331 dispc_mgr_set_timings(OMAP_DSS_CHANNEL_LCD, &i734.timings);
4332
4333 dispc_clear_irqstatus(framedone_irq);
4334
4335 /* Enable and shut the channel to produce just one frame */
4336 dispc_mgr_enable(OMAP_DSS_CHANNEL_LCD, true);
4337 dispc_mgr_enable(OMAP_DSS_CHANNEL_LCD, false);
4338
4339 /* Busy wait for framedone. We can't fiddle with irq handlers
4340 * in PM resume. Typically the loop runs less than 5 times and
4341 * waits less than a micro second.
4342 */
4343 count = 0;
4344 while (!(dispc_read_irqstatus() & framedone_irq)) {
4345 if (count++ > 10000) {
4346 dev_err(&dispc.pdev->dev, "%s: framedone timeout\n",
4347 __func__);
4348 break;
4349 }
4350 }
4351 dispc_ovl_enable(OMAP_DSS_GFX, false);
4352
4353 /* Clear all irq bits before continuing */
4354 dispc_clear_irqstatus(0xffffffff);
4355
4356 /* Restore the original state to LCD1 output gates */
4357 REG_FLD_MOD(DISPC_CONFIG, gatestate, 8, 4);
4358}
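For reference, a time-bounded variant of the framedone poll above could look like the following sketch (illustrative only, not part of the patch; it reuses the helpers already present in dispc.c, assumes <linux/ktime.h>, and bounds the wait by wall-clock time instead of an iteration count):

	/* illustrative alternative to the counted busy-wait above */
	ktime_t timeout = ktime_add_us(ktime_get(), 1000);

	while (!(dispc_read_irqstatus() & framedone_irq)) {
		if (ktime_after(ktime_get(), timeout)) {
			dev_err(&dispc.pdev->dev, "%s: framedone timeout\n",
				__func__);
			break;
		}
		cpu_relax();
	}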
4359
4053/* DISPC HW IP initialisation */ 4360/* DISPC HW IP initialisation */
4054static int dispc_bind(struct device *dev, struct device *master, void *data) 4361static int dispc_bind(struct device *dev, struct device *master, void *data)
4055{ 4362{
@@ -4067,6 +4374,10 @@ static int dispc_bind(struct device *dev, struct device *master, void *data)
4067 if (r) 4374 if (r)
4068 return r; 4375 return r;
4069 4376
4377 r = dispc_errata_i734_wa_init();
4378 if (r)
4379 return r;
4380
4070 dispc_mem = platform_get_resource(dispc.pdev, IORESOURCE_MEM, 0); 4381 dispc_mem = platform_get_resource(dispc.pdev, IORESOURCE_MEM, 0);
4071 if (!dispc_mem) { 4382 if (!dispc_mem) {
4072 DSSERR("can't get IORESOURCE_MEM DISPC\n"); 4383 DSSERR("can't get IORESOURCE_MEM DISPC\n");
@@ -4100,6 +4411,10 @@ static int dispc_bind(struct device *dev, struct device *master, void *data)
4100 } 4411 }
4101 } 4412 }
4102 4413
4414 r = dispc_init_gamma_tables();
4415 if (r)
4416 return r;
4417
4103 pm_runtime_enable(&pdev->dev); 4418 pm_runtime_enable(&pdev->dev);
4104 4419
4105 r = dispc_runtime_get(); 4420 r = dispc_runtime_get();
@@ -4127,6 +4442,8 @@ static void dispc_unbind(struct device *dev, struct device *master,
4127 void *data) 4442 void *data)
4128{ 4443{
4129 pm_runtime_disable(dev); 4444 pm_runtime_disable(dev);
4445
4446 dispc_errata_i734_wa_fini();
4130} 4447}
4131 4448
4132static const struct component_ops dispc_component_ops = { 4449static const struct component_ops dispc_component_ops = {
@@ -4169,7 +4486,11 @@ static int dispc_runtime_resume(struct device *dev)
4169 if (REG_GET(DISPC_CONFIG, 2, 1) != OMAP_DSS_LOAD_FRAME_ONLY) { 4486 if (REG_GET(DISPC_CONFIG, 2, 1) != OMAP_DSS_LOAD_FRAME_ONLY) {
4170 _omap_dispc_initial_config(); 4487 _omap_dispc_initial_config();
4171 4488
4489 dispc_errata_i734_wa();
4490
4172 dispc_restore_context(); 4491 dispc_restore_context();
4492
4493 dispc_restore_gamma_tables();
4173 } 4494 }
4174 4495
4175 dispc.is_enabled = true; 4496 dispc.is_enabled = true;
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.h b/drivers/gpu/drm/omapdrm/dss/dispc.h
index 483744223dd1..bc1d8126ee87 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.h
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.h
@@ -42,6 +42,11 @@
42#define DISPC_MSTANDBY_CTRL 0x0858 42#define DISPC_MSTANDBY_CTRL 0x0858
43#define DISPC_GLOBAL_MFLAG_ATTRIBUTE 0x085C 43#define DISPC_GLOBAL_MFLAG_ATTRIBUTE 0x085C
44 44
45#define DISPC_GAMMA_TABLE0 0x0630
46#define DISPC_GAMMA_TABLE1 0x0634
47#define DISPC_GAMMA_TABLE2 0x0638
48#define DISPC_GAMMA_TABLE3 0x0850
49
45/* DISPC overlay registers */ 50/* DISPC overlay registers */
46#define DISPC_OVL_BA0(n) (DISPC_OVL_BASE(n) + \ 51#define DISPC_OVL_BA0(n) (DISPC_OVL_BASE(n) + \
47 DISPC_BA0_OFFSET(n)) 52 DISPC_BA0_OFFSET(n))
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc_coefs.c b/drivers/gpu/drm/omapdrm/dss/dispc_coefs.c
index 038c15b04215..34fad2376f8d 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc_coefs.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc_coefs.c
@@ -18,8 +18,8 @@
18 */ 18 */
19 19
20#include <linux/kernel.h> 20#include <linux/kernel.h>
21#include <video/omapdss.h>
22 21
22#include "omapdss.h"
23#include "dispc.h" 23#include "dispc.h"
24 24
25static const struct dispc_coef coef3_M8[8] = { 25static const struct dispc_coef coef3_M8[8] = {
diff --git a/drivers/gpu/drm/omapdrm/dss/display.c b/drivers/gpu/drm/omapdrm/dss/display.c
index 9f3dd09b0a6c..8dcdd7cf9937 100644
--- a/drivers/gpu/drm/omapdrm/dss/display.c
+++ b/drivers/gpu/drm/omapdrm/dss/display.c
@@ -28,7 +28,7 @@
28#include <linux/platform_device.h> 28#include <linux/platform_device.h>
29#include <linux/of.h> 29#include <linux/of.h>
30 30
31#include <video/omapdss.h> 31#include "omapdss.h"
32#include "dss.h" 32#include "dss.h"
33#include "dss_features.h" 33#include "dss_features.h"
34 34
diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
index 97ea60257884..b268295b76cf 100644
--- a/drivers/gpu/drm/omapdrm/dss/dpi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
@@ -34,17 +34,15 @@
34#include <linux/clk.h> 34#include <linux/clk.h>
35#include <linux/component.h> 35#include <linux/component.h>
36 36
37#include <video/omapdss.h> 37#include "omapdss.h"
38
39#include "dss.h" 38#include "dss.h"
40#include "dss_features.h" 39#include "dss_features.h"
41 40
42#define HSDIV_DISPC 0
43
44struct dpi_data { 41struct dpi_data {
45 struct platform_device *pdev; 42 struct platform_device *pdev;
46 43
47 struct regulator *vdds_dsi_reg; 44 struct regulator *vdds_dsi_reg;
45 enum dss_clk_source clk_src;
48 struct dss_pll *pll; 46 struct dss_pll *pll;
49 47
50 struct mutex lock; 48 struct mutex lock;
@@ -69,7 +67,7 @@ static struct dpi_data *dpi_get_data_from_pdev(struct platform_device *pdev)
69 return dev_get_drvdata(&pdev->dev); 67 return dev_get_drvdata(&pdev->dev);
70} 68}
71 69
72static struct dss_pll *dpi_get_pll(enum omap_channel channel) 70static enum dss_clk_source dpi_get_clk_src(enum omap_channel channel)
73{ 71{
74 /* 72 /*
75 * XXX we can't currently use DSI PLL for DPI with OMAP3, as the DSI PLL 73 * XXX we can't currently use DSI PLL for DPI with OMAP3, as the DSI PLL
@@ -83,64 +81,51 @@ static struct dss_pll *dpi_get_pll(enum omap_channel channel)
83 case OMAPDSS_VER_OMAP3630: 81 case OMAPDSS_VER_OMAP3630:
84 case OMAPDSS_VER_AM35xx: 82 case OMAPDSS_VER_AM35xx:
85 case OMAPDSS_VER_AM43xx: 83 case OMAPDSS_VER_AM43xx:
86 return NULL; 84 return DSS_CLK_SRC_FCK;
87 85
88 case OMAPDSS_VER_OMAP4430_ES1: 86 case OMAPDSS_VER_OMAP4430_ES1:
89 case OMAPDSS_VER_OMAP4430_ES2: 87 case OMAPDSS_VER_OMAP4430_ES2:
90 case OMAPDSS_VER_OMAP4: 88 case OMAPDSS_VER_OMAP4:
91 switch (channel) { 89 switch (channel) {
92 case OMAP_DSS_CHANNEL_LCD: 90 case OMAP_DSS_CHANNEL_LCD:
93 return dss_pll_find("dsi0"); 91 return DSS_CLK_SRC_PLL1_1;
94 case OMAP_DSS_CHANNEL_LCD2: 92 case OMAP_DSS_CHANNEL_LCD2:
95 return dss_pll_find("dsi1"); 93 return DSS_CLK_SRC_PLL2_1;
96 default: 94 default:
97 return NULL; 95 return DSS_CLK_SRC_FCK;
98 } 96 }
99 97
100 case OMAPDSS_VER_OMAP5: 98 case OMAPDSS_VER_OMAP5:
101 switch (channel) { 99 switch (channel) {
102 case OMAP_DSS_CHANNEL_LCD: 100 case OMAP_DSS_CHANNEL_LCD:
103 return dss_pll_find("dsi0"); 101 return DSS_CLK_SRC_PLL1_1;
104 case OMAP_DSS_CHANNEL_LCD3: 102 case OMAP_DSS_CHANNEL_LCD3:
105 return dss_pll_find("dsi1"); 103 return DSS_CLK_SRC_PLL2_1;
104 case OMAP_DSS_CHANNEL_LCD2:
106 default: 105 default:
107 return NULL; 106 return DSS_CLK_SRC_FCK;
108 } 107 }
109 108
110 case OMAPDSS_VER_DRA7xx: 109 case OMAPDSS_VER_DRA7xx:
111 switch (channel) { 110 switch (channel) {
112 case OMAP_DSS_CHANNEL_LCD: 111 case OMAP_DSS_CHANNEL_LCD:
112 return DSS_CLK_SRC_PLL1_1;
113 case OMAP_DSS_CHANNEL_LCD2: 113 case OMAP_DSS_CHANNEL_LCD2:
114 return dss_pll_find("video0"); 114 return DSS_CLK_SRC_PLL1_3;
115 case OMAP_DSS_CHANNEL_LCD3: 115 case OMAP_DSS_CHANNEL_LCD3:
116 return dss_pll_find("video1"); 116 return DSS_CLK_SRC_PLL2_1;
117 default: 117 default:
118 return NULL; 118 return DSS_CLK_SRC_FCK;
119 } 119 }
120 120
121 default: 121 default:
122 return NULL; 122 return DSS_CLK_SRC_FCK;
123 }
124}
125
126static enum omap_dss_clk_source dpi_get_alt_clk_src(enum omap_channel channel)
127{
128 switch (channel) {
129 case OMAP_DSS_CHANNEL_LCD:
130 return OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC;
131 case OMAP_DSS_CHANNEL_LCD2:
132 return OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC;
133 case OMAP_DSS_CHANNEL_LCD3:
134 return OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC;
135 default:
136 /* this shouldn't happen */
137 WARN_ON(1);
138 return OMAP_DSS_CLK_SRC_FCK;
139 } 123 }
140} 124}
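In short, the DPI output now resolves an enum dss_clk_source instead of a struct dss_pll pointer. A hypothetical usage sketch of the resulting flow (channel value chosen purely for illustration; the helpers are the ones introduced or renamed by this series):

	enum dss_clk_source src = dpi_get_clk_src(OMAP_DSS_CHANNEL_LCD2);

	/* route the LCD2 manager to the selected source and report it */
	dss_select_lcd_clk_source(OMAP_DSS_CHANNEL_LCD2, src);
	DSSDBG("LCD2 fclk source = %s\n", dss_get_clk_source_name(src));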
141 125
142struct dpi_clk_calc_ctx { 126struct dpi_clk_calc_ctx {
143 struct dss_pll *pll; 127 struct dss_pll *pll;
128 unsigned clkout_idx;
144 129
145 /* inputs */ 130 /* inputs */
146 131
@@ -148,7 +133,7 @@ struct dpi_clk_calc_ctx {
148 133
149 /* outputs */ 134 /* outputs */
150 135
151 struct dss_pll_clock_info dsi_cinfo; 136 struct dss_pll_clock_info pll_cinfo;
152 unsigned long fck; 137 unsigned long fck;
153 struct dispc_clock_info dispc_cinfo; 138 struct dispc_clock_info dispc_cinfo;
154}; 139};
@@ -193,8 +178,8 @@ static bool dpi_calc_hsdiv_cb(int m_dispc, unsigned long dispc,
193 if (m_dispc > 1 && m_dispc % 2 != 0 && ctx->pck_min >= 100000000) 178 if (m_dispc > 1 && m_dispc % 2 != 0 && ctx->pck_min >= 100000000)
194 return false; 179 return false;
195 180
196 ctx->dsi_cinfo.mX[HSDIV_DISPC] = m_dispc; 181 ctx->pll_cinfo.mX[ctx->clkout_idx] = m_dispc;
197 ctx->dsi_cinfo.clkout[HSDIV_DISPC] = dispc; 182 ctx->pll_cinfo.clkout[ctx->clkout_idx] = dispc;
198 183
199 return dispc_div_calc(dispc, ctx->pck_min, ctx->pck_max, 184 return dispc_div_calc(dispc, ctx->pck_min, ctx->pck_max,
200 dpi_calc_dispc_cb, ctx); 185 dpi_calc_dispc_cb, ctx);
@@ -207,12 +192,12 @@ static bool dpi_calc_pll_cb(int n, int m, unsigned long fint,
207{ 192{
208 struct dpi_clk_calc_ctx *ctx = data; 193 struct dpi_clk_calc_ctx *ctx = data;
209 194
210 ctx->dsi_cinfo.n = n; 195 ctx->pll_cinfo.n = n;
211 ctx->dsi_cinfo.m = m; 196 ctx->pll_cinfo.m = m;
212 ctx->dsi_cinfo.fint = fint; 197 ctx->pll_cinfo.fint = fint;
213 ctx->dsi_cinfo.clkdco = clkdco; 198 ctx->pll_cinfo.clkdco = clkdco;
214 199
215 return dss_pll_hsdiv_calc(ctx->pll, clkdco, 200 return dss_pll_hsdiv_calc_a(ctx->pll, clkdco,
216 ctx->pck_min, dss_feat_get_param_max(FEAT_PARAM_DSS_FCK), 201 ctx->pck_min, dss_feat_get_param_max(FEAT_PARAM_DSS_FCK),
217 dpi_calc_hsdiv_cb, ctx); 202 dpi_calc_hsdiv_cb, ctx);
218} 203}
@@ -227,25 +212,39 @@ static bool dpi_calc_dss_cb(unsigned long fck, void *data)
227 dpi_calc_dispc_cb, ctx); 212 dpi_calc_dispc_cb, ctx);
228} 213}
229 214
230static bool dpi_dsi_clk_calc(struct dpi_data *dpi, unsigned long pck, 215static bool dpi_pll_clk_calc(struct dpi_data *dpi, unsigned long pck,
231 struct dpi_clk_calc_ctx *ctx) 216 struct dpi_clk_calc_ctx *ctx)
232{ 217{
233 unsigned long clkin; 218 unsigned long clkin;
234 unsigned long pll_min, pll_max;
235 219
236 memset(ctx, 0, sizeof(*ctx)); 220 memset(ctx, 0, sizeof(*ctx));
237 ctx->pll = dpi->pll; 221 ctx->pll = dpi->pll;
238 ctx->pck_min = pck - 1000; 222 ctx->clkout_idx = dss_pll_get_clkout_idx_for_src(dpi->clk_src);
239 ctx->pck_max = pck + 1000;
240 223
241 pll_min = 0; 224 clkin = clk_get_rate(dpi->pll->clkin);
242 pll_max = 0;
243 225
244 clkin = clk_get_rate(ctx->pll->clkin); 226 if (dpi->pll->hw->type == DSS_PLL_TYPE_A) {
227 unsigned long pll_min, pll_max;
245 228
246 return dss_pll_calc(ctx->pll, clkin, 229 ctx->pck_min = pck - 1000;
247 pll_min, pll_max, 230 ctx->pck_max = pck + 1000;
248 dpi_calc_pll_cb, ctx); 231
232 pll_min = 0;
233 pll_max = 0;
234
235 return dss_pll_calc_a(ctx->pll, clkin,
236 pll_min, pll_max,
237 dpi_calc_pll_cb, ctx);
238 } else { /* DSS_PLL_TYPE_B */
239 dss_pll_calc_b(dpi->pll, clkin, pck, &ctx->pll_cinfo);
240
241 ctx->dispc_cinfo.lck_div = 1;
242 ctx->dispc_cinfo.pck_div = 1;
243 ctx->dispc_cinfo.lck = ctx->pll_cinfo.clkout[0];
244 ctx->dispc_cinfo.pck = ctx->dispc_cinfo.lck;
245
246 return true;
247 }
249} 248}
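Condensed, the clock calculation now dispatches on the PLL type (sketch only; the real code above additionally fills in the DISPC divider info for type-B PLLs):

	if (dpi->pll->hw->type == DSS_PLL_TYPE_A)
		ok = dss_pll_calc_a(dpi->pll, clkin, pll_min, pll_max,
				    dpi_calc_pll_cb, ctx);
	else	/* DSS_PLL_TYPE_B */
		ok = dss_pll_calc_b(dpi->pll, clkin, pck, &ctx->pll_cinfo);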
250 249
251static bool dpi_dss_clk_calc(unsigned long pck, struct dpi_clk_calc_ctx *ctx) 250static bool dpi_dss_clk_calc(unsigned long pck, struct dpi_clk_calc_ctx *ctx)
@@ -279,7 +278,7 @@ static bool dpi_dss_clk_calc(unsigned long pck, struct dpi_clk_calc_ctx *ctx)
279 278
280 279
281 280
282static int dpi_set_dsi_clk(struct dpi_data *dpi, enum omap_channel channel, 281static int dpi_set_pll_clk(struct dpi_data *dpi, enum omap_channel channel,
283 unsigned long pck_req, unsigned long *fck, int *lck_div, 282 unsigned long pck_req, unsigned long *fck, int *lck_div,
284 int *pck_div) 283 int *pck_div)
285{ 284{
@@ -287,20 +286,19 @@ static int dpi_set_dsi_clk(struct dpi_data *dpi, enum omap_channel channel,
287 int r; 286 int r;
288 bool ok; 287 bool ok;
289 288
290 ok = dpi_dsi_clk_calc(dpi, pck_req, &ctx); 289 ok = dpi_pll_clk_calc(dpi, pck_req, &ctx);
291 if (!ok) 290 if (!ok)
292 return -EINVAL; 291 return -EINVAL;
293 292
294 r = dss_pll_set_config(dpi->pll, &ctx.dsi_cinfo); 293 r = dss_pll_set_config(dpi->pll, &ctx.pll_cinfo);
295 if (r) 294 if (r)
296 return r; 295 return r;
297 296
298 dss_select_lcd_clk_source(channel, 297 dss_select_lcd_clk_source(channel, dpi->clk_src);
299 dpi_get_alt_clk_src(channel));
300 298
301 dpi->mgr_config.clock_info = ctx.dispc_cinfo; 299 dpi->mgr_config.clock_info = ctx.dispc_cinfo;
302 300
303 *fck = ctx.dsi_cinfo.clkout[HSDIV_DISPC]; 301 *fck = ctx.pll_cinfo.clkout[ctx.clkout_idx];
304 *lck_div = ctx.dispc_cinfo.lck_div; 302 *lck_div = ctx.dispc_cinfo.lck_div;
305 *pck_div = ctx.dispc_cinfo.pck_div; 303 *pck_div = ctx.dispc_cinfo.pck_div;
306 304
@@ -342,7 +340,7 @@ static int dpi_set_mode(struct dpi_data *dpi)
342 int r = 0; 340 int r = 0;
343 341
344 if (dpi->pll) 342 if (dpi->pll)
345 r = dpi_set_dsi_clk(dpi, channel, t->pixelclock, &fck, 343 r = dpi_set_pll_clk(dpi, channel, t->pixelclock, &fck,
346 &lck_div, &pck_div); 344 &lck_div, &pck_div);
347 else 345 else
348 r = dpi_set_dispc_clk(dpi, t->pixelclock, &fck, 346 r = dpi_set_dispc_clk(dpi, t->pixelclock, &fck,
@@ -419,7 +417,7 @@ static int dpi_display_enable(struct omap_dss_device *dssdev)
419 if (dpi->pll) { 417 if (dpi->pll) {
420 r = dss_pll_enable(dpi->pll); 418 r = dss_pll_enable(dpi->pll);
421 if (r) 419 if (r)
422 goto err_dsi_pll_init; 420 goto err_pll_init;
423 } 421 }
424 422
425 r = dpi_set_mode(dpi); 423 r = dpi_set_mode(dpi);
@@ -442,7 +440,7 @@ err_mgr_enable:
442err_set_mode: 440err_set_mode:
443 if (dpi->pll) 441 if (dpi->pll)
444 dss_pll_disable(dpi->pll); 442 dss_pll_disable(dpi->pll);
445err_dsi_pll_init: 443err_pll_init:
446err_src_sel: 444err_src_sel:
447 dispc_runtime_put(); 445 dispc_runtime_put();
448err_get_dispc: 446err_get_dispc:
@@ -465,7 +463,7 @@ static void dpi_display_disable(struct omap_dss_device *dssdev)
465 dss_mgr_disable(channel); 463 dss_mgr_disable(channel);
466 464
467 if (dpi->pll) { 465 if (dpi->pll) {
468 dss_select_lcd_clk_source(channel, OMAP_DSS_CLK_SRC_FCK); 466 dss_select_lcd_clk_source(channel, DSS_CLK_SRC_FCK);
469 dss_pll_disable(dpi->pll); 467 dss_pll_disable(dpi->pll);
470 } 468 }
471 469
@@ -524,11 +522,11 @@ static int dpi_check_timings(struct omap_dss_device *dssdev,
524 return -EINVAL; 522 return -EINVAL;
525 523
526 if (dpi->pll) { 524 if (dpi->pll) {
527 ok = dpi_dsi_clk_calc(dpi, timings->pixelclock, &ctx); 525 ok = dpi_pll_clk_calc(dpi, timings->pixelclock, &ctx);
528 if (!ok) 526 if (!ok)
529 return -EINVAL; 527 return -EINVAL;
530 528
531 fck = ctx.dsi_cinfo.clkout[HSDIV_DISPC]; 529 fck = ctx.pll_cinfo.clkout[ctx.clkout_idx];
532 } else { 530 } else {
533 ok = dpi_dss_clk_calc(timings->pixelclock, &ctx); 531 ok = dpi_dss_clk_calc(timings->pixelclock, &ctx);
534 if (!ok) 532 if (!ok)
@@ -558,7 +556,7 @@ static void dpi_set_data_lines(struct omap_dss_device *dssdev, int data_lines)
558 mutex_unlock(&dpi->lock); 556 mutex_unlock(&dpi->lock);
559} 557}
560 558
561static int dpi_verify_dsi_pll(struct dss_pll *pll) 559static int dpi_verify_pll(struct dss_pll *pll)
562{ 560{
563 int r; 561 int r;
564 562
@@ -602,16 +600,14 @@ static void dpi_init_pll(struct dpi_data *dpi)
602 if (dpi->pll) 600 if (dpi->pll)
603 return; 601 return;
604 602
605 pll = dpi_get_pll(dpi->output.dispc_channel); 603 dpi->clk_src = dpi_get_clk_src(dpi->output.dispc_channel);
604
605 pll = dss_pll_find_by_src(dpi->clk_src);
606 if (!pll) 606 if (!pll)
607 return; 607 return;
608 608
609 /* On DRA7 we need to set a mux to use the PLL */ 609 if (dpi_verify_pll(pll)) {
610 if (omapdss_get_version() == OMAPDSS_VER_DRA7xx) 610 DSSWARN("PLL not operational\n");
611 dss_ctrl_pll_set_control_mux(pll->id, dpi->output.dispc_channel);
612
613 if (dpi_verify_dsi_pll(pll)) {
614 DSSWARN("DSI PLL not operational\n");
615 return; 611 return;
616 } 612 }
617 613
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index 56c43f355ce3..e1be5e795cd8 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -42,9 +42,9 @@
42#include <linux/of_platform.h> 42#include <linux/of_platform.h>
43#include <linux/component.h> 43#include <linux/component.h>
44 44
45#include <video/omapdss.h>
46#include <video/mipi_display.h> 45#include <video/mipi_display.h>
47 46
47#include "omapdss.h"
48#include "dss.h" 48#include "dss.h"
49#include "dss_features.h" 49#include "dss_features.h"
50 50
@@ -1261,7 +1261,7 @@ static unsigned long dsi_fclk_rate(struct platform_device *dsidev)
1261 unsigned long r; 1261 unsigned long r;
1262 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); 1262 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1263 1263
1264 if (dss_get_dsi_clk_source(dsi->module_id) == OMAP_DSS_CLK_SRC_FCK) { 1264 if (dss_get_dsi_clk_source(dsi->module_id) == DSS_CLK_SRC_FCK) {
1265 /* DSI FCLK source is DSS_CLK_FCK */ 1265 /* DSI FCLK source is DSS_CLK_FCK */
1266 r = clk_get_rate(dsi->dss_clk); 1266 r = clk_get_rate(dsi->dss_clk);
1267 } else { 1267 } else {
@@ -1474,7 +1474,7 @@ static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
1474{ 1474{
1475 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); 1475 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1476 struct dss_pll_clock_info *cinfo = &dsi->pll.cinfo; 1476 struct dss_pll_clock_info *cinfo = &dsi->pll.cinfo;
1477 enum omap_dss_clk_source dispc_clk_src, dsi_clk_src; 1477 enum dss_clk_source dispc_clk_src, dsi_clk_src;
1478 int dsi_module = dsi->module_id; 1478 int dsi_module = dsi->module_id;
1479 struct dss_pll *pll = &dsi->pll; 1479 struct dss_pll *pll = &dsi->pll;
1480 1480
@@ -1494,28 +1494,27 @@ static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
1494 cinfo->clkdco, cinfo->m); 1494 cinfo->clkdco, cinfo->m);
1495 1495
1496 seq_printf(s, "DSI_PLL_HSDIV_DISPC (%s)\t%-16lum_dispc %u\t(%s)\n", 1496 seq_printf(s, "DSI_PLL_HSDIV_DISPC (%s)\t%-16lum_dispc %u\t(%s)\n",
1497 dss_feat_get_clk_source_name(dsi_module == 0 ? 1497 dss_get_clk_source_name(dsi_module == 0 ?
1498 OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC : 1498 DSS_CLK_SRC_PLL1_1 :
1499 OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC), 1499 DSS_CLK_SRC_PLL2_1),
1500 cinfo->clkout[HSDIV_DISPC], 1500 cinfo->clkout[HSDIV_DISPC],
1501 cinfo->mX[HSDIV_DISPC], 1501 cinfo->mX[HSDIV_DISPC],
1502 dispc_clk_src == OMAP_DSS_CLK_SRC_FCK ? 1502 dispc_clk_src == DSS_CLK_SRC_FCK ?
1503 "off" : "on"); 1503 "off" : "on");
1504 1504
1505 seq_printf(s, "DSI_PLL_HSDIV_DSI (%s)\t%-16lum_dsi %u\t(%s)\n", 1505 seq_printf(s, "DSI_PLL_HSDIV_DSI (%s)\t%-16lum_dsi %u\t(%s)\n",
1506 dss_feat_get_clk_source_name(dsi_module == 0 ? 1506 dss_get_clk_source_name(dsi_module == 0 ?
1507 OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI : 1507 DSS_CLK_SRC_PLL1_2 :
1508 OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI), 1508 DSS_CLK_SRC_PLL2_2),
1509 cinfo->clkout[HSDIV_DSI], 1509 cinfo->clkout[HSDIV_DSI],
1510 cinfo->mX[HSDIV_DSI], 1510 cinfo->mX[HSDIV_DSI],
1511 dsi_clk_src == OMAP_DSS_CLK_SRC_FCK ? 1511 dsi_clk_src == DSS_CLK_SRC_FCK ?
1512 "off" : "on"); 1512 "off" : "on");
1513 1513
1514 seq_printf(s, "- DSI%d -\n", dsi_module + 1); 1514 seq_printf(s, "- DSI%d -\n", dsi_module + 1);
1515 1515
1516 seq_printf(s, "dsi fclk source = %s (%s)\n", 1516 seq_printf(s, "dsi fclk source = %s\n",
1517 dss_get_generic_clk_source_name(dsi_clk_src), 1517 dss_get_clk_source_name(dsi_clk_src));
1518 dss_feat_get_clk_source_name(dsi_clk_src));
1519 1518
1520 seq_printf(s, "DSI_FCLK\t%lu\n", dsi_fclk_rate(dsidev)); 1519 seq_printf(s, "DSI_FCLK\t%lu\n", dsi_fclk_rate(dsidev));
1521 1520
@@ -4101,8 +4100,8 @@ static int dsi_display_init_dispc(struct platform_device *dsidev,
4101 int r; 4100 int r;
4102 4101
4103 dss_select_lcd_clk_source(channel, dsi->module_id == 0 ? 4102 dss_select_lcd_clk_source(channel, dsi->module_id == 0 ?
4104 OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC : 4103 DSS_CLK_SRC_PLL1_1 :
4105 OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC); 4104 DSS_CLK_SRC_PLL2_1);
4106 4105
4107 if (dsi->mode == OMAP_DSS_DSI_CMD_MODE) { 4106 if (dsi->mode == OMAP_DSS_DSI_CMD_MODE) {
4108 r = dss_mgr_register_framedone_handler(channel, 4107 r = dss_mgr_register_framedone_handler(channel,
@@ -4149,7 +4148,7 @@ err1:
4149 dss_mgr_unregister_framedone_handler(channel, 4148 dss_mgr_unregister_framedone_handler(channel,
4150 dsi_framedone_irq_callback, dsidev); 4149 dsi_framedone_irq_callback, dsidev);
4151err: 4150err:
4152 dss_select_lcd_clk_source(channel, OMAP_DSS_CLK_SRC_FCK); 4151 dss_select_lcd_clk_source(channel, DSS_CLK_SRC_FCK);
4153 return r; 4152 return r;
4154} 4153}
4155 4154
@@ -4162,7 +4161,7 @@ static void dsi_display_uninit_dispc(struct platform_device *dsidev,
4162 dss_mgr_unregister_framedone_handler(channel, 4161 dss_mgr_unregister_framedone_handler(channel,
4163 dsi_framedone_irq_callback, dsidev); 4162 dsi_framedone_irq_callback, dsidev);
4164 4163
4165 dss_select_lcd_clk_source(channel, OMAP_DSS_CLK_SRC_FCK); 4164 dss_select_lcd_clk_source(channel, DSS_CLK_SRC_FCK);
4166} 4165}
4167 4166
4168static int dsi_configure_dsi_clocks(struct platform_device *dsidev) 4167static int dsi_configure_dsi_clocks(struct platform_device *dsidev)
@@ -4196,8 +4195,8 @@ static int dsi_display_init_dsi(struct platform_device *dsidev)
4196 goto err1; 4195 goto err1;
4197 4196
4198 dss_select_dsi_clk_source(dsi->module_id, dsi->module_id == 0 ? 4197 dss_select_dsi_clk_source(dsi->module_id, dsi->module_id == 0 ?
4199 OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI : 4198 DSS_CLK_SRC_PLL1_2 :
4200 OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI); 4199 DSS_CLK_SRC_PLL2_2);
4201 4200
4202 DSSDBG("PLL OK\n"); 4201 DSSDBG("PLL OK\n");
4203 4202
@@ -4229,7 +4228,7 @@ static int dsi_display_init_dsi(struct platform_device *dsidev)
4229err3: 4228err3:
4230 dsi_cio_uninit(dsidev); 4229 dsi_cio_uninit(dsidev);
4231err2: 4230err2:
4232 dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK); 4231 dss_select_dsi_clk_source(dsi->module_id, DSS_CLK_SRC_FCK);
4233err1: 4232err1:
4234 dss_pll_disable(&dsi->pll); 4233 dss_pll_disable(&dsi->pll);
4235err0: 4234err0:
@@ -4251,7 +4250,7 @@ static void dsi_display_uninit_dsi(struct platform_device *dsidev,
4251 dsi_vc_enable(dsidev, 2, 0); 4250 dsi_vc_enable(dsidev, 2, 0);
4252 dsi_vc_enable(dsidev, 3, 0); 4251 dsi_vc_enable(dsidev, 3, 0);
4253 4252
4254 dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK); 4253 dss_select_dsi_clk_source(dsi->module_id, DSS_CLK_SRC_FCK);
4255 dsi_cio_uninit(dsidev); 4254 dsi_cio_uninit(dsidev);
4256 dsi_pll_uninit(dsidev, disconnect_lanes); 4255 dsi_pll_uninit(dsidev, disconnect_lanes);
4257} 4256}
@@ -4452,7 +4451,7 @@ static bool dsi_cm_calc_pll_cb(int n, int m, unsigned long fint,
4452 ctx->dsi_cinfo.fint = fint; 4451 ctx->dsi_cinfo.fint = fint;
4453 ctx->dsi_cinfo.clkdco = clkdco; 4452 ctx->dsi_cinfo.clkdco = clkdco;
4454 4453
4455 return dss_pll_hsdiv_calc(ctx->pll, clkdco, ctx->req_pck_min, 4454 return dss_pll_hsdiv_calc_a(ctx->pll, clkdco, ctx->req_pck_min,
4456 dss_feat_get_param_max(FEAT_PARAM_DSS_FCK), 4455 dss_feat_get_param_max(FEAT_PARAM_DSS_FCK),
4457 dsi_cm_calc_hsdiv_cb, ctx); 4456 dsi_cm_calc_hsdiv_cb, ctx);
4458} 4457}
@@ -4491,7 +4490,7 @@ static bool dsi_cm_calc(struct dsi_data *dsi,
4491 pll_min = max(cfg->hs_clk_min * 4, txbyteclk * 4 * 4); 4490 pll_min = max(cfg->hs_clk_min * 4, txbyteclk * 4 * 4);
4492 pll_max = cfg->hs_clk_max * 4; 4491 pll_max = cfg->hs_clk_max * 4;
4493 4492
4494 return dss_pll_calc(ctx->pll, clkin, 4493 return dss_pll_calc_a(ctx->pll, clkin,
4495 pll_min, pll_max, 4494 pll_min, pll_max,
4496 dsi_cm_calc_pll_cb, ctx); 4495 dsi_cm_calc_pll_cb, ctx);
4497} 4496}
@@ -4750,7 +4749,7 @@ static bool dsi_vm_calc_pll_cb(int n, int m, unsigned long fint,
4750 ctx->dsi_cinfo.fint = fint; 4749 ctx->dsi_cinfo.fint = fint;
4751 ctx->dsi_cinfo.clkdco = clkdco; 4750 ctx->dsi_cinfo.clkdco = clkdco;
4752 4751
4753 return dss_pll_hsdiv_calc(ctx->pll, clkdco, ctx->req_pck_min, 4752 return dss_pll_hsdiv_calc_a(ctx->pll, clkdco, ctx->req_pck_min,
4754 dss_feat_get_param_max(FEAT_PARAM_DSS_FCK), 4753 dss_feat_get_param_max(FEAT_PARAM_DSS_FCK),
4755 dsi_vm_calc_hsdiv_cb, ctx); 4754 dsi_vm_calc_hsdiv_cb, ctx);
4756} 4755}
@@ -4792,7 +4791,7 @@ static bool dsi_vm_calc(struct dsi_data *dsi,
4792 pll_max = byteclk_max * 4 * 4; 4791 pll_max = byteclk_max * 4 * 4;
4793 } 4792 }
4794 4793
4795 return dss_pll_calc(ctx->pll, clkin, 4794 return dss_pll_calc_a(ctx->pll, clkin,
4796 pll_min, pll_max, 4795 pll_min, pll_max,
4797 dsi_vm_calc_pll_cb, ctx); 4796 dsi_vm_calc_pll_cb, ctx);
4798} 4797}
@@ -5138,6 +5137,8 @@ static const struct dss_pll_ops dsi_pll_ops = {
5138}; 5137};
5139 5138
5140static const struct dss_pll_hw dss_omap3_dsi_pll_hw = { 5139static const struct dss_pll_hw dss_omap3_dsi_pll_hw = {
5140 .type = DSS_PLL_TYPE_A,
5141
5141 .n_max = (1 << 7) - 1, 5142 .n_max = (1 << 7) - 1,
5142 .m_max = (1 << 11) - 1, 5143 .m_max = (1 << 11) - 1,
5143 .mX_max = (1 << 4) - 1, 5144 .mX_max = (1 << 4) - 1,
@@ -5163,6 +5164,8 @@ static const struct dss_pll_hw dss_omap3_dsi_pll_hw = {
5163}; 5164};
5164 5165
5165static const struct dss_pll_hw dss_omap4_dsi_pll_hw = { 5166static const struct dss_pll_hw dss_omap4_dsi_pll_hw = {
5167 .type = DSS_PLL_TYPE_A,
5168
5166 .n_max = (1 << 8) - 1, 5169 .n_max = (1 << 8) - 1,
5167 .m_max = (1 << 12) - 1, 5170 .m_max = (1 << 12) - 1,
5168 .mX_max = (1 << 5) - 1, 5171 .mX_max = (1 << 5) - 1,
@@ -5188,6 +5191,8 @@ static const struct dss_pll_hw dss_omap4_dsi_pll_hw = {
5188}; 5191};
5189 5192
5190static const struct dss_pll_hw dss_omap5_dsi_pll_hw = { 5193static const struct dss_pll_hw dss_omap5_dsi_pll_hw = {
5194 .type = DSS_PLL_TYPE_A,
5195
5191 .n_max = (1 << 8) - 1, 5196 .n_max = (1 << 8) - 1,
5192 .m_max = (1 << 12) - 1, 5197 .m_max = (1 << 12) - 1,
5193 .mX_max = (1 << 5) - 1, 5198 .mX_max = (1 << 5) - 1,
diff --git a/drivers/gpu/drm/omapdrm/dss/dss-of.c b/drivers/gpu/drm/omapdrm/dss/dss-of.c
index bf407b6ba15c..e256d879b25c 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss-of.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss-of.c
@@ -18,8 +18,7 @@
18#include <linux/of.h> 18#include <linux/of.h>
19#include <linux/seq_file.h> 19#include <linux/seq_file.h>
20 20
21#include <video/omapdss.h> 21#include "omapdss.h"
22
23#include "dss.h" 22#include "dss.h"
24 23
25struct device_node * 24struct device_node *
@@ -126,15 +125,16 @@ u32 dss_of_port_get_port_number(struct device_node *port)
126 125
127static struct device_node *omapdss_of_get_remote_port(const struct device_node *node) 126static struct device_node *omapdss_of_get_remote_port(const struct device_node *node)
128{ 127{
129 struct device_node *np; 128 struct device_node *np, *np_parent;
130 129
131 np = of_parse_phandle(node, "remote-endpoint", 0); 130 np = of_parse_phandle(node, "remote-endpoint", 0);
132 if (!np) 131 if (!np)
133 return NULL; 132 return NULL;
134 133
135 np = of_get_next_parent(np); 134 np_parent = of_get_next_parent(np);
135 of_node_put(np);
136 136
137 return np; 137 return np_parent;
138} 138}
139 139
140struct device_node * 140struct device_node *
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index 3303cfad4838..14887d5b02e5 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -42,8 +42,7 @@
42#include <linux/suspend.h> 42#include <linux/suspend.h>
43#include <linux/component.h> 43#include <linux/component.h>
44 44
45#include <video/omapdss.h> 45#include "omapdss.h"
46
47#include "dss.h" 46#include "dss.h"
48#include "dss_features.h" 47#include "dss_features.h"
49 48
@@ -76,6 +75,8 @@ struct dss_features {
76 const enum omap_display_type *ports; 75 const enum omap_display_type *ports;
77 int num_ports; 76 int num_ports;
78 int (*dpi_select_source)(int port, enum omap_channel channel); 77 int (*dpi_select_source)(int port, enum omap_channel channel);
78 int (*select_lcd_source)(enum omap_channel channel,
79 enum dss_clk_source clk_src);
79}; 80};
80 81
81static struct { 82static struct {
@@ -92,9 +93,9 @@ static struct {
92 unsigned long cache_prate; 93 unsigned long cache_prate;
93 struct dispc_clock_info cache_dispc_cinfo; 94 struct dispc_clock_info cache_dispc_cinfo;
94 95
95 enum omap_dss_clk_source dsi_clk_source[MAX_NUM_DSI]; 96 enum dss_clk_source dsi_clk_source[MAX_NUM_DSI];
96 enum omap_dss_clk_source dispc_clk_source; 97 enum dss_clk_source dispc_clk_source;
97 enum omap_dss_clk_source lcd_clk_source[MAX_DSS_LCD_MANAGERS]; 98 enum dss_clk_source lcd_clk_source[MAX_DSS_LCD_MANAGERS];
98 99
99 bool ctx_valid; 100 bool ctx_valid;
100 u32 ctx[DSS_SZ_REGS / sizeof(u32)]; 101 u32 ctx[DSS_SZ_REGS / sizeof(u32)];
@@ -106,11 +107,14 @@ static struct {
106} dss; 107} dss;
107 108
108static const char * const dss_generic_clk_source_names[] = { 109static const char * const dss_generic_clk_source_names[] = {
109 [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "DSI_PLL_HSDIV_DISPC", 110 [DSS_CLK_SRC_FCK] = "FCK",
110 [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "DSI_PLL_HSDIV_DSI", 111 [DSS_CLK_SRC_PLL1_1] = "PLL1:1",
111 [OMAP_DSS_CLK_SRC_FCK] = "DSS_FCK", 112 [DSS_CLK_SRC_PLL1_2] = "PLL1:2",
112 [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC] = "DSI_PLL2_HSDIV_DISPC", 113 [DSS_CLK_SRC_PLL1_3] = "PLL1:3",
113 [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI] = "DSI_PLL2_HSDIV_DSI", 114 [DSS_CLK_SRC_PLL2_1] = "PLL2:1",
115 [DSS_CLK_SRC_PLL2_2] = "PLL2:2",
116 [DSS_CLK_SRC_PLL2_3] = "PLL2:3",
117 [DSS_CLK_SRC_HDMI_PLL] = "HDMI PLL",
114}; 118};
115 119
116static bool dss_initialized; 120static bool dss_initialized;
@@ -203,68 +207,70 @@ void dss_ctrl_pll_enable(enum dss_pll_id pll_id, bool enable)
203 1 << shift, val << shift); 207 1 << shift, val << shift);
204} 208}
205 209
206void dss_ctrl_pll_set_control_mux(enum dss_pll_id pll_id, 210static int dss_ctrl_pll_set_control_mux(enum dss_clk_source clk_src,
207 enum omap_channel channel) 211 enum omap_channel channel)
208{ 212{
209 unsigned shift, val; 213 unsigned shift, val;
210 214
211 if (!dss.syscon_pll_ctrl) 215 if (!dss.syscon_pll_ctrl)
212 return; 216 return -EINVAL;
213 217
214 switch (channel) { 218 switch (channel) {
215 case OMAP_DSS_CHANNEL_LCD: 219 case OMAP_DSS_CHANNEL_LCD:
216 shift = 3; 220 shift = 3;
217 221
218 switch (pll_id) { 222 switch (clk_src) {
219 case DSS_PLL_VIDEO1: 223 case DSS_CLK_SRC_PLL1_1:
220 val = 0; break; 224 val = 0; break;
221 case DSS_PLL_HDMI: 225 case DSS_CLK_SRC_HDMI_PLL:
222 val = 1; break; 226 val = 1; break;
223 default: 227 default:
224 DSSERR("error in PLL mux config for LCD\n"); 228 DSSERR("error in PLL mux config for LCD\n");
225 return; 229 return -EINVAL;
226 } 230 }
227 231
228 break; 232 break;
229 case OMAP_DSS_CHANNEL_LCD2: 233 case OMAP_DSS_CHANNEL_LCD2:
230 shift = 5; 234 shift = 5;
231 235
232 switch (pll_id) { 236 switch (clk_src) {
233 case DSS_PLL_VIDEO1: 237 case DSS_CLK_SRC_PLL1_3:
234 val = 0; break; 238 val = 0; break;
235 case DSS_PLL_VIDEO2: 239 case DSS_CLK_SRC_PLL2_3:
236 val = 1; break; 240 val = 1; break;
237 case DSS_PLL_HDMI: 241 case DSS_CLK_SRC_HDMI_PLL:
238 val = 2; break; 242 val = 2; break;
239 default: 243 default:
240 DSSERR("error in PLL mux config for LCD2\n"); 244 DSSERR("error in PLL mux config for LCD2\n");
241 return; 245 return -EINVAL;
242 } 246 }
243 247
244 break; 248 break;
245 case OMAP_DSS_CHANNEL_LCD3: 249 case OMAP_DSS_CHANNEL_LCD3:
246 shift = 7; 250 shift = 7;
247 251
248 switch (pll_id) { 252 switch (clk_src) {
249 case DSS_PLL_VIDEO1: 253 case DSS_CLK_SRC_PLL2_1:
250 val = 1; break;
251 case DSS_PLL_VIDEO2:
252 val = 0; break; 254 val = 0; break;
253 case DSS_PLL_HDMI: 255 case DSS_CLK_SRC_PLL1_3:
256 val = 1; break;
257 case DSS_CLK_SRC_HDMI_PLL:
254 val = 2; break; 258 val = 2; break;
255 default: 259 default:
256 DSSERR("error in PLL mux config for LCD3\n"); 260 DSSERR("error in PLL mux config for LCD3\n");
257 return; 261 return -EINVAL;
258 } 262 }
259 263
260 break; 264 break;
261 default: 265 default:
262 DSSERR("error in PLL mux config\n"); 266 DSSERR("error in PLL mux config\n");
263 return; 267 return -EINVAL;
264 } 268 }
265 269
266 regmap_update_bits(dss.syscon_pll_ctrl, dss.syscon_pll_ctrl_offset, 270 regmap_update_bits(dss.syscon_pll_ctrl, dss.syscon_pll_ctrl_offset,
267 0x3 << shift, val << shift); 271 0x3 << shift, val << shift);
272
273 return 0;
268} 274}
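As a worked example of the mapping above (illustrative): routing DSS_CLK_SRC_PLL2_3 to the LCD2 manager selects shift 5 and val 1, so the final syscon write is roughly:

	regmap_update_bits(dss.syscon_pll_ctrl, dss.syscon_pll_ctrl_offset,
			   0x3 << 5, 1 << 5);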
269 275
270void dss_sdi_init(int datapairs) 276void dss_sdi_init(int datapairs)
@@ -354,14 +360,14 @@ void dss_sdi_disable(void)
354 REG_FLD_MOD(DSS_PLL_CONTROL, 0, 18, 18); /* SDI_PLL_SYSRESET */ 360 REG_FLD_MOD(DSS_PLL_CONTROL, 0, 18, 18); /* SDI_PLL_SYSRESET */
355} 361}
356 362
357const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src) 363const char *dss_get_clk_source_name(enum dss_clk_source clk_src)
358{ 364{
359 return dss_generic_clk_source_names[clk_src]; 365 return dss_generic_clk_source_names[clk_src];
360} 366}
361 367
362void dss_dump_clocks(struct seq_file *s) 368void dss_dump_clocks(struct seq_file *s)
363{ 369{
364 const char *fclk_name, *fclk_real_name; 370 const char *fclk_name;
365 unsigned long fclk_rate; 371 unsigned long fclk_rate;
366 372
367 if (dss_runtime_get()) 373 if (dss_runtime_get())
@@ -369,12 +375,11 @@ void dss_dump_clocks(struct seq_file *s)
369 375
370 seq_printf(s, "- DSS -\n"); 376 seq_printf(s, "- DSS -\n");
371 377
372 fclk_name = dss_get_generic_clk_source_name(OMAP_DSS_CLK_SRC_FCK); 378 fclk_name = dss_get_clk_source_name(DSS_CLK_SRC_FCK);
373 fclk_real_name = dss_feat_get_clk_source_name(OMAP_DSS_CLK_SRC_FCK);
374 fclk_rate = clk_get_rate(dss.dss_clk); 379 fclk_rate = clk_get_rate(dss.dss_clk);
375 380
376 seq_printf(s, "%s (%s) = %lu\n", 381 seq_printf(s, "%s = %lu\n",
377 fclk_name, fclk_real_name, 382 fclk_name,
378 fclk_rate); 383 fclk_rate);
379 384
380 dss_runtime_put(); 385 dss_runtime_put();
@@ -403,19 +408,42 @@ static void dss_dump_regs(struct seq_file *s)
403#undef DUMPREG 408#undef DUMPREG
404} 409}
405 410
406static void dss_select_dispc_clk_source(enum omap_dss_clk_source clk_src) 411static int dss_get_channel_index(enum omap_channel channel)
412{
413 switch (channel) {
414 case OMAP_DSS_CHANNEL_LCD:
415 return 0;
416 case OMAP_DSS_CHANNEL_LCD2:
417 return 1;
418 case OMAP_DSS_CHANNEL_LCD3:
419 return 2;
420 default:
421 WARN_ON(1);
422 return 0;
423 }
424}
425
426static void dss_select_dispc_clk_source(enum dss_clk_source clk_src)
407{ 427{
408 int b; 428 int b;
409 u8 start, end; 429 u8 start, end;
410 430
431 /*
432 * We always use PRCM clock as the DISPC func clock, except on DSS3,
433 * where we don't have separate DISPC and LCD clock sources.
434 */
435 if (WARN_ON(dss_has_feature(FEAT_LCD_CLK_SRC) &&
436 clk_src != DSS_CLK_SRC_FCK))
437 return;
438
411 switch (clk_src) { 439 switch (clk_src) {
412 case OMAP_DSS_CLK_SRC_FCK: 440 case DSS_CLK_SRC_FCK:
413 b = 0; 441 b = 0;
414 break; 442 break;
415 case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC: 443 case DSS_CLK_SRC_PLL1_1:
416 b = 1; 444 b = 1;
417 break; 445 break;
418 case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC: 446 case DSS_CLK_SRC_PLL2_1:
419 b = 2; 447 b = 2;
420 break; 448 break;
421 default: 449 default:
@@ -431,19 +459,19 @@ static void dss_select_dispc_clk_source(enum omap_dss_clk_source clk_src)
431} 459}
432 460
433void dss_select_dsi_clk_source(int dsi_module, 461void dss_select_dsi_clk_source(int dsi_module,
434 enum omap_dss_clk_source clk_src) 462 enum dss_clk_source clk_src)
435{ 463{
436 int b, pos; 464 int b, pos;
437 465
438 switch (clk_src) { 466 switch (clk_src) {
439 case OMAP_DSS_CLK_SRC_FCK: 467 case DSS_CLK_SRC_FCK:
440 b = 0; 468 b = 0;
441 break; 469 break;
442 case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI: 470 case DSS_CLK_SRC_PLL1_2:
443 BUG_ON(dsi_module != 0); 471 BUG_ON(dsi_module != 0);
444 b = 1; 472 b = 1;
445 break; 473 break;
446 case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI: 474 case DSS_CLK_SRC_PLL2_2:
447 BUG_ON(dsi_module != 1); 475 BUG_ON(dsi_module != 1);
448 b = 1; 476 b = 1;
449 break; 477 break;
@@ -458,59 +486,125 @@ void dss_select_dsi_clk_source(int dsi_module,
458 dss.dsi_clk_source[dsi_module] = clk_src; 486 dss.dsi_clk_source[dsi_module] = clk_src;
459} 487}
460 488
489static int dss_lcd_clk_mux_dra7(enum omap_channel channel,
490 enum dss_clk_source clk_src)
491{
492 const u8 ctrl_bits[] = {
493 [OMAP_DSS_CHANNEL_LCD] = 0,
494 [OMAP_DSS_CHANNEL_LCD2] = 12,
495 [OMAP_DSS_CHANNEL_LCD3] = 19,
496 };
497
498 u8 ctrl_bit = ctrl_bits[channel];
499 int r;
500
501 if (clk_src == DSS_CLK_SRC_FCK) {
502 /* LCDx_CLK_SWITCH */
503 REG_FLD_MOD(DSS_CONTROL, 0, ctrl_bit, ctrl_bit);
504 return -EINVAL;
505 }
506
507 r = dss_ctrl_pll_set_control_mux(clk_src, channel);
508 if (r)
509 return r;
510
511 REG_FLD_MOD(DSS_CONTROL, 1, ctrl_bit, ctrl_bit);
512
513 return 0;
514}
515
516static int dss_lcd_clk_mux_omap5(enum omap_channel channel,
517 enum dss_clk_source clk_src)
518{
519 const u8 ctrl_bits[] = {
520 [OMAP_DSS_CHANNEL_LCD] = 0,
521 [OMAP_DSS_CHANNEL_LCD2] = 12,
522 [OMAP_DSS_CHANNEL_LCD3] = 19,
523 };
524 const enum dss_clk_source allowed_plls[] = {
525 [OMAP_DSS_CHANNEL_LCD] = DSS_CLK_SRC_PLL1_1,
526 [OMAP_DSS_CHANNEL_LCD2] = DSS_CLK_SRC_FCK,
527 [OMAP_DSS_CHANNEL_LCD3] = DSS_CLK_SRC_PLL2_1,
528 };
529
530 u8 ctrl_bit = ctrl_bits[channel];
531
532 if (clk_src == DSS_CLK_SRC_FCK) {
533 /* LCDx_CLK_SWITCH */
534 REG_FLD_MOD(DSS_CONTROL, 0, ctrl_bit, ctrl_bit);
535 return -EINVAL;
536 }
537
538 if (WARN_ON(allowed_plls[channel] != clk_src))
539 return -EINVAL;
540
541 REG_FLD_MOD(DSS_CONTROL, 1, ctrl_bit, ctrl_bit);
542
543 return 0;
544}
545
546static int dss_lcd_clk_mux_omap4(enum omap_channel channel,
547 enum dss_clk_source clk_src)
548{
549 const u8 ctrl_bits[] = {
550 [OMAP_DSS_CHANNEL_LCD] = 0,
551 [OMAP_DSS_CHANNEL_LCD2] = 12,
552 };
553 const enum dss_clk_source allowed_plls[] = {
554 [OMAP_DSS_CHANNEL_LCD] = DSS_CLK_SRC_PLL1_1,
555 [OMAP_DSS_CHANNEL_LCD2] = DSS_CLK_SRC_PLL2_1,
556 };
557
558 u8 ctrl_bit = ctrl_bits[channel];
559
560 if (clk_src == DSS_CLK_SRC_FCK) {
561 /* LCDx_CLK_SWITCH */
562 REG_FLD_MOD(DSS_CONTROL, 0, ctrl_bit, ctrl_bit);
563 return 0;
564 }
565
566 if (WARN_ON(allowed_plls[channel] != clk_src))
567 return -EINVAL;
568
569 REG_FLD_MOD(DSS_CONTROL, 1, ctrl_bit, ctrl_bit);
570
571 return 0;
572}
573
461void dss_select_lcd_clk_source(enum omap_channel channel, 574void dss_select_lcd_clk_source(enum omap_channel channel,
462 enum omap_dss_clk_source clk_src) 575 enum dss_clk_source clk_src)
463{ 576{
464 int b, ix, pos; 577 int idx = dss_get_channel_index(channel);
578 int r;
465 579
466 if (!dss_has_feature(FEAT_LCD_CLK_SRC)) { 580 if (!dss_has_feature(FEAT_LCD_CLK_SRC)) {
467 dss_select_dispc_clk_source(clk_src); 581 dss_select_dispc_clk_source(clk_src);
582 dss.lcd_clk_source[idx] = clk_src;
468 return; 583 return;
469 } 584 }
470 585
471 switch (clk_src) { 586 r = dss.feat->select_lcd_source(channel, clk_src);
472 case OMAP_DSS_CLK_SRC_FCK: 587 if (r)
473 b = 0;
474 break;
475 case OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC:
476 BUG_ON(channel != OMAP_DSS_CHANNEL_LCD);
477 b = 1;
478 break;
479 case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC:
480 BUG_ON(channel != OMAP_DSS_CHANNEL_LCD2 &&
481 channel != OMAP_DSS_CHANNEL_LCD3);
482 b = 1;
483 break;
484 default:
485 BUG();
486 return; 588 return;
487 }
488
489 pos = channel == OMAP_DSS_CHANNEL_LCD ? 0 :
490 (channel == OMAP_DSS_CHANNEL_LCD2 ? 12 : 19);
491 REG_FLD_MOD(DSS_CONTROL, b, pos, pos); /* LCDx_CLK_SWITCH */
492 589
493 ix = channel == OMAP_DSS_CHANNEL_LCD ? 0 : 590 dss.lcd_clk_source[idx] = clk_src;
494 (channel == OMAP_DSS_CHANNEL_LCD2 ? 1 : 2);
495 dss.lcd_clk_source[ix] = clk_src;
496} 591}
497 592
498enum omap_dss_clk_source dss_get_dispc_clk_source(void) 593enum dss_clk_source dss_get_dispc_clk_source(void)
499{ 594{
500 return dss.dispc_clk_source; 595 return dss.dispc_clk_source;
501} 596}
502 597
503enum omap_dss_clk_source dss_get_dsi_clk_source(int dsi_module) 598enum dss_clk_source dss_get_dsi_clk_source(int dsi_module)
504{ 599{
505 return dss.dsi_clk_source[dsi_module]; 600 return dss.dsi_clk_source[dsi_module];
506} 601}
507 602
508enum omap_dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel) 603enum dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel)
509{ 604{
510 if (dss_has_feature(FEAT_LCD_CLK_SRC)) { 605 if (dss_has_feature(FEAT_LCD_CLK_SRC)) {
511 int ix = channel == OMAP_DSS_CHANNEL_LCD ? 0 : 606 int idx = dss_get_channel_index(channel);
512 (channel == OMAP_DSS_CHANNEL_LCD2 ? 1 : 2); 607 return dss.lcd_clk_source[idx];
513 return dss.lcd_clk_source[ix];
514 } else { 608 } else {
515 /* LCD_CLK source is the same as DISPC_FCLK source for 609 /* LCD_CLK source is the same as DISPC_FCLK source for
516 * OMAP2 and OMAP3 */ 610 * OMAP2 and OMAP3 */
@@ -859,6 +953,7 @@ static const struct dss_features omap44xx_dss_feats = {
859 .dpi_select_source = &dss_dpi_select_source_omap4, 953 .dpi_select_source = &dss_dpi_select_source_omap4,
860 .ports = omap2plus_ports, 954 .ports = omap2plus_ports,
861 .num_ports = ARRAY_SIZE(omap2plus_ports), 955 .num_ports = ARRAY_SIZE(omap2plus_ports),
956 .select_lcd_source = &dss_lcd_clk_mux_omap4,
862}; 957};
863 958
864static const struct dss_features omap54xx_dss_feats = { 959static const struct dss_features omap54xx_dss_feats = {
@@ -868,6 +963,7 @@ static const struct dss_features omap54xx_dss_feats = {
868 .dpi_select_source = &dss_dpi_select_source_omap5, 963 .dpi_select_source = &dss_dpi_select_source_omap5,
869 .ports = omap2plus_ports, 964 .ports = omap2plus_ports,
870 .num_ports = ARRAY_SIZE(omap2plus_ports), 965 .num_ports = ARRAY_SIZE(omap2plus_ports),
966 .select_lcd_source = &dss_lcd_clk_mux_omap5,
871}; 967};
872 968
873static const struct dss_features am43xx_dss_feats = { 969static const struct dss_features am43xx_dss_feats = {
@@ -886,6 +982,7 @@ static const struct dss_features dra7xx_dss_feats = {
886 .dpi_select_source = &dss_dpi_select_source_dra7xx, 982 .dpi_select_source = &dss_dpi_select_source_dra7xx,
887 .ports = dra7xx_ports, 983 .ports = dra7xx_ports,
888 .num_ports = ARRAY_SIZE(dra7xx_ports), 984 .num_ports = ARRAY_SIZE(dra7xx_ports),
985 .select_lcd_source = &dss_lcd_clk_mux_dra7,
889}; 986};
890 987
891static int dss_init_features(struct platform_device *pdev) 988static int dss_init_features(struct platform_device *pdev)
@@ -1143,18 +1240,18 @@ static int dss_bind(struct device *dev)
1143 /* Select DPLL */ 1240 /* Select DPLL */
1144 REG_FLD_MOD(DSS_CONTROL, 0, 0, 0); 1241 REG_FLD_MOD(DSS_CONTROL, 0, 0, 0);
1145 1242
1146 dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK); 1243 dss_select_dispc_clk_source(DSS_CLK_SRC_FCK);
1147 1244
1148#ifdef CONFIG_OMAP2_DSS_VENC 1245#ifdef CONFIG_OMAP2_DSS_VENC
1149 REG_FLD_MOD(DSS_CONTROL, 1, 4, 4); /* venc dac demen */ 1246 REG_FLD_MOD(DSS_CONTROL, 1, 4, 4); /* venc dac demen */
1150 REG_FLD_MOD(DSS_CONTROL, 1, 3, 3); /* venc clock 4x enable */ 1247 REG_FLD_MOD(DSS_CONTROL, 1, 3, 3); /* venc clock 4x enable */
1151 REG_FLD_MOD(DSS_CONTROL, 0, 2, 2); /* venc clock mode = normal */ 1248 REG_FLD_MOD(DSS_CONTROL, 0, 2, 2); /* venc clock mode = normal */
1152#endif 1249#endif
1153 dss.dsi_clk_source[0] = OMAP_DSS_CLK_SRC_FCK; 1250 dss.dsi_clk_source[0] = DSS_CLK_SRC_FCK;
1154 dss.dsi_clk_source[1] = OMAP_DSS_CLK_SRC_FCK; 1251 dss.dsi_clk_source[1] = DSS_CLK_SRC_FCK;
1155 dss.dispc_clk_source = OMAP_DSS_CLK_SRC_FCK; 1252 dss.dispc_clk_source = DSS_CLK_SRC_FCK;
1156 dss.lcd_clk_source[0] = OMAP_DSS_CLK_SRC_FCK; 1253 dss.lcd_clk_source[0] = DSS_CLK_SRC_FCK;
1157 dss.lcd_clk_source[1] = OMAP_DSS_CLK_SRC_FCK; 1254 dss.lcd_clk_source[1] = DSS_CLK_SRC_FCK;
1158 1255
1159 rev = dss_read_reg(DSS_REVISION); 1256 rev = dss_read_reg(DSS_REVISION);
1160 printk(KERN_INFO "OMAP DSS rev %d.%d\n", 1257 printk(KERN_INFO "OMAP DSS rev %d.%d\n",
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.h b/drivers/gpu/drm/omapdrm/dss/dss.h
index 38e6ab50142d..4fd06dc41cb3 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.h
+++ b/drivers/gpu/drm/omapdrm/dss/dss.h
@@ -102,6 +102,20 @@ enum dss_writeback_channel {
102 DSS_WB_LCD3_MGR = 7, 102 DSS_WB_LCD3_MGR = 7,
103}; 103};
104 104
105enum dss_clk_source {
106 DSS_CLK_SRC_FCK = 0,
107
108 DSS_CLK_SRC_PLL1_1,
109 DSS_CLK_SRC_PLL1_2,
110 DSS_CLK_SRC_PLL1_3,
111
112 DSS_CLK_SRC_PLL2_1,
113 DSS_CLK_SRC_PLL2_2,
114 DSS_CLK_SRC_PLL2_3,
115
116 DSS_CLK_SRC_HDMI_PLL,
117};
118
105enum dss_pll_id { 119enum dss_pll_id {
106 DSS_PLL_DSI1, 120 DSS_PLL_DSI1,
107 DSS_PLL_DSI2, 121 DSS_PLL_DSI2,
@@ -114,6 +128,11 @@ struct dss_pll;
114 128
115#define DSS_PLL_MAX_HSDIVS 4 129#define DSS_PLL_MAX_HSDIVS 4
116 130
131enum dss_pll_type {
132 DSS_PLL_TYPE_A,
133 DSS_PLL_TYPE_B,
134};
135
117/* 136/*
118 * Type-A PLLs: clkout[]/mX[] refer to hsdiv outputs m4, m5, m6, m7. 137 * Type-A PLLs: clkout[]/mX[] refer to hsdiv outputs m4, m5, m6, m7.
119 * Type-B PLLs: clkout[0] refers to m2. 138 * Type-B PLLs: clkout[0] refers to m2.
@@ -140,6 +159,8 @@ struct dss_pll_ops {
140}; 159};
141 160
142struct dss_pll_hw { 161struct dss_pll_hw {
162 enum dss_pll_type type;
163
143 unsigned n_max; 164 unsigned n_max;
144 unsigned m_min; 165 unsigned m_min;
145 unsigned m_max; 166 unsigned m_max;
@@ -227,7 +248,7 @@ unsigned long dss_get_dispc_clk_rate(void);
227int dss_dpi_select_source(int port, enum omap_channel channel); 248int dss_dpi_select_source(int port, enum omap_channel channel);
228void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select); 249void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select);
229enum dss_hdmi_venc_clk_source_select dss_get_hdmi_venc_clk_source(void); 250enum dss_hdmi_venc_clk_source_select dss_get_hdmi_venc_clk_source(void);
230const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src); 251const char *dss_get_clk_source_name(enum dss_clk_source clk_src);
231void dss_dump_clocks(struct seq_file *s); 252void dss_dump_clocks(struct seq_file *s);
232 253
233/* DSS VIDEO PLL */ 254/* DSS VIDEO PLL */
@@ -244,20 +265,18 @@ void dss_debug_dump_clocks(struct seq_file *s);
244#endif 265#endif
245 266
246void dss_ctrl_pll_enable(enum dss_pll_id pll_id, bool enable); 267void dss_ctrl_pll_enable(enum dss_pll_id pll_id, bool enable);
247void dss_ctrl_pll_set_control_mux(enum dss_pll_id pll_id,
248 enum omap_channel channel);
249 268
250void dss_sdi_init(int datapairs); 269void dss_sdi_init(int datapairs);
251int dss_sdi_enable(void); 270int dss_sdi_enable(void);
252void dss_sdi_disable(void); 271void dss_sdi_disable(void);
253 272
254void dss_select_dsi_clk_source(int dsi_module, 273void dss_select_dsi_clk_source(int dsi_module,
255 enum omap_dss_clk_source clk_src); 274 enum dss_clk_source clk_src);
256void dss_select_lcd_clk_source(enum omap_channel channel, 275void dss_select_lcd_clk_source(enum omap_channel channel,
257 enum omap_dss_clk_source clk_src); 276 enum dss_clk_source clk_src);
258enum omap_dss_clk_source dss_get_dispc_clk_source(void); 277enum dss_clk_source dss_get_dispc_clk_source(void);
259enum omap_dss_clk_source dss_get_dsi_clk_source(int dsi_module); 278enum dss_clk_source dss_get_dsi_clk_source(int dsi_module);
260enum omap_dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel); 279enum dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel);
261 280
262void dss_set_venc_output(enum omap_dss_venc_type type); 281void dss_set_venc_output(enum omap_dss_venc_type type);
263void dss_set_dac_pwrdn_bgz(bool enable); 282void dss_set_dac_pwrdn_bgz(bool enable);
@@ -409,17 +428,23 @@ typedef bool (*dss_hsdiv_calc_func)(int m_dispc, unsigned long dispc,
409int dss_pll_register(struct dss_pll *pll); 428int dss_pll_register(struct dss_pll *pll);
410void dss_pll_unregister(struct dss_pll *pll); 429void dss_pll_unregister(struct dss_pll *pll);
411struct dss_pll *dss_pll_find(const char *name); 430struct dss_pll *dss_pll_find(const char *name);
431struct dss_pll *dss_pll_find_by_src(enum dss_clk_source src);
432unsigned dss_pll_get_clkout_idx_for_src(enum dss_clk_source src);
412int dss_pll_enable(struct dss_pll *pll); 433int dss_pll_enable(struct dss_pll *pll);
413void dss_pll_disable(struct dss_pll *pll); 434void dss_pll_disable(struct dss_pll *pll);
414int dss_pll_set_config(struct dss_pll *pll, 435int dss_pll_set_config(struct dss_pll *pll,
415 const struct dss_pll_clock_info *cinfo); 436 const struct dss_pll_clock_info *cinfo);
416 437
417bool dss_pll_hsdiv_calc(const struct dss_pll *pll, unsigned long clkdco, 438bool dss_pll_hsdiv_calc_a(const struct dss_pll *pll, unsigned long clkdco,
418 unsigned long out_min, unsigned long out_max, 439 unsigned long out_min, unsigned long out_max,
419 dss_hsdiv_calc_func func, void *data); 440 dss_hsdiv_calc_func func, void *data);
420bool dss_pll_calc(const struct dss_pll *pll, unsigned long clkin, 441bool dss_pll_calc_a(const struct dss_pll *pll, unsigned long clkin,
421 unsigned long pll_min, unsigned long pll_max, 442 unsigned long pll_min, unsigned long pll_max,
422 dss_pll_calc_func func, void *data); 443 dss_pll_calc_func func, void *data);
444
445bool dss_pll_calc_b(const struct dss_pll *pll, unsigned long clkin,
446 unsigned long target_clkout, struct dss_pll_clock_info *cinfo);
447
423int dss_pll_write_config_type_a(struct dss_pll *pll, 448int dss_pll_write_config_type_a(struct dss_pll *pll,
424 const struct dss_pll_clock_info *cinfo); 449 const struct dss_pll_clock_info *cinfo);
425int dss_pll_write_config_type_b(struct dss_pll *pll, 450int dss_pll_write_config_type_b(struct dss_pll *pll,
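Note: the dss.h hunk above splits the old dss_pll_calc()/dss_pll_hsdiv_calc() helpers into explicit type-A variants and adds dss_pll_calc_b() plus the source-to-PLL lookups. As a rough sketch only (not part of the patch), a caller could dispatch on the new hw->type field; the DSS_PLL_TYPE_A/B values and the clkdco_min/clkdco_max limits in struct dss_pll_hw are assumed here.

	/*
	 * Illustrative sketch: pick the calculation style from the PLL type.
	 * Type A PLLs iterate over n/m candidates and let the callback accept
	 * or reject each clkdco; type B PLLs compute one closed-form config
	 * for a target clkout[0].
	 */
	static bool pick_pll_config(struct dss_pll *pll,
				    unsigned long target_clkout,
				    struct dss_pll_clock_info *cinfo,
				    dss_pll_calc_func func, void *data)
	{
		unsigned long clkin = clk_get_rate(pll->clkin);

		if (pll->hw->type == DSS_PLL_TYPE_B)
			return dss_pll_calc_b(pll, clkin, target_clkout, cinfo);

		return dss_pll_calc_a(pll, clkin,
				      pll->hw->clkdco_min, pll->hw->clkdco_max,
				      func, data);
	}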
diff --git a/drivers/gpu/drm/omapdrm/dss/dss_features.c b/drivers/gpu/drm/omapdrm/dss/dss_features.c
index c886a2927f73..ee5b93ce2763 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss_features.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss_features.c
@@ -23,8 +23,7 @@
23#include <linux/err.h> 23#include <linux/err.h>
24#include <linux/slab.h> 24#include <linux/slab.h>
25 25
26#include <video/omapdss.h> 26#include "omapdss.h"
27
28#include "dss.h" 27#include "dss.h"
29#include "dss_features.h" 28#include "dss_features.h"
30 29
@@ -50,7 +49,6 @@ struct omap_dss_features {
50 const enum omap_dss_output_id *supported_outputs; 49 const enum omap_dss_output_id *supported_outputs;
51 const enum omap_color_mode *supported_color_modes; 50 const enum omap_color_mode *supported_color_modes;
52 const enum omap_overlay_caps *overlay_caps; 51 const enum omap_overlay_caps *overlay_caps;
53 const char * const *clksrc_names;
54 const struct dss_param_range *dss_params; 52 const struct dss_param_range *dss_params;
55 53
56 const enum omap_dss_rotation_type supported_rotation_types; 54 const enum omap_dss_rotation_type supported_rotation_types;
@@ -389,34 +387,6 @@ static const enum omap_overlay_caps omap4_dss_overlay_caps[] = {
389 OMAP_DSS_OVL_CAP_POS | OMAP_DSS_OVL_CAP_REPLICATION, 387 OMAP_DSS_OVL_CAP_POS | OMAP_DSS_OVL_CAP_REPLICATION,
390}; 388};
391 389
392static const char * const omap2_dss_clk_source_names[] = {
393 [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "N/A",
394 [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "N/A",
395 [OMAP_DSS_CLK_SRC_FCK] = "DSS_FCLK1",
396};
397
398static const char * const omap3_dss_clk_source_names[] = {
399 [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "DSI1_PLL_FCLK",
400 [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "DSI2_PLL_FCLK",
401 [OMAP_DSS_CLK_SRC_FCK] = "DSS1_ALWON_FCLK",
402};
403
404static const char * const omap4_dss_clk_source_names[] = {
405 [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "PLL1_CLK1",
406 [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "PLL1_CLK2",
407 [OMAP_DSS_CLK_SRC_FCK] = "DSS_FCLK",
408 [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC] = "PLL2_CLK1",
409 [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI] = "PLL2_CLK2",
410};
411
412static const char * const omap5_dss_clk_source_names[] = {
413 [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC] = "DPLL_DSI1_A_CLK1",
414 [OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI] = "DPLL_DSI1_A_CLK2",
415 [OMAP_DSS_CLK_SRC_FCK] = "DSS_CLK",
416 [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC] = "DPLL_DSI1_C_CLK1",
417 [OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI] = "DPLL_DSI1_C_CLK2",
418};
419
420static const struct dss_param_range omap2_dss_param_range[] = { 390static const struct dss_param_range omap2_dss_param_range[] = {
421 [FEAT_PARAM_DSS_FCK] = { 0, 133000000 }, 391 [FEAT_PARAM_DSS_FCK] = { 0, 133000000 },
422 [FEAT_PARAM_DSS_PCD] = { 2, 255 }, 392 [FEAT_PARAM_DSS_PCD] = { 2, 255 },
@@ -631,7 +601,6 @@ static const struct omap_dss_features omap2_dss_features = {
631 .supported_outputs = omap2_dss_supported_outputs, 601 .supported_outputs = omap2_dss_supported_outputs,
632 .supported_color_modes = omap2_dss_supported_color_modes, 602 .supported_color_modes = omap2_dss_supported_color_modes,
633 .overlay_caps = omap2_dss_overlay_caps, 603 .overlay_caps = omap2_dss_overlay_caps,
634 .clksrc_names = omap2_dss_clk_source_names,
635 .dss_params = omap2_dss_param_range, 604 .dss_params = omap2_dss_param_range,
636 .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB, 605 .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB,
637 .buffer_size_unit = 1, 606 .buffer_size_unit = 1,
@@ -652,7 +621,6 @@ static const struct omap_dss_features omap3430_dss_features = {
652 .supported_outputs = omap3430_dss_supported_outputs, 621 .supported_outputs = omap3430_dss_supported_outputs,
653 .supported_color_modes = omap3_dss_supported_color_modes, 622 .supported_color_modes = omap3_dss_supported_color_modes,
654 .overlay_caps = omap3430_dss_overlay_caps, 623 .overlay_caps = omap3430_dss_overlay_caps,
655 .clksrc_names = omap3_dss_clk_source_names,
656 .dss_params = omap3_dss_param_range, 624 .dss_params = omap3_dss_param_range,
657 .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB, 625 .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB,
658 .buffer_size_unit = 1, 626 .buffer_size_unit = 1,
@@ -676,7 +644,6 @@ static const struct omap_dss_features am35xx_dss_features = {
676 .supported_outputs = omap3430_dss_supported_outputs, 644 .supported_outputs = omap3430_dss_supported_outputs,
677 .supported_color_modes = omap3_dss_supported_color_modes, 645 .supported_color_modes = omap3_dss_supported_color_modes,
678 .overlay_caps = omap3430_dss_overlay_caps, 646 .overlay_caps = omap3430_dss_overlay_caps,
679 .clksrc_names = omap3_dss_clk_source_names,
680 .dss_params = omap3_dss_param_range, 647 .dss_params = omap3_dss_param_range,
681 .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB, 648 .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB,
682 .buffer_size_unit = 1, 649 .buffer_size_unit = 1,
@@ -696,7 +663,6 @@ static const struct omap_dss_features am43xx_dss_features = {
696 .supported_outputs = am43xx_dss_supported_outputs, 663 .supported_outputs = am43xx_dss_supported_outputs,
697 .supported_color_modes = omap3_dss_supported_color_modes, 664 .supported_color_modes = omap3_dss_supported_color_modes,
698 .overlay_caps = omap3430_dss_overlay_caps, 665 .overlay_caps = omap3430_dss_overlay_caps,
699 .clksrc_names = omap2_dss_clk_source_names,
700 .dss_params = am43xx_dss_param_range, 666 .dss_params = am43xx_dss_param_range,
701 .supported_rotation_types = OMAP_DSS_ROT_DMA, 667 .supported_rotation_types = OMAP_DSS_ROT_DMA,
702 .buffer_size_unit = 1, 668 .buffer_size_unit = 1,
@@ -716,7 +682,6 @@ static const struct omap_dss_features omap3630_dss_features = {
716 .supported_outputs = omap3630_dss_supported_outputs, 682 .supported_outputs = omap3630_dss_supported_outputs,
717 .supported_color_modes = omap3_dss_supported_color_modes, 683 .supported_color_modes = omap3_dss_supported_color_modes,
718 .overlay_caps = omap3630_dss_overlay_caps, 684 .overlay_caps = omap3630_dss_overlay_caps,
719 .clksrc_names = omap3_dss_clk_source_names,
720 .dss_params = omap3_dss_param_range, 685 .dss_params = omap3_dss_param_range,
721 .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB, 686 .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB,
722 .buffer_size_unit = 1, 687 .buffer_size_unit = 1,
@@ -738,7 +703,6 @@ static const struct omap_dss_features omap4430_es1_0_dss_features = {
738 .supported_outputs = omap4_dss_supported_outputs, 703 .supported_outputs = omap4_dss_supported_outputs,
739 .supported_color_modes = omap4_dss_supported_color_modes, 704 .supported_color_modes = omap4_dss_supported_color_modes,
740 .overlay_caps = omap4_dss_overlay_caps, 705 .overlay_caps = omap4_dss_overlay_caps,
741 .clksrc_names = omap4_dss_clk_source_names,
742 .dss_params = omap4_dss_param_range, 706 .dss_params = omap4_dss_param_range,
743 .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER, 707 .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER,
744 .buffer_size_unit = 16, 708 .buffer_size_unit = 16,
@@ -759,7 +723,6 @@ static const struct omap_dss_features omap4430_es2_0_1_2_dss_features = {
759 .supported_outputs = omap4_dss_supported_outputs, 723 .supported_outputs = omap4_dss_supported_outputs,
760 .supported_color_modes = omap4_dss_supported_color_modes, 724 .supported_color_modes = omap4_dss_supported_color_modes,
761 .overlay_caps = omap4_dss_overlay_caps, 725 .overlay_caps = omap4_dss_overlay_caps,
762 .clksrc_names = omap4_dss_clk_source_names,
763 .dss_params = omap4_dss_param_range, 726 .dss_params = omap4_dss_param_range,
764 .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER, 727 .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER,
765 .buffer_size_unit = 16, 728 .buffer_size_unit = 16,
@@ -780,7 +743,6 @@ static const struct omap_dss_features omap4_dss_features = {
780 .supported_outputs = omap4_dss_supported_outputs, 743 .supported_outputs = omap4_dss_supported_outputs,
781 .supported_color_modes = omap4_dss_supported_color_modes, 744 .supported_color_modes = omap4_dss_supported_color_modes,
782 .overlay_caps = omap4_dss_overlay_caps, 745 .overlay_caps = omap4_dss_overlay_caps,
783 .clksrc_names = omap4_dss_clk_source_names,
784 .dss_params = omap4_dss_param_range, 746 .dss_params = omap4_dss_param_range,
785 .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER, 747 .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER,
786 .buffer_size_unit = 16, 748 .buffer_size_unit = 16,
@@ -801,7 +763,6 @@ static const struct omap_dss_features omap5_dss_features = {
801 .supported_outputs = omap5_dss_supported_outputs, 763 .supported_outputs = omap5_dss_supported_outputs,
802 .supported_color_modes = omap4_dss_supported_color_modes, 764 .supported_color_modes = omap4_dss_supported_color_modes,
803 .overlay_caps = omap4_dss_overlay_caps, 765 .overlay_caps = omap4_dss_overlay_caps,
804 .clksrc_names = omap5_dss_clk_source_names,
805 .dss_params = omap5_dss_param_range, 766 .dss_params = omap5_dss_param_range,
806 .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER, 767 .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER,
807 .buffer_size_unit = 16, 768 .buffer_size_unit = 16,
@@ -859,11 +820,6 @@ bool dss_feat_color_mode_supported(enum omap_plane plane,
859 color_mode; 820 color_mode;
860} 821}
861 822
862const char *dss_feat_get_clk_source_name(enum omap_dss_clk_source id)
863{
864 return omap_current_dss_features->clksrc_names[id];
865}
866
867u32 dss_feat_get_buffer_size_unit(void) 823u32 dss_feat_get_buffer_size_unit(void)
868{ 824{
869 return omap_current_dss_features->buffer_size_unit; 825 return omap_current_dss_features->buffer_size_unit;
diff --git a/drivers/gpu/drm/omapdrm/dss/dss_features.h b/drivers/gpu/drm/omapdrm/dss/dss_features.h
index 3d67d39f192f..bb4b7f0e642b 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss_features.h
+++ b/drivers/gpu/drm/omapdrm/dss/dss_features.h
@@ -91,7 +91,6 @@ unsigned long dss_feat_get_param_max(enum dss_range_param param);
91enum omap_overlay_caps dss_feat_get_overlay_caps(enum omap_plane plane); 91enum omap_overlay_caps dss_feat_get_overlay_caps(enum omap_plane plane);
92bool dss_feat_color_mode_supported(enum omap_plane plane, 92bool dss_feat_color_mode_supported(enum omap_plane plane,
93 enum omap_color_mode color_mode); 93 enum omap_color_mode color_mode);
94const char *dss_feat_get_clk_source_name(enum omap_dss_clk_source id);
95 94
96u32 dss_feat_get_buffer_size_unit(void); /* in bytes */ 95u32 dss_feat_get_buffer_size_unit(void); /* in bytes */
97u32 dss_feat_get_burst_size_unit(void); /* in bytes */ 96u32 dss_feat_get_burst_size_unit(void); /* in bytes */
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi.h b/drivers/gpu/drm/omapdrm/dss/hdmi.h
index 53616b02b613..63e711545865 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi.h
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi.h
@@ -23,8 +23,9 @@
23#include <linux/io.h> 23#include <linux/io.h>
24#include <linux/platform_device.h> 24#include <linux/platform_device.h>
25#include <linux/hdmi.h> 25#include <linux/hdmi.h>
26#include <video/omapdss.h> 26#include <sound/omap-hdmi-audio.h>
27 27
28#include "omapdss.h"
28#include "dss.h" 29#include "dss.h"
29 30
30/* HDMI Wrapper */ 31/* HDMI Wrapper */
@@ -240,6 +241,7 @@ struct hdmi_pll_data {
240 241
241 void __iomem *base; 242 void __iomem *base;
242 243
244 struct platform_device *pdev;
243 struct hdmi_wp_data *wp; 245 struct hdmi_wp_data *wp;
244}; 246};
245 247
@@ -306,8 +308,6 @@ phys_addr_t hdmi_wp_get_audio_dma_addr(struct hdmi_wp_data *wp);
306 308
307/* HDMI PLL funcs */ 309/* HDMI PLL funcs */
308void hdmi_pll_dump(struct hdmi_pll_data *pll, struct seq_file *s); 310void hdmi_pll_dump(struct hdmi_pll_data *pll, struct seq_file *s);
309void hdmi_pll_compute(struct hdmi_pll_data *pll,
310 unsigned long target_tmds, struct dss_pll_clock_info *pi);
311int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll, 311int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll,
312 struct hdmi_wp_data *wp); 312 struct hdmi_wp_data *wp);
313void hdmi_pll_uninit(struct hdmi_pll_data *hpll); 313void hdmi_pll_uninit(struct hdmi_pll_data *hpll);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index 4d46cdf7a037..cbd28dfdb86a 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -34,9 +34,9 @@
34#include <linux/regulator/consumer.h> 34#include <linux/regulator/consumer.h>
35#include <linux/component.h> 35#include <linux/component.h>
36#include <linux/of.h> 36#include <linux/of.h>
37#include <video/omapdss.h>
38#include <sound/omap-hdmi-audio.h> 37#include <sound/omap-hdmi-audio.h>
39 38
39#include "omapdss.h"
40#include "hdmi4_core.h" 40#include "hdmi4_core.h"
41#include "dss.h" 41#include "dss.h"
42#include "dss_features.h" 42#include "dss_features.h"
@@ -177,7 +177,11 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
177 if (p->double_pixel) 177 if (p->double_pixel)
178 pc *= 2; 178 pc *= 2;
179 179
180 hdmi_pll_compute(&hdmi.pll, pc, &hdmi_cinfo); 180 /* DSS_HDMI_TCLK is bitclk / 10 */
181 pc *= 10;
182
183 dss_pll_calc_b(&hdmi.pll.pll, clk_get_rate(hdmi.pll.pll.clkin),
184 pc, &hdmi_cinfo);
181 185
182 r = dss_pll_enable(&hdmi.pll.pll); 186 r = dss_pll_enable(&hdmi.pll.pll);
183 if (r) { 187 if (r) {
@@ -204,9 +208,6 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
204 208
205 hdmi4_configure(&hdmi.core, &hdmi.wp, &hdmi.cfg); 209 hdmi4_configure(&hdmi.core, &hdmi.wp, &hdmi.cfg);
206 210
207 /* bypass TV gamma table */
208 dispc_enable_gamma_table(0);
209
210 /* tv size */ 211 /* tv size */
211 dss_mgr_set_timings(channel, p); 212 dss_mgr_set_timings(channel, p);
212 213
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index 9255c0e1e4a7..0c0a5139a301 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -39,9 +39,9 @@
39#include <linux/regulator/consumer.h> 39#include <linux/regulator/consumer.h>
40#include <linux/component.h> 40#include <linux/component.h>
41#include <linux/of.h> 41#include <linux/of.h>
42#include <video/omapdss.h>
43#include <sound/omap-hdmi-audio.h> 42#include <sound/omap-hdmi-audio.h>
44 43
44#include "omapdss.h"
45#include "hdmi5_core.h" 45#include "hdmi5_core.h"
46#include "dss.h" 46#include "dss.h"
47#include "dss_features.h" 47#include "dss_features.h"
@@ -189,7 +189,11 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
189 if (p->double_pixel) 189 if (p->double_pixel)
190 pc *= 2; 190 pc *= 2;
191 191
192 hdmi_pll_compute(&hdmi.pll, pc, &hdmi_cinfo); 192 /* DSS_HDMI_TCLK is bitclk / 10 */
193 pc *= 10;
194
195 dss_pll_calc_b(&hdmi.pll.pll, clk_get_rate(hdmi.pll.pll.clkin),
196 pc, &hdmi_cinfo);
193 197
194 /* disable and clear irqs */ 198 /* disable and clear irqs */
195 hdmi_wp_clear_irqenable(&hdmi.wp, 0xffffffff); 199 hdmi_wp_clear_irqenable(&hdmi.wp, 0xffffffff);
@@ -221,9 +225,6 @@ static int hdmi_power_on_full(struct omap_dss_device *dssdev)
221 225
222 hdmi5_configure(&hdmi.core, &hdmi.wp, &hdmi.cfg); 226 hdmi5_configure(&hdmi.core, &hdmi.wp, &hdmi.cfg);
223 227
224 /* bypass TV gamma table */
225 dispc_enable_gamma_table(0);
226
227 /* tv size */ 228 /* tv size */
228 dss_mgr_set_timings(channel, p); 229 dss_mgr_set_timings(channel, p);
229 230
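Note: both HDMI encoders now request the TMDS serial clock from the PLL directly instead of going through the removed hdmi_pll_compute(). A quick illustration with typical (made-up for the example) numbers:

	/* Illustration only: 1080p60, 8 bits per channel */
	unsigned long pc = 148500000;		/* pixel clock from the timings */
	unsigned long bitclk = pc * 10;		/* TMDS bit clock = 10 x pixel clock */
	/*
	 * dss_pll_calc_b() is asked for 1485000000 Hz on clkout[0]; the HDMI
	 * wrapper's DSS_HDMI_TCLK (bit clock / 10) then comes back out at the
	 * original 148.5 MHz pixel rate.
	 */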
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_common.c b/drivers/gpu/drm/omapdrm/dss/hdmi_common.c
index 1b8fcc6c4ba1..4dfb67fe5f6d 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_common.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_common.c
@@ -4,8 +4,8 @@
4#include <linux/kernel.h> 4#include <linux/kernel.h>
5#include <linux/err.h> 5#include <linux/err.h>
6#include <linux/of.h> 6#include <linux/of.h>
7#include <video/omapdss.h>
8 7
8#include "omapdss.h"
9#include "hdmi.h" 9#include "hdmi.h"
10 10
11int hdmi_parse_lanes_of(struct platform_device *pdev, struct device_node *ep, 11int hdmi_parse_lanes_of(struct platform_device *pdev, struct device_node *ep,
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c b/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c
index f98b750fc499..3ead47cccac5 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_phy.c
@@ -14,8 +14,8 @@
14#include <linux/platform_device.h> 14#include <linux/platform_device.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/seq_file.h> 16#include <linux/seq_file.h>
17#include <video/omapdss.h>
18 17
18#include "omapdss.h"
19#include "dss.h" 19#include "dss.h"
20#include "hdmi.h" 20#include "hdmi.h"
21 21
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c b/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c
index f1015e8b8267..b8bf6a9e5557 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_pll.c
@@ -17,9 +17,9 @@
17#include <linux/platform_device.h> 17#include <linux/platform_device.h>
18#include <linux/clk.h> 18#include <linux/clk.h>
19#include <linux/seq_file.h> 19#include <linux/seq_file.h>
20#include <linux/pm_runtime.h>
20 21
21#include <video/omapdss.h> 22#include "omapdss.h"
22
23#include "dss.h" 23#include "dss.h"
24#include "hdmi.h" 24#include "hdmi.h"
25 25
@@ -39,71 +39,14 @@ void hdmi_pll_dump(struct hdmi_pll_data *pll, struct seq_file *s)
39 DUMPPLL(PLLCTRL_CFG4); 39 DUMPPLL(PLLCTRL_CFG4);
40} 40}
41 41
42void hdmi_pll_compute(struct hdmi_pll_data *pll,
43 unsigned long target_tmds, struct dss_pll_clock_info *pi)
44{
45 unsigned long fint, clkdco, clkout;
46 unsigned long target_bitclk, target_clkdco;
47 unsigned long min_dco;
48 unsigned n, m, mf, m2, sd;
49 unsigned long clkin;
50 const struct dss_pll_hw *hw = pll->pll.hw;
51
52 clkin = clk_get_rate(pll->pll.clkin);
53
54 DSSDBG("clkin %lu, target tmds %lu\n", clkin, target_tmds);
55
56 target_bitclk = target_tmds * 10;
57
58 /* Fint */
59 n = DIV_ROUND_UP(clkin, hw->fint_max);
60 fint = clkin / n;
61
62 /* adjust m2 so that the clkdco will be high enough */
63 min_dco = roundup(hw->clkdco_min, fint);
64 m2 = DIV_ROUND_UP(min_dco, target_bitclk);
65 if (m2 == 0)
66 m2 = 1;
67
68 target_clkdco = target_bitclk * m2;
69 m = target_clkdco / fint;
70
71 clkdco = fint * m;
72
73 /* adjust clkdco with fractional mf */
74 if (WARN_ON(target_clkdco - clkdco > fint))
75 mf = 0;
76 else
77 mf = (u32)div_u64(262144ull * (target_clkdco - clkdco), fint);
78
79 if (mf > 0)
80 clkdco += (u32)div_u64((u64)mf * fint, 262144);
81
82 clkout = clkdco / m2;
83
84 /* sigma-delta */
85 sd = DIV_ROUND_UP(fint * m, 250000000);
86
87 DSSDBG("N = %u, M = %u, M.f = %u, M2 = %u, SD = %u\n",
88 n, m, mf, m2, sd);
89 DSSDBG("Fint %lu, clkdco %lu, clkout %lu\n", fint, clkdco, clkout);
90
91 pi->n = n;
92 pi->m = m;
93 pi->mf = mf;
94 pi->mX[0] = m2;
95 pi->sd = sd;
96
97 pi->fint = fint;
98 pi->clkdco = clkdco;
99 pi->clkout[0] = clkout;
100}
101
102static int hdmi_pll_enable(struct dss_pll *dsspll) 42static int hdmi_pll_enable(struct dss_pll *dsspll)
103{ 43{
104 struct hdmi_pll_data *pll = container_of(dsspll, struct hdmi_pll_data, pll); 44 struct hdmi_pll_data *pll = container_of(dsspll, struct hdmi_pll_data, pll);
105 struct hdmi_wp_data *wp = pll->wp; 45 struct hdmi_wp_data *wp = pll->wp;
106 u16 r = 0; 46 int r;
47
48 r = pm_runtime_get_sync(&pll->pdev->dev);
49 WARN_ON(r < 0);
107 50
108 dss_ctrl_pll_enable(DSS_PLL_HDMI, true); 51 dss_ctrl_pll_enable(DSS_PLL_HDMI, true);
109 52
@@ -118,10 +61,14 @@ static void hdmi_pll_disable(struct dss_pll *dsspll)
118{ 61{
119 struct hdmi_pll_data *pll = container_of(dsspll, struct hdmi_pll_data, pll); 62 struct hdmi_pll_data *pll = container_of(dsspll, struct hdmi_pll_data, pll);
120 struct hdmi_wp_data *wp = pll->wp; 63 struct hdmi_wp_data *wp = pll->wp;
64 int r;
121 65
122 hdmi_wp_set_pll_pwr(wp, HDMI_PLLPWRCMD_ALLOFF); 66 hdmi_wp_set_pll_pwr(wp, HDMI_PLLPWRCMD_ALLOFF);
123 67
124 dss_ctrl_pll_enable(DSS_PLL_HDMI, false); 68 dss_ctrl_pll_enable(DSS_PLL_HDMI, false);
69
70 r = pm_runtime_put_sync(&pll->pdev->dev);
71 WARN_ON(r < 0 && r != -ENOSYS);
125} 72}
126 73
127static const struct dss_pll_ops dsi_pll_ops = { 74static const struct dss_pll_ops dsi_pll_ops = {
@@ -131,6 +78,8 @@ static const struct dss_pll_ops dsi_pll_ops = {
131}; 78};
132 79
133static const struct dss_pll_hw dss_omap4_hdmi_pll_hw = { 80static const struct dss_pll_hw dss_omap4_hdmi_pll_hw = {
81 .type = DSS_PLL_TYPE_B,
82
134 .n_max = 255, 83 .n_max = 255,
135 .m_min = 20, 84 .m_min = 20,
136 .m_max = 4095, 85 .m_max = 4095,
@@ -154,6 +103,8 @@ static const struct dss_pll_hw dss_omap4_hdmi_pll_hw = {
154}; 103};
155 104
156static const struct dss_pll_hw dss_omap5_hdmi_pll_hw = { 105static const struct dss_pll_hw dss_omap5_hdmi_pll_hw = {
106 .type = DSS_PLL_TYPE_B,
107
157 .n_max = 255, 108 .n_max = 255,
158 .m_min = 20, 109 .m_min = 20,
159 .m_max = 2045, 110 .m_max = 2045,
@@ -225,6 +176,7 @@ int hdmi_pll_init(struct platform_device *pdev, struct hdmi_pll_data *pll,
225 int r; 176 int r;
226 struct resource *res; 177 struct resource *res;
227 178
179 pll->pdev = pdev;
228 pll->wp = wp; 180 pll->wp = wp;
229 181
230 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pll"); 182 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pll");
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
index 055f62fca5dc..203694a52d18 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
@@ -15,8 +15,8 @@
15#include <linux/io.h> 15#include <linux/io.h>
16#include <linux/platform_device.h> 16#include <linux/platform_device.h>
17#include <linux/seq_file.h> 17#include <linux/seq_file.h>
18#include <video/omapdss.h>
19 18
19#include "omapdss.h"
20#include "dss.h" 20#include "dss.h"
21#include "hdmi.h" 21#include "hdmi.h"
22 22
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h
index d7e7c909bbc2..6eaf1adbd606 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss.h
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h
@@ -18,7 +18,872 @@
18#ifndef __OMAP_DRM_DSS_H 18#ifndef __OMAP_DRM_DSS_H
19#define __OMAP_DRM_DSS_H 19#define __OMAP_DRM_DSS_H
20 20
21#include <video/omapdss.h> 21#include <linux/list.h>
22#include <linux/kobject.h>
23#include <linux/device.h>
24#include <linux/interrupt.h>
25#include <video/videomode.h>
26#include <linux/platform_data/omapdss.h>
27#include <uapi/drm/drm_mode.h>
28
29#define DISPC_IRQ_FRAMEDONE (1 << 0)
30#define DISPC_IRQ_VSYNC (1 << 1)
31#define DISPC_IRQ_EVSYNC_EVEN (1 << 2)
32#define DISPC_IRQ_EVSYNC_ODD (1 << 3)
33#define DISPC_IRQ_ACBIAS_COUNT_STAT (1 << 4)
34#define DISPC_IRQ_PROG_LINE_NUM (1 << 5)
35#define DISPC_IRQ_GFX_FIFO_UNDERFLOW (1 << 6)
36#define DISPC_IRQ_GFX_END_WIN (1 << 7)
37#define DISPC_IRQ_PAL_GAMMA_MASK (1 << 8)
38#define DISPC_IRQ_OCP_ERR (1 << 9)
39#define DISPC_IRQ_VID1_FIFO_UNDERFLOW (1 << 10)
40#define DISPC_IRQ_VID1_END_WIN (1 << 11)
41#define DISPC_IRQ_VID2_FIFO_UNDERFLOW (1 << 12)
42#define DISPC_IRQ_VID2_END_WIN (1 << 13)
43#define DISPC_IRQ_SYNC_LOST (1 << 14)
44#define DISPC_IRQ_SYNC_LOST_DIGIT (1 << 15)
45#define DISPC_IRQ_WAKEUP (1 << 16)
46#define DISPC_IRQ_SYNC_LOST2 (1 << 17)
47#define DISPC_IRQ_VSYNC2 (1 << 18)
48#define DISPC_IRQ_VID3_END_WIN (1 << 19)
49#define DISPC_IRQ_VID3_FIFO_UNDERFLOW (1 << 20)
50#define DISPC_IRQ_ACBIAS_COUNT_STAT2 (1 << 21)
51#define DISPC_IRQ_FRAMEDONE2 (1 << 22)
52#define DISPC_IRQ_FRAMEDONEWB (1 << 23)
53#define DISPC_IRQ_FRAMEDONETV (1 << 24)
54#define DISPC_IRQ_WBBUFFEROVERFLOW (1 << 25)
55#define DISPC_IRQ_WBUNCOMPLETEERROR (1 << 26)
56#define DISPC_IRQ_SYNC_LOST3 (1 << 27)
57#define DISPC_IRQ_VSYNC3 (1 << 28)
58#define DISPC_IRQ_ACBIAS_COUNT_STAT3 (1 << 29)
59#define DISPC_IRQ_FRAMEDONE3 (1 << 30)
60
61struct omap_dss_device;
62struct omap_overlay_manager;
63struct dss_lcd_mgr_config;
64struct snd_aes_iec958;
65struct snd_cea_861_aud_if;
66struct hdmi_avi_infoframe;
67
68enum omap_display_type {
69 OMAP_DISPLAY_TYPE_NONE = 0,
70 OMAP_DISPLAY_TYPE_DPI = 1 << 0,
71 OMAP_DISPLAY_TYPE_DBI = 1 << 1,
72 OMAP_DISPLAY_TYPE_SDI = 1 << 2,
73 OMAP_DISPLAY_TYPE_DSI = 1 << 3,
74 OMAP_DISPLAY_TYPE_VENC = 1 << 4,
75 OMAP_DISPLAY_TYPE_HDMI = 1 << 5,
76 OMAP_DISPLAY_TYPE_DVI = 1 << 6,
77};
78
79enum omap_plane {
80 OMAP_DSS_GFX = 0,
81 OMAP_DSS_VIDEO1 = 1,
82 OMAP_DSS_VIDEO2 = 2,
83 OMAP_DSS_VIDEO3 = 3,
84 OMAP_DSS_WB = 4,
85};
86
87enum omap_channel {
88 OMAP_DSS_CHANNEL_LCD = 0,
89 OMAP_DSS_CHANNEL_DIGIT = 1,
90 OMAP_DSS_CHANNEL_LCD2 = 2,
91 OMAP_DSS_CHANNEL_LCD3 = 3,
92 OMAP_DSS_CHANNEL_WB = 4,
93};
94
95enum omap_color_mode {
96 OMAP_DSS_COLOR_CLUT1 = 1 << 0, /* BITMAP 1 */
97 OMAP_DSS_COLOR_CLUT2 = 1 << 1, /* BITMAP 2 */
98 OMAP_DSS_COLOR_CLUT4 = 1 << 2, /* BITMAP 4 */
99 OMAP_DSS_COLOR_CLUT8 = 1 << 3, /* BITMAP 8 */
100 OMAP_DSS_COLOR_RGB12U = 1 << 4, /* RGB12, 16-bit container */
101 OMAP_DSS_COLOR_ARGB16 = 1 << 5, /* ARGB16 */
102 OMAP_DSS_COLOR_RGB16 = 1 << 6, /* RGB16 */
103 OMAP_DSS_COLOR_RGB24U = 1 << 7, /* RGB24, 32-bit container */
104 OMAP_DSS_COLOR_RGB24P = 1 << 8, /* RGB24, 24-bit container */
105 OMAP_DSS_COLOR_YUV2 = 1 << 9, /* YUV2 4:2:2 co-sited */
106 OMAP_DSS_COLOR_UYVY = 1 << 10, /* UYVY 4:2:2 co-sited */
107 OMAP_DSS_COLOR_ARGB32 = 1 << 11, /* ARGB32 */
108 OMAP_DSS_COLOR_RGBA32 = 1 << 12, /* RGBA32 */
109 OMAP_DSS_COLOR_RGBX32 = 1 << 13, /* RGBx32 */
110 OMAP_DSS_COLOR_NV12 = 1 << 14, /* NV12 format: YUV 4:2:0 */
111 OMAP_DSS_COLOR_RGBA16 = 1 << 15, /* RGBA16 - 4444 */
112 OMAP_DSS_COLOR_RGBX16 = 1 << 16, /* RGBx16 - 4444 */
113 OMAP_DSS_COLOR_ARGB16_1555 = 1 << 17, /* ARGB16 - 1555 */
114 OMAP_DSS_COLOR_XRGB16_1555 = 1 << 18, /* xRGB16 - 1555 */
115};
116
117enum omap_dss_load_mode {
118 OMAP_DSS_LOAD_CLUT_AND_FRAME = 0,
119 OMAP_DSS_LOAD_CLUT_ONLY = 1,
120 OMAP_DSS_LOAD_FRAME_ONLY = 2,
121 OMAP_DSS_LOAD_CLUT_ONCE_FRAME = 3,
122};
123
124enum omap_dss_trans_key_type {
125 OMAP_DSS_COLOR_KEY_GFX_DST = 0,
126 OMAP_DSS_COLOR_KEY_VID_SRC = 1,
127};
128
129enum omap_rfbi_te_mode {
130 OMAP_DSS_RFBI_TE_MODE_1 = 1,
131 OMAP_DSS_RFBI_TE_MODE_2 = 2,
132};
133
134enum omap_dss_signal_level {
135 OMAPDSS_SIG_ACTIVE_LOW,
136 OMAPDSS_SIG_ACTIVE_HIGH,
137};
138
139enum omap_dss_signal_edge {
140 OMAPDSS_DRIVE_SIG_FALLING_EDGE,
141 OMAPDSS_DRIVE_SIG_RISING_EDGE,
142};
143
144enum omap_dss_venc_type {
145 OMAP_DSS_VENC_TYPE_COMPOSITE,
146 OMAP_DSS_VENC_TYPE_SVIDEO,
147};
148
149enum omap_dss_dsi_pixel_format {
150 OMAP_DSS_DSI_FMT_RGB888,
151 OMAP_DSS_DSI_FMT_RGB666,
152 OMAP_DSS_DSI_FMT_RGB666_PACKED,
153 OMAP_DSS_DSI_FMT_RGB565,
154};
155
156enum omap_dss_dsi_mode {
157 OMAP_DSS_DSI_CMD_MODE = 0,
158 OMAP_DSS_DSI_VIDEO_MODE,
159};
160
161enum omap_display_caps {
162 OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE = 1 << 0,
163 OMAP_DSS_DISPLAY_CAP_TEAR_ELIM = 1 << 1,
164};
165
166enum omap_dss_display_state {
167 OMAP_DSS_DISPLAY_DISABLED = 0,
168 OMAP_DSS_DISPLAY_ACTIVE,
169};
170
171enum omap_dss_rotation_type {
172 OMAP_DSS_ROT_DMA = 1 << 0,
173 OMAP_DSS_ROT_VRFB = 1 << 1,
174 OMAP_DSS_ROT_TILER = 1 << 2,
175};
176
177/* clockwise rotation angle */
178enum omap_dss_rotation_angle {
179 OMAP_DSS_ROT_0 = 0,
180 OMAP_DSS_ROT_90 = 1,
181 OMAP_DSS_ROT_180 = 2,
182 OMAP_DSS_ROT_270 = 3,
183};
184
185enum omap_overlay_caps {
186 OMAP_DSS_OVL_CAP_SCALE = 1 << 0,
187 OMAP_DSS_OVL_CAP_GLOBAL_ALPHA = 1 << 1,
188 OMAP_DSS_OVL_CAP_PRE_MULT_ALPHA = 1 << 2,
189 OMAP_DSS_OVL_CAP_ZORDER = 1 << 3,
190 OMAP_DSS_OVL_CAP_POS = 1 << 4,
191 OMAP_DSS_OVL_CAP_REPLICATION = 1 << 5,
192};
193
194enum omap_overlay_manager_caps {
195 OMAP_DSS_DUMMY_VALUE, /* add a dummy value to prevent compiler error */
196};
197
198enum omap_dss_clk_source {
199 OMAP_DSS_CLK_SRC_FCK = 0, /* OMAP2/3: DSS1_ALWON_FCLK
200 * OMAP4: DSS_FCLK */
201 OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC, /* OMAP3: DSI1_PLL_FCLK
202 * OMAP4: PLL1_CLK1 */
203 OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI, /* OMAP3: DSI2_PLL_FCLK
204 * OMAP4: PLL1_CLK2 */
205 OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC, /* OMAP4: PLL2_CLK1 */
206 OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI, /* OMAP4: PLL2_CLK2 */
207};
208
209enum omap_hdmi_flags {
210 OMAP_HDMI_SDA_SCL_EXTERNAL_PULLUP = 1 << 0,
211};
212
213enum omap_dss_output_id {
214 OMAP_DSS_OUTPUT_DPI = 1 << 0,
215 OMAP_DSS_OUTPUT_DBI = 1 << 1,
216 OMAP_DSS_OUTPUT_SDI = 1 << 2,
217 OMAP_DSS_OUTPUT_DSI1 = 1 << 3,
218 OMAP_DSS_OUTPUT_DSI2 = 1 << 4,
219 OMAP_DSS_OUTPUT_VENC = 1 << 5,
220 OMAP_DSS_OUTPUT_HDMI = 1 << 6,
221};
222
223/* RFBI */
224
225struct rfbi_timings {
226 int cs_on_time;
227 int cs_off_time;
228 int we_on_time;
229 int we_off_time;
230 int re_on_time;
231 int re_off_time;
232 int we_cycle_time;
233 int re_cycle_time;
234 int cs_pulse_width;
235 int access_time;
236
237 int clk_div;
238
239 u32 tim[5]; /* set by rfbi_convert_timings() */
240
241 int converted;
242};
243
244/* DSI */
245
246enum omap_dss_dsi_trans_mode {
247 /* Sync Pulses: both sync start and end packets sent */
248 OMAP_DSS_DSI_PULSE_MODE,
249 /* Sync Events: only sync start packets sent */
250 OMAP_DSS_DSI_EVENT_MODE,
251 /* Burst: only sync start packets sent, pixels are time compressed */
252 OMAP_DSS_DSI_BURST_MODE,
253};
254
255struct omap_dss_dsi_videomode_timings {
256 unsigned long hsclk;
257
258 unsigned ndl;
259 unsigned bitspp;
260
261 /* pixels */
262 u16 hact;
263 /* lines */
264 u16 vact;
265
266 /* DSI video mode blanking data */
267 /* Unit: byte clock cycles */
268 u16 hss;
269 u16 hsa;
270 u16 hse;
271 u16 hfp;
272 u16 hbp;
273 /* Unit: line clocks */
274 u16 vsa;
275 u16 vfp;
276 u16 vbp;
277
278 /* DSI blanking modes */
279 int blanking_mode;
280 int hsa_blanking_mode;
281 int hbp_blanking_mode;
282 int hfp_blanking_mode;
283
284 enum omap_dss_dsi_trans_mode trans_mode;
285
286 bool ddr_clk_always_on;
287 int window_sync;
288};
289
290struct omap_dss_dsi_config {
291 enum omap_dss_dsi_mode mode;
292 enum omap_dss_dsi_pixel_format pixel_format;
293 const struct omap_video_timings *timings;
294
295 unsigned long hs_clk_min, hs_clk_max;
296 unsigned long lp_clk_min, lp_clk_max;
297
298 bool ddr_clk_always_on;
299 enum omap_dss_dsi_trans_mode trans_mode;
300};
301
302struct omap_video_timings {
303 /* Unit: pixels */
304 u16 x_res;
305 /* Unit: pixels */
306 u16 y_res;
307 /* Unit: Hz */
308 u32 pixelclock;
309 /* Unit: pixel clocks */
310 u16 hsw; /* Horizontal synchronization pulse width */
311 /* Unit: pixel clocks */
312 u16 hfp; /* Horizontal front porch */
313 /* Unit: pixel clocks */
314 u16 hbp; /* Horizontal back porch */
315 /* Unit: line clocks */
316 u16 vsw; /* Vertical synchronization pulse width */
317 /* Unit: line clocks */
318 u16 vfp; /* Vertical front porch */
319 /* Unit: line clocks */
320 u16 vbp; /* Vertical back porch */
321
322 /* Vsync logic level */
323 enum omap_dss_signal_level vsync_level;
324 /* Hsync logic level */
325 enum omap_dss_signal_level hsync_level;
326 /* Interlaced or Progressive timings */
327 bool interlace;
328 /* Pixel clock edge to drive LCD data */
329 enum omap_dss_signal_edge data_pclk_edge;
330 /* Data enable logic level */
331 enum omap_dss_signal_level de_level;
332 /* Pixel clock edges to drive HSYNC and VSYNC signals */
333 enum omap_dss_signal_edge sync_pclk_edge;
334
335 bool double_pixel;
336};
337
338/* Hardcoded timings for tv modes. Venc only uses these to
339 * identify the mode, and does not actually use the configs
340 * itself. However, the configs should be something that
341 * a normal monitor can also show */
342extern const struct omap_video_timings omap_dss_pal_timings;
343extern const struct omap_video_timings omap_dss_ntsc_timings;
344
345struct omap_dss_cpr_coefs {
346 s16 rr, rg, rb;
347 s16 gr, gg, gb;
348 s16 br, bg, bb;
349};
350
351struct omap_overlay_info {
352 dma_addr_t paddr;
353 dma_addr_t p_uv_addr; /* for NV12 format */
354 u16 screen_width;
355 u16 width;
356 u16 height;
357 enum omap_color_mode color_mode;
358 u8 rotation;
359 enum omap_dss_rotation_type rotation_type;
360 bool mirror;
361
362 u16 pos_x;
363 u16 pos_y;
364 u16 out_width; /* if 0, out_width == width */
365 u16 out_height; /* if 0, out_height == height */
366 u8 global_alpha;
367 u8 pre_mult_alpha;
368 u8 zorder;
369};
370
371struct omap_overlay {
372 struct kobject kobj;
373 struct list_head list;
374
375 /* static fields */
376 const char *name;
377 enum omap_plane id;
378 enum omap_color_mode supported_modes;
379 enum omap_overlay_caps caps;
380
381 /* dynamic fields */
382 struct omap_overlay_manager *manager;
383
384 /*
385 * The following functions do not block:
386 *
387 * is_enabled
388 * set_overlay_info
389 * get_overlay_info
390 *
391 * The rest of the functions may block and cannot be called from
392 * interrupt context
393 */
394
395 int (*enable)(struct omap_overlay *ovl);
396 int (*disable)(struct omap_overlay *ovl);
397 bool (*is_enabled)(struct omap_overlay *ovl);
398
399 int (*set_manager)(struct omap_overlay *ovl,
400 struct omap_overlay_manager *mgr);
401 int (*unset_manager)(struct omap_overlay *ovl);
402
403 int (*set_overlay_info)(struct omap_overlay *ovl,
404 struct omap_overlay_info *info);
405 void (*get_overlay_info)(struct omap_overlay *ovl,
406 struct omap_overlay_info *info);
407
408 int (*wait_for_go)(struct omap_overlay *ovl);
409
410 struct omap_dss_device *(*get_device)(struct omap_overlay *ovl);
411};
412
413struct omap_overlay_manager_info {
414 u32 default_color;
415
416 enum omap_dss_trans_key_type trans_key_type;
417 u32 trans_key;
418 bool trans_enabled;
419
420 bool partial_alpha_enabled;
421
422 bool cpr_enable;
423 struct omap_dss_cpr_coefs cpr_coefs;
424};
425
426struct omap_overlay_manager {
427 struct kobject kobj;
428
429 /* static fields */
430 const char *name;
431 enum omap_channel id;
432 enum omap_overlay_manager_caps caps;
433 struct list_head overlays;
434 enum omap_display_type supported_displays;
435 enum omap_dss_output_id supported_outputs;
436
437 /* dynamic fields */
438 struct omap_dss_device *output;
439
440 /*
441 * The following functions do not block:
442 *
443 * set_manager_info
444 * get_manager_info
445 * apply
446 *
447 * The rest of the functions may block and cannot be called from
448 * interrupt context
449 */
450
451 int (*set_output)(struct omap_overlay_manager *mgr,
452 struct omap_dss_device *output);
453 int (*unset_output)(struct omap_overlay_manager *mgr);
454
455 int (*set_manager_info)(struct omap_overlay_manager *mgr,
456 struct omap_overlay_manager_info *info);
457 void (*get_manager_info)(struct omap_overlay_manager *mgr,
458 struct omap_overlay_manager_info *info);
459
460 int (*apply)(struct omap_overlay_manager *mgr);
461 int (*wait_for_go)(struct omap_overlay_manager *mgr);
462 int (*wait_for_vsync)(struct omap_overlay_manager *mgr);
463
464 struct omap_dss_device *(*get_device)(struct omap_overlay_manager *mgr);
465};
466
467/* 22 pins means 1 clk lane and 10 data lanes */
468#define OMAP_DSS_MAX_DSI_PINS 22
469
470struct omap_dsi_pin_config {
471 int num_pins;
472 /*
473 * pin numbers in the following order:
474 * clk+, clk-
475 * data1+, data1-
476 * data2+, data2-
477 * ...
478 */
479 int pins[OMAP_DSS_MAX_DSI_PINS];
480};
481
482struct omap_dss_writeback_info {
483 u32 paddr;
484 u32 p_uv_addr;
485 u16 buf_width;
486 u16 width;
487 u16 height;
488 enum omap_color_mode color_mode;
489 u8 rotation;
490 enum omap_dss_rotation_type rotation_type;
491 bool mirror;
492 u8 pre_mult_alpha;
493};
494
495struct omapdss_dpi_ops {
496 int (*connect)(struct omap_dss_device *dssdev,
497 struct omap_dss_device *dst);
498 void (*disconnect)(struct omap_dss_device *dssdev,
499 struct omap_dss_device *dst);
500
501 int (*enable)(struct omap_dss_device *dssdev);
502 void (*disable)(struct omap_dss_device *dssdev);
503
504 int (*check_timings)(struct omap_dss_device *dssdev,
505 struct omap_video_timings *timings);
506 void (*set_timings)(struct omap_dss_device *dssdev,
507 struct omap_video_timings *timings);
508 void (*get_timings)(struct omap_dss_device *dssdev,
509 struct omap_video_timings *timings);
510
511 void (*set_data_lines)(struct omap_dss_device *dssdev, int data_lines);
512};
513
514struct omapdss_sdi_ops {
515 int (*connect)(struct omap_dss_device *dssdev,
516 struct omap_dss_device *dst);
517 void (*disconnect)(struct omap_dss_device *dssdev,
518 struct omap_dss_device *dst);
519
520 int (*enable)(struct omap_dss_device *dssdev);
521 void (*disable)(struct omap_dss_device *dssdev);
522
523 int (*check_timings)(struct omap_dss_device *dssdev,
524 struct omap_video_timings *timings);
525 void (*set_timings)(struct omap_dss_device *dssdev,
526 struct omap_video_timings *timings);
527 void (*get_timings)(struct omap_dss_device *dssdev,
528 struct omap_video_timings *timings);
529
530 void (*set_datapairs)(struct omap_dss_device *dssdev, int datapairs);
531};
532
533struct omapdss_dvi_ops {
534 int (*connect)(struct omap_dss_device *dssdev,
535 struct omap_dss_device *dst);
536 void (*disconnect)(struct omap_dss_device *dssdev,
537 struct omap_dss_device *dst);
538
539 int (*enable)(struct omap_dss_device *dssdev);
540 void (*disable)(struct omap_dss_device *dssdev);
541
542 int (*check_timings)(struct omap_dss_device *dssdev,
543 struct omap_video_timings *timings);
544 void (*set_timings)(struct omap_dss_device *dssdev,
545 struct omap_video_timings *timings);
546 void (*get_timings)(struct omap_dss_device *dssdev,
547 struct omap_video_timings *timings);
548};
549
550struct omapdss_atv_ops {
551 int (*connect)(struct omap_dss_device *dssdev,
552 struct omap_dss_device *dst);
553 void (*disconnect)(struct omap_dss_device *dssdev,
554 struct omap_dss_device *dst);
555
556 int (*enable)(struct omap_dss_device *dssdev);
557 void (*disable)(struct omap_dss_device *dssdev);
558
559 int (*check_timings)(struct omap_dss_device *dssdev,
560 struct omap_video_timings *timings);
561 void (*set_timings)(struct omap_dss_device *dssdev,
562 struct omap_video_timings *timings);
563 void (*get_timings)(struct omap_dss_device *dssdev,
564 struct omap_video_timings *timings);
565
566 void (*set_type)(struct omap_dss_device *dssdev,
567 enum omap_dss_venc_type type);
568 void (*invert_vid_out_polarity)(struct omap_dss_device *dssdev,
569 bool invert_polarity);
570
571 int (*set_wss)(struct omap_dss_device *dssdev, u32 wss);
572 u32 (*get_wss)(struct omap_dss_device *dssdev);
573};
574
575struct omapdss_hdmi_ops {
576 int (*connect)(struct omap_dss_device *dssdev,
577 struct omap_dss_device *dst);
578 void (*disconnect)(struct omap_dss_device *dssdev,
579 struct omap_dss_device *dst);
580
581 int (*enable)(struct omap_dss_device *dssdev);
582 void (*disable)(struct omap_dss_device *dssdev);
583
584 int (*check_timings)(struct omap_dss_device *dssdev,
585 struct omap_video_timings *timings);
586 void (*set_timings)(struct omap_dss_device *dssdev,
587 struct omap_video_timings *timings);
588 void (*get_timings)(struct omap_dss_device *dssdev,
589 struct omap_video_timings *timings);
590
591 int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len);
592 bool (*detect)(struct omap_dss_device *dssdev);
593
594 int (*set_hdmi_mode)(struct omap_dss_device *dssdev, bool hdmi_mode);
595 int (*set_infoframe)(struct omap_dss_device *dssdev,
596 const struct hdmi_avi_infoframe *avi);
597};
598
599struct omapdss_dsi_ops {
600 int (*connect)(struct omap_dss_device *dssdev,
601 struct omap_dss_device *dst);
602 void (*disconnect)(struct omap_dss_device *dssdev,
603 struct omap_dss_device *dst);
604
605 int (*enable)(struct omap_dss_device *dssdev);
606 void (*disable)(struct omap_dss_device *dssdev, bool disconnect_lanes,
607 bool enter_ulps);
608
609 /* bus configuration */
610 int (*set_config)(struct omap_dss_device *dssdev,
611 const struct omap_dss_dsi_config *cfg);
612 int (*configure_pins)(struct omap_dss_device *dssdev,
613 const struct omap_dsi_pin_config *pin_cfg);
614
615 void (*enable_hs)(struct omap_dss_device *dssdev, int channel,
616 bool enable);
617 int (*enable_te)(struct omap_dss_device *dssdev, bool enable);
618
619 int (*update)(struct omap_dss_device *dssdev, int channel,
620 void (*callback)(int, void *), void *data);
621
622 void (*bus_lock)(struct omap_dss_device *dssdev);
623 void (*bus_unlock)(struct omap_dss_device *dssdev);
624
625 int (*enable_video_output)(struct omap_dss_device *dssdev, int channel);
626 void (*disable_video_output)(struct omap_dss_device *dssdev,
627 int channel);
628
629 int (*request_vc)(struct omap_dss_device *dssdev, int *channel);
630 int (*set_vc_id)(struct omap_dss_device *dssdev, int channel,
631 int vc_id);
632 void (*release_vc)(struct omap_dss_device *dssdev, int channel);
633
634 /* data transfer */
635 int (*dcs_write)(struct omap_dss_device *dssdev, int channel,
636 u8 *data, int len);
637 int (*dcs_write_nosync)(struct omap_dss_device *dssdev, int channel,
638 u8 *data, int len);
639 int (*dcs_read)(struct omap_dss_device *dssdev, int channel, u8 dcs_cmd,
640 u8 *data, int len);
641
642 int (*gen_write)(struct omap_dss_device *dssdev, int channel,
643 u8 *data, int len);
644 int (*gen_write_nosync)(struct omap_dss_device *dssdev, int channel,
645 u8 *data, int len);
646 int (*gen_read)(struct omap_dss_device *dssdev, int channel,
647 u8 *reqdata, int reqlen,
648 u8 *data, int len);
649
650 int (*bta_sync)(struct omap_dss_device *dssdev, int channel);
651
652 int (*set_max_rx_packet_size)(struct omap_dss_device *dssdev,
653 int channel, u16 plen);
654};
655
656struct omap_dss_device {
657 struct kobject kobj;
658 struct device *dev;
659
660 struct module *owner;
661
662 struct list_head panel_list;
663
664 /* alias in the form of "display%d" */
665 char alias[16];
666
667 enum omap_display_type type;
668 enum omap_display_type output_type;
669
670 union {
671 struct {
672 u8 data_lines;
673 } dpi;
674
675 struct {
676 u8 channel;
677 u8 data_lines;
678 } rfbi;
679
680 struct {
681 u8 datapairs;
682 } sdi;
683
684 struct {
685 int module;
686 } dsi;
687
688 struct {
689 enum omap_dss_venc_type type;
690 bool invert_polarity;
691 } venc;
692 } phy;
693
694 struct {
695 struct omap_video_timings timings;
696
697 enum omap_dss_dsi_pixel_format dsi_pix_fmt;
698 enum omap_dss_dsi_mode dsi_mode;
699 } panel;
700
701 struct {
702 u8 pixel_size;
703 struct rfbi_timings rfbi_timings;
704 } ctrl;
705
706 const char *name;
707
708 /* used to match device to driver */
709 const char *driver_name;
710
711 void *data;
712
713 struct omap_dss_driver *driver;
714
715 union {
716 const struct omapdss_dpi_ops *dpi;
717 const struct omapdss_sdi_ops *sdi;
718 const struct omapdss_dvi_ops *dvi;
719 const struct omapdss_hdmi_ops *hdmi;
720 const struct omapdss_atv_ops *atv;
721 const struct omapdss_dsi_ops *dsi;
722 } ops;
723
724 /* helper variable for driver suspend/resume */
725 bool activate_after_resume;
726
727 enum omap_display_caps caps;
728
729 struct omap_dss_device *src;
730
731 enum omap_dss_display_state state;
732
733 /* OMAP DSS output specific fields */
734
735 struct list_head list;
736
737 /* DISPC channel for this output */
738 enum omap_channel dispc_channel;
739 bool dispc_channel_connected;
740
741 /* output instance */
742 enum omap_dss_output_id id;
743
744 /* the port number in the DT node */
745 int port_num;
746
747 /* dynamic fields */
748 struct omap_overlay_manager *manager;
749
750 struct omap_dss_device *dst;
751};
752
753struct omap_dss_driver {
754 int (*probe)(struct omap_dss_device *);
755 void (*remove)(struct omap_dss_device *);
756
757 int (*connect)(struct omap_dss_device *dssdev);
758 void (*disconnect)(struct omap_dss_device *dssdev);
759
760 int (*enable)(struct omap_dss_device *display);
761 void (*disable)(struct omap_dss_device *display);
762 int (*run_test)(struct omap_dss_device *display, int test);
763
764 int (*update)(struct omap_dss_device *dssdev,
765 u16 x, u16 y, u16 w, u16 h);
766 int (*sync)(struct omap_dss_device *dssdev);
767
768 int (*enable_te)(struct omap_dss_device *dssdev, bool enable);
769 int (*get_te)(struct omap_dss_device *dssdev);
770
771 u8 (*get_rotate)(struct omap_dss_device *dssdev);
772 int (*set_rotate)(struct omap_dss_device *dssdev, u8 rotate);
773
774 bool (*get_mirror)(struct omap_dss_device *dssdev);
775 int (*set_mirror)(struct omap_dss_device *dssdev, bool enable);
776
777 int (*memory_read)(struct omap_dss_device *dssdev,
778 void *buf, size_t size,
779 u16 x, u16 y, u16 w, u16 h);
780
781 void (*get_resolution)(struct omap_dss_device *dssdev,
782 u16 *xres, u16 *yres);
783 void (*get_dimensions)(struct omap_dss_device *dssdev,
784 u32 *width, u32 *height);
785 int (*get_recommended_bpp)(struct omap_dss_device *dssdev);
786
787 int (*check_timings)(struct omap_dss_device *dssdev,
788 struct omap_video_timings *timings);
789 void (*set_timings)(struct omap_dss_device *dssdev,
790 struct omap_video_timings *timings);
791 void (*get_timings)(struct omap_dss_device *dssdev,
792 struct omap_video_timings *timings);
793
794 int (*set_wss)(struct omap_dss_device *dssdev, u32 wss);
795 u32 (*get_wss)(struct omap_dss_device *dssdev);
796
797 int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len);
798 bool (*detect)(struct omap_dss_device *dssdev);
799
800 int (*set_hdmi_mode)(struct omap_dss_device *dssdev, bool hdmi_mode);
801 int (*set_hdmi_infoframe)(struct omap_dss_device *dssdev,
802 const struct hdmi_avi_infoframe *avi);
803};
804
805enum omapdss_version omapdss_get_version(void);
806bool omapdss_is_initialized(void);
807
808int omap_dss_register_driver(struct omap_dss_driver *);
809void omap_dss_unregister_driver(struct omap_dss_driver *);
810
811int omapdss_register_display(struct omap_dss_device *dssdev);
812void omapdss_unregister_display(struct omap_dss_device *dssdev);
813
814struct omap_dss_device *omap_dss_get_device(struct omap_dss_device *dssdev);
815void omap_dss_put_device(struct omap_dss_device *dssdev);
816#define for_each_dss_dev(d) while ((d = omap_dss_get_next_device(d)) != NULL)
817struct omap_dss_device *omap_dss_get_next_device(struct omap_dss_device *from);
818struct omap_dss_device *omap_dss_find_device(void *data,
819 int (*match)(struct omap_dss_device *dssdev, void *data));
820const char *omapdss_get_default_display_name(void);
821
822void videomode_to_omap_video_timings(const struct videomode *vm,
823 struct omap_video_timings *ovt);
824void omap_video_timings_to_videomode(const struct omap_video_timings *ovt,
825 struct videomode *vm);
826
827int dss_feat_get_num_mgrs(void);
828int dss_feat_get_num_ovls(void);
829enum omap_color_mode dss_feat_get_supported_color_modes(enum omap_plane plane);
830
831
832
833int omap_dss_get_num_overlay_managers(void);
834struct omap_overlay_manager *omap_dss_get_overlay_manager(int num);
835
836int omap_dss_get_num_overlays(void);
837struct omap_overlay *omap_dss_get_overlay(int num);
838
839int omapdss_register_output(struct omap_dss_device *output);
840void omapdss_unregister_output(struct omap_dss_device *output);
841struct omap_dss_device *omap_dss_get_output(enum omap_dss_output_id id);
842struct omap_dss_device *omap_dss_find_output(const char *name);
843struct omap_dss_device *omap_dss_find_output_by_port_node(struct device_node *port);
844int omapdss_output_set_device(struct omap_dss_device *out,
845 struct omap_dss_device *dssdev);
846int omapdss_output_unset_device(struct omap_dss_device *out);
847
848struct omap_dss_device *omapdss_find_output_from_display(struct omap_dss_device *dssdev);
849struct omap_overlay_manager *omapdss_find_mgr_from_display(struct omap_dss_device *dssdev);
850
851void omapdss_default_get_resolution(struct omap_dss_device *dssdev,
852 u16 *xres, u16 *yres);
853int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev);
854void omapdss_default_get_timings(struct omap_dss_device *dssdev,
855 struct omap_video_timings *timings);
856
857typedef void (*omap_dispc_isr_t) (void *arg, u32 mask);
858int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
859int omap_dispc_unregister_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
860
861int omapdss_compat_init(void);
862void omapdss_compat_uninit(void);
863
864static inline bool omapdss_device_is_connected(struct omap_dss_device *dssdev)
865{
866 return dssdev->src;
867}
868
869static inline bool omapdss_device_is_enabled(struct omap_dss_device *dssdev)
870{
871 return dssdev->state == OMAP_DSS_DISPLAY_ACTIVE;
872}
873
874struct device_node *
875omapdss_of_get_next_port(const struct device_node *parent,
876 struct device_node *prev);
877
878struct device_node *
879omapdss_of_get_next_endpoint(const struct device_node *parent,
880 struct device_node *prev);
881
882struct device_node *
883omapdss_of_get_first_endpoint(const struct device_node *parent);
884
885struct omap_dss_device *
886omapdss_of_find_source_for_first_ep(struct device_node *node);
22 887
23u32 dispc_read_irqstatus(void); 888u32 dispc_read_irqstatus(void);
24void dispc_clear_irqstatus(u32 mask); 889void dispc_clear_irqstatus(u32 mask);
@@ -44,6 +909,10 @@ void dispc_mgr_set_timings(enum omap_channel channel,
44 const struct omap_video_timings *timings); 909 const struct omap_video_timings *timings);
45void dispc_mgr_setup(enum omap_channel channel, 910void dispc_mgr_setup(enum omap_channel channel,
46 const struct omap_overlay_manager_info *info); 911 const struct omap_overlay_manager_info *info);
912u32 dispc_mgr_gamma_size(enum omap_channel channel);
913void dispc_mgr_set_gamma(enum omap_channel channel,
914 const struct drm_color_lut *lut,
915 unsigned int length);
47 916
48int dispc_ovl_enable(enum omap_plane plane, bool enable); 917int dispc_ovl_enable(enum omap_plane plane, bool enable);
49bool dispc_ovl_enabled(enum omap_plane plane); 918bool dispc_ovl_enabled(enum omap_plane plane);
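Note: the header now also exposes a per-channel gamma LUT API (dispc_mgr_gamma_size()/dispc_mgr_set_gamma()) using the DRM drm_color_lut entry format, replacing the unconditional dispc_enable_gamma_table(0) bypass removed from the HDMI drivers above. A minimal, illustrative sketch of loading a linear (identity) ramp:

	/* Illustrative sketch only: program an identity gamma ramp. */
	static int example_load_linear_gamma(enum omap_channel channel)
	{
		u32 size = dispc_mgr_gamma_size(channel);
		struct drm_color_lut *lut;
		u32 i;

		if (size == 0)
			return -ENODEV;		/* channel has no gamma table */

		lut = kcalloc(size, sizeof(*lut), GFP_KERNEL);
		if (!lut)
			return -ENOMEM;

		for (i = 0; i < size; i++) {
			/* drm_color_lut components are 16-bit wide */
			u16 v = size > 1 ? (u32)i * 0xffff / (size - 1) : 0;

			lut[i].red = v;
			lut[i].green = v;
			lut[i].blue = v;
		}

		dispc_mgr_set_gamma(channel, lut, size);

		kfree(lut);
		return 0;
	}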
diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c
index 829232ad8c81..24f859488201 100644
--- a/drivers/gpu/drm/omapdrm/dss/output.c
+++ b/drivers/gpu/drm/omapdrm/dss/output.c
@@ -21,8 +21,7 @@
21#include <linux/slab.h> 21#include <linux/slab.h>
22#include <linux/of.h> 22#include <linux/of.h>
23 23
24#include <video/omapdss.h> 24#include "omapdss.h"
25
26#include "dss.h" 25#include "dss.h"
27 26
28static LIST_HEAD(output_list); 27static LIST_HEAD(output_list);
diff --git a/drivers/gpu/drm/omapdrm/dss/pll.c b/drivers/gpu/drm/omapdrm/dss/pll.c
index f974ddcd3b6e..0a76c89cdc2e 100644
--- a/drivers/gpu/drm/omapdrm/dss/pll.c
+++ b/drivers/gpu/drm/omapdrm/dss/pll.c
@@ -22,8 +22,7 @@
22#include <linux/regulator/consumer.h> 22#include <linux/regulator/consumer.h>
23#include <linux/sched.h> 23#include <linux/sched.h>
24 24
25#include <video/omapdss.h> 25#include "omapdss.h"
26
27#include "dss.h" 26#include "dss.h"
28 27
29#define PLL_CONTROL 0x0000 28#define PLL_CONTROL 0x0000
@@ -76,6 +75,59 @@ struct dss_pll *dss_pll_find(const char *name)
76 return NULL; 75 return NULL;
77} 76}
78 77
78struct dss_pll *dss_pll_find_by_src(enum dss_clk_source src)
79{
80 struct dss_pll *pll;
81
82 switch (src) {
83 default:
84 case DSS_CLK_SRC_FCK:
85 return NULL;
86
87 case DSS_CLK_SRC_HDMI_PLL:
88 return dss_pll_find("hdmi");
89
90 case DSS_CLK_SRC_PLL1_1:
91 case DSS_CLK_SRC_PLL1_2:
92 case DSS_CLK_SRC_PLL1_3:
93 pll = dss_pll_find("dsi0");
94 if (!pll)
95 pll = dss_pll_find("video0");
96 return pll;
97
98 case DSS_CLK_SRC_PLL2_1:
99 case DSS_CLK_SRC_PLL2_2:
100 case DSS_CLK_SRC_PLL2_3:
101 pll = dss_pll_find("dsi1");
102 if (!pll)
103 pll = dss_pll_find("video1");
104 return pll;
105 }
106}
107
108unsigned dss_pll_get_clkout_idx_for_src(enum dss_clk_source src)
109{
110 switch (src) {
111 case DSS_CLK_SRC_HDMI_PLL:
112 return 0;
113
114 case DSS_CLK_SRC_PLL1_1:
115 case DSS_CLK_SRC_PLL2_1:
116 return 0;
117
118 case DSS_CLK_SRC_PLL1_2:
119 case DSS_CLK_SRC_PLL2_2:
120 return 1;
121
122 case DSS_CLK_SRC_PLL1_3:
123 case DSS_CLK_SRC_PLL2_3:
124 return 2;
125
126 default:
127 return 0;
128 }
129}
130
79int dss_pll_enable(struct dss_pll *pll) 131int dss_pll_enable(struct dss_pll *pll)
80{ 132{
81 int r; 133 int r;
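Note: the two new lookups above let a consumer map a dss_clk_source value back to the PLL instance and the clkout index feeding it. A hedged sketch of the call pattern (it assumes pll->cinfo holds the currently programmed rates, as elsewhere in the driver; the helper name is for illustration only):

	/* Illustrative sketch: resolve a clock source to its current rate. */
	static unsigned long example_clk_source_rate(enum dss_clk_source src)
	{
		struct dss_pll *pll = dss_pll_find_by_src(src);
		unsigned idx;

		if (!pll)	/* DSS_CLK_SRC_FCK or unknown: no PLL behind it */
			return 0;

		idx = dss_pll_get_clkout_idx_for_src(src);

		return pll->cinfo.clkout[idx];
	}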
@@ -129,7 +181,7 @@ int dss_pll_set_config(struct dss_pll *pll, const struct dss_pll_clock_info *cin
129 return 0; 181 return 0;
130} 182}
131 183
132bool dss_pll_hsdiv_calc(const struct dss_pll *pll, unsigned long clkdco, 184bool dss_pll_hsdiv_calc_a(const struct dss_pll *pll, unsigned long clkdco,
133 unsigned long out_min, unsigned long out_max, 185 unsigned long out_min, unsigned long out_max,
134 dss_hsdiv_calc_func func, void *data) 186 dss_hsdiv_calc_func func, void *data)
135{ 187{
@@ -154,7 +206,11 @@ bool dss_pll_hsdiv_calc(const struct dss_pll *pll, unsigned long clkdco,
154 return false; 206 return false;
155} 207}
156 208
157bool dss_pll_calc(const struct dss_pll *pll, unsigned long clkin, 209/*
210 * clkdco = clkin / n * m * 2
211 * clkoutX = clkdco / mX
212 */
213bool dss_pll_calc_a(const struct dss_pll *pll, unsigned long clkin,
158 unsigned long pll_min, unsigned long pll_max, 214 unsigned long pll_min, unsigned long pll_max,
159 dss_pll_calc_func func, void *data) 215 dss_pll_calc_func func, void *data)
160{ 216{
@@ -195,6 +251,71 @@ bool dss_pll_calc(const struct dss_pll *pll, unsigned long clkin,
195 return false; 251 return false;
196} 252}
197 253
254/*
255 * This calculates a PLL config that will provide the target_clkout rate
256 * for clkout. Additionally clkdco rate will be the same as clkout rate
257 * when clkout rate is >= min_clkdco.
258 *
259 * clkdco = clkin / n * m + clkin / n * mf / 262144
260 * clkout = clkdco / m2
261 */
262bool dss_pll_calc_b(const struct dss_pll *pll, unsigned long clkin,
263 unsigned long target_clkout, struct dss_pll_clock_info *cinfo)
264{
265 unsigned long fint, clkdco, clkout;
266 unsigned long target_clkdco;
267 unsigned long min_dco;
268 unsigned n, m, mf, m2, sd;
269 const struct dss_pll_hw *hw = pll->hw;
270
271 DSSDBG("clkin %lu, target clkout %lu\n", clkin, target_clkout);
272
273 /* Fint */
274 n = DIV_ROUND_UP(clkin, hw->fint_max);
275 fint = clkin / n;
276
277 /* adjust m2 so that the clkdco will be high enough */
278 min_dco = roundup(hw->clkdco_min, fint);
279 m2 = DIV_ROUND_UP(min_dco, target_clkout);
280 if (m2 == 0)
281 m2 = 1;
282
283 target_clkdco = target_clkout * m2;
284 m = target_clkdco / fint;
285
286 clkdco = fint * m;
287
288 /* adjust clkdco with fractional mf */
289 if (WARN_ON(target_clkdco - clkdco > fint))
290 mf = 0;
291 else
292 mf = (u32)div_u64(262144ull * (target_clkdco - clkdco), fint);
293
294 if (mf > 0)
295 clkdco += (u32)div_u64((u64)mf * fint, 262144);
296
297 clkout = clkdco / m2;
298
299 /* sigma-delta */
300 sd = DIV_ROUND_UP(fint * m, 250000000);
301
302 DSSDBG("N = %u, M = %u, M.f = %u, M2 = %u, SD = %u\n",
303 n, m, mf, m2, sd);
304 DSSDBG("Fint %lu, clkdco %lu, clkout %lu\n", fint, clkdco, clkout);
305
306 cinfo->n = n;
307 cinfo->m = m;
308 cinfo->mf = mf;
309 cinfo->mX[0] = m2;
310 cinfo->sd = sd;
311
312 cinfo->fint = fint;
313 cinfo->clkdco = clkdco;
314 cinfo->clkout[0] = clkout;
315
316 return true;
317}
318
198static int wait_for_bit_change(void __iomem *reg, int bitnum, int value) 319static int wait_for_bit_change(void __iomem *reg, int bitnum, int value)
199{ 320{
200 unsigned long timeout; 321 unsigned long timeout;
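Note: for reference, one rough pass through the type-B arithmetic added above, with illustrative numbers (the fint_max and clkdco_min limits are assumed for the example, not quoted from the patch):

	/*
	 * Worked example (illustrative): clkin = 20 MHz, fint_max = 2.5 MHz,
	 * clkdco_min = 500 MHz, target_clkout = 1.485 GHz (1080p60 bit clock)
	 *
	 *   n      = DIV_ROUND_UP(20 MHz, 2.5 MHz)          = 8
	 *   fint   = 20 MHz / 8                             = 2.5 MHz
	 *   m2     = DIV_ROUND_UP(500 MHz, 1.485 GHz)       = 1
	 *   m      = 1485000000 / 2500000                   = 594
	 *   clkdco = 594 * 2.5 MHz = 1.485 GHz exactly, so mf = 0
	 *   sd     = DIV_ROUND_UP(2.5 MHz * 594, 250 MHz)   = 6
	 *   clkout = clkdco / m2                            = 1.485 GHz
	 */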
diff --git a/drivers/gpu/drm/omapdrm/dss/rfbi.c b/drivers/gpu/drm/omapdrm/dss/rfbi.c
index 3796576dfadf..cd53566d75eb 100644
--- a/drivers/gpu/drm/omapdrm/dss/rfbi.c
+++ b/drivers/gpu/drm/omapdrm/dss/rfbi.c
@@ -38,7 +38,7 @@
38#include <linux/pm_runtime.h> 38#include <linux/pm_runtime.h>
39#include <linux/component.h> 39#include <linux/component.h>
40 40
41#include <video/omapdss.h> 41#include "omapdss.h"
42#include "dss.h" 42#include "dss.h"
43 43
44struct rfbi_reg { u16 idx; }; 44struct rfbi_reg { u16 idx; };
diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c
index cd6d3bfb041d..0a96c321ce62 100644
--- a/drivers/gpu/drm/omapdrm/dss/sdi.c
+++ b/drivers/gpu/drm/omapdrm/dss/sdi.c
@@ -29,7 +29,7 @@
29#include <linux/of.h> 29#include <linux/of.h>
30#include <linux/component.h> 30#include <linux/component.h>
31 31
32#include <video/omapdss.h> 32#include "omapdss.h"
33#include "dss.h" 33#include "dss.h"
34 34
35static struct { 35static struct {
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index 08a2cc778ba9..6eedf2118708 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -37,8 +37,7 @@
37#include <linux/of.h> 37#include <linux/of.h>
38#include <linux/component.h> 38#include <linux/component.h>
39 39
40#include <video/omapdss.h> 40#include "omapdss.h"
41
42#include "dss.h" 41#include "dss.h"
43#include "dss_features.h" 42#include "dss_features.h"
44 43
diff --git a/drivers/gpu/drm/omapdrm/dss/video-pll.c b/drivers/gpu/drm/omapdrm/dss/video-pll.c
index b1ec59e42940..7429de928d4e 100644
--- a/drivers/gpu/drm/omapdrm/dss/video-pll.c
+++ b/drivers/gpu/drm/omapdrm/dss/video-pll.c
@@ -17,8 +17,7 @@
17#include <linux/platform_device.h> 17#include <linux/platform_device.h>
18#include <linux/sched.h> 18#include <linux/sched.h>
19 19
20#include <video/omapdss.h> 20#include "omapdss.h"
21
22#include "dss.h" 21#include "dss.h"
23#include "dss_features.h" 22#include "dss_features.h"
24 23
@@ -108,6 +107,8 @@ static const struct dss_pll_ops dss_pll_ops = {
108}; 107};
109 108
110static const struct dss_pll_hw dss_dra7_video_pll_hw = { 109static const struct dss_pll_hw dss_dra7_video_pll_hw = {
110 .type = DSS_PLL_TYPE_A,
111
111 .n_max = (1 << 8) - 1, 112 .n_max = (1 << 8) - 1,
112 .m_max = (1 << 12) - 1, 113 .m_max = (1 << 12) - 1,
113 .mX_max = (1 << 5) - 1, 114 .mX_max = (1 << 5) - 1,
@@ -124,6 +125,10 @@ static const struct dss_pll_hw dss_dra7_video_pll_hw = {
124 .mX_lsb[0] = 21, 125 .mX_lsb[0] = 21,
125 .mX_msb[1] = 30, 126 .mX_msb[1] = 30,
126 .mX_lsb[1] = 26, 127 .mX_lsb[1] = 26,
128 .mX_msb[2] = 4,
129 .mX_lsb[2] = 0,
130 .mX_msb[3] = 9,
131 .mX_lsb[3] = 5,
127 132
128 .has_refsel = true, 133 .has_refsel = true,
129}; 134};
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
index ce2d67b6a8c7..137fe690a0da 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.c
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -32,7 +32,6 @@
32struct omap_connector { 32struct omap_connector {
33 struct drm_connector base; 33 struct drm_connector base;
34 struct omap_dss_device *dssdev; 34 struct omap_dss_device *dssdev;
35 struct drm_encoder *encoder;
36 bool hdmi_mode; 35 bool hdmi_mode;
37}; 36};
38 37
@@ -256,13 +255,6 @@ static int omap_connector_mode_valid(struct drm_connector *connector,
256 return ret; 255 return ret;
257} 256}
258 257
259struct drm_encoder *omap_connector_attached_encoder(
260 struct drm_connector *connector)
261{
262 struct omap_connector *omap_connector = to_omap_connector(connector);
263 return omap_connector->encoder;
264}
265
266static const struct drm_connector_funcs omap_connector_funcs = { 258static const struct drm_connector_funcs omap_connector_funcs = {
267 .dpms = drm_atomic_helper_connector_dpms, 259 .dpms = drm_atomic_helper_connector_dpms,
268 .reset = drm_atomic_helper_connector_reset, 260 .reset = drm_atomic_helper_connector_reset,
@@ -276,7 +268,6 @@ static const struct drm_connector_funcs omap_connector_funcs = {
276static const struct drm_connector_helper_funcs omap_connector_helper_funcs = { 268static const struct drm_connector_helper_funcs omap_connector_helper_funcs = {
277 .get_modes = omap_connector_get_modes, 269 .get_modes = omap_connector_get_modes,
278 .mode_valid = omap_connector_mode_valid, 270 .mode_valid = omap_connector_mode_valid,
279 .best_encoder = omap_connector_attached_encoder,
280}; 271};
281 272
282/* initialize connector */ 273/* initialize connector */
@@ -296,7 +287,6 @@ struct drm_connector *omap_connector_init(struct drm_device *dev,
296 goto fail; 287 goto fail;
297 288
298 omap_connector->dssdev = dssdev; 289 omap_connector->dssdev = dssdev;
299 omap_connector->encoder = encoder;
300 290
301 connector = &omap_connector->base; 291 connector = &omap_connector->base;
302 292
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 075f2bb44867..180f644e861e 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -372,6 +372,20 @@ static void omap_crtc_mode_set_nofb(struct drm_crtc *crtc)
372 copy_timings_drm_to_omap(&omap_crtc->timings, mode); 372 copy_timings_drm_to_omap(&omap_crtc->timings, mode);
373} 373}
374 374
375static int omap_crtc_atomic_check(struct drm_crtc *crtc,
376 struct drm_crtc_state *state)
377{
378 if (state->color_mgmt_changed && state->gamma_lut) {
379 uint length = state->gamma_lut->length /
380 sizeof(struct drm_color_lut);
381
382 if (length < 2)
383 return -EINVAL;
384 }
385
386 return 0;
387}
388
375static void omap_crtc_atomic_begin(struct drm_crtc *crtc, 389static void omap_crtc_atomic_begin(struct drm_crtc *crtc,
376 struct drm_crtc_state *old_crtc_state) 390 struct drm_crtc_state *old_crtc_state)
377{ 391{
@@ -384,6 +398,32 @@ static void omap_crtc_atomic_flush(struct drm_crtc *crtc,
384 398
385 WARN_ON(omap_crtc->vblank_irq.registered); 399 WARN_ON(omap_crtc->vblank_irq.registered);
386 400
401 if (crtc->state->color_mgmt_changed) {
402 struct drm_color_lut *lut = NULL;
403 uint length = 0;
404
405 if (crtc->state->gamma_lut) {
406 lut = (struct drm_color_lut *)
407 crtc->state->gamma_lut->data;
408 length = crtc->state->gamma_lut->length /
409 sizeof(*lut);
410 }
411 dispc_mgr_set_gamma(omap_crtc->channel, lut, length);
412 }
413
387 if (dispc_mgr_is_enabled(omap_crtc->channel)) { 427 if (dispc_mgr_is_enabled(omap_crtc->channel)) {
388 428
389 DBG("%s: GO", omap_crtc->name); 429 DBG("%s: GO", omap_crtc->name);
@@ -460,6 +500,7 @@ static const struct drm_crtc_funcs omap_crtc_funcs = {
460 .set_config = drm_atomic_helper_set_config, 500 .set_config = drm_atomic_helper_set_config,
461 .destroy = omap_crtc_destroy, 501 .destroy = omap_crtc_destroy,
462 .page_flip = drm_atomic_helper_page_flip, 502 .page_flip = drm_atomic_helper_page_flip,
503 .gamma_set = drm_atomic_helper_legacy_gamma_set,
463 .set_property = drm_atomic_helper_crtc_set_property, 504 .set_property = drm_atomic_helper_crtc_set_property,
464 .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, 505 .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
465 .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, 506 .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
@@ -471,6 +512,7 @@ static const struct drm_crtc_helper_funcs omap_crtc_helper_funcs = {
471 .mode_set_nofb = omap_crtc_mode_set_nofb, 512 .mode_set_nofb = omap_crtc_mode_set_nofb,
472 .disable = omap_crtc_disable, 513 .disable = omap_crtc_disable,
473 .enable = omap_crtc_enable, 514 .enable = omap_crtc_enable,
515 .atomic_check = omap_crtc_atomic_check,
474 .atomic_begin = omap_crtc_atomic_begin, 516 .atomic_begin = omap_crtc_atomic_begin,
475 .atomic_flush = omap_crtc_atomic_flush, 517 .atomic_flush = omap_crtc_atomic_flush,
476}; 518};
@@ -534,6 +576,20 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
534 576
535 drm_crtc_helper_add(crtc, &omap_crtc_helper_funcs); 577 drm_crtc_helper_add(crtc, &omap_crtc_helper_funcs);
536 578
 579 /* The dispc API adapts to whatever size, but the HW supports
 580 * 256 element gamma table for LCDs and 1024 element table for
 581 * OMAP_DSS_CHANNEL_DIGIT. X server assumes 256 element gamma
 582 * tables so let's use that. Size of HW gamma table can be
 583 * extracted with dispc_mgr_gamma_size(). If it returns 0
 584 * gamma table is not supported.
585 */
586 if (dispc_mgr_gamma_size(channel)) {
587 uint gamma_lut_size = 256;
588
589 drm_crtc_enable_color_mgmt(crtc, 0, false, gamma_lut_size);
590 drm_mode_crtc_set_gamma_size(crtc, gamma_lut_size);
591 }
592
537 omap_plane_install_properties(crtc->primary, &crtc->base); 593 omap_plane_install_properties(crtc->primary, &crtc->base);
538 594
539 omap_crtcs[channel] = omap_crtc; 595 omap_crtcs[channel] = omap_crtc;
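The 256-entry choice documented above means userspace-supplied gamma blobs are interpreted as arrays of struct drm_color_lut, with the entry count recovered as length / sizeof(struct drm_color_lut) and rejected below 2 entries by omap_crtc_atomic_check(). The following is a minimal standalone sketch of such a blob filled with an identity curve; it mirrors struct drm_color_lut locally instead of pulling in kernel headers, so the struct name here is illustrative only.

/*
 * Standalone sketch (not part of the patch) of a linear 256-entry
 * gamma LUT as the driver would interpret it.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct color_lut {		/* mirrors struct drm_color_lut from drm_mode.h */
	uint16_t red;
	uint16_t green;
	uint16_t blue;
	uint16_t reserved;
};

int main(void)
{
	struct color_lut lut[256];
	size_t blob_length = sizeof(lut);	/* what gamma_lut->length would hold */
	unsigned int i, entries = blob_length / sizeof(struct color_lut);

	memset(lut, 0, sizeof(lut));
	for (i = 0; i < entries; i++) {
		uint16_t v = (uint16_t)((i * 0xffffu) / (entries - 1));
		lut[i].red = lut[i].green = lut[i].blue = v;	/* identity curve */
	}

	printf("blob of %zu bytes -> %u LUT entries (driver requires >= 2)\n",
	       blob_length, entries);
	return 0;
}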
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index d86f5479345b..26c6134eb744 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -142,8 +142,9 @@ static int omap_atomic_commit(struct drm_device *dev,
142{ 142{
143 struct omap_drm_private *priv = dev->dev_private; 143 struct omap_drm_private *priv = dev->dev_private;
144 struct omap_atomic_state_commit *commit; 144 struct omap_atomic_state_commit *commit;
145 unsigned int i; 145 struct drm_crtc *crtc;
146 int ret; 146 struct drm_crtc_state *crtc_state;
147 int i, ret;
147 148
148 ret = drm_atomic_helper_prepare_planes(dev, state); 149 ret = drm_atomic_helper_prepare_planes(dev, state);
149 if (ret) 150 if (ret)
@@ -163,10 +164,8 @@ static int omap_atomic_commit(struct drm_device *dev,
163 /* Wait until all affected CRTCs have completed previous commits and 164 /* Wait until all affected CRTCs have completed previous commits and
164 * mark them as pending. 165 * mark them as pending.
165 */ 166 */
166 for (i = 0; i < dev->mode_config.num_crtc; ++i) { 167 for_each_crtc_in_state(state, crtc, crtc_state, i)
167 if (state->crtcs[i]) 168 commit->crtcs |= drm_crtc_mask(crtc);
168 commit->crtcs |= 1 << drm_crtc_index(state->crtcs[i]);
169 }
170 169
171 wait_event(priv->commit.wait, !omap_atomic_is_pending(priv, commit)); 170 wait_event(priv->commit.wait, !omap_atomic_is_pending(priv, commit));
172 171
@@ -175,7 +174,7 @@ static int omap_atomic_commit(struct drm_device *dev,
175 spin_unlock(&priv->commit.lock); 174 spin_unlock(&priv->commit.lock);
176 175
177 /* Swap the state, this is the point of no return. */ 176 /* Swap the state, this is the point of no return. */
178 drm_atomic_helper_swap_state(dev, state); 177 drm_atomic_helper_swap_state(state, true);
179 178
180 if (nonblock) 179 if (nonblock)
181 schedule_work(&commit->work); 180 schedule_work(&commit->work);
@@ -203,6 +202,8 @@ static int get_connector_type(struct omap_dss_device *dssdev)
203 return DRM_MODE_CONNECTOR_HDMIA; 202 return DRM_MODE_CONNECTOR_HDMIA;
204 case OMAP_DISPLAY_TYPE_DVI: 203 case OMAP_DISPLAY_TYPE_DVI:
205 return DRM_MODE_CONNECTOR_DVID; 204 return DRM_MODE_CONNECTOR_DVID;
205 case OMAP_DISPLAY_TYPE_DSI:
206 return DRM_MODE_CONNECTOR_DSI;
206 default: 207 default:
207 return DRM_MODE_CONNECTOR_Unknown; 208 return DRM_MODE_CONNECTOR_Unknown;
208 } 209 }
@@ -800,7 +801,6 @@ static struct drm_driver omap_drm_driver = {
800 .unload = dev_unload, 801 .unload = dev_unload,
801 .open = dev_open, 802 .open = dev_open,
802 .lastclose = dev_lastclose, 803 .lastclose = dev_lastclose,
803 .set_busid = drm_platform_set_busid,
804 .get_vblank_counter = drm_vblank_no_hw_counter, 804 .get_vblank_counter = drm_vblank_no_hw_counter,
805 .enable_vblank = omap_irq_enable_vblank, 805 .enable_vblank = omap_irq_enable_vblank,
806 .disable_vblank = omap_irq_disable_vblank, 806 .disable_vblank = omap_irq_disable_vblank,
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index 3f823c368912..dcc30a98b9d4 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -24,7 +24,6 @@
24#include <linux/platform_data/omap_drm.h> 24#include <linux/platform_data/omap_drm.h>
25#include <linux/types.h> 25#include <linux/types.h>
26#include <linux/wait.h> 26#include <linux/wait.h>
27#include <video/omapdss.h>
28 27
29#include <drm/drmP.h> 28#include <drm/drmP.h>
30#include <drm/drm_crtc_helper.h> 29#include <drm/drm_crtc_helper.h>
@@ -183,7 +182,6 @@ struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
183 struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd); 182 struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd);
184struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev, 183struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
185 const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos); 184 const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
186struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p);
187int omap_framebuffer_pin(struct drm_framebuffer *fb); 185int omap_framebuffer_pin(struct drm_framebuffer *fb);
188void omap_framebuffer_unpin(struct drm_framebuffer *fb); 186void omap_framebuffer_unpin(struct drm_framebuffer *fb);
189void omap_framebuffer_update_scanout(struct drm_framebuffer *fb, 187void omap_framebuffer_update_scanout(struct drm_framebuffer *fb,
@@ -231,7 +229,6 @@ int omap_gem_rotated_paddr(struct drm_gem_object *obj, uint32_t orient,
231 int x, int y, dma_addr_t *paddr); 229 int x, int y, dma_addr_t *paddr);
232uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj); 230uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj);
233size_t omap_gem_mmap_size(struct drm_gem_object *obj); 231size_t omap_gem_mmap_size(struct drm_gem_object *obj);
234int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h);
235int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient); 232int omap_gem_tiled_stride(struct drm_gem_object *obj, uint32_t orient);
236 233
237struct dma_buf *omap_gem_prime_export(struct drm_device *dev, 234struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
@@ -239,17 +236,6 @@ struct dma_buf *omap_gem_prime_export(struct drm_device *dev,
239struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev, 236struct drm_gem_object *omap_gem_prime_import(struct drm_device *dev,
240 struct dma_buf *buffer); 237 struct dma_buf *buffer);
241 238
242static inline int align_pitch(int pitch, int width, int bpp)
243{
244 int bytespp = (bpp + 7) / 8;
245 /* in case someone tries to feed us a completely bogus stride: */
246 pitch = max(pitch, width * bytespp);
247 /* PVR needs alignment to 8 pixels.. right now that is the most
248 * restrictive stride requirement..
249 */
250 return roundup(pitch, 8 * bytespp);
251}
252
253/* map crtc to vblank mask */ 239/* map crtc to vblank mask */
254uint32_t pipe2vbl(struct drm_crtc *crtc); 240uint32_t pipe2vbl(struct drm_crtc *crtc);
255struct omap_dss_device *omap_encoder_get_dssdev(struct drm_encoder *encoder); 241struct omap_dss_device *omap_encoder_get_dssdev(struct drm_encoder *encoder);
diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c
index f84570d1636c..31f5178c22c7 100644
--- a/drivers/gpu/drm/omapdrm/omap_fb.c
+++ b/drivers/gpu/drm/omapdrm/omap_fb.c
@@ -115,24 +115,16 @@ static void omap_framebuffer_destroy(struct drm_framebuffer *fb)
115 115
116 for (i = 0; i < n; i++) { 116 for (i = 0; i < n; i++) {
117 struct plane *plane = &omap_fb->planes[i]; 117 struct plane *plane = &omap_fb->planes[i];
118 if (plane->bo) 118
119 drm_gem_object_unreference_unlocked(plane->bo); 119 drm_gem_object_unreference_unlocked(plane->bo);
120 } 120 }
121 121
122 kfree(omap_fb); 122 kfree(omap_fb);
123} 123}
124 124
125static int omap_framebuffer_dirty(struct drm_framebuffer *fb,
126 struct drm_file *file_priv, unsigned flags, unsigned color,
127 struct drm_clip_rect *clips, unsigned num_clips)
128{
129 return 0;
130}
131
132static const struct drm_framebuffer_funcs omap_framebuffer_funcs = { 125static const struct drm_framebuffer_funcs omap_framebuffer_funcs = {
133 .create_handle = omap_framebuffer_create_handle, 126 .create_handle = omap_framebuffer_create_handle,
134 .destroy = omap_framebuffer_destroy, 127 .destroy = omap_framebuffer_destroy,
135 .dirty = omap_framebuffer_dirty,
136}; 128};
137 129
138static uint32_t get_linear_addr(struct plane *plane, 130static uint32_t get_linear_addr(struct plane *plane,
@@ -320,14 +312,6 @@ void omap_framebuffer_unpin(struct drm_framebuffer *fb)
320 mutex_unlock(&omap_fb->lock); 312 mutex_unlock(&omap_fb->lock);
321} 313}
322 314
323struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p)
324{
325 struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
326 if (p >= drm_format_num_planes(fb->pixel_format))
327 return NULL;
328 return omap_fb->planes[p].bo;
329}
330
331/* iterate thru all the connectors, returning ones that are attached 315/* iterate thru all the connectors, returning ones that are attached
332 * to the same fb.. 316 * to the same fb..
333 */ 317 */
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index 89da41ac64d2..adb10fbe918d 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -125,9 +125,8 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
125 mode_cmd.width = sizes->surface_width; 125 mode_cmd.width = sizes->surface_width;
126 mode_cmd.height = sizes->surface_height; 126 mode_cmd.height = sizes->surface_height;
127 127
128 mode_cmd.pitches[0] = align_pitch( 128 mode_cmd.pitches[0] =
129 mode_cmd.width * ((sizes->surface_bpp + 7) / 8), 129 DIV_ROUND_UP(mode_cmd.width * sizes->surface_bpp, 8);
130 mode_cmd.width, sizes->surface_bpp);
131 130
132 fbdev->ywrap_enabled = priv->has_dmm && ywrap_enabled; 131 fbdev->ywrap_enabled = priv->has_dmm && ywrap_enabled;
133 if (fbdev->ywrap_enabled) { 132 if (fbdev->ywrap_enabled) {
@@ -280,9 +279,6 @@ struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev)
280 if (ret) 279 if (ret)
281 goto fini; 280 goto fini;
282 281
283 /* disable all the possible outputs/crtcs before entering KMS mode */
284 drm_helper_disable_unused_functions(dev);
285
286 ret = drm_fb_helper_initial_config(helper, 32); 282 ret = drm_fb_helper_initial_config(helper, 32);
287 if (ret) 283 if (ret)
288 goto fini; 284 goto fini;
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index 0dbd0f03f9bd..505dee0db973 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -383,18 +383,6 @@ size_t omap_gem_mmap_size(struct drm_gem_object *obj)
383 return size; 383 return size;
384} 384}
385 385
386/* get tiled size, returns -EINVAL if not tiled buffer */
387int omap_gem_tiled_size(struct drm_gem_object *obj, uint16_t *w, uint16_t *h)
388{
389 struct omap_gem_object *omap_obj = to_omap_bo(obj);
390 if (omap_obj->flags & OMAP_BO_TILED) {
391 *w = omap_obj->width;
392 *h = omap_obj->height;
393 return 0;
394 }
395 return -EINVAL;
396}
397
398/* ----------------------------------------------------------------------------- 386/* -----------------------------------------------------------------------------
399 * Fault Handling 387 * Fault Handling
400 */ 388 */
@@ -661,7 +649,8 @@ int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
661{ 649{
662 union omap_gem_size gsize; 650 union omap_gem_size gsize;
663 651
664 args->pitch = align_pitch(0, args->width, args->bpp); 652 args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
653
665 args->size = PAGE_ALIGN(args->pitch * args->height); 654 args->size = PAGE_ALIGN(args->pitch * args->height);
666 655
667 gsize = (union omap_gem_size){ 656 gsize = (union omap_gem_size){
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 3a7bdf1c842b..85143d1b9b31 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -168,6 +168,7 @@ static int panel_simple_disable(struct drm_panel *panel)
168 168
169 if (p->backlight) { 169 if (p->backlight) {
170 p->backlight->props.power = FB_BLANK_POWERDOWN; 170 p->backlight->props.power = FB_BLANK_POWERDOWN;
171 p->backlight->props.state |= BL_CORE_FBBLANK;
171 backlight_update_status(p->backlight); 172 backlight_update_status(p->backlight);
172 } 173 }
173 174
@@ -235,6 +236,7 @@ static int panel_simple_enable(struct drm_panel *panel)
235 msleep(p->desc->delay.enable); 236 msleep(p->desc->delay.enable);
236 237
237 if (p->backlight) { 238 if (p->backlight) {
239 p->backlight->props.state &= ~BL_CORE_FBBLANK;
238 p->backlight->props.power = FB_BLANK_UNBLANK; 240 p->backlight->props.power = FB_BLANK_UNBLANK;
239 backlight_update_status(p->backlight); 241 backlight_update_status(p->backlight);
240 } 242 }
@@ -964,8 +966,8 @@ static const struct panel_desc innolux_zj070na_01p = {
964 .num_modes = 1, 966 .num_modes = 1,
965 .bpc = 6, 967 .bpc = 6,
966 .size = { 968 .size = {
967 .width = 1024, 969 .width = 154,
968 .height = 600, 970 .height = 90,
969 }, 971 },
970}; 972};
971 973
@@ -1017,6 +1019,51 @@ static const struct panel_desc lg_lb070wv8 = {
1017 .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, 1019 .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
1018}; 1020};
1019 1021
1022static const struct drm_display_mode lg_lp079qx1_sp0v_mode = {
1023 .clock = 200000,
1024 .hdisplay = 1536,
1025 .hsync_start = 1536 + 12,
1026 .hsync_end = 1536 + 12 + 16,
1027 .htotal = 1536 + 12 + 16 + 48,
1028 .vdisplay = 2048,
1029 .vsync_start = 2048 + 8,
1030 .vsync_end = 2048 + 8 + 4,
1031 .vtotal = 2048 + 8 + 4 + 8,
1032 .vrefresh = 60,
1033 .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
1034};
1035
1036static const struct panel_desc lg_lp079qx1_sp0v = {
1037 .modes = &lg_lp079qx1_sp0v_mode,
1038 .num_modes = 1,
1039 .size = {
1040 .width = 129,
1041 .height = 171,
1042 },
1043};
1044
1045static const struct drm_display_mode lg_lp097qx1_spa1_mode = {
1046 .clock = 205210,
1047 .hdisplay = 2048,
1048 .hsync_start = 2048 + 150,
1049 .hsync_end = 2048 + 150 + 5,
1050 .htotal = 2048 + 150 + 5 + 5,
1051 .vdisplay = 1536,
1052 .vsync_start = 1536 + 3,
1053 .vsync_end = 1536 + 3 + 1,
1054 .vtotal = 1536 + 3 + 1 + 9,
1055 .vrefresh = 60,
1056};
1057
1058static const struct panel_desc lg_lp097qx1_spa1 = {
1059 .modes = &lg_lp097qx1_spa1_mode,
1060 .num_modes = 1,
1061 .size = {
1062 .width = 208,
1063 .height = 147,
1064 },
1065};
1066
1020static const struct drm_display_mode lg_lp120up1_mode = { 1067static const struct drm_display_mode lg_lp120up1_mode = {
1021 .clock = 162300, 1068 .clock = 162300,
1022 .hdisplay = 1920, 1069 .hdisplay = 1920,
@@ -1224,6 +1271,28 @@ static const struct panel_desc qd43003c0_40 = {
1224 .bus_format = MEDIA_BUS_FMT_RGB888_1X24, 1271 .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
1225}; 1272};
1226 1273
1274static const struct drm_display_mode samsung_lsn122dl01_c01_mode = {
1275 .clock = 271560,
1276 .hdisplay = 2560,
1277 .hsync_start = 2560 + 48,
1278 .hsync_end = 2560 + 48 + 32,
1279 .htotal = 2560 + 48 + 32 + 80,
1280 .vdisplay = 1600,
1281 .vsync_start = 1600 + 2,
1282 .vsync_end = 1600 + 2 + 5,
1283 .vtotal = 1600 + 2 + 5 + 57,
1284 .vrefresh = 60,
1285};
1286
1287static const struct panel_desc samsung_lsn122dl01_c01 = {
1288 .modes = &samsung_lsn122dl01_c01_mode,
1289 .num_modes = 1,
1290 .size = {
1291 .width = 263,
1292 .height = 164,
1293 },
1294};
1295
1227static const struct drm_display_mode samsung_ltn101nt05_mode = { 1296static const struct drm_display_mode samsung_ltn101nt05_mode = {
1228 .clock = 54030, 1297 .clock = 54030,
1229 .hdisplay = 1024, 1298 .hdisplay = 1024,
@@ -1242,8 +1311,8 @@ static const struct panel_desc samsung_ltn101nt05 = {
1242 .num_modes = 1, 1311 .num_modes = 1,
1243 .bpc = 6, 1312 .bpc = 6,
1244 .size = { 1313 .size = {
1245 .width = 1024, 1314 .width = 223,
1246 .height = 600, 1315 .height = 125,
1247 }, 1316 },
1248}; 1317};
1249 1318
@@ -1270,6 +1339,53 @@ static const struct panel_desc samsung_ltn140at29_301 = {
1270 }, 1339 },
1271}; 1340};
1272 1341
1342static const struct display_timing sharp_lq101k1ly04_timing = {
1343 .pixelclock = { 60000000, 65000000, 80000000 },
1344 .hactive = { 1280, 1280, 1280 },
1345 .hfront_porch = { 20, 20, 20 },
1346 .hback_porch = { 20, 20, 20 },
1347 .hsync_len = { 10, 10, 10 },
1348 .vactive = { 800, 800, 800 },
1349 .vfront_porch = { 4, 4, 4 },
1350 .vback_porch = { 4, 4, 4 },
1351 .vsync_len = { 4, 4, 4 },
1352 .flags = DISPLAY_FLAGS_PIXDATA_POSEDGE,
1353};
1354
1355static const struct panel_desc sharp_lq101k1ly04 = {
1356 .timings = &sharp_lq101k1ly04_timing,
1357 .num_timings = 1,
1358 .bpc = 8,
1359 .size = {
1360 .width = 217,
1361 .height = 136,
1362 },
1363 .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,
1364};
1365
1366static const struct drm_display_mode sharp_lq123p1jx31_mode = {
1367 .clock = 252750,
1368 .hdisplay = 2400,
1369 .hsync_start = 2400 + 48,
1370 .hsync_end = 2400 + 48 + 32,
1371 .htotal = 2400 + 48 + 32 + 80,
1372 .vdisplay = 1600,
1373 .vsync_start = 1600 + 3,
1374 .vsync_end = 1600 + 3 + 10,
1375 .vtotal = 1600 + 3 + 10 + 33,
1376 .vrefresh = 60,
1377 .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
1378};
1379
1380static const struct panel_desc sharp_lq123p1jx31 = {
1381 .modes = &sharp_lq123p1jx31_mode,
1382 .num_modes = 1,
1383 .size = {
1384 .width = 259,
1385 .height = 173,
1386 },
1387};
1388
1273static const struct drm_display_mode shelly_sca07010_bfn_lnn_mode = { 1389static const struct drm_display_mode shelly_sca07010_bfn_lnn_mode = {
1274 .clock = 33300, 1390 .clock = 33300,
1275 .hdisplay = 800, 1391 .hdisplay = 800,
@@ -1293,6 +1409,29 @@ static const struct panel_desc shelly_sca07010_bfn_lnn = {
1293 .bus_format = MEDIA_BUS_FMT_RGB666_1X18, 1409 .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
1294}; 1410};
1295 1411
1412static const struct drm_display_mode starry_kr122ea0sra_mode = {
1413 .clock = 147000,
1414 .hdisplay = 1920,
1415 .hsync_start = 1920 + 16,
1416 .hsync_end = 1920 + 16 + 16,
1417 .htotal = 1920 + 16 + 16 + 32,
1418 .vdisplay = 1200,
1419 .vsync_start = 1200 + 15,
1420 .vsync_end = 1200 + 15 + 2,
1421 .vtotal = 1200 + 15 + 2 + 18,
1422 .vrefresh = 60,
1423 .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
1424};
1425
1426static const struct panel_desc starry_kr122ea0sra = {
1427 .modes = &starry_kr122ea0sra_mode,
1428 .num_modes = 1,
1429 .size = {
1430 .width = 263,
1431 .height = 164,
1432 },
1433};
1434
1296static const struct drm_display_mode tpk_f07a_0102_mode = { 1435static const struct drm_display_mode tpk_f07a_0102_mode = {
1297 .clock = 33260, 1436 .clock = 33260,
1298 .hdisplay = 800, 1437 .hdisplay = 800,
@@ -1457,6 +1596,12 @@ static const struct of_device_id platform_of_match[] = {
1457 .compatible = "lg,lb070wv8", 1596 .compatible = "lg,lb070wv8",
1458 .data = &lg_lb070wv8, 1597 .data = &lg_lb070wv8,
1459 }, { 1598 }, {
1599 .compatible = "lg,lp079qx1-sp0v",
1600 .data = &lg_lp079qx1_sp0v,
1601 }, {
1602 .compatible = "lg,lp097qx1-spa1",
1603 .data = &lg_lp097qx1_spa1,
1604 }, {
1460 .compatible = "lg,lp120up1", 1605 .compatible = "lg,lp120up1",
1461 .data = &lg_lp120up1, 1606 .data = &lg_lp120up1,
1462 }, { 1607 }, {
@@ -1481,15 +1626,27 @@ static const struct of_device_id platform_of_match[] = {
1481 .compatible = "qiaodian,qd43003c0-40", 1626 .compatible = "qiaodian,qd43003c0-40",
1482 .data = &qd43003c0_40, 1627 .data = &qd43003c0_40,
1483 }, { 1628 }, {
1629 .compatible = "samsung,lsn122dl01-c01",
1630 .data = &samsung_lsn122dl01_c01,
1631 }, {
1484 .compatible = "samsung,ltn101nt05", 1632 .compatible = "samsung,ltn101nt05",
1485 .data = &samsung_ltn101nt05, 1633 .data = &samsung_ltn101nt05,
1486 }, { 1634 }, {
1487 .compatible = "samsung,ltn140at29-301", 1635 .compatible = "samsung,ltn140at29-301",
1488 .data = &samsung_ltn140at29_301, 1636 .data = &samsung_ltn140at29_301,
1489 }, { 1637 }, {
1638 .compatible = "sharp,lq101k1ly04",
1639 .data = &sharp_lq101k1ly04,
1640 }, {
1641 .compatible = "sharp,lq123p1jx31",
1642 .data = &sharp_lq123p1jx31,
1643 }, {
1490 .compatible = "shelly,sca07010-bfn-lnn", 1644 .compatible = "shelly,sca07010-bfn-lnn",
1491 .data = &shelly_sca07010_bfn_lnn, 1645 .data = &shelly_sca07010_bfn_lnn,
1492 }, { 1646 }, {
1647 .compatible = "starry,kr122ea0sra",
1648 .data = &starry_kr122ea0sra,
1649 }, {
1493 .compatible = "tpk,f07a-0102", 1650 .compatible = "tpk,f07a-0102",
1494 .data = &tpk_f07a_0102, 1651 .data = &tpk_f07a_0102,
1495 }, { 1652 }, {
@@ -1701,7 +1858,6 @@ static const struct panel_desc_dsi panasonic_vvx10f004b00 = {
1701 .lanes = 4, 1858 .lanes = 4,
1702}; 1859};
1703 1860
1704
1705static const struct of_device_id dsi_of_match[] = { 1861static const struct of_device_id dsi_of_match[] = {
1706 { 1862 {
1707 .compatible = "auo,b080uan01", 1863 .compatible = "auo,b080uan01",
diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig
index 38c2bb72e456..da45b11b66b8 100644
--- a/drivers/gpu/drm/qxl/Kconfig
+++ b/drivers/gpu/drm/qxl/Kconfig
@@ -1,12 +1,7 @@
1config DRM_QXL 1config DRM_QXL
2 tristate "QXL virtual GPU" 2 tristate "QXL virtual GPU"
3 depends on DRM && PCI 3 depends on DRM && PCI
4 select FB_SYS_FILLRECT
5 select FB_SYS_COPYAREA
6 select FB_SYS_IMAGEBLIT
7 select FB_DEFERRED_IO
8 select DRM_KMS_HELPER 4 select DRM_KMS_HELPER
9 select DRM_KMS_FB_HELPER
10 select DRM_TTM 5 select DRM_TTM
11 select CRC32 6 select CRC32
12 help 7 help
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index b5d4b41361bd..04270f5d110c 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -203,7 +203,7 @@ qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *releas
203bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush) 203bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush)
204{ 204{
205 if (!qxl_check_idle(qdev->release_ring)) { 205 if (!qxl_check_idle(qdev->release_ring)) {
206 queue_work(qdev->gc_queue, &qdev->gc_work); 206 schedule_work(&qdev->gc_work);
207 if (flush) 207 if (flush)
208 flush_work(&qdev->gc_work); 208 flush_work(&qdev->gc_work);
209 return true; 209 return true;
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 8b5d54385892..3aef12742a53 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -221,7 +221,6 @@ static int qxl_crtc_page_flip(struct drm_crtc *crtc,
221{ 221{
222 struct drm_device *dev = crtc->dev; 222 struct drm_device *dev = crtc->dev;
223 struct qxl_device *qdev = dev->dev_private; 223 struct qxl_device *qdev = dev->dev_private;
224 struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
225 struct qxl_framebuffer *qfb_src = to_qxl_framebuffer(fb); 224 struct qxl_framebuffer *qfb_src = to_qxl_framebuffer(fb);
226 struct qxl_framebuffer *qfb_old = to_qxl_framebuffer(crtc->primary->fb); 225 struct qxl_framebuffer *qfb_old = to_qxl_framebuffer(crtc->primary->fb);
227 struct qxl_bo *bo_old = gem_to_qxl_bo(qfb_old->obj); 226 struct qxl_bo *bo_old = gem_to_qxl_bo(qfb_old->obj);
@@ -252,14 +251,14 @@ static int qxl_crtc_page_flip(struct drm_crtc *crtc,
252 qxl_draw_dirty_fb(qdev, qfb_src, bo, 0, 0, 251 qxl_draw_dirty_fb(qdev, qfb_src, bo, 0, 0,
253 &norect, one_clip_rect, inc); 252 &norect, one_clip_rect, inc);
254 253
255 drm_vblank_get(dev, qcrtc->index); 254 drm_crtc_vblank_get(crtc);
256 255
257 if (event) { 256 if (event) {
258 spin_lock_irqsave(&dev->event_lock, flags); 257 spin_lock_irqsave(&dev->event_lock, flags);
259 drm_send_vblank_event(dev, qcrtc->index, event); 258 drm_crtc_send_vblank_event(crtc, event);
260 spin_unlock_irqrestore(&dev->event_lock, flags); 259 spin_unlock_irqrestore(&dev->event_lock, flags);
261 } 260 }
262 drm_vblank_put(dev, qcrtc->index); 261 drm_crtc_vblank_put(crtc);
263 262
264 ret = qxl_bo_reserve(bo, false); 263 ret = qxl_bo_reserve(bo, false);
265 if (!ret) { 264 if (!ret) {
@@ -469,8 +468,7 @@ void qxl_user_framebuffer_destroy(struct drm_framebuffer *fb)
469{ 468{
470 struct qxl_framebuffer *qxl_fb = to_qxl_framebuffer(fb); 469 struct qxl_framebuffer *qxl_fb = to_qxl_framebuffer(fb);
471 470
472 if (qxl_fb->obj) 471 drm_gem_object_unreference_unlocked(qxl_fb->obj);
473 drm_gem_object_unreference_unlocked(qxl_fb->obj);
474 drm_framebuffer_cleanup(fb); 472 drm_framebuffer_cleanup(fb);
475 kfree(qxl_fb); 473 kfree(qxl_fb);
476} 474}
@@ -730,7 +728,6 @@ static int qdev_crtc_init(struct drm_device *dev, int crtc_id)
730 728
731 drm_crtc_init(dev, &qxl_crtc->base, &qxl_crtc_funcs); 729 drm_crtc_init(dev, &qxl_crtc->base, &qxl_crtc_funcs);
732 qxl_crtc->index = crtc_id; 730 qxl_crtc->index = crtc_id;
733 drm_mode_crtc_set_gamma_size(&qxl_crtc->base, 256);
734 drm_crtc_helper_add(&qxl_crtc->base, &qxl_crtc_helper_funcs); 731 drm_crtc_helper_add(&qxl_crtc->base, &qxl_crtc_helper_funcs);
735 return 0; 732 return 0;
736} 733}
diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c
index 56e1d633875e..ffe885395145 100644
--- a/drivers/gpu/drm/qxl/qxl_draw.c
+++ b/drivers/gpu/drm/qxl/qxl_draw.c
@@ -37,7 +37,6 @@ static int alloc_clips(struct qxl_device *qdev,
37 * the qxl_clip_rects. This is *not* the same as the memory allocated 37 * the qxl_clip_rects. This is *not* the same as the memory allocated
38 * on the device, it is offset to qxl_clip_rects.chunk.data */ 38 * on the device, it is offset to qxl_clip_rects.chunk.data */
39static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev, 39static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev,
40 struct qxl_drawable *drawable,
41 unsigned num_clips, 40 unsigned num_clips,
42 struct qxl_bo *clips_bo) 41 struct qxl_bo *clips_bo)
43{ 42{
@@ -136,6 +135,8 @@ static int qxl_palette_create_1bit(struct qxl_bo *palette_bo,
136 * correctly globaly, since that would require 135 * correctly globaly, since that would require
137 * tracking all of our palettes. */ 136 * tracking all of our palettes. */
138 ret = qxl_bo_kmap(palette_bo, (void **)&pal); 137 ret = qxl_bo_kmap(palette_bo, (void **)&pal);
138 if (ret)
139 return ret;
139 pal->num_ents = 2; 140 pal->num_ents = 2;
140 pal->unique = unique++; 141 pal->unique = unique++;
141 if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR) { 142 if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR) {
@@ -349,7 +350,7 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev,
349 if (ret) 350 if (ret)
350 goto out_release_backoff; 351 goto out_release_backoff;
351 352
352 rects = drawable_set_clipping(qdev, drawable, num_clips, clips_bo); 353 rects = drawable_set_clipping(qdev, num_clips, clips_bo);
353 if (!rects) 354 if (!rects)
354 goto out_release_backoff; 355 goto out_release_backoff;
355 356
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index dc9df5fe50ba..460bbceae297 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -256,7 +256,7 @@ static struct drm_driver qxl_driver = {
256 .gem_prime_vmap = qxl_gem_prime_vmap, 256 .gem_prime_vmap = qxl_gem_prime_vmap,
257 .gem_prime_vunmap = qxl_gem_prime_vunmap, 257 .gem_prime_vunmap = qxl_gem_prime_vunmap,
258 .gem_prime_mmap = qxl_gem_prime_mmap, 258 .gem_prime_mmap = qxl_gem_prime_mmap,
259 .gem_free_object = qxl_gem_object_free, 259 .gem_free_object_unlocked = qxl_gem_object_free,
260 .gem_open_object = qxl_gem_object_open, 260 .gem_open_object = qxl_gem_object_open,
261 .gem_close_object = qxl_gem_object_close, 261 .gem_close_object = qxl_gem_object_close,
262 .fops = &qxl_fops, 262 .fops = &qxl_fops,
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 3ad6604b34ce..8e633caa4078 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -321,7 +321,6 @@ struct qxl_device {
321 struct qxl_bo *current_release_bo[3]; 321 struct qxl_bo *current_release_bo[3];
322 int current_release_bo_offset[3]; 322 int current_release_bo_offset[3];
323 323
324 struct workqueue_struct *gc_queue;
325 struct work_struct gc_work; 324 struct work_struct gc_work;
326 325
327 struct drm_property *hotplug_mode_update_property; 326 struct drm_property *hotplug_mode_update_property;
diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c
index 5ea57f6320b8..df2657051afd 100644
--- a/drivers/gpu/drm/qxl/qxl_fb.c
+++ b/drivers/gpu/drm/qxl/qxl_fb.c
@@ -131,10 +131,6 @@ static int qxlfb_create_pinned_object(struct qxl_fbdev *qfbdev,
131 int ret; 131 int ret;
132 int aligned_size, size; 132 int aligned_size, size;
133 int height = mode_cmd->height; 133 int height = mode_cmd->height;
134 int bpp;
135 int depth;
136
137 drm_fb_get_bpp_depth(mode_cmd->pixel_format, &bpp, &depth);
138 134
139 size = mode_cmd->pitches[0] * height; 135 size = mode_cmd->pitches[0] * height;
140 aligned_size = ALIGN(size, PAGE_SIZE); 136 aligned_size = ALIGN(size, PAGE_SIZE);
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index 2319800b7add..e642242728c0 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -258,7 +258,6 @@ static int qxl_device_init(struct qxl_device *qdev,
258 (unsigned long)qdev->surfaceram_size); 258 (unsigned long)qdev->surfaceram_size);
259 259
260 260
261 qdev->gc_queue = create_singlethread_workqueue("qxl_gc");
262 INIT_WORK(&qdev->gc_work, qxl_gc_work); 261 INIT_WORK(&qdev->gc_work, qxl_gc_work);
263 262
264 return 0; 263 return 0;
@@ -270,10 +269,7 @@ static void qxl_device_fini(struct qxl_device *qdev)
270 qxl_bo_unref(&qdev->current_release_bo[0]); 269 qxl_bo_unref(&qdev->current_release_bo[0]);
271 if (qdev->current_release_bo[1]) 270 if (qdev->current_release_bo[1])
272 qxl_bo_unref(&qdev->current_release_bo[1]); 271 qxl_bo_unref(&qdev->current_release_bo[1]);
273 flush_workqueue(qdev->gc_queue); 272 flush_work(&qdev->gc_work);
274 destroy_workqueue(qdev->gc_queue);
275 qdev->gc_queue = NULL;
276
277 qxl_ring_free(qdev->command_ring); 273 qxl_ring_free(qdev->command_ring);
278 qxl_ring_free(qdev->cursor_ring); 274 qxl_ring_free(qdev->cursor_ring);
279 qxl_ring_free(qdev->release_ring); 275 qxl_ring_free(qdev->release_ring);
@@ -310,10 +306,6 @@ int qxl_driver_load(struct drm_device *dev, unsigned long flags)
310 struct qxl_device *qdev; 306 struct qxl_device *qdev;
311 int r; 307 int r;
312 308
313 /* require kms */
314 if (!drm_core_check_feature(dev, DRIVER_MODESET))
315 return -ENODEV;
316
317 qdev = kzalloc(sizeof(struct qxl_device), GFP_KERNEL); 309 qdev = kzalloc(sizeof(struct qxl_device), GFP_KERNEL);
318 if (qdev == NULL) 310 if (qdev == NULL)
319 return -ENOMEM; 311 return -ENOMEM;
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 4efa8e261baf..f599cd073b72 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -96,7 +96,7 @@ retry:
96 return 0; 96 return 0;
97 97
98 if (have_drawable_releases && sc > 300) { 98 if (have_drawable_releases && sc > 300) {
99 FENCE_WARN(fence, "failed to wait on release %d " 99 FENCE_WARN(fence, "failed to wait on release %llu "
100 "after spincount %d\n", 100 "after spincount %d\n",
101 fence->context & ~0xf0000000, sc); 101 fence->context & ~0xf0000000, sc);
102 goto signaled; 102 goto signaled;
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 0738d74c8d04..d50c9679e631 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -350,11 +350,19 @@ static int qxl_bo_move(struct ttm_buffer_object *bo,
350 struct ttm_mem_reg *new_mem) 350 struct ttm_mem_reg *new_mem)
351{ 351{
352 struct ttm_mem_reg *old_mem = &bo->mem; 352 struct ttm_mem_reg *old_mem = &bo->mem;
353 int ret;
354
355 ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
356 if (ret)
357 return ret;
358
359
353 if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { 360 if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
354 qxl_move_null(bo, new_mem); 361 qxl_move_null(bo, new_mem);
355 return 0; 362 return 0;
356 } 363 }
357 return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); 364 return ttm_bo_move_memcpy(bo, evict, interruptible,
365 no_wait_gpu, new_mem);
358} 366}
359 367
360static void qxl_bo_move_notify(struct ttm_buffer_object *bo, 368static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 259cd6e6d71c..a97abc8af657 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -276,14 +276,14 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
276 atombios_enable_crtc_memreq(crtc, ATOM_ENABLE); 276 atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
277 atombios_blank_crtc(crtc, ATOM_DISABLE); 277 atombios_blank_crtc(crtc, ATOM_DISABLE);
278 if (dev->num_crtcs > radeon_crtc->crtc_id) 278 if (dev->num_crtcs > radeon_crtc->crtc_id)
279 drm_vblank_on(dev, radeon_crtc->crtc_id); 279 drm_crtc_vblank_on(crtc);
280 radeon_crtc_load_lut(crtc); 280 radeon_crtc_load_lut(crtc);
281 break; 281 break;
282 case DRM_MODE_DPMS_STANDBY: 282 case DRM_MODE_DPMS_STANDBY:
283 case DRM_MODE_DPMS_SUSPEND: 283 case DRM_MODE_DPMS_SUSPEND:
284 case DRM_MODE_DPMS_OFF: 284 case DRM_MODE_DPMS_OFF:
285 if (dev->num_crtcs > radeon_crtc->crtc_id) 285 if (dev->num_crtcs > radeon_crtc->crtc_id)
286 drm_vblank_off(dev, radeon_crtc->crtc_id); 286 drm_crtc_vblank_off(crtc);
287 if (radeon_crtc->enabled) 287 if (radeon_crtc->enabled)
288 atombios_blank_crtc(crtc, ATOM_ENABLE); 288 atombios_blank_crtc(crtc, ATOM_ENABLE);
289 if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev)) 289 if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev))
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 587cae4e73c9..56bb758f4e33 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -120,6 +120,7 @@ atombios_set_backlight_level(struct radeon_encoder *radeon_encoder, u8 level)
120 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: 120 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
121 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 121 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
122 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 122 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
123 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3:
123 if (dig->backlight_level == 0) 124 if (dig->backlight_level == 0)
124 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0); 125 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_LCD_BLOFF, 0, 0);
125 else { 126 else {
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 35e0fc3ae8a7..7ba450832e6b 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -3843,7 +3843,10 @@ static void ci_find_dpm_states_clocks_in_dpm_table(struct radeon_device *rdev,
3843 if (i >= sclk_table->count) { 3843 if (i >= sclk_table->count) {
3844 pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; 3844 pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
3845 } else { 3845 } else {
3846 /* XXX check display min clock requirements */ 3846 /* XXX The current code always reprogrammed the sclk levels,
3847 * but we don't currently handle disp sclk requirements
3848 * so just skip it.
3849 */
3847 if (CISLAND_MINIMUM_ENGINE_CLOCK != CISLAND_MINIMUM_ENGINE_CLOCK) 3850 if (CISLAND_MINIMUM_ENGINE_CLOCK != CISLAND_MINIMUM_ENGINE_CLOCK)
3848 pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK; 3851 pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
3849 } 3852 }
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index ba192a35c607..0c1b9ff433af 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -53,6 +53,7 @@ MODULE_FIRMWARE("radeon/bonaire_mc.bin");
53MODULE_FIRMWARE("radeon/bonaire_rlc.bin"); 53MODULE_FIRMWARE("radeon/bonaire_rlc.bin");
54MODULE_FIRMWARE("radeon/bonaire_sdma.bin"); 54MODULE_FIRMWARE("radeon/bonaire_sdma.bin");
55MODULE_FIRMWARE("radeon/bonaire_smc.bin"); 55MODULE_FIRMWARE("radeon/bonaire_smc.bin");
56MODULE_FIRMWARE("radeon/bonaire_k_smc.bin");
56 57
57MODULE_FIRMWARE("radeon/HAWAII_pfp.bin"); 58MODULE_FIRMWARE("radeon/HAWAII_pfp.bin");
58MODULE_FIRMWARE("radeon/HAWAII_me.bin"); 59MODULE_FIRMWARE("radeon/HAWAII_me.bin");
@@ -72,6 +73,7 @@ MODULE_FIRMWARE("radeon/hawaii_mc.bin");
72MODULE_FIRMWARE("radeon/hawaii_rlc.bin"); 73MODULE_FIRMWARE("radeon/hawaii_rlc.bin");
73MODULE_FIRMWARE("radeon/hawaii_sdma.bin"); 74MODULE_FIRMWARE("radeon/hawaii_sdma.bin");
74MODULE_FIRMWARE("radeon/hawaii_smc.bin"); 75MODULE_FIRMWARE("radeon/hawaii_smc.bin");
76MODULE_FIRMWARE("radeon/hawaii_k_smc.bin");
75 77
76MODULE_FIRMWARE("radeon/KAVERI_pfp.bin"); 78MODULE_FIRMWARE("radeon/KAVERI_pfp.bin");
77MODULE_FIRMWARE("radeon/KAVERI_me.bin"); 79MODULE_FIRMWARE("radeon/KAVERI_me.bin");
@@ -1990,12 +1992,17 @@ static int cik_init_microcode(struct radeon_device *rdev)
1990 int new_fw = 0; 1992 int new_fw = 0;
1991 int err; 1993 int err;
1992 int num_fw; 1994 int num_fw;
1995 bool new_smc = false;
1993 1996
1994 DRM_DEBUG("\n"); 1997 DRM_DEBUG("\n");
1995 1998
1996 switch (rdev->family) { 1999 switch (rdev->family) {
1997 case CHIP_BONAIRE: 2000 case CHIP_BONAIRE:
1998 chip_name = "BONAIRE"; 2001 chip_name = "BONAIRE";
2002 if ((rdev->pdev->revision == 0x80) ||
2003 (rdev->pdev->revision == 0x81) ||
2004 (rdev->pdev->device == 0x665f))
2005 new_smc = true;
1999 new_chip_name = "bonaire"; 2006 new_chip_name = "bonaire";
2000 pfp_req_size = CIK_PFP_UCODE_SIZE * 4; 2007 pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
2001 me_req_size = CIK_ME_UCODE_SIZE * 4; 2008 me_req_size = CIK_ME_UCODE_SIZE * 4;
@@ -2010,6 +2017,8 @@ static int cik_init_microcode(struct radeon_device *rdev)
2010 break; 2017 break;
2011 case CHIP_HAWAII: 2018 case CHIP_HAWAII:
2012 chip_name = "HAWAII"; 2019 chip_name = "HAWAII";
2020 if (rdev->pdev->revision == 0x80)
2021 new_smc = true;
2013 new_chip_name = "hawaii"; 2022 new_chip_name = "hawaii";
2014 pfp_req_size = CIK_PFP_UCODE_SIZE * 4; 2023 pfp_req_size = CIK_PFP_UCODE_SIZE * 4;
2015 me_req_size = CIK_ME_UCODE_SIZE * 4; 2024 me_req_size = CIK_ME_UCODE_SIZE * 4;
@@ -2259,7 +2268,10 @@ static int cik_init_microcode(struct radeon_device *rdev)
2259 } 2268 }
2260 } 2269 }
2261 2270
2262 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name); 2271 if (new_smc)
2272 snprintf(fw_name, sizeof(fw_name), "radeon/%s_k_smc.bin", new_chip_name);
2273 else
2274 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);
2263 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); 2275 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
2264 if (err) { 2276 if (err) {
2265 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); 2277 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
@@ -8354,7 +8366,8 @@ static int cik_startup(struct radeon_device *rdev)
8354 } 8366 }
8355 } 8367 }
8356 rdev->rlc.cs_data = ci_cs_data; 8368 rdev->rlc.cs_data = ci_cs_data;
8357 rdev->rlc.cp_table_size = CP_ME_TABLE_SIZE * 5 * 4; 8369 rdev->rlc.cp_table_size = ALIGN(CP_ME_TABLE_SIZE * 5 * 4, 2048); /* CP JT */
8370 rdev->rlc.cp_table_size += 64 * 1024; /* GDS */
8358 r = sumo_rlc_init(rdev); 8371 r = sumo_rlc_init(rdev);
8359 if (r) { 8372 if (r) {
8360 DRM_ERROR("Failed to init rlc BOs!\n"); 8373 DRM_ERROR("Failed to init rlc BOs!\n");
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 0d3f744de35a..d960d3915408 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -2209,6 +2209,12 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
2209 } 2209 }
2210 break; 2210 break;
2211 } 2211 }
2212 case PACKET3_PFP_SYNC_ME:
2213 if (pkt->count) {
2214 DRM_ERROR("bad PFP_SYNC_ME\n");
2215 return -EINVAL;
2216 }
2217 break;
2212 case PACKET3_SURFACE_SYNC: 2218 case PACKET3_SURFACE_SYNC:
2213 if (pkt->count != 3) { 2219 if (pkt->count != 3) {
2214 DRM_ERROR("bad SURFACE_SYNC\n"); 2220 DRM_ERROR("bad SURFACE_SYNC\n");
@@ -3381,6 +3387,7 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev,
3381 case PACKET3_MPEG_INDEX: 3387 case PACKET3_MPEG_INDEX:
3382 case PACKET3_WAIT_REG_MEM: 3388 case PACKET3_WAIT_REG_MEM:
3383 case PACKET3_MEM_WRITE: 3389 case PACKET3_MEM_WRITE:
3390 case PACKET3_PFP_SYNC_ME:
3384 case PACKET3_SURFACE_SYNC: 3391 case PACKET3_SURFACE_SYNC:
3385 case PACKET3_EVENT_WRITE: 3392 case PACKET3_EVENT_WRITE:
3386 case PACKET3_EVENT_WRITE_EOP: 3393 case PACKET3_EVENT_WRITE_EOP:
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 0b174e14e9a6..c8e3d394cde7 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -1624,6 +1624,7 @@
1624 */ 1624 */
1625# define PACKET3_CP_DMA_CMD_SAIC (1 << 28) 1625# define PACKET3_CP_DMA_CMD_SAIC (1 << 28)
1626# define PACKET3_CP_DMA_CMD_DAIC (1 << 29) 1626# define PACKET3_CP_DMA_CMD_DAIC (1 << 29)
1627#define PACKET3_PFP_SYNC_ME 0x42
1627#define PACKET3_SURFACE_SYNC 0x43 1628#define PACKET3_SURFACE_SYNC 0x43
1628# define PACKET3_CB0_DEST_BASE_ENA (1 << 6) 1629# define PACKET3_CB0_DEST_BASE_ENA (1 << 6)
1629# define PACKET3_CB1_DEST_BASE_ENA (1 << 7) 1630# define PACKET3_CB1_DEST_BASE_ENA (1 << 7)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 80b24a495d6c..5633ee3eb46e 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -2386,7 +2386,7 @@ struct radeon_device {
2386 struct radeon_mman mman; 2386 struct radeon_mman mman;
2387 struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS]; 2387 struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS];
2388 wait_queue_head_t fence_queue; 2388 wait_queue_head_t fence_queue;
2389 unsigned fence_context; 2389 u64 fence_context;
2390 struct mutex ring_lock; 2390 struct mutex ring_lock;
2391 struct radeon_ring ring[RADEON_NUM_RINGS]; 2391 struct radeon_ring ring[RADEON_NUM_RINGS];
2392 bool ib_pool_ready; 2392 bool ib_pool_ready;
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c
index 59acd0e5c2c6..31c9a92d6a1b 100644
--- a/drivers/gpu/drm/radeon/radeon_acpi.c
+++ b/drivers/gpu/drm/radeon/radeon_acpi.c
@@ -741,13 +741,6 @@ int radeon_acpi_init(struct radeon_device *rdev)
741 } 741 }
742 742
743 atif->encoder_for_bl = target; 743 atif->encoder_for_bl = target;
744 if (!target) {
745 /* Brightness change notification is enabled, but we
746 * didn't find a backlight controller, this should
747 * never happen.
748 */
749 DRM_ERROR("Cannot find a backlight controller\n");
750 }
751 } 744 }
752 745
753 if (atif->functions.sbios_requests && !atif->functions.system_params) { 746 if (atif->functions.sbios_requests && !atif->functions.system_params) {
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index f8097a0e7a79..5df3ec73021b 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -1155,7 +1155,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
1155 le16_to_cpu(firmware_info->info.usReferenceClock); 1155 le16_to_cpu(firmware_info->info.usReferenceClock);
1156 p1pll->reference_div = 0; 1156 p1pll->reference_div = 0;
1157 1157
1158 if (crev < 2) 1158 if ((frev < 2) && (crev < 2))
1159 p1pll->pll_out_min = 1159 p1pll->pll_out_min =
1160 le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output); 1160 le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output);
1161 else 1161 else
@@ -1164,7 +1164,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
1164 p1pll->pll_out_max = 1164 p1pll->pll_out_max =
1165 le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output); 1165 le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output);
1166 1166
1167 if (crev >= 4) { 1167 if (((frev < 2) && (crev >= 4)) || (frev >= 2)) {
1168 p1pll->lcd_pll_out_min = 1168 p1pll->lcd_pll_out_min =
1169 le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100; 1169 le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100;
1170 if (p1pll->lcd_pll_out_min == 0) 1170 if (p1pll->lcd_pll_out_min == 0)
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 95f4fea89302..6de342861202 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -10,6 +10,7 @@
10#include <linux/slab.h> 10#include <linux/slab.h>
11#include <linux/acpi.h> 11#include <linux/acpi.h>
12#include <linux/pci.h> 12#include <linux/pci.h>
13#include <linux/delay.h>
13 14
14#include "radeon_acpi.h" 15#include "radeon_acpi.h"
15 16
@@ -27,6 +28,7 @@ struct radeon_atpx_functions {
27struct radeon_atpx { 28struct radeon_atpx {
28 acpi_handle handle; 29 acpi_handle handle;
29 struct radeon_atpx_functions functions; 30 struct radeon_atpx_functions functions;
31 bool is_hybrid;
30}; 32};
31 33
32static struct radeon_atpx_priv { 34static struct radeon_atpx_priv {
@@ -62,6 +64,14 @@ bool radeon_has_atpx(void) {
62 return radeon_atpx_priv.atpx_detected; 64 return radeon_atpx_priv.atpx_detected;
63} 65}
64 66
67bool radeon_has_atpx_dgpu_power_cntl(void) {
68 return radeon_atpx_priv.atpx.functions.power_cntl;
69}
70
71bool radeon_is_atpx_hybrid(void) {
72 return radeon_atpx_priv.atpx.is_hybrid;
73}
74
65/** 75/**
66 * radeon_atpx_call - call an ATPX method 76 * radeon_atpx_call - call an ATPX method
67 * 77 *
@@ -141,18 +151,12 @@ static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mas
141 */ 151 */
142static int radeon_atpx_validate(struct radeon_atpx *atpx) 152static int radeon_atpx_validate(struct radeon_atpx *atpx)
143{ 153{
144 /* make sure required functions are enabled */ 154 u32 valid_bits = 0;
145 /* dGPU power control is required */
146 if (atpx->functions.power_cntl == false) {
147 printk("ATPX dGPU power cntl not present, forcing\n");
148 atpx->functions.power_cntl = true;
149 }
150 155
151 if (atpx->functions.px_params) { 156 if (atpx->functions.px_params) {
152 union acpi_object *info; 157 union acpi_object *info;
153 struct atpx_px_params output; 158 struct atpx_px_params output;
154 size_t size; 159 size_t size;
155 u32 valid_bits;
156 160
157 info = radeon_atpx_call(atpx->handle, ATPX_FUNCTION_GET_PX_PARAMETERS, NULL); 161 info = radeon_atpx_call(atpx->handle, ATPX_FUNCTION_GET_PX_PARAMETERS, NULL);
158 if (!info) 162 if (!info)
@@ -171,19 +175,42 @@ static int radeon_atpx_validate(struct radeon_atpx *atpx)
171 memcpy(&output, info->buffer.pointer, size); 175 memcpy(&output, info->buffer.pointer, size);
172 176
173 valid_bits = output.flags & output.valid_flags; 177 valid_bits = output.flags & output.valid_flags;
174 /* if separate mux flag is set, mux controls are required */
175 if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) {
176 atpx->functions.i2c_mux_cntl = true;
177 atpx->functions.disp_mux_cntl = true;
178 }
179 /* if any outputs are muxed, mux controls are required */
180 if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED |
181 ATPX_TV_SIGNAL_MUXED |
182 ATPX_DFP_SIGNAL_MUXED))
183 atpx->functions.disp_mux_cntl = true;
184 178
185 kfree(info); 179 kfree(info);
186 } 180 }
181
182 /* if separate mux flag is set, mux controls are required */
183 if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) {
184 atpx->functions.i2c_mux_cntl = true;
185 atpx->functions.disp_mux_cntl = true;
186 }
187 /* if any outputs are muxed, mux controls are required */
188 if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED |
189 ATPX_TV_SIGNAL_MUXED |
190 ATPX_DFP_SIGNAL_MUXED))
191 atpx->functions.disp_mux_cntl = true;
192
193 /* some bioses set these bits rather than flagging power_cntl as supported */
194 if (valid_bits & (ATPX_DYNAMIC_PX_SUPPORTED |
195 ATPX_DYNAMIC_DGPU_POWER_OFF_SUPPORTED))
196 atpx->functions.power_cntl = true;
197
198 atpx->is_hybrid = false;
199 if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
200 printk("ATPX Hybrid Graphics\n");
201#if 1
202 /* This is a temporary hack until the D3 cold support
203 * makes it upstream. The ATPX power_control method seems
204 * to still work on even if the system should be using
205 * the new standardized hybrid D3 cold ACPI interface.
206 */
207 atpx->functions.power_cntl = true;
208#else
209 atpx->functions.power_cntl = false;
210#endif
211 atpx->is_hybrid = true;
212 }
213
187 return 0; 214 return 0;
188} 215}
189 216
@@ -258,6 +285,10 @@ static int radeon_atpx_set_discrete_state(struct radeon_atpx *atpx, u8 state)
258 if (!info) 285 if (!info)
259 return -EIO; 286 return -EIO;
260 kfree(info); 287 kfree(info);
288
289 /* 200ms delay is required after off */
290 if (state == 0)
291 msleep(200);
261 } 292 }
262 return 0; 293 return 0;
263} 294}
@@ -505,7 +536,6 @@ static int radeon_atpx_get_client_id(struct pci_dev *pdev)
505static const struct vga_switcheroo_handler radeon_atpx_handler = { 536static const struct vga_switcheroo_handler radeon_atpx_handler = {
506 .switchto = radeon_atpx_switchto, 537 .switchto = radeon_atpx_switchto,
507 .power_state = radeon_atpx_power_state, 538 .power_state = radeon_atpx_power_state,
508 .init = radeon_atpx_init,
509 .get_client_id = radeon_atpx_get_client_id, 539 .get_client_id = radeon_atpx_get_client_id,
510}; 540};
511 541
@@ -541,6 +571,7 @@ static bool radeon_atpx_detect(void)
541 printk(KERN_INFO "vga_switcheroo: detected switching method %s handle\n", 571 printk(KERN_INFO "vga_switcheroo: detected switching method %s handle\n",
542 acpi_method_name); 572 acpi_method_name);
543 radeon_atpx_priv.atpx_detected = true; 573 radeon_atpx_priv.atpx_detected = true;
574 radeon_atpx_init();
544 return true; 575 return true;
545 } 576 }
546 return false; 577 return false;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 81a63d7f5cd9..b79f3b002471 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -2064,7 +2064,6 @@ radeon_add_atom_connector(struct drm_device *dev,
2064 RADEON_OUTPUT_CSC_BYPASS); 2064 RADEON_OUTPUT_CSC_BYPASS);
2065 /* no HPD on analog connectors */ 2065 /* no HPD on analog connectors */
2066 radeon_connector->hpd.hpd = RADEON_HPD_NONE; 2066 radeon_connector->hpd.hpd = RADEON_HPD_NONE;
2067 connector->polled = DRM_CONNECTOR_POLL_CONNECT;
2068 connector->interlace_allowed = true; 2067 connector->interlace_allowed = true;
2069 connector->doublescan_allowed = true; 2068 connector->doublescan_allowed = true;
2070 break; 2069 break;
@@ -2314,8 +2313,10 @@ radeon_add_atom_connector(struct drm_device *dev,
2314 } 2313 }
2315 2314
2316 if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) { 2315 if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) {
2317 if (i2c_bus->valid) 2316 if (i2c_bus->valid) {
2318 connector->polled = DRM_CONNECTOR_POLL_CONNECT; 2317 connector->polled = DRM_CONNECTOR_POLL_CONNECT |
2318 DRM_CONNECTOR_POLL_DISCONNECT;
2319 }
2319 } else 2320 } else
2320 connector->polled = DRM_CONNECTOR_POLL_HPD; 2321 connector->polled = DRM_CONNECTOR_POLL_HPD;
2321 2322
@@ -2391,7 +2392,6 @@ radeon_add_legacy_connector(struct drm_device *dev,
2391 1); 2392 1);
2392 /* no HPD on analog connectors */ 2393 /* no HPD on analog connectors */
2393 radeon_connector->hpd.hpd = RADEON_HPD_NONE; 2394 radeon_connector->hpd.hpd = RADEON_HPD_NONE;
2394 connector->polled = DRM_CONNECTOR_POLL_CONNECT;
2395 connector->interlace_allowed = true; 2395 connector->interlace_allowed = true;
2396 connector->doublescan_allowed = true; 2396 connector->doublescan_allowed = true;
2397 break; 2397 break;
@@ -2476,10 +2476,13 @@ radeon_add_legacy_connector(struct drm_device *dev,
2476 } 2476 }
2477 2477
2478 if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) { 2478 if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) {
2479 if (i2c_bus->valid) 2479 if (i2c_bus->valid) {
2480 connector->polled = DRM_CONNECTOR_POLL_CONNECT; 2480 connector->polled = DRM_CONNECTOR_POLL_CONNECT |
2481 DRM_CONNECTOR_POLL_DISCONNECT;
2482 }
2481 } else 2483 } else
2482 connector->polled = DRM_CONNECTOR_POLL_HPD; 2484 connector->polled = DRM_CONNECTOR_POLL_HPD;
2485
2483 connector->display_info.subpixel_order = subpixel_order; 2486 connector->display_info.subpixel_order = subpixel_order;
2484 drm_connector_register(connector); 2487 drm_connector_register(connector);
2485} 2488}
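
Both the ATOM and legacy connector-init paths now apply one rule: analog connectors no longer get an unconditional POLL_CONNECT, a connector without HPD but with a usable DDC/I2C bus is polled for both connect and disconnect, and HPD-capable connectors rely on the hotplug interrupt alone. A compact sketch of that decision follows; the flag values are illustrative rather than the DRM_CONNECTOR_POLL_* constants.

#include <stdbool.h>
#include <stdio.h>

#define POLL_HPD         (1u << 0)
#define POLL_CONNECT     (1u << 1)
#define POLL_DISCONNECT  (1u << 2)

static unsigned int pick_polling(bool has_hpd, bool has_i2c)
{
    if (has_hpd)
        return POLL_HPD;                        /* hotplug IRQ, no polling */
    if (has_i2c)
        return POLL_CONNECT | POLL_DISCONNECT;  /* probe via DDC both ways */
    return 0;                                   /* nothing to detect with */
}

int main(void)
{
    printf("hpd: %#x, i2c-only: %#x, none: %#x\n",
           pick_polling(true, true), pick_polling(false, true),
           pick_polling(false, false));
    return 0;
}
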
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 21c44b2293bc..a00dd2f74527 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -30,6 +30,7 @@
30#include <drm/drmP.h> 30#include <drm/drmP.h>
31#include <drm/drm_crtc_helper.h> 31#include <drm/drm_crtc_helper.h>
32#include <drm/radeon_drm.h> 32#include <drm/radeon_drm.h>
33#include <linux/pm_runtime.h>
33#include <linux/vgaarb.h> 34#include <linux/vgaarb.h>
34#include <linux/vga_switcheroo.h> 35#include <linux/vga_switcheroo.h>
35#include <linux/efi.h> 36#include <linux/efi.h>
@@ -1526,6 +1527,9 @@ int radeon_device_init(struct radeon_device *rdev,
1526 return 0; 1527 return 0;
1527 1528
1528failed: 1529failed:
1530 /* balance pm_runtime_get_sync() in radeon_driver_unload_kms() */
1531 if (radeon_is_px(ddev))
1532 pm_runtime_put_noidle(ddev->dev);
1529 if (runtime) 1533 if (runtime)
1530 vga_switcheroo_fini_domain_pm_ops(rdev->dev); 1534 vga_switcheroo_fini_domain_pm_ops(rdev->dev);
1531 return r; 1535 return r;
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 6a41b4982647..c3206fb8f4cf 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -231,19 +231,21 @@ void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
231 *blue = radeon_crtc->lut_b[regno] << 6; 231 *blue = radeon_crtc->lut_b[regno] << 6;
232} 232}
233 233
234static void radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, 234static int radeon_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
235 u16 *blue, uint32_t start, uint32_t size) 235 u16 *blue, uint32_t size)
236{ 236{
237 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 237 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
238 int end = (start + size > 256) ? 256 : start + size, i; 238 int i;
239 239
240 /* userspace palettes are always correct as is */ 240 /* userspace palettes are always correct as is */
241 for (i = start; i < end; i++) { 241 for (i = 0; i < size; i++) {
242 radeon_crtc->lut_r[i] = red[i] >> 6; 242 radeon_crtc->lut_r[i] = red[i] >> 6;
243 radeon_crtc->lut_g[i] = green[i] >> 6; 243 radeon_crtc->lut_g[i] = green[i] >> 6;
244 radeon_crtc->lut_b[i] = blue[i] >> 6; 244 radeon_crtc->lut_b[i] = blue[i] >> 6;
245 } 245 }
246 radeon_crtc_load_lut(crtc); 246 radeon_crtc_load_lut(crtc);
247
248 return 0;
247} 249}
248 250
249static void radeon_crtc_destroy(struct drm_crtc *crtc) 251static void radeon_crtc_destroy(struct drm_crtc *crtc)
@@ -381,7 +383,7 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
381 383
382 spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); 384 spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
383 385
384 drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id); 386 drm_crtc_vblank_put(&radeon_crtc->base);
385 radeon_irq_kms_pflip_irq_put(rdev, work->crtc_id); 387 radeon_irq_kms_pflip_irq_put(rdev, work->crtc_id);
386 queue_work(radeon_crtc->flip_queue, &work->unpin_work); 388 queue_work(radeon_crtc->flip_queue, &work->unpin_work);
387} 389}
@@ -598,7 +600,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
598 } 600 }
599 work->base = base; 601 work->base = base;
600 602
601 r = drm_vblank_get(crtc->dev, radeon_crtc->crtc_id); 603 r = drm_crtc_vblank_get(crtc);
602 if (r) { 604 if (r) {
603 DRM_ERROR("failed to get vblank before flip\n"); 605 DRM_ERROR("failed to get vblank before flip\n");
604 goto pflip_cleanup; 606 goto pflip_cleanup;
@@ -625,7 +627,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
625 return 0; 627 return 0;
626 628
627vblank_cleanup: 629vblank_cleanup:
628 drm_vblank_put(crtc->dev, radeon_crtc->crtc_id); 630 drm_crtc_vblank_put(crtc);
629 631
630pflip_cleanup: 632pflip_cleanup:
631 if (unlikely(radeon_bo_reserve(new_rbo, false) != 0)) { 633 if (unlikely(radeon_bo_reserve(new_rbo, false) != 0)) {
@@ -688,6 +690,7 @@ radeon_crtc_set_config(struct drm_mode_set *set)
688 pm_runtime_put_autosuspend(dev->dev); 690 pm_runtime_put_autosuspend(dev->dev);
689 return ret; 691 return ret;
690} 692}
693
691static const struct drm_crtc_funcs radeon_crtc_funcs = { 694static const struct drm_crtc_funcs radeon_crtc_funcs = {
692 .cursor_set2 = radeon_crtc_cursor_set2, 695 .cursor_set2 = radeon_crtc_cursor_set2,
693 .cursor_move = radeon_crtc_cursor_move, 696 .cursor_move = radeon_crtc_cursor_move,
@@ -711,7 +714,7 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
711 714
712 drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256); 715 drm_mode_crtc_set_gamma_size(&radeon_crtc->base, 256);
713 radeon_crtc->crtc_id = index; 716 radeon_crtc->crtc_id = index;
714 radeon_crtc->flip_queue = create_singlethread_workqueue("radeon-crtc"); 717 radeon_crtc->flip_queue = alloc_workqueue("radeon-crtc", WQ_HIGHPRI, 0);
715 rdev->mode_info.crtcs[index] = radeon_crtc; 718 rdev->mode_info.crtcs[index] = radeon_crtc;
716 719
717 if (rdev->family >= CHIP_BONAIRE) { 720 if (rdev->family >= CHIP_BONAIRE) {
@@ -1321,9 +1324,7 @@ static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
1321{ 1324{
1322 struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb); 1325 struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
1323 1326
1324 if (radeon_fb->obj) { 1327 drm_gem_object_unreference_unlocked(radeon_fb->obj);
1325 drm_gem_object_unreference_unlocked(radeon_fb->obj);
1326 }
1327 drm_framebuffer_cleanup(fb); 1328 drm_framebuffer_cleanup(fb);
1328 kfree(radeon_fb); 1329 kfree(radeon_fb);
1329} 1330}
@@ -1708,6 +1709,7 @@ void radeon_modeset_fini(struct radeon_device *rdev)
1708 radeon_afmt_fini(rdev); 1709 radeon_afmt_fini(rdev);
1709 drm_kms_helper_poll_fini(rdev->ddev); 1710 drm_kms_helper_poll_fini(rdev->ddev);
1710 radeon_hpd_fini(rdev); 1711 radeon_hpd_fini(rdev);
1712 drm_crtc_force_disable_all(rdev->ddev);
1711 drm_mode_config_cleanup(rdev->ddev); 1713 drm_mode_config_cleanup(rdev->ddev);
1712 rdev->mode_info.mode_config_initialized = false; 1714 rdev->mode_info.mode_config_initialized = false;
1713 } 1715 }
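
radeon_crtc_gamma_set is adapted to the new drm_crtc_funcs .gamma_set prototype: it takes only a size, returns int, and still narrows the 16-bit userspace gamma ramps to the 10-bit hardware LUT with a right shift by 6. A stand-alone sketch of that conversion, with array bounds and names chosen for the example:

#include <stdint.h>
#include <stdio.h>

#define LUT_SIZE 256

/* Convert 16-bit userspace gamma ramps to a 10-bit hardware LUT. */
static void load_lut(const uint16_t *red, const uint16_t *green,
                     const uint16_t *blue, unsigned int size,
                     uint16_t lut_r[LUT_SIZE], uint16_t lut_g[LUT_SIZE],
                     uint16_t lut_b[LUT_SIZE])
{
    for (unsigned int i = 0; i < size && i < LUT_SIZE; i++) {
        lut_r[i] = red[i]   >> 6;   /* 0..65535 -> 0..1023 */
        lut_g[i] = green[i] >> 6;
        lut_b[i] = blue[i]  >> 6;
    }
}

int main(void)
{
    uint16_t r[LUT_SIZE], g[LUT_SIZE], b[LUT_SIZE];
    uint16_t lr[LUT_SIZE], lg[LUT_SIZE], lb[LUT_SIZE];

    for (int i = 0; i < LUT_SIZE; i++)
        r[i] = g[i] = b[i] = (uint16_t)(i * 257);   /* linear ramp */

    load_lut(r, g, b, LUT_SIZE, lr, lg, lb);
    printf("lut[255] = %u\n", (unsigned)lr[255]);   /* 65535 >> 6 == 1023 */
    return 0;
}
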
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index b55aa740171f..c01a7c6abb49 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -34,11 +34,9 @@
34#include "radeon_drv.h" 34#include "radeon_drv.h"
35 35
36#include <drm/drm_pciids.h> 36#include <drm/drm_pciids.h>
37#include <linux/apple-gmux.h>
38#include <linux/console.h> 37#include <linux/console.h>
39#include <linux/module.h> 38#include <linux/module.h>
40#include <linux/pm_runtime.h> 39#include <linux/pm_runtime.h>
41#include <linux/vgaarb.h>
42#include <linux/vga_switcheroo.h> 40#include <linux/vga_switcheroo.h>
43#include <drm/drm_gem.h> 41#include <drm/drm_gem.h>
44 42
@@ -95,9 +93,10 @@
95 * 2.43.0 - RADEON_INFO_GPU_RESET_COUNTER 93 * 2.43.0 - RADEON_INFO_GPU_RESET_COUNTER
96 * 2.44.0 - SET_APPEND_CNT packet3 support 94 * 2.44.0 - SET_APPEND_CNT packet3 support
97 * 2.45.0 - Allow setting shader registers using DMA/COPY packet3 on SI 95 * 2.45.0 - Allow setting shader registers using DMA/COPY packet3 on SI
96 * 2.46.0 - Add PFP_SYNC_ME support on evergreen
98 */ 97 */
99#define KMS_DRIVER_MAJOR 2 98#define KMS_DRIVER_MAJOR 2
100#define KMS_DRIVER_MINOR 45 99#define KMS_DRIVER_MINOR 46
101#define KMS_DRIVER_PATCHLEVEL 0 100#define KMS_DRIVER_PATCHLEVEL 0
102int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 101int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
103int radeon_driver_unload_kms(struct drm_device *dev); 102int radeon_driver_unload_kms(struct drm_device *dev);
@@ -164,9 +163,13 @@ void radeon_debugfs_cleanup(struct drm_minor *minor);
164#if defined(CONFIG_VGA_SWITCHEROO) 163#if defined(CONFIG_VGA_SWITCHEROO)
165void radeon_register_atpx_handler(void); 164void radeon_register_atpx_handler(void);
166void radeon_unregister_atpx_handler(void); 165void radeon_unregister_atpx_handler(void);
166bool radeon_has_atpx_dgpu_power_cntl(void);
167bool radeon_is_atpx_hybrid(void);
167#else 168#else
168static inline void radeon_register_atpx_handler(void) {} 169static inline void radeon_register_atpx_handler(void) {}
169static inline void radeon_unregister_atpx_handler(void) {} 170static inline void radeon_unregister_atpx_handler(void) {}
171static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
172static inline bool radeon_is_atpx_hybrid(void) { return false; }
170#endif 173#endif
171 174
172int radeon_no_wb; 175int radeon_no_wb;
@@ -340,13 +343,7 @@ static int radeon_pci_probe(struct pci_dev *pdev,
340 if (ret == -EPROBE_DEFER) 343 if (ret == -EPROBE_DEFER)
341 return ret; 344 return ret;
342 345
343 /* 346 if (vga_switcheroo_client_probe_defer(pdev))
344 * apple-gmux is needed on dual GPU MacBook Pro
345 * to probe the panel if we're the inactive GPU.
346 */
347 if (IS_ENABLED(CONFIG_VGA_ARB) && IS_ENABLED(CONFIG_VGA_SWITCHEROO) &&
348 apple_gmux_present() && pdev != vga_default_device() &&
349 !vga_switcheroo_handler_flags())
350 return -EPROBE_DEFER; 347 return -EPROBE_DEFER;
351 348
352 /* Get rid of things like offb */ 349 /* Get rid of things like offb */
@@ -412,7 +409,10 @@ static int radeon_pmops_runtime_suspend(struct device *dev)
412 pci_save_state(pdev); 409 pci_save_state(pdev);
413 pci_disable_device(pdev); 410 pci_disable_device(pdev);
414 pci_ignore_hotplug(pdev); 411 pci_ignore_hotplug(pdev);
415 pci_set_power_state(pdev, PCI_D3cold); 412 if (radeon_is_atpx_hybrid())
413 pci_set_power_state(pdev, PCI_D3cold);
414 else if (!radeon_has_atpx_dgpu_power_cntl())
415 pci_set_power_state(pdev, PCI_D3hot);
416 drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF; 416 drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
417 417
418 return 0; 418 return 0;
@@ -429,7 +429,9 @@ static int radeon_pmops_runtime_resume(struct device *dev)
429 429
430 drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 430 drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
431 431
432 pci_set_power_state(pdev, PCI_D0); 432 if (radeon_is_atpx_hybrid() ||
433 !radeon_has_atpx_dgpu_power_cntl())
434 pci_set_power_state(pdev, PCI_D0);
433 pci_restore_state(pdev); 435 pci_restore_state(pdev);
434 ret = pci_enable_device(pdev); 436 ret = pci_enable_device(pdev);
435 if (ret) 437 if (ret)
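
Runtime suspend no longer forces PCI_D3cold for every PX system: hybrid-graphics systems still go to D3cold, systems where ATPX itself cuts dGPU power leave the PCI power state alone, and the remaining systems fall back to D3hot; the resume hunk mirrors this by restoring D0 only in the two cases where a state was actually set. A sketch of the suspend-side decision, with invented enum names standing in for the PCI power states:

#include <stdbool.h>
#include <stdio.h>

enum pci_pm { PM_UNCHANGED, PM_D3HOT, PM_D3COLD };

static enum pci_pm suspend_target(bool atpx_hybrid, bool atpx_power_cntl)
{
    if (atpx_hybrid)
        return PM_D3COLD;      /* hybrid gfx: request D3cold */
    if (!atpx_power_cntl)
        return PM_D3HOT;       /* no ATPX power control: plain D3hot */
    return PM_UNCHANGED;       /* ATPX power_control cuts power itself */
}

int main(void)
{
    /* 2 = D3cold, 1 = D3hot, 0 = leave the state alone */
    printf("hybrid=%d no-power-cntl=%d legacy-atpx=%d\n",
           suspend_target(true, false), suspend_target(false, false),
           suspend_target(false, true));
    return 0;
}
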
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 414953c46a38..835563c1f0ed 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -63,7 +63,10 @@ int radeon_driver_unload_kms(struct drm_device *dev)
63 if (rdev->rmmio == NULL) 63 if (rdev->rmmio == NULL)
64 goto done_free; 64 goto done_free;
65 65
66 pm_runtime_get_sync(dev->dev); 66 if (radeon_is_px(dev)) {
67 pm_runtime_get_sync(dev->dev);
68 pm_runtime_forbid(dev->dev);
69 }
67 70
68 radeon_kfd_device_fini(rdev); 71 radeon_kfd_device_fini(rdev);
69 72
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 478d4099b0d0..d0de4022fff9 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -332,14 +332,14 @@ static void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
332 WREG32_P(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl, ~(mask | crtc_ext_cntl)); 332 WREG32_P(RADEON_CRTC_EXT_CNTL, crtc_ext_cntl, ~(mask | crtc_ext_cntl));
333 } 333 }
334 if (dev->num_crtcs > radeon_crtc->crtc_id) 334 if (dev->num_crtcs > radeon_crtc->crtc_id)
335 drm_vblank_on(dev, radeon_crtc->crtc_id); 335 drm_crtc_vblank_on(crtc);
336 radeon_crtc_load_lut(crtc); 336 radeon_crtc_load_lut(crtc);
337 break; 337 break;
338 case DRM_MODE_DPMS_STANDBY: 338 case DRM_MODE_DPMS_STANDBY:
339 case DRM_MODE_DPMS_SUSPEND: 339 case DRM_MODE_DPMS_SUSPEND:
340 case DRM_MODE_DPMS_OFF: 340 case DRM_MODE_DPMS_OFF:
341 if (dev->num_crtcs > radeon_crtc->crtc_id) 341 if (dev->num_crtcs > radeon_crtc->crtc_id)
342 drm_vblank_off(dev, radeon_crtc->crtc_id); 342 drm_crtc_vblank_off(crtc);
343 if (radeon_crtc->crtc_id) 343 if (radeon_crtc->crtc_id)
344 WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~(RADEON_CRTC2_EN | mask)); 344 WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~(RADEON_CRTC2_EN | mask));
345 else { 345 else {
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 38226d925a5b..4b6542538ff9 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -246,6 +246,7 @@ static void radeon_set_power_state(struct radeon_device *rdev)
246 246
247static void radeon_pm_set_clocks(struct radeon_device *rdev) 247static void radeon_pm_set_clocks(struct radeon_device *rdev)
248{ 248{
249 struct drm_crtc *crtc;
249 int i, r; 250 int i, r;
250 251
251 /* no need to take locks, etc. if nothing's going to change */ 252 /* no need to take locks, etc. if nothing's going to change */
@@ -274,26 +275,30 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
274 radeon_unmap_vram_bos(rdev); 275 radeon_unmap_vram_bos(rdev);
275 276
276 if (rdev->irq.installed) { 277 if (rdev->irq.installed) {
277 for (i = 0; i < rdev->num_crtc; i++) { 278 i = 0;
279 drm_for_each_crtc(crtc, rdev->ddev) {
278 if (rdev->pm.active_crtcs & (1 << i)) { 280 if (rdev->pm.active_crtcs & (1 << i)) {
279 /* This can fail if a modeset is in progress */ 281 /* This can fail if a modeset is in progress */
280 if (drm_vblank_get(rdev->ddev, i) == 0) 282 if (drm_crtc_vblank_get(crtc) == 0)
281 rdev->pm.req_vblank |= (1 << i); 283 rdev->pm.req_vblank |= (1 << i);
282 else 284 else
283 DRM_DEBUG_DRIVER("crtc %d no vblank, can glitch\n", 285 DRM_DEBUG_DRIVER("crtc %d no vblank, can glitch\n",
284 i); 286 i);
285 } 287 }
288 i++;
286 } 289 }
287 } 290 }
288 291
289 radeon_set_power_state(rdev); 292 radeon_set_power_state(rdev);
290 293
291 if (rdev->irq.installed) { 294 if (rdev->irq.installed) {
292 for (i = 0; i < rdev->num_crtc; i++) { 295 i = 0;
296 drm_for_each_crtc(crtc, rdev->ddev) {
293 if (rdev->pm.req_vblank & (1 << i)) { 297 if (rdev->pm.req_vblank & (1 << i)) {
294 rdev->pm.req_vblank &= ~(1 << i); 298 rdev->pm.req_vblank &= ~(1 << i);
295 drm_vblank_put(rdev->ddev, i); 299 drm_crtc_vblank_put(crtc);
296 } 300 }
301 i++;
297 } 302 }
298 } 303 }
299 304
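
radeon_pm_set_clocks keeps its per-index active_crtcs/req_vblank bitmasks but now walks the CRTC list with drm_for_each_crtc, incrementing a separate counter as it goes instead of indexing up to rdev->num_crtc directly. A tiny sketch of that "iterate objects, test a bitmask by position" pattern:

#include <stdio.h>

struct crtc { const char *name; };

int main(void)
{
    struct crtc crtcs[] = { {"crtc-0"}, {"crtc-1"}, {"crtc-2"} };
    unsigned int active_mask = 0x5;   /* crtc-0 and crtc-2 are active */
    int i = 0;

    /* stand-in for drm_for_each_crtc(crtc, dev): walk the list while
     * keeping a separate index for the bitmask bookkeeping */
    for (size_t n = 0; n < sizeof(crtcs) / sizeof(crtcs[0]); n++) {
        if (active_mask & (1u << i))
            printf("%s: grab vblank reference\n", crtcs[n].name);
        i++;
    }
    return 0;
}
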
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 590b0377fbe2..ffdad81ef964 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -300,8 +300,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
300 if (IS_ERR(fence)) 300 if (IS_ERR(fence))
301 return PTR_ERR(fence); 301 return PTR_ERR(fence);
302 302
303 r = ttm_bo_move_accel_cleanup(bo, &fence->base, 303 r = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, new_mem);
304 evict, no_wait_gpu, new_mem);
305 radeon_fence_unref(&fence); 304 radeon_fence_unref(&fence);
306 return r; 305 return r;
307} 306}
@@ -403,6 +402,10 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
403 struct ttm_mem_reg *old_mem = &bo->mem; 402 struct ttm_mem_reg *old_mem = &bo->mem;
404 int r; 403 int r;
405 404
405 r = ttm_bo_wait(bo, interruptible, no_wait_gpu);
406 if (r)
407 return r;
408
406 /* Can't move a pinned BO */ 409 /* Can't move a pinned BO */
407 rbo = container_of(bo, struct radeon_bo, tbo); 410 rbo = container_of(bo, struct radeon_bo, tbo);
408 if (WARN_ON_ONCE(rbo->pin_count > 0)) 411 if (WARN_ON_ONCE(rbo->pin_count > 0))
@@ -441,7 +444,8 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
441 444
442 if (r) { 445 if (r) {
443memcpy: 446memcpy:
444 r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); 447 r = ttm_bo_move_memcpy(bo, evict, interruptible,
448 no_wait_gpu, new_mem);
445 if (r) { 449 if (r) {
446 return r; 450 return r;
447 } 451 }
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index b30e719dd56d..2523ca96c6c7 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -50,6 +50,7 @@ MODULE_FIRMWARE("radeon/tahiti_ce.bin");
50MODULE_FIRMWARE("radeon/tahiti_mc.bin"); 50MODULE_FIRMWARE("radeon/tahiti_mc.bin");
51MODULE_FIRMWARE("radeon/tahiti_rlc.bin"); 51MODULE_FIRMWARE("radeon/tahiti_rlc.bin");
52MODULE_FIRMWARE("radeon/tahiti_smc.bin"); 52MODULE_FIRMWARE("radeon/tahiti_smc.bin");
53MODULE_FIRMWARE("radeon/tahiti_k_smc.bin");
53 54
54MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin"); 55MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin");
55MODULE_FIRMWARE("radeon/PITCAIRN_me.bin"); 56MODULE_FIRMWARE("radeon/PITCAIRN_me.bin");
@@ -65,6 +66,7 @@ MODULE_FIRMWARE("radeon/pitcairn_ce.bin");
65MODULE_FIRMWARE("radeon/pitcairn_mc.bin"); 66MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
66MODULE_FIRMWARE("radeon/pitcairn_rlc.bin"); 67MODULE_FIRMWARE("radeon/pitcairn_rlc.bin");
67MODULE_FIRMWARE("radeon/pitcairn_smc.bin"); 68MODULE_FIRMWARE("radeon/pitcairn_smc.bin");
69MODULE_FIRMWARE("radeon/pitcairn_k_smc.bin");
68 70
69MODULE_FIRMWARE("radeon/VERDE_pfp.bin"); 71MODULE_FIRMWARE("radeon/VERDE_pfp.bin");
70MODULE_FIRMWARE("radeon/VERDE_me.bin"); 72MODULE_FIRMWARE("radeon/VERDE_me.bin");
@@ -80,6 +82,7 @@ MODULE_FIRMWARE("radeon/verde_ce.bin");
80MODULE_FIRMWARE("radeon/verde_mc.bin"); 82MODULE_FIRMWARE("radeon/verde_mc.bin");
81MODULE_FIRMWARE("radeon/verde_rlc.bin"); 83MODULE_FIRMWARE("radeon/verde_rlc.bin");
82MODULE_FIRMWARE("radeon/verde_smc.bin"); 84MODULE_FIRMWARE("radeon/verde_smc.bin");
85MODULE_FIRMWARE("radeon/verde_k_smc.bin");
83 86
84MODULE_FIRMWARE("radeon/OLAND_pfp.bin"); 87MODULE_FIRMWARE("radeon/OLAND_pfp.bin");
85MODULE_FIRMWARE("radeon/OLAND_me.bin"); 88MODULE_FIRMWARE("radeon/OLAND_me.bin");
@@ -95,6 +98,7 @@ MODULE_FIRMWARE("radeon/oland_ce.bin");
95MODULE_FIRMWARE("radeon/oland_mc.bin"); 98MODULE_FIRMWARE("radeon/oland_mc.bin");
96MODULE_FIRMWARE("radeon/oland_rlc.bin"); 99MODULE_FIRMWARE("radeon/oland_rlc.bin");
97MODULE_FIRMWARE("radeon/oland_smc.bin"); 100MODULE_FIRMWARE("radeon/oland_smc.bin");
101MODULE_FIRMWARE("radeon/oland_k_smc.bin");
98 102
99MODULE_FIRMWARE("radeon/HAINAN_pfp.bin"); 103MODULE_FIRMWARE("radeon/HAINAN_pfp.bin");
100MODULE_FIRMWARE("radeon/HAINAN_me.bin"); 104MODULE_FIRMWARE("radeon/HAINAN_me.bin");
@@ -110,6 +114,7 @@ MODULE_FIRMWARE("radeon/hainan_ce.bin");
110MODULE_FIRMWARE("radeon/hainan_mc.bin"); 114MODULE_FIRMWARE("radeon/hainan_mc.bin");
111MODULE_FIRMWARE("radeon/hainan_rlc.bin"); 115MODULE_FIRMWARE("radeon/hainan_rlc.bin");
112MODULE_FIRMWARE("radeon/hainan_smc.bin"); 116MODULE_FIRMWARE("radeon/hainan_smc.bin");
117MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
113 118
114static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh); 119static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh);
115static void si_pcie_gen3_enable(struct radeon_device *rdev); 120static void si_pcie_gen3_enable(struct radeon_device *rdev);
@@ -1653,12 +1658,16 @@ static int si_init_microcode(struct radeon_device *rdev)
1653 char fw_name[30]; 1658 char fw_name[30];
1654 int err; 1659 int err;
1655 int new_fw = 0; 1660 int new_fw = 0;
1661 bool new_smc = false;
1656 1662
1657 DRM_DEBUG("\n"); 1663 DRM_DEBUG("\n");
1658 1664
1659 switch (rdev->family) { 1665 switch (rdev->family) {
1660 case CHIP_TAHITI: 1666 case CHIP_TAHITI:
1661 chip_name = "TAHITI"; 1667 chip_name = "TAHITI";
1668 /* XXX: figure out which Tahitis need the new ucode */
1669 if (0)
1670 new_smc = true;
1662 new_chip_name = "tahiti"; 1671 new_chip_name = "tahiti";
1663 pfp_req_size = SI_PFP_UCODE_SIZE * 4; 1672 pfp_req_size = SI_PFP_UCODE_SIZE * 4;
1664 me_req_size = SI_PM4_UCODE_SIZE * 4; 1673 me_req_size = SI_PM4_UCODE_SIZE * 4;
@@ -1670,6 +1679,13 @@ static int si_init_microcode(struct radeon_device *rdev)
1670 break; 1679 break;
1671 case CHIP_PITCAIRN: 1680 case CHIP_PITCAIRN:
1672 chip_name = "PITCAIRN"; 1681 chip_name = "PITCAIRN";
1682 if ((rdev->pdev->revision == 0x81) ||
1683 (rdev->pdev->device == 0x6810) ||
1684 (rdev->pdev->device == 0x6811) ||
1685 (rdev->pdev->device == 0x6816) ||
1686 (rdev->pdev->device == 0x6817) ||
1687 (rdev->pdev->device == 0x6806))
1688 new_smc = true;
1673 new_chip_name = "pitcairn"; 1689 new_chip_name = "pitcairn";
1674 pfp_req_size = SI_PFP_UCODE_SIZE * 4; 1690 pfp_req_size = SI_PFP_UCODE_SIZE * 4;
1675 me_req_size = SI_PM4_UCODE_SIZE * 4; 1691 me_req_size = SI_PM4_UCODE_SIZE * 4;
@@ -1681,6 +1697,16 @@ static int si_init_microcode(struct radeon_device *rdev)
1681 break; 1697 break;
1682 case CHIP_VERDE: 1698 case CHIP_VERDE:
1683 chip_name = "VERDE"; 1699 chip_name = "VERDE";
1700 if ((rdev->pdev->revision == 0x81) ||
1701 (rdev->pdev->revision == 0x83) ||
1702 (rdev->pdev->revision == 0x87) ||
1703 (rdev->pdev->device == 0x6820) ||
1704 (rdev->pdev->device == 0x6821) ||
1705 (rdev->pdev->device == 0x6822) ||
1706 (rdev->pdev->device == 0x6823) ||
1707 (rdev->pdev->device == 0x682A) ||
1708 (rdev->pdev->device == 0x682B))
1709 new_smc = true;
1684 new_chip_name = "verde"; 1710 new_chip_name = "verde";
1685 pfp_req_size = SI_PFP_UCODE_SIZE * 4; 1711 pfp_req_size = SI_PFP_UCODE_SIZE * 4;
1686 me_req_size = SI_PM4_UCODE_SIZE * 4; 1712 me_req_size = SI_PM4_UCODE_SIZE * 4;
@@ -1692,6 +1718,13 @@ static int si_init_microcode(struct radeon_device *rdev)
1692 break; 1718 break;
1693 case CHIP_OLAND: 1719 case CHIP_OLAND:
1694 chip_name = "OLAND"; 1720 chip_name = "OLAND";
1721 if ((rdev->pdev->revision == 0xC7) ||
1722 (rdev->pdev->revision == 0x80) ||
1723 (rdev->pdev->revision == 0x81) ||
1724 (rdev->pdev->revision == 0x83) ||
1725 (rdev->pdev->device == 0x6604) ||
1726 (rdev->pdev->device == 0x6605))
1727 new_smc = true;
1695 new_chip_name = "oland"; 1728 new_chip_name = "oland";
1696 pfp_req_size = SI_PFP_UCODE_SIZE * 4; 1729 pfp_req_size = SI_PFP_UCODE_SIZE * 4;
1697 me_req_size = SI_PM4_UCODE_SIZE * 4; 1730 me_req_size = SI_PM4_UCODE_SIZE * 4;
@@ -1702,6 +1735,13 @@ static int si_init_microcode(struct radeon_device *rdev)
1702 break; 1735 break;
1703 case CHIP_HAINAN: 1736 case CHIP_HAINAN:
1704 chip_name = "HAINAN"; 1737 chip_name = "HAINAN";
1738 if ((rdev->pdev->revision == 0x81) ||
1739 (rdev->pdev->revision == 0x83) ||
1740 (rdev->pdev->revision == 0xC3) ||
1741 (rdev->pdev->device == 0x6664) ||
1742 (rdev->pdev->device == 0x6665) ||
1743 (rdev->pdev->device == 0x6667))
1744 new_smc = true;
1705 new_chip_name = "hainan"; 1745 new_chip_name = "hainan";
1706 pfp_req_size = SI_PFP_UCODE_SIZE * 4; 1746 pfp_req_size = SI_PFP_UCODE_SIZE * 4;
1707 me_req_size = SI_PM4_UCODE_SIZE * 4; 1747 me_req_size = SI_PM4_UCODE_SIZE * 4;
@@ -1847,7 +1887,10 @@ static int si_init_microcode(struct radeon_device *rdev)
1847 } 1887 }
1848 } 1888 }
1849 1889
1850 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name); 1890 if (new_smc)
1891 snprintf(fw_name, sizeof(fw_name), "radeon/%s_k_smc.bin", new_chip_name);
1892 else
1893 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);
1851 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); 1894 err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
1852 if (err) { 1895 if (err) {
1853 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); 1896 snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
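
si_init_microcode now sets a new_smc flag for specific PCI revision and device IDs in each SI family and, when set, asks for the "<chip>_k_smc.bin" SMC image instead of "<chip>_smc.bin", with the old all-caps name kept as a fallback when the request fails. A stand-alone sketch of the name selection; the firmware loading itself is out of scope here.

#include <stdbool.h>
#include <stdio.h>

static void smc_fw_name(char *buf, size_t len, const char *chip, bool new_smc)
{
    if (new_smc)
        snprintf(buf, len, "radeon/%s_k_smc.bin", chip);
    else
        snprintf(buf, len, "radeon/%s_smc.bin", chip);
}

int main(void)
{
    char fw_name[30];

    smc_fw_name(fw_name, sizeof(fw_name), "verde", true);
    printf("%s\n", fw_name);    /* radeon/verde_k_smc.bin */
    smc_fw_name(fw_name, sizeof(fw_name), "tahiti", false);
    printf("%s\n", fw_name);    /* radeon/tahiti_smc.bin */
    return 0;
}
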
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
index 7fc3ca5ce6c7..4c2fd056dd6d 100644
--- a/drivers/gpu/drm/rcar-du/Kconfig
+++ b/drivers/gpu/drm/rcar-du/Kconfig
@@ -6,7 +6,6 @@ config DRM_RCAR_DU
6 select DRM_KMS_HELPER 6 select DRM_KMS_HELPER
7 select DRM_KMS_CMA_HELPER 7 select DRM_KMS_CMA_HELPER
8 select DRM_GEM_CMA_HELPER 8 select DRM_GEM_CMA_HELPER
9 select DRM_KMS_FB_HELPER
10 select VIDEOMODE_HELPERS 9 select VIDEOMODE_HELPERS
11 help 10 help
12 Choose this option if you have an R-Car chipset. 11 Choose this option if you have an R-Car chipset.
diff --git a/drivers/gpu/drm/rcar-du/Makefile b/drivers/gpu/drm/rcar-du/Makefile
index 827711e28226..d3b44651061a 100644
--- a/drivers/gpu/drm/rcar-du/Makefile
+++ b/drivers/gpu/drm/rcar-du/Makefile
@@ -7,8 +7,8 @@ rcar-du-drm-y := rcar_du_crtc.o \
7 rcar_du_plane.o \ 7 rcar_du_plane.o \
8 rcar_du_vgacon.o 8 rcar_du_vgacon.o
9 9
10rcar-du-drm-$(CONFIG_DRM_RCAR_HDMI) += rcar_du_hdmicon.o \ 10rcar-du-drm-$(CONFIG_DRM_RCAR_HDMI) += rcar_du_hdmienc.o
11 rcar_du_hdmienc.o 11
12rcar-du-drm-$(CONFIG_DRM_RCAR_LVDS) += rcar_du_lvdsenc.o 12rcar-du-drm-$(CONFIG_DRM_RCAR_LVDS) += rcar_du_lvdsenc.o
13 13
14rcar-du-drm-$(CONFIG_DRM_RCAR_VSP) += rcar_du_vsp.o 14rcar-du-drm-$(CONFIG_DRM_RCAR_VSP) += rcar_du_vsp.o
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 0d8bdda736f9..e39fcef2e033 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -552,7 +552,7 @@ static irqreturn_t rcar_du_crtc_irq(int irq, void *arg)
552 rcar_du_crtc_write(rcrtc, DSRCR, status & DSRCR_MASK); 552 rcar_du_crtc_write(rcrtc, DSRCR, status & DSRCR_MASK);
553 553
554 if (status & DSSR_FRM) { 554 if (status & DSSR_FRM) {
555 drm_handle_vblank(rcrtc->crtc.dev, rcrtc->index); 555 drm_crtc_handle_vblank(&rcrtc->crtc);
556 rcar_du_crtc_finish_page_flip(rcrtc); 556 rcar_du_crtc_finish_page_flip(rcrtc);
557 ret = IRQ_HANDLED; 557 ret = IRQ_HANDLED;
558 } 558 }
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index fb9242d27883..899ef7a2a7b4 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -217,7 +217,7 @@ static struct drm_driver rcar_du_driver = {
217 .get_vblank_counter = drm_vblank_no_hw_counter, 217 .get_vblank_counter = drm_vblank_no_hw_counter,
218 .enable_vblank = rcar_du_enable_vblank, 218 .enable_vblank = rcar_du_enable_vblank,
219 .disable_vblank = rcar_du_disable_vblank, 219 .disable_vblank = rcar_du_disable_vblank,
220 .gem_free_object = drm_gem_cma_free_object, 220 .gem_free_object_unlocked = drm_gem_cma_free_object,
221 .gem_vm_ops = &drm_gem_cma_vm_ops, 221 .gem_vm_ops = &drm_gem_cma_vm_ops,
222 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 222 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
223 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 223 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
@@ -278,7 +278,6 @@ static int rcar_du_remove(struct platform_device *pdev)
278 struct rcar_du_device *rcdu = platform_get_drvdata(pdev); 278 struct rcar_du_device *rcdu = platform_get_drvdata(pdev);
279 struct drm_device *ddev = rcdu->ddev; 279 struct drm_device *ddev = rcdu->ddev;
280 280
281 drm_connector_unregister_all(ddev);
282 drm_dev_unregister(ddev); 281 drm_dev_unregister(ddev);
283 282
284 if (rcdu->fbdev) 283 if (rcdu->fbdev)
@@ -320,8 +319,6 @@ static int rcar_du_probe(struct platform_device *pdev)
320 if (!ddev) 319 if (!ddev)
321 return -ENOMEM; 320 return -ENOMEM;
322 321
323 drm_dev_set_unique(ddev, dev_name(&pdev->dev));
324
325 rcdu->ddev = ddev; 322 rcdu->ddev = ddev;
326 ddev->dev_private = rcdu; 323 ddev->dev_private = rcdu;
327 324
@@ -339,15 +336,15 @@ static int rcar_du_probe(struct platform_device *pdev)
339 * disabled for all CRTCs. 336 * disabled for all CRTCs.
340 */ 337 */
341 ret = drm_vblank_init(ddev, (1 << rcdu->info->num_crtcs) - 1); 338 ret = drm_vblank_init(ddev, (1 << rcdu->info->num_crtcs) - 1);
342 if (ret < 0) { 339 if (ret < 0)
343 dev_err(&pdev->dev, "failed to initialize vblank\n");
344 goto error; 340 goto error;
345 }
346 341
347 /* DRM/KMS objects */ 342 /* DRM/KMS objects */
348 ret = rcar_du_modeset_init(rcdu); 343 ret = rcar_du_modeset_init(rcdu);
349 if (ret < 0) { 344 if (ret < 0) {
350 dev_err(&pdev->dev, "failed to initialize DRM/KMS (%d)\n", ret); 345 if (ret != -EPROBE_DEFER)
346 dev_err(&pdev->dev,
347 "failed to initialize DRM/KMS (%d)\n", ret);
351 goto error; 348 goto error;
352 } 349 }
353 350
@@ -360,10 +357,6 @@ static int rcar_du_probe(struct platform_device *pdev)
360 if (ret) 357 if (ret)
361 goto error; 358 goto error;
362 359
363 ret = drm_connector_register_all(ddev);
364 if (ret < 0)
365 goto error;
366
367 DRM_INFO("Device %s probed\n", dev_name(&pdev->dev)); 360 DRM_INFO("Device %s probed\n", dev_name(&pdev->dev));
368 361
369 return 0; 362 return 0;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index 4e939e41f030..ab8645c57e2d 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -19,7 +19,6 @@
19 19
20#include "rcar_du_drv.h" 20#include "rcar_du_drv.h"
21#include "rcar_du_encoder.h" 21#include "rcar_du_encoder.h"
22#include "rcar_du_hdmicon.h"
23#include "rcar_du_hdmienc.h" 22#include "rcar_du_hdmienc.h"
24#include "rcar_du_kms.h" 23#include "rcar_du_kms.h"
25#include "rcar_du_lvdscon.h" 24#include "rcar_du_lvdscon.h"
@@ -27,18 +26,6 @@
27#include "rcar_du_vgacon.h" 26#include "rcar_du_vgacon.h"
28 27
29/* ----------------------------------------------------------------------------- 28/* -----------------------------------------------------------------------------
30 * Common connector functions
31 */
32
33struct drm_encoder *
34rcar_du_connector_best_encoder(struct drm_connector *connector)
35{
36 struct rcar_du_connector *rcon = to_rcar_connector(connector);
37
38 return rcar_encoder_to_drm_encoder(rcon->encoder);
39}
40
41/* -----------------------------------------------------------------------------
42 * Encoder 29 * Encoder
43 */ 30 */
44 31
@@ -186,7 +173,7 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
186 break; 173 break;
187 174
188 case DRM_MODE_ENCODER_TMDS: 175 case DRM_MODE_ENCODER_TMDS:
189 ret = rcar_du_hdmi_connector_init(rcdu, renc); 176 /* connector managed by the bridge driver */
190 break; 177 break;
191 178
192 default: 179 default:
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
index 719b6f2a031c..7fc10a9c34c3 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
@@ -15,7 +15,6 @@
15#define __RCAR_DU_ENCODER_H__ 15#define __RCAR_DU_ENCODER_H__
16 16
17#include <drm/drm_crtc.h> 17#include <drm/drm_crtc.h>
18#include <drm/drm_encoder_slave.h>
19 18
20struct rcar_du_device; 19struct rcar_du_device;
21struct rcar_du_hdmienc; 20struct rcar_du_hdmienc;
@@ -30,16 +29,16 @@ enum rcar_du_encoder_type {
30}; 29};
31 30
32struct rcar_du_encoder { 31struct rcar_du_encoder {
33 struct drm_encoder_slave slave; 32 struct drm_encoder base;
34 enum rcar_du_output output; 33 enum rcar_du_output output;
35 struct rcar_du_hdmienc *hdmi; 34 struct rcar_du_hdmienc *hdmi;
36 struct rcar_du_lvdsenc *lvds; 35 struct rcar_du_lvdsenc *lvds;
37}; 36};
38 37
39#define to_rcar_encoder(e) \ 38#define to_rcar_encoder(e) \
40 container_of(e, struct rcar_du_encoder, slave.base) 39 container_of(e, struct rcar_du_encoder, base)
41 40
42#define rcar_encoder_to_drm_encoder(e) (&(e)->slave.base) 41#define rcar_encoder_to_drm_encoder(e) (&(e)->base)
43 42
44struct rcar_du_connector { 43struct rcar_du_connector {
45 struct drm_connector connector; 44 struct drm_connector connector;
@@ -49,9 +48,6 @@ struct rcar_du_connector {
49#define to_rcar_connector(c) \ 48#define to_rcar_connector(c) \
50 container_of(c, struct rcar_du_connector, connector) 49 container_of(c, struct rcar_du_connector, connector)
51 50
52struct drm_encoder *
53rcar_du_connector_best_encoder(struct drm_connector *connector);
54
55int rcar_du_encoder_init(struct rcar_du_device *rcdu, 51int rcar_du_encoder_init(struct rcar_du_device *rcdu,
56 enum rcar_du_encoder_type type, 52 enum rcar_du_encoder_type type,
57 enum rcar_du_output output, 53 enum rcar_du_output output,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c
deleted file mode 100644
index 6c927144b5c9..000000000000
--- a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c
+++ /dev/null
@@ -1,117 +0,0 @@
1/*
2 * R-Car Display Unit HDMI Connector
3 *
4 * Copyright (C) 2014 Renesas Electronics Corporation
5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <drm/drmP.h>
15#include <drm/drm_atomic_helper.h>
16#include <drm/drm_crtc.h>
17#include <drm/drm_crtc_helper.h>
18#include <drm/drm_encoder_slave.h>
19
20#include "rcar_du_drv.h"
21#include "rcar_du_encoder.h"
22#include "rcar_du_hdmicon.h"
23#include "rcar_du_kms.h"
24
25#define to_slave_funcs(e) (to_rcar_encoder(e)->slave.slave_funcs)
26
27static int rcar_du_hdmi_connector_get_modes(struct drm_connector *connector)
28{
29 struct rcar_du_connector *con = to_rcar_connector(connector);
30 struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(con->encoder);
31 const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
32
33 if (sfuncs->get_modes == NULL)
34 return 0;
35
36 return sfuncs->get_modes(encoder, connector);
37}
38
39static int rcar_du_hdmi_connector_mode_valid(struct drm_connector *connector,
40 struct drm_display_mode *mode)
41{
42 struct rcar_du_connector *con = to_rcar_connector(connector);
43 struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(con->encoder);
44 const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
45
46 if (sfuncs->mode_valid == NULL)
47 return MODE_OK;
48
49 return sfuncs->mode_valid(encoder, mode);
50}
51
52static const struct drm_connector_helper_funcs connector_helper_funcs = {
53 .get_modes = rcar_du_hdmi_connector_get_modes,
54 .mode_valid = rcar_du_hdmi_connector_mode_valid,
55 .best_encoder = rcar_du_connector_best_encoder,
56};
57
58static enum drm_connector_status
59rcar_du_hdmi_connector_detect(struct drm_connector *connector, bool force)
60{
61 struct rcar_du_connector *con = to_rcar_connector(connector);
62 struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(con->encoder);
63 const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
64
65 if (sfuncs->detect == NULL)
66 return connector_status_unknown;
67
68 return sfuncs->detect(encoder, connector);
69}
70
71static const struct drm_connector_funcs connector_funcs = {
72 .dpms = drm_atomic_helper_connector_dpms,
73 .reset = drm_atomic_helper_connector_reset,
74 .detect = rcar_du_hdmi_connector_detect,
75 .fill_modes = drm_helper_probe_single_connector_modes,
76 .destroy = drm_connector_cleanup,
77 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
78 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
79};
80
81int rcar_du_hdmi_connector_init(struct rcar_du_device *rcdu,
82 struct rcar_du_encoder *renc)
83{
84 struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(renc);
85 struct rcar_du_connector *rcon;
86 struct drm_connector *connector;
87 int ret;
88
89 rcon = devm_kzalloc(rcdu->dev, sizeof(*rcon), GFP_KERNEL);
90 if (rcon == NULL)
91 return -ENOMEM;
92
93 connector = &rcon->connector;
94 connector->display_info.width_mm = 0;
95 connector->display_info.height_mm = 0;
96 connector->interlace_allowed = true;
97 connector->polled = DRM_CONNECTOR_POLL_HPD;
98
99 ret = drm_connector_init(rcdu->ddev, connector, &connector_funcs,
100 DRM_MODE_CONNECTOR_HDMIA);
101 if (ret < 0)
102 return ret;
103
104 drm_connector_helper_add(connector, &connector_helper_funcs);
105
106 connector->dpms = DRM_MODE_DPMS_OFF;
107 drm_object_property_set_value(&connector->base,
108 rcdu->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF);
109
110 ret = drm_mode_connector_attach_encoder(connector, encoder);
111 if (ret < 0)
112 return ret;
113
114 rcon->encoder = renc;
115
116 return 0;
117}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.h b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.h
deleted file mode 100644
index 87daa949227f..000000000000
--- a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.h
+++ /dev/null
@@ -1,31 +0,0 @@
1/*
2 * R-Car Display Unit HDMI Connector
3 *
4 * Copyright (C) 2014 Renesas Electronics Corporation
5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#ifndef __RCAR_DU_HDMICON_H__
15#define __RCAR_DU_HDMICON_H__
16
17struct rcar_du_device;
18struct rcar_du_encoder;
19
20#if IS_ENABLED(CONFIG_DRM_RCAR_HDMI)
21int rcar_du_hdmi_connector_init(struct rcar_du_device *rcdu,
22 struct rcar_du_encoder *renc);
23#else
24static inline int rcar_du_hdmi_connector_init(struct rcar_du_device *rcdu,
25 struct rcar_du_encoder *renc)
26{
27 return -ENOSYS;
28}
29#endif
30
31#endif /* __RCAR_DU_HDMICON_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
index 461662d231e2..4de3ff0dbebd 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
@@ -16,7 +16,6 @@
16#include <drm/drmP.h> 16#include <drm/drmP.h>
17#include <drm/drm_crtc.h> 17#include <drm/drm_crtc.h>
18#include <drm/drm_crtc_helper.h> 18#include <drm/drm_crtc_helper.h>
19#include <drm/drm_encoder_slave.h>
20 19
21#include "rcar_du_drv.h" 20#include "rcar_du_drv.h"
22#include "rcar_du_encoder.h" 21#include "rcar_du_encoder.h"
@@ -25,20 +24,14 @@
25 24
26struct rcar_du_hdmienc { 25struct rcar_du_hdmienc {
27 struct rcar_du_encoder *renc; 26 struct rcar_du_encoder *renc;
28 struct device *dev;
29 bool enabled; 27 bool enabled;
30}; 28};
31 29
32#define to_rcar_hdmienc(e) (to_rcar_encoder(e)->hdmi) 30#define to_rcar_hdmienc(e) (to_rcar_encoder(e)->hdmi)
33#define to_slave_funcs(e) (to_rcar_encoder(e)->slave.slave_funcs)
34 31
35static void rcar_du_hdmienc_disable(struct drm_encoder *encoder) 32static void rcar_du_hdmienc_disable(struct drm_encoder *encoder)
36{ 33{
37 struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder); 34 struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
38 const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
39
40 if (sfuncs->dpms)
41 sfuncs->dpms(encoder, DRM_MODE_DPMS_OFF);
42 35
43 if (hdmienc->renc->lvds) 36 if (hdmienc->renc->lvds)
44 rcar_du_lvdsenc_enable(hdmienc->renc->lvds, encoder->crtc, 37 rcar_du_lvdsenc_enable(hdmienc->renc->lvds, encoder->crtc,
@@ -50,15 +43,11 @@ static void rcar_du_hdmienc_disable(struct drm_encoder *encoder)
50static void rcar_du_hdmienc_enable(struct drm_encoder *encoder) 43static void rcar_du_hdmienc_enable(struct drm_encoder *encoder)
51{ 44{
52 struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder); 45 struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
53 const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
54 46
55 if (hdmienc->renc->lvds) 47 if (hdmienc->renc->lvds)
56 rcar_du_lvdsenc_enable(hdmienc->renc->lvds, encoder->crtc, 48 rcar_du_lvdsenc_enable(hdmienc->renc->lvds, encoder->crtc,
57 true); 49 true);
58 50
59 if (sfuncs->dpms)
60 sfuncs->dpms(encoder, DRM_MODE_DPMS_ON);
61
62 hdmienc->enabled = true; 51 hdmienc->enabled = true;
63} 52}
64 53
@@ -67,29 +56,21 @@ static int rcar_du_hdmienc_atomic_check(struct drm_encoder *encoder,
67 struct drm_connector_state *conn_state) 56 struct drm_connector_state *conn_state)
68{ 57{
69 struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder); 58 struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
70 const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
71 struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode; 59 struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
72 const struct drm_display_mode *mode = &crtc_state->mode;
73 60
74 if (hdmienc->renc->lvds) 61 if (hdmienc->renc->lvds)
75 rcar_du_lvdsenc_atomic_check(hdmienc->renc->lvds, 62 rcar_du_lvdsenc_atomic_check(hdmienc->renc->lvds,
76 adjusted_mode); 63 adjusted_mode);
77 64
78 if (sfuncs->mode_fixup == NULL) 65 return 0;
79 return 0;
80
81 return sfuncs->mode_fixup(encoder, mode, adjusted_mode) ? 0 : -EINVAL;
82} 66}
83 67
68
84static void rcar_du_hdmienc_mode_set(struct drm_encoder *encoder, 69static void rcar_du_hdmienc_mode_set(struct drm_encoder *encoder,
85 struct drm_display_mode *mode, 70 struct drm_display_mode *mode,
86 struct drm_display_mode *adjusted_mode) 71 struct drm_display_mode *adjusted_mode)
87{ 72{
88 struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder); 73 struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
89 const struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
90
91 if (sfuncs->mode_set)
92 sfuncs->mode_set(encoder, mode, adjusted_mode);
93 74
94 rcar_du_crtc_route_output(encoder->crtc, hdmienc->renc->output); 75 rcar_du_crtc_route_output(encoder->crtc, hdmienc->renc->output);
95} 76}
@@ -109,7 +90,6 @@ static void rcar_du_hdmienc_cleanup(struct drm_encoder *encoder)
109 rcar_du_hdmienc_disable(encoder); 90 rcar_du_hdmienc_disable(encoder);
110 91
111 drm_encoder_cleanup(encoder); 92 drm_encoder_cleanup(encoder);
112 put_device(hdmienc->dev);
113} 93}
114 94
115static const struct drm_encoder_funcs encoder_funcs = { 95static const struct drm_encoder_funcs encoder_funcs = {
@@ -120,8 +100,7 @@ int rcar_du_hdmienc_init(struct rcar_du_device *rcdu,
120 struct rcar_du_encoder *renc, struct device_node *np) 100 struct rcar_du_encoder *renc, struct device_node *np)
121{ 101{
122 struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(renc); 102 struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(renc);
123 struct drm_i2c_encoder_driver *driver; 103 struct drm_bridge *bridge;
124 struct i2c_client *i2c_slave;
125 struct rcar_du_hdmienc *hdmienc; 104 struct rcar_du_hdmienc *hdmienc;
126 int ret; 105 int ret;
127 106
@@ -129,44 +108,29 @@ int rcar_du_hdmienc_init(struct rcar_du_device *rcdu,
129 if (hdmienc == NULL) 108 if (hdmienc == NULL)
130 return -ENOMEM; 109 return -ENOMEM;
131 110
132 /* Locate the slave I2C device and driver. */ 111 /* Locate drm bridge from the hdmi encoder DT node */
133 i2c_slave = of_find_i2c_device_by_node(np); 112 bridge = of_drm_find_bridge(np);
134 if (!i2c_slave || !i2c_get_clientdata(i2c_slave)) { 113 if (!bridge)
135 dev_dbg(rcdu->dev,
136 "can't get I2C slave for %s, deferring probe\n",
137 of_node_full_name(np));
138 return -EPROBE_DEFER; 114 return -EPROBE_DEFER;
139 }
140
141 hdmienc->dev = &i2c_slave->dev;
142
143 if (hdmienc->dev->driver == NULL) {
144 dev_dbg(rcdu->dev,
145 "I2C slave %s not probed yet, deferring probe\n",
146 dev_name(hdmienc->dev));
147 ret = -EPROBE_DEFER;
148 goto error;
149 }
150
151 /* Initialize the slave encoder. */
152 driver = to_drm_i2c_encoder_driver(to_i2c_driver(hdmienc->dev->driver));
153 ret = driver->encoder_init(i2c_slave, rcdu->ddev, &renc->slave);
154 if (ret < 0)
155 goto error;
156 115
157 ret = drm_encoder_init(rcdu->ddev, encoder, &encoder_funcs, 116 ret = drm_encoder_init(rcdu->ddev, encoder, &encoder_funcs,
158 DRM_MODE_ENCODER_TMDS, NULL); 117 DRM_MODE_ENCODER_TMDS, NULL);
159 if (ret < 0) 118 if (ret < 0)
160 goto error; 119 return ret;
161 120
162 drm_encoder_helper_add(encoder, &encoder_helper_funcs); 121 drm_encoder_helper_add(encoder, &encoder_helper_funcs);
163 122
164 renc->hdmi = hdmienc; 123 renc->hdmi = hdmienc;
165 hdmienc->renc = renc; 124 hdmienc->renc = renc;
166 125
167 return 0; 126 /* Link drm_bridge to encoder */
127 bridge->encoder = encoder;
128
129 ret = drm_bridge_attach(rcdu->ddev, bridge);
130 if (ret) {
131 drm_encoder_cleanup(encoder);
132 return ret;
133 }
168 134
169error: 135 return 0;
170 put_device(hdmienc->dev);
171 return ret;
172} 136}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index e70a4f33d970..6bb032d8ac6b 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -288,6 +288,8 @@ static int rcar_du_atomic_commit(struct drm_device *dev,
288{ 288{
289 struct rcar_du_device *rcdu = dev->dev_private; 289 struct rcar_du_device *rcdu = dev->dev_private;
290 struct rcar_du_commit *commit; 290 struct rcar_du_commit *commit;
291 struct drm_crtc *crtc;
292 struct drm_crtc_state *crtc_state;
291 unsigned int i; 293 unsigned int i;
292 int ret; 294 int ret;
293 295
@@ -309,10 +311,8 @@ static int rcar_du_atomic_commit(struct drm_device *dev,
309 /* Wait until all affected CRTCs have completed previous commits and 311 /* Wait until all affected CRTCs have completed previous commits and
310 * mark them as pending. 312 * mark them as pending.
311 */ 313 */
312 for (i = 0; i < dev->mode_config.num_crtc; ++i) { 314 for_each_crtc_in_state(state, crtc, crtc_state, i)
313 if (state->crtcs[i]) 315 commit->crtcs |= drm_crtc_mask(crtc);
314 commit->crtcs |= 1 << drm_crtc_index(state->crtcs[i]);
315 }
316 316
317 spin_lock(&rcdu->commit.wait.lock); 317 spin_lock(&rcdu->commit.wait.lock);
318 ret = wait_event_interruptible_locked(rcdu->commit.wait, 318 ret = wait_event_interruptible_locked(rcdu->commit.wait,
@@ -327,7 +327,7 @@ static int rcar_du_atomic_commit(struct drm_device *dev,
327 } 327 }
328 328
329 /* Swap the state, this is the point of no return. */ 329 /* Swap the state, this is the point of no return. */
330 drm_atomic_helper_swap_state(dev, state); 330 drm_atomic_helper_swap_state(state, true);
331 331
332 if (nonblock) 332 if (nonblock)
333 schedule_work(&commit->work); 333 schedule_work(&commit->work);
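
The commit bookkeeping in rcar_du_atomic_commit is rebuilt on for_each_crtc_in_state: only CRTCs actually present in the atomic state contribute a bit, via drm_crtc_mask(), rather than scanning every CRTC index and testing state->crtcs[i]. A small sketch of building such a mask from the indices of the affected CRTCs (the helper name is invented for the example):

#include <stdio.h>

/* Build a bitmask of affected CRTCs from their indices, mimicking
 * commit->crtcs |= drm_crtc_mask(crtc) in the hunk above. */
static unsigned int affected_crtc_mask(const int *idx, int count)
{
    unsigned int mask = 0;

    for (int i = 0; i < count; i++)
        mask |= 1u << idx[i];
    return mask;
}

int main(void)
{
    int touched[] = { 0, 2 };   /* CRTCs present in the atomic state */

    printf("mask = %#x\n", affected_crtc_mask(touched, 2));   /* 0x5 */
    return 0;
}
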
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
index e905f5da7aaa..6afd0af312ba 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
@@ -59,7 +59,6 @@ static int rcar_du_lvds_connector_get_modes(struct drm_connector *connector)
59 59
60static const struct drm_connector_helper_funcs connector_helper_funcs = { 60static const struct drm_connector_helper_funcs connector_helper_funcs = {
61 .get_modes = rcar_du_lvds_connector_get_modes, 61 .get_modes = rcar_du_lvds_connector_get_modes,
62 .best_encoder = rcar_du_connector_best_encoder,
63}; 62};
64 63
65static enum drm_connector_status 64static enum drm_connector_status
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
index d445e67f78e1..bfe31ca870cc 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -140,18 +140,17 @@ int rcar_du_atomic_check_planes(struct drm_device *dev,
140 bool needs_realloc = false; 140 bool needs_realloc = false;
141 unsigned int groups = 0; 141 unsigned int groups = 0;
142 unsigned int i; 142 unsigned int i;
143 struct drm_plane *drm_plane;
144 struct drm_plane_state *drm_plane_state;
143 145
144 /* Check if hardware planes need to be reallocated. */ 146 /* Check if hardware planes need to be reallocated. */
145 for (i = 0; i < dev->mode_config.num_total_plane; ++i) { 147 for_each_plane_in_state(state, drm_plane, drm_plane_state, i) {
146 struct rcar_du_plane_state *plane_state; 148 struct rcar_du_plane_state *plane_state;
147 struct rcar_du_plane *plane; 149 struct rcar_du_plane *plane;
148 unsigned int index; 150 unsigned int index;
149 151
150 if (!state->planes[i]) 152 plane = to_rcar_plane(drm_plane);
151 continue; 153 plane_state = to_rcar_plane_state(drm_plane_state);
152
153 plane = to_rcar_plane(state->planes[i]);
154 plane_state = to_rcar_plane_state(state->plane_states[i]);
155 154
156 dev_dbg(rcdu->dev, "%s: checking plane (%u,%tu)\n", __func__, 155 dev_dbg(rcdu->dev, "%s: checking plane (%u,%tu)\n", __func__,
157 plane->group->index, plane - plane->group->planes); 156 plane->group->index, plane - plane->group->planes);
@@ -247,18 +246,15 @@ int rcar_du_atomic_check_planes(struct drm_device *dev,
247 } 246 }
248 247
249 /* Reallocate hardware planes for each plane that needs it. */ 248 /* Reallocate hardware planes for each plane that needs it. */
250 for (i = 0; i < dev->mode_config.num_total_plane; ++i) { 249 for_each_plane_in_state(state, drm_plane, drm_plane_state, i) {
251 struct rcar_du_plane_state *plane_state; 250 struct rcar_du_plane_state *plane_state;
252 struct rcar_du_plane *plane; 251 struct rcar_du_plane *plane;
253 unsigned int crtc_planes; 252 unsigned int crtc_planes;
254 unsigned int free; 253 unsigned int free;
255 int idx; 254 int idx;
256 255
257 if (!state->planes[i]) 256 plane = to_rcar_plane(drm_plane);
258 continue; 257 plane_state = to_rcar_plane_state(drm_plane_state);
259
260 plane = to_rcar_plane(state->planes[i]);
261 plane_state = to_rcar_plane_state(state->plane_states[i]);
262 258
263 dev_dbg(rcdu->dev, "%s: allocating plane (%u,%tu)\n", __func__, 259 dev_dbg(rcdu->dev, "%s: allocating plane (%u,%tu)\n", __func__,
264 plane->group->index, plane - plane->group->planes); 260 plane->group->index, plane - plane->group->planes);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_regs.h b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
index d2f66068e52c..fedb0161e234 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_regs.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
@@ -195,9 +195,10 @@
195#define DEFR6_ODPM12_DISP (2 << 8) 195#define DEFR6_ODPM12_DISP (2 << 8)
196#define DEFR6_ODPM12_CDE (3 << 8) 196#define DEFR6_ODPM12_CDE (3 << 8)
197#define DEFR6_ODPM12_MASK (3 << 8) 197#define DEFR6_ODPM12_MASK (3 << 8)
198#define DEFR6_TCNE2 (1 << 6) 198#define DEFR6_TCNE1 (1 << 6)
199#define DEFR6_TCNE0 (1 << 4)
199#define DEFR6_MLOS1 (1 << 2) 200#define DEFR6_MLOS1 (1 << 2)
200#define DEFR6_DEFAULT (DEFR6_CODE | DEFR6_TCNE2) 201#define DEFR6_DEFAULT (DEFR6_CODE | DEFR6_TCNE1)
201 202
202/* ----------------------------------------------------------------------------- 203/* -----------------------------------------------------------------------------
203 * R8A7790-only Control Registers 204 * R8A7790-only Control Registers
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
index 9d7e5c99caf6..8d6125c1c0f9 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
@@ -28,7 +28,6 @@ static int rcar_du_vga_connector_get_modes(struct drm_connector *connector)
28 28
29static const struct drm_connector_helper_funcs connector_helper_funcs = { 29static const struct drm_connector_helper_funcs connector_helper_funcs = {
30 .get_modes = rcar_du_vga_connector_get_modes, 30 .get_modes = rcar_du_vga_connector_get_modes,
31 .best_encoder = rcar_du_connector_best_encoder,
32}; 31};
33 32
34static enum drm_connector_status 33static enum drm_connector_status
@@ -79,7 +78,5 @@ int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
79 if (ret < 0) 78 if (ret < 0)
80 return ret; 79 return ret;
81 80
82 rcon->encoder = renc;
83
84 return 0; 81 return 0;
85} 82}
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index d30bdc38a760..3c58669a06ce 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -2,12 +2,9 @@ config DRM_ROCKCHIP
2 tristate "DRM Support for Rockchip" 2 tristate "DRM Support for Rockchip"
3 depends on DRM && ROCKCHIP_IOMMU 3 depends on DRM && ROCKCHIP_IOMMU
4 depends on RESET_CONTROLLER 4 depends on RESET_CONTROLLER
5 select DRM_GEM_CMA_HELPER
5 select DRM_KMS_HELPER 6 select DRM_KMS_HELPER
6 select DRM_KMS_FB_HELPER
7 select DRM_PANEL 7 select DRM_PANEL
8 select FB_CFB_FILLRECT
9 select FB_CFB_COPYAREA
10 select FB_CFB_IMAGEBLIT
11 select VIDEOMODE_HELPERS 8 select VIDEOMODE_HELPERS
12 help 9 help
13 Choose this option if you have a Rockchip soc chipset. 10 Choose this option if you have a Rockchip soc chipset.
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index 7f6a55cae27a..89aadbf465f8 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -14,6 +14,7 @@
14 14
15#include <linux/component.h> 15#include <linux/component.h>
16#include <linux/mfd/syscon.h> 16#include <linux/mfd/syscon.h>
17#include <linux/of_device.h>
17#include <linux/of_graph.h> 18#include <linux/of_graph.h>
18#include <linux/regmap.h> 19#include <linux/regmap.h>
19#include <linux/reset.h> 20#include <linux/reset.h>
@@ -33,13 +34,28 @@
33#include "rockchip_drm_drv.h" 34#include "rockchip_drm_drv.h"
34#include "rockchip_drm_vop.h" 35#include "rockchip_drm_vop.h"
35 36
37#define RK3288_GRF_SOC_CON6 0x25c
38#define RK3288_EDP_LCDC_SEL BIT(5)
39#define RK3399_GRF_SOC_CON20 0x6250
40#define RK3399_EDP_LCDC_SEL BIT(5)
41
42#define HIWORD_UPDATE(val, mask) (val | (mask) << 16)
43
36#define to_dp(nm) container_of(nm, struct rockchip_dp_device, nm) 44#define to_dp(nm) container_of(nm, struct rockchip_dp_device, nm)
37 45
38/* dp grf register offset */ 46/**
 39#define GRF_SOC_CON6 0x025c 47 * struct rockchip_dp_chip_data - split out the GRF settings for the different kinds of chips
40#define GRF_EDP_LCD_SEL_MASK BIT(5) 48 * @lcdsel_grf_reg: grf register offset of lcdc select
41#define GRF_EDP_SEL_VOP_LIT BIT(5) 49 * @lcdsel_big: reg value of selecting vop big for eDP
42#define GRF_EDP_SEL_VOP_BIG 0 50 * @lcdsel_lit: reg value of selecting vop little for eDP
51 * @chip_type: specific chip type
52 */
53struct rockchip_dp_chip_data {
54 u32 lcdsel_grf_reg;
55 u32 lcdsel_big;
56 u32 lcdsel_lit;
57 u32 chip_type;
58};
43 59
44struct rockchip_dp_device { 60struct rockchip_dp_device {
45 struct drm_device *drm_dev; 61 struct drm_device *drm_dev;
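
The new per-chip data relies on the Rockchip GRF write convention captured by HIWORD_UPDATE(val, mask): the upper 16 bits of the value written act as a write-enable mask for the corresponding lower 16 bits, so one 32-bit write can flip just the lcdc-select bit without a read-modify-write cycle. A sketch of how such a write is interpreted, as pure arithmetic with no hardware access:

#include <stdio.h>

#define HIWORD_UPDATE(val, mask)  ((val) | ((mask) << 16))

/* Model of a GRF register: only bits whose write-enable (high word)
 * is set take the new value; all other bits keep their old value. */
static unsigned int grf_write(unsigned int old, unsigned int wrval)
{
    unsigned int mask = wrval >> 16;
    unsigned int val  = wrval & 0xffffu;

    return (old & ~mask) | (val & mask);
}

int main(void)
{
    unsigned int reg = 0x00ff;                          /* current value */
    unsigned int w   = HIWORD_UPDATE(1u << 5, 1u << 5); /* set only bit 5 */

    printf("before=%#x after=%#x\n", reg, grf_write(reg, w)); /* 0xff -> 0xff */
    reg = 0x0000;
    printf("before=%#x after=%#x\n", reg, grf_write(reg, w)); /* 0x0 -> 0x20 */
    return 0;
}
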
@@ -48,9 +64,12 @@ struct rockchip_dp_device {
48 struct drm_display_mode mode; 64 struct drm_display_mode mode;
49 65
50 struct clk *pclk; 66 struct clk *pclk;
67 struct clk *grfclk;
51 struct regmap *grf; 68 struct regmap *grf;
52 struct reset_control *rst; 69 struct reset_control *rst;
53 70
71 const struct rockchip_dp_chip_data *data;
72
54 struct analogix_dp_plat_data plat_data; 73 struct analogix_dp_plat_data plat_data;
55}; 74};
56 75
@@ -77,6 +96,7 @@ static int rockchip_dp_poweron(struct analogix_dp_plat_data *plat_data)
77 ret = rockchip_dp_pre_init(dp); 96 ret = rockchip_dp_pre_init(dp);
78 if (ret < 0) { 97 if (ret < 0) {
79 dev_err(dp->dev, "failed to dp pre init %d\n", ret); 98 dev_err(dp->dev, "failed to dp pre init %d\n", ret);
99 clk_disable_unprepare(dp->pclk);
80 return ret; 100 return ret;
81 } 101 }
82 102
@@ -92,6 +112,23 @@ static int rockchip_dp_powerdown(struct analogix_dp_plat_data *plat_data)
92 return 0; 112 return 0;
93} 113}
94 114
115static int rockchip_dp_get_modes(struct analogix_dp_plat_data *plat_data,
116 struct drm_connector *connector)
117{
118 struct drm_display_info *di = &connector->display_info;
119 /* VOP couldn't output YUV video format for eDP rightly */
120 u32 mask = DRM_COLOR_FORMAT_YCRCB444 | DRM_COLOR_FORMAT_YCRCB422;
121
122 if ((di->color_formats & mask)) {
123 DRM_DEBUG_KMS("Swapping display color format from YUV to RGB\n");
124 di->color_formats &= ~mask;
125 di->color_formats |= DRM_COLOR_FORMAT_RGB444;
126 di->bpc = 8;
127 }
128
129 return 0;
130}
131
95static bool 132static bool
96rockchip_dp_drm_encoder_mode_fixup(struct drm_encoder *encoder, 133rockchip_dp_drm_encoder_mode_fixup(struct drm_encoder *encoder,
97 const struct drm_display_mode *mode, 134 const struct drm_display_mode *mode,
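The new get_modes() hook works on drm_display_info.color_formats, a bitmask filled from the panel's EDID: clearing the YCbCr bits and forcing RGB444 at 8 bpc keeps the atomic state from asking the VOP for a YUV bus format it cannot feed to eDP. A condensed sketch of the same idea (assuming the connector's display info has already been parsed):

    static void force_rgb_output(struct drm_connector *connector)
    {
            struct drm_display_info *di = &connector->display_info;
            u32 yuv = DRM_COLOR_FORMAT_YCRCB444 | DRM_COLOR_FORMAT_YCRCB422;

            if (di->color_formats & yuv) {
                    di->color_formats &= ~yuv;
                    di->color_formats |= DRM_COLOR_FORMAT_RGB444;
                    di->bpc = 8;
            }
    }
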
@@ -119,17 +156,23 @@ static void rockchip_dp_drm_encoder_enable(struct drm_encoder *encoder)
119 return; 156 return;
120 157
121 if (ret) 158 if (ret)
122 val = GRF_EDP_SEL_VOP_LIT | (GRF_EDP_LCD_SEL_MASK << 16); 159 val = dp->data->lcdsel_lit;
123 else 160 else
124 val = GRF_EDP_SEL_VOP_BIG | (GRF_EDP_LCD_SEL_MASK << 16); 161 val = dp->data->lcdsel_big;
125 162
126 dev_dbg(dp->dev, "vop %s output to dp\n", (ret) ? "LIT" : "BIG"); 163 dev_dbg(dp->dev, "vop %s output to dp\n", (ret) ? "LIT" : "BIG");
127 164
128 ret = regmap_write(dp->grf, GRF_SOC_CON6, val); 165 ret = clk_prepare_enable(dp->grfclk);
129 if (ret != 0) { 166 if (ret < 0) {
130 dev_err(dp->dev, "Could not write to GRF: %d\n", ret); 167 dev_err(dp->dev, "failed to enable grfclk %d\n", ret);
131 return; 168 return;
132 } 169 }
170
171 ret = regmap_write(dp->grf, dp->data->lcdsel_grf_reg, val);
172 if (ret != 0)
173 dev_err(dp->dev, "Could not write to GRF: %d\n", ret);
174
175 clk_disable_unprepare(dp->grfclk);
133} 176}
134 177
135static void rockchip_dp_drm_encoder_nop(struct drm_encoder *encoder) 178static void rockchip_dp_drm_encoder_nop(struct drm_encoder *encoder)
@@ -143,22 +186,29 @@ rockchip_dp_drm_encoder_atomic_check(struct drm_encoder *encoder,
143 struct drm_connector_state *conn_state) 186 struct drm_connector_state *conn_state)
144{ 187{
145 struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state); 188 struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
189 struct rockchip_dp_device *dp = to_dp(encoder);
190 int ret;
146 191
147 /* 192 /*
148 * FIXME(Yakir): driver should configure the CRTC output video 193 * The hardware IC designed that VOP must output the RGB10 video
149 * mode with the display information which indicated the monitor 194 * format to eDP controller, and if eDP panel only support RGB8,
150 * support colorimetry. 195 * then eDP controller should cut down the video data, not via VOP
151 * 196 * controller, that's why we need to hardcode the VOP output mode
152 * But don't know why the CRTC driver seems could only output the 197 * to RGA10 here.
153 * RGBaaa rightly. For example, if connect the "innolux,n116bge"
154 * eDP screen, EDID would indicated that screen only accepted the
155 * 6bpc mode. But if I configure CRTC to RGB666 output, then eDP
156 * screen would show a blue picture (RGB888 show a green picture).
157 * But if I configure CTRC to RGBaaa, and eDP driver still keep
158 * RGB666 input video mode, then screen would works prefect.
159 */ 198 */
199
160 s->output_mode = ROCKCHIP_OUT_MODE_AAAA; 200 s->output_mode = ROCKCHIP_OUT_MODE_AAAA;
161 s->output_type = DRM_MODE_CONNECTOR_eDP; 201 s->output_type = DRM_MODE_CONNECTOR_eDP;
202 if (dp->data->chip_type == RK3399_EDP) {
203 /*
204 * For RK3399, VOP Lit must code the out mode to RGB888,
205 * VOP Big must code the out mode to RGB10.
206 */
207 ret = drm_of_encoder_active_endpoint_id(dp->dev->of_node,
208 encoder);
209 if (ret > 0)
210 s->output_mode = ROCKCHIP_OUT_MODE_P888;
211 }
162 212
163 return 0; 213 return 0;
164} 214}
@@ -192,6 +242,16 @@ static int rockchip_dp_init(struct rockchip_dp_device *dp)
192 return PTR_ERR(dp->grf); 242 return PTR_ERR(dp->grf);
193 } 243 }
194 244
245 dp->grfclk = devm_clk_get(dev, "grf");
246 if (PTR_ERR(dp->grfclk) == -ENOENT) {
247 dp->grfclk = NULL;
248 } else if (PTR_ERR(dp->grfclk) == -EPROBE_DEFER) {
249 return -EPROBE_DEFER;
250 } else if (IS_ERR(dp->grfclk)) {
251 dev_err(dev, "failed to get grf clock\n");
252 return PTR_ERR(dp->grfclk);
253 }
254
195 dp->pclk = devm_clk_get(dev, "pclk"); 255 dp->pclk = devm_clk_get(dev, "pclk");
196 if (IS_ERR(dp->pclk)) { 256 if (IS_ERR(dp->pclk)) {
197 dev_err(dev, "failed to get pclk property\n"); 257 dev_err(dev, "failed to get pclk property\n");
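The "grf" clock is optional: RK3399 needs it running while the GRF is poked, RK3288 has no such clock in its DT. Treating -ENOENT as "not present" and keeping a NULL sentinel works because clk_prepare_enable(NULL) and clk_disable_unprepare(NULL) are no-ops, so the enable path in the encoder code stays unconditional. The shape of the pattern, as a hedged sketch (helper name invented for illustration):

    static struct clk *clk_get_optional_sketch(struct device *dev, const char *id)
    {
            struct clk *clk = devm_clk_get(dev, id);

            if (PTR_ERR(clk) == -ENOENT)
                    return NULL;    /* absent in DT: treat as a dummy clock */

            return clk;             /* real clock, or ERR_PTR (-EPROBE_DEFER etc.) */
    }
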
@@ -213,6 +273,7 @@ static int rockchip_dp_init(struct rockchip_dp_device *dp)
213 ret = rockchip_dp_pre_init(dp); 273 ret = rockchip_dp_pre_init(dp);
214 if (ret < 0) { 274 if (ret < 0) {
215 dev_err(dp->dev, "failed to pre init %d\n", ret); 275 dev_err(dp->dev, "failed to pre init %d\n", ret);
276 clk_disable_unprepare(dp->pclk);
216 return ret; 277 return ret;
217 } 278 }
218 279
@@ -246,6 +307,7 @@ static int rockchip_dp_bind(struct device *dev, struct device *master,
246 void *data) 307 void *data)
247{ 308{
248 struct rockchip_dp_device *dp = dev_get_drvdata(dev); 309 struct rockchip_dp_device *dp = dev_get_drvdata(dev);
310 const struct rockchip_dp_chip_data *dp_data;
249 struct drm_device *drm_dev = data; 311 struct drm_device *drm_dev = data;
250 int ret; 312 int ret;
251 313
@@ -256,10 +318,15 @@ static int rockchip_dp_bind(struct device *dev, struct device *master,
256 */ 318 */
257 dev_set_drvdata(dev, NULL); 319 dev_set_drvdata(dev, NULL);
258 320
321 dp_data = of_device_get_match_data(dev);
322 if (!dp_data)
323 return -ENODEV;
324
259 ret = rockchip_dp_init(dp); 325 ret = rockchip_dp_init(dp);
260 if (ret < 0) 326 if (ret < 0)
261 return ret; 327 return ret;
262 328
329 dp->data = dp_data;
263 dp->drm_dev = drm_dev; 330 dp->drm_dev = drm_dev;
264 331
265 ret = rockchip_dp_drm_create_encoder(dp); 332 ret = rockchip_dp_drm_create_encoder(dp);
@@ -270,9 +337,10 @@ static int rockchip_dp_bind(struct device *dev, struct device *master,
270 337
271 dp->plat_data.encoder = &dp->encoder; 338 dp->plat_data.encoder = &dp->encoder;
272 339
273 dp->plat_data.dev_type = RK3288_DP; 340 dp->plat_data.dev_type = dp->data->chip_type;
274 dp->plat_data.power_on = rockchip_dp_poweron; 341 dp->plat_data.power_on = rockchip_dp_poweron;
275 dp->plat_data.power_off = rockchip_dp_powerdown; 342 dp->plat_data.power_off = rockchip_dp_powerdown;
343 dp->plat_data.get_modes = rockchip_dp_get_modes;
276 344
277 return analogix_dp_bind(dev, dp->drm_dev, &dp->plat_data); 345 return analogix_dp_bind(dev, dp->drm_dev, &dp->plat_data);
278} 346}
@@ -292,38 +360,33 @@ static int rockchip_dp_probe(struct platform_device *pdev)
292{ 360{
293 struct device *dev = &pdev->dev; 361 struct device *dev = &pdev->dev;
294 struct device_node *panel_node, *port, *endpoint; 362 struct device_node *panel_node, *port, *endpoint;
363 struct drm_panel *panel = NULL;
295 struct rockchip_dp_device *dp; 364 struct rockchip_dp_device *dp;
296 struct drm_panel *panel;
297 365
298 port = of_graph_get_port_by_id(dev->of_node, 1); 366 port = of_graph_get_port_by_id(dev->of_node, 1);
299 if (!port) { 367 if (port) {
300 dev_err(dev, "can't find output port\n"); 368 endpoint = of_get_child_by_name(port, "endpoint");
301 return -EINVAL; 369 of_node_put(port);
302 } 370 if (!endpoint) {
303 371 dev_err(dev, "no output endpoint found\n");
304 endpoint = of_get_child_by_name(port, "endpoint"); 372 return -EINVAL;
305 of_node_put(port); 373 }
306 if (!endpoint) { 374
307 dev_err(dev, "no output endpoint found\n"); 375 panel_node = of_graph_get_remote_port_parent(endpoint);
308 return -EINVAL; 376 of_node_put(endpoint);
309 } 377 if (!panel_node) {
310 378 dev_err(dev, "no output node found\n");
311 panel_node = of_graph_get_remote_port_parent(endpoint); 379 return -EINVAL;
312 of_node_put(endpoint); 380 }
313 if (!panel_node) { 381
314 dev_err(dev, "no output node found\n"); 382 panel = of_drm_find_panel(panel_node);
315 return -EINVAL;
316 }
317
318 panel = of_drm_find_panel(panel_node);
319 if (!panel) {
320 DRM_ERROR("failed to find panel\n");
321 of_node_put(panel_node); 383 of_node_put(panel_node);
322 return -EPROBE_DEFER; 384 if (!panel) {
385 DRM_ERROR("failed to find panel\n");
386 return -EPROBE_DEFER;
387 }
323 } 388 }
324 389
325 of_node_put(panel_node);
326
327 dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL); 390 dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL);
328 if (!dp) 391 if (!dp)
329 return -ENOMEM; 392 return -ENOMEM;
@@ -349,24 +412,30 @@ static int rockchip_dp_remove(struct platform_device *pdev)
349 return 0; 412 return 0;
350} 413}
351 414
415static const struct dev_pm_ops rockchip_dp_pm_ops = {
352#ifdef CONFIG_PM_SLEEP 416#ifdef CONFIG_PM_SLEEP
353static int rockchip_dp_suspend(struct device *dev) 417 .suspend = analogix_dp_suspend,
354{ 418 .resume_early = analogix_dp_resume,
355 return analogix_dp_suspend(dev);
356}
357
358static int rockchip_dp_resume(struct device *dev)
359{
360 return analogix_dp_resume(dev);
361}
362#endif 419#endif
420};
363 421
364static const struct dev_pm_ops rockchip_dp_pm_ops = { 422static const struct rockchip_dp_chip_data rk3399_edp = {
365 SET_SYSTEM_SLEEP_PM_OPS(rockchip_dp_suspend, rockchip_dp_resume) 423 .lcdsel_grf_reg = RK3399_GRF_SOC_CON20,
424 .lcdsel_big = HIWORD_UPDATE(0, RK3399_EDP_LCDC_SEL),
425 .lcdsel_lit = HIWORD_UPDATE(RK3399_EDP_LCDC_SEL, RK3399_EDP_LCDC_SEL),
426 .chip_type = RK3399_EDP,
427};
428
429static const struct rockchip_dp_chip_data rk3288_dp = {
430 .lcdsel_grf_reg = RK3288_GRF_SOC_CON6,
431 .lcdsel_big = HIWORD_UPDATE(0, RK3288_EDP_LCDC_SEL),
432 .lcdsel_lit = HIWORD_UPDATE(RK3288_EDP_LCDC_SEL, RK3288_EDP_LCDC_SEL),
433 .chip_type = RK3288_DP,
366}; 434};
367 435
368static const struct of_device_id rockchip_dp_dt_ids[] = { 436static const struct of_device_id rockchip_dp_dt_ids[] = {
369 {.compatible = "rockchip,rk3288-dp",}, 437 {.compatible = "rockchip,rk3288-dp", .data = &rk3288_dp },
438 {.compatible = "rockchip,rk3399-edp", .data = &rk3399_edp },
370 {} 439 {}
371}; 440};
372MODULE_DEVICE_TABLE(of, rockchip_dp_dt_ids); 441MODULE_DEVICE_TABLE(of, rockchip_dp_dt_ids);
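The of_device_id table now carries a pointer to the matching rockchip_dp_chip_data in .data, and bind() reads it back with of_device_get_match_data() (hence the new <linux/of_device.h> include). This is the standard way to fold several SoC variants into one driver; a hedged sketch of the lookup side:

    #include <linux/of_device.h>

    static int chip_data_lookup_sketch(struct device *dev,
                                       const struct rockchip_dp_chip_data **out)
    {
            /* returns the .data pointer of the of_device_id that matched dev */
            *out = of_device_get_match_data(dev);
            if (!*out)
                    return -ENODEV;         /* bound without a DT match */

            return 0;
    }
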
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
index dedc65b40f36..ca22e5ee89ca 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
@@ -964,18 +964,9 @@ static enum drm_mode_status dw_mipi_dsi_mode_valid(
964 return mode_status; 964 return mode_status;
965} 965}
966 966
967static struct drm_encoder *dw_mipi_dsi_connector_best_encoder(
968 struct drm_connector *connector)
969{
970 struct dw_mipi_dsi *dsi = con_to_dsi(connector);
971
972 return &dsi->encoder;
973}
974
975static struct drm_connector_helper_funcs dw_mipi_dsi_connector_helper_funcs = { 967static struct drm_connector_helper_funcs dw_mipi_dsi_connector_helper_funcs = {
976 .get_modes = dw_mipi_dsi_connector_get_modes, 968 .get_modes = dw_mipi_dsi_connector_get_modes,
977 .mode_valid = dw_mipi_dsi_mode_valid, 969 .mode_valid = dw_mipi_dsi_mode_valid,
978 .best_encoder = dw_mipi_dsi_connector_best_encoder,
979}; 970};
980 971
981static enum drm_connector_status 972static enum drm_connector_status
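Dropping the .best_encoder hook is safe for an atomic driver with a fixed 1:1 connector/encoder mapping: with the hook left NULL the atomic helpers fall back to drm_atomic_helper_best_encoder(), which returns the single encoder attached to the connector, so the removed callback was pure boilerplate. The resulting helper table simply omits the field, e.g. (illustrative):

    static struct drm_connector_helper_funcs dsi_connector_helpers_sketch = {
            .get_modes = dw_mipi_dsi_connector_get_modes,
            .mode_valid = dw_mipi_dsi_mode_valid,
            /* .best_encoder intentionally NULL: core picks the sole encoder */
    };
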
diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
index 801110f65a63..0665fb915579 100644
--- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c
@@ -15,7 +15,6 @@
15#include <drm/drmP.h> 15#include <drm/drmP.h>
16#include <drm/drm_crtc_helper.h> 16#include <drm/drm_crtc_helper.h>
17#include <drm/drm_edid.h> 17#include <drm/drm_edid.h>
18#include <drm/drm_encoder_slave.h>
19#include <drm/bridge/dw_hdmi.h> 18#include <drm/bridge/dw_hdmi.h>
20 19
21#include "rockchip_drm_drv.h" 20#include "rockchip_drm_drv.h"
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
index f8b4feb60b25..006260de9dbd 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -579,14 +579,6 @@ inno_hdmi_connector_mode_valid(struct drm_connector *connector,
579 return MODE_OK; 579 return MODE_OK;
580} 580}
581 581
582static struct drm_encoder *
583inno_hdmi_connector_best_encoder(struct drm_connector *connector)
584{
585 struct inno_hdmi *hdmi = to_inno_hdmi(connector);
586
587 return &hdmi->encoder;
588}
589
590static int 582static int
591inno_hdmi_probe_single_connector_modes(struct drm_connector *connector, 583inno_hdmi_probe_single_connector_modes(struct drm_connector *connector,
592 uint32_t maxX, uint32_t maxY) 584 uint32_t maxX, uint32_t maxY)
@@ -613,7 +605,6 @@ static struct drm_connector_funcs inno_hdmi_connector_funcs = {
613static struct drm_connector_helper_funcs inno_hdmi_connector_helper_funcs = { 605static struct drm_connector_helper_funcs inno_hdmi_connector_helper_funcs = {
614 .get_modes = inno_hdmi_connector_get_modes, 606 .get_modes = inno_hdmi_connector_get_modes,
615 .mode_valid = inno_hdmi_connector_mode_valid, 607 .mode_valid = inno_hdmi_connector_mode_valid,
616 .best_encoder = inno_hdmi_connector_best_encoder,
617}; 608};
618 609
619static int inno_hdmi_register(struct drm_device *drm, struct inno_hdmi *hdmi) 610static int inno_hdmi_register(struct drm_device *drm, struct inno_hdmi *hdmi)
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index a409d1f703cb..a822d49a255a 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -19,11 +19,13 @@
19#include <drm/drmP.h> 19#include <drm/drmP.h>
20#include <drm/drm_crtc_helper.h> 20#include <drm/drm_crtc_helper.h>
21#include <drm/drm_fb_helper.h> 21#include <drm/drm_fb_helper.h>
22#include <drm/drm_gem_cma_helper.h>
22#include <linux/dma-mapping.h> 23#include <linux/dma-mapping.h>
23#include <linux/pm_runtime.h> 24#include <linux/pm_runtime.h>
24#include <linux/module.h> 25#include <linux/module.h>
25#include <linux/of_graph.h> 26#include <linux/of_graph.h>
26#include <linux/component.h> 27#include <linux/component.h>
28#include <linux/console.h>
27 29
28#include "rockchip_drm_drv.h" 30#include "rockchip_drm_drv.h"
29#include "rockchip_drm_fb.h" 31#include "rockchip_drm_fb.h"
@@ -37,6 +39,7 @@
37#define DRIVER_MINOR 0 39#define DRIVER_MINOR 0
38 40
39static bool is_support_iommu = true; 41static bool is_support_iommu = true;
42static struct drm_driver rockchip_drm_driver;
40 43
41/* 44/*
42 * Attach a (component) device to the shared drm dma mapping from master drm 45 * Attach a (component) device to the shared drm dma mapping from master drm
@@ -76,7 +79,7 @@ int rockchip_register_crtc_funcs(struct drm_crtc *crtc,
76 int pipe = drm_crtc_index(crtc); 79 int pipe = drm_crtc_index(crtc);
77 struct rockchip_drm_private *priv = crtc->dev->dev_private; 80 struct rockchip_drm_private *priv = crtc->dev->dev_private;
78 81
79 if (pipe > ROCKCHIP_MAX_CRTC) 82 if (pipe >= ROCKCHIP_MAX_CRTC)
80 return -EINVAL; 83 return -EINVAL;
81 84
82 priv->crtc_funcs[pipe] = crtc_funcs; 85 priv->crtc_funcs[pipe] = crtc_funcs;
@@ -89,7 +92,7 @@ void rockchip_unregister_crtc_funcs(struct drm_crtc *crtc)
89 int pipe = drm_crtc_index(crtc); 92 int pipe = drm_crtc_index(crtc);
90 struct rockchip_drm_private *priv = crtc->dev->dev_private; 93 struct rockchip_drm_private *priv = crtc->dev->dev_private;
91 94
92 if (pipe > ROCKCHIP_MAX_CRTC) 95 if (pipe >= ROCKCHIP_MAX_CRTC)
93 return; 96 return;
94 97
95 priv->crtc_funcs[pipe] = NULL; 98 priv->crtc_funcs[pipe] = NULL;
@@ -132,20 +135,24 @@ static void rockchip_drm_crtc_disable_vblank(struct drm_device *dev,
132 priv->crtc_funcs[pipe]->disable_vblank(crtc); 135 priv->crtc_funcs[pipe]->disable_vblank(crtc);
133} 136}
134 137
135static int rockchip_drm_load(struct drm_device *drm_dev, unsigned long flags) 138static int rockchip_drm_bind(struct device *dev)
136{ 139{
140 struct drm_device *drm_dev;
137 struct rockchip_drm_private *private; 141 struct rockchip_drm_private *private;
138 struct dma_iommu_mapping *mapping = NULL; 142 struct dma_iommu_mapping *mapping = NULL;
139 struct device *dev = drm_dev->dev;
140 struct drm_connector *connector;
141 int ret; 143 int ret;
142 144
143 private = devm_kzalloc(drm_dev->dev, sizeof(*private), GFP_KERNEL); 145 drm_dev = drm_dev_alloc(&rockchip_drm_driver, dev);
144 if (!private) 146 if (!drm_dev)
145 return -ENOMEM; 147 return -ENOMEM;
146 148
147 mutex_init(&private->commit.lock); 149 dev_set_drvdata(dev, drm_dev);
148 INIT_WORK(&private->commit.work, rockchip_drm_atomic_work); 150
151 private = devm_kzalloc(drm_dev->dev, sizeof(*private), GFP_KERNEL);
152 if (!private) {
153 ret = -ENOMEM;
154 goto err_free;
155 }
149 156
150 drm_dev->dev_private = private; 157 drm_dev->dev_private = private;
151 158
@@ -186,23 +193,6 @@ static int rockchip_drm_load(struct drm_device *drm_dev, unsigned long flags)
186 if (ret) 193 if (ret)
187 goto err_detach_device; 194 goto err_detach_device;
188 195
189 /*
190 * All components are now added, we can publish the connector sysfs
191 * entries to userspace. This will generate hotplug events and so
192 * userspace will expect to be able to access DRM at this point.
193 */
194 list_for_each_entry(connector, &drm_dev->mode_config.connector_list,
195 head) {
196 ret = drm_connector_register(connector);
197 if (ret) {
198 dev_err(drm_dev->dev,
199 "[CONNECTOR:%d:%s] drm_connector_register failed: %d\n",
200 connector->base.id,
201 connector->name, ret);
202 goto err_unbind;
203 }
204 }
205
206 /* init kms poll for handling hpd */ 196 /* init kms poll for handling hpd */
207 drm_kms_helper_poll_init(drm_dev); 197 drm_kms_helper_poll_init(drm_dev);
208 198
@@ -222,14 +212,19 @@ static int rockchip_drm_load(struct drm_device *drm_dev, unsigned long flags)
222 if (ret) 212 if (ret)
223 goto err_vblank_cleanup; 213 goto err_vblank_cleanup;
224 214
215 ret = drm_dev_register(drm_dev, 0);
216 if (ret)
217 goto err_fbdev_fini;
218
225 if (is_support_iommu) 219 if (is_support_iommu)
226 arm_iommu_release_mapping(mapping); 220 arm_iommu_release_mapping(mapping);
227 return 0; 221 return 0;
222err_fbdev_fini:
223 rockchip_drm_fbdev_fini(drm_dev);
228err_vblank_cleanup: 224err_vblank_cleanup:
229 drm_vblank_cleanup(drm_dev); 225 drm_vblank_cleanup(drm_dev);
230err_kms_helper_poll_fini: 226err_kms_helper_poll_fini:
231 drm_kms_helper_poll_fini(drm_dev); 227 drm_kms_helper_poll_fini(drm_dev);
232err_unbind:
233 component_unbind_all(dev, drm_dev); 228 component_unbind_all(dev, drm_dev);
234err_detach_device: 229err_detach_device:
235 if (is_support_iommu) 230 if (is_support_iommu)
@@ -240,12 +235,14 @@ err_release_mapping:
240err_config_cleanup: 235err_config_cleanup:
241 drm_mode_config_cleanup(drm_dev); 236 drm_mode_config_cleanup(drm_dev);
242 drm_dev->dev_private = NULL; 237 drm_dev->dev_private = NULL;
238err_free:
239 drm_dev_unref(drm_dev);
243 return ret; 240 return ret;
244} 241}
245 242
246static int rockchip_drm_unload(struct drm_device *drm_dev) 243static void rockchip_drm_unbind(struct device *dev)
247{ 244{
248 struct device *dev = drm_dev->dev; 245 struct drm_device *drm_dev = dev_get_drvdata(dev);
249 246
250 rockchip_drm_fbdev_fini(drm_dev); 247 rockchip_drm_fbdev_fini(drm_dev);
251 drm_vblank_cleanup(drm_dev); 248 drm_vblank_cleanup(drm_dev);
@@ -255,32 +252,12 @@ static int rockchip_drm_unload(struct drm_device *drm_dev)
255 arm_iommu_detach_device(dev); 252 arm_iommu_detach_device(dev);
256 drm_mode_config_cleanup(drm_dev); 253 drm_mode_config_cleanup(drm_dev);
257 drm_dev->dev_private = NULL; 254 drm_dev->dev_private = NULL;
258 255 drm_dev_unregister(drm_dev);
259 return 0; 256 drm_dev_unref(drm_dev);
260} 257 dev_set_drvdata(dev, NULL);
261
262static void rockchip_drm_crtc_cancel_pending_vblank(struct drm_crtc *crtc,
263 struct drm_file *file_priv)
264{
265 struct rockchip_drm_private *priv = crtc->dev->dev_private;
266 int pipe = drm_crtc_index(crtc);
267
268 if (pipe < ROCKCHIP_MAX_CRTC &&
269 priv->crtc_funcs[pipe] &&
270 priv->crtc_funcs[pipe]->cancel_pending_vblank)
271 priv->crtc_funcs[pipe]->cancel_pending_vblank(crtc, file_priv);
272}
273
274static void rockchip_drm_preclose(struct drm_device *dev,
275 struct drm_file *file_priv)
276{
277 struct drm_crtc *crtc;
278
279 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
280 rockchip_drm_crtc_cancel_pending_vblank(crtc, file_priv);
281} 258}
282 259
283void rockchip_drm_lastclose(struct drm_device *dev) 260static void rockchip_drm_lastclose(struct drm_device *dev)
284{ 261{
285 struct rockchip_drm_private *priv = dev->dev_private; 262 struct rockchip_drm_private *priv = dev->dev_private;
286 263
@@ -300,23 +277,15 @@ static const struct file_operations rockchip_drm_driver_fops = {
300 .release = drm_release, 277 .release = drm_release,
301}; 278};
302 279
303const struct vm_operations_struct rockchip_drm_vm_ops = {
304 .open = drm_gem_vm_open,
305 .close = drm_gem_vm_close,
306};
307
308static struct drm_driver rockchip_drm_driver = { 280static struct drm_driver rockchip_drm_driver = {
309 .driver_features = DRIVER_MODESET | DRIVER_GEM | 281 .driver_features = DRIVER_MODESET | DRIVER_GEM |
310 DRIVER_PRIME | DRIVER_ATOMIC, 282 DRIVER_PRIME | DRIVER_ATOMIC,
311 .load = rockchip_drm_load,
312 .unload = rockchip_drm_unload,
313 .preclose = rockchip_drm_preclose,
314 .lastclose = rockchip_drm_lastclose, 283 .lastclose = rockchip_drm_lastclose,
315 .get_vblank_counter = drm_vblank_no_hw_counter, 284 .get_vblank_counter = drm_vblank_no_hw_counter,
316 .enable_vblank = rockchip_drm_crtc_enable_vblank, 285 .enable_vblank = rockchip_drm_crtc_enable_vblank,
317 .disable_vblank = rockchip_drm_crtc_disable_vblank, 286 .disable_vblank = rockchip_drm_crtc_disable_vblank,
318 .gem_vm_ops = &rockchip_drm_vm_ops, 287 .gem_vm_ops = &drm_gem_cma_vm_ops,
319 .gem_free_object = rockchip_gem_free_object, 288 .gem_free_object_unlocked = rockchip_gem_free_object,
320 .dumb_create = rockchip_gem_dumb_create, 289 .dumb_create = rockchip_gem_dumb_create,
321 .dumb_map_offset = rockchip_gem_dumb_map_offset, 290 .dumb_map_offset = rockchip_gem_dumb_map_offset,
322 .dumb_destroy = drm_gem_dumb_destroy, 291 .dumb_destroy = drm_gem_dumb_destroy,
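Taken together, the rockchip_drm_drv.c hunks retire the deprecated drm_driver .load/.unload/.preclose hooks: the component-master bind() now allocates the device, performs all mode-config, IOMMU, component and fbdev setup, and calls drm_dev_register() only at the very end. Registering last is also why the manual drm_connector_register() loop could go away, since registration publishes the connectors once the device is fully initialized. A condensed, hedged sketch of the ordering (error handling elided; drm_dev_alloc() still returns NULL on failure in this kernel):

    static int bind_ordering_sketch(struct device *dev)
    {
            struct drm_device *drm = drm_dev_alloc(&rockchip_drm_driver, dev);

            if (!drm)
                    return -ENOMEM;
            dev_set_drvdata(dev, drm);

            /* drm_mode_config_init(), iommu mapping, component_bind_all(),
             * drm_vblank_init(), fbdev setup ... */

            return drm_dev_register(drm, 0);        /* publish to userspace last */
    }
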
@@ -337,25 +306,38 @@ static struct drm_driver rockchip_drm_driver = {
337}; 306};
338 307
339#ifdef CONFIG_PM_SLEEP 308#ifdef CONFIG_PM_SLEEP
340static int rockchip_drm_sys_suspend(struct device *dev) 309void rockchip_drm_fb_suspend(struct drm_device *drm)
341{ 310{
342 struct drm_device *drm = dev_get_drvdata(dev); 311 struct rockchip_drm_private *priv = drm->dev_private;
343 struct drm_connector *connector;
344 312
345 if (!drm) 313 console_lock();
346 return 0; 314 drm_fb_helper_set_suspend(&priv->fbdev_helper, 1);
315 console_unlock();
316}
347 317
348 drm_modeset_lock_all(drm); 318void rockchip_drm_fb_resume(struct drm_device *drm)
349 list_for_each_entry(connector, &drm->mode_config.connector_list, head) { 319{
350 int old_dpms = connector->dpms; 320 struct rockchip_drm_private *priv = drm->dev_private;
351 321
352 if (connector->funcs->dpms) 322 console_lock();
353 connector->funcs->dpms(connector, DRM_MODE_DPMS_OFF); 323 drm_fb_helper_set_suspend(&priv->fbdev_helper, 0);
324 console_unlock();
325}
354 326
355 /* Set the old mode back to the connector for resume */ 327static int rockchip_drm_sys_suspend(struct device *dev)
356 connector->dpms = old_dpms; 328{
329 struct drm_device *drm = dev_get_drvdata(dev);
330 struct rockchip_drm_private *priv = drm->dev_private;
331
332 drm_kms_helper_poll_disable(drm);
333 rockchip_drm_fb_suspend(drm);
334
335 priv->state = drm_atomic_helper_suspend(drm);
336 if (IS_ERR(priv->state)) {
337 rockchip_drm_fb_resume(drm);
338 drm_kms_helper_poll_enable(drm);
339 return PTR_ERR(priv->state);
357 } 340 }
358 drm_modeset_unlock_all(drm);
359 341
360 return 0; 342 return 0;
361} 343}
@@ -363,47 +345,11 @@ static int rockchip_drm_sys_suspend(struct device *dev)
363static int rockchip_drm_sys_resume(struct device *dev) 345static int rockchip_drm_sys_resume(struct device *dev)
364{ 346{
365 struct drm_device *drm = dev_get_drvdata(dev); 347 struct drm_device *drm = dev_get_drvdata(dev);
366 struct drm_connector *connector; 348 struct rockchip_drm_private *priv = drm->dev_private;
367 enum drm_connector_status status;
368 bool changed = false;
369
370 if (!drm)
371 return 0;
372 349
373 drm_modeset_lock_all(drm); 350 drm_atomic_helper_resume(drm, priv->state);
374 list_for_each_entry(connector, &drm->mode_config.connector_list, head) { 351 rockchip_drm_fb_resume(drm);
375 int desired_mode = connector->dpms; 352 drm_kms_helper_poll_enable(drm);
376
377 /*
378 * at suspend time, we save dpms to connector->dpms,
379 * restore the old_dpms, and at current time, the connector
380 * dpms status must be DRM_MODE_DPMS_OFF.
381 */
382 connector->dpms = DRM_MODE_DPMS_OFF;
383
384 /*
385 * If the connector has been disconnected during suspend,
386 * disconnect it from the encoder and leave it off. We'll notify
387 * userspace at the end.
388 */
389 if (desired_mode == DRM_MODE_DPMS_ON) {
390 status = connector->funcs->detect(connector, true);
391 if (status == connector_status_disconnected) {
392 connector->encoder = NULL;
393 connector->status = status;
394 changed = true;
395 continue;
396 }
397 }
398 if (connector->funcs->dpms)
399 connector->funcs->dpms(connector, desired_mode);
400 }
401 drm_modeset_unlock_all(drm);
402
403 drm_helper_resume_force_mode(drm);
404
405 if (changed)
406 drm_kms_helper_hotplug_event(drm);
407 353
408 return 0; 354 return 0;
409} 355}
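System sleep no longer walks the connector list twiddling DPMS; it snapshots and replays the whole atomic state instead. Suspend stashes the state returned by drm_atomic_helper_suspend() (which disables all CRTCs), resume re-commits it, and fbdev/poll handling brackets both. Hedged sketch of the shape used above:

    static int sys_suspend_sketch(struct drm_device *drm, struct drm_atomic_state **saved)
    {
            drm_kms_helper_poll_disable(drm);
            /* fbdev console suspended here via drm_fb_helper_set_suspend()
             * under console_lock() */

            *saved = drm_atomic_helper_suspend(drm);
            if (IS_ERR(*saved)) {
                    drm_kms_helper_poll_enable(drm);
                    return PTR_ERR(*saved);
            }
            return 0;
    }

    static void sys_resume_sketch(struct drm_device *drm, struct drm_atomic_state *saved)
    {
            drm_atomic_helper_resume(drm, saved);   /* re-commits the snapshot */
            drm_kms_helper_poll_enable(drm);
    }
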
@@ -444,37 +390,6 @@ static void rockchip_add_endpoints(struct device *dev,
444 } 390 }
445} 391}
446 392
447static int rockchip_drm_bind(struct device *dev)
448{
449 struct drm_device *drm;
450 int ret;
451
452 drm = drm_dev_alloc(&rockchip_drm_driver, dev);
453 if (!drm)
454 return -ENOMEM;
455
456 ret = drm_dev_register(drm, 0);
457 if (ret)
458 goto err_free;
459
460 dev_set_drvdata(dev, drm);
461
462 return 0;
463
464err_free:
465 drm_dev_unref(drm);
466 return ret;
467}
468
469static void rockchip_drm_unbind(struct device *dev)
470{
471 struct drm_device *drm = dev_get_drvdata(dev);
472
473 drm_dev_unregister(drm);
474 drm_dev_unref(drm);
475 dev_set_drvdata(dev, NULL);
476}
477
478static const struct component_master_ops rockchip_drm_ops = { 393static const struct component_master_ops rockchip_drm_ops = {
479 .bind = rockchip_drm_bind, 394 .bind = rockchip_drm_bind,
480 .unbind = rockchip_drm_unbind, 395 .unbind = rockchip_drm_unbind,
@@ -518,6 +433,7 @@ static int rockchip_drm_platform_probe(struct platform_device *pdev)
518 is_support_iommu = false; 433 is_support_iommu = false;
519 } 434 }
520 435
436 of_node_put(iommu);
521 component_match_add(dev, &match, compare_of, port->parent); 437 component_match_add(dev, &match, compare_of, port->parent);
522 of_node_put(port); 438 of_node_put(port);
523 } 439 }
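The single added of_node_put() fixes a device-node refcount leak in the probe loop: of_parse_phandle() returns its node with an elevated reference that the caller must drop on every path, including the early "no IOMMU support" one. The general rule, sketched (the property name here is illustrative):

    struct device_node *np = of_parse_phandle(dev->of_node, "iommus", 0);

    if (np) {
            /* ... inspect np ... */
            of_node_put(np);        /* always balance the implicit get */
    }
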
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index 56f43a364c7f..ea3932940061 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -40,14 +40,6 @@ struct rockchip_crtc_funcs {
40 int (*enable_vblank)(struct drm_crtc *crtc); 40 int (*enable_vblank)(struct drm_crtc *crtc);
41 void (*disable_vblank)(struct drm_crtc *crtc); 41 void (*disable_vblank)(struct drm_crtc *crtc);
42 void (*wait_for_update)(struct drm_crtc *crtc); 42 void (*wait_for_update)(struct drm_crtc *crtc);
43 void (*cancel_pending_vblank)(struct drm_crtc *crtc, struct drm_file *file_priv);
44};
45
46struct rockchip_atomic_commit {
47 struct work_struct work;
48 struct drm_atomic_state *state;
49 struct drm_device *dev;
50 struct mutex lock;
51}; 43};
52 44
53struct rockchip_crtc_state { 45struct rockchip_crtc_state {
@@ -68,11 +60,9 @@ struct rockchip_drm_private {
68 struct drm_fb_helper fbdev_helper; 60 struct drm_fb_helper fbdev_helper;
69 struct drm_gem_object *fbdev_bo; 61 struct drm_gem_object *fbdev_bo;
70 const struct rockchip_crtc_funcs *crtc_funcs[ROCKCHIP_MAX_CRTC]; 62 const struct rockchip_crtc_funcs *crtc_funcs[ROCKCHIP_MAX_CRTC];
71 63 struct drm_atomic_state *state;
72 struct rockchip_atomic_commit commit;
73}; 64};
74 65
75void rockchip_drm_atomic_work(struct work_struct *work);
76int rockchip_register_crtc_funcs(struct drm_crtc *crtc, 66int rockchip_register_crtc_funcs(struct drm_crtc *crtc,
77 const struct rockchip_crtc_funcs *crtc_funcs); 67 const struct rockchip_crtc_funcs *crtc_funcs);
78void rockchip_unregister_crtc_funcs(struct drm_crtc *crtc); 68void rockchip_unregister_crtc_funcs(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
index 755cfdba61cd..55c52734c52d 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -20,6 +20,7 @@
20#include <drm/drm_crtc_helper.h> 20#include <drm/drm_crtc_helper.h>
21 21
22#include "rockchip_drm_drv.h" 22#include "rockchip_drm_drv.h"
23#include "rockchip_drm_fb.h"
23#include "rockchip_drm_gem.h" 24#include "rockchip_drm_gem.h"
24 25
25#define to_rockchip_fb(x) container_of(x, struct rockchip_drm_fb, fb) 26#define to_rockchip_fb(x) container_of(x, struct rockchip_drm_fb, fb)
@@ -43,14 +44,10 @@ struct drm_gem_object *rockchip_fb_get_gem_obj(struct drm_framebuffer *fb,
43static void rockchip_drm_fb_destroy(struct drm_framebuffer *fb) 44static void rockchip_drm_fb_destroy(struct drm_framebuffer *fb)
44{ 45{
45 struct rockchip_drm_fb *rockchip_fb = to_rockchip_fb(fb); 46 struct rockchip_drm_fb *rockchip_fb = to_rockchip_fb(fb);
46 struct drm_gem_object *obj;
47 int i; 47 int i;
48 48
49 for (i = 0; i < ROCKCHIP_MAX_FB_BUFFER; i++) { 49 for (i = 0; i < ROCKCHIP_MAX_FB_BUFFER; i++)
50 obj = rockchip_fb->obj[i]; 50 drm_gem_object_unreference_unlocked(rockchip_fb->obj[i]);
51 if (obj)
52 drm_gem_object_unreference_unlocked(obj);
53 }
54 51
55 drm_framebuffer_cleanup(fb); 52 drm_framebuffer_cleanup(fb);
56 kfree(rockchip_fb); 53 kfree(rockchip_fb);
@@ -228,87 +225,32 @@ rockchip_atomic_wait_for_complete(struct drm_device *dev, struct drm_atomic_stat
228} 225}
229 226
230static void 227static void
231rockchip_atomic_commit_complete(struct rockchip_atomic_commit *commit) 228rockchip_atomic_commit_tail(struct drm_atomic_state *state)
232{ 229{
233 struct drm_atomic_state *state = commit->state; 230 struct drm_device *dev = state->dev;
234 struct drm_device *dev = commit->dev;
235 231
236 /*
237 * TODO: do fence wait here.
238 */
239
240 /*
241 * Rockchip crtc support runtime PM, can't update display planes
242 * when crtc is disabled.
243 *
244 * drm_atomic_helper_commit comments detail that:
245 * For drivers supporting runtime PM the recommended sequence is
246 *
247 * drm_atomic_helper_commit_modeset_disables(dev, state);
248 *
249 * drm_atomic_helper_commit_modeset_enables(dev, state);
250 *
251 * drm_atomic_helper_commit_planes(dev, state, true);
252 *
253 * See the kerneldoc entries for these three functions for more details.
254 */
255 drm_atomic_helper_commit_modeset_disables(dev, state); 232 drm_atomic_helper_commit_modeset_disables(dev, state);
256 233
257 drm_atomic_helper_commit_modeset_enables(dev, state); 234 drm_atomic_helper_commit_modeset_enables(dev, state);
258 235
259 drm_atomic_helper_commit_planes(dev, state, true); 236 drm_atomic_helper_commit_planes(dev, state, true);
260 237
238 drm_atomic_helper_commit_hw_done(state);
239
261 rockchip_atomic_wait_for_complete(dev, state); 240 rockchip_atomic_wait_for_complete(dev, state);
262 241
263 drm_atomic_helper_cleanup_planes(dev, state); 242 drm_atomic_helper_cleanup_planes(dev, state);
264
265 drm_atomic_state_free(state);
266}
267
268void rockchip_drm_atomic_work(struct work_struct *work)
269{
270 struct rockchip_atomic_commit *commit = container_of(work,
271 struct rockchip_atomic_commit, work);
272
273 rockchip_atomic_commit_complete(commit);
274} 243}
275 244
276int rockchip_drm_atomic_commit(struct drm_device *dev, 245static struct drm_mode_config_helper_funcs rockchip_mode_config_helpers = {
277 struct drm_atomic_state *state, 246 .atomic_commit_tail = rockchip_atomic_commit_tail,
278 bool nonblock) 247};
279{
280 struct rockchip_drm_private *private = dev->dev_private;
281 struct rockchip_atomic_commit *commit = &private->commit;
282 int ret;
283
284 ret = drm_atomic_helper_prepare_planes(dev, state);
285 if (ret)
286 return ret;
287
288 /* serialize outstanding nonblocking commits */
289 mutex_lock(&commit->lock);
290 flush_work(&commit->work);
291
292 drm_atomic_helper_swap_state(dev, state);
293
294 commit->dev = dev;
295 commit->state = state;
296
297 if (nonblock)
298 schedule_work(&commit->work);
299 else
300 rockchip_atomic_commit_complete(commit);
301
302 mutex_unlock(&commit->lock);
303
304 return 0;
305}
306 248
307static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = { 249static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = {
308 .fb_create = rockchip_user_fb_create, 250 .fb_create = rockchip_user_fb_create,
309 .output_poll_changed = rockchip_drm_output_poll_changed, 251 .output_poll_changed = rockchip_drm_output_poll_changed,
310 .atomic_check = drm_atomic_helper_check, 252 .atomic_check = drm_atomic_helper_check,
311 .atomic_commit = rockchip_drm_atomic_commit, 253 .atomic_commit = drm_atomic_helper_commit,
312}; 254};
313 255
314struct drm_framebuffer * 256struct drm_framebuffer *
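The driver-private commit work queue is gone: .atomic_commit now points at drm_atomic_helper_commit(), and the only Rockchip-specific part left is the commit tail installed through drm_mode_config_helper_funcs, where the driver can still wait for the VOP to latch the new configuration before cleaning up planes. Calling drm_atomic_helper_commit_hw_done() as soon as the hardware has been programmed is what lets the helpers' nonblocking machinery release the next queued commit. Condensed shape, as used above:

    static void commit_tail_sketch(struct drm_atomic_state *state)
    {
            struct drm_device *dev = state->dev;

            drm_atomic_helper_commit_modeset_disables(dev, state);
            drm_atomic_helper_commit_modeset_enables(dev, state);
            drm_atomic_helper_commit_planes(dev, state, true);

            drm_atomic_helper_commit_hw_done(state);        /* unblocks the next commit */

            /* driver-specific wait for the hardware to take the new state */
            drm_atomic_helper_cleanup_planes(dev, state);
    }
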
@@ -339,4 +281,5 @@ void rockchip_drm_mode_config_init(struct drm_device *dev)
339 dev->mode_config.max_height = 4096; 281 dev->mode_config.max_height = 4096;
340 282
341 dev->mode_config.funcs = &rockchip_drm_mode_config_funcs; 283 dev->mode_config.funcs = &rockchip_drm_mode_config_funcs;
284 dev->mode_config.helper_private = &rockchip_mode_config_helpers;
342} 285}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
index f261512bb4a0..207e01de6e32 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
@@ -108,7 +108,7 @@ static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
108 fbi->screen_size = rk_obj->base.size; 108 fbi->screen_size = rk_obj->base.size;
109 fbi->fix.smem_len = rk_obj->base.size; 109 fbi->fix.smem_len = rk_obj->base.size;
110 110
111 DRM_DEBUG_KMS("FB [%dx%d]-%d kvaddr=%p offset=%ld size=%d\n", 111 DRM_DEBUG_KMS("FB [%dx%d]-%d kvaddr=%p offset=%ld size=%zu\n",
112 fb->width, fb->height, fb->depth, rk_obj->kvaddr, 112 fb->width, fb->height, fb->depth, rk_obj->kvaddr,
113 offset, size); 113 offset, size);
114 114
@@ -156,9 +156,6 @@ int rockchip_drm_fbdev_init(struct drm_device *dev)
156 goto err_drm_fb_helper_fini; 156 goto err_drm_fb_helper_fini;
157 } 157 }
158 158
159 /* disable all the possible outputs/crtcs before entering KMS mode */
160 drm_helper_disable_unused_functions(dev);
161
162 ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP); 159 ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP);
163 if (ret < 0) { 160 if (ret < 0) {
164 dev_err(dev->dev, "Failed to set initial hw config - %d.\n", 161 dev_err(dev->dev, "Failed to set initial hw config - %d.\n",
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index 9c2d8a894093..059e902f872d 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -38,7 +38,7 @@ static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj,
38 &rk_obj->dma_addr, GFP_KERNEL, 38 &rk_obj->dma_addr, GFP_KERNEL,
39 &rk_obj->dma_attrs); 39 &rk_obj->dma_attrs);
40 if (!rk_obj->kvaddr) { 40 if (!rk_obj->kvaddr) {
41 DRM_ERROR("failed to allocate %#x byte dma buffer", obj->size); 41 DRM_ERROR("failed to allocate %zu byte dma buffer", obj->size);
42 return -ENOMEM; 42 return -ENOMEM;
43 } 43 }
44 44
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 1c4d5b5a70a2..91305eb7d312 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -98,7 +98,9 @@ struct vop_win {
98 const struct vop_win_data *data; 98 const struct vop_win_data *data;
99 struct vop *vop; 99 struct vop *vop;
100 100
101 struct vop_plane_state state; 101 /* protected by dev->event_lock */
102 bool enable;
103 dma_addr_t yrgb_mst;
102}; 104};
103 105
104struct vop { 106struct vop {
@@ -112,6 +114,8 @@ struct vop {
112 bool vsync_work_pending; 114 bool vsync_work_pending;
113 struct completion dsp_hold_completion; 115 struct completion dsp_hold_completion;
114 struct completion wait_update_complete; 116 struct completion wait_update_complete;
117
118 /* protected by dev->event_lock */
115 struct drm_pending_vblank_event *event; 119 struct drm_pending_vblank_event *event;
116 120
117 const struct vop_data *data; 121 const struct vop_data *data;
@@ -324,9 +328,9 @@ static void scl_vop_cal_scl_fac(struct vop *vop, const struct vop_win_data *win,
324 scl_cal_scale2(src_h, dst_h)); 328 scl_cal_scale2(src_h, dst_h));
325 if (is_yuv) { 329 if (is_yuv) {
326 VOP_SCL_SET(vop, win, scale_cbcr_x, 330 VOP_SCL_SET(vop, win, scale_cbcr_x,
327 scl_cal_scale2(src_w, dst_w)); 331 scl_cal_scale2(cbcr_src_w, dst_w));
328 VOP_SCL_SET(vop, win, scale_cbcr_y, 332 VOP_SCL_SET(vop, win, scale_cbcr_y,
329 scl_cal_scale2(src_h, dst_h)); 333 scl_cal_scale2(cbcr_src_h, dst_h));
330 } 334 }
331 return; 335 return;
332 } 336 }
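The scaler fix feeds the CbCr scaler the chroma plane's own source size rather than the luma size: semi-planar YUV subsamples chroma (half width for 4:2:2, half width and height for 4:2:0), so reusing src_w/src_h overscaled the chroma plane. The chroma source dimensions are presumably derived from the framebuffer's subsampling, roughly:

    int hsub = drm_format_horz_chroma_subsampling(fb->pixel_format);
    int vsub = drm_format_vert_chroma_subsampling(fb->pixel_format);
    int cbcr_src_w = src_w / hsub;  /* 2 for NV12/NV16, 1 for RGB */
    int cbcr_src_h = src_h / vsub;  /* 2 for NV12, 1 for NV16 */
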
@@ -431,9 +435,6 @@ static void vop_enable(struct drm_crtc *crtc)
431 struct vop *vop = to_vop(crtc); 435 struct vop *vop = to_vop(crtc);
432 int ret; 436 int ret;
433 437
434 if (vop->is_enabled)
435 return;
436
437 ret = pm_runtime_get_sync(vop->dev); 438 ret = pm_runtime_get_sync(vop->dev);
438 if (ret < 0) { 439 if (ret < 0) {
439 dev_err(vop->dev, "failed to get pm runtime: %d\n", ret); 440 dev_err(vop->dev, "failed to get pm runtime: %d\n", ret);
@@ -501,8 +502,7 @@ static void vop_crtc_disable(struct drm_crtc *crtc)
501 struct vop *vop = to_vop(crtc); 502 struct vop *vop = to_vop(crtc);
502 int i; 503 int i;
503 504
504 if (!vop->is_enabled) 505 WARN_ON(vop->event);
505 return;
506 506
507 /* 507 /*
508 * We need to make sure that all windows are disabled before we 508 * We need to make sure that all windows are disabled before we
@@ -553,6 +553,14 @@ static void vop_crtc_disable(struct drm_crtc *crtc)
553 clk_disable(vop->aclk); 553 clk_disable(vop->aclk);
554 clk_disable(vop->hclk); 554 clk_disable(vop->hclk);
555 pm_runtime_put(vop->dev); 555 pm_runtime_put(vop->dev);
556
557 if (crtc->state->event && !crtc->state->active) {
558 spin_lock_irq(&crtc->dev->event_lock);
559 drm_crtc_send_vblank_event(crtc, crtc->state->event);
560 spin_unlock_irq(&crtc->dev->event_lock);
561
562 crtc->state->event = NULL;
563 }
556} 564}
557 565
558static void vop_plane_destroy(struct drm_plane *plane) 566static void vop_plane_destroy(struct drm_plane *plane)
@@ -618,6 +626,7 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
618 626
619 ret = drm_plane_helper_check_update(plane, crtc, state->fb, 627 ret = drm_plane_helper_check_update(plane, crtc, state->fb,
620 src, dest, &clip, 628 src, dest, &clip,
629 state->rotation,
621 min_scale, 630 min_scale,
622 max_scale, 631 max_scale,
623 true, true, &visible); 632 true, true, &visible);
@@ -658,6 +667,11 @@ static void vop_plane_atomic_disable(struct drm_plane *plane,
658 if (!old_state->crtc) 667 if (!old_state->crtc)
659 return; 668 return;
660 669
670 spin_lock_irq(&plane->dev->event_lock);
671 vop_win->enable = false;
672 vop_win->yrgb_mst = 0;
673 spin_unlock_irq(&plane->dev->event_lock);
674
661 spin_lock(&vop->reg_lock); 675 spin_lock(&vop->reg_lock);
662 676
663 VOP_WIN_SET(vop, win, enable, 0); 677 VOP_WIN_SET(vop, win, enable, 0);
@@ -692,7 +706,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
692 /* 706 /*
693 * can't update plane when vop is disabled. 707 * can't update plane when vop is disabled.
694 */ 708 */
695 if (!crtc) 709 if (WARN_ON(!crtc))
696 return; 710 return;
697 711
698 if (WARN_ON(!vop->is_enabled)) 712 if (WARN_ON(!vop->is_enabled))
@@ -721,6 +735,11 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
721 offset += (src->y1 >> 16) * fb->pitches[0]; 735 offset += (src->y1 >> 16) * fb->pitches[0];
722 vop_plane_state->yrgb_mst = rk_obj->dma_addr + offset + fb->offsets[0]; 736 vop_plane_state->yrgb_mst = rk_obj->dma_addr + offset + fb->offsets[0];
723 737
738 spin_lock_irq(&plane->dev->event_lock);
739 vop_win->enable = true;
740 vop_win->yrgb_mst = vop_plane_state->yrgb_mst;
741 spin_unlock_irq(&plane->dev->event_lock);
742
724 spin_lock(&vop->reg_lock); 743 spin_lock(&vop->reg_lock);
725 744
726 VOP_WIN_SET(vop, win, format, vop_plane_state->format); 745 VOP_WIN_SET(vop, win, format, vop_plane_state->format);
@@ -779,7 +798,7 @@ static const struct drm_plane_helper_funcs plane_helper_funcs = {
779 .atomic_disable = vop_plane_atomic_disable, 798 .atomic_disable = vop_plane_atomic_disable,
780}; 799};
781 800
782void vop_atomic_plane_reset(struct drm_plane *plane) 801static void vop_atomic_plane_reset(struct drm_plane *plane)
783{ 802{
784 struct vop_plane_state *vop_plane_state = 803 struct vop_plane_state *vop_plane_state =
785 to_vop_plane_state(plane->state); 804 to_vop_plane_state(plane->state);
@@ -796,7 +815,7 @@ void vop_atomic_plane_reset(struct drm_plane *plane)
796 plane->state->plane = plane; 815 plane->state->plane = plane;
797} 816}
798 817
799struct drm_plane_state * 818static struct drm_plane_state *
800vop_atomic_plane_duplicate_state(struct drm_plane *plane) 819vop_atomic_plane_duplicate_state(struct drm_plane *plane)
801{ 820{
802 struct vop_plane_state *old_vop_plane_state; 821 struct vop_plane_state *old_vop_plane_state;
@@ -876,30 +895,10 @@ static void vop_crtc_wait_for_update(struct drm_crtc *crtc)
876 WARN_ON(!wait_for_completion_timeout(&vop->wait_update_complete, 100)); 895 WARN_ON(!wait_for_completion_timeout(&vop->wait_update_complete, 100));
877} 896}
878 897
879static void vop_crtc_cancel_pending_vblank(struct drm_crtc *crtc,
880 struct drm_file *file_priv)
881{
882 struct drm_device *drm = crtc->dev;
883 struct vop *vop = to_vop(crtc);
884 struct drm_pending_vblank_event *e;
885 unsigned long flags;
886
887 spin_lock_irqsave(&drm->event_lock, flags);
888 e = vop->event;
889 if (e && e->base.file_priv == file_priv) {
890 vop->event = NULL;
891
892 e->base.destroy(&e->base);
893 file_priv->event_space += sizeof(e->event);
894 }
895 spin_unlock_irqrestore(&drm->event_lock, flags);
896}
897
898static const struct rockchip_crtc_funcs private_crtc_funcs = { 898static const struct rockchip_crtc_funcs private_crtc_funcs = {
899 .enable_vblank = vop_crtc_enable_vblank, 899 .enable_vblank = vop_crtc_enable_vblank,
900 .disable_vblank = vop_crtc_disable_vblank, 900 .disable_vblank = vop_crtc_disable_vblank,
901 .wait_for_update = vop_crtc_wait_for_update, 901 .wait_for_update = vop_crtc_wait_for_update,
902 .cancel_pending_vblank = vop_crtc_cancel_pending_vblank,
903}; 902};
904 903
905static bool vop_crtc_mode_fixup(struct drm_crtc *crtc, 904static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -931,6 +930,8 @@ static void vop_crtc_enable(struct drm_crtc *crtc)
931 u16 vact_end = vact_st + vdisplay; 930 u16 vact_end = vact_st + vdisplay;
932 uint32_t val; 931 uint32_t val;
933 932
933 WARN_ON(vop->event);
934
934 vop_enable(crtc); 935 vop_enable(crtc);
935 /* 936 /*
936 * If dclk rate is zero, mean that scanout is stop, 937 * If dclk rate is zero, mean that scanout is stop,
@@ -1027,12 +1028,15 @@ static void vop_crtc_atomic_begin(struct drm_crtc *crtc,
1027{ 1028{
1028 struct vop *vop = to_vop(crtc); 1029 struct vop *vop = to_vop(crtc);
1029 1030
1031 spin_lock_irq(&crtc->dev->event_lock);
1030 if (crtc->state->event) { 1032 if (crtc->state->event) {
1031 WARN_ON(drm_crtc_vblank_get(crtc) != 0); 1033 WARN_ON(drm_crtc_vblank_get(crtc) != 0);
1034 WARN_ON(vop->event);
1032 1035
1033 vop->event = crtc->state->event; 1036 vop->event = crtc->state->event;
1034 crtc->state->event = NULL; 1037 crtc->state->event = NULL;
1035 } 1038 }
1039 spin_unlock_irq(&crtc->dev->event_lock);
1036} 1040}
1037 1041
1038static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = { 1042static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = {
@@ -1048,6 +1052,17 @@ static void vop_crtc_destroy(struct drm_crtc *crtc)
1048 drm_crtc_cleanup(crtc); 1052 drm_crtc_cleanup(crtc);
1049} 1053}
1050 1054
1055static void vop_crtc_reset(struct drm_crtc *crtc)
1056{
1057 if (crtc->state)
1058 __drm_atomic_helper_crtc_destroy_state(crtc->state);
1059 kfree(crtc->state);
1060
1061 crtc->state = kzalloc(sizeof(struct rockchip_crtc_state), GFP_KERNEL);
1062 if (crtc->state)
1063 crtc->state->crtc = crtc;
1064}
1065
1051static struct drm_crtc_state *vop_crtc_duplicate_state(struct drm_crtc *crtc) 1066static struct drm_crtc_state *vop_crtc_duplicate_state(struct drm_crtc *crtc)
1052{ 1067{
1053 struct rockchip_crtc_state *rockchip_state; 1068 struct rockchip_crtc_state *rockchip_state;
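Replacing drm_atomic_helper_crtc_reset() with a driver hook is needed because the CRTC state is subclassed (rockchip_crtc_state carries output_mode/output_type): the generic reset would allocate a bare drm_crtc_state, too small for the fields that atomic_check writes. The hook therefore frees whatever is there and allocates the subclass, mirroring what duplicate_state already does; pattern sketch:

    static void crtc_reset_sketch(struct drm_crtc *crtc)
    {
            if (crtc->state)
                    __drm_atomic_helper_crtc_destroy_state(crtc->state);
            kfree(crtc->state);

            /* allocate the subclass so later to_rockchip_crtc_state() casts are valid */
            crtc->state = kzalloc(sizeof(struct rockchip_crtc_state), GFP_KERNEL);
            if (crtc->state)
                    crtc->state->crtc = crtc;
    }
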
@@ -1073,23 +1088,21 @@ static const struct drm_crtc_funcs vop_crtc_funcs = {
1073 .set_config = drm_atomic_helper_set_config, 1088 .set_config = drm_atomic_helper_set_config,
1074 .page_flip = drm_atomic_helper_page_flip, 1089 .page_flip = drm_atomic_helper_page_flip,
1075 .destroy = vop_crtc_destroy, 1090 .destroy = vop_crtc_destroy,
1076 .reset = drm_atomic_helper_crtc_reset, 1091 .reset = vop_crtc_reset,
1077 .atomic_duplicate_state = vop_crtc_duplicate_state, 1092 .atomic_duplicate_state = vop_crtc_duplicate_state,
1078 .atomic_destroy_state = vop_crtc_destroy_state, 1093 .atomic_destroy_state = vop_crtc_destroy_state,
1079}; 1094};
1080 1095
1081static bool vop_win_pending_is_complete(struct vop_win *vop_win) 1096static bool vop_win_pending_is_complete(struct vop_win *vop_win)
1082{ 1097{
1083 struct drm_plane *plane = &vop_win->base;
1084 struct vop_plane_state *state = to_vop_plane_state(plane->state);
1085 dma_addr_t yrgb_mst; 1098 dma_addr_t yrgb_mst;
1086 1099
1087 if (!state->enable) 1100 if (!vop_win->enable)
1088 return VOP_WIN_GET(vop_win->vop, vop_win->data, enable) == 0; 1101 return VOP_WIN_GET(vop_win->vop, vop_win->data, enable) == 0;
1089 1102
1090 yrgb_mst = VOP_WIN_GET_YRGBADDR(vop_win->vop, vop_win->data); 1103 yrgb_mst = VOP_WIN_GET_YRGBADDR(vop_win->vop, vop_win->data);
1091 1104
1092 return yrgb_mst == state->yrgb_mst; 1105 return yrgb_mst == vop_win->yrgb_mst;
1093} 1106}
1094 1107
1095static void vop_handle_vblank(struct vop *vop) 1108static void vop_handle_vblank(struct vop *vop)
@@ -1104,15 +1117,16 @@ static void vop_handle_vblank(struct vop *vop)
1104 return; 1117 return;
1105 } 1118 }
1106 1119
1120 spin_lock_irqsave(&drm->event_lock, flags);
1107 if (vop->event) { 1121 if (vop->event) {
1108 spin_lock_irqsave(&drm->event_lock, flags);
1109 1122
1110 drm_crtc_send_vblank_event(crtc, vop->event); 1123 drm_crtc_send_vblank_event(crtc, vop->event);
1111 drm_crtc_vblank_put(crtc); 1124 drm_crtc_vblank_put(crtc);
1112 vop->event = NULL; 1125 vop->event = NULL;
1113 1126
1114 spin_unlock_irqrestore(&drm->event_lock, flags);
1115 } 1127 }
1128 spin_unlock_irqrestore(&drm->event_lock, flags);
1129
1116 if (!completion_done(&vop->wait_update_complete)) 1130 if (!completion_done(&vop->wait_update_complete))
1117 complete(&vop->wait_update_complete); 1131 complete(&vop->wait_update_complete);
1118} 1132}
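The vop.c hunks above put vop->event and the per-window enable/yrgb_mst shadow consistently under dev->event_lock: atomic_begin takes the lock while claiming crtc->state->event, the plane hooks update their shadow under it, and the vblank handler now tests vop->event only with the lock held, so a flip completion can no longer race against a new commit arming the next event. The two halves of the discipline, side by side (sketch):

    /* commit path (process context) */
    spin_lock_irq(&crtc->dev->event_lock);
    if (crtc->state->event) {
            WARN_ON(drm_crtc_vblank_get(crtc) != 0);
            vop->event = crtc->state->event;
            crtc->state->event = NULL;
    }
    spin_unlock_irq(&crtc->dev->event_lock);

    /* vblank interrupt path */
    spin_lock_irqsave(&drm->event_lock, flags);
    if (vop->event) {
            drm_crtc_send_vblank_event(crtc, vop->event);
            drm_crtc_vblank_put(crtc);
            vop->event = NULL;
    }
    spin_unlock_irqrestore(&drm->event_lock, flags);
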
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index 3166b46a5893..919992cdc97e 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -190,7 +190,7 @@ static const struct vop_data rk3288_vop = {
190 .win_size = ARRAY_SIZE(rk3288_vop_win_data), 190 .win_size = ARRAY_SIZE(rk3288_vop_win_data),
191}; 191};
192 192
193static const struct vop_scl_regs rk3066_win_scl = { 193static const struct vop_scl_regs rk3036_win_scl = {
194 .scale_yrgb_x = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0), 194 .scale_yrgb_x = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0),
195 .scale_yrgb_y = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 16), 195 .scale_yrgb_y = VOP_REG(RK3036_WIN0_SCL_FACTOR_YRGB, 0xffff, 16),
196 .scale_cbcr_x = VOP_REG(RK3036_WIN0_SCL_FACTOR_CBR, 0xffff, 0x0), 196 .scale_cbcr_x = VOP_REG(RK3036_WIN0_SCL_FACTOR_CBR, 0xffff, 0x0),
@@ -198,7 +198,7 @@ static const struct vop_scl_regs rk3066_win_scl = {
198}; 198};
199 199
200static const struct vop_win_phy rk3036_win0_data = { 200static const struct vop_win_phy rk3036_win0_data = {
201 .scl = &rk3066_win_scl, 201 .scl = &rk3036_win_scl,
202 .data_formats = formats_win_full, 202 .data_formats = formats_win_full,
203 .nformats = ARRAY_SIZE(formats_win_full), 203 .nformats = ARRAY_SIZE(formats_win_full),
204 .enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 0), 204 .enable = VOP_REG(RK3036_SYS_CTRL, 0x1, 0),
@@ -210,6 +210,7 @@ static const struct vop_win_phy rk3036_win0_data = {
210 .yrgb_mst = VOP_REG(RK3036_WIN0_YRGB_MST, 0xffffffff, 0), 210 .yrgb_mst = VOP_REG(RK3036_WIN0_YRGB_MST, 0xffffffff, 0),
211 .uv_mst = VOP_REG(RK3036_WIN0_CBR_MST, 0xffffffff, 0), 211 .uv_mst = VOP_REG(RK3036_WIN0_CBR_MST, 0xffffffff, 0),
212 .yrgb_vir = VOP_REG(RK3036_WIN0_VIR, 0xffff, 0), 212 .yrgb_vir = VOP_REG(RK3036_WIN0_VIR, 0xffff, 0),
213 .uv_vir = VOP_REG(RK3036_WIN0_VIR, 0x1fff, 16),
213}; 214};
214 215
215static const struct vop_win_phy rk3036_win1_data = { 216static const struct vop_win_phy rk3036_win1_data = {
@@ -299,7 +300,7 @@ static int vop_remove(struct platform_device *pdev)
299 return 0; 300 return 0;
300} 301}
301 302
302struct platform_driver vop_platform_driver = { 303static struct platform_driver vop_platform_driver = {
303 .probe = vop_probe, 304 .probe = vop_probe,
304 .remove = vop_remove, 305 .remove = vop_remove,
305 .driver = { 306 .driver = {
diff --git a/drivers/gpu/drm/shmobile/Kconfig b/drivers/gpu/drm/shmobile/Kconfig
index 8d17d00ddb4b..c987c826daa3 100644
--- a/drivers/gpu/drm/shmobile/Kconfig
+++ b/drivers/gpu/drm/shmobile/Kconfig
@@ -6,7 +6,6 @@ config DRM_SHMOBILE
6 select BACKLIGHT_CLASS_DEVICE 6 select BACKLIGHT_CLASS_DEVICE
7 select BACKLIGHT_LCD_SUPPORT 7 select BACKLIGHT_LCD_SUPPORT
8 select DRM_KMS_HELPER 8 select DRM_KMS_HELPER
9 select DRM_KMS_FB_HELPER
10 select DRM_KMS_CMA_HELPER 9 select DRM_KMS_CMA_HELPER
11 select DRM_GEM_CMA_HELPER 10 select DRM_GEM_CMA_HELPER
12 help 11 help
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index 1e154fc779d5..6547b1db460a 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -441,7 +441,7 @@ void shmob_drm_crtc_finish_page_flip(struct shmob_drm_crtc *scrtc)
441 scrtc->event = NULL; 441 scrtc->event = NULL;
442 if (event) { 442 if (event) {
443 drm_crtc_send_vblank_event(&scrtc->crtc, event); 443 drm_crtc_send_vblank_event(&scrtc->crtc, event);
444 drm_vblank_put(dev, 0); 444 drm_crtc_vblank_put(&scrtc->crtc);
445 } 445 }
446 spin_unlock_irqrestore(&dev->event_lock, flags); 446 spin_unlock_irqrestore(&dev->event_lock, flags);
447} 447}
@@ -467,7 +467,7 @@ static int shmob_drm_crtc_page_flip(struct drm_crtc *crtc,
467 467
468 if (event) { 468 if (event) {
469 event->pipe = 0; 469 event->pipe = 0;
470 drm_vblank_get(dev, 0); 470 drm_crtc_vblank_get(&scrtc->crtc);
471 spin_lock_irqsave(&dev->event_lock, flags); 471 spin_lock_irqsave(&dev->event_lock, flags);
472 scrtc->event = event; 472 scrtc->event = event;
473 spin_unlock_irqrestore(&dev->event_lock, flags); 473 spin_unlock_irqrestore(&dev->event_lock, flags);
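The shmobile page-flip code switches from the legacy pipe-indexed drm_vblank_get(dev, 0)/drm_vblank_put(dev, 0) to the per-CRTC variants, which take the drm_crtc directly and stop hard-coding pipe 0. The pairing rule is unchanged: the reference taken before arming the flip is dropped when the completion event is sent. Sketch:

    drm_crtc_vblank_get(&scrtc->crtc);              /* before arming the flip */
    /* ... program the new scanout address, wait for the vblank interrupt ... */
    drm_crtc_send_vblank_event(&scrtc->crtc, event);
    drm_crtc_vblank_put(&scrtc->crtc);              /* matches the _get above */
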
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index 7700ff172079..f0492603ea88 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -259,12 +259,11 @@ static struct drm_driver shmob_drm_driver = {
259 | DRIVER_PRIME, 259 | DRIVER_PRIME,
260 .load = shmob_drm_load, 260 .load = shmob_drm_load,
261 .unload = shmob_drm_unload, 261 .unload = shmob_drm_unload,
262 .set_busid = drm_platform_set_busid,
263 .irq_handler = shmob_drm_irq, 262 .irq_handler = shmob_drm_irq,
264 .get_vblank_counter = drm_vblank_no_hw_counter, 263 .get_vblank_counter = drm_vblank_no_hw_counter,
265 .enable_vblank = shmob_drm_enable_vblank, 264 .enable_vblank = shmob_drm_enable_vblank,
266 .disable_vblank = shmob_drm_disable_vblank, 265 .disable_vblank = shmob_drm_disable_vblank,
267 .gem_free_object = drm_gem_cma_free_object, 266 .gem_free_object_unlocked = drm_gem_cma_free_object,
268 .gem_vm_ops = &drm_gem_cma_vm_ops, 267 .gem_vm_ops = &drm_gem_cma_vm_ops,
269 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 268 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
270 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 269 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c
index 93ad8a5704d1..03defda77766 100644
--- a/drivers/gpu/drm/sis/sis_mm.c
+++ b/drivers/gpu/drm/sis/sis_mm.c
@@ -316,7 +316,7 @@ void sis_reclaim_buffers_locked(struct drm_device *dev,
316 struct sis_file_private *file_priv = file->driver_priv; 316 struct sis_file_private *file_priv = file->driver_priv;
317 struct sis_memblock *entry, *next; 317 struct sis_memblock *entry, *next;
318 318
319 if (!(file->minor->master && file->master->lock.hw_lock)) 319 if (!(dev->master && file->master->lock.hw_lock))
320 return; 320 return;
321 321
322 drm_legacy_idlelock_take(&file->master->lock); 322 drm_legacy_idlelock_take(&file->master->lock);
diff --git a/drivers/gpu/drm/sti/Kconfig b/drivers/gpu/drm/sti/Kconfig
index 5ad43a1bb260..494ab257f77c 100644
--- a/drivers/gpu/drm/sti/Kconfig
+++ b/drivers/gpu/drm/sti/Kconfig
@@ -7,5 +7,6 @@ config DRM_STI
7 select DRM_KMS_CMA_HELPER 7 select DRM_KMS_CMA_HELPER
8 select DRM_PANEL 8 select DRM_PANEL
9 select FW_LOADER 9 select FW_LOADER
10 select SND_SOC_HDMI_CODEC if SND_SOC
10 help 11 help
11 Choose this option to enable DRM on STM stiH41x chipset 12 Choose this option to enable DRM on STM stiH41x chipset
diff --git a/drivers/gpu/drm/sti/sti_awg_utils.c b/drivers/gpu/drm/sti/sti_awg_utils.c
index a516eb869f6f..2da7d6866d5d 100644
--- a/drivers/gpu/drm/sti/sti_awg_utils.c
+++ b/drivers/gpu/drm/sti/sti_awg_utils.c
@@ -6,6 +6,8 @@
6 6
7#include "sti_awg_utils.h" 7#include "sti_awg_utils.h"
8 8
9#define AWG_DELAY (-5)
10
9#define AWG_OPCODE_OFFSET 10 11#define AWG_OPCODE_OFFSET 10
10#define AWG_MAX_ARG 0x3ff 12#define AWG_MAX_ARG 0x3ff
11 13
@@ -125,7 +127,7 @@ static int awg_generate_line_signal(
125 val = timing->blanking_level; 127 val = timing->blanking_level;
126 ret |= awg_generate_instr(RPLSET, val, 0, 0, fwparams); 128 ret |= awg_generate_instr(RPLSET, val, 0, 0, fwparams);
127 129
128 val = timing->trailing_pixels - 1; 130 val = timing->trailing_pixels - 1 + AWG_DELAY;
129 ret |= awg_generate_instr(SKIP, val, 0, 0, fwparams); 131 ret |= awg_generate_instr(SKIP, val, 0, 0, fwparams);
130 } 132 }
131 133
diff --git a/drivers/gpu/drm/sti/sti_compositor.c b/drivers/gpu/drm/sti/sti_compositor.c
index 3d2fa3ab33df..134201ecc6fd 100644
--- a/drivers/gpu/drm/sti/sti_compositor.c
+++ b/drivers/gpu/drm/sti/sti_compositor.c
@@ -55,6 +55,26 @@ struct sti_compositor_data stih416_compositor_data = {
55 }, 55 },
56}; 56};
57 57
58int sti_compositor_debufs_init(struct sti_compositor *compo,
59 struct drm_minor *minor)
60{
61 int ret = 0, i;
62
63 for (i = 0; compo->vid[i]; i++) {
64 ret = vid_debugfs_init(compo->vid[i], minor);
65 if (ret)
66 return ret;
67 }
68
69 for (i = 0; compo->mixer[i]; i++) {
70 ret = sti_mixer_debugfs_init(compo->mixer[i], minor);
71 if (ret)
72 return ret;
73 }
74
75 return 0;
76}
77
58static int sti_compositor_bind(struct device *dev, 78static int sti_compositor_bind(struct device *dev,
59 struct device *master, 79 struct device *master,
60 void *data) 80 void *data)
@@ -234,12 +254,12 @@ static int sti_compositor_probe(struct platform_device *pdev)
 	}
 
 	/* Get reset resources */
-	compo->rst_main = devm_reset_control_get(dev, "compo-main");
+	compo->rst_main = devm_reset_control_get_shared(dev, "compo-main");
 	/* Take compo main out of reset */
 	if (!IS_ERR(compo->rst_main))
 		reset_control_deassert(compo->rst_main);
 
-	compo->rst_aux = devm_reset_control_get(dev, "compo-aux");
+	compo->rst_aux = devm_reset_control_get_shared(dev, "compo-aux");
 	/* Take compo aux out of reset */
 	if (!IS_ERR(compo->rst_aux))
 		reset_control_deassert(compo->rst_aux);
@@ -247,10 +267,12 @@ static int sti_compositor_probe(struct platform_device *pdev)
 	vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 0);
 	if (vtg_np)
 		compo->vtg_main = of_vtg_find(vtg_np);
+	of_node_put(vtg_np);
 
 	vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 1);
 	if (vtg_np)
 		compo->vtg_aux = of_vtg_find(vtg_np);
+	of_node_put(vtg_np);
 
 	platform_set_drvdata(pdev, compo);
 
diff --git a/drivers/gpu/drm/sti/sti_compositor.h b/drivers/gpu/drm/sti/sti_compositor.h
index 1a4a73dab11e..24444ef42a98 100644
--- a/drivers/gpu/drm/sti/sti_compositor.h
+++ b/drivers/gpu/drm/sti/sti_compositor.h
@@ -81,4 +81,7 @@ struct sti_compositor {
 	struct notifier_block vtg_vblank_nb;
 };
 
+int sti_compositor_debufs_init(struct sti_compositor *compo,
+			       struct drm_minor *minor);
+
 #endif
diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c
index e04deedabd4a..c7d734dc3cf4 100644
--- a/drivers/gpu/drm/sti/sti_crtc.c
+++ b/drivers/gpu/drm/sti/sti_crtc.c
@@ -23,22 +23,11 @@
 static void sti_crtc_enable(struct drm_crtc *crtc)
 {
 	struct sti_mixer *mixer = to_sti_mixer(crtc);
-	struct device *dev = mixer->dev;
-	struct sti_compositor *compo = dev_get_drvdata(dev);
 
 	DRM_DEBUG_DRIVER("\n");
 
 	mixer->status = STI_MIXER_READY;
 
-	/* Prepare and enable the compo IP clock */
-	if (mixer->id == STI_MIXER_MAIN) {
-		if (clk_prepare_enable(compo->clk_compo_main))
-			DRM_INFO("Failed to prepare/enable compo_main clk\n");
-	} else {
-		if (clk_prepare_enable(compo->clk_compo_aux))
-			DRM_INFO("Failed to prepare/enable compo_aux clk\n");
-	}
-
 	drm_crtc_vblank_on(crtc);
 }
 
@@ -57,9 +46,8 @@ sti_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode)
 	struct sti_mixer *mixer = to_sti_mixer(crtc);
 	struct device *dev = mixer->dev;
 	struct sti_compositor *compo = dev_get_drvdata(dev);
-	struct clk *clk;
+	struct clk *compo_clk, *pix_clk;
 	int rate = mode->clock * 1000;
-	int res;
 
 	DRM_DEBUG_KMS("CRTC:%d (%s) mode:%d (%s)\n",
 		      crtc->base.id, sti_mixer_to_str(mixer),
@@ -74,32 +62,46 @@ sti_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode)
 		      mode->vsync_start, mode->vsync_end,
 		      mode->vtotal, mode->type, mode->flags);
 
-	/* Set rate and prepare/enable pixel clock */
-	if (mixer->id == STI_MIXER_MAIN)
-		clk = compo->clk_pix_main;
-	else
-		clk = compo->clk_pix_aux;
+	if (mixer->id == STI_MIXER_MAIN) {
+		compo_clk = compo->clk_compo_main;
+		pix_clk = compo->clk_pix_main;
+	} else {
+		compo_clk = compo->clk_compo_aux;
+		pix_clk = compo->clk_pix_aux;
+	}
+
+	/* Prepare and enable the compo IP clock */
+	if (clk_prepare_enable(compo_clk)) {
+		DRM_INFO("Failed to prepare/enable compositor clk\n");
+		goto compo_error;
+	}
 
-	res = clk_set_rate(clk, rate);
-	if (res < 0) {
+	/* Set rate and prepare/enable pixel clock */
+	if (clk_set_rate(pix_clk, rate) < 0) {
 		DRM_ERROR("Cannot set rate (%dHz) for pix clk\n", rate);
-		return -EINVAL;
+		goto pix_error;
 	}
-	if (clk_prepare_enable(clk)) {
+	if (clk_prepare_enable(pix_clk)) {
 		DRM_ERROR("Failed to prepare/enable pix clk\n");
-		return -EINVAL;
+		goto pix_error;
 	}
 
 	sti_vtg_set_config(mixer->id == STI_MIXER_MAIN ?
 			   compo->vtg_main : compo->vtg_aux, &crtc->mode);
 
-	res = sti_mixer_active_video_area(mixer, &crtc->mode);
-	if (res) {
+	if (sti_mixer_active_video_area(mixer, &crtc->mode)) {
 		DRM_ERROR("Can't set active video area\n");
-		return -EINVAL;
+		goto mixer_error;
 	}
 
-	return res;
+	return 0;
+
+mixer_error:
+	clk_disable_unprepare(pix_clk);
+pix_error:
+	clk_disable_unprepare(compo_clk);
+compo_error:
+	return -EINVAL;
 }
 
 static void sti_crtc_disable(struct drm_crtc *crtc)
@@ -130,7 +132,6 @@ static void sti_crtc_disable(struct drm_crtc *crtc)
 static void
 sti_crtc_mode_set_nofb(struct drm_crtc *crtc)
 {
-	sti_crtc_enable(crtc);
 	sti_crtc_mode_set(crtc, &crtc->state->adjusted_mode);
 }
 
@@ -221,9 +222,7 @@ static void sti_crtc_atomic_flush(struct drm_crtc *crtc,
 static const struct drm_crtc_helper_funcs sti_crtc_helper_funcs = {
 	.enable = sti_crtc_enable,
 	.disable = sti_crtc_disabling,
-	.mode_set = drm_helper_crtc_mode_set,
 	.mode_set_nofb = sti_crtc_mode_set_nofb,
-	.mode_set_base = drm_helper_crtc_mode_set_base,
 	.atomic_begin = sti_crtc_atomic_begin,
 	.atomic_flush = sti_crtc_atomic_flush,
 };
@@ -331,6 +330,17 @@ void sti_crtc_disable_vblank(struct drm_device *drm_dev, unsigned int pipe)
 	}
 }
 
+static int sti_crtc_late_register(struct drm_crtc *crtc)
+{
+	struct sti_mixer *mixer = to_sti_mixer(crtc);
+	struct sti_compositor *compo = dev_get_drvdata(mixer->dev);
+
+	if (drm_crtc_index(crtc) == 0)
+		return sti_compositor_debufs_init(compo, crtc->dev->primary);
+
+	return 0;
+}
+
 static const struct drm_crtc_funcs sti_crtc_funcs = {
 	.set_config = drm_atomic_helper_set_config,
 	.page_flip = drm_atomic_helper_page_flip,
@@ -339,6 +349,7 @@ static const struct drm_crtc_funcs sti_crtc_funcs = {
 	.reset = drm_atomic_helper_crtc_reset,
 	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
 	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+	.late_register = sti_crtc_late_register,
 };
 
 bool sti_crtc_is_main(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
index 4e990299735c..a263bbba4119 100644
--- a/drivers/gpu/drm/sti/sti_cursor.c
+++ b/drivers/gpu/drm/sti/sti_cursor.c
@@ -105,12 +105,6 @@ static int cursor_dbg_show(struct seq_file *s, void *data)
 {
 	struct drm_info_node *node = s->private;
 	struct sti_cursor *cursor = (struct sti_cursor *)node->info_ent->data;
-	struct drm_device *dev = node->minor->dev;
-	int ret;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
 
 	seq_printf(s, "%s: (vaddr = 0x%p)",
 		   sti_plane_to_str(&cursor->plane), cursor->regs);
@@ -129,7 +123,6 @@ static int cursor_dbg_show(struct seq_file *s, void *data)
 	DBGFS_DUMP(CUR_AWE);
 	seq_puts(s, "\n");
 
-	mutex_unlock(&dev->struct_mutex);
 	return 0;
 }
 
@@ -336,6 +329,33 @@ static const struct drm_plane_helper_funcs sti_cursor_helpers_funcs = {
 	.atomic_disable = sti_cursor_atomic_disable,
 };
 
+static void sti_cursor_destroy(struct drm_plane *drm_plane)
+{
+	DRM_DEBUG_DRIVER("\n");
+
+	drm_plane_helper_disable(drm_plane);
+	drm_plane_cleanup(drm_plane);
+}
+
+static int sti_cursor_late_register(struct drm_plane *drm_plane)
+{
+	struct sti_plane *plane = to_sti_plane(drm_plane);
+	struct sti_cursor *cursor = to_sti_cursor(plane);
+
+	return cursor_debugfs_init(cursor, drm_plane->dev->primary);
+}
+
+struct drm_plane_funcs sti_cursor_plane_helpers_funcs = {
+	.update_plane = drm_atomic_helper_update_plane,
+	.disable_plane = drm_atomic_helper_disable_plane,
+	.destroy = sti_cursor_destroy,
+	.set_property = sti_plane_set_property,
+	.reset = drm_atomic_helper_plane_reset,
+	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+	.late_register = sti_cursor_late_register,
+};
+
 struct drm_plane *sti_cursor_create(struct drm_device *drm_dev,
 				    struct device *dev, int desc,
 				    void __iomem *baseaddr,
@@ -370,7 +390,7 @@ struct drm_plane *sti_cursor_create(struct drm_device *drm_dev,
 
 	res = drm_universal_plane_init(drm_dev, &cursor->plane.drm_plane,
 				       possible_crtcs,
-				       &sti_plane_helpers_funcs,
+				       &sti_cursor_plane_helpers_funcs,
 				       cursor_supported_formats,
 				       ARRAY_SIZE(cursor_supported_formats),
 				       DRM_PLANE_TYPE_CURSOR, NULL);
@@ -384,9 +404,6 @@ struct drm_plane *sti_cursor_create(struct drm_device *drm_dev,
 
 	sti_plane_init_property(&cursor->plane, DRM_PLANE_TYPE_CURSOR);
 
-	if (cursor_debugfs_init(cursor, drm_dev->primary))
-		DRM_ERROR("CURSOR debugfs setup failed\n");
-
 	return &cursor->plane.drm_plane;
 
 err_plane:
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index 872495e72294..96bd3d08b2d4 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -72,11 +72,6 @@ static int sti_drm_fps_dbg_show(struct seq_file *s, void *data)
 	struct drm_info_node *node = s->private;
 	struct drm_device *dev = node->minor->dev;
 	struct drm_plane *p;
-	int ret;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
 
 	list_for_each_entry(p, &dev->mode_config.plane_list, head) {
 		struct sti_plane *plane = to_sti_plane(p);
@@ -86,7 +81,6 @@ static int sti_drm_fps_dbg_show(struct seq_file *s, void *data)
 			   plane->fps_info.fips_str);
 	}
 
-	mutex_unlock(&dev->struct_mutex);
 	return 0;
 }
 
@@ -221,7 +215,7 @@ static int sti_atomic_commit(struct drm_device *drm,
 	 * the software side now.
 	 */
 
-	drm_atomic_helper_swap_state(drm, state);
+	drm_atomic_helper_swap_state(state, true);
 
 	if (nonblock)
 		sti_atomic_schedule(private, state);
@@ -232,8 +226,28 @@ static int sti_atomic_commit(struct drm_device *drm,
 	return 0;
 }
 
+static void sti_output_poll_changed(struct drm_device *ddev)
+{
+	struct sti_private *private = ddev->dev_private;
+
+	if (!ddev->mode_config.num_connector)
+		return;
+
+	if (private->fbdev) {
+		drm_fbdev_cma_hotplug_event(private->fbdev);
+		return;
+	}
+
+	private->fbdev = drm_fbdev_cma_init(ddev, 32,
+					    ddev->mode_config.num_crtc,
+					    ddev->mode_config.num_connector);
+	if (IS_ERR(private->fbdev))
+		private->fbdev = NULL;
+}
+
 static const struct drm_mode_config_funcs sti_mode_config_funcs = {
 	.fb_create = drm_fb_cma_create,
+	.output_poll_changed = sti_output_poll_changed,
 	.atomic_check = drm_atomic_helper_check,
 	.atomic_commit = sti_atomic_commit,
 };
@@ -254,45 +268,6 @@ static void sti_mode_config_init(struct drm_device *dev)
 	dev->mode_config.funcs = &sti_mode_config_funcs;
 }
 
-static int sti_load(struct drm_device *dev, unsigned long flags)
-{
-	struct sti_private *private;
-	int ret;
-
-	private = kzalloc(sizeof(*private), GFP_KERNEL);
-	if (!private) {
-		DRM_ERROR("Failed to allocate private\n");
-		return -ENOMEM;
-	}
-	dev->dev_private = (void *)private;
-	private->drm_dev = dev;
-
-	mutex_init(&private->commit.lock);
-	INIT_WORK(&private->commit.work, sti_atomic_work);
-
-	drm_mode_config_init(dev);
-	drm_kms_helper_poll_init(dev);
-
-	sti_mode_config_init(dev);
-
-	ret = component_bind_all(dev->dev, dev);
-	if (ret) {
-		drm_kms_helper_poll_fini(dev);
-		drm_mode_config_cleanup(dev);
-		kfree(private);
-		return ret;
-	}
-
-	drm_mode_config_reset(dev);
-
-	drm_helper_disable_unused_functions(dev);
-	drm_fbdev_cma_init(dev, 32,
-			   dev->mode_config.num_crtc,
-			   dev->mode_config.num_connector);
-
-	return 0;
-}
-
 static const struct file_operations sti_driver_fops = {
 	.owner = THIS_MODULE,
 	.open = drm_open,
@@ -309,8 +284,7 @@ static const struct file_operations sti_driver_fops = {
 static struct drm_driver sti_driver = {
 	.driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET |
 			   DRIVER_GEM | DRIVER_PRIME | DRIVER_ATOMIC,
-	.load = sti_load,
-	.gem_free_object = drm_gem_cma_free_object,
+	.gem_free_object_unlocked = drm_gem_cma_free_object,
 	.gem_vm_ops = &drm_gem_cma_vm_ops,
 	.dumb_create = drm_gem_cma_dumb_create,
 	.dumb_map_offset = drm_gem_cma_dumb_map_offset,
@@ -346,14 +320,88 @@ static int compare_of(struct device *dev, void *data)
 	return dev->of_node == data;
 }
 
+static int sti_init(struct drm_device *ddev)
+{
+	struct sti_private *private;
+
+	private = kzalloc(sizeof(*private), GFP_KERNEL);
+	if (!private)
+		return -ENOMEM;
+
+	ddev->dev_private = (void *)private;
+	dev_set_drvdata(ddev->dev, ddev);
+	private->drm_dev = ddev;
+
+	mutex_init(&private->commit.lock);
+	INIT_WORK(&private->commit.work, sti_atomic_work);
+
+	drm_mode_config_init(ddev);
+
+	sti_mode_config_init(ddev);
+
+	drm_kms_helper_poll_init(ddev);
+
+	return 0;
+}
+
+static void sti_cleanup(struct drm_device *ddev)
+{
+	struct sti_private *private = ddev->dev_private;
+
+	if (private->fbdev) {
+		drm_fbdev_cma_fini(private->fbdev);
+		private->fbdev = NULL;
+	}
+
+	drm_kms_helper_poll_fini(ddev);
+	drm_vblank_cleanup(ddev);
+	kfree(private);
+	ddev->dev_private = NULL;
+}
+
 static int sti_bind(struct device *dev)
 {
-	return drm_platform_init(&sti_driver, to_platform_device(dev));
+	struct drm_device *ddev;
+	int ret;
+
+	ddev = drm_dev_alloc(&sti_driver, dev);
+	if (!ddev)
+		return -ENOMEM;
+
+	ddev->platformdev = to_platform_device(dev);
+
+	ret = sti_init(ddev);
+	if (ret)
+		goto err_drm_dev_unref;
+
+	ret = component_bind_all(ddev->dev, ddev);
+	if (ret)
+		goto err_cleanup;
+
+	ret = drm_dev_register(ddev, 0);
+	if (ret)
+		goto err_register;
+
+	drm_mode_config_reset(ddev);
+
+	return 0;
+
+err_register:
+	drm_mode_config_cleanup(ddev);
+err_cleanup:
+	sti_cleanup(ddev);
+err_drm_dev_unref:
+	drm_dev_unref(ddev);
+	return ret;
 }
 
 static void sti_unbind(struct device *dev)
 {
-	drm_put_dev(dev_get_drvdata(dev));
+	struct drm_device *ddev = dev_get_drvdata(dev);
+
+	drm_dev_unregister(ddev);
+	sti_cleanup(ddev);
+	drm_dev_unref(ddev);
 }
 
 static const struct component_master_ops sti_ops = {
diff --git a/drivers/gpu/drm/sti/sti_drv.h b/drivers/gpu/drm/sti/sti_drv.h
index 30ddc20841c3..78ebe5e30f53 100644
--- a/drivers/gpu/drm/sti/sti_drv.h
+++ b/drivers/gpu/drm/sti/sti_drv.h
@@ -24,6 +24,7 @@ struct sti_private {
 	struct sti_compositor *compo;
 	struct drm_property *plane_zorder_property;
 	struct drm_device *drm_dev;
+	struct drm_fbdev_cma *fbdev;
 
 	struct {
 		struct drm_atomic_state *state;
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index 25f76632002c..00881eb4536e 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -177,12 +177,6 @@ static int dvo_dbg_show(struct seq_file *s, void *data)
 {
 	struct drm_info_node *node = s->private;
 	struct sti_dvo *dvo = (struct sti_dvo *)node->info_ent->data;
-	struct drm_device *dev = node->minor->dev;
-	int ret;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
 
 	seq_printf(s, "DVO: (vaddr = 0x%p)", dvo->regs);
 	DBGFS_DUMP(DVO_AWG_DIGSYNC_CTRL);
@@ -193,7 +187,6 @@ static int dvo_dbg_show(struct seq_file *s, void *data)
 	dvo_dbg_awg_microcode(s, dvo->regs + DVO_DIGSYNC_INSTR_I);
 	seq_puts(s, "\n");
 
-	mutex_unlock(&dev->struct_mutex);
 	return 0;
 }
 
@@ -384,20 +377,10 @@ static int sti_dvo_connector_mode_valid(struct drm_connector *connector,
 	return MODE_OK;
 }
 
-struct drm_encoder *sti_dvo_best_encoder(struct drm_connector *connector)
-{
-	struct sti_dvo_connector *dvo_connector
-		= to_sti_dvo_connector(connector);
-
-	/* Best encoder is the one associated during connector creation */
-	return dvo_connector->encoder;
-}
-
 static const
 struct drm_connector_helper_funcs sti_dvo_connector_helper_funcs = {
 	.get_modes = sti_dvo_connector_get_modes,
 	.mode_valid = sti_dvo_connector_mode_valid,
-	.best_encoder = sti_dvo_best_encoder,
 };
 
 static enum drm_connector_status
@@ -421,24 +404,29 @@ sti_dvo_connector_detect(struct drm_connector *connector, bool force)
 		return connector_status_disconnected;
 }
 
-static void sti_dvo_connector_destroy(struct drm_connector *connector)
+static int sti_dvo_late_register(struct drm_connector *connector)
 {
 	struct sti_dvo_connector *dvo_connector
 		= to_sti_dvo_connector(connector);
+	struct sti_dvo *dvo = dvo_connector->dvo;
 
-	drm_connector_unregister(connector);
-	drm_connector_cleanup(connector);
-	kfree(dvo_connector);
+	if (dvo_debugfs_init(dvo, dvo->drm_dev->primary)) {
+		DRM_ERROR("DVO debugfs setup failed\n");
+		return -EINVAL;
+	}
+
+	return 0;
 }
 
 static const struct drm_connector_funcs sti_dvo_connector_funcs = {
 	.dpms = drm_atomic_helper_connector_dpms,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.detect = sti_dvo_connector_detect,
-	.destroy = sti_dvo_connector_destroy,
+	.destroy = drm_connector_cleanup,
 	.reset = drm_atomic_helper_connector_reset,
 	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+	.late_register = sti_dvo_late_register,
 };
 
 static struct drm_encoder *sti_dvo_find_encoder(struct drm_device *dev)
@@ -509,26 +497,16 @@ static int sti_dvo_bind(struct device *dev, struct device *master, void *data)
 	drm_connector_helper_add(drm_connector,
 				 &sti_dvo_connector_helper_funcs);
 
-	err = drm_connector_register(drm_connector);
-	if (err)
-		goto err_connector;
-
 	err = drm_mode_connector_attach_encoder(drm_connector, encoder);
 	if (err) {
 		DRM_ERROR("Failed to attach a connector to a encoder\n");
 		goto err_sysfs;
 	}
 
-	if (dvo_debugfs_init(dvo, drm_dev->primary))
-		DRM_ERROR("DVO debugfs setup failed\n");
-
 	return 0;
 
 err_sysfs:
-	drm_connector_unregister(drm_connector);
-err_connector:
 	drm_bridge_remove(bridge);
-	drm_connector_cleanup(drm_connector);
 	return -EINVAL;
 }
 
@@ -602,6 +580,7 @@ static int sti_dvo_probe(struct platform_device *pdev)
 	dvo->panel_node = of_parse_phandle(np, "sti,panel", 0);
 	if (!dvo->panel_node)
 		DRM_ERROR("No panel associated to the dvo output\n");
+	of_node_put(dvo->panel_node);
 
 	platform_set_drvdata(pdev, dvo);
 
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index ff33c38da197..bf63086a3dc8 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -208,14 +208,8 @@ static int gdp_dbg_show(struct seq_file *s, void *data)
 {
 	struct drm_info_node *node = s->private;
 	struct sti_gdp *gdp = (struct sti_gdp *)node->info_ent->data;
-	struct drm_device *dev = node->minor->dev;
 	struct drm_plane *drm_plane = &gdp->plane.drm_plane;
 	struct drm_crtc *crtc = drm_plane->crtc;
-	int ret;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
 
 	seq_printf(s, "%s: (vaddr = 0x%p)",
 		   sti_plane_to_str(&gdp->plane), gdp->regs);
@@ -248,7 +242,6 @@ static int gdp_dbg_show(struct seq_file *s, void *data)
 	seq_printf(s, "  Connected to DRM CRTC #%d (%s)\n",
 		   crtc->base.id, sti_mixer_to_str(to_sti_mixer(crtc)));
 
-	mutex_unlock(&dev->struct_mutex);
 	return 0;
 }
 
@@ -279,13 +272,7 @@ static int gdp_node_dbg_show(struct seq_file *s, void *arg)
 {
 	struct drm_info_node *node = s->private;
 	struct sti_gdp *gdp = (struct sti_gdp *)node->info_ent->data;
-	struct drm_device *dev = node->minor->dev;
 	unsigned int b;
-	int ret;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
 
 	for (b = 0; b < GDP_NODE_NB_BANK; b++) {
 		seq_printf(s, "\n%s[%d].top", sti_plane_to_str(&gdp->plane), b);
@@ -294,7 +281,6 @@ static int gdp_node_dbg_show(struct seq_file *s, void *arg)
 		gdp_node_dump_node(s, gdp->node_list[b].btm_field);
 	}
 
-	mutex_unlock(&dev->struct_mutex);
 	return 0;
 }
 
@@ -880,6 +866,33 @@ static const struct drm_plane_helper_funcs sti_gdp_helpers_funcs = {
 	.atomic_disable = sti_gdp_atomic_disable,
 };
 
+static void sti_gdp_destroy(struct drm_plane *drm_plane)
+{
+	DRM_DEBUG_DRIVER("\n");
+
+	drm_plane_helper_disable(drm_plane);
+	drm_plane_cleanup(drm_plane);
+}
+
+static int sti_gdp_late_register(struct drm_plane *drm_plane)
+{
+	struct sti_plane *plane = to_sti_plane(drm_plane);
+	struct sti_gdp *gdp = to_sti_gdp(plane);
+
+	return gdp_debugfs_init(gdp, drm_plane->dev->primary);
+}
+
+struct drm_plane_funcs sti_gdp_plane_helpers_funcs = {
+	.update_plane = drm_atomic_helper_update_plane,
+	.disable_plane = drm_atomic_helper_disable_plane,
+	.destroy = sti_gdp_destroy,
+	.set_property = sti_plane_set_property,
+	.reset = drm_atomic_helper_plane_reset,
+	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+	.late_register = sti_gdp_late_register,
+};
+
 struct drm_plane *sti_gdp_create(struct drm_device *drm_dev,
 				 struct device *dev, int desc,
 				 void __iomem *baseaddr,
@@ -906,7 +919,7 @@ struct drm_plane *sti_gdp_create(struct drm_device *drm_dev,
 
 	res = drm_universal_plane_init(drm_dev, &gdp->plane.drm_plane,
 				       possible_crtcs,
-				       &sti_plane_helpers_funcs,
+				       &sti_gdp_plane_helpers_funcs,
 				       gdp_supported_formats,
 				       ARRAY_SIZE(gdp_supported_formats),
 				       type, NULL);
@@ -919,9 +932,6 @@ struct drm_plane *sti_gdp_create(struct drm_device *drm_dev,
 
 	sti_plane_init_property(&gdp->plane, type);
 
-	if (gdp_debugfs_init(gdp, drm_dev->primary))
-		DRM_ERROR("GDP debugfs setup failed\n");
-
 	return &gdp->plane.drm_plane;
 
 err:
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index f7d3464cdf09..8505569f75de 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -376,12 +376,6 @@ static int hda_dbg_show(struct seq_file *s, void *data)
 {
 	struct drm_info_node *node = s->private;
 	struct sti_hda *hda = (struct sti_hda *)node->info_ent->data;
-	struct drm_device *dev = node->minor->dev;
-	int ret;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
 
 	seq_printf(s, "HD Analog: (vaddr = 0x%p)", hda->regs);
 	DBGFS_DUMP(HDA_ANA_CFG);
@@ -397,7 +391,6 @@ static int hda_dbg_show(struct seq_file *s, void *data)
 	hda_dbg_video_dacs_ctrl(s, hda->video_dacs_ctrl);
 	seq_puts(s, "\n");
 
-	mutex_unlock(&dev->struct_mutex);
 	return 0;
 }
 
@@ -676,20 +669,10 @@ static int sti_hda_connector_mode_valid(struct drm_connector *connector,
 	return MODE_OK;
 }
 
-struct drm_encoder *sti_hda_best_encoder(struct drm_connector *connector)
-{
-	struct sti_hda_connector *hda_connector
-		= to_sti_hda_connector(connector);
-
-	/* Best encoder is the one associated during connector creation */
-	return hda_connector->encoder;
-}
-
 static const
 struct drm_connector_helper_funcs sti_hda_connector_helper_funcs = {
 	.get_modes = sti_hda_connector_get_modes,
 	.mode_valid = sti_hda_connector_mode_valid,
-	.best_encoder = sti_hda_best_encoder,
 };
 
 static enum drm_connector_status
@@ -698,24 +681,29 @@ sti_hda_connector_detect(struct drm_connector *connector, bool force)
 	return connector_status_connected;
 }
 
-static void sti_hda_connector_destroy(struct drm_connector *connector)
+static int sti_hda_late_register(struct drm_connector *connector)
 {
 	struct sti_hda_connector *hda_connector
 		= to_sti_hda_connector(connector);
+	struct sti_hda *hda = hda_connector->hda;
+
+	if (hda_debugfs_init(hda, hda->drm_dev->primary)) {
+		DRM_ERROR("HDA debugfs setup failed\n");
+		return -EINVAL;
+	}
 
-	drm_connector_unregister(connector);
-	drm_connector_cleanup(connector);
-	kfree(hda_connector);
+	return 0;
 }
 
 static const struct drm_connector_funcs sti_hda_connector_funcs = {
 	.dpms = drm_atomic_helper_connector_dpms,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.detect = sti_hda_connector_detect,
-	.destroy = sti_hda_connector_destroy,
+	.destroy = drm_connector_cleanup,
 	.reset = drm_atomic_helper_connector_reset,
 	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+	.late_register = sti_hda_late_register,
 };
 
 static struct drm_encoder *sti_hda_find_encoder(struct drm_device *dev)
@@ -773,10 +761,6 @@ static int sti_hda_bind(struct device *dev, struct device *master, void *data)
 	drm_connector_helper_add(drm_connector,
 				 &sti_hda_connector_helper_funcs);
 
-	err = drm_connector_register(drm_connector);
-	if (err)
-		goto err_connector;
-
 	err = drm_mode_connector_attach_encoder(drm_connector, encoder);
 	if (err) {
 		DRM_ERROR("Failed to attach a connector to a encoder\n");
@@ -786,15 +770,10 @@ static int sti_hda_bind(struct device *dev, struct device *master, void *data)
 	/* force to disable hd dacs at startup */
 	hda_enable_hd_dacs(hda, false);
 
-	if (hda_debugfs_init(hda, drm_dev->primary))
-		DRM_ERROR("HDA debugfs setup failed\n");
-
 	return 0;
 
 err_sysfs:
-	drm_connector_unregister(drm_connector);
-err_connector:
-	drm_connector_cleanup(drm_connector);
+	drm_bridge_remove(bridge);
 	return -EINVAL;
 }
 
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index 6ef0715bd5b9..fedc17f98d9b 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -18,6 +18,8 @@
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_edid.h>
 
+#include <sound/hdmi-codec.h>
+
 #include "sti_hdmi.h"
 #include "sti_hdmi_tx3g4c28phy.h"
 #include "sti_hdmi_tx3g0c55phy.h"
@@ -35,6 +37,8 @@
 #define HDMI_DFLT_CHL0_DAT 0x0110
 #define HDMI_DFLT_CHL1_DAT 0x0114
 #define HDMI_DFLT_CHL2_DAT 0x0118
+#define HDMI_AUDIO_CFG 0x0200
+#define HDMI_SPDIF_FIFO_STATUS 0x0204
 #define HDMI_SW_DI_1_HEAD_WORD 0x0210
 #define HDMI_SW_DI_1_PKT_WORD0 0x0214
 #define HDMI_SW_DI_1_PKT_WORD1 0x0218
@@ -44,6 +48,9 @@
 #define HDMI_SW_DI_1_PKT_WORD5 0x0228
 #define HDMI_SW_DI_1_PKT_WORD6 0x022C
 #define HDMI_SW_DI_CFG 0x0230
+#define HDMI_SAMPLE_FLAT_MASK 0x0244
+#define HDMI_AUDN 0x0400
+#define HDMI_AUD_CTS 0x0404
 #define HDMI_SW_DI_2_HEAD_WORD 0x0600
 #define HDMI_SW_DI_2_PKT_WORD0 0x0604
 #define HDMI_SW_DI_2_PKT_WORD1 0x0608
@@ -103,6 +110,7 @@
 #define HDMI_INT_DLL_LCK BIT(5)
 #define HDMI_INT_NEW_FRAME BIT(6)
 #define HDMI_INT_GENCTRL_PKT BIT(7)
+#define HDMI_INT_AUDIO_FIFO_XRUN BIT(8)
 #define HDMI_INT_SINK_TERM_PRESENT BIT(11)
 
 #define HDMI_DEFAULT_INT (HDMI_INT_SINK_TERM_PRESENT \
@@ -111,6 +119,7 @@
 			 | HDMI_INT_GLOBAL)
 
 #define HDMI_WORKING_INT (HDMI_INT_SINK_TERM_PRESENT \
+			 | HDMI_INT_AUDIO_FIFO_XRUN \
 			 | HDMI_INT_GENCTRL_PKT \
 			 | HDMI_INT_NEW_FRAME \
 			 | HDMI_INT_DLL_LCK \
@@ -121,6 +130,27 @@
 
 #define HDMI_STA_SW_RST BIT(1)
 
+#define HDMI_AUD_CFG_8CH BIT(0)
+#define HDMI_AUD_CFG_SPDIF_DIV_2 BIT(1)
+#define HDMI_AUD_CFG_SPDIF_DIV_3 BIT(2)
+#define HDMI_AUD_CFG_SPDIF_CLK_DIV_4 (BIT(1) | BIT(2))
+#define HDMI_AUD_CFG_CTS_CLK_256FS BIT(12)
+#define HDMI_AUD_CFG_DTS_INVALID BIT(16)
+#define HDMI_AUD_CFG_ONE_BIT_INVALID (BIT(18) | BIT(19) | BIT(20) | BIT(21))
+#define HDMI_AUD_CFG_CH12_VALID BIT(28)
+#define HDMI_AUD_CFG_CH34_VALID BIT(29)
+#define HDMI_AUD_CFG_CH56_VALID BIT(30)
+#define HDMI_AUD_CFG_CH78_VALID BIT(31)
+
+/* sample flat mask */
+#define HDMI_SAMPLE_FLAT_NO 0
+#define HDMI_SAMPLE_FLAT_SP0 BIT(0)
+#define HDMI_SAMPLE_FLAT_SP1 BIT(1)
+#define HDMI_SAMPLE_FLAT_SP2 BIT(2)
+#define HDMI_SAMPLE_FLAT_SP3 BIT(3)
+#define HDMI_SAMPLE_FLAT_ALL (HDMI_SAMPLE_FLAT_SP0 | HDMI_SAMPLE_FLAT_SP1 |\
+			      HDMI_SAMPLE_FLAT_SP2 | HDMI_SAMPLE_FLAT_SP3)
+
 #define HDMI_INFOFRAME_HEADER_TYPE(x) (((x) & 0xff) << 0)
 #define HDMI_INFOFRAME_HEADER_VERSION(x) (((x) & 0xff) << 8)
 #define HDMI_INFOFRAME_HEADER_LEN(x) (((x) & 0x0f) << 16)
@@ -171,6 +201,10 @@ static irqreturn_t hdmi_irq_thread(int irq, void *arg)
 		wake_up_interruptible(&hdmi->wait_event);
 	}
 
+	/* Audio FIFO underrun IRQ */
+	if (hdmi->irq_status & HDMI_INT_AUDIO_FIFO_XRUN)
+		DRM_INFO("Warning: audio FIFO underrun occurs!");
+
 	return IRQ_HANDLED;
 }
 
@@ -441,26 +475,29 @@ static int hdmi_avi_infoframe_config(struct sti_hdmi *hdmi)
  */
 static int hdmi_audio_infoframe_config(struct sti_hdmi *hdmi)
 {
-	struct hdmi_audio_infoframe infofame;
+	struct hdmi_audio_params *audio = &hdmi->audio;
 	u8 buffer[HDMI_INFOFRAME_SIZE(AUDIO)];
-	int ret;
+	int ret, val;
 
-	ret = hdmi_audio_infoframe_init(&infofame);
-	if (ret < 0) {
-		DRM_ERROR("failed to setup audio infoframe: %d\n", ret);
-		return ret;
-	}
-
-	infofame.channels = 2;
-
-	ret = hdmi_audio_infoframe_pack(&infofame, buffer, sizeof(buffer));
-	if (ret < 0) {
-		DRM_ERROR("failed to pack audio infoframe: %d\n", ret);
-		return ret;
+	DRM_DEBUG_DRIVER("enter %s, AIF %s\n", __func__,
+			 audio->enabled ? "enable" : "disable");
+	if (audio->enabled) {
+		/* set audio parameters stored*/
+		ret = hdmi_audio_infoframe_pack(&audio->cea, buffer,
+						sizeof(buffer));
+		if (ret < 0) {
+			DRM_ERROR("failed to pack audio infoframe: %d\n", ret);
+			return ret;
+		}
+		hdmi_infoframe_write_infopack(hdmi, buffer, ret);
+	} else {
+		/*disable audio info frame transmission */
+		val = hdmi_read(hdmi, HDMI_SW_DI_CFG);
+		val &= ~HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_MASK,
+					     HDMI_IFRAME_SLOT_AUDIO);
+		hdmi_write(hdmi, val, HDMI_SW_DI_CFG);
 	}
 
-	hdmi_infoframe_write_infopack(hdmi, buffer, ret);
-
 	return 0;
 }
 
@@ -628,12 +665,6 @@ static int hdmi_dbg_show(struct seq_file *s, void *data)
 {
 	struct drm_info_node *node = s->private;
 	struct sti_hdmi *hdmi = (struct sti_hdmi *)node->info_ent->data;
-	struct drm_device *dev = node->minor->dev;
-	int ret;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
 
 	seq_printf(s, "HDMI: (vaddr = 0x%p)", hdmi->regs);
 	DBGFS_DUMP("\n", HDMI_CFG);
@@ -656,6 +687,10 @@ static int hdmi_dbg_show(struct seq_file *s, void *data)
 	DBGFS_DUMP("", HDMI_SW_DI_CFG);
 	hdmi_dbg_sw_di_cfg(s, hdmi_read(hdmi, HDMI_SW_DI_CFG));
 
+	DBGFS_DUMP("\n", HDMI_AUDIO_CFG);
+	DBGFS_DUMP("\n", HDMI_SPDIF_FIFO_STATUS);
+	DBGFS_DUMP("\n", HDMI_AUDN);
+
 	seq_printf(s, "\n AVI Infoframe (Data Island slot N=%d):",
 		   HDMI_IFRAME_SLOT_AVI);
 	DBGFS_DUMP_DI(HDMI_SW_DI_N_HEAD_WORD, HDMI_IFRAME_SLOT_AVI);
@@ -690,7 +725,6 @@ static int hdmi_dbg_show(struct seq_file *s, void *data)
 	DBGFS_DUMP_DI(HDMI_SW_DI_N_PKT_WORD6, HDMI_IFRAME_SLOT_VENDOR);
 	seq_puts(s, "\n");
 
-	mutex_unlock(&dev->struct_mutex);
 	return 0;
 }
 
@@ -861,6 +895,7 @@ static int sti_hdmi_connector_get_modes(struct drm_connector *connector)
 
 	count = drm_add_edid_modes(connector, edid);
 	drm_mode_connector_update_edid_property(connector, edid);
+	drm_edid_to_eld(connector, edid);
 
 	kfree(edid);
 	return count;
@@ -897,20 +932,10 @@ static int sti_hdmi_connector_mode_valid(struct drm_connector *connector,
 	return MODE_OK;
 }
 
-struct drm_encoder *sti_hdmi_best_encoder(struct drm_connector *connector)
-{
-	struct sti_hdmi_connector *hdmi_connector
-		= to_sti_hdmi_connector(connector);
-
-	/* Best encoder is the one associated during connector creation */
-	return hdmi_connector->encoder;
-}
-
 static const
 struct drm_connector_helper_funcs sti_hdmi_connector_helper_funcs = {
 	.get_modes = sti_hdmi_connector_get_modes,
 	.mode_valid = sti_hdmi_connector_mode_valid,
-	.best_encoder = sti_hdmi_best_encoder,
 };
 
 /* get detection status of display device */
@@ -932,16 +957,6 @@ sti_hdmi_connector_detect(struct drm_connector *connector, bool force)
 	return connector_status_disconnected;
 }
 
-static void sti_hdmi_connector_destroy(struct drm_connector *connector)
-{
-	struct sti_hdmi_connector *hdmi_connector
-		= to_sti_hdmi_connector(connector);
-
-	drm_connector_unregister(connector);
-	drm_connector_cleanup(connector);
-	kfree(hdmi_connector);
-}
-
 static void sti_hdmi_connector_init_property(struct drm_device *drm_dev,
 					     struct drm_connector *connector)
 {
@@ -1024,17 +1039,31 @@ sti_hdmi_connector_get_property(struct drm_connector *connector,
 	return -EINVAL;
 }
 
+static int sti_hdmi_late_register(struct drm_connector *connector)
+{
+	struct sti_hdmi_connector *hdmi_connector
+		= to_sti_hdmi_connector(connector);
+	struct sti_hdmi *hdmi = hdmi_connector->hdmi;
+
+	if (hdmi_debugfs_init(hdmi, hdmi->drm_dev->primary)) {
+		DRM_ERROR("HDMI debugfs setup failed\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static const struct drm_connector_funcs sti_hdmi_connector_funcs = {
-	.dpms = drm_atomic_helper_connector_dpms,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.detect = sti_hdmi_connector_detect,
-	.destroy = sti_hdmi_connector_destroy,
+	.destroy = drm_connector_cleanup,
 	.reset = drm_atomic_helper_connector_reset,
 	.set_property = drm_atomic_helper_connector_set_property,
 	.atomic_set_property = sti_hdmi_connector_set_property,
 	.atomic_get_property = sti_hdmi_connector_get_property,
 	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+	.late_register = sti_hdmi_late_register,
 };
 
 static struct drm_encoder *sti_hdmi_find_encoder(struct drm_device *dev)
@@ -1049,6 +1078,207 @@ static struct drm_encoder *sti_hdmi_find_encoder(struct drm_device *dev)
 	return NULL;
 }
 
+/**
+ * sti_hdmi_audio_get_non_coherent_n() - get N parameter for non-coherent
+ * clocks. None-coherent clocks means that audio and TMDS clocks have not the
+ * same source (drifts between clocks). In this case assumption is that CTS is
+ * automatically calculated by hardware.
+ *
+ * @audio_fs: audio frame clock frequency in Hz
+ *
+ * Values computed are based on table described in HDMI specification 1.4b
+ *
+ * Returns n value.
+ */
+static int sti_hdmi_audio_get_non_coherent_n(unsigned int audio_fs)
+{
+	unsigned int n;
+
+	switch (audio_fs) {
+	case 32000:
+		n = 4096;
+		break;
+	case 44100:
+		n = 6272;
+		break;
+	case 48000:
+		n = 6144;
+		break;
+	case 88200:
+		n = 6272 * 2;
+		break;
+	case 96000:
+		n = 6144 * 2;
+		break;
+	case 176400:
+		n = 6272 * 4;
+		break;
+	case 192000:
+		n = 6144 * 4;
+		break;
+	default:
+		/* Not pre-defined, recommended value: 128 * fs / 1000 */
+		n = (audio_fs * 128) / 1000;
+	}
+
+	return n;
+}
+
+static int hdmi_audio_configure(struct sti_hdmi *hdmi,
+				struct hdmi_audio_params *params)
+{
+	int audio_cfg, n;
+	struct hdmi_audio_infoframe *info = &params->cea;
+
+	DRM_DEBUG_DRIVER("\n");
+
+	if (!hdmi->enabled)
+		return 0;
+
+	/* update N parameter */
+	n = sti_hdmi_audio_get_non_coherent_n(params->sample_rate);
+
+	DRM_DEBUG_DRIVER("Audio rate = %d Hz, TMDS clock = %d Hz, n = %d\n",
+			 params->sample_rate, hdmi->mode.clock * 1000, n);
+	hdmi_write(hdmi, n, HDMI_AUDN);
+
+	/* update HDMI registers according to configuration */
+	audio_cfg = HDMI_AUD_CFG_SPDIF_DIV_2 | HDMI_AUD_CFG_DTS_INVALID |
+		    HDMI_AUD_CFG_ONE_BIT_INVALID;
+
+	switch (info->channels) {
+	case 8:
+		audio_cfg |= HDMI_AUD_CFG_CH78_VALID;
+	case 6:
+		audio_cfg |= HDMI_AUD_CFG_CH56_VALID;
+	case 4:
+		audio_cfg |= HDMI_AUD_CFG_CH34_VALID | HDMI_AUD_CFG_8CH;
+	case 2:
+		audio_cfg |= HDMI_AUD_CFG_CH12_VALID;
+		break;
+	default:
+		DRM_ERROR("ERROR: Unsupported number of channels (%d)!\n",
+			  info->channels);
+		return -EINVAL;
+	}
+
+	hdmi_write(hdmi, audio_cfg, HDMI_AUDIO_CFG);
+
+	hdmi->audio = *params;
+
+	return hdmi_audio_infoframe_config(hdmi);
+}
+
+static void hdmi_audio_shutdown(struct device *dev, void *data)
+{
+	struct sti_hdmi *hdmi = dev_get_drvdata(dev);
+	int audio_cfg;
+
+	DRM_DEBUG_DRIVER("\n");
+
+	/* disable audio */
+	audio_cfg = HDMI_AUD_CFG_SPDIF_DIV_2 | HDMI_AUD_CFG_DTS_INVALID |
+		    HDMI_AUD_CFG_ONE_BIT_INVALID;
+	hdmi_write(hdmi, audio_cfg, HDMI_AUDIO_CFG);
+
+	hdmi->audio.enabled = 0;
+	hdmi_audio_infoframe_config(hdmi);
+}
+
+static int hdmi_audio_hw_params(struct device *dev,
+				void *data,
+				struct hdmi_codec_daifmt *daifmt,
+				struct hdmi_codec_params *params)
+{
+	struct sti_hdmi *hdmi = dev_get_drvdata(dev);
+	int ret;
+	struct hdmi_audio_params audio = {
+		.sample_width = params->sample_width,
+		.sample_rate = params->sample_rate,
+		.cea = params->cea,
+	};
+
+	DRM_DEBUG_DRIVER("\n");
+
+	if (!hdmi->enabled)
+		return 0;
+
+	if ((daifmt->fmt != HDMI_I2S) || daifmt->bit_clk_inv ||
+	    daifmt->frame_clk_inv || daifmt->bit_clk_master ||
+	    daifmt->frame_clk_master) {
+		dev_err(dev, "%s: Bad flags %d %d %d %d\n", __func__,
+			daifmt->bit_clk_inv, daifmt->frame_clk_inv,
+			daifmt->bit_clk_master,
+			daifmt->frame_clk_master);
+		return -EINVAL;
+	}
+
+	audio.enabled = 1;
+
+	ret = hdmi_audio_configure(hdmi, &audio);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static int hdmi_audio_digital_mute(struct device *dev, void *data, bool enable)
+{
+	struct sti_hdmi *hdmi = dev_get_drvdata(dev);
+
+	DRM_DEBUG_DRIVER("%s\n", enable ? "enable" : "disable");
+
+	if (enable)
+		hdmi_write(hdmi, HDMI_SAMPLE_FLAT_ALL, HDMI_SAMPLE_FLAT_MASK);
+	else
+		hdmi_write(hdmi, HDMI_SAMPLE_FLAT_NO, HDMI_SAMPLE_FLAT_MASK);
+
+	return 0;
+}
+
+static int hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf, size_t len)
+{
+	struct sti_hdmi *hdmi = dev_get_drvdata(dev);
+	struct drm_connector *connector = hdmi->drm_connector;
+
+	DRM_DEBUG_DRIVER("\n");
+	memcpy(buf, connector->eld, min(sizeof(connector->eld), len));
+
+	return 0;
+}
+
+static const struct hdmi_codec_ops audio_codec_ops = {
+	.hw_params = hdmi_audio_hw_params,
+	.audio_shutdown = hdmi_audio_shutdown,
+	.digital_mute = hdmi_audio_digital_mute,
+	.get_eld = hdmi_audio_get_eld,
+};
+
+static int sti_hdmi_register_audio_driver(struct device *dev,
+					  struct sti_hdmi *hdmi)
+{
+	struct hdmi_codec_pdata codec_data = {
+		.ops = &audio_codec_ops,
+		.max_i2s_channels = 8,
+		.i2s = 1,
+	};
+
+	DRM_DEBUG_DRIVER("\n");
+
+	hdmi->audio.enabled = 0;
+
+	hdmi->audio_pdev = platform_device_register_data(
+		dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO,
+		&codec_data, sizeof(codec_data));
+
+	if (IS_ERR(hdmi->audio_pdev))
+		return PTR_ERR(hdmi->audio_pdev);
+
+	DRM_INFO("%s Driver bound %s\n", HDMI_CODEC_DRV_NAME, dev_name(dev));
+
+	return 0;
+}
+
 static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
 {
 	struct sti_hdmi *hdmi = dev_get_drvdata(dev);
@@ -1095,9 +1325,7 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
 	/* initialise property */
 	sti_hdmi_connector_init_property(drm_dev, drm_connector);
 
-	err = drm_connector_register(drm_connector);
-	if (err)
-		goto err_connector;
+	hdmi->drm_connector = drm_connector;
 
 	err = drm_mode_connector_attach_encoder(drm_connector, encoder);
 	if (err) {
@@ -1105,19 +1333,27 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
 		goto err_sysfs;
 	}
 
+	err = sti_hdmi_register_audio_driver(dev, hdmi);
+	if (err) {
+		DRM_ERROR("Failed to attach an audio codec\n");
+		goto err_sysfs;
+	}
+
+	/* Initialize audio infoframe */
+	err = hdmi_audio_infoframe_init(&hdmi->audio.cea);
+	if (err) {
+		DRM_ERROR("Failed to init audio infoframe\n");
+		goto err_sysfs;
+	}
+
 	/* Enable default interrupts */
 	hdmi_write(hdmi, HDMI_DEFAULT_INT, HDMI_INT_EN);
 
-	if (hdmi_debugfs_init(hdmi, drm_dev->primary))
-		DRM_ERROR("HDMI debugfs setup failed\n");
-
 	return 0;
 
 err_sysfs:
-	drm_connector_unregister(drm_connector);
-err_connector:
-	drm_connector_cleanup(drm_connector);
-
+	drm_bridge_remove(bridge);
+	hdmi->drm_connector = NULL;
 	return -EINVAL;
 }
 
@@ -1267,6 +1503,8 @@ static int sti_hdmi_remove(struct platform_device *pdev)
 	struct sti_hdmi *hdmi = dev_get_drvdata(&pdev->dev);
 
 	i2c_put_adapter(hdmi->ddc_adapt);
+	if (hdmi->audio_pdev)
+		platform_device_unregister(hdmi->audio_pdev);
 	component_del(&pdev->dev, &sti_hdmi_ops);
 
 	return 0;
diff --git a/drivers/gpu/drm/sti/sti_hdmi.h b/drivers/gpu/drm/sti/sti_hdmi.h
index ef3a94583bbd..119bc3582ac7 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.h
+++ b/drivers/gpu/drm/sti/sti_hdmi.h
@@ -23,6 +23,13 @@ struct hdmi_phy_ops {
 	void (*stop)(struct sti_hdmi *hdmi);
 };
 
+struct hdmi_audio_params {
+	bool enabled;
+	unsigned int sample_width;
+	unsigned int sample_rate;
+	struct hdmi_audio_infoframe cea;
+};
+
 /* values for the framing mode property */
 enum sti_hdmi_modes {
 	HDMI_MODE_HDMI,
@@ -67,6 +74,9 @@ static const struct drm_prop_enum_list colorspace_mode_names[] = {
  * @ddc_adapt: i2c ddc adapter
  * @colorspace: current colorspace selected
  * @hdmi_mode: select framing for HDMI or DVI
+ * @audio_pdev: ASoC hdmi-codec platform device
+ * @audio: hdmi audio parameters.
+ * @drm_connector: hdmi connector
  */
 struct sti_hdmi {
 	struct device dev;
@@ -89,6 +99,9 @@ struct sti_hdmi {
 	struct i2c_adapter *ddc_adapt;
 	enum hdmi_colorspace colorspace;
 	enum sti_hdmi_modes hdmi_mode;
+	struct platform_device *audio_pdev;
+	struct hdmi_audio_params audio;
+	struct drm_connector *drm_connector;
 };
 
 u32 hdmi_read(struct sti_hdmi *hdmi, int offset);
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index 1edec29b9e45..b03232247966 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -555,14 +555,8 @@ static int hqvdp_dbg_show(struct seq_file *s, void *data)
 {
 	struct drm_info_node *node = s->private;
 	struct sti_hqvdp *hqvdp = (struct sti_hqvdp *)node->info_ent->data;
-	struct drm_device *dev = node->minor->dev;
 	int cmd, cmd_offset, infoxp70;
 	void *virt;
-	int ret;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
 
 	seq_printf(s, "%s: (vaddr = 0x%p)",
 		   sti_plane_to_str(&hqvdp->plane), hqvdp->regs);
@@ -630,7 +624,6 @@ static int hqvdp_dbg_show(struct seq_file *s, void *data)
 
 	seq_puts(s, "\n");
 
-	mutex_unlock(&dev->struct_mutex);
 	return 0;
 }
 
@@ -1241,6 +1234,33 @@ static const struct drm_plane_helper_funcs sti_hqvdp_helpers_funcs = {
1241 .atomic_disable = sti_hqvdp_atomic_disable, 1234 .atomic_disable = sti_hqvdp_atomic_disable,
1242}; 1235};
1243 1236
1237static void sti_hqvdp_destroy(struct drm_plane *drm_plane)
1238{
1239 DRM_DEBUG_DRIVER("\n");
1240
1241 drm_plane_helper_disable(drm_plane);
1242 drm_plane_cleanup(drm_plane);
1243}
1244
1245static int sti_hqvdp_late_register(struct drm_plane *drm_plane)
1246{
1247 struct sti_plane *plane = to_sti_plane(drm_plane);
1248 struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane);
1249
1250 return hqvdp_debugfs_init(hqvdp, drm_plane->dev->primary);
1251}
1252
1253struct drm_plane_funcs sti_hqvdp_plane_helpers_funcs = {
1254 .update_plane = drm_atomic_helper_update_plane,
1255 .disable_plane = drm_atomic_helper_disable_plane,
1256 .destroy = sti_hqvdp_destroy,
1257 .set_property = sti_plane_set_property,
1258 .reset = drm_atomic_helper_plane_reset,
1259 .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
1260 .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
1261 .late_register = sti_hqvdp_late_register,
1262};
1263
1244static struct drm_plane *sti_hqvdp_create(struct drm_device *drm_dev, 1264static struct drm_plane *sti_hqvdp_create(struct drm_device *drm_dev,
1245 struct device *dev, int desc) 1265 struct device *dev, int desc)
1246{ 1266{
@@ -1253,7 +1273,7 @@ static struct drm_plane *sti_hqvdp_create(struct drm_device *drm_dev,
1253 sti_hqvdp_init(hqvdp); 1273 sti_hqvdp_init(hqvdp);
1254 1274
1255 res = drm_universal_plane_init(drm_dev, &hqvdp->plane.drm_plane, 1, 1275 res = drm_universal_plane_init(drm_dev, &hqvdp->plane.drm_plane, 1,
1256 &sti_plane_helpers_funcs, 1276 &sti_hqvdp_plane_helpers_funcs,
1257 hqvdp_supported_formats, 1277 hqvdp_supported_formats,
1258 ARRAY_SIZE(hqvdp_supported_formats), 1278 ARRAY_SIZE(hqvdp_supported_formats),
1259 DRM_PLANE_TYPE_OVERLAY, NULL); 1279 DRM_PLANE_TYPE_OVERLAY, NULL);
@@ -1266,9 +1286,6 @@ static struct drm_plane *sti_hqvdp_create(struct drm_device *drm_dev,
1266 1286
1267 sti_plane_init_property(&hqvdp->plane, DRM_PLANE_TYPE_OVERLAY); 1287 sti_plane_init_property(&hqvdp->plane, DRM_PLANE_TYPE_OVERLAY);
1268 1288
1269 if (hqvdp_debugfs_init(hqvdp, drm_dev->primary))
1270 DRM_ERROR("HQVDP debugfs setup failed\n");
1271
1272 return &hqvdp->plane.drm_plane; 1289 return &hqvdp->plane.drm_plane;
1273} 1290}
1274 1291
@@ -1346,6 +1363,7 @@ static int sti_hqvdp_probe(struct platform_device *pdev)
1346 vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 0); 1363 vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 0);
1347 if (vtg_np) 1364 if (vtg_np)
1348 hqvdp->vtg = of_vtg_find(vtg_np); 1365 hqvdp->vtg = of_vtg_find(vtg_np);
1366 of_node_put(vtg_np);
1349 1367
1350 platform_set_drvdata(pdev, hqvdp); 1368 platform_set_drvdata(pdev, hqvdp);
1351 1369
diff --git a/drivers/gpu/drm/sti/sti_mixer.c b/drivers/gpu/drm/sti/sti_mixer.c
index aed7801b51f7..1885c7ab5a8b 100644
--- a/drivers/gpu/drm/sti/sti_mixer.c
+++ b/drivers/gpu/drm/sti/sti_mixer.c
@@ -151,12 +151,6 @@ static int mixer_dbg_show(struct seq_file *s, void *arg)
151{ 151{
152 struct drm_info_node *node = s->private; 152 struct drm_info_node *node = s->private;
153 struct sti_mixer *mixer = (struct sti_mixer *)node->info_ent->data; 153 struct sti_mixer *mixer = (struct sti_mixer *)node->info_ent->data;
154 struct drm_device *dev = node->minor->dev;
155 int ret;
156
157 ret = mutex_lock_interruptible(&dev->struct_mutex);
158 if (ret)
159 return ret;
160 154
161 seq_printf(s, "%s: (vaddr = 0x%p)", 155 seq_printf(s, "%s: (vaddr = 0x%p)",
162 sti_mixer_to_str(mixer), mixer->regs); 156 sti_mixer_to_str(mixer), mixer->regs);
@@ -176,7 +170,6 @@ static int mixer_dbg_show(struct seq_file *s, void *arg)
176 mixer_dbg_mxn(s, mixer->regs + GAM_MIXER_MX0); 170 mixer_dbg_mxn(s, mixer->regs + GAM_MIXER_MX0);
177 seq_puts(s, "\n"); 171 seq_puts(s, "\n");
178 172
179 mutex_unlock(&dev->struct_mutex);
180 return 0; 173 return 0;
181} 174}
182 175
@@ -188,7 +181,7 @@ static struct drm_info_list mixer1_debugfs_files[] = {
188 { "mixer_aux", mixer_dbg_show, 0, NULL }, 181 { "mixer_aux", mixer_dbg_show, 0, NULL },
189}; 182};
190 183
191static int mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor) 184int sti_mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor)
192{ 185{
193 unsigned int i; 186 unsigned int i;
194 struct drm_info_list *mixer_debugfs_files; 187 struct drm_info_list *mixer_debugfs_files;
@@ -400,8 +393,5 @@ struct sti_mixer *sti_mixer_create(struct device *dev,
400 DRM_DEBUG_DRIVER("%s created. Regs=%p\n", 393 DRM_DEBUG_DRIVER("%s created. Regs=%p\n",
401 sti_mixer_to_str(mixer), mixer->regs); 394 sti_mixer_to_str(mixer), mixer->regs);
402 395
403 if (mixer_debugfs_init(mixer, drm_dev->primary))
404 DRM_ERROR("MIXER debugfs setup failed\n");
405
406 return mixer; 396 return mixer;
407} 397}
diff --git a/drivers/gpu/drm/sti/sti_mixer.h b/drivers/gpu/drm/sti/sti_mixer.h
index 6f35fc086873..830a3c42d886 100644
--- a/drivers/gpu/drm/sti/sti_mixer.h
+++ b/drivers/gpu/drm/sti/sti_mixer.h
@@ -55,6 +55,8 @@ int sti_mixer_active_video_area(struct sti_mixer *mixer,
55 55
56void sti_mixer_set_background_status(struct sti_mixer *mixer, bool enable); 56void sti_mixer_set_background_status(struct sti_mixer *mixer, bool enable);
57 57
58int sti_mixer_debugfs_init(struct sti_mixer *mixer, struct drm_minor *minor);
59
58/* depth in Cross-bar control = z order */ 60/* depth in Cross-bar control = z order */
59#define GAM_MIXER_NB_DEPTH_LEVEL 6 61#define GAM_MIXER_NB_DEPTH_LEVEL 6
60 62
diff --git a/drivers/gpu/drm/sti/sti_plane.c b/drivers/gpu/drm/sti/sti_plane.c
index f10c98d3f012..0cf3335ef37c 100644
--- a/drivers/gpu/drm/sti/sti_plane.c
+++ b/drivers/gpu/drm/sti/sti_plane.c
@@ -45,25 +45,15 @@ const char *sti_plane_to_str(struct sti_plane *plane)
45 45
46#define STI_FPS_INTERVAL_MS 3000 46#define STI_FPS_INTERVAL_MS 3000
47 47
48static int sti_plane_timespec_ms_diff(struct timespec lhs, struct timespec rhs)
49{
50 struct timespec tmp_ts = timespec_sub(lhs, rhs);
51 u64 tmp_ns = (u64)timespec_to_ns(&tmp_ts);
52
53 do_div(tmp_ns, NSEC_PER_MSEC);
54
55 return (u32)tmp_ns;
56}
57
58void sti_plane_update_fps(struct sti_plane *plane, 48void sti_plane_update_fps(struct sti_plane *plane,
59 bool new_frame, 49 bool new_frame,
60 bool new_field) 50 bool new_field)
61{ 51{
62 struct timespec now; 52 ktime_t now;
63 struct sti_fps_info *fps; 53 struct sti_fps_info *fps;
64 int fpks, fipks, ms_since_last, num_frames, num_fields; 54 int fpks, fipks, ms_since_last, num_frames, num_fields;
65 55
66 getrawmonotonic(&now); 56 now = ktime_get();
67 57
68 /* Compute number of frame updates */ 58 /* Compute number of frame updates */
69 fps = &plane->fps_info; 59 fps = &plane->fps_info;
@@ -76,7 +66,7 @@ void sti_plane_update_fps(struct sti_plane *plane,
76 return; 66 return;
77 67
78 fps->curr_frame_counter++; 68 fps->curr_frame_counter++;
79 ms_since_last = sti_plane_timespec_ms_diff(now, fps->last_timestamp); 69 ms_since_last = ktime_to_ms(ktime_sub(now, fps->last_timestamp));
80 num_frames = fps->curr_frame_counter - fps->last_frame_counter; 70 num_frames = fps->curr_frame_counter - fps->last_frame_counter;
81 71
82 if (num_frames <= 0 || ms_since_last < STI_FPS_INTERVAL_MS) 72 if (num_frames <= 0 || ms_since_last < STI_FPS_INTERVAL_MS)
@@ -106,17 +96,9 @@ void sti_plane_update_fps(struct sti_plane *plane,
106 plane->fps_info.fips_str); 96 plane->fps_info.fips_str);
107} 97}
108 98
109static void sti_plane_destroy(struct drm_plane *drm_plane) 99int sti_plane_set_property(struct drm_plane *drm_plane,
110{ 100 struct drm_property *property,
111 DRM_DEBUG_DRIVER("\n"); 101 uint64_t val)
112
113 drm_plane_helper_disable(drm_plane);
114 drm_plane_cleanup(drm_plane);
115}
116
117static int sti_plane_set_property(struct drm_plane *drm_plane,
118 struct drm_property *property,
119 uint64_t val)
120{ 102{
121 struct drm_device *dev = drm_plane->dev; 103 struct drm_device *dev = drm_plane->dev;
122 struct sti_private *private = dev->dev_private; 104 struct sti_private *private = dev->dev_private;
@@ -170,13 +152,3 @@ void sti_plane_init_property(struct sti_plane *plane,
170 plane->drm_plane.base.id, 152 plane->drm_plane.base.id,
171 sti_plane_to_str(plane), plane->zorder); 153 sti_plane_to_str(plane), plane->zorder);
172} 154}
173
174struct drm_plane_funcs sti_plane_helpers_funcs = {
175 .update_plane = drm_atomic_helper_update_plane,
176 .disable_plane = drm_atomic_helper_disable_plane,
177 .destroy = sti_plane_destroy,
178 .set_property = sti_plane_set_property,
179 .reset = drm_atomic_helper_plane_reset,
180 .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
181 .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
182};
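[Editor's sketch — not part of the merged patches. The sti_plane FPS counter above switches from open-coded timespec arithmetic to the ktime helpers. A minimal sketch of that elapsed-milliseconds idiom, assuming only millisecond resolution is needed:]

    #include <linux/ktime.h>

    /* return the milliseconds elapsed since *last and refresh *last */
    static s64 example_ms_since(ktime_t *last)
    {
            ktime_t now = ktime_get();                      /* monotonic clock */
            s64 ms = ktime_to_ms(ktime_sub(now, *last));

            *last = now;
            return ms;
    }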
diff --git a/drivers/gpu/drm/sti/sti_plane.h b/drivers/gpu/drm/sti/sti_plane.h
index c50a3b9f5d37..e0ea1dd3bb88 100644
--- a/drivers/gpu/drm/sti/sti_plane.h
+++ b/drivers/gpu/drm/sti/sti_plane.h
@@ -11,8 +11,6 @@
11#include <drm/drm_atomic_helper.h> 11#include <drm/drm_atomic_helper.h>
12#include <drm/drm_plane_helper.h> 12#include <drm/drm_plane_helper.h>
13 13
14extern struct drm_plane_funcs sti_plane_helpers_funcs;
15
16#define to_sti_plane(x) container_of(x, struct sti_plane, drm_plane) 14#define to_sti_plane(x) container_of(x, struct sti_plane, drm_plane)
17 15
18#define STI_PLANE_TYPE_SHIFT 8 16#define STI_PLANE_TYPE_SHIFT 8
@@ -57,7 +55,7 @@ struct sti_fps_info {
57 unsigned int last_frame_counter; 55 unsigned int last_frame_counter;
58 unsigned int curr_field_counter; 56 unsigned int curr_field_counter;
59 unsigned int last_field_counter; 57 unsigned int last_field_counter;
60 struct timespec last_timestamp; 58 ktime_t last_timestamp;
61 char fps_str[FPS_LENGTH]; 59 char fps_str[FPS_LENGTH];
62 char fips_str[FPS_LENGTH]; 60 char fips_str[FPS_LENGTH];
63}; 61};
@@ -83,6 +81,11 @@ const char *sti_plane_to_str(struct sti_plane *plane);
83void sti_plane_update_fps(struct sti_plane *plane, 81void sti_plane_update_fps(struct sti_plane *plane,
84 bool new_frame, 82 bool new_frame,
85 bool new_field); 83 bool new_field);
84
85int sti_plane_set_property(struct drm_plane *drm_plane,
86 struct drm_property *property,
87 uint64_t val);
88
86void sti_plane_init_property(struct sti_plane *plane, 89void sti_plane_init_property(struct sti_plane *plane,
87 enum drm_plane_type type); 90 enum drm_plane_type type);
88#endif 91#endif
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index f983db5a59da..e25995b35715 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -112,6 +112,7 @@ struct sti_tvout {
112 struct drm_encoder *hdmi; 112 struct drm_encoder *hdmi;
113 struct drm_encoder *hda; 113 struct drm_encoder *hda;
114 struct drm_encoder *dvo; 114 struct drm_encoder *dvo;
115 bool debugfs_registered;
115}; 116};
116 117
117struct sti_tvout_encoder { 118struct sti_tvout_encoder {
@@ -515,13 +516,7 @@ static int tvout_dbg_show(struct seq_file *s, void *data)
515{ 516{
516 struct drm_info_node *node = s->private; 517 struct drm_info_node *node = s->private;
517 struct sti_tvout *tvout = (struct sti_tvout *)node->info_ent->data; 518 struct sti_tvout *tvout = (struct sti_tvout *)node->info_ent->data;
518 struct drm_device *dev = node->minor->dev;
519 struct drm_crtc *crtc; 519 struct drm_crtc *crtc;
520 int ret;
521
522 ret = mutex_lock_interruptible(&dev->struct_mutex);
523 if (ret)
524 return ret;
525 520
526 seq_printf(s, "TVOUT: (vaddr = 0x%p)", tvout->regs); 521 seq_printf(s, "TVOUT: (vaddr = 0x%p)", tvout->regs);
527 522
@@ -587,7 +582,6 @@ static int tvout_dbg_show(struct seq_file *s, void *data)
587 DBGFS_DUMP(TVO_AUX_IN_VID_FORMAT); 582 DBGFS_DUMP(TVO_AUX_IN_VID_FORMAT);
588 seq_puts(s, "\n"); 583 seq_puts(s, "\n");
589 584
590 mutex_unlock(&dev->struct_mutex);
591 return 0; 585 return 0;
592} 586}
593 587
@@ -632,8 +626,37 @@ static void sti_tvout_encoder_destroy(struct drm_encoder *encoder)
632 kfree(sti_encoder); 626 kfree(sti_encoder);
633} 627}
634 628
629static int sti_tvout_late_register(struct drm_encoder *encoder)
630{
631 struct sti_tvout *tvout = to_sti_tvout(encoder);
632 int ret;
633
634 if (tvout->debugfs_registered)
635 return 0;
636
637 ret = tvout_debugfs_init(tvout, encoder->dev->primary);
638 if (ret)
639 return ret;
640
641 tvout->debugfs_registered = true;
642 return 0;
643}
644
645static void sti_tvout_early_unregister(struct drm_encoder *encoder)
646{
647 struct sti_tvout *tvout = to_sti_tvout(encoder);
648
649 if (!tvout->debugfs_registered)
650 return;
651
652 tvout_debugfs_exit(tvout, encoder->dev->primary);
653 tvout->debugfs_registered = false;
654}
655
635static const struct drm_encoder_funcs sti_tvout_encoder_funcs = { 656static const struct drm_encoder_funcs sti_tvout_encoder_funcs = {
636 .destroy = sti_tvout_encoder_destroy, 657 .destroy = sti_tvout_encoder_destroy,
658 .late_register = sti_tvout_late_register,
659 .early_unregister = sti_tvout_early_unregister,
637}; 660};
638 661
639static void sti_dvo_encoder_enable(struct drm_encoder *encoder) 662static void sti_dvo_encoder_enable(struct drm_encoder *encoder)
@@ -820,9 +843,6 @@ static int sti_tvout_bind(struct device *dev, struct device *master, void *data)
820 843
821 sti_tvout_create_encoders(drm_dev, tvout); 844 sti_tvout_create_encoders(drm_dev, tvout);
822 845
823 if (tvout_debugfs_init(tvout, drm_dev->primary))
824 DRM_ERROR("TVOUT debugfs setup failed\n");
825
826 return 0; 846 return 0;
827} 847}
828 848
@@ -830,11 +850,8 @@ static void sti_tvout_unbind(struct device *dev, struct device *master,
830 void *data) 850 void *data)
831{ 851{
832 struct sti_tvout *tvout = dev_get_drvdata(dev); 852 struct sti_tvout *tvout = dev_get_drvdata(dev);
833 struct drm_device *drm_dev = data;
834 853
835 sti_tvout_destroy_encoders(tvout); 854 sti_tvout_destroy_encoders(tvout);
836
837 tvout_debugfs_exit(tvout, drm_dev->primary);
838} 855}
839 856
840static const struct component_ops sti_tvout_ops = { 857static const struct component_ops sti_tvout_ops = {
diff --git a/drivers/gpu/drm/sti/sti_vid.c b/drivers/gpu/drm/sti/sti_vid.c
index 523ed19f5ac6..47634a0251fc 100644
--- a/drivers/gpu/drm/sti/sti_vid.c
+++ b/drivers/gpu/drm/sti/sti_vid.c
@@ -92,12 +92,6 @@ static int vid_dbg_show(struct seq_file *s, void *arg)
92{ 92{
93 struct drm_info_node *node = s->private; 93 struct drm_info_node *node = s->private;
94 struct sti_vid *vid = (struct sti_vid *)node->info_ent->data; 94 struct sti_vid *vid = (struct sti_vid *)node->info_ent->data;
95 struct drm_device *dev = node->minor->dev;
96 int ret;
97
98 ret = mutex_lock_interruptible(&dev->struct_mutex);
99 if (ret)
100 return ret;
101 95
102 seq_printf(s, "VID: (vaddr= 0x%p)", vid->regs); 96 seq_printf(s, "VID: (vaddr= 0x%p)", vid->regs);
103 97
@@ -122,7 +116,6 @@ static int vid_dbg_show(struct seq_file *s, void *arg)
122 DBGFS_DUMP(VID_CSAT); 116 DBGFS_DUMP(VID_CSAT);
123 seq_puts(s, "\n"); 117 seq_puts(s, "\n");
124 118
125 mutex_unlock(&dev->struct_mutex);
126 return 0; 119 return 0;
127} 120}
128 121
@@ -130,7 +123,7 @@ static struct drm_info_list vid_debugfs_files[] = {
130 { "vid", vid_dbg_show, 0, NULL }, 123 { "vid", vid_dbg_show, 0, NULL },
131}; 124};
132 125
133static int vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor) 126int vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor)
134{ 127{
135 unsigned int i; 128 unsigned int i;
136 129
@@ -227,8 +220,5 @@ struct sti_vid *sti_vid_create(struct device *dev, struct drm_device *drm_dev,
227 220
228 sti_vid_init(vid); 221 sti_vid_init(vid);
229 222
230 if (vid_debugfs_init(vid, drm_dev->primary))
231 DRM_ERROR("VID debugfs setup failed\n");
232
233 return vid; 223 return vid;
234} 224}
diff --git a/drivers/gpu/drm/sti/sti_vid.h b/drivers/gpu/drm/sti/sti_vid.h
index 6c842344f3d8..fdc90f922a05 100644
--- a/drivers/gpu/drm/sti/sti_vid.h
+++ b/drivers/gpu/drm/sti/sti_vid.h
@@ -26,4 +26,6 @@ void sti_vid_disable(struct sti_vid *vid);
26struct sti_vid *sti_vid_create(struct device *dev, struct drm_device *drm_dev, 26struct sti_vid *sti_vid_create(struct device *dev, struct drm_device *drm_dev,
27 int id, void __iomem *baseaddr); 27 int id, void __iomem *baseaddr);
28 28
29int vid_debugfs_init(struct sti_vid *vid, struct drm_minor *minor);
30
29#endif 31#endif
diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c
index 6bf4ce466d20..0bdc385eec17 100644
--- a/drivers/gpu/drm/sti/sti_vtg.c
+++ b/drivers/gpu/drm/sti/sti_vtg.c
@@ -65,7 +65,7 @@
65#define HDMI_DELAY (5) 65#define HDMI_DELAY (5)
66 66
67/* Delay introduced by the DVO in nb of pixel */ 67/* Delay introduced by the DVO in nb of pixel */
68#define DVO_DELAY (2) 68#define DVO_DELAY (7)
69 69
70/* delay introduced by the Arbitrary Waveform Generator in nb of pixels */ 70/* delay introduced by the Arbitrary Waveform Generator in nb of pixels */
71#define AWG_DELAY_HD (-9) 71#define AWG_DELAY_HD (-9)
@@ -432,6 +432,7 @@ static int vtg_probe(struct platform_device *pdev)
432 np = of_parse_phandle(pdev->dev.of_node, "st,slave", 0); 432 np = of_parse_phandle(pdev->dev.of_node, "st,slave", 0);
433 if (np) { 433 if (np) {
434 vtg->slave = of_vtg_find(np); 434 vtg->slave = of_vtg_find(np);
435 of_node_put(np);
435 436
436 if (!vtg->slave) 437 if (!vtg->slave)
437 return -EPROBE_DEFER; 438 return -EPROBE_DEFER;
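[Editor's sketch — not part of the merged patches. The hqvdp and vtg hunks both add of_node_put() after of_parse_phandle(), which returns the referenced node with its refcount raised. The idiom, reduced to a compilable sketch using the "st,slave" property name from the vtg hunk:]

    #include <linux/of.h>

    /* check an optional "st,slave" phandle and balance the reference taken by the lookup */
    static bool example_has_slave(struct device_node *np)
    {
            struct device_node *ref = of_parse_phandle(np, "st,slave", 0);

            if (!ref)
                    return false;

            of_node_put(ref);       /* drop the reference of_parse_phandle() took */
            return true;
    }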
diff --git a/drivers/gpu/drm/sun4i/sun4i_crtc.c b/drivers/gpu/drm/sun4i/sun4i_crtc.c
index 41cacecbea9a..4a192210574f 100644
--- a/drivers/gpu/drm/sun4i/sun4i_crtc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_crtc.c
@@ -51,10 +51,22 @@ static void sun4i_crtc_atomic_flush(struct drm_crtc *crtc,
51{ 51{
52 struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc); 52 struct sun4i_crtc *scrtc = drm_crtc_to_sun4i_crtc(crtc);
53 struct sun4i_drv *drv = scrtc->drv; 53 struct sun4i_drv *drv = scrtc->drv;
54 struct drm_pending_vblank_event *event = crtc->state->event;
54 55
55 DRM_DEBUG_DRIVER("Committing plane changes\n"); 56 DRM_DEBUG_DRIVER("Committing plane changes\n");
56 57
57 sun4i_backend_commit(drv->backend); 58 sun4i_backend_commit(drv->backend);
59
60 if (event) {
61 crtc->state->event = NULL;
62
63 spin_lock_irq(&crtc->dev->event_lock);
64 if (drm_crtc_vblank_get(crtc) == 0)
65 drm_crtc_arm_vblank_event(crtc, event);
66 else
67 drm_crtc_send_vblank_event(crtc, event);
68 spin_unlock_irq(&crtc->dev->event_lock);
69 }
58} 70}
59 71
60static void sun4i_crtc_disable(struct drm_crtc *crtc) 72static void sun4i_crtc_disable(struct drm_crtc *crtc)
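[Editor's sketch — not part of the merged patches. The sun4i atomic_flush hook above now completes the pending page-flip event. The same arming pattern, pulled out as a self-contained helper; this is a sketch of the DRM core idiom rather than driver-specific code:]

    #include <drm/drmP.h>
    #include <drm/drm_crtc.h>

    /* complete the flip event attached to the new CRTC state, if any */
    static void example_complete_flip_event(struct drm_crtc *crtc)
    {
            struct drm_pending_vblank_event *event = crtc->state->event;

            if (!event)
                    return;

            crtc->state->event = NULL;

            spin_lock_irq(&crtc->dev->event_lock);
            if (drm_crtc_vblank_get(crtc) == 0)
                    /* vblank interrupts are on: deliver on the next vblank */
                    drm_crtc_arm_vblank_event(crtc, event);
            else
                    /* no vblank reference available: send the event right away */
                    drm_crtc_send_vblank_event(crtc, event);
            spin_unlock_irq(&crtc->dev->event_lock);
    }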
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 937394cbc241..7092daaf6c43 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -75,7 +75,7 @@ static struct drm_driver sun4i_drv_driver = {
75 .dumb_create = drm_gem_cma_dumb_create, 75 .dumb_create = drm_gem_cma_dumb_create,
76 .dumb_destroy = drm_gem_dumb_destroy, 76 .dumb_destroy = drm_gem_dumb_destroy,
77 .dumb_map_offset = drm_gem_cma_dumb_map_offset, 77 .dumb_map_offset = drm_gem_cma_dumb_map_offset,
78 .gem_free_object = drm_gem_cma_free_object, 78 .gem_free_object_unlocked = drm_gem_cma_free_object,
79 .gem_vm_ops = &drm_gem_cma_vm_ops, 79 .gem_vm_ops = &drm_gem_cma_vm_ops,
80 80
81 /* PRIME Operations */ 81 /* PRIME Operations */
@@ -123,10 +123,6 @@ static int sun4i_drv_bind(struct device *dev)
123 if (!drm) 123 if (!drm)
124 return -ENOMEM; 124 return -ENOMEM;
125 125
126 ret = drm_dev_set_unique(drm, dev_name(drm->dev));
127 if (ret)
128 goto free_drm;
129
130 drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL); 126 drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
131 if (!drv) { 127 if (!drv) {
132 ret = -ENOMEM; 128 ret = -ENOMEM;
@@ -178,14 +174,8 @@ static int sun4i_drv_bind(struct device *dev)
178 if (ret) 174 if (ret)
179 goto free_drm; 175 goto free_drm;
180 176
181 ret = drm_connector_register_all(drm);
182 if (ret)
183 goto unregister_drm;
184
185 return 0; 177 return 0;
186 178
187unregister_drm:
188 drm_dev_unregister(drm);
189free_drm: 179free_drm:
190 drm_dev_unref(drm); 180 drm_dev_unref(drm);
191 return ret; 181 return ret;
@@ -195,7 +185,6 @@ static void sun4i_drv_unbind(struct device *dev)
195{ 185{
196 struct drm_device *drm = dev_get_drvdata(dev); 186 struct drm_device *drm = dev_get_drvdata(dev);
197 187
198 drm_connector_unregister_all(drm);
199 drm_dev_unregister(drm); 188 drm_dev_unregister(drm);
200 drm_kms_helper_poll_fini(drm); 189 drm_kms_helper_poll_fini(drm);
201 sun4i_framebuffer_free(drm); 190 sun4i_framebuffer_free(drm);
diff --git a/drivers/gpu/drm/sun4i/sun4i_framebuffer.c b/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
index a0b30c216a5b..70688febd7ac 100644
--- a/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
+++ b/drivers/gpu/drm/sun4i/sun4i_framebuffer.c
@@ -20,8 +20,7 @@ static void sun4i_de_output_poll_changed(struct drm_device *drm)
20{ 20{
21 struct sun4i_drv *drv = drm->dev_private; 21 struct sun4i_drv *drv = drm->dev_private;
22 22
23 if (drv->fbdev) 23 drm_fbdev_cma_hotplug_event(drv->fbdev);
24 drm_fbdev_cma_hotplug_event(drv->fbdev);
25} 24}
26 25
27static const struct drm_mode_config_funcs sun4i_de_mode_config_funcs = { 26static const struct drm_mode_config_funcs sun4i_de_mode_config_funcs = {
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index aaffe9e64ffb..f5bbac6efb4c 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -104,19 +104,9 @@ static int sun4i_rgb_mode_valid(struct drm_connector *connector,
104 return MODE_OK; 104 return MODE_OK;
105} 105}
106 106
107static struct drm_encoder *
108sun4i_rgb_best_encoder(struct drm_connector *connector)
109{
110 struct sun4i_rgb *rgb =
111 drm_connector_to_sun4i_rgb(connector);
112
113 return &rgb->encoder;
114}
115
116static struct drm_connector_helper_funcs sun4i_rgb_con_helper_funcs = { 107static struct drm_connector_helper_funcs sun4i_rgb_con_helper_funcs = {
117 .get_modes = sun4i_rgb_get_modes, 108 .get_modes = sun4i_rgb_get_modes,
118 .mode_valid = sun4i_rgb_mode_valid, 109 .mode_valid = sun4i_rgb_mode_valid,
119 .best_encoder = sun4i_rgb_best_encoder,
120}; 110};
121 111
122static enum drm_connector_status 112static enum drm_connector_status
diff --git a/drivers/gpu/drm/sun4i/sun4i_tv.c b/drivers/gpu/drm/sun4i/sun4i_tv.c
index bc047f923508..b84147896294 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tv.c
@@ -526,18 +526,9 @@ static int sun4i_tv_comp_mode_valid(struct drm_connector *connector,
526 return MODE_OK; 526 return MODE_OK;
527} 527}
528 528
529static struct drm_encoder *
530sun4i_tv_comp_best_encoder(struct drm_connector *connector)
531{
532 struct sun4i_tv *tv = drm_connector_to_sun4i_tv(connector);
533
534 return &tv->encoder;
535}
536
537static struct drm_connector_helper_funcs sun4i_tv_comp_connector_helper_funcs = { 529static struct drm_connector_helper_funcs sun4i_tv_comp_connector_helper_funcs = {
538 .get_modes = sun4i_tv_comp_get_modes, 530 .get_modes = sun4i_tv_comp_get_modes,
539 .mode_valid = sun4i_tv_comp_mode_valid, 531 .mode_valid = sun4i_tv_comp_mode_valid,
540 .best_encoder = sun4i_tv_comp_best_encoder,
541}; 532};
542 533
543static enum drm_connector_status 534static enum drm_connector_status
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 39940f5b7c91..8495bd01b544 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -10,6 +10,7 @@
10#include <linux/clk.h> 10#include <linux/clk.h>
11#include <linux/debugfs.h> 11#include <linux/debugfs.h>
12#include <linux/iommu.h> 12#include <linux/iommu.h>
13#include <linux/pm_runtime.h>
13#include <linux/reset.h> 14#include <linux/reset.h>
14 15
15#include <soc/tegra/pmc.h> 16#include <soc/tegra/pmc.h>
@@ -1216,6 +1217,8 @@ static void tegra_crtc_disable(struct drm_crtc *crtc)
1216 1217
1217 tegra_dc_stats_reset(&dc->stats); 1218 tegra_dc_stats_reset(&dc->stats);
1218 drm_crtc_vblank_off(crtc); 1219 drm_crtc_vblank_off(crtc);
1220
1221 pm_runtime_put_sync(dc->dev);
1219} 1222}
1220 1223
1221static void tegra_crtc_enable(struct drm_crtc *crtc) 1224static void tegra_crtc_enable(struct drm_crtc *crtc)
@@ -1225,6 +1228,48 @@ static void tegra_crtc_enable(struct drm_crtc *crtc)
1225 struct tegra_dc *dc = to_tegra_dc(crtc); 1228 struct tegra_dc *dc = to_tegra_dc(crtc);
1226 u32 value; 1229 u32 value;
1227 1230
1231 pm_runtime_get_sync(dc->dev);
1232
1233 /* initialize display controller */
1234 if (dc->syncpt) {
1235 u32 syncpt = host1x_syncpt_id(dc->syncpt);
1236
1237 value = SYNCPT_CNTRL_NO_STALL;
1238 tegra_dc_writel(dc, value, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
1239
1240 value = SYNCPT_VSYNC_ENABLE | syncpt;
1241 tegra_dc_writel(dc, value, DC_CMD_CONT_SYNCPT_VSYNC);
1242 }
1243
1244 value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
1245 WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
1246 tegra_dc_writel(dc, value, DC_CMD_INT_TYPE);
1247
1248 value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
1249 WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
1250 tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY);
1251
1252 /* initialize timer */
1253 value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) |
1254 WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20);
1255 tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY);
1256
1257 value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(1) |
1258 WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1);
1259 tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
1260
1261 value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
1262 WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
1263 tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
1264
1265 value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
1266 WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
1267 tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
1268
1269 if (dc->soc->supports_border_color)
1270 tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR);
1271
1272 /* apply PLL and pixel clock changes */
1228 tegra_dc_commit_state(dc, state); 1273 tegra_dc_commit_state(dc, state);
1229 1274
1230 /* program display mode */ 1275 /* program display mode */
@@ -1685,7 +1730,6 @@ static int tegra_dc_init(struct host1x_client *client)
1685 struct tegra_drm *tegra = drm->dev_private; 1730 struct tegra_drm *tegra = drm->dev_private;
1686 struct drm_plane *primary = NULL; 1731 struct drm_plane *primary = NULL;
1687 struct drm_plane *cursor = NULL; 1732 struct drm_plane *cursor = NULL;
1688 u32 value;
1689 int err; 1733 int err;
1690 1734
1691 dc->syncpt = host1x_syncpt_request(dc->dev, flags); 1735 dc->syncpt = host1x_syncpt_request(dc->dev, flags);
@@ -1755,47 +1799,6 @@ static int tegra_dc_init(struct host1x_client *client)
1755 goto cleanup; 1799 goto cleanup;
1756 } 1800 }
1757 1801
1758 /* initialize display controller */
1759 if (dc->syncpt) {
1760 u32 syncpt = host1x_syncpt_id(dc->syncpt);
1761
1762 value = SYNCPT_CNTRL_NO_STALL;
1763 tegra_dc_writel(dc, value, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
1764
1765 value = SYNCPT_VSYNC_ENABLE | syncpt;
1766 tegra_dc_writel(dc, value, DC_CMD_CONT_SYNCPT_VSYNC);
1767 }
1768
1769 value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
1770 WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
1771 tegra_dc_writel(dc, value, DC_CMD_INT_TYPE);
1772
1773 value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
1774 WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
1775 tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY);
1776
1777 /* initialize timer */
1778 value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) |
1779 WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20);
1780 tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY);
1781
1782 value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(1) |
1783 WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1);
1784 tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER);
1785
1786 value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
1787 WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
1788 tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE);
1789
1790 value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT |
1791 WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT;
1792 tegra_dc_writel(dc, value, DC_CMD_INT_MASK);
1793
1794 if (dc->soc->supports_border_color)
1795 tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR);
1796
1797 tegra_dc_stats_reset(&dc->stats);
1798
1799 return 0; 1802 return 0;
1800 1803
1801cleanup: 1804cleanup:
@@ -1987,33 +1990,15 @@ static int tegra_dc_probe(struct platform_device *pdev)
1987 return PTR_ERR(dc->rst); 1990 return PTR_ERR(dc->rst);
1988 } 1991 }
1989 1992
1993 reset_control_assert(dc->rst);
1994
1990 if (dc->soc->has_powergate) { 1995 if (dc->soc->has_powergate) {
1991 if (dc->pipe == 0) 1996 if (dc->pipe == 0)
1992 dc->powergate = TEGRA_POWERGATE_DIS; 1997 dc->powergate = TEGRA_POWERGATE_DIS;
1993 else 1998 else
1994 dc->powergate = TEGRA_POWERGATE_DISB; 1999 dc->powergate = TEGRA_POWERGATE_DISB;
1995 2000
1996 err = tegra_powergate_sequence_power_up(dc->powergate, dc->clk, 2001 tegra_powergate_power_off(dc->powergate);
1997 dc->rst);
1998 if (err < 0) {
1999 dev_err(&pdev->dev, "failed to power partition: %d\n",
2000 err);
2001 return err;
2002 }
2003 } else {
2004 err = clk_prepare_enable(dc->clk);
2005 if (err < 0) {
2006 dev_err(&pdev->dev, "failed to enable clock: %d\n",
2007 err);
2008 return err;
2009 }
2010
2011 err = reset_control_deassert(dc->rst);
2012 if (err < 0) {
2013 dev_err(&pdev->dev, "failed to deassert reset: %d\n",
2014 err);
2015 return err;
2016 }
2017 } 2002 }
2018 2003
2019 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2004 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -2027,16 +2012,19 @@ static int tegra_dc_probe(struct platform_device *pdev)
2027 return -ENXIO; 2012 return -ENXIO;
2028 } 2013 }
2029 2014
2030 INIT_LIST_HEAD(&dc->client.list);
2031 dc->client.ops = &dc_client_ops;
2032 dc->client.dev = &pdev->dev;
2033
2034 err = tegra_dc_rgb_probe(dc); 2015 err = tegra_dc_rgb_probe(dc);
2035 if (err < 0 && err != -ENODEV) { 2016 if (err < 0 && err != -ENODEV) {
2036 dev_err(&pdev->dev, "failed to probe RGB output: %d\n", err); 2017 dev_err(&pdev->dev, "failed to probe RGB output: %d\n", err);
2037 return err; 2018 return err;
2038 } 2019 }
2039 2020
2021 platform_set_drvdata(pdev, dc);
2022 pm_runtime_enable(&pdev->dev);
2023
2024 INIT_LIST_HEAD(&dc->client.list);
2025 dc->client.ops = &dc_client_ops;
2026 dc->client.dev = &pdev->dev;
2027
2040 err = host1x_client_register(&dc->client); 2028 err = host1x_client_register(&dc->client);
2041 if (err < 0) { 2029 if (err < 0) {
2042 dev_err(&pdev->dev, "failed to register host1x client: %d\n", 2030 dev_err(&pdev->dev, "failed to register host1x client: %d\n",
@@ -2044,8 +2032,6 @@ static int tegra_dc_probe(struct platform_device *pdev)
2044 return err; 2032 return err;
2045 } 2033 }
2046 2034
2047 platform_set_drvdata(pdev, dc);
2048
2049 return 0; 2035 return 0;
2050} 2036}
2051 2037
@@ -2067,7 +2053,22 @@ static int tegra_dc_remove(struct platform_device *pdev)
2067 return err; 2053 return err;
2068 } 2054 }
2069 2055
2070 reset_control_assert(dc->rst); 2056 pm_runtime_disable(&pdev->dev);
2057
2058 return 0;
2059}
2060
2061#ifdef CONFIG_PM
2062static int tegra_dc_suspend(struct device *dev)
2063{
2064 struct tegra_dc *dc = dev_get_drvdata(dev);
2065 int err;
2066
2067 err = reset_control_assert(dc->rst);
2068 if (err < 0) {
2069 dev_err(dev, "failed to assert reset: %d\n", err);
2070 return err;
2071 }
2071 2072
2072 if (dc->soc->has_powergate) 2073 if (dc->soc->has_powergate)
2073 tegra_powergate_power_off(dc->powergate); 2074 tegra_powergate_power_off(dc->powergate);
@@ -2077,10 +2078,45 @@ static int tegra_dc_remove(struct platform_device *pdev)
2077 return 0; 2078 return 0;
2078} 2079}
2079 2080
2081static int tegra_dc_resume(struct device *dev)
2082{
2083 struct tegra_dc *dc = dev_get_drvdata(dev);
2084 int err;
2085
2086 if (dc->soc->has_powergate) {
2087 err = tegra_powergate_sequence_power_up(dc->powergate, dc->clk,
2088 dc->rst);
2089 if (err < 0) {
2090 dev_err(dev, "failed to power partition: %d\n", err);
2091 return err;
2092 }
2093 } else {
2094 err = clk_prepare_enable(dc->clk);
2095 if (err < 0) {
2096 dev_err(dev, "failed to enable clock: %d\n", err);
2097 return err;
2098 }
2099
2100 err = reset_control_deassert(dc->rst);
2101 if (err < 0) {
2102 dev_err(dev, "failed to deassert reset: %d\n", err);
2103 return err;
2104 }
2105 }
2106
2107 return 0;
2108}
2109#endif
2110
2111static const struct dev_pm_ops tegra_dc_pm_ops = {
2112 SET_RUNTIME_PM_OPS(tegra_dc_suspend, tegra_dc_resume, NULL)
2113};
2114
2080struct platform_driver tegra_dc_driver = { 2115struct platform_driver tegra_dc_driver = {
2081 .driver = { 2116 .driver = {
2082 .name = "tegra-dc", 2117 .name = "tegra-dc",
2083 .of_match_table = tegra_dc_of_match, 2118 .of_match_table = tegra_dc_of_match,
2119 .pm = &tegra_dc_pm_ops,
2084 }, 2120 },
2085 .probe = tegra_dc_probe, 2121 .probe = tegra_dc_probe,
2086 .remove = tegra_dc_remove, 2122 .remove = tegra_dc_remove,
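[Editor's sketch — not part of the merged patches. tegra_dc now routes clock, reset and powergate handling through runtime PM instead of doing it once at probe. A minimal sketch of the wiring, with empty placeholder suspend/resume bodies:]

    #include <linux/platform_device.h>
    #include <linux/pm_runtime.h>

    static int example_runtime_suspend(struct device *dev)
    {
            /* assert resets, gate clocks, power the partition off */
            return 0;
    }

    static int example_runtime_resume(struct device *dev)
    {
            /* power the partition up, enable clocks, deassert resets */
            return 0;
    }

    static const struct dev_pm_ops example_pm_ops = {
            SET_RUNTIME_PM_OPS(example_runtime_suspend, example_runtime_resume, NULL)
    };

    /*
     * probe:   pm_runtime_enable(&pdev->dev);
     * enable:  pm_runtime_get_sync(dev);   before touching hardware
     * disable: pm_runtime_put_sync(dev);   after the pipe is shut down
     * remove:  pm_runtime_disable(&pdev->dev);
     */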
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index b24a0f14821a..059f409556d5 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -12,6 +12,9 @@
12#include <linux/interrupt.h> 12#include <linux/interrupt.h>
13#include <linux/io.h> 13#include <linux/io.h>
14#include <linux/of_gpio.h> 14#include <linux/of_gpio.h>
15#include <linux/pinctrl/pinconf-generic.h>
16#include <linux/pinctrl/pinctrl.h>
17#include <linux/pinctrl/pinmux.h>
15#include <linux/platform_device.h> 18#include <linux/platform_device.h>
16#include <linux/reset.h> 19#include <linux/reset.h>
17#include <linux/regulator/consumer.h> 20#include <linux/regulator/consumer.h>
@@ -44,6 +47,11 @@ struct tegra_dpaux {
44 struct completion complete; 47 struct completion complete;
45 struct work_struct work; 48 struct work_struct work;
46 struct list_head list; 49 struct list_head list;
50
51#ifdef CONFIG_GENERIC_PINCONF
52 struct pinctrl_dev *pinctrl;
53 struct pinctrl_desc desc;
54#endif
47}; 55};
48 56
49static inline struct tegra_dpaux *to_dpaux(struct drm_dp_aux *aux) 57static inline struct tegra_dpaux *to_dpaux(struct drm_dp_aux *aux)
@@ -267,6 +275,148 @@ static irqreturn_t tegra_dpaux_irq(int irq, void *data)
267 return ret; 275 return ret;
268} 276}
269 277
278enum tegra_dpaux_functions {
279 DPAUX_PADCTL_FUNC_AUX,
280 DPAUX_PADCTL_FUNC_I2C,
281 DPAUX_PADCTL_FUNC_OFF,
282};
283
284static void tegra_dpaux_pad_power_down(struct tegra_dpaux *dpaux)
285{
286 u32 value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
287
288 value |= DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
289
290 tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
291}
292
293static void tegra_dpaux_pad_power_up(struct tegra_dpaux *dpaux)
294{
295 u32 value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
296
297 value &= ~DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
298
299 tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
300}
301
302static int tegra_dpaux_pad_config(struct tegra_dpaux *dpaux, unsigned function)
303{
304 u32 value;
305
306 switch (function) {
307 case DPAUX_PADCTL_FUNC_AUX:
308 value = DPAUX_HYBRID_PADCTL_AUX_CMH(2) |
309 DPAUX_HYBRID_PADCTL_AUX_DRVZ(4) |
310 DPAUX_HYBRID_PADCTL_AUX_DRVI(0x18) |
311 DPAUX_HYBRID_PADCTL_AUX_INPUT_RCV |
312 DPAUX_HYBRID_PADCTL_MODE_AUX;
313 break;
314
315 case DPAUX_PADCTL_FUNC_I2C:
316 value = DPAUX_HYBRID_PADCTL_I2C_SDA_INPUT_RCV |
317 DPAUX_HYBRID_PADCTL_I2C_SCL_INPUT_RCV |
318 DPAUX_HYBRID_PADCTL_MODE_I2C;
319 break;
320
321 case DPAUX_PADCTL_FUNC_OFF:
322 tegra_dpaux_pad_power_down(dpaux);
323 return 0;
324
325 default:
326 return -ENOTSUPP;
327 }
328
329 tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_PADCTL);
330 tegra_dpaux_pad_power_up(dpaux);
331
332 return 0;
333}
334
335#ifdef CONFIG_GENERIC_PINCONF
336static const struct pinctrl_pin_desc tegra_dpaux_pins[] = {
337 PINCTRL_PIN(0, "DP_AUX_CHx_P"),
338 PINCTRL_PIN(1, "DP_AUX_CHx_N"),
339};
340
341static const unsigned tegra_dpaux_pin_numbers[] = { 0, 1 };
342
343static const char * const tegra_dpaux_groups[] = {
344 "dpaux-io",
345};
346
347static const char * const tegra_dpaux_functions[] = {
348 "aux",
349 "i2c",
350 "off",
351};
352
353static int tegra_dpaux_get_groups_count(struct pinctrl_dev *pinctrl)
354{
355 return ARRAY_SIZE(tegra_dpaux_groups);
356}
357
358static const char *tegra_dpaux_get_group_name(struct pinctrl_dev *pinctrl,
359 unsigned int group)
360{
361 return tegra_dpaux_groups[group];
362}
363
364static int tegra_dpaux_get_group_pins(struct pinctrl_dev *pinctrl,
365 unsigned group, const unsigned **pins,
366 unsigned *num_pins)
367{
368 *pins = tegra_dpaux_pin_numbers;
369 *num_pins = ARRAY_SIZE(tegra_dpaux_pin_numbers);
370
371 return 0;
372}
373
374static const struct pinctrl_ops tegra_dpaux_pinctrl_ops = {
375 .get_groups_count = tegra_dpaux_get_groups_count,
376 .get_group_name = tegra_dpaux_get_group_name,
377 .get_group_pins = tegra_dpaux_get_group_pins,
378 .dt_node_to_map = pinconf_generic_dt_node_to_map_group,
379 .dt_free_map = pinconf_generic_dt_free_map,
380};
381
382static int tegra_dpaux_get_functions_count(struct pinctrl_dev *pinctrl)
383{
384 return ARRAY_SIZE(tegra_dpaux_functions);
385}
386
387static const char *tegra_dpaux_get_function_name(struct pinctrl_dev *pinctrl,
388 unsigned int function)
389{
390 return tegra_dpaux_functions[function];
391}
392
393static int tegra_dpaux_get_function_groups(struct pinctrl_dev *pinctrl,
394 unsigned int function,
395 const char * const **groups,
396 unsigned * const num_groups)
397{
398 *num_groups = ARRAY_SIZE(tegra_dpaux_groups);
399 *groups = tegra_dpaux_groups;
400
401 return 0;
402}
403
404static int tegra_dpaux_set_mux(struct pinctrl_dev *pinctrl,
405 unsigned int function, unsigned int group)
406{
407 struct tegra_dpaux *dpaux = pinctrl_dev_get_drvdata(pinctrl);
408
409 return tegra_dpaux_pad_config(dpaux, function);
410}
411
412static const struct pinmux_ops tegra_dpaux_pinmux_ops = {
413 .get_functions_count = tegra_dpaux_get_functions_count,
414 .get_function_name = tegra_dpaux_get_function_name,
415 .get_function_groups = tegra_dpaux_get_function_groups,
416 .set_mux = tegra_dpaux_set_mux,
417};
418#endif
419
270static int tegra_dpaux_probe(struct platform_device *pdev) 420static int tegra_dpaux_probe(struct platform_device *pdev)
271{ 421{
272 struct tegra_dpaux *dpaux; 422 struct tegra_dpaux *dpaux;
@@ -294,11 +444,14 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
294 return -ENXIO; 444 return -ENXIO;
295 } 445 }
296 446
297 dpaux->rst = devm_reset_control_get(&pdev->dev, "dpaux"); 447 if (!pdev->dev.pm_domain) {
298 if (IS_ERR(dpaux->rst)) { 448 dpaux->rst = devm_reset_control_get(&pdev->dev, "dpaux");
299 dev_err(&pdev->dev, "failed to get reset control: %ld\n", 449 if (IS_ERR(dpaux->rst)) {
300 PTR_ERR(dpaux->rst)); 450 dev_err(&pdev->dev,
301 return PTR_ERR(dpaux->rst); 451 "failed to get reset control: %ld\n",
452 PTR_ERR(dpaux->rst));
453 return PTR_ERR(dpaux->rst);
454 }
302 } 455 }
303 456
304 dpaux->clk = devm_clk_get(&pdev->dev, NULL); 457 dpaux->clk = devm_clk_get(&pdev->dev, NULL);
@@ -315,34 +468,37 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
315 return err; 468 return err;
316 } 469 }
317 470
318 reset_control_deassert(dpaux->rst); 471 if (dpaux->rst)
472 reset_control_deassert(dpaux->rst);
319 473
320 dpaux->clk_parent = devm_clk_get(&pdev->dev, "parent"); 474 dpaux->clk_parent = devm_clk_get(&pdev->dev, "parent");
321 if (IS_ERR(dpaux->clk_parent)) { 475 if (IS_ERR(dpaux->clk_parent)) {
322 dev_err(&pdev->dev, "failed to get parent clock: %ld\n", 476 dev_err(&pdev->dev, "failed to get parent clock: %ld\n",
323 PTR_ERR(dpaux->clk_parent)); 477 PTR_ERR(dpaux->clk_parent));
324 return PTR_ERR(dpaux->clk_parent); 478 err = PTR_ERR(dpaux->clk_parent);
479 goto assert_reset;
325 } 480 }
326 481
327 err = clk_prepare_enable(dpaux->clk_parent); 482 err = clk_prepare_enable(dpaux->clk_parent);
328 if (err < 0) { 483 if (err < 0) {
329 dev_err(&pdev->dev, "failed to enable parent clock: %d\n", 484 dev_err(&pdev->dev, "failed to enable parent clock: %d\n",
330 err); 485 err);
331 return err; 486 goto assert_reset;
332 } 487 }
333 488
334 err = clk_set_rate(dpaux->clk_parent, 270000000); 489 err = clk_set_rate(dpaux->clk_parent, 270000000);
335 if (err < 0) { 490 if (err < 0) {
336 dev_err(&pdev->dev, "failed to set clock to 270 MHz: %d\n", 491 dev_err(&pdev->dev, "failed to set clock to 270 MHz: %d\n",
337 err); 492 err);
338 return err; 493 goto disable_parent_clk;
339 } 494 }
340 495
341 dpaux->vdd = devm_regulator_get(&pdev->dev, "vdd"); 496 dpaux->vdd = devm_regulator_get(&pdev->dev, "vdd");
342 if (IS_ERR(dpaux->vdd)) { 497 if (IS_ERR(dpaux->vdd)) {
343 dev_err(&pdev->dev, "failed to get VDD supply: %ld\n", 498 dev_err(&pdev->dev, "failed to get VDD supply: %ld\n",
344 PTR_ERR(dpaux->vdd)); 499 PTR_ERR(dpaux->vdd));
345 return PTR_ERR(dpaux->vdd); 500 err = PTR_ERR(dpaux->vdd);
501 goto disable_parent_clk;
346 } 502 }
347 503
348 err = devm_request_irq(dpaux->dev, dpaux->irq, tegra_dpaux_irq, 0, 504 err = devm_request_irq(dpaux->dev, dpaux->irq, tegra_dpaux_irq, 0,
@@ -350,7 +506,7 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
350 if (err < 0) { 506 if (err < 0) {
351 dev_err(dpaux->dev, "failed to request IRQ#%u: %d\n", 507 dev_err(dpaux->dev, "failed to request IRQ#%u: %d\n",
352 dpaux->irq, err); 508 dpaux->irq, err);
353 return err; 509 goto disable_parent_clk;
354 } 510 }
355 511
356 disable_irq(dpaux->irq); 512 disable_irq(dpaux->irq);
@@ -360,7 +516,7 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
360 516
361 err = drm_dp_aux_register(&dpaux->aux); 517 err = drm_dp_aux_register(&dpaux->aux);
362 if (err < 0) 518 if (err < 0)
363 return err; 519 goto disable_parent_clk;
364 520
365 /* 521 /*
366 * Assume that by default the DPAUX/I2C pads will be used for HDMI, 522 * Assume that by default the DPAUX/I2C pads will be used for HDMI,
@@ -370,16 +526,24 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
370 * is no possibility to perform the I2C mode configuration in the 526 * is no possibility to perform the I2C mode configuration in the
371 * HDMI path. 527 * HDMI path.
372 */ 528 */
373 value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE); 529 err = tegra_dpaux_pad_config(dpaux, DPAUX_HYBRID_PADCTL_MODE_I2C);
374 value &= ~DPAUX_HYBRID_SPARE_PAD_POWER_DOWN; 530 if (err < 0)
375 tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE); 531 return err;
376
377 value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_PADCTL);
378 value = DPAUX_HYBRID_PADCTL_I2C_SDA_INPUT_RCV |
379 DPAUX_HYBRID_PADCTL_I2C_SCL_INPUT_RCV |
380 DPAUX_HYBRID_PADCTL_MODE_I2C;
381 tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_PADCTL);
382 532
533#ifdef CONFIG_GENERIC_PINCONF
534 dpaux->desc.name = dev_name(&pdev->dev);
535 dpaux->desc.pins = tegra_dpaux_pins;
536 dpaux->desc.npins = ARRAY_SIZE(tegra_dpaux_pins);
537 dpaux->desc.pctlops = &tegra_dpaux_pinctrl_ops;
538 dpaux->desc.pmxops = &tegra_dpaux_pinmux_ops;
539 dpaux->desc.owner = THIS_MODULE;
540
541 dpaux->pinctrl = devm_pinctrl_register(&pdev->dev, &dpaux->desc, dpaux);
542 if (!dpaux->pinctrl) {
543 dev_err(&pdev->dev, "failed to register pincontrol\n");
544 return -ENODEV;
545 }
546#endif
383 /* enable and clear all interrupts */ 547 /* enable and clear all interrupts */
384 value = DPAUX_INTR_AUX_DONE | DPAUX_INTR_IRQ_EVENT | 548 value = DPAUX_INTR_AUX_DONE | DPAUX_INTR_IRQ_EVENT |
385 DPAUX_INTR_UNPLUG_EVENT | DPAUX_INTR_PLUG_EVENT; 549 DPAUX_INTR_UNPLUG_EVENT | DPAUX_INTR_PLUG_EVENT;
@@ -393,17 +557,24 @@ static int tegra_dpaux_probe(struct platform_device *pdev)
393 platform_set_drvdata(pdev, dpaux); 557 platform_set_drvdata(pdev, dpaux);
394 558
395 return 0; 559 return 0;
560
561disable_parent_clk:
562 clk_disable_unprepare(dpaux->clk_parent);
563assert_reset:
564 if (dpaux->rst)
565 reset_control_assert(dpaux->rst);
566
567 clk_disable_unprepare(dpaux->clk);
568
569 return err;
396} 570}
397 571
398static int tegra_dpaux_remove(struct platform_device *pdev) 572static int tegra_dpaux_remove(struct platform_device *pdev)
399{ 573{
400 struct tegra_dpaux *dpaux = platform_get_drvdata(pdev); 574 struct tegra_dpaux *dpaux = platform_get_drvdata(pdev);
401 u32 value;
402 575
403 /* make sure pads are powered down when not in use */ 576 /* make sure pads are powered down when not in use */
404 value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE); 577 tegra_dpaux_pad_power_down(dpaux);
405 value |= DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
406 tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
407 578
408 drm_dp_aux_unregister(&dpaux->aux); 579 drm_dp_aux_unregister(&dpaux->aux);
409 580
@@ -414,7 +585,10 @@ static int tegra_dpaux_remove(struct platform_device *pdev)
414 cancel_work_sync(&dpaux->work); 585 cancel_work_sync(&dpaux->work);
415 586
416 clk_disable_unprepare(dpaux->clk_parent); 587 clk_disable_unprepare(dpaux->clk_parent);
417 reset_control_assert(dpaux->rst); 588
589 if (dpaux->rst)
590 reset_control_assert(dpaux->rst);
591
418 clk_disable_unprepare(dpaux->clk); 592 clk_disable_unprepare(dpaux->clk);
419 593
420 return 0; 594 return 0;
@@ -528,30 +702,15 @@ enum drm_connector_status drm_dp_aux_detect(struct drm_dp_aux *aux)
528int drm_dp_aux_enable(struct drm_dp_aux *aux) 702int drm_dp_aux_enable(struct drm_dp_aux *aux)
529{ 703{
530 struct tegra_dpaux *dpaux = to_dpaux(aux); 704 struct tegra_dpaux *dpaux = to_dpaux(aux);
531 u32 value;
532
533 value = DPAUX_HYBRID_PADCTL_AUX_CMH(2) |
534 DPAUX_HYBRID_PADCTL_AUX_DRVZ(4) |
535 DPAUX_HYBRID_PADCTL_AUX_DRVI(0x18) |
536 DPAUX_HYBRID_PADCTL_AUX_INPUT_RCV |
537 DPAUX_HYBRID_PADCTL_MODE_AUX;
538 tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_PADCTL);
539
540 value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE);
541 value &= ~DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
542 tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
543 705
544 return 0; 706 return tegra_dpaux_pad_config(dpaux, DPAUX_PADCTL_FUNC_AUX);
545} 707}
546 708
547int drm_dp_aux_disable(struct drm_dp_aux *aux) 709int drm_dp_aux_disable(struct drm_dp_aux *aux)
548{ 710{
549 struct tegra_dpaux *dpaux = to_dpaux(aux); 711 struct tegra_dpaux *dpaux = to_dpaux(aux);
550 u32 value;
551 712
552 value = tegra_dpaux_readl(dpaux, DPAUX_HYBRID_SPARE); 713 tegra_dpaux_pad_power_down(dpaux);
553 value |= DPAUX_HYBRID_SPARE_PAD_POWER_DOWN;
554 tegra_dpaux_writel(dpaux, value, DPAUX_HYBRID_SPARE);
555 714
556 return 0; 715 return 0;
557} 716}
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index b59c3bf0df44..755264d9db22 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -56,8 +56,8 @@ static void tegra_atomic_complete(struct tegra_drm *tegra,
56 */ 56 */
57 57
58 drm_atomic_helper_commit_modeset_disables(drm, state); 58 drm_atomic_helper_commit_modeset_disables(drm, state);
59 drm_atomic_helper_commit_planes(drm, state, false);
60 drm_atomic_helper_commit_modeset_enables(drm, state); 59 drm_atomic_helper_commit_modeset_enables(drm, state);
60 drm_atomic_helper_commit_planes(drm, state, true);
61 61
62 drm_atomic_helper_wait_for_vblanks(drm, state); 62 drm_atomic_helper_wait_for_vblanks(drm, state);
63 63
@@ -93,7 +93,7 @@ static int tegra_atomic_commit(struct drm_device *drm,
93 * the software side now. 93 * the software side now.
94 */ 94 */
95 95
96 drm_atomic_helper_swap_state(drm, state); 96 drm_atomic_helper_swap_state(state, true);
97 97
98 if (nonblock) 98 if (nonblock)
99 tegra_atomic_schedule(tegra, state); 99 tegra_atomic_schedule(tegra, state);
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index f52d6cb24ff5..0ddcce1b420d 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -239,8 +239,6 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output);
239void tegra_output_exit(struct tegra_output *output); 239void tegra_output_exit(struct tegra_output *output);
240 240
241int tegra_output_connector_get_modes(struct drm_connector *connector); 241int tegra_output_connector_get_modes(struct drm_connector *connector);
242struct drm_encoder *
243tegra_output_connector_best_encoder(struct drm_connector *connector);
244enum drm_connector_status 242enum drm_connector_status
245tegra_output_connector_detect(struct drm_connector *connector, bool force); 243tegra_output_connector_detect(struct drm_connector *connector, bool force);
246void tegra_output_connector_destroy(struct drm_connector *connector); 244void tegra_output_connector_destroy(struct drm_connector *connector);
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index d1239ebc190f..3d228ad90e0f 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -13,6 +13,7 @@
13#include <linux/of.h> 13#include <linux/of.h>
14#include <linux/of_platform.h> 14#include <linux/of_platform.h>
15#include <linux/platform_device.h> 15#include <linux/platform_device.h>
16#include <linux/pm_runtime.h>
16#include <linux/reset.h> 17#include <linux/reset.h>
17 18
18#include <linux/regulator/consumer.h> 19#include <linux/regulator/consumer.h>
@@ -677,6 +678,45 @@ static void tegra_dsi_ganged_disable(struct tegra_dsi *dsi)
677 tegra_dsi_writel(dsi, 0, DSI_GANGED_MODE_CONTROL); 678 tegra_dsi_writel(dsi, 0, DSI_GANGED_MODE_CONTROL);
678} 679}
679 680
681static int tegra_dsi_pad_enable(struct tegra_dsi *dsi)
682{
683 u32 value;
684
685 value = DSI_PAD_CONTROL_VS1_PULLDN(0) | DSI_PAD_CONTROL_VS1_PDIO(0);
686 tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_0);
687
688 return 0;
689}
690
691static int tegra_dsi_pad_calibrate(struct tegra_dsi *dsi)
692{
693 u32 value;
694
695 /*
696 * XXX Is this still needed? The module reset is deasserted right
697 * before this function is called.
698 */
699 tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_0);
700 tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_1);
701 tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_2);
702 tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_3);
703 tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_4);
704
705 /* start calibration */
706 tegra_dsi_pad_enable(dsi);
707
708 value = DSI_PAD_SLEW_UP(0x7) | DSI_PAD_SLEW_DN(0x7) |
709 DSI_PAD_LP_UP(0x1) | DSI_PAD_LP_DN(0x1) |
710 DSI_PAD_OUT_CLK(0x0);
711 tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_2);
712
713 value = DSI_PAD_PREEMP_PD_CLK(0x3) | DSI_PAD_PREEMP_PU_CLK(0x3) |
714 DSI_PAD_PREEMP_PD(0x03) | DSI_PAD_PREEMP_PU(0x3);
715 tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_3);
716
717 return tegra_mipi_calibrate(dsi->mipi);
718}
719
680static void tegra_dsi_set_timeout(struct tegra_dsi *dsi, unsigned long bclk, 720static void tegra_dsi_set_timeout(struct tegra_dsi *dsi, unsigned long bclk,
681 unsigned int vrefresh) 721 unsigned int vrefresh)
682{ 722{
@@ -794,7 +834,6 @@ tegra_dsi_connector_mode_valid(struct drm_connector *connector,
794static const struct drm_connector_helper_funcs tegra_dsi_connector_helper_funcs = { 834static const struct drm_connector_helper_funcs tegra_dsi_connector_helper_funcs = {
795 .get_modes = tegra_output_connector_get_modes, 835 .get_modes = tegra_output_connector_get_modes,
796 .mode_valid = tegra_dsi_connector_mode_valid, 836 .mode_valid = tegra_dsi_connector_mode_valid,
797 .best_encoder = tegra_output_connector_best_encoder,
798}; 837};
799 838
800static const struct drm_encoder_funcs tegra_dsi_encoder_funcs = { 839static const struct drm_encoder_funcs tegra_dsi_encoder_funcs = {
@@ -837,7 +876,7 @@ static void tegra_dsi_encoder_disable(struct drm_encoder *encoder)
837 876
838 tegra_dsi_disable(dsi); 877 tegra_dsi_disable(dsi);
839 878
840 return; 879 pm_runtime_put(dsi->dev);
841} 880}
842 881
843static void tegra_dsi_encoder_enable(struct drm_encoder *encoder) 882static void tegra_dsi_encoder_enable(struct drm_encoder *encoder)
@@ -848,6 +887,13 @@ static void tegra_dsi_encoder_enable(struct drm_encoder *encoder)
848 struct tegra_dsi *dsi = to_dsi(output); 887 struct tegra_dsi *dsi = to_dsi(output);
849 struct tegra_dsi_state *state; 888 struct tegra_dsi_state *state;
850 u32 value; 889 u32 value;
890 int err;
891
892 pm_runtime_get_sync(dsi->dev);
893
894 err = tegra_dsi_pad_calibrate(dsi);
895 if (err < 0)
896 dev_err(dsi->dev, "MIPI calibration failed: %d\n", err);
851 897
852 state = tegra_dsi_get_state(dsi); 898 state = tegra_dsi_get_state(dsi);
853 899
@@ -876,8 +922,6 @@ static void tegra_dsi_encoder_enable(struct drm_encoder *encoder)
876 922
877 if (output->panel) 923 if (output->panel)
878 drm_panel_enable(output->panel); 924 drm_panel_enable(output->panel);
879
880 return;
881} 925}
882 926
883static int 927static int
@@ -967,55 +1011,12 @@ static const struct drm_encoder_helper_funcs tegra_dsi_encoder_helper_funcs = {
967 .atomic_check = tegra_dsi_encoder_atomic_check, 1011 .atomic_check = tegra_dsi_encoder_atomic_check,
968}; 1012};
969 1013
970static int tegra_dsi_pad_enable(struct tegra_dsi *dsi)
971{
972 u32 value;
973
974 value = DSI_PAD_CONTROL_VS1_PULLDN(0) | DSI_PAD_CONTROL_VS1_PDIO(0);
975 tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_0);
976
977 return 0;
978}
979
980static int tegra_dsi_pad_calibrate(struct tegra_dsi *dsi)
981{
982 u32 value;
983
984 tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_0);
985 tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_1);
986 tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_2);
987 tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_3);
988 tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_4);
989
990 /* start calibration */
991 tegra_dsi_pad_enable(dsi);
992
993 value = DSI_PAD_SLEW_UP(0x7) | DSI_PAD_SLEW_DN(0x7) |
994 DSI_PAD_LP_UP(0x1) | DSI_PAD_LP_DN(0x1) |
995 DSI_PAD_OUT_CLK(0x0);
996 tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_2);
997
998 value = DSI_PAD_PREEMP_PD_CLK(0x3) | DSI_PAD_PREEMP_PU_CLK(0x3) |
999 DSI_PAD_PREEMP_PD(0x03) | DSI_PAD_PREEMP_PU(0x3);
1000 tegra_dsi_writel(dsi, value, DSI_PAD_CONTROL_3);
1001
1002 return tegra_mipi_calibrate(dsi->mipi);
1003}
1004
1005static int tegra_dsi_init(struct host1x_client *client) 1014static int tegra_dsi_init(struct host1x_client *client)
1006{ 1015{
1007 struct drm_device *drm = dev_get_drvdata(client->parent); 1016 struct drm_device *drm = dev_get_drvdata(client->parent);
1008 struct tegra_dsi *dsi = host1x_client_to_dsi(client); 1017 struct tegra_dsi *dsi = host1x_client_to_dsi(client);
1009 int err; 1018 int err;
1010 1019
1011 reset_control_deassert(dsi->rst);
1012
1013 err = tegra_dsi_pad_calibrate(dsi);
1014 if (err < 0) {
1015 dev_err(dsi->dev, "MIPI calibration failed: %d\n", err);
1016 goto reset;
1017 }
1018
1019 /* Gangsters must not register their own outputs. */ 1020 /* Gangsters must not register their own outputs. */
1020 if (!dsi->master) { 1021 if (!dsi->master) {
1021 dsi->output.dev = client->dev; 1022 dsi->output.dev = client->dev;
@@ -1038,12 +1039,9 @@ static int tegra_dsi_init(struct host1x_client *client)
1038 drm_connector_register(&dsi->output.connector); 1039 drm_connector_register(&dsi->output.connector);
1039 1040
1040 err = tegra_output_init(drm, &dsi->output); 1041 err = tegra_output_init(drm, &dsi->output);
1041 if (err < 0) { 1042 if (err < 0)
1042 dev_err(client->dev, 1043 dev_err(dsi->dev, "failed to initialize output: %d\n",
1043 "failed to initialize output: %d\n",
1044 err); 1044 err);
1045 goto reset;
1046 }
1047 1045
1048 dsi->output.encoder.possible_crtcs = 0x3; 1046 dsi->output.encoder.possible_crtcs = 0x3;
1049 } 1047 }
@@ -1055,10 +1053,6 @@ static int tegra_dsi_init(struct host1x_client *client)
1055 } 1053 }
1056 1054
1057 return 0; 1055 return 0;
1058
1059reset:
1060 reset_control_assert(dsi->rst);
1061 return err;
1062} 1056}
1063 1057
1064static int tegra_dsi_exit(struct host1x_client *client) 1058static int tegra_dsi_exit(struct host1x_client *client)
@@ -1070,7 +1064,7 @@ static int tegra_dsi_exit(struct host1x_client *client)
1070 if (IS_ENABLED(CONFIG_DEBUG_FS)) 1064 if (IS_ENABLED(CONFIG_DEBUG_FS))
1071 tegra_dsi_debugfs_exit(dsi); 1065 tegra_dsi_debugfs_exit(dsi);
1072 1066
1073 reset_control_assert(dsi->rst); 1067 regulator_disable(dsi->vdd);
1074 1068
1075 return 0; 1069 return 0;
1076} 1070}
@@ -1494,74 +1488,50 @@ static int tegra_dsi_probe(struct platform_device *pdev)
1494 dsi->format = MIPI_DSI_FMT_RGB888; 1488 dsi->format = MIPI_DSI_FMT_RGB888;
1495 dsi->lanes = 4; 1489 dsi->lanes = 4;
1496 1490
1497 dsi->rst = devm_reset_control_get(&pdev->dev, "dsi"); 1491 if (!pdev->dev.pm_domain) {
1498 if (IS_ERR(dsi->rst)) 1492 dsi->rst = devm_reset_control_get(&pdev->dev, "dsi");
1499 return PTR_ERR(dsi->rst); 1493 if (IS_ERR(dsi->rst))
1494 return PTR_ERR(dsi->rst);
1495 }
1500 1496
1501 dsi->clk = devm_clk_get(&pdev->dev, NULL); 1497 dsi->clk = devm_clk_get(&pdev->dev, NULL);
1502 if (IS_ERR(dsi->clk)) { 1498 if (IS_ERR(dsi->clk)) {
1503 dev_err(&pdev->dev, "cannot get DSI clock\n"); 1499 dev_err(&pdev->dev, "cannot get DSI clock\n");
1504 err = PTR_ERR(dsi->clk); 1500 return PTR_ERR(dsi->clk);
1505 goto reset;
1506 }
1507
1508 err = clk_prepare_enable(dsi->clk);
1509 if (err < 0) {
1510 dev_err(&pdev->dev, "cannot enable DSI clock\n");
1511 goto reset;
1512 } 1501 }
1513 1502
1514 dsi->clk_lp = devm_clk_get(&pdev->dev, "lp"); 1503 dsi->clk_lp = devm_clk_get(&pdev->dev, "lp");
1515 if (IS_ERR(dsi->clk_lp)) { 1504 if (IS_ERR(dsi->clk_lp)) {
1516 dev_err(&pdev->dev, "cannot get low-power clock\n"); 1505 dev_err(&pdev->dev, "cannot get low-power clock\n");
1517 err = PTR_ERR(dsi->clk_lp); 1506 return PTR_ERR(dsi->clk_lp);
1518 goto disable_clk;
1519 }
1520
1521 err = clk_prepare_enable(dsi->clk_lp);
1522 if (err < 0) {
1523 dev_err(&pdev->dev, "cannot enable low-power clock\n");
1524 goto disable_clk;
1525 } 1507 }
1526 1508
1527 dsi->clk_parent = devm_clk_get(&pdev->dev, "parent"); 1509 dsi->clk_parent = devm_clk_get(&pdev->dev, "parent");
1528 if (IS_ERR(dsi->clk_parent)) { 1510 if (IS_ERR(dsi->clk_parent)) {
1529 dev_err(&pdev->dev, "cannot get parent clock\n"); 1511 dev_err(&pdev->dev, "cannot get parent clock\n");
1530 err = PTR_ERR(dsi->clk_parent); 1512 return PTR_ERR(dsi->clk_parent);
1531 goto disable_clk_lp;
1532 } 1513 }
1533 1514
1534 dsi->vdd = devm_regulator_get(&pdev->dev, "avdd-dsi-csi"); 1515 dsi->vdd = devm_regulator_get(&pdev->dev, "avdd-dsi-csi");
1535 if (IS_ERR(dsi->vdd)) { 1516 if (IS_ERR(dsi->vdd)) {
1536 dev_err(&pdev->dev, "cannot get VDD supply\n"); 1517 dev_err(&pdev->dev, "cannot get VDD supply\n");
1537 err = PTR_ERR(dsi->vdd); 1518 return PTR_ERR(dsi->vdd);
1538 goto disable_clk_lp;
1539 }
1540
1541 err = regulator_enable(dsi->vdd);
1542 if (err < 0) {
1543 dev_err(&pdev->dev, "cannot enable VDD supply\n");
1544 goto disable_clk_lp;
1545 } 1519 }
1546 1520
1547 err = tegra_dsi_setup_clocks(dsi); 1521 err = tegra_dsi_setup_clocks(dsi);
1548 if (err < 0) { 1522 if (err < 0) {
1549 dev_err(&pdev->dev, "cannot setup clocks\n"); 1523 dev_err(&pdev->dev, "cannot setup clocks\n");
1550 goto disable_vdd; 1524 return err;
1551 } 1525 }
1552 1526
1553 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1527 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1554 dsi->regs = devm_ioremap_resource(&pdev->dev, regs); 1528 dsi->regs = devm_ioremap_resource(&pdev->dev, regs);
1555 if (IS_ERR(dsi->regs)) { 1529 if (IS_ERR(dsi->regs))
1556 err = PTR_ERR(dsi->regs); 1530 return PTR_ERR(dsi->regs);
1557 goto disable_vdd;
1558 }
1559 1531
1560 dsi->mipi = tegra_mipi_request(&pdev->dev); 1532 dsi->mipi = tegra_mipi_request(&pdev->dev);
1561 if (IS_ERR(dsi->mipi)) { 1533 if (IS_ERR(dsi->mipi))
1562 err = PTR_ERR(dsi->mipi); 1534 return PTR_ERR(dsi->mipi);
1563 goto disable_vdd;
1564 }
1565 1535
1566 dsi->host.ops = &tegra_dsi_host_ops; 1536 dsi->host.ops = &tegra_dsi_host_ops;
1567 dsi->host.dev = &pdev->dev; 1537 dsi->host.dev = &pdev->dev;
@@ -1572,6 +1542,9 @@ static int tegra_dsi_probe(struct platform_device *pdev)
1572 goto mipi_free; 1542 goto mipi_free;
1573 } 1543 }
1574 1544
1545 platform_set_drvdata(pdev, dsi);
1546 pm_runtime_enable(&pdev->dev);
1547
1575 INIT_LIST_HEAD(&dsi->client.list); 1548 INIT_LIST_HEAD(&dsi->client.list);
1576 dsi->client.ops = &dsi_client_ops; 1549 dsi->client.ops = &dsi_client_ops;
1577 dsi->client.dev = &pdev->dev; 1550 dsi->client.dev = &pdev->dev;
@@ -1583,22 +1556,12 @@ static int tegra_dsi_probe(struct platform_device *pdev)
1583 goto unregister; 1556 goto unregister;
1584 } 1557 }
1585 1558
1586 platform_set_drvdata(pdev, dsi);
1587
1588 return 0; 1559 return 0;
1589 1560
1590unregister: 1561unregister:
1591 mipi_dsi_host_unregister(&dsi->host); 1562 mipi_dsi_host_unregister(&dsi->host);
1592mipi_free: 1563mipi_free:
1593 tegra_mipi_free(dsi->mipi); 1564 tegra_mipi_free(dsi->mipi);
1594disable_vdd:
1595 regulator_disable(dsi->vdd);
1596disable_clk_lp:
1597 clk_disable_unprepare(dsi->clk_lp);
1598disable_clk:
1599 clk_disable_unprepare(dsi->clk);
1600reset:
1601 reset_control_assert(dsi->rst);
1602 return err; 1565 return err;
1603} 1566}
1604 1567
@@ -1607,6 +1570,8 @@ static int tegra_dsi_remove(struct platform_device *pdev)
1607 struct tegra_dsi *dsi = platform_get_drvdata(pdev); 1570 struct tegra_dsi *dsi = platform_get_drvdata(pdev);
1608 int err; 1571 int err;
1609 1572
1573 pm_runtime_disable(&pdev->dev);
1574
1610 err = host1x_client_unregister(&dsi->client); 1575 err = host1x_client_unregister(&dsi->client);
1611 if (err < 0) { 1576 if (err < 0) {
1612 dev_err(&pdev->dev, "failed to unregister host1x client: %d\n", 1577 dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
@@ -1619,14 +1584,82 @@ static int tegra_dsi_remove(struct platform_device *pdev)
1619 mipi_dsi_host_unregister(&dsi->host); 1584 mipi_dsi_host_unregister(&dsi->host);
1620 tegra_mipi_free(dsi->mipi); 1585 tegra_mipi_free(dsi->mipi);
1621 1586
1622 regulator_disable(dsi->vdd); 1587 return 0;
1588}
1589
1590#ifdef CONFIG_PM
1591static int tegra_dsi_suspend(struct device *dev)
1592{
1593 struct tegra_dsi *dsi = dev_get_drvdata(dev);
1594 int err;
1595
1596 if (dsi->rst) {
1597 err = reset_control_assert(dsi->rst);
1598 if (err < 0) {
1599 dev_err(dev, "failed to assert reset: %d\n", err);
1600 return err;
1601 }
1602 }
1603
1604 usleep_range(1000, 2000);
1605
1623 clk_disable_unprepare(dsi->clk_lp); 1606 clk_disable_unprepare(dsi->clk_lp);
1624 clk_disable_unprepare(dsi->clk); 1607 clk_disable_unprepare(dsi->clk);
1625 reset_control_assert(dsi->rst); 1608
1609 regulator_disable(dsi->vdd);
1626 1610
1627 return 0; 1611 return 0;
1628} 1612}
1629 1613
1614static int tegra_dsi_resume(struct device *dev)
1615{
1616 struct tegra_dsi *dsi = dev_get_drvdata(dev);
1617 int err;
1618
1619 err = regulator_enable(dsi->vdd);
1620 if (err < 0) {
1621 dev_err(dsi->dev, "failed to enable VDD supply: %d\n", err);
1622 return err;
1623 }
1624
1625 err = clk_prepare_enable(dsi->clk);
1626 if (err < 0) {
1627 dev_err(dev, "cannot enable DSI clock: %d\n", err);
1628 goto disable_vdd;
1629 }
1630
1631 err = clk_prepare_enable(dsi->clk_lp);
1632 if (err < 0) {
1633 dev_err(dev, "cannot enable low-power clock: %d\n", err);
1634 goto disable_clk;
1635 }
1636
1637 usleep_range(1000, 2000);
1638
1639 if (dsi->rst) {
1640 err = reset_control_deassert(dsi->rst);
1641 if (err < 0) {
 1642	 		dev_err(dev, "cannot deassert reset: %d\n", err);
1643 goto disable_clk_lp;
1644 }
1645 }
1646
1647 return 0;
1648
1649disable_clk_lp:
1650 clk_disable_unprepare(dsi->clk_lp);
1651disable_clk:
1652 clk_disable_unprepare(dsi->clk);
1653disable_vdd:
1654 regulator_disable(dsi->vdd);
1655 return err;
1656}
1657#endif
1658
1659static const struct dev_pm_ops tegra_dsi_pm_ops = {
1660 SET_RUNTIME_PM_OPS(tegra_dsi_suspend, tegra_dsi_resume, NULL)
1661};
1662
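A hedged sketch of how a code path in this driver would bracket register access using the runtime PM callbacks wired up through SET_RUNTIME_PM_OPS() above; example_dsi_access() is an illustrative name, not part of the patch:

	/* Illustrative only: taking a runtime PM reference resumes the
	 * device if it is suspended, i.e. tegra_dsi_resume() enables the
	 * VDD supply and both clocks and then deasserts the reset.
	 */
	static void example_dsi_access(struct tegra_dsi *dsi)
	{
		pm_runtime_get_sync(dsi->dev);

		/* ... program DSI registers while the module is powered ... */

		/* Drop the reference; tegra_dsi_suspend() runs once the
		 * device goes idle and reverses the sequence above.
		 */
		pm_runtime_put(dsi->dev);
	}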
1630static const struct of_device_id tegra_dsi_of_match[] = { 1663static const struct of_device_id tegra_dsi_of_match[] = {
1631 { .compatible = "nvidia,tegra210-dsi", }, 1664 { .compatible = "nvidia,tegra210-dsi", },
1632 { .compatible = "nvidia,tegra132-dsi", }, 1665 { .compatible = "nvidia,tegra132-dsi", },
@@ -1640,6 +1673,7 @@ struct platform_driver tegra_dsi_driver = {
1640 .driver = { 1673 .driver = {
1641 .name = "tegra-dsi", 1674 .name = "tegra-dsi",
1642 .of_match_table = tegra_dsi_of_match, 1675 .of_match_table = tegra_dsi_of_match,
1676 .pm = &tegra_dsi_pm_ops,
1643 }, 1677 },
1644 .probe = tegra_dsi_probe, 1678 .probe = tegra_dsi_probe,
1645 .remove = tegra_dsi_remove, 1679 .remove = tegra_dsi_remove,
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 1b12aa7a715e..e6d71fa4028e 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -68,7 +68,7 @@ static void tegra_fb_destroy(struct drm_framebuffer *framebuffer)
68 struct tegra_bo *bo = fb->planes[i]; 68 struct tegra_bo *bo = fb->planes[i];
69 69
70 if (bo) { 70 if (bo) {
71 if (bo->pages && bo->vaddr) 71 if (bo->pages)
72 vunmap(bo->vaddr); 72 vunmap(bo->vaddr);
73 73
74 drm_gem_object_unreference_unlocked(&bo->gem); 74 drm_gem_object_unreference_unlocked(&bo->gem);
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index b7ef4929e347..cda0491ed6bf 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -11,6 +11,7 @@
11#include <linux/debugfs.h> 11#include <linux/debugfs.h>
12#include <linux/gpio.h> 12#include <linux/gpio.h>
13#include <linux/hdmi.h> 13#include <linux/hdmi.h>
14#include <linux/pm_runtime.h>
14#include <linux/regulator/consumer.h> 15#include <linux/regulator/consumer.h>
15#include <linux/reset.h> 16#include <linux/reset.h>
16 17
@@ -18,10 +19,14 @@
18#include <drm/drm_crtc.h> 19#include <drm/drm_crtc.h>
19#include <drm/drm_crtc_helper.h> 20#include <drm/drm_crtc_helper.h>
20 21
22#include <sound/hda_verbs.h>
23
21#include "hdmi.h" 24#include "hdmi.h"
22#include "drm.h" 25#include "drm.h"
23#include "dc.h" 26#include "dc.h"
24 27
28#define HDMI_ELD_BUFFER_SIZE 96
29
25struct tmds_config { 30struct tmds_config {
26 unsigned int pclk; 31 unsigned int pclk;
27 u32 pll0; 32 u32 pll0;
@@ -39,6 +44,8 @@ struct tegra_hdmi_config {
39 u32 fuse_override_value; 44 u32 fuse_override_value;
40 45
41 bool has_sor_io_peak_current; 46 bool has_sor_io_peak_current;
47 bool has_hda;
48 bool has_hbr;
42}; 49};
43 50
44struct tegra_hdmi { 51struct tegra_hdmi {
@@ -60,7 +67,10 @@ struct tegra_hdmi {
60 const struct tegra_hdmi_config *config; 67 const struct tegra_hdmi_config *config;
61 68
62 unsigned int audio_source; 69 unsigned int audio_source;
63 unsigned int audio_freq; 70 unsigned int audio_sample_rate;
71 unsigned int audio_channels;
72
73 unsigned int pixel_clock;
64 bool stereo; 74 bool stereo;
65 bool dvi; 75 bool dvi;
66 76
@@ -402,11 +412,11 @@ static const struct tmds_config tegra124_tmds_config[] = {
402}; 412};
403 413
404static const struct tegra_hdmi_audio_config * 414static const struct tegra_hdmi_audio_config *
405tegra_hdmi_get_audio_config(unsigned int audio_freq, unsigned int pclk) 415tegra_hdmi_get_audio_config(unsigned int sample_rate, unsigned int pclk)
406{ 416{
407 const struct tegra_hdmi_audio_config *table; 417 const struct tegra_hdmi_audio_config *table;
408 418
409 switch (audio_freq) { 419 switch (sample_rate) {
410 case 32000: 420 case 32000:
411 table = tegra_hdmi_audio_32k; 421 table = tegra_hdmi_audio_32k;
412 break; 422 break;
@@ -476,44 +486,114 @@ static void tegra_hdmi_setup_audio_fs_tables(struct tegra_hdmi *hdmi)
476 } 486 }
477} 487}
478 488
479static int tegra_hdmi_setup_audio(struct tegra_hdmi *hdmi, unsigned int pclk) 489static void tegra_hdmi_write_aval(struct tegra_hdmi *hdmi, u32 value)
490{
491 static const struct {
492 unsigned int sample_rate;
493 unsigned int offset;
494 } regs[] = {
495 { 32000, HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320 },
496 { 44100, HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441 },
497 { 48000, HDMI_NV_PDISP_SOR_AUDIO_AVAL_0480 },
498 { 88200, HDMI_NV_PDISP_SOR_AUDIO_AVAL_0882 },
499 { 96000, HDMI_NV_PDISP_SOR_AUDIO_AVAL_0960 },
500 { 176400, HDMI_NV_PDISP_SOR_AUDIO_AVAL_1764 },
501 { 192000, HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920 },
502 };
503 unsigned int i;
504
505 for (i = 0; i < ARRAY_SIZE(regs); i++) {
506 if (regs[i].sample_rate == hdmi->audio_sample_rate) {
507 tegra_hdmi_writel(hdmi, value, regs[i].offset);
508 break;
509 }
510 }
511}
512
513static int tegra_hdmi_setup_audio(struct tegra_hdmi *hdmi)
480{ 514{
481 struct device_node *node = hdmi->dev->of_node;
482 const struct tegra_hdmi_audio_config *config; 515 const struct tegra_hdmi_audio_config *config;
483 unsigned int offset = 0; 516 u32 source, value;
484 u32 value;
485 517
486 switch (hdmi->audio_source) { 518 switch (hdmi->audio_source) {
487 case HDA: 519 case HDA:
488 value = AUDIO_CNTRL0_SOURCE_SELECT_HDAL; 520 if (hdmi->config->has_hda)
521 source = SOR_AUDIO_CNTRL0_SOURCE_SELECT_HDAL;
522 else
523 return -EINVAL;
524
489 break; 525 break;
490 526
491 case SPDIF: 527 case SPDIF:
492 value = AUDIO_CNTRL0_SOURCE_SELECT_SPDIF; 528 if (hdmi->config->has_hda)
529 source = SOR_AUDIO_CNTRL0_SOURCE_SELECT_SPDIF;
530 else
531 source = AUDIO_CNTRL0_SOURCE_SELECT_SPDIF;
493 break; 532 break;
494 533
495 default: 534 default:
496 value = AUDIO_CNTRL0_SOURCE_SELECT_AUTO; 535 if (hdmi->config->has_hda)
536 source = SOR_AUDIO_CNTRL0_SOURCE_SELECT_AUTO;
537 else
538 source = AUDIO_CNTRL0_SOURCE_SELECT_AUTO;
497 break; 539 break;
498 } 540 }
499 541
500 if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) { 542 /*
501 value |= AUDIO_CNTRL0_ERROR_TOLERANCE(6) | 543 * Tegra30 and later use a slightly modified version of the register
502			AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0);	 544	 * layout to accommodate changes related to supporting HDA as the
503 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_CNTRL0); 545 * audio input source for HDMI. The source select field has moved to
504 } else { 546 * the SOR_AUDIO_CNTRL0 register, but the error tolerance and frames
505 value |= AUDIO_CNTRL0_INJECT_NULLSMPL; 547 * per block fields remain in the AUDIO_CNTRL0 register.
548 */
549 if (hdmi->config->has_hda) {
550 /*
551 * Inject null samples into the audio FIFO for every frame in
552 * which the codec did not receive any samples. This applies
553 * to stereo LPCM only.
554 *
555 * XXX: This seems to be a remnant of MCP days when this was
556 * used to work around issues with monitors not being able to
557 * play back system startup sounds early. It is possibly not
558 * needed on Linux at all.
559 */
560 if (hdmi->audio_channels == 2)
561 value = SOR_AUDIO_CNTRL0_INJECT_NULLSMPL;
562 else
563 value = 0;
564
565 value |= source;
566
506 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_AUDIO_CNTRL0); 567 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_AUDIO_CNTRL0);
568 }
507 569
508 value = AUDIO_CNTRL0_ERROR_TOLERANCE(6) | 570 /*
509 AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0); 571 * On Tegra20, HDA is not a supported audio source and the source
510 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_CNTRL0); 572 * select field is part of the AUDIO_CNTRL0 register.
573 */
574 value = AUDIO_CNTRL0_FRAMES_PER_BLOCK(0xc0) |
575 AUDIO_CNTRL0_ERROR_TOLERANCE(6);
576
577 if (!hdmi->config->has_hda)
578 value |= source;
579
580 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_CNTRL0);
581
582 /*
583 * Advertise support for High Bit-Rate on Tegra114 and later.
584 */
585 if (hdmi->config->has_hbr) {
586 value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_AUDIO_SPARE0);
587 value |= SOR_AUDIO_SPARE0_HBR_ENABLE;
588 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_AUDIO_SPARE0);
511 } 589 }
512 590
513 config = tegra_hdmi_get_audio_config(hdmi->audio_freq, pclk); 591 config = tegra_hdmi_get_audio_config(hdmi->audio_sample_rate,
592 hdmi->pixel_clock);
514 if (!config) { 593 if (!config) {
515 dev_err(hdmi->dev, "cannot set audio to %u at %u pclk\n", 594 dev_err(hdmi->dev,
516 hdmi->audio_freq, pclk); 595 "cannot set audio to %u Hz at %u Hz pixel clock\n",
596 hdmi->audio_sample_rate, hdmi->pixel_clock);
517 return -EINVAL; 597 return -EINVAL;
518 } 598 }
519 599
@@ -526,8 +606,8 @@ static int tegra_hdmi_setup_audio(struct tegra_hdmi *hdmi, unsigned int pclk)
526 tegra_hdmi_writel(hdmi, ACR_SUBPACK_N(config->n) | ACR_ENABLE, 606 tegra_hdmi_writel(hdmi, ACR_SUBPACK_N(config->n) | ACR_ENABLE,
527 HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH); 607 HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_HIGH);
528 608
529 value = ACR_SUBPACK_CTS(config->cts); 609 tegra_hdmi_writel(hdmi, ACR_SUBPACK_CTS(config->cts),
530 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW); 610 HDMI_NV_PDISP_HDMI_ACR_0441_SUBPACK_LOW);
531 611
532 value = SPARE_HW_CTS | SPARE_FORCE_SW_CTS | SPARE_CTS_RESET_VAL(1); 612 value = SPARE_HW_CTS | SPARE_FORCE_SW_CTS | SPARE_CTS_RESET_VAL(1);
533 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_SPARE); 613 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_SPARE);
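The N and CTS values written into the ACR subpacks above follow the HDMI audio clock regeneration relation 128 * fs = f_TMDS * N / CTS; a hedged, illustrative helper for deriving CTS (not part of the patch, needs linux/math64.h for div_u64()):

	/* Illustrative only: CTS for a given TMDS clock, sample rate and N,
	 * from 128 * fs = f_tmds * N / CTS.
	 */
	static unsigned int example_hdmi_cts(unsigned int ftmds, unsigned int fs,
					     unsigned int n)
	{
		return (unsigned int)div_u64((u64)ftmds * n, 128 * fs);
	}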
@@ -536,43 +616,53 @@ static int tegra_hdmi_setup_audio(struct tegra_hdmi *hdmi, unsigned int pclk)
536 value &= ~AUDIO_N_RESETF; 616 value &= ~AUDIO_N_RESETF;
537 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_N); 617 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_AUDIO_N);
538 618
539 if (of_device_is_compatible(node, "nvidia,tegra30-hdmi")) { 619 if (hdmi->config->has_hda)
540 switch (hdmi->audio_freq) { 620 tegra_hdmi_write_aval(hdmi, config->aval);
541 case 32000:
542 offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320;
543 break;
544 621
545 case 44100: 622 tegra_hdmi_setup_audio_fs_tables(hdmi);
546 offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441;
547 break;
548 623
549 case 48000: 624 return 0;
550 offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0480; 625}
551 break;
552 626
553 case 88200: 627static void tegra_hdmi_disable_audio(struct tegra_hdmi *hdmi)
554 offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0882; 628{
555 break; 629 u32 value;
556 630
557 case 96000: 631 value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
558 offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_0960; 632 value &= ~GENERIC_CTRL_AUDIO;
559 break; 633 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
634}
560 635
561 case 176400: 636static void tegra_hdmi_enable_audio(struct tegra_hdmi *hdmi)
562 offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_1764; 637{
563 break; 638 u32 value;
564 639
565 case 192000: 640 value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
566 offset = HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920; 641 value |= GENERIC_CTRL_AUDIO;
567 break; 642 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
568 } 643}
569 644
570 tegra_hdmi_writel(hdmi, config->aval, offset); 645static void tegra_hdmi_write_eld(struct tegra_hdmi *hdmi)
571 } 646{
647 size_t length = drm_eld_size(hdmi->output.connector.eld), i;
648 u32 value;
572 649
573 tegra_hdmi_setup_audio_fs_tables(hdmi); 650 for (i = 0; i < length; i++)
651 tegra_hdmi_writel(hdmi, i << 8 | hdmi->output.connector.eld[i],
652 HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR);
574 653
575 return 0; 654 /*
655 * The HDA codec will always report an ELD buffer size of 96 bytes and
656 * the HDA codec driver will check that each byte read from the buffer
657 * is valid. Therefore every byte must be written, even if no 96 bytes
658 * were parsed from EDID.
659 */
660 for (i = length; i < HDMI_ELD_BUFFER_SIZE; i++)
661 tegra_hdmi_writel(hdmi, i << 8 | 0,
662 HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR);
663
664 value = SOR_AUDIO_HDA_PRESENSE_VALID | SOR_AUDIO_HDA_PRESENSE_PRESENT;
665 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE);
576} 666}
577 667
578static inline u32 tegra_hdmi_subpack(const u8 *ptr, size_t size) 668static inline u32 tegra_hdmi_subpack(const u8 *ptr, size_t size)
@@ -644,12 +734,6 @@ static void tegra_hdmi_setup_avi_infoframe(struct tegra_hdmi *hdmi,
644 u8 buffer[17]; 734 u8 buffer[17];
645 ssize_t err; 735 ssize_t err;
646 736
647 if (hdmi->dvi) {
648 tegra_hdmi_writel(hdmi, 0,
649 HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
650 return;
651 }
652
653 err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode); 737 err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
654 if (err < 0) { 738 if (err < 0) {
655 dev_err(hdmi->dev, "failed to setup AVI infoframe: %zd\n", err); 739 dev_err(hdmi->dev, "failed to setup AVI infoframe: %zd\n", err);
@@ -663,9 +747,24 @@ static void tegra_hdmi_setup_avi_infoframe(struct tegra_hdmi *hdmi,
663 } 747 }
664 748
665 tegra_hdmi_write_infopack(hdmi, buffer, err); 749 tegra_hdmi_write_infopack(hdmi, buffer, err);
750}
751
752static void tegra_hdmi_disable_avi_infoframe(struct tegra_hdmi *hdmi)
753{
754 u32 value;
666 755
667 tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE, 756 value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
668 HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL); 757 value &= ~INFOFRAME_CTRL_ENABLE;
758 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
759}
760
761static void tegra_hdmi_enable_avi_infoframe(struct tegra_hdmi *hdmi)
762{
763 u32 value;
764
765 value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
766 value |= INFOFRAME_CTRL_ENABLE;
767 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_AVI_INFOFRAME_CTRL);
669} 768}
670 769
671static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi) 770static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
@@ -674,12 +773,6 @@ static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
674 u8 buffer[14]; 773 u8 buffer[14];
675 ssize_t err; 774 ssize_t err;
676 775
677 if (hdmi->dvi) {
678 tegra_hdmi_writel(hdmi, 0,
679 HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
680 return;
681 }
682
683 err = hdmi_audio_infoframe_init(&frame); 776 err = hdmi_audio_infoframe_init(&frame);
684 if (err < 0) { 777 if (err < 0) {
685 dev_err(hdmi->dev, "failed to setup audio infoframe: %zd\n", 778 dev_err(hdmi->dev, "failed to setup audio infoframe: %zd\n",
@@ -687,7 +780,7 @@ static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
687 return; 780 return;
688 } 781 }
689 782
690 frame.channels = 2; 783 frame.channels = hdmi->audio_channels;
691 784
692 err = hdmi_audio_infoframe_pack(&frame, buffer, sizeof(buffer)); 785 err = hdmi_audio_infoframe_pack(&frame, buffer, sizeof(buffer));
693 if (err < 0) { 786 if (err < 0) {
@@ -703,9 +796,24 @@ static void tegra_hdmi_setup_audio_infoframe(struct tegra_hdmi *hdmi)
703 * bytes can be programmed. 796 * bytes can be programmed.
704 */ 797 */
705 tegra_hdmi_write_infopack(hdmi, buffer, min_t(size_t, 10, err)); 798 tegra_hdmi_write_infopack(hdmi, buffer, min_t(size_t, 10, err));
799}
706 800
707 tegra_hdmi_writel(hdmi, INFOFRAME_CTRL_ENABLE, 801static void tegra_hdmi_disable_audio_infoframe(struct tegra_hdmi *hdmi)
708 HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL); 802{
803 u32 value;
804
805 value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
806 value &= ~INFOFRAME_CTRL_ENABLE;
807 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
808}
809
810static void tegra_hdmi_enable_audio_infoframe(struct tegra_hdmi *hdmi)
811{
812 u32 value;
813
814 value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
815 value |= INFOFRAME_CTRL_ENABLE;
816 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_AUDIO_INFOFRAME_CTRL);
709} 817}
710 818
711static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi) 819static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi)
@@ -713,14 +821,6 @@ static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi)
713 struct hdmi_vendor_infoframe frame; 821 struct hdmi_vendor_infoframe frame;
714 u8 buffer[10]; 822 u8 buffer[10];
715 ssize_t err; 823 ssize_t err;
716 u32 value;
717
718 if (!hdmi->stereo) {
719 value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
720 value &= ~GENERIC_CTRL_ENABLE;
721 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
722 return;
723 }
724 824
725 hdmi_vendor_infoframe_init(&frame); 825 hdmi_vendor_infoframe_init(&frame);
726 frame.s3d_struct = HDMI_3D_STRUCTURE_FRAME_PACKING; 826 frame.s3d_struct = HDMI_3D_STRUCTURE_FRAME_PACKING;
@@ -733,6 +833,20 @@ static void tegra_hdmi_setup_stereo_infoframe(struct tegra_hdmi *hdmi)
733 } 833 }
734 834
735 tegra_hdmi_write_infopack(hdmi, buffer, err); 835 tegra_hdmi_write_infopack(hdmi, buffer, err);
836}
837
838static void tegra_hdmi_disable_stereo_infoframe(struct tegra_hdmi *hdmi)
839{
840 u32 value;
841
842 value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
843 value &= ~GENERIC_CTRL_ENABLE;
844 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
845}
846
847static void tegra_hdmi_enable_stereo_infoframe(struct tegra_hdmi *hdmi)
848{
849 u32 value;
736 850
737 value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL); 851 value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
738 value |= GENERIC_CTRL_ENABLE; 852 value |= GENERIC_CTRL_ENABLE;
@@ -772,10 +886,25 @@ static bool tegra_output_is_hdmi(struct tegra_output *output)
772 return drm_detect_hdmi_monitor(edid); 886 return drm_detect_hdmi_monitor(edid);
773} 887}
774 888
889static enum drm_connector_status
890tegra_hdmi_connector_detect(struct drm_connector *connector, bool force)
891{
892 struct tegra_output *output = connector_to_output(connector);
893 struct tegra_hdmi *hdmi = to_hdmi(output);
894 enum drm_connector_status status;
895
896 status = tegra_output_connector_detect(connector, force);
897 if (status == connector_status_connected)
898 return status;
899
900 tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE);
901 return status;
902}
903
775static const struct drm_connector_funcs tegra_hdmi_connector_funcs = { 904static const struct drm_connector_funcs tegra_hdmi_connector_funcs = {
776 .dpms = drm_atomic_helper_connector_dpms, 905 .dpms = drm_atomic_helper_connector_dpms,
777 .reset = drm_atomic_helper_connector_reset, 906 .reset = drm_atomic_helper_connector_reset,
778 .detect = tegra_output_connector_detect, 907 .detect = tegra_hdmi_connector_detect,
779 .fill_modes = drm_helper_probe_single_connector_modes, 908 .fill_modes = drm_helper_probe_single_connector_modes,
780 .destroy = tegra_output_connector_destroy, 909 .destroy = tegra_output_connector_destroy,
781 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, 910 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
@@ -806,7 +935,6 @@ static const struct drm_connector_helper_funcs
806tegra_hdmi_connector_helper_funcs = { 935tegra_hdmi_connector_helper_funcs = {
807 .get_modes = tegra_output_connector_get_modes, 936 .get_modes = tegra_output_connector_get_modes,
808 .mode_valid = tegra_hdmi_connector_mode_valid, 937 .mode_valid = tegra_hdmi_connector_mode_valid,
809 .best_encoder = tegra_output_connector_best_encoder,
810}; 938};
811 939
812static const struct drm_encoder_funcs tegra_hdmi_encoder_funcs = { 940static const struct drm_encoder_funcs tegra_hdmi_encoder_funcs = {
@@ -815,7 +943,9 @@ static const struct drm_encoder_funcs tegra_hdmi_encoder_funcs = {
815 943
816static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder) 944static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder)
817{ 945{
946 struct tegra_output *output = encoder_to_output(encoder);
818 struct tegra_dc *dc = to_tegra_dc(encoder->crtc); 947 struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
948 struct tegra_hdmi *hdmi = to_hdmi(output);
819 u32 value; 949 u32 value;
820 950
821 /* 951 /*
@@ -829,6 +959,20 @@ static void tegra_hdmi_encoder_disable(struct drm_encoder *encoder)
829 959
830 tegra_dc_commit(dc); 960 tegra_dc_commit(dc);
831 } 961 }
962
963 if (!hdmi->dvi) {
964 if (hdmi->stereo)
965 tegra_hdmi_disable_stereo_infoframe(hdmi);
966
967 tegra_hdmi_disable_audio_infoframe(hdmi);
968 tegra_hdmi_disable_avi_infoframe(hdmi);
969 tegra_hdmi_disable_audio(hdmi);
970 }
971
972 tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_INT_ENABLE);
973 tegra_hdmi_writel(hdmi, 0, HDMI_NV_PDISP_INT_MASK);
974
975 pm_runtime_put(hdmi->dev);
832} 976}
833 977
834static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder) 978static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
@@ -837,21 +981,28 @@ static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
837 unsigned int h_sync_width, h_front_porch, h_back_porch, i, rekey; 981 unsigned int h_sync_width, h_front_porch, h_back_porch, i, rekey;
838 struct tegra_output *output = encoder_to_output(encoder); 982 struct tegra_output *output = encoder_to_output(encoder);
839 struct tegra_dc *dc = to_tegra_dc(encoder->crtc); 983 struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
840 struct device_node *node = output->dev->of_node;
841 struct tegra_hdmi *hdmi = to_hdmi(output); 984 struct tegra_hdmi *hdmi = to_hdmi(output);
842 unsigned int pulse_start, div82, pclk; 985 unsigned int pulse_start, div82;
843 int retries = 1000; 986 int retries = 1000;
844 u32 value; 987 u32 value;
845 int err; 988 int err;
846 989
847 hdmi->dvi = !tegra_output_is_hdmi(output); 990 pm_runtime_get_sync(hdmi->dev);
848 991
849 pclk = mode->clock * 1000; 992 /*
993 * Enable and unmask the HDA codec SCRATCH0 register interrupt. This
994 * is used for interoperability between the HDA codec driver and the
995 * HDMI driver.
996 */
997 tegra_hdmi_writel(hdmi, INT_CODEC_SCRATCH0, HDMI_NV_PDISP_INT_ENABLE);
998 tegra_hdmi_writel(hdmi, INT_CODEC_SCRATCH0, HDMI_NV_PDISP_INT_MASK);
999
1000 hdmi->pixel_clock = mode->clock * 1000;
850 h_sync_width = mode->hsync_end - mode->hsync_start; 1001 h_sync_width = mode->hsync_end - mode->hsync_start;
851 h_back_porch = mode->htotal - mode->hsync_end; 1002 h_back_porch = mode->htotal - mode->hsync_end;
852 h_front_porch = mode->hsync_start - mode->hdisplay; 1003 h_front_porch = mode->hsync_start - mode->hdisplay;
853 1004
854 err = clk_set_rate(hdmi->clk, pclk); 1005 err = clk_set_rate(hdmi->clk, hdmi->pixel_clock);
855 if (err < 0) { 1006 if (err < 0) {
856 dev_err(hdmi->dev, "failed to set HDMI clock frequency: %d\n", 1007 dev_err(hdmi->dev, "failed to set HDMI clock frequency: %d\n",
857 err); 1008 err);
@@ -910,17 +1061,15 @@ static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
910 value = SOR_REFCLK_DIV_INT(div82 >> 2) | SOR_REFCLK_DIV_FRAC(div82); 1061 value = SOR_REFCLK_DIV_INT(div82 >> 2) | SOR_REFCLK_DIV_FRAC(div82);
911 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_REFCLK); 1062 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_SOR_REFCLK);
912 1063
1064 hdmi->dvi = !tegra_output_is_hdmi(output);
913 if (!hdmi->dvi) { 1065 if (!hdmi->dvi) {
914 err = tegra_hdmi_setup_audio(hdmi, pclk); 1066 err = tegra_hdmi_setup_audio(hdmi);
915 if (err < 0) 1067 if (err < 0)
916 hdmi->dvi = true; 1068 hdmi->dvi = true;
917 } 1069 }
918 1070
919 if (of_device_is_compatible(node, "nvidia,tegra20-hdmi")) { 1071 if (hdmi->config->has_hda)
920 /* 1072 tegra_hdmi_write_eld(hdmi);
921 * TODO: add ELD support
922 */
923 }
924 1073
925 rekey = HDMI_REKEY_DEFAULT; 1074 rekey = HDMI_REKEY_DEFAULT;
926 value = HDMI_CTRL_REKEY(rekey); 1075 value = HDMI_CTRL_REKEY(rekey);
@@ -932,20 +1081,17 @@ static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
932 1081
933 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_CTRL); 1082 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_HDMI_CTRL);
934 1083
935 if (hdmi->dvi) 1084 if (!hdmi->dvi) {
936 tegra_hdmi_writel(hdmi, 0x0, 1085 tegra_hdmi_setup_avi_infoframe(hdmi, mode);
937 HDMI_NV_PDISP_HDMI_GENERIC_CTRL); 1086 tegra_hdmi_setup_audio_infoframe(hdmi);
938 else
939 tegra_hdmi_writel(hdmi, GENERIC_CTRL_AUDIO,
940 HDMI_NV_PDISP_HDMI_GENERIC_CTRL);
941 1087
942 tegra_hdmi_setup_avi_infoframe(hdmi, mode); 1088 if (hdmi->stereo)
943 tegra_hdmi_setup_audio_infoframe(hdmi); 1089 tegra_hdmi_setup_stereo_infoframe(hdmi);
944 tegra_hdmi_setup_stereo_infoframe(hdmi); 1090 }
945 1091
946 /* TMDS CONFIG */ 1092 /* TMDS CONFIG */
947 for (i = 0; i < hdmi->config->num_tmds; i++) { 1093 for (i = 0; i < hdmi->config->num_tmds; i++) {
948 if (pclk <= hdmi->config->tmds[i].pclk) { 1094 if (hdmi->pixel_clock <= hdmi->config->tmds[i].pclk) {
949 tegra_hdmi_setup_tmds(hdmi, &hdmi->config->tmds[i]); 1095 tegra_hdmi_setup_tmds(hdmi, &hdmi->config->tmds[i]);
950 break; 1096 break;
951 } 1097 }
@@ -1032,6 +1178,15 @@ static void tegra_hdmi_encoder_enable(struct drm_encoder *encoder)
1032 1178
1033 tegra_dc_commit(dc); 1179 tegra_dc_commit(dc);
1034 1180
1181 if (!hdmi->dvi) {
1182 tegra_hdmi_enable_avi_infoframe(hdmi);
1183 tegra_hdmi_enable_audio_infoframe(hdmi);
1184 tegra_hdmi_enable_audio(hdmi);
1185
1186 if (hdmi->stereo)
1187 tegra_hdmi_enable_stereo_infoframe(hdmi);
1188 }
1189
1035 /* TODO: add HDCP support */ 1190 /* TODO: add HDCP support */
1036} 1191}
1037 1192
@@ -1236,8 +1391,14 @@ static int tegra_hdmi_show_regs(struct seq_file *s, void *data)
1236 DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_TRIG); 1391 DUMP_REG(HDMI_NV_PDISP_KEY_HDCP_KEY_TRIG);
1237 DUMP_REG(HDMI_NV_PDISP_KEY_SKEY_INDEX); 1392 DUMP_REG(HDMI_NV_PDISP_KEY_SKEY_INDEX);
1238 DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_CNTRL0); 1393 DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_CNTRL0);
1394 DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_SPARE0);
1395 DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_CODEC_SCRATCH0);
1396 DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_CODEC_SCRATCH1);
1239 DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR); 1397 DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR);
1240 DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE); 1398 DUMP_REG(HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE);
1399 DUMP_REG(HDMI_NV_PDISP_INT_STATUS);
1400 DUMP_REG(HDMI_NV_PDISP_INT_MASK);
1401 DUMP_REG(HDMI_NV_PDISP_INT_ENABLE);
1241 DUMP_REG(HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT); 1402 DUMP_REG(HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT);
1242 1403
1243#undef DUMP_REG 1404#undef DUMP_REG
@@ -1361,14 +1522,6 @@ static int tegra_hdmi_init(struct host1x_client *client)
1361 return err; 1522 return err;
1362 } 1523 }
1363 1524
1364 err = clk_prepare_enable(hdmi->clk);
1365 if (err < 0) {
1366 dev_err(hdmi->dev, "failed to enable clock: %d\n", err);
1367 return err;
1368 }
1369
1370 reset_control_deassert(hdmi->rst);
1371
1372 return 0; 1525 return 0;
1373} 1526}
1374 1527
@@ -1378,9 +1531,6 @@ static int tegra_hdmi_exit(struct host1x_client *client)
1378 1531
1379 tegra_output_exit(&hdmi->output); 1532 tegra_output_exit(&hdmi->output);
1380 1533
1381 reset_control_assert(hdmi->rst);
1382 clk_disable_unprepare(hdmi->clk);
1383
1384 regulator_disable(hdmi->vdd); 1534 regulator_disable(hdmi->vdd);
1385 regulator_disable(hdmi->pll); 1535 regulator_disable(hdmi->pll);
1386 regulator_disable(hdmi->hdmi); 1536 regulator_disable(hdmi->hdmi);
@@ -1402,6 +1552,8 @@ static const struct tegra_hdmi_config tegra20_hdmi_config = {
1402 .fuse_override_offset = HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT, 1552 .fuse_override_offset = HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT,
1403 .fuse_override_value = 1 << 31, 1553 .fuse_override_value = 1 << 31,
1404 .has_sor_io_peak_current = false, 1554 .has_sor_io_peak_current = false,
1555 .has_hda = false,
1556 .has_hbr = false,
1405}; 1557};
1406 1558
1407static const struct tegra_hdmi_config tegra30_hdmi_config = { 1559static const struct tegra_hdmi_config tegra30_hdmi_config = {
@@ -1410,6 +1562,8 @@ static const struct tegra_hdmi_config tegra30_hdmi_config = {
1410 .fuse_override_offset = HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT, 1562 .fuse_override_offset = HDMI_NV_PDISP_SOR_LANE_DRIVE_CURRENT,
1411 .fuse_override_value = 1 << 31, 1563 .fuse_override_value = 1 << 31,
1412 .has_sor_io_peak_current = false, 1564 .has_sor_io_peak_current = false,
1565 .has_hda = true,
1566 .has_hbr = false,
1413}; 1567};
1414 1568
1415static const struct tegra_hdmi_config tegra114_hdmi_config = { 1569static const struct tegra_hdmi_config tegra114_hdmi_config = {
@@ -1418,6 +1572,8 @@ static const struct tegra_hdmi_config tegra114_hdmi_config = {
1418 .fuse_override_offset = HDMI_NV_PDISP_SOR_PAD_CTLS0, 1572 .fuse_override_offset = HDMI_NV_PDISP_SOR_PAD_CTLS0,
1419 .fuse_override_value = 1 << 31, 1573 .fuse_override_value = 1 << 31,
1420 .has_sor_io_peak_current = true, 1574 .has_sor_io_peak_current = true,
1575 .has_hda = true,
1576 .has_hbr = true,
1421}; 1577};
1422 1578
1423static const struct tegra_hdmi_config tegra124_hdmi_config = { 1579static const struct tegra_hdmi_config tegra124_hdmi_config = {
@@ -1426,6 +1582,8 @@ static const struct tegra_hdmi_config tegra124_hdmi_config = {
1426 .fuse_override_offset = HDMI_NV_PDISP_SOR_PAD_CTLS0, 1582 .fuse_override_offset = HDMI_NV_PDISP_SOR_PAD_CTLS0,
1427 .fuse_override_value = 1 << 31, 1583 .fuse_override_value = 1 << 31,
1428 .has_sor_io_peak_current = true, 1584 .has_sor_io_peak_current = true,
1585 .has_hda = true,
1586 .has_hbr = true,
1429}; 1587};
1430 1588
1431static const struct of_device_id tegra_hdmi_of_match[] = { 1589static const struct of_device_id tegra_hdmi_of_match[] = {
@@ -1437,6 +1595,67 @@ static const struct of_device_id tegra_hdmi_of_match[] = {
1437}; 1595};
1438MODULE_DEVICE_TABLE(of, tegra_hdmi_of_match); 1596MODULE_DEVICE_TABLE(of, tegra_hdmi_of_match);
1439 1597
1598static void hda_format_parse(unsigned int format, unsigned int *rate,
1599 unsigned int *channels)
1600{
1601 unsigned int mul, div;
1602
1603 if (format & AC_FMT_BASE_44K)
1604 *rate = 44100;
1605 else
1606 *rate = 48000;
1607
1608 mul = (format & AC_FMT_MULT_MASK) >> AC_FMT_MULT_SHIFT;
1609 div = (format & AC_FMT_DIV_MASK) >> AC_FMT_DIV_SHIFT;
1610
1611 *rate = *rate * (mul + 1) / (div + 1);
1612
1613 *channels = (format & AC_FMT_CHAN_MASK) >> AC_FMT_CHAN_SHIFT;
1614}
1615
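A worked, hedged example of the parser above: a format word with AC_FMT_BASE_44K set, a multiplier field of 1 and a divisor field of 0 decodes to 44100 * (1 + 1) / (0 + 1) = 88200 Hz. The call below is illustrative only:

	static void example_hda_format_decode(void)
	{
		unsigned int rate, channels;

		/* 44.1 kHz base, MULT = 1 (x2), DIV = 0 (/1) -> 88200 Hz */
		hda_format_parse(AC_FMT_BASE_44K | (1 << AC_FMT_MULT_SHIFT),
				 &rate, &channels);
	}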
1616static irqreturn_t tegra_hdmi_irq(int irq, void *data)
1617{
1618 struct tegra_hdmi *hdmi = data;
1619 u32 value;
1620 int err;
1621
1622 value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_INT_STATUS);
1623 tegra_hdmi_writel(hdmi, value, HDMI_NV_PDISP_INT_STATUS);
1624
1625 if (value & INT_CODEC_SCRATCH0) {
1626 unsigned int format;
1627 u32 value;
1628
1629 value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_AUDIO_HDA_CODEC_SCRATCH0);
1630
1631 if (value & SOR_AUDIO_HDA_CODEC_SCRATCH0_VALID) {
1632 unsigned int sample_rate, channels;
1633
1634 format = value & SOR_AUDIO_HDA_CODEC_SCRATCH0_FMT_MASK;
1635
1636 hda_format_parse(format, &sample_rate, &channels);
1637
1638 hdmi->audio_sample_rate = sample_rate;
1639 hdmi->audio_channels = channels;
1640
1641 err = tegra_hdmi_setup_audio(hdmi);
1642 if (err < 0) {
1643 tegra_hdmi_disable_audio_infoframe(hdmi);
1644 tegra_hdmi_disable_audio(hdmi);
1645 } else {
1646 tegra_hdmi_setup_audio_infoframe(hdmi);
1647 tegra_hdmi_enable_audio_infoframe(hdmi);
1648 tegra_hdmi_enable_audio(hdmi);
1649 }
1650 } else {
1651 tegra_hdmi_disable_audio_infoframe(hdmi);
1652 tegra_hdmi_disable_audio(hdmi);
1653 }
1654 }
1655
1656 return IRQ_HANDLED;
1657}
1658
1440static int tegra_hdmi_probe(struct platform_device *pdev) 1659static int tegra_hdmi_probe(struct platform_device *pdev)
1441{ 1660{
1442 const struct of_device_id *match; 1661 const struct of_device_id *match;
@@ -1454,8 +1673,10 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
1454 1673
1455 hdmi->config = match->data; 1674 hdmi->config = match->data;
1456 hdmi->dev = &pdev->dev; 1675 hdmi->dev = &pdev->dev;
1676
1457 hdmi->audio_source = AUTO; 1677 hdmi->audio_source = AUTO;
1458 hdmi->audio_freq = 44100; 1678 hdmi->audio_sample_rate = 48000;
1679 hdmi->audio_channels = 2;
1459 hdmi->stereo = false; 1680 hdmi->stereo = false;
1460 hdmi->dvi = false; 1681 hdmi->dvi = false;
1461 1682
@@ -1516,6 +1737,17 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
1516 1737
1517 hdmi->irq = err; 1738 hdmi->irq = err;
1518 1739
1740 err = devm_request_irq(hdmi->dev, hdmi->irq, tegra_hdmi_irq, 0,
1741 dev_name(hdmi->dev), hdmi);
1742 if (err < 0) {
1743 dev_err(&pdev->dev, "failed to request IRQ#%u: %d\n",
1744 hdmi->irq, err);
1745 return err;
1746 }
1747
1748 platform_set_drvdata(pdev, hdmi);
1749 pm_runtime_enable(&pdev->dev);
1750
1519 INIT_LIST_HEAD(&hdmi->client.list); 1751 INIT_LIST_HEAD(&hdmi->client.list);
1520 hdmi->client.ops = &hdmi_client_ops; 1752 hdmi->client.ops = &hdmi_client_ops;
1521 hdmi->client.dev = &pdev->dev; 1753 hdmi->client.dev = &pdev->dev;
@@ -1527,8 +1759,6 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
1527 return err; 1759 return err;
1528 } 1760 }
1529 1761
1530 platform_set_drvdata(pdev, hdmi);
1531
1532 return 0; 1762 return 0;
1533} 1763}
1534 1764
@@ -1537,6 +1767,8 @@ static int tegra_hdmi_remove(struct platform_device *pdev)
1537 struct tegra_hdmi *hdmi = platform_get_drvdata(pdev); 1767 struct tegra_hdmi *hdmi = platform_get_drvdata(pdev);
1538 int err; 1768 int err;
1539 1769
1770 pm_runtime_disable(&pdev->dev);
1771
1540 err = host1x_client_unregister(&hdmi->client); 1772 err = host1x_client_unregister(&hdmi->client);
1541 if (err < 0) { 1773 if (err < 0) {
1542 dev_err(&pdev->dev, "failed to unregister host1x client: %d\n", 1774 dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
@@ -1546,17 +1778,61 @@ static int tegra_hdmi_remove(struct platform_device *pdev)
1546 1778
1547 tegra_output_remove(&hdmi->output); 1779 tegra_output_remove(&hdmi->output);
1548 1780
1549 clk_disable_unprepare(hdmi->clk_parent); 1781 return 0;
1782}
1783
1784#ifdef CONFIG_PM
1785static int tegra_hdmi_suspend(struct device *dev)
1786{
1787 struct tegra_hdmi *hdmi = dev_get_drvdata(dev);
1788 int err;
1789
1790 err = reset_control_assert(hdmi->rst);
1791 if (err < 0) {
1792 dev_err(dev, "failed to assert reset: %d\n", err);
1793 return err;
1794 }
1795
1796 usleep_range(1000, 2000);
1797
1550 clk_disable_unprepare(hdmi->clk); 1798 clk_disable_unprepare(hdmi->clk);
1551 1799
1552 return 0; 1800 return 0;
1553} 1801}
1554 1802
1803static int tegra_hdmi_resume(struct device *dev)
1804{
1805 struct tegra_hdmi *hdmi = dev_get_drvdata(dev);
1806 int err;
1807
1808 err = clk_prepare_enable(hdmi->clk);
1809 if (err < 0) {
1810 dev_err(dev, "failed to enable clock: %d\n", err);
1811 return err;
1812 }
1813
1814 usleep_range(1000, 2000);
1815
1816 err = reset_control_deassert(hdmi->rst);
1817 if (err < 0) {
1818 dev_err(dev, "failed to deassert reset: %d\n", err);
1819 clk_disable_unprepare(hdmi->clk);
1820 return err;
1821 }
1822
1823 return 0;
1824}
1825#endif
1826
1827static const struct dev_pm_ops tegra_hdmi_pm_ops = {
1828 SET_RUNTIME_PM_OPS(tegra_hdmi_suspend, tegra_hdmi_resume, NULL)
1829};
1830
1555struct platform_driver tegra_hdmi_driver = { 1831struct platform_driver tegra_hdmi_driver = {
1556 .driver = { 1832 .driver = {
1557 .name = "tegra-hdmi", 1833 .name = "tegra-hdmi",
1558 .owner = THIS_MODULE,
1559 .of_match_table = tegra_hdmi_of_match, 1834 .of_match_table = tegra_hdmi_of_match,
1835 .pm = &tegra_hdmi_pm_ops,
1560 }, 1836 },
1561 .probe = tegra_hdmi_probe, 1837 .probe = tegra_hdmi_probe,
1562 .remove = tegra_hdmi_remove, 1838 .remove = tegra_hdmi_remove,
diff --git a/drivers/gpu/drm/tegra/hdmi.h b/drivers/gpu/drm/tegra/hdmi.h
index a882514389cd..2339f134a09a 100644
--- a/drivers/gpu/drm/tegra/hdmi.h
+++ b/drivers/gpu/drm/tegra/hdmi.h
@@ -468,9 +468,20 @@
468#define HDMI_NV_PDISP_KEY_SKEY_INDEX 0xa3 468#define HDMI_NV_PDISP_KEY_SKEY_INDEX 0xa3
469 469
470#define HDMI_NV_PDISP_SOR_AUDIO_CNTRL0 0xac 470#define HDMI_NV_PDISP_SOR_AUDIO_CNTRL0 0xac
471#define AUDIO_CNTRL0_INJECT_NULLSMPL (1 << 29) 471#define SOR_AUDIO_CNTRL0_SOURCE_SELECT_AUTO (0 << 20)
472#define SOR_AUDIO_CNTRL0_SOURCE_SELECT_SPDIF (1 << 20)
473#define SOR_AUDIO_CNTRL0_SOURCE_SELECT_HDAL (2 << 20)
474#define SOR_AUDIO_CNTRL0_INJECT_NULLSMPL (1 << 29)
475#define HDMI_NV_PDISP_SOR_AUDIO_SPARE0 0xae
476#define SOR_AUDIO_SPARE0_HBR_ENABLE (1 << 27)
477#define HDMI_NV_PDISP_SOR_AUDIO_HDA_CODEC_SCRATCH0 0xba
478#define SOR_AUDIO_HDA_CODEC_SCRATCH0_VALID (1 << 30)
479#define SOR_AUDIO_HDA_CODEC_SCRATCH0_FMT_MASK 0xffff
480#define HDMI_NV_PDISP_SOR_AUDIO_HDA_CODEC_SCRATCH1 0xbb
472#define HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR 0xbc 481#define HDMI_NV_PDISP_SOR_AUDIO_HDA_ELD_BUFWR 0xbc
473#define HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE 0xbd 482#define HDMI_NV_PDISP_SOR_AUDIO_HDA_PRESENSE 0xbd
483#define SOR_AUDIO_HDA_PRESENSE_VALID (1 << 1)
484#define SOR_AUDIO_HDA_PRESENSE_PRESENT (1 << 0)
474 485
475#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320 0xbf 486#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0320 0xbf
476#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441 0xc0 487#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_0441 0xc0
@@ -481,6 +492,14 @@
481#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920 0xc5 492#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_1920 0xc5
482#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_DEFAULT 0xc5 493#define HDMI_NV_PDISP_SOR_AUDIO_AVAL_DEFAULT 0xc5
483 494
495#define HDMI_NV_PDISP_INT_STATUS 0xcc
496#define INT_SCRATCH (1 << 3)
497#define INT_CP_REQUEST (1 << 2)
498#define INT_CODEC_SCRATCH1 (1 << 1)
499#define INT_CODEC_SCRATCH0 (1 << 0)
500#define HDMI_NV_PDISP_INT_MASK 0xcd
501#define HDMI_NV_PDISP_INT_ENABLE 0xce
502
484#define HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT 0xd1 503#define HDMI_NV_PDISP_SOR_IO_PEAK_CURRENT 0xd1
485#define PEAK_CURRENT_LANE0(x) (((x) & 0x7f) << 0) 504#define PEAK_CURRENT_LANE0(x) (((x) & 0x7f) << 0)
486#define PEAK_CURRENT_LANE1(x) (((x) & 0x7f) << 8) 505#define PEAK_CURRENT_LANE1(x) (((x) & 0x7f) << 8)
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index 46664b622270..595d1ec3e02e 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -36,20 +36,13 @@ int tegra_output_connector_get_modes(struct drm_connector *connector)
36 36
37 if (edid) { 37 if (edid) {
38 err = drm_add_edid_modes(connector, edid); 38 err = drm_add_edid_modes(connector, edid);
39 drm_edid_to_eld(connector, edid);
39 kfree(edid); 40 kfree(edid);
40 } 41 }
41 42
42 return err; 43 return err;
43} 44}
44 45
45struct drm_encoder *
46tegra_output_connector_best_encoder(struct drm_connector *connector)
47{
48 struct tegra_output *output = connector_to_output(connector);
49
50 return &output->encoder;
51}
52
53enum drm_connector_status 46enum drm_connector_status
54tegra_output_connector_detect(struct drm_connector *connector, bool force) 47tegra_output_connector_detect(struct drm_connector *connector, bool force)
55{ 48{
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
index e246334e0252..a131b44e2d6f 100644
--- a/drivers/gpu/drm/tegra/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -112,7 +112,6 @@ tegra_rgb_connector_mode_valid(struct drm_connector *connector,
112static const struct drm_connector_helper_funcs tegra_rgb_connector_helper_funcs = { 112static const struct drm_connector_helper_funcs tegra_rgb_connector_helper_funcs = {
113 .get_modes = tegra_output_connector_get_modes, 113 .get_modes = tegra_output_connector_get_modes,
114 .mode_valid = tegra_rgb_connector_mode_valid, 114 .mode_valid = tegra_rgb_connector_mode_valid,
115 .best_encoder = tegra_output_connector_best_encoder,
116}; 115};
117 116
118static const struct drm_encoder_funcs tegra_rgb_encoder_funcs = { 117static const struct drm_encoder_funcs tegra_rgb_encoder_funcs = {
diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c
index 757c6e8603af..74d0540b8d4c 100644
--- a/drivers/gpu/drm/tegra/sor.c
+++ b/drivers/gpu/drm/tegra/sor.c
@@ -7,11 +7,13 @@
7 */ 7 */
8 8
9#include <linux/clk.h> 9#include <linux/clk.h>
10#include <linux/clk-provider.h>
10#include <linux/debugfs.h> 11#include <linux/debugfs.h>
11#include <linux/gpio.h> 12#include <linux/gpio.h>
12#include <linux/io.h> 13#include <linux/io.h>
13#include <linux/of_device.h> 14#include <linux/of_device.h>
14#include <linux/platform_device.h> 15#include <linux/platform_device.h>
16#include <linux/pm_runtime.h>
15#include <linux/regulator/consumer.h> 17#include <linux/regulator/consumer.h>
16#include <linux/reset.h> 18#include <linux/reset.h>
17 19
@@ -149,6 +151,8 @@ struct tegra_sor_soc {
149 151
150 const struct tegra_sor_hdmi_settings *settings; 152 const struct tegra_sor_hdmi_settings *settings;
151 unsigned int num_settings; 153 unsigned int num_settings;
154
155 const u8 *xbar_cfg;
152}; 156};
153 157
154struct tegra_sor; 158struct tegra_sor;
@@ -169,7 +173,9 @@ struct tegra_sor {
169 173
170 struct reset_control *rst; 174 struct reset_control *rst;
171 struct clk *clk_parent; 175 struct clk *clk_parent;
176 struct clk *clk_brick;
172 struct clk *clk_safe; 177 struct clk *clk_safe;
178 struct clk *clk_src;
173 struct clk *clk_dp; 179 struct clk *clk_dp;
174 struct clk *clk; 180 struct clk *clk;
175 181
@@ -190,6 +196,18 @@ struct tegra_sor {
190 struct regulator *hdmi_supply; 196 struct regulator *hdmi_supply;
191}; 197};
192 198
199struct tegra_sor_state {
200 struct drm_connector_state base;
201
202 unsigned int bpc;
203};
204
205static inline struct tegra_sor_state *
206to_sor_state(struct drm_connector_state *state)
207{
208 return container_of(state, struct tegra_sor_state, base);
209}
210
193struct tegra_sor_config { 211struct tegra_sor_config {
194 u32 bits_per_pixel; 212 u32 bits_per_pixel;
195 213
@@ -225,6 +243,118 @@ static inline void tegra_sor_writel(struct tegra_sor *sor, u32 value,
225 writel(value, sor->regs + (offset << 2)); 243 writel(value, sor->regs + (offset << 2));
226} 244}
227 245
246static int tegra_sor_set_parent_clock(struct tegra_sor *sor, struct clk *parent)
247{
248 int err;
249
250 clk_disable_unprepare(sor->clk);
251
252 err = clk_set_parent(sor->clk, parent);
253 if (err < 0)
254 return err;
255
256 err = clk_prepare_enable(sor->clk);
257 if (err < 0)
258 return err;
259
260 return 0;
261}
262
263struct tegra_clk_sor_brick {
264 struct clk_hw hw;
265 struct tegra_sor *sor;
266};
267
268static inline struct tegra_clk_sor_brick *to_brick(struct clk_hw *hw)
269{
270 return container_of(hw, struct tegra_clk_sor_brick, hw);
271}
272
273static const char * const tegra_clk_sor_brick_parents[] = {
274 "pll_d2_out0", "pll_dp"
275};
276
277static int tegra_clk_sor_brick_set_parent(struct clk_hw *hw, u8 index)
278{
279 struct tegra_clk_sor_brick *brick = to_brick(hw);
280 struct tegra_sor *sor = brick->sor;
281 u32 value;
282
283 value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
284 value &= ~SOR_CLK_CNTRL_DP_CLK_SEL_MASK;
285
286 switch (index) {
287 case 0:
288 value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_PCLK;
289 break;
290
291 case 1:
292 value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK;
293 break;
294 }
295
296 tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
297
298 return 0;
299}
300
301static u8 tegra_clk_sor_brick_get_parent(struct clk_hw *hw)
302{
303 struct tegra_clk_sor_brick *brick = to_brick(hw);
304 struct tegra_sor *sor = brick->sor;
305 u8 parent = U8_MAX;
306 u32 value;
307
308 value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
309
310 switch (value & SOR_CLK_CNTRL_DP_CLK_SEL_MASK) {
311 case SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_PCLK:
312 case SOR_CLK_CNTRL_DP_CLK_SEL_DIFF_PCLK:
313 parent = 0;
314 break;
315
316 case SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK:
317 case SOR_CLK_CNTRL_DP_CLK_SEL_DIFF_DPCLK:
318 parent = 1;
319 break;
320 }
321
322 return parent;
323}
324
325static const struct clk_ops tegra_clk_sor_brick_ops = {
326 .set_parent = tegra_clk_sor_brick_set_parent,
327 .get_parent = tegra_clk_sor_brick_get_parent,
328};
329
330static struct clk *tegra_clk_sor_brick_register(struct tegra_sor *sor,
331 const char *name)
332{
333 struct tegra_clk_sor_brick *brick;
334 struct clk_init_data init;
335 struct clk *clk;
336
337 brick = devm_kzalloc(sor->dev, sizeof(*brick), GFP_KERNEL);
338 if (!brick)
339 return ERR_PTR(-ENOMEM);
340
341 brick->sor = sor;
342
343 init.name = name;
344 init.flags = 0;
345 init.parent_names = tegra_clk_sor_brick_parents;
346 init.num_parents = ARRAY_SIZE(tegra_clk_sor_brick_parents);
347 init.ops = &tegra_clk_sor_brick_ops;
348
349 brick->hw.init = &init;
350
351 clk = devm_clk_register(sor->dev, &brick->hw);
352 if (IS_ERR(clk))
353 kfree(brick);
354
355 return clk;
356}
357
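A hedged sketch of how the brick clock registered above would be hooked up during probe; the clock name "sor1_brick" is illustrative and the real name depends on the SoC integration:

	/* Illustrative probe-time registration of the SOR "brick" mux. */
	static int example_register_brick(struct tegra_sor *sor)
	{
		sor->clk_brick = tegra_clk_sor_brick_register(sor, "sor1_brick");
		if (IS_ERR(sor->clk_brick))
			return PTR_ERR(sor->clk_brick);

		return 0;
	}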
228static int tegra_sor_dp_train_fast(struct tegra_sor *sor, 358static int tegra_sor_dp_train_fast(struct tegra_sor *sor,
229 struct drm_dp_link *link) 359 struct drm_dp_link *link)
230{ 360{
@@ -569,10 +699,10 @@ static int tegra_sor_compute_params(struct tegra_sor *sor,
569 return false; 699 return false;
570} 700}
571 701
572static int tegra_sor_calc_config(struct tegra_sor *sor, 702static int tegra_sor_compute_config(struct tegra_sor *sor,
573 const struct drm_display_mode *mode, 703 const struct drm_display_mode *mode,
574 struct tegra_sor_config *config, 704 struct tegra_sor_config *config,
575 struct drm_dp_link *link) 705 struct drm_dp_link *link)
576{ 706{
577 const u64 f = 100000, link_rate = link->rate * 1000; 707 const u64 f = 100000, link_rate = link->rate * 1000;
578 const u64 pclk = mode->clock * 1000; 708 const u64 pclk = mode->clock * 1000;
@@ -661,6 +791,135 @@ static int tegra_sor_calc_config(struct tegra_sor *sor,
661 return 0; 791 return 0;
662} 792}
663 793
794static void tegra_sor_apply_config(struct tegra_sor *sor,
795 const struct tegra_sor_config *config)
796{
797 u32 value;
798
799 value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
800 value &= ~SOR_DP_LINKCTL_TU_SIZE_MASK;
801 value |= SOR_DP_LINKCTL_TU_SIZE(config->tu_size);
802 tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
803
804 value = tegra_sor_readl(sor, SOR_DP_CONFIG0);
805 value &= ~SOR_DP_CONFIG_WATERMARK_MASK;
806 value |= SOR_DP_CONFIG_WATERMARK(config->watermark);
807
808 value &= ~SOR_DP_CONFIG_ACTIVE_SYM_COUNT_MASK;
809 value |= SOR_DP_CONFIG_ACTIVE_SYM_COUNT(config->active_count);
810
811 value &= ~SOR_DP_CONFIG_ACTIVE_SYM_FRAC_MASK;
812 value |= SOR_DP_CONFIG_ACTIVE_SYM_FRAC(config->active_frac);
813
814 if (config->active_polarity)
815 value |= SOR_DP_CONFIG_ACTIVE_SYM_POLARITY;
816 else
817 value &= ~SOR_DP_CONFIG_ACTIVE_SYM_POLARITY;
818
819 value |= SOR_DP_CONFIG_ACTIVE_SYM_ENABLE;
820 value |= SOR_DP_CONFIG_DISPARITY_NEGATIVE;
821 tegra_sor_writel(sor, value, SOR_DP_CONFIG0);
822
823 value = tegra_sor_readl(sor, SOR_DP_AUDIO_HBLANK_SYMBOLS);
824 value &= ~SOR_DP_AUDIO_HBLANK_SYMBOLS_MASK;
825 value |= config->hblank_symbols & 0xffff;
826 tegra_sor_writel(sor, value, SOR_DP_AUDIO_HBLANK_SYMBOLS);
827
828 value = tegra_sor_readl(sor, SOR_DP_AUDIO_VBLANK_SYMBOLS);
829 value &= ~SOR_DP_AUDIO_VBLANK_SYMBOLS_MASK;
830 value |= config->vblank_symbols & 0xffff;
831 tegra_sor_writel(sor, value, SOR_DP_AUDIO_VBLANK_SYMBOLS);
832}
833
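A hedged sketch of how the two helpers above pair up in an enable path: compute the DP link configuration for a mode, then program it into the SOR. The surrounding function and the 24 bpp starting value are illustrative, not from the patch:

	static int example_sor_configure(struct tegra_sor *sor,
					 const struct drm_display_mode *mode,
					 struct drm_dp_link *link)
	{
		/* 24 bpp is an illustrative assumption for this sketch. */
		struct tegra_sor_config config = { .bits_per_pixel = 24 };
		int err;

		err = tegra_sor_compute_config(sor, mode, &config, link);
		if (err < 0) {
			dev_err(sor->dev, "failed to compute configuration: %d\n",
				err);
			return err;
		}

		tegra_sor_apply_config(sor, &config);

		return 0;
	}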
834static void tegra_sor_mode_set(struct tegra_sor *sor,
835 const struct drm_display_mode *mode,
836 struct tegra_sor_state *state)
837{
838 struct tegra_dc *dc = to_tegra_dc(sor->output.encoder.crtc);
839 unsigned int vbe, vse, hbe, hse, vbs, hbs;
840 u32 value;
841
842 value = tegra_sor_readl(sor, SOR_STATE1);
843 value &= ~SOR_STATE_ASY_PIXELDEPTH_MASK;
844 value &= ~SOR_STATE_ASY_CRC_MODE_MASK;
845 value &= ~SOR_STATE_ASY_OWNER_MASK;
846
847 value |= SOR_STATE_ASY_CRC_MODE_COMPLETE |
848 SOR_STATE_ASY_OWNER(dc->pipe + 1);
849
850 if (mode->flags & DRM_MODE_FLAG_PHSYNC)
851 value &= ~SOR_STATE_ASY_HSYNCPOL;
852
853 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
854 value |= SOR_STATE_ASY_HSYNCPOL;
855
856 if (mode->flags & DRM_MODE_FLAG_PVSYNC)
857 value &= ~SOR_STATE_ASY_VSYNCPOL;
858
859 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
860 value |= SOR_STATE_ASY_VSYNCPOL;
861
862 switch (state->bpc) {
863 case 16:
864 value |= SOR_STATE_ASY_PIXELDEPTH_BPP_48_444;
865 break;
866
867 case 12:
868 value |= SOR_STATE_ASY_PIXELDEPTH_BPP_36_444;
869 break;
870
871 case 10:
872 value |= SOR_STATE_ASY_PIXELDEPTH_BPP_30_444;
873 break;
874
875 case 8:
876 value |= SOR_STATE_ASY_PIXELDEPTH_BPP_24_444;
877 break;
878
879 case 6:
880 value |= SOR_STATE_ASY_PIXELDEPTH_BPP_18_444;
881 break;
882
883 default:
884 value |= SOR_STATE_ASY_PIXELDEPTH_BPP_24_444;
885 break;
886 }
887
888 tegra_sor_writel(sor, value, SOR_STATE1);
889
890 /*
891 * TODO: The video timing programming below doesn't seem to match the
892 * register definitions.
893 */
894
895 value = ((mode->vtotal & 0x7fff) << 16) | (mode->htotal & 0x7fff);
896 tegra_sor_writel(sor, value, SOR_HEAD_STATE1(dc->pipe));
897
898 /* sync end = sync width - 1 */
899 vse = mode->vsync_end - mode->vsync_start - 1;
900 hse = mode->hsync_end - mode->hsync_start - 1;
901
902 value = ((vse & 0x7fff) << 16) | (hse & 0x7fff);
903 tegra_sor_writel(sor, value, SOR_HEAD_STATE2(dc->pipe));
904
905 /* blank end = sync end + back porch */
906 vbe = vse + (mode->vtotal - mode->vsync_end);
907 hbe = hse + (mode->htotal - mode->hsync_end);
908
909 value = ((vbe & 0x7fff) << 16) | (hbe & 0x7fff);
910 tegra_sor_writel(sor, value, SOR_HEAD_STATE3(dc->pipe));
911
912 /* blank start = blank end + active */
913 vbs = vbe + mode->vdisplay;
914 hbs = hbe + mode->hdisplay;
915
916 value = ((vbs & 0x7fff) << 16) | (hbs & 0x7fff);
917 tegra_sor_writel(sor, value, SOR_HEAD_STATE4(dc->pipe));
918
919 /* XXX interlacing support */
920 tegra_sor_writel(sor, 0x001, SOR_HEAD_STATE5(dc->pipe));
921}
922
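For reference, the blanking arithmetic above worked through for a standard 1080p CEA timing (illustrative numbers, not taken from the patch):

	/* 1920x1080@60 CEA timing: 1920/2008/2052/2200, 1080/1084/1089/1125
	 *   hse = 2052 - 2008 - 1 = 43      vse = 1089 - 1084 - 1 = 4
	 *   hbe = 43 + (2200 - 2052) = 191  vbe = 4 + (1125 - 1089) = 40
	 *   hbs = 191 + 1920 = 2111         vbs = 40 + 1080 = 1120
	 */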
664static int tegra_sor_detach(struct tegra_sor *sor) 923static int tegra_sor_detach(struct tegra_sor *sor)
665{ 924{
666 unsigned long value, timeout; 925 unsigned long value, timeout;
@@ -733,7 +992,8 @@ static int tegra_sor_power_down(struct tegra_sor *sor)
733 if ((value & SOR_PWR_TRIGGER) != 0) 992 if ((value & SOR_PWR_TRIGGER) != 0)
734 return -ETIMEDOUT; 993 return -ETIMEDOUT;
735 994
736 err = clk_set_parent(sor->clk, sor->clk_safe); 995 /* switch to safe parent clock */
996 err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
737 if (err < 0) 997 if (err < 0)
738 dev_err(sor->dev, "failed to set safe parent clock: %d\n", err); 998 dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
739 999
@@ -1038,6 +1298,22 @@ static void tegra_sor_debugfs_exit(struct tegra_sor *sor)
1038 sor->debugfs = NULL; 1298 sor->debugfs = NULL;
1039} 1299}
1040 1300
1301static void tegra_sor_connector_reset(struct drm_connector *connector)
1302{
1303 struct tegra_sor_state *state;
1304
1305 state = kzalloc(sizeof(*state), GFP_KERNEL);
1306 if (!state)
1307 return;
1308
1309 if (connector->state) {
1310 __drm_atomic_helper_connector_destroy_state(connector->state);
1311 kfree(connector->state);
1312 }
1313
1314 __drm_atomic_helper_connector_reset(connector, &state->base);
1315}
1316
1041static enum drm_connector_status 1317static enum drm_connector_status
1042tegra_sor_connector_detect(struct drm_connector *connector, bool force) 1318tegra_sor_connector_detect(struct drm_connector *connector, bool force)
1043{ 1319{
@@ -1050,13 +1326,28 @@ tegra_sor_connector_detect(struct drm_connector *connector, bool force)
1050 return tegra_output_connector_detect(connector, force); 1326 return tegra_output_connector_detect(connector, force);
1051} 1327}
1052 1328
1329static struct drm_connector_state *
1330tegra_sor_connector_duplicate_state(struct drm_connector *connector)
1331{
1332 struct tegra_sor_state *state = to_sor_state(connector->state);
1333 struct tegra_sor_state *copy;
1334
1335 copy = kmemdup(state, sizeof(*state), GFP_KERNEL);
1336 if (!copy)
1337 return NULL;
1338
1339 __drm_atomic_helper_connector_duplicate_state(connector, &copy->base);
1340
1341 return &copy->base;
1342}
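The reset and duplicate helpers above subclass the connector state so that driver data (here, the bpc chosen at atomic_check time) travels with it. Only the usage is visible in this hunk; as a minimal sketch, assuming the usual container_of pattern (the real struct in sor.c may carry additional fields), the state and the to_sor_state() cast look roughly like:

    struct tegra_sor_state {
        struct drm_connector_state base;  /* embedded state used by the atomic helpers */
        unsigned int bpc;                 /* bits per color, filled in at atomic_check */
    };

    static inline struct tegra_sor_state *
    to_sor_state(struct drm_connector_state *state)
    {
        return container_of(state, struct tegra_sor_state, base);
    }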
1343
1053static const struct drm_connector_funcs tegra_sor_connector_funcs = { 1344static const struct drm_connector_funcs tegra_sor_connector_funcs = {
1054 .dpms = drm_atomic_helper_connector_dpms, 1345 .dpms = drm_atomic_helper_connector_dpms,
1055 .reset = drm_atomic_helper_connector_reset, 1346 .reset = tegra_sor_connector_reset,
1056 .detect = tegra_sor_connector_detect, 1347 .detect = tegra_sor_connector_detect,
1057 .fill_modes = drm_helper_probe_single_connector_modes, 1348 .fill_modes = drm_helper_probe_single_connector_modes,
1058 .destroy = tegra_output_connector_destroy, 1349 .destroy = tegra_output_connector_destroy,
1059 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, 1350 .atomic_duplicate_state = tegra_sor_connector_duplicate_state,
1060 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, 1351 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
1061}; 1352};
1062 1353
@@ -1081,13 +1372,16 @@ static enum drm_mode_status
1081tegra_sor_connector_mode_valid(struct drm_connector *connector, 1372tegra_sor_connector_mode_valid(struct drm_connector *connector,
1082 struct drm_display_mode *mode) 1373 struct drm_display_mode *mode)
1083{ 1374{
1375 /* HDMI 2.0 modes are not yet supported */
1376 if (mode->clock > 340000)
1377 return MODE_NOCLOCK;
1378
1084 return MODE_OK; 1379 return MODE_OK;
1085} 1380}
1086 1381
1087static const struct drm_connector_helper_funcs tegra_sor_connector_helper_funcs = { 1382static const struct drm_connector_helper_funcs tegra_sor_connector_helper_funcs = {
1088 .get_modes = tegra_sor_connector_get_modes, 1383 .get_modes = tegra_sor_connector_get_modes,
1089 .mode_valid = tegra_sor_connector_mode_valid, 1384 .mode_valid = tegra_sor_connector_mode_valid,
1090 .best_encoder = tegra_output_connector_best_encoder,
1091}; 1385};
1092 1386
1093static const struct drm_encoder_funcs tegra_sor_encoder_funcs = { 1387static const struct drm_encoder_funcs tegra_sor_encoder_funcs = {
@@ -1141,8 +1435,7 @@ static void tegra_sor_edp_disable(struct drm_encoder *encoder)
1141 if (output->panel) 1435 if (output->panel)
1142 drm_panel_unprepare(output->panel); 1436 drm_panel_unprepare(output->panel);
1143 1437
1144 reset_control_assert(sor->rst); 1438 pm_runtime_put(sor->dev);
1145 clk_disable_unprepare(sor->clk);
1146} 1439}
1147 1440
1148#if 0 1441#if 0
@@ -1192,19 +1485,18 @@ static void tegra_sor_edp_enable(struct drm_encoder *encoder)
1192 struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode; 1485 struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode;
1193 struct tegra_output *output = encoder_to_output(encoder); 1486 struct tegra_output *output = encoder_to_output(encoder);
1194 struct tegra_dc *dc = to_tegra_dc(encoder->crtc); 1487 struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
1195 unsigned int vbe, vse, hbe, hse, vbs, hbs, i;
1196 struct tegra_sor *sor = to_sor(output); 1488 struct tegra_sor *sor = to_sor(output);
1197 struct tegra_sor_config config; 1489 struct tegra_sor_config config;
1490 struct tegra_sor_state *state;
1198 struct drm_dp_link link; 1491 struct drm_dp_link link;
1199 u8 rate, lanes; 1492 u8 rate, lanes;
1493 unsigned int i;
1200 int err = 0; 1494 int err = 0;
1201 u32 value; 1495 u32 value;
1202 1496
1203 err = clk_prepare_enable(sor->clk); 1497 state = to_sor_state(output->connector.state);
1204 if (err < 0)
1205 dev_err(sor->dev, "failed to enable clock: %d\n", err);
1206 1498
1207 reset_control_deassert(sor->rst); 1499 pm_runtime_get_sync(sor->dev);
1208 1500
1209 if (output->panel) 1501 if (output->panel)
1210 drm_panel_prepare(output->panel); 1502 drm_panel_prepare(output->panel);
@@ -1219,17 +1511,17 @@ static void tegra_sor_edp_enable(struct drm_encoder *encoder)
1219 return; 1511 return;
1220 } 1512 }
1221 1513
1222 err = clk_set_parent(sor->clk, sor->clk_safe); 1514 /* switch to safe parent clock */
1515 err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
1223 if (err < 0) 1516 if (err < 0)
1224 dev_err(sor->dev, "failed to set safe parent clock: %d\n", err); 1517 dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
1225 1518
1226 memset(&config, 0, sizeof(config)); 1519 memset(&config, 0, sizeof(config));
1227 config.bits_per_pixel = output->connector.display_info.bpc * 3; 1520 config.bits_per_pixel = state->bpc * 3;
1228 1521
1229 err = tegra_sor_calc_config(sor, mode, &config, &link); 1522 err = tegra_sor_compute_config(sor, mode, &config, &link);
1230 if (err < 0) 1523 if (err < 0)
1231 dev_err(sor->dev, "failed to compute link configuration: %d\n", 1524 dev_err(sor->dev, "failed to compute configuration: %d\n", err);
1232 err);
1233 1525
1234 value = tegra_sor_readl(sor, SOR_CLK_CNTRL); 1526 value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
1235 value &= ~SOR_CLK_CNTRL_DP_CLK_SEL_MASK; 1527 value &= ~SOR_CLK_CNTRL_DP_CLK_SEL_MASK;
@@ -1326,10 +1618,18 @@ static void tegra_sor_edp_enable(struct drm_encoder *encoder)
1326 value &= ~SOR_PLL2_PORT_POWERDOWN; 1618 value &= ~SOR_PLL2_PORT_POWERDOWN;
1327 tegra_sor_writel(sor, value, SOR_PLL2); 1619 tegra_sor_writel(sor, value, SOR_PLL2);
1328 1620
1329 /* switch to DP clock */ 1621 /* XXX not in TRM */
1330 err = clk_set_parent(sor->clk, sor->clk_dp); 1622 for (value = 0, i = 0; i < 5; i++)
1623 value |= SOR_XBAR_CTRL_LINK0_XSEL(i, sor->soc->xbar_cfg[i]) |
1624 SOR_XBAR_CTRL_LINK1_XSEL(i, i);
1625
1626 tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL);
1627 tegra_sor_writel(sor, value, SOR_XBAR_CTRL);
1628
1629 /* switch to DP parent clock */
1630 err = tegra_sor_set_parent_clock(sor, sor->clk_dp);
1331 if (err < 0) 1631 if (err < 0)
1332 dev_err(sor->dev, "failed to set DP parent clock: %d\n", err); 1632 dev_err(sor->dev, "failed to set parent clock: %d\n", err);
1333 1633
1334 /* power DP lanes */ 1634 /* power DP lanes */
1335 value = tegra_sor_readl(sor, SOR_DP_PADCTL0); 1635 value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
@@ -1375,13 +1675,11 @@ static void tegra_sor_edp_enable(struct drm_encoder *encoder)
1375 value |= drm_dp_link_rate_to_bw_code(link.rate) << 2; 1675 value |= drm_dp_link_rate_to_bw_code(link.rate) << 2;
1376 tegra_sor_writel(sor, value, SOR_CLK_CNTRL); 1676 tegra_sor_writel(sor, value, SOR_CLK_CNTRL);
1377 1677
1378 /* set linkctl */ 1678 tegra_sor_apply_config(sor, &config);
1679
1680 /* enable link */
1379 value = tegra_sor_readl(sor, SOR_DP_LINKCTL0); 1681 value = tegra_sor_readl(sor, SOR_DP_LINKCTL0);
1380 value |= SOR_DP_LINKCTL_ENABLE; 1682 value |= SOR_DP_LINKCTL_ENABLE;
1381
1382 value &= ~SOR_DP_LINKCTL_TU_SIZE_MASK;
1383 value |= SOR_DP_LINKCTL_TU_SIZE(config.tu_size);
1384
1385 value |= SOR_DP_LINKCTL_ENHANCED_FRAME; 1683 value |= SOR_DP_LINKCTL_ENHANCED_FRAME;
1386 tegra_sor_writel(sor, value, SOR_DP_LINKCTL0); 1684 tegra_sor_writel(sor, value, SOR_DP_LINKCTL0);
1387 1685
@@ -1394,35 +1692,6 @@ static void tegra_sor_edp_enable(struct drm_encoder *encoder)
1394 1692
1395 tegra_sor_writel(sor, value, SOR_DP_TPG); 1693 tegra_sor_writel(sor, value, SOR_DP_TPG);
1396 1694
1397 value = tegra_sor_readl(sor, SOR_DP_CONFIG0);
1398 value &= ~SOR_DP_CONFIG_WATERMARK_MASK;
1399 value |= SOR_DP_CONFIG_WATERMARK(config.watermark);
1400
1401 value &= ~SOR_DP_CONFIG_ACTIVE_SYM_COUNT_MASK;
1402 value |= SOR_DP_CONFIG_ACTIVE_SYM_COUNT(config.active_count);
1403
1404 value &= ~SOR_DP_CONFIG_ACTIVE_SYM_FRAC_MASK;
1405 value |= SOR_DP_CONFIG_ACTIVE_SYM_FRAC(config.active_frac);
1406
1407 if (config.active_polarity)
1408 value |= SOR_DP_CONFIG_ACTIVE_SYM_POLARITY;
1409 else
1410 value &= ~SOR_DP_CONFIG_ACTIVE_SYM_POLARITY;
1411
1412 value |= SOR_DP_CONFIG_ACTIVE_SYM_ENABLE;
1413 value |= SOR_DP_CONFIG_DISPARITY_NEGATIVE;
1414 tegra_sor_writel(sor, value, SOR_DP_CONFIG0);
1415
1416 value = tegra_sor_readl(sor, SOR_DP_AUDIO_HBLANK_SYMBOLS);
1417 value &= ~SOR_DP_AUDIO_HBLANK_SYMBOLS_MASK;
1418 value |= config.hblank_symbols & 0xffff;
1419 tegra_sor_writel(sor, value, SOR_DP_AUDIO_HBLANK_SYMBOLS);
1420
1421 value = tegra_sor_readl(sor, SOR_DP_AUDIO_VBLANK_SYMBOLS);
1422 value &= ~SOR_DP_AUDIO_VBLANK_SYMBOLS_MASK;
1423 value |= config.vblank_symbols & 0xffff;
1424 tegra_sor_writel(sor, value, SOR_DP_AUDIO_VBLANK_SYMBOLS);
1425
1426 /* enable pad calibration logic */ 1695 /* enable pad calibration logic */
1427 value = tegra_sor_readl(sor, SOR_DP_PADCTL0); 1696 value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
1428 value |= SOR_DP_PADCTL_PAD_CAL_PD; 1697 value |= SOR_DP_PADCTL_PAD_CAL_PD;
@@ -1478,75 +1747,19 @@ static void tegra_sor_edp_enable(struct drm_encoder *encoder)
1478 if (err < 0) 1747 if (err < 0)
1479 dev_err(sor->dev, "failed to power up SOR: %d\n", err); 1748 dev_err(sor->dev, "failed to power up SOR: %d\n", err);
1480 1749
1481 /*
1482 * configure panel (24bpp, vsync-, hsync-, DP-A protocol, complete
1483 * raster, associate with display controller)
1484 */
1485 value = SOR_STATE_ASY_PROTOCOL_DP_A |
1486 SOR_STATE_ASY_CRC_MODE_COMPLETE |
1487 SOR_STATE_ASY_OWNER(dc->pipe + 1);
1488
1489 if (mode->flags & DRM_MODE_FLAG_PHSYNC)
1490 value &= ~SOR_STATE_ASY_HSYNCPOL;
1491
1492 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
1493 value |= SOR_STATE_ASY_HSYNCPOL;
1494
1495 if (mode->flags & DRM_MODE_FLAG_PVSYNC)
1496 value &= ~SOR_STATE_ASY_VSYNCPOL;
1497
1498 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
1499 value |= SOR_STATE_ASY_VSYNCPOL;
1500
1501 switch (config.bits_per_pixel) {
1502 case 24:
1503 value |= SOR_STATE_ASY_PIXELDEPTH_BPP_24_444;
1504 break;
1505
1506 case 18:
1507 value |= SOR_STATE_ASY_PIXELDEPTH_BPP_18_444;
1508 break;
1509
1510 default:
1511 BUG();
1512 break;
1513 }
1514
1515 tegra_sor_writel(sor, value, SOR_STATE1);
1516
1517 /*
1518 * TODO: The video timing programming below doesn't seem to match the
1519 * register definitions.
1520 */
1521
1522 value = ((mode->vtotal & 0x7fff) << 16) | (mode->htotal & 0x7fff);
1523 tegra_sor_writel(sor, value, SOR_HEAD_STATE1(dc->pipe));
1524
1525 vse = mode->vsync_end - mode->vsync_start - 1;
1526 hse = mode->hsync_end - mode->hsync_start - 1;
1527
1528 value = ((vse & 0x7fff) << 16) | (hse & 0x7fff);
1529 tegra_sor_writel(sor, value, SOR_HEAD_STATE2(dc->pipe));
1530
1531 vbe = vse + (mode->vsync_start - mode->vdisplay);
1532 hbe = hse + (mode->hsync_start - mode->hdisplay);
1533
1534 value = ((vbe & 0x7fff) << 16) | (hbe & 0x7fff);
1535 tegra_sor_writel(sor, value, SOR_HEAD_STATE3(dc->pipe));
1536
1537 vbs = vbe + mode->vdisplay;
1538 hbs = hbe + mode->hdisplay;
1539
1540 value = ((vbs & 0x7fff) << 16) | (hbs & 0x7fff);
1541 tegra_sor_writel(sor, value, SOR_HEAD_STATE4(dc->pipe));
1542
1543 tegra_sor_writel(sor, 0x1, SOR_HEAD_STATE5(dc->pipe));
1544
1545 /* CSTM (LVDS, link A/B, upper) */ 1750 /* CSTM (LVDS, link A/B, upper) */
1546 value = SOR_CSTM_LVDS | SOR_CSTM_LINK_ACT_A | SOR_CSTM_LINK_ACT_B | 1751 value = SOR_CSTM_LVDS | SOR_CSTM_LINK_ACT_A | SOR_CSTM_LINK_ACT_B |
1547 SOR_CSTM_UPPER; 1752 SOR_CSTM_UPPER;
1548 tegra_sor_writel(sor, value, SOR_CSTM); 1753 tegra_sor_writel(sor, value, SOR_CSTM);
1549 1754
1755 /* use DP-A protocol */
1756 value = tegra_sor_readl(sor, SOR_STATE1);
1757 value &= ~SOR_STATE_ASY_PROTOCOL_MASK;
1758 value |= SOR_STATE_ASY_PROTOCOL_DP_A;
1759 tegra_sor_writel(sor, value, SOR_STATE1);
1760
1761 tegra_sor_mode_set(sor, mode, state);
1762
1550 /* PWM setup */ 1763 /* PWM setup */
1551 err = tegra_sor_setup_pwm(sor, 250); 1764 err = tegra_sor_setup_pwm(sor, 250);
1552 if (err < 0) 1765 if (err < 0)
@@ -1578,11 +1791,15 @@ tegra_sor_encoder_atomic_check(struct drm_encoder *encoder,
1578 struct drm_connector_state *conn_state) 1791 struct drm_connector_state *conn_state)
1579{ 1792{
1580 struct tegra_output *output = encoder_to_output(encoder); 1793 struct tegra_output *output = encoder_to_output(encoder);
1794 struct tegra_sor_state *state = to_sor_state(conn_state);
1581 struct tegra_dc *dc = to_tegra_dc(conn_state->crtc); 1795 struct tegra_dc *dc = to_tegra_dc(conn_state->crtc);
1582 unsigned long pclk = crtc_state->mode.clock * 1000; 1796 unsigned long pclk = crtc_state->mode.clock * 1000;
1583 struct tegra_sor *sor = to_sor(output); 1797 struct tegra_sor *sor = to_sor(output);
1798 struct drm_display_info *info;
1584 int err; 1799 int err;
1585 1800
1801 info = &output->connector.display_info;
1802
1586 err = tegra_dc_state_setup_clock(dc, crtc_state, sor->clk_parent, 1803 err = tegra_dc_state_setup_clock(dc, crtc_state, sor->clk_parent,
1587 pclk, 0); 1804 pclk, 0);
1588 if (err < 0) { 1805 if (err < 0) {
@@ -1590,6 +1807,18 @@ tegra_sor_encoder_atomic_check(struct drm_encoder *encoder,
1590 return err; 1807 return err;
1591 } 1808 }
1592 1809
1810 switch (info->bpc) {
1811 case 8:
1812 case 6:
1813 state->bpc = info->bpc;
1814 break;
1815
1816 default:
1817 DRM_DEBUG_KMS("%u bits-per-color not supported\n", info->bpc);
1818 state->bpc = 8;
1819 break;
1820 }
1821
1593 return 0; 1822 return 0;
1594} 1823}
1595 1824
@@ -1752,9 +1981,7 @@ static void tegra_sor_hdmi_disable(struct drm_encoder *encoder)
1752 if (err < 0) 1981 if (err < 0)
1753 dev_err(sor->dev, "failed to power off HDMI rail: %d\n", err); 1982 dev_err(sor->dev, "failed to power off HDMI rail: %d\n", err);
1754 1983
1755 reset_control_assert(sor->rst); 1984 pm_runtime_put(sor->dev);
1756 usleep_range(1000, 2000);
1757 clk_disable_unprepare(sor->clk);
1758} 1985}
1759 1986
1760static void tegra_sor_hdmi_enable(struct drm_encoder *encoder) 1987static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
@@ -1762,26 +1989,21 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
1762 struct tegra_output *output = encoder_to_output(encoder); 1989 struct tegra_output *output = encoder_to_output(encoder);
1763 unsigned int h_ref_to_sync = 1, pulse_start, max_ac; 1990 unsigned int h_ref_to_sync = 1, pulse_start, max_ac;
1764 struct tegra_dc *dc = to_tegra_dc(encoder->crtc); 1991 struct tegra_dc *dc = to_tegra_dc(encoder->crtc);
1765 unsigned int vbe, vse, hbe, hse, vbs, hbs, div;
1766 struct tegra_sor_hdmi_settings *settings; 1992 struct tegra_sor_hdmi_settings *settings;
1767 struct tegra_sor *sor = to_sor(output); 1993 struct tegra_sor *sor = to_sor(output);
1994 struct tegra_sor_state *state;
1768 struct drm_display_mode *mode; 1995 struct drm_display_mode *mode;
1769 struct drm_display_info *info; 1996 unsigned int div, i;
1770 u32 value; 1997 u32 value;
1771 int err; 1998 int err;
1772 1999
2000 state = to_sor_state(output->connector.state);
1773 mode = &encoder->crtc->state->adjusted_mode; 2001 mode = &encoder->crtc->state->adjusted_mode;
1774 info = &output->connector.display_info;
1775 2002
1776 err = clk_prepare_enable(sor->clk); 2003 pm_runtime_get_sync(sor->dev);
1777 if (err < 0)
1778 dev_err(sor->dev, "failed to enable clock: %d\n", err);
1779 2004
1780 usleep_range(1000, 2000); 2005 /* switch to safe parent clock */
1781 2006 err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
1782 reset_control_deassert(sor->rst);
1783
1784 err = clk_set_parent(sor->clk, sor->clk_safe);
1785 if (err < 0) 2007 if (err < 0)
1786 dev_err(sor->dev, "failed to set safe parent clock: %d\n", err); 2008 dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
1787 2009
@@ -1877,22 +2099,20 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
1877 value = SOR_REFCLK_DIV_INT(div) | SOR_REFCLK_DIV_FRAC(div); 2099 value = SOR_REFCLK_DIV_INT(div) | SOR_REFCLK_DIV_FRAC(div);
1878 tegra_sor_writel(sor, value, SOR_REFCLK); 2100 tegra_sor_writel(sor, value, SOR_REFCLK);
1879 2101
1880 /* XXX don't hardcode */ 2102 /* XXX not in TRM */
1881 value = SOR_XBAR_CTRL_LINK1_XSEL(4, 4) | 2103 for (value = 0, i = 0; i < 5; i++)
1882 SOR_XBAR_CTRL_LINK1_XSEL(3, 3) | 2104 value |= SOR_XBAR_CTRL_LINK0_XSEL(i, sor->soc->xbar_cfg[i]) |
1883 SOR_XBAR_CTRL_LINK1_XSEL(2, 2) | 2105 SOR_XBAR_CTRL_LINK1_XSEL(i, i);
1884 SOR_XBAR_CTRL_LINK1_XSEL(1, 1) |
1885 SOR_XBAR_CTRL_LINK1_XSEL(0, 0) |
1886 SOR_XBAR_CTRL_LINK0_XSEL(4, 4) |
1887 SOR_XBAR_CTRL_LINK0_XSEL(3, 3) |
1888 SOR_XBAR_CTRL_LINK0_XSEL(2, 0) |
1889 SOR_XBAR_CTRL_LINK0_XSEL(1, 1) |
1890 SOR_XBAR_CTRL_LINK0_XSEL(0, 2);
1891 tegra_sor_writel(sor, value, SOR_XBAR_CTRL);
1892 2106
1893 tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL); 2107 tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL);
2108 tegra_sor_writel(sor, value, SOR_XBAR_CTRL);
1894 2109
1895 err = clk_set_parent(sor->clk, sor->clk_parent); 2110 /* switch to parent clock */
2111 err = clk_set_parent(sor->clk_src, sor->clk_parent);
2112 if (err < 0)
2113 dev_err(sor->dev, "failed to set source clock: %d\n", err);
2114
2115 err = tegra_sor_set_parent_clock(sor, sor->clk_src);
1896 if (err < 0) 2116 if (err < 0)
1897 dev_err(sor->dev, "failed to set parent clock: %d\n", err); 2117 dev_err(sor->dev, "failed to set parent clock: %d\n", err);
1898 2118
@@ -2002,7 +2222,7 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
2002 value &= ~DITHER_CONTROL_MASK; 2222 value &= ~DITHER_CONTROL_MASK;
2003 value &= ~BASE_COLOR_SIZE_MASK; 2223 value &= ~BASE_COLOR_SIZE_MASK;
2004 2224
2005 switch (info->bpc) { 2225 switch (state->bpc) {
2006 case 6: 2226 case 6:
2007 value |= BASE_COLOR_SIZE_666; 2227 value |= BASE_COLOR_SIZE_666;
2008 break; 2228 break;
@@ -2012,7 +2232,8 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
2012 break; 2232 break;
2013 2233
2014 default: 2234 default:
2015 WARN(1, "%u bits-per-color not supported\n", info->bpc); 2235 WARN(1, "%u bits-per-color not supported\n", state->bpc);
2236 value |= BASE_COLOR_SIZE_888;
2016 break; 2237 break;
2017 } 2238 }
2018 2239
@@ -2022,83 +2243,19 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder)
2022 if (err < 0) 2243 if (err < 0)
2023 dev_err(sor->dev, "failed to power up SOR: %d\n", err); 2244 dev_err(sor->dev, "failed to power up SOR: %d\n", err);
2024 2245
2025 /* configure mode */ 2246 /* configure dynamic range of output */
2026 value = tegra_sor_readl(sor, SOR_STATE1);
2027 value &= ~SOR_STATE_ASY_PIXELDEPTH_MASK;
2028 value &= ~SOR_STATE_ASY_CRC_MODE_MASK;
2029 value &= ~SOR_STATE_ASY_OWNER_MASK;
2030
2031 value |= SOR_STATE_ASY_CRC_MODE_COMPLETE |
2032 SOR_STATE_ASY_OWNER(dc->pipe + 1);
2033
2034 if (mode->flags & DRM_MODE_FLAG_PHSYNC)
2035 value &= ~SOR_STATE_ASY_HSYNCPOL;
2036
2037 if (mode->flags & DRM_MODE_FLAG_NHSYNC)
2038 value |= SOR_STATE_ASY_HSYNCPOL;
2039
2040 if (mode->flags & DRM_MODE_FLAG_PVSYNC)
2041 value &= ~SOR_STATE_ASY_VSYNCPOL;
2042
2043 if (mode->flags & DRM_MODE_FLAG_NVSYNC)
2044 value |= SOR_STATE_ASY_VSYNCPOL;
2045
2046 switch (info->bpc) {
2047 case 8:
2048 value |= SOR_STATE_ASY_PIXELDEPTH_BPP_24_444;
2049 break;
2050
2051 case 6:
2052 value |= SOR_STATE_ASY_PIXELDEPTH_BPP_18_444;
2053 break;
2054
2055 default:
2056 BUG();
2057 break;
2058 }
2059
2060 tegra_sor_writel(sor, value, SOR_STATE1);
2061
2062 value = tegra_sor_readl(sor, SOR_HEAD_STATE0(dc->pipe)); 2247 value = tegra_sor_readl(sor, SOR_HEAD_STATE0(dc->pipe));
2063 value &= ~SOR_HEAD_STATE_RANGECOMPRESS_MASK; 2248 value &= ~SOR_HEAD_STATE_RANGECOMPRESS_MASK;
2064 value &= ~SOR_HEAD_STATE_DYNRANGE_MASK; 2249 value &= ~SOR_HEAD_STATE_DYNRANGE_MASK;
2065 tegra_sor_writel(sor, value, SOR_HEAD_STATE0(dc->pipe)); 2250 tegra_sor_writel(sor, value, SOR_HEAD_STATE0(dc->pipe));
2066 2251
2252 /* configure colorspace */
2067 value = tegra_sor_readl(sor, SOR_HEAD_STATE0(dc->pipe)); 2253 value = tegra_sor_readl(sor, SOR_HEAD_STATE0(dc->pipe));
2068 value &= ~SOR_HEAD_STATE_COLORSPACE_MASK; 2254 value &= ~SOR_HEAD_STATE_COLORSPACE_MASK;
2069 value |= SOR_HEAD_STATE_COLORSPACE_RGB; 2255 value |= SOR_HEAD_STATE_COLORSPACE_RGB;
2070 tegra_sor_writel(sor, value, SOR_HEAD_STATE0(dc->pipe)); 2256 tegra_sor_writel(sor, value, SOR_HEAD_STATE0(dc->pipe));
2071 2257
2072 /* 2258 tegra_sor_mode_set(sor, mode, state);
2073 * TODO: The video timing programming below doesn't seem to match the
2074 * register definitions.
2075 */
2076
2077 value = ((mode->vtotal & 0x7fff) << 16) | (mode->htotal & 0x7fff);
2078 tegra_sor_writel(sor, value, SOR_HEAD_STATE1(dc->pipe));
2079
2080 /* sync end = sync width - 1 */
2081 vse = mode->vsync_end - mode->vsync_start - 1;
2082 hse = mode->hsync_end - mode->hsync_start - 1;
2083
2084 value = ((vse & 0x7fff) << 16) | (hse & 0x7fff);
2085 tegra_sor_writel(sor, value, SOR_HEAD_STATE2(dc->pipe));
2086
2087 /* blank end = sync end + back porch */
2088 vbe = vse + (mode->vtotal - mode->vsync_end);
2089 hbe = hse + (mode->htotal - mode->hsync_end);
2090
2091 value = ((vbe & 0x7fff) << 16) | (hbe & 0x7fff);
2092 tegra_sor_writel(sor, value, SOR_HEAD_STATE3(dc->pipe));
2093
2094 /* blank start = blank end + active */
2095 vbs = vbe + mode->vdisplay;
2096 hbs = hbe + mode->hdisplay;
2097
2098 value = ((vbs & 0x7fff) << 16) | (hbs & 0x7fff);
2099 tegra_sor_writel(sor, value, SOR_HEAD_STATE4(dc->pipe));
2100
2101 tegra_sor_writel(sor, 0x1, SOR_HEAD_STATE5(dc->pipe));
2102 2259
2103 tegra_sor_update(sor); 2260 tegra_sor_update(sor);
2104 2261
@@ -2196,10 +2353,13 @@ static int tegra_sor_init(struct host1x_client *client)
2196 * XXX: Remove this reset once proper hand-over from firmware to 2353 * XXX: Remove this reset once proper hand-over from firmware to
2197 * kernel is possible. 2354 * kernel is possible.
2198 */ 2355 */
2199 err = reset_control_assert(sor->rst); 2356 if (sor->rst) {
2200 if (err < 0) { 2357 err = reset_control_assert(sor->rst);
2201 dev_err(sor->dev, "failed to assert SOR reset: %d\n", err); 2358 if (err < 0) {
2202 return err; 2359 dev_err(sor->dev, "failed to assert SOR reset: %d\n",
2360 err);
2361 return err;
2362 }
2203 } 2363 }
2204 2364
2205 err = clk_prepare_enable(sor->clk); 2365 err = clk_prepare_enable(sor->clk);
@@ -2210,10 +2370,13 @@ static int tegra_sor_init(struct host1x_client *client)
2210 2370
2211 usleep_range(1000, 3000); 2371 usleep_range(1000, 3000);
2212 2372
2213 err = reset_control_deassert(sor->rst); 2373 if (sor->rst) {
2214 if (err < 0) { 2374 err = reset_control_deassert(sor->rst);
2215 dev_err(sor->dev, "failed to deassert SOR reset: %d\n", err); 2375 if (err < 0) {
2216 return err; 2376 dev_err(sor->dev, "failed to deassert SOR reset: %d\n",
2377 err);
2378 return err;
2379 }
2217 } 2380 }
2218 2381
2219 err = clk_prepare_enable(sor->clk_safe); 2382 err = clk_prepare_enable(sor->clk_safe);
@@ -2324,11 +2487,16 @@ static const struct tegra_sor_ops tegra_sor_hdmi_ops = {
2324 .remove = tegra_sor_hdmi_remove, 2487 .remove = tegra_sor_hdmi_remove,
2325}; 2488};
2326 2489
2490static const u8 tegra124_sor_xbar_cfg[5] = {
2491 0, 1, 2, 3, 4
2492};
2493
2327static const struct tegra_sor_soc tegra124_sor = { 2494static const struct tegra_sor_soc tegra124_sor = {
2328 .supports_edp = true, 2495 .supports_edp = true,
2329 .supports_lvds = true, 2496 .supports_lvds = true,
2330 .supports_hdmi = false, 2497 .supports_hdmi = false,
2331 .supports_dp = false, 2498 .supports_dp = false,
2499 .xbar_cfg = tegra124_sor_xbar_cfg,
2332}; 2500};
2333 2501
2334static const struct tegra_sor_soc tegra210_sor = { 2502static const struct tegra_sor_soc tegra210_sor = {
@@ -2336,6 +2504,11 @@ static const struct tegra_sor_soc tegra210_sor = {
2336 .supports_lvds = false, 2504 .supports_lvds = false,
2337 .supports_hdmi = false, 2505 .supports_hdmi = false,
2338 .supports_dp = false, 2506 .supports_dp = false,
2507 .xbar_cfg = tegra124_sor_xbar_cfg,
2508};
2509
2510static const u8 tegra210_sor_xbar_cfg[5] = {
2511 2, 1, 0, 3, 4
2339}; 2512};
2340 2513
2341static const struct tegra_sor_soc tegra210_sor1 = { 2514static const struct tegra_sor_soc tegra210_sor1 = {
@@ -2346,6 +2519,8 @@ static const struct tegra_sor_soc tegra210_sor1 = {
2346 2519
2347 .num_settings = ARRAY_SIZE(tegra210_sor_hdmi_defaults), 2520 .num_settings = ARRAY_SIZE(tegra210_sor_hdmi_defaults),
2348 .settings = tegra210_sor_hdmi_defaults, 2521 .settings = tegra210_sor_hdmi_defaults,
2522
2523 .xbar_cfg = tegra210_sor_xbar_cfg,
2349}; 2524};
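With the Tegra210 SOR1 table above ({ 2, 1, 0, 3, 4 }), the per-lane loop added to the eDP and HDMI enable paths expands to the same crossbar value that the HDMI path previously hardcoded, roughly:

    /* expansion of the loop for xbar_cfg = { 2, 1, 0, 3, 4 } */
    value = SOR_XBAR_CTRL_LINK0_XSEL(0, 2) | SOR_XBAR_CTRL_LINK1_XSEL(0, 0) |
            SOR_XBAR_CTRL_LINK0_XSEL(1, 1) | SOR_XBAR_CTRL_LINK1_XSEL(1, 1) |
            SOR_XBAR_CTRL_LINK0_XSEL(2, 0) | SOR_XBAR_CTRL_LINK1_XSEL(2, 2) |
            SOR_XBAR_CTRL_LINK0_XSEL(3, 3) | SOR_XBAR_CTRL_LINK1_XSEL(3, 3) |
            SOR_XBAR_CTRL_LINK0_XSEL(4, 4) | SOR_XBAR_CTRL_LINK1_XSEL(4, 4);

The identity table used for Tegra124 and Tegra210 SOR0 simply maps each lane to itself.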
2350 2525
2351static const struct of_device_id tegra_sor_of_match[] = { 2526static const struct of_device_id tegra_sor_of_match[] = {
@@ -2435,11 +2610,14 @@ static int tegra_sor_probe(struct platform_device *pdev)
2435 goto remove; 2610 goto remove;
2436 } 2611 }
2437 2612
2438 sor->rst = devm_reset_control_get(&pdev->dev, "sor"); 2613 if (!pdev->dev.pm_domain) {
2439 if (IS_ERR(sor->rst)) { 2614 sor->rst = devm_reset_control_get(&pdev->dev, "sor");
2440 err = PTR_ERR(sor->rst); 2615 if (IS_ERR(sor->rst)) {
2441 dev_err(&pdev->dev, "failed to get reset control: %d\n", err); 2616 err = PTR_ERR(sor->rst);
2442 goto remove; 2617 dev_err(&pdev->dev, "failed to get reset control: %d\n",
2618 err);
2619 goto remove;
2620 }
2443 } 2621 }
2444 2622
2445 sor->clk = devm_clk_get(&pdev->dev, NULL); 2623 sor->clk = devm_clk_get(&pdev->dev, NULL);
@@ -2449,6 +2627,16 @@ static int tegra_sor_probe(struct platform_device *pdev)
2449 goto remove; 2627 goto remove;
2450 } 2628 }
2451 2629
2630 if (sor->soc->supports_hdmi || sor->soc->supports_dp) {
2631 sor->clk_src = devm_clk_get(&pdev->dev, "source");
2632 if (IS_ERR(sor->clk_src)) {
2633 err = PTR_ERR(sor->clk_src);
2634 dev_err(sor->dev, "failed to get source clock: %d\n",
2635 err);
2636 goto remove;
2637 }
2638 }
2639
2452 sor->clk_parent = devm_clk_get(&pdev->dev, "parent"); 2640 sor->clk_parent = devm_clk_get(&pdev->dev, "parent");
2453 if (IS_ERR(sor->clk_parent)) { 2641 if (IS_ERR(sor->clk_parent)) {
2454 err = PTR_ERR(sor->clk_parent); 2642 err = PTR_ERR(sor->clk_parent);
@@ -2470,6 +2658,19 @@ static int tegra_sor_probe(struct platform_device *pdev)
2470 goto remove; 2658 goto remove;
2471 } 2659 }
2472 2660
2661 platform_set_drvdata(pdev, sor);
2662 pm_runtime_enable(&pdev->dev);
2663
2664 pm_runtime_get_sync(&pdev->dev);
2665 sor->clk_brick = tegra_clk_sor_brick_register(sor, "sor1_brick");
2666 pm_runtime_put(&pdev->dev);
2667
2668 if (IS_ERR(sor->clk_brick)) {
2669 err = PTR_ERR(sor->clk_brick);
2670 dev_err(&pdev->dev, "failed to register SOR clock: %d\n", err);
2671 goto remove;
2672 }
2673
2473 INIT_LIST_HEAD(&sor->client.list); 2674 INIT_LIST_HEAD(&sor->client.list);
2474 sor->client.ops = &sor_client_ops; 2675 sor->client.ops = &sor_client_ops;
2475 sor->client.dev = &pdev->dev; 2676 sor->client.dev = &pdev->dev;
@@ -2481,8 +2682,6 @@ static int tegra_sor_probe(struct platform_device *pdev)
2481 goto remove; 2682 goto remove;
2482 } 2683 }
2483 2684
2484 platform_set_drvdata(pdev, sor);
2485
2486 return 0; 2685 return 0;
2487 2686
2488remove: 2687remove:
@@ -2498,6 +2697,8 @@ static int tegra_sor_remove(struct platform_device *pdev)
2498 struct tegra_sor *sor = platform_get_drvdata(pdev); 2697 struct tegra_sor *sor = platform_get_drvdata(pdev);
2499 int err; 2698 int err;
2500 2699
2700 pm_runtime_disable(&pdev->dev);
2701
2501 err = host1x_client_unregister(&sor->client); 2702 err = host1x_client_unregister(&sor->client);
2502 if (err < 0) { 2703 if (err < 0) {
2503 dev_err(&pdev->dev, "failed to unregister host1x client: %d\n", 2704 dev_err(&pdev->dev, "failed to unregister host1x client: %d\n",
@@ -2516,10 +2717,62 @@ static int tegra_sor_remove(struct platform_device *pdev)
2516 return 0; 2717 return 0;
2517} 2718}
2518 2719
2720#ifdef CONFIG_PM
2721static int tegra_sor_suspend(struct device *dev)
2722{
2723 struct tegra_sor *sor = dev_get_drvdata(dev);
2724 int err;
2725
2726 if (sor->rst) {
2727 err = reset_control_assert(sor->rst);
2728 if (err < 0) {
2729 dev_err(dev, "failed to assert reset: %d\n", err);
2730 return err;
2731 }
2732 }
2733
2734 usleep_range(1000, 2000);
2735
2736 clk_disable_unprepare(sor->clk);
2737
2738 return 0;
2739}
2740
2741static int tegra_sor_resume(struct device *dev)
2742{
2743 struct tegra_sor *sor = dev_get_drvdata(dev);
2744 int err;
2745
2746 err = clk_prepare_enable(sor->clk);
2747 if (err < 0) {
2748 dev_err(dev, "failed to enable clock: %d\n", err);
2749 return err;
2750 }
2751
2752 usleep_range(1000, 2000);
2753
2754 if (sor->rst) {
2755 err = reset_control_deassert(sor->rst);
2756 if (err < 0) {
2757 dev_err(dev, "failed to deassert reset: %d\n", err);
2758 clk_disable_unprepare(sor->clk);
2759 return err;
2760 }
2761 }
2762
2763 return 0;
2764}
2765#endif
2766
2767static const struct dev_pm_ops tegra_sor_pm_ops = {
2768 SET_RUNTIME_PM_OPS(tegra_sor_suspend, tegra_sor_resume, NULL)
2769};
2770
2519struct platform_driver tegra_sor_driver = { 2771struct platform_driver tegra_sor_driver = {
2520 .driver = { 2772 .driver = {
2521 .name = "tegra-sor", 2773 .name = "tegra-sor",
2522 .of_match_table = tegra_sor_of_match, 2774 .of_match_table = tegra_sor_of_match,
2775 .pm = &tegra_sor_pm_ops,
2523 }, 2776 },
2524 .probe = tegra_sor_probe, 2777 .probe = tegra_sor_probe,
2525 .remove = tegra_sor_remove, 2778 .remove = tegra_sor_remove,
diff --git a/drivers/gpu/drm/tegra/sor.h b/drivers/gpu/drm/tegra/sor.h
index 2d31d027e3f6..865c73b48968 100644
--- a/drivers/gpu/drm/tegra/sor.h
+++ b/drivers/gpu/drm/tegra/sor.h
@@ -27,6 +27,9 @@
27#define SOR_STATE_ASY_PIXELDEPTH_MASK (0xf << 17) 27#define SOR_STATE_ASY_PIXELDEPTH_MASK (0xf << 17)
28#define SOR_STATE_ASY_PIXELDEPTH_BPP_18_444 (0x2 << 17) 28#define SOR_STATE_ASY_PIXELDEPTH_BPP_18_444 (0x2 << 17)
29#define SOR_STATE_ASY_PIXELDEPTH_BPP_24_444 (0x5 << 17) 29#define SOR_STATE_ASY_PIXELDEPTH_BPP_24_444 (0x5 << 17)
30#define SOR_STATE_ASY_PIXELDEPTH_BPP_30_444 (0x6 << 17)
31#define SOR_STATE_ASY_PIXELDEPTH_BPP_36_444 (0x8 << 17)
32#define SOR_STATE_ASY_PIXELDEPTH_BPP_48_444 (0x9 << 17)
30#define SOR_STATE_ASY_VSYNCPOL (1 << 13) 33#define SOR_STATE_ASY_VSYNCPOL (1 << 13)
31#define SOR_STATE_ASY_HSYNCPOL (1 << 12) 34#define SOR_STATE_ASY_HSYNCPOL (1 << 12)
32#define SOR_STATE_ASY_PROTOCOL_MASK (0xf << 8) 35#define SOR_STATE_ASY_PROTOCOL_MASK (0xf << 8)
diff --git a/drivers/gpu/drm/tilcdc/Kconfig b/drivers/gpu/drm/tilcdc/Kconfig
index f60a1ec84fa4..28fed7e206d0 100644
--- a/drivers/gpu/drm/tilcdc/Kconfig
+++ b/drivers/gpu/drm/tilcdc/Kconfig
@@ -2,7 +2,6 @@ config DRM_TILCDC
2 tristate "DRM Support for TI LCDC Display Controller" 2 tristate "DRM Support for TI LCDC Display Controller"
3 depends on DRM && OF && ARM 3 depends on DRM && OF && ARM
4 select DRM_KMS_HELPER 4 select DRM_KMS_HELPER
5 select DRM_KMS_FB_HELPER
6 select DRM_KMS_CMA_HELPER 5 select DRM_KMS_CMA_HELPER
7 select DRM_GEM_CMA_HELPER 6 select DRM_GEM_CMA_HELPER
8 select VIDEOMODE_HELPERS 7 select VIDEOMODE_HELPERS
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index 79027b1c64d3..107c8bd04f6d 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -697,7 +697,7 @@ irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc)
697 697
698 spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags); 698 spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags);
699 699
700 drm_handle_vblank(dev, 0); 700 drm_crtc_handle_vblank(crtc);
701 701
702 if (!skip_event) { 702 if (!skip_event) {
703 struct drm_pending_vblank_event *event; 703 struct drm_pending_vblank_event *event;
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 709bc903524d..d27809372d54 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -541,7 +541,6 @@ static struct drm_driver tilcdc_driver = {
541 .load = tilcdc_load, 541 .load = tilcdc_load,
542 .unload = tilcdc_unload, 542 .unload = tilcdc_unload,
543 .lastclose = tilcdc_lastclose, 543 .lastclose = tilcdc_lastclose,
544 .set_busid = drm_platform_set_busid,
545 .irq_handler = tilcdc_irq, 544 .irq_handler = tilcdc_irq,
546 .irq_preinstall = tilcdc_irq_preinstall, 545 .irq_preinstall = tilcdc_irq_preinstall,
547 .irq_postinstall = tilcdc_irq_postinstall, 546 .irq_postinstall = tilcdc_irq_postinstall,
@@ -549,7 +548,7 @@ static struct drm_driver tilcdc_driver = {
549 .get_vblank_counter = drm_vblank_no_hw_counter, 548 .get_vblank_counter = drm_vblank_no_hw_counter,
550 .enable_vblank = tilcdc_enable_vblank, 549 .enable_vblank = tilcdc_enable_vblank,
551 .disable_vblank = tilcdc_disable_vblank, 550 .disable_vblank = tilcdc_disable_vblank,
552 .gem_free_object = drm_gem_cma_free_object, 551 .gem_free_object_unlocked = drm_gem_cma_free_object,
553 .gem_vm_ops = &drm_gem_cma_vm_ops, 552 .gem_vm_ops = &drm_gem_cma_vm_ops,
554 .dumb_create = drm_gem_cma_dumb_create, 553 .dumb_create = drm_gem_cma_dumb_create,
555 .dumb_map_offset = drm_gem_cma_dumb_map_offset, 554 .dumb_map_offset = drm_gem_cma_dumb_map_offset,
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index a71cf98c655f..4054d804fe06 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -146,10 +146,9 @@ static void ttm_bo_release_list(struct kref *list_kref)
146 BUG_ON(bo->mem.mm_node != NULL); 146 BUG_ON(bo->mem.mm_node != NULL);
147 BUG_ON(!list_empty(&bo->lru)); 147 BUG_ON(!list_empty(&bo->lru));
148 BUG_ON(!list_empty(&bo->ddestroy)); 148 BUG_ON(!list_empty(&bo->ddestroy));
149 149 ttm_tt_destroy(bo->ttm);
150 if (bo->ttm)
151 ttm_tt_destroy(bo->ttm);
152 atomic_dec(&bo->glob->bo_count); 150 atomic_dec(&bo->glob->bo_count);
151 fence_put(bo->moving);
153 if (bo->resv == &bo->ttm_resv) 152 if (bo->resv == &bo->ttm_resv)
154 reservation_object_fini(&bo->ttm_resv); 153 reservation_object_fini(&bo->ttm_resv);
155 mutex_destroy(&bo->wu_mutex); 154 mutex_destroy(&bo->wu_mutex);
@@ -360,7 +359,8 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
360 ret = bdev->driver->move(bo, evict, interruptible, 359 ret = bdev->driver->move(bo, evict, interruptible,
361 no_wait_gpu, mem); 360 no_wait_gpu, mem);
362 else 361 else
363 ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem); 362 ret = ttm_bo_move_memcpy(bo, evict, interruptible,
363 no_wait_gpu, mem);
364 364
365 if (ret) { 365 if (ret) {
366 if (bdev->driver->move_notify) { 366 if (bdev->driver->move_notify) {
@@ -396,8 +396,7 @@ moved:
396 396
397out_err: 397out_err:
398 new_man = &bdev->man[bo->mem.mem_type]; 398 new_man = &bdev->man[bo->mem.mem_type];
399 if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) { 399 if (new_man->flags & TTM_MEMTYPE_FLAG_FIXED) {
400 ttm_tt_unbind(bo->ttm);
401 ttm_tt_destroy(bo->ttm); 400 ttm_tt_destroy(bo->ttm);
402 bo->ttm = NULL; 401 bo->ttm = NULL;
403 } 402 }
@@ -418,11 +417,8 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
418 if (bo->bdev->driver->move_notify) 417 if (bo->bdev->driver->move_notify)
419 bo->bdev->driver->move_notify(bo, NULL); 418 bo->bdev->driver->move_notify(bo, NULL);
420 419
421 if (bo->ttm) { 420 ttm_tt_destroy(bo->ttm);
422 ttm_tt_unbind(bo->ttm); 421 bo->ttm = NULL;
423 ttm_tt_destroy(bo->ttm);
424 bo->ttm = NULL;
425 }
426 ttm_bo_mem_put(bo, &bo->mem); 422 ttm_bo_mem_put(bo, &bo->mem);
427 423
428 ww_mutex_unlock (&bo->resv->lock); 424 ww_mutex_unlock (&bo->resv->lock);
@@ -688,15 +684,6 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
688 struct ttm_placement placement; 684 struct ttm_placement placement;
689 int ret = 0; 685 int ret = 0;
690 686
691 ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
692
693 if (unlikely(ret != 0)) {
694 if (ret != -ERESTARTSYS) {
695 pr_err("Failed to expire sync object before buffer eviction\n");
696 }
697 goto out;
698 }
699
700 lockdep_assert_held(&bo->resv->lock.base); 687 lockdep_assert_held(&bo->resv->lock.base);
701 688
702 evict_mem = bo->mem; 689 evict_mem = bo->mem;
@@ -720,7 +707,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
720 707
721 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible, 708 ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
722 no_wait_gpu); 709 no_wait_gpu);
723 if (ret) { 710 if (unlikely(ret)) {
724 if (ret != -ERESTARTSYS) 711 if (ret != -ERESTARTSYS)
725 pr_err("Buffer eviction failed\n"); 712 pr_err("Buffer eviction failed\n");
726 ttm_bo_mem_put(bo, &evict_mem); 713 ttm_bo_mem_put(bo, &evict_mem);
@@ -800,6 +787,34 @@ void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
800EXPORT_SYMBOL(ttm_bo_mem_put); 787EXPORT_SYMBOL(ttm_bo_mem_put);
801 788
802/** 789/**
790 * Add the last move fence to the BO and reserve a new shared slot.
791 */
792static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo,
793 struct ttm_mem_type_manager *man,
794 struct ttm_mem_reg *mem)
795{
796 struct fence *fence;
797 int ret;
798
799 spin_lock(&man->move_lock);
800 fence = fence_get(man->move);
801 spin_unlock(&man->move_lock);
802
803 if (fence) {
804 reservation_object_add_shared_fence(bo->resv, fence);
805
806 ret = reservation_object_reserve_shared(bo->resv);
807 if (unlikely(ret))
808 return ret;
809
810 fence_put(bo->moving);
811 bo->moving = fence;
812 }
813
814 return 0;
815}
816
817/**
803 * Repeatedly evict memory from the LRU for @mem_type until we create enough 818 * Repeatedly evict memory from the LRU for @mem_type until we create enough
804 * space, or we've evicted everything and there isn't enough space. 819 * space, or we've evicted everything and there isn't enough space.
805 */ 820 */
@@ -825,10 +840,8 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
825 if (unlikely(ret != 0)) 840 if (unlikely(ret != 0))
826 return ret; 841 return ret;
827 } while (1); 842 } while (1);
828 if (mem->mm_node == NULL)
829 return -ENOMEM;
830 mem->mem_type = mem_type; 843 mem->mem_type = mem_type;
831 return 0; 844 return ttm_bo_add_move_fence(bo, man, mem);
832} 845}
833 846
834static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man, 847static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
@@ -898,6 +911,10 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
898 bool has_erestartsys = false; 911 bool has_erestartsys = false;
899 int i, ret; 912 int i, ret;
900 913
914 ret = reservation_object_reserve_shared(bo->resv);
915 if (unlikely(ret))
916 return ret;
917
901 mem->mm_node = NULL; 918 mem->mm_node = NULL;
902 for (i = 0; i < placement->num_placement; ++i) { 919 for (i = 0; i < placement->num_placement; ++i) {
903 const struct ttm_place *place = &placement->placement[i]; 920 const struct ttm_place *place = &placement->placement[i];
@@ -931,9 +948,15 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
931 ret = (*man->func->get_node)(man, bo, place, mem); 948 ret = (*man->func->get_node)(man, bo, place, mem);
932 if (unlikely(ret)) 949 if (unlikely(ret))
933 return ret; 950 return ret;
934 951
935 if (mem->mm_node) 952 if (mem->mm_node) {
953 ret = ttm_bo_add_move_fence(bo, man, mem);
954 if (unlikely(ret)) {
955 (*man->func->put_node)(man, mem);
956 return ret;
957 }
936 break; 958 break;
959 }
937 } 960 }
938 961
939 if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) { 962 if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
@@ -1000,20 +1023,6 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
1000 1023
1001 lockdep_assert_held(&bo->resv->lock.base); 1024 lockdep_assert_held(&bo->resv->lock.base);
1002 1025
1003 /*
1004 * Don't wait for the BO on initial allocation. This is important when
1005 * the BO has an imported reservation object.
1006 */
1007 if (bo->mem.mem_type != TTM_PL_SYSTEM || bo->ttm != NULL) {
1008 /*
1009 * FIXME: It's possible to pipeline buffer moves.
1010 * Have the driver move function wait for idle when necessary,
1011 * instead of doing it here.
1012 */
1013 ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
1014 if (ret)
1015 return ret;
1016 }
1017 mem.num_pages = bo->num_pages; 1026 mem.num_pages = bo->num_pages;
1018 mem.size = mem.num_pages << PAGE_SHIFT; 1027 mem.size = mem.num_pages << PAGE_SHIFT;
1019 mem.page_alignment = bo->mem.page_alignment; 1028 mem.page_alignment = bo->mem.page_alignment;
@@ -1166,7 +1175,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
1166 bo->mem.page_alignment = page_alignment; 1175 bo->mem.page_alignment = page_alignment;
1167 bo->mem.bus.io_reserved_vm = false; 1176 bo->mem.bus.io_reserved_vm = false;
1168 bo->mem.bus.io_reserved_count = 0; 1177 bo->mem.bus.io_reserved_count = 0;
1169 bo->priv_flags = 0; 1178 bo->moving = NULL;
1170 bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED); 1179 bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
1171 bo->persistent_swap_storage = persistent_swap_storage; 1180 bo->persistent_swap_storage = persistent_swap_storage;
1172 bo->acc_size = acc_size; 1181 bo->acc_size = acc_size;
@@ -1278,6 +1287,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
1278{ 1287{
1279 struct ttm_mem_type_manager *man = &bdev->man[mem_type]; 1288 struct ttm_mem_type_manager *man = &bdev->man[mem_type];
1280 struct ttm_bo_global *glob = bdev->glob; 1289 struct ttm_bo_global *glob = bdev->glob;
1290 struct fence *fence;
1281 int ret; 1291 int ret;
1282 1292
1283 /* 1293 /*
@@ -1298,6 +1308,23 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
1298 spin_lock(&glob->lru_lock); 1308 spin_lock(&glob->lru_lock);
1299 } 1309 }
1300 spin_unlock(&glob->lru_lock); 1310 spin_unlock(&glob->lru_lock);
1311
1312 spin_lock(&man->move_lock);
1313 fence = fence_get(man->move);
1314 spin_unlock(&man->move_lock);
1315
1316 if (fence) {
1317 ret = fence_wait(fence, false);
1318 fence_put(fence);
1319 if (ret) {
1320 if (allow_errors) {
1321 return ret;
1322 } else {
1323 pr_err("Cleanup eviction failed\n");
1324 }
1325 }
1326 }
1327
1301 return 0; 1328 return 0;
1302} 1329}
1303 1330
@@ -1317,6 +1344,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
1317 mem_type); 1344 mem_type);
1318 return ret; 1345 return ret;
1319 } 1346 }
1347 fence_put(man->move);
1320 1348
1321 man->use_type = false; 1349 man->use_type = false;
1322 man->has_type = false; 1350 man->has_type = false;
@@ -1362,6 +1390,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
1362 man->io_reserve_fastpath = true; 1390 man->io_reserve_fastpath = true;
1363 man->use_io_reserve_lru = false; 1391 man->use_io_reserve_lru = false;
1364 mutex_init(&man->io_reserve_mutex); 1392 mutex_init(&man->io_reserve_mutex);
1393 spin_lock_init(&man->move_lock);
1365 INIT_LIST_HEAD(&man->io_reserve_lru); 1394 INIT_LIST_HEAD(&man->io_reserve_lru);
1366 1395
1367 ret = bdev->driver->init_mem_type(bdev, type, man); 1396 ret = bdev->driver->init_mem_type(bdev, type, man);
@@ -1380,6 +1409,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
1380 man->size = p_size; 1409 man->size = p_size;
1381 1410
1382 INIT_LIST_HEAD(&man->lru); 1411 INIT_LIST_HEAD(&man->lru);
1412 man->move = NULL;
1383 1413
1384 return 0; 1414 return 0;
1385} 1415}
@@ -1573,47 +1603,17 @@ EXPORT_SYMBOL(ttm_bo_unmap_virtual);
1573int ttm_bo_wait(struct ttm_buffer_object *bo, 1603int ttm_bo_wait(struct ttm_buffer_object *bo,
1574 bool interruptible, bool no_wait) 1604 bool interruptible, bool no_wait)
1575{ 1605{
1576 struct reservation_object_list *fobj; 1606 long timeout = no_wait ? 0 : 15 * HZ;
1577 struct reservation_object *resv;
1578 struct fence *excl;
1579 long timeout = 15 * HZ;
1580 int i;
1581
1582 resv = bo->resv;
1583 fobj = reservation_object_get_list(resv);
1584 excl = reservation_object_get_excl(resv);
1585 if (excl) {
1586 if (!fence_is_signaled(excl)) {
1587 if (no_wait)
1588 return -EBUSY;
1589
1590 timeout = fence_wait_timeout(excl,
1591 interruptible, timeout);
1592 }
1593 }
1594
1595 for (i = 0; fobj && timeout > 0 && i < fobj->shared_count; ++i) {
1596 struct fence *fence;
1597 fence = rcu_dereference_protected(fobj->shared[i],
1598 reservation_object_held(resv));
1599
1600 if (!fence_is_signaled(fence)) {
1601 if (no_wait)
1602 return -EBUSY;
1603
1604 timeout = fence_wait_timeout(fence,
1605 interruptible, timeout);
1606 }
1607 }
1608 1607
1608 timeout = reservation_object_wait_timeout_rcu(bo->resv, true,
1609 interruptible, timeout);
1609 if (timeout < 0) 1610 if (timeout < 0)
1610 return timeout; 1611 return timeout;
1611 1612
1612 if (timeout == 0) 1613 if (timeout == 0)
1613 return -EBUSY; 1614 return -EBUSY;
1614 1615
1615 reservation_object_add_excl_fence(resv, NULL); 1616 reservation_object_add_excl_fence(bo->resv, NULL);
1616 clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
1617 return 0; 1617 return 0;
1618} 1618}
1619EXPORT_SYMBOL(ttm_bo_wait); 1619EXPORT_SYMBOL(ttm_bo_wait);
@@ -1683,14 +1683,9 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
1683 ttm_bo_list_ref_sub(bo, put_count, true); 1683 ttm_bo_list_ref_sub(bo, put_count, true);
1684 1684
1685 /** 1685 /**
1686 * Wait for GPU, then move to system cached. 1686 * Move to system cached
1687 */ 1687 */
1688 1688
1689 ret = ttm_bo_wait(bo, false, false);
1690
1691 if (unlikely(ret != 0))
1692 goto out;
1693
1694 if ((bo->mem.placement & swap_placement) != swap_placement) { 1689 if ((bo->mem.placement & swap_placement) != swap_placement) {
1695 struct ttm_mem_reg evict_mem; 1690 struct ttm_mem_reg evict_mem;
1696 1691
@@ -1705,6 +1700,14 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
1705 goto out; 1700 goto out;
1706 } 1701 }
1707 1702
1703 /**
1704 * Make sure BO is idle.
1705 */
1706
1707 ret = ttm_bo_wait(bo, false, false);
1708 if (unlikely(ret != 0))
1709 goto out;
1710
1708 ttm_bo_unmap_virtual(bo); 1711 ttm_bo_unmap_virtual(bo);
1709 1712
1710 /** 1713 /**
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index d9831559706e..2df602a35f92 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -321,7 +321,8 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
321} 321}
322 322
323int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, 323int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
324 bool evict, bool no_wait_gpu, 324 bool evict, bool interruptible,
325 bool no_wait_gpu,
325 struct ttm_mem_reg *new_mem) 326 struct ttm_mem_reg *new_mem)
326{ 327{
327 struct ttm_bo_device *bdev = bo->bdev; 328 struct ttm_bo_device *bdev = bo->bdev;
@@ -337,6 +338,10 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
337 unsigned long add = 0; 338 unsigned long add = 0;
338 int dir; 339 int dir;
339 340
341 ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
342 if (ret)
343 return ret;
344
340 ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap); 345 ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
341 if (ret) 346 if (ret)
342 return ret; 347 return ret;
@@ -401,8 +406,7 @@ out2:
401 *old_mem = *new_mem; 406 *old_mem = *new_mem;
402 new_mem->mm_node = NULL; 407 new_mem->mm_node = NULL;
403 408
404 if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) { 409 if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
405 ttm_tt_unbind(ttm);
406 ttm_tt_destroy(ttm); 410 ttm_tt_destroy(ttm);
407 bo->ttm = NULL; 411 bo->ttm = NULL;
408 } 412 }
@@ -462,6 +466,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
462 INIT_LIST_HEAD(&fbo->lru); 466 INIT_LIST_HEAD(&fbo->lru);
463 INIT_LIST_HEAD(&fbo->swap); 467 INIT_LIST_HEAD(&fbo->swap);
464 INIT_LIST_HEAD(&fbo->io_reserve_lru); 468 INIT_LIST_HEAD(&fbo->io_reserve_lru);
469 fbo->moving = NULL;
465 drm_vma_node_reset(&fbo->vma_node); 470 drm_vma_node_reset(&fbo->vma_node);
466 atomic_set(&fbo->cpu_writers, 0); 471 atomic_set(&fbo->cpu_writers, 0);
467 472
@@ -634,7 +639,6 @@ EXPORT_SYMBOL(ttm_bo_kunmap);
634int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, 639int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
635 struct fence *fence, 640 struct fence *fence,
636 bool evict, 641 bool evict,
637 bool no_wait_gpu,
638 struct ttm_mem_reg *new_mem) 642 struct ttm_mem_reg *new_mem)
639{ 643{
640 struct ttm_bo_device *bdev = bo->bdev; 644 struct ttm_bo_device *bdev = bo->bdev;
@@ -649,9 +653,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
649 if (ret) 653 if (ret)
650 return ret; 654 return ret;
651 655
652 if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && 656 if (man->flags & TTM_MEMTYPE_FLAG_FIXED) {
653 (bo->ttm != NULL)) {
654 ttm_tt_unbind(bo->ttm);
655 ttm_tt_destroy(bo->ttm); 657 ttm_tt_destroy(bo->ttm);
656 bo->ttm = NULL; 658 bo->ttm = NULL;
657 } 659 }
@@ -665,7 +667,8 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
665 * operation has completed. 667 * operation has completed.
666 */ 668 */
667 669
668 set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); 670 fence_put(bo->moving);
671 bo->moving = fence_get(fence);
669 672
670 ret = ttm_buffer_object_transfer(bo, &ghost_obj); 673 ret = ttm_buffer_object_transfer(bo, &ghost_obj);
671 if (ret) 674 if (ret)
@@ -694,3 +697,95 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
694 return 0; 697 return 0;
695} 698}
696EXPORT_SYMBOL(ttm_bo_move_accel_cleanup); 699EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
700
701int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
702 struct fence *fence, bool evict,
703 struct ttm_mem_reg *new_mem)
704{
705 struct ttm_bo_device *bdev = bo->bdev;
706 struct ttm_mem_reg *old_mem = &bo->mem;
707
708 struct ttm_mem_type_manager *from = &bdev->man[old_mem->mem_type];
709 struct ttm_mem_type_manager *to = &bdev->man[new_mem->mem_type];
710
711 int ret;
712
713 reservation_object_add_excl_fence(bo->resv, fence);
714
715 if (!evict) {
716 struct ttm_buffer_object *ghost_obj;
717
718 /**
719 * This should help pipeline ordinary buffer moves.
720 *
721 * Hang old buffer memory on a new buffer object,
722 * and leave it to be released when the GPU
723 * operation has completed.
724 */
725
726 fence_put(bo->moving);
727 bo->moving = fence_get(fence);
728
729 ret = ttm_buffer_object_transfer(bo, &ghost_obj);
730 if (ret)
731 return ret;
732
733 reservation_object_add_excl_fence(ghost_obj->resv, fence);
734
735 /**
736 * If we're not moving to fixed memory, the TTM object
 737 * needs to stay alive. Otherwise hang it on the ghost
738 * bo to be unbound and destroyed.
739 */
740
741 if (!(to->flags & TTM_MEMTYPE_FLAG_FIXED))
742 ghost_obj->ttm = NULL;
743 else
744 bo->ttm = NULL;
745
746 ttm_bo_unreserve(ghost_obj);
747 ttm_bo_unref(&ghost_obj);
748
749 } else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) {
750
751 /**
752 * BO doesn't have a TTM we need to bind/unbind. Just remember
753 * this eviction and free up the allocation
754 */
755
756 spin_lock(&from->move_lock);
757 if (!from->move || fence_is_later(fence, from->move)) {
758 fence_put(from->move);
759 from->move = fence_get(fence);
760 }
761 spin_unlock(&from->move_lock);
762
763 ttm_bo_free_old_node(bo);
764
765 fence_put(bo->moving);
766 bo->moving = fence_get(fence);
767
768 } else {
769 /**
770 * Last resort, wait for the move to be completed.
771 *
 772 * Should never happen in practice.
773 */
774
775 ret = ttm_bo_wait(bo, false, false);
776 if (ret)
777 return ret;
778
779 if (to->flags & TTM_MEMTYPE_FLAG_FIXED) {
780 ttm_tt_destroy(bo->ttm);
781 bo->ttm = NULL;
782 }
783 ttm_bo_free_old_node(bo);
784 }
785
786 *old_mem = *new_mem;
787 new_mem->mm_node = NULL;
788
789 return 0;
790}
791EXPORT_SYMBOL(ttm_bo_pipeline_move);
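To show how the new export is meant to be consumed, a hypothetical driver ->move() fast path might look roughly like the sketch below; foo_copy_buffer() and the other foo_* names are illustrative, not a real API:

    static int foo_bo_move_blit(struct ttm_buffer_object *bo, bool evict,
                                bool no_wait_gpu, struct ttm_mem_reg *new_mem)
    {
        struct fence *fence;
        int r;

        /* schedule the copy on the GPU; returns the fence of that job */
        r = foo_copy_buffer(bo, &bo->mem, new_mem, &fence);
        if (r)
            return r;

        /* hand the fence to TTM so the move is pipelined instead of waited on */
        return ttm_bo_pipeline_move(bo, fence, evict, new_mem);
    }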
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 3216878bced3..a6ed9d5e5167 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -48,15 +48,14 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
48{ 48{
49 int ret = 0; 49 int ret = 0;
50 50
51 if (likely(!test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags))) 51 if (likely(!bo->moving))
52 goto out_unlock; 52 goto out_unlock;
53 53
54 /* 54 /*
55 * Quick non-stalling check for idle. 55 * Quick non-stalling check for idle.
56 */ 56 */
57 ret = ttm_bo_wait(bo, false, true); 57 if (fence_is_signaled(bo->moving))
58 if (likely(ret == 0)) 58 goto out_clear;
59 goto out_unlock;
60 59
61 /* 60 /*
62 * If possible, avoid waiting for GPU with mmap_sem 61 * If possible, avoid waiting for GPU with mmap_sem
@@ -68,17 +67,23 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
68 goto out_unlock; 67 goto out_unlock;
69 68
70 up_read(&vma->vm_mm->mmap_sem); 69 up_read(&vma->vm_mm->mmap_sem);
71 (void) ttm_bo_wait(bo, true, false); 70 (void) fence_wait(bo->moving, true);
72 goto out_unlock; 71 goto out_unlock;
73 } 72 }
74 73
75 /* 74 /*
76 * Ordinary wait. 75 * Ordinary wait.
77 */ 76 */
78 ret = ttm_bo_wait(bo, true, false); 77 ret = fence_wait(bo->moving, true);
79 if (unlikely(ret != 0)) 78 if (unlikely(ret != 0)) {
80 ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS : 79 ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
81 VM_FAULT_NOPAGE; 80 VM_FAULT_NOPAGE;
81 goto out_unlock;
82 }
83
84out_clear:
85 fence_put(bo->moving);
86 bo->moving = NULL;
82 87
83out_unlock: 88out_unlock:
84 return ret; 89 return ret;
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 97542c35d6ef..bc5aa573f466 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -166,12 +166,10 @@ EXPORT_SYMBOL(ttm_tt_set_placement_caching);
166 166
167void ttm_tt_destroy(struct ttm_tt *ttm) 167void ttm_tt_destroy(struct ttm_tt *ttm)
168{ 168{
169 if (unlikely(ttm == NULL)) 169 if (ttm == NULL)
170 return; 170 return;
171 171
172 if (ttm->state == tt_bound) { 172 ttm_tt_unbind(ttm);
173 ttm_tt_unbind(ttm);
174 }
175 173
176 if (ttm->state == tt_unbound) 174 if (ttm->state == tt_unbound)
177 ttm_tt_unpopulate(ttm); 175 ttm_tt_unpopulate(ttm);
diff --git a/drivers/gpu/drm/udl/Kconfig b/drivers/gpu/drm/udl/Kconfig
index 613ab0622d6e..1616ec4f4d84 100644
--- a/drivers/gpu/drm/udl/Kconfig
+++ b/drivers/gpu/drm/udl/Kconfig
@@ -4,12 +4,7 @@ config DRM_UDL
4 depends on USB_SUPPORT 4 depends on USB_SUPPORT
5 depends on USB_ARCH_HAS_HCD 5 depends on USB_ARCH_HAS_HCD
6 select USB 6 select USB
7 select FB_SYS_FILLRECT
8 select FB_SYS_COPYAREA
9 select FB_SYS_IMAGEBLIT
10 select FB_DEFERRED_IO
11 select DRM_KMS_HELPER 7 select DRM_KMS_HELPER
12 select DRM_KMS_FB_HELPER
13 help 8 help
14 This is a KMS driver for the USB displaylink video adapters. 9 This is a KMS driver for the USB displaylink video adapters.
15 Say M/Y to add support for these devices via drm/kms interfaces. 10 Say M/Y to add support for these devices via drm/kms interfaces.
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index c20408940cd0..17d34e0edbdd 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -94,7 +94,6 @@ static void udl_usb_disconnect(struct usb_interface *interface)
94 struct drm_device *dev = usb_get_intfdata(interface); 94 struct drm_device *dev = usb_get_intfdata(interface);
95 95
96 drm_kms_helper_poll_disable(dev); 96 drm_kms_helper_poll_disable(dev);
97 drm_connector_unregister_all(dev);
98 udl_fbdev_unplug(dev); 97 udl_fbdev_unplug(dev);
99 udl_drop_usb(dev); 98 udl_drop_usb(dev);
100 drm_unplug_dev(dev); 99 drm_unplug_dev(dev);
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index b87afee44995..f92ea9579674 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -376,7 +376,7 @@ static int udl_crtc_page_flip(struct drm_crtc *crtc,
376 376
377 spin_lock_irqsave(&dev->event_lock, flags); 377 spin_lock_irqsave(&dev->event_lock, flags);
378 if (event) 378 if (event)
379 drm_send_vblank_event(dev, 0, event); 379 drm_crtc_send_vblank_event(crtc, event);
380 spin_unlock_irqrestore(&dev->event_lock, flags); 380 spin_unlock_irqrestore(&dev->event_lock, flags);
381 crtc->primary->fb = fb; 381 crtc->primary->fb = fb;
382 382
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index e5a9d3aaf45f..59adcf8532dd 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -291,8 +291,6 @@ static void vc4_bo_cache_free_old(struct drm_device *dev)
291 291
292/* Called on the last userspace/kernel unreference of the BO. Returns 292/* Called on the last userspace/kernel unreference of the BO. Returns
293 * it to the BO cache if possible, otherwise frees it. 293 * it to the BO cache if possible, otherwise frees it.
294 *
295 * Note that this is called with the struct_mutex held.
296 */ 294 */
297void vc4_free_object(struct drm_gem_object *gem_bo) 295void vc4_free_object(struct drm_gem_object *gem_bo)
298{ 296{
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 0f18b76c7906..8fc2b731b59a 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -46,12 +46,17 @@ struct vc4_crtc {
46 const struct vc4_crtc_data *data; 46 const struct vc4_crtc_data *data;
47 void __iomem *regs; 47 void __iomem *regs;
48 48
49 /* Timestamp at start of vblank irq - unaffected by lock delays. */
50 ktime_t t_vblank;
51
49 /* Which HVS channel we're using for our CRTC. */ 52 /* Which HVS channel we're using for our CRTC. */
50 int channel; 53 int channel;
51 54
52 u8 lut_r[256]; 55 u8 lut_r[256];
53 u8 lut_g[256]; 56 u8 lut_g[256];
54 u8 lut_b[256]; 57 u8 lut_b[256];
58 /* Size in pixels of the COB memory allocated to this CRTC. */
59 u32 cob_size;
55 60
56 struct drm_pending_vblank_event *event; 61 struct drm_pending_vblank_event *event;
57}; 62};
@@ -146,6 +151,144 @@ int vc4_crtc_debugfs_regs(struct seq_file *m, void *unused)
146} 151}
147#endif 152#endif
148 153
154int vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
155 unsigned int flags, int *vpos, int *hpos,
156 ktime_t *stime, ktime_t *etime,
157 const struct drm_display_mode *mode)
158{
159 struct vc4_dev *vc4 = to_vc4_dev(dev);
160 struct vc4_crtc *vc4_crtc = vc4->crtc[crtc_id];
161 u32 val;
162 int fifo_lines;
163 int vblank_lines;
164 int ret = 0;
165
166 /*
167 * XXX Doesn't work well in interlaced mode yet, partially due
168 * to problems in vc4 kms or drm core interlaced mode handling,
169 * so disable for now in interlaced mode.
170 */
171 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
172 return ret;
173
174 /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
175
176 /* Get optional system timestamp before query. */
177 if (stime)
178 *stime = ktime_get();
179
180 /*
181 * Read vertical scanline which is currently composed for our
182 * pixelvalve by the HVS, and also the scaler status.
183 */
184 val = HVS_READ(SCALER_DISPSTATX(vc4_crtc->channel));
185
186 /* Get optional system timestamp after query. */
187 if (etime)
188 *etime = ktime_get();
189
190 /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
191
192 /* Vertical position of hvs composed scanline. */
193 *vpos = VC4_GET_FIELD(val, SCALER_DISPSTATX_LINE);
194
195 /* No hpos info available. */
196 if (hpos)
197 *hpos = 0;
198
199 /* This is the offset we need for translating hvs -> pv scanout pos. */
200 fifo_lines = vc4_crtc->cob_size / mode->crtc_hdisplay;
201
202 if (fifo_lines > 0)
203 ret |= DRM_SCANOUTPOS_VALID;
204
205 /* HVS more than fifo_lines into frame for compositing? */
206 if (*vpos > fifo_lines) {
207 /*
208 * We are in active scanout and can get some meaningful results
209 * from HVS. The actual PV scanout can not trail behind more
210 * than fifo_lines as that is the fifo's capacity. Assume that
211 * in active scanout the HVS and PV work in lockstep wrt. HVS
 212 * refilling the fifo and PV consuming from the fifo, i.e.
 213 * whenever the PV consumes and frees up a scanline in the
 214 * fifo, the HVS will immediately refill it, therefore
 215 * incrementing vpos. Therefore we choose HVS read position -
 216 * fifo size in scanlines as an estimate of the real scanout
217 * position of the PV.
218 */
219 *vpos -= fifo_lines + 1;
220 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
221 *vpos /= 2;
222
223 ret |= DRM_SCANOUTPOS_ACCURATE;
224 return ret;
225 }
226
227 /*
228 * Less: This happens when we are in vblank and the HVS, after getting
229 * the VSTART restart signal from the PV, just started refilling its
230 * fifo with new lines from the top-most lines of the new framebuffers.
231 * The PV does not scan out in vblank, so does not remove lines from
232 * the fifo, so the fifo will be full quickly and the HVS has to pause.
233 * We can't get meaningful readings wrt. scanline position of the PV
 234 * and need to make things up in an approximate but consistent way.
235 */
236 ret |= DRM_SCANOUTPOS_IN_VBLANK;
237 vblank_lines = mode->crtc_vtotal - mode->crtc_vdisplay;
238
239 if (flags & DRM_CALLED_FROM_VBLIRQ) {
240 /*
241 * Assume the irq handler got called close to first
242 * line of vblank, so PV has about a full vblank
243 * scanlines to go, and as a base timestamp use the
244 * one taken at entry into vblank irq handler, so it
245 * is not affected by random delays due to lock
246 * contention on event_lock or vblank_time lock in
247 * the core.
248 */
249 *vpos = -vblank_lines;
250
251 if (stime)
252 *stime = vc4_crtc->t_vblank;
253 if (etime)
254 *etime = vc4_crtc->t_vblank;
255
256 /*
257 * If the HVS fifo is not yet full then we know for certain
258 * we are at the very beginning of vblank, as the hvs just
259 * started refilling, and the stime and etime timestamps
260 * truly correspond to start of vblank.
261 */
262 if ((val & SCALER_DISPSTATX_FULL) != SCALER_DISPSTATX_FULL)
263 ret |= DRM_SCANOUTPOS_ACCURATE;
264 } else {
265 /*
266 * No clue where we are inside vblank. Return a vpos of zero,
267 * which will cause calling code to just return the etime
268 * timestamp uncorrected. At least this is no worse than the
269 * standard fallback.
270 */
271 *vpos = 0;
272 }
273
274 return ret;
275}
276
277int vc4_crtc_get_vblank_timestamp(struct drm_device *dev, unsigned int crtc_id,
278 int *max_error, struct timeval *vblank_time,
279 unsigned flags)
280{
281 struct vc4_dev *vc4 = to_vc4_dev(dev);
282 struct vc4_crtc *vc4_crtc = vc4->crtc[crtc_id];
283 struct drm_crtc *crtc = &vc4_crtc->base;
284 struct drm_crtc_state *state = crtc->state;
285
286 /* Helper routine in DRM core does all the work: */
287 return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc_id, max_error,
288 vblank_time, flags,
289 &state->adjusted_mode);
290}
291
149static void vc4_crtc_destroy(struct drm_crtc *crtc) 292static void vc4_crtc_destroy(struct drm_crtc *crtc)
150{ 293{
151 drm_crtc_cleanup(crtc); 294 drm_crtc_cleanup(crtc);
@@ -175,20 +318,22 @@ vc4_crtc_lut_load(struct drm_crtc *crtc)
175 HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_b[i]); 318 HVS_WRITE(SCALER_GAMDATA, vc4_crtc->lut_b[i]);
176} 319}
177 320
178static void 321static int
179vc4_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, 322vc4_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
180 uint32_t start, uint32_t size) 323 uint32_t size)
181{ 324{
182 struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); 325 struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
183 u32 i; 326 u32 i;
184 327
185 for (i = start; i < start + size; i++) { 328 for (i = 0; i < size; i++) {
186 vc4_crtc->lut_r[i] = r[i] >> 8; 329 vc4_crtc->lut_r[i] = r[i] >> 8;
187 vc4_crtc->lut_g[i] = g[i] >> 8; 330 vc4_crtc->lut_g[i] = g[i] >> 8;
188 vc4_crtc->lut_b[i] = b[i] >> 8; 331 vc4_crtc->lut_b[i] = b[i] >> 8;
189 } 332 }
190 333
191 vc4_crtc_lut_load(crtc); 334 vc4_crtc_lut_load(crtc);
335
336 return 0;
192} 337}
193 338
194static u32 vc4_get_fifo_full_level(u32 format) 339static u32 vc4_get_fifo_full_level(u32 format)
@@ -395,6 +540,7 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
395 struct vc4_dev *vc4 = to_vc4_dev(dev); 540 struct vc4_dev *vc4 = to_vc4_dev(dev);
396 struct drm_plane *plane; 541 struct drm_plane *plane;
397 unsigned long flags; 542 unsigned long flags;
543 const struct drm_plane_state *plane_state;
398 u32 dlist_count = 0; 544 u32 dlist_count = 0;
399 int ret; 545 int ret;
400 546
@@ -404,18 +550,8 @@ static int vc4_crtc_atomic_check(struct drm_crtc *crtc,
404 if (hweight32(state->connector_mask) > 1) 550 if (hweight32(state->connector_mask) > 1)
405 return -EINVAL; 551 return -EINVAL;
406 552
407 drm_atomic_crtc_state_for_each_plane(plane, state) { 553 drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, state)
408 struct drm_plane_state *plane_state =
409 state->state->plane_states[drm_plane_index(plane)];
410
411 /* plane might not have changed, in which case take
412 * current state:
413 */
414 if (!plane_state)
415 plane_state = plane->state;
416
417 dlist_count += vc4_plane_dlist_size(plane_state); 554 dlist_count += vc4_plane_dlist_size(plane_state);
418 }
419 555
420 dlist_count++; /* Account for SCALER_CTL0_END. */ 556 dlist_count++; /* Account for SCALER_CTL0_END. */
421 557
@@ -526,6 +662,7 @@ static irqreturn_t vc4_crtc_irq_handler(int irq, void *data)
526 irqreturn_t ret = IRQ_NONE; 662 irqreturn_t ret = IRQ_NONE;
527 663
528 if (stat & PV_INT_VFP_START) { 664 if (stat & PV_INT_VFP_START) {
665 vc4_crtc->t_vblank = ktime_get();
529 CRTC_WRITE(PV_INTSTAT, PV_INT_VFP_START); 666 CRTC_WRITE(PV_INTSTAT, PV_INT_VFP_START);
530 drm_crtc_handle_vblank(&vc4_crtc->base); 667 drm_crtc_handle_vblank(&vc4_crtc->base);
531 vc4_crtc_handle_page_flip(vc4_crtc); 668 vc4_crtc_handle_page_flip(vc4_crtc);
@@ -730,6 +867,22 @@ static void vc4_set_crtc_possible_masks(struct drm_device *drm,
730 } 867 }
731} 868}
732 869
870static void
871vc4_crtc_get_cob_allocation(struct vc4_crtc *vc4_crtc)
872{
873 struct drm_device *drm = vc4_crtc->base.dev;
874 struct vc4_dev *vc4 = to_vc4_dev(drm);
875 u32 dispbase = HVS_READ(SCALER_DISPBASEX(vc4_crtc->channel));
876 /* Top/base are supposed to be 4-pixel aligned, but the
877 * Raspberry Pi firmware fills the low bits (which are
878 * presumably ignored).
879 */
880 u32 top = VC4_GET_FIELD(dispbase, SCALER_DISPBASEX_TOP) & ~3;
881 u32 base = VC4_GET_FIELD(dispbase, SCALER_DISPBASEX_BASE) & ~3;
882
883 vc4_crtc->cob_size = top - base + 4;
884}
885
733static int vc4_crtc_bind(struct device *dev, struct device *master, void *data) 886static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
734{ 887{
735 struct platform_device *pdev = to_platform_device(dev); 888 struct platform_device *pdev = to_platform_device(dev);
@@ -806,6 +959,8 @@ static int vc4_crtc_bind(struct device *dev, struct device *master, void *data)
806 crtc->cursor = cursor_plane; 959 crtc->cursor = cursor_plane;
807 } 960 }
808 961
962 vc4_crtc_get_cob_allocation(vc4_crtc);
963
809 CRTC_WRITE(PV_INTEN, 0); 964 CRTC_WRITE(PV_INTEN, 0);
810 CRTC_WRITE(PV_INTSTAT, PV_INT_VFP_START); 965 CRTC_WRITE(PV_INTSTAT, PV_INT_VFP_START);
811 ret = devm_request_irq(dev, platform_get_irq(pdev, 0), 966 ret = devm_request_irq(dev, platform_get_irq(pdev, 0),
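
The scanout-position logic added above reduces to a small piece of arithmetic: the HVS composition line read from SCALER_DISPSTATX leads the pixelvalve by at most the COB FIFO depth, so subtracting fifo_lines approximates the real scanout line, and a reading at or below fifo_lines means the pixelvalve is still in vertical blanking. A minimal standalone sketch of that translation, with illustrative names rather than the driver's actual API:

#include <stdbool.h>

/* hvs_line: line currently being composed per SCALER_DISPSTATX.
 * fifo_lines: COB FIFO depth in scanlines (cob_size / hdisplay).
 * vdisplay/vtotal: active and total vertical lines of the mode.
 * A negative return value means the estimated pixelvalve position
 * lies inside vertical blanking.
 */
static int estimate_pv_scanline(int hvs_line, int fifo_lines,
                                int vdisplay, int vtotal, bool *in_vblank)
{
        if (hvs_line > fifo_lines) {
                /* Active scanout: HVS leads the PV by the FIFO depth. */
                *in_vblank = false;
                return hvs_line - fifo_lines - 1;
        }

        /* HVS is refilling the FIFO for a new frame, so the PV is in
         * vblank; report the start of vblank, as the driver does when
         * called from the vblank IRQ.
         */
        *in_vblank = true;
        return -(vtotal - vdisplay);
}
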
diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c
index 9817dbfa4ac3..275fedbdbd9e 100644
--- a/drivers/gpu/drm/vc4/vc4_dpi.c
+++ b/drivers/gpu/drm/vc4/vc4_dpi.c
@@ -208,14 +208,6 @@ static int vc4_dpi_connector_get_modes(struct drm_connector *connector)
208 return 0; 208 return 0;
209} 209}
210 210
211static struct drm_encoder *
212vc4_dpi_connector_best_encoder(struct drm_connector *connector)
213{
214 struct vc4_dpi_connector *dpi_connector =
215 to_vc4_dpi_connector(connector);
216 return dpi_connector->encoder;
217}
218
219static const struct drm_connector_funcs vc4_dpi_connector_funcs = { 211static const struct drm_connector_funcs vc4_dpi_connector_funcs = {
220 .dpms = drm_atomic_helper_connector_dpms, 212 .dpms = drm_atomic_helper_connector_dpms,
221 .detect = vc4_dpi_connector_detect, 213 .detect = vc4_dpi_connector_detect,
@@ -228,7 +220,6 @@ static const struct drm_connector_funcs vc4_dpi_connector_funcs = {
228 220
229static const struct drm_connector_helper_funcs vc4_dpi_connector_helper_funcs = { 221static const struct drm_connector_helper_funcs vc4_dpi_connector_helper_funcs = {
230 .get_modes = vc4_dpi_connector_get_modes, 222 .get_modes = vc4_dpi_connector_get_modes,
231 .best_encoder = vc4_dpi_connector_best_encoder,
232}; 223};
233 224
234static struct drm_connector *vc4_dpi_connector_init(struct drm_device *dev, 225static struct drm_connector *vc4_dpi_connector_init(struct drm_device *dev,
@@ -236,14 +227,12 @@ static struct drm_connector *vc4_dpi_connector_init(struct drm_device *dev,
236{ 227{
237 struct drm_connector *connector = NULL; 228 struct drm_connector *connector = NULL;
238 struct vc4_dpi_connector *dpi_connector; 229 struct vc4_dpi_connector *dpi_connector;
239 int ret = 0;
240 230
241 dpi_connector = devm_kzalloc(dev->dev, sizeof(*dpi_connector), 231 dpi_connector = devm_kzalloc(dev->dev, sizeof(*dpi_connector),
242 GFP_KERNEL); 232 GFP_KERNEL);
243 if (!dpi_connector) { 233 if (!dpi_connector)
244 ret = -ENOMEM; 234 return ERR_PTR(-ENOMEM);
245 goto fail; 235
246 }
247 connector = &dpi_connector->base; 236 connector = &dpi_connector->base;
248 237
249 dpi_connector->encoder = dpi->encoder; 238 dpi_connector->encoder = dpi->encoder;
@@ -260,12 +249,6 @@ static struct drm_connector *vc4_dpi_connector_init(struct drm_device *dev,
260 drm_mode_connector_attach_encoder(connector, dpi->encoder); 249 drm_mode_connector_attach_encoder(connector, dpi->encoder);
261 250
262 return connector; 251 return connector;
263
264 fail:
265 if (connector)
266 vc4_dpi_connector_destroy(connector);
267
268 return ERR_PTR(ret);
269} 252}
270 253
271static const struct drm_encoder_funcs vc4_dpi_encoder_funcs = { 254static const struct drm_encoder_funcs vc4_dpi_encoder_funcs = {
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 250ed7e3754c..8b42d31a7f0e 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -14,6 +14,7 @@
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/of_platform.h> 15#include <linux/of_platform.h>
16#include <linux/platform_device.h> 16#include <linux/platform_device.h>
17#include <linux/pm_runtime.h>
17#include "drm_fb_cma_helper.h" 18#include "drm_fb_cma_helper.h"
18 19
19#include "uapi/drm/vc4_drm.h" 20#include "uapi/drm/vc4_drm.h"
@@ -43,12 +44,54 @@ void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index)
43 return map; 44 return map;
44} 45}
45 46
47static int vc4_get_param_ioctl(struct drm_device *dev, void *data,
48 struct drm_file *file_priv)
49{
50 struct vc4_dev *vc4 = to_vc4_dev(dev);
51 struct drm_vc4_get_param *args = data;
52 int ret;
53
54 if (args->pad != 0)
55 return -EINVAL;
56
57 switch (args->param) {
58 case DRM_VC4_PARAM_V3D_IDENT0:
59 ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
60 if (ret)
61 return ret;
62 args->value = V3D_READ(V3D_IDENT0);
63 pm_runtime_put(&vc4->v3d->pdev->dev);
64 break;
65 case DRM_VC4_PARAM_V3D_IDENT1:
66 ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
67 if (ret)
68 return ret;
69 args->value = V3D_READ(V3D_IDENT1);
70 pm_runtime_put(&vc4->v3d->pdev->dev);
71 break;
72 case DRM_VC4_PARAM_V3D_IDENT2:
73 ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
74 if (ret)
75 return ret;
76 args->value = V3D_READ(V3D_IDENT2);
77 pm_runtime_put(&vc4->v3d->pdev->dev);
78 break;
79 case DRM_VC4_PARAM_SUPPORTS_BRANCHES:
80 args->value = true;
81 break;
82 default:
83 DRM_DEBUG("Unknown parameter %d\n", args->param);
84 return -EINVAL;
85 }
86
87 return 0;
88}
89
46static void vc4_lastclose(struct drm_device *dev) 90static void vc4_lastclose(struct drm_device *dev)
47{ 91{
48 struct vc4_dev *vc4 = to_vc4_dev(dev); 92 struct vc4_dev *vc4 = to_vc4_dev(dev);
49 93
50 if (vc4->fbdev) 94 drm_fbdev_cma_restore_mode(vc4->fbdev);
51 drm_fbdev_cma_restore_mode(vc4->fbdev);
52} 95}
53 96
54static const struct file_operations vc4_drm_fops = { 97static const struct file_operations vc4_drm_fops = {
@@ -74,6 +117,7 @@ static const struct drm_ioctl_desc vc4_drm_ioctls[] = {
74 DRM_IOCTL_DEF_DRV(VC4_CREATE_SHADER_BO, vc4_create_shader_bo_ioctl, DRM_RENDER_ALLOW), 117 DRM_IOCTL_DEF_DRV(VC4_CREATE_SHADER_BO, vc4_create_shader_bo_ioctl, DRM_RENDER_ALLOW),
75 DRM_IOCTL_DEF_DRV(VC4_GET_HANG_STATE, vc4_get_hang_state_ioctl, 118 DRM_IOCTL_DEF_DRV(VC4_GET_HANG_STATE, vc4_get_hang_state_ioctl,
76 DRM_ROOT_ONLY), 119 DRM_ROOT_ONLY),
120 DRM_IOCTL_DEF_DRV(VC4_GET_PARAM, vc4_get_param_ioctl, DRM_RENDER_ALLOW),
77}; 121};
78 122
79static struct drm_driver vc4_drm_driver = { 123static struct drm_driver vc4_drm_driver = {
@@ -92,6 +136,8 @@ static struct drm_driver vc4_drm_driver = {
92 .enable_vblank = vc4_enable_vblank, 136 .enable_vblank = vc4_enable_vblank,
93 .disable_vblank = vc4_disable_vblank, 137 .disable_vblank = vc4_disable_vblank,
94 .get_vblank_counter = drm_vblank_no_hw_counter, 138 .get_vblank_counter = drm_vblank_no_hw_counter,
139 .get_scanout_position = vc4_crtc_get_scanoutpos,
140 .get_vblank_timestamp = vc4_crtc_get_vblank_timestamp,
95 141
96#if defined(CONFIG_DEBUG_FS) 142#if defined(CONFIG_DEBUG_FS)
97 .debugfs_init = vc4_debugfs_init, 143 .debugfs_init = vc4_debugfs_init,
@@ -99,7 +145,7 @@ static struct drm_driver vc4_drm_driver = {
99#endif 145#endif
100 146
101 .gem_create_object = vc4_create_object, 147 .gem_create_object = vc4_create_object,
102 .gem_free_object = vc4_free_object, 148 .gem_free_object_unlocked = vc4_free_object,
103 .gem_vm_ops = &drm_gem_cma_vm_ops, 149 .gem_vm_ops = &drm_gem_cma_vm_ops,
104 150
105 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 151 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
@@ -176,7 +222,6 @@ static int vc4_drm_bind(struct device *dev)
176{ 222{
177 struct platform_device *pdev = to_platform_device(dev); 223 struct platform_device *pdev = to_platform_device(dev);
178 struct drm_device *drm; 224 struct drm_device *drm;
179 struct drm_connector *connector;
180 struct vc4_dev *vc4; 225 struct vc4_dev *vc4;
181 int ret = 0; 226 int ret = 0;
182 227
@@ -196,8 +241,6 @@ static int vc4_drm_bind(struct device *dev)
196 vc4_bo_cache_init(drm); 241 vc4_bo_cache_init(drm);
197 242
198 drm_mode_config_init(drm); 243 drm_mode_config_init(drm);
199 if (ret)
200 goto unref;
201 244
202 vc4_gem_init(drm); 245 vc4_gem_init(drm);
203 246
@@ -211,27 +254,14 @@ static int vc4_drm_bind(struct device *dev)
211 if (ret < 0) 254 if (ret < 0)
212 goto unbind_all; 255 goto unbind_all;
213 256
214 /* Connector registration has to occur after DRM device
215 * registration, because it creates sysfs entries based on the
216 * DRM device.
217 */
218 list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
219 ret = drm_connector_register(connector);
220 if (ret)
221 goto unregister;
222 }
223
224 vc4_kms_load(drm); 257 vc4_kms_load(drm);
225 258
226 return 0; 259 return 0;
227 260
228unregister:
229 drm_dev_unregister(drm);
230unbind_all: 261unbind_all:
231 component_unbind_all(dev, drm); 262 component_unbind_all(dev, drm);
232gem_destroy: 263gem_destroy:
233 vc4_gem_destroy(drm); 264 vc4_gem_destroy(drm);
234unref:
235 drm_dev_unref(drm); 265 drm_dev_unref(drm);
236 vc4_bo_cache_destroy(drm); 266 vc4_bo_cache_destroy(drm);
237 return ret; 267 return ret;
@@ -259,8 +289,8 @@ static const struct component_master_ops vc4_drm_ops = {
259static struct platform_driver *const component_drivers[] = { 289static struct platform_driver *const component_drivers[] = {
260 &vc4_hdmi_driver, 290 &vc4_hdmi_driver,
261 &vc4_dpi_driver, 291 &vc4_dpi_driver,
262 &vc4_crtc_driver,
263 &vc4_hvs_driver, 292 &vc4_hvs_driver,
293 &vc4_crtc_driver,
264 &vc4_v3d_driver, 294 &vc4_v3d_driver,
265}; 295};
266 296
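
The new VC4_GET_PARAM ioctl lets userspace probe the V3D identity registers and the SUPPORTS_BRANCHES capability without touching registers itself. A rough userspace sketch, assuming the uapi header added in this series exposes DRM_IOCTL_VC4_GET_PARAM and struct drm_vc4_get_param with the param/pad/value fields used by the handler above:

#include <stdbool.h>
#include <xf86drm.h>
#include "vc4_drm.h"  /* uapi header from this series; include path may differ */

/* Ask the kernel whether the shader validator accepts branch instructions. */
static int vc4_supports_branches(int drm_fd, bool *supported)
{
        struct drm_vc4_get_param p = {
                .param = DRM_VC4_PARAM_SUPPORTS_BRANCHES,
        };
        int ret = drmIoctl(drm_fd, DRM_IOCTL_VC4_GET_PARAM, &p);

        if (ret)
                return ret;

        *supported = p.value != 0;
        return 0;
}
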
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 37cac59401d7..489e3de0c050 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -355,6 +355,9 @@ struct vc4_validated_shader_info {
355 uint32_t uniforms_src_size; 355 uint32_t uniforms_src_size;
356 uint32_t num_texture_samples; 356 uint32_t num_texture_samples;
357 struct vc4_texture_sample_info *texture_samples; 357 struct vc4_texture_sample_info *texture_samples;
358
359 uint32_t num_uniform_addr_offsets;
360 uint32_t *uniform_addr_offsets;
358}; 361};
359 362
360/** 363/**
@@ -415,6 +418,13 @@ extern struct platform_driver vc4_crtc_driver;
415int vc4_enable_vblank(struct drm_device *dev, unsigned int crtc_id); 418int vc4_enable_vblank(struct drm_device *dev, unsigned int crtc_id);
416void vc4_disable_vblank(struct drm_device *dev, unsigned int crtc_id); 419void vc4_disable_vblank(struct drm_device *dev, unsigned int crtc_id);
417int vc4_crtc_debugfs_regs(struct seq_file *m, void *arg); 420int vc4_crtc_debugfs_regs(struct seq_file *m, void *arg);
421int vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
422 unsigned int flags, int *vpos, int *hpos,
423 ktime_t *stime, ktime_t *etime,
424 const struct drm_display_mode *mode);
425int vc4_crtc_get_vblank_timestamp(struct drm_device *dev, unsigned int crtc_id,
426 int *max_error, struct timeval *vblank_time,
427 unsigned flags);
418 428
419/* vc4_debugfs.c */ 429/* vc4_debugfs.c */
420int vc4_debugfs_init(struct drm_minor *minor); 430int vc4_debugfs_init(struct drm_minor *minor);
@@ -469,7 +479,7 @@ int vc4_kms_load(struct drm_device *dev);
469struct drm_plane *vc4_plane_init(struct drm_device *dev, 479struct drm_plane *vc4_plane_init(struct drm_device *dev,
470 enum drm_plane_type type); 480 enum drm_plane_type type);
471u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist); 481u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist);
472u32 vc4_plane_dlist_size(struct drm_plane_state *state); 482u32 vc4_plane_dlist_size(const struct drm_plane_state *state);
473void vc4_plane_async_set_fb(struct drm_plane *plane, 483void vc4_plane_async_set_fb(struct drm_plane *plane,
474 struct drm_framebuffer *fb); 484 struct drm_framebuffer *fb);
475 485
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 46899d6de675..6155e8aca1c6 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -53,10 +53,8 @@ vc4_free_hang_state(struct drm_device *dev, struct vc4_hang_state *state)
53{ 53{
54 unsigned int i; 54 unsigned int i;
55 55
56 mutex_lock(&dev->struct_mutex);
57 for (i = 0; i < state->user_state.bo_count; i++) 56 for (i = 0; i < state->user_state.bo_count; i++)
58 drm_gem_object_unreference(state->bo[i]); 57 drm_gem_object_unreference_unlocked(state->bo[i]);
59 mutex_unlock(&dev->struct_mutex);
60 58
61 kfree(state); 59 kfree(state);
62} 60}
@@ -687,11 +685,9 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
687 struct vc4_dev *vc4 = to_vc4_dev(dev); 685 struct vc4_dev *vc4 = to_vc4_dev(dev);
688 unsigned i; 686 unsigned i;
689 687
690 /* Need the struct lock for drm_gem_object_unreference(). */
691 mutex_lock(&dev->struct_mutex);
692 if (exec->bo) { 688 if (exec->bo) {
693 for (i = 0; i < exec->bo_count; i++) 689 for (i = 0; i < exec->bo_count; i++)
694 drm_gem_object_unreference(&exec->bo[i]->base); 690 drm_gem_object_unreference_unlocked(&exec->bo[i]->base);
695 kfree(exec->bo); 691 kfree(exec->bo);
696 } 692 }
697 693
@@ -699,9 +695,8 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
699 struct vc4_bo *bo = list_first_entry(&exec->unref_list, 695 struct vc4_bo *bo = list_first_entry(&exec->unref_list,
700 struct vc4_bo, unref_head); 696 struct vc4_bo, unref_head);
701 list_del(&bo->unref_head); 697 list_del(&bo->unref_head);
702 drm_gem_object_unreference(&bo->base.base); 698 drm_gem_object_unreference_unlocked(&bo->base.base);
703 } 699 }
704 mutex_unlock(&dev->struct_mutex);
705 700
706 mutex_lock(&vc4->power_lock); 701 mutex_lock(&vc4->power_lock);
707 if (--vc4->power_refcount == 0) 702 if (--vc4->power_refcount == 0)
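
These hunks are part of the lockless GEM BO freeing mentioned in the changelog: once the driver switches to the .gem_free_object_unlocked hook (see the vc4_drv.c hunk above), references can be dropped without holding dev->struct_mutex. A pattern sketch of the change, not a specific driver function:

#include <drm/drmP.h>

/* Old style: every unreference had to run under struct_mutex. */
static void put_bo_locked_style(struct drm_device *dev,
                                struct drm_gem_object *obj)
{
        mutex_lock(&dev->struct_mutex);
        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
}

/* New style: with gem_free_object_unlocked wired up, the unlocked
 * variant can be used from any context, no global lock required.
 */
static void put_bo_lockless_style(struct drm_gem_object *obj)
{
        drm_gem_object_unreference_unlocked(obj);
}
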
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index fd2644d231ff..4452f3631cac 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -208,14 +208,6 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
208 return ret; 208 return ret;
209} 209}
210 210
211static struct drm_encoder *
212vc4_hdmi_connector_best_encoder(struct drm_connector *connector)
213{
214 struct vc4_hdmi_connector *hdmi_connector =
215 to_vc4_hdmi_connector(connector);
216 return hdmi_connector->encoder;
217}
218
219static const struct drm_connector_funcs vc4_hdmi_connector_funcs = { 211static const struct drm_connector_funcs vc4_hdmi_connector_funcs = {
220 .dpms = drm_atomic_helper_connector_dpms, 212 .dpms = drm_atomic_helper_connector_dpms,
221 .detect = vc4_hdmi_connector_detect, 213 .detect = vc4_hdmi_connector_detect,
@@ -228,7 +220,6 @@ static const struct drm_connector_funcs vc4_hdmi_connector_funcs = {
228 220
229static const struct drm_connector_helper_funcs vc4_hdmi_connector_helper_funcs = { 221static const struct drm_connector_helper_funcs vc4_hdmi_connector_helper_funcs = {
230 .get_modes = vc4_hdmi_connector_get_modes, 222 .get_modes = vc4_hdmi_connector_get_modes,
231 .best_encoder = vc4_hdmi_connector_best_encoder,
232}; 223};
233 224
234static struct drm_connector *vc4_hdmi_connector_init(struct drm_device *dev, 225static struct drm_connector *vc4_hdmi_connector_init(struct drm_device *dev,
@@ -465,12 +456,6 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
465 if (IS_ERR(hdmi->hd_regs)) 456 if (IS_ERR(hdmi->hd_regs))
466 return PTR_ERR(hdmi->hd_regs); 457 return PTR_ERR(hdmi->hd_regs);
467 458
468 ddc_node = of_parse_phandle(dev->of_node, "ddc", 0);
469 if (!ddc_node) {
470 DRM_ERROR("Failed to find ddc node in device tree\n");
471 return -ENODEV;
472 }
473
474 hdmi->pixel_clock = devm_clk_get(dev, "pixel"); 459 hdmi->pixel_clock = devm_clk_get(dev, "pixel");
475 if (IS_ERR(hdmi->pixel_clock)) { 460 if (IS_ERR(hdmi->pixel_clock)) {
476 DRM_ERROR("Failed to get pixel clock\n"); 461 DRM_ERROR("Failed to get pixel clock\n");
@@ -482,7 +467,14 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data)
482 return PTR_ERR(hdmi->hsm_clock); 467 return PTR_ERR(hdmi->hsm_clock);
483 } 468 }
484 469
470 ddc_node = of_parse_phandle(dev->of_node, "ddc", 0);
471 if (!ddc_node) {
472 DRM_ERROR("Failed to find ddc node in device tree\n");
473 return -ENODEV;
474 }
475
485 hdmi->ddc = of_find_i2c_adapter_by_node(ddc_node); 476 hdmi->ddc = of_find_i2c_adapter_by_node(ddc_node);
477 of_node_put(ddc_node);
486 if (!hdmi->ddc) { 478 if (!hdmi->ddc) {
487 DRM_DEBUG("Failed to get ddc i2c adapter by node\n"); 479 DRM_DEBUG("Failed to get ddc i2c adapter by node\n");
488 return -EPROBE_DEFER; 480 return -EPROBE_DEFER;
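
Besides adding the missing of_node_put(), the hunk above moves the DDC phandle lookup after the clock lookups, so an early error return can no longer leak the reference taken by of_parse_phandle(). The general pattern, shown in isolation as a sketch (the error codes and property name mirror the driver; the helper itself is hypothetical):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/of.h>

/* Take the phandle reference as late as possible and drop it as soon
 * as the referenced resource has been resolved.
 */
static struct i2c_adapter *lookup_ddc(struct device *dev)
{
        struct device_node *np;
        struct i2c_adapter *adap;

        np = of_parse_phandle(dev->of_node, "ddc", 0);
        if (!np)
                return ERR_PTR(-ENODEV);

        adap = of_find_i2c_adapter_by_node(np);
        of_node_put(np);        /* the adapter holds its own reference */
        if (!adap)
                return ERR_PTR(-EPROBE_DEFER);

        return adap;
}
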
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index 861a623bc185..4ac894d993cd 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -26,8 +26,7 @@ static void vc4_output_poll_changed(struct drm_device *dev)
26{ 26{
27 struct vc4_dev *vc4 = to_vc4_dev(dev); 27 struct vc4_dev *vc4 = to_vc4_dev(dev);
28 28
29 if (vc4->fbdev) 29 drm_fbdev_cma_hotplug_event(vc4->fbdev);
30 drm_fbdev_cma_hotplug_event(vc4->fbdev);
31} 30}
32 31
33struct vc4_commit { 32struct vc4_commit {
@@ -111,6 +110,8 @@ static int vc4_atomic_commit(struct drm_device *dev,
111 int i; 110 int i;
112 uint64_t wait_seqno = 0; 111 uint64_t wait_seqno = 0;
113 struct vc4_commit *c; 112 struct vc4_commit *c;
113 struct drm_plane *plane;
114 struct drm_plane_state *new_state;
114 115
115 c = commit_init(state); 116 c = commit_init(state);
116 if (!c) 117 if (!c)
@@ -138,13 +139,7 @@ static int vc4_atomic_commit(struct drm_device *dev,
138 return ret; 139 return ret;
139 } 140 }
140 141
141 for (i = 0; i < dev->mode_config.num_total_plane; i++) { 142 for_each_plane_in_state(state, plane, new_state, i) {
142 struct drm_plane *plane = state->planes[i];
143 struct drm_plane_state *new_state = state->plane_states[i];
144
145 if (!plane)
146 continue;
147
148 if ((plane->state->fb != new_state->fb) && new_state->fb) { 143 if ((plane->state->fb != new_state->fb) && new_state->fb) {
149 struct drm_gem_cma_object *cma_bo = 144 struct drm_gem_cma_object *cma_bo =
150 drm_fb_cma_get_gem_obj(new_state->fb, 0); 145 drm_fb_cma_get_gem_obj(new_state->fb, 0);
@@ -160,7 +155,7 @@ static int vc4_atomic_commit(struct drm_device *dev,
160 * the software side now. 155 * the software side now.
161 */ 156 */
162 157
163 drm_atomic_helper_swap_state(dev, state); 158 drm_atomic_helper_swap_state(state, true);
164 159
165 /* 160 /*
166 * Everything below can be run asynchronously without the need to grab 161 * Everything below can be run asynchronously without the need to grab
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 4037b52fde31..29e4b400e25e 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -94,6 +94,14 @@ static const struct hvs_format {
94 .pixel_order = HVS_PIXEL_ORDER_ABGR, .has_alpha = true, 94 .pixel_order = HVS_PIXEL_ORDER_ABGR, .has_alpha = true,
95 }, 95 },
96 { 96 {
97 .drm = DRM_FORMAT_ABGR8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888,
98 .pixel_order = HVS_PIXEL_ORDER_ARGB, .has_alpha = true,
99 },
100 {
101 .drm = DRM_FORMAT_XBGR8888, .hvs = HVS_PIXEL_FORMAT_RGBA8888,
102 .pixel_order = HVS_PIXEL_ORDER_ARGB, .has_alpha = false,
103 },
104 {
97 .drm = DRM_FORMAT_RGB565, .hvs = HVS_PIXEL_FORMAT_RGB565, 105 .drm = DRM_FORMAT_RGB565, .hvs = HVS_PIXEL_FORMAT_RGB565,
98 .pixel_order = HVS_PIXEL_ORDER_XRGB, .has_alpha = false, 106 .pixel_order = HVS_PIXEL_ORDER_XRGB, .has_alpha = false,
99 }, 107 },
@@ -690,9 +698,10 @@ u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist)
690 return vc4_state->dlist_count; 698 return vc4_state->dlist_count;
691} 699}
692 700
693u32 vc4_plane_dlist_size(struct drm_plane_state *state) 701u32 vc4_plane_dlist_size(const struct drm_plane_state *state)
694{ 702{
695 struct vc4_plane_state *vc4_state = to_vc4_plane_state(state); 703 const struct vc4_plane_state *vc4_state =
704 container_of(state, typeof(*vc4_state), base);
696 705
697 return vc4_state->dlist_count; 706 return vc4_state->dlist_count;
698} 707}
diff --git a/drivers/gpu/drm/vc4/vc4_qpu_defines.h b/drivers/gpu/drm/vc4/vc4_qpu_defines.h
index d5c2f3c85ebb..f4e795a0d3f6 100644
--- a/drivers/gpu/drm/vc4/vc4_qpu_defines.h
+++ b/drivers/gpu/drm/vc4/vc4_qpu_defines.h
@@ -70,7 +70,7 @@ enum qpu_raddr {
70 QPU_R_ELEM_QPU = 38, 70 QPU_R_ELEM_QPU = 38,
71 QPU_R_NOP, 71 QPU_R_NOP,
72 QPU_R_XY_PIXEL_COORD = 41, 72 QPU_R_XY_PIXEL_COORD = 41,
73 QPU_R_MS_REV_FLAGS = 41, 73 QPU_R_MS_REV_FLAGS = 42,
74 QPU_R_VPM = 48, 74 QPU_R_VPM = 48,
75 QPU_R_VPM_LD_BUSY, 75 QPU_R_VPM_LD_BUSY,
76 QPU_R_VPM_LD_WAIT, 76 QPU_R_VPM_LD_WAIT,
@@ -230,6 +230,15 @@ enum qpu_unpack_r4 {
230#define QPU_COND_MUL_SHIFT 46 230#define QPU_COND_MUL_SHIFT 46
231#define QPU_COND_MUL_MASK QPU_MASK(48, 46) 231#define QPU_COND_MUL_MASK QPU_MASK(48, 46)
232 232
233#define QPU_BRANCH_COND_SHIFT 52
234#define QPU_BRANCH_COND_MASK QPU_MASK(55, 52)
235
236#define QPU_BRANCH_REL ((uint64_t)1 << 51)
237#define QPU_BRANCH_REG ((uint64_t)1 << 50)
238
239#define QPU_BRANCH_RADDR_A_SHIFT 45
240#define QPU_BRANCH_RADDR_A_MASK QPU_MASK(49, 45)
241
233#define QPU_SF ((uint64_t)1 << 45) 242#define QPU_SF ((uint64_t)1 << 45)
234 243
235#define QPU_WADDR_ADD_SHIFT 38 244#define QPU_WADDR_ADD_SHIFT 38
@@ -261,4 +270,10 @@ enum qpu_unpack_r4 {
261#define QPU_OP_ADD_SHIFT 24 270#define QPU_OP_ADD_SHIFT 24
262#define QPU_OP_ADD_MASK QPU_MASK(28, 24) 271#define QPU_OP_ADD_MASK QPU_MASK(28, 24)
263 272
273#define QPU_LOAD_IMM_SHIFT 0
274#define QPU_LOAD_IMM_MASK QPU_MASK(31, 0)
275
276#define QPU_BRANCH_TARGET_SHIFT 0
277#define QPU_BRANCH_TARGET_MASK QPU_MASK(31, 0)
278
264#endif /* VC4_QPU_DEFINES_H */ 279#endif /* VC4_QPU_DEFINES_H */
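
The new defines describe how a 64-bit QPU branch instruction is laid out: a 4-bit condition in bits 55:52, the REL and REG flags in bits 51 and 50, a 5-bit raddr_a in bits 49:45, and a signed 32-bit byte offset in the low word. A small decoding sketch using plain shifts, equivalent in effect to QPU_GET_FIELD on these masks (the struct itself is illustrative):

#include <stdbool.h>
#include <stdint.h>

struct qpu_branch_fields {
        uint32_t cond;          /* QPU_BRANCH_COND, bits 55:52 */
        bool rel;               /* QPU_BRANCH_REL, bit 51 */
        bool reg;               /* QPU_BRANCH_REG, bit 50 */
        uint32_t raddr_a;       /* QPU_BRANCH_RADDR_A, bits 49:45 */
        int32_t target;         /* QPU_BRANCH_TARGET, bits 31:0, byte offset */
};

static struct qpu_branch_fields qpu_decode_branch(uint64_t inst)
{
        struct qpu_branch_fields f = {
                .cond    = (uint32_t)((inst >> 52) & 0xf),
                .rel     = (inst >> 51) & 1,
                .reg     = (inst >> 50) & 1,
                .raddr_a = (uint32_t)((inst >> 45) & 0x1f),
                .target  = (int32_t)(inst & 0xffffffff),
        };

        return f;
}
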
diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h
index f99eece4cc97..160942a9180e 100644
--- a/drivers/gpu/drm/vc4/vc4_regs.h
+++ b/drivers/gpu/drm/vc4/vc4_regs.h
@@ -366,7 +366,6 @@
366# define SCALER_DISPBKGND_FILL BIT(24) 366# define SCALER_DISPBKGND_FILL BIT(24)
367 367
368#define SCALER_DISPSTAT0 0x00000048 368#define SCALER_DISPSTAT0 0x00000048
369#define SCALER_DISPBASE0 0x0000004c
370# define SCALER_DISPSTATX_MODE_MASK VC4_MASK(31, 30) 369# define SCALER_DISPSTATX_MODE_MASK VC4_MASK(31, 30)
371# define SCALER_DISPSTATX_MODE_SHIFT 30 370# define SCALER_DISPSTATX_MODE_SHIFT 30
372# define SCALER_DISPSTATX_MODE_DISABLED 0 371# define SCALER_DISPSTATX_MODE_DISABLED 0
@@ -375,6 +374,24 @@
375# define SCALER_DISPSTATX_MODE_EOF 3 374# define SCALER_DISPSTATX_MODE_EOF 3
376# define SCALER_DISPSTATX_FULL BIT(29) 375# define SCALER_DISPSTATX_FULL BIT(29)
377# define SCALER_DISPSTATX_EMPTY BIT(28) 376# define SCALER_DISPSTATX_EMPTY BIT(28)
377# define SCALER_DISPSTATX_FRAME_COUNT_MASK VC4_MASK(17, 12)
378# define SCALER_DISPSTATX_FRAME_COUNT_SHIFT 12
379# define SCALER_DISPSTATX_LINE_MASK VC4_MASK(11, 0)
380# define SCALER_DISPSTATX_LINE_SHIFT 0
381
382#define SCALER_DISPBASE0 0x0000004c
383/* Last pixel in the COB (display FIFO memory) allocated to this HVS
384 * channel. Must be 4-pixel aligned (and thus 4 pixels less than the
385 * next COB base).
386 */
387# define SCALER_DISPBASEX_TOP_MASK VC4_MASK(31, 16)
388# define SCALER_DISPBASEX_TOP_SHIFT 16
389/* First pixel in the COB (display FIFO memory) allocated to this HVS
390 * channel. Must be 4-pixel aligned.
391 */
392# define SCALER_DISPBASEX_BASE_MASK VC4_MASK(15, 0)
393# define SCALER_DISPBASEX_BASE_SHIFT 0
394
378#define SCALER_DISPCTRL1 0x00000050 395#define SCALER_DISPCTRL1 0x00000050
379#define SCALER_DISPBKGND1 0x00000054 396#define SCALER_DISPBKGND1 0x00000054
380#define SCALER_DISPBKGNDX(x) (SCALER_DISPBKGND0 + \ 397#define SCALER_DISPBKGNDX(x) (SCALER_DISPBKGND0 + \
@@ -385,6 +402,9 @@
385 (x) * (SCALER_DISPSTAT1 - \ 402 (x) * (SCALER_DISPSTAT1 - \
386 SCALER_DISPSTAT0)) 403 SCALER_DISPSTAT0))
387#define SCALER_DISPBASE1 0x0000005c 404#define SCALER_DISPBASE1 0x0000005c
405#define SCALER_DISPBASEX(x) (SCALER_DISPBASE0 + \
406 (x) * (SCALER_DISPBASE1 - \
407 SCALER_DISPBASE0))
388#define SCALER_DISPCTRL2 0x00000060 408#define SCALER_DISPCTRL2 0x00000060
389#define SCALER_DISPCTRLX(x) (SCALER_DISPCTRL0 + \ 409#define SCALER_DISPCTRLX(x) (SCALER_DISPCTRL0 + \
390 (x) * (SCALER_DISPCTRL1 - \ 410 (x) * (SCALER_DISPCTRL1 - \
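
The new SCALER_DISPBASEX fields are what vc4_crtc_get_cob_allocation() decodes: TOP and BASE are inclusive, 4-pixel-aligned pixel addresses into the COB, so the FIFO size for a channel is top - base + 4 pixels. A standalone sketch of that decode, with simplified stand-ins for VC4_GET_FIELD:

#include <stdint.h>

static inline uint32_t dispbase_top(uint32_t dispbase)
{
        return (dispbase >> 16) & 0xffff;       /* SCALER_DISPBASEX_TOP, bits 31:16 */
}

static inline uint32_t dispbase_base(uint32_t dispbase)
{
        return dispbase & 0xffff;               /* SCALER_DISPBASEX_BASE, bits 15:0 */
}

/* COB pixels allocated to one HVS channel: TOP and BASE are inclusive
 * and 4-pixel aligned, and firmware may fill the low bits, so mask them
 * off before computing the size.
 */
static uint32_t cob_size_pixels(uint32_t dispbase)
{
        uint32_t top = dispbase_top(dispbase) & ~3u;
        uint32_t base = dispbase_base(dispbase) & ~3u;

        return top - base + 4;
}
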
diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c
index 24c2c746e8f3..9ce1d0adf882 100644
--- a/drivers/gpu/drm/vc4/vc4_validate.c
+++ b/drivers/gpu/drm/vc4/vc4_validate.c
@@ -802,7 +802,7 @@ validate_gl_shader_rec(struct drm_device *dev,
802 uint32_t src_offset = *(uint32_t *)(pkt_u + o); 802 uint32_t src_offset = *(uint32_t *)(pkt_u + o);
803 uint32_t *texture_handles_u; 803 uint32_t *texture_handles_u;
804 void *uniform_data_u; 804 void *uniform_data_u;
805 uint32_t tex; 805 uint32_t tex, uni;
806 806
807 *(uint32_t *)(pkt_v + o) = bo[i]->paddr + src_offset; 807 *(uint32_t *)(pkt_v + o) = bo[i]->paddr + src_offset;
808 808
@@ -840,6 +840,17 @@ validate_gl_shader_rec(struct drm_device *dev,
840 } 840 }
841 } 841 }
842 842
843 /* Fill in the uniform slots that need this shader's
844 * start-of-uniforms address (used for resetting the uniform
845 * stream in the presence of control flow).
846 */
847 for (uni = 0;
848 uni < validated_shader->num_uniform_addr_offsets;
849 uni++) {
850 uint32_t o = validated_shader->uniform_addr_offsets[uni];
851 ((uint32_t *)exec->uniforms_v)[o] = exec->uniforms_p;
852 }
853
843 *(uint32_t *)(pkt_v + o + 4) = exec->uniforms_p; 854 *(uint32_t *)(pkt_v + o + 4) = exec->uniforms_p;
844 855
845 exec->uniforms_u += validated_shader->uniforms_src_size; 856 exec->uniforms_u += validated_shader->uniforms_src_size;
diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
index f67124b4c534..46527e989ce3 100644
--- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c
+++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c
@@ -39,7 +39,17 @@
39#include "vc4_drv.h" 39#include "vc4_drv.h"
40#include "vc4_qpu_defines.h" 40#include "vc4_qpu_defines.h"
41 41
42#define LIVE_REG_COUNT (32 + 32 + 4)
43
42struct vc4_shader_validation_state { 44struct vc4_shader_validation_state {
45 /* Current IP being validated. */
46 uint32_t ip;
47
48 /* IP at the end of the BO, do not read shader[max_ip] */
49 uint32_t max_ip;
50
51 uint64_t *shader;
52
43 struct vc4_texture_sample_info tmu_setup[2]; 53 struct vc4_texture_sample_info tmu_setup[2];
44 int tmu_write_count[2]; 54 int tmu_write_count[2];
45 55
@@ -49,8 +59,30 @@ struct vc4_shader_validation_state {
49 * 59 *
50 * This is used for the validation of direct address memory reads. 60 * This is used for the validation of direct address memory reads.
51 */ 61 */
52 uint32_t live_min_clamp_offsets[32 + 32 + 4]; 62 uint32_t live_min_clamp_offsets[LIVE_REG_COUNT];
53 bool live_max_clamp_regs[32 + 32 + 4]; 63 bool live_max_clamp_regs[LIVE_REG_COUNT];
64 uint32_t live_immediates[LIVE_REG_COUNT];
65
66 /* Bitfield of which IPs are used as branch targets.
67 *
68 * Used for validation that the uniform stream is updated at the right
69 * points and clearing the texturing/clamping state.
70 */
71 unsigned long *branch_targets;
72
73 /* Set when entering a basic block, and cleared when the uniform
74 * address update is found. This is used to make sure that we don't
75 * read uniforms when the address is undefined.
76 */
77 bool needs_uniform_address_update;
78
79 /* Set when we find a backwards branch. If the branch is backwards,
 80 * the target is probably doing an address reset to read uniforms,
81 * and so we need to be sure that a uniforms address is present in the
82 * stream, even if the shader didn't need to read uniforms in later
83 * basic blocks.
84 */
85 bool needs_uniform_address_for_loop;
54}; 86};
55 87
56static uint32_t 88static uint32_t
@@ -129,11 +161,11 @@ record_texture_sample(struct vc4_validated_shader_info *validated_shader,
129} 161}
130 162
131static bool 163static bool
132check_tmu_write(uint64_t inst, 164check_tmu_write(struct vc4_validated_shader_info *validated_shader,
133 struct vc4_validated_shader_info *validated_shader,
134 struct vc4_shader_validation_state *validation_state, 165 struct vc4_shader_validation_state *validation_state,
135 bool is_mul) 166 bool is_mul)
136{ 167{
168 uint64_t inst = validation_state->shader[validation_state->ip];
137 uint32_t waddr = (is_mul ? 169 uint32_t waddr = (is_mul ?
138 QPU_GET_FIELD(inst, QPU_WADDR_MUL) : 170 QPU_GET_FIELD(inst, QPU_WADDR_MUL) :
139 QPU_GET_FIELD(inst, QPU_WADDR_ADD)); 171 QPU_GET_FIELD(inst, QPU_WADDR_ADD));
@@ -162,7 +194,7 @@ check_tmu_write(uint64_t inst,
162 return false; 194 return false;
163 } 195 }
164 196
165 /* We assert that the the clamped address is the first 197 /* We assert that the clamped address is the first
166 * argument, and the UBO base address is the second argument. 198 * argument, and the UBO base address is the second argument.
167 * This is arbitrary, but simpler than supporting flipping the 199 * This is arbitrary, but simpler than supporting flipping the
168 * two either way. 200 * two either way.
@@ -212,8 +244,14 @@ check_tmu_write(uint64_t inst,
212 /* Since direct uses a RADDR uniform reference, it will get counted in 244 /* Since direct uses a RADDR uniform reference, it will get counted in
213 * check_instruction_reads() 245 * check_instruction_reads()
214 */ 246 */
215 if (!is_direct) 247 if (!is_direct) {
248 if (validation_state->needs_uniform_address_update) {
249 DRM_ERROR("Texturing with undefined uniform address\n");
250 return false;
251 }
252
216 validated_shader->uniforms_size += 4; 253 validated_shader->uniforms_size += 4;
254 }
217 255
218 if (submit) { 256 if (submit) {
219 if (!record_texture_sample(validated_shader, 257 if (!record_texture_sample(validated_shader,
@@ -227,23 +265,138 @@ check_tmu_write(uint64_t inst,
227 return true; 265 return true;
228} 266}
229 267
268static bool require_uniform_address_uniform(struct vc4_validated_shader_info *validated_shader)
269{
270 uint32_t o = validated_shader->num_uniform_addr_offsets;
271 uint32_t num_uniforms = validated_shader->uniforms_size / 4;
272
273 validated_shader->uniform_addr_offsets =
274 krealloc(validated_shader->uniform_addr_offsets,
275 (o + 1) *
276 sizeof(*validated_shader->uniform_addr_offsets),
277 GFP_KERNEL);
278 if (!validated_shader->uniform_addr_offsets)
279 return false;
280
281 validated_shader->uniform_addr_offsets[o] = num_uniforms;
282 validated_shader->num_uniform_addr_offsets++;
283
284 return true;
285}
286
230static bool 287static bool
231check_reg_write(uint64_t inst, 288validate_uniform_address_write(struct vc4_validated_shader_info *validated_shader,
232 struct vc4_validated_shader_info *validated_shader, 289 struct vc4_shader_validation_state *validation_state,
290 bool is_mul)
291{
292 uint64_t inst = validation_state->shader[validation_state->ip];
293 u32 add_b = QPU_GET_FIELD(inst, QPU_ADD_B);
294 u32 raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
295 u32 raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
296 u32 add_lri = raddr_add_a_to_live_reg_index(inst);
297 /* We want our reset to be pointing at whatever uniform follows the
298 * uniforms base address.
299 */
300 u32 expected_offset = validated_shader->uniforms_size + 4;
301
302 /* We only support absolute uniform address changes, and we
303 * require that they be in the current basic block before any
304 * of its uniform reads.
305 *
306 * One could potentially emit more efficient QPU code, by
307 * noticing that (say) an if statement does uniform control
308 * flow for all threads and that the if reads the same number
309 * of uniforms on each side. However, this scheme is easy to
310 * validate so it's all we allow for now.
311 */
312
313 if (QPU_GET_FIELD(inst, QPU_SIG) != QPU_SIG_NONE) {
314 DRM_ERROR("uniforms address change must be "
315 "normal math\n");
316 return false;
317 }
318
319 if (is_mul || QPU_GET_FIELD(inst, QPU_OP_ADD) != QPU_A_ADD) {
320 DRM_ERROR("Uniform address reset must be an ADD.\n");
321 return false;
322 }
323
324 if (QPU_GET_FIELD(inst, QPU_COND_ADD) != QPU_COND_ALWAYS) {
325 DRM_ERROR("Uniform address reset must be unconditional.\n");
326 return false;
327 }
328
329 if (QPU_GET_FIELD(inst, QPU_PACK) != QPU_PACK_A_NOP &&
330 !(inst & QPU_PM)) {
331 DRM_ERROR("No packing allowed on uniforms reset\n");
332 return false;
333 }
334
335 if (add_lri == -1) {
336 DRM_ERROR("First argument of uniform address write must be "
337 "an immediate value.\n");
338 return false;
339 }
340
341 if (validation_state->live_immediates[add_lri] != expected_offset) {
342 DRM_ERROR("Resetting uniforms with offset %db instead of %db\n",
343 validation_state->live_immediates[add_lri],
344 expected_offset);
345 return false;
346 }
347
348 if (!(add_b == QPU_MUX_A && raddr_a == QPU_R_UNIF) &&
349 !(add_b == QPU_MUX_B && raddr_b == QPU_R_UNIF)) {
350 DRM_ERROR("Second argument of uniform address write must be "
351 "a uniform.\n");
352 return false;
353 }
354
355 validation_state->needs_uniform_address_update = false;
356 validation_state->needs_uniform_address_for_loop = false;
357 return require_uniform_address_uniform(validated_shader);
358}
359
360static bool
361check_reg_write(struct vc4_validated_shader_info *validated_shader,
233 struct vc4_shader_validation_state *validation_state, 362 struct vc4_shader_validation_state *validation_state,
234 bool is_mul) 363 bool is_mul)
235{ 364{
365 uint64_t inst = validation_state->shader[validation_state->ip];
236 uint32_t waddr = (is_mul ? 366 uint32_t waddr = (is_mul ?
237 QPU_GET_FIELD(inst, QPU_WADDR_MUL) : 367 QPU_GET_FIELD(inst, QPU_WADDR_MUL) :
238 QPU_GET_FIELD(inst, QPU_WADDR_ADD)); 368 QPU_GET_FIELD(inst, QPU_WADDR_ADD));
369 uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
370 bool ws = inst & QPU_WS;
371 bool is_b = is_mul ^ ws;
372 u32 lri = waddr_to_live_reg_index(waddr, is_b);
373
374 if (lri != -1) {
375 uint32_t cond_add = QPU_GET_FIELD(inst, QPU_COND_ADD);
376 uint32_t cond_mul = QPU_GET_FIELD(inst, QPU_COND_MUL);
377
378 if (sig == QPU_SIG_LOAD_IMM &&
379 QPU_GET_FIELD(inst, QPU_PACK) == QPU_PACK_A_NOP &&
380 ((is_mul && cond_mul == QPU_COND_ALWAYS) ||
381 (!is_mul && cond_add == QPU_COND_ALWAYS))) {
382 validation_state->live_immediates[lri] =
383 QPU_GET_FIELD(inst, QPU_LOAD_IMM);
384 } else {
385 validation_state->live_immediates[lri] = ~0;
386 }
387 }
239 388
240 switch (waddr) { 389 switch (waddr) {
241 case QPU_W_UNIFORMS_ADDRESS: 390 case QPU_W_UNIFORMS_ADDRESS:
242 /* XXX: We'll probably need to support this for reladdr, but 391 if (is_b) {
243 * it's definitely a security-related one. 392 DRM_ERROR("relative uniforms address change "
244 */ 393 "unsupported\n");
245 DRM_ERROR("uniforms address load unsupported\n"); 394 return false;
246 return false; 395 }
396
397 return validate_uniform_address_write(validated_shader,
398 validation_state,
399 is_mul);
247 400
248 case QPU_W_TLB_COLOR_MS: 401 case QPU_W_TLB_COLOR_MS:
249 case QPU_W_TLB_COLOR_ALL: 402 case QPU_W_TLB_COLOR_ALL:
@@ -261,7 +414,7 @@ check_reg_write(uint64_t inst,
261 case QPU_W_TMU1_T: 414 case QPU_W_TMU1_T:
262 case QPU_W_TMU1_R: 415 case QPU_W_TMU1_R:
263 case QPU_W_TMU1_B: 416 case QPU_W_TMU1_B:
264 return check_tmu_write(inst, validated_shader, validation_state, 417 return check_tmu_write(validated_shader, validation_state,
265 is_mul); 418 is_mul);
266 419
267 case QPU_W_HOST_INT: 420 case QPU_W_HOST_INT:
@@ -294,10 +447,10 @@ check_reg_write(uint64_t inst,
294} 447}
295 448
296static void 449static void
297track_live_clamps(uint64_t inst, 450track_live_clamps(struct vc4_validated_shader_info *validated_shader,
298 struct vc4_validated_shader_info *validated_shader,
299 struct vc4_shader_validation_state *validation_state) 451 struct vc4_shader_validation_state *validation_state)
300{ 452{
453 uint64_t inst = validation_state->shader[validation_state->ip];
301 uint32_t op_add = QPU_GET_FIELD(inst, QPU_OP_ADD); 454 uint32_t op_add = QPU_GET_FIELD(inst, QPU_OP_ADD);
302 uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD); 455 uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
303 uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL); 456 uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
@@ -369,10 +522,10 @@ track_live_clamps(uint64_t inst,
369} 522}
370 523
371static bool 524static bool
372check_instruction_writes(uint64_t inst, 525check_instruction_writes(struct vc4_validated_shader_info *validated_shader,
373 struct vc4_validated_shader_info *validated_shader,
374 struct vc4_shader_validation_state *validation_state) 526 struct vc4_shader_validation_state *validation_state)
375{ 527{
528 uint64_t inst = validation_state->shader[validation_state->ip];
376 uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD); 529 uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
377 uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL); 530 uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
378 bool ok; 531 bool ok;
@@ -382,20 +535,44 @@ check_instruction_writes(uint64_t inst,
382 return false; 535 return false;
383 } 536 }
384 537
385 ok = (check_reg_write(inst, validated_shader, validation_state, 538 ok = (check_reg_write(validated_shader, validation_state, false) &&
386 false) && 539 check_reg_write(validated_shader, validation_state, true));
387 check_reg_write(inst, validated_shader, validation_state,
388 true));
389 540
390 track_live_clamps(inst, validated_shader, validation_state); 541 track_live_clamps(validated_shader, validation_state);
391 542
392 return ok; 543 return ok;
393} 544}
394 545
395static bool 546static bool
396check_instruction_reads(uint64_t inst, 547check_branch(uint64_t inst,
397 struct vc4_validated_shader_info *validated_shader) 548 struct vc4_validated_shader_info *validated_shader,
549 struct vc4_shader_validation_state *validation_state,
550 int ip)
551{
552 int32_t branch_imm = QPU_GET_FIELD(inst, QPU_BRANCH_TARGET);
553 uint32_t waddr_add = QPU_GET_FIELD(inst, QPU_WADDR_ADD);
554 uint32_t waddr_mul = QPU_GET_FIELD(inst, QPU_WADDR_MUL);
555
556 if ((int)branch_imm < 0)
557 validation_state->needs_uniform_address_for_loop = true;
558
559 /* We don't want to have to worry about validation of this, and
560 * there's no need for it.
561 */
562 if (waddr_add != QPU_W_NOP || waddr_mul != QPU_W_NOP) {
563 DRM_ERROR("branch instruction at %d wrote a register.\n",
564 validation_state->ip);
565 return false;
566 }
567
568 return true;
569}
570
571static bool
572check_instruction_reads(struct vc4_validated_shader_info *validated_shader,
573 struct vc4_shader_validation_state *validation_state)
398{ 574{
575 uint64_t inst = validation_state->shader[validation_state->ip];
399 uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A); 576 uint32_t raddr_a = QPU_GET_FIELD(inst, QPU_RADDR_A);
400 uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B); 577 uint32_t raddr_b = QPU_GET_FIELD(inst, QPU_RADDR_B);
401 uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG); 578 uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
@@ -407,40 +584,204 @@ check_instruction_reads(uint64_t inst,
407 * already be OOM. 584 * already be OOM.
408 */ 585 */
409 validated_shader->uniforms_size += 4; 586 validated_shader->uniforms_size += 4;
587
588 if (validation_state->needs_uniform_address_update) {
589 DRM_ERROR("Uniform read with undefined uniform "
590 "address\n");
591 return false;
592 }
593 }
594
595 return true;
596}
597
598/* Make sure that all branches are absolute and point within the shader, and
599 * note their targets for later.
600 */
601static bool
602vc4_validate_branches(struct vc4_shader_validation_state *validation_state)
603{
604 uint32_t max_branch_target = 0;
605 bool found_shader_end = false;
606 int ip;
607 int shader_end_ip = 0;
608 int last_branch = -2;
609
610 for (ip = 0; ip < validation_state->max_ip; ip++) {
611 uint64_t inst = validation_state->shader[ip];
612 int32_t branch_imm = QPU_GET_FIELD(inst, QPU_BRANCH_TARGET);
613 uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
614 uint32_t after_delay_ip = ip + 4;
615 uint32_t branch_target_ip;
616
617 if (sig == QPU_SIG_PROG_END) {
618 shader_end_ip = ip;
619 found_shader_end = true;
620 continue;
621 }
622
623 if (sig != QPU_SIG_BRANCH)
624 continue;
625
626 if (ip - last_branch < 4) {
627 DRM_ERROR("Branch at %d during delay slots\n", ip);
628 return false;
629 }
630 last_branch = ip;
631
632 if (inst & QPU_BRANCH_REG) {
633 DRM_ERROR("branching from register relative "
634 "not supported\n");
635 return false;
636 }
637
638 if (!(inst & QPU_BRANCH_REL)) {
639 DRM_ERROR("relative branching required\n");
640 return false;
641 }
642
643 /* The actual branch target is the instruction after the delay
644 * slots, plus whatever byte offset is in the low 32 bits of
645 * the instruction. Make sure we're not branching beyond the
646 * end of the shader object.
647 */
648 if (branch_imm % sizeof(inst) != 0) {
649 DRM_ERROR("branch target not aligned\n");
650 return false;
651 }
652
653 branch_target_ip = after_delay_ip + (branch_imm >> 3);
654 if (branch_target_ip >= validation_state->max_ip) {
655 DRM_ERROR("Branch at %d outside of shader (ip %d/%d)\n",
656 ip, branch_target_ip,
657 validation_state->max_ip);
658 return false;
659 }
660 set_bit(branch_target_ip, validation_state->branch_targets);
661
662 /* Make sure that the non-branching path is also not outside
663 * the shader.
664 */
665 if (after_delay_ip >= validation_state->max_ip) {
666 DRM_ERROR("Branch at %d continues past shader end "
667 "(%d/%d)\n",
668 ip, after_delay_ip, validation_state->max_ip);
669 return false;
670 }
671 set_bit(after_delay_ip, validation_state->branch_targets);
672 max_branch_target = max(max_branch_target, after_delay_ip);
673
674 /* There are two delay slots after program end is signaled
675 * that are still executed, then we're finished.
676 */
677 if (found_shader_end && ip == shader_end_ip + 2)
678 break;
679 }
680
681 if (max_branch_target > shader_end_ip) {
682 DRM_ERROR("Branch landed after QPU_SIG_PROG_END");
683 return false;
410 } 684 }
411 685
412 return true; 686 return true;
413} 687}
414 688
689/* Resets any known state for the shader, used when we may be branched to from
690 * multiple locations in the program (or at shader start).
691 */
692static void
693reset_validation_state(struct vc4_shader_validation_state *validation_state)
694{
695 int i;
696
697 for (i = 0; i < 8; i++)
698 validation_state->tmu_setup[i / 4].p_offset[i % 4] = ~0;
699
700 for (i = 0; i < LIVE_REG_COUNT; i++) {
701 validation_state->live_min_clamp_offsets[i] = ~0;
702 validation_state->live_max_clamp_regs[i] = false;
703 validation_state->live_immediates[i] = ~0;
704 }
705}
706
707static bool
708texturing_in_progress(struct vc4_shader_validation_state *validation_state)
709{
710 return (validation_state->tmu_write_count[0] != 0 ||
711 validation_state->tmu_write_count[1] != 0);
712}
713
714static bool
715vc4_handle_branch_target(struct vc4_shader_validation_state *validation_state)
716{
717 uint32_t ip = validation_state->ip;
718
719 if (!test_bit(ip, validation_state->branch_targets))
720 return true;
721
722 if (texturing_in_progress(validation_state)) {
723 DRM_ERROR("Branch target landed during TMU setup\n");
724 return false;
725 }
726
727 /* Reset our live values tracking, since this instruction may have
728 * multiple predecessors.
729 *
730 * One could potentially do analysis to determine that, for
731 * example, all predecessors have a live max clamp in the same
732 * register, but we don't bother with that.
733 */
734 reset_validation_state(validation_state);
735
736 /* Since we've entered a basic block from potentially multiple
737 * predecessors, we need the uniforms address to be updated before any
 738 * uniforms are read. We require that after any branch point, the next
739 * uniform to be loaded is a uniform address offset. That uniform's
740 * offset will be marked by the uniform address register write
 741 * validation, or a one-off end-of-program check.
742 */
743 validation_state->needs_uniform_address_update = true;
744
745 return true;
746}
747
415struct vc4_validated_shader_info * 748struct vc4_validated_shader_info *
416vc4_validate_shader(struct drm_gem_cma_object *shader_obj) 749vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
417{ 750{
418 bool found_shader_end = false; 751 bool found_shader_end = false;
419 int shader_end_ip = 0; 752 int shader_end_ip = 0;
420 uint32_t ip, max_ip; 753 uint32_t ip;
421 uint64_t *shader; 754 struct vc4_validated_shader_info *validated_shader = NULL;
422 struct vc4_validated_shader_info *validated_shader;
423 struct vc4_shader_validation_state validation_state; 755 struct vc4_shader_validation_state validation_state;
424 int i;
425 756
426 memset(&validation_state, 0, sizeof(validation_state)); 757 memset(&validation_state, 0, sizeof(validation_state));
758 validation_state.shader = shader_obj->vaddr;
759 validation_state.max_ip = shader_obj->base.size / sizeof(uint64_t);
427 760
428 for (i = 0; i < 8; i++) 761 reset_validation_state(&validation_state);
429 validation_state.tmu_setup[i / 4].p_offset[i % 4] = ~0;
430 for (i = 0; i < ARRAY_SIZE(validation_state.live_min_clamp_offsets); i++)
431 validation_state.live_min_clamp_offsets[i] = ~0;
432 762
433 shader = shader_obj->vaddr; 763 validation_state.branch_targets =
434 max_ip = shader_obj->base.size / sizeof(uint64_t); 764 kcalloc(BITS_TO_LONGS(validation_state.max_ip),
765 sizeof(unsigned long), GFP_KERNEL);
766 if (!validation_state.branch_targets)
767 goto fail;
435 768
436 validated_shader = kcalloc(1, sizeof(*validated_shader), GFP_KERNEL); 769 validated_shader = kcalloc(1, sizeof(*validated_shader), GFP_KERNEL);
437 if (!validated_shader) 770 if (!validated_shader)
438 return NULL; 771 goto fail;
772
773 if (!vc4_validate_branches(&validation_state))
774 goto fail;
439 775
440 for (ip = 0; ip < max_ip; ip++) { 776 for (ip = 0; ip < validation_state.max_ip; ip++) {
441 uint64_t inst = shader[ip]; 777 uint64_t inst = validation_state.shader[ip];
442 uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG); 778 uint32_t sig = QPU_GET_FIELD(inst, QPU_SIG);
443 779
780 validation_state.ip = ip;
781
782 if (!vc4_handle_branch_target(&validation_state))
783 goto fail;
784
444 switch (sig) { 785 switch (sig) {
445 case QPU_SIG_NONE: 786 case QPU_SIG_NONE:
446 case QPU_SIG_WAIT_FOR_SCOREBOARD: 787 case QPU_SIG_WAIT_FOR_SCOREBOARD:
@@ -450,13 +791,14 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
450 case QPU_SIG_LOAD_TMU1: 791 case QPU_SIG_LOAD_TMU1:
451 case QPU_SIG_PROG_END: 792 case QPU_SIG_PROG_END:
452 case QPU_SIG_SMALL_IMM: 793 case QPU_SIG_SMALL_IMM:
453 if (!check_instruction_writes(inst, validated_shader, 794 if (!check_instruction_writes(validated_shader,
454 &validation_state)) { 795 &validation_state)) {
455 DRM_ERROR("Bad write at ip %d\n", ip); 796 DRM_ERROR("Bad write at ip %d\n", ip);
456 goto fail; 797 goto fail;
457 } 798 }
458 799
459 if (!check_instruction_reads(inst, validated_shader)) 800 if (!check_instruction_reads(validated_shader,
801 &validation_state))
460 goto fail; 802 goto fail;
461 803
462 if (sig == QPU_SIG_PROG_END) { 804 if (sig == QPU_SIG_PROG_END) {
@@ -467,13 +809,18 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
467 break; 809 break;
468 810
469 case QPU_SIG_LOAD_IMM: 811 case QPU_SIG_LOAD_IMM:
470 if (!check_instruction_writes(inst, validated_shader, 812 if (!check_instruction_writes(validated_shader,
471 &validation_state)) { 813 &validation_state)) {
472 DRM_ERROR("Bad LOAD_IMM write at ip %d\n", ip); 814 DRM_ERROR("Bad LOAD_IMM write at ip %d\n", ip);
473 goto fail; 815 goto fail;
474 } 816 }
475 break; 817 break;
476 818
819 case QPU_SIG_BRANCH:
820 if (!check_branch(inst, validated_shader,
821 &validation_state, ip))
822 goto fail;
823 break;
477 default: 824 default:
478 DRM_ERROR("Unsupported QPU signal %d at " 825 DRM_ERROR("Unsupported QPU signal %d at "
479 "instruction %d\n", sig, ip); 826 "instruction %d\n", sig, ip);
@@ -487,13 +834,28 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
487 break; 834 break;
488 } 835 }
489 836
490 if (ip == max_ip) { 837 if (ip == validation_state.max_ip) {
491 DRM_ERROR("shader failed to terminate before " 838 DRM_ERROR("shader failed to terminate before "
492 "shader BO end at %zd\n", 839 "shader BO end at %zd\n",
493 shader_obj->base.size); 840 shader_obj->base.size);
494 goto fail; 841 goto fail;
495 } 842 }
496 843
844 /* If we did a backwards branch and we haven't emitted a uniforms
845 * reset since then, we still need the uniforms stream to have the
846 * uniforms address available so that the backwards branch can do its
847 * uniforms reset.
848 *
849 * We could potentially prove that the backwards branch doesn't
850 * contain any uses of uniforms until program exit, but that doesn't
851 * seem to be worth the trouble.
852 */
853 if (validation_state.needs_uniform_address_for_loop) {
854 if (!require_uniform_address_uniform(validated_shader))
855 goto fail;
856 validated_shader->uniforms_size += 4;
857 }
858
497 /* Again, no chance of integer overflow here because the worst case 859 /* Again, no chance of integer overflow here because the worst case
498 * scenario is 8 bytes of uniforms plus handles per 8-byte 860 * scenario is 8 bytes of uniforms plus handles per 8-byte
499 * instruction. 861 * instruction.
@@ -502,9 +864,12 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj)
502 (validated_shader->uniforms_size + 864 (validated_shader->uniforms_size +
503 4 * validated_shader->num_texture_samples); 865 4 * validated_shader->num_texture_samples);
504 866
867 kfree(validation_state.branch_targets);
868
505 return validated_shader; 869 return validated_shader;
506 870
507fail: 871fail:
872 kfree(validation_state.branch_targets);
508 if (validated_shader) { 873 if (validated_shader) {
509 kfree(validated_shader->texture_samples); 874 kfree(validated_shader->texture_samples);
510 kfree(validated_shader); 875 kfree(validated_shader);
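The validator above sizes a branch-target bitmap from the shader BO (one bit per 64-bit instruction, BITS_TO_LONGS(max_ip) longs), marks destinations in vc4_validate_branches(), and tests each ip as the main loop reaches it. A minimal userspace sketch of that bookkeeping, with illustrative helper names rather than the kernel's bitops:

/* Standalone illustration only; not driver code. */
#include <limits.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static void set_bit_ul(unsigned long *map, unsigned int bit)
{
	map[bit / BITS_PER_LONG] |= 1UL << (bit % BITS_PER_LONG);
}

static int test_bit_ul(const unsigned long *map, unsigned int bit)
{
	return (map[bit / BITS_PER_LONG] >> (bit % BITS_PER_LONG)) & 1;
}

int main(void)
{
	size_t shader_size = 4096;			/* bytes of QPU code */
	unsigned int max_ip = shader_size / sizeof(uint64_t);
	unsigned long *branch_targets;

	/* One bit per instruction slot, as in the kcalloc() above. */
	branch_targets = calloc(BITS_TO_LONGS(max_ip), sizeof(unsigned long));
	if (!branch_targets)
		return 1;

	set_bit_ul(branch_targets, 12);			/* a branch lands at ip 12 */
	for (unsigned int ip = 0; ip < max_ip; ip++) {
		if (test_bit_ul(branch_targets, ip))
			printf("ip %u is a branch target\n", ip);
	}

	free(branch_targets);
	return 0;
}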
diff --git a/drivers/gpu/drm/vgem/Makefile b/drivers/gpu/drm/vgem/Makefile
index 3f4c7b842028..bfcdea1330e6 100644
--- a/drivers/gpu/drm/vgem/Makefile
+++ b/drivers/gpu/drm/vgem/Makefile
@@ -1,4 +1,4 @@
1ccflags-y := -Iinclude/drm 1ccflags-y := -Iinclude/drm
2vgem-y := vgem_drv.o 2vgem-y := vgem_drv.o vgem_fence.o
3 3
4obj-$(CONFIG_DRM_VGEM) += vgem.o 4obj-$(CONFIG_DRM_VGEM) += vgem.o
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 341f9be3dde6..c15bafb06665 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -42,81 +42,38 @@
42#define DRIVER_MAJOR 1 42#define DRIVER_MAJOR 1
43#define DRIVER_MINOR 0 43#define DRIVER_MINOR 0
44 44
45void vgem_gem_put_pages(struct drm_vgem_gem_object *obj)
46{
47 drm_gem_put_pages(&obj->base, obj->pages, false, false);
48 obj->pages = NULL;
49}
50
51static void vgem_gem_free_object(struct drm_gem_object *obj) 45static void vgem_gem_free_object(struct drm_gem_object *obj)
52{ 46{
53 struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj); 47 struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);
54 48
55 drm_gem_free_mmap_offset(obj);
56
57 if (vgem_obj->use_dma_buf && obj->dma_buf) {
58 dma_buf_put(obj->dma_buf);
59 obj->dma_buf = NULL;
60 }
61
62 drm_gem_object_release(obj); 49 drm_gem_object_release(obj);
63
64 if (vgem_obj->pages)
65 vgem_gem_put_pages(vgem_obj);
66
67 vgem_obj->pages = NULL;
68
69 kfree(vgem_obj); 50 kfree(vgem_obj);
70} 51}
71 52
72int vgem_gem_get_pages(struct drm_vgem_gem_object *obj)
73{
74 struct page **pages;
75
76 if (obj->pages || obj->use_dma_buf)
77 return 0;
78
79 pages = drm_gem_get_pages(&obj->base);
80 if (IS_ERR(pages)) {
81 return PTR_ERR(pages);
82 }
83
84 obj->pages = pages;
85
86 return 0;
87}
88
89static int vgem_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 53static int vgem_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
90{ 54{
91 struct drm_vgem_gem_object *obj = vma->vm_private_data; 55 struct drm_vgem_gem_object *obj = vma->vm_private_data;
92 loff_t num_pages;
93 pgoff_t page_offset;
94 int ret;
95
96 /* We don't use vmf->pgoff since that has the fake offset */ 56 /* We don't use vmf->pgoff since that has the fake offset */
97 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> 57 unsigned long vaddr = (unsigned long)vmf->virtual_address;
98 PAGE_SHIFT; 58 struct page *page;
99 59
100 num_pages = DIV_ROUND_UP(obj->base.size, PAGE_SIZE); 60 page = shmem_read_mapping_page(file_inode(obj->base.filp)->i_mapping,
101 61 (vaddr - vma->vm_start) >> PAGE_SHIFT);
102 if (page_offset > num_pages) 62 if (!IS_ERR(page)) {
103 return VM_FAULT_SIGBUS; 63 vmf->page = page;
104 64 return 0;
105 ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, 65 } else switch (PTR_ERR(page)) {
106 obj->pages[page_offset]); 66 case -ENOSPC:
107 switch (ret) { 67 case -ENOMEM:
108 case 0: 68 return VM_FAULT_OOM;
109 return VM_FAULT_NOPAGE; 69 case -EBUSY:
110 case -ENOMEM: 70 return VM_FAULT_RETRY;
111 return VM_FAULT_OOM; 71 case -EFAULT:
112 case -EBUSY: 72 case -EINVAL:
113 return VM_FAULT_RETRY; 73 return VM_FAULT_SIGBUS;
114 case -EFAULT: 74 default:
115 case -EINVAL: 75 WARN_ON_ONCE(PTR_ERR(page));
116 return VM_FAULT_SIGBUS; 76 return VM_FAULT_SIGBUS;
117 default:
118 WARN_ON(1);
119 return VM_FAULT_SIGBUS;
120 } 77 }
121} 78}
122 79
@@ -126,6 +83,34 @@ static const struct vm_operations_struct vgem_gem_vm_ops = {
126 .close = drm_gem_vm_close, 83 .close = drm_gem_vm_close,
127}; 84};
128 85
86static int vgem_open(struct drm_device *dev, struct drm_file *file)
87{
88 struct vgem_file *vfile;
89 int ret;
90
91 vfile = kzalloc(sizeof(*vfile), GFP_KERNEL);
92 if (!vfile)
93 return -ENOMEM;
94
95 file->driver_priv = vfile;
96
97 ret = vgem_fence_open(vfile);
98 if (ret) {
99 kfree(vfile);
100 return ret;
101 }
102
103 return 0;
104}
105
106static void vgem_preclose(struct drm_device *dev, struct drm_file *file)
107{
108 struct vgem_file *vfile = file->driver_priv;
109
110 vgem_fence_close(vfile);
111 kfree(vfile);
112}
113
129/* ioctls */ 114/* ioctls */
130 115
131static struct drm_gem_object *vgem_gem_create(struct drm_device *dev, 116static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
@@ -134,57 +119,43 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
134 unsigned long size) 119 unsigned long size)
135{ 120{
136 struct drm_vgem_gem_object *obj; 121 struct drm_vgem_gem_object *obj;
137 struct drm_gem_object *gem_object; 122 int ret;
138 int err;
139
140 size = roundup(size, PAGE_SIZE);
141 123
142 obj = kzalloc(sizeof(*obj), GFP_KERNEL); 124 obj = kzalloc(sizeof(*obj), GFP_KERNEL);
143 if (!obj) 125 if (!obj)
144 return ERR_PTR(-ENOMEM); 126 return ERR_PTR(-ENOMEM);
145 127
146 gem_object = &obj->base; 128 ret = drm_gem_object_init(dev, &obj->base, roundup(size, PAGE_SIZE));
147 129 if (ret)
148 err = drm_gem_object_init(dev, gem_object, size); 130 goto err_free;
149 if (err)
150 goto out;
151
152 err = vgem_gem_get_pages(obj);
153 if (err)
154 goto out;
155
156 err = drm_gem_handle_create(file, gem_object, handle);
157 if (err)
158 goto handle_out;
159 131
160 drm_gem_object_unreference_unlocked(gem_object); 132 ret = drm_gem_handle_create(file, &obj->base, handle);
133 drm_gem_object_unreference_unlocked(&obj->base);
134 if (ret)
135 goto err;
161 136
162 return gem_object; 137 return &obj->base;
163 138
164handle_out: 139err_free:
165 drm_gem_object_release(gem_object);
166out:
167 kfree(obj); 140 kfree(obj);
168 return ERR_PTR(err); 141err:
142 return ERR_PTR(ret);
169} 143}
170 144
171static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev, 145static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
172 struct drm_mode_create_dumb *args) 146 struct drm_mode_create_dumb *args)
173{ 147{
174 struct drm_gem_object *gem_object; 148 struct drm_gem_object *gem_object;
175 uint64_t size; 149 u64 pitch, size;
176 uint64_t pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
177 150
151 pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
178 size = args->height * pitch; 152 size = args->height * pitch;
179 if (size == 0) 153 if (size == 0)
180 return -EINVAL; 154 return -EINVAL;
181 155
182 gem_object = vgem_gem_create(dev, file, &args->handle, size); 156 gem_object = vgem_gem_create(dev, file, &args->handle, size);
183 157 if (IS_ERR(gem_object))
184 if (IS_ERR(gem_object)) {
185 DRM_DEBUG_DRIVER("object creation failed\n");
186 return PTR_ERR(gem_object); 158 return PTR_ERR(gem_object);
187 }
188 159
189 args->size = gem_object->size; 160 args->size = gem_object->size;
190 args->pitch = pitch; 161 args->pitch = pitch;
@@ -194,26 +165,26 @@ static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
194 return 0; 165 return 0;
195} 166}
196 167
197int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev, 168static int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
198 uint32_t handle, uint64_t *offset) 169 uint32_t handle, uint64_t *offset)
199{ 170{
200 int ret = 0;
201 struct drm_gem_object *obj; 171 struct drm_gem_object *obj;
172 int ret;
202 173
203 obj = drm_gem_object_lookup(file, handle); 174 obj = drm_gem_object_lookup(file, handle);
204 if (!obj) 175 if (!obj)
205 return -ENOENT; 176 return -ENOENT;
206 177
178 if (!obj->filp) {
179 ret = -EINVAL;
180 goto unref;
181 }
182
207 ret = drm_gem_create_mmap_offset(obj); 183 ret = drm_gem_create_mmap_offset(obj);
208 if (ret) 184 if (ret)
209 goto unref; 185 goto unref;
210 186
211 BUG_ON(!obj->filp);
212
213 obj->filp->private_data = obj;
214
215 *offset = drm_vma_node_offset_addr(&obj->vma_node); 187 *offset = drm_vma_node_offset_addr(&obj->vma_node);
216
217unref: 188unref:
218 drm_gem_object_unreference_unlocked(obj); 189 drm_gem_object_unreference_unlocked(obj);
219 190
@@ -221,26 +192,134 @@ unref:
221} 192}
222 193
223static struct drm_ioctl_desc vgem_ioctls[] = { 194static struct drm_ioctl_desc vgem_ioctls[] = {
195 DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
196 DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
224}; 197};
225 198
199static int vgem_mmap(struct file *filp, struct vm_area_struct *vma)
200{
201 unsigned long flags = vma->vm_flags;
202 int ret;
203
204 ret = drm_gem_mmap(filp, vma);
205 if (ret)
206 return ret;
207
208 /* Keep the WC mapping set by drm_gem_mmap(), but our pages
209 * are ordinary and not special.
210 */
211 vma->vm_flags = flags | VM_DONTEXPAND | VM_DONTDUMP;
212 return 0;
213}
214
226static const struct file_operations vgem_driver_fops = { 215static const struct file_operations vgem_driver_fops = {
227 .owner = THIS_MODULE, 216 .owner = THIS_MODULE,
228 .open = drm_open, 217 .open = drm_open,
229 .mmap = drm_gem_mmap, 218 .mmap = vgem_mmap,
230 .poll = drm_poll, 219 .poll = drm_poll,
231 .read = drm_read, 220 .read = drm_read,
232 .unlocked_ioctl = drm_ioctl, 221 .unlocked_ioctl = drm_ioctl,
233 .release = drm_release, 222 .release = drm_release,
234}; 223};
235 224
225static int vgem_prime_pin(struct drm_gem_object *obj)
226{
227 long n_pages = obj->size >> PAGE_SHIFT;
228 struct page **pages;
229
230 /* Flush the object from the CPU cache so that importers can rely
231 * on coherent indirect access via the exported dma-address.
232 */
233 pages = drm_gem_get_pages(obj);
234 if (IS_ERR(pages))
235 return PTR_ERR(pages);
236
237 drm_clflush_pages(pages, n_pages);
238 drm_gem_put_pages(obj, pages, true, false);
239
240 return 0;
241}
242
243static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
244{
245 struct sg_table *st;
246 struct page **pages;
247
248 pages = drm_gem_get_pages(obj);
249 if (IS_ERR(pages))
250 return ERR_CAST(pages);
251
252 st = drm_prime_pages_to_sg(pages, obj->size >> PAGE_SHIFT);
253 drm_gem_put_pages(obj, pages, false, false);
254
255 return st;
256}
257
258static void *vgem_prime_vmap(struct drm_gem_object *obj)
259{
260 long n_pages = obj->size >> PAGE_SHIFT;
261 struct page **pages;
262 void *addr;
263
264 pages = drm_gem_get_pages(obj);
265 if (IS_ERR(pages))
266 return NULL;
267
268 addr = vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL));
269 drm_gem_put_pages(obj, pages, false, false);
270
271 return addr;
272}
273
274static void vgem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
275{
276 vunmap(vaddr);
277}
278
279static int vgem_prime_mmap(struct drm_gem_object *obj,
280 struct vm_area_struct *vma)
281{
282 int ret;
283
284 if (obj->size < vma->vm_end - vma->vm_start)
285 return -EINVAL;
286
287 if (!obj->filp)
288 return -ENODEV;
289
290 ret = obj->filp->f_op->mmap(obj->filp, vma);
291 if (ret)
292 return ret;
293
294 fput(vma->vm_file);
295 vma->vm_file = get_file(obj->filp);
296 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
297 vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
298
299 return 0;
300}
301
236static struct drm_driver vgem_driver = { 302static struct drm_driver vgem_driver = {
237 .driver_features = DRIVER_GEM, 303 .driver_features = DRIVER_GEM | DRIVER_PRIME,
238 .gem_free_object = vgem_gem_free_object, 304 .open = vgem_open,
305 .preclose = vgem_preclose,
306 .gem_free_object_unlocked = vgem_gem_free_object,
239 .gem_vm_ops = &vgem_gem_vm_ops, 307 .gem_vm_ops = &vgem_gem_vm_ops,
240 .ioctls = vgem_ioctls, 308 .ioctls = vgem_ioctls,
309 .num_ioctls = ARRAY_SIZE(vgem_ioctls),
241 .fops = &vgem_driver_fops, 310 .fops = &vgem_driver_fops,
311
242 .dumb_create = vgem_gem_dumb_create, 312 .dumb_create = vgem_gem_dumb_create,
243 .dumb_map_offset = vgem_gem_dumb_map, 313 .dumb_map_offset = vgem_gem_dumb_map,
314
315 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
316 .gem_prime_pin = vgem_prime_pin,
317 .gem_prime_export = drm_gem_prime_export,
318 .gem_prime_get_sg_table = vgem_prime_get_sg_table,
319 .gem_prime_vmap = vgem_prime_vmap,
320 .gem_prime_vunmap = vgem_prime_vunmap,
321 .gem_prime_mmap = vgem_prime_mmap,
322
244 .name = DRIVER_NAME, 323 .name = DRIVER_NAME,
245 .desc = DRIVER_DESC, 324 .desc = DRIVER_DESC,
246 .date = DRIVER_DATE, 325 .date = DRIVER_DATE,
@@ -248,7 +327,7 @@ static struct drm_driver vgem_driver = {
248 .minor = DRIVER_MINOR, 327 .minor = DRIVER_MINOR,
249}; 328};
250 329
251struct drm_device *vgem_device; 330static struct drm_device *vgem_device;
252 331
253static int __init vgem_init(void) 332static int __init vgem_init(void)
254{ 333{
@@ -260,10 +339,7 @@ static int __init vgem_init(void)
260 goto out; 339 goto out;
261 } 340 }
262 341
263 drm_dev_set_unique(vgem_device, "vgem");
264
265 ret = drm_dev_register(vgem_device, 0); 342 ret = drm_dev_register(vgem_device, 0);
266
267 if (ret) 343 if (ret)
268 goto out_unref; 344 goto out_unref;
269 345
@@ -285,5 +361,6 @@ module_init(vgem_init);
285module_exit(vgem_exit); 361module_exit(vgem_exit);
286 362
287MODULE_AUTHOR("Red Hat, Inc."); 363MODULE_AUTHOR("Red Hat, Inc.");
364MODULE_AUTHOR("Intel Corporation");
288MODULE_DESCRIPTION(DRIVER_DESC); 365MODULE_DESCRIPTION(DRIVER_DESC);
289MODULE_LICENSE("GPL and additional rights"); 366MODULE_LICENSE("GPL and additional rights");
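With the page bookkeeping removed, vgem buffers are plain shmem-backed GEM objects: dumb_create allocates the object, dumb_map_offset hands back an mmap offset, and vgem_mmap()/vgem_gem_fault() fault the pages in on demand. A rough userspace sketch of that path using the generic dumb-buffer ioctls; the device path is an assumption, and the header location may differ (libdrm installs it under libdrm/):

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <drm/drm.h>

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);	/* assumed to be the vgem node */
	struct drm_mode_create_dumb create = {
		.width = 64, .height = 64, .bpp = 32,
	};
	struct drm_mode_map_dumb map = { 0 };
	void *ptr;

	if (fd < 0 || ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
		return 1;

	map.handle = create.handle;
	if (ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
		return 1;

	/* The mapping is backed by ordinary shmem pages, faulted in lazily. */
	ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   fd, map.offset);
	if (ptr == MAP_FAILED)
		return 1;

	memset(ptr, 0xaa, create.size);
	munmap(ptr, create.size);
	close(fd);
	return 0;
}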
diff --git a/drivers/gpu/drm/vgem/vgem_drv.h b/drivers/gpu/drm/vgem/vgem_drv.h
index e9f92f7ee275..1f8798ad329c 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.h
+++ b/drivers/gpu/drm/vgem/vgem_drv.h
@@ -32,15 +32,25 @@
32#include <drm/drmP.h> 32#include <drm/drmP.h>
33#include <drm/drm_gem.h> 33#include <drm/drm_gem.h>
34 34
35#include <uapi/drm/vgem_drm.h>
36
37struct vgem_file {
38 struct idr fence_idr;
39 struct mutex fence_mutex;
40};
41
35#define to_vgem_bo(x) container_of(x, struct drm_vgem_gem_object, base) 42#define to_vgem_bo(x) container_of(x, struct drm_vgem_gem_object, base)
36struct drm_vgem_gem_object { 43struct drm_vgem_gem_object {
37 struct drm_gem_object base; 44 struct drm_gem_object base;
38 struct page **pages;
39 bool use_dma_buf;
40}; 45};
41 46
42/* vgem_drv.c */ 47int vgem_fence_open(struct vgem_file *file);
43extern void vgem_gem_put_pages(struct drm_vgem_gem_object *obj); 48int vgem_fence_attach_ioctl(struct drm_device *dev,
44extern int vgem_gem_get_pages(struct drm_vgem_gem_object *obj); 49 void *data,
50 struct drm_file *file);
51int vgem_fence_signal_ioctl(struct drm_device *dev,
52 void *data,
53 struct drm_file *file);
54void vgem_fence_close(struct vgem_file *file);
45 55
46#endif 56#endif
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
new file mode 100644
index 000000000000..5c57c1ffa1f9
--- /dev/null
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -0,0 +1,283 @@
1/*
2 * Copyright 2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software")
6 * to deal in the software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * them Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTIBILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER
19 * IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF OR IN
20 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#include <linux/dma-buf.h>
24#include <linux/reservation.h>
25
26#include "vgem_drv.h"
27
28#define VGEM_FENCE_TIMEOUT (10*HZ)
29
30struct vgem_fence {
31 struct fence base;
32 struct spinlock lock;
33 struct timer_list timer;
34};
35
36static const char *vgem_fence_get_driver_name(struct fence *fence)
37{
38 return "vgem";
39}
40
41static const char *vgem_fence_get_timeline_name(struct fence *fence)
42{
43 return "unbound";
44}
45
46static bool vgem_fence_signaled(struct fence *fence)
47{
48 return false;
49}
50
51static bool vgem_fence_enable_signaling(struct fence *fence)
52{
53 return true;
54}
55
56static void vgem_fence_release(struct fence *base)
57{
58 struct vgem_fence *fence = container_of(base, typeof(*fence), base);
59
60 del_timer_sync(&fence->timer);
61 fence_free(&fence->base);
62}
63
64static void vgem_fence_value_str(struct fence *fence, char *str, int size)
65{
66 snprintf(str, size, "%u", fence->seqno);
67}
68
69static void vgem_fence_timeline_value_str(struct fence *fence, char *str,
70 int size)
71{
72 snprintf(str, size, "%u", fence_is_signaled(fence) ? fence->seqno : 0);
73}
74
75static const struct fence_ops vgem_fence_ops = {
76 .get_driver_name = vgem_fence_get_driver_name,
77 .get_timeline_name = vgem_fence_get_timeline_name,
78 .enable_signaling = vgem_fence_enable_signaling,
79 .signaled = vgem_fence_signaled,
80 .wait = fence_default_wait,
81 .release = vgem_fence_release,
82
83 .fence_value_str = vgem_fence_value_str,
84 .timeline_value_str = vgem_fence_timeline_value_str,
85};
86
87static void vgem_fence_timeout(unsigned long data)
88{
89 struct vgem_fence *fence = (struct vgem_fence *)data;
90
91 fence_signal(&fence->base);
92}
93
94static struct fence *vgem_fence_create(struct vgem_file *vfile,
95 unsigned int flags)
96{
97 struct vgem_fence *fence;
98
99 fence = kzalloc(sizeof(*fence), GFP_KERNEL);
100 if (!fence)
101 return NULL;
102
103 spin_lock_init(&fence->lock);
104 fence_init(&fence->base, &vgem_fence_ops, &fence->lock,
105 fence_context_alloc(1), 1);
106
107 setup_timer(&fence->timer, vgem_fence_timeout, (unsigned long)fence);
108
109 /* We force the fence to expire within 10s to prevent driver hangs */
110 mod_timer(&fence->timer, jiffies + VGEM_FENCE_TIMEOUT);
111
112 return &fence->base;
113}
114
115static int attach_dmabuf(struct drm_device *dev,
116 struct drm_gem_object *obj)
117{
118 struct dma_buf *dmabuf;
119
120 if (obj->dma_buf)
121 return 0;
122
123 dmabuf = dev->driver->gem_prime_export(dev, obj, 0);
124 if (IS_ERR(dmabuf))
125 return PTR_ERR(dmabuf);
126
127 obj->dma_buf = dmabuf;
128 drm_gem_object_reference(obj);
129 return 0;
130}
131
132/*
133 * vgem_fence_attach_ioctl (DRM_IOCTL_VGEM_FENCE_ATTACH):
134 *
135 * Create and attach a fence to the vGEM handle. This fence is then exposed
136 * via the dma-buf reservation object and visible to consumers of the exported
137 * dma-buf. If the flags contain VGEM_FENCE_WRITE, the fence indicates the
138 * vGEM buffer is being written to by the client and is exposed as an exclusive
139 * fence, otherwise the fence indicates the client is currently reading from the
140 * buffer and all future writes should wait for the client to signal its
141 * completion. Note that if a conflicting fence is already on the dma-buf (i.e.
142 * an exclusive fence when adding a read, or any fence when adding a write),
143 * -EBUSY is reported. Serialisation between operations should be handled
144 * by waiting upon the dma-buf.
145 *
146 * This returns the handle for the new fence, which must be signaled within 10
147 * seconds (otherwise it automatically expires). See
148 * vgem_fence_signal_ioctl (DRM_IOCTL_VGEM_FENCE_SIGNAL).
149 *
150 * If the vGEM handle does not exist, vgem_fence_attach_ioctl returns -ENOENT.
151 */
152int vgem_fence_attach_ioctl(struct drm_device *dev,
153 void *data,
154 struct drm_file *file)
155{
156 struct drm_vgem_fence_attach *arg = data;
157 struct vgem_file *vfile = file->driver_priv;
158 struct reservation_object *resv;
159 struct drm_gem_object *obj;
160 struct fence *fence;
161 int ret;
162
163 if (arg->flags & ~VGEM_FENCE_WRITE)
164 return -EINVAL;
165
166 if (arg->pad)
167 return -EINVAL;
168
169 obj = drm_gem_object_lookup(file, arg->handle);
170 if (!obj)
171 return -ENOENT;
172
173 ret = attach_dmabuf(dev, obj);
174 if (ret)
175 goto err;
176
177 fence = vgem_fence_create(vfile, arg->flags);
178 if (!fence) {
179 ret = -ENOMEM;
180 goto err;
181 }
182
183 /* Check for a conflicting fence */
184 resv = obj->dma_buf->resv;
185 if (!reservation_object_test_signaled_rcu(resv,
186 arg->flags & VGEM_FENCE_WRITE)) {
187 ret = -EBUSY;
188 goto err_fence;
189 }
190
191 /* Expose the fence via the dma-buf */
192 ret = 0;
193 mutex_lock(&resv->lock.base);
194 if (arg->flags & VGEM_FENCE_WRITE)
195 reservation_object_add_excl_fence(resv, fence);
196 else if ((ret = reservation_object_reserve_shared(resv)) == 0)
197 reservation_object_add_shared_fence(resv, fence);
198 mutex_unlock(&resv->lock.base);
199
200 /* Record the fence in our idr for later signaling */
201 if (ret == 0) {
202 mutex_lock(&vfile->fence_mutex);
203 ret = idr_alloc(&vfile->fence_idr, fence, 1, 0, GFP_KERNEL);
204 mutex_unlock(&vfile->fence_mutex);
205 if (ret > 0) {
206 arg->out_fence = ret;
207 ret = 0;
208 }
209 }
210err_fence:
211 if (ret) {
212 fence_signal(fence);
213 fence_put(fence);
214 }
215err:
216 drm_gem_object_unreference_unlocked(obj);
217 return ret;
218}
219
220/*
221 * vgem_fence_signal_ioctl (DRM_IOCTL_VGEM_FENCE_SIGNAL):
222 *
223 * Signal and consume a fence earlier attached to a vGEM handle using
224 * vgem_fence_attach_ioctl (DRM_IOCTL_VGEM_FENCE_ATTACH).
225 *
226 * All fences must be signaled within 10s of attachment, otherwise they
227 * will automatically expire (and vgem_fence_signal_ioctl returns -ETIMEDOUT).
228 *
229 * Signaling a fence indicates to all consumers of the dma-buf that the
230 * client has completed the operation associated with the fence, and that the
231 * buffer is then ready for consumption.
232 *
233 * If the fence does not exist (or has already been signaled by the client),
234 * vgem_fence_signal_ioctl returns -ENOENT.
235 */
236int vgem_fence_signal_ioctl(struct drm_device *dev,
237 void *data,
238 struct drm_file *file)
239{
240 struct vgem_file *vfile = file->driver_priv;
241 struct drm_vgem_fence_signal *arg = data;
242 struct fence *fence;
243 int ret = 0;
244
245 if (arg->flags)
246 return -EINVAL;
247
248 mutex_lock(&vfile->fence_mutex);
249 fence = idr_replace(&vfile->fence_idr, NULL, arg->fence);
250 mutex_unlock(&vfile->fence_mutex);
251 if (!fence)
252 return -ENOENT;
253 if (IS_ERR(fence))
254 return PTR_ERR(fence);
255
256 if (fence_is_signaled(fence))
257 ret = -ETIMEDOUT;
258
259 fence_signal(fence);
260 fence_put(fence);
261 return ret;
262}
263
264int vgem_fence_open(struct vgem_file *vfile)
265{
266 mutex_init(&vfile->fence_mutex);
267 idr_init(&vfile->fence_idr);
268
269 return 0;
270}
271
272static int __vgem_fence_idr_fini(int id, void *p, void *data)
273{
274 fence_signal(p);
275 fence_put(p);
276 return 0;
277}
278
279void vgem_fence_close(struct vgem_file *vfile)
280{
281 idr_for_each(&vfile->fence_idr, __vgem_fence_idr_fini, vfile);
282 idr_destroy(&vfile->fence_idr);
283}
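Putting the two new ioctls together, a client attaches a fence before touching the buffer and signals it when done; anything waiting on the exported dma-buf's reservation object serialises against it. A hedged sketch of that cycle, assuming the new uapi header (vgem_drm.h) is installed as <drm/vgem_drm.h> and provides DRM_IOCTL_VGEM_FENCE_ATTACH/SIGNAL, VGEM_FENCE_WRITE and the drm_vgem_fence_attach/drm_vgem_fence_signal structs used by the handlers above; the GEM handle comes from elsewhere (e.g. the dumb-buffer create in the earlier sketch):

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/vgem_drm.h>

int vgem_fence_write_cycle(int fd, uint32_t handle)
{
	struct drm_vgem_fence_attach attach = {
		.handle = handle,
		.flags = VGEM_FENCE_WRITE,	/* exclusive (write) fence */
	};
	struct drm_vgem_fence_signal signal = { 0 };

	/* Fails with -EBUSY if a conflicting fence is already on the dma-buf. */
	if (ioctl(fd, DRM_IOCTL_VGEM_FENCE_ATTACH, &attach))
		return -1;

	/* ... the client writes into the buffer here; it must finish within
	 * 10s or the fence auto-expires and the signal below reports
	 * -ETIMEDOUT. */

	signal.fence = attach.out_fence;
	return ioctl(fd, DRM_IOCTL_VGEM_FENCE_SIGNAL, &signal);
}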
diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c
index 4f20742e7788..a04ef1c992d9 100644
--- a/drivers/gpu/drm/via/via_mm.c
+++ b/drivers/gpu/drm/via/via_mm.c
@@ -208,7 +208,7 @@ void via_reclaim_buffers_locked(struct drm_device *dev,
208 struct via_file_private *file_priv = file->driver_priv; 208 struct via_file_private *file_priv = file->driver_priv;
209 struct via_memblock *entry, *next; 209 struct via_memblock *entry, *next;
210 210
211 if (!(file->minor->master && file->master->lock.hw_lock)) 211 if (!(dev->master && file->master->lock.hw_lock))
212 return; 212 return;
213 213
214 drm_legacy_idlelock_take(&file->master->lock); 214 drm_legacy_idlelock_take(&file->master->lock);
diff --git a/drivers/gpu/drm/virtio/Kconfig b/drivers/gpu/drm/virtio/Kconfig
index 9983eadb81b6..e1afc3d3f8d9 100644
--- a/drivers/gpu/drm/virtio/Kconfig
+++ b/drivers/gpu/drm/virtio/Kconfig
@@ -1,11 +1,7 @@
1config DRM_VIRTIO_GPU 1config DRM_VIRTIO_GPU
2 tristate "Virtio GPU driver" 2 tristate "Virtio GPU driver"
3 depends on DRM && VIRTIO 3 depends on DRM && VIRTIO
4 select FB_SYS_FILLRECT
5 select FB_SYS_COPYAREA
6 select FB_SYS_IMAGEBLIT
7 select DRM_KMS_HELPER 4 select DRM_KMS_HELPER
8 select DRM_KMS_FB_HELPER
9 select DRM_TTM 5 select DRM_TTM
10 help 6 help
11 This is the virtual GPU driver for virtio. It can be used with 7 This is the virtual GPU driver for virtio. It can be used with
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index d4305da88f44..4e192aa2d021 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -29,8 +29,8 @@
29#include <drm/drm_crtc_helper.h> 29#include <drm/drm_crtc_helper.h>
30#include <drm/drm_atomic_helper.h> 30#include <drm/drm_atomic_helper.h>
31 31
32#define XRES_MIN 320 32#define XRES_MIN 32
33#define YRES_MIN 200 33#define YRES_MIN 32
34 34
35#define XRES_DEF 1024 35#define XRES_DEF 1024
36#define YRES_DEF 768 36#define YRES_DEF 768
@@ -38,138 +38,11 @@
38#define XRES_MAX 8192 38#define XRES_MAX 8192
39#define YRES_MAX 8192 39#define YRES_MAX 8192
40 40
41static void
42virtio_gpu_hide_cursor(struct virtio_gpu_device *vgdev,
43 struct virtio_gpu_output *output)
44{
45 output->cursor.hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
46 output->cursor.resource_id = 0;
47 virtio_gpu_cursor_ping(vgdev, output);
48}
49
50static int virtio_gpu_crtc_cursor_set(struct drm_crtc *crtc,
51 struct drm_file *file_priv,
52 uint32_t handle,
53 uint32_t width,
54 uint32_t height,
55 int32_t hot_x, int32_t hot_y)
56{
57 struct virtio_gpu_device *vgdev = crtc->dev->dev_private;
58 struct virtio_gpu_output *output =
59 container_of(crtc, struct virtio_gpu_output, crtc);
60 struct drm_gem_object *gobj = NULL;
61 struct virtio_gpu_object *qobj = NULL;
62 struct virtio_gpu_fence *fence = NULL;
63 int ret = 0;
64
65 if (handle == 0) {
66 virtio_gpu_hide_cursor(vgdev, output);
67 return 0;
68 }
69
70 /* lookup the cursor */
71 gobj = drm_gem_object_lookup(file_priv, handle);
72 if (gobj == NULL)
73 return -ENOENT;
74
75 qobj = gem_to_virtio_gpu_obj(gobj);
76
77 if (!qobj->hw_res_handle) {
78 ret = -EINVAL;
79 goto out;
80 }
81
82 virtio_gpu_cmd_transfer_to_host_2d(vgdev, qobj->hw_res_handle, 0,
83 cpu_to_le32(64),
84 cpu_to_le32(64),
85 0, 0, &fence);
86 ret = virtio_gpu_object_reserve(qobj, false);
87 if (!ret) {
88 reservation_object_add_excl_fence(qobj->tbo.resv,
89 &fence->f);
90 fence_put(&fence->f);
91 virtio_gpu_object_unreserve(qobj);
92 virtio_gpu_object_wait(qobj, false);
93 }
94
95 output->cursor.hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
96 output->cursor.resource_id = cpu_to_le32(qobj->hw_res_handle);
97 output->cursor.hot_x = cpu_to_le32(hot_x);
98 output->cursor.hot_y = cpu_to_le32(hot_y);
99 virtio_gpu_cursor_ping(vgdev, output);
100 ret = 0;
101
102out:
103 drm_gem_object_unreference_unlocked(gobj);
104 return ret;
105}
106
107static int virtio_gpu_crtc_cursor_move(struct drm_crtc *crtc,
108 int x, int y)
109{
110 struct virtio_gpu_device *vgdev = crtc->dev->dev_private;
111 struct virtio_gpu_output *output =
112 container_of(crtc, struct virtio_gpu_output, crtc);
113
114 output->cursor.hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR);
115 output->cursor.pos.x = cpu_to_le32(x);
116 output->cursor.pos.y = cpu_to_le32(y);
117 virtio_gpu_cursor_ping(vgdev, output);
118 return 0;
119}
120
121static int virtio_gpu_page_flip(struct drm_crtc *crtc,
122 struct drm_framebuffer *fb,
123 struct drm_pending_vblank_event *event,
124 uint32_t flags)
125{
126 struct virtio_gpu_device *vgdev = crtc->dev->dev_private;
127 struct virtio_gpu_output *output =
128 container_of(crtc, struct virtio_gpu_output, crtc);
129 struct drm_plane *plane = crtc->primary;
130 struct virtio_gpu_framebuffer *vgfb;
131 struct virtio_gpu_object *bo;
132 unsigned long irqflags;
133 uint32_t handle;
134
135 plane->fb = fb;
136 vgfb = to_virtio_gpu_framebuffer(plane->fb);
137 bo = gem_to_virtio_gpu_obj(vgfb->obj);
138 handle = bo->hw_res_handle;
139
140 DRM_DEBUG("handle 0x%x%s, crtc %dx%d\n", handle,
141 bo->dumb ? ", dumb" : "",
142 crtc->mode.hdisplay, crtc->mode.vdisplay);
143 if (bo->dumb) {
144 virtio_gpu_cmd_transfer_to_host_2d
145 (vgdev, handle, 0,
146 cpu_to_le32(crtc->mode.hdisplay),
147 cpu_to_le32(crtc->mode.vdisplay),
148 0, 0, NULL);
149 }
150 virtio_gpu_cmd_set_scanout(vgdev, output->index, handle,
151 crtc->mode.hdisplay,
152 crtc->mode.vdisplay, 0, 0);
153 virtio_gpu_cmd_resource_flush(vgdev, handle, 0, 0,
154 crtc->mode.hdisplay,
155 crtc->mode.vdisplay);
156
157 if (event) {
158 spin_lock_irqsave(&crtc->dev->event_lock, irqflags);
159 drm_send_vblank_event(crtc->dev, -1, event);
160 spin_unlock_irqrestore(&crtc->dev->event_lock, irqflags);
161 }
162
163 return 0;
164}
165
166static const struct drm_crtc_funcs virtio_gpu_crtc_funcs = { 41static const struct drm_crtc_funcs virtio_gpu_crtc_funcs = {
167 .cursor_set2 = virtio_gpu_crtc_cursor_set,
168 .cursor_move = virtio_gpu_crtc_cursor_move,
169 .set_config = drm_atomic_helper_set_config, 42 .set_config = drm_atomic_helper_set_config,
170 .destroy = drm_crtc_cleanup, 43 .destroy = drm_crtc_cleanup,
171 44
172 .page_flip = virtio_gpu_page_flip, 45 .page_flip = drm_atomic_helper_page_flip,
173 .reset = drm_atomic_helper_crtc_reset, 46 .reset = drm_atomic_helper_crtc_reset,
174 .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, 47 .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
175 .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, 48 .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
@@ -180,8 +53,7 @@ static void virtio_gpu_user_framebuffer_destroy(struct drm_framebuffer *fb)
180 struct virtio_gpu_framebuffer *virtio_gpu_fb 53 struct virtio_gpu_framebuffer *virtio_gpu_fb
181 = to_virtio_gpu_framebuffer(fb); 54 = to_virtio_gpu_framebuffer(fb);
182 55
183 if (virtio_gpu_fb->obj) 56 drm_gem_object_unreference_unlocked(virtio_gpu_fb->obj);
184 drm_gem_object_unreference_unlocked(virtio_gpu_fb->obj);
185 drm_framebuffer_cleanup(fb); 57 drm_framebuffer_cleanup(fb);
186 kfree(virtio_gpu_fb); 58 kfree(virtio_gpu_fb);
187} 59}
@@ -267,6 +139,7 @@ static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,
267 spin_lock_irqsave(&crtc->dev->event_lock, flags); 139 spin_lock_irqsave(&crtc->dev->event_lock, flags);
268 if (crtc->state->event) 140 if (crtc->state->event)
269 drm_crtc_send_vblank_event(crtc, crtc->state->event); 141 drm_crtc_send_vblank_event(crtc, crtc->state->event);
142 crtc->state->event = NULL;
270 spin_unlock_irqrestore(&crtc->dev->event_lock, flags); 143 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
271} 144}
272 145
@@ -341,15 +214,6 @@ static int virtio_gpu_conn_mode_valid(struct drm_connector *connector,
341 return MODE_BAD; 214 return MODE_BAD;
342} 215}
343 216
344static struct drm_encoder*
345virtio_gpu_best_encoder(struct drm_connector *connector)
346{
347 struct virtio_gpu_output *virtio_gpu_output =
348 drm_connector_to_virtio_gpu_output(connector);
349
350 return &virtio_gpu_output->enc;
351}
352
353static const struct drm_encoder_helper_funcs virtio_gpu_enc_helper_funcs = { 217static const struct drm_encoder_helper_funcs virtio_gpu_enc_helper_funcs = {
354 .mode_set = virtio_gpu_enc_mode_set, 218 .mode_set = virtio_gpu_enc_mode_set,
355 .enable = virtio_gpu_enc_enable, 219 .enable = virtio_gpu_enc_enable,
@@ -359,7 +223,6 @@ static const struct drm_encoder_helper_funcs virtio_gpu_enc_helper_funcs = {
359static const struct drm_connector_helper_funcs virtio_gpu_conn_helper_funcs = { 223static const struct drm_connector_helper_funcs virtio_gpu_conn_helper_funcs = {
360 .get_modes = virtio_gpu_conn_get_modes, 224 .get_modes = virtio_gpu_conn_get_modes,
361 .mode_valid = virtio_gpu_conn_mode_valid, 225 .mode_valid = virtio_gpu_conn_mode_valid,
362 .best_encoder = virtio_gpu_best_encoder,
363}; 226};
364 227
365static enum drm_connector_status virtio_gpu_conn_detect( 228static enum drm_connector_status virtio_gpu_conn_detect(
@@ -406,7 +269,7 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
406 struct drm_connector *connector = &output->conn; 269 struct drm_connector *connector = &output->conn;
407 struct drm_encoder *encoder = &output->enc; 270 struct drm_encoder *encoder = &output->enc;
408 struct drm_crtc *crtc = &output->crtc; 271 struct drm_crtc *crtc = &output->crtc;
409 struct drm_plane *plane; 272 struct drm_plane *primary, *cursor;
410 273
411 output->index = index; 274 output->index = index;
412 if (index == 0) { 275 if (index == 0) {
@@ -415,13 +278,17 @@ static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index)
415 output->info.r.height = cpu_to_le32(YRES_DEF); 278 output->info.r.height = cpu_to_le32(YRES_DEF);
416 } 279 }
417 280
418 plane = virtio_gpu_plane_init(vgdev, index); 281 primary = virtio_gpu_plane_init(vgdev, DRM_PLANE_TYPE_PRIMARY, index);
419 if (IS_ERR(plane)) 282 if (IS_ERR(primary))
420 return PTR_ERR(plane); 283 return PTR_ERR(primary);
421 drm_crtc_init_with_planes(dev, crtc, plane, NULL, 284 cursor = virtio_gpu_plane_init(vgdev, DRM_PLANE_TYPE_CURSOR, index);
285 if (IS_ERR(cursor))
286 return PTR_ERR(cursor);
287 drm_crtc_init_with_planes(dev, crtc, primary, cursor,
422 &virtio_gpu_crtc_funcs, NULL); 288 &virtio_gpu_crtc_funcs, NULL);
423 drm_crtc_helper_add(crtc, &virtio_gpu_crtc_helper_funcs); 289 drm_crtc_helper_add(crtc, &virtio_gpu_crtc_helper_funcs);
424 plane->crtc = crtc; 290 primary->crtc = crtc;
291 cursor->crtc = crtc;
425 292
426 drm_connector_init(dev, connector, &virtio_gpu_connector_funcs, 293 drm_connector_init(dev, connector, &virtio_gpu_connector_funcs,
427 DRM_MODE_CONNECTOR_VIRTUAL); 294 DRM_MODE_CONNECTOR_VIRTUAL);
@@ -458,14 +325,31 @@ virtio_gpu_user_framebuffer_create(struct drm_device *dev,
458 ret = virtio_gpu_framebuffer_init(dev, virtio_gpu_fb, mode_cmd, obj); 325 ret = virtio_gpu_framebuffer_init(dev, virtio_gpu_fb, mode_cmd, obj);
459 if (ret) { 326 if (ret) {
460 kfree(virtio_gpu_fb); 327 kfree(virtio_gpu_fb);
461 if (obj) 328 drm_gem_object_unreference_unlocked(obj);
462 drm_gem_object_unreference_unlocked(obj);
463 return NULL; 329 return NULL;
464 } 330 }
465 331
466 return &virtio_gpu_fb->base; 332 return &virtio_gpu_fb->base;
467} 333}
468 334
335static void vgdev_atomic_commit_tail(struct drm_atomic_state *state)
336{
337 struct drm_device *dev = state->dev;
338
339 drm_atomic_helper_commit_modeset_disables(dev, state);
340 drm_atomic_helper_commit_modeset_enables(dev, state);
341 drm_atomic_helper_commit_planes(dev, state, true);
342
343 drm_atomic_helper_commit_hw_done(state);
344
345 drm_atomic_helper_wait_for_vblanks(dev, state);
346 drm_atomic_helper_cleanup_planes(dev, state);
347}
348
349static struct drm_mode_config_helper_funcs virtio_mode_config_helpers = {
350 .atomic_commit_tail = vgdev_atomic_commit_tail,
351};
352
469static const struct drm_mode_config_funcs virtio_gpu_mode_funcs = { 353static const struct drm_mode_config_funcs virtio_gpu_mode_funcs = {
470 .fb_create = virtio_gpu_user_framebuffer_create, 354 .fb_create = virtio_gpu_user_framebuffer_create,
471 .atomic_check = drm_atomic_helper_check, 355 .atomic_check = drm_atomic_helper_check,
@@ -477,7 +361,8 @@ int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev)
477 int i; 361 int i;
478 362
479 drm_mode_config_init(vgdev->ddev); 363 drm_mode_config_init(vgdev->ddev);
480 vgdev->ddev->mode_config.funcs = (void *)&virtio_gpu_mode_funcs; 364 vgdev->ddev->mode_config.funcs = &virtio_gpu_mode_funcs;
365 vgdev->ddev->mode_config.helper_private = &virtio_mode_config_helpers;
481 366
482 /* modes will be validated against the framebuffer size */ 367 /* modes will be validated against the framebuffer size */
483 vgdev->ddev->mode_config.min_width = XRES_MIN; 368 vgdev->ddev->mode_config.min_width = XRES_MIN;
diff --git a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
index 88a39165edd5..7f0e93f87a55 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
@@ -27,16 +27,6 @@
27 27
28#include "virtgpu_drv.h" 28#include "virtgpu_drv.h"
29 29
30int drm_virtio_set_busid(struct drm_device *dev, struct drm_master *master)
31{
32 struct pci_dev *pdev = dev->pdev;
33
34 if (pdev) {
35 return drm_pci_set_busid(dev, master);
36 }
37 return 0;
38}
39
40static void virtio_pci_kick_out_firmware_fb(struct pci_dev *pci_dev) 30static void virtio_pci_kick_out_firmware_fb(struct pci_dev *pci_dev)
41{ 31{
42 struct apertures_struct *ap; 32 struct apertures_struct *ap;
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 3cc7afa77a35..c13f70cfc461 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -117,7 +117,6 @@ static const struct file_operations virtio_gpu_driver_fops = {
117 117
118static struct drm_driver driver = { 118static struct drm_driver driver = {
119 .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER | DRIVER_ATOMIC, 119 .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME | DRIVER_RENDER | DRIVER_ATOMIC,
120 .set_busid = drm_virtio_set_busid,
121 .load = virtio_gpu_driver_load, 120 .load = virtio_gpu_driver_load,
122 .unload = virtio_gpu_driver_unload, 121 .unload = virtio_gpu_driver_unload,
123 .open = virtio_gpu_driver_open, 122 .open = virtio_gpu_driver_open,
@@ -143,7 +142,7 @@ static struct drm_driver driver = {
143 .gem_prime_vunmap = virtgpu_gem_prime_vunmap, 142 .gem_prime_vunmap = virtgpu_gem_prime_vunmap,
144 .gem_prime_mmap = virtgpu_gem_prime_mmap, 143 .gem_prime_mmap = virtgpu_gem_prime_mmap,
145 144
146 .gem_free_object = virtio_gpu_gem_free_object, 145 .gem_free_object_unlocked = virtio_gpu_gem_free_object,
147 .gem_open_object = virtio_gpu_gem_object_open, 146 .gem_open_object = virtio_gpu_gem_object_open,
148 .gem_close_object = virtio_gpu_gem_object_close, 147 .gem_close_object = virtio_gpu_gem_object_close,
149 .fops = &virtio_gpu_driver_fops, 148 .fops = &virtio_gpu_driver_fops,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 0a54f43f846a..b18ef3111f0c 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -33,6 +33,7 @@
33 33
34#include <drm/drmP.h> 34#include <drm/drmP.h>
35#include <drm/drm_gem.h> 35#include <drm/drm_gem.h>
36#include <drm/drm_atomic.h>
36#include <drm/drm_crtc_helper.h> 37#include <drm/drm_crtc_helper.h>
37#include <ttm/ttm_bo_api.h> 38#include <ttm/ttm_bo_api.h>
38#include <ttm/ttm_bo_driver.h> 39#include <ttm/ttm_bo_driver.h>
@@ -48,7 +49,6 @@
48#define DRIVER_PATCHLEVEL 1 49#define DRIVER_PATCHLEVEL 1
49 50
50/* virtgpu_drm_bus.c */ 51/* virtgpu_drm_bus.c */
51int drm_virtio_set_busid(struct drm_device *dev, struct drm_master *master);
52int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev); 52int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev);
53 53
54struct virtio_gpu_object { 54struct virtio_gpu_object {
@@ -335,6 +335,7 @@ void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev);
335 335
336/* virtio_gpu_plane.c */ 336/* virtio_gpu_plane.c */
337struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev, 337struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
338 enum drm_plane_type type,
338 int index); 339 int index);
339 340
340/* virtio_gpu_ttm.c */ 341/* virtio_gpu_ttm.c */
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index 70b44a2345ab..925ca25209df 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -38,6 +38,10 @@ static const uint32_t virtio_gpu_formats[] = {
38 DRM_FORMAT_ABGR8888, 38 DRM_FORMAT_ABGR8888,
39}; 39};
40 40
41static const uint32_t virtio_gpu_cursor_formats[] = {
42 DRM_FORMAT_ARGB8888,
43};
44
41static void virtio_gpu_plane_destroy(struct drm_plane *plane) 45static void virtio_gpu_plane_destroy(struct drm_plane *plane)
42{ 46{
43 kfree(plane); 47 kfree(plane);
@@ -58,16 +62,22 @@ static int virtio_gpu_plane_atomic_check(struct drm_plane *plane,
58 return 0; 62 return 0;
59} 63}
60 64
61static void virtio_gpu_plane_atomic_update(struct drm_plane *plane, 65static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
62 struct drm_plane_state *old_state) 66 struct drm_plane_state *old_state)
63{ 67{
64 struct drm_device *dev = plane->dev; 68 struct drm_device *dev = plane->dev;
65 struct virtio_gpu_device *vgdev = dev->dev_private; 69 struct virtio_gpu_device *vgdev = dev->dev_private;
66 struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(plane->crtc); 70 struct virtio_gpu_output *output = NULL;
67 struct virtio_gpu_framebuffer *vgfb; 71 struct virtio_gpu_framebuffer *vgfb;
68 struct virtio_gpu_object *bo; 72 struct virtio_gpu_object *bo;
69 uint32_t handle; 73 uint32_t handle;
70 74
75 if (plane->state->crtc)
76 output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
77 if (old_state->crtc)
78 output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
79 WARN_ON(!output);
80
71 if (plane->state->fb) { 81 if (plane->state->fb) {
72 vgfb = to_virtio_gpu_framebuffer(plane->state->fb); 82 vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
73 bo = gem_to_virtio_gpu_obj(vgfb->obj); 83 bo = gem_to_virtio_gpu_obj(vgfb->obj);
@@ -75,55 +85,149 @@ static void virtio_gpu_plane_atomic_update(struct drm_plane *plane,
75 if (bo->dumb) { 85 if (bo->dumb) {
76 virtio_gpu_cmd_transfer_to_host_2d 86 virtio_gpu_cmd_transfer_to_host_2d
77 (vgdev, handle, 0, 87 (vgdev, handle, 0,
78 cpu_to_le32(plane->state->crtc_w), 88 cpu_to_le32(plane->state->src_w >> 16),
79 cpu_to_le32(plane->state->crtc_h), 89 cpu_to_le32(plane->state->src_h >> 16),
80 plane->state->crtc_x, plane->state->crtc_y, NULL); 90 plane->state->src_x >> 16,
91 plane->state->src_y >> 16, NULL);
81 } 92 }
82 } else { 93 } else {
83 handle = 0; 94 handle = 0;
84 } 95 }
85 96
86 DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d\n", handle, 97 DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d, src %dx%d+%d+%d\n", handle,
87 plane->state->crtc_w, plane->state->crtc_h, 98 plane->state->crtc_w, plane->state->crtc_h,
88 plane->state->crtc_x, plane->state->crtc_y); 99 plane->state->crtc_x, plane->state->crtc_y,
100 plane->state->src_w >> 16,
101 plane->state->src_h >> 16,
102 plane->state->src_x >> 16,
103 plane->state->src_y >> 16);
89 virtio_gpu_cmd_set_scanout(vgdev, output->index, handle, 104 virtio_gpu_cmd_set_scanout(vgdev, output->index, handle,
90 plane->state->crtc_w, 105 plane->state->src_w >> 16,
91 plane->state->crtc_h, 106 plane->state->src_h >> 16,
92 plane->state->crtc_x, 107 plane->state->src_x >> 16,
93 plane->state->crtc_y); 108 plane->state->src_y >> 16);
94 virtio_gpu_cmd_resource_flush(vgdev, handle, 109 virtio_gpu_cmd_resource_flush(vgdev, handle,
95 plane->state->crtc_x, 110 plane->state->src_x >> 16,
96 plane->state->crtc_y, 111 plane->state->src_y >> 16,
97 plane->state->crtc_w, 112 plane->state->src_w >> 16,
98 plane->state->crtc_h); 113 plane->state->src_h >> 16);
99} 114}
100 115
116static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
117 struct drm_plane_state *old_state)
118{
119 struct drm_device *dev = plane->dev;
120 struct virtio_gpu_device *vgdev = dev->dev_private;
121 struct virtio_gpu_output *output = NULL;
122 struct virtio_gpu_framebuffer *vgfb;
123 struct virtio_gpu_fence *fence = NULL;
124 struct virtio_gpu_object *bo = NULL;
125 uint32_t handle;
126 int ret = 0;
101 127
102static const struct drm_plane_helper_funcs virtio_gpu_plane_helper_funcs = { 128 if (plane->state->crtc)
129 output = drm_crtc_to_virtio_gpu_output(plane->state->crtc);
130 if (old_state->crtc)
131 output = drm_crtc_to_virtio_gpu_output(old_state->crtc);
132 WARN_ON(!output);
133
134 if (plane->state->fb) {
135 vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
136 bo = gem_to_virtio_gpu_obj(vgfb->obj);
137 handle = bo->hw_res_handle;
138 } else {
139 handle = 0;
140 }
141
142 if (bo && bo->dumb && (plane->state->fb != old_state->fb)) {
143 /* new cursor -- update & wait */
144 virtio_gpu_cmd_transfer_to_host_2d
145 (vgdev, handle, 0,
146 cpu_to_le32(plane->state->crtc_w),
147 cpu_to_le32(plane->state->crtc_h),
148 0, 0, &fence);
149 ret = virtio_gpu_object_reserve(bo, false);
150 if (!ret) {
151 reservation_object_add_excl_fence(bo->tbo.resv,
152 &fence->f);
153 fence_put(&fence->f);
154 fence = NULL;
155 virtio_gpu_object_unreserve(bo);
156 virtio_gpu_object_wait(bo, false);
157 }
158 }
159
160 if (plane->state->fb != old_state->fb) {
161 DRM_DEBUG("update, handle %d, pos +%d+%d, hot %d,%d\n", handle,
162 plane->state->crtc_x,
163 plane->state->crtc_y,
164 plane->state->fb ? plane->state->fb->hot_x : 0,
165 plane->state->fb ? plane->state->fb->hot_y : 0);
166 output->cursor.hdr.type =
167 cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR);
168 output->cursor.resource_id = cpu_to_le32(handle);
169 if (plane->state->fb) {
170 output->cursor.hot_x =
171 cpu_to_le32(plane->state->fb->hot_x);
172 output->cursor.hot_y =
173 cpu_to_le32(plane->state->fb->hot_y);
174 } else {
175 output->cursor.hot_x = cpu_to_le32(0);
176 output->cursor.hot_y = cpu_to_le32(0);
177 }
178 } else {
179 DRM_DEBUG("move +%d+%d\n",
180 plane->state->crtc_x,
181 plane->state->crtc_y);
182 output->cursor.hdr.type =
183 cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR);
184 }
185 output->cursor.pos.x = cpu_to_le32(plane->state->crtc_x);
186 output->cursor.pos.y = cpu_to_le32(plane->state->crtc_y);
187 virtio_gpu_cursor_ping(vgdev, output);
188}
189
190static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs = {
191 .atomic_check = virtio_gpu_plane_atomic_check,
192 .atomic_update = virtio_gpu_primary_plane_update,
193};
194
195static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = {
103 .atomic_check = virtio_gpu_plane_atomic_check, 196 .atomic_check = virtio_gpu_plane_atomic_check,
104 .atomic_update = virtio_gpu_plane_atomic_update, 197 .atomic_update = virtio_gpu_cursor_plane_update,
105}; 198};
106 199
107struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev, 200struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
201 enum drm_plane_type type,
108 int index) 202 int index)
109{ 203{
110 struct drm_device *dev = vgdev->ddev; 204 struct drm_device *dev = vgdev->ddev;
205 const struct drm_plane_helper_funcs *funcs;
111 struct drm_plane *plane; 206 struct drm_plane *plane;
112 int ret; 207 const uint32_t *formats;
208 int ret, nformats;
113 209
114 plane = kzalloc(sizeof(*plane), GFP_KERNEL); 210 plane = kzalloc(sizeof(*plane), GFP_KERNEL);
115 if (!plane) 211 if (!plane)
116 return ERR_PTR(-ENOMEM); 212 return ERR_PTR(-ENOMEM);
117 213
214 if (type == DRM_PLANE_TYPE_CURSOR) {
215 formats = virtio_gpu_cursor_formats;
216 nformats = ARRAY_SIZE(virtio_gpu_cursor_formats);
217 funcs = &virtio_gpu_cursor_helper_funcs;
218 } else {
219 formats = virtio_gpu_formats;
220 nformats = ARRAY_SIZE(virtio_gpu_formats);
221 funcs = &virtio_gpu_primary_helper_funcs;
222 }
118 ret = drm_universal_plane_init(dev, plane, 1 << index, 223 ret = drm_universal_plane_init(dev, plane, 1 << index,
119 &virtio_gpu_plane_funcs, 224 &virtio_gpu_plane_funcs,
120 virtio_gpu_formats, 225 formats, nformats,
121 ARRAY_SIZE(virtio_gpu_formats), 226 type, NULL);
122 DRM_PLANE_TYPE_PRIMARY, NULL);
123 if (ret) 227 if (ret)
124 goto err_plane_init; 228 goto err_plane_init;
125 229
126 drm_plane_helper_add(plane, &virtio_gpu_plane_helper_funcs); 230 drm_plane_helper_add(plane, funcs);
127 return plane; 231 return plane;
128 232
129err_plane_init: 233err_plane_init:
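The crtc_* to src_* switch in the primary plane update above matters because DRM plane source coordinates are 16.16 fixed point, hence the ">> 16" before the values are sent to the host. A small standalone illustration of that conversion:

#include <stdint.h>
#include <stdio.h>

static uint32_t fixed16_to_int(uint32_t v)
{
	return v >> 16;		/* drop the fractional 16 bits */
}

int main(void)
{
	uint32_t src_w = 1024u << 16;	/* 1024.0 in 16.16 fixed point */
	uint32_t src_h = 768u << 16;

	printf("scanout %ux%u\n", fixed16_to_int(src_w), fixed16_to_int(src_h));
	return 0;
}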
diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c
index a0580815629f..80482ac5f95d 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ttm.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c
@@ -375,6 +375,12 @@ static int virtio_gpu_bo_move(struct ttm_buffer_object *bo,
375 bool no_wait_gpu, 375 bool no_wait_gpu,
376 struct ttm_mem_reg *new_mem) 376 struct ttm_mem_reg *new_mem)
377{ 377{
378 int ret;
379
380 ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
381 if (ret)
382 return ret;
383
378 virtio_gpu_move_null(bo, new_mem); 384 virtio_gpu_move_null(bo, new_mem);
379 return 0; 385 return 0;
380} 386}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 67cebb23c940..aa04fb0159a7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -293,13 +293,10 @@ static int vmw_cmdbuf_header_submit(struct vmw_cmdbuf_header *header)
293 struct vmw_cmdbuf_man *man = header->man; 293 struct vmw_cmdbuf_man *man = header->man;
294 u32 val; 294 u32 val;
295 295
296 if (sizeof(header->handle) > 4) 296 val = upper_32_bits(header->handle);
297 val = (header->handle >> 32);
298 else
299 val = 0;
300 vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val); 297 vmw_write(man->dev_priv, SVGA_REG_COMMAND_HIGH, val);
301 298
302 val = (header->handle & 0xFFFFFFFFULL); 299 val = lower_32_bits(header->handle);
303 val |= header->cb_context & SVGA_CB_CONTEXT_MASK; 300 val |= header->cb_context & SVGA_CB_CONTEXT_MASK;
304 vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val); 301 vmw_write(man->dev_priv, SVGA_REG_COMMAND_LOW, val);
305 302
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 8d528fcf6e96..e8ae3dc476d1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1053,15 +1053,14 @@ static struct vmw_master *vmw_master_check(struct drm_device *dev,
1053 struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); 1053 struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
1054 struct vmw_master *vmaster; 1054 struct vmw_master *vmaster;
1055 1055
1056 if (file_priv->minor->type != DRM_MINOR_LEGACY || 1056 if (!drm_is_primary_client(file_priv) || !(flags & DRM_AUTH))
1057 !(flags & DRM_AUTH))
1058 return NULL; 1057 return NULL;
1059 1058
1060 ret = mutex_lock_interruptible(&dev->master_mutex); 1059 ret = mutex_lock_interruptible(&dev->master_mutex);
1061 if (unlikely(ret != 0)) 1060 if (unlikely(ret != 0))
1062 return ERR_PTR(-ERESTARTSYS); 1061 return ERR_PTR(-ERESTARTSYS);
1063 1062
1064 if (file_priv->is_master) { 1063 if (drm_is_current_master(file_priv)) {
1065 mutex_unlock(&dev->master_mutex); 1064 mutex_unlock(&dev->master_mutex);
1066 return NULL; 1065 return NULL;
1067 } 1066 }
@@ -1240,8 +1239,7 @@ static int vmw_master_set(struct drm_device *dev,
1240} 1239}
1241 1240
1242static void vmw_master_drop(struct drm_device *dev, 1241static void vmw_master_drop(struct drm_device *dev,
1243 struct drm_file *file_priv, 1242 struct drm_file *file_priv)
1244 bool from_release)
1245{ 1243{
1246 struct vmw_private *dev_priv = vmw_priv(dev); 1244 struct vmw_private *dev_priv = vmw_priv(dev);
1247 struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); 1245 struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 89fb19443a3f..74304b03f9d4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -32,6 +32,7 @@
32#include <drm/drmP.h> 32#include <drm/drmP.h>
33#include <drm/vmwgfx_drm.h> 33#include <drm/vmwgfx_drm.h>
34#include <drm/drm_hashtab.h> 34#include <drm/drm_hashtab.h>
35#include <drm/drm_auth.h>
35#include <linux/suspend.h> 36#include <linux/suspend.h>
36#include <drm/ttm/ttm_bo_driver.h> 37#include <drm/ttm/ttm_bo_driver.h>
37#include <drm/ttm/ttm_object.h> 38#include <drm/ttm/ttm_object.h>
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 1a1a87cbf109..dc5beff2b4aa 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -3625,9 +3625,7 @@ static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
3625 (sw_context->cmd_bounce_size >> 1)); 3625 (sw_context->cmd_bounce_size >> 1));
3626 } 3626 }
3627 3627
3628 if (sw_context->cmd_bounce != NULL) 3628 vfree(sw_context->cmd_bounce);
3629 vfree(sw_context->cmd_bounce);
3630
3631 sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size); 3629 sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);
3632 3630
3633 if (sw_context->cmd_bounce == NULL) { 3631 if (sw_context->cmd_bounce == NULL) {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index e959df6ede83..26ac8e80a478 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -46,7 +46,7 @@ struct vmw_fence_manager {
46 bool goal_irq_on; /* Protected by @goal_irq_mutex */ 46 bool goal_irq_on; /* Protected by @goal_irq_mutex */
47 bool seqno_valid; /* Protected by @lock, and may not be set to true 47 bool seqno_valid; /* Protected by @lock, and may not be set to true
48 without the @goal_irq_mutex held. */ 48 without the @goal_irq_mutex held. */
49 unsigned ctx; 49 u64 ctx;
50}; 50};
51 51
52struct vmw_user_fence { 52struct vmw_user_fence {
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index e29da45a2847..bf28ccc150df 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1404,9 +1404,9 @@ static int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
1404 return 0; 1404 return 0;
1405} 1405}
1406 1406
1407void vmw_du_crtc_gamma_set(struct drm_crtc *crtc, 1407int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
1408 u16 *r, u16 *g, u16 *b, 1408 u16 *r, u16 *g, u16 *b,
1409 uint32_t start, uint32_t size) 1409 uint32_t size)
1410{ 1410{
1411 struct vmw_private *dev_priv = vmw_priv(crtc->dev); 1411 struct vmw_private *dev_priv = vmw_priv(crtc->dev);
1412 int i; 1412 int i;
@@ -1418,6 +1418,8 @@ void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
1418 vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8); 1418 vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
1419 vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8); 1419 vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
1420 } 1420 }
1421
1422 return 0;
1421} 1423}
1422 1424
1423int vmw_du_connector_dpms(struct drm_connector *connector, int mode) 1425int vmw_du_connector_dpms(struct drm_connector *connector, int mode)
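vmw_du_crtc_gamma_set() is adjusted to the updated drm_crtc_funcs prototype: the start offset disappears and the hook now returns an int so that drivers whose hardware can reject a gamma update have a way to report it. The SVGA palette writes cannot fail, hence the unconditional return 0 at the end.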
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index 57203212c501..ff4803c107bc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -195,9 +195,9 @@ struct vmw_display_unit {
195void vmw_du_cleanup(struct vmw_display_unit *du); 195void vmw_du_cleanup(struct vmw_display_unit *du);
196void vmw_du_crtc_save(struct drm_crtc *crtc); 196void vmw_du_crtc_save(struct drm_crtc *crtc);
197void vmw_du_crtc_restore(struct drm_crtc *crtc); 197void vmw_du_crtc_restore(struct drm_crtc *crtc);
198void vmw_du_crtc_gamma_set(struct drm_crtc *crtc, 198int vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
199 u16 *r, u16 *g, u16 *b, 199 u16 *r, u16 *g, u16 *b,
200 uint32_t start, uint32_t size); 200 uint32_t size);
201int vmw_du_crtc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv, 201int vmw_du_crtc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
202 uint32_t handle, uint32_t width, uint32_t height, 202 uint32_t handle, uint32_t width, uint32_t height,
203 int32_t hot_x, int32_t hot_y); 203 int32_t hot_x, int32_t hot_y);
diff --git a/drivers/gpu/host1x/cdma.c b/drivers/gpu/host1x/cdma.c
index a18db4d5347c..c5d82a8a2ec9 100644
--- a/drivers/gpu/host1x/cdma.c
+++ b/drivers/gpu/host1x/cdma.c
@@ -96,12 +96,12 @@ fail:
96 */ 96 */
97static void host1x_pushbuffer_push(struct push_buffer *pb, u32 op1, u32 op2) 97static void host1x_pushbuffer_push(struct push_buffer *pb, u32 op1, u32 op2)
98{ 98{
99 u32 pos = pb->pos; 99 u32 *p = (u32 *)((void *)pb->mapped + pb->pos);
100 u32 *p = (u32 *)((void *)pb->mapped + pos); 100
101 WARN_ON(pos == pb->fence); 101 WARN_ON(pb->pos == pb->fence);
102 *(p++) = op1; 102 *(p++) = op1;
103 *(p++) = op2; 103 *(p++) = op2;
104 pb->pos = (pos + 8) & (pb->size_bytes - 1); 104 pb->pos = (pb->pos + 8) & (pb->size_bytes - 1);
105} 105}
106 106
107/* 107/*
@@ -134,14 +134,19 @@ unsigned int host1x_cdma_wait_locked(struct host1x_cdma *cdma,
134 enum cdma_event event) 134 enum cdma_event event)
135{ 135{
136 for (;;) { 136 for (;;) {
137 struct push_buffer *pb = &cdma->push_buffer;
137 unsigned int space; 138 unsigned int space;
138 139
139 if (event == CDMA_EVENT_SYNC_QUEUE_EMPTY) 140 switch (event) {
141 case CDMA_EVENT_SYNC_QUEUE_EMPTY:
140 space = list_empty(&cdma->sync_queue) ? 1 : 0; 142 space = list_empty(&cdma->sync_queue) ? 1 : 0;
141 else if (event == CDMA_EVENT_PUSH_BUFFER_SPACE) { 143 break;
142 struct push_buffer *pb = &cdma->push_buffer; 144
145 case CDMA_EVENT_PUSH_BUFFER_SPACE:
143 space = host1x_pushbuffer_space(pb); 146 space = host1x_pushbuffer_space(pb);
144 } else { 147 break;
148
149 default:
145 WARN_ON(1); 150 WARN_ON(1);
146 return -EINVAL; 151 return -EINVAL;
147 } 152 }
@@ -159,12 +164,14 @@ unsigned int host1x_cdma_wait_locked(struct host1x_cdma *cdma,
159 mutex_lock(&cdma->lock); 164 mutex_lock(&cdma->lock);
160 continue; 165 continue;
161 } 166 }
167
162 cdma->event = event; 168 cdma->event = event;
163 169
164 mutex_unlock(&cdma->lock); 170 mutex_unlock(&cdma->lock);
165 down(&cdma->sem); 171 down(&cdma->sem);
166 mutex_lock(&cdma->lock); 172 mutex_lock(&cdma->lock);
167 } 173 }
174
168 return 0; 175 return 0;
169} 176}
170 177
@@ -234,6 +241,7 @@ static void update_cdma_locked(struct host1x_cdma *cdma)
234 /* Start timer on next pending syncpt */ 241 /* Start timer on next pending syncpt */
235 if (job->timeout) 242 if (job->timeout)
236 cdma_start_timer_locked(cdma, job); 243 cdma_start_timer_locked(cdma, job);
244
237 break; 245 break;
238 } 246 }
239 247
@@ -247,7 +255,9 @@ static void update_cdma_locked(struct host1x_cdma *cdma)
247 /* Pop push buffer slots */ 255 /* Pop push buffer slots */
248 if (job->num_slots) { 256 if (job->num_slots) {
249 struct push_buffer *pb = &cdma->push_buffer; 257 struct push_buffer *pb = &cdma->push_buffer;
258
250 host1x_pushbuffer_pop(pb, job->num_slots); 259 host1x_pushbuffer_pop(pb, job->num_slots);
260
251 if (cdma->event == CDMA_EVENT_PUSH_BUFFER_SPACE) 261 if (cdma->event == CDMA_EVENT_PUSH_BUFFER_SPACE)
252 signal = true; 262 signal = true;
253 } 263 }
@@ -269,11 +279,9 @@ static void update_cdma_locked(struct host1x_cdma *cdma)
269void host1x_cdma_update_sync_queue(struct host1x_cdma *cdma, 279void host1x_cdma_update_sync_queue(struct host1x_cdma *cdma,
270 struct device *dev) 280 struct device *dev)
271{ 281{
272 u32 restart_addr;
273 u32 syncpt_incrs;
274 struct host1x_job *job = NULL;
275 u32 syncpt_val;
276 struct host1x *host1x = cdma_to_host1x(cdma); 282 struct host1x *host1x = cdma_to_host1x(cdma);
283 u32 restart_addr, syncpt_incrs, syncpt_val;
284 struct host1x_job *job = NULL;
277 285
278 syncpt_val = host1x_syncpt_load(cdma->timeout.syncpt); 286 syncpt_val = host1x_syncpt_load(cdma->timeout.syncpt);
279 287
@@ -342,9 +350,11 @@ void host1x_cdma_update_sync_queue(struct host1x_cdma *cdma,
342 syncpt_val += syncpt_incrs; 350 syncpt_val += syncpt_incrs;
343 } 351 }
344 352
345 /* The following submits from the same client may be dependent on the 353 /*
354 * The following submits from the same client may be dependent on the
346 * failed submit and therefore they may fail. Force a small timeout 355 * failed submit and therefore they may fail. Force a small timeout
347 * to make the queue cleanup faster */ 356 * to make the queue cleanup faster.
357 */
348 358
349 list_for_each_entry_from(job, &cdma->sync_queue, list) 359 list_for_each_entry_from(job, &cdma->sync_queue, list)
350 if (job->client == cdma->timeout.client) 360 if (job->client == cdma->timeout.client)
@@ -375,6 +385,7 @@ int host1x_cdma_init(struct host1x_cdma *cdma)
375 err = host1x_pushbuffer_init(&cdma->push_buffer); 385 err = host1x_pushbuffer_init(&cdma->push_buffer);
376 if (err) 386 if (err)
377 return err; 387 return err;
388
378 return 0; 389 return 0;
379} 390}
380 391
@@ -410,6 +421,7 @@ int host1x_cdma_begin(struct host1x_cdma *cdma, struct host1x_job *job)
410 /* init state on first submit with timeout value */ 421 /* init state on first submit with timeout value */
411 if (!cdma->timeout.initialized) { 422 if (!cdma->timeout.initialized) {
412 int err; 423 int err;
424
413 err = host1x_hw_cdma_timeout_init(host1x, cdma, 425 err = host1x_hw_cdma_timeout_init(host1x, cdma,
414 job->syncpt_id); 426 job->syncpt_id);
415 if (err) { 427 if (err) {
@@ -418,6 +430,7 @@ int host1x_cdma_begin(struct host1x_cdma *cdma, struct host1x_job *job)
418 } 430 }
419 } 431 }
420 } 432 }
433
421 if (!cdma->running) 434 if (!cdma->running)
422 host1x_hw_cdma_start(host1x, cdma); 435 host1x_hw_cdma_start(host1x, cdma);
423 436
@@ -448,6 +461,7 @@ void host1x_cdma_push(struct host1x_cdma *cdma, u32 op1, u32 op2)
448 slots_free = host1x_cdma_wait_locked(cdma, 461 slots_free = host1x_cdma_wait_locked(cdma,
449 CDMA_EVENT_PUSH_BUFFER_SPACE); 462 CDMA_EVENT_PUSH_BUFFER_SPACE);
450 } 463 }
464
451 cdma->slots_free = slots_free - 1; 465 cdma->slots_free = slots_free - 1;
452 cdma->slots_used++; 466 cdma->slots_used++;
453 host1x_pushbuffer_push(pb, op1, op2); 467 host1x_pushbuffer_push(pb, op1, op2);
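host1x_pushbuffer_push() now reads and updates pb->pos directly and keeps the wraparound as (pos + 8) & (size_bytes - 1). The mask trick only works because the push buffer size is a power of two, so the AND is equivalent to a modulo. A standalone sketch of the arithmetic, with PB_SIZE as an illustrative stand-in for pb->size_bytes:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PB_SIZE 4096u   /* must be a power of two for the mask trick */

int main(void)
{
        uint32_t pos = PB_SIZE - 8;     /* write position just before the end */

        /* each push consumes two 32-bit words, i.e. 8 bytes */
        pos = (pos + 8) & (PB_SIZE - 1);
        assert(pos == 0);               /* wrapped back to the start */

        printf("next slot at offset %u\n", (unsigned int)pos);
        return 0;
}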
diff --git a/drivers/gpu/host1x/channel.c b/drivers/gpu/host1x/channel.c
index b4ae3affb987..8f437d924c10 100644
--- a/drivers/gpu/host1x/channel.c
+++ b/drivers/gpu/host1x/channel.c
@@ -83,9 +83,10 @@ EXPORT_SYMBOL(host1x_channel_put);
83struct host1x_channel *host1x_channel_request(struct device *dev) 83struct host1x_channel *host1x_channel_request(struct device *dev)
84{ 84{
85 struct host1x *host = dev_get_drvdata(dev->parent); 85 struct host1x *host = dev_get_drvdata(dev->parent);
86 int max_channels = host->info->nb_channels; 86 unsigned int max_channels = host->info->nb_channels;
87 struct host1x_channel *channel = NULL; 87 struct host1x_channel *channel = NULL;
88 int index, err; 88 unsigned long index;
89 int err;
89 90
90 mutex_lock(&host->chlist_mutex); 91 mutex_lock(&host->chlist_mutex);
91 92
diff --git a/drivers/gpu/host1x/debug.c b/drivers/gpu/host1x/debug.c
index ee3d12b51c50..d9330fcc62ad 100644
--- a/drivers/gpu/host1x/debug.c
+++ b/drivers/gpu/host1x/debug.c
@@ -39,6 +39,7 @@ void host1x_debug_output(struct output *o, const char *fmt, ...)
39 va_start(args, fmt); 39 va_start(args, fmt);
40 len = vsnprintf(o->buf, sizeof(o->buf), fmt, args); 40 len = vsnprintf(o->buf, sizeof(o->buf), fmt, args);
41 va_end(args); 41 va_end(args);
42
42 o->fn(o->ctx, o->buf, len); 43 o->fn(o->ctx, o->buf, len);
43} 44}
44 45
@@ -48,13 +49,17 @@ static int show_channels(struct host1x_channel *ch, void *data, bool show_fifo)
48 struct output *o = data; 49 struct output *o = data;
49 50
50 mutex_lock(&ch->reflock); 51 mutex_lock(&ch->reflock);
52
51 if (ch->refcount) { 53 if (ch->refcount) {
52 mutex_lock(&ch->cdma.lock); 54 mutex_lock(&ch->cdma.lock);
55
53 if (show_fifo) 56 if (show_fifo)
54 host1x_hw_show_channel_fifo(m, ch, o); 57 host1x_hw_show_channel_fifo(m, ch, o);
58
55 host1x_hw_show_channel_cdma(m, ch, o); 59 host1x_hw_show_channel_cdma(m, ch, o);
56 mutex_unlock(&ch->cdma.lock); 60 mutex_unlock(&ch->cdma.lock);
57 } 61 }
62
58 mutex_unlock(&ch->reflock); 63 mutex_unlock(&ch->reflock);
59 64
60 return 0; 65 return 0;
@@ -62,22 +67,27 @@ static int show_channels(struct host1x_channel *ch, void *data, bool show_fifo)
62 67
63static void show_syncpts(struct host1x *m, struct output *o) 68static void show_syncpts(struct host1x *m, struct output *o)
64{ 69{
65 int i; 70 unsigned int i;
71
66 host1x_debug_output(o, "---- syncpts ----\n"); 72 host1x_debug_output(o, "---- syncpts ----\n");
73
67 for (i = 0; i < host1x_syncpt_nb_pts(m); i++) { 74 for (i = 0; i < host1x_syncpt_nb_pts(m); i++) {
68 u32 max = host1x_syncpt_read_max(m->syncpt + i); 75 u32 max = host1x_syncpt_read_max(m->syncpt + i);
69 u32 min = host1x_syncpt_load(m->syncpt + i); 76 u32 min = host1x_syncpt_load(m->syncpt + i);
77
70 if (!min && !max) 78 if (!min && !max)
71 continue; 79 continue;
72 host1x_debug_output(o, "id %d (%s) min %d max %d\n", 80
81 host1x_debug_output(o, "id %u (%s) min %d max %d\n",
73 i, m->syncpt[i].name, min, max); 82 i, m->syncpt[i].name, min, max);
74 } 83 }
75 84
76 for (i = 0; i < host1x_syncpt_nb_bases(m); i++) { 85 for (i = 0; i < host1x_syncpt_nb_bases(m); i++) {
77 u32 base_val; 86 u32 base_val;
87
78 base_val = host1x_syncpt_load_wait_base(m->syncpt + i); 88 base_val = host1x_syncpt_load_wait_base(m->syncpt + i);
79 if (base_val) 89 if (base_val)
80 host1x_debug_output(o, "waitbase id %d val %d\n", i, 90 host1x_debug_output(o, "waitbase id %u val %d\n", i,
81 base_val); 91 base_val);
82 } 92 }
83 93
@@ -114,7 +124,9 @@ static int host1x_debug_show_all(struct seq_file *s, void *unused)
114 .fn = write_to_seqfile, 124 .fn = write_to_seqfile,
115 .ctx = s 125 .ctx = s
116 }; 126 };
127
117 show_all(s->private, &o); 128 show_all(s->private, &o);
129
118 return 0; 130 return 0;
119} 131}
120 132
@@ -124,7 +136,9 @@ static int host1x_debug_show(struct seq_file *s, void *unused)
124 .fn = write_to_seqfile, 136 .fn = write_to_seqfile,
125 .ctx = s 137 .ctx = s
126 }; 138 };
139
127 show_all_no_fifo(s->private, &o); 140 show_all_no_fifo(s->private, &o);
141
128 return 0; 142 return 0;
129} 143}
130 144
@@ -134,10 +148,10 @@ static int host1x_debug_open_all(struct inode *inode, struct file *file)
134} 148}
135 149
136static const struct file_operations host1x_debug_all_fops = { 150static const struct file_operations host1x_debug_all_fops = {
137 .open = host1x_debug_open_all, 151 .open = host1x_debug_open_all,
138 .read = seq_read, 152 .read = seq_read,
139 .llseek = seq_lseek, 153 .llseek = seq_lseek,
140 .release = single_release, 154 .release = single_release,
141}; 155};
142 156
143static int host1x_debug_open(struct inode *inode, struct file *file) 157static int host1x_debug_open(struct inode *inode, struct file *file)
@@ -146,10 +160,10 @@ static int host1x_debug_open(struct inode *inode, struct file *file)
146} 160}
147 161
148static const struct file_operations host1x_debug_fops = { 162static const struct file_operations host1x_debug_fops = {
149 .open = host1x_debug_open, 163 .open = host1x_debug_open,
150 .read = seq_read, 164 .read = seq_read,
151 .llseek = seq_lseek, 165 .llseek = seq_lseek,
152 .release = single_release, 166 .release = single_release,
153}; 167};
154 168
155static void host1x_debugfs_init(struct host1x *host1x) 169static void host1x_debugfs_init(struct host1x *host1x)
@@ -201,6 +215,7 @@ void host1x_debug_dump(struct host1x *host1x)
201 struct output o = { 215 struct output o = {
202 .fn = write_to_printk 216 .fn = write_to_printk
203 }; 217 };
218
204 show_all(host1x, &o); 219 show_all(host1x, &o);
205} 220}
206 221
@@ -209,5 +224,6 @@ void host1x_debug_dump_syncpts(struct host1x *host1x)
209 struct output o = { 224 struct output o = {
210 .fn = write_to_printk 225 .fn = write_to_printk
211 }; 226 };
227
212 show_syncpts(host1x, &o); 228 show_syncpts(host1x, &o);
213} 229}
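Several of the debug prints above switch from %d to %u now that channel and syncpoint indices are unsigned; with a signed specifier a value above INT_MAX would come out negative. A small standalone reminder of the difference (the constant is just an example):

#include <stdio.h>

int main(void)
{
        unsigned int id = 3000000000u;  /* larger than INT_MAX */

        /* the signed conversion is implementation-defined and usually prints negative */
        printf("as signed:   %d\n", (int)id);
        /* the unsigned specifier prints the value as stored */
        printf("as unsigned: %u\n", id);

        return 0;
}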
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index ff348690df94..a62317af76ad 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -63,13 +63,13 @@ u32 host1x_ch_readl(struct host1x_channel *ch, u32 r)
63} 63}
64 64
65static const struct host1x_info host1x01_info = { 65static const struct host1x_info host1x01_info = {
66 .nb_channels = 8, 66 .nb_channels = 8,
67 .nb_pts = 32, 67 .nb_pts = 32,
68 .nb_mlocks = 16, 68 .nb_mlocks = 16,
69 .nb_bases = 8, 69 .nb_bases = 8,
70 .init = host1x01_init, 70 .init = host1x01_init,
71 .sync_offset = 0x3000, 71 .sync_offset = 0x3000,
72 .dma_mask = DMA_BIT_MASK(32), 72 .dma_mask = DMA_BIT_MASK(32),
73}; 73};
74 74
75static const struct host1x_info host1x02_info = { 75static const struct host1x_info host1x02_info = {
@@ -102,7 +102,7 @@ static const struct host1x_info host1x05_info = {
102 .dma_mask = DMA_BIT_MASK(34), 102 .dma_mask = DMA_BIT_MASK(34),
103}; 103};
104 104
105static struct of_device_id host1x_of_match[] = { 105static const struct of_device_id host1x_of_match[] = {
106 { .compatible = "nvidia,tegra210-host1x", .data = &host1x05_info, }, 106 { .compatible = "nvidia,tegra210-host1x", .data = &host1x05_info, },
107 { .compatible = "nvidia,tegra124-host1x", .data = &host1x04_info, }, 107 { .compatible = "nvidia,tegra124-host1x", .data = &host1x04_info, },
108 { .compatible = "nvidia,tegra114-host1x", .data = &host1x02_info, }, 108 { .compatible = "nvidia,tegra114-host1x", .data = &host1x02_info, },
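Constifying host1x_of_match is straightforward hardening: the OF core only ever reads the match table through a const pointer, so marking it const lets the data live in read-only memory at no cost.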
diff --git a/drivers/gpu/host1x/dev.h b/drivers/gpu/host1x/dev.h
index dace124994bb..5220510f39da 100644
--- a/drivers/gpu/host1x/dev.h
+++ b/drivers/gpu/host1x/dev.h
@@ -45,7 +45,7 @@ struct host1x_cdma_ops {
45 void (*start)(struct host1x_cdma *cdma); 45 void (*start)(struct host1x_cdma *cdma);
46 void (*stop)(struct host1x_cdma *cdma); 46 void (*stop)(struct host1x_cdma *cdma);
47 void (*flush)(struct host1x_cdma *cdma); 47 void (*flush)(struct host1x_cdma *cdma);
48 int (*timeout_init)(struct host1x_cdma *cdma, u32 syncpt_id); 48 int (*timeout_init)(struct host1x_cdma *cdma, unsigned int syncpt);
49 void (*timeout_destroy)(struct host1x_cdma *cdma); 49 void (*timeout_destroy)(struct host1x_cdma *cdma);
50 void (*freeze)(struct host1x_cdma *cdma); 50 void (*freeze)(struct host1x_cdma *cdma);
51 void (*resume)(struct host1x_cdma *cdma, u32 getptr); 51 void (*resume)(struct host1x_cdma *cdma, u32 getptr);
@@ -82,21 +82,21 @@ struct host1x_intr_ops {
82 int (*init_host_sync)(struct host1x *host, u32 cpm, 82 int (*init_host_sync)(struct host1x *host, u32 cpm,
83 void (*syncpt_thresh_work)(struct work_struct *work)); 83 void (*syncpt_thresh_work)(struct work_struct *work));
84 void (*set_syncpt_threshold)( 84 void (*set_syncpt_threshold)(
85 struct host1x *host, u32 id, u32 thresh); 85 struct host1x *host, unsigned int id, u32 thresh);
86 void (*enable_syncpt_intr)(struct host1x *host, u32 id); 86 void (*enable_syncpt_intr)(struct host1x *host, unsigned int id);
87 void (*disable_syncpt_intr)(struct host1x *host, u32 id); 87 void (*disable_syncpt_intr)(struct host1x *host, unsigned int id);
88 void (*disable_all_syncpt_intrs)(struct host1x *host); 88 void (*disable_all_syncpt_intrs)(struct host1x *host);
89 int (*free_syncpt_irq)(struct host1x *host); 89 int (*free_syncpt_irq)(struct host1x *host);
90}; 90};
91 91
92struct host1x_info { 92struct host1x_info {
93 int nb_channels; /* host1x: num channels supported */ 93 unsigned int nb_channels; /* host1x: number of channels supported */
94 int nb_pts; /* host1x: num syncpoints supported */ 94 unsigned int nb_pts; /* host1x: number of syncpoints supported */
95 int nb_bases; /* host1x: num syncpoints supported */ 95 unsigned int nb_bases; /* host1x: number of syncpoint bases supported */
96 int nb_mlocks; /* host1x: number of mlocks */ 96 unsigned int nb_mlocks; /* host1x: number of mlocks supported */
97 int (*init)(struct host1x *); /* initialize per SoC ops */ 97 int (*init)(struct host1x *host1x); /* initialize per SoC ops */
98 int sync_offset; 98 unsigned int sync_offset; /* offset of syncpoint registers */
99 u64 dma_mask; /* mask of addressable memory */ 99 u64 dma_mask; /* mask of addressable memory */
100}; 100};
101 101
102struct host1x { 102struct host1x {
@@ -109,7 +109,6 @@ struct host1x {
109 struct clk *clk; 109 struct clk *clk;
110 110
111 struct mutex intr_mutex; 111 struct mutex intr_mutex;
112 struct workqueue_struct *intr_wq;
113 int intr_syncpt_irq; 112 int intr_syncpt_irq;
114 113
115 const struct host1x_syncpt_ops *syncpt_op; 114 const struct host1x_syncpt_ops *syncpt_op;
@@ -183,19 +182,20 @@ static inline int host1x_hw_intr_init_host_sync(struct host1x *host, u32 cpm,
183} 182}
184 183
185static inline void host1x_hw_intr_set_syncpt_threshold(struct host1x *host, 184static inline void host1x_hw_intr_set_syncpt_threshold(struct host1x *host,
186 u32 id, u32 thresh) 185 unsigned int id,
186 u32 thresh)
187{ 187{
188 host->intr_op->set_syncpt_threshold(host, id, thresh); 188 host->intr_op->set_syncpt_threshold(host, id, thresh);
189} 189}
190 190
191static inline void host1x_hw_intr_enable_syncpt_intr(struct host1x *host, 191static inline void host1x_hw_intr_enable_syncpt_intr(struct host1x *host,
192 u32 id) 192 unsigned int id)
193{ 193{
194 host->intr_op->enable_syncpt_intr(host, id); 194 host->intr_op->enable_syncpt_intr(host, id);
195} 195}
196 196
197static inline void host1x_hw_intr_disable_syncpt_intr(struct host1x *host, 197static inline void host1x_hw_intr_disable_syncpt_intr(struct host1x *host,
198 u32 id) 198 unsigned int id)
199{ 199{
200 host->intr_op->disable_syncpt_intr(host, id); 200 host->intr_op->disable_syncpt_intr(host, id);
201} 201}
@@ -212,9 +212,9 @@ static inline int host1x_hw_intr_free_syncpt_irq(struct host1x *host)
212 212
213static inline int host1x_hw_channel_init(struct host1x *host, 213static inline int host1x_hw_channel_init(struct host1x *host,
214 struct host1x_channel *channel, 214 struct host1x_channel *channel,
215 int chid) 215 unsigned int id)
216{ 216{
217 return host->channel_op->init(channel, host, chid); 217 return host->channel_op->init(channel, host, id);
218} 218}
219 219
220static inline int host1x_hw_channel_submit(struct host1x *host, 220static inline int host1x_hw_channel_submit(struct host1x *host,
@@ -243,9 +243,9 @@ static inline void host1x_hw_cdma_flush(struct host1x *host,
243 243
244static inline int host1x_hw_cdma_timeout_init(struct host1x *host, 244static inline int host1x_hw_cdma_timeout_init(struct host1x *host,
245 struct host1x_cdma *cdma, 245 struct host1x_cdma *cdma,
246 u32 syncpt_id) 246 unsigned int syncpt)
247{ 247{
248 return host->cdma_op->timeout_init(cdma, syncpt_id); 248 return host->cdma_op->timeout_init(cdma, syncpt);
249} 249}
250 250
251static inline void host1x_hw_cdma_timeout_destroy(struct host1x *host, 251static inline void host1x_hw_cdma_timeout_destroy(struct host1x *host,
diff --git a/drivers/gpu/host1x/hw/cdma_hw.c b/drivers/gpu/host1x/hw/cdma_hw.c
index 305ea8f3382d..659c1bbfeeba 100644
--- a/drivers/gpu/host1x/hw/cdma_hw.c
+++ b/drivers/gpu/host1x/hw/cdma_hw.c
@@ -41,7 +41,7 @@ static void cdma_timeout_cpu_incr(struct host1x_cdma *cdma, u32 getptr,
41{ 41{
42 struct host1x *host1x = cdma_to_host1x(cdma); 42 struct host1x *host1x = cdma_to_host1x(cdma);
43 struct push_buffer *pb = &cdma->push_buffer; 43 struct push_buffer *pb = &cdma->push_buffer;
44 u32 i; 44 unsigned int i;
45 45
46 for (i = 0; i < syncpt_incrs; i++) 46 for (i = 0; i < syncpt_incrs; i++)
47 host1x_syncpt_incr(cdma->timeout.syncpt); 47 host1x_syncpt_incr(cdma->timeout.syncpt);
@@ -58,6 +58,7 @@ static void cdma_timeout_cpu_incr(struct host1x_cdma *cdma, u32 getptr,
58 &pb->phys, getptr); 58 &pb->phys, getptr);
59 getptr = (getptr + 8) & (pb->size_bytes - 1); 59 getptr = (getptr + 8) & (pb->size_bytes - 1);
60 } 60 }
61
61 wmb(); 62 wmb();
62} 63}
63 64
@@ -162,12 +163,14 @@ static void cdma_stop(struct host1x_cdma *cdma)
162 struct host1x_channel *ch = cdma_to_channel(cdma); 163 struct host1x_channel *ch = cdma_to_channel(cdma);
163 164
164 mutex_lock(&cdma->lock); 165 mutex_lock(&cdma->lock);
166
165 if (cdma->running) { 167 if (cdma->running) {
166 host1x_cdma_wait_locked(cdma, CDMA_EVENT_SYNC_QUEUE_EMPTY); 168 host1x_cdma_wait_locked(cdma, CDMA_EVENT_SYNC_QUEUE_EMPTY);
167 host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP, 169 host1x_ch_writel(ch, HOST1X_CHANNEL_DMACTRL_DMASTOP,
168 HOST1X_CHANNEL_DMACTRL); 170 HOST1X_CHANNEL_DMACTRL);
169 cdma->running = false; 171 cdma->running = false;
170 } 172 }
173
171 mutex_unlock(&cdma->lock); 174 mutex_unlock(&cdma->lock);
172} 175}
173 176
@@ -213,11 +216,11 @@ static void cdma_resume(struct host1x_cdma *cdma, u32 getptr)
213 u32 cmdproc_stop; 216 u32 cmdproc_stop;
214 217
215 dev_dbg(host1x->dev, 218 dev_dbg(host1x->dev,
216 "resuming channel (id %d, DMAGET restart = 0x%x)\n", 219 "resuming channel (id %u, DMAGET restart = 0x%x)\n",
217 ch->id, getptr); 220 ch->id, getptr);
218 221
219 cmdproc_stop = host1x_sync_readl(host1x, HOST1X_SYNC_CMDPROC_STOP); 222 cmdproc_stop = host1x_sync_readl(host1x, HOST1X_SYNC_CMDPROC_STOP);
220 cmdproc_stop &= ~(BIT(ch->id)); 223 cmdproc_stop &= ~BIT(ch->id);
221 host1x_sync_writel(host1x, cmdproc_stop, HOST1X_SYNC_CMDPROC_STOP); 224 host1x_sync_writel(host1x, cmdproc_stop, HOST1X_SYNC_CMDPROC_STOP);
222 225
223 cdma->torndown = false; 226 cdma->torndown = false;
@@ -231,14 +234,11 @@ static void cdma_resume(struct host1x_cdma *cdma, u32 getptr)
231 */ 234 */
232static void cdma_timeout_handler(struct work_struct *work) 235static void cdma_timeout_handler(struct work_struct *work)
233{ 236{
237 u32 prev_cmdproc, cmdproc_stop, syncpt_val;
234 struct host1x_cdma *cdma; 238 struct host1x_cdma *cdma;
235 struct host1x *host1x; 239 struct host1x *host1x;
236 struct host1x_channel *ch; 240 struct host1x_channel *ch;
237 241
238 u32 syncpt_val;
239
240 u32 prev_cmdproc, cmdproc_stop;
241
242 cdma = container_of(to_delayed_work(work), struct host1x_cdma, 242 cdma = container_of(to_delayed_work(work), struct host1x_cdma,
243 timeout.wq); 243 timeout.wq);
244 host1x = cdma_to_host1x(cdma); 244 host1x = cdma_to_host1x(cdma);
@@ -277,9 +277,9 @@ static void cdma_timeout_handler(struct work_struct *work)
277 return; 277 return;
278 } 278 }
279 279
280 dev_warn(host1x->dev, "%s: timeout: %d (%s), HW thresh %d, done %d\n", 280 dev_warn(host1x->dev, "%s: timeout: %u (%s), HW thresh %d, done %d\n",
281 __func__, cdma->timeout.syncpt->id, cdma->timeout.syncpt->name, 281 __func__, cdma->timeout.syncpt->id, cdma->timeout.syncpt->name,
282 syncpt_val, cdma->timeout.syncpt_val); 282 syncpt_val, cdma->timeout.syncpt_val);
283 283
284 /* stop HW, resetting channel/module */ 284 /* stop HW, resetting channel/module */
285 host1x_hw_cdma_freeze(host1x, cdma); 285 host1x_hw_cdma_freeze(host1x, cdma);
@@ -291,7 +291,7 @@ static void cdma_timeout_handler(struct work_struct *work)
291/* 291/*
292 * Init timeout resources 292 * Init timeout resources
293 */ 293 */
294static int cdma_timeout_init(struct host1x_cdma *cdma, u32 syncpt_id) 294static int cdma_timeout_init(struct host1x_cdma *cdma, unsigned int syncpt)
295{ 295{
296 INIT_DELAYED_WORK(&cdma->timeout.wq, cdma_timeout_handler); 296 INIT_DELAYED_WORK(&cdma->timeout.wq, cdma_timeout_handler);
297 cdma->timeout.initialized = true; 297 cdma->timeout.initialized = true;
@@ -306,6 +306,7 @@ static void cdma_timeout_destroy(struct host1x_cdma *cdma)
306{ 306{
307 if (cdma->timeout.initialized) 307 if (cdma->timeout.initialized)
308 cancel_delayed_work(&cdma->timeout.wq); 308 cancel_delayed_work(&cdma->timeout.wq);
309
309 cdma->timeout.initialized = false; 310 cdma->timeout.initialized = false;
310} 311}
311 312
diff --git a/drivers/gpu/host1x/hw/channel_hw.c b/drivers/gpu/host1x/hw/channel_hw.c
index 946c332c3906..5e8df78b7acd 100644
--- a/drivers/gpu/host1x/hw/channel_hw.c
+++ b/drivers/gpu/host1x/hw/channel_hw.c
@@ -46,6 +46,7 @@ static void trace_write_gather(struct host1x_cdma *cdma, struct host1x_bo *bo,
46 */ 46 */
47 for (i = 0; i < words; i += TRACE_MAX_LENGTH) { 47 for (i = 0; i < words; i += TRACE_MAX_LENGTH) {
48 u32 num_words = min(words - i, TRACE_MAX_LENGTH); 48 u32 num_words = min(words - i, TRACE_MAX_LENGTH);
49
49 offset += i * sizeof(u32); 50 offset += i * sizeof(u32);
50 51
51 trace_host1x_cdma_push_gather(dev_name(dev), bo, 52 trace_host1x_cdma_push_gather(dev_name(dev), bo,
@@ -66,6 +67,7 @@ static void submit_gathers(struct host1x_job *job)
66 struct host1x_job_gather *g = &job->gathers[i]; 67 struct host1x_job_gather *g = &job->gathers[i];
67 u32 op1 = host1x_opcode_gather(g->words); 68 u32 op1 = host1x_opcode_gather(g->words);
68 u32 op2 = g->base + g->offset; 69 u32 op2 = g->base + g->offset;
70
69 trace_write_gather(cdma, g->bo, g->offset, op1 & 0xffff); 71 trace_write_gather(cdma, g->bo, g->offset, op1 & 0xffff);
70 host1x_cdma_push(cdma, op1, op2); 72 host1x_cdma_push(cdma, op1, op2);
71 } 73 }
@@ -75,7 +77,8 @@ static inline void synchronize_syncpt_base(struct host1x_job *job)
75{ 77{
76 struct host1x *host = dev_get_drvdata(job->channel->dev->parent); 78 struct host1x *host = dev_get_drvdata(job->channel->dev->parent);
77 struct host1x_syncpt *sp = host->syncpt + job->syncpt_id; 79 struct host1x_syncpt *sp = host->syncpt + job->syncpt_id;
78 u32 id, value; 80 unsigned int id;
81 u32 value;
79 82
80 value = host1x_syncpt_read_max(sp); 83 value = host1x_syncpt_read_max(sp);
81 id = sp->base->id; 84 id = sp->base->id;
diff --git a/drivers/gpu/host1x/hw/debug_hw.c b/drivers/gpu/host1x/hw/debug_hw.c
index cc3f1825c735..7a4a3286e4a7 100644
--- a/drivers/gpu/host1x/hw/debug_hw.c
+++ b/drivers/gpu/host1x/hw/debug_hw.c
@@ -40,8 +40,7 @@ enum {
40 40
41static unsigned int show_channel_command(struct output *o, u32 val) 41static unsigned int show_channel_command(struct output *o, u32 val)
42{ 42{
43 unsigned mask; 43 unsigned int mask, subop;
44 unsigned subop;
45 44
46 switch (val >> 28) { 45 switch (val >> 28) {
47 case HOST1X_OPCODE_SETCLASS: 46 case HOST1X_OPCODE_SETCLASS:
@@ -51,12 +50,11 @@ static unsigned int show_channel_command(struct output *o, u32 val)
51 val >> 6 & 0x3ff, 50 val >> 6 & 0x3ff,
52 val >> 16 & 0xfff, mask); 51 val >> 16 & 0xfff, mask);
53 return hweight8(mask); 52 return hweight8(mask);
54 } else {
55 host1x_debug_output(o, "SETCL(class=%03x)\n",
56 val >> 6 & 0x3ff);
57 return 0;
58 } 53 }
59 54
55 host1x_debug_output(o, "SETCL(class=%03x)\n", val >> 6 & 0x3ff);
56 return 0;
57
60 case HOST1X_OPCODE_INCR: 58 case HOST1X_OPCODE_INCR:
61 host1x_debug_output(o, "INCR(offset=%03x, [", 59 host1x_debug_output(o, "INCR(offset=%03x, [",
62 val >> 16 & 0xfff); 60 val >> 16 & 0xfff);
@@ -143,7 +141,8 @@ static void show_channel_gathers(struct output *o, struct host1x_cdma *cdma)
143 struct host1x_job *job; 141 struct host1x_job *job;
144 142
145 list_for_each_entry(job, &cdma->sync_queue, list) { 143 list_for_each_entry(job, &cdma->sync_queue, list) {
146 int i; 144 unsigned int i;
145
147 host1x_debug_output(o, "\n%p: JOB, syncpt_id=%d, syncpt_val=%d, first_get=%08x, timeout=%d num_slots=%d, num_handles=%d\n", 146 host1x_debug_output(o, "\n%p: JOB, syncpt_id=%d, syncpt_val=%d, first_get=%08x, timeout=%d num_slots=%d, num_handles=%d\n",
148 job, job->syncpt_id, job->syncpt_end, 147 job, job->syncpt_id, job->syncpt_end,
149 job->first_get, job->timeout, 148 job->first_get, job->timeout,
@@ -190,7 +189,7 @@ static void host1x_debug_show_channel_cdma(struct host1x *host,
190 cbread = host1x_sync_readl(host, HOST1X_SYNC_CBREAD(ch->id)); 189 cbread = host1x_sync_readl(host, HOST1X_SYNC_CBREAD(ch->id));
191 cbstat = host1x_sync_readl(host, HOST1X_SYNC_CBSTAT(ch->id)); 190 cbstat = host1x_sync_readl(host, HOST1X_SYNC_CBSTAT(ch->id));
192 191
193 host1x_debug_output(o, "%d-%s: ", ch->id, dev_name(ch->dev)); 192 host1x_debug_output(o, "%u-%s: ", ch->id, dev_name(ch->dev));
194 193
195 if (HOST1X_CHANNEL_DMACTRL_DMASTOP_V(dmactrl) || 194 if (HOST1X_CHANNEL_DMACTRL_DMASTOP_V(dmactrl) ||
196 !ch->cdma.push_buffer.mapped) { 195 !ch->cdma.push_buffer.mapped) {
@@ -200,14 +199,13 @@ static void host1x_debug_show_channel_cdma(struct host1x *host,
200 199
201 if (HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat) == HOST1X_CLASS_HOST1X && 200 if (HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat) == HOST1X_CLASS_HOST1X &&
202 HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat) == 201 HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat) ==
203 HOST1X_UCLASS_WAIT_SYNCPT) 202 HOST1X_UCLASS_WAIT_SYNCPT)
204 host1x_debug_output(o, "waiting on syncpt %d val %d\n", 203 host1x_debug_output(o, "waiting on syncpt %d val %d\n",
205 cbread >> 24, cbread & 0xffffff); 204 cbread >> 24, cbread & 0xffffff);
206 else if (HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat) == 205 else if (HOST1X_SYNC_CBSTAT_CBCLASS_V(cbstat) ==
207 HOST1X_CLASS_HOST1X && 206 HOST1X_CLASS_HOST1X &&
208 HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat) == 207 HOST1X_SYNC_CBSTAT_CBOFFSET_V(cbstat) ==
209 HOST1X_UCLASS_WAIT_SYNCPT_BASE) { 208 HOST1X_UCLASS_WAIT_SYNCPT_BASE) {
210
211 base = (cbread >> 16) & 0xff; 209 base = (cbread >> 16) & 0xff;
212 baseval = 210 baseval =
213 host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_BASE(base)); 211 host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_BASE(base));
@@ -236,7 +234,7 @@ static void host1x_debug_show_channel_fifo(struct host1x *host,
236 u32 val, rd_ptr, wr_ptr, start, end; 234 u32 val, rd_ptr, wr_ptr, start, end;
237 unsigned int data_count = 0; 235 unsigned int data_count = 0;
238 236
239 host1x_debug_output(o, "%d: fifo:\n", ch->id); 237 host1x_debug_output(o, "%u: fifo:\n", ch->id);
240 238
241 val = host1x_ch_readl(ch, HOST1X_CHANNEL_FIFOSTAT); 239 val = host1x_ch_readl(ch, HOST1X_CHANNEL_FIFOSTAT);
242 host1x_debug_output(o, "FIFOSTAT %08x\n", val); 240 host1x_debug_output(o, "FIFOSTAT %08x\n", val);
@@ -290,20 +288,22 @@ static void host1x_debug_show_channel_fifo(struct host1x *host,
290 288
291static void host1x_debug_show_mlocks(struct host1x *host, struct output *o) 289static void host1x_debug_show_mlocks(struct host1x *host, struct output *o)
292{ 290{
293 int i; 291 unsigned int i;
294 292
295 host1x_debug_output(o, "---- mlocks ----\n"); 293 host1x_debug_output(o, "---- mlocks ----\n");
294
296 for (i = 0; i < host1x_syncpt_nb_mlocks(host); i++) { 295 for (i = 0; i < host1x_syncpt_nb_mlocks(host); i++) {
297 u32 owner = 296 u32 owner =
298 host1x_sync_readl(host, HOST1X_SYNC_MLOCK_OWNER(i)); 297 host1x_sync_readl(host, HOST1X_SYNC_MLOCK_OWNER(i));
299 if (HOST1X_SYNC_MLOCK_OWNER_CH_OWNS_V(owner)) 298 if (HOST1X_SYNC_MLOCK_OWNER_CH_OWNS_V(owner))
300 host1x_debug_output(o, "%d: locked by channel %d\n", 299 host1x_debug_output(o, "%u: locked by channel %u\n",
301 i, HOST1X_SYNC_MLOCK_OWNER_CHID_V(owner)); 300 i, HOST1X_SYNC_MLOCK_OWNER_CHID_V(owner));
302 else if (HOST1X_SYNC_MLOCK_OWNER_CPU_OWNS_V(owner)) 301 else if (HOST1X_SYNC_MLOCK_OWNER_CPU_OWNS_V(owner))
303 host1x_debug_output(o, "%d: locked by cpu\n", i); 302 host1x_debug_output(o, "%u: locked by cpu\n", i);
304 else 303 else
305 host1x_debug_output(o, "%d: unlocked\n", i); 304 host1x_debug_output(o, "%u: unlocked\n", i);
306 } 305 }
306
307 host1x_debug_output(o, "\n"); 307 host1x_debug_output(o, "\n");
308} 308}
309 309
diff --git a/drivers/gpu/host1x/hw/intr_hw.c b/drivers/gpu/host1x/hw/intr_hw.c
index e1e31e9e67cd..dacb8009a605 100644
--- a/drivers/gpu/host1x/hw/intr_hw.c
+++ b/drivers/gpu/host1x/hw/intr_hw.c
@@ -38,14 +38,14 @@ static void host1x_intr_syncpt_handle(struct host1x_syncpt *syncpt)
38 host1x_sync_writel(host, BIT_MASK(id), 38 host1x_sync_writel(host, BIT_MASK(id),
39 HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(BIT_WORD(id))); 39 HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(BIT_WORD(id)));
40 40
41 queue_work(host->intr_wq, &syncpt->intr.work); 41 schedule_work(&syncpt->intr.work);
42} 42}
43 43
44static irqreturn_t syncpt_thresh_isr(int irq, void *dev_id) 44static irqreturn_t syncpt_thresh_isr(int irq, void *dev_id)
45{ 45{
46 struct host1x *host = dev_id; 46 struct host1x *host = dev_id;
47 unsigned long reg; 47 unsigned long reg;
48 int i, id; 48 unsigned int i, id;
49 49
50 for (i = 0; i < DIV_ROUND_UP(host->info->nb_pts, 32); i++) { 50 for (i = 0; i < DIV_ROUND_UP(host->info->nb_pts, 32); i++) {
51 reg = host1x_sync_readl(host, 51 reg = host1x_sync_readl(host,
@@ -62,7 +62,7 @@ static irqreturn_t syncpt_thresh_isr(int irq, void *dev_id)
62 62
63static void _host1x_intr_disable_all_syncpt_intrs(struct host1x *host) 63static void _host1x_intr_disable_all_syncpt_intrs(struct host1x *host)
64{ 64{
65 u32 i; 65 unsigned int i;
66 66
67 for (i = 0; i < DIV_ROUND_UP(host->info->nb_pts, 32); ++i) { 67 for (i = 0; i < DIV_ROUND_UP(host->info->nb_pts, 32); ++i) {
68 host1x_sync_writel(host, 0xffffffffu, 68 host1x_sync_writel(host, 0xffffffffu,
@@ -72,10 +72,12 @@ static void _host1x_intr_disable_all_syncpt_intrs(struct host1x *host)
72 } 72 }
73} 73}
74 74
75static int _host1x_intr_init_host_sync(struct host1x *host, u32 cpm, 75static int
76 void (*syncpt_thresh_work)(struct work_struct *)) 76_host1x_intr_init_host_sync(struct host1x *host, u32 cpm,
77 void (*syncpt_thresh_work)(struct work_struct *))
77{ 78{
78 int i, err; 79 unsigned int i;
80 int err;
79 81
80 host1x_hw_intr_disable_all_syncpt_intrs(host); 82 host1x_hw_intr_disable_all_syncpt_intrs(host);
81 83
@@ -106,18 +108,21 @@ static int _host1x_intr_init_host_sync(struct host1x *host, u32 cpm,
106} 108}
107 109
108static void _host1x_intr_set_syncpt_threshold(struct host1x *host, 110static void _host1x_intr_set_syncpt_threshold(struct host1x *host,
109 u32 id, u32 thresh) 111 unsigned int id,
112 u32 thresh)
110{ 113{
111 host1x_sync_writel(host, thresh, HOST1X_SYNC_SYNCPT_INT_THRESH(id)); 114 host1x_sync_writel(host, thresh, HOST1X_SYNC_SYNCPT_INT_THRESH(id));
112} 115}
113 116
114static void _host1x_intr_enable_syncpt_intr(struct host1x *host, u32 id) 117static void _host1x_intr_enable_syncpt_intr(struct host1x *host,
118 unsigned int id)
115{ 119{
116 host1x_sync_writel(host, BIT_MASK(id), 120 host1x_sync_writel(host, BIT_MASK(id),
117 HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(BIT_WORD(id))); 121 HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(BIT_WORD(id)));
118} 122}
119 123
120static void _host1x_intr_disable_syncpt_intr(struct host1x *host, u32 id) 124static void _host1x_intr_disable_syncpt_intr(struct host1x *host,
125 unsigned int id)
121{ 126{
122 host1x_sync_writel(host, BIT_MASK(id), 127 host1x_sync_writel(host, BIT_MASK(id),
123 HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(BIT_WORD(id))); 128 HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(BIT_WORD(id)));
@@ -127,8 +132,13 @@ static void _host1x_intr_disable_syncpt_intr(struct host1x *host, u32 id)
127 132
128static int _host1x_free_syncpt_irq(struct host1x *host) 133static int _host1x_free_syncpt_irq(struct host1x *host)
129{ 134{
135 unsigned int i;
136
130 devm_free_irq(host->dev, host->intr_syncpt_irq, host); 137 devm_free_irq(host->dev, host->intr_syncpt_irq, host);
131 flush_workqueue(host->intr_wq); 138
139 for (i = 0; i < host->info->nb_pts; i++)
140 cancel_work_sync(&host->syncpt[i].intr.work);
141
132 return 0; 142 return 0;
133} 143}
134 144
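With the private "host_syncpt" workqueue gone, the syncpoint handler is queued with schedule_work() on the system workqueue, so teardown can no longer flush a single queue; _host1x_free_syncpt_irq() instead cancels each syncpoint's work item and waits for it. A rough sketch of that pattern with placeholder names (struct item, item_kick, items_teardown), not the driver's own code and not a complete module:

#include <linux/workqueue.h>

struct item {
        struct work_struct work;
};

/* queue the handler on the shared system workqueue */
void item_kick(struct item *it)
{
        schedule_work(&it->work);
}

/*
 * Teardown: with no private queue to flush, cancel every item's work
 * and wait for any instance that is still running to finish.
 */
void items_teardown(struct item *items, unsigned int count)
{
        unsigned int i;

        for (i = 0; i < count; i++)
                cancel_work_sync(&items[i].work);
}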
diff --git a/drivers/gpu/host1x/hw/syncpt_hw.c b/drivers/gpu/host1x/hw/syncpt_hw.c
index 56e85395ac24..c93f74fcce72 100644
--- a/drivers/gpu/host1x/hw/syncpt_hw.c
+++ b/drivers/gpu/host1x/hw/syncpt_hw.c
@@ -26,8 +26,9 @@
26 */ 26 */
27static void syncpt_restore(struct host1x_syncpt *sp) 27static void syncpt_restore(struct host1x_syncpt *sp)
28{ 28{
29 u32 min = host1x_syncpt_read_min(sp);
29 struct host1x *host = sp->host; 30 struct host1x *host = sp->host;
30 int min = host1x_syncpt_read_min(sp); 31
31 host1x_sync_writel(host, min, HOST1X_SYNC_SYNCPT(sp->id)); 32 host1x_sync_writel(host, min, HOST1X_SYNC_SYNCPT(sp->id));
32} 33}
33 34
@@ -37,6 +38,7 @@ static void syncpt_restore(struct host1x_syncpt *sp)
37static void syncpt_restore_wait_base(struct host1x_syncpt *sp) 38static void syncpt_restore_wait_base(struct host1x_syncpt *sp)
38{ 39{
39 struct host1x *host = sp->host; 40 struct host1x *host = sp->host;
41
40 host1x_sync_writel(host, sp->base_val, 42 host1x_sync_writel(host, sp->base_val,
41 HOST1X_SYNC_SYNCPT_BASE(sp->id)); 43 HOST1X_SYNC_SYNCPT_BASE(sp->id));
42} 44}
@@ -47,6 +49,7 @@ static void syncpt_restore_wait_base(struct host1x_syncpt *sp)
47static void syncpt_read_wait_base(struct host1x_syncpt *sp) 49static void syncpt_read_wait_base(struct host1x_syncpt *sp)
48{ 50{
49 struct host1x *host = sp->host; 51 struct host1x *host = sp->host;
52
50 sp->base_val = 53 sp->base_val =
51 host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_BASE(sp->id)); 54 host1x_sync_readl(host, HOST1X_SYNC_SYNCPT_BASE(sp->id));
52} 55}
@@ -85,6 +88,7 @@ static int syncpt_cpu_incr(struct host1x_syncpt *sp)
85 if (!host1x_syncpt_client_managed(sp) && 88 if (!host1x_syncpt_client_managed(sp) &&
86 host1x_syncpt_idle(sp)) 89 host1x_syncpt_idle(sp))
87 return -EINVAL; 90 return -EINVAL;
91
88 host1x_sync_writel(host, BIT_MASK(sp->id), 92 host1x_sync_writel(host, BIT_MASK(sp->id),
89 HOST1X_SYNC_SYNCPT_CPU_INCR(reg_offset)); 93 HOST1X_SYNC_SYNCPT_CPU_INCR(reg_offset));
90 wmb(); 94 wmb();
@@ -95,10 +99,10 @@ static int syncpt_cpu_incr(struct host1x_syncpt *sp)
95/* remove a wait pointed to by patch_addr */ 99/* remove a wait pointed to by patch_addr */
96static int syncpt_patch_wait(struct host1x_syncpt *sp, void *patch_addr) 100static int syncpt_patch_wait(struct host1x_syncpt *sp, void *patch_addr)
97{ 101{
98 u32 override = host1x_class_host_wait_syncpt( 102 u32 override = host1x_class_host_wait_syncpt(HOST1X_SYNCPT_RESERVED, 0);
99 HOST1X_SYNCPT_RESERVED, 0);
100 103
101 *((u32 *)patch_addr) = override; 104 *((u32 *)patch_addr) = override;
105
102 return 0; 106 return 0;
103} 107}
104 108
diff --git a/drivers/gpu/host1x/intr.c b/drivers/gpu/host1x/intr.c
index 2491bf82e30c..8b4fad0ab35d 100644
--- a/drivers/gpu/host1x/intr.c
+++ b/drivers/gpu/host1x/intr.c
@@ -122,18 +122,20 @@ static void action_submit_complete(struct host1x_waitlist *waiter)
122static void action_wakeup(struct host1x_waitlist *waiter) 122static void action_wakeup(struct host1x_waitlist *waiter)
123{ 123{
124 wait_queue_head_t *wq = waiter->data; 124 wait_queue_head_t *wq = waiter->data;
125
125 wake_up(wq); 126 wake_up(wq);
126} 127}
127 128
128static void action_wakeup_interruptible(struct host1x_waitlist *waiter) 129static void action_wakeup_interruptible(struct host1x_waitlist *waiter)
129{ 130{
130 wait_queue_head_t *wq = waiter->data; 131 wait_queue_head_t *wq = waiter->data;
132
131 wake_up_interruptible(wq); 133 wake_up_interruptible(wq);
132} 134}
133 135
134typedef void (*action_handler)(struct host1x_waitlist *waiter); 136typedef void (*action_handler)(struct host1x_waitlist *waiter);
135 137
136static action_handler action_handlers[HOST1X_INTR_ACTION_COUNT] = { 138static const action_handler action_handlers[HOST1X_INTR_ACTION_COUNT] = {
137 action_submit_complete, 139 action_submit_complete,
138 action_wakeup, 140 action_wakeup,
139 action_wakeup_interruptible, 141 action_wakeup_interruptible,
@@ -209,7 +211,7 @@ static void syncpt_thresh_work(struct work_struct *work)
209 host1x_syncpt_load(host->syncpt + id)); 211 host1x_syncpt_load(host->syncpt + id));
210} 212}
211 213
212int host1x_intr_add_action(struct host1x *host, u32 id, u32 thresh, 214int host1x_intr_add_action(struct host1x *host, unsigned int id, u32 thresh,
213 enum host1x_intr_action action, void *data, 215 enum host1x_intr_action action, void *data,
214 struct host1x_waitlist *waiter, void **ref) 216 struct host1x_waitlist *waiter, void **ref)
215{ 217{
@@ -254,7 +256,7 @@ int host1x_intr_add_action(struct host1x *host, u32 id, u32 thresh,
254 return 0; 256 return 0;
255} 257}
256 258
257void host1x_intr_put_ref(struct host1x *host, u32 id, void *ref) 259void host1x_intr_put_ref(struct host1x *host, unsigned int id, void *ref)
258{ 260{
259 struct host1x_waitlist *waiter = ref; 261 struct host1x_waitlist *waiter = ref;
260 struct host1x_syncpt *syncpt; 262 struct host1x_syncpt *syncpt;
@@ -277,9 +279,6 @@ int host1x_intr_init(struct host1x *host, unsigned int irq_sync)
277 279
278 mutex_init(&host->intr_mutex); 280 mutex_init(&host->intr_mutex);
279 host->intr_syncpt_irq = irq_sync; 281 host->intr_syncpt_irq = irq_sync;
280 host->intr_wq = create_workqueue("host_syncpt");
281 if (!host->intr_wq)
282 return -ENOMEM;
283 282
284 for (id = 0; id < nb_pts; ++id) { 283 for (id = 0; id < nb_pts; ++id) {
285 struct host1x_syncpt *syncpt = host->syncpt + id; 284 struct host1x_syncpt *syncpt = host->syncpt + id;
@@ -288,7 +287,7 @@ int host1x_intr_init(struct host1x *host, unsigned int irq_sync)
288 INIT_LIST_HEAD(&syncpt->intr.wait_head); 287 INIT_LIST_HEAD(&syncpt->intr.wait_head);
289 snprintf(syncpt->intr.thresh_irq_name, 288 snprintf(syncpt->intr.thresh_irq_name,
290 sizeof(syncpt->intr.thresh_irq_name), 289 sizeof(syncpt->intr.thresh_irq_name),
291 "host1x_sp_%02d", id); 290 "host1x_sp_%02u", id);
292 } 291 }
293 292
294 host1x_intr_start(host); 293 host1x_intr_start(host);
@@ -299,7 +298,6 @@ int host1x_intr_init(struct host1x *host, unsigned int irq_sync)
299void host1x_intr_deinit(struct host1x *host) 298void host1x_intr_deinit(struct host1x *host)
300{ 299{
301 host1x_intr_stop(host); 300 host1x_intr_stop(host);
302 destroy_workqueue(host->intr_wq);
303} 301}
304 302
305void host1x_intr_start(struct host1x *host) 303void host1x_intr_start(struct host1x *host)
@@ -342,7 +340,7 @@ void host1x_intr_stop(struct host1x *host)
342 if (!list_empty(&syncpt[id].intr.wait_head)) { 340 if (!list_empty(&syncpt[id].intr.wait_head)) {
343 /* output diagnostics */ 341 /* output diagnostics */
344 mutex_unlock(&host->intr_mutex); 342 mutex_unlock(&host->intr_mutex);
345 pr_warn("%s cannot stop syncpt intr id=%d\n", 343 pr_warn("%s cannot stop syncpt intr id=%u\n",
346 __func__, id); 344 __func__, id);
347 return; 345 return;
348 } 346 }
diff --git a/drivers/gpu/host1x/intr.h b/drivers/gpu/host1x/intr.h
index 2b8adf016a05..1370c2bb75b8 100644
--- a/drivers/gpu/host1x/intr.h
+++ b/drivers/gpu/host1x/intr.h
@@ -75,7 +75,7 @@ struct host1x_waitlist {
75 * 75 *
76 * This is a non-blocking api. 76 * This is a non-blocking api.
77 */ 77 */
78int host1x_intr_add_action(struct host1x *host, u32 id, u32 thresh, 78int host1x_intr_add_action(struct host1x *host, unsigned int id, u32 thresh,
79 enum host1x_intr_action action, void *data, 79 enum host1x_intr_action action, void *data,
80 struct host1x_waitlist *waiter, void **ref); 80 struct host1x_waitlist *waiter, void **ref);
81 81
@@ -84,7 +84,7 @@ int host1x_intr_add_action(struct host1x *host, u32 id, u32 thresh,
84 * You must call this if you passed non-NULL as ref. 84 * You must call this if you passed non-NULL as ref.
85 * @ref the ref returned from host1x_intr_add_action() 85 * @ref the ref returned from host1x_intr_add_action()
86 */ 86 */
87void host1x_intr_put_ref(struct host1x *host, u32 id, void *ref); 87void host1x_intr_put_ref(struct host1x *host, unsigned int id, void *ref);
88 88
89/* Initialize host1x sync point interrupt */ 89/* Initialize host1x sync point interrupt */
90int host1x_intr_init(struct host1x *host, unsigned int irq_sync); 90int host1x_intr_init(struct host1x *host, unsigned int irq_sync);
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index b4515d544039..a91b7c4a6110 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -161,7 +161,7 @@ static int do_waitchks(struct host1x_job *job, struct host1x *host,
161 161
162 if (host1x_syncpt_is_expired(sp, wait->thresh)) { 162 if (host1x_syncpt_is_expired(sp, wait->thresh)) {
163 dev_dbg(host->dev, 163 dev_dbg(host->dev,
164 "drop WAIT id %d (%s) thresh 0x%x, min 0x%x\n", 164 "drop WAIT id %u (%s) thresh 0x%x, min 0x%x\n",
165 wait->syncpt_id, sp->name, wait->thresh, 165 wait->syncpt_id, sp->name, wait->thresh,
166 host1x_syncpt_read_min(sp)); 166 host1x_syncpt_read_min(sp));
167 167
@@ -464,6 +464,7 @@ static inline int copy_gathers(struct host1x_job *job, struct device *dev)
464 464
465 for (i = 0; i < job->num_gathers; i++) { 465 for (i = 0; i < job->num_gathers; i++) {
466 struct host1x_job_gather *g = &job->gathers[i]; 466 struct host1x_job_gather *g = &job->gathers[i];
467
467 size += g->words * sizeof(u32); 468 size += g->words * sizeof(u32);
468 } 469 }
469 470
@@ -514,6 +515,7 @@ int host1x_job_pin(struct host1x_job *job, struct device *dev)
514 bitmap_zero(waitchk_mask, host1x_syncpt_nb_pts(host)); 515 bitmap_zero(waitchk_mask, host1x_syncpt_nb_pts(host));
515 for (i = 0; i < job->num_waitchk; i++) { 516 for (i = 0; i < job->num_waitchk; i++) {
516 u32 syncpt_id = job->waitchk[i].syncpt_id; 517 u32 syncpt_id = job->waitchk[i].syncpt_id;
518
517 if (syncpt_id < host1x_syncpt_nb_pts(host)) 519 if (syncpt_id < host1x_syncpt_nb_pts(host))
518 set_bit(syncpt_id, waitchk_mask); 520 set_bit(syncpt_id, waitchk_mask);
519 } 521 }
@@ -571,14 +573,16 @@ void host1x_job_unpin(struct host1x_job *job)
571 573
572 for (i = 0; i < job->num_unpins; i++) { 574 for (i = 0; i < job->num_unpins; i++) {
573 struct host1x_job_unpin_data *unpin = &job->unpins[i]; 575 struct host1x_job_unpin_data *unpin = &job->unpins[i];
576
574 host1x_bo_unpin(unpin->bo, unpin->sgt); 577 host1x_bo_unpin(unpin->bo, unpin->sgt);
575 host1x_bo_put(unpin->bo); 578 host1x_bo_put(unpin->bo);
576 } 579 }
580
577 job->num_unpins = 0; 581 job->num_unpins = 0;
578 582
579 if (job->gather_copy_size) 583 if (job->gather_copy_size)
580 dma_free_wc(job->channel->dev, job->gather_copy_size, 584 dma_free_wc(job->channel->dev, job->gather_copy_size,
581 job->gather_copy_mapped, job->gather_copy); 585 job->gather_copy_mapped, job->gather_copy);
582} 586}
583EXPORT_SYMBOL(host1x_job_unpin); 587EXPORT_SYMBOL(host1x_job_unpin);
584 588
diff --git a/drivers/gpu/host1x/syncpt.c b/drivers/gpu/host1x/syncpt.c
index 6b7fdc1e2ed0..95589328ad52 100644
--- a/drivers/gpu/host1x/syncpt.c
+++ b/drivers/gpu/host1x/syncpt.c
@@ -73,7 +73,7 @@ static struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
73 return NULL; 73 return NULL;
74 } 74 }
75 75
76 name = kasprintf(GFP_KERNEL, "%02d-%s", sp->id, 76 name = kasprintf(GFP_KERNEL, "%02u-%s", sp->id,
77 dev ? dev_name(dev) : NULL); 77 dev ? dev_name(dev) : NULL);
78 if (!name) 78 if (!name)
79 return NULL; 79 return NULL;
@@ -110,12 +110,14 @@ EXPORT_SYMBOL(host1x_syncpt_incr_max);
110void host1x_syncpt_restore(struct host1x *host) 110void host1x_syncpt_restore(struct host1x *host)
111{ 111{
112 struct host1x_syncpt *sp_base = host->syncpt; 112 struct host1x_syncpt *sp_base = host->syncpt;
113 u32 i; 113 unsigned int i;
114 114
115 for (i = 0; i < host1x_syncpt_nb_pts(host); i++) 115 for (i = 0; i < host1x_syncpt_nb_pts(host); i++)
116 host1x_hw_syncpt_restore(host, sp_base + i); 116 host1x_hw_syncpt_restore(host, sp_base + i);
117
117 for (i = 0; i < host1x_syncpt_nb_bases(host); i++) 118 for (i = 0; i < host1x_syncpt_nb_bases(host); i++)
118 host1x_hw_syncpt_restore_wait_base(host, sp_base + i); 119 host1x_hw_syncpt_restore_wait_base(host, sp_base + i);
120
119 wmb(); 121 wmb();
120} 122}
121 123
@@ -126,7 +128,7 @@ void host1x_syncpt_restore(struct host1x *host)
126void host1x_syncpt_save(struct host1x *host) 128void host1x_syncpt_save(struct host1x *host)
127{ 129{
128 struct host1x_syncpt *sp_base = host->syncpt; 130 struct host1x_syncpt *sp_base = host->syncpt;
129 u32 i; 131 unsigned int i;
130 132
131 for (i = 0; i < host1x_syncpt_nb_pts(host); i++) { 133 for (i = 0; i < host1x_syncpt_nb_pts(host); i++) {
132 if (host1x_syncpt_client_managed(sp_base + i)) 134 if (host1x_syncpt_client_managed(sp_base + i))
@@ -146,6 +148,7 @@ void host1x_syncpt_save(struct host1x *host)
146u32 host1x_syncpt_load(struct host1x_syncpt *sp) 148u32 host1x_syncpt_load(struct host1x_syncpt *sp)
147{ 149{
148 u32 val; 150 u32 val;
151
149 val = host1x_hw_syncpt_load(sp->host, sp); 152 val = host1x_hw_syncpt_load(sp->host, sp);
150 trace_host1x_syncpt_load_min(sp->id, val); 153 trace_host1x_syncpt_load_min(sp->id, val);
151 154
@@ -157,10 +160,9 @@ u32 host1x_syncpt_load(struct host1x_syncpt *sp)
157 */ 160 */
158u32 host1x_syncpt_load_wait_base(struct host1x_syncpt *sp) 161u32 host1x_syncpt_load_wait_base(struct host1x_syncpt *sp)
159{ 162{
160 u32 val;
161 host1x_hw_syncpt_load_wait_base(sp->host, sp); 163 host1x_hw_syncpt_load_wait_base(sp->host, sp);
162 val = sp->base_val; 164
163 return val; 165 return sp->base_val;
164} 166}
165 167
166/* 168/*
@@ -179,6 +181,7 @@ EXPORT_SYMBOL(host1x_syncpt_incr);
179static bool syncpt_load_min_is_expired(struct host1x_syncpt *sp, u32 thresh) 181static bool syncpt_load_min_is_expired(struct host1x_syncpt *sp, u32 thresh)
180{ 182{
181 host1x_hw_syncpt_load(sp->host, sp); 183 host1x_hw_syncpt_load(sp->host, sp);
184
182 return host1x_syncpt_is_expired(sp, thresh); 185 return host1x_syncpt_is_expired(sp, thresh);
183} 186}
184 187
@@ -186,7 +189,7 @@ static bool syncpt_load_min_is_expired(struct host1x_syncpt *sp, u32 thresh)
186 * Main entrypoint for syncpoint value waits. 189 * Main entrypoint for syncpoint value waits.
187 */ 190 */
188int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout, 191int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
189 u32 *value) 192 u32 *value)
190{ 193{
191 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); 194 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
192 void *ref; 195 void *ref;
@@ -201,6 +204,7 @@ int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
201 if (host1x_syncpt_is_expired(sp, thresh)) { 204 if (host1x_syncpt_is_expired(sp, thresh)) {
202 if (value) 205 if (value)
203 *value = host1x_syncpt_load(sp); 206 *value = host1x_syncpt_load(sp);
207
204 return 0; 208 return 0;
205 } 209 }
206 210
@@ -209,6 +213,7 @@ int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
209 if (host1x_syncpt_is_expired(sp, thresh)) { 213 if (host1x_syncpt_is_expired(sp, thresh)) {
210 if (value) 214 if (value)
211 *value = val; 215 *value = val;
216
212 goto done; 217 goto done;
213 } 218 }
214 219
@@ -239,32 +244,42 @@ int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
239 /* wait for the syncpoint, or timeout, or signal */ 244 /* wait for the syncpoint, or timeout, or signal */
240 while (timeout) { 245 while (timeout) {
241 long check = min_t(long, SYNCPT_CHECK_PERIOD, timeout); 246 long check = min_t(long, SYNCPT_CHECK_PERIOD, timeout);
242 int remain = wait_event_interruptible_timeout(wq, 247 int remain;
248
249 remain = wait_event_interruptible_timeout(wq,
243 syncpt_load_min_is_expired(sp, thresh), 250 syncpt_load_min_is_expired(sp, thresh),
244 check); 251 check);
245 if (remain > 0 || host1x_syncpt_is_expired(sp, thresh)) { 252 if (remain > 0 || host1x_syncpt_is_expired(sp, thresh)) {
246 if (value) 253 if (value)
247 *value = host1x_syncpt_load(sp); 254 *value = host1x_syncpt_load(sp);
255
248 err = 0; 256 err = 0;
257
249 break; 258 break;
250 } 259 }
260
251 if (remain < 0) { 261 if (remain < 0) {
252 err = remain; 262 err = remain;
253 break; 263 break;
254 } 264 }
265
255 timeout -= check; 266 timeout -= check;
267
256 if (timeout && check_count <= MAX_STUCK_CHECK_COUNT) { 268 if (timeout && check_count <= MAX_STUCK_CHECK_COUNT) {
257 dev_warn(sp->host->dev, 269 dev_warn(sp->host->dev,
258 "%s: syncpoint id %d (%s) stuck waiting %d, timeout=%ld\n", 270 "%s: syncpoint id %u (%s) stuck waiting %d, timeout=%ld\n",
259 current->comm, sp->id, sp->name, 271 current->comm, sp->id, sp->name,
260 thresh, timeout); 272 thresh, timeout);
261 273
262 host1x_debug_dump_syncpts(sp->host); 274 host1x_debug_dump_syncpts(sp->host);
275
263 if (check_count == MAX_STUCK_CHECK_COUNT) 276 if (check_count == MAX_STUCK_CHECK_COUNT)
264 host1x_debug_dump(sp->host); 277 host1x_debug_dump(sp->host);
278
265 check_count++; 279 check_count++;
266 } 280 }
267 } 281 }
282
268 host1x_intr_put_ref(sp->host, sp->id, ref); 283 host1x_intr_put_ref(sp->host, sp->id, ref);
269 284
270done: 285done:
@@ -279,7 +294,9 @@ bool host1x_syncpt_is_expired(struct host1x_syncpt *sp, u32 thresh)
279{ 294{
280 u32 current_val; 295 u32 current_val;
281 u32 future_val; 296 u32 future_val;
297
282 smp_rmb(); 298 smp_rmb();
299
283 current_val = (u32)atomic_read(&sp->min_val); 300 current_val = (u32)atomic_read(&sp->min_val);
284 future_val = (u32)atomic_read(&sp->max_val); 301 future_val = (u32)atomic_read(&sp->max_val);
285 302
@@ -341,14 +358,14 @@ int host1x_syncpt_init(struct host1x *host)
341{ 358{
342 struct host1x_syncpt_base *bases; 359 struct host1x_syncpt_base *bases;
343 struct host1x_syncpt *syncpt; 360 struct host1x_syncpt *syncpt;
344 int i; 361 unsigned int i;
345 362
346 syncpt = devm_kzalloc(host->dev, sizeof(*syncpt) * host->info->nb_pts, 363 syncpt = devm_kcalloc(host->dev, host->info->nb_pts, sizeof(*syncpt),
347 GFP_KERNEL); 364 GFP_KERNEL);
348 if (!syncpt) 365 if (!syncpt)
349 return -ENOMEM; 366 return -ENOMEM;
350 367
351 bases = devm_kzalloc(host->dev, sizeof(*bases) * host->info->nb_bases, 368 bases = devm_kcalloc(host->dev, host->info->nb_bases, sizeof(*bases),
352 GFP_KERNEL); 369 GFP_KERNEL);
353 if (!bases) 370 if (!bases)
354 return -ENOMEM; 371 return -ENOMEM;
@@ -378,6 +395,7 @@ struct host1x_syncpt *host1x_syncpt_request(struct device *dev,
378 unsigned long flags) 395 unsigned long flags)
379{ 396{
380 struct host1x *host = dev_get_drvdata(dev->parent); 397 struct host1x *host = dev_get_drvdata(dev->parent);
398
381 return host1x_syncpt_alloc(host, dev, flags); 399 return host1x_syncpt_alloc(host, dev, flags);
382} 400}
383EXPORT_SYMBOL(host1x_syncpt_request); 401EXPORT_SYMBOL(host1x_syncpt_request);
@@ -398,8 +416,9 @@ EXPORT_SYMBOL(host1x_syncpt_free);
398 416
399void host1x_syncpt_deinit(struct host1x *host) 417void host1x_syncpt_deinit(struct host1x *host)
400{ 418{
401 int i;
402 struct host1x_syncpt *sp = host->syncpt; 419 struct host1x_syncpt *sp = host->syncpt;
420 unsigned int i;
421
403 for (i = 0; i < host->info->nb_pts; i++, sp++) 422 for (i = 0; i < host->info->nb_pts; i++, sp++)
404 kfree(sp->name); 423 kfree(sp->name);
405} 424}
@@ -407,10 +426,11 @@ void host1x_syncpt_deinit(struct host1x *host)
407/* 426/*
408 * Read max. It indicates how many operations there are in queue, either in 427 * Read max. It indicates how many operations there are in queue, either in
409 * channel or in a software thread. 428 * channel or in a software thread.
410 * */ 429 */
411u32 host1x_syncpt_read_max(struct host1x_syncpt *sp) 430u32 host1x_syncpt_read_max(struct host1x_syncpt *sp)
412{ 431{
413 smp_rmb(); 432 smp_rmb();
433
414 return (u32)atomic_read(&sp->max_val); 434 return (u32)atomic_read(&sp->max_val);
415} 435}
416EXPORT_SYMBOL(host1x_syncpt_read_max); 436EXPORT_SYMBOL(host1x_syncpt_read_max);
@@ -421,6 +441,7 @@ EXPORT_SYMBOL(host1x_syncpt_read_max);
421u32 host1x_syncpt_read_min(struct host1x_syncpt *sp) 441u32 host1x_syncpt_read_min(struct host1x_syncpt *sp)
422{ 442{
423 smp_rmb(); 443 smp_rmb();
444
424 return (u32)atomic_read(&sp->min_val); 445 return (u32)atomic_read(&sp->min_val);
425} 446}
426EXPORT_SYMBOL(host1x_syncpt_read_min); 447EXPORT_SYMBOL(host1x_syncpt_read_min);
@@ -431,25 +452,26 @@ u32 host1x_syncpt_read(struct host1x_syncpt *sp)
431} 452}
432EXPORT_SYMBOL(host1x_syncpt_read); 453EXPORT_SYMBOL(host1x_syncpt_read);
433 454
434int host1x_syncpt_nb_pts(struct host1x *host) 455unsigned int host1x_syncpt_nb_pts(struct host1x *host)
435{ 456{
436 return host->info->nb_pts; 457 return host->info->nb_pts;
437} 458}
438 459
439int host1x_syncpt_nb_bases(struct host1x *host) 460unsigned int host1x_syncpt_nb_bases(struct host1x *host)
440{ 461{
441 return host->info->nb_bases; 462 return host->info->nb_bases;
442} 463}
443 464
444int host1x_syncpt_nb_mlocks(struct host1x *host) 465unsigned int host1x_syncpt_nb_mlocks(struct host1x *host)
445{ 466{
446 return host->info->nb_mlocks; 467 return host->info->nb_mlocks;
447} 468}
448 469
449struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id) 470struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, unsigned int id)
450{ 471{
451 if (host->info->nb_pts < id) 472 if (host->info->nb_pts < id)
452 return NULL; 473 return NULL;
474
453 return host->syncpt + id; 475 return host->syncpt + id;
454} 476}
455EXPORT_SYMBOL(host1x_syncpt_get); 477EXPORT_SYMBOL(host1x_syncpt_get);
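
For reference, the syncpt.c hunks above convert open-coded devm_kzalloc(dev, sizeof(*p) * n, ...) allocations to devm_kcalloc(dev, n, sizeof(*p), ...), which still returns zeroed memory but additionally fails cleanly if the count multiplication would overflow. A minimal sketch of the pattern follows; the element type, function name and count are placeholders, not taken from the patch.

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>

struct foo {			/* placeholder element type, not from the patch */
	u32 id;
};

static int example_alloc_foo_array(struct device *dev, unsigned int count,
				   struct foo **out)
{
	struct foo *items;

	/*
	 * devm_kcalloc() zeroes the buffer and returns NULL if
	 * count * sizeof(*items) would overflow, unlike the old
	 * devm_kzalloc(dev, sizeof(*items) * count, ...) form.
	 */
	items = devm_kcalloc(dev, count, sizeof(*items), GFP_KERNEL);
	if (!items)
		return -ENOMEM;

	*out = items;
	return 0;
}
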
diff --git a/drivers/gpu/host1x/syncpt.h b/drivers/gpu/host1x/syncpt.h
index 9056465ecd3f..f719205105ac 100644
--- a/drivers/gpu/host1x/syncpt.h
+++ b/drivers/gpu/host1x/syncpt.h
@@ -37,7 +37,7 @@ struct host1x_syncpt_base {
37}; 37};
38 38
39struct host1x_syncpt { 39struct host1x_syncpt {
40 int id; 40 unsigned int id;
41 atomic_t min_val; 41 atomic_t min_val;
42 atomic_t max_val; 42 atomic_t max_val;
43 u32 base_val; 43 u32 base_val;
@@ -58,13 +58,13 @@ int host1x_syncpt_init(struct host1x *host);
58void host1x_syncpt_deinit(struct host1x *host); 58void host1x_syncpt_deinit(struct host1x *host);
59 59
60/* Return number of sync point supported. */ 60/* Return number of sync point supported. */
61int host1x_syncpt_nb_pts(struct host1x *host); 61unsigned int host1x_syncpt_nb_pts(struct host1x *host);
62 62
63/* Return number of wait bases supported. */ 63/* Return number of wait bases supported. */
64int host1x_syncpt_nb_bases(struct host1x *host); 64unsigned int host1x_syncpt_nb_bases(struct host1x *host);
65 65
66/* Return number of mlocks supported. */ 66/* Return number of mlocks supported. */
67int host1x_syncpt_nb_mlocks(struct host1x *host); 67unsigned int host1x_syncpt_nb_mlocks(struct host1x *host);
68 68
69/* 69/*
 70 * Check sync point sanity. If max is larger than min, there are too many 70 * Check sync point sanity. If max is larger than min, there are too many
diff --git a/drivers/gpu/ipu-v3/ipu-dc.c b/drivers/gpu/ipu-v3/ipu-dc.c
index 2f29780e7c68..659475c1e44a 100644
--- a/drivers/gpu/ipu-v3/ipu-dc.c
+++ b/drivers/gpu/ipu-v3/ipu-dc.c
@@ -150,6 +150,9 @@ static void dc_write_tmpl(struct ipu_dc *dc, int word, u32 opcode, u32 operand,
150static int ipu_bus_format_to_map(u32 fmt) 150static int ipu_bus_format_to_map(u32 fmt)
151{ 151{
152 switch (fmt) { 152 switch (fmt) {
153 default:
154 WARN_ON(1);
155 /* fall-through */
153 case MEDIA_BUS_FMT_RGB888_1X24: 156 case MEDIA_BUS_FMT_RGB888_1X24:
154 return IPU_DC_MAP_RGB24; 157 return IPU_DC_MAP_RGB24;
155 case MEDIA_BUS_FMT_RGB565_1X16: 158 case MEDIA_BUS_FMT_RGB565_1X16:
@@ -162,8 +165,6 @@ static int ipu_bus_format_to_map(u32 fmt)
162 return IPU_DC_MAP_LVDS666; 165 return IPU_DC_MAP_LVDS666;
163 case MEDIA_BUS_FMT_BGR888_1X24: 166 case MEDIA_BUS_FMT_BGR888_1X24:
164 return IPU_DC_MAP_BGR24; 167 return IPU_DC_MAP_BGR24;
165 default:
166 return -EINVAL;
167 } 168 }
168} 169}
169 170
@@ -178,10 +179,6 @@ int ipu_dc_init_sync(struct ipu_dc *dc, struct ipu_di *di, bool interlaced,
178 dc->di = ipu_di_get_num(di); 179 dc->di = ipu_di_get_num(di);
179 180
180 map = ipu_bus_format_to_map(bus_format); 181 map = ipu_bus_format_to_map(bus_format);
181 if (map < 0) {
182 dev_dbg(priv->dev, "IPU_DISP: No MAP\n");
183 return map;
184 }
185 182
186 /* 183 /*
187 * In interlaced mode we need more counters to create the asymmetric 184 * In interlaced mode we need more counters to create the asymmetric
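
With the ipu-dc.c hunk above, an unknown bus format now triggers a WARN_ON() and falls through to the RGB24 mapping instead of returning -EINVAL, which is why ipu_dc_init_sync() no longer checks for a negative map. A minimal sketch of the same default-first fall-through idiom, using illustrative identifiers rather than the real MEDIA_BUS_FMT_* values:

#include <linux/bug.h>
#include <linux/types.h>

enum example_fmt { EXAMPLE_FMT_RGB888 = 1, EXAMPLE_FMT_RGB565 };
enum example_map { EXAMPLE_MAP_RGB24, EXAMPLE_MAP_RGB565 };

/* Map a bus format code to a table index, defaulting to RGB24. */
static enum example_map example_format_to_map(u32 fmt)
{
	switch (fmt) {
	default:
		WARN_ON(1);	/* unexpected format: warn, then fall back */
		/* fall-through */
	case EXAMPLE_FMT_RGB888:
		return EXAMPLE_MAP_RGB24;
	case EXAMPLE_FMT_RGB565:
		return EXAMPLE_MAP_RGB565;
	}
}
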
diff --git a/drivers/gpu/ipu-v3/ipu-di.c b/drivers/gpu/ipu-v3/ipu-di.c
index 359268e3a166..a8d87ddd8a17 100644
--- a/drivers/gpu/ipu-v3/ipu-di.c
+++ b/drivers/gpu/ipu-v3/ipu-di.c
@@ -572,9 +572,6 @@ int ipu_di_init_sync_panel(struct ipu_di *di, struct ipu_di_signal_cfg *sig)
572 dev_dbg(di->ipu->dev, "disp %d: panel size = %d x %d\n", 572 dev_dbg(di->ipu->dev, "disp %d: panel size = %d x %d\n",
573 di->id, sig->mode.hactive, sig->mode.vactive); 573 di->id, sig->mode.hactive, sig->mode.vactive);
574 574
575 if ((sig->mode.vsync_len == 0) || (sig->mode.hsync_len == 0))
576 return -EINVAL;
577
578 dev_dbg(di->ipu->dev, "Clocks: IPU %luHz DI %luHz Needed %luHz\n", 575 dev_dbg(di->ipu->dev, "Clocks: IPU %luHz DI %luHz Needed %luHz\n",
579 clk_get_rate(di->clk_ipu), 576 clk_get_rate(di->clk_ipu),
580 clk_get_rate(di->clk_di), 577 clk_get_rate(di->clk_di),
diff --git a/drivers/gpu/ipu-v3/ipu-dmfc.c b/drivers/gpu/ipu-v3/ipu-dmfc.c
index 837b1ec22800..42705bb5aaa3 100644
--- a/drivers/gpu/ipu-v3/ipu-dmfc.c
+++ b/drivers/gpu/ipu-v3/ipu-dmfc.c
@@ -45,17 +45,6 @@
45#define DMFC_DP_CHAN_6B_24 16 45#define DMFC_DP_CHAN_6B_24 16
46#define DMFC_DP_CHAN_6F_29 24 46#define DMFC_DP_CHAN_6F_29 24
47 47
48#define DMFC_FIFO_SIZE_64 (3 << 3)
49#define DMFC_FIFO_SIZE_128 (2 << 3)
50#define DMFC_FIFO_SIZE_256 (1 << 3)
51#define DMFC_FIFO_SIZE_512 (0 << 3)
52
53#define DMFC_SEGMENT(x) ((x & 0x7) << 0)
54#define DMFC_BURSTSIZE_128 (0 << 6)
55#define DMFC_BURSTSIZE_64 (1 << 6)
56#define DMFC_BURSTSIZE_32 (2 << 6)
57#define DMFC_BURSTSIZE_16 (3 << 6)
58
59struct dmfc_channel_data { 48struct dmfc_channel_data {
60 int ipu_channel; 49 int ipu_channel;
61 unsigned long channel_reg; 50 unsigned long channel_reg;
@@ -104,9 +93,6 @@ struct ipu_dmfc_priv;
104 93
105struct dmfc_channel { 94struct dmfc_channel {
106 unsigned slots; 95 unsigned slots;
107 unsigned slotmask;
108 unsigned segment;
109 int burstsize;
110 struct ipu_soc *ipu; 96 struct ipu_soc *ipu;
111 struct ipu_dmfc_priv *priv; 97 struct ipu_dmfc_priv *priv;
112 const struct dmfc_channel_data *data; 98 const struct dmfc_channel_data *data;
@@ -117,7 +103,6 @@ struct ipu_dmfc_priv {
117 struct device *dev; 103 struct device *dev;
118 struct dmfc_channel channels[DMFC_NUM_CHANNELS]; 104 struct dmfc_channel channels[DMFC_NUM_CHANNELS];
119 struct mutex mutex; 105 struct mutex mutex;
120 unsigned long bandwidth_per_slot;
121 void __iomem *base; 106 void __iomem *base;
122 int use_count; 107 int use_count;
123}; 108};
@@ -172,184 +157,6 @@ void ipu_dmfc_disable_channel(struct dmfc_channel *dmfc)
172} 157}
173EXPORT_SYMBOL_GPL(ipu_dmfc_disable_channel); 158EXPORT_SYMBOL_GPL(ipu_dmfc_disable_channel);
174 159
175static int ipu_dmfc_setup_channel(struct dmfc_channel *dmfc, int slots,
176 int segment, int burstsize)
177{
178 struct ipu_dmfc_priv *priv = dmfc->priv;
179 u32 val, field;
180
181 dev_dbg(priv->dev,
182 "dmfc: using %d slots starting from segment %d for IPU channel %d\n",
183 slots, segment, dmfc->data->ipu_channel);
184
185 switch (slots) {
186 case 1:
187 field = DMFC_FIFO_SIZE_64;
188 break;
189 case 2:
190 field = DMFC_FIFO_SIZE_128;
191 break;
192 case 4:
193 field = DMFC_FIFO_SIZE_256;
194 break;
195 case 8:
196 field = DMFC_FIFO_SIZE_512;
197 break;
198 default:
199 return -EINVAL;
200 }
201
202 switch (burstsize) {
203 case 16:
204 field |= DMFC_BURSTSIZE_16;
205 break;
206 case 32:
207 field |= DMFC_BURSTSIZE_32;
208 break;
209 case 64:
210 field |= DMFC_BURSTSIZE_64;
211 break;
212 case 128:
213 field |= DMFC_BURSTSIZE_128;
214 break;
215 }
216
217 field |= DMFC_SEGMENT(segment);
218
219 val = readl(priv->base + dmfc->data->channel_reg);
220
221 val &= ~(0xff << dmfc->data->shift);
222 val |= field << dmfc->data->shift;
223
224 writel(val, priv->base + dmfc->data->channel_reg);
225
226 dmfc->slots = slots;
227 dmfc->segment = segment;
228 dmfc->burstsize = burstsize;
229 dmfc->slotmask = ((1 << slots) - 1) << segment;
230
231 return 0;
232}
233
234static int dmfc_bandwidth_to_slots(struct ipu_dmfc_priv *priv,
235 unsigned long bandwidth)
236{
237 int slots = 1;
238
239 while (slots * priv->bandwidth_per_slot < bandwidth)
240 slots *= 2;
241
242 return slots;
243}
244
245static int dmfc_find_slots(struct ipu_dmfc_priv *priv, int slots)
246{
247 unsigned slotmask_need, slotmask_used = 0;
248 int i, segment = 0;
249
250 slotmask_need = (1 << slots) - 1;
251
252 for (i = 0; i < DMFC_NUM_CHANNELS; i++)
253 slotmask_used |= priv->channels[i].slotmask;
254
255 while (slotmask_need <= 0xff) {
256 if (!(slotmask_used & slotmask_need))
257 return segment;
258
259 slotmask_need <<= 1;
260 segment++;
261 }
262
263 return -EBUSY;
264}
265
266void ipu_dmfc_free_bandwidth(struct dmfc_channel *dmfc)
267{
268 struct ipu_dmfc_priv *priv = dmfc->priv;
269 int i;
270
271 dev_dbg(priv->dev, "dmfc: freeing %d slots starting from segment %d\n",
272 dmfc->slots, dmfc->segment);
273
274 mutex_lock(&priv->mutex);
275
276 if (!dmfc->slots)
277 goto out;
278
279 dmfc->slotmask = 0;
280 dmfc->slots = 0;
281 dmfc->segment = 0;
282
283 for (i = 0; i < DMFC_NUM_CHANNELS; i++)
284 priv->channels[i].slotmask = 0;
285
286 for (i = 0; i < DMFC_NUM_CHANNELS; i++) {
287 if (priv->channels[i].slots > 0) {
288 priv->channels[i].segment =
289 dmfc_find_slots(priv, priv->channels[i].slots);
290 priv->channels[i].slotmask =
291 ((1 << priv->channels[i].slots) - 1) <<
292 priv->channels[i].segment;
293 }
294 }
295
296 for (i = 0; i < DMFC_NUM_CHANNELS; i++) {
297 if (priv->channels[i].slots > 0)
298 ipu_dmfc_setup_channel(&priv->channels[i],
299 priv->channels[i].slots,
300 priv->channels[i].segment,
301 priv->channels[i].burstsize);
302 }
303out:
304 mutex_unlock(&priv->mutex);
305}
306EXPORT_SYMBOL_GPL(ipu_dmfc_free_bandwidth);
307
308int ipu_dmfc_alloc_bandwidth(struct dmfc_channel *dmfc,
309 unsigned long bandwidth_pixel_per_second, int burstsize)
310{
311 struct ipu_dmfc_priv *priv = dmfc->priv;
312 int slots = dmfc_bandwidth_to_slots(priv, bandwidth_pixel_per_second);
313 int segment = -1, ret = 0;
314
315 dev_dbg(priv->dev, "dmfc: trying to allocate %ldMpixel/s for IPU channel %d\n",
316 bandwidth_pixel_per_second / 1000000,
317 dmfc->data->ipu_channel);
318
319 ipu_dmfc_free_bandwidth(dmfc);
320
321 mutex_lock(&priv->mutex);
322
323 if (slots > 8) {
324 ret = -EBUSY;
325 goto out;
326 }
327
328 /* For the MEM_BG channel, first try to allocate twice the slots */
329 if (dmfc->data->ipu_channel == IPUV3_CHANNEL_MEM_BG_SYNC)
330 segment = dmfc_find_slots(priv, slots * 2);
331 else if (slots < 2)
332 /* Always allocate at least 128*4 bytes (2 slots) */
333 slots = 2;
334
335 if (segment >= 0)
336 slots *= 2;
337 else
338 segment = dmfc_find_slots(priv, slots);
339 if (segment < 0) {
340 ret = -EBUSY;
341 goto out;
342 }
343
344 ipu_dmfc_setup_channel(dmfc, slots, segment, burstsize);
345
346out:
347 mutex_unlock(&priv->mutex);
348
349 return ret;
350}
351EXPORT_SYMBOL_GPL(ipu_dmfc_alloc_bandwidth);
352
353void ipu_dmfc_config_wait4eot(struct dmfc_channel *dmfc, int width) 160void ipu_dmfc_config_wait4eot(struct dmfc_channel *dmfc, int width)
354{ 161{
355 struct ipu_dmfc_priv *priv = dmfc->priv; 162 struct ipu_dmfc_priv *priv = dmfc->priv;
@@ -384,7 +191,6 @@ EXPORT_SYMBOL_GPL(ipu_dmfc_get);
384 191
385void ipu_dmfc_put(struct dmfc_channel *dmfc) 192void ipu_dmfc_put(struct dmfc_channel *dmfc)
386{ 193{
387 ipu_dmfc_free_bandwidth(dmfc);
388} 194}
389EXPORT_SYMBOL_GPL(ipu_dmfc_put); 195EXPORT_SYMBOL_GPL(ipu_dmfc_put);
390 196
@@ -412,20 +218,15 @@ int ipu_dmfc_init(struct ipu_soc *ipu, struct device *dev, unsigned long base,
412 priv->channels[i].priv = priv; 218 priv->channels[i].priv = priv;
413 priv->channels[i].ipu = ipu; 219 priv->channels[i].ipu = ipu;
414 priv->channels[i].data = &dmfcdata[i]; 220 priv->channels[i].data = &dmfcdata[i];
415 }
416
417 writel(0x0, priv->base + DMFC_WR_CHAN);
418 writel(0x0, priv->base + DMFC_DP_CHAN);
419 221
420 /* 222 if (dmfcdata[i].ipu_channel == IPUV3_CHANNEL_MEM_BG_SYNC ||
421 * We have a total bandwidth of clkrate * 4pixel divided 223 dmfcdata[i].ipu_channel == IPUV3_CHANNEL_MEM_FG_SYNC ||
422 * into 8 slots. 224 dmfcdata[i].ipu_channel == IPUV3_CHANNEL_MEM_DC_SYNC)
423 */ 225 priv->channels[i].slots = 2;
424 priv->bandwidth_per_slot = clk_get_rate(ipu_clk) * 4 / 8; 226 }
425
426 dev_dbg(dev, "dmfc: 8 slots with %ldMpixel/s bandwidth each\n",
427 priv->bandwidth_per_slot / 1000000);
428 227
228 writel(0x00000050, priv->base + DMFC_WR_CHAN);
229 writel(0x00005654, priv->base + DMFC_DP_CHAN);
429 writel(0x202020f6, priv->base + DMFC_WR_CHAN_DEF); 230 writel(0x202020f6, priv->base + DMFC_WR_CHAN_DEF);
430 writel(0x2020f6f6, priv->base + DMFC_DP_CHAN_DEF); 231 writel(0x2020f6f6, priv->base + DMFC_DP_CHAN_DEF);
431 writel(0x00000003, priv->base + DMFC_GENERAL1); 232 writel(0x00000003, priv->base + DMFC_GENERAL1);
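
The ipu-dmfc.c hunks above drop the dynamic bandwidth/slot allocator (ipu_dmfc_alloc_bandwidth(), ipu_dmfc_free_bandwidth() and the FIFO size/segment/burst macros) in favour of a fixed FIFO layout written once in ipu_dmfc_init(), so ipu_dmfc_put() no longer has any bandwidth to release. Assuming the per-channel fields keep the layout described by the macros removed above, the new magic values decode as in this illustrative (non-patch) snippet:

#include <linux/types.h>

/* Field layout as described by the macros removed in the hunk above. */
#define EX_DMFC_FIFO_SIZE_128	(2 << 3)		/* 0x10 */
#define EX_DMFC_BURSTSIZE_64	(1 << 6)		/* 0x40 */
#define EX_DMFC_SEGMENT(x)	(((x) & 0x7) << 0)

/* 0x50: 128-word FIFO, burst 64, segment 0 -- the value now written to DMFC_WR_CHAN. */
static const u32 ex_wr_chan_field =
	EX_DMFC_FIFO_SIZE_128 | EX_DMFC_BURSTSIZE_64 | EX_DMFC_SEGMENT(0);

/*
 * 0x54 and 0x56: the same FIFO/burst with segments 4 and 6 -- matching the low
 * bytes of the 0x00005654 now written to DMFC_DP_CHAN.
 */
static const u32 ex_dp_chan_fields[] = {
	EX_DMFC_FIFO_SIZE_128 | EX_DMFC_BURSTSIZE_64 | EX_DMFC_SEGMENT(4),
	EX_DMFC_FIFO_SIZE_128 | EX_DMFC_BURSTSIZE_64 | EX_DMFC_SEGMENT(6),
};
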
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index cbd7c986d926..5f962bfcb43c 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -30,6 +30,7 @@
30 30
31#define pr_fmt(fmt) "vga_switcheroo: " fmt 31#define pr_fmt(fmt) "vga_switcheroo: " fmt
32 32
33#include <linux/apple-gmux.h>
33#include <linux/console.h> 34#include <linux/console.h>
34#include <linux/debugfs.h> 35#include <linux/debugfs.h>
35#include <linux/fb.h> 36#include <linux/fb.h>
@@ -51,9 +52,9 @@
51 * 52 *
52 * * muxed: Dual GPUs with a multiplexer chip to switch outputs between GPUs. 53 * * muxed: Dual GPUs with a multiplexer chip to switch outputs between GPUs.
53 * * muxless: Dual GPUs but only one of them is connected to outputs. 54 * * muxless: Dual GPUs but only one of them is connected to outputs.
54 * The other one is merely used to offload rendering, its results 55 * The other one is merely used to offload rendering, its results
55 * are copied over PCIe into the framebuffer. On Linux this is 56 * are copied over PCIe into the framebuffer. On Linux this is
56 * supported with DRI PRIME. 57 * supported with DRI PRIME.
57 * 58 *
58 * Hybrid graphics started to appear in the late Naughties and were initially 59 * Hybrid graphics started to appear in the late Naughties and were initially
59 * all muxed. Newer laptops moved to a muxless architecture for cost reasons. 60 * all muxed. Newer laptops moved to a muxless architecture for cost reasons.
@@ -308,7 +309,8 @@ static int register_client(struct pci_dev *pdev,
308 * 309 *
309 * Register vga client (GPU). Enable vga_switcheroo if another GPU and a 310 * Register vga client (GPU). Enable vga_switcheroo if another GPU and a
310 * handler have already registered. The power state of the client is assumed 311 * handler have already registered. The power state of the client is assumed
311 * to be ON. 312 * to be ON. Beforehand, vga_switcheroo_client_probe_defer() shall be called
313 * to ensure that all prerequisites are met.
312 * 314 *
313 * Return: 0 on success, -ENOMEM on memory allocation error. 315 * Return: 0 on success, -ENOMEM on memory allocation error.
314 */ 316 */
@@ -329,7 +331,8 @@ EXPORT_SYMBOL(vga_switcheroo_register_client);
329 * @id: client identifier 331 * @id: client identifier
330 * 332 *
331 * Register audio client (audio device on a GPU). The power state of the 333 * Register audio client (audio device on a GPU). The power state of the
332 * client is assumed to be ON. 334 * client is assumed to be ON. Beforehand, vga_switcheroo_client_probe_defer()
335 * shall be called to ensure that all prerequisites are met.
333 * 336 *
334 * Return: 0 on success, -ENOMEM on memory allocation error. 337 * Return: 0 on success, -ENOMEM on memory allocation error.
335 */ 338 */
@@ -376,6 +379,33 @@ find_active_client(struct list_head *head)
376} 379}
377 380
378/** 381/**
382 * vga_switcheroo_client_probe_defer() - whether to defer probing a given client
383 * @pdev: client pci device
384 *
385 * Determine whether any prerequisites are not fulfilled to probe a given
386 * client. Drivers shall invoke this early on in their ->probe callback
387 * and return %-EPROBE_DEFER if it evaluates to %true. Thou shalt not
388 * register the client ere thou hast called this.
389 *
390 * Return: %true if probing should be deferred, otherwise %false.
391 */
392bool vga_switcheroo_client_probe_defer(struct pci_dev *pdev)
393{
394 if ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
395 /*
396 * apple-gmux is needed on pre-retina MacBook Pro
397 * to probe the panel if pdev is the inactive GPU.
398 */
399 if (apple_gmux_present() && pdev != vga_default_device() &&
400 !vgasr_priv.handler_flags)
401 return true;
402 }
403
404 return false;
405}
406EXPORT_SYMBOL(vga_switcheroo_client_probe_defer);
407
408/**
379 * vga_switcheroo_get_client_state() - obtain power state of a given client 409 * vga_switcheroo_get_client_state() - obtain power state of a given client
380 * @pdev: client pci device 410 * @pdev: client pci device
381 * 411 *
@@ -530,21 +560,21 @@ EXPORT_SYMBOL(vga_switcheroo_unlock_ddc);
530 * * OFF: Power off the device not in use. 560 * * OFF: Power off the device not in use.
531 * * ON: Power on the device not in use. 561 * * ON: Power on the device not in use.
532 * * IGD: Switch to the integrated graphics device. 562 * * IGD: Switch to the integrated graphics device.
533 * Power on the integrated GPU if necessary, power off the discrete GPU. 563 * Power on the integrated GPU if necessary, power off the discrete GPU.
534 * Prerequisite is that no user space processes (e.g. Xorg, alsactl) 564 * Prerequisite is that no user space processes (e.g. Xorg, alsactl)
535 * have opened device files of the GPUs or the audio client. If the 565 * have opened device files of the GPUs or the audio client. If the
536 * switch fails, the user may invoke lsof(8) or fuser(1) on /dev/dri/ 566 * switch fails, the user may invoke lsof(8) or fuser(1) on /dev/dri/
537 * and /dev/snd/controlC1 to identify processes blocking the switch. 567 * and /dev/snd/controlC1 to identify processes blocking the switch.
538 * * DIS: Switch to the discrete graphics device. 568 * * DIS: Switch to the discrete graphics device.
539 * * DIGD: Delayed switch to the integrated graphics device. 569 * * DIGD: Delayed switch to the integrated graphics device.
540 * This will perform the switch once the last user space process has 570 * This will perform the switch once the last user space process has
541 * closed the device files of the GPUs and the audio client. 571 * closed the device files of the GPUs and the audio client.
542 * * DDIS: Delayed switch to the discrete graphics device. 572 * * DDIS: Delayed switch to the discrete graphics device.
543 * * MIGD: Mux-only switch to the integrated graphics device. 573 * * MIGD: Mux-only switch to the integrated graphics device.
544 * Does not remap console or change the power state of either gpu. 574 * Does not remap console or change the power state of either gpu.
545 * If the integrated GPU is currently off, the screen will turn black. 575 * If the integrated GPU is currently off, the screen will turn black.
546 * If it is on, the screen will show whatever happens to be in VRAM. 576 * If it is on, the screen will show whatever happens to be in VRAM.
547 * Either way, the user has to blindly enter the command to switch back. 577 * Either way, the user has to blindly enter the command to switch back.
548 * * MDIS: Mux-only switch to the discrete graphics device. 578 * * MDIS: Mux-only switch to the discrete graphics device.
549 * 579 *
550 * For GPUs whose power state is controlled by the driver's runtime pm, 580 * For GPUs whose power state is controlled by the driver's runtime pm,
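
The kerneldoc added above tells GPU drivers to call vga_switcheroo_client_probe_defer() early in their ->probe and to return -EPROBE_DEFER while a prerequisite such as apple-gmux is missing, before registering any client. A minimal sketch of such a probe, with a hypothetical driver name (the real call sites are the GPU drivers updated elsewhere in this merge):

#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/vga_switcheroo.h>

/* "example_gpu" is a placeholder; the real callers are the GPU drivers. */
static int example_gpu_probe(struct pci_dev *pdev,
			     const struct pci_device_id *id)
{
	/* Defer until prerequisites such as apple-gmux have shown up. */
	if (vga_switcheroo_client_probe_defer(pdev))
		return -EPROBE_DEFER;

	/* ... normal device setup, then vga_switcheroo client registration ... */
	return 0;
}
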
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
index 4afc999c0780..6b01e126fe73 100644
--- a/drivers/media/platform/omap/omap_vout.c
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -45,7 +45,7 @@
45#include <media/v4l2-ioctl.h> 45#include <media/v4l2-ioctl.h>
46 46
47#include <video/omapvrfb.h> 47#include <video/omapvrfb.h>
48#include <video/omapdss.h> 48#include <video/omapfb_dss.h>
49 49
50#include "omap_voutlib.h" 50#include "omap_voutlib.h"
51#include "omap_voutdef.h" 51#include "omap_voutdef.h"
diff --git a/drivers/media/platform/omap/omap_voutdef.h b/drivers/media/platform/omap/omap_voutdef.h
index 49de1475e473..80c79fabdf95 100644
--- a/drivers/media/platform/omap/omap_voutdef.h
+++ b/drivers/media/platform/omap/omap_voutdef.h
@@ -12,7 +12,7 @@
12#define OMAP_VOUTDEF_H 12#define OMAP_VOUTDEF_H
13 13
14#include <media/v4l2-ctrls.h> 14#include <media/v4l2-ctrls.h>
15#include <video/omapdss.h> 15#include <video/omapfb_dss.h>
16#include <video/omapvrfb.h> 16#include <video/omapvrfb.h>
17 17
18#define YUYV_BPP 2 18#define YUYV_BPP 2
diff --git a/drivers/media/platform/omap/omap_voutlib.c b/drivers/media/platform/omap/omap_voutlib.c
index 80b0d88f125c..58a25fdf0cce 100644
--- a/drivers/media/platform/omap/omap_voutlib.c
+++ b/drivers/media/platform/omap/omap_voutlib.c
@@ -26,7 +26,7 @@
26 26
27#include <linux/dma-mapping.h> 27#include <linux/dma-mapping.h>
28 28
29#include <video/omapdss.h> 29#include <video/omapfb_dss.h>
30 30
31#include "omap_voutlib.h" 31#include "omap_voutlib.h"
32 32
diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c
index b3cc3ab63799..6fc156a3918d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/pd.c
@@ -205,7 +205,9 @@ int mlx4_bf_alloc(struct mlx4_dev *dev, struct mlx4_bf *bf, int node)
205 goto free_uar; 205 goto free_uar;
206 } 206 }
207 207
208 uar->bf_map = io_mapping_map_wc(priv->bf_mapping, uar->index << PAGE_SHIFT); 208 uar->bf_map = io_mapping_map_wc(priv->bf_mapping,
209 uar->index << PAGE_SHIFT,
210 PAGE_SIZE);
209 if (!uar->bf_map) { 211 if (!uar->bf_map) {
210 err = -ENOMEM; 212 err = -ENOMEM;
211 goto unamp_uar; 213 goto unamp_uar;
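
The mlx4 hunk above adapts to io_mapping_map_wc() now taking the mapping size as a third argument (PAGE_SIZE here) in addition to the offset. A minimal sketch of the updated call, with placeholder names for the driver's own state:

#include <linux/io-mapping.h>

/* "map" and "index" stand in for the driver's own bookkeeping. */
static void __iomem *example_map_page_wc(struct io_mapping *map,
					 unsigned long index)
{
	/* The size of the requested mapping is now passed explicitly. */
	return io_mapping_map_wc(map, index << PAGE_SHIFT, PAGE_SIZE);
}
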
diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c
index 4034d2d4c507..a66be137324c 100644
--- a/drivers/platform/x86/apple-gmux.c
+++ b/drivers/platform/x86/apple-gmux.c
@@ -31,19 +31,21 @@
31/** 31/**
32 * DOC: Overview 32 * DOC: Overview
33 * 33 *
34 * :1: http://www.latticesemi.com/en/Products/FPGAandCPLD/LatticeXP2.aspx
35 * :2: http://www.renesas.com/products/mpumcu/h8s/h8s2100/h8s2113/index.jsp
36 *
37 * gmux is a microcontroller built into the MacBook Pro to support dual GPUs: 34 * gmux is a microcontroller built into the MacBook Pro to support dual GPUs:
38 * A {1}[Lattice XP2] on pre-retinas, a {2}[Renesas R4F2113] on retinas. 35 * A `Lattice XP2`_ on pre-retinas, a `Renesas R4F2113`_ on retinas.
39 * 36 *
40 * (The MacPro6,1 2013 also has a gmux, however it is unclear why since it has 37 * (The MacPro6,1 2013 also has a gmux, however it is unclear why since it has
41 * dual GPUs but no built-in display.) 38 * dual GPUs but no built-in display.)
42 * 39 *
43 * gmux is connected to the LPC bus of the southbridge. Its I/O ports are 40 * gmux is connected to the LPC bus of the southbridge. Its I/O ports are
44 * accessed differently depending on the microcontroller: Driver functions 41 * accessed differently depending on the microcontroller: Driver functions
45 * to access a pre-retina gmux are infixed `_pio_`, those for a retina gmux 42 * to access a pre-retina gmux are infixed ``_pio_``, those for a retina gmux
46 * are infixed `_index_`. 43 * are infixed ``_index_``.
44 *
45 * .. _Lattice XP2:
46 * http://www.latticesemi.com/en/Products/FPGAandCPLD/LatticeXP2.aspx
47 * .. _Renesas R4F2113:
48 * http://www.renesas.com/products/mpumcu/h8s/h8s2100/h8s2113/index.jsp
47 */ 49 */
48 50
49struct apple_gmux_data { 51struct apple_gmux_data {
@@ -272,15 +274,15 @@ static bool gmux_is_indexed(struct apple_gmux_data *gmux_data)
272/** 274/**
273 * DOC: Backlight control 275 * DOC: Backlight control
274 * 276 *
275 * :3: http://www.ti.com/lit/ds/symlink/lp8543.pdf
276 * :4: http://www.ti.com/lit/ds/symlink/lp8545.pdf
277 *
278 * On single GPU MacBooks, the PWM signal for the backlight is generated by 277 * On single GPU MacBooks, the PWM signal for the backlight is generated by
279 * the GPU. On dual GPU MacBook Pros by contrast, either GPU may be suspended 278 * the GPU. On dual GPU MacBook Pros by contrast, either GPU may be suspended
280 * to conserve energy. Hence the PWM signal needs to be generated by a separate 279 * to conserve energy. Hence the PWM signal needs to be generated by a separate
281 * backlight driver which is controlled by gmux. The earliest generation 280 * backlight driver which is controlled by gmux. The earliest generation
282 * MBP5 2008/09 uses a {3}[TI LP8543] backlight driver. All newer models 281 * MBP5 2008/09 uses a `TI LP8543`_ backlight driver. All newer models
283 * use a {4}[TI LP8545]. 282 * use a `TI LP8545`_.
283 *
284 * .. _TI LP8543: http://www.ti.com/lit/ds/symlink/lp8543.pdf
285 * .. _TI LP8545: http://www.ti.com/lit/ds/symlink/lp8545.pdf
284 */ 286 */
285 287
286static int gmux_get_brightness(struct backlight_device *bd) 288static int gmux_get_brightness(struct backlight_device *bd)
@@ -312,28 +314,20 @@ static const struct backlight_ops gmux_bl_ops = {
312/** 314/**
313 * DOC: Graphics mux 315 * DOC: Graphics mux
314 * 316 *
315 * :5: http://pimg-fpiw.uspto.gov/fdd/07/870/086/0.pdf
316 * :6: http://www.nxp.com/documents/data_sheet/CBTL06141.pdf
317 * :7: http://www.ti.com/lit/ds/symlink/hd3ss212.pdf
318 * :8: https://www.pericom.com/assets/Datasheets/PI3VDP12412.pdf
319 * :9: http://www.ti.com/lit/ds/symlink/sn74lv4066a.pdf
320 * :10: http://pdf.datasheetarchive.com/indexerfiles/Datasheets-SW16/DSASW00308511.pdf
321 * :11: http://www.ti.com/lit/ds/symlink/ts3ds10224.pdf
322 *
323 * On pre-retinas, the LVDS outputs of both GPUs feed into gmux which muxes 317 * On pre-retinas, the LVDS outputs of both GPUs feed into gmux which muxes
324 * either of them to the panel. One of the tricks gmux has up its sleeve is 318 * either of them to the panel. One of the tricks gmux has up its sleeve is
325 * to lengthen the blanking interval of its output during a switch to 319 * to lengthen the blanking interval of its output during a switch to
326 * synchronize it with the GPU switched to. This allows for a flicker-free 320 * synchronize it with the GPU switched to. This allows for a flicker-free
327 * switch that is imperceptible by the user ({5}[US 8,687,007 B2]). 321 * switch that is imperceptible by the user (`US 8,687,007 B2`_).
328 * 322 *
329 * On retinas, muxing is no longer done by gmux itself, but by a separate 323 * On retinas, muxing is no longer done by gmux itself, but by a separate
330 * chip which is controlled by gmux. The chip is triple sourced, it is 324 * chip which is controlled by gmux. The chip is triple sourced, it is
331 * either an {6}[NXP CBTL06142], {7}[TI HD3SS212] or {8}[Pericom PI3VDP12412]. 325 * either an `NXP CBTL06142`_, `TI HD3SS212`_ or `Pericom PI3VDP12412`_.
332 * The panel is driven with eDP instead of LVDS since the pixel clock 326 * The panel is driven with eDP instead of LVDS since the pixel clock
333 * required for retina resolution exceeds LVDS' limits. 327 * required for retina resolution exceeds LVDS' limits.
334 * 328 *
335 * Pre-retinas are able to switch the panel's DDC pins separately. 329 * Pre-retinas are able to switch the panel's DDC pins separately.
336 * This is handled by a {9}[TI SN74LV4066A] which is controlled by gmux. 330 * This is handled by a `TI SN74LV4066A`_ which is controlled by gmux.
337 * The inactive GPU can thus probe the panel's EDID without switching over 331 * The inactive GPU can thus probe the panel's EDID without switching over
338 * the entire panel. Retinas lack this functionality as the chips used for 332 * the entire panel. Retinas lack this functionality as the chips used for
339 * eDP muxing are incapable of switching the AUX channel separately (see 333 * eDP muxing are incapable of switching the AUX channel separately (see
@@ -344,15 +338,15 @@ static const struct backlight_ops gmux_bl_ops = {
344 * 338 *
345 * The external DP port is only fully switchable on the first two unibody 339 * The external DP port is only fully switchable on the first two unibody
346 * MacBook Pro generations, MBP5 2008/09 and MBP6 2010. This is done by an 340 * MacBook Pro generations, MBP5 2008/09 and MBP6 2010. This is done by an
347 * {6}[NXP CBTL06141] which is controlled by gmux. It's the predecessor of the 341 * `NXP CBTL06141`_ which is controlled by gmux. It's the predecessor of the
348 * eDP mux on retinas, the difference being support for 2.7 versus 5.4 Gbit/s. 342 * eDP mux on retinas, the difference being support for 2.7 versus 5.4 Gbit/s.
349 * 343 *
350 * The following MacBook Pro generations replaced the external DP port with a 344 * The following MacBook Pro generations replaced the external DP port with a
351 * combined DP/Thunderbolt port and lost the ability to switch it between GPUs, 345 * combined DP/Thunderbolt port and lost the ability to switch it between GPUs,
352 * connecting it either to the discrete GPU or the Thunderbolt controller. 346 * connecting it either to the discrete GPU or the Thunderbolt controller.
353 * Oddly enough, while the full port is no longer switchable, AUX and HPD 347 * Oddly enough, while the full port is no longer switchable, AUX and HPD
354 * are still switchable by way of an {10}[NXP CBTL03062] (on pre-retinas 348 * are still switchable by way of an `NXP CBTL03062`_ (on pre-retinas
355 * MBP8 2011 and MBP9 2012) or two {11}[TI TS3DS10224] (on retinas) under the 349 * MBP8 2011 and MBP9 2012) or two `TI TS3DS10224`_ (on retinas) under the
356 * control of gmux. Since the integrated GPU is missing the main link, 350 * control of gmux. Since the integrated GPU is missing the main link,
357 * external displays appear to it as phantoms which fail to link-train. 351 * external displays appear to it as phantoms which fail to link-train.
358 * 352 *
@@ -365,10 +359,19 @@ static const struct backlight_ops gmux_bl_ops = {
365 * of this feature. 359 * of this feature.
366 * 360 *
367 * gmux' initial switch state on bootup is user configurable via the EFI 361 * gmux' initial switch state on bootup is user configurable via the EFI
368 * variable `gpu-power-prefs-fa4ce28d-b62f-4c99-9cc3-6815686e30f9` (5th byte, 362 * variable ``gpu-power-prefs-fa4ce28d-b62f-4c99-9cc3-6815686e30f9`` (5th byte,
369 * 1 = IGD, 0 = DIS). Based on this setting, the EFI firmware tells gmux to 363 * 1 = IGD, 0 = DIS). Based on this setting, the EFI firmware tells gmux to
370 * switch the panel and the external DP connector and allocates a framebuffer 364 * switch the panel and the external DP connector and allocates a framebuffer
371 * for the selected GPU. 365 * for the selected GPU.
366 *
367 * .. _US 8,687,007 B2: http://pimg-fpiw.uspto.gov/fdd/07/870/086/0.pdf
368 * .. _NXP CBTL06141: http://www.nxp.com/documents/data_sheet/CBTL06141.pdf
369 * .. _NXP CBTL06142: http://www.nxp.com/documents/data_sheet/CBTL06141.pdf
370 * .. _TI HD3SS212: http://www.ti.com/lit/ds/symlink/hd3ss212.pdf
371 * .. _Pericom PI3VDP12412: https://www.pericom.com/assets/Datasheets/PI3VDP12412.pdf
372 * .. _TI SN74LV4066A: http://www.ti.com/lit/ds/symlink/sn74lv4066a.pdf
373 * .. _NXP CBTL03062: http://pdf.datasheetarchive.com/indexerfiles/Datasheets-SW16/DSASW00308511.pdf
374 * .. _TI TS3DS10224: http://www.ti.com/lit/ds/symlink/ts3ds10224.pdf
372 */ 375 */
373 376
374static void gmux_read_switch_state(struct apple_gmux_data *gmux_data) 377static void gmux_read_switch_state(struct apple_gmux_data *gmux_data)
diff --git a/drivers/staging/android/sync_debug.h b/drivers/staging/android/sync_debug.h
index 425ebc5c32aa..fab66396d421 100644
--- a/drivers/staging/android/sync_debug.h
+++ b/drivers/staging/android/sync_debug.h
@@ -34,7 +34,8 @@ struct sync_timeline {
34 char name[32]; 34 char name[32];
35 35
36 /* protected by child_list_lock */ 36 /* protected by child_list_lock */
37 int context, value; 37 u64 context;
38 int value;
38 39
39 struct list_head child_list_head; 40 struct list_head child_list_head;
40 spinlock_t child_list_lock; 41 spinlock_t child_list_lock;
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c b/drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c
index 8511c648a15c..9d78411a3bf7 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/connector-analog-tv.c
@@ -14,7 +14,7 @@
14#include <linux/platform_device.h> 14#include <linux/platform_device.h>
15#include <linux/of.h> 15#include <linux/of.h>
16 16
17#include <video/omapdss.h> 17#include <video/omapfb_dss.h>
18#include <video/omap-panel-data.h> 18#include <video/omap-panel-data.h>
19 19
20struct panel_drv_data { 20struct panel_drv_data {
@@ -25,7 +25,6 @@ struct panel_drv_data {
25 25
26 struct omap_video_timings timings; 26 struct omap_video_timings timings;
27 27
28 enum omap_dss_venc_type connector_type;
29 bool invert_polarity; 28 bool invert_polarity;
30}; 29};
31 30
@@ -45,10 +44,6 @@ static const struct omap_video_timings tvc_pal_timings = {
45 44
46static const struct of_device_id tvc_of_match[]; 45static const struct of_device_id tvc_of_match[];
47 46
48struct tvc_of_data {
49 enum omap_dss_venc_type connector_type;
50};
51
52#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev) 47#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
53 48
54static int tvc_connect(struct omap_dss_device *dssdev) 49static int tvc_connect(struct omap_dss_device *dssdev)
@@ -99,7 +94,7 @@ static int tvc_enable(struct omap_dss_device *dssdev)
99 in->ops.atv->set_timings(in, &ddata->timings); 94 in->ops.atv->set_timings(in, &ddata->timings);
100 95
101 if (!ddata->dev->of_node) { 96 if (!ddata->dev->of_node) {
102 in->ops.atv->set_type(in, ddata->connector_type); 97 in->ops.atv->set_type(in, OMAP_DSS_VENC_TYPE_COMPOSITE);
103 98
104 in->ops.atv->invert_vid_out_polarity(in, 99 in->ops.atv->invert_vid_out_polarity(in,
105 ddata->invert_polarity); 100 ddata->invert_polarity);
@@ -207,7 +202,6 @@ static int tvc_probe_pdata(struct platform_device *pdev)
207 202
208 ddata->in = in; 203 ddata->in = in;
209 204
210 ddata->connector_type = pdata->connector_type;
211 ddata->invert_polarity = pdata->invert_polarity; 205 ddata->invert_polarity = pdata->invert_polarity;
212 206
213 dssdev = &ddata->dssdev; 207 dssdev = &ddata->dssdev;
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c b/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c
index d811e6dcaef7..06e1db34541e 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/connector-dvi.c
@@ -16,8 +16,7 @@
16 16
17#include <drm/drm_edid.h> 17#include <drm/drm_edid.h>
18 18
19#include <video/omapdss.h> 19#include <video/omapfb_dss.h>
20#include <video/omap-panel-data.h>
21 20
22static const struct omap_video_timings dvic_default_timings = { 21static const struct omap_video_timings dvic_default_timings = {
23 .x_res = 640, 22 .x_res = 640,
@@ -236,46 +235,6 @@ static struct omap_dss_driver dvic_driver = {
236 .detect = dvic_detect, 235 .detect = dvic_detect,
237}; 236};
238 237
239static int dvic_probe_pdata(struct platform_device *pdev)
240{
241 struct panel_drv_data *ddata = platform_get_drvdata(pdev);
242 struct connector_dvi_platform_data *pdata;
243 struct omap_dss_device *in, *dssdev;
244 int i2c_bus_num;
245
246 pdata = dev_get_platdata(&pdev->dev);
247 i2c_bus_num = pdata->i2c_bus_num;
248
249 if (i2c_bus_num != -1) {
250 struct i2c_adapter *adapter;
251
252 adapter = i2c_get_adapter(i2c_bus_num);
253 if (!adapter) {
254 dev_err(&pdev->dev,
255 "Failed to get I2C adapter, bus %d\n",
256 i2c_bus_num);
257 return -EPROBE_DEFER;
258 }
259
260 ddata->i2c_adapter = adapter;
261 }
262
263 in = omap_dss_find_output(pdata->source);
264 if (in == NULL) {
265 i2c_put_adapter(ddata->i2c_adapter);
266
267 dev_err(&pdev->dev, "Failed to find video source\n");
268 return -EPROBE_DEFER;
269 }
270
271 ddata->in = in;
272
273 dssdev = &ddata->dssdev;
274 dssdev->name = pdata->name;
275
276 return 0;
277}
278
279static int dvic_probe_of(struct platform_device *pdev) 238static int dvic_probe_of(struct platform_device *pdev)
280{ 239{
281 struct panel_drv_data *ddata = platform_get_drvdata(pdev); 240 struct panel_drv_data *ddata = platform_get_drvdata(pdev);
@@ -313,23 +272,18 @@ static int dvic_probe(struct platform_device *pdev)
313 struct omap_dss_device *dssdev; 272 struct omap_dss_device *dssdev;
314 int r; 273 int r;
315 274
275 if (!pdev->dev.of_node)
276 return -ENODEV;
277
316 ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL); 278 ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
317 if (!ddata) 279 if (!ddata)
318 return -ENOMEM; 280 return -ENOMEM;
319 281
320 platform_set_drvdata(pdev, ddata); 282 platform_set_drvdata(pdev, ddata);
321 283
322 if (dev_get_platdata(&pdev->dev)) { 284 r = dvic_probe_of(pdev);
323 r = dvic_probe_pdata(pdev); 285 if (r)
324 if (r) 286 return r;
325 return r;
326 } else if (pdev->dev.of_node) {
327 r = dvic_probe_of(pdev);
328 if (r)
329 return r;
330 } else {
331 return -ENODEV;
332 }
333 287
334 ddata->timings = dvic_default_timings; 288 ddata->timings = dvic_default_timings;
335 289
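
The connector-dvi.c hunks above remove dvic_probe_pdata() and make probing DT-only; the same pattern repeats in the connector-hdmi, encoder and panel driver hunks that follow. A minimal sketch of the resulting probe shape, with a placeholder driver name:

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/platform_device.h>

/* "example_panel" is a placeholder, not one of the drivers in this merge. */
static int example_panel_probe(struct platform_device *pdev)
{
	/* The legacy platform-data path is gone; only DT devices are accepted. */
	if (!pdev->dev.of_node)
		return -ENODEV;

	/* ... parse DT properties, find the video source, register the panel ... */
	return 0;
}
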
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/connector-hdmi.c b/drivers/video/fbdev/omap2/omapfb/displays/connector-hdmi.c
index 6ee4129bc0c0..58d5803ede67 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/connector-hdmi.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/connector-hdmi.c
@@ -17,8 +17,7 @@
17 17
18#include <drm/drm_edid.h> 18#include <drm/drm_edid.h>
19 19
20#include <video/omapdss.h> 20#include <video/omapfb_dss.h>
21#include <video/omap-panel-data.h>
22 21
23static const struct omap_video_timings hdmic_default_timings = { 22static const struct omap_video_timings hdmic_default_timings = {
24 .x_res = 640, 23 .x_res = 640,
@@ -206,30 +205,6 @@ static struct omap_dss_driver hdmic_driver = {
206 .set_hdmi_infoframe = hdmic_set_infoframe, 205 .set_hdmi_infoframe = hdmic_set_infoframe,
207}; 206};
208 207
209static int hdmic_probe_pdata(struct platform_device *pdev)
210{
211 struct panel_drv_data *ddata = platform_get_drvdata(pdev);
212 struct connector_hdmi_platform_data *pdata;
213 struct omap_dss_device *in, *dssdev;
214
215 pdata = dev_get_platdata(&pdev->dev);
216
217 ddata->hpd_gpio = -ENODEV;
218
219 in = omap_dss_find_output(pdata->source);
220 if (in == NULL) {
221 dev_err(&pdev->dev, "Failed to find video source\n");
222 return -EPROBE_DEFER;
223 }
224
225 ddata->in = in;
226
227 dssdev = &ddata->dssdev;
228 dssdev->name = pdata->name;
229
230 return 0;
231}
232
233static int hdmic_probe_of(struct platform_device *pdev) 208static int hdmic_probe_of(struct platform_device *pdev)
234{ 209{
235 struct panel_drv_data *ddata = platform_get_drvdata(pdev); 210 struct panel_drv_data *ddata = platform_get_drvdata(pdev);
@@ -261,6 +236,9 @@ static int hdmic_probe(struct platform_device *pdev)
261 struct omap_dss_device *dssdev; 236 struct omap_dss_device *dssdev;
262 int r; 237 int r;
263 238
239 if (!pdev->dev.of_node)
240 return -ENODEV;
241
264 ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL); 242 ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
265 if (!ddata) 243 if (!ddata)
266 return -ENOMEM; 244 return -ENOMEM;
@@ -268,17 +246,9 @@ static int hdmic_probe(struct platform_device *pdev)
268 platform_set_drvdata(pdev, ddata); 246 platform_set_drvdata(pdev, ddata);
269 ddata->dev = &pdev->dev; 247 ddata->dev = &pdev->dev;
270 248
271 if (dev_get_platdata(&pdev->dev)) { 249 r = hdmic_probe_of(pdev);
272 r = hdmic_probe_pdata(pdev); 250 if (r)
273 if (r) 251 return r;
274 return r;
275 } else if (pdev->dev.of_node) {
276 r = hdmic_probe_of(pdev);
277 if (r)
278 return r;
279 } else {
280 return -ENODEV;
281 }
282 252
283 if (gpio_is_valid(ddata->hpd_gpio)) { 253 if (gpio_is_valid(ddata->hpd_gpio)) {
284 r = devm_gpio_request_one(&pdev->dev, ddata->hpd_gpio, 254 r = devm_gpio_request_one(&pdev->dev, ddata->hpd_gpio,
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/encoder-opa362.c b/drivers/video/fbdev/omap2/omapfb/displays/encoder-opa362.c
index 8c246c213e06..a9a67167cc3d 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/encoder-opa362.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/encoder-opa362.c
@@ -20,7 +20,7 @@
20#include <linux/slab.h> 20#include <linux/slab.h>
21#include <linux/of_gpio.h> 21#include <linux/of_gpio.h>
22 22
23#include <video/omapdss.h> 23#include <video/omapfb_dss.h>
24 24
25struct panel_drv_data { 25struct panel_drv_data {
26 struct omap_dss_device dssdev; 26 struct omap_dss_device dssdev;
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/encoder-tfp410.c b/drivers/video/fbdev/omap2/omapfb/displays/encoder-tfp410.c
index d9048b3df495..8c0953d069b7 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/encoder-tfp410.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/encoder-tfp410.c
@@ -15,8 +15,7 @@
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/of_gpio.h> 16#include <linux/of_gpio.h>
17 17
18#include <video/omapdss.h> 18#include <video/omapfb_dss.h>
19#include <video/omap-panel-data.h>
20 19
21struct panel_drv_data { 20struct panel_drv_data {
22 struct omap_dss_device dssdev; 21 struct omap_dss_device dssdev;
@@ -166,32 +165,6 @@ static const struct omapdss_dvi_ops tfp410_dvi_ops = {
166 .get_timings = tfp410_get_timings, 165 .get_timings = tfp410_get_timings,
167}; 166};
168 167
169static int tfp410_probe_pdata(struct platform_device *pdev)
170{
171 struct panel_drv_data *ddata = platform_get_drvdata(pdev);
172 struct encoder_tfp410_platform_data *pdata;
173 struct omap_dss_device *dssdev, *in;
174
175 pdata = dev_get_platdata(&pdev->dev);
176
177 ddata->pd_gpio = pdata->power_down_gpio;
178
179 ddata->data_lines = pdata->data_lines;
180
181 in = omap_dss_find_output(pdata->source);
182 if (in == NULL) {
183 dev_err(&pdev->dev, "Failed to find video source\n");
184 return -ENODEV;
185 }
186
187 ddata->in = in;
188
189 dssdev = &ddata->dssdev;
190 dssdev->name = pdata->name;
191
192 return 0;
193}
194
195static int tfp410_probe_of(struct platform_device *pdev) 168static int tfp410_probe_of(struct platform_device *pdev)
196{ 169{
197 struct panel_drv_data *ddata = platform_get_drvdata(pdev); 170 struct panel_drv_data *ddata = platform_get_drvdata(pdev);
@@ -225,23 +198,18 @@ static int tfp410_probe(struct platform_device *pdev)
225 struct omap_dss_device *dssdev; 198 struct omap_dss_device *dssdev;
226 int r; 199 int r;
227 200
201 if (!pdev->dev.of_node)
202 return -ENODEV;
203
228 ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL); 204 ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
229 if (!ddata) 205 if (!ddata)
230 return -ENOMEM; 206 return -ENOMEM;
231 207
232 platform_set_drvdata(pdev, ddata); 208 platform_set_drvdata(pdev, ddata);
233 209
234 if (dev_get_platdata(&pdev->dev)) { 210 r = tfp410_probe_of(pdev);
235 r = tfp410_probe_pdata(pdev); 211 if (r)
236 if (r) 212 return r;
237 return r;
238 } else if (pdev->dev.of_node) {
239 r = tfp410_probe_of(pdev);
240 if (r)
241 return r;
242 } else {
243 return -ENODEV;
244 }
245 213
246 if (gpio_is_valid(ddata->pd_gpio)) { 214 if (gpio_is_valid(ddata->pd_gpio)) {
247 r = devm_gpio_request_one(&pdev->dev, ddata->pd_gpio, 215 r = devm_gpio_request_one(&pdev->dev, ddata->pd_gpio,
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c b/drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c
index 677e2545fcbe..80dc47347e21 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/encoder-tpd12s015.c
@@ -16,8 +16,7 @@
16#include <linux/platform_device.h> 16#include <linux/platform_device.h>
17#include <linux/gpio/consumer.h> 17#include <linux/gpio/consumer.h>
18 18
19#include <video/omapdss.h> 19#include <video/omapfb_dss.h>
20#include <video/omap-panel-data.h>
21 20
22struct panel_drv_data { 21struct panel_drv_data {
23 struct omap_dss_device dssdev; 22 struct omap_dss_device dssdev;
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c
index e780fd4f8b46..ace3d818afe5 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-dpi.c
@@ -16,7 +16,7 @@
16#include <linux/of.h> 16#include <linux/of.h>
17#include <linux/of_gpio.h> 17#include <linux/of_gpio.h>
18 18
19#include <video/omapdss.h> 19#include <video/omapfb_dss.h>
20#include <video/omap-panel-data.h> 20#include <video/omap-panel-data.h>
21#include <video/of_display_timing.h> 21#include <video/of_display_timing.h>
22 22
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
index 3414c2609320..b58012b82b6f 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c
@@ -25,8 +25,7 @@
25#include <linux/of_device.h> 25#include <linux/of_device.h>
26#include <linux/of_gpio.h> 26#include <linux/of_gpio.h>
27 27
28#include <video/omapdss.h> 28#include <video/omapfb_dss.h>
29#include <video/omap-panel-data.h>
30#include <video/mipi_display.h> 29#include <video/mipi_display.h>
31 30
32/* DSI Virtual channel. Hardcoded for now. */ 31/* DSI Virtual channel. Hardcoded for now. */
@@ -1127,40 +1126,6 @@ static struct omap_dss_driver dsicm_ops = {
1127 .memory_read = dsicm_memory_read, 1126 .memory_read = dsicm_memory_read,
1128}; 1127};
1129 1128
1130static int dsicm_probe_pdata(struct platform_device *pdev)
1131{
1132 const struct panel_dsicm_platform_data *pdata;
1133 struct panel_drv_data *ddata = platform_get_drvdata(pdev);
1134 struct omap_dss_device *dssdev, *in;
1135
1136 pdata = dev_get_platdata(&pdev->dev);
1137
1138 in = omap_dss_find_output(pdata->source);
1139 if (in == NULL) {
1140 dev_err(&pdev->dev, "failed to find video source\n");
1141 return -EPROBE_DEFER;
1142 }
1143 ddata->in = in;
1144
1145 ddata->reset_gpio = pdata->reset_gpio;
1146
1147 if (pdata->use_ext_te)
1148 ddata->ext_te_gpio = pdata->ext_te_gpio;
1149 else
1150 ddata->ext_te_gpio = -1;
1151
1152 ddata->ulps_timeout = pdata->ulps_timeout;
1153
1154 ddata->use_dsi_backlight = pdata->use_dsi_backlight;
1155
1156 ddata->pin_config = pdata->pin_config;
1157
1158 dssdev = &ddata->dssdev;
1159 dssdev->name = pdata->name;
1160
1161 return 0;
1162}
1163
1164static int dsicm_probe_of(struct platform_device *pdev) 1129static int dsicm_probe_of(struct platform_device *pdev)
1165{ 1130{
1166 struct device_node *node = pdev->dev.of_node; 1131 struct device_node *node = pdev->dev.of_node;
@@ -1207,6 +1172,9 @@ static int dsicm_probe(struct platform_device *pdev)
1207 1172
1208 dev_dbg(dev, "probe\n"); 1173 dev_dbg(dev, "probe\n");
1209 1174
1175 if (!pdev->dev.of_node)
1176 return -ENODEV;
1177
1210 ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL); 1178 ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
1211 if (!ddata) 1179 if (!ddata)
1212 return -ENOMEM; 1180 return -ENOMEM;
@@ -1214,17 +1182,9 @@ static int dsicm_probe(struct platform_device *pdev)
1214 platform_set_drvdata(pdev, ddata); 1182 platform_set_drvdata(pdev, ddata);
1215 ddata->pdev = pdev; 1183 ddata->pdev = pdev;
1216 1184
1217 if (dev_get_platdata(dev)) { 1185 r = dsicm_probe_of(pdev);
1218 r = dsicm_probe_pdata(pdev); 1186 if (r)
1219 if (r) 1187 return r;
1220 return r;
1221 } else if (pdev->dev.of_node) {
1222 r = dsicm_probe_of(pdev);
1223 if (r)
1224 return r;
1225 } else {
1226 return -ENODEV;
1227 }
1228 1188
1229 ddata->timings.x_res = 864; 1189 ddata->timings.x_res = 864;
1230 ddata->timings.y_res = 480; 1190 ddata->timings.y_res = 480;
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c
index 18eb60e9c9ec..f14691ce8d02 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-lgphilips-lb035q02.c
@@ -16,8 +16,7 @@
16#include <linux/mutex.h> 16#include <linux/mutex.h>
17#include <linux/gpio.h> 17#include <linux/gpio.h>
18 18
19#include <video/omapdss.h> 19#include <video/omapfb_dss.h>
20#include <video/omap-panel-data.h>
21 20
22static struct omap_video_timings lb035q02_timings = { 21static struct omap_video_timings lb035q02_timings = {
23 .x_res = 320, 22 .x_res = 320,
@@ -240,44 +239,6 @@ static struct omap_dss_driver lb035q02_ops = {
240 .get_resolution = omapdss_default_get_resolution, 239 .get_resolution = omapdss_default_get_resolution,
241}; 240};
242 241
243static int lb035q02_probe_pdata(struct spi_device *spi)
244{
245 const struct panel_lb035q02_platform_data *pdata;
246 struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
247 struct omap_dss_device *dssdev, *in;
248 int r;
249
250 pdata = dev_get_platdata(&spi->dev);
251
252 in = omap_dss_find_output(pdata->source);
253 if (in == NULL) {
254 dev_err(&spi->dev, "failed to find video source '%s'\n",
255 pdata->source);
256 return -EPROBE_DEFER;
257 }
258
259 ddata->in = in;
260
261 ddata->data_lines = pdata->data_lines;
262
263 dssdev = &ddata->dssdev;
264 dssdev->name = pdata->name;
265
266 r = devm_gpio_request_one(&spi->dev, pdata->enable_gpio,
267 GPIOF_OUT_INIT_LOW, "panel enable");
268 if (r)
269 goto err_gpio;
270
271 ddata->enable_gpio = gpio_to_desc(pdata->enable_gpio);
272
273 ddata->backlight_gpio = pdata->backlight_gpio;
274
275 return 0;
276err_gpio:
277 omap_dss_put_device(ddata->in);
278 return r;
279}
280
281static int lb035q02_probe_of(struct spi_device *spi) 242static int lb035q02_probe_of(struct spi_device *spi)
282{ 243{
283 struct device_node *node = spi->dev.of_node; 244 struct device_node *node = spi->dev.of_node;
@@ -312,6 +273,9 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi)
312 struct omap_dss_device *dssdev; 273 struct omap_dss_device *dssdev;
313 int r; 274 int r;
314 275
276 if (!spi->dev.of_node)
277 return -ENODEV;
278
315 ddata = devm_kzalloc(&spi->dev, sizeof(*ddata), GFP_KERNEL); 279 ddata = devm_kzalloc(&spi->dev, sizeof(*ddata), GFP_KERNEL);
316 if (ddata == NULL) 280 if (ddata == NULL)
317 return -ENOMEM; 281 return -ENOMEM;
@@ -320,17 +284,9 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi)
320 284
321 ddata->spi = spi; 285 ddata->spi = spi;
322 286
323 if (dev_get_platdata(&spi->dev)) { 287 r = lb035q02_probe_of(spi);
324 r = lb035q02_probe_pdata(spi); 288 if (r)
325 if (r) 289 return r;
326 return r;
327 } else if (spi->dev.of_node) {
328 r = lb035q02_probe_of(spi);
329 if (r)
330 return r;
331 } else {
332 return -ENODEV;
333 }
334 290
335 if (gpio_is_valid(ddata->backlight_gpio)) { 291 if (gpio_is_valid(ddata->backlight_gpio)) {
336 r = devm_gpio_request_one(&spi->dev, ddata->backlight_gpio, 292 r = devm_gpio_request_one(&spi->dev, ddata->backlight_gpio,
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c
index 8a928c9a2fc9..a2cbadd3eca3 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-nec-nl8048hl11.c
@@ -18,8 +18,7 @@
18#include <linux/gpio.h> 18#include <linux/gpio.h>
19#include <linux/of_gpio.h> 19#include <linux/of_gpio.h>
20 20
21#include <video/omapdss.h> 21#include <video/omapfb_dss.h>
22#include <video/omap-panel-data.h>
23 22
24struct panel_drv_data { 23struct panel_drv_data {
25 struct omap_dss_device dssdev; 24 struct omap_dss_device dssdev;
@@ -233,33 +232,6 @@ static struct omap_dss_driver nec_8048_ops = {
233}; 232};
234 233
235 234
236static int nec_8048_probe_pdata(struct spi_device *spi)
237{
238 const struct panel_nec_nl8048hl11_platform_data *pdata;
239 struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
240 struct omap_dss_device *dssdev, *in;
241
242 pdata = dev_get_platdata(&spi->dev);
243
244 ddata->qvga_gpio = pdata->qvga_gpio;
245 ddata->res_gpio = pdata->res_gpio;
246
247 in = omap_dss_find_output(pdata->source);
248 if (in == NULL) {
249 dev_err(&spi->dev, "failed to find video source '%s'\n",
250 pdata->source);
251 return -EPROBE_DEFER;
252 }
253 ddata->in = in;
254
255 ddata->data_lines = pdata->data_lines;
256
257 dssdev = &ddata->dssdev;
258 dssdev->name = pdata->name;
259
260 return 0;
261}
262
263static int nec_8048_probe_of(struct spi_device *spi) 235static int nec_8048_probe_of(struct spi_device *spi)
264{ 236{
265 struct device_node *node = spi->dev.of_node; 237 struct device_node *node = spi->dev.of_node;
@@ -296,6 +268,9 @@ static int nec_8048_probe(struct spi_device *spi)
296 268
297 dev_dbg(&spi->dev, "%s\n", __func__); 269 dev_dbg(&spi->dev, "%s\n", __func__);
298 270
271 if (!spi->dev.of_node)
272 return -ENODEV;
273
299 spi->mode = SPI_MODE_0; 274 spi->mode = SPI_MODE_0;
300 spi->bits_per_word = 32; 275 spi->bits_per_word = 32;
301 276
@@ -315,17 +290,9 @@ static int nec_8048_probe(struct spi_device *spi)
315 290
316 ddata->spi = spi; 291 ddata->spi = spi;
317 292
318 if (dev_get_platdata(&spi->dev)) { 293 r = nec_8048_probe_of(spi);
319 r = nec_8048_probe_pdata(spi); 294 if (r)
320 if (r) 295 return r;
321 return r;
322 } else if (spi->dev.of_node) {
323 r = nec_8048_probe_of(spi);
324 if (r)
325 return r;
326 } else {
327 return -ENODEV;
328 }
329 296
330 if (gpio_is_valid(ddata->qvga_gpio)) { 297 if (gpio_is_valid(ddata->qvga_gpio)) {
331 r = devm_gpio_request_one(&spi->dev, ddata->qvga_gpio, 298 r = devm_gpio_request_one(&spi->dev, ddata->qvga_gpio,
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
index 1954ec913ce5..a8be18a87fa0 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-sharp-ls037v7dw01.c
@@ -17,8 +17,7 @@
17#include <linux/platform_device.h> 17#include <linux/platform_device.h>
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/regulator/consumer.h> 19#include <linux/regulator/consumer.h>
20#include <video/omapdss.h> 20#include <video/omapfb_dss.h>
21#include <video/omap-panel-data.h>
22 21
23struct panel_drv_data { 22struct panel_drv_data {
24 struct omap_dss_device dssdev; 23 struct omap_dss_device dssdev;
@@ -197,69 +196,6 @@ static struct omap_dss_driver sharp_ls_ops = {
197 .get_resolution = omapdss_default_get_resolution, 196 .get_resolution = omapdss_default_get_resolution,
198}; 197};
199 198
200static int sharp_ls_get_gpio(struct device *dev, int gpio, unsigned long flags,
201 char *desc, struct gpio_desc **gpiod)
202{
203 int r;
204
205 r = devm_gpio_request_one(dev, gpio, flags, desc);
206 if (r) {
207 *gpiod = NULL;
208 return r == -ENOENT ? 0 : r;
209 }
210
211 *gpiod = gpio_to_desc(gpio);
212
213 return 0;
214}
215
216static int sharp_ls_probe_pdata(struct platform_device *pdev)
217{
218 const struct panel_sharp_ls037v7dw01_platform_data *pdata;
219 struct panel_drv_data *ddata = platform_get_drvdata(pdev);
220 struct omap_dss_device *dssdev, *in;
221 int r;
222
223 pdata = dev_get_platdata(&pdev->dev);
224
225 in = omap_dss_find_output(pdata->source);
226 if (in == NULL) {
227 dev_err(&pdev->dev, "failed to find video source '%s'\n",
228 pdata->source);
229 return -EPROBE_DEFER;
230 }
231
232 ddata->in = in;
233
234 ddata->data_lines = pdata->data_lines;
235
236 dssdev = &ddata->dssdev;
237 dssdev->name = pdata->name;
238
239 r = sharp_ls_get_gpio(&pdev->dev, pdata->mo_gpio, GPIOF_OUT_INIT_LOW,
240 "lcd MO", &ddata->mo_gpio);
241 if (r)
242 return r;
243 r = sharp_ls_get_gpio(&pdev->dev, pdata->lr_gpio, GPIOF_OUT_INIT_HIGH,
244 "lcd LR", &ddata->lr_gpio);
245 if (r)
246 return r;
247 r = sharp_ls_get_gpio(&pdev->dev, pdata->ud_gpio, GPIOF_OUT_INIT_HIGH,
248 "lcd UD", &ddata->ud_gpio);
249 if (r)
250 return r;
251 r = sharp_ls_get_gpio(&pdev->dev, pdata->resb_gpio, GPIOF_OUT_INIT_LOW,
252 "lcd RESB", &ddata->resb_gpio);
253 if (r)
254 return r;
255 r = sharp_ls_get_gpio(&pdev->dev, pdata->ini_gpio, GPIOF_OUT_INIT_LOW,
256 "lcd INI", &ddata->ini_gpio);
257 if (r)
258 return r;
259
260 return 0;
261}
262
263static int sharp_ls_get_gpio_of(struct device *dev, int index, int val, 199static int sharp_ls_get_gpio_of(struct device *dev, int index, int val,
264 const char *desc, struct gpio_desc **gpiod) 200 const char *desc, struct gpio_desc **gpiod)
265{ 201{
@@ -330,23 +266,18 @@ static int sharp_ls_probe(struct platform_device *pdev)
 	struct omap_dss_device *dssdev;
 	int r;
 
+	if (!pdev->dev.of_node)
+		return -ENODEV;
+
 	ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
 	if (ddata == NULL)
 		return -ENOMEM;
 
 	platform_set_drvdata(pdev, ddata);
 
-	if (dev_get_platdata(&pdev->dev)) {
-		r = sharp_ls_probe_pdata(pdev);
-		if (r)
-			return r;
-	} else if (pdev->dev.of_node) {
-		r = sharp_ls_probe_of(pdev);
-		if (r)
-			return r;
-	} else {
-		return -ENODEV;
-	}
+	r = sharp_ls_probe_of(pdev);
+	if (r)
+		return r;
 
 	ddata->videomode = sharp_ls_timings;
 
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c
index 31efcca801bd..468560a6daae 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-sony-acx565akm.c
@@ -33,7 +33,7 @@
 #include <linux/of.h>
 #include <linux/of_gpio.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 #include <video/omap-panel-data.h>
 
 #define MIPID_CMD_READ_DISP_ID		0x04
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c
index 4d657f3ab679..b529a8c2b652 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td028ttec1.c
@@ -28,8 +28,7 @@
 #include <linux/delay.h>
 #include <linux/spi/spi.h>
 #include <linux/gpio.h>
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
+#include <video/omapfb_dss.h>
 
 struct panel_drv_data {
 	struct omap_dss_device dssdev;
@@ -365,31 +364,6 @@ static struct omap_dss_driver td028ttec1_ops = {
 	.check_timings = td028ttec1_panel_check_timings,
 };
 
-static int td028ttec1_panel_probe_pdata(struct spi_device *spi)
-{
-	const struct panel_tpo_td028ttec1_platform_data *pdata;
-	struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
-	struct omap_dss_device *dssdev, *in;
-
-	pdata = dev_get_platdata(&spi->dev);
-
-	in = omap_dss_find_output(pdata->source);
-	if (in == NULL) {
-		dev_err(&spi->dev, "failed to find video source '%s'\n",
-			pdata->source);
-		return -EPROBE_DEFER;
-	}
-
-	ddata->in = in;
-
-	ddata->data_lines = pdata->data_lines;
-
-	dssdev = &ddata->dssdev;
-	dssdev->name = pdata->name;
-
-	return 0;
-}
-
 static int td028ttec1_probe_of(struct spi_device *spi)
 {
 	struct device_node *node = spi->dev.of_node;
@@ -415,6 +389,9 @@ static int td028ttec1_panel_probe(struct spi_device *spi)
 
 	dev_dbg(&spi->dev, "%s\n", __func__);
 
+	if (!spi->dev.of_node)
+		return -ENODEV;
+
 	spi->bits_per_word = 9;
 	spi->mode = SPI_MODE_3;
 
@@ -432,17 +409,9 @@ static int td028ttec1_panel_probe(struct spi_device *spi)
 
 	ddata->spi_dev = spi;
 
-	if (dev_get_platdata(&spi->dev)) {
-		r = td028ttec1_panel_probe_pdata(spi);
-		if (r)
-			return r;
-	} else if (spi->dev.of_node) {
-		r = td028ttec1_probe_of(spi);
-		if (r)
-			return r;
-	} else {
-		return -ENODEV;
-	}
+	r = td028ttec1_probe_of(spi);
+	if (r)
+		return r;
 
 	ddata->videomode = td028ttec1_panel_timings;
 
diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c
index 68e3b68a2920..51e628b85f4a 100644
--- a/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c
+++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-tpo-td043mtea1.c
@@ -19,8 +19,7 @@
 #include <linux/slab.h>
 #include <linux/of_gpio.h>
 
-#include <video/omapdss.h>
-#include <video/omap-panel-data.h>
+#include <video/omapfb_dss.h>
 
 #define TPO_R02_MODE(x)		((x) & 7)
 #define TPO_R02_MODE_800x480	7
@@ -465,32 +464,6 @@ static struct omap_dss_driver tpo_td043_ops = {
 };
 
 
-static int tpo_td043_probe_pdata(struct spi_device *spi)
-{
-	const struct panel_tpo_td043mtea1_platform_data *pdata;
-	struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
-	struct omap_dss_device *dssdev, *in;
-
-	pdata = dev_get_platdata(&spi->dev);
-
-	ddata->nreset_gpio = pdata->nreset_gpio;
-
-	in = omap_dss_find_output(pdata->source);
-	if (in == NULL) {
-		dev_err(&spi->dev, "failed to find video source '%s'\n",
-			pdata->source);
-		return -EPROBE_DEFER;
-	}
-	ddata->in = in;
-
-	ddata->data_lines = pdata->data_lines;
-
-	dssdev = &ddata->dssdev;
-	dssdev->name = pdata->name;
-
-	return 0;
-}
-
 static int tpo_td043_probe_of(struct spi_device *spi)
 {
 	struct device_node *node = spi->dev.of_node;
@@ -524,6 +497,9 @@ static int tpo_td043_probe(struct spi_device *spi)
 
 	dev_dbg(&spi->dev, "%s\n", __func__);
 
+	if (!spi->dev.of_node)
+		return -ENODEV;
+
 	spi->bits_per_word = 16;
 	spi->mode = SPI_MODE_0;
 
@@ -541,17 +517,9 @@ static int tpo_td043_probe(struct spi_device *spi)
 
 	ddata->spi = spi;
 
-	if (dev_get_platdata(&spi->dev)) {
-		r = tpo_td043_probe_pdata(spi);
-		if (r)
-			return r;
-	} else if (spi->dev.of_node) {
-		r = tpo_td043_probe_of(spi);
-		if (r)
-			return r;
-	} else {
-		return -ENODEV;
-	}
+	r = tpo_td043_probe_of(spi);
+	if (r)
+		return r;
 
 	ddata->mode = TPO_R02_MODE_800x480;
 	memcpy(ddata->gamma, tpo_td043_def_gamma, sizeof(ddata->gamma));
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/apply.c b/drivers/video/fbdev/omap2/omapfb/dss/apply.c
index 663ccc3bf4e5..2481f4871f66 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/apply.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/apply.c
@@ -23,7 +23,7 @@
 #include <linux/spinlock.h>
 #include <linux/jiffies.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/core.c b/drivers/video/fbdev/omap2/omapfb/dss/core.c
index 5a87179b7312..29de4827589d 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/core.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/core.c
@@ -35,7 +35,7 @@
 #include <linux/suspend.h>
 #include <linux/slab.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "dss_features.h"
@@ -208,8 +208,6 @@ static int __init omap_dss_probe(struct platform_device *pdev)
 		core.default_display_name = def_disp_name;
 	else if (pdata->default_display_name)
 		core.default_display_name = pdata->default_display_name;
-	else if (pdata->default_device)
-		core.default_display_name = pdata->default_device->name;
 
 	register_pm_notifier(&omap_dss_pm_notif_block);
 
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c b/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c
index 6607db37a5e4..3691bde4ce0a 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc-compat.c
@@ -26,7 +26,7 @@
 #include <linux/interrupt.h>
 #include <linux/seq_file.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
index 5491e304f4fe..7a75dfda9845 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
@@ -41,7 +41,7 @@
 #include <linux/of.h>
 #include <linux/component.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc_coefs.c b/drivers/video/fbdev/omap2/omapfb/dss/dispc_coefs.c
index 038c15b04215..59c9a5c47ca9 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dispc_coefs.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc_coefs.c
@@ -18,7 +18,7 @@
  */
 
 #include <linux/kernel.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dispc.h"
 
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c b/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c
index 75b5286029ee..b3fdbfd0b82d 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/display-sysfs.c
@@ -25,7 +25,7 @@
 #include <linux/platform_device.h>
 #include <linux/sysfs.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 #include "dss.h"
 
 static ssize_t display_name_show(struct omap_dss_device *dssdev, char *buf)
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/display.c b/drivers/video/fbdev/omap2/omapfb/dss/display.c
index ef5b9027985d..dd5468695c43 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/display.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/display.c
@@ -28,7 +28,7 @@
 #include <linux/platform_device.h>
 #include <linux/of.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 #include "dss.h"
 #include "dss_features.h"
 
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dpi.c b/drivers/video/fbdev/omap2/omapfb/dss/dpi.c
index 7953e6a52346..da09806b940c 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dpi.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dpi.c
@@ -34,7 +34,7 @@
 #include <linux/clk.h>
 #include <linux/component.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
index d63e59807707..9e4800a4e3d1 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
@@ -42,7 +42,7 @@
 #include <linux/of_platform.h>
 #include <linux/component.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 #include <video/mipi_display.h>
 
 #include "dss.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c b/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c
index bf407b6ba15c..d356a252ab4a 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dss-of.c
@@ -18,7 +18,7 @@
 #include <linux/of.h>
 #include <linux/seq_file.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss.c b/drivers/video/fbdev/omap2/omapfb/dss/dss.c
index 0078c4d1fc31..47d7f69ad9ad 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dss.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dss.c
@@ -41,7 +41,7 @@
 #include <linux/suspend.h>
 #include <linux/component.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss.h b/drivers/video/fbdev/omap2/omapfb/dss/dss.h
index 0184a8461df1..a3cc0ca8f9d2 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dss.h
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dss.h
@@ -73,6 +73,17 @@
 #define FLD_MOD(orig, val, start, end) \
 	(((orig) & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end))
 
+enum omap_dss_clk_source {
+	OMAP_DSS_CLK_SRC_FCK = 0,		/* OMAP2/3: DSS1_ALWON_FCLK
+						 * OMAP4: DSS_FCLK */
+	OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC,	/* OMAP3: DSI1_PLL_FCLK
+						 * OMAP4: PLL1_CLK1 */
+	OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI,	/* OMAP3: DSI2_PLL_FCLK
+						 * OMAP4: PLL1_CLK2 */
+	OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC,	/* OMAP4: PLL2_CLK1 */
+	OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI,	/* OMAP4: PLL2_CLK2 */
+};
+
 enum dss_io_pad_mode {
 	DSS_IO_PAD_MODE_RESET,
 	DSS_IO_PAD_MODE_RFBI,
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss_features.c b/drivers/video/fbdev/omap2/omapfb/dss/dss_features.c
index c886a2927f73..8fc843b56b26 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/dss_features.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/dss_features.c
@@ -23,7 +23,7 @@
 #include <linux/err.h>
 #include <linux/slab.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi.h b/drivers/video/fbdev/omap2/omapfb/dss/hdmi.h
index 53616b02b613..f6de87e078b0 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi.h
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi.h
@@ -23,7 +23,8 @@
 #include <linux/io.h>
 #include <linux/platform_device.h>
 #include <linux/hdmi.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
+#include <sound/omap-hdmi-audio.h>
 
 #include "dss.h"
 
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
index 2e71aec838b1..926a6f20dbb2 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
@@ -33,7 +33,7 @@
 #include <linux/gpio.h>
 #include <linux/regulator/consumer.h>
 #include <linux/component.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 #include <sound/omap-hdmi-audio.h>
 
 #include "hdmi4_core.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
index aade6d99662a..0ee829a165c3 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
@@ -38,7 +38,7 @@
 #include <linux/gpio.h>
 #include <linux/regulator/consumer.h>
 #include <linux/component.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 #include <sound/omap-hdmi-audio.h>
 
 #include "hdmi5_core.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c
index 1b8fcc6c4ba1..189a5ad125a3 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_common.c
@@ -4,7 +4,7 @@
 #include <linux/kernel.h>
 #include <linux/err.h>
 #include <linux/of.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "hdmi.h"
 
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c
index 1f5d19c119ce..9a13c35fd6d8 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c
@@ -13,7 +13,7 @@
 #include <linux/io.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "hdmi.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c
index 06e23a7c432c..eac3665aba6c 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_pll.c
@@ -17,7 +17,7 @@
 #include <linux/platform_device.h>
 #include <linux/clk.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "hdmi.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_wp.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_wp.c
index 7c544bc56fb5..705373e4cf38 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_wp.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_wp.c
@@ -14,7 +14,7 @@
 #include <linux/err.h>
 #include <linux/io.h>
 #include <linux/platform_device.h>
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "hdmi.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c b/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c
index a7414fb12830..9e2a67fdf4d2 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c
@@ -26,7 +26,7 @@
 #include <linux/platform_device.h>
 #include <linux/jiffies.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/manager.c b/drivers/video/fbdev/omap2/omapfb/dss/manager.c
index 08a67f4f6a20..69f86d2cc274 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/manager.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/manager.c
@@ -28,7 +28,7 @@
 #include <linux/platform_device.h>
 #include <linux/jiffies.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "dss_features.h"
@@ -69,7 +69,6 @@ int dss_init_overlay_managers(void)
 			break;
 		}
 
-		mgr->caps = 0;
 		mgr->supported_displays =
 			dss_feat_get_supported_displays(mgr->id);
 		mgr->supported_outputs =
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/output.c b/drivers/video/fbdev/omap2/omapfb/dss/output.c
index 16072159bd24..bed9a978269d 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/output.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/output.c
@@ -21,7 +21,7 @@
 #include <linux/slab.h>
 #include <linux/of.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c b/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c
index 4cc5ddebfb34..f1f6c0aea752 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/overlay-sysfs.c
@@ -26,7 +26,7 @@
 #include <linux/kobject.h>
 #include <linux/platform_device.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/overlay.c b/drivers/video/fbdev/omap2/omapfb/dss/overlay.c
index 2f7cee985cdd..d6c5d75d2ef8 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/overlay.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/overlay.c
@@ -30,7 +30,7 @@
 #include <linux/delay.h>
 #include <linux/slab.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/pll.c b/drivers/video/fbdev/omap2/omapfb/dss/pll.c
index f974ddcd3b6e..0564c5606cd0 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/pll.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/pll.c
@@ -22,7 +22,7 @@
 #include <linux/regulator/consumer.h>
 #include <linux/sched.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/rfbi.c b/drivers/video/fbdev/omap2/omapfb/dss/rfbi.c
index aea6a1d0fb20..562b0c4ae0c6 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/rfbi.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/rfbi.c
@@ -38,7 +38,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/component.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 #include "dss.h"
 
 struct rfbi_reg { u16 idx; };
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/sdi.c b/drivers/video/fbdev/omap2/omapfb/dss/sdi.c
index d747cc6b59e1..c4be732a4714 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/sdi.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/sdi.c
@@ -29,7 +29,7 @@
 #include <linux/of.h>
 #include <linux/component.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 #include "dss.h"
 
 static struct {
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/venc.c b/drivers/video/fbdev/omap2/omapfb/dss/venc.c
index 26e0ee30adf8..392464da12e4 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/venc.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/venc.c
@@ -37,7 +37,7 @@
 #include <linux/of.h>
 #include <linux/component.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/dss/video-pll.c b/drivers/video/fbdev/omap2/omapfb/dss/video-pll.c
index b1ec59e42940..a890540f2037 100644
--- a/drivers/video/fbdev/omap2/omapfb/dss/video-pll.c
+++ b/drivers/video/fbdev/omap2/omapfb/dss/video-pll.c
@@ -17,7 +17,7 @@
 #include <linux/platform_device.h>
 #include <linux/sched.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #include "dss.h"
 #include "dss_features.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
index 9ddfdd63b84c..ef69273074ba 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
@@ -30,7 +30,7 @@
 #include <linux/export.h>
 #include <linux/sizes.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 #include <video/omapvrfb.h>
 
 #include "omapfb.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
index d3af01c94a58..2fb90cb6803f 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
@@ -30,7 +30,7 @@
 #include <linux/platform_device.h>
 #include <linux/omapfb.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 #include <video/omapvrfb.h>
 
 #include "omapfb.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c b/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c
index 18fa9e1d0033..8087a009c54f 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-sysfs.c
@@ -29,7 +29,7 @@
 #include <linux/mm.h>
 #include <linux/omapfb.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 #include <video/omapvrfb.h>
 
 #include "omapfb.h"
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb.h b/drivers/video/fbdev/omap2/omapfb/omapfb.h
index 623cd872a367..bcb9ff4a607d 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb.h
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb.h
@@ -31,7 +31,7 @@
 #include <linux/dma-attrs.h>
 #include <linux/dma-mapping.h>
 
-#include <video/omapdss.h>
+#include <video/omapfb_dss.h>
 
 #ifdef DEBUG
 extern bool omapfb_debug;