author    Linus Torvalds <torvalds@linux-foundation.org>    2014-12-15 18:52:01 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>    2014-12-15 18:52:01 -0500
commit    988adfdffdd43cfd841df734664727993076d7cb (patch)
tree      6794f7bba8f595500c2b7d33376ad6614adcfaf2
parent    26178ec11ef3c6c814bf16a0a2b9c2f7242e3c64 (diff)
parent    4e0cd68115620bc3236ff4e58e4c073948629b41 (diff)
Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux
Pull drm updates from Dave Airlie:
 "Highlights:

   - AMD KFD driver merge

     This is the AMD HSA interface for exposing a lowlevel interface for
     GPGPU use. They have an open source userspace built on top of this
     interface, and the code looks as good as it was going to get out of
     tree.

   - Initial atomic modesetting work

     The need for an atomic modesetting interface to allow userspace to
     try and send a complete set of modesetting state to the driver has
     arisen, and been suffering from neglect this past year. No more, the
     start of the common code and changes for msm driver to use it are in
     this tree. Ongoing work to get the userspace ioctl finished and the
     code clean will probably wait until next kernel.

   - DisplayID 1.3 and tiled monitor exposed to userspace.

     Tiled monitor property is now exposed for userspace to make use of.

   - Rockchip drm driver merged.

   - imx gpu driver moved out of staging

  Other stuff:

   - core:
       panel - MIPI DSI + new panels.
       expose suggested x/y properties for virtual GPUs

   - i915:
       Initial Skylake (SKL) support
       gen3/4 reset work
       start of dri1/ums removal
       infoframe tracking
       fixes for lots of things.

   - nouveau:
       tegra k1 voltage support
       GM204 modesetting support
       GT21x memory reclocking work

   - radeon:
       CI dpm fixes
       GPUVM improvements
       Initial DPM fan control

   - rcar-du:
       HDMI support added
       removed some support for old boards
       slave encoder driver for Analog Devices adv7511

   - exynos:
       Exynos4415 SoC support

   - msm:
       a4xx gpu support
       atomic helper conversion

   - tegra:
       iommu support
       universal plane support
       ganged-mode DSI support

   - sti:
       HDMI i2c improvements

   - vmwgfx:
       some late fixes.

   - qxl:
       use suggested x/y properties"

* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (969 commits)
  drm: sti: fix module compilation issue
  drm/i915: save/restore GMBUS freq across suspend/resume on gen4
  drm: sti: correctly cleanup CRTC and planes
  drm: sti: add HQVDP plane
  drm: sti: add cursor plane
  drm: sti: enable auxiliary CRTC
  drm: sti: fix delay in VTG programming
  drm: sti: prepare sti_tvout to support auxiliary crtc
  drm: sti: use drm_crtc_vblank_{on/off} instead of drm_vblank_{on/off}
  drm: sti: fix hdmi avi infoframe
  drm: sti: remove event lock while disabling vblank
  drm: sti: simplify gdp code
  drm: sti: clear all mixer control
  drm: sti: remove gpio for HDMI hot plug detection
  drm: sti: allow to change hdmi ddc i2c adapter
  drm/doc: Document drm_add_modes_noedid() usage
  drm/i915: Remove '& 0xffff' from the mask given to WA_REG()
  drm/i915: Invert the mask and val arguments in wa_add() and WA_REG()
  drm: Zero out DRM object memory upon cleanup
  drm/i915/bdw: Fix the write setting up the WIZ hashing mode
  ...
-rw-r--r-- CREDITS | 7
-rw-r--r-- Documentation/DocBook/drm.tmpl | 434
-rw-r--r-- Documentation/devicetree/bindings/drm/imx/fsl-imx-drm.txt (renamed from Documentation/devicetree/bindings/staging/imx-drm/fsl-imx-drm.txt) | 0
-rw-r--r-- Documentation/devicetree/bindings/drm/imx/hdmi.txt (renamed from Documentation/devicetree/bindings/staging/imx-drm/hdmi.txt) | 0
-rw-r--r-- Documentation/devicetree/bindings/drm/imx/ldb.txt (renamed from Documentation/devicetree/bindings/staging/imx-drm/ldb.txt) | 0
-rw-r--r-- Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/gpu/st,stih4xx.txt | 29
-rw-r--r-- Documentation/devicetree/bindings/panel/auo,b116xw03.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/panel/hannstar,hsd070pww1.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/panel/hit,tx23d38vm0caa.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/panel/innolux,g121i1-l01.txt | 7
-rw-r--r-- Documentation/devicetree/bindings/panel/sharp,lq101r1sx01.txt | 49
-rw-r--r-- Documentation/devicetree/bindings/vendor-prefixes.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/video/adi,adv7511.txt | 88
-rw-r--r-- Documentation/devicetree/bindings/video/exynos_dsim.txt | 1
-rw-r--r-- Documentation/devicetree/bindings/video/rockchip-drm.txt | 19
-rw-r--r-- Documentation/devicetree/bindings/video/rockchip-vop.txt | 58
-rw-r--r-- Documentation/devicetree/bindings/video/samsung-fimd.txt | 1
-rw-r--r-- MAINTAINERS | 17
-rw-r--r-- arch/arm/mach-shmobile/board-lager.c | 58
-rw-r--r-- arch/arm/mach-shmobile/board-marzen.c | 58
-rw-r--r-- arch/x86/kernel/early-quirks.c | 23
-rw-r--r-- drivers/char/agp/intel-gtt.c | 4
-rw-r--r-- drivers/gpu/drm/Kconfig | 6
-rw-r--r-- drivers/gpu/drm/Makefile | 7
-rw-r--r-- drivers/gpu/drm/README.drm | 43
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/Kconfig | 9
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/Makefile | 14
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/cik_regs.h | 221
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 595
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_crat.h | 294
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 308
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 1062
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 146
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c | 256
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c | 356
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c | 176
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c | 353
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h | 69
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_module.c | 159
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c | 346
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h | 91
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 565
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_pasid.c | 96
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers.h | 405
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_pm4_opcodes.h | 107
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 600
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_process.c | 410
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 343
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_queue.c | 85
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 1235
-rw-r--r-- drivers/gpu/drm/amd/amdkfd/kfd_topology.h | 168
-rw-r--r-- drivers/gpu/drm/amd/include/kgd_kfd_interface.h | 185
-rw-r--r-- drivers/gpu/drm/armada/armada_crtc.c | 1
-rw-r--r-- drivers/gpu/drm/ast/ast_mode.c | 1
-rw-r--r-- drivers/gpu/drm/bochs/bochs_fbdev.c | 18
-rw-r--r-- drivers/gpu/drm/bochs/bochs_hw.c | 23
-rw-r--r-- drivers/gpu/drm/bochs/bochs_kms.c | 22
-rw-r--r-- drivers/gpu/drm/cirrus/cirrus_drv.h | 3
-rw-r--r-- drivers/gpu/drm/cirrus/cirrus_fbdev.c | 5
-rw-r--r-- drivers/gpu/drm/cirrus/cirrus_main.c | 40
-rw-r--r-- drivers/gpu/drm/cirrus/cirrus_mode.c | 1
-rw-r--r-- drivers/gpu/drm/drm_atomic.c | 657
-rw-r--r-- drivers/gpu/drm/drm_atomic_helper.c | 1966
-rw-r--r-- drivers/gpu/drm/drm_crtc.c | 581
-rw-r--r-- drivers/gpu/drm/drm_crtc_helper.c | 132
-rw-r--r-- drivers/gpu/drm/drm_dp_helper.c | 201
-rw-r--r-- drivers/gpu/drm/drm_dp_mst_topology.c | 68
-rw-r--r-- drivers/gpu/drm/drm_drv.c | 7
-rw-r--r-- drivers/gpu/drm/drm_edid.c | 231
-rw-r--r-- drivers/gpu/drm/drm_edid_load.c | 3
-rw-r--r-- drivers/gpu/drm/drm_fb_helper.c | 135
-rw-r--r-- drivers/gpu/drm/drm_flip_work.c | 105
-rw-r--r-- drivers/gpu/drm/drm_fops.c | 13
-rw-r--r-- drivers/gpu/drm/drm_gem.c | 13
-rw-r--r-- drivers/gpu/drm/drm_gem_cma_helper.c | 259
-rw-r--r-- drivers/gpu/drm/drm_irq.c | 9
-rw-r--r-- drivers/gpu/drm/drm_mipi_dsi.c | 660
-rw-r--r-- drivers/gpu/drm/drm_modes.c | 2
-rw-r--r-- drivers/gpu/drm/drm_modeset_lock.c | 43
-rw-r--r-- drivers/gpu/drm/drm_plane_helper.c | 203
-rw-r--r-- drivers/gpu/drm/drm_prime.c | 6
-rw-r--r-- drivers/gpu/drm/drm_probe_helper.c | 3
-rw-r--r-- drivers/gpu/drm/exynos/exynos_dp_core.c | 132
-rw-r--r-- drivers/gpu/drm/exynos/exynos_dp_core.h | 5
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_crtc.h | 5
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_dpi.c | 42
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_drv.c | 252
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_drv.h | 83
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_dsi.c | 129
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_encoder.h | 2
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_fimd.c | 266
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_iommu.h | 1
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_ipp.c | 3
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_vidi.c | 150
-rw-r--r-- drivers/gpu/drm/exynos/exynos_hdmi.c | 65
-rw-r--r-- drivers/gpu/drm/exynos/exynos_mixer.c | 126
-rw-r--r-- drivers/gpu/drm/gma500/Makefile | 1
-rw-r--r-- drivers/gpu/drm/gma500/cdv_intel_dp.c | 195
-rw-r--r-- drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c | 75
-rw-r--r-- drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.h | 12
-rw-r--r-- drivers/gpu/drm/gma500/oaktrail_lvds.c | 31
-rw-r--r-- drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c | 170
-rw-r--r-- drivers/gpu/drm/gma500/psb_drv.c | 20
-rw-r--r-- drivers/gpu/drm/gma500/psb_drv.h | 3
-rw-r--r-- drivers/gpu/drm/gma500/psb_intel_display.c | 1
-rw-r--r-- drivers/gpu/drm/gma500/psb_intel_drv.h | 1
-rw-r--r-- drivers/gpu/drm/gma500/psb_intel_sdvo.c | 49
-rw-r--r-- drivers/gpu/drm/i2c/Kconfig | 6
-rw-r--r-- drivers/gpu/drm/i2c/Makefile | 2
-rw-r--r-- drivers/gpu/drm/i2c/adv7511.c | 1010
-rw-r--r-- drivers/gpu/drm/i2c/adv7511.h | 289
-rw-r--r-- drivers/gpu/drm/i915/Makefile | 13
-rw-r--r-- drivers/gpu/drm/i915/i915_cmd_parser.c | 39
-rw-r--r-- drivers/gpu/drm/i915/i915_debugfs.c | 270
-rw-r--r-- drivers/gpu/drm/i915/i915_dma.c | 1070
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.c | 359
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.h | 311
-rw-r--r-- drivers/gpu/drm/i915/i915_gem.c | 645
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_context.c | 18
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 87
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_gtt.c | 96
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_gtt.h | 10
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_render_state.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_stolen.c | 8
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_tiling.c | 60
-rw-r--r-- drivers/gpu/drm/i915/i915_gpu_error.c | 43
-rw-r--r-- drivers/gpu/drm/i915/i915_ioc32.c | 2
-rw-r--r-- drivers/gpu/drm/i915/i915_irq.c | 1000
-rw-r--r-- drivers/gpu/drm/i915/i915_reg.h | 643
-rw-r--r-- drivers/gpu/drm/i915/i915_suspend.c | 57
-rw-r--r-- drivers/gpu/drm/i915/i915_sysfs.c | 22
-rw-r--r-- drivers/gpu/drm/i915/i915_trace.h | 104
-rw-r--r-- drivers/gpu/drm/i915/i915_ums.c | 14
-rw-r--r-- drivers/gpu/drm/i915/intel_audio.c | 463
-rw-r--r-- drivers/gpu/drm/i915/intel_bios.h | 10
-rw-r--r-- drivers/gpu/drm/i915/intel_crt.c | 4
-rw-r--r-- drivers/gpu/drm/i915/intel_ddi.c | 719
-rw-r--r-- drivers/gpu/drm/i915/intel_display.c | 2851
-rw-r--r-- drivers/gpu/drm/i915/intel_dp.c | 985
-rw-r--r-- drivers/gpu/drm/i915/intel_dp_mst.c | 16
-rw-r--r-- drivers/gpu/drm/i915/intel_drv.h | 212
-rw-r--r-- drivers/gpu/drm/i915/intel_dsi.c | 2
-rw-r--r-- drivers/gpu/drm/i915/intel_fbdev.c | 44
-rw-r--r-- drivers/gpu/drm/i915/intel_fifo_underrun.c | 381
-rw-r--r-- drivers/gpu/drm/i915/intel_frontbuffer.c | 279
-rw-r--r-- drivers/gpu/drm/i915/intel_hdmi.c | 120
-rw-r--r-- drivers/gpu/drm/i915/intel_lrc.c | 338
-rw-r--r-- drivers/gpu/drm/i915/intel_lrc.h | 6
-rw-r--r-- drivers/gpu/drm/i915/intel_lvds.c | 4
-rw-r--r-- drivers/gpu/drm/i915/intel_panel.c | 136
-rw-r--r-- drivers/gpu/drm/i915/intel_pm.c | 2653
-rw-r--r-- drivers/gpu/drm/i915/intel_psr.c | 481
-rw-r--r-- drivers/gpu/drm/i915/intel_renderstate.h | 1
-rw-r--r-- drivers/gpu/drm/i915/intel_renderstate_gen8.c | 792
-rw-r--r-- drivers/gpu/drm/i915/intel_renderstate_gen9.c | 974
-rw-r--r-- drivers/gpu/drm/i915/intel_ringbuffer.c | 419
-rw-r--r-- drivers/gpu/drm/i915/intel_ringbuffer.h | 12
-rw-r--r-- drivers/gpu/drm/i915/intel_runtime_pm.c | 1406
-rw-r--r-- drivers/gpu/drm/i915/intel_sdvo.c | 47
-rw-r--r-- drivers/gpu/drm/i915/intel_sprite.c | 605
-rw-r--r-- drivers/gpu/drm/i915/intel_tv.c | 9
-rw-r--r-- drivers/gpu/drm/i915/intel_uncore.c | 495
-rw-r--r-- drivers/gpu/drm/imx/Kconfig (renamed from drivers/staging/imx-drm/Kconfig) | 0
-rw-r--r-- drivers/gpu/drm/imx/Makefile (renamed from drivers/staging/imx-drm/Makefile) | 0
-rw-r--r-- drivers/gpu/drm/imx/imx-drm-core.c (renamed from drivers/staging/imx-drm/imx-drm-core.c) | 1
-rw-r--r-- drivers/gpu/drm/imx/imx-drm.h (renamed from drivers/staging/imx-drm/imx-drm.h) | 0
-rw-r--r-- drivers/gpu/drm/imx/imx-hdmi.c (renamed from drivers/staging/imx-drm/imx-hdmi.c) | 0
-rw-r--r-- drivers/gpu/drm/imx/imx-hdmi.h (renamed from drivers/staging/imx-drm/imx-hdmi.h) | 0
-rw-r--r-- drivers/gpu/drm/imx/imx-ldb.c (renamed from drivers/staging/imx-drm/imx-ldb.c) | 0
-rw-r--r-- drivers/gpu/drm/imx/imx-tve.c (renamed from drivers/staging/imx-drm/imx-tve.c) | 0
-rw-r--r-- drivers/gpu/drm/imx/ipuv3-crtc.c (renamed from drivers/staging/imx-drm/ipuv3-crtc.c) | 0
-rw-r--r-- drivers/gpu/drm/imx/ipuv3-plane.c (renamed from drivers/staging/imx-drm/ipuv3-plane.c) | 0
-rw-r--r-- drivers/gpu/drm/imx/ipuv3-plane.h (renamed from drivers/staging/imx-drm/ipuv3-plane.h) | 0
-rw-r--r-- drivers/gpu/drm/imx/parallel-display.c (renamed from drivers/staging/imx-drm/parallel-display.c) | 0
-rw-r--r-- drivers/gpu/drm/mgag200/mgag200_mode.c | 1
-rw-r--r-- drivers/gpu/drm/msm/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/msm/Makefile | 4
-rw-r--r-- drivers/gpu/drm/msm/adreno/a2xx.xml.h | 26
-rw-r--r-- drivers/gpu/drm/msm/adreno/a3xx.xml.h | 247
-rw-r--r-- drivers/gpu/drm/msm/adreno/a3xx_gpu.c | 91
-rw-r--r-- drivers/gpu/drm/msm/adreno/a4xx.xml.h | 2144
-rw-r--r-- drivers/gpu/drm/msm/adreno/a4xx_gpu.c | 604
-rw-r--r-- drivers/gpu/drm/msm/adreno/a4xx_gpu.h | 34
-rw-r--r-- drivers/gpu/drm/msm/adreno/adreno_common.xml.h | 17
-rw-r--r-- drivers/gpu/drm/msm/adreno/adreno_device.c | 13
-rw-r--r-- drivers/gpu/drm/msm/adreno/adreno_gpu.c | 31
-rw-r--r-- drivers/gpu/drm/msm/adreno/adreno_gpu.h | 126
-rw-r--r-- drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h | 75
-rw-r--r-- drivers/gpu/drm/msm/dsi/dsi.xml.h | 8
-rw-r--r-- drivers/gpu/drm/msm/dsi/mmss_cc.xml.h | 8
-rw-r--r-- drivers/gpu/drm/msm/dsi/sfpb.xml.h | 8
-rw-r--r-- drivers/gpu/drm/msm/hdmi/hdmi.c | 144
-rw-r--r-- drivers/gpu/drm/msm/hdmi/hdmi.h | 17
-rw-r--r-- drivers/gpu/drm/msm/hdmi/hdmi.xml.h | 8
-rw-r--r-- drivers/gpu/drm/msm/hdmi/hdmi_bridge.c | 3
-rw-r--r-- drivers/gpu/drm/msm/hdmi/hdmi_connector.c | 7
-rw-r--r-- drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c | 2
-rw-r--r-- drivers/gpu/drm/msm/hdmi/qfprom.xml.h | 8
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h | 8
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c | 348
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c | 17
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h | 17
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c | 3
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c | 121
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h | 10
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c | 207
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h | 91
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | 466
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c | 322
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h | 122
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c | 24
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c | 93
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c | 273
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h | 131
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c | 328
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c | 241
-rw-r--r-- drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h | 23
-rw-r--r-- drivers/gpu/drm/msm/msm_atomic.c | 163
-rw-r--r-- drivers/gpu/drm/msm/msm_drv.c | 25
-rw-r--r-- drivers/gpu/drm/msm/msm_drv.h | 35
-rw-r--r-- drivers/gpu/drm/msm/msm_fb.c | 45
-rw-r--r-- drivers/gpu/drm/msm/msm_fbdev.c | 3
-rw-r--r-- drivers/gpu/drm/msm/msm_gem.c | 40
-rw-r--r-- drivers/gpu/drm/msm/msm_gem.h | 13
-rw-r--r-- drivers/gpu/drm/msm/msm_gem_prime.c | 13
-rw-r--r-- drivers/gpu/drm/nouveau/Makefile | 18
-rw-r--r-- drivers/gpu/drm/nouveau/core/core/handle.c | 113
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/device/base.c | 14
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/device/gm100.c | 43
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/device/nve0.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/disp/dport.c | 9
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/disp/gm107.c | 16
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/disp/gm204.c | 114
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/disp/nv50.c | 94
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/disp/nv50.h | 63
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/disp/nv84.c | 40
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/disp/nv94.c | 30
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/disp/nva0.c | 16
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/disp/nva3.c | 16
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c | 99
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/disp/nve0.c | 30
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c | 16
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/disp/outp.c | 5
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/disp/sorgm204.c | 144
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c | 48
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/core/include/core/device.h | 9
-rw-r--r-- drivers/gpu/drm/nouveau/core/include/core/handle.h | 5
-rw-r--r-- drivers/gpu/drm/nouveau/core/include/core/object.h | 17
-rw-r--r-- drivers/gpu/drm/nouveau/core/include/engine/disp.h | 1
-rw-r--r-- drivers/gpu/drm/nouveau/core/include/subdev/bios/M0203.h | 31
-rw-r--r-- drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h | 14
-rw-r--r-- drivers/gpu/drm/nouveau/core/include/subdev/bios/image.h | 13
-rw-r--r-- drivers/gpu/drm/nouveau/core/include/subdev/bios/npde.h | 12
-rw-r--r-- drivers/gpu/drm/nouveau/core/include/subdev/bios/pcir.h | 18
-rw-r--r-- drivers/gpu/drm/nouveau/core/include/subdev/bios/pmu.h | 37
-rw-r--r-- drivers/gpu/drm/nouveau/core/include/subdev/bios/ramcfg.h | 23
-rw-r--r-- drivers/gpu/drm/nouveau/core/include/subdev/devinit.h | 1
-rw-r--r-- drivers/gpu/drm/nouveau/core/include/subdev/i2c.h | 3
-rw-r--r-- drivers/gpu/drm/nouveau/core/include/subdev/pwr.h | 2
-rw-r--r-- drivers/gpu/drm/nouveau/core/include/subdev/volt.h | 1
-rw-r--r-- drivers/gpu/drm/nouveau/core/os.h | 1
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/bios/M0203.c | 129
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/bios/base.c | 369
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c | 27
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/bios/disp.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/bios/dp.c | 10
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c | 45
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/bios/image.c | 78
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/bios/init.c | 56
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/bios/npde.c | 59
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/bios/pcir.c | 69
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/bios/pmu.c | 135
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/bios/priv.h | 25
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/bios/ramcfg.c | 13
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c | 3
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/bios/shadow.c | 270
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/bios/shadowacpi.c | 111
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/bios/shadowof.c | 71
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/bios/shadowpci.c | 108
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c | 112
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/bios/shadowrom.c | 69
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/bios/timing.c | 42
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/clock/gk20a.c | 17
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/devinit/base.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/devinit/gm107.c | 3
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/devinit/gm204.c | 173
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c | 10
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.h | 2
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/devinit/nv84.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/devinit/nv98.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/devinit/nva3.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/devinit/nvaf.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/devinit/nvc0.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/devinit/priv.h | 2
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/fb/base.c | 37
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/fb/gddr3.c | 117
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/fb/priv.h | 1
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h | 16
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c | 813
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/fb/sddr2.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/fb/sddr3.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/i2c/base.c | 97
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/i2c/gm204.c | 221
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.h | 6
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c | 13
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c | 6
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/i2c/nve0.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/i2c/padgm204.c | 86
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/i2c/priv.h | 4
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/memx.fuc | 111
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h | 738
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h | 863
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h | 828
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h | 754
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h | 5
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c | 37
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/volt/base.c | 67
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/volt/gk20a.c | 199
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv04/crtc.c | 5
-rw-r--r-- drivers/gpu/drm/nouveau/dispnv04/overlay.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_abi16.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_bios.c | 26
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_bo.c | 196
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_bo.h | 5
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_chan.c | 11
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_display.c | 30
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_drm.c | 248
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_fbcon.c | 36
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_gem.c | 15
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_platform.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_platform.h | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_prime.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nv17_fence.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_display.c | 134
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_fence.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nv84_fence.c | 8
-rw-r--r-- drivers/gpu/drm/nouveau/nvif/class.h | 3
-rw-r--r-- drivers/gpu/drm/nouveau/nvif/client.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/nvif/driver.h | 1
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_crtc.c | 1
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_gem.c | 3
-rw-r--r-- drivers/gpu/drm/omapdrm/omap_plane.c | 15
-rw-r--r-- drivers/gpu/drm/panel/Kconfig | 13
-rw-r--r-- drivers/gpu/drm/panel/Makefile | 1
-rw-r--r-- drivers/gpu/drm/panel/panel-ld9040.c | 13
-rw-r--r-- drivers/gpu/drm/panel/panel-s6e8aa0.c | 30
-rw-r--r-- drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c | 464
-rw-r--r-- drivers/gpu/drm/panel/panel-simple.c | 133
-rw-r--r-- drivers/gpu/drm/qxl/qxl_display.c | 36
-rw-r--r-- drivers/gpu/drm/qxl/qxl_release.c | 3
-rw-r--r-- drivers/gpu/drm/r128/r128_state.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/Makefile | 4
-rw-r--r-- drivers/gpu/drm/radeon/atombios_crtc.c | 1
-rw-r--r-- drivers/gpu/drm/radeon/ci_dpm.c | 752
-rw-r--r-- drivers/gpu/drm/radeon/ci_dpm.h | 8
-rw-r--r-- drivers/gpu/drm/radeon/ci_smc.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/cik.c | 214
-rw-r--r-- drivers/gpu/drm/radeon/cik_reg.h | 136
-rw-r--r-- drivers/gpu/drm/radeon/cik_sdma.c | 42
-rw-r--r-- drivers/gpu/drm/radeon/cikd.h | 93
-rw-r--r-- drivers/gpu/drm/radeon/evergreen_cs.c | 14
-rw-r--r-- drivers/gpu/drm/radeon/evergreen_dma.c | 18
-rw-r--r-- drivers/gpu/drm/radeon/ni.c | 20
-rw-r--r-- drivers/gpu/drm/radeon/ni_dma.c | 17
-rw-r--r-- drivers/gpu/drm/radeon/ppsmc.h | 18
-rw-r--r-- drivers/gpu/drm/radeon/pptable.h | 8
-rw-r--r-- drivers/gpu/drm/radeon/r100.c | 10
-rw-r--r-- drivers/gpu/drm/radeon/r200.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/r300.c | 6
-rw-r--r-- drivers/gpu/drm/radeon/r600.c | 18
-rw-r--r-- drivers/gpu/drm/radeon/r600_cs.c | 26
-rw-r--r-- drivers/gpu/drm/radeon/r600_dma.c | 18
-rw-r--r-- drivers/gpu/drm/radeon/r600_dpm.c | 9
-rw-r--r-- drivers/gpu/drm/radeon/r600_dpm.h | 3
-rw-r--r-- drivers/gpu/drm/radeon/radeon.h | 162
-rw-r--r-- drivers/gpu/drm/radeon/radeon_asic.h | 18
-rw-r--r-- drivers/gpu/drm/radeon/radeon_atombios.c | 21
-rw-r--r-- drivers/gpu/drm/radeon/radeon_cs.c | 121
-rw-r--r-- drivers/gpu/drm/radeon/radeon_cursor.c | 268
-rw-r--r-- drivers/gpu/drm/radeon/radeon_device.c | 32
-rw-r--r-- drivers/gpu/drm/radeon/radeon_display.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/radeon_drv.c | 5
-rw-r--r-- drivers/gpu/drm/radeon/radeon_fb.c | 32
-rw-r--r-- drivers/gpu/drm/radeon/radeon_fence.c | 1
-rw-r--r-- drivers/gpu/drm/radeon/radeon_gem.c | 92
-rw-r--r-- drivers/gpu/drm/radeon/radeon_ib.c | 16
-rw-r--r-- drivers/gpu/drm/radeon/radeon_kfd.c | 563
-rw-r--r-- drivers/gpu/drm/radeon/radeon_kfd.h | 47
-rw-r--r-- drivers/gpu/drm/radeon/radeon_kms.c | 9
-rw-r--r-- drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 1
-rw-r--r-- drivers/gpu/drm/radeon/radeon_mode.h | 20
-rw-r--r-- drivers/gpu/drm/radeon/radeon_object.c | 83
-rw-r--r-- drivers/gpu/drm/radeon/radeon_object.h | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_semaphore.c | 154
-rw-r--r-- drivers/gpu/drm/radeon/radeon_sync.c | 220
-rw-r--r-- drivers/gpu/drm/radeon/radeon_trace.h | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_ttm.c | 27
-rw-r--r-- drivers/gpu/drm/radeon/radeon_uvd.c | 14
-rw-r--r-- drivers/gpu/drm/radeon/radeon_vce.c | 8
-rw-r--r-- drivers/gpu/drm/radeon/radeon_vm.c | 236
-rw-r--r-- drivers/gpu/drm/radeon/rv770_dma.c | 18
-rw-r--r-- drivers/gpu/drm/radeon/si.c | 24
-rw-r--r-- drivers/gpu/drm/radeon/si_dma.c | 37
-rw-r--r-- drivers/gpu/drm/radeon/si_dpm.c | 381
-rw-r--r-- drivers/gpu/drm/radeon/si_dpm.h | 5
-rw-r--r-- drivers/gpu/drm/radeon/si_smc.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/sid.h | 40
-rw-r--r-- drivers/gpu/drm/radeon/sislands_smc.h | 25
-rw-r--r-- drivers/gpu/drm/radeon/smu7_discrete.h | 30
-rw-r--r-- drivers/gpu/drm/rcar-du/Kconfig | 11
-rw-r--r-- drivers/gpu/drm/rcar-du/Makefile | 2
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_crtc.c | 3
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_crtc.h | 10
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_drv.c | 4
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_drv.h | 2
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_encoder.c | 45
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_encoder.h | 23
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c | 121
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_hdmicon.h | 31
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c | 151
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_hdmienc.h | 35
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_kms.c | 57
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c | 31
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h | 2
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h | 1
-rw-r--r-- drivers/gpu/drm/rcar-du/rcar_du_vgacon.c | 5
-rw-r--r-- drivers/gpu/drm/rockchip/Kconfig | 17
-rw-r--r-- drivers/gpu/drm/rockchip/Makefile | 8
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_drv.c | 551
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_drv.h | 68
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_fb.c | 201
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_fb.h | 28
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c | 210
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h | 21
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_gem.c | 294
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_gem.h | 54
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 1455
-rw-r--r-- drivers/gpu/drm/rockchip/rockchip_drm_vop.h | 201
-rw-r--r-- drivers/gpu/drm/shmobile/shmob_drm_crtc.c | 1
-rw-r--r-- drivers/gpu/drm/sti/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/sti/Makefile | 4
-rw-r--r-- drivers/gpu/drm/sti/sti_compositor.c | 20
-rw-r--r-- drivers/gpu/drm/sti/sti_compositor.h | 2
-rw-r--r-- drivers/gpu/drm/sti/sti_cursor.c | 242
-rw-r--r-- drivers/gpu/drm/sti/sti_cursor.h | 12
-rw-r--r-- drivers/gpu/drm/sti/sti_drm_crtc.c | 24
-rw-r--r-- drivers/gpu/drm/sti/sti_drm_drv.c | 6
-rw-r--r-- drivers/gpu/drm/sti/sti_drm_plane.c | 4
-rw-r--r-- drivers/gpu/drm/sti/sti_gdp.c | 62
-rw-r--r-- drivers/gpu/drm/sti/sti_hdmi.c | 84
-rw-r--r-- drivers/gpu/drm/sti/sti_hdmi.h | 6
-rw-r--r-- drivers/gpu/drm/sti/sti_hqvdp.c | 1073
-rw-r--r-- drivers/gpu/drm/sti/sti_hqvdp.h | 12
-rw-r--r-- drivers/gpu/drm/sti/sti_hqvdp_lut.h | 373
-rw-r--r-- drivers/gpu/drm/sti/sti_layer.c | 18
-rw-r--r-- drivers/gpu/drm/sti/sti_layer.h | 12
-rw-r--r-- drivers/gpu/drm/sti/sti_mixer.c | 17
-rw-r--r-- drivers/gpu/drm/sti/sti_mixer.h | 3
-rw-r--r-- drivers/gpu/drm/sti/sti_tvout.c | 104
-rw-r--r-- drivers/gpu/drm/sti/sti_vtg.c | 31
-rw-r--r-- drivers/gpu/drm/tegra/Kconfig | 1
-rw-r--r-- drivers/gpu/drm/tegra/dc.c | 596
-rw-r--r-- drivers/gpu/drm/tegra/drm.c | 46
-rw-r--r-- drivers/gpu/drm/tegra/drm.h | 18
-rw-r--r-- drivers/gpu/drm/tegra/dsi.c | 811
-rw-r--r-- drivers/gpu/drm/tegra/dsi.h | 14
-rw-r--r-- drivers/gpu/drm/tegra/fb.c | 52
-rw-r--r-- drivers/gpu/drm/tegra/gem.c | 366
-rw-r--r-- drivers/gpu/drm/tegra/gem.h | 14
-rw-r--r-- drivers/gpu/drm/tegra/output.c | 35
-rw-r--r-- drivers/gpu/drm/tilcdc/tilcdc_crtc.c | 7
-rw-r--r-- drivers/gpu/drm/tilcdc/tilcdc_drv.c | 3
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo_manager.c | 8
-rw-r--r-- drivers/gpu/drm/ttm/ttm_execbuf_util.c | 10
-rw-r--r-- drivers/gpu/drm/ttm/ttm_page_alloc.c | 26
-rw-r--r-- drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | 25
-rw-r--r-- drivers/gpu/drm/udl/Makefile | 2
-rw-r--r-- drivers/gpu/drm/udl/udl_dmabuf.c | 276
-rw-r--r-- drivers/gpu/drm/udl/udl_drv.c | 2
-rw-r--r-- drivers/gpu/drm/udl/udl_drv.h | 8
-rw-r--r-- drivers/gpu/drm/udl/udl_gem.c | 97
-rw-r--r-- drivers/gpu/drm/udl/udl_modeset.c | 1
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 11
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 6
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 39
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 4
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 1
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 2
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | 1
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_shader.c | 2
-rw-r--r-- drivers/gpu/host1x/cdma.c | 2
-rw-r--r-- drivers/gpu/host1x/cdma.h | 2
-rw-r--r-- drivers/gpu/host1x/hw/cdma_hw.c | 10
-rw-r--r-- drivers/gpu/host1x/hw/channel_hw.c | 12
-rw-r--r-- drivers/gpu/host1x/hw/debug_hw.c | 4
-rw-r--r-- drivers/gpu/host1x/job.h | 2
-rw-r--r-- drivers/gpu/host1x/mipi.c | 148
-rw-r--r-- drivers/iommu/amd_iommu_v2.c | 61
-rw-r--r-- drivers/soc/tegra/fuse/fuse-tegra.c | 1
-rw-r--r-- drivers/staging/Kconfig | 2
-rw-r--r-- drivers/staging/Makefile | 1
-rw-r--r-- drivers/staging/imx-drm/TODO | 17
-rw-r--r-- include/drm/drmP.h | 27
-rw-r--r-- include/drm/drm_atomic.h | 69
-rw-r--r-- include/drm/drm_atomic_helper.h | 126
-rw-r--r-- include/drm/drm_crtc.h | 327
-rw-r--r-- include/drm/drm_crtc_helper.h | 13
-rw-r--r-- include/drm/drm_displayid.h | 76
-rw-r--r-- include/drm/drm_dp_helper.h | 26
-rw-r--r-- include/drm/drm_dp_mst_helper.h | 8
-rw-r--r-- include/drm/drm_edid.h | 109
-rw-r--r-- include/drm/drm_fb_helper.h | 6
-rw-r--r-- include/drm/drm_flip_work.h | 33
-rw-r--r-- include/drm/drm_gem.h | 7
-rw-r--r-- include/drm/drm_gem_cma_helper.h | 30
-rw-r--r-- include/drm/drm_mipi_dsi.h | 94
-rw-r--r-- include/drm/drm_modeset_lock.h | 5
-rw-r--r-- include/drm/drm_plane_helper.h | 44
-rw-r--r-- include/drm/i915_pciids.h | 17
-rw-r--r-- include/drm/ttm/ttm_execbuf_util.h | 9
-rw-r--r-- include/linux/hdmi.h | 21
-rw-r--r-- include/linux/mmu_notifier.h | 88
-rw-r--r-- include/linux/platform_data/rcar-du.h | 74
-rw-r--r-- include/trace/events/host1x.h | 27
-rw-r--r-- include/uapi/drm/drm_mode.h | 2
-rw-r--r-- include/uapi/drm/i915_drm.h | 7
-rw-r--r-- include/uapi/linux/kfd_ioctl.h | 154
-rw-r--r-- kernel/events/uprobes.c | 2
-rw-r--r-- kernel/time/time.c | 1
-rw-r--r-- mm/fremap.c | 2
-rw-r--r-- mm/huge_memory.c | 9
-rw-r--r-- mm/hugetlb.c | 7
-rw-r--r-- mm/ksm.c | 4
-rw-r--r-- mm/memory.c | 3
-rw-r--r-- mm/migrate.c | 3
-rw-r--r-- mm/mmu_notifier.c | 25
-rw-r--r-- mm/rmap.c | 2
549 files changed, 53440 insertions, 14575 deletions
diff --git a/CREDITS b/CREDITS
index bb6278884f89..c56d8aa10131 100644
--- a/CREDITS
+++ b/CREDITS
@@ -1197,6 +1197,13 @@ S: R. Tocantins, 89 - Cristo Rei
 S: 80050-430 - Curitiba - Paraná
 S: Brazil
 
+N: Oded Gabbay
+E: oded.gabbay@gmail.com
+D: AMD KFD maintainer
+S: 12 Shraga Raphaeli
+S: Petah-Tikva, 4906418
+S: Israel
+
 N: Kumar Gala
 E: galak@kernel.crashing.org
 D: Embedded PowerPC 6xx/7xx/74xx/82xx/83xx/85xx support
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index be35bc328b77..4b592ffbafee 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -492,10 +492,10 @@ char *date;</synopsis>
     <sect2>
       <title>The Translation Table Manager (TTM)</title>
       <para>
         TTM design background and information belongs here.
       </para>
       <sect3>
         <title>TTM initialization</title>
         <warning><para>This section is outdated.</para></warning>
         <para>
           Drivers wishing to support TTM must fill out a drm_bo_driver
@@ -503,42 +503,42 @@ char *date;</synopsis>
           pointers for initializing the TTM, allocating and freeing memory,
           waiting for command completion and fence synchronization, and memory
           migration. See the radeon_ttm.c file for an example of usage.
         </para>
         <para>
           The ttm_global_reference structure is made up of several fields:
         </para>
         <programlisting>
           struct ttm_global_reference {
                   enum ttm_global_types global_type;
                   size_t size;
                   void *object;
                   int (*init) (struct ttm_global_reference *);
                   void (*release) (struct ttm_global_reference *);
           };
         </programlisting>
         <para>
           There should be one global reference structure for your memory
           manager as a whole, and there will be others for each object
           created by the memory manager at runtime. Your global TTM should
           have a type of TTM_GLOBAL_TTM_MEM. The size field for the global
           object should be sizeof(struct ttm_mem_global), and the init and
           release hooks should point at your driver-specific init and
           release routines, which probably eventually call
           ttm_mem_global_init and ttm_mem_global_release, respectively.
         </para>
         <para>
           Once your global TTM accounting structure is set up and initialized
           by calling ttm_global_item_ref() on it,
           you need to create a buffer object TTM to
           provide a pool for buffer object allocation by clients and the
           kernel itself. The type of this object should be TTM_GLOBAL_TTM_BO,
           and its size should be sizeof(struct ttm_bo_global). Again,
           driver-specific init and release functions may be provided,
           likely eventually calling ttm_bo_global_init() and
           ttm_bo_global_release(), respectively. Also, like the previous
           object, ttm_global_item_ref() is used to create an initial reference
           count for the TTM, which will call your initialization function.
         </para>
       </sect3>
     </sect2>
     <sect2 id="drm-gem">
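
[Editorial aside: a minimal sketch of the initialization flow the section above describes, using the names the (self-admittedly outdated) DocBook text itself uses; struct my_driver and the my_driver_* helpers are hypothetical, and current kernels spell these interfaces drm_global_* instead of ttm_global_*.]

#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_memory.h>

/* Hypothetical driver object holding the global reference. */
struct my_driver {
	struct ttm_global_reference mem_global_ref;
};

static int my_driver_mem_global_init(struct ttm_global_reference *ref)
{
	/* ref->object was allocated with the size requested below. */
	return ttm_mem_global_init(ref->object);
}

static void my_driver_mem_global_release(struct ttm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

static int my_driver_global_init(struct my_driver *drv)
{
	struct ttm_global_reference *ref = &drv->mem_global_ref;

	ref->global_type = TTM_GLOBAL_TTM_MEM;
	ref->size = sizeof(struct ttm_mem_global);
	ref->init = &my_driver_mem_global_init;
	ref->release = &my_driver_mem_global_release;

	/* Takes the initial reference and invokes the init hook. */
	return ttm_global_item_ref(ref);
}

[The same pattern then repeats for the TTM_GLOBAL_TTM_BO object, sized sizeof(struct ttm_bo_global) and backed by ttm_bo_global_init()/ttm_bo_global_release(), as the text notes.]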
@@ -566,19 +566,19 @@ char *date;</synopsis>
         using driver-specific ioctls.
       </para>
       <para>
         On a fundamental level, GEM involves several operations:
         <itemizedlist>
           <listitem>Memory allocation and freeing</listitem>
           <listitem>Command execution</listitem>
           <listitem>Aperture management at command execution time</listitem>
         </itemizedlist>
         Buffer object allocation is relatively straightforward and largely
         provided by Linux's shmem layer, which provides memory to back each
         object.
       </para>
       <para>
         Device-specific operations, such as command execution, pinning, buffer
         read &amp; write, mapping, and domain ownership transfers are left to
         driver-specific ioctls.
       </para>
       <sect3>
@@ -738,16 +738,16 @@ char *date;</synopsis>
         respectively. The conversion is handled by the DRM core without any
         driver-specific support.
       </para>
       <para>
         GEM also supports buffer sharing with dma-buf file descriptors through
         PRIME. GEM-based drivers must use the provided helpers functions to
         implement the exporting and importing correctly. See <xref linkend="drm-prime-support" />.
         Since sharing file descriptors is inherently more secure than the
         easily guessable and global GEM names it is the preferred buffer
         sharing mechanism. Sharing buffers through GEM names is only supported
         for legacy userspace. Furthermore PRIME also allows cross-device
         buffer sharing since it is based on dma-bufs.
       </para>
     </sect3>
     <sect3 id="drm-gem-objects-mapping">
       <title>GEM Objects Mapping</title>
@@ -852,7 +852,7 @@ char *date;</synopsis>
     <sect3>
       <title>Command Execution</title>
       <para>
         Perhaps the most important GEM function for GPU devices is providing a
         command execution interface to clients. Client programs construct
         command buffers containing references to previously allocated memory
         objects, and then submit them to GEM. At that point, GEM takes care to
@@ -874,95 +874,101 @@ char *date;</synopsis>
         <title>GEM Function Reference</title>
 !Edrivers/gpu/drm/drm_gem.c
       </sect3>
     </sect2>
     <sect2>
       <title>VMA Offset Manager</title>
 !Pdrivers/gpu/drm/drm_vma_manager.c vma offset manager
 !Edrivers/gpu/drm/drm_vma_manager.c
 !Iinclude/drm/drm_vma_manager.h
     </sect2>
     <sect2 id="drm-prime-support">
       <title>PRIME Buffer Sharing</title>
       <para>
         PRIME is the cross device buffer sharing framework in drm, originally
         created for the OPTIMUS range of multi-gpu platforms. To userspace
         PRIME buffers are dma-buf based file descriptors.
       </para>
       <sect3>
         <title>Overview and Driver Interface</title>
         <para>
           Similar to GEM global names, PRIME file descriptors are
           also used to share buffer objects across processes. They offer
           additional security: as file descriptors must be explicitly sent over
           UNIX domain sockets to be shared between applications, they can't be
           guessed like the globally unique GEM names.
         </para>
         <para>
           Drivers that support the PRIME
           API must set the DRIVER_PRIME bit in the struct
           <structname>drm_driver</structname>
           <structfield>driver_features</structfield> field, and implement the
           <methodname>prime_handle_to_fd</methodname> and
           <methodname>prime_fd_to_handle</methodname> operations.
         </para>
         <para>
           <synopsis>int (*prime_handle_to_fd)(struct drm_device *dev,
                           struct drm_file *file_priv, uint32_t handle,
                           uint32_t flags, int *prime_fd);
 int (*prime_fd_to_handle)(struct drm_device *dev,
                           struct drm_file *file_priv, int prime_fd,
                           uint32_t *handle);</synopsis>
           Those two operations convert a handle to a PRIME file descriptor and
           vice versa. Drivers must use the kernel dma-buf buffer sharing framework
           to manage the PRIME file descriptors. Similar to the mode setting
           API PRIME is agnostic to the underlying buffer object manager, as
           long as handles are 32bit unsigned integers.
         </para>
         <para>
           While non-GEM drivers must implement the operations themselves, GEM
           drivers must use the <function>drm_gem_prime_handle_to_fd</function>
           and <function>drm_gem_prime_fd_to_handle</function> helper functions.
           Those helpers rely on the driver
           <methodname>gem_prime_export</methodname> and
           <methodname>gem_prime_import</methodname> operations to create a dma-buf
           instance from a GEM object (dma-buf exporter role) and to create a GEM
           object from a dma-buf instance (dma-buf importer role).
         </para>
         <para>
           <synopsis>struct dma_buf * (*gem_prime_export)(struct drm_device *dev,
                                struct drm_gem_object *obj,
                                int flags);
 struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev,
                                             struct dma_buf *dma_buf);</synopsis>
           These two operations are mandatory for GEM drivers that support
           PRIME.
         </para>
-      </sect3>
-      <sect3>
-        <title>PRIME Helper Functions</title>
-!Pdrivers/gpu/drm/drm_prime.c PRIME Helpers
       </sect3>
-    </sect2>
-    <sect2>
-      <title>PRIME Function References</title>
+      <sect3>
+        <title>PRIME Helper Functions</title>
+!Pdrivers/gpu/drm/drm_prime.c PRIME Helpers
+      </sect3>
+    </sect2>
+    <sect2>
+      <title>PRIME Function References</title>
 !Edrivers/gpu/drm/drm_prime.c
     </sect2>
     <sect2>
       <title>DRM MM Range Allocator</title>
       <sect3>
         <title>Overview</title>
 !Pdrivers/gpu/drm/drm_mm.c Overview
       </sect3>
       <sect3>
         <title>LRU Scan/Eviction Support</title>
 !Pdrivers/gpu/drm/drm_mm.c lru scan roaster
       </sect3>
     </sect2>
     <sect2>
       <title>DRM MM Range Allocator Function References</title>
 !Edrivers/gpu/drm/drm_mm.c
 !Iinclude/drm/drm_mm.h
     </sect2>
+    <sect2>
+      <title>CMA Helper Functions Reference</title>
+!Pdrivers/gpu/drm/drm_gem_cma_helper.c cma helpers
+!Edrivers/gpu/drm/drm_gem_cma_helper.c
+!Iinclude/drm/drm_gem_cma_helper.h
+    </sect2>
   </sect1>
 
   <!-- Internals: mode setting -->
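
[Editorial aside: as a hedged illustration of the PRIME interface documented above, a GEM-based driver of this era typically wires the operations up as below; foo_driver is a hypothetical driver object, and the generic drm_gem_prime_* defaults additionally rely on further driver hooks (e.g. gem_prime_get_sg_table) not shown here.]

#include <drm/drmP.h>

static struct drm_driver foo_driver = {
	.driver_features    = DRIVER_GEM | DRIVER_PRIME,
	/* Generic helpers manage the dma-buf file descriptors... */
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	/* ...and call back into driver hooks for the exporter and
	 * importer roles described in the text. */
	.gem_prime_export   = drm_gem_prime_export,
	.gem_prime_import   = drm_gem_prime_import,
};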
@@ -996,6 +1002,10 @@ int max_width, max_height;</synopsis>
 !Edrivers/gpu/drm/drm_modes.c
     </sect2>
     <sect2>
+      <title>Atomic Mode Setting Function Reference</title>
+!Edrivers/gpu/drm/drm_atomic.c
+    </sect2>
+    <sect2>
       <title>Frame Buffer Creation</title>
       <synopsis>struct drm_framebuffer *(*fb_create)(struct drm_device *dev,
                                      struct drm_file *file_priv,
@@ -1827,6 +1837,10 @@ void intel_crt_init(struct drm_device *dev)
 !Edrivers/gpu/drm/drm_crtc.c
     </sect2>
     <sect2>
+      <title>KMS Data Structures</title>
+!Iinclude/drm/drm_crtc.h
+    </sect2>
+    <sect2>
       <title>KMS Locking</title>
 !Pdrivers/gpu/drm/drm_modeset_lock.c kms locking
 !Iinclude/drm/drm_modeset_lock.h
@@ -1933,10 +1947,16 @@ void intel_crt_init(struct drm_device *dev)
         and then retrieves a list of modes by calling the connector
         <methodname>get_modes</methodname> helper operation.
       </para>
+      <para>
+        If the helper operation returns no mode, and if the connector status
+        is connector_status_connected, standard VESA DMT modes up to
+        1024x768 are automatically added to the modes list by a call to
+        <function>drm_add_modes_noedid</function>.
+      </para>
       <para>
-        The function filters out modes larger than
+        The function then filters out modes larger than
         <parameter>max_width</parameter> and <parameter>max_height</parameter>
-        if specified. It then calls the optional connector
+        if specified. It finally calls the optional connector
         <methodname>mode_valid</methodname> helper operation for each mode in
         the probed list to check whether the mode is valid for the connector.
       </para>
@@ -2076,12 +2096,20 @@ void intel_crt_init(struct drm_device *dev)
       <synopsis>int (*get_modes)(struct drm_connector *connector);</synopsis>
       <para>
         Fill the connector's <structfield>probed_modes</structfield> list
-        by parsing EDID data with <function>drm_add_edid_modes</function> or
-        calling <function>drm_mode_probed_add</function> directly for every
+        by parsing EDID data with <function>drm_add_edid_modes</function>,
+        adding standard VESA DMT modes with <function>drm_add_modes_noedid</function>,
+        or calling <function>drm_mode_probed_add</function> directly for every
         supported mode and return the number of modes it has detected. This
         operation is mandatory.
       </para>
       <para>
+        Note that the caller function will automatically add standard VESA
+        DMT modes up to 1024x768 if the <methodname>get_modes</methodname>
+        helper operation returns no mode and if the connector status is
+        connector_status_connected. There is no need to call
+        <function>drm_add_edid_modes</function> manually in that case.
+      </para>
+      <para>
         When adding modes manually the driver creates each mode with a call to
         <function>drm_mode_create</function> and must fill the following fields.
         <itemizedlist>
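
[Editorial aside: a minimal sketch of a get_modes implementation combining the helpers named above; foo_read_edid is a hypothetical driver routine that returns a kmalloc'd EDID or NULL.]

static int foo_connector_get_modes(struct drm_connector *connector)
{
	struct edid *edid = foo_read_edid(connector);	/* hypothetical */
	int count;

	if (edid) {
		drm_mode_connector_update_edid_property(connector, edid);
		count = drm_add_edid_modes(connector, edid);
		kfree(edid);
		return count;
	}

	/* No EDID: fall back to standard VESA DMT modes up to 1024x768. */
	return drm_add_modes_noedid(connector, 1024, 768);
}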
@@ -2278,7 +2306,7 @@ void intel_crt_init(struct drm_device *dev)
         <function>drm_helper_probe_single_connector_modes</function>.
       </para>
       <para>
-        When parsing EDID data, <function>drm_add_edid_modes</function> fill the
+        When parsing EDID data, <function>drm_add_edid_modes</function> fills the
         connector <structfield>display_info</structfield>
         <structfield>width_mm</structfield> and
         <structfield>height_mm</structfield> fields. When creating modes
@@ -2316,8 +2344,26 @@ void intel_crt_init(struct drm_device *dev)
       </itemizedlist>
     </sect2>
     <sect2>
+      <title>Atomic Modeset Helper Functions Reference</title>
+      <sect3>
+        <title>Overview</title>
+!Pdrivers/gpu/drm/drm_atomic_helper.c overview
+      </sect3>
+      <sect3>
+        <title>Implementing Asynchronous Atomic Commit</title>
+!Pdrivers/gpu/drm/drm_atomic_helper.c implementing async commit
+      </sect3>
+      <sect3>
+        <title>Atomic State Reset and Initialization</title>
+!Pdrivers/gpu/drm/drm_atomic_helper.c atomic state reset and initialization
+      </sect3>
+!Iinclude/drm/drm_atomic_helper.h
+!Edrivers/gpu/drm/drm_atomic_helper.c
+    </sect2>
+    <sect2>
       <title>Modeset Helper Functions Reference</title>
 !Edrivers/gpu/drm/drm_crtc_helper.c
+!Pdrivers/gpu/drm/drm_crtc_helper.c overview
     </sect2>
     <sect2>
       <title>Output Probing Helper Functions Reference</title>
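
[Editorial aside: a hedged sketch of how a driver might route its mode_config callbacks through the new atomic helper library this hunk documents; foo_fb_create is a hypothetical driver function, and whether a given driver can use the stock check/commit entry points depends on how far its atomic conversion has progressed.]

#include <drm/drm_atomic_helper.h>

static const struct drm_mode_config_funcs foo_mode_config_funcs = {
	.fb_create     = foo_fb_create,		/* hypothetical */
	/* Validate and apply full modeset state via the helpers. */
	.atomic_check  = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};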
@@ -2343,6 +2389,12 @@ void intel_crt_init(struct drm_device *dev)
 !Edrivers/gpu/drm/drm_dp_mst_topology.c
     </sect2>
     <sect2>
+      <title>MIPI DSI Helper Functions Reference</title>
+!Pdrivers/gpu/drm/drm_mipi_dsi.c dsi helpers
+!Iinclude/drm/drm_mipi_dsi.h
+!Edrivers/gpu/drm/drm_mipi_dsi.c
+    </sect2>
+    <sect2>
       <title>EDID Helper Functions Reference</title>
 !Edrivers/gpu/drm/drm_edid.c
     </sect2>
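
[Editorial aside: a hedged skeleton of a MIPI DSI peripheral driver registered against the DSI bus those helpers implement; the foo_panel_* names are hypothetical, and the module_mipi_dsi_driver convenience macro is assumed to be available (otherwise mipi_dsi_driver_register can be called from module init).]

#include <drm/drm_mipi_dsi.h>
#include <linux/module.h>

static int foo_panel_probe(struct mipi_dsi_device *dsi)
{
	/* Describe the link before attaching to the host. */
	dsi->lanes = 4;
	dsi->format = MIPI_DSI_FMT_RGB888;
	dsi->mode_flags = MIPI_DSI_MODE_VIDEO;

	return mipi_dsi_attach(dsi);
}

static int foo_panel_remove(struct mipi_dsi_device *dsi)
{
	return mipi_dsi_detach(dsi);
}

static struct mipi_dsi_driver foo_panel_driver = {
	.probe = foo_panel_probe,
	.remove = foo_panel_remove,
	.driver = {
		.name = "panel-foo",
	},
};
module_mipi_dsi_driver(foo_panel_driver);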
@@ -2371,7 +2423,12 @@ void intel_crt_init(struct drm_device *dev)
     </sect2>
     <sect2>
       <title id="drm-kms-planehelpers">Plane Helper Reference</title>
-!Edrivers/gpu/drm/drm_plane_helper.c Plane Helpers
+!Edrivers/gpu/drm/drm_plane_helper.c
+!Pdrivers/gpu/drm/drm_plane_helper.c overview
+    </sect2>
+    <sect2>
+      <title>Tile group</title>
+!Pdrivers/gpu/drm/drm_crtc.c Tile group
     </sect2>
   </sect1>
 
@@ -2507,8 +2564,8 @@ void intel_crt_init(struct drm_device *dev)
         <td valign="top" >Description/Restrictions</td>
         </tr>
         <tr>
-        <td rowspan="21" valign="top" >DRM</td>
-        <td rowspan="2" valign="top" >Generic</td>
+        <td rowspan="25" valign="top" >DRM</td>
+        <td rowspan="4" valign="top" >Generic</td>
         <td valign="top" >“EDID”</td>
         <td valign="top" >BLOB | IMMUTABLE</td>
         <td valign="top" >0</td>
@@ -2523,6 +2580,20 @@ void intel_crt_init(struct drm_device *dev)
         <td valign="top" >Contains DPMS operation mode value.</td>
         </tr>
         <tr>
+        <td valign="top" >“PATH”</td>
+        <td valign="top" >BLOB | IMMUTABLE</td>
+        <td valign="top" >0</td>
+        <td valign="top" >Connector</td>
+        <td valign="top" >Contains topology path to a connector.</td>
+        </tr>
+        <tr>
+        <td valign="top" >“TILE”</td>
+        <td valign="top" >BLOB | IMMUTABLE</td>
+        <td valign="top" >0</td>
+        <td valign="top" >Connector</td>
+        <td valign="top" >Contains tiling information for a connector.</td>
+        </tr>
+        <tr>
         <td rowspan="1" valign="top" >Plane</td>
         <td valign="top" >“type”</td>
         <td valign="top" >ENUM | IMMUTABLE</td>
@@ -2638,6 +2709,21 @@ void intel_crt_init(struct drm_device *dev)
2638 <td valign="top" >TBD</td> 2709 <td valign="top" >TBD</td>
2639 </tr> 2710 </tr>
2640 <tr> 2711 <tr>
2712 <td rowspan="2" valign="top" >Virtual GPU</td>
2713 <td valign="top" >“suggested X”</td>
2714 <td valign="top" >RANGE</td>
2715 <td valign="top" >Min=0, Max=0xffffffff</td>
2716 <td valign="top" >Connector</td>
 2717 <td valign="top" >Property to suggest an X offset for a connector.</td>
2718 </tr>
2719 <tr>
2720 <td valign="top" >“suggested Y”</td>
2721 <td valign="top" >RANGE</td>
2722 <td valign="top" >Min=0, Max=0xffffffff</td>
2723 <td valign="top" >Connector</td>
 2724 <td valign="top" >Property to suggest a Y offset for a connector.</td>
2725 </tr>
2726 <tr>
2641 <td rowspan="3" valign="top" >Optional</td> 2727 <td rowspan="3" valign="top" >Optional</td>
2642 <td valign="top" >“scaling mode”</td> 2728 <td valign="top" >“scaling mode”</td>
2643 <td valign="top" >ENUM</td> 2729 <td valign="top" >ENUM</td>
@@ -3788,6 +3874,26 @@ int num_ioctls;</synopsis>
3788 those have basic support through the gma500 drm driver. 3874 those have basic support through the gma500 drm driver.
3789 </para> 3875 </para>
3790 <sect1> 3876 <sect1>
3877 <title>Core Driver Infrastructure</title>
3878 <para>
3879 This section covers core driver infrastructure used by both the display
3880 and the GEM parts of the driver.
3881 </para>
3882 <sect2>
3883 <title>Runtime Power Management</title>
3884!Pdrivers/gpu/drm/i915/intel_runtime_pm.c runtime pm
3885!Idrivers/gpu/drm/i915/intel_runtime_pm.c
3886 </sect2>
3887 <sect2>
3888 <title>Interrupt Handling</title>
3889!Pdrivers/gpu/drm/i915/i915_irq.c interrupt handling
3890!Fdrivers/gpu/drm/i915/i915_irq.c intel_irq_init intel_irq_init_hw intel_hpd_init
3891!Fdrivers/gpu/drm/i915/i915_irq.c intel_irq_fini
3892!Fdrivers/gpu/drm/i915/i915_irq.c intel_runtime_pm_disable_interrupts
3893!Fdrivers/gpu/drm/i915/i915_irq.c intel_runtime_pm_enable_interrupts
3894 </sect2>
3895 </sect1>
3896 <sect1>
3791 <title>Display Hardware Handling</title> 3897 <title>Display Hardware Handling</title>
3792 <para> 3898 <para>
3793 This section covers everything related to the display hardware including 3899 This section covers everything related to the display hardware including
@@ -3804,6 +3910,18 @@ int num_ioctls;</synopsis>
3804 </para> 3910 </para>
3805 </sect2> 3911 </sect2>
3806 <sect2> 3912 <sect2>
3913 <title>Frontbuffer Tracking</title>
3914!Pdrivers/gpu/drm/i915/intel_frontbuffer.c frontbuffer tracking
3915!Idrivers/gpu/drm/i915/intel_frontbuffer.c
3916!Fdrivers/gpu/drm/i915/intel_drv.h intel_frontbuffer_flip
3917!Fdrivers/gpu/drm/i915/i915_gem.c i915_gem_track_fb
3918 </sect2>
3919 <sect2>
3920 <title>Display FIFO Underrun Reporting</title>
3921!Pdrivers/gpu/drm/i915/intel_fifo_underrun.c fifo underrun handling
3922!Idrivers/gpu/drm/i915/intel_fifo_underrun.c
3923 </sect2>
3924 <sect2>
3807 <title>Plane Configuration</title> 3925 <title>Plane Configuration</title>
3808 <para> 3926 <para>
3809 This section covers plane configuration and composition with the 3927 This section covers plane configuration and composition with the
@@ -3823,6 +3941,16 @@ int num_ioctls;</synopsis>
3823 </para> 3941 </para>
3824 </sect2> 3942 </sect2>
3825 <sect2> 3943 <sect2>
3944 <title>High Definition Audio</title>
3945!Pdrivers/gpu/drm/i915/intel_audio.c High Definition Audio over HDMI and Display Port
3946!Idrivers/gpu/drm/i915/intel_audio.c
3947 </sect2>
3948 <sect2>
 3949 <title>Panel Self Refresh (PSR/SRD)</title>
3950!Pdrivers/gpu/drm/i915/intel_psr.c Panel Self Refresh (PSR/SRD)
3951!Idrivers/gpu/drm/i915/intel_psr.c
3952 </sect2>
3953 <sect2>
3826 <title>DPIO</title> 3954 <title>DPIO</title>
3827!Pdrivers/gpu/drm/i915/i915_reg.h DPIO 3955!Pdrivers/gpu/drm/i915/i915_reg.h DPIO
3828 <table id="dpiox2"> 3956 <table id="dpiox2">
@@ -3931,6 +4059,28 @@ int num_ioctls;</synopsis>
3931!Idrivers/gpu/drm/i915/intel_lrc.c 4059!Idrivers/gpu/drm/i915/intel_lrc.c
3932 </sect2> 4060 </sect2>
3933 </sect1> 4061 </sect1>
4062
4063 <sect1>
4064 <title> Tracing </title>
4065 <para>
 4066 This section covers everything related to the tracepoints implemented in
 4067 the i915 driver.
4068 </para>
4069 <sect2>
4070 <title> i915_ppgtt_create and i915_ppgtt_release </title>
4071!Pdrivers/gpu/drm/i915/i915_trace.h i915_ppgtt_create and i915_ppgtt_release tracepoints
4072 </sect2>
4073 <sect2>
4074 <title> i915_context_create and i915_context_free </title>
4075!Pdrivers/gpu/drm/i915/i915_trace.h i915_context_create and i915_context_free tracepoints
4076 </sect2>
4077 <sect2>
4078 <title> switch_mm </title>
4079!Pdrivers/gpu/drm/i915/i915_trace.h switch_mm tracepoint
4080 </sect2>
4081 </sect1>
4082
3934 </chapter> 4083 </chapter>
4084!Cdrivers/gpu/drm/i915/i915_irq.c
3935</part> 4085</part>
3936</book> 4086</book>
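
The "suggested X"/"suggested Y" connector properties documented in the table above are plain RANGE properties, so userspace can read them through the generic property interfaces. A minimal sketch, assuming a libdrm environment and an already-known connector id; everything except the two property strings is illustrative:

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <xf86drmMode.h>

	/* Print the "suggested X"/"suggested Y" placement hints, if exposed. */
	static void print_suggested_offsets(int fd, uint32_t connector_id)
	{
		drmModeObjectProperties *props =
			drmModeObjectGetProperties(fd, connector_id,
						   DRM_MODE_OBJECT_CONNECTOR);
		uint32_t i;

		if (!props)
			return;
		for (i = 0; i < props->count_props; i++) {
			drmModePropertyRes *prop =
				drmModeGetProperty(fd, props->props[i]);

			if (!prop)
				continue;
			if (!strcmp(prop->name, "suggested X") ||
			    !strcmp(prop->name, "suggested Y"))
				printf("%s = %" PRIu64 "\n", prop->name,
				       props->prop_values[i]);
			drmModeFreeProperty(prop);
		}
		drmModeFreeObjectProperties(props);
	}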
diff --git a/Documentation/devicetree/bindings/staging/imx-drm/fsl-imx-drm.txt b/Documentation/devicetree/bindings/drm/imx/fsl-imx-drm.txt
index e75f0e549fff..e75f0e549fff 100644
--- a/Documentation/devicetree/bindings/staging/imx-drm/fsl-imx-drm.txt
+++ b/Documentation/devicetree/bindings/drm/imx/fsl-imx-drm.txt
diff --git a/Documentation/devicetree/bindings/staging/imx-drm/hdmi.txt b/Documentation/devicetree/bindings/drm/imx/hdmi.txt
index 1b756cf9afb0..1b756cf9afb0 100644
--- a/Documentation/devicetree/bindings/staging/imx-drm/hdmi.txt
+++ b/Documentation/devicetree/bindings/drm/imx/hdmi.txt
diff --git a/Documentation/devicetree/bindings/staging/imx-drm/ldb.txt b/Documentation/devicetree/bindings/drm/imx/ldb.txt
index 443bcb6134d5..443bcb6134d5 100644
--- a/Documentation/devicetree/bindings/staging/imx-drm/ldb.txt
+++ b/Documentation/devicetree/bindings/drm/imx/ldb.txt
diff --git a/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt b/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
index b48f4ef31d93..4c32ef0b7db8 100644
--- a/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
+++ b/Documentation/devicetree/bindings/gpu/nvidia,tegra20-host1x.txt
@@ -191,6 +191,8 @@ of the following host1x client modules:
191 - nvidia,hpd-gpio: specifies a GPIO used for hotplug detection 191 - nvidia,hpd-gpio: specifies a GPIO used for hotplug detection
192 - nvidia,edid: supplies a binary EDID blob 192 - nvidia,edid: supplies a binary EDID blob
193 - nvidia,panel: phandle of a display panel 193 - nvidia,panel: phandle of a display panel
194 - nvidia,ganged-mode: contains a phandle to a second DSI controller to gang
195 up with in order to support up to 8 data lanes
194 196
195- sor: serial output resource 197- sor: serial output resource
196 198
diff --git a/Documentation/devicetree/bindings/gpu/st,stih4xx.txt b/Documentation/devicetree/bindings/gpu/st,stih4xx.txt
index 2d150c311a05..c99eb34e640b 100644
--- a/Documentation/devicetree/bindings/gpu/st,stih4xx.txt
+++ b/Documentation/devicetree/bindings/gpu/st,stih4xx.txt
@@ -68,7 +68,7 @@ STMicroelectronics stih4xx platforms
 68 number of clocks may depend on the SoC type. 68 number of clocks may depend on the SoC type.
69 - clock-names: names of the clocks listed in clocks property in the same 69 - clock-names: names of the clocks listed in clocks property in the same
70 order. 70 order.
71 - hdmi,hpd-gpio: gpio id to detect if an hdmi cable is plugged or not. 71 - ddc: phandle of an I2C controller used for DDC EDID probing
72 72
73sti-hda: 73sti-hda:
74 Required properties: 74 Required properties:
@@ -83,6 +83,22 @@ sti-hda:
83 - clock-names: names of the clocks listed in clocks property in the same 83 - clock-names: names of the clocks listed in clocks property in the same
84 order. 84 order.
85 85
86sti-hqvdp:
87 must be a child of sti-display-subsystem
88 Required properties:
89 - compatible: "st,stih<chip>-hqvdp"
90 - reg: Physical base address of the IP registers and length of memory mapped region.
 91 - clocks: from common clock binding: handles to the clocks needed by the
 92 hardware IP; the number of clocks may depend on the SoC type.
93 See ../clocks/clock-bindings.txt for details.
94 - clock-names: names of the clocks listed in clocks property in the same
95 order.
96 - resets: resets to be used by the device
97 See ../reset/reset.txt for details.
98 - reset-names: names of the resets listed in resets property in the same
99 order.
100 - st,vtg: phandle on vtg main device node.
101
86Example: 102Example:
87 103
88/ { 104/ {
@@ -173,7 +189,6 @@ Example:
173 interrupt-names = "irq"; 189 interrupt-names = "irq";
174 clock-names = "pix", "tmds", "phy", "audio"; 190 clock-names = "pix", "tmds", "phy", "audio";
175 clocks = <&clockgen_c_vcc CLK_S_PIX_HDMI>, <&clockgen_c_vcc CLK_S_TMDS_HDMI>, <&clockgen_c_vcc CLK_S_HDMI_REJECT_PLL>, <&clockgen_b1 CLK_S_PCM_0>; 191 clocks = <&clockgen_c_vcc CLK_S_PIX_HDMI>, <&clockgen_c_vcc CLK_S_TMDS_HDMI>, <&clockgen_c_vcc CLK_S_HDMI_REJECT_PLL>, <&clockgen_b1 CLK_S_PCM_0>;
176 hdmi,hpd-gpio = <&PIO2 5>;
177 }; 192 };
178 193
179 sti-hda@fe85a000 { 194 sti-hda@fe85a000 {
@@ -184,6 +199,16 @@ Example:
184 clocks = <&clockgen_c_vcc CLK_S_PIX_HD>, <&clockgen_c_vcc CLK_S_HDDAC>; 199 clocks = <&clockgen_c_vcc CLK_S_PIX_HD>, <&clockgen_c_vcc CLK_S_HDDAC>;
185 }; 200 };
186 }; 201 };
202
203 sti-hqvdp@9c000000 {
204 compatible = "st,stih407-hqvdp";
205 reg = <0x9C00000 0x100000>;
206 clock-names = "hqvdp", "pix_main";
207 clocks = <&clk_s_c0_flexgen CLK_MAIN_DISP>, <&clk_s_d2_flexgen CLK_PIX_MAIN_DISP>;
208 reset-names = "hqvdp";
209 resets = <&softreset STIH407_HDQVDP_SOFTRESET>;
210 st,vtg = <&vtg_main>;
211 };
187 }; 212 };
188 ... 213 ...
189}; 214};
diff --git a/Documentation/devicetree/bindings/panel/auo,b116xw03.txt b/Documentation/devicetree/bindings/panel/auo,b116xw03.txt
new file mode 100644
index 000000000000..690d0a568ef3
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/auo,b116xw03.txt
@@ -0,0 +1,7 @@
1AU Optronics Corporation 11.6" HD (1366x768) color TFT-LCD panel
2
3Required properties:
4- compatible: should be "auo,b116xw03"
5
6This binding is compatible with the simple-panel binding, which is specified
7in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/panel/hannstar,hsd070pww1.txt b/Documentation/devicetree/bindings/panel/hannstar,hsd070pww1.txt
new file mode 100644
index 000000000000..7da1d5c038ff
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/hannstar,hsd070pww1.txt
@@ -0,0 +1,7 @@
1HannStar Display Corp. HSD070PWW1 7.0" WXGA TFT LCD panel
2
3Required properties:
4- compatible: should be "hannstar,hsd070pww1"
5
6This binding is compatible with the simple-panel binding, which is specified
7in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/panel/hit,tx23d38vm0caa.txt b/Documentation/devicetree/bindings/panel/hit,tx23d38vm0caa.txt
new file mode 100644
index 000000000000..04caaae19af6
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/hit,tx23d38vm0caa.txt
@@ -0,0 +1,7 @@
 1Hitachi Ltd. 9" WVGA (800x480) TFT LCD panel
2
3Required properties:
4- compatible: should be "hit,tx23d38vm0caa"
5
6This binding is compatible with the simple-panel binding, which is specified
7in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/panel/innolux,g121i1-l01.txt b/Documentation/devicetree/bindings/panel/innolux,g121i1-l01.txt
new file mode 100644
index 000000000000..2743b07cd2f2
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/innolux,g121i1-l01.txt
@@ -0,0 +1,7 @@
1Innolux Corporation 12.1" WXGA (1280x800) TFT LCD panel
2
3Required properties:
4- compatible: should be "innolux,g121i1-l01"
5
6This binding is compatible with the simple-panel binding, which is specified
7in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/panel/sharp,lq101r1sx01.txt b/Documentation/devicetree/bindings/panel/sharp,lq101r1sx01.txt
new file mode 100644
index 000000000000..f522bb8e47e1
--- /dev/null
+++ b/Documentation/devicetree/bindings/panel/sharp,lq101r1sx01.txt
@@ -0,0 +1,49 @@
1Sharp Microelectronics 10.1" WQXGA TFT LCD panel
2
3This panel requires a dual-channel DSI host to operate. It supports two modes:
4- left-right: each channel drives the left or right half of the screen
5- even-odd: each channel drives the even or odd lines of the screen
6
7Each of the DSI channels controls a separate DSI peripheral. The peripheral
8driven by the first link (DSI-LINK1), left or even, is considered the primary
9peripheral and controls the device. The 'link2' property contains a phandle
10to the peripheral driven by the second link (DSI-LINK2, right or odd).
11
12Note that in video mode the DSI-LINK1 interface always provides the left/even
13pixels and DSI-LINK2 always provides the right/odd pixels. In command mode it
14is possible to program either link to drive the left/even or right/odd pixels
15but for the sake of consistency this binding assumes that the same assignment
16is chosen as for video mode.
17
18Required properties:
19- compatible: should be "sharp,lq101r1sx01"
20- reg: DSI virtual channel of the peripheral
21
22Required properties (for DSI-LINK1 only):
23- link2: phandle to the DSI peripheral on the secondary link. Note that the
24 presence of this property marks the containing node as DSI-LINK1.
25- power-supply: phandle of the regulator that provides the supply voltage
26
27Optional properties (for DSI-LINK1 only):
28- backlight: phandle of the backlight device attached to the panel
29
30Example:
31
32 dsi@54300000 {
33 panel: panel@0 {
34 compatible = "sharp,lq101r1sx01";
35 reg = <0>;
36
37 link2 = <&secondary>;
38
39 power-supply = <...>;
40 backlight = <...>;
41 };
42 };
43
44 dsi@54400000 {
45 secondary: panel@0 {
46 compatible = "sharp,lq101r1sx01";
47 reg = <0>;
48 };
49 };
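
Since the presence of the link2 property is what marks a node as DSI-LINK1, a panel driver can use it to tell the two peripherals apart. A hedged sketch under that assumption; the function name is illustrative:

	#include <linux/of.h>

	/* DSI-LINK1 carries the "link2" phandle; DSI-LINK2 does not. */
	static bool sharp_panel_is_primary(struct device_node *np)
	{
		struct device_node *link2 = of_parse_phandle(np, "link2", 0);

		if (!link2)
			return false;	/* secondary peripheral (DSI-LINK2) */
		of_node_put(link2);
		return true;		/* primary peripheral (DSI-LINK1) */
	}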
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index cc6151c431c8..423d47418e72 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -66,8 +66,10 @@ gmt Global Mixed-mode Technology, Inc.
66google Google, Inc. 66google Google, Inc.
67gumstix Gumstix, Inc. 67gumstix Gumstix, Inc.
68gw Gateworks Corporation 68gw Gateworks Corporation
69hannstar HannStar Display Corporation
69haoyu Haoyu Microelectronic Co. Ltd. 70haoyu Haoyu Microelectronic Co. Ltd.
70hisilicon Hisilicon Limited. 71hisilicon Hisilicon Limited.
72hit Hitachi Ltd.
71honeywell Honeywell 73honeywell Honeywell
72hp Hewlett Packard 74hp Hewlett Packard
73i2se I2SE GmbH 75i2se I2SE GmbH
diff --git a/Documentation/devicetree/bindings/video/adi,adv7511.txt b/Documentation/devicetree/bindings/video/adi,adv7511.txt
new file mode 100644
index 000000000000..96c25ee01501
--- /dev/null
+++ b/Documentation/devicetree/bindings/video/adi,adv7511.txt
@@ -0,0 +1,88 @@
 1Analog Devices ADV7511(W)/13 HDMI Encoders
 2------------------------------------------
3
4The ADV7511, ADV7511W and ADV7513 are HDMI audio and video transmitters
5compatible with HDMI 1.4 and DVI 1.0. They support color space conversion,
6S/PDIF, CEC and HDCP.
7
8Required properties:
9
10- compatible: Should be one of "adi,adv7511", "adi,adv7511w" or "adi,adv7513"
11- reg: I2C slave address
12
13The ADV7511 supports a large number of input data formats that differ by their
14color depth, color format, clock mode, bit justification and random
15arrangement of components on the data bus. The combination of the following
16properties describe the input and map directly to the video input tables of the
17ADV7511 datasheet that document all the supported combinations.
18
19- adi,input-depth: Number of bits per color component at the input (8, 10 or
20 12).
21- adi,input-colorspace: The input color space, one of "rgb", "yuv422" or
22 "yuv444".
23- adi,input-clock: The input clock type, one of "1x" (one clock cycle per
24 pixel), "2x" (two clock cycles per pixel), "ddr" (one clock cycle per pixel,
25 data driven on both edges).
26
27The following input format properties are required except in "rgb 1x" and
28"yuv444 1x" modes, in which case they must not be specified.
29
30- adi,input-style: The input components arrangement variant (1, 2 or 3), as
31 listed in the input format tables in the datasheet.
32- adi,input-justification: The input bit justification ("left", "evenly",
33 "right").
34
35Optional properties:
36
37- interrupts: Specifier for the ADV7511 interrupt
38- pd-gpios: Specifier for the GPIO connected to the power down signal
39
40- adi,clock-delay: Video data clock delay relative to the pixel clock, in ps
41 (-1200 ps .. 1600 ps). Defaults to no delay.
42- adi,embedded-sync: The input uses synchronization signals embedded in the
43 data stream (similar to BT.656). Defaults to separate H/V synchronization
44 signals.
45
46Required nodes:
47
48The ADV7511 has two video ports. Their connections are modelled using the OF
49graph bindings specified in Documentation/devicetree/bindings/graph.txt.
50
51- Video port 0 for the RGB or YUV input
52- Video port 1 for the HDMI output
53
54
55Example
56-------
57
58 adv7511w: hdmi@39 {
59 compatible = "adi,adv7511w";
 60 reg = <0x39>;
61 interrupt-parent = <&gpio3>;
62 interrupts = <29 IRQ_TYPE_EDGE_FALLING>;
63
64 adi,input-depth = <8>;
65 adi,input-colorspace = "rgb";
66 adi,input-clock = "1x";
67 adi,input-style = <1>;
68 adi,input-justification = "evenly";
69
70 ports {
71 #address-cells = <1>;
72 #size-cells = <0>;
73
74 port@0 {
75 reg = <0>;
76 adv7511w_in: endpoint {
77 remote-endpoint = <&dpi_out>;
78 };
79 };
80
81 port@1 {
82 reg = <1>;
83 adv7511_out: endpoint {
84 remote-endpoint = <&hdmi_connector_in>;
85 };
86 };
87 };
88 };
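
A hedged sketch of how a driver might fetch the adi,* input format properties described above; the function name and error handling are illustrative:

	#include <linux/of.h>

	static int adv7511_parse_input_format(struct device_node *np)
	{
		const char *colorspace, *clock;
		u32 depth;

		if (of_property_read_u32(np, "adi,input-depth", &depth))
			return -EINVAL;
		if (of_property_read_string(np, "adi,input-colorspace",
					    &colorspace))
			return -EINVAL;
		if (of_property_read_string(np, "adi,input-clock", &clock))
			return -EINVAL;

		/*
		 * "adi,input-style" and "adi,input-justification" are only
		 * present outside the "rgb 1x" and "yuv444 1x" modes.
		 */
		return 0;
	}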
diff --git a/Documentation/devicetree/bindings/video/exynos_dsim.txt b/Documentation/devicetree/bindings/video/exynos_dsim.txt
index e74243b4b317..ca2b4aacd9af 100644
--- a/Documentation/devicetree/bindings/video/exynos_dsim.txt
+++ b/Documentation/devicetree/bindings/video/exynos_dsim.txt
@@ -4,6 +4,7 @@ Required properties:
4 - compatible: value should be one of the following 4 - compatible: value should be one of the following
5 "samsung,exynos3250-mipi-dsi" /* for Exynos3250/3472 SoCs */ 5 "samsung,exynos3250-mipi-dsi" /* for Exynos3250/3472 SoCs */
6 "samsung,exynos4210-mipi-dsi" /* for Exynos4 SoCs */ 6 "samsung,exynos4210-mipi-dsi" /* for Exynos4 SoCs */
7 "samsung,exynos4415-mipi-dsi" /* for Exynos4415 SoC */
7 "samsung,exynos5410-mipi-dsi" /* for Exynos5410/5420/5440 SoCs */ 8 "samsung,exynos5410-mipi-dsi" /* for Exynos5410/5420/5440 SoCs */
8 - reg: physical base address and length of the registers set for the device 9 - reg: physical base address and length of the registers set for the device
9 - interrupts: should contain DSI interrupt 10 - interrupts: should contain DSI interrupt
diff --git a/Documentation/devicetree/bindings/video/rockchip-drm.txt b/Documentation/devicetree/bindings/video/rockchip-drm.txt
new file mode 100644
index 000000000000..7fff582495a2
--- /dev/null
+++ b/Documentation/devicetree/bindings/video/rockchip-drm.txt
@@ -0,0 +1,19 @@
1Rockchip DRM master device
2================================
3
4The Rockchip DRM master device is a virtual device needed to list all
5vop devices or other display interface nodes that comprise the
6graphics subsystem.
7
8Required properties:
9- compatible: Should be "rockchip,display-subsystem"
 10- ports: Should contain a list of phandles pointing to the display interface
 11 ports of vop devices. vop devices are defined in
12 Documentation/devicetree/bindings/video/rockchip-vop.txt
13
 14Example:
15
16display-subsystem {
17 compatible = "rockchip,display-subsystem";
18 ports = <&vopl_out>, <&vopb_out>;
19};
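
A hedged sketch of how the master driver might walk the ports list; the function name is illustrative, and real code would hand each node to the component framework:

	#include <linux/of.h>

	static void rockchip_drm_walk_ports(struct device_node *np)
	{
		struct device_node *port;
		int i;

		for (i = 0;; i++) {
			port = of_parse_phandle(np, "ports", i);
			if (!port)
				break;
			/* each phandle is a vop display interface port */
			of_node_put(port);
		}
	}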
diff --git a/Documentation/devicetree/bindings/video/rockchip-vop.txt b/Documentation/devicetree/bindings/video/rockchip-vop.txt
new file mode 100644
index 000000000000..d15351f2313d
--- /dev/null
+++ b/Documentation/devicetree/bindings/video/rockchip-vop.txt
@@ -0,0 +1,58 @@
 1Device-tree bindings for the Rockchip SoC display controller (VOP)
2
3VOP (Visual Output Processor) is the Display Controller for the Rockchip
4series of SoCs which transfers the image data from a video memory
5buffer to an external LCD interface.
6
7Required properties:
8- compatible: value should be one of the following
9 "rockchip,rk3288-vop";
10
11- interrupts: should contain a list of all VOP IP block interrupts in the
12 order: VSYNC, LCD_SYSTEM. The interrupt specifier
13 format depends on the interrupt controller used.
14
15- clocks: must include clock specifiers corresponding to entries in the
16 clock-names property.
17
18- clock-names: Must contain
 19 aclk_vop: for DDR buffer transfers.
 20 hclk_vop: for the AHB bus used to read/write the PHY registers.
21 dclk_vop: pixel clock.
22
23- resets: Must contain an entry for each entry in reset-names.
24 See ../reset/reset.txt for details.
25- reset-names: Must include the following entries:
26 - axi
27 - ahb
28 - dclk
29
 30- iommus: required, phandle to the IOMMU node
31
32- port: A port node with endpoint definitions as defined in
33 Documentation/devicetree/bindings/media/video-interfaces.txt.
34
35Example:
36SoC specific DT entry:
37 vopb: vopb@ff930000 {
38 compatible = "rockchip,rk3288-vop";
39 reg = <0xff930000 0x19c>;
40 interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
41 clocks = <&cru ACLK_VOP0>, <&cru DCLK_VOP0>, <&cru HCLK_VOP0>;
42 clock-names = "aclk_vop", "dclk_vop", "hclk_vop";
43 resets = <&cru SRST_LCDC1_AXI>, <&cru SRST_LCDC1_AHB>, <&cru SRST_LCDC1_DCLK>;
44 reset-names = "axi", "ahb", "dclk";
45 iommus = <&vopb_mmu>;
46 vopb_out: port {
47 #address-cells = <1>;
48 #size-cells = <0>;
49 vopb_out_edp: endpoint@0 {
50 reg = <0>;
51 remote-endpoint=<&edp_in_vopb>;
52 };
53 vopb_out_hdmi: endpoint@1 {
54 reg = <1>;
55 remote-endpoint=<&hdmi_in_vopb>;
56 };
57 };
58 };
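
The three clock-names entries map directly onto named clock lookups in the driver. A hedged sketch with simplified error handling:

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/err.h>

	static int vop_get_clocks(struct device *dev)
	{
		struct clk *aclk = devm_clk_get(dev, "aclk_vop"); /* DDR transfers */
		struct clk *hclk = devm_clk_get(dev, "hclk_vop"); /* AHB register bus */
		struct clk *dclk = devm_clk_get(dev, "dclk_vop"); /* pixel clock */

		if (IS_ERR(aclk) || IS_ERR(hclk) || IS_ERR(dclk))
			return -ENODEV;	/* simplified; real code inspects each */
		return 0;
	}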
diff --git a/Documentation/devicetree/bindings/video/samsung-fimd.txt b/Documentation/devicetree/bindings/video/samsung-fimd.txt
index 4e6c77c85546..cf1af6371021 100644
--- a/Documentation/devicetree/bindings/video/samsung-fimd.txt
+++ b/Documentation/devicetree/bindings/video/samsung-fimd.txt
@@ -11,6 +11,7 @@ Required properties:
11 "samsung,s5pv210-fimd"; /* for S5PV210 SoC */ 11 "samsung,s5pv210-fimd"; /* for S5PV210 SoC */
12 "samsung,exynos3250-fimd"; /* for Exynos3250/3472 SoCs */ 12 "samsung,exynos3250-fimd"; /* for Exynos3250/3472 SoCs */
13 "samsung,exynos4210-fimd"; /* for Exynos4 SoCs */ 13 "samsung,exynos4210-fimd"; /* for Exynos4 SoCs */
14 "samsung,exynos4415-fimd"; /* for Exynos4415 SoC */
14 "samsung,exynos5250-fimd"; /* for Exynos5 SoCs */ 15 "samsung,exynos5250-fimd"; /* for Exynos5 SoCs */
15 16
16- reg: physical base address and length of the FIMD registers set. 17- reg: physical base address and length of the FIMD registers set.
diff --git a/MAINTAINERS b/MAINTAINERS
index fdffe962a16a..c690b5a0d7b7 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -618,6 +618,16 @@ S: Maintained
618F: drivers/iommu/amd_iommu*.[ch] 618F: drivers/iommu/amd_iommu*.[ch]
619F: include/linux/amd-iommu.h 619F: include/linux/amd-iommu.h
620 620
621AMD KFD
622M: Oded Gabbay <oded.gabbay@amd.com>
623L: dri-devel@lists.freedesktop.org
624T: git git://people.freedesktop.org/~gabbayo/linux.git
625S: Supported
626F: drivers/gpu/drm/amd/amdkfd/
627F: drivers/gpu/drm/radeon/radeon_kfd.c
628F: drivers/gpu/drm/radeon/radeon_kfd.h
629F: include/uapi/linux/kfd_ioctl.h
630
621AMD MICROCODE UPDATE SUPPORT 631AMD MICROCODE UPDATE SUPPORT
622M: Andreas Herrmann <herrmann.der.user@googlemail.com> 632M: Andreas Herrmann <herrmann.der.user@googlemail.com>
623L: amd64-microcode@amd64.org 633L: amd64-microcode@amd64.org
@@ -3297,6 +3307,13 @@ F: drivers/gpu/drm/exynos/
3297F: include/drm/exynos* 3307F: include/drm/exynos*
3298F: include/uapi/drm/exynos* 3308F: include/uapi/drm/exynos*
3299 3309
3310DRM DRIVERS FOR FREESCALE IMX
3311M: Philipp Zabel <p.zabel@pengutronix.de>
3312L: dri-devel@lists.freedesktop.org
3313S: Maintained
3314F: drivers/gpu/drm/imx/
3315F: Documentation/devicetree/bindings/drm/imx/
3316
3300DRM DRIVERS FOR NVIDIA TEGRA 3317DRM DRIVERS FOR NVIDIA TEGRA
3301M: Thierry Reding <thierry.reding@gmail.com> 3318M: Thierry Reding <thierry.reding@gmail.com>
3302M: Terje Bergström <tbergstrom@nvidia.com> 3319M: Terje Bergström <tbergstrom@nvidia.com>
diff --git a/arch/arm/mach-shmobile/board-lager.c b/arch/arm/mach-shmobile/board-lager.c
index b47262afb240..f8197eb6e566 100644
--- a/arch/arm/mach-shmobile/board-lager.c
+++ b/arch/arm/mach-shmobile/board-lager.c
@@ -32,7 +32,6 @@
32#include <linux/pinctrl/machine.h> 32#include <linux/pinctrl/machine.h>
33#include <linux/platform_data/camera-rcar.h> 33#include <linux/platform_data/camera-rcar.h>
34#include <linux/platform_data/gpio-rcar.h> 34#include <linux/platform_data/gpio-rcar.h>
35#include <linux/platform_data/rcar-du.h>
36#include <linux/platform_data/usb-rcar-gen2-phy.h> 35#include <linux/platform_data/usb-rcar-gen2-phy.h>
37#include <linux/platform_device.h> 36#include <linux/platform_device.h>
38#include <linux/phy.h> 37#include <linux/phy.h>
@@ -83,61 +82,6 @@
83 * 82 *
84 */ 83 */
85 84
86/* DU */
87static struct rcar_du_encoder_data lager_du_encoders[] = {
88 {
89 .type = RCAR_DU_ENCODER_VGA,
90 .output = RCAR_DU_OUTPUT_DPAD0,
91 }, {
92 .type = RCAR_DU_ENCODER_NONE,
93 .output = RCAR_DU_OUTPUT_LVDS1,
94 .connector.lvds.panel = {
95 .width_mm = 210,
96 .height_mm = 158,
97 .mode = {
98 .pixelclock = 65000000,
99 .hactive = 1024,
100 .hfront_porch = 20,
101 .hback_porch = 160,
102 .hsync_len = 136,
103 .vactive = 768,
104 .vfront_porch = 3,
105 .vback_porch = 29,
106 .vsync_len = 6,
107 },
108 },
109 },
110};
111
112static const struct rcar_du_platform_data lager_du_pdata __initconst = {
113 .encoders = lager_du_encoders,
114 .num_encoders = ARRAY_SIZE(lager_du_encoders),
115};
116
117static const struct resource du_resources[] __initconst = {
118 DEFINE_RES_MEM(0xfeb00000, 0x70000),
119 DEFINE_RES_MEM_NAMED(0xfeb90000, 0x1c, "lvds.0"),
120 DEFINE_RES_MEM_NAMED(0xfeb94000, 0x1c, "lvds.1"),
121 DEFINE_RES_IRQ(gic_spi(256)),
122 DEFINE_RES_IRQ(gic_spi(268)),
123 DEFINE_RES_IRQ(gic_spi(269)),
124};
125
126static void __init lager_add_du_device(void)
127{
128 struct platform_device_info info = {
129 .name = "rcar-du-r8a7790",
130 .id = -1,
131 .res = du_resources,
132 .num_res = ARRAY_SIZE(du_resources),
133 .data = &lager_du_pdata,
134 .size_data = sizeof(lager_du_pdata),
135 .dma_mask = DMA_BIT_MASK(32),
136 };
137
138 platform_device_register_full(&info);
139}
140
141/* LEDS */ 85/* LEDS */
142static struct gpio_led lager_leds[] = { 86static struct gpio_led lager_leds[] = {
143 { 87 {
@@ -800,8 +744,6 @@ static void __init lager_add_standard_devices(void)
800 744
801 platform_device_register_full(&ether_info); 745 platform_device_register_full(&ether_info);
802 746
803 lager_add_du_device();
804
805 platform_device_register_resndata(NULL, "qspi", 0, 747 platform_device_register_resndata(NULL, "qspi", 0,
806 qspi_resources, 748 qspi_resources,
807 ARRAY_SIZE(qspi_resources), 749 ARRAY_SIZE(qspi_resources),
diff --git a/arch/arm/mach-shmobile/board-marzen.c b/arch/arm/mach-shmobile/board-marzen.c
index 994dc7d86ae2..598f704f76ae 100644
--- a/arch/arm/mach-shmobile/board-marzen.c
+++ b/arch/arm/mach-shmobile/board-marzen.c
@@ -27,7 +27,6 @@
27#include <linux/pinctrl/machine.h> 27#include <linux/pinctrl/machine.h>
28#include <linux/platform_data/camera-rcar.h> 28#include <linux/platform_data/camera-rcar.h>
29#include <linux/platform_data/gpio-rcar.h> 29#include <linux/platform_data/gpio-rcar.h>
30#include <linux/platform_data/rcar-du.h>
31#include <linux/platform_data/usb-rcar-phy.h> 30#include <linux/platform_data/usb-rcar-phy.h>
32#include <linux/regulator/fixed.h> 31#include <linux/regulator/fixed.h>
33#include <linux/regulator/machine.h> 32#include <linux/regulator/machine.h>
@@ -171,62 +170,6 @@ static struct platform_device hspi_device = {
171 .num_resources = ARRAY_SIZE(hspi_resources), 170 .num_resources = ARRAY_SIZE(hspi_resources),
172}; 171};
173 172
174/*
175 * DU
176 *
177 * The panel only specifies the [hv]display and [hv]total values. The position
178 * and width of the sync pulses don't matter, they're copied from VESA timings.
179 */
180static struct rcar_du_encoder_data du_encoders[] = {
181 {
182 .type = RCAR_DU_ENCODER_VGA,
183 .output = RCAR_DU_OUTPUT_DPAD0,
184 }, {
185 .type = RCAR_DU_ENCODER_LVDS,
186 .output = RCAR_DU_OUTPUT_DPAD1,
187 .connector.lvds.panel = {
188 .width_mm = 210,
189 .height_mm = 158,
190 .mode = {
191 .pixelclock = 65000000,
192 .hactive = 1024,
193 .hfront_porch = 20,
194 .hback_porch = 160,
195 .hsync_len = 136,
196 .vactive = 768,
197 .vfront_porch = 3,
198 .vback_porch = 29,
199 .vsync_len = 6,
200 },
201 },
202 },
203};
204
205static const struct rcar_du_platform_data du_pdata __initconst = {
206 .encoders = du_encoders,
207 .num_encoders = ARRAY_SIZE(du_encoders),
208};
209
210static const struct resource du_resources[] __initconst = {
211 DEFINE_RES_MEM(0xfff80000, 0x40000),
212 DEFINE_RES_IRQ(gic_iid(0x3f)),
213};
214
215static void __init marzen_add_du_device(void)
216{
217 struct platform_device_info info = {
218 .name = "rcar-du-r8a7779",
219 .id = -1,
220 .res = du_resources,
221 .num_res = ARRAY_SIZE(du_resources),
222 .data = &du_pdata,
223 .size_data = sizeof(du_pdata),
224 .dma_mask = DMA_BIT_MASK(32),
225 };
226
227 platform_device_register_full(&info);
228}
229
230/* LEDS */ 173/* LEDS */
231static struct gpio_led marzen_leds[] = { 174static struct gpio_led marzen_leds[] = {
232 { 175 {
@@ -385,7 +328,6 @@ static void __init marzen_init(void)
385 platform_device_register_full(&vin1_info); 328 platform_device_register_full(&vin1_info);
386 platform_device_register_full(&vin3_info); 329 platform_device_register_full(&vin3_info);
387 platform_add_devices(marzen_devices, ARRAY_SIZE(marzen_devices)); 330 platform_add_devices(marzen_devices, ARRAY_SIZE(marzen_devices));
388 marzen_add_du_device();
389} 331}
390 332
391static const char *marzen_boards_compat_dt[] __initdata = { 333static const char *marzen_boards_compat_dt[] __initdata = {
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 2e1a6853e00c..fe9f0b79a18b 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -455,6 +455,23 @@ struct intel_stolen_funcs {
455 u32 (*base)(int num, int slot, int func, size_t size); 455 u32 (*base)(int num, int slot, int func, size_t size);
456}; 456};
457 457
458static size_t __init gen9_stolen_size(int num, int slot, int func)
459{
460 u16 gmch_ctrl;
461
462 gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL);
463 gmch_ctrl >>= BDW_GMCH_GMS_SHIFT;
464 gmch_ctrl &= BDW_GMCH_GMS_MASK;
465
466 if (gmch_ctrl < 0xf0)
467 return gmch_ctrl << 25; /* 32 MB units */
468 else
469 /* 4MB increments starting at 0xf0 for 4MB */
470 return (gmch_ctrl - 0xf0 + 1) << 22;
471}
472
473typedef size_t (*stolen_size_fn)(int num, int slot, int func);
474
458static const struct intel_stolen_funcs i830_stolen_funcs __initconst = { 475static const struct intel_stolen_funcs i830_stolen_funcs __initconst = {
459 .base = i830_stolen_base, 476 .base = i830_stolen_base,
460 .size = i830_stolen_size, 477 .size = i830_stolen_size,
@@ -490,6 +507,11 @@ static const struct intel_stolen_funcs gen8_stolen_funcs __initconst = {
490 .size = gen8_stolen_size, 507 .size = gen8_stolen_size,
491}; 508};
492 509
510static const struct intel_stolen_funcs gen9_stolen_funcs __initconst = {
511 .base = intel_stolen_base,
512 .size = gen9_stolen_size,
513};
514
493static const struct intel_stolen_funcs chv_stolen_funcs __initconst = { 515static const struct intel_stolen_funcs chv_stolen_funcs __initconst = {
494 .base = intel_stolen_base, 516 .base = intel_stolen_base,
495 .size = chv_stolen_size, 517 .size = chv_stolen_size,
@@ -523,6 +545,7 @@ static const struct pci_device_id intel_stolen_ids[] __initconst = {
523 INTEL_BDW_M_IDS(&gen8_stolen_funcs), 545 INTEL_BDW_M_IDS(&gen8_stolen_funcs),
524 INTEL_BDW_D_IDS(&gen8_stolen_funcs), 546 INTEL_BDW_D_IDS(&gen8_stolen_funcs),
525 INTEL_CHV_IDS(&chv_stolen_funcs), 547 INTEL_CHV_IDS(&chv_stolen_funcs),
548 INTEL_SKL_IDS(&gen9_stolen_funcs),
526}; 549};
527 550
528static void __init intel_graphics_stolen(int num, int slot, int func) 551static void __init intel_graphics_stolen(int num, int slot, int func)
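
For illustration, the gen9 stolen-size decoding added above can be checked in isolation; this standalone sketch mirrors the shift logic, and the sample control values are hypothetical:

	#include <stdio.h>
	#include <stddef.h>

	/* Values below 0xf0 count 32 MB units; 0xf0 and up count 4 MB steps. */
	static size_t gen9_decode(unsigned int gms)
	{
		if (gms < 0xf0)
			return (size_t)gms << 25;
		return (size_t)(gms - 0xf0 + 1) << 22;
	}

	int main(void)
	{
		/* prints "32 4 8" (sizes in MB) */
		printf("%zu %zu %zu\n",
		       gen9_decode(0x01) >> 20,
		       gen9_decode(0xf0) >> 20,
		       gen9_decode(0xf1) >> 20);
		return 0;
	}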
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 9a024f899dd4..f3334829e55a 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -153,7 +153,6 @@ static struct page *i8xx_alloc_pages(void)
153 __free_pages(page, 2); 153 __free_pages(page, 2);
154 return NULL; 154 return NULL;
155 } 155 }
156 get_page(page);
157 atomic_inc(&agp_bridge->current_memory_agp); 156 atomic_inc(&agp_bridge->current_memory_agp);
158 return page; 157 return page;
159} 158}
@@ -164,7 +163,6 @@ static void i8xx_destroy_pages(struct page *page)
164 return; 163 return;
165 164
166 set_pages_wb(page, 4); 165 set_pages_wb(page, 4);
167 put_page(page);
168 __free_pages(page, 2); 166 __free_pages(page, 2);
169 atomic_dec(&agp_bridge->current_memory_agp); 167 atomic_dec(&agp_bridge->current_memory_agp);
170} 168}
@@ -300,7 +298,6 @@ static int intel_gtt_setup_scratch_page(void)
300 page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO); 298 page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
301 if (page == NULL) 299 if (page == NULL)
302 return -ENOMEM; 300 return -ENOMEM;
303 get_page(page);
304 set_pages_uc(page, 1); 301 set_pages_uc(page, 1);
305 302
306 if (intel_private.needs_dmar) { 303 if (intel_private.needs_dmar) {
@@ -560,7 +557,6 @@ static void intel_gtt_teardown_scratch_page(void)
560 set_pages_wb(intel_private.scratch_page, 1); 557 set_pages_wb(intel_private.scratch_page, 1);
561 pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma, 558 pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma,
562 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 559 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
563 put_page(intel_private.scratch_page);
564 __free_page(intel_private.scratch_page); 560 __free_page(intel_private.scratch_page);
565} 561}
566 562
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index e3b4b0f02b3d..c3413b6adb17 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -167,6 +167,8 @@ config DRM_SAVAGE
167 167
168source "drivers/gpu/drm/exynos/Kconfig" 168source "drivers/gpu/drm/exynos/Kconfig"
169 169
170source "drivers/gpu/drm/rockchip/Kconfig"
171
170source "drivers/gpu/drm/vmwgfx/Kconfig" 172source "drivers/gpu/drm/vmwgfx/Kconfig"
171 173
172source "drivers/gpu/drm/gma500/Kconfig" 174source "drivers/gpu/drm/gma500/Kconfig"
@@ -200,3 +202,7 @@ source "drivers/gpu/drm/tegra/Kconfig"
200source "drivers/gpu/drm/panel/Kconfig" 202source "drivers/gpu/drm/panel/Kconfig"
201 203
202source "drivers/gpu/drm/sti/Kconfig" 204source "drivers/gpu/drm/sti/Kconfig"
205
206source "drivers/gpu/drm/amd/amdkfd/Kconfig"
207
208source "drivers/gpu/drm/imx/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 9292a761ea6d..66e40398b3d3 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -14,7 +14,7 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
14 drm_info.o drm_debugfs.o drm_encoder_slave.o \ 14 drm_info.o drm_debugfs.o drm_encoder_slave.o \
15 drm_trace_points.o drm_global.o drm_prime.o \ 15 drm_trace_points.o drm_global.o drm_prime.o \
16 drm_rect.o drm_vma_manager.o drm_flip_work.o \ 16 drm_rect.o drm_vma_manager.o drm_flip_work.o \
17 drm_modeset_lock.o 17 drm_modeset_lock.o drm_atomic.o
18 18
19drm-$(CONFIG_COMPAT) += drm_ioc32.o 19drm-$(CONFIG_COMPAT) += drm_ioc32.o
20drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o 20drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
@@ -23,7 +23,7 @@ drm-$(CONFIG_DRM_PANEL) += drm_panel.o
23drm-$(CONFIG_OF) += drm_of.o 23drm-$(CONFIG_OF) += drm_of.o
24 24
25drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \ 25drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \
26 drm_plane_helper.o drm_dp_mst_topology.o 26 drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o
27drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o 27drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
28drm_kms_helper-$(CONFIG_DRM_KMS_FB_HELPER) += drm_fb_helper.o 28drm_kms_helper-$(CONFIG_DRM_KMS_FB_HELPER) += drm_fb_helper.o
29drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o 29drm_kms_helper-$(CONFIG_DRM_KMS_CMA_HELPER) += drm_fb_cma_helper.o
@@ -49,6 +49,7 @@ obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
49obj-$(CONFIG_DRM_VIA) +=via/ 49obj-$(CONFIG_DRM_VIA) +=via/
50obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/ 50obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/
51obj-$(CONFIG_DRM_EXYNOS) +=exynos/ 51obj-$(CONFIG_DRM_EXYNOS) +=exynos/
52obj-$(CONFIG_DRM_ROCKCHIP) +=rockchip/
52obj-$(CONFIG_DRM_GMA500) += gma500/ 53obj-$(CONFIG_DRM_GMA500) += gma500/
53obj-$(CONFIG_DRM_UDL) += udl/ 54obj-$(CONFIG_DRM_UDL) += udl/
54obj-$(CONFIG_DRM_AST) += ast/ 55obj-$(CONFIG_DRM_AST) += ast/
@@ -62,6 +63,8 @@ obj-$(CONFIG_DRM_BOCHS) += bochs/
62obj-$(CONFIG_DRM_MSM) += msm/ 63obj-$(CONFIG_DRM_MSM) += msm/
63obj-$(CONFIG_DRM_TEGRA) += tegra/ 64obj-$(CONFIG_DRM_TEGRA) += tegra/
64obj-$(CONFIG_DRM_STI) += sti/ 65obj-$(CONFIG_DRM_STI) += sti/
66obj-$(CONFIG_DRM_IMX) += imx/
65obj-y += i2c/ 67obj-y += i2c/
66obj-y += panel/ 68obj-y += panel/
67obj-y += bridge/ 69obj-y += bridge/
70obj-$(CONFIG_HSA_AMD) += amd/amdkfd/
diff --git a/drivers/gpu/drm/README.drm b/drivers/gpu/drm/README.drm
deleted file mode 100644
index b5b332722581..000000000000
--- a/drivers/gpu/drm/README.drm
+++ /dev/null
@@ -1,43 +0,0 @@
1************************************************************
2* For the very latest on DRI development, please see: *
3* http://dri.freedesktop.org/ *
4************************************************************
5
6The Direct Rendering Manager (drm) is a device-independent kernel-level
7device driver that provides support for the XFree86 Direct Rendering
8Infrastructure (DRI).
9
10The DRM supports the Direct Rendering Infrastructure (DRI) in four major
11ways:
12
13 1. The DRM provides synchronized access to the graphics hardware via
14 the use of an optimized two-tiered lock.
15
16 2. The DRM enforces the DRI security policy for access to the graphics
17 hardware by only allowing authenticated X11 clients access to
18 restricted regions of memory.
19
20 3. The DRM provides a generic DMA engine, complete with multiple
21 queues and the ability to detect the need for an OpenGL context
22 switch.
23
24 4. The DRM is extensible via the use of small device-specific modules
25 that rely extensively on the API exported by the DRM module.
26
27
28Documentation on the DRI is available from:
29 http://dri.freedesktop.org/wiki/Documentation
30 http://sourceforge.net/project/showfiles.php?group_id=387
31 http://dri.sourceforge.net/doc/
32
33For specific information about kernel-level support, see:
34
35 The Direct Rendering Manager, Kernel Support for the Direct Rendering
36 Infrastructure
37 http://dri.sourceforge.net/doc/drm_low_level.html
38
39 Hardware Locking for the Direct Rendering Infrastructure
40 http://dri.sourceforge.net/doc/hardware_locking_low_level.html
41
42 A Security Analysis of the Direct Rendering Infrastructure
43 http://dri.sourceforge.net/doc/security_low_level.html
diff --git a/drivers/gpu/drm/amd/amdkfd/Kconfig b/drivers/gpu/drm/amd/amdkfd/Kconfig
new file mode 100644
index 000000000000..8dfac37ff327
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/Kconfig
@@ -0,0 +1,9 @@
1#
 2# Heterogeneous system architecture configuration
3#
4
5config HSA_AMD
6 tristate "HSA kernel driver for AMD GPU devices"
7 depends on DRM_RADEON && AMD_IOMMU_V2 && X86_64
8 help
9 Enable this if you want to use HSA features on AMD GPU devices.
diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile
new file mode 100644
index 000000000000..be6246de5091
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/Makefile
@@ -0,0 +1,14 @@
1#
 2# Makefile for Heterogeneous System Architecture support for AMD GPU devices
3#
4
5ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/amd/include/
6
7amdkfd-y := kfd_module.o kfd_device.o kfd_chardev.o kfd_topology.o \
8 kfd_pasid.o kfd_doorbell.o kfd_flat_memory.o \
9 kfd_process.o kfd_queue.o kfd_mqd_manager.o \
10 kfd_kernel_queue.o kfd_packet_manager.o \
11 kfd_process_queue_manager.o kfd_device_queue_manager.o \
12 kfd_interrupt.o
13
14obj-$(CONFIG_HSA_AMD) += amdkfd.o
diff --git a/drivers/gpu/drm/amd/amdkfd/cik_regs.h b/drivers/gpu/drm/amd/amdkfd/cik_regs.h
new file mode 100644
index 000000000000..607fc5ceadbe
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/cik_regs.h
@@ -0,0 +1,221 @@
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef CIK_REGS_H
24#define CIK_REGS_H
25
26#define IH_VMID_0_LUT 0x3D40u
27
28#define BIF_DOORBELL_CNTL 0x530Cu
29
30#define SRBM_GFX_CNTL 0xE44
31#define PIPEID(x) ((x) << 0)
32#define MEID(x) ((x) << 2)
33#define VMID(x) ((x) << 4)
34#define QUEUEID(x) ((x) << 8)
35
36#define SQ_CONFIG 0x8C00
37
38#define SH_MEM_BASES 0x8C28
39/* if PTR32, these are the bases for scratch and lds */
40#define PRIVATE_BASE(x) ((x) << 0) /* scratch */
41#define SHARED_BASE(x) ((x) << 16) /* LDS */
42#define SH_MEM_APE1_BASE 0x8C2C
43/* if PTR32, this is the base location of GPUVM */
44#define SH_MEM_APE1_LIMIT 0x8C30
45/* if PTR32, this is the upper limit of GPUVM */
46#define SH_MEM_CONFIG 0x8C34
47#define PTR32 (1 << 0)
48#define PRIVATE_ATC (1 << 1)
49#define ALIGNMENT_MODE(x) ((x) << 2)
50#define SH_MEM_ALIGNMENT_MODE_DWORD 0
51#define SH_MEM_ALIGNMENT_MODE_DWORD_STRICT 1
52#define SH_MEM_ALIGNMENT_MODE_STRICT 2
53#define SH_MEM_ALIGNMENT_MODE_UNALIGNED 3
54#define DEFAULT_MTYPE(x) ((x) << 4)
55#define APE1_MTYPE(x) ((x) << 7)
56
57/* valid for both DEFAULT_MTYPE and APE1_MTYPE */
58#define MTYPE_CACHED 0
59#define MTYPE_NONCACHED 3
60
61
62#define SH_STATIC_MEM_CONFIG 0x9604u
63
64#define TC_CFG_L1_LOAD_POLICY0 0xAC68
65#define TC_CFG_L1_LOAD_POLICY1 0xAC6C
66#define TC_CFG_L1_STORE_POLICY 0xAC70
67#define TC_CFG_L2_LOAD_POLICY0 0xAC74
68#define TC_CFG_L2_LOAD_POLICY1 0xAC78
69#define TC_CFG_L2_STORE_POLICY0 0xAC7C
70#define TC_CFG_L2_STORE_POLICY1 0xAC80
71#define TC_CFG_L2_ATOMIC_POLICY 0xAC84
72#define TC_CFG_L1_VOLATILE 0xAC88
73#define TC_CFG_L2_VOLATILE 0xAC8C
74
75#define CP_PQ_WPTR_POLL_CNTL 0xC20C
76#define WPTR_POLL_EN (1 << 31)
77
78#define CPC_INT_CNTL 0xC2D0
79#define CP_ME1_PIPE0_INT_CNTL 0xC214
80#define CP_ME1_PIPE1_INT_CNTL 0xC218
81#define CP_ME1_PIPE2_INT_CNTL 0xC21C
82#define CP_ME1_PIPE3_INT_CNTL 0xC220
83#define CP_ME2_PIPE0_INT_CNTL 0xC224
84#define CP_ME2_PIPE1_INT_CNTL 0xC228
85#define CP_ME2_PIPE2_INT_CNTL 0xC22C
86#define CP_ME2_PIPE3_INT_CNTL 0xC230
87#define DEQUEUE_REQUEST_INT_ENABLE (1 << 13)
88#define WRM_POLL_TIMEOUT_INT_ENABLE (1 << 17)
89#define PRIV_REG_INT_ENABLE (1 << 23)
90#define TIME_STAMP_INT_ENABLE (1 << 26)
91#define GENERIC2_INT_ENABLE (1 << 29)
92#define GENERIC1_INT_ENABLE (1 << 30)
93#define GENERIC0_INT_ENABLE (1 << 31)
94#define CP_ME1_PIPE0_INT_STATUS 0xC214
95#define CP_ME1_PIPE1_INT_STATUS 0xC218
96#define CP_ME1_PIPE2_INT_STATUS 0xC21C
97#define CP_ME1_PIPE3_INT_STATUS 0xC220
98#define CP_ME2_PIPE0_INT_STATUS 0xC224
99#define CP_ME2_PIPE1_INT_STATUS 0xC228
100#define CP_ME2_PIPE2_INT_STATUS 0xC22C
101#define CP_ME2_PIPE3_INT_STATUS 0xC230
102#define DEQUEUE_REQUEST_INT_STATUS (1 << 13)
103#define WRM_POLL_TIMEOUT_INT_STATUS (1 << 17)
104#define PRIV_REG_INT_STATUS (1 << 23)
105#define TIME_STAMP_INT_STATUS (1 << 26)
106#define GENERIC2_INT_STATUS (1 << 29)
107#define GENERIC1_INT_STATUS (1 << 30)
108#define GENERIC0_INT_STATUS (1 << 31)
109
110#define CP_HPD_EOP_BASE_ADDR 0xC904
111#define CP_HPD_EOP_BASE_ADDR_HI 0xC908
112#define CP_HPD_EOP_VMID 0xC90C
113#define CP_HPD_EOP_CONTROL 0xC910
114#define EOP_SIZE(x) ((x) << 0)
115#define EOP_SIZE_MASK (0x3f << 0)
116#define CP_MQD_BASE_ADDR 0xC914
117#define CP_MQD_BASE_ADDR_HI 0xC918
118#define CP_HQD_ACTIVE 0xC91C
119#define CP_HQD_VMID 0xC920
120
121#define CP_HQD_PERSISTENT_STATE 0xC924u
122#define DEFAULT_CP_HQD_PERSISTENT_STATE (0x33U << 8)
123#define PRELOAD_REQ (1 << 0)
124
125#define CP_HQD_PIPE_PRIORITY 0xC928u
126#define CP_HQD_QUEUE_PRIORITY 0xC92Cu
127#define CP_HQD_QUANTUM 0xC930u
128#define QUANTUM_EN 1U
129#define QUANTUM_SCALE_1MS (1U << 4)
130#define QUANTUM_DURATION(x) ((x) << 8)
131
132#define CP_HQD_PQ_BASE 0xC934
133#define CP_HQD_PQ_BASE_HI 0xC938
134#define CP_HQD_PQ_RPTR 0xC93C
135#define CP_HQD_PQ_RPTR_REPORT_ADDR 0xC940
136#define CP_HQD_PQ_RPTR_REPORT_ADDR_HI 0xC944
137#define CP_HQD_PQ_WPTR_POLL_ADDR 0xC948
138#define CP_HQD_PQ_WPTR_POLL_ADDR_HI 0xC94C
139#define CP_HQD_PQ_DOORBELL_CONTROL 0xC950
140#define DOORBELL_OFFSET(x) ((x) << 2)
141#define DOORBELL_OFFSET_MASK (0x1fffff << 2)
142#define DOORBELL_SOURCE (1 << 28)
143#define DOORBELL_SCHD_HIT (1 << 29)
144#define DOORBELL_EN (1 << 30)
145#define DOORBELL_HIT (1 << 31)
146#define CP_HQD_PQ_WPTR 0xC954
147#define CP_HQD_PQ_CONTROL 0xC958
148#define QUEUE_SIZE(x) ((x) << 0)
149#define QUEUE_SIZE_MASK (0x3f << 0)
150#define RPTR_BLOCK_SIZE(x) ((x) << 8)
151#define RPTR_BLOCK_SIZE_MASK (0x3f << 8)
152#define MIN_AVAIL_SIZE(x) ((x) << 20)
153#define PQ_ATC_EN (1 << 23)
154#define PQ_VOLATILE (1 << 26)
155#define NO_UPDATE_RPTR (1 << 27)
156#define UNORD_DISPATCH (1 << 28)
157#define ROQ_PQ_IB_FLIP (1 << 29)
158#define PRIV_STATE (1 << 30)
159#define KMD_QUEUE (1 << 31)
160
161#define DEFAULT_RPTR_BLOCK_SIZE RPTR_BLOCK_SIZE(5)
162#define DEFAULT_MIN_AVAIL_SIZE MIN_AVAIL_SIZE(3)
163
164#define CP_HQD_IB_BASE_ADDR 0xC95Cu
165#define CP_HQD_IB_BASE_ADDR_HI 0xC960u
166#define CP_HQD_IB_RPTR 0xC964u
167#define CP_HQD_IB_CONTROL 0xC968u
168#define IB_ATC_EN (1U << 23)
169#define DEFAULT_MIN_IB_AVAIL_SIZE (3U << 20)
170
171#define CP_HQD_DEQUEUE_REQUEST 0xC974
172#define DEQUEUE_REQUEST_DRAIN 1
173#define DEQUEUE_REQUEST_RESET 2
174#define DEQUEUE_INT (1U << 8)
175
176#define CP_HQD_SEMA_CMD 0xC97Cu
177#define CP_HQD_MSG_TYPE 0xC980u
178#define CP_HQD_ATOMIC0_PREOP_LO 0xC984u
179#define CP_HQD_ATOMIC0_PREOP_HI 0xC988u
180#define CP_HQD_ATOMIC1_PREOP_LO 0xC98Cu
181#define CP_HQD_ATOMIC1_PREOP_HI 0xC990u
182#define CP_HQD_HQ_SCHEDULER0 0xC994u
183#define CP_HQD_HQ_SCHEDULER1 0xC998u
184
185
186#define CP_MQD_CONTROL 0xC99C
187#define MQD_VMID(x) ((x) << 0)
188#define MQD_VMID_MASK (0xf << 0)
189#define MQD_CONTROL_PRIV_STATE_EN (1U << 8)
190
191#define GRBM_GFX_INDEX 0x30800
192#define INSTANCE_INDEX(x) ((x) << 0)
193#define SH_INDEX(x) ((x) << 8)
194#define SE_INDEX(x) ((x) << 16)
195#define SH_BROADCAST_WRITES (1 << 29)
196#define INSTANCE_BROADCAST_WRITES (1 << 30)
197#define SE_BROADCAST_WRITES (1 << 31)
198
199#define SQC_CACHES 0x30d20
200#define SQC_POLICY 0x8C38u
201#define SQC_VOLATILE 0x8C3Cu
202
203#define CP_PERFMON_CNTL 0x36020
204
205#define ATC_VMID0_PASID_MAPPING 0x339Cu
206#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS 0x3398u
207#define ATC_VMID_PASID_MAPPING_VALID (1U << 31)
208
209#define ATC_VM_APERTURE0_CNTL 0x3310u
210#define ATS_ACCESS_MODE_NEVER 0
211#define ATS_ACCESS_MODE_ALWAYS 1
212
213#define ATC_VM_APERTURE0_CNTL2 0x3318u
214#define ATC_VM_APERTURE0_HIGH_ADDR 0x3308u
215#define ATC_VM_APERTURE0_LOW_ADDR 0x3300u
216#define ATC_VM_APERTURE1_CNTL 0x3314u
217#define ATC_VM_APERTURE1_CNTL2 0x331Cu
218#define ATC_VM_APERTURE1_HIGH_ADDR 0x330Cu
219#define ATC_VM_APERTURE1_LOW_ADDR 0x3304u
220
221#endif
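
The shift-and-mask macros above compose register values field by field. A hedged usage sketch for the doorbell control register; the helper name and offset parameter are illustrative:

	/* assumes cik_regs.h is in scope */
	static inline unsigned int make_doorbell_control(unsigned int offset)
	{
		unsigned int v = 0;

		v |= DOORBELL_OFFSET(offset) & DOORBELL_OFFSET_MASK;
		v |= DOORBELL_EN;	/* enable doorbell writes */
		return v;
	}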
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
new file mode 100644
index 000000000000..4f7b275f2f7b
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -0,0 +1,595 @@
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#include <linux/device.h>
24#include <linux/export.h>
25#include <linux/err.h>
26#include <linux/fs.h>
27#include <linux/sched.h>
28#include <linux/slab.h>
29#include <linux/uaccess.h>
30#include <linux/compat.h>
31#include <uapi/linux/kfd_ioctl.h>
32#include <linux/time.h>
33#include <linux/mm.h>
34#include <linux/uaccess.h>
35#include <uapi/asm-generic/mman-common.h>
36#include <asm/processor.h>
37#include "kfd_priv.h"
38#include "kfd_device_queue_manager.h"
39
40static long kfd_ioctl(struct file *, unsigned int, unsigned long);
41static int kfd_open(struct inode *, struct file *);
42static int kfd_mmap(struct file *, struct vm_area_struct *);
43
44static const char kfd_dev_name[] = "kfd";
45
46static const struct file_operations kfd_fops = {
47 .owner = THIS_MODULE,
48 .unlocked_ioctl = kfd_ioctl,
49 .compat_ioctl = kfd_ioctl,
50 .open = kfd_open,
51 .mmap = kfd_mmap,
52};
53
54static int kfd_char_dev_major = -1;
55static struct class *kfd_class;
56struct device *kfd_device;
57
58int kfd_chardev_init(void)
59{
60 int err = 0;
61
62 kfd_char_dev_major = register_chrdev(0, kfd_dev_name, &kfd_fops);
63 err = kfd_char_dev_major;
64 if (err < 0)
65 goto err_register_chrdev;
66
67 kfd_class = class_create(THIS_MODULE, kfd_dev_name);
68 err = PTR_ERR(kfd_class);
69 if (IS_ERR(kfd_class))
70 goto err_class_create;
71
72 kfd_device = device_create(kfd_class, NULL,
73 MKDEV(kfd_char_dev_major, 0),
74 NULL, kfd_dev_name);
75 err = PTR_ERR(kfd_device);
76 if (IS_ERR(kfd_device))
77 goto err_device_create;
78
79 return 0;
80
81err_device_create:
82 class_destroy(kfd_class);
83err_class_create:
84 unregister_chrdev(kfd_char_dev_major, kfd_dev_name);
85err_register_chrdev:
86 return err;
87}
88
89void kfd_chardev_exit(void)
90{
91 device_destroy(kfd_class, MKDEV(kfd_char_dev_major, 0));
92 class_destroy(kfd_class);
93 unregister_chrdev(kfd_char_dev_major, kfd_dev_name);
94}
95
96struct device *kfd_chardev(void)
97{
98 return kfd_device;
99}
100
101
102static int kfd_open(struct inode *inode, struct file *filep)
103{
104 struct kfd_process *process;
105 bool is_32bit_user_mode;
106
107 if (iminor(inode) != 0)
108 return -ENODEV;
109
110 is_32bit_user_mode = is_compat_task();
111
 112 if (is_32bit_user_mode) {
113 dev_warn(kfd_device,
114 "Process %d (32-bit) failed to open /dev/kfd\n"
115 "32-bit processes are not supported by amdkfd\n",
116 current->pid);
117 return -EPERM;
118 }
119
120 process = kfd_create_process(current);
121 if (IS_ERR(process))
122 return PTR_ERR(process);
123
124 process->is_32bit_user_mode = is_32bit_user_mode;
125
126 dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
127 process->pasid, process->is_32bit_user_mode);
128
129 kfd_init_apertures(process);
130
131 return 0;
132}
133
134static long kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
135 void __user *arg)
136{
137 struct kfd_ioctl_get_version_args args;
138 int err = 0;
139
140 args.major_version = KFD_IOCTL_MAJOR_VERSION;
141 args.minor_version = KFD_IOCTL_MINOR_VERSION;
142
143 if (copy_to_user(arg, &args, sizeof(args)))
144 err = -EFAULT;
145
146 return err;
147}
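/*
 * A hedged userspace sketch of the matching call: open /dev/kfd and issue
 * the get-version ioctl. The request macro name is an assumption taken
 * from include/uapi/linux/kfd_ioctl.h and may differ:
 *
 *	struct kfd_ioctl_get_version_args args = {0};
 *	int fd = open("/dev/kfd", O_RDWR);
 *
 *	if (fd >= 0 && ioctl(fd, KFD_IOC_GET_VERSION, &args) == 0)
 *		printf("kfd interface %u.%u\n",
 *		       args.major_version, args.minor_version);
 */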
148
149static int set_queue_properties_from_user(struct queue_properties *q_properties,
150 struct kfd_ioctl_create_queue_args *args)
151{
152 if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
153 pr_err("kfd: queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
154 return -EINVAL;
155 }
156
157 if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
158 pr_err("kfd: queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
159 return -EINVAL;
160 }
161
162 if ((args->ring_base_address) &&
163 (!access_ok(VERIFY_WRITE,
164 (const void __user *) args->ring_base_address,
165 sizeof(uint64_t)))) {
166 pr_err("kfd: can't access ring base address\n");
167 return -EFAULT;
168 }
169
170 if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
171 pr_err("kfd: ring size must be a power of 2 or 0\n");
172 return -EINVAL;
173 }
174
175 if (!access_ok(VERIFY_WRITE,
176 (const void __user *) args->read_pointer_address,
177 sizeof(uint32_t))) {
178 pr_err("kfd: can't access read pointer\n");
179 return -EFAULT;
180 }
181
182 if (!access_ok(VERIFY_WRITE,
183 (const void __user *) args->write_pointer_address,
184 sizeof(uint32_t))) {
185 pr_err("kfd: can't access write pointer\n");
186 return -EFAULT;
187 }
188
189 q_properties->is_interop = false;
190 q_properties->queue_percent = args->queue_percentage;
191 q_properties->priority = args->queue_priority;
192 q_properties->queue_address = args->ring_base_address;
193 q_properties->queue_size = args->ring_size;
194 q_properties->read_ptr = (uint32_t *) args->read_pointer_address;
195 q_properties->write_ptr = (uint32_t *) args->write_pointer_address;
196 if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE ||
197 args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
198 q_properties->type = KFD_QUEUE_TYPE_COMPUTE;
199 else
200 return -ENOTSUPP;
201
202 if (args->queue_type == KFD_IOC_QUEUE_TYPE_COMPUTE_AQL)
203 q_properties->format = KFD_QUEUE_FORMAT_AQL;
204 else
205 q_properties->format = KFD_QUEUE_FORMAT_PM4;
206
207 pr_debug("Queue Percentage (%d, %d)\n",
208 q_properties->queue_percent, args->queue_percentage);
209
210 pr_debug("Queue Priority (%d, %d)\n",
211 q_properties->priority, args->queue_priority);
212
213 pr_debug("Queue Address (0x%llX, 0x%llX)\n",
214 q_properties->queue_address, args->ring_base_address);
215
216 pr_debug("Queue Size (0x%llX, %u)\n",
217 q_properties->queue_size, args->ring_size);
218
219 pr_debug("Queue r/w Pointers (0x%llX, 0x%llX)\n",
220 (uint64_t) q_properties->read_ptr,
221 (uint64_t) q_properties->write_ptr);
222
223 pr_debug("Queue Format (%d)\n", q_properties->format);
224
225 return 0;
226}
227
228static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
229 void __user *arg)
230{
231 struct kfd_ioctl_create_queue_args args;
232 struct kfd_dev *dev;
233 int err = 0;
234 unsigned int queue_id;
235 struct kfd_process_device *pdd;
236 struct queue_properties q_properties;
237
238 memset(&q_properties, 0, sizeof(struct queue_properties));
239
240 if (copy_from_user(&args, arg, sizeof(args)))
241 return -EFAULT;
242
243 pr_debug("kfd: creating queue ioctl\n");
244
245 err = set_queue_properties_from_user(&q_properties, &args);
246 if (err)
247 return err;
248
249 dev = kfd_device_by_id(args.gpu_id);
250 if (dev == NULL)
251 return -EINVAL;
252
253 mutex_lock(&p->mutex);
254
255 pdd = kfd_bind_process_to_device(dev, p);
256 if (IS_ERR(pdd)) {
257 err = PTR_ERR(pdd);
258 goto err_bind_process;
259 }
260
261 pr_debug("kfd: creating queue for PASID %d on GPU 0x%x\n",
262 p->pasid,
263 dev->id);
264
265 err = pqm_create_queue(&p->pqm, dev, filep, &q_properties, 0,
266 KFD_QUEUE_TYPE_COMPUTE, &queue_id);
267 if (err != 0)
268 goto err_create_queue;
269
270 args.queue_id = queue_id;
271
272 /* Return gpu_id as doorbell offset for mmap usage */
273 args.doorbell_offset = args.gpu_id << PAGE_SHIFT;
274
275 if (copy_to_user(arg, &args, sizeof(args))) {
276 err = -EFAULT;
277 goto err_copy_args_out;
278 }
279
280 mutex_unlock(&p->mutex);
281
282 pr_debug("kfd: queue id %d was created successfully\n", args.queue_id);
283
284 pr_debug("ring buffer address == 0x%016llX\n",
285 args.ring_base_address);
286
287 pr_debug("read ptr address == 0x%016llX\n",
288 args.read_pointer_address);
289
290 pr_debug("write ptr address == 0x%016llX\n",
291 args.write_pointer_address);
292
293 return 0;
294
295err_copy_args_out:
296 pqm_destroy_queue(&p->pqm, queue_id);
297err_create_queue:
298err_bind_process:
299 mutex_unlock(&p->mutex);
300 return err;
301}
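/*
 * Editor's note: a minimal user-space sketch of driving the create-queue
 * ioctl above. This is illustrative only: it assumes the uapi header
 * <linux/kfd_ioctl.h> exports struct kfd_ioctl_create_queue_args, the
 * KFD_IOC_CREATE_QUEUE request, and the KFD_IOC_QUEUE_TYPE_COMPUTE value
 * consumed by the handler, and that kfd_fd is an open fd on the KFD node.
 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

static int demo_create_queue(int kfd_fd, uint32_t gpu_id, void *ring,
			     uint32_t ring_bytes, uint32_t *rptr,
			     uint32_t *wptr, uint64_t *doorbell_offset)
{
	struct kfd_ioctl_create_queue_args args;

	memset(&args, 0, sizeof(args));
	args.gpu_id = gpu_id;
	args.queue_type = KFD_IOC_QUEUE_TYPE_COMPUTE;	/* PM4 format */
	args.ring_base_address = (uintptr_t)ring;
	args.ring_size = ring_bytes;		/* must be 0 or a power of 2 */
	args.read_pointer_address = (uintptr_t)rptr;
	args.write_pointer_address = (uintptr_t)wptr;
	args.queue_percentage = 100;	/* <= KFD_MAX_QUEUE_PERCENTAGE */
	args.queue_priority = 7;	/* <= KFD_MAX_QUEUE_PRIORITY */

	if (ioctl(kfd_fd, KFD_IOC_CREATE_QUEUE, &args) < 0)
		return -1;

	/* doorbell_offset is gpu_id << PAGE_SHIFT, for mmap() later */
	*doorbell_offset = args.doorbell_offset;
	return (int)args.queue_id;
}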
302
303static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p,
304 void __user *arg)
305{
306 int retval;
307 struct kfd_ioctl_destroy_queue_args args;
308
309 if (copy_from_user(&args, arg, sizeof(args)))
310 return -EFAULT;
311
312 pr_debug("kfd: destroying queue id %d for PASID %d\n",
313 args.queue_id,
314 p->pasid);
315
316 mutex_lock(&p->mutex);
317
318 retval = pqm_destroy_queue(&p->pqm, args.queue_id);
319
320 mutex_unlock(&p->mutex);
321 return retval;
322}
323
324static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
325 void __user *arg)
326{
327 int retval;
328 struct kfd_ioctl_update_queue_args args;
329 struct queue_properties properties;
330
331 if (copy_from_user(&args, arg, sizeof(args)))
332 return -EFAULT;
333
334 if (args.queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
335		pr_err("kfd: queue percentage must be between 0 and KFD_MAX_QUEUE_PERCENTAGE\n");
336 return -EINVAL;
337 }
338
339 if (args.queue_priority > KFD_MAX_QUEUE_PRIORITY) {
340		pr_err("kfd: queue priority must be between 0 and KFD_MAX_QUEUE_PRIORITY\n");
341 return -EINVAL;
342 }
343
344 if ((args.ring_base_address) &&
345 (!access_ok(VERIFY_WRITE,
346 (const void __user *) args.ring_base_address,
347 sizeof(uint64_t)))) {
348 pr_err("kfd: can't access ring base address\n");
349 return -EFAULT;
350 }
351
352 if (!is_power_of_2(args.ring_size) && (args.ring_size != 0)) {
353 pr_err("kfd: ring size must be a power of 2 or 0\n");
354 return -EINVAL;
355 }
356
357 properties.queue_address = args.ring_base_address;
358 properties.queue_size = args.ring_size;
359 properties.queue_percent = args.queue_percentage;
360 properties.priority = args.queue_priority;
361
362 pr_debug("kfd: updating queue id %d for PASID %d\n",
363 args.queue_id, p->pasid);
364
365 mutex_lock(&p->mutex);
366
367 retval = pqm_update_queue(&p->pqm, args.queue_id, &properties);
368
369 mutex_unlock(&p->mutex);
370
371 return retval;
372}
373
374static long kfd_ioctl_set_memory_policy(struct file *filep,
375 struct kfd_process *p, void __user *arg)
376{
377 struct kfd_ioctl_set_memory_policy_args args;
378 struct kfd_dev *dev;
379 int err = 0;
380 struct kfd_process_device *pdd;
381 enum cache_policy default_policy, alternate_policy;
382
383 if (copy_from_user(&args, arg, sizeof(args)))
384 return -EFAULT;
385
386 if (args.default_policy != KFD_IOC_CACHE_POLICY_COHERENT
387 && args.default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
388 return -EINVAL;
389 }
390
391 if (args.alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT
392 && args.alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
393 return -EINVAL;
394 }
395
396 dev = kfd_device_by_id(args.gpu_id);
397 if (dev == NULL)
398 return -EINVAL;
399
400 mutex_lock(&p->mutex);
401
402 pdd = kfd_bind_process_to_device(dev, p);
403 if (IS_ERR(pdd)) {
404 err = PTR_ERR(pdd);
405 goto out;
406 }
407
408 default_policy = (args.default_policy == KFD_IOC_CACHE_POLICY_COHERENT)
409 ? cache_policy_coherent : cache_policy_noncoherent;
410
411 alternate_policy =
412 (args.alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
413 ? cache_policy_coherent : cache_policy_noncoherent;
414
415 if (!dev->dqm->set_cache_memory_policy(dev->dqm,
416 &pdd->qpd,
417 default_policy,
418 alternate_policy,
419 (void __user *)args.alternate_aperture_base,
420 args.alternate_aperture_size))
421 err = -EINVAL;
422
423out:
424 mutex_unlock(&p->mutex);
425
426 return err;
427}
428
429static long kfd_ioctl_get_clock_counters(struct file *filep,
430 struct kfd_process *p, void __user *arg)
431{
432 struct kfd_ioctl_get_clock_counters_args args;
433 struct kfd_dev *dev;
434 struct timespec time;
435
436 if (copy_from_user(&args, arg, sizeof(args)))
437 return -EFAULT;
438
439 dev = kfd_device_by_id(args.gpu_id);
440 if (dev == NULL)
441 return -EINVAL;
442
443 /* Reading GPU clock counter from KGD */
444 args.gpu_clock_counter = kfd2kgd->get_gpu_clock_counter(dev->kgd);
445
446 /* No access to rdtsc. Using raw monotonic time */
447 getrawmonotonic(&time);
448 args.cpu_clock_counter = (uint64_t)timespec_to_ns(&time);
449
450 get_monotonic_boottime(&time);
451 args.system_clock_counter = (uint64_t)timespec_to_ns(&time);
452
453	/* Since the counter is in nanoseconds, we report a 1GHz frequency */
454 args.system_clock_freq = 1000000000;
455
456 if (copy_to_user(arg, &args, sizeof(args)))
457 return -EFAULT;
458
459 return 0;
460}
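/*
 * Editor's note: a hedged user-space sketch of consuming the counters
 * reported above. It assumes struct kfd_ioctl_get_clock_counters_args and
 * KFD_IOC_GET_CLOCK_COUNTERS come from the uapi header
 * <linux/kfd_ioctl.h>. Because system_clock_freq is reported as 1GHz,
 * system counter deltas are already in nanoseconds.
 */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

static int64_t demo_elapsed_system_ns(int kfd_fd, uint32_t gpu_id)
{
	struct kfd_ioctl_get_clock_counters_args a = { .gpu_id = gpu_id };
	struct kfd_ioctl_get_clock_counters_args b = { .gpu_id = gpu_id };

	if (ioctl(kfd_fd, KFD_IOC_GET_CLOCK_COUNTERS, &a) < 0)
		return -1;
	/* ... the work to be measured would run here ... */
	if (ioctl(kfd_fd, KFD_IOC_GET_CLOCK_COUNTERS, &b) < 0)
		return -1;

	/* one tick == one nanosecond at the reported 1GHz frequency */
	return (int64_t)(b.system_clock_counter - a.system_clock_counter);
}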
461
462
463static int kfd_ioctl_get_process_apertures(struct file *filp,
464 struct kfd_process *p, void __user *arg)
465{
466 struct kfd_ioctl_get_process_apertures_args args;
467 struct kfd_process_device_apertures *pAperture;
468 struct kfd_process_device *pdd;
469
470	dev_dbg(kfd_device, "get apertures for PASID %d\n", p->pasid);
471
472 if (copy_from_user(&args, arg, sizeof(args)))
473 return -EFAULT;
474
475 args.num_of_nodes = 0;
476
477 mutex_lock(&p->mutex);
478
479	/* if the process-device list isn't empty */
480 if (kfd_has_process_device_data(p)) {
481 /* Run over all pdd of the process */
482 pdd = kfd_get_first_process_device_data(p);
483 do {
484 pAperture = &args.process_apertures[args.num_of_nodes];
485 pAperture->gpu_id = pdd->dev->id;
486 pAperture->lds_base = pdd->lds_base;
487 pAperture->lds_limit = pdd->lds_limit;
488 pAperture->gpuvm_base = pdd->gpuvm_base;
489 pAperture->gpuvm_limit = pdd->gpuvm_limit;
490 pAperture->scratch_base = pdd->scratch_base;
491 pAperture->scratch_limit = pdd->scratch_limit;
492
493 dev_dbg(kfd_device,
494 "node id %u\n", args.num_of_nodes);
495 dev_dbg(kfd_device,
496 "gpu id %u\n", pdd->dev->id);
497 dev_dbg(kfd_device,
498 "lds_base %llX\n", pdd->lds_base);
499 dev_dbg(kfd_device,
500 "lds_limit %llX\n", pdd->lds_limit);
501 dev_dbg(kfd_device,
502 "gpuvm_base %llX\n", pdd->gpuvm_base);
503 dev_dbg(kfd_device,
504 "gpuvm_limit %llX\n", pdd->gpuvm_limit);
505 dev_dbg(kfd_device,
506 "scratch_base %llX\n", pdd->scratch_base);
507 dev_dbg(kfd_device,
508 "scratch_limit %llX\n", pdd->scratch_limit);
509
510 args.num_of_nodes++;
511 } while ((pdd = kfd_get_next_process_device_data(p, pdd)) != NULL &&
512 (args.num_of_nodes < NUM_OF_SUPPORTED_GPUS));
513 }
514
515 mutex_unlock(&p->mutex);
516
517 if (copy_to_user(arg, &args, sizeof(args)))
518 return -EFAULT;
519
520 return 0;
521}
522
523static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
524{
525 struct kfd_process *process;
526 long err = -EINVAL;
527
528 dev_dbg(kfd_device,
529 "ioctl cmd 0x%x (#%d), arg 0x%lx\n",
530 cmd, _IOC_NR(cmd), arg);
531
532 process = kfd_get_process(current);
533 if (IS_ERR(process))
534 return PTR_ERR(process);
535
536 switch (cmd) {
537 case KFD_IOC_GET_VERSION:
538 err = kfd_ioctl_get_version(filep, process, (void __user *)arg);
539 break;
540 case KFD_IOC_CREATE_QUEUE:
541 err = kfd_ioctl_create_queue(filep, process,
542 (void __user *)arg);
543 break;
544
545 case KFD_IOC_DESTROY_QUEUE:
546 err = kfd_ioctl_destroy_queue(filep, process,
547 (void __user *)arg);
548 break;
549
550 case KFD_IOC_SET_MEMORY_POLICY:
551 err = kfd_ioctl_set_memory_policy(filep, process,
552 (void __user *)arg);
553 break;
554
555 case KFD_IOC_GET_CLOCK_COUNTERS:
556 err = kfd_ioctl_get_clock_counters(filep, process,
557 (void __user *)arg);
558 break;
559
560 case KFD_IOC_GET_PROCESS_APERTURES:
561 err = kfd_ioctl_get_process_apertures(filep, process,
562 (void __user *)arg);
563 break;
564
565 case KFD_IOC_UPDATE_QUEUE:
566 err = kfd_ioctl_update_queue(filep, process,
567 (void __user *)arg);
568 break;
569
570 default:
571 dev_err(kfd_device,
572			"unknown ioctl cmd 0x%x, arg 0x%lx\n",
573 cmd, arg);
574 err = -EINVAL;
575 break;
576 }
577
578 if (err < 0)
579 dev_err(kfd_device,
580 "ioctl error %ld for ioctl cmd 0x%x (#%d)\n",
581 err, cmd, _IOC_NR(cmd));
582
583 return err;
584}
585
586static int kfd_mmap(struct file *filp, struct vm_area_struct *vma)
587{
588 struct kfd_process *process;
589
590 process = kfd_get_process(current);
591 if (IS_ERR(process))
592 return PTR_ERR(process);
593
594 return kfd_doorbell_mmap(process, vma);
595}
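/*
 * Editor's note: the matching user-space half of kfd_mmap() above, as a
 * sketch. The doorbell_offset returned by the create-queue ioctl
 * (gpu_id << PAGE_SHIFT) is passed directly as the mmap() offset; the
 * one-page mapping length is an assumption for illustration.
 */
#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>

static volatile uint32_t *demo_map_doorbells(int kfd_fd,
					     uint64_t doorbell_offset)
{
	void *p = mmap(NULL, (size_t)sysconf(_SC_PAGESIZE),
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       kfd_fd, (off_t)doorbell_offset);

	return p == MAP_FAILED ? NULL : (volatile uint32_t *)p;
}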
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.h b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
new file mode 100644
index 000000000000..a374fa3d3ee6
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.h
@@ -0,0 +1,294 @@
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef KFD_CRAT_H_INCLUDED
24#define KFD_CRAT_H_INCLUDED
25
26#include <linux/types.h>
27
28#pragma pack(1)
29
30/*
31 * 4CC signature values for the CRAT and CDIT ACPI tables
32 */
33
34#define CRAT_SIGNATURE "CRAT"
35#define CDIT_SIGNATURE "CDIT"
36
37/*
38 * Component Resource Association Table (CRAT)
39 */
40
41#define CRAT_OEMID_LENGTH 6
42#define CRAT_OEMTABLEID_LENGTH 8
43#define CRAT_RESERVED_LENGTH 6
44
45#define CRAT_OEMID_64BIT_MASK ((1ULL << (CRAT_OEMID_LENGTH * 8)) - 1)
46
47struct crat_header {
48 uint32_t signature;
49 uint32_t length;
50 uint8_t revision;
51 uint8_t checksum;
52 uint8_t oem_id[CRAT_OEMID_LENGTH];
53 uint8_t oem_table_id[CRAT_OEMTABLEID_LENGTH];
54 uint32_t oem_revision;
55 uint32_t creator_id;
56 uint32_t creator_revision;
57 uint32_t total_entries;
58 uint16_t num_domains;
59 uint8_t reserved[CRAT_RESERVED_LENGTH];
60};
61
62/*
63 * The header structure is immediately followed by total_entries of the
64 * data definitions
65 */
66
67/*
68 * The currently defined subtype entries in the CRAT
69 */
70#define CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY 0
71#define CRAT_SUBTYPE_MEMORY_AFFINITY 1
72#define CRAT_SUBTYPE_CACHE_AFFINITY 2
73#define CRAT_SUBTYPE_TLB_AFFINITY 3
74#define CRAT_SUBTYPE_CCOMPUTE_AFFINITY 4
75#define CRAT_SUBTYPE_IOLINK_AFFINITY 5
76#define CRAT_SUBTYPE_MAX 6
77
78#define CRAT_SIBLINGMAP_SIZE 32
79
80/*
81 * ComputeUnit Affinity structure and definitions
82 */
83#define CRAT_CU_FLAGS_ENABLED 0x00000001
84#define CRAT_CU_FLAGS_HOT_PLUGGABLE 0x00000002
85#define CRAT_CU_FLAGS_CPU_PRESENT 0x00000004
86#define CRAT_CU_FLAGS_GPU_PRESENT 0x00000008
87#define CRAT_CU_FLAGS_IOMMU_PRESENT 0x00000010
88#define CRAT_CU_FLAGS_RESERVED 0xffffffe0
89
90#define CRAT_COMPUTEUNIT_RESERVED_LENGTH 4
91
92struct crat_subtype_computeunit {
93 uint8_t type;
94 uint8_t length;
95 uint16_t reserved;
96 uint32_t flags;
97 uint32_t proximity_domain;
98 uint32_t processor_id_low;
99 uint16_t num_cpu_cores;
100 uint16_t num_simd_cores;
101 uint16_t max_waves_simd;
102 uint16_t io_count;
103 uint16_t hsa_capability;
104 uint16_t lds_size_in_kb;
105 uint8_t wave_front_size;
106 uint8_t num_banks;
107 uint16_t micro_engine_id;
108 uint8_t num_arrays;
109 uint8_t num_cu_per_array;
110 uint8_t num_simd_per_cu;
111	uint8_t max_slots_scratch_cu;
112 uint8_t reserved2[CRAT_COMPUTEUNIT_RESERVED_LENGTH];
113};
114
115/*
116 * HSA Memory Affinity structure and definitions
117 */
118#define CRAT_MEM_FLAGS_ENABLED 0x00000001
119#define CRAT_MEM_FLAGS_HOT_PLUGGABLE 0x00000002
120#define CRAT_MEM_FLAGS_NON_VOLATILE 0x00000004
121#define CRAT_MEM_FLAGS_RESERVED 0xfffffff8
122
123#define CRAT_MEMORY_RESERVED_LENGTH 8
124
125struct crat_subtype_memory {
126 uint8_t type;
127 uint8_t length;
128 uint16_t reserved;
129 uint32_t flags;
130	uint32_t proximity_domain;
131 uint32_t base_addr_low;
132 uint32_t base_addr_high;
133 uint32_t length_low;
134 uint32_t length_high;
135 uint32_t width;
136 uint8_t reserved2[CRAT_MEMORY_RESERVED_LENGTH];
137};
138
139/*
140 * HSA Cache Affinity structure and definitions
141 */
142#define CRAT_CACHE_FLAGS_ENABLED 0x00000001
143#define CRAT_CACHE_FLAGS_DATA_CACHE 0x00000002
144#define CRAT_CACHE_FLAGS_INST_CACHE 0x00000004
145#define CRAT_CACHE_FLAGS_CPU_CACHE 0x00000008
146#define CRAT_CACHE_FLAGS_SIMD_CACHE 0x00000010
147#define CRAT_CACHE_FLAGS_RESERVED 0xffffffe0
148
149#define CRAT_CACHE_RESERVED_LENGTH 8
150
151struct crat_subtype_cache {
152 uint8_t type;
153 uint8_t length;
154 uint16_t reserved;
155 uint32_t flags;
156 uint32_t processor_id_low;
157 uint8_t sibling_map[CRAT_SIBLINGMAP_SIZE];
158 uint32_t cache_size;
159 uint8_t cache_level;
160 uint8_t lines_per_tag;
161 uint16_t cache_line_size;
162 uint8_t associativity;
163 uint8_t cache_properties;
164 uint16_t cache_latency;
165 uint8_t reserved2[CRAT_CACHE_RESERVED_LENGTH];
166};
167
168/*
169 * HSA TLB Affinity structure and definitions
170 */
171#define CRAT_TLB_FLAGS_ENABLED 0x00000001
172#define CRAT_TLB_FLAGS_DATA_TLB 0x00000002
173#define CRAT_TLB_FLAGS_INST_TLB 0x00000004
174#define CRAT_TLB_FLAGS_CPU_TLB 0x00000008
175#define CRAT_TLB_FLAGS_SIMD_TLB 0x00000010
176#define CRAT_TLB_FLAGS_RESERVED 0xffffffe0
177
178#define CRAT_TLB_RESERVED_LENGTH 4
179
180struct crat_subtype_tlb {
181 uint8_t type;
182 uint8_t length;
183 uint16_t reserved;
184 uint32_t flags;
185 uint32_t processor_id_low;
186 uint8_t sibling_map[CRAT_SIBLINGMAP_SIZE];
187 uint32_t tlb_level;
188 uint8_t data_tlb_associativity_2mb;
189 uint8_t data_tlb_size_2mb;
190 uint8_t instruction_tlb_associativity_2mb;
191 uint8_t instruction_tlb_size_2mb;
192 uint8_t data_tlb_associativity_4k;
193 uint8_t data_tlb_size_4k;
194 uint8_t instruction_tlb_associativity_4k;
195 uint8_t instruction_tlb_size_4k;
196 uint8_t data_tlb_associativity_1gb;
197 uint8_t data_tlb_size_1gb;
198 uint8_t instruction_tlb_associativity_1gb;
199 uint8_t instruction_tlb_size_1gb;
200 uint8_t reserved2[CRAT_TLB_RESERVED_LENGTH];
201};
202
203/*
204 * HSA CCompute/APU Affinity structure and definitions
205 */
206#define CRAT_CCOMPUTE_FLAGS_ENABLED 0x00000001
207#define CRAT_CCOMPUTE_FLAGS_RESERVED 0xfffffffe
208
209#define CRAT_CCOMPUTE_RESERVED_LENGTH 16
210
211struct crat_subtype_ccompute {
212 uint8_t type;
213 uint8_t length;
214 uint16_t reserved;
215 uint32_t flags;
216 uint32_t processor_id_low;
217 uint8_t sibling_map[CRAT_SIBLINGMAP_SIZE];
218 uint32_t apu_size;
219 uint8_t reserved2[CRAT_CCOMPUTE_RESERVED_LENGTH];
220};
221
222/*
223 * HSA IO Link Affinity structure and definitions
224 */
225#define CRAT_IOLINK_FLAGS_ENABLED 0x00000001
226#define CRAT_IOLINK_FLAGS_COHERENCY 0x00000002
227#define CRAT_IOLINK_FLAGS_RESERVED 0xfffffffc
228
229/*
230 * IO interface types
231 */
232#define CRAT_IOLINK_TYPE_UNDEFINED 0
233#define CRAT_IOLINK_TYPE_HYPERTRANSPORT 1
234#define CRAT_IOLINK_TYPE_PCIEXPRESS 2
235#define CRAT_IOLINK_TYPE_OTHER 3
236#define CRAT_IOLINK_TYPE_MAX 255
237
238#define CRAT_IOLINK_RESERVED_LENGTH 24
239
240struct crat_subtype_iolink {
241 uint8_t type;
242 uint8_t length;
243 uint16_t reserved;
244 uint32_t flags;
245 uint32_t proximity_domain_from;
246 uint32_t proximity_domain_to;
247 uint8_t io_interface_type;
248 uint8_t version_major;
249 uint16_t version_minor;
250 uint32_t minimum_latency;
251 uint32_t maximum_latency;
252 uint32_t minimum_bandwidth_mbs;
253 uint32_t maximum_bandwidth_mbs;
254 uint32_t recommended_transfer_size;
255 uint8_t reserved2[CRAT_IOLINK_RESERVED_LENGTH];
256};
257
258/*
259 * HSA generic sub-type header
260 */
261
262#define CRAT_SUBTYPE_FLAGS_ENABLED 0x00000001
263
264struct crat_subtype_generic {
265 uint8_t type;
266 uint8_t length;
267 uint16_t reserved;
268 uint32_t flags;
269};
270
271/*
272 * Component Locality Distance Information Table (CDIT)
273 */
274#define CDIT_OEMID_LENGTH 6
275#define CDIT_OEMTABLEID_LENGTH 8
276
277struct cdit_header {
278 uint32_t signature;
279 uint32_t length;
280 uint8_t revision;
281 uint8_t checksum;
282 uint8_t oem_id[CDIT_OEMID_LENGTH];
283 uint8_t oem_table_id[CDIT_OEMTABLEID_LENGTH];
284 uint32_t oem_revision;
285 uint32_t creator_id;
286 uint32_t creator_revision;
287 uint32_t total_entries;
288 uint16_t num_domains;
289 uint8_t entry[1];
290};
291
292#pragma pack()
293
294#endif /* KFD_CRAT_H_INCLUDED */
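/*
 * Editor's note: a minimal sketch of walking a CRAT image using only the
 * structures defined above. Each of header->total_entries subtype records
 * begins with the generic {type, length, reserved, flags} prefix, so the
 * cursor advances by each record's own length field. Bounds checking
 * against header->length is omitted for brevity.
 */
static void crat_for_each_subtype(const struct crat_header *header,
		void (*cb)(const struct crat_subtype_generic *sub))
{
	const uint8_t *cursor = (const uint8_t *)(header + 1);
	uint32_t i;

	for (i = 0; i < header->total_entries; i++) {
		const struct crat_subtype_generic *sub =
			(const struct crat_subtype_generic *)cursor;

		if (sub->flags & CRAT_SUBTYPE_FLAGS_ENABLED)
			cb(sub);
		cursor += sub->length;
	}
}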
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
new file mode 100644
index 000000000000..43884ebd4303
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -0,0 +1,308 @@
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#include <linux/amd-iommu.h>
24#include <linux/bsearch.h>
25#include <linux/pci.h>
26#include <linux/slab.h>
27#include "kfd_priv.h"
28#include "kfd_device_queue_manager.h"
29
30#define MQD_SIZE_ALIGNED 768
31
32static const struct kfd_device_info kaveri_device_info = {
33 .max_pasid_bits = 16,
34 .ih_ring_entry_size = 4 * sizeof(uint32_t),
35 .mqd_size_aligned = MQD_SIZE_ALIGNED
36};
37
38struct kfd_deviceid {
39 unsigned short did;
40 const struct kfd_device_info *device_info;
41};
42
43/* Please keep this sorted by increasing device id. */
44static const struct kfd_deviceid supported_devices[] = {
45 { 0x1304, &kaveri_device_info }, /* Kaveri */
46 { 0x1305, &kaveri_device_info }, /* Kaveri */
47 { 0x1306, &kaveri_device_info }, /* Kaveri */
48 { 0x1307, &kaveri_device_info }, /* Kaveri */
49 { 0x1309, &kaveri_device_info }, /* Kaveri */
50 { 0x130A, &kaveri_device_info }, /* Kaveri */
51 { 0x130B, &kaveri_device_info }, /* Kaveri */
52 { 0x130C, &kaveri_device_info }, /* Kaveri */
53 { 0x130D, &kaveri_device_info }, /* Kaveri */
54 { 0x130E, &kaveri_device_info }, /* Kaveri */
55 { 0x130F, &kaveri_device_info }, /* Kaveri */
56 { 0x1310, &kaveri_device_info }, /* Kaveri */
57 { 0x1311, &kaveri_device_info }, /* Kaveri */
58 { 0x1312, &kaveri_device_info }, /* Kaveri */
59 { 0x1313, &kaveri_device_info }, /* Kaveri */
60 { 0x1315, &kaveri_device_info }, /* Kaveri */
61 { 0x1316, &kaveri_device_info }, /* Kaveri */
62 { 0x1317, &kaveri_device_info }, /* Kaveri */
63 { 0x1318, &kaveri_device_info }, /* Kaveri */
64 { 0x131B, &kaveri_device_info }, /* Kaveri */
65 { 0x131C, &kaveri_device_info }, /* Kaveri */
66 { 0x131D, &kaveri_device_info }, /* Kaveri */
67};
68
69static const struct kfd_device_info *lookup_device_info(unsigned short did)
70{
71 size_t i;
72
73 for (i = 0; i < ARRAY_SIZE(supported_devices); i++) {
74 if (supported_devices[i].did == did) {
75 BUG_ON(supported_devices[i].device_info == NULL);
76 return supported_devices[i].device_info;
77 }
78 }
79
80 return NULL;
81}
82
83struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev)
84{
85 struct kfd_dev *kfd;
86
87 const struct kfd_device_info *device_info =
88 lookup_device_info(pdev->device);
89
90 if (!device_info)
91 return NULL;
92
93 kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
94 if (!kfd)
95 return NULL;
96
97 kfd->kgd = kgd;
98 kfd->device_info = device_info;
99 kfd->pdev = pdev;
100 kfd->init_complete = false;
101
102 return kfd;
103}
104
105static bool device_iommu_pasid_init(struct kfd_dev *kfd)
106{
107 const u32 required_iommu_flags = AMD_IOMMU_DEVICE_FLAG_ATS_SUP |
108 AMD_IOMMU_DEVICE_FLAG_PRI_SUP |
109 AMD_IOMMU_DEVICE_FLAG_PASID_SUP;
110
111 struct amd_iommu_device_info iommu_info;
112 unsigned int pasid_limit;
113 int err;
114
115 err = amd_iommu_device_info(kfd->pdev, &iommu_info);
116 if (err < 0) {
117 dev_err(kfd_device,
118 "error getting iommu info. is the iommu enabled?\n");
119 return false;
120 }
121
122 if ((iommu_info.flags & required_iommu_flags) != required_iommu_flags) {
123		dev_err(kfd_device, "required iommu flags missing: ats(%i), pri(%i), pasid(%i)\n",
124 (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_ATS_SUP) != 0,
125 (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PRI_SUP) != 0,
126 (iommu_info.flags & AMD_IOMMU_DEVICE_FLAG_PASID_SUP) != 0);
127 return false;
128 }
129
130 pasid_limit = min_t(unsigned int,
131 (unsigned int)1 << kfd->device_info->max_pasid_bits,
132 iommu_info.max_pasids);
133 /*
134	 * The last pasid is used for kernel queue doorbells;
135	 * in the future it might be used for a kernel thread.
136 */
137 pasid_limit = min_t(unsigned int,
138 pasid_limit,
139 kfd->doorbell_process_limit - 1);
140
141 err = amd_iommu_init_device(kfd->pdev, pasid_limit);
142 if (err < 0) {
143 dev_err(kfd_device, "error initializing iommu device\n");
144 return false;
145 }
146
147 if (!kfd_set_pasid_limit(pasid_limit)) {
148 dev_err(kfd_device, "error setting pasid limit\n");
149 amd_iommu_free_device(kfd->pdev);
150 return false;
151 }
152
153 return true;
154}
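/*
 * Editor's worked example: with max_pasid_bits == 16 (the Kaveri value
 * above), the first clamp allows up to 1 << 16 == 65536 PASIDs; the
 * IOMMU's max_pasids and then doorbell_process_limit - 1 can each only
 * lower that. So if the IOMMU reports 32768 and the doorbell aperture
 * supports 512 processes, the limit handed to amd_iommu_init_device()
 * is 511.
 */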
155
156static void iommu_pasid_shutdown_callback(struct pci_dev *pdev, int pasid)
157{
158 struct kfd_dev *dev = kfd_device_by_pci_dev(pdev);
159
160 if (dev)
161 kfd_unbind_process_from_device(dev, pasid);
162}
163
164bool kgd2kfd_device_init(struct kfd_dev *kfd,
165 const struct kgd2kfd_shared_resources *gpu_resources)
166{
167 unsigned int size;
168
169 kfd->shared_resources = *gpu_resources;
170
171 /* calculate max size of mqds needed for queues */
172 size = max_num_of_processes *
173 max_num_of_queues_per_process *
174 kfd->device_info->mqd_size_aligned;
175
176 /* add another 512KB for all other allocations on gart */
177 size += 512 * 1024;
178
179 if (kfd2kgd->init_sa_manager(kfd->kgd, size)) {
180 dev_err(kfd_device,
181 "Error initializing sa manager for device (%x:%x)\n",
182 kfd->pdev->vendor, kfd->pdev->device);
183 goto out;
184 }
185
186 kfd_doorbell_init(kfd);
187
188 if (kfd_topology_add_device(kfd) != 0) {
189 dev_err(kfd_device,
190 "Error adding device (%x:%x) to topology\n",
191 kfd->pdev->vendor, kfd->pdev->device);
192 goto kfd_topology_add_device_error;
193 }
194
195 if (kfd_interrupt_init(kfd)) {
196 dev_err(kfd_device,
197 "Error initializing interrupts for device (%x:%x)\n",
198 kfd->pdev->vendor, kfd->pdev->device);
199 goto kfd_interrupt_error;
200 }
201
202 if (!device_iommu_pasid_init(kfd)) {
203 dev_err(kfd_device,
204 "Error initializing iommuv2 for device (%x:%x)\n",
205 kfd->pdev->vendor, kfd->pdev->device);
206 goto device_iommu_pasid_error;
207 }
208 amd_iommu_set_invalidate_ctx_cb(kfd->pdev,
209 iommu_pasid_shutdown_callback);
210
211 kfd->dqm = device_queue_manager_init(kfd);
212 if (!kfd->dqm) {
213 dev_err(kfd_device,
214 "Error initializing queue manager for device (%x:%x)\n",
215 kfd->pdev->vendor, kfd->pdev->device);
216 goto device_queue_manager_error;
217 }
218
219 if (kfd->dqm->start(kfd->dqm) != 0) {
220 dev_err(kfd_device,
221			"Error starting queue manager for device (%x:%x)\n",
222 kfd->pdev->vendor, kfd->pdev->device);
223 goto dqm_start_error;
224 }
225
226 kfd->init_complete = true;
227 dev_info(kfd_device, "added device (%x:%x)\n", kfd->pdev->vendor,
228 kfd->pdev->device);
229
230 pr_debug("kfd: Starting kfd with the following scheduling policy %d\n",
231 sched_policy);
232
233 goto out;
234
235dqm_start_error:
236 device_queue_manager_uninit(kfd->dqm);
237device_queue_manager_error:
238 amd_iommu_free_device(kfd->pdev);
239device_iommu_pasid_error:
240 kfd_interrupt_exit(kfd);
241kfd_interrupt_error:
242 kfd_topology_remove_device(kfd);
243kfd_topology_add_device_error:
244 kfd2kgd->fini_sa_manager(kfd->kgd);
245 dev_err(kfd_device,
246 "device (%x:%x) NOT added due to errors\n",
247 kfd->pdev->vendor, kfd->pdev->device);
248out:
249 return kfd->init_complete;
250}
251
252void kgd2kfd_device_exit(struct kfd_dev *kfd)
253{
254 if (kfd->init_complete) {
255 device_queue_manager_uninit(kfd->dqm);
256 amd_iommu_free_device(kfd->pdev);
257 kfd_interrupt_exit(kfd);
258 kfd_topology_remove_device(kfd);
259 }
260
261 kfree(kfd);
262}
263
264void kgd2kfd_suspend(struct kfd_dev *kfd)
265{
266 BUG_ON(kfd == NULL);
267
268 if (kfd->init_complete) {
269 kfd->dqm->stop(kfd->dqm);
270 amd_iommu_set_invalidate_ctx_cb(kfd->pdev, NULL);
271 amd_iommu_free_device(kfd->pdev);
272 }
273}
274
275int kgd2kfd_resume(struct kfd_dev *kfd)
276{
277 unsigned int pasid_limit;
278 int err;
279
280 BUG_ON(kfd == NULL);
281
282 pasid_limit = kfd_get_pasid_limit();
283
284 if (kfd->init_complete) {
285 err = amd_iommu_init_device(kfd->pdev, pasid_limit);
286 if (err < 0)
287 return -ENXIO;
288 amd_iommu_set_invalidate_ctx_cb(kfd->pdev,
289 iommu_pasid_shutdown_callback);
290 kfd->dqm->start(kfd->dqm);
291 }
292
293 return 0;
294}
295
296/* This is called directly from KGD at ISR. */
297void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
298{
299 if (kfd->init_complete) {
300 spin_lock(&kfd->interrupt_lock);
301
302 if (kfd->interrupts_active
303 && enqueue_ih_ring_entry(kfd, ih_ring_entry))
304 schedule_work(&kfd->interrupt_work);
305
306 spin_unlock(&kfd->interrupt_lock);
307 }
308}
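/*
 * Editor's note: kgd2kfd_interrupt() above runs in ISR context, so it
 * only enqueues the raw entry under a spinlock and defers the real
 * handling to process context via schedule_work(). A generic sketch of
 * that top-half/bottom-half split, with illustrative names
 * (enqueue_ih_ring_entry() and interrupt_work are defined elsewhere in
 * this series):
 */
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>

struct demo_ih {
	spinlock_t lock;		/* taken from hard-IRQ context */
	struct work_struct work;	/* bottom half, may sleep */
	bool active;
};

static void demo_ih_worker(struct work_struct *work)
{
	struct demo_ih *ih = container_of(work, struct demo_ih, work);

	/* pop and process the entries queued by the top half */
	(void)ih;
}

static void demo_ih_init(struct demo_ih *ih)
{
	spin_lock_init(&ih->lock);
	INIT_WORK(&ih->work, demo_ih_worker);
	ih->active = true;
}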
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
new file mode 100644
index 000000000000..924e90c072e5
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -0,0 +1,1062 @@
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/slab.h>
25#include <linux/list.h>
26#include <linux/types.h>
27#include <linux/printk.h>
28#include <linux/bitops.h>
29#include "kfd_priv.h"
30#include "kfd_device_queue_manager.h"
31#include "kfd_mqd_manager.h"
32#include "cik_regs.h"
33#include "kfd_kernel_queue.h"
34#include "../../radeon/cik_reg.h"
35
36/* Size of the per-pipe EOP queue */
37#define CIK_HPD_EOP_BYTES_LOG2 11
38#define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)
39
40static bool is_mem_initialized;
41
42static int init_memory(struct device_queue_manager *dqm);
43static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
44 unsigned int pasid, unsigned int vmid);
45
46static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
47 struct queue *q,
48 struct qcm_process_device *qpd);
49static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock);
50static int destroy_queues_cpsch(struct device_queue_manager *dqm, bool lock);
51
52
53static inline unsigned int get_pipes_num(struct device_queue_manager *dqm)
54{
55 BUG_ON(!dqm || !dqm->dev);
56 return dqm->dev->shared_resources.compute_pipe_count;
57}
58
59static inline unsigned int get_first_pipe(struct device_queue_manager *dqm)
60{
61 BUG_ON(!dqm);
62 return dqm->dev->shared_resources.first_compute_pipe;
63}
64
65static inline unsigned int get_pipes_num_cpsch(void)
66{
67 return PIPE_PER_ME_CP_SCHEDULING;
68}
69
70static inline unsigned int
71get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
72{
73 uint32_t nybble;
74
75 nybble = (pdd->lds_base >> 60) & 0x0E;
76
77 return nybble;
78
79}
80
81static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
82{
83 unsigned int shared_base;
84
85 shared_base = (pdd->lds_base >> 16) & 0xFF;
86
87 return shared_base;
88}
89
90static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble);
91static void init_process_memory(struct device_queue_manager *dqm,
92 struct qcm_process_device *qpd)
93{
94 struct kfd_process_device *pdd;
95 unsigned int temp;
96
97 BUG_ON(!dqm || !qpd);
98
99 pdd = qpd_to_pdd(qpd);
100
101 /* check if sh_mem_config register already configured */
102 if (qpd->sh_mem_config == 0) {
103 qpd->sh_mem_config =
104 ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED) |
105 DEFAULT_MTYPE(MTYPE_NONCACHED) |
106 APE1_MTYPE(MTYPE_NONCACHED);
107 qpd->sh_mem_ape1_limit = 0;
108 qpd->sh_mem_ape1_base = 0;
109 }
110
111 if (qpd->pqm->process->is_32bit_user_mode) {
112 temp = get_sh_mem_bases_32(pdd);
113 qpd->sh_mem_bases = SHARED_BASE(temp);
114 qpd->sh_mem_config |= PTR32;
115 } else {
116 temp = get_sh_mem_bases_nybble_64(pdd);
117 qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
118 }
119
120 pr_debug("kfd: is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
121 qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases);
122}
123
124static void program_sh_mem_settings(struct device_queue_manager *dqm,
125 struct qcm_process_device *qpd)
126{
127 return kfd2kgd->program_sh_mem_settings(dqm->dev->kgd, qpd->vmid,
128 qpd->sh_mem_config,
129 qpd->sh_mem_ape1_base,
130 qpd->sh_mem_ape1_limit,
131 qpd->sh_mem_bases);
132}
133
134static int allocate_vmid(struct device_queue_manager *dqm,
135 struct qcm_process_device *qpd,
136 struct queue *q)
137{
138 int bit, allocated_vmid;
139
140 if (dqm->vmid_bitmap == 0)
141 return -ENOMEM;
142
143 bit = find_first_bit((unsigned long *)&dqm->vmid_bitmap, CIK_VMID_NUM);
144 clear_bit(bit, (unsigned long *)&dqm->vmid_bitmap);
145
146	/* Kaveri KFD VMIDs start from VMID 8 */
147 allocated_vmid = bit + KFD_VMID_START_OFFSET;
148 pr_debug("kfd: vmid allocation %d\n", allocated_vmid);
149 qpd->vmid = allocated_vmid;
150 q->properties.vmid = allocated_vmid;
151
152 set_pasid_vmid_mapping(dqm, q->process->pasid, q->properties.vmid);
153 program_sh_mem_settings(dqm, qpd);
154
155 return 0;
156}
157
158static void deallocate_vmid(struct device_queue_manager *dqm,
159 struct qcm_process_device *qpd,
160 struct queue *q)
161{
162 int bit = qpd->vmid - KFD_VMID_START_OFFSET;
163
164 set_bit(bit, (unsigned long *)&dqm->vmid_bitmap);
165 qpd->vmid = 0;
166 q->properties.vmid = 0;
167}
168
169static int create_queue_nocpsch(struct device_queue_manager *dqm,
170 struct queue *q,
171 struct qcm_process_device *qpd,
172 int *allocated_vmid)
173{
174 int retval;
175
176 BUG_ON(!dqm || !q || !qpd || !allocated_vmid);
177
178 pr_debug("kfd: In func %s\n", __func__);
179 print_queue(q);
180
181 mutex_lock(&dqm->lock);
182
183 if (list_empty(&qpd->queues_list)) {
184 retval = allocate_vmid(dqm, qpd, q);
185 if (retval != 0) {
186 mutex_unlock(&dqm->lock);
187 return retval;
188 }
189 }
190 *allocated_vmid = qpd->vmid;
191 q->properties.vmid = qpd->vmid;
192
193 retval = create_compute_queue_nocpsch(dqm, q, qpd);
194
195 if (retval != 0) {
196 if (list_empty(&qpd->queues_list)) {
197 deallocate_vmid(dqm, qpd, q);
198 *allocated_vmid = 0;
199 }
200 mutex_unlock(&dqm->lock);
201 return retval;
202 }
203
204 list_add(&q->list, &qpd->queues_list);
205 dqm->queue_count++;
206
207 mutex_unlock(&dqm->lock);
208 return 0;
209}
210
211static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
212{
213 bool set;
214 int pipe, bit;
215
216 set = false;
217
218 for (pipe = dqm->next_pipe_to_allocate; pipe < get_pipes_num(dqm);
219 pipe = (pipe + 1) % get_pipes_num(dqm)) {
220 if (dqm->allocated_queues[pipe] != 0) {
221 bit = find_first_bit(
222 (unsigned long *)&dqm->allocated_queues[pipe],
223 QUEUES_PER_PIPE);
224
225 clear_bit(bit,
226 (unsigned long *)&dqm->allocated_queues[pipe]);
227 q->pipe = pipe;
228 q->queue = bit;
229 set = true;
230 break;
231 }
232 }
233
234	if (!set)
235 return -EBUSY;
236
237 pr_debug("kfd: DQM %s hqd slot - pipe (%d) queue(%d)\n",
238 __func__, q->pipe, q->queue);
239	/* advance the round-robin ("horizontal") hqd allocation across pipes */
240 dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_num(dqm);
241
242 return 0;
243}
244
245static inline void deallocate_hqd(struct device_queue_manager *dqm,
246 struct queue *q)
247{
248 set_bit(q->queue, (unsigned long *)&dqm->allocated_queues[q->pipe]);
249}
250
251static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
252 struct queue *q,
253 struct qcm_process_device *qpd)
254{
255 int retval;
256 struct mqd_manager *mqd;
257
258 BUG_ON(!dqm || !q || !qpd);
259
260 mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_COMPUTE);
261 if (mqd == NULL)
262 return -ENOMEM;
263
264 retval = allocate_hqd(dqm, q);
265 if (retval != 0)
266 return retval;
267
268 retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
269 &q->gart_mqd_addr, &q->properties);
270 if (retval != 0) {
271 deallocate_hqd(dqm, q);
272 return retval;
273 }
274
275 return 0;
276}
277
278static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
279 struct qcm_process_device *qpd,
280 struct queue *q)
281{
282 int retval;
283 struct mqd_manager *mqd;
284
285 BUG_ON(!dqm || !q || !q->mqd || !qpd);
286
287 retval = 0;
288
289 pr_debug("kfd: In Func %s\n", __func__);
290
291 mutex_lock(&dqm->lock);
292 mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_COMPUTE);
293 if (mqd == NULL) {
294 retval = -ENOMEM;
295 goto out;
296 }
297
298 retval = mqd->destroy_mqd(mqd, q->mqd,
299 KFD_PREEMPT_TYPE_WAVEFRONT,
300 QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS,
301 q->pipe, q->queue);
302
303 if (retval != 0)
304 goto out;
305
306 deallocate_hqd(dqm, q);
307
308 mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
309
310 list_del(&q->list);
311 if (list_empty(&qpd->queues_list))
312 deallocate_vmid(dqm, qpd, q);
313 dqm->queue_count--;
314out:
315 mutex_unlock(&dqm->lock);
316 return retval;
317}
318
319static int update_queue(struct device_queue_manager *dqm, struct queue *q)
320{
321 int retval;
322 struct mqd_manager *mqd;
323
324 BUG_ON(!dqm || !q || !q->mqd);
325
326 mutex_lock(&dqm->lock);
327 mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_COMPUTE);
328 if (mqd == NULL) {
329 mutex_unlock(&dqm->lock);
330 return -ENOMEM;
331 }
332
333 retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
334	if (q->properties.is_active)
335 dqm->queue_count++;
336 else
337 dqm->queue_count--;
338
339 if (sched_policy != KFD_SCHED_POLICY_NO_HWS)
340 retval = execute_queues_cpsch(dqm, false);
341
342 mutex_unlock(&dqm->lock);
343 return retval;
344}
345
346static struct mqd_manager *get_mqd_manager_nocpsch(
347 struct device_queue_manager *dqm, enum KFD_MQD_TYPE type)
348{
349 struct mqd_manager *mqd;
350
351 BUG_ON(!dqm || type >= KFD_MQD_TYPE_MAX);
352
353 pr_debug("kfd: In func %s mqd type %d\n", __func__, type);
354
355 mqd = dqm->mqds[type];
356 if (!mqd) {
357 mqd = mqd_manager_init(type, dqm->dev);
358 if (mqd == NULL)
359			pr_err("kfd: mqd manager is NULL\n");
360 dqm->mqds[type] = mqd;
361 }
362
363 return mqd;
364}
365
366static int register_process_nocpsch(struct device_queue_manager *dqm,
367 struct qcm_process_device *qpd)
368{
369 struct device_process_node *n;
370
371 BUG_ON(!dqm || !qpd);
372
373 pr_debug("kfd: In func %s\n", __func__);
374
375 n = kzalloc(sizeof(struct device_process_node), GFP_KERNEL);
376 if (!n)
377 return -ENOMEM;
378
379 n->qpd = qpd;
380
381 mutex_lock(&dqm->lock);
382 list_add(&n->list, &dqm->queues);
383
384 init_process_memory(dqm, qpd);
385 dqm->processes_count++;
386
387 mutex_unlock(&dqm->lock);
388
389 return 0;
390}
391
392static int unregister_process_nocpsch(struct device_queue_manager *dqm,
393 struct qcm_process_device *qpd)
394{
395 int retval;
396 struct device_process_node *cur, *next;
397
398 BUG_ON(!dqm || !qpd);
399
400 BUG_ON(!list_empty(&qpd->queues_list));
401
402 pr_debug("kfd: In func %s\n", __func__);
403
404 retval = 0;
405 mutex_lock(&dqm->lock);
406
407 list_for_each_entry_safe(cur, next, &dqm->queues, list) {
408 if (qpd == cur->qpd) {
409 list_del(&cur->list);
410 kfree(cur);
411 dqm->processes_count--;
412 goto out;
413 }
414 }
415 /* qpd not found in dqm list */
416 retval = 1;
417out:
418 mutex_unlock(&dqm->lock);
419 return retval;
420}
421
422static int
423set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid,
424 unsigned int vmid)
425{
426 uint32_t pasid_mapping;
427
428 pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
429 ATC_VMID_PASID_MAPPING_VALID;
430 return kfd2kgd->set_pasid_vmid_mapping(dqm->dev->kgd, pasid_mapping,
431 vmid);
432}
433
434static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
435{
436 /* In 64-bit mode, we can only control the top 3 bits of the LDS,
437 * scratch and GPUVM apertures.
438 * The hardware fills in the remaining 59 bits according to the
439 * following pattern:
440 * LDS: X0000000'00000000 - X0000001'00000000 (4GB)
441 * Scratch: X0000001'00000000 - X0000002'00000000 (4GB)
442 * GPUVM: Y0010000'00000000 - Y0020000'00000000 (1TB)
443 *
444 * (where X/Y is the configurable nybble with the low-bit 0)
445 *
446 * LDS and scratch will have the same top nybble programmed in the
447 * top 3 bits of SH_MEM_BASES.PRIVATE_BASE.
448 * GPUVM can have a different top nybble programmed in the
449 * top 3 bits of SH_MEM_BASES.SHARED_BASE.
450 * We don't bother to support different top nybbles
451 * for LDS/Scratch and GPUVM.
452 */
453
454 BUG_ON((top_address_nybble & 1) || top_address_nybble > 0xE ||
455 top_address_nybble == 0);
456
457 return PRIVATE_BASE(top_address_nybble << 12) |
458 SHARED_BASE(top_address_nybble << 12);
459}
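/*
 * Editor's worked example: top_address_nybble == 0x2 satisfies the
 * BUG_ON above (nonzero, low bit clear, <= 0xE) and, per the pattern in
 * the preceding comment, yields
 *	LDS:     20000000'00000000 - 20000001'00000000
 *	Scratch: 20000001'00000000 - 20000002'00000000
 *	GPUVM:   20010000'00000000 - 20020000'00000000
 * i.e. the nybble lands in the top bits of both PRIVATE_BASE and
 * SHARED_BASE via the (top_address_nybble << 12) fields returned above.
 */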
460
461static int init_memory(struct device_queue_manager *dqm)
462{
463 int i, retval;
464
465 for (i = 8; i < 16; i++)
466 set_pasid_vmid_mapping(dqm, 0, i);
467
468 retval = kfd2kgd->init_memory(dqm->dev->kgd);
469 if (retval == 0)
470 is_mem_initialized = true;
471 return retval;
472}
473
474
475static int init_pipelines(struct device_queue_manager *dqm,
476 unsigned int pipes_num, unsigned int first_pipe)
477{
478 void *hpdptr;
479 struct mqd_manager *mqd;
480 unsigned int i, err, inx;
481 uint64_t pipe_hpd_addr;
482
483 BUG_ON(!dqm || !dqm->dev);
484
485 pr_debug("kfd: In func %s\n", __func__);
486
487 /*
488 * Allocate memory for the HPDs. This is hardware-owned per-pipe data.
489 * The driver never accesses this memory after zeroing it.
490 * It doesn't even have to be saved/restored on suspend/resume
491 * because it contains no data when there are no active queues.
492 */
493
494 err = kfd2kgd->allocate_mem(dqm->dev->kgd,
495 CIK_HPD_EOP_BYTES * pipes_num,
496 PAGE_SIZE,
497 KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
498 (struct kgd_mem **) &dqm->pipeline_mem);
499
500 if (err) {
501		pr_err("kfd: error allocating vidmem for %d pipes\n",
502				pipes_num);
503 return -ENOMEM;
504 }
505
506 hpdptr = dqm->pipeline_mem->cpu_ptr;
507 dqm->pipelines_addr = dqm->pipeline_mem->gpu_addr;
508
509 memset(hpdptr, 0, CIK_HPD_EOP_BYTES * pipes_num);
510
511 mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_COMPUTE);
512 if (mqd == NULL) {
513 kfd2kgd->free_mem(dqm->dev->kgd,
514 (struct kgd_mem *) dqm->pipeline_mem);
515 return -ENOMEM;
516 }
517
518 for (i = 0; i < pipes_num; i++) {
519 inx = i + first_pipe;
520 pipe_hpd_addr = dqm->pipelines_addr + i * CIK_HPD_EOP_BYTES;
521 pr_debug("kfd: pipeline address %llX\n", pipe_hpd_addr);
522		/* EOP queue size field = log2(bytes/4) - 1 = CIK_HPD_EOP_BYTES_LOG2 - 3 */
523 kfd2kgd->init_pipeline(dqm->dev->kgd, i,
524 CIK_HPD_EOP_BYTES_LOG2 - 3, pipe_hpd_addr);
525 }
526
527 return 0;
528}
529
530
531static int init_scheduler(struct device_queue_manager *dqm)
532{
533 int retval;
534
535 BUG_ON(!dqm);
536
537 pr_debug("kfd: In %s\n", __func__);
538
539 retval = init_pipelines(dqm, get_pipes_num(dqm), KFD_DQM_FIRST_PIPE);
540 if (retval != 0)
541 return retval;
542
543 retval = init_memory(dqm);
544
545 return retval;
546}
547
548static int initialize_nocpsch(struct device_queue_manager *dqm)
549{
550 int i;
551
552 BUG_ON(!dqm);
553
554 pr_debug("kfd: In func %s num of pipes: %d\n",
555 __func__, get_pipes_num(dqm));
556
557 mutex_init(&dqm->lock);
558 INIT_LIST_HEAD(&dqm->queues);
559 dqm->queue_count = dqm->next_pipe_to_allocate = 0;
560 dqm->allocated_queues = kcalloc(get_pipes_num(dqm),
561 sizeof(unsigned int), GFP_KERNEL);
562 if (!dqm->allocated_queues) {
563 mutex_destroy(&dqm->lock);
564 return -ENOMEM;
565 }
566
567 for (i = 0; i < get_pipes_num(dqm); i++)
568 dqm->allocated_queues[i] = (1 << QUEUES_PER_PIPE) - 1;
569
570 dqm->vmid_bitmap = (1 << VMID_PER_DEVICE) - 1;
571
572 init_scheduler(dqm);
573 return 0;
574}
575
576static void uninitialize_nocpsch(struct device_queue_manager *dqm)
577{
578 int i;
579
580 BUG_ON(!dqm);
581
582 BUG_ON(dqm->queue_count > 0 || dqm->processes_count > 0);
583
584 kfree(dqm->allocated_queues);
585	for (i = 0; i < KFD_MQD_TYPE_MAX; i++)
586 kfree(dqm->mqds[i]);
587 mutex_destroy(&dqm->lock);
588 kfd2kgd->free_mem(dqm->dev->kgd,
589 (struct kgd_mem *) dqm->pipeline_mem);
590}
591
592static int start_nocpsch(struct device_queue_manager *dqm)
593{
594 return 0;
595}
596
597static int stop_nocpsch(struct device_queue_manager *dqm)
598{
599 return 0;
600}
601
602/*
603 * Device Queue Manager implementation for cp scheduler
604 */
605
606static int set_sched_resources(struct device_queue_manager *dqm)
607{
608 struct scheduling_resources res;
609 unsigned int queue_num, queue_mask;
610
611 BUG_ON(!dqm);
612
613 pr_debug("kfd: In func %s\n", __func__);
614
615 queue_num = get_pipes_num_cpsch() * QUEUES_PER_PIPE;
616 queue_mask = (1 << queue_num) - 1;
617 res.vmid_mask = (1 << VMID_PER_DEVICE) - 1;
618 res.vmid_mask <<= KFD_VMID_START_OFFSET;
619 res.queue_mask = queue_mask << (get_first_pipe(dqm) * QUEUES_PER_PIPE);
620 res.gws_mask = res.oac_mask = res.gds_heap_base =
621 res.gds_heap_size = 0;
622
623 pr_debug("kfd: scheduling resources:\n"
624 " vmid mask: 0x%8X\n"
625 " queue mask: 0x%8llX\n",
626 res.vmid_mask, res.queue_mask);
627
628 return pm_send_set_resources(&dqm->packets, &res);
629}
630
631static int initialize_cpsch(struct device_queue_manager *dqm)
632{
633 int retval;
634
635 BUG_ON(!dqm);
636
637 pr_debug("kfd: In func %s num of pipes: %d\n",
638 __func__, get_pipes_num_cpsch());
639
640 mutex_init(&dqm->lock);
641 INIT_LIST_HEAD(&dqm->queues);
642 dqm->queue_count = dqm->processes_count = 0;
643 dqm->active_runlist = false;
644 retval = init_pipelines(dqm, get_pipes_num(dqm), 0);
645 if (retval != 0)
646 goto fail_init_pipelines;
647
648 return 0;
649
650fail_init_pipelines:
651 mutex_destroy(&dqm->lock);
652 return retval;
653}
654
655static int start_cpsch(struct device_queue_manager *dqm)
656{
657 struct device_process_node *node;
658 int retval;
659
660 BUG_ON(!dqm);
661
662 retval = 0;
663
664 retval = pm_init(&dqm->packets, dqm);
665 if (retval != 0)
666 goto fail_packet_manager_init;
667
668 retval = set_sched_resources(dqm);
669 if (retval != 0)
670 goto fail_set_sched_resources;
671
672 pr_debug("kfd: allocating fence memory\n");
673
674 /* allocate fence memory on the gart */
675 retval = kfd2kgd->allocate_mem(dqm->dev->kgd,
676 sizeof(*dqm->fence_addr),
677 32,
678 KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
679 (struct kgd_mem **) &dqm->fence_mem);
680
681 if (retval != 0)
682 goto fail_allocate_vidmem;
683
684 dqm->fence_addr = dqm->fence_mem->cpu_ptr;
685 dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;
686
687 list_for_each_entry(node, &dqm->queues, list)
688 if (node->qpd->pqm->process && dqm->dev)
689 kfd_bind_process_to_device(dqm->dev,
690 node->qpd->pqm->process);
691
692 execute_queues_cpsch(dqm, true);
693
694 return 0;
695fail_allocate_vidmem:
696fail_set_sched_resources:
697 pm_uninit(&dqm->packets);
698fail_packet_manager_init:
699 return retval;
700}
701
702static int stop_cpsch(struct device_queue_manager *dqm)
703{
704 struct device_process_node *node;
705 struct kfd_process_device *pdd;
706
707 BUG_ON(!dqm);
708
709 destroy_queues_cpsch(dqm, true);
710
711 list_for_each_entry(node, &dqm->queues, list) {
712 pdd = qpd_to_pdd(node->qpd);
713 pdd->bound = false;
714 }
715 kfd2kgd->free_mem(dqm->dev->kgd,
716 (struct kgd_mem *) dqm->fence_mem);
717 pm_uninit(&dqm->packets);
718
719 return 0;
720}
721
722static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
723 struct kernel_queue *kq,
724 struct qcm_process_device *qpd)
725{
726 BUG_ON(!dqm || !kq || !qpd);
727
728 pr_debug("kfd: In func %s\n", __func__);
729
730 mutex_lock(&dqm->lock);
731 list_add(&kq->list, &qpd->priv_queue_list);
732 dqm->queue_count++;
733 qpd->is_debug = true;
734 execute_queues_cpsch(dqm, false);
735 mutex_unlock(&dqm->lock);
736
737 return 0;
738}
739
740static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
741 struct kernel_queue *kq,
742 struct qcm_process_device *qpd)
743{
744 BUG_ON(!dqm || !kq);
745
746 pr_debug("kfd: In %s\n", __func__);
747
748 mutex_lock(&dqm->lock);
749 destroy_queues_cpsch(dqm, false);
750 list_del(&kq->list);
751 dqm->queue_count--;
752 qpd->is_debug = false;
753 execute_queues_cpsch(dqm, false);
754 mutex_unlock(&dqm->lock);
755}
756
757static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
758 struct qcm_process_device *qpd, int *allocate_vmid)
759{
760 int retval;
761 struct mqd_manager *mqd;
762
763 BUG_ON(!dqm || !q || !qpd);
764
765 retval = 0;
766
767 if (allocate_vmid)
768 *allocate_vmid = 0;
769
770 mutex_lock(&dqm->lock);
771
772 mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_CP);
773 if (mqd == NULL) {
774 mutex_unlock(&dqm->lock);
775 return -ENOMEM;
776 }
777
778 retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
779 &q->gart_mqd_addr, &q->properties);
780 if (retval != 0)
781 goto out;
782
783 list_add(&q->list, &qpd->queues_list);
784 if (q->properties.is_active) {
785 dqm->queue_count++;
786 retval = execute_queues_cpsch(dqm, false);
787 }
788
789out:
790 mutex_unlock(&dqm->lock);
791 return retval;
792}
793
794static int fence_wait_timeout(unsigned int *fence_addr,
795 unsigned int fence_value,
796 unsigned long timeout)
797{
798 BUG_ON(!fence_addr);
799 timeout += jiffies;
800
801 while (*fence_addr != fence_value) {
802 if (time_after(jiffies, timeout)) {
803 pr_err("kfd: qcm fence wait loop timeout expired\n");
804 return -ETIME;
805 }
806 cpu_relax();
807 }
808
809 return 0;
810}
811
812static int destroy_queues_cpsch(struct device_queue_manager *dqm, bool lock)
813{
814 int retval;
815
816 BUG_ON(!dqm);
817
818 retval = 0;
819
820 if (lock)
821 mutex_lock(&dqm->lock);
822	if (!dqm->active_runlist)
823 goto out;
824 retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
825 KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES, 0, false, 0);
826 if (retval != 0)
827 goto out;
828
829 *dqm->fence_addr = KFD_FENCE_INIT;
830 pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
831 KFD_FENCE_COMPLETED);
832	/* wait for the fence to be written, up to the preemption timeout */
833 fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
834 QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS);
835 pm_release_ib(&dqm->packets);
836 dqm->active_runlist = false;
837
838out:
839 if (lock)
840 mutex_unlock(&dqm->lock);
841 return retval;
842}
843
844static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock)
845{
846 int retval;
847
848 BUG_ON(!dqm);
849
850 if (lock)
851 mutex_lock(&dqm->lock);
852
853 retval = destroy_queues_cpsch(dqm, false);
854 if (retval != 0) {
855		pr_err("kfd: the cp might be in an unrecoverable state due to an unsuccessful queue preemption\n");
856 goto out;
857 }
858
859 if (dqm->queue_count <= 0 || dqm->processes_count <= 0) {
860 retval = 0;
861 goto out;
862 }
863
864 if (dqm->active_runlist) {
865 retval = 0;
866 goto out;
867 }
868
869 retval = pm_send_runlist(&dqm->packets, &dqm->queues);
870 if (retval != 0) {
871		pr_err("kfd: failed to execute runlist\n");
872 goto out;
873 }
874 dqm->active_runlist = true;
875
876out:
877 if (lock)
878 mutex_unlock(&dqm->lock);
879 return retval;
880}
881
882static int destroy_queue_cpsch(struct device_queue_manager *dqm,
883 struct qcm_process_device *qpd,
884 struct queue *q)
885{
886 int retval;
887 struct mqd_manager *mqd;
888
889 BUG_ON(!dqm || !qpd || !q);
890
891 retval = 0;
892
893 /* remove queue from list to prevent rescheduling after preemption */
894 mutex_lock(&dqm->lock);
895
896 mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_CP);
897 if (!mqd) {
898 retval = -ENOMEM;
899 goto failed;
900 }
901
902 list_del(&q->list);
903 dqm->queue_count--;
904
905 execute_queues_cpsch(dqm, false);
906
907 mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
908
909 mutex_unlock(&dqm->lock);
910
911 return 0;
912
913failed:
914 mutex_unlock(&dqm->lock);
915 return retval;
916}
917
918/*
919 * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
920 * stay in user mode.
921 */
922#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
923/* APE1 limit is inclusive and 64K aligned. */
924#define APE1_LIMIT_ALIGNMENT 0xFFFF
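/*
 * Editor's worked example: base = 0x100000000 with size = 0x10000 is
 * accepted by set_cache_memory_policy() below: the base is 64K aligned
 * and below the user-mode boundary, so base & APE1_FIXED_BITS_MASK == 0;
 * the limit, 0x10000FFFF, masks to APE1_LIMIT_ALIGNMENT. The registers
 * then get sh_mem_ape1_base = base >> 16 = 0x10000 and
 * sh_mem_ape1_limit = limit >> 16 = 0x10000.
 */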
925
926static bool set_cache_memory_policy(struct device_queue_manager *dqm,
927 struct qcm_process_device *qpd,
928 enum cache_policy default_policy,
929 enum cache_policy alternate_policy,
930 void __user *alternate_aperture_base,
931 uint64_t alternate_aperture_size)
932{
933 uint32_t default_mtype;
934 uint32_t ape1_mtype;
935
936 pr_debug("kfd: In func %s\n", __func__);
937
938 mutex_lock(&dqm->lock);
939
940 if (alternate_aperture_size == 0) {
941 /* base > limit disables APE1 */
942 qpd->sh_mem_ape1_base = 1;
943 qpd->sh_mem_ape1_limit = 0;
944 } else {
945 /*
946 * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
947 * SH_MEM_APE1_BASE[31:0], 0x0000 }
948 * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
949 * SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
950 * Verify that the base and size parameters can be
951 * represented in this format and convert them.
952 * Additionally restrict APE1 to user-mode addresses.
953 */
954
955 uint64_t base = (uintptr_t)alternate_aperture_base;
956 uint64_t limit = base + alternate_aperture_size - 1;
957
958 if (limit <= base)
959 goto out;
960
961 if ((base & APE1_FIXED_BITS_MASK) != 0)
962 goto out;
963
964 if ((limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT)
965 goto out;
966
967 qpd->sh_mem_ape1_base = base >> 16;
968 qpd->sh_mem_ape1_limit = limit >> 16;
969 }
970
971 default_mtype = (default_policy == cache_policy_coherent) ?
972 MTYPE_NONCACHED :
973 MTYPE_CACHED;
974
975 ape1_mtype = (alternate_policy == cache_policy_coherent) ?
976 MTYPE_NONCACHED :
977 MTYPE_CACHED;
978
979 qpd->sh_mem_config = (qpd->sh_mem_config & PTR32)
980 | ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED)
981 | DEFAULT_MTYPE(default_mtype)
982 | APE1_MTYPE(ape1_mtype);
983
984 if ((sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
985 program_sh_mem_settings(dqm, qpd);
986
987 pr_debug("kfd: sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
988 qpd->sh_mem_config, qpd->sh_mem_ape1_base,
989 qpd->sh_mem_ape1_limit);
990
991 mutex_unlock(&dqm->lock);
992 return true;
993
994out:
995 mutex_unlock(&dqm->lock);
996 return false;
997}
998
999struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
1000{
1001 struct device_queue_manager *dqm;
1002
1003 BUG_ON(!dev);
1004
1005 dqm = kzalloc(sizeof(struct device_queue_manager), GFP_KERNEL);
1006 if (!dqm)
1007 return NULL;
1008
1009 dqm->dev = dev;
1010 switch (sched_policy) {
1011 case KFD_SCHED_POLICY_HWS:
1012 case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
1013 /* initialize dqm for cp scheduling */
1014 dqm->create_queue = create_queue_cpsch;
1015 dqm->initialize = initialize_cpsch;
1016 dqm->start = start_cpsch;
1017 dqm->stop = stop_cpsch;
1018 dqm->destroy_queue = destroy_queue_cpsch;
1019 dqm->update_queue = update_queue;
1020 dqm->get_mqd_manager = get_mqd_manager_nocpsch;
1021 dqm->register_process = register_process_nocpsch;
1022 dqm->unregister_process = unregister_process_nocpsch;
1023 dqm->uninitialize = uninitialize_nocpsch;
1024 dqm->create_kernel_queue = create_kernel_queue_cpsch;
1025 dqm->destroy_kernel_queue = destroy_kernel_queue_cpsch;
1026 dqm->set_cache_memory_policy = set_cache_memory_policy;
1027 break;
1028 case KFD_SCHED_POLICY_NO_HWS:
1029 /* initialize dqm for no cp scheduling */
1030 dqm->start = start_nocpsch;
1031 dqm->stop = stop_nocpsch;
1032 dqm->create_queue = create_queue_nocpsch;
1033 dqm->destroy_queue = destroy_queue_nocpsch;
1034 dqm->update_queue = update_queue;
1035 dqm->get_mqd_manager = get_mqd_manager_nocpsch;
1036 dqm->register_process = register_process_nocpsch;
1037 dqm->unregister_process = unregister_process_nocpsch;
1038 dqm->initialize = initialize_nocpsch;
1039 dqm->uninitialize = uninitialize_nocpsch;
1040 dqm->set_cache_memory_policy = set_cache_memory_policy;
1041 break;
1042 default:
1043 BUG();
1044 break;
1045 }
1046
1047 if (dqm->initialize(dqm) != 0) {
1048 kfree(dqm);
1049 return NULL;
1050 }
1051
1052 return dqm;
1053}
1054
1055void device_queue_manager_uninit(struct device_queue_manager *dqm)
1056{
1057 BUG_ON(!dqm);
1058
1059 dqm->uninitialize(dqm);
1060 kfree(dqm);
1061}
1062
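/*
 * Editor's note: a sketch of the intended bring-up sequence for this
 * interface, mirroring how kgd2kfd_device_init() in kfd_device.c earlier
 * in this diff consumes it; the -EIO error code here is illustrative.
 */
static int demo_bring_up_dqm(struct kfd_dev *kfd)
{
	struct device_queue_manager *dqm = device_queue_manager_init(kfd);

	if (!dqm)
		return -ENOMEM;

	if (dqm->start(dqm) != 0) {
		device_queue_manager_uninit(dqm);
		return -EIO;
	}

	kfd->dqm = dqm;
	return 0;
}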
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
new file mode 100644
index 000000000000..c3f189e8ae35
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -0,0 +1,146 @@
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef KFD_DEVICE_QUEUE_MANAGER_H_
25#define KFD_DEVICE_QUEUE_MANAGER_H_
26
27#include <linux/rwsem.h>
28#include <linux/list.h>
29#include "kfd_priv.h"
30#include "kfd_mqd_manager.h"
31
32#define QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS (500)
33#define QUEUES_PER_PIPE (8)
34#define PIPE_PER_ME_CP_SCHEDULING (3)
35#define CIK_VMID_NUM (8)
36#define KFD_VMID_START_OFFSET (8)
37#define VMID_PER_DEVICE CIK_VMID_NUM
38#define KFD_DQM_FIRST_PIPE (0)
39
40struct device_process_node {
41 struct qcm_process_device *qpd;
42 struct list_head list;
43};
44
45/**
46 * struct device_queue_manager
47 *
48 * @create_queue: Queue creation routine.
49 *
50 * @destroy_queue: Queue destruction routine.
51 *
52 * @update_queue: Queue update routine.
53 *
54 * @get_mqd_manager: Returns the mqd manager according to the mqd type.
55 *
56 * @execute_queues: Dispatches the queues list to the H/W.
57 *
58 * @register_process: This routine associates a specific process with a device.
59 *
60 * @unregister_process: Destroys the association between a process and a device.
61 *
62 * @initialize: Initializes the pipelines and memory module for that device.
63 *
64 * @start: Initializes the resources/modules the device needs for queue
65 * execution. This function is called on device initialization and when the
66 * system resumes from suspend.
67 *
68 * @stop: Stops execution of all the active queues running on the H/W;
69 * this function is called on system suspend.
70 *
71 * @uninitialize: Destroys all the device queue manager resources allocated in
72 * initialize routine.
73 *
74 * @create_kernel_queue: Creates kernel queue. Used for debug queue.
75 *
76 * @destroy_kernel_queue: Destroys kernel queue. Used for debug queue.
77 *
78 * @set_cache_memory_policy: Sets the memory policy (cached/non-cached) for
79 * the memory apertures.
80 *
81 * This struct is a base class for the kfd queue scheduler at the
82 * device level. The device base class should expose the basic operations
83 * for queue creation and queue destruction. This base class hides the
84 * scheduling mode of the driver and the specific implementation of the
85 * concrete device. This class is the only class in the queue scheduler
86 * that configures the H/W.
87 */
88
89struct device_queue_manager {
90 int (*create_queue)(struct device_queue_manager *dqm,
91 struct queue *q,
92 struct qcm_process_device *qpd,
93 int *allocate_vmid);
94 int (*destroy_queue)(struct device_queue_manager *dqm,
95 struct qcm_process_device *qpd,
96 struct queue *q);
97 int (*update_queue)(struct device_queue_manager *dqm,
98 struct queue *q);
99
100 struct mqd_manager * (*get_mqd_manager)
101 (struct device_queue_manager *dqm,
102 enum KFD_MQD_TYPE type);
103
104 int (*register_process)(struct device_queue_manager *dqm,
105 struct qcm_process_device *qpd);
106 int (*unregister_process)(struct device_queue_manager *dqm,
107 struct qcm_process_device *qpd);
108 int (*initialize)(struct device_queue_manager *dqm);
109 int (*start)(struct device_queue_manager *dqm);
110 int (*stop)(struct device_queue_manager *dqm);
111 void (*uninitialize)(struct device_queue_manager *dqm);
112 int (*create_kernel_queue)(struct device_queue_manager *dqm,
113 struct kernel_queue *kq,
114 struct qcm_process_device *qpd);
115 void (*destroy_kernel_queue)(struct device_queue_manager *dqm,
116 struct kernel_queue *kq,
117 struct qcm_process_device *qpd);
118 bool (*set_cache_memory_policy)(struct device_queue_manager *dqm,
119 struct qcm_process_device *qpd,
120 enum cache_policy default_policy,
121 enum cache_policy alternate_policy,
122 void __user *alternate_aperture_base,
123 uint64_t alternate_aperture_size);
124
125
126 struct mqd_manager *mqds[KFD_MQD_TYPE_MAX];
127 struct packet_manager packets;
128 struct kfd_dev *dev;
129 struct mutex lock;
130 struct list_head queues;
131 unsigned int processes_count;
132 unsigned int queue_count;
133 unsigned int next_pipe_to_allocate;
134 unsigned int *allocated_queues;
135 unsigned int vmid_bitmap;
136 uint64_t pipelines_addr;
137 struct kfd_mem_obj *pipeline_mem;
138 uint64_t fence_gpu_addr;
139 unsigned int *fence_addr;
140 struct kfd_mem_obj *fence_mem;
141 bool active_runlist;
142};
143
144
145
146#endif /* KFD_DEVICE_QUEUE_MANAGER_H_ */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
new file mode 100644
index 000000000000..b5791a5c7c06
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
@@ -0,0 +1,256 @@
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#include "kfd_priv.h"
23#include <linux/mm.h>
24#include <linux/mman.h>
25#include <linux/slab.h>
26#include <linux/io.h>
27
28/*
29 * This extension supports kernel-level doorbell management for
30 * the kernel queues.
31 * Basically, the last doorbell page is devoted to kernel queues,
32 * which ensures that no user process can get access to the
33 * kernel doorbells page.
34 */
35static DEFINE_MUTEX(doorbell_mutex);
36static unsigned long doorbell_available_index[
37 DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS, BITS_PER_LONG)] = { 0 };
38
39#define KERNEL_DOORBELL_PASID 1
40#define KFD_SIZE_OF_DOORBELL_IN_BYTES 4
41
42/*
43 * Each device exposes a doorbell aperture, a PCI MMIO aperture that
44 * receives 32-bit writes that are passed to queues as wptr values.
45 * The doorbells are intended to be written by applications as part
46 * of queueing work on user-mode queues.
47 * We assign doorbells to applications in PAGE_SIZE-sized and aligned chunks.
48 * We map the doorbell address space into user-mode when a process creates
49 * its first queue on each device.
50 * Although the mapping is done by KFD, it is equivalent to an mmap of
51 * the /dev/kfd with the particular device encoded in the mmap offset.
52 * There will be other uses for mmap of /dev/kfd, so only a range of
53 * offsets (KFD_MMAP_DOORBELL_START-END) is used for doorbells.
54 */
55
56/* # of doorbell bytes allocated for each process. */
57static inline size_t doorbell_process_allocation(void)
58{
59 return roundup(KFD_SIZE_OF_DOORBELL_IN_BYTES *
60 KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
61 PAGE_SIZE);
62}
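
/*
 * Illustrative check of the arithmetic above (not part of this file): a
 * user-space model assuming 4-byte doorbells, a 4096-byte page, and an
 * example value of 1024 queues per process.
 */
#include <stdio.h>
#include <stddef.h>

#define EXAMPLE_PAGE_SIZE 4096
#define EXAMPLE_DOORBELL_BYTES 4
#define EXAMPLE_QUEUES_PER_PROCESS 1024 /* assumed, for illustration only */

/* Same rounding rule as the kernel's roundup(): up to a multiple of align */
static size_t roundup_size(size_t x, size_t align)
{
	return ((x + align - 1) / align) * align;
}

int main(void)
{
	size_t alloc = roundup_size(EXAMPLE_DOORBELL_BYTES *
				    EXAMPLE_QUEUES_PER_PROCESS,
				    EXAMPLE_PAGE_SIZE);

	/* 4 * 1024 = 4096 bytes: exactly one doorbell page per process */
	printf("per-process doorbell allocation: %zu bytes\n", alloc);
	return 0;
}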
63
64/* Doorbell calculations for device init. */
65void kfd_doorbell_init(struct kfd_dev *kfd)
66{
67 size_t doorbell_start_offset;
68 size_t doorbell_aperture_size;
69 size_t doorbell_process_limit;
70
71 /*
72 * We start with calculations in bytes because the input data might
73 * only be byte-aligned.
74 * Only after we have done the rounding can we assume any alignment.
75 */
76
77 doorbell_start_offset =
78 roundup(kfd->shared_resources.doorbell_start_offset,
79 doorbell_process_allocation());
80
81 doorbell_aperture_size =
82 rounddown(kfd->shared_resources.doorbell_aperture_size,
83 doorbell_process_allocation());
84
85 if (doorbell_aperture_size > doorbell_start_offset)
86 doorbell_process_limit =
87 (doorbell_aperture_size - doorbell_start_offset) /
88 doorbell_process_allocation();
89 else
90 doorbell_process_limit = 0;
91
92 kfd->doorbell_base = kfd->shared_resources.doorbell_physical_address +
93 doorbell_start_offset;
94
95 kfd->doorbell_id_offset = doorbell_start_offset / sizeof(u32);
96 kfd->doorbell_process_limit = doorbell_process_limit - 1;
97
98 kfd->doorbell_kernel_ptr = ioremap(kfd->doorbell_base,
99 doorbell_process_allocation());
100
101 BUG_ON(!kfd->doorbell_kernel_ptr);
102
103 pr_debug("kfd: doorbell initialization:\n");
104 pr_debug("kfd: doorbell base == 0x%08lX\n",
105 (uintptr_t)kfd->doorbell_base);
106
107 pr_debug("kfd: doorbell_id_offset == 0x%08lX\n",
108 kfd->doorbell_id_offset);
109
110 pr_debug("kfd: doorbell_process_limit == 0x%08lX\n",
111 doorbell_process_limit);
112
113 pr_debug("kfd: doorbell_kernel_offset == 0x%08lX\n",
114 (uintptr_t)kfd->doorbell_base);
115
116 pr_debug("kfd: doorbell aperture size == 0x%08lX\n",
117 kfd->shared_resources.doorbell_aperture_size);
118
119 pr_debug("kfd: doorbell kernel address == 0x%08lX\n",
120 (uintptr_t)kfd->doorbell_kernel_ptr);
121}
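
/*
 * Worked example of the limit computation above, with invented numbers:
 * an 8 MB doorbell aperture, a start offset that rounds up to one 4 KB
 * page, and one page per process give (8 MB - 4 KB) / 4 KB = 2047
 * processes, so doorbell_process_limit is stored as 2046.
 */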
122
123int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma)
124{
125 phys_addr_t address;
126 struct kfd_dev *dev;
127
128 /*
129 * For simplicity we only allow mapping of the entire doorbell
130 * allocation of a single device & process.
131 */
132 if (vma->vm_end - vma->vm_start != doorbell_process_allocation())
133 return -EINVAL;
134
135 /* Find kfd device according to gpu id */
136 dev = kfd_device_by_id(vma->vm_pgoff);
137 if (dev == NULL)
138 return -EINVAL;
139
140 /* Find if pdd exists for combination of process and gpu id */
141 if (!kfd_get_process_device_data(dev, process, 0))
142 return -EINVAL;
143
144 /* Calculate physical address of doorbell */
145 address = kfd_get_process_doorbells(dev, process);
146
147 vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE |
148 VM_DONTDUMP | VM_PFNMAP;
149
150 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
151
152 pr_debug("kfd: mapping doorbell page in kfd_doorbell_mmap\n"
153 " target user address == 0x%08llX\n"
154 " physical address == 0x%08llX\n"
155 " vm_flags == 0x%04lX\n"
156 " size == 0x%04lX\n",
157 (unsigned long long) vma->vm_start, address, vma->vm_flags,
158 doorbell_process_allocation());
159
160
161 return io_remap_pfn_range(vma,
162 vma->vm_start,
163 address >> PAGE_SHIFT,
164 doorbell_process_allocation(),
165 vma->vm_page_prot);
166}
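
/*
 * Hedged user-space sketch of the mapping described above; the offset
 * encoding (gpu id in the page offset) and the sizes are illustrative
 * assumptions, not a statement of the final /dev/kfd ABI.
 */
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/kfd", O_RDWR);
	if (fd < 0)
		return 1;

	/* Length must equal the per-process doorbell allocation (one page
	 * here), and vm_pgoff carries the gpu id in this early scheme. */
	void *db = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED, fd,
			(off_t)1 * 4096);
	if (db != MAP_FAILED)
		munmap(db, 4096);
	close(fd);
	return 0;
}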
167
168
169/* get kernel iomem pointer for a doorbell */
170u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
171 unsigned int *doorbell_off)
172{
173 u32 inx;
174
175 BUG_ON(!kfd || !doorbell_off);
176
177 mutex_lock(&doorbell_mutex);
178 inx = find_first_zero_bit(doorbell_available_index,
179 KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
180
181 if (inx < KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
182  __set_bit(inx, doorbell_available_index);
183 mutex_unlock(&doorbell_mutex);
184 if (inx >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) /* no free doorbell */
185  return NULL;
186
187 /*
188 * Calculating the kernel doorbell offset using the "fake" kernel
189 * pasid that is allocated for kernel queues only
190 */
191 *doorbell_off = KERNEL_DOORBELL_PASID * (doorbell_process_allocation() /
192 sizeof(u32)) + inx;
193
194 pr_debug("kfd: get kernel queue doorbell\n"
195 " doorbell offset == 0x%08d\n"
196 " kernel address == 0x%08lX\n",
197 *doorbell_off, (uintptr_t)(kfd->doorbell_kernel_ptr + inx));
198
199 return kfd->doorbell_kernel_ptr + inx;
200}
201
202void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr)
203{
204 unsigned int inx;
205
206 BUG_ON(!kfd || !db_addr);
207
208 inx = (unsigned int)(db_addr - kfd->doorbell_kernel_ptr);
209
210 mutex_lock(&doorbell_mutex);
211 __clear_bit(inx, doorbell_available_index);
212 mutex_unlock(&doorbell_mutex);
213}
214
215inline void write_kernel_doorbell(u32 __iomem *db, u32 value)
216{
217 if (db) {
218 writel(value, db);
219 pr_debug("writing %d to doorbell address 0x%p\n", value, db);
220 }
221}
222
223/*
224 * queue_ids are in the range [0,MAX_PROCESS_QUEUES) and are mapped 1:1
225 * to doorbells within the process's doorbell page
226 */
227unsigned int kfd_queue_id_to_doorbell(struct kfd_dev *kfd,
228 struct kfd_process *process,
229 unsigned int queue_id)
230{
231 /*
232 * doorbell_id_offset accounts for doorbells taken by KGD.
233 * pasid * doorbell_process_allocation/sizeof(u32) adjusts
234 * to the process's doorbells
235 */
236 return kfd->doorbell_id_offset +
237 process->pasid * (doorbell_process_allocation()/sizeof(u32)) +
238 queue_id;
239}
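
/*
 * A small model of the same index arithmetic with invented numbers (the
 * 512-doorbell KGD reservation and the pasid below are assumptions for
 * illustration, and one 4 KB page holds 4096 / 4 = 1024 doorbells):
 */
#include <stdio.h>

#define DOORBELLS_PER_PROCESS 1024

static unsigned int example_queue_id_to_doorbell(unsigned int id_offset,
						 unsigned int pasid,
						 unsigned int queue_id)
{
	/* skip the KGD doorbells, then index into the pasid's own page */
	return id_offset + pasid * DOORBELLS_PER_PROCESS + queue_id;
}

int main(void)
{
	/* 512 + 3 * 1024 + 2 = 3586 */
	printf("doorbell index: %u\n",
	       example_queue_id_to_doorbell(512, 3, 2));
	return 0;
}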
240
241uint64_t kfd_get_number_elems(struct kfd_dev *kfd)
242{
243 uint64_t num_of_elems = (kfd->shared_resources.doorbell_aperture_size -
244 kfd->shared_resources.doorbell_start_offset) /
245 doorbell_process_allocation() + 1;
246
247 return num_of_elems;
248
249}
250
251phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev,
252 struct kfd_process *process)
253{
254 return dev->doorbell_base +
255 process->pasid * doorbell_process_allocation();
256}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
new file mode 100644
index 000000000000..66df4da01c29
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -0,0 +1,356 @@
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/device.h>
25#include <linux/export.h>
26#include <linux/err.h>
27#include <linux/fs.h>
28#include <linux/sched.h>
29#include <linux/slab.h>
30#include <linux/uaccess.h>
31#include <linux/compat.h>
32#include <uapi/linux/kfd_ioctl.h>
33#include <linux/time.h>
34#include "kfd_priv.h"
35#include <linux/mm.h>
36#include <uapi/asm-generic/mman-common.h>
37#include <asm/processor.h>
38
39/*
40 * The primary memory I/O features being added for revisions of gfxip
41 * beyond 7.0 (Kaveri) are:
42 *
43 * Access to ATC/IOMMU mapped memory w/ associated extension of VA to 48b
44 *
45 * “Flat” shader memory access – These are new shader vector memory
46 * operations that do not reference a T#/V# so a “pointer” is what is
47 * sourced from the vector gprs for direct access to memory.
48 * This pointer space has the Shared(LDS) and Private(Scratch) memory
49 * mapped into this pointer space as apertures.
50 * The hardware then determines how to direct the memory request
51 * based on what apertures the request falls in.
52 *
53 * Unaligned support and alignment check
54 *
55 *
56 * System Unified Address - SUA
57 *
58 * The standard usage for GPU virtual addresses is that they are mapped by
59 * a set of page tables we call GPUVM and these page tables are managed by
60 * a combination of vidMM/driver software components. The current virtual
61 * address (VA) range for GPUVM is 40b.
62 *
63 * As of gfxip7.1 and beyond we’re adding the ability for compute memory
64 * clients (CP/RLC, DMA, SHADER(ifetch, scalar, and vector ops)) to access
65 * the same page tables used by host x86 processors and that are managed by
66 * the operating system. This is via a technique and hardware called ATC/IOMMU.
67 * The GPU has the capability of accessing both the GPUVM and ATC address
68 * spaces for a given VMID (process) simultaneously and we call this feature
69 * system unified address (SUA).
70 *
71 * There are three fundamental address modes of operation for a given VMID
72 * (process) on the GPU:
73 *
74 * HSA64 – 64b pointers and the default address space is ATC
75 * HSA32 – 32b pointers and the default address space is ATC
76 * GPUVM – 64b pointers and the default address space is GPUVM (driver
77 * model mode)
78 *
79 *
80 * HSA64 - ATC/IOMMU 64b
81 *
82 * A 64b pointer in the AMD64/IA64 CPU architecture is not fully utilized
83 * by the CPU so an AMD CPU can only access the high area
84 * (VA[63:47] == 0x1FFFF) and low area (VA[63:47] == 0) of the address space
85 * so the actual VA carried to translation is 48b. There is a “hole” in
86 * the middle of the 64b VA space.
87 *
88 * The GPU not only has access to all of the CPU accessible address space via
89 * ATC/IOMMU, but it also has access to the GPUVM address space. The “system
90 * unified address” feature (SUA) is the mapping of GPUVM and ATC address
91 * spaces into a unified pointer space. The method we take for 64b mode is
92 * to map the full 40b GPUVM address space into the hole of the 64b address
93 * space.
94 *
95 * The GPUVM_Base/GPUVM_Limit defines the aperture in the 64b space where we
96 * direct requests to be translated via GPUVM page tables instead of the
97 * IOMMU path.
98 *
99 *
100 * 64b to 49b Address conversion
101 *
102 * Note that there are still significant portions of unused regions (holes)
103 * in the 64b address space even for the GPU. There are several places in
104 * the pipeline (sw and hw), we wish to compress the 64b virtual address
105 * to a 49b address. This 49b address is constituted of an “ATC” bit
106 * plus a 48b virtual address. This 49b address is what is passed to the
107 * translation hardware. ATC==0 means the 48b address is a GPUVM address
108 * (max of 2^40 – 1) intended to be translated via GPUVM page tables.
109 * ATC==1 means the 48b address is intended to be translated via IOMMU
110 * page tables.
111 *
112 * A 64b pointer is compared to the apertures that are defined (Base/Limit);
113 * in this case the GPUVM aperture is defined, and if a pointer falls in this
114 * aperture, we subtract the GPUVM_Base address and set the ATC bit to zero
115 * as part of the 64b to 49b conversion.
116 *
117 * Where this 64b to 49b conversion is done is a function of the usage.
118 * Most GPU memory access is via memory objects where the driver builds
119 * a descriptor which consists of a base address and a memory access by
120 * the GPU usually consists of some kind of an offset or Cartesian coordinate
121 * that references this memory descriptor. This is the case for shader
122 * instructions that reference the T# or V# constants, or for specified
123 * locations of assets (ex. the shader program location). In these cases
124 * the driver is what handles the 64b to 49b conversion and the base
125 * address in the descriptor (ex. V# or T# or shader program location)
126 * is defined as a 48b address w/ an ATC bit. For this usage a given
127 * memory object cannot straddle multiple apertures in the 64b address
128 * space. For example a shader program cannot jump in/out between ATC
129 * and GPUVM space.
130 *
131 * In some cases we wish to pass a 64b pointer to the GPU hardware and
132 * the GPU hw does the 64b to 49b conversion before passing memory
133 * requests to the cache/memory system. This is the case for the
134 * S_LOAD and FLAT_* shader memory instructions where we have 64b pointers
135 * in scalar and vector GPRs respectively.
136 *
137 * In all cases (no matter where the 64b -> 49b conversion is done), the gfxip
138 * hardware sends a 48b address along w/ an ATC bit, to the memory controller
139 * on the memory request interfaces.
140 *
141 * <client>_MC_rdreq_atc // read request ATC bit
142 *
143 * 0 : <client>_MC_rdreq_addr is a GPUVM VA
144 *
145 * 1 : <client>_MC_rdreq_addr is a ATC VA
146 *
147 *
148 * “Spare” aperture (APE1)
149 *
150 * We use the GPUVM aperture to differentiate ATC vs. GPUVM, but we also use
151 * apertures to set the Mtype field for S_LOAD/FLAT_* ops which is input to the
152 * config tables for setting cache policies. The “spare” (APE1) aperture is
153 * motivated by getting a different Mtype from the default.
154 * The default aperture isn’t an actual base/limit aperture; it is just the
155 * address space that doesn’t hit any defined base/limit apertures.
156 * To complete the picture of the gfxip7.x SUA apertures:
157 * the APE1 can be placed either below or above
158 * the hole (it cannot be in the hole).
159 *
160 *
161 * General Aperture definitions and rules
162 *
163 * An aperture register definition consists of a Base, Limit, Mtype, and
164 * usually an ATC bit indicating which translation tables that aperture uses.
165 * In all cases (for SUA and DUA apertures discussed later), aperture base
166 * and limit definitions are 64KB aligned.
167 *
168 * <ape>_Base[63:0] = { <ape>_Base_register[63:16], 0x0000 }
169 *
170 * <ape>_Limit[63:0] = { <ape>_Limit_register[63:16], 0xFFFF }
171 *
172 * The base and limit are considered inclusive to an aperture so being
173 * inside an aperture means (address >= Base) AND (address <= Limit).
174 *
175 * In no case is a payload that straddles multiple apertures expected to work.
176 * For example a load_dword_x4 that starts in one aperture and ends in another,
177 * does not work. For the vector FLAT_* ops we have detection capability in
178 * the shader for reporting a “memory violation” back to the
179 * SQ block for use in traps.
180 * A memory violation results when an op falls into the hole,
181 * or a payload straddles multiple apertures. The S_LOAD instruction
182 * does not have this detection.
183 *
184 * Apertures cannot overlap.
185 *
186 *
187 *
188 * HSA32 - ATC/IOMMU 32b
189 *
190 * For HSA32 mode, the pointers are interpreted as 32 bits and use a single GPR
191 * instead of two for the S_LOAD and FLAT_* ops. The entire GPUVM space of 40b
192 * will not fit so there is only partial visibility to the GPUVM
193 * space (defined by the aperture) for S_LOAD and FLAT_* ops.
194 * There is no spare (APE1) aperture for HSA32 mode.
195 *
196 *
197 * GPUVM 64b mode (driver model)
198 *
199 * This mode is related to HSA64 in that the difference really is that
200 * the default aperture is GPUVM (ATC==0) and not ATC space.
201 * We have gfxip7.x hardware that has FLAT_* and S_LOAD support for
202 * SUA GPUVM mode, but does not support HSA32/HSA64.
203 *
204 *
205 * Device Unified Address - DUA
206 *
207 * Device unified address (DUA) is the name of the feature that maps the
208 * Shared(LDS) memory and Private(Scratch) memory into the overall address
209 * space for use by the new FLAT_* vector memory ops. The Shared and
210 * Private memories are mapped as apertures into the address space,
211 * and the hardware detects when a FLAT_* memory request is to be redirected
212 * to the LDS or Scratch memory when it falls into one of these apertures.
213 * Like the SUA apertures, the Shared/Private apertures are 64KB aligned and
214 * the base/limit is “in” the aperture. For both HSA64 and GPUVM SUA modes,
215 * the Shared/Private apertures are always placed in a limited selection of
216 * options in the hole of the 64b address space. For HSA32 mode, the
217 * Shared/Private apertures can be placed anywhere in the 32b space
218 * except at 0.
219 *
220 *
221 * HSA64 Apertures for FLAT_* vector ops
222 *
223 * For HSA64 SUA mode, the Shared and Private apertures are always placed
224 * in the hole w/ a limited selection of possible locations. The requests
225 * that fall in the private aperture are expanded as a function of the
226 * work-item id (tid) and redirected to the location of the
227 * “hidden private memory”. The hidden private can be placed in either GPUVM
228 * or ATC space. The addresses that fall in the shared aperture are
229 * re-directed to the on-chip LDS memory hardware.
230 *
231 *
232 * HSA32 Apertures for FLAT_* vector ops
233 *
234 * In HSA32 mode, the Private and Shared apertures can be placed anywhere
235 * in the 32b space except at 0 (Private or Shared Base at zero disables
236 * the apertures). If the base addresses of the apertures are non-zero
237 * (i.e. the apertures exist), the size is always 64KB.
238 *
239 *
240 * GPUVM Apertures for FLAT_* vector ops
241 *
242 * In GPUVM mode, the Shared/Private apertures are specified identically
243 * to HSA64 mode where they are always in the hole at a limited selection
244 * of locations.
245 *
246 *
247 * Aperture Definitions for SUA and DUA
248 *
249 * The interpretation of the aperture register definitions for a given
250 * VMID is a function of the “SUA Mode” which is one of HSA64, HSA32, or
251 * GPUVM64 discussed in previous sections. The mode is first decoded, and
252 * then the remaining register decode is a function of the mode.
253 *
254 *
255 * SUA Mode Decode
256 *
257 * For the S_LOAD and FLAT_* shader operations, the SUA mode is decoded from
258 * the COMPUTE_DISPATCH_INITIATOR:DATA_ATC bit and
259 * the SH_MEM_CONFIG:PTR32 bits.
260 *
261 * COMPUTE_DISPATCH_INITIATOR:DATA_ATC SH_MEM_CONFIG:PTR32 Mode
262 *
263 * 1 0 HSA64
264 *
265 * 1 1 HSA32
266 *
267 * 0 X GPUVM64
268 *
269 * In general the hardware will ignore the PTR32 bit and treat
270 * as “0” whenever DATA_ATC = “0”, but sw should set PTR32=0
271 * when DATA_ATC=0.
272 *
273 * The DATA_ATC bit is only set for compute dispatches.
274 * All “Draw” dispatches are hardcoded to GPUVM64 mode
275 * for FLAT_* / S_LOAD operations.
276 */
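
/*
 * The containment test and 64b -> 49b rebasing described above reduce to a
 * few lines. This is an illustrative user-space model: the struct, helper
 * names, and aperture values are placeholders, not driver API.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* 64KB-aligned base/limit, limit inclusive, as the rules above require */
struct aperture {
	uint64_t base;
	uint64_t limit;
};

static bool in_aperture(const struct aperture *ape, uint64_t va)
{
	return va >= ape->base && va <= ape->limit;
}

/* 64b -> 49b: an ATC bit plus a 48b VA handed to translation hardware */
static uint64_t convert_64b_to_49b(const struct aperture *gpuvm,
				   uint64_t va, bool *atc)
{
	if (in_aperture(gpuvm, va)) {
		*atc = false;            /* translate via GPUVM page tables */
		return va - gpuvm->base; /* rebased, at most 2^40 - 1 */
	}
	*atc = true;                     /* translate via IOMMU page tables */
	return va & ((1ULL << 48) - 1);
}

int main(void)
{
	/* placeholder aperture: a 2^40-byte range placed in the 64b hole */
	struct aperture gpuvm = {
		.base  = 0x2001000000000000ULL,
		.limit = 0x200100FFFFFFFFFFULL,
	};
	bool atc;
	uint64_t va49 = convert_64b_to_49b(&gpuvm, gpuvm.base + 0x1000, &atc);

	printf("ATC=%d, 48b VA=0x%llx\n", atc, (unsigned long long)va49);
	return 0;
}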
277
278#define MAKE_GPUVM_APP_BASE(gpu_num) \
279 (((uint64_t)(gpu_num) << 61) + 0x1000000000000L)
280
281#define MAKE_GPUVM_APP_LIMIT(base) \
282 (((uint64_t)(base) & \
283 0xFFFFFF0000000000UL) | 0xFFFFFFFFFFL)
284
285#define MAKE_SCRATCH_APP_BASE(gpu_num) \
286 (((uint64_t)(gpu_num) << 61) + 0x100000000L)
287
288#define MAKE_SCRATCH_APP_LIMIT(base) \
289 (((uint64_t)base & 0xFFFFFFFF00000000UL) | 0xFFFFFFFF)
290
291#define MAKE_LDS_APP_BASE(gpu_num) \
292 (((uint64_t)(gpu_num) << 61) + 0x0)
293#define MAKE_LDS_APP_LIMIT(base) \
294 (((uint64_t)(base) & 0xFFFFFFFF00000000UL) | 0xFFFFFFFF)
295
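/*
 * Worked values for the macros above with gpu_num = 1: the GPUVM aperture
 * gets base (1 << 61) + 2^48 = 0x2001000000000000 and limit
 * 0x200100FFFFFFFFFF (a 2^40-byte range), and the LDS aperture starts at
 * 0x2000000000000000. kfd_init_apertures() below passes id + 1 precisely
 * so that these top three bits are never all zero.
 */
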
296int kfd_init_apertures(struct kfd_process *process)
297{
298 uint8_t id = 0;
299 struct kfd_dev *dev;
300 struct kfd_process_device *pdd;
301
302 mutex_lock(&process->mutex);
303
304 /* Iterate over all devices */
305 while ((dev = kfd_topology_enum_kfd_devices(id)) != NULL &&
306 id < NUM_OF_SUPPORTED_GPUS) {
307
308 pdd = kfd_get_process_device_data(dev, process, 1);
309
310 /*
311 * For a 64-bit process the apertures are statically reserved in
312 * the x86_64 non-canonical process address space.
313 * amdkfd doesn't currently support apertures for a 32-bit process.
314 */
315 if (process->is_32bit_user_mode) {
316 pdd->lds_base = pdd->lds_limit = 0;
317 pdd->gpuvm_base = pdd->gpuvm_limit = 0;
318 pdd->scratch_base = pdd->scratch_limit = 0;
319 } else {
320 /*
321 * The node id can't be 0 - the three MSBs of the
322 * aperture shouldn't be 0
323 */
324 pdd->lds_base = MAKE_LDS_APP_BASE(id + 1);
325
326 pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base);
327
328 pdd->gpuvm_base = MAKE_GPUVM_APP_BASE(id + 1);
329
330 pdd->gpuvm_limit =
331 MAKE_GPUVM_APP_LIMIT(pdd->gpuvm_base);
332
333 pdd->scratch_base = MAKE_SCRATCH_APP_BASE(id + 1);
334
335 pdd->scratch_limit =
336 MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base);
337 }
338
339 dev_dbg(kfd_device, "node id %u\n", id);
340 dev_dbg(kfd_device, "gpu id %u\n", pdd->dev->id);
341 dev_dbg(kfd_device, "lds_base %llX\n", pdd->lds_base);
342 dev_dbg(kfd_device, "lds_limit %llX\n", pdd->lds_limit);
343 dev_dbg(kfd_device, "gpuvm_base %llX\n", pdd->gpuvm_base);
344 dev_dbg(kfd_device, "gpuvm_limit %llX\n", pdd->gpuvm_limit);
345 dev_dbg(kfd_device, "scratch_base %llX\n", pdd->scratch_base);
346 dev_dbg(kfd_device, "scratch_limit %llX\n", pdd->scratch_limit);
347
348 id++;
349 }
350
351 mutex_unlock(&process->mutex);
352
353 return 0;
354}
355
356
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
new file mode 100644
index 000000000000..5b999095a1f7
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
@@ -0,0 +1,176 @@
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23/*
24 * KFD Interrupts.
25 *
26 * AMD GPUs deliver interrupts by pushing an interrupt description onto the
27 * interrupt ring and then sending an interrupt. KGD receives the interrupt
28 * in ISR and sends us a pointer to each new entry on the interrupt ring.
29 *
30 * We generally can't process interrupt-signaled events from ISR, so we call
31 * out to each interrupt client module (currently only the scheduler) to ask if
32 * each interrupt is interesting. If they return true, then it requires further
33 * processing so we copy it to an internal interrupt ring and call each
34 * interrupt client again from a work-queue.
35 *
36 * There's no acknowledgment for the interrupts we use. The hardware simply
37 * queues a new interrupt each time without waiting.
38 *
39 * The fixed-size internal queue means that it's possible for us to lose
40 * interrupts because we have no back-pressure to the hardware.
41 */
42
43#include <linux/slab.h>
44#include <linux/device.h>
45#include "kfd_priv.h"
46
47#define KFD_INTERRUPT_RING_SIZE 256
48
49static void interrupt_wq(struct work_struct *);
50
51int kfd_interrupt_init(struct kfd_dev *kfd)
52{
53 void *interrupt_ring = kmalloc_array(KFD_INTERRUPT_RING_SIZE,
54 kfd->device_info->ih_ring_entry_size,
55 GFP_KERNEL);
56 if (!interrupt_ring)
57 return -ENOMEM;
58
59 kfd->interrupt_ring = interrupt_ring;
60 kfd->interrupt_ring_size =
61 KFD_INTERRUPT_RING_SIZE * kfd->device_info->ih_ring_entry_size;
62 atomic_set(&kfd->interrupt_ring_wptr, 0);
63 atomic_set(&kfd->interrupt_ring_rptr, 0);
64
65 spin_lock_init(&kfd->interrupt_lock);
66
67 INIT_WORK(&kfd->interrupt_work, interrupt_wq);
68
69 kfd->interrupts_active = true;
70
71 /*
72 * After this function returns, the interrupt will be enabled. This
73 * barrier ensures that the interrupt running on a different processor
74 * sees all the above writes.
75 */
76 smp_wmb();
77
78 return 0;
79}
80
81void kfd_interrupt_exit(struct kfd_dev *kfd)
82{
83 /*
84 * Stop the interrupt handler from writing to the ring and scheduling
85 * workqueue items. The spinlock ensures that any interrupt running
86 * after we have unlocked sees interrupts_active = false.
87 */
88 unsigned long flags;
89
90 spin_lock_irqsave(&kfd->interrupt_lock, flags);
91 kfd->interrupts_active = false;
92 spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
93
94 /*
95 * flush_scheduled_work() ensures that there are no outstanding
96 * work-queue items that will access interrupt_ring. New work items
97 * can't be created because we stopped interrupt handling above.
98 */
99 flush_scheduled_work();
100
101 kfree(kfd->interrupt_ring);
102}
103
104/*
105 * Assumes that it is never called concurrently with itself,
106 * only with dequeue_ih_ring_entry.
107 */
108bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry)
109{
110 unsigned int rptr = atomic_read(&kfd->interrupt_ring_rptr);
111 unsigned int wptr = atomic_read(&kfd->interrupt_ring_wptr);
112
113 if ((rptr - wptr) % kfd->interrupt_ring_size ==
114 kfd->device_info->ih_ring_entry_size) {
115 /* This is very bad, the system is likely to hang. */
116 dev_err_ratelimited(kfd_chardev(),
117 "Interrupt ring overflow, dropping interrupt.\n");
118 return false;
119 }
120
121 memcpy(kfd->interrupt_ring + wptr, ih_ring_entry,
122 kfd->device_info->ih_ring_entry_size);
123
124 wptr = (wptr + kfd->device_info->ih_ring_entry_size) %
125 kfd->interrupt_ring_size;
126 smp_wmb(); /* Ensure memcpy'd data is visible before wptr update. */
127 atomic_set(&kfd->interrupt_ring_wptr, wptr);
128
129 return true;
130}
131
132/*
133 * Assumes that it is never called concurrently with itself,
134 * only with enqueue_ih_ring_entry.
135 */
136static bool dequeue_ih_ring_entry(struct kfd_dev *kfd, void *ih_ring_entry)
137{
138 /*
139 * Assume that wait queues have an implicit barrier, i.e. anything that
140 * happened in the ISR before it queued work is visible.
141 */
142
143 unsigned int wptr = atomic_read(&kfd->interrupt_ring_wptr);
144 unsigned int rptr = atomic_read(&kfd->interrupt_ring_rptr);
145
146 if (rptr == wptr)
147 return false;
148
149 memcpy(ih_ring_entry, kfd->interrupt_ring + rptr,
150 kfd->device_info->ih_ring_entry_size);
151
152 rptr = (rptr + kfd->device_info->ih_ring_entry_size) %
153 kfd->interrupt_ring_size;
154
155 /*
156 * Ensure the rptr write update is not visible until
157 * memcpy has finished reading.
158 */
159 smp_mb();
160 atomic_set(&kfd->interrupt_ring_rptr, rptr);
161
162 return true;
163}
164
165static void interrupt_wq(struct work_struct *work)
166{
167 struct kfd_dev *dev = container_of(work, struct kfd_dev,
168 interrupt_work);
169
170 uint32_t ih_ring_entry[DIV_ROUND_UP(
171 dev->device_info->ih_ring_entry_size,
172 sizeof(uint32_t))];
173
174 while (dequeue_ih_ring_entry(dev, ih_ring_entry))
175 ;
176}
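
/*
 * The rptr/wptr discipline in enqueue_ih_ring_entry/dequeue_ih_ring_entry
 * is a single-producer/single-consumer ring that keeps one entry free so
 * rptr == wptr unambiguously means empty. A condensed user-space model of
 * just that discipline follows; the entry and ring sizes are invented, and
 * C11 atomics stand in for the kernel's atomic_t plus explicit barriers.
 */
#include <stdio.h>
#include <string.h>
#include <stdbool.h>
#include <stdatomic.h>

#define ENTRY_SIZE 16                  /* invented ih_ring_entry_size */
#define RING_SIZE (16 * ENTRY_SIZE)    /* must divide 2^32 for the math */

static unsigned char ring[RING_SIZE];
static _Atomic unsigned int rptr, wptr;

static bool enqueue(const void *entry)
{
	unsigned int r = atomic_load(&rptr);
	unsigned int w = atomic_load(&wptr);

	/* full when exactly one entry of space remains, as in the driver */
	if ((r - w) % RING_SIZE == ENTRY_SIZE)
		return false;
	memcpy(ring + w, entry, ENTRY_SIZE);
	/* release: copied data must be visible before the wptr update */
	atomic_store_explicit(&wptr, (w + ENTRY_SIZE) % RING_SIZE,
			      memory_order_release);
	return true;
}

static bool dequeue(void *entry)
{
	unsigned int w = atomic_load_explicit(&wptr, memory_order_acquire);
	unsigned int r = atomic_load(&rptr);

	if (r == w)
		return false;          /* empty */
	memcpy(entry, ring + r, ENTRY_SIZE);
	atomic_store_explicit(&rptr, (r + ENTRY_SIZE) % RING_SIZE,
			      memory_order_release);
	return true;
}

int main(void)
{
	unsigned char in[ENTRY_SIZE] = { 42 }, out[ENTRY_SIZE] = { 0 };

	enqueue(in);
	dequeue(out);
	printf("dequeued first byte: %u\n", out[0]);
	return 0;
}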
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
new file mode 100644
index 000000000000..935071410724
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -0,0 +1,353 @@
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/types.h>
25#include <linux/mutex.h>
26#include <linux/slab.h>
27#include <linux/printk.h>
28#include <linux/sched.h>
29#include "kfd_kernel_queue.h"
30#include "kfd_priv.h"
31#include "kfd_device_queue_manager.h"
32#include "kfd_pm4_headers.h"
33#include "kfd_pm4_opcodes.h"
34
35#define PM4_COUNT_ZERO (((1 << 15) - 1) << 16)
36
37static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
38 enum kfd_queue_type type, unsigned int queue_size)
39{
40 struct queue_properties prop;
41 int retval;
42 union PM4_MES_TYPE_3_HEADER nop;
43
44 BUG_ON(!kq || !dev);
45 BUG_ON(type != KFD_QUEUE_TYPE_DIQ && type != KFD_QUEUE_TYPE_HIQ);
46
47 pr_debug("kfd: In func %s initializing queue type %d size %d\n",
48 __func__, KFD_QUEUE_TYPE_HIQ, queue_size);
49
50 nop.opcode = IT_NOP;
51 nop.type = PM4_TYPE_3;
52 nop.u32all |= PM4_COUNT_ZERO;
53
54 kq->dev = dev;
55 kq->nop_packet = nop.u32all;
56 switch (type) {
57 case KFD_QUEUE_TYPE_DIQ:
58 case KFD_QUEUE_TYPE_HIQ:
59 kq->mqd = dev->dqm->get_mqd_manager(dev->dqm,
60 KFD_MQD_TYPE_CIK_HIQ);
61 break;
62 default:
63 BUG();
64 break;
65 }
66
67 if (kq->mqd == NULL)
68 return false;
69
70 prop.doorbell_ptr = kfd_get_kernel_doorbell(dev, &prop.doorbell_off);
71
72 if (prop.doorbell_ptr == NULL)
73 goto err_get_kernel_doorbell;
74
75 retval = kfd2kgd->allocate_mem(dev->kgd,
76 queue_size,
77 PAGE_SIZE,
78 KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
79 (struct kgd_mem **) &kq->pq);
80
81 if (retval != 0)
82 goto err_pq_allocate_vidmem;
83
84 kq->pq_kernel_addr = kq->pq->cpu_ptr;
85 kq->pq_gpu_addr = kq->pq->gpu_addr;
86
87 retval = kfd2kgd->allocate_mem(dev->kgd,
88 sizeof(*kq->rptr_kernel),
89 32,
90 KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
91 (struct kgd_mem **) &kq->rptr_mem);
92
93 if (retval != 0)
94 goto err_rptr_allocate_vidmem;
95
96 kq->rptr_kernel = kq->rptr_mem->cpu_ptr;
97 kq->rptr_gpu_addr = kq->rptr_mem->gpu_addr;
98
99 retval = kfd2kgd->allocate_mem(dev->kgd,
100 sizeof(*kq->wptr_kernel),
101 32,
102 KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
103 (struct kgd_mem **) &kq->wptr_mem);
104
105 if (retval != 0)
106 goto err_wptr_allocate_vidmem;
107
108 kq->wptr_kernel = kq->wptr_mem->cpu_ptr;
109 kq->wptr_gpu_addr = kq->wptr_mem->gpu_addr;
110
111 memset(kq->pq_kernel_addr, 0, queue_size);
112 memset(kq->rptr_kernel, 0, sizeof(*kq->rptr_kernel));
113 memset(kq->wptr_kernel, 0, sizeof(*kq->wptr_kernel));
114
115 prop.queue_size = queue_size;
116 prop.is_interop = false;
117 prop.priority = 1;
118 prop.queue_percent = 100;
119 prop.type = type;
120 prop.vmid = 0;
121 prop.queue_address = kq->pq_gpu_addr;
122 prop.read_ptr = (uint32_t *) kq->rptr_gpu_addr;
123 prop.write_ptr = (uint32_t *) kq->wptr_gpu_addr;
124
125 if (init_queue(&kq->queue, prop) != 0)
126 goto err_init_queue;
127
128 kq->queue->device = dev;
129 kq->queue->process = kfd_get_process(current);
130
131 retval = kq->mqd->init_mqd(kq->mqd, &kq->queue->mqd,
132 &kq->queue->mqd_mem_obj,
133 &kq->queue->gart_mqd_addr,
134 &kq->queue->properties);
135 if (retval != 0)
136 goto err_init_mqd;
137
138 /* assign HIQ to HQD */
139 if (type == KFD_QUEUE_TYPE_HIQ) {
140 pr_debug("assigning hiq to hqd\n");
141 kq->queue->pipe = KFD_CIK_HIQ_PIPE;
142 kq->queue->queue = KFD_CIK_HIQ_QUEUE;
143 kq->mqd->load_mqd(kq->mqd, kq->queue->mqd, kq->queue->pipe,
144 kq->queue->queue, NULL);
145 } else {
146 /* allocate fence for DIQ */
147
148 retval = kfd2kgd->allocate_mem(dev->kgd,
149 sizeof(uint32_t),
150 32,
151 KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
152 (struct kgd_mem **) &kq->fence_mem_obj);
153
154 if (retval != 0)
155 goto err_alloc_fence;
156
157 kq->fence_kernel_address = kq->fence_mem_obj->cpu_ptr;
158 kq->fence_gpu_addr = kq->fence_mem_obj->gpu_addr;
159 }
160
161 print_queue(kq->queue);
162
163 return true;
164err_alloc_fence:
165err_init_mqd:
166 uninit_queue(kq->queue);
167err_init_queue:
168 kfd2kgd->free_mem(dev->kgd, (struct kgd_mem *) kq->wptr_mem);
169err_wptr_allocate_vidmem:
170 kfd2kgd->free_mem(dev->kgd, (struct kgd_mem *) kq->rptr_mem);
171err_rptr_allocate_vidmem:
172 kfd2kgd->free_mem(dev->kgd, (struct kgd_mem *) kq->pq);
173err_pq_allocate_vidmem:
174 pr_err("kfd: error init pq\n");
175 kfd_release_kernel_doorbell(dev, prop.doorbell_ptr);
176err_get_kernel_doorbell:
177 pr_err("kfd: error init doorbell");
178 return false;
179
180}
181
182static void uninitialize(struct kernel_queue *kq)
183{
184 BUG_ON(!kq);
185
186 if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ)
187 kq->mqd->destroy_mqd(kq->mqd,
188 NULL,
189 false,
190 QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS,
191 kq->queue->pipe,
192 kq->queue->queue);
193
194 kfd2kgd->free_mem(kq->dev->kgd, (struct kgd_mem *) kq->rptr_mem);
195 kfd2kgd->free_mem(kq->dev->kgd, (struct kgd_mem *) kq->wptr_mem);
196 kfd2kgd->free_mem(kq->dev->kgd, (struct kgd_mem *) kq->pq);
197 kfd_release_kernel_doorbell(kq->dev,
198 kq->queue->properties.doorbell_ptr);
199 uninit_queue(kq->queue);
200}
201
202static int acquire_packet_buffer(struct kernel_queue *kq,
203 size_t packet_size_in_dwords, unsigned int **buffer_ptr)
204{
205 size_t available_size;
206 size_t queue_size_dwords;
207 uint32_t wptr, rptr;
208 unsigned int *queue_address;
209
210 BUG_ON(!kq || !buffer_ptr);
211
212 rptr = *kq->rptr_kernel;
213 wptr = *kq->wptr_kernel;
214 queue_address = (unsigned int *)kq->pq_kernel_addr;
215 queue_size_dwords = kq->queue->properties.queue_size / sizeof(uint32_t);
216
217 pr_debug("kfd: In func %s\nrptr: %d\nwptr: %d\nqueue_address 0x%p\n",
218 __func__, rptr, wptr, queue_address);
219
220 available_size = (rptr - 1 - wptr + queue_size_dwords) %
221 queue_size_dwords;
222
223 if (packet_size_in_dwords >= queue_size_dwords ||
224 packet_size_in_dwords >= available_size) {
225 /*
226 * make sure calling functions know
227 * acquire_packet_buffer() failed
228 */
229 *buffer_ptr = NULL;
230 return -ENOMEM;
231 }
232
233 if (wptr + packet_size_in_dwords >= queue_size_dwords) {
234 while (wptr > 0) {
235 queue_address[wptr] = kq->nop_packet;
236 wptr = (wptr + 1) % queue_size_dwords;
237 }
238 }
239
240 *buffer_ptr = &queue_address[wptr];
241 kq->pending_wptr = wptr + packet_size_in_dwords;
242
243 return 0;
244}
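
/*
 * Quick check of the availability formula above with made-up numbers; the
 * "- 1" keeps one dword free so a full queue never looks empty.
 */
#include <stdio.h>

int main(void)
{
	unsigned int size = 64, rptr = 10, wptr = 40; /* invented example */
	unsigned int avail = (rptr - 1 - wptr + size) % size;

	/* (10 - 1 - 40 + 64) % 64 = 33 dwords available */
	printf("available dwords: %u\n", avail);
	return 0;
}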
245
246static void submit_packet(struct kernel_queue *kq)
247{
248#ifdef DEBUG
249 int i;
250#endif
251
252 BUG_ON(!kq);
253
254#ifdef DEBUG
255 for (i = *kq->wptr_kernel; i < kq->pending_wptr; i++) {
256 pr_debug("0x%2X ", kq->pq_kernel_addr[i]);
257 if (i % 15 == 0)
258 pr_debug("\n");
259 }
260 pr_debug("\n");
261#endif
262
263 *kq->wptr_kernel = kq->pending_wptr;
264 write_kernel_doorbell(kq->queue->properties.doorbell_ptr,
265 kq->pending_wptr);
266}
267
268static int sync_with_hw(struct kernel_queue *kq, unsigned long timeout_ms)
269{
270 unsigned long org_timeout_ms;
271
272 BUG_ON(!kq);
273
274 org_timeout_ms = timeout_ms;
275 timeout_ms += jiffies * 1000 / HZ;
276 while (*kq->wptr_kernel != *kq->rptr_kernel) {
277 if (time_after(jiffies * 1000 / HZ, timeout_ms)) {
278 pr_err("kfd: kernel_queue %s timeout expired %lu\n",
279 __func__, org_timeout_ms);
280 pr_err("kfd: wptr: %d rptr: %d\n",
281 *kq->wptr_kernel, *kq->rptr_kernel);
282 return -ETIME;
283 }
284 schedule();
285 }
286
287 return 0;
288}
289
290static void rollback_packet(struct kernel_queue *kq)
291{
292 BUG_ON(!kq);
293 kq->pending_wptr = *kq->queue->properties.write_ptr;
294}
295
296struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
297 enum kfd_queue_type type)
298{
299 struct kernel_queue *kq;
300
301 BUG_ON(!dev);
302
303 kq = kzalloc(sizeof(struct kernel_queue), GFP_KERNEL);
304 if (!kq)
305 return NULL;
306
307 kq->initialize = initialize;
308 kq->uninitialize = uninitialize;
309 kq->acquire_packet_buffer = acquire_packet_buffer;
310 kq->submit_packet = submit_packet;
311 kq->sync_with_hw = sync_with_hw;
312 kq->rollback_packet = rollback_packet;
313
314 if (kq->initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE) == false) {
315 pr_err("kfd: failed to init kernel queue\n");
316 kfree(kq);
317 return NULL;
318 }
319 return kq;
320}
321
322void kernel_queue_uninit(struct kernel_queue *kq)
323{
324 BUG_ON(!kq);
325
326 kq->uninitialize(kq);
327 kfree(kq);
328}
329
330static __attribute__((unused)) void test_kq(struct kfd_dev *dev)
331{
332 struct kernel_queue *kq;
333 uint32_t *buffer, i;
334 int retval;
335
336 BUG_ON(!dev);
337
338 pr_debug("kfd: starting kernel queue test\n");
339
340 kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_HIQ);
341 BUG_ON(!kq);
342
343 retval = kq->acquire_packet_buffer(kq, 5, &buffer);
344 BUG_ON(retval != 0);
345 for (i = 0; i < 5; i++)
346 buffer[i] = kq->nop_packet;
347 kq->submit_packet(kq);
348 kq->sync_with_hw(kq, 1000);
349
350 pr_debug("kfd: ending kernel queue test\n");
351}
352
353
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
new file mode 100644
index 000000000000..dcd2bdb68d44
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
@@ -0,0 +1,69 @@
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef KFD_KERNEL_QUEUE_H_
25#define KFD_KERNEL_QUEUE_H_
26
27#include <linux/list.h>
28#include <linux/types.h>
29#include "kfd_priv.h"
30
31struct kernel_queue {
32 /* interface */
33 bool (*initialize)(struct kernel_queue *kq, struct kfd_dev *dev,
34 enum kfd_queue_type type, unsigned int queue_size);
35 void (*uninitialize)(struct kernel_queue *kq);
36 int (*acquire_packet_buffer)(struct kernel_queue *kq,
37 size_t packet_size_in_dwords,
38 unsigned int **buffer_ptr);
39
40 void (*submit_packet)(struct kernel_queue *kq);
41 int (*sync_with_hw)(struct kernel_queue *kq,
42 unsigned long timeout_ms);
43 void (*rollback_packet)(struct kernel_queue *kq);
44
45 /* data */
46 struct kfd_dev *dev;
47 struct mqd_manager *mqd;
48 struct queue *queue;
49 uint32_t pending_wptr;
50 unsigned int nop_packet;
51
52 struct kfd_mem_obj *rptr_mem;
53 uint32_t *rptr_kernel;
54 uint64_t rptr_gpu_addr;
55 struct kfd_mem_obj *wptr_mem;
56 uint32_t *wptr_kernel;
57 uint64_t wptr_gpu_addr;
58 struct kfd_mem_obj *pq;
59 uint64_t pq_gpu_addr;
60 uint32_t *pq_kernel_addr;
61
62 struct kfd_mem_obj *fence_mem_obj;
63 uint64_t fence_gpu_addr;
64 void *fence_kernel_address;
65
66 struct list_head list;
67};
68
69#endif /* KFD_KERNEL_QUEUE_H_ */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
new file mode 100644
index 000000000000..95d5af138e6e
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -0,0 +1,159 @@
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#include <linux/module.h>
24#include <linux/sched.h>
25#include <linux/moduleparam.h>
26#include <linux/device.h>
27#include "kfd_priv.h"
28
29#define KFD_DRIVER_AUTHOR "AMD Inc. and others"
30
31#define KFD_DRIVER_DESC "Standalone HSA driver for AMD's GPUs"
32#define KFD_DRIVER_DATE "20141113"
33#define KFD_DRIVER_MAJOR 0
34#define KFD_DRIVER_MINOR 7
35#define KFD_DRIVER_PATCHLEVEL 0
36
37const struct kfd2kgd_calls *kfd2kgd;
38static const struct kgd2kfd_calls kgd2kfd = {
39 .exit = kgd2kfd_exit,
40 .probe = kgd2kfd_probe,
41 .device_init = kgd2kfd_device_init,
42 .device_exit = kgd2kfd_device_exit,
43 .interrupt = kgd2kfd_interrupt,
44 .suspend = kgd2kfd_suspend,
45 .resume = kgd2kfd_resume,
46};
47
48int sched_policy = KFD_SCHED_POLICY_HWS;
49module_param(sched_policy, int, 0444);
50MODULE_PARM_DESC(sched_policy,
51 "Kernel cmdline parameter that defines the amdkfd scheduling policy");
52
53int max_num_of_processes = KFD_MAX_NUM_OF_PROCESSES_DEFAULT;
54module_param(max_num_of_processes, int, 0444);
55MODULE_PARM_DESC(max_num_of_processes,
56 "Kernel cmdline parameter that defines the amdkfd maximum number of supported processes");
57
58int max_num_of_queues_per_process = KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT;
59module_param(max_num_of_queues_per_process, int, 0444);
60MODULE_PARM_DESC(max_num_of_queues_per_process,
61 "Kernel cmdline parameter that defines the amdkfd maximum number of supported queues per process");
62
63bool kgd2kfd_init(unsigned interface_version,
64 const struct kfd2kgd_calls *f2g,
65 const struct kgd2kfd_calls **g2f)
66{
67 /*
68 * Only one interface version is supported,
69 * no kfd/kgd version skew allowed.
70 */
71 if (interface_version != KFD_INTERFACE_VERSION)
72 return false;
73
74 /* Protection against multiple amd kgd loads */
75 if (kfd2kgd)
76 return true;
77
78 kfd2kgd = f2g;
79 *g2f = &kgd2kfd;
80
81 return true;
82}
83EXPORT_SYMBOL(kgd2kfd_init);
84
85void kgd2kfd_exit(void)
86{
87}
88
89static int __init kfd_module_init(void)
90{
91 int err;
92
93 kfd2kgd = NULL;
94
95 /* Verify module parameters */
96 if ((sched_policy < KFD_SCHED_POLICY_HWS) ||
97 (sched_policy > KFD_SCHED_POLICY_NO_HWS)) {
98 pr_err("kfd: sched_policy has invalid value\n");
99 return -1;
100 }
101
102 /* Verify module parameters */
103 if ((max_num_of_processes < 0) ||
104 (max_num_of_processes > KFD_MAX_NUM_OF_PROCESSES)) {
105 pr_err("kfd: max_num_of_processes must be between 0 to KFD_MAX_NUM_OF_PROCESSES\n");
106 return -1;
107 }
108
109 if ((max_num_of_queues_per_process < 0) ||
110 (max_num_of_queues_per_process >
111 KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)) {
112 pr_err("kfd: max_num_of_queues_per_process must be between 0 to KFD_MAX_NUM_OF_QUEUES_PER_PROCESS\n");
113 return -1;
114 }
115
116 err = kfd_pasid_init();
117 if (err < 0)
118 goto err_pasid;
119
120 err = kfd_chardev_init();
121 if (err < 0)
122 goto err_ioctl;
123
124 err = kfd_topology_init();
125 if (err < 0)
126 goto err_topology;
127
128 kfd_process_create_wq();
129
130 dev_info(kfd_device, "Initialized module\n");
131
132 return 0;
133
134err_topology:
135 kfd_chardev_exit();
136err_ioctl:
137 kfd_pasid_exit();
138err_pasid:
139 return err;
140}
141
142static void __exit kfd_module_exit(void)
143{
144 kfd_process_destroy_wq();
145 kfd_topology_shutdown();
146 kfd_chardev_exit();
147 kfd_pasid_exit();
148 dev_info(kfd_device, "Removed module\n");
149}
150
151module_init(kfd_module_init);
152module_exit(kfd_module_exit);
153
154MODULE_AUTHOR(KFD_DRIVER_AUTHOR);
155MODULE_DESCRIPTION(KFD_DRIVER_DESC);
156MODULE_LICENSE("GPL and additional rights");
157MODULE_VERSION(__stringify(KFD_DRIVER_MAJOR) "."
158 __stringify(KFD_DRIVER_MINOR) "."
159 __stringify(KFD_DRIVER_PATCHLEVEL));
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
new file mode 100644
index 000000000000..adc31474e786
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
@@ -0,0 +1,346 @@
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/printk.h>
25#include <linux/slab.h>
26#include "kfd_priv.h"
27#include "kfd_mqd_manager.h"
28#include "cik_regs.h"
29#include "../../radeon/cik_reg.h"
30
31inline void busy_wait(unsigned long timeout) /* absolute jiffies deadline */
32{
33 while (time_before(jiffies, timeout))
34 cpu_relax();
35}
36
37static inline struct cik_mqd *get_mqd(void *mqd)
38{
39 return (struct cik_mqd *)mqd;
40}
41
42static int init_mqd(struct mqd_manager *mm, void **mqd,
43 struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
44 struct queue_properties *q)
45{
46 uint64_t addr;
47 struct cik_mqd *m;
48 int retval;
49
50 BUG_ON(!mm || !q || !mqd);
51
52 pr_debug("kfd: In func %s\n", __func__);
53
54 retval = kfd2kgd->allocate_mem(mm->dev->kgd,
55 sizeof(struct cik_mqd),
56 256,
57 KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
58 (struct kgd_mem **) mqd_mem_obj);
59
60 if (retval != 0)
61 return -ENOMEM;
62
63 m = (struct cik_mqd *) (*mqd_mem_obj)->cpu_ptr;
64 addr = (*mqd_mem_obj)->gpu_addr;
65
66 memset(m, 0, ALIGN(sizeof(struct cik_mqd), 256));
67
68 m->header = 0xC0310800;
69 m->compute_pipelinestat_enable = 1;
70 m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
71 m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
72 m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
73 m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
74
75 /*
76 * Make sure to use the last queue state saved on the mqd when the cp
77 * reassigns the queue, so that when the queue is switched on/off (e.g.
78 * oversubscription or quantum timeout) the context will be consistent
79 */
80 m->cp_hqd_persistent_state =
81 DEFAULT_CP_HQD_PERSISTENT_STATE | PRELOAD_REQ;
82
83 m->cp_mqd_control = MQD_CONTROL_PRIV_STATE_EN;
84 m->cp_mqd_base_addr_lo = lower_32_bits(addr);
85 m->cp_mqd_base_addr_hi = upper_32_bits(addr);
86
87 m->cp_hqd_ib_control = DEFAULT_MIN_IB_AVAIL_SIZE | IB_ATC_EN;
88 /* Although WinKFD writes this, I suspect it should not be necessary */
89 m->cp_hqd_ib_control = IB_ATC_EN | DEFAULT_MIN_IB_AVAIL_SIZE;
90
91 m->cp_hqd_quantum = QUANTUM_EN | QUANTUM_SCALE_1MS |
92 QUANTUM_DURATION(10);
93
94 /*
95 * Pipe Priority
96 * Identifies the pipe relative priority when this queue is connected
97 * to the pipeline. The pipe priority is against the GFX pipe and HP3D.
98 * In KFD we are using a fixed pipe priority set to CS_MEDIUM.
99 * 0 = CS_LOW (typically below GFX)
100 * 1 = CS_MEDIUM (typically between HP3D and GFX)
101 * 2 = CS_HIGH (typically above HP3D)
102 */
103 m->cp_hqd_pipe_priority = 1;
104 m->cp_hqd_queue_priority = 15;
105
106 *mqd = m;
107 if (gart_addr != NULL)
108 *gart_addr = addr;
109 retval = mm->update_mqd(mm, m, q);
110
111 return retval;
112}
113
114static void uninit_mqd(struct mqd_manager *mm, void *mqd,
115 struct kfd_mem_obj *mqd_mem_obj)
116{
117 BUG_ON(!mm || !mqd);
118 kfd2kgd->free_mem(mm->dev->kgd, (struct kgd_mem *) mqd_mem_obj);
119}
120
121static int load_mqd(struct mqd_manager *mm, void *mqd, uint32_t pipe_id,
122 uint32_t queue_id, uint32_t __user *wptr)
123{
124	return kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id, wptr);
125}
127
128static int update_mqd(struct mqd_manager *mm, void *mqd,
129 struct queue_properties *q)
130{
131 struct cik_mqd *m;
132
133 BUG_ON(!mm || !q || !mqd);
134
135 pr_debug("kfd: In func %s\n", __func__);
136
137 m = get_mqd(mqd);
138 m->cp_hqd_pq_control = DEFAULT_RPTR_BLOCK_SIZE |
139 DEFAULT_MIN_AVAIL_SIZE | PQ_ATC_EN;
140
141	/*
142	 * Queue size is encoded as log base 2 of the queue size in dwords,
143	 * minus 1. ffs() returns a 1-based bit position, hence the second -1.
144	 */
145 m->cp_hqd_pq_control |= ffs(q->queue_size / sizeof(unsigned int))
146 - 1 - 1;
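	/*
	 * Worked example (illustrative): a 4 KB ring buffer holds 1024
	 * dwords; ffs(1024) = 11, so the field becomes 11 - 1 - 1 = 9,
	 * i.e. log2(1024) - 1.
	 */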
147 m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
148 m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
149 m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
150 m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
151 m->cp_hqd_pq_doorbell_control = DOORBELL_EN |
152 DOORBELL_OFFSET(q->doorbell_off);
153
154 m->cp_hqd_vmid = q->vmid;
155
156 if (q->format == KFD_QUEUE_FORMAT_AQL) {
157 m->cp_hqd_iq_rptr = AQL_ENABLE;
158 m->cp_hqd_pq_control |= NO_UPDATE_RPTR;
159 }
160
161 m->cp_hqd_active = 0;
162 q->is_active = false;
163 if (q->queue_size > 0 &&
164 q->queue_address != 0 &&
165 q->queue_percent > 0) {
166 m->cp_hqd_active = 1;
167 q->is_active = true;
168 }
169
170 return 0;
171}
172
173static int destroy_mqd(struct mqd_manager *mm, void *mqd,
174 enum kfd_preempt_type type,
175 unsigned int timeout, uint32_t pipe_id,
176 uint32_t queue_id)
177{
178 return kfd2kgd->hqd_destroy(mm->dev->kgd, type, timeout,
179 pipe_id, queue_id);
180}
181
182static bool is_occupied(struct mqd_manager *mm, void *mqd,
183 uint64_t queue_address, uint32_t pipe_id,
184 uint32_t queue_id)
185{
186	return kfd2kgd->hqd_is_occupies(mm->dev->kgd, queue_address,
187					pipe_id, queue_id);
188}
191
192/*
193 * HIQ MQD implementation.
194 * The HIQ queue in Kaveri uses the same MQD structure as the user mode
195 * queues, but with different initial values.
196 */
197
198static int init_mqd_hiq(struct mqd_manager *mm, void **mqd,
199 struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
200 struct queue_properties *q)
201{
202 uint64_t addr;
203 struct cik_mqd *m;
204 int retval;
205
206 BUG_ON(!mm || !q || !mqd || !mqd_mem_obj);
207
208 pr_debug("kfd: In func %s\n", __func__);
209
210 retval = kfd2kgd->allocate_mem(mm->dev->kgd,
211 sizeof(struct cik_mqd),
212 256,
213 KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
214 (struct kgd_mem **) mqd_mem_obj);
215
216 if (retval != 0)
217 return -ENOMEM;
218
219 m = (struct cik_mqd *) (*mqd_mem_obj)->cpu_ptr;
220 addr = (*mqd_mem_obj)->gpu_addr;
221
222 memset(m, 0, ALIGN(sizeof(struct cik_mqd), 256));
223
224 m->header = 0xC0310800;
225 m->compute_pipelinestat_enable = 1;
226 m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
227 m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
228 m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
229 m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
230
231 m->cp_hqd_persistent_state = DEFAULT_CP_HQD_PERSISTENT_STATE |
232 PRELOAD_REQ;
233 m->cp_hqd_quantum = QUANTUM_EN | QUANTUM_SCALE_1MS |
234 QUANTUM_DURATION(10);
235
236 m->cp_mqd_control = MQD_CONTROL_PRIV_STATE_EN;
237 m->cp_mqd_base_addr_lo = lower_32_bits(addr);
238 m->cp_mqd_base_addr_hi = upper_32_bits(addr);
239
240 m->cp_hqd_ib_control = DEFAULT_MIN_IB_AVAIL_SIZE;
241
242 /*
243 * Pipe Priority
244 * Identifies the pipe relative priority when this queue is connected
245 * to the pipeline. The pipe priority is against the GFX pipe and HP3D.
246 * In KFD we are using a fixed pipe priority set to CS_MEDIUM.
247 * 0 = CS_LOW (typically below GFX)
248	 * 1 = CS_MEDIUM (typically between HP3D and GFX)
249 * 2 = CS_HIGH (typically above HP3D)
250 */
251 m->cp_hqd_pipe_priority = 1;
252 m->cp_hqd_queue_priority = 15;
253
254 *mqd = m;
255 if (gart_addr)
256 *gart_addr = addr;
257 retval = mm->update_mqd(mm, m, q);
258
259 return retval;
260}
261
262static int update_mqd_hiq(struct mqd_manager *mm, void *mqd,
263 struct queue_properties *q)
264{
265 struct cik_mqd *m;
266
267 BUG_ON(!mm || !q || !mqd);
268
269 pr_debug("kfd: In func %s\n", __func__);
270
271 m = get_mqd(mqd);
272 m->cp_hqd_pq_control = DEFAULT_RPTR_BLOCK_SIZE |
273 DEFAULT_MIN_AVAIL_SIZE |
274 PRIV_STATE |
275 KMD_QUEUE;
276
277	/*
278	 * Queue size is encoded as log base 2 of the queue size in dwords,
279	 * minus 1; the second -1 compensates for the 1-based ffs().
280	 */
281 m->cp_hqd_pq_control |= ffs(q->queue_size / sizeof(unsigned int))
282 - 1 - 1;
283 m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
284 m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
285 m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
286 m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
287 m->cp_hqd_pq_doorbell_control = DOORBELL_EN |
288 DOORBELL_OFFSET(q->doorbell_off);
289
290 m->cp_hqd_vmid = q->vmid;
291
292 m->cp_hqd_active = 0;
293 q->is_active = false;
294 if (q->queue_size > 0 &&
295 q->queue_address != 0 &&
296 q->queue_percent > 0) {
297 m->cp_hqd_active = 1;
298 q->is_active = true;
299 }
300
301 return 0;
302}
303
304struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
305 struct kfd_dev *dev)
306{
307 struct mqd_manager *mqd;
308
309 BUG_ON(!dev);
310 BUG_ON(type >= KFD_MQD_TYPE_MAX);
311
312 pr_debug("kfd: In func %s\n", __func__);
313
314 mqd = kzalloc(sizeof(struct mqd_manager), GFP_KERNEL);
315 if (!mqd)
316 return NULL;
317
318 mqd->dev = dev;
319
320 switch (type) {
321 case KFD_MQD_TYPE_CIK_CP:
322 case KFD_MQD_TYPE_CIK_COMPUTE:
323 mqd->init_mqd = init_mqd;
324 mqd->uninit_mqd = uninit_mqd;
325 mqd->load_mqd = load_mqd;
326 mqd->update_mqd = update_mqd;
327 mqd->destroy_mqd = destroy_mqd;
328 mqd->is_occupied = is_occupied;
329 break;
330 case KFD_MQD_TYPE_CIK_HIQ:
331 mqd->init_mqd = init_mqd_hiq;
332 mqd->uninit_mqd = uninit_mqd;
333 mqd->load_mqd = load_mqd;
334 mqd->update_mqd = update_mqd_hiq;
335 mqd->destroy_mqd = destroy_mqd;
336 mqd->is_occupied = is_occupied;
337 break;
338 default:
339 kfree(mqd);
340 return NULL;
341 }
342
343 return mqd;
344}
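
/*
 * Illustrative lifecycle of the ops table returned above (a sketch; the
 * caller-side variable names are hypothetical, not defined in this file):
 *
 *	struct mqd_manager *mm = mqd_manager_init(KFD_MQD_TYPE_CIK_HIQ, dev);
 *	void *mqd;
 *	struct kfd_mem_obj *mqd_mem_obj;
 *	uint64_t gart_addr;
 *
 *	if (mm && !mm->init_mqd(mm, &mqd, &mqd_mem_obj, &gart_addr,
 *				&q->properties))
 *		mm->load_mqd(mm, mqd, pipe_id, queue_id, wptr);
 *	...
 *	mm->uninit_mqd(mm, mqd, mqd_mem_obj);
 *	kfree(mm);
 */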
345
346/* SDMA queues should be implemented here once the cp supports them */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
new file mode 100644
index 000000000000..213a71e0b6c7
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
@@ -0,0 +1,91 @@
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef KFD_MQD_MANAGER_H_
25#define KFD_MQD_MANAGER_H_
26
27#include "kfd_priv.h"
28
29/**
30 * struct mqd_manager
31 *
32 * @init_mqd: Allocates the mqd buffer on local gpu memory and initializes it.
33 *
34 * @load_mqd: Loads the mqd to a concrete hqd slot. Used only in no cp
35 * scheduling mode.
36 *
37 * @update_mqd: Handles an update call for the MQD.
38 *
39 * @destroy_mqd: Destroys the HQD slot, thereby preempting the relevant queue.
40 * Used only in no cp scheduling mode.
41 *
42 * @uninit_mqd: Releases the mqd buffer from local gpu memory.
43 *
44 * @is_occupied: Checks if the relevant HQD slot is occupied.
45 *
46 * @mqd_mutex: Mqd manager mutex.
47 *
48 * @dev: The kfd device structure coupled with this module.
49 *
50 * MQD stands for Memory Queue Descriptor, which represents the current queue
51 * state in memory and initiates the HQD (Hardware Queue Descriptor) state.
52 * This structure is in effect a base class for the MQD structures of the
53 * various ASICs that may be supported in the future, and it also contains
54 * all the MQD specific operations.
55 * Note that each queue has an MQD that keeps its state (or context) across
56 * preemptions and reassignments.
57 * Basically there is one instance of the mqd manager class per MQD type per
58 * ASIC. Currently the kfd driver supports only Kaveri, so there is one
59 * instance per KFD_MQD_TYPE for each device.
60 *
61 */
62
63struct mqd_manager {
64 int (*init_mqd)(struct mqd_manager *mm, void **mqd,
65 struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
66 struct queue_properties *q);
67
68 int (*load_mqd)(struct mqd_manager *mm, void *mqd,
69 uint32_t pipe_id, uint32_t queue_id,
70 uint32_t __user *wptr);
71
72 int (*update_mqd)(struct mqd_manager *mm, void *mqd,
73 struct queue_properties *q);
74
75 int (*destroy_mqd)(struct mqd_manager *mm, void *mqd,
76 enum kfd_preempt_type type,
77 unsigned int timeout, uint32_t pipe_id,
78 uint32_t queue_id);
79
80 void (*uninit_mqd)(struct mqd_manager *mm, void *mqd,
81 struct kfd_mem_obj *mqd_mem_obj);
82
83 bool (*is_occupied)(struct mqd_manager *mm, void *mqd,
84 uint64_t queue_address, uint32_t pipe_id,
85 uint32_t queue_id);
86
87 struct mutex mqd_mutex;
88 struct kfd_dev *dev;
89};
90
91#endif /* KFD_MQD_MANAGER_H_ */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
new file mode 100644
index 000000000000..5ce9233d2004
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -0,0 +1,565 @@
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/slab.h>
25#include <linux/mutex.h>
26#include "kfd_device_queue_manager.h"
27#include "kfd_kernel_queue.h"
28#include "kfd_priv.h"
29#include "kfd_pm4_headers.h"
30#include "kfd_pm4_opcodes.h"
31
32static inline void inc_wptr(unsigned int *wptr, unsigned int increment_bytes,
33 unsigned int buffer_size_bytes)
34{
35 unsigned int temp = *wptr + increment_bytes / sizeof(uint32_t);
36
37 BUG_ON((temp * sizeof(uint32_t)) > buffer_size_bytes);
38 *wptr = temp;
39}
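
/*
 * For example (illustrative): with sizeof(struct pm4_map_process) == 40
 * bytes, inc_wptr(&rl_wptr, 40, size) advances the write pointer by 10
 * dwords.
 */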
40
41static unsigned int build_pm4_header(unsigned int opcode, size_t packet_size)
42{
43 union PM4_MES_TYPE_3_HEADER header;
44
45 header.u32all = 0;
46 header.opcode = opcode;
47 header.count = packet_size/sizeof(uint32_t) - 2;
48 header.type = PM4_TYPE_3;
49
50 return header.u32all;
51}
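
/*
 * Example (illustrative): struct pm4_runlist is 4 dwords, so
 * build_pm4_header(IT_RUN_LIST, sizeof(struct pm4_runlist)) encodes
 * type = 3, count = 4 - 2 = 2 and opcode = 0xA5, i.e. 0xC002A500.
 */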
52
53static void pm_calc_rlib_size(struct packet_manager *pm,
54 unsigned int *rlib_size,
55 bool *over_subscription)
56{
57 unsigned int process_count, queue_count;
58
59 BUG_ON(!pm || !rlib_size || !over_subscription);
60
61 process_count = pm->dqm->processes_count;
62 queue_count = pm->dqm->queue_count;
63
64	/* check if there is over subscription */
65 *over_subscription = false;
66 if ((process_count > 1) ||
67 queue_count > PIPE_PER_ME_CP_SCHEDULING * QUEUES_PER_PIPE) {
68 *over_subscription = true;
69 pr_debug("kfd: over subscribed runlist\n");
70 }
71
72 /* calculate run list ib allocation size */
73 *rlib_size = process_count * sizeof(struct pm4_map_process) +
74 queue_count * sizeof(struct pm4_map_queues);
75
76 /*
77 * Increase the allocation size in case we need a chained run list
78	 * when over subscribed
79 */
80 if (*over_subscription)
81 *rlib_size += sizeof(struct pm4_runlist);
82
83 pr_debug("kfd: runlist ib size %d\n", *rlib_size);
84}
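
/*
 * Worked example (illustrative, assuming the packet sizes that follow from
 * kfd_pm4_headers.h: map_process 40 bytes, map_queues 28 bytes, runlist 16
 * bytes): 2 processes with 3 queues in total need 2 * 40 + 3 * 28 = 164
 * bytes, plus 16 bytes for the chained runlist packet because
 * process_count > 1 marks the runlist over subscribed, i.e. 180 bytes.
 */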
85
86static int pm_allocate_runlist_ib(struct packet_manager *pm,
87 unsigned int **rl_buffer,
88 uint64_t *rl_gpu_buffer,
89 unsigned int *rl_buffer_size,
90 bool *is_over_subscription)
91{
92 int retval;
93
94 BUG_ON(!pm);
95	BUG_ON(pm->allocated);
96	BUG_ON(!is_over_subscription);
97
98 pm_calc_rlib_size(pm, rl_buffer_size, is_over_subscription);
99
100 retval = kfd2kgd->allocate_mem(pm->dqm->dev->kgd,
101 *rl_buffer_size,
102 PAGE_SIZE,
103 KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
104 (struct kgd_mem **) &pm->ib_buffer_obj);
105
106 if (retval != 0) {
107 pr_err("kfd: failed to allocate runlist IB\n");
108 return retval;
109 }
110
111 *(void **)rl_buffer = pm->ib_buffer_obj->cpu_ptr;
112 *rl_gpu_buffer = pm->ib_buffer_obj->gpu_addr;
113
114 memset(*rl_buffer, 0, *rl_buffer_size);
115 pm->allocated = true;
116 return retval;
117}
118
119static int pm_create_runlist(struct packet_manager *pm, uint32_t *buffer,
120 uint64_t ib, size_t ib_size_in_dwords, bool chain)
121{
122 struct pm4_runlist *packet;
123
124 BUG_ON(!pm || !buffer || !ib);
125
126 packet = (struct pm4_runlist *)buffer;
127
128 memset(buffer, 0, sizeof(struct pm4_runlist));
129 packet->header.u32all = build_pm4_header(IT_RUN_LIST,
130 sizeof(struct pm4_runlist));
131
132 packet->bitfields4.ib_size = ib_size_in_dwords;
133 packet->bitfields4.chain = chain ? 1 : 0;
134 packet->bitfields4.offload_polling = 0;
135 packet->bitfields4.valid = 1;
136 packet->ordinal2 = lower_32_bits(ib);
137 packet->bitfields3.ib_base_hi = upper_32_bits(ib);
138
139 return 0;
140}
141
142static int pm_create_map_process(struct packet_manager *pm, uint32_t *buffer,
143 struct qcm_process_device *qpd)
144{
145 struct pm4_map_process *packet;
146 struct queue *cur;
147 uint32_t num_queues;
148
149 BUG_ON(!pm || !buffer || !qpd);
150
151 packet = (struct pm4_map_process *)buffer;
152
153 pr_debug("kfd: In func %s\n", __func__);
154
155 memset(buffer, 0, sizeof(struct pm4_map_process));
156
157 packet->header.u32all = build_pm4_header(IT_MAP_PROCESS,
158 sizeof(struct pm4_map_process));
159 packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
160 packet->bitfields2.process_quantum = 1;
161 packet->bitfields2.pasid = qpd->pqm->process->pasid;
162 packet->bitfields3.page_table_base = qpd->page_table_base;
163 packet->bitfields10.gds_size = qpd->gds_size;
164 packet->bitfields10.num_gws = qpd->num_gws;
165 packet->bitfields10.num_oac = qpd->num_oac;
166 num_queues = 0;
167 list_for_each_entry(cur, &qpd->queues_list, list)
168 num_queues++;
169 packet->bitfields10.num_queues = num_queues;
170
171 packet->sh_mem_config = qpd->sh_mem_config;
172 packet->sh_mem_bases = qpd->sh_mem_bases;
173 packet->sh_mem_ape1_base = qpd->sh_mem_ape1_base;
174 packet->sh_mem_ape1_limit = qpd->sh_mem_ape1_limit;
175
176 packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
177 packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);
178
179 return 0;
180}
181
182static int pm_create_map_queue(struct packet_manager *pm, uint32_t *buffer,
183 struct queue *q)
184{
185 struct pm4_map_queues *packet;
186
187 BUG_ON(!pm || !buffer || !q);
188
189 pr_debug("kfd: In func %s\n", __func__);
190
191 packet = (struct pm4_map_queues *)buffer;
192 memset(buffer, 0, sizeof(struct pm4_map_queues));
193
194 packet->header.u32all = build_pm4_header(IT_MAP_QUEUES,
195 sizeof(struct pm4_map_queues));
196 packet->bitfields2.alloc_format =
197 alloc_format__mes_map_queues__one_per_pipe;
198 packet->bitfields2.num_queues = 1;
199 packet->bitfields2.queue_sel =
200 queue_sel__mes_map_queues__map_to_hws_determined_queue_slots;
201
202 packet->bitfields2.vidmem = (q->properties.is_interop) ?
203 vidmem__mes_map_queues__uses_video_memory :
204 vidmem__mes_map_queues__uses_no_video_memory;
205
206 switch (q->properties.type) {
207 case KFD_QUEUE_TYPE_COMPUTE:
208 case KFD_QUEUE_TYPE_DIQ:
209 packet->bitfields2.engine_sel =
210 engine_sel__mes_map_queues__compute;
211 break;
212 case KFD_QUEUE_TYPE_SDMA:
213 packet->bitfields2.engine_sel =
214 engine_sel__mes_map_queues__sdma0;
215 break;
216 default:
217 BUG();
218 break;
219 }
220
221 packet->mes_map_queues_ordinals[0].bitfields3.doorbell_offset =
222 q->properties.doorbell_off;
223
224 packet->mes_map_queues_ordinals[0].mqd_addr_lo =
225 lower_32_bits(q->gart_mqd_addr);
226
227 packet->mes_map_queues_ordinals[0].mqd_addr_hi =
228 upper_32_bits(q->gart_mqd_addr);
229
230 packet->mes_map_queues_ordinals[0].wptr_addr_lo =
231 lower_32_bits((uint64_t)q->properties.write_ptr);
232
233 packet->mes_map_queues_ordinals[0].wptr_addr_hi =
234 upper_32_bits((uint64_t)q->properties.write_ptr);
235
236 return 0;
237}
238
239static int pm_create_runlist_ib(struct packet_manager *pm,
240 struct list_head *queues,
241 uint64_t *rl_gpu_addr,
242 size_t *rl_size_bytes)
243{
244 unsigned int alloc_size_bytes;
245 unsigned int *rl_buffer, rl_wptr, i;
246	int retval, processes_mapped;
247 struct device_process_node *cur;
248 struct qcm_process_device *qpd;
249 struct queue *q;
250 struct kernel_queue *kq;
251 bool is_over_subscription;
252
253 BUG_ON(!pm || !queues || !rl_size_bytes || !rl_gpu_addr);
254
255	rl_wptr = retval = processes_mapped = 0;
256
257 retval = pm_allocate_runlist_ib(pm, &rl_buffer, rl_gpu_addr,
258 &alloc_size_bytes, &is_over_subscription);
259 if (retval != 0)
260 return retval;
261
262 *rl_size_bytes = alloc_size_bytes;
263
264 pr_debug("kfd: In func %s\n", __func__);
265	pr_debug("kfd: building runlist ib, process count: %d, queue count: %d\n",
266 pm->dqm->processes_count, pm->dqm->queue_count);
267
268 /* build the run list ib packet */
269 list_for_each_entry(cur, queues, list) {
270 qpd = cur->qpd;
271 /* build map process packet */
272		if (processes_mapped >= pm->dqm->processes_count) {
273 pr_debug("kfd: not enough space left in runlist IB\n");
274 pm_release_ib(pm);
275 return -ENOMEM;
276 }
277 retval = pm_create_map_process(pm, &rl_buffer[rl_wptr], qpd);
278 if (retval != 0)
279 return retval;
280		processes_mapped++;
281 inc_wptr(&rl_wptr, sizeof(struct pm4_map_process),
282 alloc_size_bytes);
283
284 list_for_each_entry(kq, &qpd->priv_queue_list, list) {
285			if (!kq->queue->properties.is_active)
286 continue;
287 retval = pm_create_map_queue(pm, &rl_buffer[rl_wptr],
288 kq->queue);
289 if (retval != 0)
290 return retval;
291 inc_wptr(&rl_wptr, sizeof(struct pm4_map_queues),
292 alloc_size_bytes);
293 }
294
295 list_for_each_entry(q, &qpd->queues_list, list) {
296			if (!q->properties.is_active)
297 continue;
298 retval = pm_create_map_queue(pm,
299 &rl_buffer[rl_wptr], q);
300 if (retval != 0)
301 return retval;
302 inc_wptr(&rl_wptr, sizeof(struct pm4_map_queues),
303 alloc_size_bytes);
304 }
305 }
306
307 pr_debug("kfd: finished map process and queues to runlist\n");
308
309 if (is_over_subscription)
310 pm_create_runlist(pm, &rl_buffer[rl_wptr], *rl_gpu_addr,
311 alloc_size_bytes / sizeof(uint32_t), true);
312
313 for (i = 0; i < alloc_size_bytes / sizeof(uint32_t); i++)
314		pr_debug("0x%08X ", rl_buffer[i]);
315 pr_debug("\n");
316
317 return 0;
318}
319
320int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm)
321{
322 BUG_ON(!dqm);
323
324 pm->dqm = dqm;
325 mutex_init(&pm->lock);
326 pm->priv_queue = kernel_queue_init(dqm->dev, KFD_QUEUE_TYPE_HIQ);
327 if (pm->priv_queue == NULL) {
328 mutex_destroy(&pm->lock);
329 return -ENOMEM;
330 }
331 pm->allocated = false;
332
333 return 0;
334}
335
336void pm_uninit(struct packet_manager *pm)
337{
338 BUG_ON(!pm);
339
340 mutex_destroy(&pm->lock);
341 kernel_queue_uninit(pm->priv_queue);
342}
343
344int pm_send_set_resources(struct packet_manager *pm,
345 struct scheduling_resources *res)
346{
347 struct pm4_set_resources *packet;
348
349 BUG_ON(!pm || !res);
350
351 pr_debug("kfd: In func %s\n", __func__);
352
353 mutex_lock(&pm->lock);
354 pm->priv_queue->acquire_packet_buffer(pm->priv_queue,
355 sizeof(*packet) / sizeof(uint32_t),
356 (unsigned int **)&packet);
357 if (packet == NULL) {
358 mutex_unlock(&pm->lock);
359 pr_err("kfd: failed to allocate buffer on kernel queue\n");
360 return -ENOMEM;
361 }
362
363 memset(packet, 0, sizeof(struct pm4_set_resources));
364 packet->header.u32all = build_pm4_header(IT_SET_RESOURCES,
365 sizeof(struct pm4_set_resources));
366
367 packet->bitfields2.queue_type =
368 queue_type__mes_set_resources__hsa_interface_queue_hiq;
369 packet->bitfields2.vmid_mask = res->vmid_mask;
370 packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY;
371 packet->bitfields7.oac_mask = res->oac_mask;
372 packet->bitfields8.gds_heap_base = res->gds_heap_base;
373 packet->bitfields8.gds_heap_size = res->gds_heap_size;
374
375 packet->gws_mask_lo = lower_32_bits(res->gws_mask);
376 packet->gws_mask_hi = upper_32_bits(res->gws_mask);
377
378 packet->queue_mask_lo = lower_32_bits(res->queue_mask);
379 packet->queue_mask_hi = upper_32_bits(res->queue_mask);
380
381 pm->priv_queue->submit_packet(pm->priv_queue);
382 pm->priv_queue->sync_with_hw(pm->priv_queue, KFD_HIQ_TIMEOUT);
383
384 mutex_unlock(&pm->lock);
385
386 return 0;
387}
388
389int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
390{
391 uint64_t rl_gpu_ib_addr;
392 uint32_t *rl_buffer;
393 size_t rl_ib_size, packet_size_dwords;
394 int retval;
395
396 BUG_ON(!pm || !dqm_queues);
397
398 retval = pm_create_runlist_ib(pm, dqm_queues, &rl_gpu_ib_addr,
399 &rl_ib_size);
400 if (retval != 0)
401 goto fail_create_runlist_ib;
402
403 pr_debug("kfd: runlist IB address: 0x%llX\n", rl_gpu_ib_addr);
404
405 packet_size_dwords = sizeof(struct pm4_runlist) / sizeof(uint32_t);
406 mutex_lock(&pm->lock);
407
408 retval = pm->priv_queue->acquire_packet_buffer(pm->priv_queue,
409 packet_size_dwords, &rl_buffer);
410 if (retval != 0)
411 goto fail_acquire_packet_buffer;
412
413 retval = pm_create_runlist(pm, rl_buffer, rl_gpu_ib_addr,
414 rl_ib_size / sizeof(uint32_t), false);
415 if (retval != 0)
416 goto fail_create_runlist;
417
418 pm->priv_queue->submit_packet(pm->priv_queue);
419 pm->priv_queue->sync_with_hw(pm->priv_queue, KFD_HIQ_TIMEOUT);
420
421 mutex_unlock(&pm->lock);
422
423 return retval;
424
425fail_create_runlist:
426 pm->priv_queue->rollback_packet(pm->priv_queue);
427fail_acquire_packet_buffer:
428 mutex_unlock(&pm->lock);
429fail_create_runlist_ib:
430	if (pm->allocated)
431 pm_release_ib(pm);
432 return retval;
433}
434
435int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
436 uint32_t fence_value)
437{
438 int retval;
439 struct pm4_query_status *packet;
440
441 BUG_ON(!pm || !fence_address);
442
443 mutex_lock(&pm->lock);
444 retval = pm->priv_queue->acquire_packet_buffer(
445 pm->priv_queue,
446 sizeof(struct pm4_query_status) / sizeof(uint32_t),
447 (unsigned int **)&packet);
448 if (retval != 0)
449 goto fail_acquire_packet_buffer;
450
451 packet->header.u32all = build_pm4_header(IT_QUERY_STATUS,
452 sizeof(struct pm4_query_status));
453
454 packet->bitfields2.context_id = 0;
455 packet->bitfields2.interrupt_sel =
456 interrupt_sel__mes_query_status__completion_status;
457 packet->bitfields2.command =
458 command__mes_query_status__fence_only_after_write_ack;
459
460 packet->addr_hi = upper_32_bits((uint64_t)fence_address);
461 packet->addr_lo = lower_32_bits((uint64_t)fence_address);
462 packet->data_hi = upper_32_bits((uint64_t)fence_value);
463 packet->data_lo = lower_32_bits((uint64_t)fence_value);
464
465 pm->priv_queue->submit_packet(pm->priv_queue);
466 pm->priv_queue->sync_with_hw(pm->priv_queue, KFD_HIQ_TIMEOUT);
467 mutex_unlock(&pm->lock);
468
469 return 0;
470
471fail_acquire_packet_buffer:
472 mutex_unlock(&pm->lock);
473 return retval;
474}
475
476int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
477 enum kfd_preempt_type_filter mode,
478 uint32_t filter_param, bool reset,
479 unsigned int sdma_engine)
480{
481 int retval;
482 uint32_t *buffer;
483 struct pm4_unmap_queues *packet;
484
485 BUG_ON(!pm);
486
487 mutex_lock(&pm->lock);
488 retval = pm->priv_queue->acquire_packet_buffer(
489 pm->priv_queue,
490 sizeof(struct pm4_unmap_queues) / sizeof(uint32_t),
491 &buffer);
492 if (retval != 0)
493 goto err_acquire_packet_buffer;
494
495 packet = (struct pm4_unmap_queues *)buffer;
496 memset(buffer, 0, sizeof(struct pm4_unmap_queues));
497
498 packet->header.u32all = build_pm4_header(IT_UNMAP_QUEUES,
499 sizeof(struct pm4_unmap_queues));
500 switch (type) {
501 case KFD_QUEUE_TYPE_COMPUTE:
502 case KFD_QUEUE_TYPE_DIQ:
503 packet->bitfields2.engine_sel =
504 engine_sel__mes_unmap_queues__compute;
505 break;
506 case KFD_QUEUE_TYPE_SDMA:
507 packet->bitfields2.engine_sel =
508 engine_sel__mes_unmap_queues__sdma0 + sdma_engine;
509 break;
510 default:
511 BUG();
512 break;
513 }
514
515 if (reset)
516 packet->bitfields2.action =
517 action__mes_unmap_queues__reset_queues;
518 else
519 packet->bitfields2.action =
520 action__mes_unmap_queues__preempt_queues;
521
522 switch (mode) {
523 case KFD_PREEMPT_TYPE_FILTER_SINGLE_QUEUE:
524 packet->bitfields2.queue_sel =
525 queue_sel__mes_unmap_queues__perform_request_on_specified_queues;
526 packet->bitfields2.num_queues = 1;
527 packet->bitfields3b.doorbell_offset0 = filter_param;
528 break;
529 case KFD_PREEMPT_TYPE_FILTER_BY_PASID:
530 packet->bitfields2.queue_sel =
531 queue_sel__mes_unmap_queues__perform_request_on_pasid_queues;
532 packet->bitfields3a.pasid = filter_param;
533 break;
534 case KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES:
535 packet->bitfields2.queue_sel =
536 queue_sel__mes_unmap_queues__perform_request_on_all_active_queues;
537 break;
538 default:
539 BUG();
540 break;
541	}
542
543 pm->priv_queue->submit_packet(pm->priv_queue);
544 pm->priv_queue->sync_with_hw(pm->priv_queue, KFD_HIQ_TIMEOUT);
545
546 mutex_unlock(&pm->lock);
547 return 0;
548
549err_acquire_packet_buffer:
550 mutex_unlock(&pm->lock);
551 return retval;
552}
553
554void pm_release_ib(struct packet_manager *pm)
555{
556 BUG_ON(!pm);
557
558 mutex_lock(&pm->lock);
559 if (pm->allocated) {
560 kfd2kgd->free_mem(pm->dqm->dev->kgd,
561 (struct kgd_mem *) pm->ib_buffer_obj);
562 pm->allocated = false;
563 }
564 mutex_unlock(&pm->lock);
565}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
new file mode 100644
index 000000000000..71699ad97d74
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
@@ -0,0 +1,96 @@
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#include <linux/slab.h>
24#include <linux/types.h>
25#include "kfd_priv.h"
26
27static unsigned long *pasid_bitmap;
28static unsigned int pasid_limit;
29static DEFINE_MUTEX(pasid_mutex);
30
31int kfd_pasid_init(void)
32{
33 pasid_limit = max_num_of_processes;
34
35	/* BITS_TO_LONGS() counts longs, not bytes, so scale by sizeof(long) */
36	pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long),
37			       GFP_KERNEL);
36 if (!pasid_bitmap)
37 return -ENOMEM;
38
39 set_bit(0, pasid_bitmap); /* PASID 0 is reserved. */
40
41 return 0;
42}
43
44void kfd_pasid_exit(void)
45{
46 kfree(pasid_bitmap);
47}
48
49bool kfd_set_pasid_limit(unsigned int new_limit)
50{
51 if (new_limit < pasid_limit) {
52 bool ok;
53
54 mutex_lock(&pasid_mutex);
55
56 /* ensure that no pasids >= new_limit are in-use */
57 ok = (find_next_bit(pasid_bitmap, pasid_limit, new_limit) ==
58 pasid_limit);
59 if (ok)
60 pasid_limit = new_limit;
61
62 mutex_unlock(&pasid_mutex);
63
64 return ok;
65 }
66
67 return true;
68}
69
70inline unsigned int kfd_get_pasid_limit(void)
71{
72 return pasid_limit;
73}
74
75unsigned int kfd_pasid_alloc(void)
76{
77 unsigned int found;
78
79 mutex_lock(&pasid_mutex);
80
81 found = find_first_zero_bit(pasid_bitmap, pasid_limit);
82 if (found == pasid_limit)
83 found = 0;
84 else
85 set_bit(found, pasid_bitmap);
86
87 mutex_unlock(&pasid_mutex);
88
89 return found;
90}
91
92void kfd_pasid_free(unsigned int pasid)
93{
94 BUG_ON(pasid == 0 || pasid >= pasid_limit);
95 clear_bit(pasid, pasid_bitmap);
96}
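
/*
 * Illustrative caller-side usage (a sketch, not part of this file;
 * kfd_pasid_alloc() returns 0 on exhaustion since PASID 0 is reserved):
 *
 *	unsigned int pasid = kfd_pasid_alloc();
 *
 *	if (pasid == 0)
 *		return -ENOMEM;
 *	...
 *	kfd_pasid_free(pasid);
 */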
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers.h b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers.h
new file mode 100644
index 000000000000..071ad5724bd2
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers.h
@@ -0,0 +1,405 @@
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#ifndef KFD_PM4_HEADERS_H_
25#define KFD_PM4_HEADERS_H_
26
27#ifndef PM4_MES_HEADER_DEFINED
28#define PM4_MES_HEADER_DEFINED
29union PM4_MES_TYPE_3_HEADER {
30 struct {
31 uint32_t reserved1:8; /* < reserved */
32 uint32_t opcode:8; /* < IT opcode */
33 uint32_t count:14; /* < number of DWORDs - 1
34 * in the information body.
35 */
36 uint32_t type:2; /* < packet identifier.
37 * It should be 3 for type 3 packets
38 */
39 };
40 uint32_t u32all;
41};
42#endif /* PM4_MES_HEADER_DEFINED */
43
44/* --------------------MES_SET_RESOURCES-------------------- */
45
46#ifndef PM4_MES_SET_RESOURCES_DEFINED
47#define PM4_MES_SET_RESOURCES_DEFINED
48enum set_resources_queue_type_enum {
49 queue_type__mes_set_resources__kernel_interface_queue_kiq = 0,
50 queue_type__mes_set_resources__hsa_interface_queue_hiq = 1,
51 queue_type__mes_set_resources__hsa_debug_interface_queue = 4
52};
53
54struct pm4_set_resources {
55 union {
56 union PM4_MES_TYPE_3_HEADER header; /* header */
57 uint32_t ordinal1;
58 };
59
60 union {
61 struct {
62 uint32_t vmid_mask:16;
63 uint32_t unmap_latency:8;
64 uint32_t reserved1:5;
65 enum set_resources_queue_type_enum queue_type:3;
66 } bitfields2;
67 uint32_t ordinal2;
68 };
69
70 uint32_t queue_mask_lo;
71 uint32_t queue_mask_hi;
72 uint32_t gws_mask_lo;
73 uint32_t gws_mask_hi;
74
75 union {
76 struct {
77 uint32_t oac_mask:16;
78 uint32_t reserved2:16;
79 } bitfields7;
80 uint32_t ordinal7;
81 };
82
83 union {
84 struct {
85 uint32_t gds_heap_base:6;
86 uint32_t reserved3:5;
87 uint32_t gds_heap_size:6;
88 uint32_t reserved4:15;
89 } bitfields8;
90 uint32_t ordinal8;
91 };
92
93};
94#endif
95
96/*--------------------MES_RUN_LIST-------------------- */
97
98#ifndef PM4_MES_RUN_LIST_DEFINED
99#define PM4_MES_RUN_LIST_DEFINED
100
101struct pm4_runlist {
102 union {
103 union PM4_MES_TYPE_3_HEADER header; /* header */
104 uint32_t ordinal1;
105 };
106
107 union {
108 struct {
109 uint32_t reserved1:2;
110 uint32_t ib_base_lo:30;
111 } bitfields2;
112 uint32_t ordinal2;
113 };
114
115 union {
116 struct {
117 uint32_t ib_base_hi:16;
118 uint32_t reserved2:16;
119 } bitfields3;
120 uint32_t ordinal3;
121 };
122
123 union {
124 struct {
125 uint32_t ib_size:20;
126 uint32_t chain:1;
127 uint32_t offload_polling:1;
128 uint32_t reserved3:1;
129 uint32_t valid:1;
130 uint32_t reserved4:8;
131 } bitfields4;
132 uint32_t ordinal4;
133 };
134
135};
136#endif
137
138/*--------------------MES_MAP_PROCESS-------------------- */
139
140#ifndef PM4_MES_MAP_PROCESS_DEFINED
141#define PM4_MES_MAP_PROCESS_DEFINED
142
143struct pm4_map_process {
144 union {
145 union PM4_MES_TYPE_3_HEADER header; /* header */
146 uint32_t ordinal1;
147 };
148
149 union {
150 struct {
151 uint32_t pasid:16;
152 uint32_t reserved1:8;
153 uint32_t diq_enable:1;
154 uint32_t process_quantum:7;
155 } bitfields2;
156 uint32_t ordinal2;
157 };
158
159 union {
160 struct {
161 uint32_t page_table_base:28;
162 uint32_t reserved3:4;
163 } bitfields3;
164 uint32_t ordinal3;
165 };
166
167 uint32_t sh_mem_bases;
168 uint32_t sh_mem_ape1_base;
169 uint32_t sh_mem_ape1_limit;
170 uint32_t sh_mem_config;
171 uint32_t gds_addr_lo;
172 uint32_t gds_addr_hi;
173
174 union {
175 struct {
176 uint32_t num_gws:6;
177 uint32_t reserved4:2;
178 uint32_t num_oac:4;
179 uint32_t reserved5:4;
180 uint32_t gds_size:6;
181 uint32_t num_queues:10;
182 } bitfields10;
183 uint32_t ordinal10;
184 };
185
186};
187#endif
188
189/*--------------------MES_MAP_QUEUES--------------------*/
190
191#ifndef PM4_MES_MAP_QUEUES_DEFINED
192#define PM4_MES_MAP_QUEUES_DEFINED
193enum map_queues_queue_sel_enum {
194 queue_sel__mes_map_queues__map_to_specified_queue_slots = 0,
195 queue_sel__mes_map_queues__map_to_hws_determined_queue_slots = 1,
196 queue_sel__mes_map_queues__enable_process_queues = 2
197};
198
199enum map_queues_vidmem_enum {
200 vidmem__mes_map_queues__uses_no_video_memory = 0,
201 vidmem__mes_map_queues__uses_video_memory = 1
202};
203
204enum map_queues_alloc_format_enum {
205 alloc_format__mes_map_queues__one_per_pipe = 0,
206 alloc_format__mes_map_queues__all_on_one_pipe = 1
207};
208
209enum map_queues_engine_sel_enum {
210 engine_sel__mes_map_queues__compute = 0,
211 engine_sel__mes_map_queues__sdma0 = 2,
212 engine_sel__mes_map_queues__sdma1 = 3
213};
214
215struct pm4_map_queues {
216 union {
217 union PM4_MES_TYPE_3_HEADER header; /* header */
218 uint32_t ordinal1;
219 };
220
221 union {
222 struct {
223 uint32_t reserved1:4;
224 enum map_queues_queue_sel_enum queue_sel:2;
225 uint32_t reserved2:2;
226 uint32_t vmid:4;
227 uint32_t reserved3:4;
228 enum map_queues_vidmem_enum vidmem:2;
229 uint32_t reserved4:6;
230 enum map_queues_alloc_format_enum alloc_format:2;
231 enum map_queues_engine_sel_enum engine_sel:3;
232 uint32_t num_queues:3;
233 } bitfields2;
234 uint32_t ordinal2;
235 };
236
237 struct {
238 union {
239 struct {
240 uint32_t reserved5:2;
241 uint32_t doorbell_offset:21;
242 uint32_t reserved6:3;
243 uint32_t queue:6;
244 } bitfields3;
245 uint32_t ordinal3;
246 };
247
248 uint32_t mqd_addr_lo;
249 uint32_t mqd_addr_hi;
250 uint32_t wptr_addr_lo;
251 uint32_t wptr_addr_hi;
252
253 } mes_map_queues_ordinals[1]; /* 1..N of these ordinal groups */
254
255};
256#endif
257
258/*--------------------MES_QUERY_STATUS--------------------*/
259
260#ifndef PM4_MES_QUERY_STATUS_DEFINED
261#define PM4_MES_QUERY_STATUS_DEFINED
262enum query_status_interrupt_sel_enum {
263 interrupt_sel__mes_query_status__completion_status = 0,
264 interrupt_sel__mes_query_status__process_status = 1,
265 interrupt_sel__mes_query_status__queue_status = 2
266};
267
268enum query_status_command_enum {
269 command__mes_query_status__interrupt_only = 0,
270 command__mes_query_status__fence_only_immediate = 1,
271 command__mes_query_status__fence_only_after_write_ack = 2,
272 command__mes_query_status__fence_wait_for_write_ack_send_interrupt = 3
273};
274
275enum query_status_engine_sel_enum {
276 engine_sel__mes_query_status__compute = 0,
277 engine_sel__mes_query_status__sdma0_queue = 2,
278 engine_sel__mes_query_status__sdma1_queue = 3
279};
280
281struct pm4_query_status {
282 union {
283 union PM4_MES_TYPE_3_HEADER header; /* header */
284 uint32_t ordinal1;
285 };
286
287 union {
288 struct {
289 uint32_t context_id:28;
290 enum query_status_interrupt_sel_enum interrupt_sel:2;
291 enum query_status_command_enum command:2;
292 } bitfields2;
293 uint32_t ordinal2;
294 };
295
296 union {
297 struct {
298 uint32_t pasid:16;
299 uint32_t reserved1:16;
300 } bitfields3a;
301 struct {
302 uint32_t reserved2:2;
303 uint32_t doorbell_offset:21;
304 uint32_t reserved3:3;
305 enum query_status_engine_sel_enum engine_sel:3;
306 uint32_t reserved4:3;
307 } bitfields3b;
308 uint32_t ordinal3;
309 };
310
311 uint32_t addr_lo;
312 uint32_t addr_hi;
313 uint32_t data_lo;
314 uint32_t data_hi;
315};
316#endif
317
318/*--------------------MES_UNMAP_QUEUES--------------------*/
319
320#ifndef PM4_MES_UNMAP_QUEUES_DEFINED
321#define PM4_MES_UNMAP_QUEUES_DEFINED
322enum unmap_queues_action_enum {
323 action__mes_unmap_queues__preempt_queues = 0,
324 action__mes_unmap_queues__reset_queues = 1,
325 action__mes_unmap_queues__disable_process_queues = 2
326};
327
328enum unmap_queues_queue_sel_enum {
329 queue_sel__mes_unmap_queues__perform_request_on_specified_queues = 0,
330 queue_sel__mes_unmap_queues__perform_request_on_pasid_queues = 1,
331 queue_sel__mes_unmap_queues__perform_request_on_all_active_queues = 2
332};
333
334enum unmap_queues_engine_sel_enum {
335 engine_sel__mes_unmap_queues__compute = 0,
336 engine_sel__mes_unmap_queues__sdma0 = 2,
337 engine_sel__mes_unmap_queues__sdma1 = 3
338};
339
340struct pm4_unmap_queues {
341 union {
342 union PM4_MES_TYPE_3_HEADER header; /* header */
343 uint32_t ordinal1;
344 };
345
346 union {
347 struct {
348 enum unmap_queues_action_enum action:2;
349 uint32_t reserved1:2;
350 enum unmap_queues_queue_sel_enum queue_sel:2;
351 uint32_t reserved2:20;
352 enum unmap_queues_engine_sel_enum engine_sel:3;
353 uint32_t num_queues:3;
354 } bitfields2;
355 uint32_t ordinal2;
356 };
357
358 union {
359 struct {
360 uint32_t pasid:16;
361 uint32_t reserved3:16;
362 } bitfields3a;
363 struct {
364 uint32_t reserved4:2;
365 uint32_t doorbell_offset0:21;
366 uint32_t reserved5:9;
367 } bitfields3b;
368 uint32_t ordinal3;
369 };
370
371 union {
372 struct {
373 uint32_t reserved6:2;
374 uint32_t doorbell_offset1:21;
375 uint32_t reserved7:9;
376 } bitfields4;
377 uint32_t ordinal4;
378 };
379
380 union {
381 struct {
382 uint32_t reserved8:2;
383 uint32_t doorbell_offset2:21;
384 uint32_t reserved9:9;
385 } bitfields5;
386 uint32_t ordinal5;
387 };
388
389 union {
390 struct {
391 uint32_t reserved10:2;
392 uint32_t doorbell_offset3:21;
393 uint32_t reserved11:9;
394 } bitfields6;
395 uint32_t ordinal6;
396 };
397
398};
399#endif
400
401enum {
402 CACHE_FLUSH_AND_INV_TS_EVENT = 0x00000014
403};
404
405#endif /* KFD_PM4_HEADERS_H_ */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_opcodes.h b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_opcodes.h
new file mode 100644
index 000000000000..b72fa3b8c2d4
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_opcodes.h
@@ -0,0 +1,107 @@
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24
25#ifndef KFD_PM4_OPCODES_H
26#define KFD_PM4_OPCODES_H
27
28enum it_opcode_type {
29 IT_NOP = 0x10,
30 IT_SET_BASE = 0x11,
31 IT_CLEAR_STATE = 0x12,
32 IT_INDEX_BUFFER_SIZE = 0x13,
33 IT_DISPATCH_DIRECT = 0x15,
34 IT_DISPATCH_INDIRECT = 0x16,
35 IT_ATOMIC_GDS = 0x1D,
36 IT_OCCLUSION_QUERY = 0x1F,
37 IT_SET_PREDICATION = 0x20,
38 IT_REG_RMW = 0x21,
39 IT_COND_EXEC = 0x22,
40 IT_PRED_EXEC = 0x23,
41 IT_DRAW_INDIRECT = 0x24,
42 IT_DRAW_INDEX_INDIRECT = 0x25,
43 IT_INDEX_BASE = 0x26,
44 IT_DRAW_INDEX_2 = 0x27,
45 IT_CONTEXT_CONTROL = 0x28,
46 IT_INDEX_TYPE = 0x2A,
47 IT_DRAW_INDIRECT_MULTI = 0x2C,
48 IT_DRAW_INDEX_AUTO = 0x2D,
49 IT_NUM_INSTANCES = 0x2F,
50 IT_DRAW_INDEX_MULTI_AUTO = 0x30,
51 IT_INDIRECT_BUFFER_CNST = 0x33,
52 IT_STRMOUT_BUFFER_UPDATE = 0x34,
53 IT_DRAW_INDEX_OFFSET_2 = 0x35,
54 IT_DRAW_PREAMBLE = 0x36,
55 IT_WRITE_DATA = 0x37,
56 IT_DRAW_INDEX_INDIRECT_MULTI = 0x38,
57 IT_MEM_SEMAPHORE = 0x39,
58 IT_COPY_DW = 0x3B,
59 IT_WAIT_REG_MEM = 0x3C,
60 IT_INDIRECT_BUFFER = 0x3F,
61 IT_COPY_DATA = 0x40,
62 IT_PFP_SYNC_ME = 0x42,
63 IT_SURFACE_SYNC = 0x43,
64 IT_COND_WRITE = 0x45,
65 IT_EVENT_WRITE = 0x46,
66 IT_EVENT_WRITE_EOP = 0x47,
67 IT_EVENT_WRITE_EOS = 0x48,
68 IT_RELEASE_MEM = 0x49,
69 IT_PREAMBLE_CNTL = 0x4A,
70 IT_DMA_DATA = 0x50,
71 IT_ACQUIRE_MEM = 0x58,
72 IT_REWIND = 0x59,
73 IT_LOAD_UCONFIG_REG = 0x5E,
74 IT_LOAD_SH_REG = 0x5F,
75 IT_LOAD_CONFIG_REG = 0x60,
76 IT_LOAD_CONTEXT_REG = 0x61,
77 IT_SET_CONFIG_REG = 0x68,
78 IT_SET_CONTEXT_REG = 0x69,
79 IT_SET_CONTEXT_REG_INDIRECT = 0x73,
80 IT_SET_SH_REG = 0x76,
81 IT_SET_SH_REG_OFFSET = 0x77,
82 IT_SET_QUEUE_REG = 0x78,
83 IT_SET_UCONFIG_REG = 0x79,
84 IT_SCRATCH_RAM_WRITE = 0x7D,
85 IT_SCRATCH_RAM_READ = 0x7E,
86 IT_LOAD_CONST_RAM = 0x80,
87 IT_WRITE_CONST_RAM = 0x81,
88 IT_DUMP_CONST_RAM = 0x83,
89 IT_INCREMENT_CE_COUNTER = 0x84,
90 IT_INCREMENT_DE_COUNTER = 0x85,
91 IT_WAIT_ON_CE_COUNTER = 0x86,
92 IT_WAIT_ON_DE_COUNTER_DIFF = 0x88,
93 IT_SWITCH_BUFFER = 0x8B,
94 IT_SET_RESOURCES = 0xA0,
95 IT_MAP_PROCESS = 0xA1,
96 IT_MAP_QUEUES = 0xA2,
97 IT_UNMAP_QUEUES = 0xA3,
98 IT_QUERY_STATUS = 0xA4,
99 IT_RUN_LIST = 0xA5,
100};
101
102#define PM4_TYPE_0 0
103#define PM4_TYPE_2 2
104#define PM4_TYPE_3 3
105
106#endif /* KFD_PM4_OPCODES_H */
107
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
new file mode 100644
index 000000000000..f9fb81e3bb09
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -0,0 +1,600 @@
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef KFD_PRIV_H_INCLUDED
24#define KFD_PRIV_H_INCLUDED
25
26#include <linux/hashtable.h>
27#include <linux/mmu_notifier.h>
28#include <linux/mutex.h>
29#include <linux/types.h>
30#include <linux/atomic.h>
31#include <linux/workqueue.h>
32#include <linux/spinlock.h>
33#include <linux/kfd_ioctl.h>
34#include <kgd_kfd_interface.h>
35
36#define KFD_SYSFS_FILE_MODE 0444
37
38/*
39 * When working with the cp scheduler we should assign the HIQ, manually or
40 * via the radeon driver, to a fixed hqd slot; these are the fixed HIQ hqd
41 * slot definitions for Kaveri. On Kaveri only the first ME's queues
42 * participate in cp scheduling, so, with that in mind, we place the HIQ
43 * slot in the second ME.
44 */
45#define KFD_CIK_HIQ_PIPE 4
46#define KFD_CIK_HIQ_QUEUE 0
47
48/* GPU ID hash width in bits */
49#define KFD_GPU_ID_HASH_WIDTH 16
50
51/* Macro for allocating structures */
52#define kfd_alloc_struct(ptr_to_struct) \
53 ((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL))
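/*
 * Illustrative use: given "struct kfd_process *p;", the call
 * "p = kfd_alloc_struct(p);" allocates and zeroes sizeof(*p) bytes with
 * the matching pointer type.
 */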
54
55/* Kernel module parameter to specify maximum number of supported processes */
56extern int max_num_of_processes;
57
58#define KFD_MAX_NUM_OF_PROCESSES_DEFAULT 32
59#define KFD_MAX_NUM_OF_PROCESSES 512
60
61/*
62 * Kernel module parameter to specify maximum number of supported queues
63 * per process
64 */
65extern int max_num_of_queues_per_process;
66
67#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT 128
68#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
69
70#define KFD_KERNEL_QUEUE_SIZE 2048
71
72/* Kernel module parameter to specify the scheduling policy */
73extern int sched_policy;
74
75/**
76 * enum kfd_sched_policy
77 *
78 * @KFD_SCHED_POLICY_HWS: H/W scheduling policy known as command processor (cp)
79 * scheduling. In this mode the firmware schedules the user mode queues and
80 * the kernel queues, such as the HIQ and DIQ.
81 * The HIQ is a special queue that dispatches the configuration and the list
82 * of currently running user mode queues to the cp.
83 * The DIQ is a debugging queue that dispatches debugging commands to the
84 * firmware.
85 * In this mode the over subscription feature for user mode queues is
86 * enabled.
87 *
88 * @KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION: The same as above, but with the
89 * over subscription feature disabled.
90 *
91 * @KFD_SCHED_POLICY_NO_HWS: No H/W scheduling: a mode that directly sets the
92 * command processor registers and sets up the queues "manually". This mode
93 * is used *ONLY* for debugging purposes.
94 *
95 */
96enum kfd_sched_policy {
97 KFD_SCHED_POLICY_HWS = 0,
98 KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION,
99 KFD_SCHED_POLICY_NO_HWS
100};
101
102enum cache_policy {
103 cache_policy_coherent,
104 cache_policy_noncoherent
105};
106
107struct kfd_device_info {
108 unsigned int max_pasid_bits;
109 size_t ih_ring_entry_size;
110 uint16_t mqd_size_aligned;
111};
112
113struct kfd_dev {
114 struct kgd_dev *kgd;
115
116 const struct kfd_device_info *device_info;
117 struct pci_dev *pdev;
118
119 unsigned int id; /* topology stub index */
120
121 phys_addr_t doorbell_base; /* Start of actual doorbells used by
122 * KFD. It is aligned for mapping
123 * into user mode
124 */
125 size_t doorbell_id_offset; /* Doorbell offset (from KFD doorbell
126 * to HW doorbell, GFX reserved some
127 * at the start)
128 */
129 size_t doorbell_process_limit; /* Number of processes we have doorbell
130 * space for.
131 */
132	u32 __iomem *doorbell_kernel_ptr; /* This is a pointer to the doorbell
133					   * page used by the kernel queue
134					   */
135
136 struct kgd2kfd_shared_resources shared_resources;
137
138 void *interrupt_ring;
139 size_t interrupt_ring_size;
140 atomic_t interrupt_ring_rptr;
141 atomic_t interrupt_ring_wptr;
142 struct work_struct interrupt_work;
143 spinlock_t interrupt_lock;
144
145 /* QCM Device instance */
146 struct device_queue_manager *dqm;
147
148 bool init_complete;
149 /*
150 * Interrupts of interest to KFD are copied
151 * from the HW ring into a SW ring.
152 */
153 bool interrupts_active;
154};
155
156/* KGD2KFD callbacks */
157void kgd2kfd_exit(void);
158struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev);
159bool kgd2kfd_device_init(struct kfd_dev *kfd,
160 const struct kgd2kfd_shared_resources *gpu_resources);
161void kgd2kfd_device_exit(struct kfd_dev *kfd);
162
163extern const struct kfd2kgd_calls *kfd2kgd;
164
165struct kfd_mem_obj {
166 void *bo;
167 uint64_t gpu_addr;
168 uint32_t *cpu_ptr;
169};
170
171enum kfd_mempool {
172 KFD_MEMPOOL_SYSTEM_CACHEABLE = 1,
173 KFD_MEMPOOL_SYSTEM_WRITECOMBINE = 2,
174 KFD_MEMPOOL_FRAMEBUFFER = 3,
175};
176
177/* Character device interface */
178int kfd_chardev_init(void);
179void kfd_chardev_exit(void);
180struct device *kfd_chardev(void);
181
182/**
183 * enum kfd_preempt_type_filter
184 *
185 * @KFD_PREEMPT_TYPE_FILTER_SINGLE_QUEUE: Preempts a single queue.
186 *
187 * @KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES: Preempts all queues in the
188 * running queues list.
189 *
190 * @KFD_PREEMPT_TYPE_FILTER_BY_PASID: Preempts queues that belong to a
191 * specific process.
192 *
193 */
194enum kfd_preempt_type_filter {
195 KFD_PREEMPT_TYPE_FILTER_SINGLE_QUEUE,
196 KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES,
197 KFD_PREEMPT_TYPE_FILTER_BY_PASID
198};
199
200enum kfd_preempt_type {
201 KFD_PREEMPT_TYPE_WAVEFRONT,
202 KFD_PREEMPT_TYPE_WAVEFRONT_RESET
203};
204
205/**
206 * enum kfd_queue_type
207 *
208 * @KFD_QUEUE_TYPE_COMPUTE: Regular user mode queue type.
209 *
210 * @KFD_QUEUE_TYPE_SDMA: Sdma user mode queue type.
211 *
212 * @KFD_QUEUE_TYPE_HIQ: HIQ queue type.
213 *
214 * @KFD_QUEUE_TYPE_DIQ: DIQ queue type.
215 */
216enum kfd_queue_type {
217 KFD_QUEUE_TYPE_COMPUTE,
218 KFD_QUEUE_TYPE_SDMA,
219 KFD_QUEUE_TYPE_HIQ,
220 KFD_QUEUE_TYPE_DIQ
221};
222
223enum kfd_queue_format {
224 KFD_QUEUE_FORMAT_PM4,
225 KFD_QUEUE_FORMAT_AQL
226};
227
228/**
229 * struct queue_properties
230 *
231 * @type: The queue type.
232 *
233 * @queue_id: Queue identifier.
234 *
235 * @queue_address: Queue ring buffer address.
236 *
237 * @queue_size: Queue ring buffer size.
238 *
239 * @priority: Defines the queue priority relative to other queues in the
240 * process.
241 * This is just an indication and HW scheduling may override the priority as
242 * necessary while keeping the relative prioritization.
243 * The priority ranges from 0 to f, where f is the highest priority.
244 * Currently all queues are initialized with the highest priority.
245 *
246 * @queue_percent: This field is partially implemented; currently a zero in
247 * this field marks the queue as inactive.
248 *
249 * @read_ptr: User space address which holds the number of dwords the cp has
250 * read from the ring buffer. This field is updated automatically by the H/W.
251 *
252 * @write_ptr: Defines the number of dwords written to the ring buffer.
253 *
254 * @doorbell_ptr: Notifies the H/W of new packets written to the queue ring
255 * buffer. It should mirror write_ptr, and user space should update it after
256 * updating write_ptr.
257 *
258 * @doorbell_off: The doorbell offset in the doorbell pci-bar.
259 *
260 * @is_interop: Defines if this is an interop queue. An interop queue can
261 * access both graphics and compute resources.
262 *
263 * @is_active: Defines if the queue is active or not.
264 *
265 * @vmid: In no cp scheduling mode, this field defines the vmid of
266 * the queue.
267 *
268 * This structure represents the queue properties of any queue, whether it is
269 * a user mode or a kernel mode queue.
270 *
271 */
272struct queue_properties {
273 enum kfd_queue_type type;
274 enum kfd_queue_format format;
275 unsigned int queue_id;
276 uint64_t queue_address;
277 uint64_t queue_size;
278 uint32_t priority;
279 uint32_t queue_percent;
280 uint32_t *read_ptr;
281 uint32_t *write_ptr;
282 uint32_t __iomem *doorbell_ptr;
283 uint32_t doorbell_off;
284 bool is_interop;
285 bool is_active;
286 /* Not relevant for user mode queues in cp scheduling */
287 unsigned int vmid;
288};
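
/*
 * Illustrative submission order implied by the fields above (a sketch of
 * the user space protocol, not an API defined in this file):
 *
 *	1. write the packet(s) into the ring buffer at the write_ptr offset;
 *	2. advance *write_ptr by the packet size in dwords;
 *	3. write the new write_ptr value through doorbell_ptr to notify the HW.
 */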
289
290/**
291 * struct queue
292 *
293 * @list: Queue linked list.
294 *
295 * @mqd: The queue MQD.
296 *
297 * @mqd_mem_obj: The MQD local gpu memory object.
298 *
299 * @gart_mqd_addr: The MQD gart mc address.
300 *
301 * @properties: The queue properties.
302 *
303 * @mec: Used only in no cp scheduling mode and identifies the micro engine
304 * id that the queue should execute on.
305 *
306 * @pipe: Used only in no cp scheduling mode and identifies the queue's pipe id.
307 *
308 * @queue: Used only in no cp scheduling mode and identifies the queue's slot.
309 *
310 * @process: The kfd process that created this queue.
311 *
312 * @device: The kfd device that created this queue.
313 *
314 * This structure represents user mode compute queues.
315 * It contains all the necessary data to handle such queues.
316 *
317 */
318
319struct queue {
320 struct list_head list;
321 void *mqd;
322 struct kfd_mem_obj *mqd_mem_obj;
323 uint64_t gart_mqd_addr;
324 struct queue_properties properties;
325
326 uint32_t mec;
327 uint32_t pipe;
328 uint32_t queue;
329
330 struct kfd_process *process;
331 struct kfd_dev *device;
332};
333
334/*
335 * Please read the kfd_mqd_manager.h description.
336 */
337enum KFD_MQD_TYPE {
338 KFD_MQD_TYPE_CIK_COMPUTE = 0, /* for no cp scheduling */
339 KFD_MQD_TYPE_CIK_HIQ, /* for hiq */
340 KFD_MQD_TYPE_CIK_CP, /* for cp queues and diq */
341 KFD_MQD_TYPE_CIK_SDMA, /* for sdma queues */
342 KFD_MQD_TYPE_MAX
343};
344
345struct scheduling_resources {
346 unsigned int vmid_mask;
347 enum kfd_queue_type type;
348 uint64_t queue_mask;
349 uint64_t gws_mask;
350 uint32_t oac_mask;
351 uint32_t gds_heap_base;
352 uint32_t gds_heap_size;
353};
354
355struct process_queue_manager {
356 /* data */
357 struct kfd_process *process;
358 unsigned int num_concurrent_processes;
359 struct list_head queues;
360 unsigned long *queue_slot_bitmap;
361};
362
363struct qcm_process_device {
364 /* The Device Queue Manager that owns this data */
365 struct device_queue_manager *dqm;
366 struct process_queue_manager *pqm;
367 /* Device Queue Manager lock */
368 struct mutex *lock;
369 /* Queues list */
370 struct list_head queues_list;
371 struct list_head priv_queue_list;
372
373 unsigned int queue_count;
374 unsigned int vmid;
375 bool is_debug;
376 /*
377 * All the memory management data should be here too
378 */
379 uint64_t gds_context_area;
380 uint32_t sh_mem_config;
381 uint32_t sh_mem_bases;
382 uint32_t sh_mem_ape1_base;
383 uint32_t sh_mem_ape1_limit;
384 uint32_t page_table_base;
385 uint32_t gds_size;
386 uint32_t num_gws;
387 uint32_t num_oac;
388};
389
390/* Data that is per-process-per device. */
391struct kfd_process_device {
392 /*
393 * List of all per-device data for a process.
394 * Starts from kfd_process.per_device_data.
395 */
396 struct list_head per_device_list;
397
398 /* The device that owns this data. */
399 struct kfd_dev *dev;
400
401
402 /* per-process-per device QCM data structure */
403 struct qcm_process_device qpd;
404
405 /*Apertures*/
406 uint64_t lds_base;
407 uint64_t lds_limit;
408 uint64_t gpuvm_base;
409 uint64_t gpuvm_limit;
410 uint64_t scratch_base;
411 uint64_t scratch_limit;
412
413 /* Is this process/pasid bound to this device? (amd_iommu_bind_pasid) */
414 bool bound;
415};
416
417#define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)
418
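qpd_to_pdd() is the usual container_of() idiom: given a pointer to the embedded qpd member, it recovers the enclosing kfd_process_device. A minimal sketch of a caller (the function and names here are illustrative, not part of the patch):

	/* Sketch: recover the per-process-per-device data from its QCM member. */
	static void example_qpd_user(struct qcm_process_device *qpd)
	{
		struct kfd_process_device *pdd = qpd_to_pdd(qpd);

		pr_debug("qpd %p belongs to device %p\n", qpd, pdd->dev);
	}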
419/* Process data */
420struct kfd_process {
421 /*
422 * kfd_process structures are stored in an mm_struct* -> kfd_process*
423 * hash table (kfd_processes in kfd_process.c).
424 */
425 struct hlist_node kfd_processes;
426
427 struct mm_struct *mm;
428
429 struct mutex mutex;
430
431 /*
432 * In any process, the thread that started main() is the lead
433 * thread and outlives the rest.
434 * It is here because amd_iommu_bind_pasid wants a task_struct.
435 */
436 struct task_struct *lead_thread;
437
438 /* We want to receive a notification when the mm_struct is destroyed */
439 struct mmu_notifier mmu_notifier;
440
441 /* Used for delayed freeing of the kfd_process structure */
442 struct rcu_head rcu;
443
444 unsigned int pasid;
445
446 /*
447 * List of kfd_process_device structures,
448 * one for each device the process is using.
449 */
450 struct list_head per_device_data;
451
452 struct process_queue_manager pqm;
453
454 /* The process's queues. */
455 size_t queue_array_size;
456
457 /* Size is queue_array_size, up to MAX_PROCESS_QUEUES. */
458 struct kfd_queue **queues;
459
460 unsigned long allocated_queue_bitmap[DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS, BITS_PER_LONG)];
461
462 /* Is the user space process 32 bit? */
463 bool is_32bit_user_mode;
464};
465
466void kfd_process_create_wq(void);
467void kfd_process_destroy_wq(void);
468struct kfd_process *kfd_create_process(const struct task_struct *);
469struct kfd_process *kfd_get_process(const struct task_struct *);
470
471struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
472 struct kfd_process *p);
473void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid);
474struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
475 struct kfd_process *p,
476 int create_pdd);
477
478/* Process device data iterator */
479struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p);
480struct kfd_process_device *kfd_get_next_process_device_data(struct kfd_process *p,
481 struct kfd_process_device *pdd);
482bool kfd_has_process_device_data(struct kfd_process *p);
483
484/* PASIDs */
485int kfd_pasid_init(void);
486void kfd_pasid_exit(void);
487bool kfd_set_pasid_limit(unsigned int new_limit);
488unsigned int kfd_get_pasid_limit(void);
489unsigned int kfd_pasid_alloc(void);
490void kfd_pasid_free(unsigned int pasid);
491
492/* Doorbells */
493void kfd_doorbell_init(struct kfd_dev *kfd);
494int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma);
495u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
496 unsigned int *doorbell_off);
497void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr);
498u32 read_kernel_doorbell(u32 __iomem *db);
499void write_kernel_doorbell(u32 __iomem *db, u32 value);
500unsigned int kfd_queue_id_to_doorbell(struct kfd_dev *kfd,
501 struct kfd_process *process,
502 unsigned int queue_id);
503
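A hedged sketch of how the doorbell helpers above compose; the real user is the kernel queue code elsewhere in this patch, and kfd and new_wptr below are assumed to be in scope:

	/* Hypothetical caller: reserve a doorbell, ring it, release it. */
	unsigned int off;
	u32 __iomem *db = kfd_get_kernel_doorbell(kfd, &off);

	if (db) {
		write_kernel_doorbell(db, new_wptr);	/* tell the CP the write ptr moved */
		/* ... queue runs ... */
		kfd_release_kernel_doorbell(kfd, db);	/* on queue teardown */
	}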
504extern struct device *kfd_device;
505
506/* Topology */
507int kfd_topology_init(void);
508void kfd_topology_shutdown(void);
509int kfd_topology_add_device(struct kfd_dev *gpu);
510int kfd_topology_remove_device(struct kfd_dev *gpu);
511struct kfd_dev *kfd_device_by_id(uint32_t gpu_id);
512struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev);
513struct kfd_dev *kfd_topology_enum_kfd_devices(uint8_t idx);
514
515/* Interrupts */
516int kfd_interrupt_init(struct kfd_dev *dev);
517void kfd_interrupt_exit(struct kfd_dev *dev);
518void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
519bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry);
520
521/* Power Management */
522void kgd2kfd_suspend(struct kfd_dev *kfd);
523int kgd2kfd_resume(struct kfd_dev *kfd);
524
525/* amdkfd Apertures */
526int kfd_init_apertures(struct kfd_process *process);
527
528/* Queue Context Management */
529inline uint32_t lower_32(uint64_t x);
530inline uint32_t upper_32(uint64_t x);
531
532int init_queue(struct queue **q, struct queue_properties properties);
533void uninit_queue(struct queue *q);
534void print_queue_properties(struct queue_properties *q);
535void print_queue(struct queue *q);
536
537struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
538 struct kfd_dev *dev);
539struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev);
540void device_queue_manager_uninit(struct device_queue_manager *dqm);
541struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
542 enum kfd_queue_type type);
543void kernel_queue_uninit(struct kernel_queue *kq);
544
545/* Process Queue Manager */
546struct process_queue_node {
547 struct queue *q;
548 struct kernel_queue *kq;
549 struct list_head process_queue_list;
550};
551
552int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p);
553void pqm_uninit(struct process_queue_manager *pqm);
554int pqm_create_queue(struct process_queue_manager *pqm,
555 struct kfd_dev *dev,
556 struct file *f,
557 struct queue_properties *properties,
558 unsigned int flags,
559 enum kfd_queue_type type,
560 unsigned int *qid);
561int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid);
562int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
563 struct queue_properties *p);
564
565/* Packet Manager */
566
567#define KFD_HIQ_TIMEOUT (500)
568
569#define KFD_FENCE_COMPLETED (100)
570#define KFD_FENCE_INIT (10)
571#define KFD_UNMAP_LATENCY (150)
572
573struct packet_manager {
574 struct device_queue_manager *dqm;
575 struct kernel_queue *priv_queue;
576 struct mutex lock;
577 bool allocated;
578 struct kfd_mem_obj *ib_buffer_obj;
579};
580
581int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm);
582void pm_uninit(struct packet_manager *pm);
583int pm_send_set_resources(struct packet_manager *pm,
584 struct scheduling_resources *res);
585int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues);
586int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
587 uint32_t fence_value);
588
589int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
590 enum kfd_preempt_type_filter mode,
591 uint32_t filter_param, bool reset,
592 unsigned int sdma_engine);
593
594void pm_release_ib(struct packet_manager *pm);
595
596uint64_t kfd_get_number_elems(struct kfd_dev *kfd);
597phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev,
598 struct kfd_process *process);
599
600#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
new file mode 100644
index 000000000000..b85eb0b830b4
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -0,0 +1,410 @@
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#include <linux/mutex.h>
24#include <linux/log2.h>
25#include <linux/sched.h>
26#include <linux/slab.h>
27#include <linux/amd-iommu.h>
28#include <linux/notifier.h>
29struct mm_struct;
30
31#include "kfd_priv.h"
32
33/*
34 * Initial size for the array of queues.
35 * The allocated size is doubled each time
36 * it is exceeded up to MAX_PROCESS_QUEUES.
37 */
38#define INITIAL_QUEUE_ARRAY_SIZE 16
39
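The doubling policy described above amounts to a classic grow-on-demand array; a hedged sketch of what such a grow step could look like (this helper is hypothetical and not part of the patch, and MAX_PROCESS_QUEUES is the limit named in the comment):

	/* Hypothetical sketch of the doubling described above; not in the patch. */
	static int grow_queue_array(struct kfd_process *p)
	{
		size_t new_size = min_t(size_t, p->queue_array_size * 2,
					MAX_PROCESS_QUEUES);
		struct kfd_queue **tmp;

		if (new_size == p->queue_array_size)
			return -ENOMEM;	/* already at the limit */

		tmp = krealloc(p->queues, new_size * sizeof(*tmp), GFP_KERNEL);
		if (!tmp)
			return -ENOMEM;

		p->queues = tmp;
		p->queue_array_size = new_size;
		return 0;
	}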
40/*
41 * List of struct kfd_process (field kfd_process).
42 * Unique/indexed by mm_struct*
43 */
44#define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */
45static DEFINE_HASHTABLE(kfd_processes_table, KFD_PROCESS_TABLE_SIZE);
46static DEFINE_MUTEX(kfd_processes_mutex);
47
48DEFINE_STATIC_SRCU(kfd_processes_srcu);
49
50static struct workqueue_struct *kfd_process_wq;
51
52struct kfd_process_release_work {
53 struct work_struct kfd_work;
54 struct kfd_process *p;
55};
56
57static struct kfd_process *find_process(const struct task_struct *thread);
58static struct kfd_process *create_process(const struct task_struct *thread);
59
60void kfd_process_create_wq(void)
61{
62 if (!kfd_process_wq)
63 kfd_process_wq = create_workqueue("kfd_process_wq");
64}
65
66void kfd_process_destroy_wq(void)
67{
68 if (kfd_process_wq) {
69 flush_workqueue(kfd_process_wq);
70 destroy_workqueue(kfd_process_wq);
71 kfd_process_wq = NULL;
72 }
73}
74
75struct kfd_process *kfd_create_process(const struct task_struct *thread)
76{
77 struct kfd_process *process;
78
79 BUG_ON(!kfd_process_wq);
80
81 if (thread->mm == NULL)
82 return ERR_PTR(-EINVAL);
83
84 /* Only the pthreads threading model is supported. */
85 if (thread->group_leader->mm != thread->mm)
86 return ERR_PTR(-EINVAL);
87
88 /* Take mmap_sem because we call __mmu_notifier_register inside */
89 down_write(&thread->mm->mmap_sem);
90
91 /*
92 * Take the kfd processes mutex before starting process creation,
93 * so that two threads of the same process cannot race to create
94 * two kfd_process structures.
95 */
96 mutex_lock(&kfd_processes_mutex);
97
98 /* A prior open of /dev/kfd could have already created the process. */
99 process = find_process(thread);
100 if (process)
101 pr_debug("kfd: process already found\n");
102
103 if (!process)
104 process = create_process(thread);
105
106 mutex_unlock(&kfd_processes_mutex);
107
108 up_write(&thread->mm->mmap_sem);
109
110 return process;
111}
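A hedged usage sketch, e.g. from a hypothetical /dev/kfd open handler (the real caller lives in the character-device code, which is not shown here):

	/* Hypothetical caller: attach the opening task to a kfd_process. */
	static int example_open(struct inode *inode, struct file *filep)
	{
		struct kfd_process *process = kfd_create_process(current);

		if (IS_ERR(process))
			return PTR_ERR(process);
		return 0;
	}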
112
113struct kfd_process *kfd_get_process(const struct task_struct *thread)
114{
115 struct kfd_process *process;
116
117 if (thread->mm == NULL)
118 return ERR_PTR(-EINVAL);
119
120 /* Only the pthreads threading model is supported. */
121 if (thread->group_leader->mm != thread->mm)
122 return ERR_PTR(-EINVAL);
123
124 process = find_process(thread);
125
126 return process;
127}
128
129static struct kfd_process *find_process_by_mm(const struct mm_struct *mm)
130{
131 struct kfd_process *process;
132
133 hash_for_each_possible_rcu(kfd_processes_table, process,
134 kfd_processes, (uintptr_t)mm)
135 if (process->mm == mm)
136 return process;
137
138 return NULL;
139}
140
141static struct kfd_process *find_process(const struct task_struct *thread)
142{
143 struct kfd_process *p;
144 int idx;
145
146 idx = srcu_read_lock(&kfd_processes_srcu);
147 p = find_process_by_mm(thread->mm);
148 srcu_read_unlock(&kfd_processes_srcu, idx);
149
150 return p;
151}
152
153static void kfd_process_wq_release(struct work_struct *work)
154{
155 struct kfd_process_release_work *my_work;
156 struct kfd_process_device *pdd, *temp;
157 struct kfd_process *p;
158
159 my_work = (struct kfd_process_release_work *) work;
160
161 p = my_work->p;
162
163 mutex_lock(&p->mutex);
164
165 list_for_each_entry_safe(pdd, temp, &p->per_device_data,
166 per_device_list) {
167 amd_iommu_unbind_pasid(pdd->dev->pdev, p->pasid);
168 list_del(&pdd->per_device_list);
169
170 kfree(pdd);
171 }
172
173 kfd_pasid_free(p->pasid);
174
175 mutex_unlock(&p->mutex);
176
177 mutex_destroy(&p->mutex);
178
179 kfree(p->queues);
180
181 kfree(p);
182
183 kfree((void *)work);
184}
185
186static void kfd_process_destroy_delayed(struct rcu_head *rcu)
187{
188 struct kfd_process_release_work *work;
189 struct kfd_process *p;
190
191 BUG_ON(!kfd_process_wq);
192
193 p = container_of(rcu, struct kfd_process, rcu);
194 BUG_ON(atomic_read(&p->mm->mm_count) <= 0);
195
196 mmdrop(p->mm);
197
198 work = (struct kfd_process_release_work *)
199 kmalloc(sizeof(struct kfd_process_release_work), GFP_ATOMIC);
200
201 if (work) {
202 INIT_WORK((struct work_struct *) work, kfd_process_wq_release);
203 work->p = p;
204 queue_work(kfd_process_wq, (struct work_struct *) work);
205 }
206}
207
208static void kfd_process_notifier_release(struct mmu_notifier *mn,
209 struct mm_struct *mm)
210{
211 struct kfd_process *p;
212
213 /*
214 * The kfd_process structure cannot be freed here because the
215 * mmu_notifier SRCU is still read-locked.
216 */
217 p = container_of(mn, struct kfd_process, mmu_notifier);
218 BUG_ON(p->mm != mm);
219
220 mutex_lock(&kfd_processes_mutex);
221 hash_del_rcu(&p->kfd_processes);
222 mutex_unlock(&kfd_processes_mutex);
223 synchronize_srcu(&kfd_processes_srcu);
224
225 mutex_lock(&p->mutex);
226
227 /* In case our notifier is called before the IOMMU notifier */
228 pqm_uninit(&p->pqm);
229
230 mutex_unlock(&p->mutex);
231
232 /*
233 * Because we drop mm_count inside kfd_process_destroy_delayed
234 * and because the mmu_notifier_unregister function also drops
235 * mm_count, we need to take an extra reference here.
236 */
237 atomic_inc(&p->mm->mm_count);
238 mmu_notifier_unregister_no_release(&p->mmu_notifier, p->mm);
239 mmu_notifier_call_srcu(&p->rcu, &kfd_process_destroy_delayed);
240}
241
242static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = {
243 .release = kfd_process_notifier_release,
244};
245
246static struct kfd_process *create_process(const struct task_struct *thread)
247{
248 struct kfd_process *process;
249 int err = -ENOMEM;
250
251 process = kzalloc(sizeof(*process), GFP_KERNEL);
252
253 if (!process)
254 goto err_alloc_process;
255
256 process->queues = kmalloc_array(INITIAL_QUEUE_ARRAY_SIZE,
257 sizeof(process->queues[0]), GFP_KERNEL);
258 if (!process->queues)
259 goto err_alloc_queues;
260
261 process->pasid = kfd_pasid_alloc();
262 if (process->pasid == 0)
263 goto err_alloc_pasid;
264
265 mutex_init(&process->mutex);
266
267 process->mm = thread->mm;
268
269 /* register notifier */
270 process->mmu_notifier.ops = &kfd_process_mmu_notifier_ops;
271 err = __mmu_notifier_register(&process->mmu_notifier, process->mm);
272 if (err)
273 goto err_mmu_notifier;
274
275 hash_add_rcu(kfd_processes_table, &process->kfd_processes,
276 (uintptr_t)process->mm);
277
278 process->lead_thread = thread->group_leader;
279
280 process->queue_array_size = INITIAL_QUEUE_ARRAY_SIZE;
281
282 INIT_LIST_HEAD(&process->per_device_data);
283
284 err = pqm_init(&process->pqm, process);
285 if (err != 0)
286 goto err_process_pqm_init;
287
288 return process;
289
290err_process_pqm_init:
291 hash_del_rcu(&process->kfd_processes);
292 synchronize_rcu();
293 mmu_notifier_unregister_no_release(&process->mmu_notifier, process->mm);
294err_mmu_notifier:
295 kfd_pasid_free(process->pasid);
296err_alloc_pasid:
297 kfree(process->queues);
298err_alloc_queues:
299 kfree(process);
300err_alloc_process:
301 return ERR_PTR(err);
302}
303
304struct kfd_process_device *kfd_get_process_device_data(struct kfd_dev *dev,
305 struct kfd_process *p,
306 int create_pdd)
307{
308 struct kfd_process_device *pdd = NULL;
309
310 list_for_each_entry(pdd, &p->per_device_data, per_device_list)
311 if (pdd->dev == dev)
312 return pdd;
313
314 if (create_pdd) {
315 pdd = kzalloc(sizeof(*pdd), GFP_KERNEL);
316 if (pdd != NULL) {
317 pdd->dev = dev;
318 INIT_LIST_HEAD(&pdd->qpd.queues_list);
319 INIT_LIST_HEAD(&pdd->qpd.priv_queue_list);
320 pdd->qpd.dqm = dev->dqm;
321 list_add(&pdd->per_device_list, &p->per_device_data);
322 }
323 }
324
325 return pdd;
326}
327
328/*
329 * Direct the IOMMU to bind the process (specifically the pasid->mm)
330 * to the device.
331 * Unbinding occurs when the process dies or the device is removed.
332 *
333 * Assumes that the process lock is held.
334 */
335struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
336 struct kfd_process *p)
337{
338 struct kfd_process_device *pdd = kfd_get_process_device_data(dev, p, 1);
339 int err;
340
341 if (pdd == NULL)
342 return ERR_PTR(-ENOMEM);
343
344 if (pdd->bound)
345 return pdd;
346
347 err = amd_iommu_bind_pasid(dev->pdev, p->pasid, p->lead_thread);
348 if (err < 0)
349 return ERR_PTR(err);
350
351 pdd->bound = true;
352
353 return pdd;
354}
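A hedged sketch of the intended calling pattern, with error handling trimmed; per the comment above, the caller holds the process mutex:

	/* Hypothetical caller: bind before touching per-device queue state. */
	mutex_lock(&p->mutex);
	pdd = kfd_bind_process_to_device(dev, p);
	if (IS_ERR(pdd)) {
		mutex_unlock(&p->mutex);
		return PTR_ERR(pdd);
	}
	/* ... create queues against pdd->qpd ... */
	mutex_unlock(&p->mutex);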
355
356void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
357{
358 struct kfd_process *p;
359 struct kfd_process_device *pdd;
360 int idx, i;
361
362 BUG_ON(dev == NULL);
363
364 idx = srcu_read_lock(&kfd_processes_srcu);
365
366 hash_for_each_rcu(kfd_processes_table, i, p, kfd_processes)
367 if (p->pasid == pasid)
368 break;
369
370 srcu_read_unlock(&kfd_processes_srcu, idx);
371
372 BUG_ON(p == NULL || p->pasid != pasid);
373
374 mutex_lock(&p->mutex);
375
376 pqm_uninit(&p->pqm);
377
378 pdd = kfd_get_process_device_data(dev, p, 0);
379
380 /*
381 * Just mark pdd as unbound, because we still need it to call
382 * amd_iommu_unbind_pasid() when the process exits.
383 * We don't call amd_iommu_unbind_pasid() here
384 * because the IOMMU called us.
385 */
386 if (pdd)
387 pdd->bound = false;
388
389 mutex_unlock(&p->mutex);
390}
391
392struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p)
393{
394 return list_first_entry(&p->per_device_data,
395 struct kfd_process_device,
396 per_device_list);
397}
398
399struct kfd_process_device *kfd_get_next_process_device_data(struct kfd_process *p,
400 struct kfd_process_device *pdd)
401{
402 if (list_is_last(&pdd->per_device_list, &p->per_device_data))
403 return NULL;
404 return list_next_entry(pdd, per_device_list);
405}
406
407bool kfd_has_process_device_data(struct kfd_process *p)
408{
409 return !(list_empty(&p->per_device_data));
410}
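Together, the three accessors above form a forward iterator over a process's per-device data; a minimal sketch of the intended loop shape (illustrative only):

	/* Sketch: walk every device this process is using. */
	struct kfd_process_device *pdd;

	if (kfd_has_process_device_data(p))
		for (pdd = kfd_get_first_process_device_data(p);
		     pdd != NULL;
		     pdd = kfd_get_next_process_device_data(p, pdd))
			pr_debug("process uses device %p\n", pdd->dev);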
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
new file mode 100644
index 000000000000..47526780d736
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -0,0 +1,343 @@
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/slab.h>
25#include <linux/list.h>
26#include "kfd_device_queue_manager.h"
27#include "kfd_priv.h"
28#include "kfd_kernel_queue.h"
29
30static inline struct process_queue_node *get_queue_by_qid(
31 struct process_queue_manager *pqm, unsigned int qid)
32{
33 struct process_queue_node *pqn;
34
35 BUG_ON(!pqm);
36
37 list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
38 if (pqn->q && pqn->q->properties.queue_id == qid)
39 return pqn;
40 if (pqn->kq && pqn->kq->queue->properties.queue_id == qid)
41 return pqn;
42 }
43
44 return NULL;
45}
46
47static int find_available_queue_slot(struct process_queue_manager *pqm,
48 unsigned int *qid)
49{
50 unsigned long found;
51
52 BUG_ON(!pqm || !qid);
53
54 pr_debug("kfd: in %s\n", __func__);
55
56 found = find_first_zero_bit(pqm->queue_slot_bitmap,
57 max_num_of_queues_per_process);
58
59 pr_debug("kfd: the new slot id %lu\n", found);
60
61 if (found >= max_num_of_queues_per_process) {
62 pr_info("amdkfd: Can not open more queues for process with pasid %d\n",
63 pqm->process->pasid);
64 return -ENOMEM;
65 }
66
67 set_bit(found, pqm->queue_slot_bitmap);
68 *qid = found;
69
70 return 0;
71}
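find_available_queue_slot() is a plain bitmap allocator; paired with the clear_bit() calls in pqm_destroy_queue() further down, the qid lifecycle looks roughly like this (illustrative only):

	/* Sketch: allocate a queue slot, use it, then release it. */
	unsigned int qid;

	if (find_available_queue_slot(pqm, &qid) == 0) {
		/* ... qid names the queue for its whole lifetime ... */
		clear_bit(qid, pqm->queue_slot_bitmap);
	}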
72
73int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
74{
75 BUG_ON(!pqm);
76
77 INIT_LIST_HEAD(&pqm->queues);
78 pqm->queue_slot_bitmap =
79 kzalloc(DIV_ROUND_UP(max_num_of_queues_per_process,
80 BITS_PER_BYTE), GFP_KERNEL);
81 if (pqm->queue_slot_bitmap == NULL)
82 return -ENOMEM;
83 pqm->process = p;
84
85 return 0;
86}
87
88void pqm_uninit(struct process_queue_manager *pqm)
89{
90 int retval;
91 struct process_queue_node *pqn, *next;
92
93 BUG_ON(!pqm);
94
95 pr_debug("In func %s\n", __func__);
96
97 list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) {
98 retval = pqm_destroy_queue(
99 pqm,
100 (pqn->q != NULL) ?
101 pqn->q->properties.queue_id :
102 pqn->kq->queue->properties.queue_id);
103
104 if (retval != 0) {
105 pr_err("kfd: failed to destroy queue\n");
106 return;
107 }
108 }
109 kfree(pqm->queue_slot_bitmap);
110 pqm->queue_slot_bitmap = NULL;
111}
112
113static int create_cp_queue(struct process_queue_manager *pqm,
114 struct kfd_dev *dev, struct queue **q,
115 struct queue_properties *q_properties,
116 struct file *f, unsigned int qid)
117{
118 int retval;
119
120 retval = 0;
121
122 /* Doorbell initialized in user space */
123 q_properties->doorbell_ptr = NULL;
124
125 q_properties->doorbell_off =
126 kfd_queue_id_to_doorbell(dev, pqm->process, qid);
127
128 /* Let DQM handle it */
129 q_properties->vmid = 0;
130 q_properties->queue_id = qid;
131 q_properties->type = KFD_QUEUE_TYPE_COMPUTE;
132
133 retval = init_queue(q, *q_properties);
134 if (retval != 0)
135 goto err_init_queue;
136
137 (*q)->device = dev;
138 (*q)->process = pqm->process;
139
140 pr_debug("kfd: PQM After init queue\n");
141
142 return retval;
143
144err_init_queue:
145 return retval;
146}
147
148int pqm_create_queue(struct process_queue_manager *pqm,
149 struct kfd_dev *dev,
150 struct file *f,
151 struct queue_properties *properties,
152 unsigned int flags,
153 enum kfd_queue_type type,
154 unsigned int *qid)
155{
156 int retval;
157 struct kfd_process_device *pdd;
158 struct queue_properties q_properties;
159 struct queue *q;
160 struct process_queue_node *pqn;
161 struct kernel_queue *kq;
162
163 BUG_ON(!pqm || !dev || !properties || !qid);
164
165 memset(&q_properties, 0, sizeof(struct queue_properties));
166 memcpy(&q_properties, properties, sizeof(struct queue_properties));
167 q = NULL;
168 kq = NULL;
169
170 pdd = kfd_get_process_device_data(dev, pqm->process, 1);
171 BUG_ON(!pdd);
172
173 retval = find_available_queue_slot(pqm, qid);
174 if (retval != 0)
175 return retval;
176
177 if (list_empty(&pqm->queues)) {
178 pdd->qpd.pqm = pqm;
179 dev->dqm->register_process(dev->dqm, &pdd->qpd);
180 }
181
182 pqn = kzalloc(sizeof(struct process_queue_node), GFP_KERNEL);
183 if (!pqn) {
184 retval = -ENOMEM;
185 goto err_allocate_pqn;
186 }
187
188 switch (type) {
189 case KFD_QUEUE_TYPE_COMPUTE:
190 /* check if there is over subscription */
191 if ((sched_policy == KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
192 ((dev->dqm->processes_count >= VMID_PER_DEVICE) ||
193 (dev->dqm->queue_count >= PIPE_PER_ME_CP_SCHEDULING * QUEUES_PER_PIPE))) {
194 pr_err("kfd: over-subscription is not allowed in radeon_kfd.sched_policy == 1\n");
195 retval = -EPERM;
196 goto err_create_queue;
197 }
198
199 retval = create_cp_queue(pqm, dev, &q, &q_properties, f, *qid);
200 if (retval != 0)
201 goto err_create_queue;
202 pqn->q = q;
203 pqn->kq = NULL;
204 retval = dev->dqm->create_queue(dev->dqm, q, &pdd->qpd,
205 &q->properties.vmid);
206 print_queue(q);
207 break;
208 case KFD_QUEUE_TYPE_DIQ:
209 kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_DIQ);
210 if (kq == NULL) {
211 retval = -ENOMEM;
212 goto err_create_queue;
213 }
214 kq->queue->properties.queue_id = *qid;
215 pqn->kq = kq;
216 pqn->q = NULL;
217 retval = dev->dqm->create_kernel_queue(dev->dqm, kq, &pdd->qpd);
218 break;
219 default:
220 BUG();
221 break;
222 }
223
224 if (retval != 0) {
225 pr_err("kfd: error dqm create queue\n");
226 goto err_create_queue;
227 }
228
229 pr_debug("kfd: PQM After DQM create queue\n");
230
231 list_add(&pqn->process_queue_list, &pqm->queues);
232
233 if (q) {
234 *properties = q->properties;
235 pr_debug("kfd: PQM done creating queue\n");
236 print_queue_properties(properties);
237 }
238
239 return retval;
240
241err_create_queue:
242 kfree(pqn);
243err_allocate_pqn:
244 clear_bit(*qid, pqm->queue_slot_bitmap);
245 return retval;
246}
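A hedged sketch of how an ioctl path might drive pqm_create_queue() (the argument marshalling is illustrative; p, dev and filep are assumed to be in scope):

	/* Hypothetical caller: create a user-mode compute queue. */
	struct queue_properties props = {};	/* filled in from the ioctl args */
	unsigned int qid;
	int err;

	err = pqm_create_queue(&p->pqm, dev, filep, &props, 0,
			KFD_QUEUE_TYPE_COMPUTE, &qid);
	if (err != 0)
		return err;
	/* props now holds what DQM actually set up; qid is the handle. */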
247
248int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
249{
250 struct process_queue_node *pqn;
251 struct kfd_process_device *pdd;
252 struct device_queue_manager *dqm;
253 struct kfd_dev *dev;
254 int retval;
255
256 dqm = NULL;
257
258 BUG_ON(!pqm);
259 retval = 0;
260
261 pr_debug("kfd: In Func %s\n", __func__);
262
263 pqn = get_queue_by_qid(pqm, qid);
264 if (pqn == NULL) {
265 pr_err("kfd: queue id does not match any known queue\n");
266 return -EINVAL;
267 }
268
269 dev = NULL;
270 if (pqn->kq)
271 dev = pqn->kq->dev;
272 if (pqn->q)
273 dev = pqn->q->device;
274 BUG_ON(!dev);
275
276 pdd = kfd_get_process_device_data(dev, pqm->process, 1);
277 BUG_ON(!pdd);
278
279 if (pqn->kq) {
280 /* destroy kernel queue (DIQ) */
281 dqm = pqn->kq->dev->dqm;
282 dqm->destroy_kernel_queue(dqm, pqn->kq, &pdd->qpd);
283 kernel_queue_uninit(pqn->kq);
284 }
285
286 if (pqn->q) {
287 dqm = pqn->q->device->dqm;
288 retval = dqm->destroy_queue(dqm, &pdd->qpd, pqn->q);
289 if (retval != 0)
290 return retval;
291
292 uninit_queue(pqn->q);
293 }
294
295 list_del(&pqn->process_queue_list);
296 kfree(pqn);
297 clear_bit(qid, pqm->queue_slot_bitmap);
298
299 if (list_empty(&pqm->queues))
300 dqm->unregister_process(dqm, &pdd->qpd);
301
302 return retval;
303}
304
305int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
306 struct queue_properties *p)
307{
308 int retval;
309 struct process_queue_node *pqn;
310
311 BUG_ON(!pqm);
312
313 pqn = get_queue_by_qid(pqm, qid);
314 if (WARN_ON(!pqn))
		return -EINVAL;
315
316 pqn->q->properties.queue_address = p->queue_address;
317 pqn->q->properties.queue_size = p->queue_size;
318 pqn->q->properties.queue_percent = p->queue_percent;
319 pqn->q->properties.priority = p->priority;
320
321 retval = pqn->q->device->dqm->update_queue(pqn->q->device->dqm, pqn->q);
322 if (retval != 0)
323 return retval;
324
325 return 0;
326}
327
328static __attribute__((unused)) struct kernel_queue *pqm_get_kernel_queue(
329 struct process_queue_manager *pqm,
330 unsigned int qid)
331{
332 struct process_queue_node *pqn;
333
334 BUG_ON(!pqm);
335
336 pqn = get_queue_by_qid(pqm, qid);
337 if (pqn && pqn->kq)
338 return pqn->kq;
339
340 return NULL;
341}
342
343
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
new file mode 100644
index 000000000000..9a0c90b0702e
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_queue.c
@@ -0,0 +1,85 @@
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include <linux/slab.h>
25#include "kfd_priv.h"
26
27void print_queue_properties(struct queue_properties *q)
28{
29 if (!q)
30 return;
31
32 pr_debug("Printing queue properties:\n");
33 pr_debug("Queue Type: %u\n", q->type);
34 pr_debug("Queue Size: %llu\n", q->queue_size);
35 pr_debug("Queue percent: %u\n", q->queue_percent);
36 pr_debug("Queue Address: 0x%llX\n", q->queue_address);
37 pr_debug("Queue Id: %u\n", q->queue_id);
38 pr_debug("Queue Process Vmid: %u\n", q->vmid);
39 pr_debug("Queue Read Pointer: 0x%p\n", q->read_ptr);
40 pr_debug("Queue Write Pointer: 0x%p\n", q->write_ptr);
41 pr_debug("Queue Doorbell Pointer: 0x%p\n", q->doorbell_ptr);
42 pr_debug("Queue Doorbell Offset: %u\n", q->doorbell_off);
43}
44
45void print_queue(struct queue *q)
46{
47 if (!q)
48 return;
49 pr_debug("Printing queue:\n");
50 pr_debug("Queue Type: %u\n", q->properties.type);
51 pr_debug("Queue Size: %llu\n", q->properties.queue_size);
52 pr_debug("Queue percent: %u\n", q->properties.queue_percent);
53 pr_debug("Queue Address: 0x%llX\n", q->properties.queue_address);
54 pr_debug("Queue Id: %u\n", q->properties.queue_id);
55 pr_debug("Queue Process Vmid: %u\n", q->properties.vmid);
56 pr_debug("Queue Read Pointer: 0x%p\n", q->properties.read_ptr);
57 pr_debug("Queue Write Pointer: 0x%p\n", q->properties.write_ptr);
58 pr_debug("Queue Doorbell Pointer: 0x%p\n", q->properties.doorbell_ptr);
59 pr_debug("Queue Doorbell Offset: %u\n", q->properties.doorbell_off);
60 pr_debug("Queue MQD Address: 0x%p\n", q->mqd);
61 pr_debug("Queue MQD Gart: 0x%llX\n", q->gart_mqd_addr);
62 pr_debug("Queue Process Address: 0x%p\n", q->process);
63 pr_debug("Queue Device Address: 0x%p\n", q->device);
64}
65
66int init_queue(struct queue **q, struct queue_properties properties)
67{
68 struct queue *tmp;
69
70 BUG_ON(!q);
71
72 tmp = kzalloc(sizeof(struct queue), GFP_KERNEL);
73 if (!tmp)
74 return -ENOMEM;
75
76 memcpy(&tmp->properties, &properties, sizeof(struct queue_properties));
77
78 *q = tmp;
79 return 0;
80}
81
82void uninit_queue(struct queue *q)
83{
84 kfree(q);
85}
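The allocate/copy/free pairing above is the whole of queue object lifetime at this layer; a minimal usage sketch (illustrative only):

	/* Sketch: init_queue() allocates and copies properties, uninit_queue() frees. */
	struct queue *q;
	struct queue_properties props = {};

	props.queue_id = 0;
	if (init_queue(&q, props) == 0) {
		print_queue(q);
		uninit_queue(q);
	}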
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
new file mode 100644
index 000000000000..5733e2859e8a
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -0,0 +1,1235 @@
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#include <linux/types.h>
24#include <linux/kernel.h>
25#include <linux/pci.h>
26#include <linux/errno.h>
27#include <linux/acpi.h>
28#include <linux/hash.h>
29#include <linux/cpufreq.h>
30
31#include "kfd_priv.h"
32#include "kfd_crat.h"
33#include "kfd_topology.h"
34
35static struct list_head topology_device_list;
36static int topology_crat_parsed;
37static struct kfd_system_properties sys_props;
38
39static DECLARE_RWSEM(topology_lock);
40
41struct kfd_dev *kfd_device_by_id(uint32_t gpu_id)
42{
43 struct kfd_topology_device *top_dev;
44 struct kfd_dev *device = NULL;
45
46 down_read(&topology_lock);
47
48 list_for_each_entry(top_dev, &topology_device_list, list)
49 if (top_dev->gpu_id == gpu_id) {
50 device = top_dev->gpu;
51 break;
52 }
53
54 up_read(&topology_lock);
55
56 return device;
57}
58
59struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev)
60{
61 struct kfd_topology_device *top_dev;
62 struct kfd_dev *device = NULL;
63
64 down_read(&topology_lock);
65
66 list_for_each_entry(top_dev, &topology_device_list, list)
67 if (top_dev->gpu && top_dev->gpu->pdev == pdev) {
68 device = top_dev->gpu;
69 break;
70 }
71
72 up_read(&topology_lock);
73
74 return device;
75}
76
77static int kfd_topology_get_crat_acpi(void *crat_image, size_t *size)
78{
79 struct acpi_table_header *crat_table;
80 acpi_status status;
81
82 if (!size)
83 return -EINVAL;
84
85 /*
86 * Fetch the CRAT table from ACPI
87 */
88 status = acpi_get_table(CRAT_SIGNATURE, 0, &crat_table);
89 if (status == AE_NOT_FOUND) {
90 pr_warn("CRAT table not found\n");
91 return -ENODATA;
92 } else if (ACPI_FAILURE(status)) {
93 const char *err = acpi_format_exception(status);
94
95 pr_err("CRAT table error: %s\n", err);
96 return -EINVAL;
97 }
98
99 if (*size >= crat_table->length && crat_image != NULL)
100 memcpy(crat_image, crat_table, crat_table->length);
101
102 *size = crat_table->length;
103
104 return 0;
105}
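kfd_topology_get_crat_acpi() follows the usual two-call size-query convention: a first call with a NULL image only reports the table length, and a second call copies it. A hedged caller sketch (the allocation strategy is illustrative):

	/* Hypothetical caller: query the size first, then fetch the table. */
	size_t size = 0;
	void *crat_image;
	int ret;

	ret = kfd_topology_get_crat_acpi(NULL, &size);	/* sets size to table length */
	if (ret != 0)
		return ret;

	crat_image = kzalloc(size, GFP_KERNEL);
	if (!crat_image)
		return -ENOMEM;

	ret = kfd_topology_get_crat_acpi(crat_image, &size);	/* copies the table */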
106
107static void kfd_populated_cu_info_cpu(struct kfd_topology_device *dev,
108 struct crat_subtype_computeunit *cu)
109{
110 BUG_ON(!dev);
111 BUG_ON(!cu);
112
113 dev->node_props.cpu_cores_count = cu->num_cpu_cores;
114 dev->node_props.cpu_core_id_base = cu->processor_id_low;
115 if (cu->hsa_capability & CRAT_CU_FLAGS_IOMMU_PRESENT)
116 dev->node_props.capability |= HSA_CAP_ATS_PRESENT;
117
118 pr_info("CU CPU: cores=%d id_base=%d\n", cu->num_cpu_cores,
119 cu->processor_id_low);
120}
121
122static void kfd_populated_cu_info_gpu(struct kfd_topology_device *dev,
123 struct crat_subtype_computeunit *cu)
124{
125 BUG_ON(!dev);
126 BUG_ON(!cu);
127
128 dev->node_props.simd_id_base = cu->processor_id_low;
129 dev->node_props.simd_count = cu->num_simd_cores;
130 dev->node_props.lds_size_in_kb = cu->lds_size_in_kb;
131 dev->node_props.max_waves_per_simd = cu->max_waves_simd;
132 dev->node_props.wave_front_size = cu->wave_front_size;
133 dev->node_props.mem_banks_count = cu->num_banks;
134 dev->node_props.array_count = cu->num_arrays;
135 dev->node_props.cu_per_simd_array = cu->num_cu_per_array;
136 dev->node_props.simd_per_cu = cu->num_simd_per_cu;
137 dev->node_props.max_slots_scratch_cu = cu->max_slots_scatch_cu;
138 if (cu->hsa_capability & CRAT_CU_FLAGS_HOT_PLUGGABLE)
139 dev->node_props.capability |= HSA_CAP_HOT_PLUGGABLE;
140 pr_info("CU GPU: simds=%d id_base=%d\n", cu->num_simd_cores,
141 cu->processor_id_low);
142}
143
144/* kfd_parse_subtype_cu is called when the topology mutex is already acquired */
145static int kfd_parse_subtype_cu(struct crat_subtype_computeunit *cu)
146{
147 struct kfd_topology_device *dev;
148 int i = 0;
149
150 BUG_ON(!cu);
151
152 pr_info("Found CU entry in CRAT table with proximity_domain=%d caps=%x\n",
153 cu->proximity_domain, cu->hsa_capability);
154 list_for_each_entry(dev, &topology_device_list, list) {
155 if (cu->proximity_domain == i) {
156 if (cu->flags & CRAT_CU_FLAGS_CPU_PRESENT)
157 kfd_populated_cu_info_cpu(dev, cu);
158
159 if (cu->flags & CRAT_CU_FLAGS_GPU_PRESENT)
160 kfd_populated_cu_info_gpu(dev, cu);
161 break;
162 }
163 i++;
164 }
165
166 return 0;
167}
168
169/*
170 * kfd_parse_subtype_mem is called when the topology mutex is
171 * already acquired
172 */
173static int kfd_parse_subtype_mem(struct crat_subtype_memory *mem)
174{
175 struct kfd_mem_properties *props;
176 struct kfd_topology_device *dev;
177 int i = 0;
178
179 BUG_ON(!mem);
180
181 pr_info("Found memory entry in CRAT table with proximity_domain=%d\n",
182 mem->promixity_domain);
183 list_for_each_entry(dev, &topology_device_list, list) {
184 if (mem->promixity_domain == i) {
185 props = kfd_alloc_struct(props);
186 if (props == NULL)
187 return -ENOMEM;
188
189 if (dev->node_props.cpu_cores_count == 0)
190 props->heap_type = HSA_MEM_HEAP_TYPE_FB_PRIVATE;
191 else
192 props->heap_type = HSA_MEM_HEAP_TYPE_SYSTEM;
193
194 if (mem->flags & CRAT_MEM_FLAGS_HOT_PLUGGABLE)
195 props->flags |= HSA_MEM_FLAGS_HOT_PLUGGABLE;
196 if (mem->flags & CRAT_MEM_FLAGS_NON_VOLATILE)
197 props->flags |= HSA_MEM_FLAGS_NON_VOLATILE;
198
199 props->size_in_bytes =
200 ((uint64_t)mem->length_high << 32) +
201 mem->length_low;
202 props->width = mem->width;
203
204 dev->mem_bank_count++;
205 list_add_tail(&props->list, &dev->mem_props);
206
207 break;
208 }
209 i++;
210 }
211
212 return 0;
213}
214
215/*
216 * kfd_parse_subtype_cache is called when the topology mutex
217 * is already acquired
218 */
219static int kfd_parse_subtype_cache(struct crat_subtype_cache *cache)
220{
221 struct kfd_cache_properties *props;
222 struct kfd_topology_device *dev;
223 uint32_t id;
224
225 BUG_ON(!cache);
226
227 id = cache->processor_id_low;
228
229 pr_info("Found cache entry in CRAT table with processor_id=%d\n", id);
230 list_for_each_entry(dev, &topology_device_list, list)
231 if (id == dev->node_props.cpu_core_id_base ||
232 id == dev->node_props.simd_id_base) {
233 props = kfd_alloc_struct(props);
234 if (props == NULL)
235 return -ENOMEM;
236
237 props->processor_id_low = id;
238 props->cache_level = cache->cache_level;
239 props->cache_size = cache->cache_size;
240 props->cacheline_size = cache->cache_line_size;
241 props->cachelines_per_tag = cache->lines_per_tag;
242 props->cache_assoc = cache->associativity;
243 props->cache_latency = cache->cache_latency;
244
245 if (cache->flags & CRAT_CACHE_FLAGS_DATA_CACHE)
246 props->cache_type |= HSA_CACHE_TYPE_DATA;
247 if (cache->flags & CRAT_CACHE_FLAGS_INST_CACHE)
248 props->cache_type |= HSA_CACHE_TYPE_INSTRUCTION;
249 if (cache->flags & CRAT_CACHE_FLAGS_CPU_CACHE)
250 props->cache_type |= HSA_CACHE_TYPE_CPU;
251 if (cache->flags & CRAT_CACHE_FLAGS_SIMD_CACHE)
252 props->cache_type |= HSA_CACHE_TYPE_HSACU;
253
254 dev->cache_count++;
255 dev->node_props.caches_count++;
256 list_add_tail(&props->list, &dev->cache_props);
257
258 break;
259 }
260
261 return 0;
262}
263
264/*
265 * kfd_parse_subtype_iolink is called when the topology mutex
266 * is already acquired
267 */
268static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink)
269{
270 struct kfd_iolink_properties *props;
271 struct kfd_topology_device *dev;
272 uint32_t i = 0;
273 uint32_t id_from;
274 uint32_t id_to;
275
276 BUG_ON(!iolink);
277
278 id_from = iolink->proximity_domain_from;
279 id_to = iolink->proximity_domain_to;
280
281 pr_info("Found IO link entry in CRAT table with id_from=%d\n", id_from);
282 list_for_each_entry(dev, &topology_device_list, list) {
283 if (id_from == i) {
284 props = kfd_alloc_struct(props);
285 if (props == NULL)
286 return -ENOMEM;
287
288 props->node_from = id_from;
289 props->node_to = id_to;
290 props->ver_maj = iolink->version_major;
291 props->ver_min = iolink->version_minor;
292
293 /*
294 * weight factor (derived from CDIR), currently always 1
295 */
296 props->weight = 1;
297
298 props->min_latency = iolink->minimum_latency;
299 props->max_latency = iolink->maximum_latency;
300 props->min_bandwidth = iolink->minimum_bandwidth_mbs;
301 props->max_bandwidth = iolink->maximum_bandwidth_mbs;
302 props->rec_transfer_size =
303 iolink->recommended_transfer_size;
304
305 dev->io_link_count++;
306 dev->node_props.io_links_count++;
307 list_add_tail(&props->list, &dev->io_link_props);
308
309 break;
310 }
311 i++;
312 }
313
314 return 0;
315}
316
317static int kfd_parse_subtype(struct crat_subtype_generic *sub_type_hdr)
318{
319 struct crat_subtype_computeunit *cu;
320 struct crat_subtype_memory *mem;
321 struct crat_subtype_cache *cache;
322 struct crat_subtype_iolink *iolink;
323 int ret = 0;
324
325 BUG_ON(!sub_type_hdr);
326
327 switch (sub_type_hdr->type) {
328 case CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY:
329 cu = (struct crat_subtype_computeunit *)sub_type_hdr;
330 ret = kfd_parse_subtype_cu(cu);
331 break;
332 case CRAT_SUBTYPE_MEMORY_AFFINITY:
333 mem = (struct crat_subtype_memory *)sub_type_hdr;
334 ret = kfd_parse_subtype_mem(mem);
335 break;
336 case CRAT_SUBTYPE_CACHE_AFFINITY:
337 cache = (struct crat_subtype_cache *)sub_type_hdr;
338 ret = kfd_parse_subtype_cache(cache);
339 break;
340 case CRAT_SUBTYPE_TLB_AFFINITY:
341 /*
342 * For now, nothing to do here
343 */
344 pr_info("Found TLB entry in CRAT table (not processing)\n");
345 break;
346 case CRAT_SUBTYPE_CCOMPUTE_AFFINITY:
347 /*
348 * For now, nothing to do here
349 */
350 pr_info("Found CCOMPUTE entry in CRAT table (not processing)\n");
351 break;
352 case CRAT_SUBTYPE_IOLINK_AFFINITY:
353 iolink = (struct crat_subtype_iolink *)sub_type_hdr;
354 ret = kfd_parse_subtype_iolink(iolink);
355 break;
356 default:
357 pr_warn("Unknown subtype (%d) in CRAT\n",
358 sub_type_hdr->type);
359 }
360
361 return ret;
362}
363
364static void kfd_release_topology_device(struct kfd_topology_device *dev)
365{
366 struct kfd_mem_properties *mem;
367 struct kfd_cache_properties *cache;
368 struct kfd_iolink_properties *iolink;
369
370 BUG_ON(!dev);
371
372 list_del(&dev->list);
373
374 while (dev->mem_props.next != &dev->mem_props) {
375 mem = container_of(dev->mem_props.next,
376 struct kfd_mem_properties, list);
377 list_del(&mem->list);
378 kfree(mem);
379 }
380
381 while (dev->cache_props.next != &dev->cache_props) {
382 cache = container_of(dev->cache_props.next,
383 struct kfd_cache_properties, list);
384 list_del(&cache->list);
385 kfree(cache);
386 }
387
388 while (dev->io_link_props.next != &dev->io_link_props) {
389 iolink = container_of(dev->io_link_props.next,
390 struct kfd_iolink_properties, list);
391 list_del(&iolink->list);
392 kfree(iolink);
393 }
394
395 kfree(dev);
396
397 sys_props.num_devices--;
398}
399
400static void kfd_release_live_view(void)
401{
402 struct kfd_topology_device *dev;
403
404 while (topology_device_list.next != &topology_device_list) {
405 dev = container_of(topology_device_list.next,
406 struct kfd_topology_device, list);
407 kfd_release_topology_device(dev);
408 }
409
410 memset(&sys_props, 0, sizeof(sys_props));
411}
412
413static struct kfd_topology_device *kfd_create_topology_device(void)
414{
415 struct kfd_topology_device *dev;
416
417 dev = kfd_alloc_struct(dev);
418 if (dev == NULL) {
419 pr_err("No memory to allocate a topology device\n");
420 return NULL;
421 }
422
423 INIT_LIST_HEAD(&dev->mem_props);
424 INIT_LIST_HEAD(&dev->cache_props);
425 INIT_LIST_HEAD(&dev->io_link_props);
426
427 list_add_tail(&dev->list, &topology_device_list);
428 sys_props.num_devices++;
429
430 return dev;
431}
432
433static int kfd_parse_crat_table(void *crat_image)
434{
435 struct kfd_topology_device *top_dev;
436 struct crat_subtype_generic *sub_type_hdr;
437 uint16_t node_id;
438 int ret;
439 struct crat_header *crat_table = (struct crat_header *)crat_image;
440 uint16_t num_nodes;
441 uint32_t image_len;
442
443 if (!crat_image)
444 return -EINVAL;
445
446 num_nodes = crat_table->num_domains;
447 image_len = crat_table->length;
448
449 pr_info("Parsing CRAT table with %d nodes\n", num_nodes);
450
451 for (node_id = 0; node_id < num_nodes; node_id++) {
452 top_dev = kfd_create_topology_device();
453 if (!top_dev) {
454 kfd_release_live_view();
455 return -ENOMEM;
456 }
457 }
458
459 sys_props.platform_id =
460 (*((uint64_t *)crat_table->oem_id)) & CRAT_OEMID_64BIT_MASK;
461 sys_props.platform_oem = *((uint64_t *)crat_table->oem_table_id);
462 sys_props.platform_rev = crat_table->revision;
463
464 sub_type_hdr = (struct crat_subtype_generic *)(crat_table+1);
465 while ((char *)sub_type_hdr + sizeof(struct crat_subtype_generic) <
466 ((char *)crat_image) + image_len) {
467 if (sub_type_hdr->flags & CRAT_SUBTYPE_FLAGS_ENABLED) {
468 ret = kfd_parse_subtype(sub_type_hdr);
469 if (ret != 0) {
470 kfd_release_live_view();
471 return ret;
472 }
473 }
474
475 sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
476 sub_type_hdr->length);
477 }
478
479 sys_props.generation_count++;
480 topology_crat_parsed = 1;
481
482 return 0;
483}
484
485
486#define sysfs_show_gen_prop(buffer, fmt, ...) \
487 snprintf(buffer, PAGE_SIZE, "%s"fmt, buffer, __VA_ARGS__)
488#define sysfs_show_32bit_prop(buffer, name, value) \
489 sysfs_show_gen_prop(buffer, "%s %u\n", name, value)
490#define sysfs_show_64bit_prop(buffer, name, value) \
491 sysfs_show_gen_prop(buffer, "%s %llu\n", name, value)
492#define sysfs_show_32bit_val(buffer, value) \
493 sysfs_show_gen_prop(buffer, "%u\n", value)
494#define sysfs_show_str_val(buffer, value) \
495 sysfs_show_gen_prop(buffer, "%s\n", value)
496
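The sysfs_show_* macros append to the buffer by re-printing its current contents through the leading "%s"; a worked expansion of one invocation follows (note that passing buffer as both the destination and a source argument to snprintf is formally undefined behavior, relied on here to build multi-line attribute files):

	/* Worked expansion (illustrative):
	 *	sysfs_show_32bit_prop(buffer, "weight", 1)
	 * becomes
	 *	snprintf(buffer, PAGE_SIZE, "%s%s %u\n", buffer, "weight", 1);
	 * i.e. the old contents are re-emitted, then "weight 1\n" is appended.
	 */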
497static ssize_t sysprops_show(struct kobject *kobj, struct attribute *attr,
498 char *buffer)
499{
500 ssize_t ret;
501
502 /* Making sure that the buffer is an empty string */
503 buffer[0] = 0;
504
505 if (attr == &sys_props.attr_genid) {
506 ret = sysfs_show_32bit_val(buffer, sys_props.generation_count);
507 } else if (attr == &sys_props.attr_props) {
508 sysfs_show_64bit_prop(buffer, "platform_oem",
509 sys_props.platform_oem);
510 sysfs_show_64bit_prop(buffer, "platform_id",
511 sys_props.platform_id);
512 ret = sysfs_show_64bit_prop(buffer, "platform_rev",
513 sys_props.platform_rev);
514 } else {
515 ret = -EINVAL;
516 }
517
518 return ret;
519}
520
521static const struct sysfs_ops sysprops_ops = {
522 .show = sysprops_show,
523};
524
525static struct kobj_type sysprops_type = {
526 .sysfs_ops = &sysprops_ops,
527};
528
529static ssize_t iolink_show(struct kobject *kobj, struct attribute *attr,
530 char *buffer)
531{
532 ssize_t ret;
533 struct kfd_iolink_properties *iolink;
534
535 /* Making sure that the buffer is an empty string */
536 buffer[0] = 0;
537
538 iolink = container_of(attr, struct kfd_iolink_properties, attr);
539 sysfs_show_32bit_prop(buffer, "type", iolink->iolink_type);
540 sysfs_show_32bit_prop(buffer, "version_major", iolink->ver_maj);
541 sysfs_show_32bit_prop(buffer, "version_minor", iolink->ver_min);
542 sysfs_show_32bit_prop(buffer, "node_from", iolink->node_from);
543 sysfs_show_32bit_prop(buffer, "node_to", iolink->node_to);
544 sysfs_show_32bit_prop(buffer, "weight", iolink->weight);
545 sysfs_show_32bit_prop(buffer, "min_latency", iolink->min_latency);
546 sysfs_show_32bit_prop(buffer, "max_latency", iolink->max_latency);
547 sysfs_show_32bit_prop(buffer, "min_bandwidth", iolink->min_bandwidth);
548 sysfs_show_32bit_prop(buffer, "max_bandwidth", iolink->max_bandwidth);
549 sysfs_show_32bit_prop(buffer, "recommended_transfer_size",
550 iolink->rec_transfer_size);
551 ret = sysfs_show_32bit_prop(buffer, "flags", iolink->flags);
552
553 return ret;
554}
555
556static const struct sysfs_ops iolink_ops = {
557 .show = iolink_show,
558};
559
560static struct kobj_type iolink_type = {
561 .sysfs_ops = &iolink_ops,
562};
563
564static ssize_t mem_show(struct kobject *kobj, struct attribute *attr,
565 char *buffer)
566{
567 ssize_t ret;
568 struct kfd_mem_properties *mem;
569
570 /* Making sure that the buffer is an empty string */
571 buffer[0] = 0;
572
573 mem = container_of(attr, struct kfd_mem_properties, attr);
574 sysfs_show_32bit_prop(buffer, "heap_type", mem->heap_type);
575 sysfs_show_64bit_prop(buffer, "size_in_bytes", mem->size_in_bytes);
576 sysfs_show_32bit_prop(buffer, "flags", mem->flags);
577 sysfs_show_32bit_prop(buffer, "width", mem->width);
578 ret = sysfs_show_32bit_prop(buffer, "mem_clk_max", mem->mem_clk_max);
579
580 return ret;
581}
582
583static const struct sysfs_ops mem_ops = {
584 .show = mem_show,
585};
586
587static struct kobj_type mem_type = {
588 .sysfs_ops = &mem_ops,
589};
590
591static ssize_t kfd_cache_show(struct kobject *kobj, struct attribute *attr,
592 char *buffer)
593{
594 ssize_t ret;
595 uint32_t i;
596 struct kfd_cache_properties *cache;
597
598 /* Making sure that the buffer is an empty string */
599 buffer[0] = 0;
600
601 cache = container_of(attr, struct kfd_cache_properties, attr);
602 sysfs_show_32bit_prop(buffer, "processor_id_low",
603 cache->processor_id_low);
604 sysfs_show_32bit_prop(buffer, "level", cache->cache_level);
605 sysfs_show_32bit_prop(buffer, "size", cache->cache_size);
606 sysfs_show_32bit_prop(buffer, "cache_line_size", cache->cacheline_size);
607 sysfs_show_32bit_prop(buffer, "cache_lines_per_tag",
608 cache->cachelines_per_tag);
609 sysfs_show_32bit_prop(buffer, "association", cache->cache_assoc);
610 sysfs_show_32bit_prop(buffer, "latency", cache->cache_latency);
611 sysfs_show_32bit_prop(buffer, "type", cache->cache_type);
612 snprintf(buffer, PAGE_SIZE, "%ssibling_map ", buffer);
613 for (i = 0; i < KFD_TOPOLOGY_CPU_SIBLINGS; i++)
614 ret = snprintf(buffer, PAGE_SIZE, "%s%d%s",
615 buffer, cache->sibling_map[i],
616 (i == KFD_TOPOLOGY_CPU_SIBLINGS-1) ?
617 "\n" : ",");
618
619 return ret;
620}
621
622static const struct sysfs_ops cache_ops = {
623 .show = kfd_cache_show,
624};
625
626static struct kobj_type cache_type = {
627 .sysfs_ops = &cache_ops,
628};
629
630static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
631 char *buffer)
632{
633 ssize_t ret;
634 struct kfd_topology_device *dev;
635 char public_name[KFD_TOPOLOGY_PUBLIC_NAME_SIZE];
636 uint32_t i;
637
638 /* Making sure that the buffer is an empty string */
639 buffer[0] = 0;
640
641 if (strcmp(attr->name, "gpu_id") == 0) {
642 dev = container_of(attr, struct kfd_topology_device,
643 attr_gpuid);
644 ret = sysfs_show_32bit_val(buffer, dev->gpu_id);
645 } else if (strcmp(attr->name, "name") == 0) {
646 dev = container_of(attr, struct kfd_topology_device,
647 attr_name);
648 for (i = 0; i < KFD_TOPOLOGY_PUBLIC_NAME_SIZE; i++) {
649 public_name[i] =
650 (char)dev->node_props.marketing_name[i];
651 if (dev->node_props.marketing_name[i] == 0)
652 break;
653 }
654 public_name[KFD_TOPOLOGY_PUBLIC_NAME_SIZE-1] = 0x0;
655 ret = sysfs_show_str_val(buffer, public_name);
656 } else {
657 dev = container_of(attr, struct kfd_topology_device,
658 attr_props);
659 sysfs_show_32bit_prop(buffer, "cpu_cores_count",
660 dev->node_props.cpu_cores_count);
661 sysfs_show_32bit_prop(buffer, "simd_count",
662 dev->node_props.simd_count);
663
664 if (dev->mem_bank_count < dev->node_props.mem_banks_count) {
665 pr_warn("kfd: mem_banks_count truncated from %d to %d\n",
666 dev->node_props.mem_banks_count,
667 dev->mem_bank_count);
668 sysfs_show_32bit_prop(buffer, "mem_banks_count",
669 dev->mem_bank_count);
670 } else {
671 sysfs_show_32bit_prop(buffer, "mem_banks_count",
672 dev->node_props.mem_banks_count);
673 }
674
675 sysfs_show_32bit_prop(buffer, "caches_count",
676 dev->node_props.caches_count);
677 sysfs_show_32bit_prop(buffer, "io_links_count",
678 dev->node_props.io_links_count);
679 sysfs_show_32bit_prop(buffer, "cpu_core_id_base",
680 dev->node_props.cpu_core_id_base);
681 sysfs_show_32bit_prop(buffer, "simd_id_base",
682 dev->node_props.simd_id_base);
683 sysfs_show_32bit_prop(buffer, "capability",
684 dev->node_props.capability);
685 sysfs_show_32bit_prop(buffer, "max_waves_per_simd",
686 dev->node_props.max_waves_per_simd);
687 sysfs_show_32bit_prop(buffer, "lds_size_in_kb",
688 dev->node_props.lds_size_in_kb);
689 sysfs_show_32bit_prop(buffer, "gds_size_in_kb",
690 dev->node_props.gds_size_in_kb);
691 sysfs_show_32bit_prop(buffer, "wave_front_size",
692 dev->node_props.wave_front_size);
693 sysfs_show_32bit_prop(buffer, "array_count",
694 dev->node_props.array_count);
695 sysfs_show_32bit_prop(buffer, "simd_arrays_per_engine",
696 dev->node_props.simd_arrays_per_engine);
697 sysfs_show_32bit_prop(buffer, "cu_per_simd_array",
698 dev->node_props.cu_per_simd_array);
699 sysfs_show_32bit_prop(buffer, "simd_per_cu",
700 dev->node_props.simd_per_cu);
701 sysfs_show_32bit_prop(buffer, "max_slots_scratch_cu",
702 dev->node_props.max_slots_scratch_cu);
703 sysfs_show_32bit_prop(buffer, "engine_id",
704 dev->node_props.engine_id);
705 sysfs_show_32bit_prop(buffer, "vendor_id",
706 dev->node_props.vendor_id);
707 sysfs_show_32bit_prop(buffer, "device_id",
708 dev->node_props.device_id);
709 sysfs_show_32bit_prop(buffer, "location_id",
710 dev->node_props.location_id);
711
712 if (dev->gpu) {
713 sysfs_show_32bit_prop(buffer, "max_engine_clk_fcompute",
714 kfd2kgd->get_max_engine_clock_in_mhz(
715 dev->gpu->kgd));
716 sysfs_show_64bit_prop(buffer, "local_mem_size",
717 kfd2kgd->get_vmem_size(dev->gpu->kgd));
718 }
719
720 ret = sysfs_show_32bit_prop(buffer, "max_engine_clk_ccompute",
721 cpufreq_quick_get_max(0)/1000);
722 }
723
724 return ret;
725}
726
727static const struct sysfs_ops node_ops = {
728 .show = node_show,
729};
730
731static struct kobj_type node_type = {
732 .sysfs_ops = &node_ops,
733};
734
735static void kfd_remove_sysfs_file(struct kobject *kobj, struct attribute *attr)
736{
737 sysfs_remove_file(kobj, attr);
738 kobject_del(kobj);
739 kobject_put(kobj);
740}
741
742static void kfd_remove_sysfs_node_entry(struct kfd_topology_device *dev)
743{
744 struct kfd_iolink_properties *iolink;
745 struct kfd_cache_properties *cache;
746 struct kfd_mem_properties *mem;
747
748 BUG_ON(!dev);
749
750 if (dev->kobj_iolink) {
751 list_for_each_entry(iolink, &dev->io_link_props, list)
752 if (iolink->kobj) {
753 kfd_remove_sysfs_file(iolink->kobj,
754 &iolink->attr);
755 iolink->kobj = NULL;
756 }
757 kobject_del(dev->kobj_iolink);
758 kobject_put(dev->kobj_iolink);
759 dev->kobj_iolink = NULL;
760 }
761
762 if (dev->kobj_cache) {
763 list_for_each_entry(cache, &dev->cache_props, list)
764 if (cache->kobj) {
765 kfd_remove_sysfs_file(cache->kobj,
766 &cache->attr);
767 cache->kobj = NULL;
768 }
769 kobject_del(dev->kobj_cache);
770 kobject_put(dev->kobj_cache);
771 dev->kobj_cache = NULL;
772 }
773
774 if (dev->kobj_mem) {
775 list_for_each_entry(mem, &dev->mem_props, list)
776 if (mem->kobj) {
777 kfd_remove_sysfs_file(mem->kobj, &mem->attr);
778 mem->kobj = NULL;
779 }
780 kobject_del(dev->kobj_mem);
781 kobject_put(dev->kobj_mem);
782 dev->kobj_mem = NULL;
783 }
784
785 if (dev->kobj_node) {
786 sysfs_remove_file(dev->kobj_node, &dev->attr_gpuid);
787 sysfs_remove_file(dev->kobj_node, &dev->attr_name);
788 sysfs_remove_file(dev->kobj_node, &dev->attr_props);
789 kobject_del(dev->kobj_node);
790 kobject_put(dev->kobj_node);
791 dev->kobj_node = NULL;
792 }
793}
794
795static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev,
796 uint32_t id)
797{
798 struct kfd_iolink_properties *iolink;
799 struct kfd_cache_properties *cache;
800 struct kfd_mem_properties *mem;
801 int ret;
802 uint32_t i;
803
804 BUG_ON(!dev);
805
806 /*
807 * Creating the sysfs folders
808 */
809 BUG_ON(dev->kobj_node);
810 dev->kobj_node = kfd_alloc_struct(dev->kobj_node);
811 if (!dev->kobj_node)
812 return -ENOMEM;
813
814 ret = kobject_init_and_add(dev->kobj_node, &node_type,
815 sys_props.kobj_nodes, "%d", id);
816 if (ret < 0)
817 return ret;
818
819 dev->kobj_mem = kobject_create_and_add("mem_banks", dev->kobj_node);
820 if (!dev->kobj_mem)
821 return -ENOMEM;
822
823 dev->kobj_cache = kobject_create_and_add("caches", dev->kobj_node);
824 if (!dev->kobj_cache)
825 return -ENOMEM;
826
827 dev->kobj_iolink = kobject_create_and_add("io_links", dev->kobj_node);
828 if (!dev->kobj_iolink)
829 return -ENOMEM;
830
831 /*
832 * Creating sysfs files for node properties
833 */
834 dev->attr_gpuid.name = "gpu_id";
835 dev->attr_gpuid.mode = KFD_SYSFS_FILE_MODE;
836 sysfs_attr_init(&dev->attr_gpuid);
837 dev->attr_name.name = "name";
838 dev->attr_name.mode = KFD_SYSFS_FILE_MODE;
839 sysfs_attr_init(&dev->attr_name);
840 dev->attr_props.name = "properties";
841 dev->attr_props.mode = KFD_SYSFS_FILE_MODE;
842 sysfs_attr_init(&dev->attr_props);
843 ret = sysfs_create_file(dev->kobj_node, &dev->attr_gpuid);
844 if (ret < 0)
845 return ret;
846 ret = sysfs_create_file(dev->kobj_node, &dev->attr_name);
847 if (ret < 0)
848 return ret;
849 ret = sysfs_create_file(dev->kobj_node, &dev->attr_props);
850 if (ret < 0)
851 return ret;
852
853 i = 0;
854 list_for_each_entry(mem, &dev->mem_props, list) {
855 mem->kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL);
856 if (!mem->kobj)
857 return -ENOMEM;
858 ret = kobject_init_and_add(mem->kobj, &mem_type,
859 dev->kobj_mem, "%d", i);
860 if (ret < 0)
861 return ret;
862
863 mem->attr.name = "properties";
864 mem->attr.mode = KFD_SYSFS_FILE_MODE;
865 sysfs_attr_init(&mem->attr);
866 ret = sysfs_create_file(mem->kobj, &mem->attr);
867 if (ret < 0)
868 return ret;
869 i++;
870 }
871
872 i = 0;
873 list_for_each_entry(cache, &dev->cache_props, list) {
874 cache->kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL);
875 if (!cache->kobj)
876 return -ENOMEM;
877 ret = kobject_init_and_add(cache->kobj, &cache_type,
878 dev->kobj_cache, "%d", i);
879 if (ret < 0)
880 return ret;
881
882 cache->attr.name = "properties";
883 cache->attr.mode = KFD_SYSFS_FILE_MODE;
884 sysfs_attr_init(&cache->attr);
885 ret = sysfs_create_file(cache->kobj, &cache->attr);
886 if (ret < 0)
887 return ret;
888 i++;
889 }
890
891 i = 0;
892 list_for_each_entry(iolink, &dev->io_link_props, list) {
893 iolink->kobj = kzalloc(sizeof(struct kobject), GFP_KERNEL);
894 if (!iolink->kobj)
895 return -ENOMEM;
896 ret = kobject_init_and_add(iolink->kobj, &iolink_type,
897 dev->kobj_iolink, "%d", i);
898 if (ret < 0)
899 return ret;
900
901 iolink->attr.name = "properties";
902 iolink->attr.mode = KFD_SYSFS_FILE_MODE;
903 sysfs_attr_init(&iolink->attr);
904 ret = sysfs_create_file(iolink->kobj, &iolink->attr);
905 if (ret < 0)
906 return ret;
907 i++;
908	}
909
910 return 0;
911}
912
913static int kfd_build_sysfs_node_tree(void)
914{
915 struct kfd_topology_device *dev;
916 int ret;
917 uint32_t i = 0;
918
919 list_for_each_entry(dev, &topology_device_list, list) {
920		ret = kfd_build_sysfs_node_entry(dev, i);
921 if (ret < 0)
922 return ret;
923 i++;
924 }
925
926 return 0;
927}
928
929static void kfd_remove_sysfs_node_tree(void)
930{
931 struct kfd_topology_device *dev;
932
933 list_for_each_entry(dev, &topology_device_list, list)
934 kfd_remove_sysfs_node_entry(dev);
935}
936
937static int kfd_topology_update_sysfs(void)
938{
939 int ret;
940
941 pr_info("Creating topology SYSFS entries\n");
942 if (sys_props.kobj_topology == NULL) {
943 sys_props.kobj_topology =
944 kfd_alloc_struct(sys_props.kobj_topology);
945 if (!sys_props.kobj_topology)
946 return -ENOMEM;
947
948 ret = kobject_init_and_add(sys_props.kobj_topology,
949 &sysprops_type, &kfd_device->kobj,
950 "topology");
951 if (ret < 0)
952 return ret;
953
954 sys_props.kobj_nodes = kobject_create_and_add("nodes",
955 sys_props.kobj_topology);
956 if (!sys_props.kobj_nodes)
957 return -ENOMEM;
958
959 sys_props.attr_genid.name = "generation_id";
960 sys_props.attr_genid.mode = KFD_SYSFS_FILE_MODE;
961 sysfs_attr_init(&sys_props.attr_genid);
962 ret = sysfs_create_file(sys_props.kobj_topology,
963 &sys_props.attr_genid);
964 if (ret < 0)
965 return ret;
966
967 sys_props.attr_props.name = "system_properties";
968 sys_props.attr_props.mode = KFD_SYSFS_FILE_MODE;
969 sysfs_attr_init(&sys_props.attr_props);
970 ret = sysfs_create_file(sys_props.kobj_topology,
971 &sys_props.attr_props);
972 if (ret < 0)
973 return ret;
974 }
975
976 kfd_remove_sysfs_node_tree();
977
978 return kfd_build_sysfs_node_tree();
979}
980
981static void kfd_topology_release_sysfs(void)
982{
983 kfd_remove_sysfs_node_tree();
984 if (sys_props.kobj_topology) {
985 sysfs_remove_file(sys_props.kobj_topology,
986 &sys_props.attr_genid);
987 sysfs_remove_file(sys_props.kobj_topology,
988 &sys_props.attr_props);
989 if (sys_props.kobj_nodes) {
990 kobject_del(sys_props.kobj_nodes);
991 kobject_put(sys_props.kobj_nodes);
992 sys_props.kobj_nodes = NULL;
993 }
994 kobject_del(sys_props.kobj_topology);
995 kobject_put(sys_props.kobj_topology);
996 sys_props.kobj_topology = NULL;
997 }
998}
999
1000int kfd_topology_init(void)
1001{
1002 void *crat_image = NULL;
1003 size_t image_size = 0;
1004 int ret;
1005
1006 /*
1007 * Initialize the head for the topology device list
1008 */
1009 INIT_LIST_HEAD(&topology_device_list);
1010 init_rwsem(&topology_lock);
1011 topology_crat_parsed = 0;
1012
1013 memset(&sys_props, 0, sizeof(sys_props));
1014
1015 /*
1016 * Get the CRAT image from the ACPI
1017 */
1018 ret = kfd_topology_get_crat_acpi(crat_image, &image_size);
1019 if (ret == 0 && image_size > 0) {
1020 pr_info("Found CRAT image with size=%zd\n", image_size);
1021 crat_image = kmalloc(image_size, GFP_KERNEL);
1022 if (!crat_image) {
1023 ret = -ENOMEM;
1024			pr_err("No memory to allocate CRAT image\n");
1025 goto err;
1026 }
1027 ret = kfd_topology_get_crat_acpi(crat_image, &image_size);
1028
1029 if (ret == 0) {
1030 down_write(&topology_lock);
1031 ret = kfd_parse_crat_table(crat_image);
1032 if (ret == 0)
1033 ret = kfd_topology_update_sysfs();
1034 up_write(&topology_lock);
1035 } else {
1036 pr_err("Couldn't get CRAT table size from ACPI\n");
1037 }
1038 kfree(crat_image);
1039 } else if (ret == -ENODATA) {
1040 ret = 0;
1041 } else {
1042 pr_err("Couldn't get CRAT table size from ACPI\n");
1043 }
1044
1045err:
1046 pr_info("Finished initializing topology ret=%d\n", ret);
1047 return ret;
1048}
1049
1050void kfd_topology_shutdown(void)
1051{
1052 kfd_topology_release_sysfs();
1053 kfd_release_live_view();
1054}
1055
1056static void kfd_debug_print_topology(void)
1057{
1058 struct kfd_topology_device *dev;
1059 uint32_t i = 0;
1060
1061 pr_info("DEBUG PRINT OF TOPOLOGY:");
1062 list_for_each_entry(dev, &topology_device_list, list) {
1063 pr_info("Node: %d\n", i);
1064 pr_info("\tGPU assigned: %s\n", (dev->gpu ? "yes" : "no"));
1065 pr_info("\tCPU count: %d\n", dev->node_props.cpu_cores_count);
1066 pr_info("\tSIMD count: %d", dev->node_props.simd_count);
1067 i++;
1068 }
1069}
1070
1071static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
1072{
1073 uint32_t hashout;
1074 uint32_t buf[7];
1075 int i;
1076
1077 if (!gpu)
1078 return 0;
1079
1080 buf[0] = gpu->pdev->devfn;
1081 buf[1] = gpu->pdev->subsystem_vendor;
1082 buf[2] = gpu->pdev->subsystem_device;
1083 buf[3] = gpu->pdev->device;
1084 buf[4] = gpu->pdev->bus->number;
1085 buf[5] = (uint32_t)(kfd2kgd->get_vmem_size(gpu->kgd) & 0xffffffff);
1086 buf[6] = (uint32_t)(kfd2kgd->get_vmem_size(gpu->kgd) >> 32);
1087
1088 for (i = 0, hashout = 0; i < 7; i++)
1089 hashout ^= hash_32(buf[i], KFD_GPU_ID_HASH_WIDTH);
1090
1091 return hashout;
1092}
1093
1094static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
1095{
1096 struct kfd_topology_device *dev;
1097 struct kfd_topology_device *out_dev = NULL;
1098
1099 BUG_ON(!gpu);
1100
1101 list_for_each_entry(dev, &topology_device_list, list)
1102 if (dev->gpu == NULL && dev->node_props.simd_count > 0) {
1103 dev->gpu = gpu;
1104 out_dev = dev;
1105 break;
1106 }
1107
1108 return out_dev;
1109}
1110
1111static void kfd_notify_gpu_change(uint32_t gpu_id, int arrival)
1112{
1113 /*
1114 * TODO: Generate an event for thunk about the arrival/removal
1115 * of the GPU
1116 */
1117}
1118
1119int kfd_topology_add_device(struct kfd_dev *gpu)
1120{
1121 uint32_t gpu_id;
1122 struct kfd_topology_device *dev;
1123 int res;
1124
1125 BUG_ON(!gpu);
1126
1127 gpu_id = kfd_generate_gpu_id(gpu);
1128
1129 pr_debug("kfd: Adding new GPU (ID: 0x%x) to topology\n", gpu_id);
1130
1131 down_write(&topology_lock);
1132 /*
1133 * Try to assign the GPU to existing topology device (generated from
1134	 * CRAT table)
1135 */
1136 dev = kfd_assign_gpu(gpu);
1137 if (!dev) {
1138 pr_info("GPU was not found in the current topology. Extending.\n");
1139 kfd_debug_print_topology();
1140 dev = kfd_create_topology_device();
1141 if (!dev) {
1142 res = -ENOMEM;
1143 goto err;
1144 }
1145 dev->gpu = gpu;
1146
1147 /*
1148 * TODO: Make a call to retrieve topology information from the
1149 * GPU vBIOS
1150 */
1151
1152 /*
1153 * Update the SYSFS tree, since we added another topology device
1154 */
1155 if (kfd_topology_update_sysfs() < 0)
1156 kfd_topology_release_sysfs();
1157
1158 }
1159
1160 dev->gpu_id = gpu_id;
1161 gpu->id = gpu_id;
1162 dev->node_props.vendor_id = gpu->pdev->vendor;
1163 dev->node_props.device_id = gpu->pdev->device;
1164 dev->node_props.location_id = (gpu->pdev->bus->number << 24) +
1165 (gpu->pdev->devfn & 0xffffff);
1166 /*
1167 * TODO: Retrieve max engine clock values from KGD
1168 */
1169
1170 res = 0;
1171
1172err:
1173 up_write(&topology_lock);
1174
1175 if (res == 0)
1176 kfd_notify_gpu_change(gpu_id, 1);
1177
1178 return res;
1179}
1180
1181int kfd_topology_remove_device(struct kfd_dev *gpu)
1182{
1183 struct kfd_topology_device *dev;
1184 uint32_t gpu_id;
1185 int res = -ENODEV;
1186
1187 BUG_ON(!gpu);
1188
1189 down_write(&topology_lock);
1190
1191 list_for_each_entry(dev, &topology_device_list, list)
1192 if (dev->gpu == gpu) {
1193 gpu_id = dev->gpu_id;
1194 kfd_remove_sysfs_node_entry(dev);
1195 kfd_release_topology_device(dev);
1196 res = 0;
1197 if (kfd_topology_update_sysfs() < 0)
1198 kfd_topology_release_sysfs();
1199 break;
1200 }
1201
1202 up_write(&topology_lock);
1203
1204 if (res == 0)
1205 kfd_notify_gpu_change(gpu_id, 0);
1206
1207 return res;
1208}
1209
1210/*
1211 * When idx is out of bounds, the function will return NULL
1212 */
1213struct kfd_dev *kfd_topology_enum_kfd_devices(uint8_t idx)
1214{
1215
1216 struct kfd_topology_device *top_dev;
1217 struct kfd_dev *device = NULL;
1218 uint8_t device_idx = 0;
1219
1220 down_read(&topology_lock);
1221
1222 list_for_each_entry(top_dev, &topology_device_list, list) {
1223 if (device_idx == idx) {
1224 device = top_dev->gpu;
1225 break;
1226 }
1227
1228 device_idx++;
1229 }
1230
1231 up_read(&topology_lock);
1232
1233 return device;
1234
1235}
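
A note on consuming this tree: the functions above publish one directory per
topology node, each holding gpu_id, name and properties files, with the node
directories numbered from 0 by kfd_build_sysfs_node_tree(). A minimal
userspace sketch of walking that tree follows. The base path is an assumption
(it depends on where kfd_device is registered in the device hierarchy), so
treat this as illustrative rather than as part of the patch.

#include <stdio.h>

#define KFD_NODES "/sys/devices/virtual/kfd/kfd/topology/nodes"

/* Dump one sysfs attribute file; returns -1 if it does not exist. */
static int dump_file(const char *path)
{
	char buf[512];
	FILE *f = fopen(path, "r");

	if (!f)
		return -1;
	printf("== %s ==\n", path);
	while (fgets(buf, sizeof(buf), f))
		fputs(buf, stdout);
	fclose(f);
	return 0;
}

int main(void)
{
	char path[256];
	int node;

	/* Node directories are numbered from 0; stop at the first gap. */
	for (node = 0; ; node++) {
		snprintf(path, sizeof(path), KFD_NODES "/%d/gpu_id", node);
		if (dump_file(path) < 0)
			break;
		snprintf(path, sizeof(path), KFD_NODES "/%d/name", node);
		dump_file(path);
		snprintf(path, sizeof(path), KFD_NODES "/%d/properties", node);
		dump_file(path);
	}
	return 0;
}
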
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
new file mode 100644
index 000000000000..989624b3cd14
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
@@ -0,0 +1,168 @@
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef __KFD_TOPOLOGY_H__
24#define __KFD_TOPOLOGY_H__
25
26#include <linux/types.h>
27#include <linux/list.h>
28#include "kfd_priv.h"
29
30#define KFD_TOPOLOGY_PUBLIC_NAME_SIZE 128
31
32#define HSA_CAP_HOT_PLUGGABLE 0x00000001
33#define HSA_CAP_ATS_PRESENT 0x00000002
34#define HSA_CAP_SHARED_WITH_GRAPHICS 0x00000004
35#define HSA_CAP_QUEUE_SIZE_POW2 0x00000008
36#define HSA_CAP_QUEUE_SIZE_32BIT 0x00000010
37#define HSA_CAP_QUEUE_IDLE_EVENT 0x00000020
38#define HSA_CAP_VA_LIMIT 0x00000040
39#define HSA_CAP_WATCH_POINTS_SUPPORTED 0x00000080
40#define HSA_CAP_WATCH_POINTS_TOTALBITS_MASK 0x00000f00
41#define HSA_CAP_WATCH_POINTS_TOTALBITS_SHIFT 8
42#define HSA_CAP_RESERVED 0xfffff000
43
44struct kfd_node_properties {
45 uint32_t cpu_cores_count;
46 uint32_t simd_count;
47 uint32_t mem_banks_count;
48 uint32_t caches_count;
49 uint32_t io_links_count;
50 uint32_t cpu_core_id_base;
51 uint32_t simd_id_base;
52 uint32_t capability;
53 uint32_t max_waves_per_simd;
54 uint32_t lds_size_in_kb;
55 uint32_t gds_size_in_kb;
56 uint32_t wave_front_size;
57 uint32_t array_count;
58 uint32_t simd_arrays_per_engine;
59 uint32_t cu_per_simd_array;
60 uint32_t simd_per_cu;
61 uint32_t max_slots_scratch_cu;
62 uint32_t engine_id;
63 uint32_t vendor_id;
64 uint32_t device_id;
65 uint32_t location_id;
66 uint32_t max_engine_clk_fcompute;
67 uint32_t max_engine_clk_ccompute;
68 uint16_t marketing_name[KFD_TOPOLOGY_PUBLIC_NAME_SIZE];
69};
70
71#define HSA_MEM_HEAP_TYPE_SYSTEM 0
72#define HSA_MEM_HEAP_TYPE_FB_PUBLIC 1
73#define HSA_MEM_HEAP_TYPE_FB_PRIVATE 2
74#define HSA_MEM_HEAP_TYPE_GPU_GDS 3
75#define HSA_MEM_HEAP_TYPE_GPU_LDS 4
76#define HSA_MEM_HEAP_TYPE_GPU_SCRATCH 5
77
78#define HSA_MEM_FLAGS_HOT_PLUGGABLE 0x00000001
79#define HSA_MEM_FLAGS_NON_VOLATILE 0x00000002
80#define HSA_MEM_FLAGS_RESERVED 0xfffffffc
81
82struct kfd_mem_properties {
83 struct list_head list;
84 uint32_t heap_type;
85 uint64_t size_in_bytes;
86 uint32_t flags;
87 uint32_t width;
88 uint32_t mem_clk_max;
89 struct kobject *kobj;
90 struct attribute attr;
91};
92
93#define KFD_TOPOLOGY_CPU_SIBLINGS 256
94
95#define HSA_CACHE_TYPE_DATA 0x00000001
96#define HSA_CACHE_TYPE_INSTRUCTION 0x00000002
97#define HSA_CACHE_TYPE_CPU 0x00000004
98#define HSA_CACHE_TYPE_HSACU 0x00000008
99#define HSA_CACHE_TYPE_RESERVED 0xfffffff0
100
101struct kfd_cache_properties {
102 struct list_head list;
103 uint32_t processor_id_low;
104 uint32_t cache_level;
105 uint32_t cache_size;
106 uint32_t cacheline_size;
107 uint32_t cachelines_per_tag;
108 uint32_t cache_assoc;
109 uint32_t cache_latency;
110 uint32_t cache_type;
111 uint8_t sibling_map[KFD_TOPOLOGY_CPU_SIBLINGS];
112 struct kobject *kobj;
113 struct attribute attr;
114};
115
116struct kfd_iolink_properties {
117 struct list_head list;
118 uint32_t iolink_type;
119 uint32_t ver_maj;
120 uint32_t ver_min;
121 uint32_t node_from;
122 uint32_t node_to;
123 uint32_t weight;
124 uint32_t min_latency;
125 uint32_t max_latency;
126 uint32_t min_bandwidth;
127 uint32_t max_bandwidth;
128 uint32_t rec_transfer_size;
129 uint32_t flags;
130 struct kobject *kobj;
131 struct attribute attr;
132};
133
134struct kfd_topology_device {
135 struct list_head list;
136 uint32_t gpu_id;
137 struct kfd_node_properties node_props;
138 uint32_t mem_bank_count;
139 struct list_head mem_props;
140 uint32_t cache_count;
141 struct list_head cache_props;
142 uint32_t io_link_count;
143 struct list_head io_link_props;
144 struct kfd_dev *gpu;
145 struct kobject *kobj_node;
146 struct kobject *kobj_mem;
147 struct kobject *kobj_cache;
148 struct kobject *kobj_iolink;
149 struct attribute attr_gpuid;
150 struct attribute attr_name;
151 struct attribute attr_props;
152};
153
154struct kfd_system_properties {
155 uint32_t num_devices; /* Number of H-NUMA nodes */
156 uint32_t generation_count;
157 uint64_t platform_oem;
158 uint64_t platform_id;
159 uint64_t platform_rev;
160 struct kobject *kobj_topology;
161 struct kobject *kobj_nodes;
162 struct attribute attr_genid;
163 struct attribute attr_props;
164};
165
166
167
168#endif /* __KFD_TOPOLOGY_H__ */
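
The capability word in struct kfd_node_properties mixes single-bit flags with
a packed watch-point count. A short sketch of decoding it with the masks
defined above; the helper names are hypothetical and rely only on this
header's own includes:

/* Hypothetical decoding helpers for kfd_node_properties.capability,
 * built purely from the HSA_CAP_* masks defined in this header. */
static inline uint32_t kfd_num_watch_points(const struct kfd_node_properties *p)
{
	return (p->capability & HSA_CAP_WATCH_POINTS_TOTALBITS_MASK) >>
		HSA_CAP_WATCH_POINTS_TOTALBITS_SHIFT;
}

static inline bool kfd_node_hot_pluggable(const struct kfd_node_properties *p)
{
	return p->capability & HSA_CAP_HOT_PLUGGABLE;
}
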
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
new file mode 100644
index 000000000000..9c729dd8dd50
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -0,0 +1,185 @@
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23/*
24 * This file defines the private interface between the
25 * AMD kernel graphics drivers and the AMD KFD.
26 */
27
28#ifndef KGD_KFD_INTERFACE_H_INCLUDED
29#define KGD_KFD_INTERFACE_H_INCLUDED
30
31#include <linux/types.h>
32
33struct pci_dev;
34
35#define KFD_INTERFACE_VERSION 1
36
37struct kfd_dev;
38struct kgd_dev;
39
40struct kgd_mem;
41
42enum kgd_memory_pool {
43 KGD_POOL_SYSTEM_CACHEABLE = 1,
44 KGD_POOL_SYSTEM_WRITECOMBINE = 2,
45 KGD_POOL_FRAMEBUFFER = 3,
46};
47
48struct kgd2kfd_shared_resources {
49 /* Bit n == 1 means VMID n is available for KFD. */
50 unsigned int compute_vmid_bitmap;
51
52 /* Compute pipes are counted starting from MEC0/pipe0 as 0. */
53 unsigned int first_compute_pipe;
54
55 /* Number of MEC pipes available for KFD. */
56 unsigned int compute_pipe_count;
57
58 /* Base address of doorbell aperture. */
59 phys_addr_t doorbell_physical_address;
60
61 /* Size in bytes of doorbell aperture. */
62 size_t doorbell_aperture_size;
63
64 /* Number of bytes at start of aperture reserved for KGD. */
65 size_t doorbell_start_offset;
66};
67
68/**
69 * struct kgd2kfd_calls
70 *
71 * @exit: Notifies amdkfd that kgd module is unloaded
72 *
73 * @probe: Notifies amdkfd about a probe done on a device in the kgd driver.
74 *
75 * @device_init: Initialize the newly probed device (if it is a device that
76 * amdkfd supports)
77 *
78 * @device_exit: Notifies amdkfd about a removal of a kgd device
79 *
80 * @suspend: Notifies amdkfd about a suspend action done to a kgd device
81 *
82 * @resume: Notifies amdkfd about a resume action done to a kgd device
83 *
84 * This structure contains function callback pointers so that the kgd driver
85 * can notify amdkfd about certain status changes.
86 *
87 */
88struct kgd2kfd_calls {
89 void (*exit)(void);
90 struct kfd_dev* (*probe)(struct kgd_dev *kgd, struct pci_dev *pdev);
91 bool (*device_init)(struct kfd_dev *kfd,
92 const struct kgd2kfd_shared_resources *gpu_resources);
93 void (*device_exit)(struct kfd_dev *kfd);
94 void (*interrupt)(struct kfd_dev *kfd, const void *ih_ring_entry);
95 void (*suspend)(struct kfd_dev *kfd);
96 int (*resume)(struct kfd_dev *kfd);
97};
98
99/**
100 * struct kfd2kgd_calls
101 *
102 * @init_sa_manager: Initialize an instance of the sa manager, used by
103 * amdkfd for all system memory allocations that are mapped to the GART
104 * address space
105 *
106 * @fini_sa_manager: Releases all memory allocations for amdkfd that are
107 * handled by the kgd sa manager
108 *
109 * @allocate_mem: Allocate a buffer from amdkfd's sa manager. The buffer can
110 * be used for mqds, hpds, kernel queue, fence and runlists
111 *
112 * @free_mem: Frees a buffer that was allocated by amdkfd's sa manager
113 *
114 * @get_vmem_size: Retrieves (physical) size of VRAM
115 *
116 * @get_gpu_clock_counter: Retrieves GPU clock counter
117 *
118 * @get_max_engine_clock_in_mhz: Retrieves maximum GPU clock in MHz
119 *
120 * @program_sh_mem_settings: A function that should initialize the memory
121 * properties such as main aperture memory type (cached / non-cached) and
122 * secondary aperture base address, size and memory type.
123 * This function is used only for no cp scheduling mode.
124 *
125 * @set_pasid_vmid_mapping: Exposes pasid/vmid pair to the H/W for no cp
126 * scheduling mode. Only used for no cp scheduling mode.
127 *
128 * @init_memory: Initializes memory apertures to fixed base/limit address
129 * and non-cached memory types.
130 *
131 * @init_pipeline: Initializes the compute pipelines.
132 *
133 * @hqd_load: Loads the mqd structure to a H/W hqd slot. Used only for no cp
134 * scheduling mode.
135 *
136 * @hqd_is_occupies: Checks if a hqd slot is occupied.
137 *
138 * @hqd_destroy: Destructs and preempts the queue assigned to that hqd slot.
139 *
140 * This structure contains function pointers to services that the kgd driver
141 * provides to the amdkfd driver.
142 *
143 */
144struct kfd2kgd_calls {
145 /* Memory management. */
146 int (*init_sa_manager)(struct kgd_dev *kgd, unsigned int size);
147 void (*fini_sa_manager)(struct kgd_dev *kgd);
148 int (*allocate_mem)(struct kgd_dev *kgd, size_t size, size_t alignment,
149 enum kgd_memory_pool pool, struct kgd_mem **mem);
150
151 void (*free_mem)(struct kgd_dev *kgd, struct kgd_mem *mem);
152
153 uint64_t (*get_vmem_size)(struct kgd_dev *kgd);
154 uint64_t (*get_gpu_clock_counter)(struct kgd_dev *kgd);
155
156 uint32_t (*get_max_engine_clock_in_mhz)(struct kgd_dev *kgd);
157
158 /* Register access functions */
159 void (*program_sh_mem_settings)(struct kgd_dev *kgd, uint32_t vmid,
160 uint32_t sh_mem_config, uint32_t sh_mem_ape1_base,
161 uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases);
162
163 int (*set_pasid_vmid_mapping)(struct kgd_dev *kgd, unsigned int pasid,
164 unsigned int vmid);
165
166 int (*init_memory)(struct kgd_dev *kgd);
167 int (*init_pipeline)(struct kgd_dev *kgd, uint32_t pipe_id,
168 uint32_t hpd_size, uint64_t hpd_gpu_addr);
169
170 int (*hqd_load)(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
171 uint32_t queue_id, uint32_t __user *wptr);
172
173 bool (*hqd_is_occupies)(struct kgd_dev *kgd, uint64_t queue_address,
174 uint32_t pipe_id, uint32_t queue_id);
175
176 int (*hqd_destroy)(struct kgd_dev *kgd, uint32_t reset_type,
177 unsigned int timeout, uint32_t pipe_id,
178 uint32_t queue_id);
179};
180
181bool kgd2kfd_init(unsigned interface_version,
182 const struct kfd2kgd_calls *f2g,
183 const struct kgd2kfd_calls **g2f);
184
185#endif /* KGD_KFD_INTERFACE_H_INCLUDED */
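
kgd2kfd_init() is the interface's single entry point: the kgd driver hands in
its kfd2kgd_calls table and receives amdkfd's kgd2kfd_calls table back. A
minimal registration sketch follows, assuming a hypothetical kgd driver (all
my_* names are stand-ins, and most of the table is elided):

#include "kgd_kfd_interface.h"

/* Hypothetical stub, standing in for the driver's real implementation. */
static uint64_t my_get_vmem_size(struct kgd_dev *kgd)
{
	return 0;
}

/* Sketch: only one member shown; a real kgd driver fills in the whole
 * kfd2kgd_calls table declared above. */
static const struct kfd2kgd_calls my_kfd2kgd = {
	.get_vmem_size = my_get_vmem_size,
};

static const struct kgd2kfd_calls *kgd2kfd;

/* On success amdkfd hands back its own callback table in kgd2kfd; the kgd
 * driver then forwards probe/device_init/suspend/resume/interrupt events
 * through it. */
static bool my_register_with_kfd(void)
{
	return kgd2kfd_init(KFD_INTERFACE_VERSION, &my_kfd2kgd, &kgd2kfd);
}
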
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index e4a1490b42c2..e3a7a5078e5c 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -12,6 +12,7 @@
 #include <linux/platform_device.h>
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
 #include "armada_crtc.h"
 #include "armada_drm.h"
 #include "armada_fb.h"
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 9dc0fd5c1ea4..b7ee2634e47c 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -31,6 +31,7 @@
 #include <drm/drmP.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
 #include "ast_drv.h"
 
 #include "ast_tables.h"
diff --git a/drivers/gpu/drm/bochs/bochs_fbdev.c b/drivers/gpu/drm/bochs/bochs_fbdev.c
index fe95d31cd110..61dbf09dff5d 100644
--- a/drivers/gpu/drm/bochs/bochs_fbdev.c
+++ b/drivers/gpu/drm/bochs/bochs_fbdev.c
@@ -9,6 +9,17 @@
 
 /* ---------------------------------------------------------------------- */
 
+static int bochsfb_mmap(struct fb_info *info,
+			struct vm_area_struct *vma)
+{
+	struct drm_fb_helper *fb_helper = info->par;
+	struct bochs_device *bochs =
+		container_of(fb_helper, struct bochs_device, fb.helper);
+	struct bochs_bo *bo = gem_to_bochs_bo(bochs->fb.gfb.obj);
+
+	return ttm_fbdev_mmap(vma, &bo->bo);
+}
+
 static struct fb_ops bochsfb_ops = {
 	.owner = THIS_MODULE,
 	.fb_check_var = drm_fb_helper_check_var,
@@ -19,6 +30,7 @@ static struct fb_ops bochsfb_ops = {
 	.fb_pan_display = drm_fb_helper_pan_display,
 	.fb_blank = drm_fb_helper_blank,
 	.fb_setcmap = drm_fb_helper_setcmap,
+	.fb_mmap = bochsfb_mmap,
 };
 
 static int bochsfb_create_object(struct bochs_device *bochs,
@@ -123,11 +135,9 @@ static int bochsfb_create(struct drm_fb_helper *helper,
 	info->screen_base = bo->kmap.virtual;
 	info->screen_size = size;
 
-#if 0
-	/* FIXME: get this right for mmap(/dev/fb0) */
-	info->fix.smem_start = bochs_bo_mmap_offset(bo);
+	drm_vma_offset_remove(&bo->bo.bdev->vma_manager, &bo->bo.vma_node);
+	info->fix.smem_start = 0;
 	info->fix.smem_len = size;
-#endif
 
 	ret = fb_alloc_cmap(&info->cmap, 256, 0);
 	if (ret) {
diff --git a/drivers/gpu/drm/bochs/bochs_hw.c b/drivers/gpu/drm/bochs/bochs_hw.c
index dbe619e6aab4..460389702d31 100644
--- a/drivers/gpu/drm/bochs/bochs_hw.c
+++ b/drivers/gpu/drm/bochs/bochs_hw.c
@@ -51,11 +51,10 @@ int bochs_hw_init(struct drm_device *dev, uint32_t flags)
 {
 	struct bochs_device *bochs = dev->dev_private;
 	struct pci_dev *pdev = dev->pdev;
-	unsigned long addr, size, mem, ioaddr, iosize;
+	unsigned long addr, size, mem, ioaddr, iosize, qext_size;
 	u16 id;
 
-	if (/* (ent->driver_data == BOCHS_QEMU_STDVGA) && */
-	    (pdev->resource[2].flags & IORESOURCE_MEM)) {
+	if (pdev->resource[2].flags & IORESOURCE_MEM) {
 		/* mmio bar with vga and bochs registers present */
 		if (pci_request_region(pdev, 2, "bochs-drm") != 0) {
 			DRM_ERROR("Cannot request mmio region\n");
@@ -116,6 +115,24 @@ int bochs_hw_init(struct drm_device *dev, uint32_t flags)
 		 size / 1024, addr,
 		 bochs->ioports ? "ioports" : "mmio",
 		 ioaddr);
+
+	if (bochs->mmio && pdev->revision >= 2) {
+		qext_size = readl(bochs->mmio + 0x600);
+		if (qext_size < 4 || qext_size > iosize)
+			goto noext;
+		DRM_DEBUG("Found qemu ext regs, size %ld\n", qext_size);
+		if (qext_size >= 8) {
+#ifdef __BIG_ENDIAN
+			writel(0xbebebebe, bochs->mmio + 0x604);
+#else
+			writel(0x1e1e1e1e, bochs->mmio + 0x604);
+#endif
+			DRM_DEBUG(" qext endian: 0x%x\n",
+				  readl(bochs->mmio + 0x604));
+		}
+	}
+
+noext:
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 6b7efcf363d6..85f0f8cf1fb8 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -6,6 +6,7 @@
  */
 
 #include "bochs.h"
+#include <drm/drm_plane_helper.h>
 
 static int defx = 1024;
 static int defy = 768;
@@ -108,11 +109,32 @@ static void bochs_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
 {
 }
 
+static int bochs_crtc_page_flip(struct drm_crtc *crtc,
+				struct drm_framebuffer *fb,
+				struct drm_pending_vblank_event *event,
+				uint32_t page_flip_flags)
+{
+	struct bochs_device *bochs =
+		container_of(crtc, struct bochs_device, crtc);
+	struct drm_framebuffer *old_fb = crtc->primary->fb;
+	unsigned long irqflags;
+
+	crtc->primary->fb = fb;
+	bochs_crtc_mode_set_base(crtc, 0, 0, old_fb);
+	if (event) {
+		spin_lock_irqsave(&bochs->dev->event_lock, irqflags);
+		drm_send_vblank_event(bochs->dev, -1, event);
+		spin_unlock_irqrestore(&bochs->dev->event_lock, irqflags);
+	}
+	return 0;
+}
+
 /* These provide the minimum set of functions required to handle a CRTC */
 static const struct drm_crtc_funcs bochs_crtc_funcs = {
 	.gamma_set = bochs_crtc_gamma_set,
 	.set_config = drm_crtc_helper_set_config,
 	.destroy = drm_crtc_cleanup,
+	.page_flip = bochs_crtc_page_flip,
 };
 
 static const struct drm_crtc_helper_funcs bochs_helper_funcs = {
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index d44e69daa239..693a4565c4ff 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -210,6 +210,9 @@ int cirrus_framebuffer_init(struct drm_device *dev,
 			    struct drm_mode_fb_cmd2 *mode_cmd,
 			    struct drm_gem_object *obj);
 
+bool cirrus_check_framebuffer(struct cirrus_device *cdev, int width, int height,
+			      int bpp, int pitch);
+
 				/* cirrus_display.c */
 int cirrus_modeset_init(struct cirrus_device *cdev);
 void cirrus_modeset_fini(struct cirrus_device *cdev);
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index d231b1c317af..502a89eb54b5 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -139,6 +139,7 @@ static int cirrusfb_create_object(struct cirrus_fbdev *afbdev,
 			       struct drm_gem_object **gobj_p)
 {
 	struct drm_device *dev = afbdev->helper.dev;
+	struct cirrus_device *cdev = dev->dev_private;
 	u32 bpp, depth;
 	u32 size;
 	struct drm_gem_object *gobj;
@@ -146,8 +147,10 @@ static int cirrusfb_create_object(struct cirrus_fbdev *afbdev,
 	int ret = 0;
 	drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
 
-	if (bpp > 24)
+	if (!cirrus_check_framebuffer(cdev, mode_cmd->width, mode_cmd->height,
+				      bpp, mode_cmd->pitches[0]))
 		return -EINVAL;
+
 	size = mode_cmd->pitches[0] * mode_cmd->height;
 	ret = cirrus_gem_create(dev, size, true, &gobj);
 	if (ret)
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index 99c1983f99d2..4c2d68e9102d 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -49,14 +49,16 @@ cirrus_user_framebuffer_create(struct drm_device *dev,
 			       struct drm_file *filp,
 			       struct drm_mode_fb_cmd2 *mode_cmd)
 {
+	struct cirrus_device *cdev = dev->dev_private;
 	struct drm_gem_object *obj;
 	struct cirrus_framebuffer *cirrus_fb;
 	int ret;
 	u32 bpp, depth;
 
 	drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
-	/* cirrus can't handle > 24bpp framebuffers at all */
-	if (bpp > 24)
+
+	if (!cirrus_check_framebuffer(cdev, mode_cmd->width, mode_cmd->height,
+				      bpp, mode_cmd->pitches[0]))
 		return ERR_PTR(-EINVAL);
 
 	obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
@@ -96,8 +98,7 @@ static int cirrus_vram_init(struct cirrus_device *cdev)
 {
 	/* BAR 0 is VRAM */
 	cdev->mc.vram_base = pci_resource_start(cdev->dev->pdev, 0);
-	/* We have 4MB of VRAM */
-	cdev->mc.vram_size = 4 * 1024 * 1024;
+	cdev->mc.vram_size = pci_resource_len(cdev->dev->pdev, 0);
 
 	if (!request_mem_region(cdev->mc.vram_base, cdev->mc.vram_size,
 				"cirrusdrmfb_vram")) {
@@ -179,17 +180,22 @@ int cirrus_driver_load(struct drm_device *dev, unsigned long flags)
 	}
 
 	r = cirrus_mm_init(cdev);
-	if (r)
+	if (r) {
 		dev_err(&dev->pdev->dev, "fatal err on mm init\n");
+		goto out;
+	}
 
 	r = cirrus_modeset_init(cdev);
-	if (r)
+	if (r) {
 		dev_err(&dev->pdev->dev, "Fatal error during modeset init: %d\n", r);
+		goto out;
+	}
 
 	dev->mode_config.funcs = (void *)&cirrus_mode_funcs;
+
+	return 0;
 out:
-	if (r)
-		cirrus_driver_unload(dev);
+	cirrus_driver_unload(dev);
 	return r;
 }
 
@@ -307,3 +313,21 @@ out_unlock:
 	return ret;
 
 }
+
+bool cirrus_check_framebuffer(struct cirrus_device *cdev, int width, int height,
+			      int bpp, int pitch)
+{
+	const int max_pitch = 0x1FF << 3; /* (4096 - 1) & ~111b bytes */
+	const int max_size = cdev->mc.vram_size;
+
+	if (bpp > 32)
+		return false;
+
+	if (pitch > max_pitch)
+		return false;
+
+	if (pitch * height > max_size)
+		return false;
+
+	return true;
+}
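
One detail in cirrus_check_framebuffer() is worth a quick arithmetic check:
0x1FF << 3 is 511 * 8 = 4088 bytes, which agrees with the comment's
derivation (4096 - 1) & ~111b, i.e. the largest multiple of 8 below 4096. A
standalone verification:

#include <assert.h>

int main(void)
{
	/* The pitch cap used by cirrus_check_framebuffer() above. */
	assert((0x1FF << 3) == 4088);
	/* The comment's derivation: (4096 - 1) & ~111b bytes. */
	assert(((4096 - 1) & ~7) == 4088);
	return 0;
}
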
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index c7c5a9d91fa0..99d4a74ffeaf 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -16,6 +16,7 @@
  */
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
 
 #include <video/cirrus.h>
 
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
new file mode 100644
index 000000000000..ff5f034cc405
--- /dev/null
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -0,0 +1,657 @@
1/*
2 * Copyright (C) 2014 Red Hat
3 * Copyright (C) 2014 Intel Corp.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Rob Clark <robdclark@gmail.com>
25 * Daniel Vetter <daniel.vetter@ffwll.ch>
26 */
27
28
29#include <drm/drmP.h>
30#include <drm/drm_atomic.h>
31#include <drm/drm_plane_helper.h>
32
33static void kfree_state(struct drm_atomic_state *state)
34{
35 kfree(state->connectors);
36 kfree(state->connector_states);
37 kfree(state->crtcs);
38 kfree(state->crtc_states);
39 kfree(state->planes);
40 kfree(state->plane_states);
41 kfree(state);
42}
43
44/**
45 * drm_atomic_state_alloc - allocate atomic state
46 * @dev: DRM device
47 *
48 * This allocates an empty atomic state to track updates.
49 */
50struct drm_atomic_state *
51drm_atomic_state_alloc(struct drm_device *dev)
52{
53 struct drm_atomic_state *state;
54
55 state = kzalloc(sizeof(*state), GFP_KERNEL);
56 if (!state)
57 return NULL;
58
59 state->num_connector = ACCESS_ONCE(dev->mode_config.num_connector);
60
61 state->crtcs = kcalloc(dev->mode_config.num_crtc,
62 sizeof(*state->crtcs), GFP_KERNEL);
63 if (!state->crtcs)
64 goto fail;
65 state->crtc_states = kcalloc(dev->mode_config.num_crtc,
66 sizeof(*state->crtc_states), GFP_KERNEL);
67 if (!state->crtc_states)
68 goto fail;
69 state->planes = kcalloc(dev->mode_config.num_total_plane,
70 sizeof(*state->planes), GFP_KERNEL);
71 if (!state->planes)
72 goto fail;
73 state->plane_states = kcalloc(dev->mode_config.num_total_plane,
74 sizeof(*state->plane_states), GFP_KERNEL);
75 if (!state->plane_states)
76 goto fail;
77 state->connectors = kcalloc(state->num_connector,
78 sizeof(*state->connectors),
79 GFP_KERNEL);
80 if (!state->connectors)
81 goto fail;
82 state->connector_states = kcalloc(state->num_connector,
83 sizeof(*state->connector_states),
84 GFP_KERNEL);
85 if (!state->connector_states)
86 goto fail;
87
88 state->dev = dev;
89
90 DRM_DEBUG_KMS("Allocate atomic state %p\n", state);
91
92 return state;
93fail:
94 kfree_state(state);
95
96 return NULL;
97}
98EXPORT_SYMBOL(drm_atomic_state_alloc);
99
100/**
101 * drm_atomic_state_clear - clear state object
102 * @state: atomic state
103 *
104 * When the w/w mutex algorithm detects a deadlock we need to back off and drop
105 * all locks. So someone else could sneak in and change the current modeset
106 * configuration. Which means that all the state assembled in @state is no
107 * longer an atomic update to the current state, but to some arbitrary earlier
108 * state. Which could break assumptions the driver's ->atomic_check likely
109 * relies on.
110 *
111 * Hence we must clear all cached state and completely start over, using this
112 * function.
113 */
114void drm_atomic_state_clear(struct drm_atomic_state *state)
115{
116 struct drm_device *dev = state->dev;
117 struct drm_mode_config *config = &dev->mode_config;
118 int i;
119
120 DRM_DEBUG_KMS("Clearing atomic state %p\n", state);
121
122 for (i = 0; i < state->num_connector; i++) {
123 struct drm_connector *connector = state->connectors[i];
124
125 if (!connector)
126 continue;
127
128 WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
129
130 connector->funcs->atomic_destroy_state(connector,
131 state->connector_states[i]);
132 }
133
134 for (i = 0; i < config->num_crtc; i++) {
135 struct drm_crtc *crtc = state->crtcs[i];
136
137 if (!crtc)
138 continue;
139
140 crtc->funcs->atomic_destroy_state(crtc,
141 state->crtc_states[i]);
142 }
143
144 for (i = 0; i < config->num_total_plane; i++) {
145 struct drm_plane *plane = state->planes[i];
146
147 if (!plane)
148 continue;
149
150 plane->funcs->atomic_destroy_state(plane,
151 state->plane_states[i]);
152 }
153}
154EXPORT_SYMBOL(drm_atomic_state_clear);
155
156/**
157 * drm_atomic_state_free - free all memory for an atomic state
158 * @state: atomic state to deallocate
159 *
160 * This frees all memory associated with an atomic state, including all the
161 * per-object state for planes, crtcs and connectors.
162 */
163void drm_atomic_state_free(struct drm_atomic_state *state)
164{
165 drm_atomic_state_clear(state);
166
167 DRM_DEBUG_KMS("Freeing atomic state %p\n", state);
168
169 kfree_state(state);
170}
171EXPORT_SYMBOL(drm_atomic_state_free);
172
173/**
174 * drm_atomic_get_crtc_state - get crtc state
175 * @state: global atomic state object
176 * @crtc: crtc to get state object for
177 *
178 * This function returns the crtc state for the given crtc, allocating it if
179 * needed. It will also grab the relevant crtc lock to make sure that the state
180 * is consistent.
181 *
182 * Returns:
183 *
184 * Either the allocated state or the error code encoded into the pointer. When
185 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
186 * entire atomic sequence must be restarted. All other errors are fatal.
187 */
188struct drm_crtc_state *
189drm_atomic_get_crtc_state(struct drm_atomic_state *state,
190 struct drm_crtc *crtc)
191{
192 int ret, index;
193 struct drm_crtc_state *crtc_state;
194
195 index = drm_crtc_index(crtc);
196
197 if (state->crtc_states[index])
198 return state->crtc_states[index];
199
200 ret = drm_modeset_lock(&crtc->mutex, state->acquire_ctx);
201 if (ret)
202 return ERR_PTR(ret);
203
204 crtc_state = crtc->funcs->atomic_duplicate_state(crtc);
205 if (!crtc_state)
206 return ERR_PTR(-ENOMEM);
207
208 state->crtc_states[index] = crtc_state;
209 state->crtcs[index] = crtc;
210 crtc_state->state = state;
211
212 DRM_DEBUG_KMS("Added [CRTC:%d] %p state to %p\n",
213 crtc->base.id, crtc_state, state);
214
215 return crtc_state;
216}
217EXPORT_SYMBOL(drm_atomic_get_crtc_state);
218
219/**
220 * drm_atomic_get_plane_state - get plane state
221 * @state: global atomic state object
222 * @plane: plane to get state object for
223 *
224 * This function returns the plane state for the given plane, allocating it if
225 * needed. It will also grab the relevant plane lock to make sure that the state
226 * is consistent.
227 *
228 * Returns:
229 *
230 * Either the allocated state or the error code encoded into the pointer. When
231 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
232 * entire atomic sequence must be restarted. All other errors are fatal.
233 */
234struct drm_plane_state *
235drm_atomic_get_plane_state(struct drm_atomic_state *state,
236 struct drm_plane *plane)
237{
238 int ret, index;
239 struct drm_plane_state *plane_state;
240
241 index = drm_plane_index(plane);
242
243 if (state->plane_states[index])
244 return state->plane_states[index];
245
246 ret = drm_modeset_lock(&plane->mutex, state->acquire_ctx);
247 if (ret)
248 return ERR_PTR(ret);
249
250 plane_state = plane->funcs->atomic_duplicate_state(plane);
251 if (!plane_state)
252 return ERR_PTR(-ENOMEM);
253
254 state->plane_states[index] = plane_state;
255 state->planes[index] = plane;
256 plane_state->state = state;
257
258 DRM_DEBUG_KMS("Added [PLANE:%d] %p state to %p\n",
259 plane->base.id, plane_state, state);
260
261 if (plane_state->crtc) {
262 struct drm_crtc_state *crtc_state;
263
264 crtc_state = drm_atomic_get_crtc_state(state,
265 plane_state->crtc);
266 if (IS_ERR(crtc_state))
267 return ERR_CAST(crtc_state);
268 }
269
270 return plane_state;
271}
272EXPORT_SYMBOL(drm_atomic_get_plane_state);
273
274/**
275 * drm_atomic_get_connector_state - get connector state
276 * @state: global atomic state object
277 * @connector: connector to get state object for
278 *
279 * This function returns the connector state for the given connector,
280 * allocating it if needed. It will also grab the relevant connector lock to
281 * make sure that the state is consistent.
282 *
283 * Returns:
284 *
285 * Either the allocated state or the error code encoded into the pointer. When
286 * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
287 * entire atomic sequence must be restarted. All other errors are fatal.
288 */
289struct drm_connector_state *
290drm_atomic_get_connector_state(struct drm_atomic_state *state,
291 struct drm_connector *connector)
292{
293 int ret, index;
294 struct drm_mode_config *config = &connector->dev->mode_config;
295 struct drm_connector_state *connector_state;
296
297 ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
298 if (ret)
299 return ERR_PTR(ret);
300
301 index = drm_connector_index(connector);
302
303 /*
304 * Construction of atomic state updates can race with a connector
305 * hot-add which might overflow. In this case flip the table and just
306 * restart the entire ioctl - no one is fast enough to livelock a cpu
307 * with physical hotplug events anyway.
308 *
309 * Note that we only grab the indexes once we have the right lock to
310 * prevent hotplug/unplugging of connectors. So removal is no problem,
311 * at most the array is a bit too large.
312 */
313 if (index >= state->num_connector) {
314 DRM_DEBUG_KMS("Hot-added connector would overflow state array, restarting\n");
315 return ERR_PTR(-EAGAIN);
316 }
317
318 if (state->connector_states[index])
319 return state->connector_states[index];
320
321 connector_state = connector->funcs->atomic_duplicate_state(connector);
322 if (!connector_state)
323 return ERR_PTR(-ENOMEM);
324
325 state->connector_states[index] = connector_state;
326 state->connectors[index] = connector;
327 connector_state->state = state;
328
329 DRM_DEBUG_KMS("Added [CONNECTOR:%d] %p state to %p\n",
330 connector->base.id, connector_state, state);
331
332 if (connector_state->crtc) {
333 struct drm_crtc_state *crtc_state;
334
335 crtc_state = drm_atomic_get_crtc_state(state,
336 connector_state->crtc);
337 if (IS_ERR(crtc_state))
338 return ERR_CAST(crtc_state);
339 }
340
341 return connector_state;
342}
343EXPORT_SYMBOL(drm_atomic_get_connector_state);
344
345/**
346 * drm_atomic_set_crtc_for_plane - set crtc for plane
347 * @state: the incoming atomic state
348 * @plane: the plane whose incoming state to update
349 * @crtc: crtc to use for the plane
350 *
351 * Changing the assigned crtc for a plane requires us to grab the lock and state
352 * for the new crtc, as needed. This function takes care of all these details
353 * besides updating the pointer in the state object itself.
354 *
355 * Returns:
356 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
357 * then the w/w mutex code has detected a deadlock and the entire atomic
358 * sequence must be restarted. All other errors are fatal.
359 */
360int
361drm_atomic_set_crtc_for_plane(struct drm_atomic_state *state,
362 struct drm_plane *plane, struct drm_crtc *crtc)
363{
364 struct drm_plane_state *plane_state =
365 drm_atomic_get_plane_state(state, plane);
366 struct drm_crtc_state *crtc_state;
367
368 if (WARN_ON(IS_ERR(plane_state)))
369 return PTR_ERR(plane_state);
370
371 if (plane_state->crtc) {
372 crtc_state = drm_atomic_get_crtc_state(plane_state->state,
373 plane_state->crtc);
374 if (WARN_ON(IS_ERR(crtc_state)))
375 return PTR_ERR(crtc_state);
376
377 crtc_state->plane_mask &= ~(1 << drm_plane_index(plane));
378 }
379
380 plane_state->crtc = crtc;
381
382 if (crtc) {
383 crtc_state = drm_atomic_get_crtc_state(plane_state->state,
384 crtc);
385 if (IS_ERR(crtc_state))
386 return PTR_ERR(crtc_state);
387 crtc_state->plane_mask |= (1 << drm_plane_index(plane));
388 }
389
390 if (crtc)
391 DRM_DEBUG_KMS("Link plane state %p to [CRTC:%d]\n",
392 plane_state, crtc->base.id);
393 else
394 DRM_DEBUG_KMS("Link plane state %p to [NOCRTC]\n", plane_state);
395
396 return 0;
397}
398EXPORT_SYMBOL(drm_atomic_set_crtc_for_plane);
399
400/**
401 * drm_atomic_set_fb_for_plane - set crtc for plane
402 * @plane_state: atomic state object for the plane
403 * @fb: fb to use for the plane
404 *
405 * Changing the assigned framebuffer for a plane requires us to grab a reference
406 * to the new fb and drop the reference to the old fb, if there is one. This
407 * function takes care of all these details besides updating the pointer in the
408 * state object itself.
409 */
410void
411drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
412 struct drm_framebuffer *fb)
413{
414 if (plane_state->fb)
415 drm_framebuffer_unreference(plane_state->fb);
416 if (fb)
417 drm_framebuffer_reference(fb);
418 plane_state->fb = fb;
419
420 if (fb)
421 DRM_DEBUG_KMS("Set [FB:%d] for plane state %p\n",
422 fb->base.id, plane_state);
423 else
424 DRM_DEBUG_KMS("Set [NOFB] for plane state %p\n", plane_state);
425}
426EXPORT_SYMBOL(drm_atomic_set_fb_for_plane);
427
428/**
429 * drm_atomic_set_crtc_for_connector - set crtc for connector
430 * @conn_state: atomic state object for the connector
431 * @crtc: crtc to use for the connector
432 *
433 * Changing the assigned crtc for a connector requires us to grab the lock and
434 * state for the new crtc, as needed. This function takes care of all these
435 * details besides updating the pointer in the state object itself.
436 *
437 * Returns:
438 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
439 * then the w/w mutex code has detected a deadlock and the entire atomic
440 * sequence must be restarted. All other errors are fatal.
441 */
442int
443drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
444 struct drm_crtc *crtc)
445{
446 struct drm_crtc_state *crtc_state;
447
448 if (crtc) {
449 crtc_state = drm_atomic_get_crtc_state(conn_state->state, crtc);
450 if (IS_ERR(crtc_state))
451 return PTR_ERR(crtc_state);
452 }
453
454 conn_state->crtc = crtc;
455
456 if (crtc)
457 DRM_DEBUG_KMS("Link connector state %p to [CRTC:%d]\n",
458 conn_state, crtc->base.id);
459 else
460 DRM_DEBUG_KMS("Link connector state %p to [NOCRTC]\n",
461 conn_state);
462
463 return 0;
464}
465EXPORT_SYMBOL(drm_atomic_set_crtc_for_connector);
466
467/**
468 * drm_atomic_add_affected_connectors - add connectors for crtc
469 * @state: atomic state
470 * @crtc: DRM crtc
471 *
472 * This function walks the current configuration and adds all connectors
473 * currently using @crtc to the atomic configuration @state. Note that this
474 * function must acquire the connection mutex. This can potentially cause
475 * unneeded serialization if the update is just for the planes on one crtc. Hence
476 * drivers and helpers should only call this when really needed (e.g. when a
477 * full modeset needs to happen due to some change).
478 *
479 * Returns:
480 * 0 on success or can fail with -EDEADLK or -ENOMEM. When the error is EDEADLK
481 * then the w/w mutex code has detected a deadlock and the entire atomic
482 * sequence must be restarted. All other errors are fatal.
483 */
484int
485drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
486 struct drm_crtc *crtc)
487{
488 struct drm_mode_config *config = &state->dev->mode_config;
489 struct drm_connector *connector;
490 struct drm_connector_state *conn_state;
491 int ret;
492
493 ret = drm_modeset_lock(&config->connection_mutex, state->acquire_ctx);
494 if (ret)
495 return ret;
496
497 DRM_DEBUG_KMS("Adding all current connectors for [CRTC:%d] to %p\n",
498 crtc->base.id, state);
499
500 /*
501 * Changed connectors are already in @state, so only need to look at the
502 * current configuration.
503 */
504 list_for_each_entry(connector, &config->connector_list, head) {
505 if (connector->state->crtc != crtc)
506 continue;
507
508 conn_state = drm_atomic_get_connector_state(state, connector);
509 if (IS_ERR(conn_state))
510 return PTR_ERR(conn_state);
511 }
512
513 return 0;
514}
515EXPORT_SYMBOL(drm_atomic_add_affected_connectors);
516
517/**
518 * drm_atomic_connectors_for_crtc - count number of connected outputs
519 * @state: atomic state
520 * @crtc: DRM crtc
521 *
522 * This function counts all connectors which will be connected to @crtc
523 * according to @state. Useful to recompute the enable state for @crtc.
524 */
525int
526drm_atomic_connectors_for_crtc(struct drm_atomic_state *state,
527 struct drm_crtc *crtc)
528{
529 int i, num_connected_connectors = 0;
530
531 for (i = 0; i < state->num_connector; i++) {
532 struct drm_connector_state *conn_state;
533
534 conn_state = state->connector_states[i];
535
536 if (conn_state && conn_state->crtc == crtc)
537 num_connected_connectors++;
538 }
539
540 DRM_DEBUG_KMS("State %p has %i connectors for [CRTC:%d]\n",
541 state, num_connected_connectors, crtc->base.id);
542
543 return num_connected_connectors;
544}
545EXPORT_SYMBOL(drm_atomic_connectors_for_crtc);
546
547/**
548 * drm_atomic_legacy_backoff - locking backoff for legacy ioctls
549 * @state: atomic state
550 *
551 * This function should be used by legacy entry points which don't understand
552 * -EDEADLK semantics. For simplicity this one will grab all modeset locks after
553 * the slowpath completed.
554 */
555void drm_atomic_legacy_backoff(struct drm_atomic_state *state)
556{
557 int ret;
558
559retry:
560 drm_modeset_backoff(state->acquire_ctx);
561
562 ret = drm_modeset_lock(&state->dev->mode_config.connection_mutex,
563 state->acquire_ctx);
564 if (ret)
565 goto retry;
566 ret = drm_modeset_lock_all_crtcs(state->dev,
567 state->acquire_ctx);
568 if (ret)
569 goto retry;
570}
571EXPORT_SYMBOL(drm_atomic_legacy_backoff);
572
573/**
574 * drm_atomic_check_only - check whether a given config would work
575 * @state: atomic configuration to check
576 *
577 * Note that this function can return -EDEADLK if the driver needed to acquire
578 * more locks but encountered a deadlock. The caller must then do the usual w/w
579 * backoff dance and restart. All other errors are fatal.
580 *
581 * Returns:
582 * 0 on success, negative error code on failure.
583 */
584int drm_atomic_check_only(struct drm_atomic_state *state)
585{
586 struct drm_mode_config *config = &state->dev->mode_config;
587
588 DRM_DEBUG_KMS("checking %p\n", state);
589
590 if (config->funcs->atomic_check)
591 return config->funcs->atomic_check(state->dev, state);
592 else
593 return 0;
594}
595EXPORT_SYMBOL(drm_atomic_check_only);
596
597/**
598 * drm_atomic_commit - commit configuration atomically
599 * @state: atomic configuration to check
600 *
601 * Note that this function can return -EDEADLK if the driver needed to acquire
602 * more locks but encountered a deadlock. The caller must then do the usual w/w
603 * backoff dance and restart. All other errors are fatal.
604 *
605 * Also note that on successful execution ownership of @state is transferred
606 * from the caller of this function to the function itself. The caller must not
607 * free or in any other way access @state. If the function fails then the caller
608 * must clean up @state itself.
609 *
610 * Returns:
611 * 0 on success, negative error code on failure.
612 */
613int drm_atomic_commit(struct drm_atomic_state *state)
614{
615 struct drm_mode_config *config = &state->dev->mode_config;
616 int ret;
617
618 ret = drm_atomic_check_only(state);
619 if (ret)
620 return ret;
621
622 DRM_DEBUG_KMS("commiting %p\n", state);
623
624 return config->funcs->atomic_commit(state->dev, state, false);
625}
626EXPORT_SYMBOL(drm_atomic_commit);
627
628/**
629 * drm_atomic_async_commit - atomic&async configuration commit
630 * @state: atomic configuration to check
631 *
632 * Note that this function can return -EDEADLK if the driver needed to acquire
633 * more locks but encountered a deadlock. The caller must then do the usual w/w
634 * backoff dance and restart. All other errors are fatal.
635 *
636 * Also note that on successful execution ownership of @state is transferred
637 * from the caller of this function to the function itself. The caller must not
638 * free or in any other way access @state. If the function fails then the caller
639 * must clean up @state itself.
640 *
641 * Returns:
642 * 0 on success, negative error code on failure.
643 */
644int drm_atomic_async_commit(struct drm_atomic_state *state)
645{
646 struct drm_mode_config *config = &state->dev->mode_config;
647 int ret;
648
649 ret = drm_atomic_check_only(state);
650 if (ret)
651 return ret;
652
653 DRM_DEBUG_KMS("commiting %p asynchronously\n", state);
654
655 return config->funcs->atomic_commit(state->dev, state, true);
656}
657EXPORT_SYMBOL(drm_atomic_async_commit);
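
The kerneldoc above repeats one contract throughout: -EDEADLK means the w/w
mutex code detected a deadlock, and the caller must drop all cached state,
back off and restart. A minimal sketch of that dance for a plane update
follows, under stated assumptions: sketch_atomic_update() is a hypothetical
driver helper (not part of this patch), and the caller provides an
initialized drm_modeset_acquire_ctx.

#include <drm/drmP.h>
#include <drm/drm_atomic.h>

/* Hypothetical helper showing the -EDEADLK backoff dance described in the
 * kerneldoc above: build state, try to commit, and on deadlock clear the
 * state, back off and retry from scratch. */
static int sketch_atomic_update(struct drm_device *dev,
				struct drm_plane *plane,
				struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_atomic_state *state;
	struct drm_plane_state *plane_state;
	int ret;

	state = drm_atomic_state_alloc(dev);
	if (!state)
		return -ENOMEM;
	state->acquire_ctx = ctx;

retry:
	plane_state = drm_atomic_get_plane_state(state, plane);
	if (IS_ERR(plane_state)) {
		ret = PTR_ERR(plane_state);
		goto fail;
	}

	ret = drm_atomic_set_crtc_for_plane(state, plane, crtc);
	if (ret)
		goto fail;
	drm_atomic_set_fb_for_plane(plane_state, fb);

	ret = drm_atomic_commit(state);
	if (ret)
		goto fail;

	/* On success drm_atomic_commit() took ownership of @state. */
	return 0;

fail:
	if (ret == -EDEADLK) {
		/* Deadlock detected: drop all cached state, back off and
		 * restart, exactly as drm_atomic_state_clear() requires. */
		drm_atomic_state_clear(state);
		drm_modeset_backoff(ctx);
		goto retry;
	}

	drm_atomic_state_free(state);
	return ret;
}

drm_atomic_legacy_backoff() above packages a heavier variant of the same
backoff for legacy entry points that cannot propagate -EDEADLK to their
callers.
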
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
new file mode 100644
index 000000000000..4a78a773151c
--- /dev/null
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -0,0 +1,1966 @@
1/*
2 * Copyright (C) 2014 Red Hat
3 * Copyright (C) 2014 Intel Corp.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be included in
13 * all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
19 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
20 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
21 * OTHER DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Rob Clark <robdclark@gmail.com>
25 * Daniel Vetter <daniel.vetter@ffwll.ch>
26 */
27
28#include <drm/drmP.h>
29#include <drm/drm_atomic.h>
30#include <drm/drm_plane_helper.h>
31#include <drm/drm_crtc_helper.h>
32#include <drm/drm_atomic_helper.h>
33#include <linux/fence.h>
34
35/**
36 * DOC: overview
37 *
38 * This helper library provides implementations of check and commit functions on
39 * top of the CRTC modeset helper callbacks and the plane helper callbacks. It
40 * also provides convenience implementations for the atomic state handling
41 * callbacks for drivers which don't need to subclass the drm core structures to
42 * add their own additional internal state.
43 *
44 * This library also provides default implementations for the check callback in
45 * drm_atomic_helper_check and for the commit callback with
46 * drm_atomic_helper_commit. But the individual stages and callbacks are exposed
47 * to allow drivers to mix and match and e.g. use the plane helpers only
48 * together with a driver private modeset implementation.
49 *
50 * This library also provides implementations for all the legacy driver
51 * interfaces on top of the atomic interface. See drm_atomic_helper_set_config,
52 * drm_atomic_helper_update_plane, drm_atomic_helper_disable_plane and the
53 * various functions to implement set_property callbacks. New drivers must not
54 * implement these functions themselves but must use the provided helpers.
55 */
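
The check/commit machinery below drives per-object helper vtables. A sketch of what a driver attaches; all foo_* callbacks are hypothetical and assumed to be defined elsewhere in the driver:

static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
	.mode_fixup = foo_crtc_mode_fixup,
	.mode_set_nofb = foo_crtc_mode_set_nofb,	/* modeset without fb */
	.prepare = foo_crtc_prepare,	/* shut down before plane updates */
	.commit = foo_crtc_commit,	/* re-enable after plane updates */
	.atomic_check = foo_crtc_atomic_check,
	.atomic_begin = foo_crtc_atomic_begin,
	.atomic_flush = foo_crtc_atomic_flush,
};

static const struct drm_plane_helper_funcs foo_plane_helper_funcs = {
	.prepare_fb = foo_plane_prepare_fb,
	.cleanup_fb = foo_plane_cleanup_fb,
	.atomic_check = foo_plane_atomic_check,
	.atomic_update = foo_plane_atomic_update,
};

These are registered with drm_crtc_helper_add() and drm_plane_helper_add() respectively.
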
56static void
57drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
58 struct drm_plane_state *plane_state,
59 struct drm_plane *plane)
60{
61 struct drm_crtc_state *crtc_state;
62
63 if (plane->state->crtc) {
64		crtc_state = state->crtc_states[drm_crtc_index(plane->state->crtc)];
65
66 if (WARN_ON(!crtc_state))
67 return;
68
69 crtc_state->planes_changed = true;
70 }
71
72 if (plane_state->crtc) {
73 crtc_state =
74 state->crtc_states[drm_crtc_index(plane_state->crtc)];
75
76 if (WARN_ON(!crtc_state))
77 return;
78
79 crtc_state->planes_changed = true;
80 }
81}
82
83static struct drm_crtc *
84get_current_crtc_for_encoder(struct drm_device *dev,
85 struct drm_encoder *encoder)
86{
87 struct drm_mode_config *config = &dev->mode_config;
88 struct drm_connector *connector;
89
90 WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
91
92 list_for_each_entry(connector, &config->connector_list, head) {
93 if (connector->state->best_encoder != encoder)
94 continue;
95
96 return connector->state->crtc;
97 }
98
99 return NULL;
100}
101
102static int
103steal_encoder(struct drm_atomic_state *state,
104 struct drm_encoder *encoder,
105 struct drm_crtc *encoder_crtc)
106{
107 struct drm_mode_config *config = &state->dev->mode_config;
108 struct drm_crtc_state *crtc_state;
109 struct drm_connector *connector;
110 struct drm_connector_state *connector_state;
111 int ret;
112
113 /*
114 * We can only steal an encoder coming from a connector, which means we
115 * must already hold the connection_mutex.
116 */
117 WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
118
119 DRM_DEBUG_KMS("[ENCODER:%d:%s] in use on [CRTC:%d], stealing it\n",
120 encoder->base.id, encoder->name,
121 encoder_crtc->base.id);
122
123 crtc_state = drm_atomic_get_crtc_state(state, encoder_crtc);
124 if (IS_ERR(crtc_state))
125 return PTR_ERR(crtc_state);
126
127 crtc_state->mode_changed = true;
128
129 list_for_each_entry(connector, &config->connector_list, head) {
130 if (connector->state->best_encoder != encoder)
131 continue;
132
133 DRM_DEBUG_KMS("Stealing encoder from [CONNECTOR:%d:%s]\n",
134 connector->base.id,
135 connector->name);
136
137 connector_state = drm_atomic_get_connector_state(state,
138 connector);
139 if (IS_ERR(connector_state))
140 return PTR_ERR(connector_state);
141
142 ret = drm_atomic_set_crtc_for_connector(connector_state, NULL);
143 if (ret)
144 return ret;
145 connector_state->best_encoder = NULL;
146 }
147
148 return 0;
149}
150
151static int
152update_connector_routing(struct drm_atomic_state *state, int conn_idx)
153{
154 struct drm_connector_helper_funcs *funcs;
155 struct drm_encoder *new_encoder;
156 struct drm_crtc *encoder_crtc;
157 struct drm_connector *connector;
158 struct drm_connector_state *connector_state;
159 struct drm_crtc_state *crtc_state;
160 int idx, ret;
161
162 connector = state->connectors[conn_idx];
163 connector_state = state->connector_states[conn_idx];
164
165 if (!connector)
166 return 0;
167
168 DRM_DEBUG_KMS("Updating routing for [CONNECTOR:%d:%s]\n",
169 connector->base.id,
170 connector->name);
171
172 if (connector->state->crtc != connector_state->crtc) {
173 if (connector->state->crtc) {
174 idx = drm_crtc_index(connector->state->crtc);
175
176 crtc_state = state->crtc_states[idx];
177 crtc_state->mode_changed = true;
178 }
179
180 if (connector_state->crtc) {
181 idx = drm_crtc_index(connector_state->crtc);
182
183 crtc_state = state->crtc_states[idx];
184 crtc_state->mode_changed = true;
185 }
186 }
187
188 if (!connector_state->crtc) {
189 DRM_DEBUG_KMS("Disabling [CONNECTOR:%d:%s]\n",
190 connector->base.id,
191 connector->name);
192
193 connector_state->best_encoder = NULL;
194
195 return 0;
196 }
197
198 funcs = connector->helper_private;
199 new_encoder = funcs->best_encoder(connector);
200
201 if (!new_encoder) {
202 DRM_DEBUG_KMS("No suitable encoder found for [CONNECTOR:%d:%s]\n",
203 connector->base.id,
204 connector->name);
205 return -EINVAL;
206 }
207
208 if (new_encoder == connector_state->best_encoder) {
209 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d]\n",
210 connector->base.id,
211 connector->name,
212 new_encoder->base.id,
213 new_encoder->name,
214 connector_state->crtc->base.id);
215
216 return 0;
217 }
218
219 encoder_crtc = get_current_crtc_for_encoder(state->dev,
220 new_encoder);
221
222 if (encoder_crtc) {
223 ret = steal_encoder(state, new_encoder, encoder_crtc);
224 if (ret) {
225 DRM_DEBUG_KMS("Encoder stealing failed for [CONNECTOR:%d:%s]\n",
226 connector->base.id,
227 connector->name);
228 return ret;
229 }
230 }
231
232 connector_state->best_encoder = new_encoder;
233 idx = drm_crtc_index(connector_state->crtc);
234
235 crtc_state = state->crtc_states[idx];
236 crtc_state->mode_changed = true;
237
238 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using [ENCODER:%d:%s] on [CRTC:%d]\n",
239 connector->base.id,
240 connector->name,
241 new_encoder->base.id,
242 new_encoder->name,
243 connector_state->crtc->base.id);
244
245 return 0;
246}
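
The routing code above relies on the connector helper's ->best_encoder hook. For the common fixed 1:1 connector/encoder mapping (the case the "we always steal the encoder away" comments assume), a sketch could look like this; struct foo_connector is a hypothetical driver subclass:

struct foo_connector {
	struct drm_connector base;
	struct drm_encoder encoder;	/* the only encoder for this connector */
};

static struct drm_encoder *foo_best_encoder(struct drm_connector *connector)
{
	struct foo_connector *fconn =
		container_of(connector, struct foo_connector, base);

	return &fconn->encoder;
}
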
247
248static int
249mode_fixup(struct drm_atomic_state *state)
250{
251 int ncrtcs = state->dev->mode_config.num_crtc;
252 struct drm_crtc_state *crtc_state;
253 struct drm_connector_state *conn_state;
254 int i;
255 bool ret;
256
257 for (i = 0; i < ncrtcs; i++) {
258 crtc_state = state->crtc_states[i];
259
260 if (!crtc_state || !crtc_state->mode_changed)
261 continue;
262
263 drm_mode_copy(&crtc_state->adjusted_mode, &crtc_state->mode);
264 }
265
266 for (i = 0; i < state->num_connector; i++) {
267 struct drm_encoder_helper_funcs *funcs;
268 struct drm_encoder *encoder;
269
270 conn_state = state->connector_states[i];
271
272 if (!conn_state)
273 continue;
274
275 WARN_ON(!!conn_state->best_encoder != !!conn_state->crtc);
276
277 if (!conn_state->crtc || !conn_state->best_encoder)
278 continue;
279
280 crtc_state =
281 state->crtc_states[drm_crtc_index(conn_state->crtc)];
282
283 /*
284 * Each encoder has at most one connector (since we always steal
285 * it away), so we won't call ->mode_fixup twice.
286 */
287 encoder = conn_state->best_encoder;
288 funcs = encoder->helper_private;
289
290 if (encoder->bridge && encoder->bridge->funcs->mode_fixup) {
291 ret = encoder->bridge->funcs->mode_fixup(
292 encoder->bridge, &crtc_state->mode,
293 &crtc_state->adjusted_mode);
294 if (!ret) {
295 DRM_DEBUG_KMS("Bridge fixup failed\n");
296 return -EINVAL;
297 }
298 }
299
300
301 ret = funcs->mode_fixup(encoder, &crtc_state->mode,
302 &crtc_state->adjusted_mode);
303 if (!ret) {
304 DRM_DEBUG_KMS("[ENCODER:%d:%s] fixup failed\n",
305 encoder->base.id, encoder->name);
306 return -EINVAL;
307 }
308 }
309
310 for (i = 0; i < ncrtcs; i++) {
311 struct drm_crtc_helper_funcs *funcs;
312 struct drm_crtc *crtc;
313
314 crtc_state = state->crtc_states[i];
315 crtc = state->crtcs[i];
316
317 if (!crtc_state || !crtc_state->mode_changed)
318 continue;
319
320 funcs = crtc->helper_private;
321 ret = funcs->mode_fixup(crtc, &crtc_state->mode,
322 &crtc_state->adjusted_mode);
323 if (!ret) {
324 DRM_DEBUG_KMS("[CRTC:%d] fixup failed\n",
325 crtc->base.id);
326 return -EINVAL;
327 }
328 }
329
330 return 0;
331}
332
333static int
334drm_atomic_helper_check_modeset(struct drm_device *dev,
335 struct drm_atomic_state *state)
336{
337 int ncrtcs = dev->mode_config.num_crtc;
338 struct drm_crtc *crtc;
339 struct drm_crtc_state *crtc_state;
340 int i, ret;
341
342 for (i = 0; i < ncrtcs; i++) {
343 crtc = state->crtcs[i];
344 crtc_state = state->crtc_states[i];
345
346 if (!crtc)
347 continue;
348
349 if (!drm_mode_equal(&crtc->state->mode, &crtc_state->mode)) {
350 DRM_DEBUG_KMS("[CRTC:%d] mode changed\n",
351 crtc->base.id);
352 crtc_state->mode_changed = true;
353 }
354
355 if (crtc->state->enable != crtc_state->enable) {
356 DRM_DEBUG_KMS("[CRTC:%d] enable changed\n",
357 crtc->base.id);
358 crtc_state->mode_changed = true;
359 }
360 }
361
362 for (i = 0; i < state->num_connector; i++) {
363 /*
364 * This only sets crtc->mode_changed for routing changes,
365 * drivers must set crtc->mode_changed themselves when connector
366 * properties need to be updated.
367 */
368 ret = update_connector_routing(state, i);
369 if (ret)
370 return ret;
371 }
372
373 /*
374 * After all the routing has been prepared we need to add in any
375	 * connector which is itself unchanged, but whose crtc changes its
376 * configuration. This must be done before calling mode_fixup in case a
377 * crtc only changed its mode but has the same set of connectors.
378 */
379 for (i = 0; i < ncrtcs; i++) {
380 int num_connectors;
381
382 crtc = state->crtcs[i];
383 crtc_state = state->crtc_states[i];
384
385 if (!crtc || !crtc_state->mode_changed)
386 continue;
387
388 DRM_DEBUG_KMS("[CRTC:%d] needs full modeset, enable: %c\n",
389 crtc->base.id,
390 crtc_state->enable ? 'y' : 'n');
391
392 ret = drm_atomic_add_affected_connectors(state, crtc);
393 if (ret != 0)
394 return ret;
395
396 num_connectors = drm_atomic_connectors_for_crtc(state,
397 crtc);
398
399 if (crtc_state->enable != !!num_connectors) {
400 DRM_DEBUG_KMS("[CRTC:%d] enabled/connectors mismatch\n",
401 crtc->base.id);
402
403 return -EINVAL;
404 }
405 }
406
407 return mode_fixup(state);
408}
409
410/**
411 * drm_atomic_helper_check - validate state object
412 * @dev: DRM device
413 * @state: the driver state object
414 *
415 * Check the state object to see if the requested state is physically possible.
416 * Only crtcs and planes have check callbacks, so for any additional (global)
417 * checking that a driver needs it can simply wrap that around this function.
418 * Drivers without such needs can directly use this as their ->atomic_check()
419 * callback.
420 *
421 * Returns:
422 * 0 on success, negative error code on failure.
423 */
424int drm_atomic_helper_check(struct drm_device *dev,
425 struct drm_atomic_state *state)
426{
427 int nplanes = dev->mode_config.num_total_plane;
428 int ncrtcs = dev->mode_config.num_crtc;
429 int i, ret = 0;
430
431 for (i = 0; i < nplanes; i++) {
432 struct drm_plane_helper_funcs *funcs;
433 struct drm_plane *plane = state->planes[i];
434 struct drm_plane_state *plane_state = state->plane_states[i];
435
436 if (!plane)
437 continue;
438
439 funcs = plane->helper_private;
440
441 drm_atomic_helper_plane_changed(state, plane_state, plane);
442
443 if (!funcs || !funcs->atomic_check)
444 continue;
445
446 ret = funcs->atomic_check(plane, plane_state);
447 if (ret) {
448 DRM_DEBUG_KMS("[PLANE:%d] atomic check failed\n",
449 plane->base.id);
450 return ret;
451 }
452 }
453
454 for (i = 0; i < ncrtcs; i++) {
455 struct drm_crtc_helper_funcs *funcs;
456 struct drm_crtc *crtc = state->crtcs[i];
457
458 if (!crtc)
459 continue;
460
461 funcs = crtc->helper_private;
462
463 if (!funcs || !funcs->atomic_check)
464 continue;
465
466 ret = funcs->atomic_check(crtc, state->crtc_states[i]);
467 if (ret) {
468 DRM_DEBUG_KMS("[CRTC:%d] atomic check failed\n",
469 crtc->base.id);
470 return ret;
471 }
472 }
473
474 ret = drm_atomic_helper_check_modeset(dev, state);
475 if (ret)
476 return ret;
477
478 return ret;
479}
480EXPORT_SYMBOL(drm_atomic_helper_check);
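
A driver that needs additional global validation can wrap this helper in its own ->atomic_check, as the kernel-doc suggests. foo_check_global_bandwidth() is a hypothetical driver-wide check:

static int foo_atomic_check(struct drm_device *dev,
			    struct drm_atomic_state *state)
{
	int ret;

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		return ret;

	/* hypothetical: reject configurations the memory bus can't sustain */
	return foo_check_global_bandwidth(dev, state);
}
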
481
482static void
483disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
484{
485 int ncrtcs = old_state->dev->mode_config.num_crtc;
486 int i;
487
488 for (i = 0; i < old_state->num_connector; i++) {
489 struct drm_connector_state *old_conn_state;
490 struct drm_connector *connector;
491 struct drm_encoder_helper_funcs *funcs;
492 struct drm_encoder *encoder;
493
494 old_conn_state = old_state->connector_states[i];
495 connector = old_state->connectors[i];
496
497 /* Shut down everything that's in the changeset and currently
498 * still on. So need to check the old, saved state. */
499 if (!old_conn_state || !old_conn_state->crtc)
500 continue;
501
502 encoder = old_conn_state->best_encoder;
503
504 /* We shouldn't get this far if we didn't previously have
505 * an encoder.. but WARN_ON() rather than explode.
506 */
507 if (WARN_ON(!encoder))
508 continue;
509
510 funcs = encoder->helper_private;
511
512 /*
513 * Each encoder has at most one connector (since we always steal
514		 * it away), so we won't call disable hooks twice.
515 */
516 if (encoder->bridge)
517 encoder->bridge->funcs->disable(encoder->bridge);
518
519 /* Right function depends upon target state. */
520 if (connector->state->crtc)
521 funcs->prepare(encoder);
522 else if (funcs->disable)
523 funcs->disable(encoder);
524 else
525 funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
526
527 if (encoder->bridge)
528 encoder->bridge->funcs->post_disable(encoder->bridge);
529 }
530
531 for (i = 0; i < ncrtcs; i++) {
532 struct drm_crtc_helper_funcs *funcs;
533 struct drm_crtc *crtc;
534
535 crtc = old_state->crtcs[i];
536
537 /* Shut down everything that needs a full modeset. */
538 if (!crtc || !crtc->state->mode_changed)
539 continue;
540
541 funcs = crtc->helper_private;
542
543 /* Right function depends upon target state. */
544 if (crtc->state->enable)
545 funcs->prepare(crtc);
546 else if (funcs->disable)
547 funcs->disable(crtc);
548 else
549 funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
550 }
551}
552
553static void
554set_routing_links(struct drm_device *dev, struct drm_atomic_state *old_state)
555{
556 int ncrtcs = old_state->dev->mode_config.num_crtc;
557 int i;
558
559 /* clear out existing links */
560 for (i = 0; i < old_state->num_connector; i++) {
561 struct drm_connector *connector;
562
563 connector = old_state->connectors[i];
564
565 if (!connector || !connector->encoder)
566 continue;
567
568 WARN_ON(!connector->encoder->crtc);
569
570 connector->encoder->crtc = NULL;
571 connector->encoder = NULL;
572 }
573
574 /* set new links */
575 for (i = 0; i < old_state->num_connector; i++) {
576 struct drm_connector *connector;
577
578 connector = old_state->connectors[i];
579
580 if (!connector || !connector->state->crtc)
581 continue;
582
583 if (WARN_ON(!connector->state->best_encoder))
584 continue;
585
586 connector->encoder = connector->state->best_encoder;
587 connector->encoder->crtc = connector->state->crtc;
588 }
589
590 /* set legacy state in the crtc structure */
591 for (i = 0; i < ncrtcs; i++) {
592 struct drm_crtc *crtc;
593
594 crtc = old_state->crtcs[i];
595
596 if (!crtc)
597 continue;
598
599 crtc->mode = crtc->state->mode;
600 crtc->enabled = crtc->state->enable;
601 crtc->x = crtc->primary->state->src_x >> 16;
602 crtc->y = crtc->primary->state->src_y >> 16;
603 }
604}
605
606static void
607crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
608{
609 int ncrtcs = old_state->dev->mode_config.num_crtc;
610 int i;
611
612 for (i = 0; i < ncrtcs; i++) {
613 struct drm_crtc_helper_funcs *funcs;
614 struct drm_crtc *crtc;
615
616 crtc = old_state->crtcs[i];
617
618 if (!crtc || !crtc->state->mode_changed)
619 continue;
620
621 funcs = crtc->helper_private;
622
623 if (crtc->state->enable)
624 funcs->mode_set_nofb(crtc);
625 }
626
627 for (i = 0; i < old_state->num_connector; i++) {
628 struct drm_connector *connector;
629 struct drm_crtc_state *new_crtc_state;
630 struct drm_encoder_helper_funcs *funcs;
631 struct drm_encoder *encoder;
632 struct drm_display_mode *mode, *adjusted_mode;
633
634 connector = old_state->connectors[i];
635
636 if (!connector || !connector->state->best_encoder)
637 continue;
638
639 encoder = connector->state->best_encoder;
640 funcs = encoder->helper_private;
641 new_crtc_state = connector->state->crtc->state;
642 mode = &new_crtc_state->mode;
643 adjusted_mode = &new_crtc_state->adjusted_mode;
644
645 /*
646 * Each encoder has at most one connector (since we always steal
647		 * it away), so we won't call mode_set hooks twice.
648 */
649 funcs->mode_set(encoder, mode, adjusted_mode);
650
651 if (encoder->bridge && encoder->bridge->funcs->mode_set)
652 encoder->bridge->funcs->mode_set(encoder->bridge,
653 mode, adjusted_mode);
654 }
655}
656
657/**
658 * drm_atomic_helper_commit_pre_planes - modeset commit before plane updates
659 * @dev: DRM device
660 * @state: atomic state
661 *
662 * This function commits the modeset changes that need to be committed before
663 * updating planes. It shuts down all the outputs that need to be shut down and
664 * prepares them (if required) with the new mode.
665 */
666void drm_atomic_helper_commit_pre_planes(struct drm_device *dev,
667 struct drm_atomic_state *state)
668{
669 disable_outputs(dev, state);
670 set_routing_links(dev, state);
671 crtc_set_mode(dev, state);
672}
673EXPORT_SYMBOL(drm_atomic_helper_commit_pre_planes);
674
675/**
676 * drm_atomic_helper_commit_post_planes - modeset commit after plane updates
677 * @dev: DRM device
678 * @old_state: atomic state object with old state structures
679 *
680 * This function commits the modeset changes that need to be committed after
681 * updating planes: It enables all the outputs with the new configuration which
682 * had to be turned off for the update.
683 */
684void drm_atomic_helper_commit_post_planes(struct drm_device *dev,
685 struct drm_atomic_state *old_state)
686{
687 int ncrtcs = old_state->dev->mode_config.num_crtc;
688 int i;
689
690 for (i = 0; i < ncrtcs; i++) {
691 struct drm_crtc_helper_funcs *funcs;
692 struct drm_crtc *crtc;
693
694 crtc = old_state->crtcs[i];
695
696 /* Need to filter out CRTCs where only planes change. */
697 if (!crtc || !crtc->state->mode_changed)
698 continue;
699
700 funcs = crtc->helper_private;
701
702 if (crtc->state->enable)
703 funcs->commit(crtc);
704 }
705
706 for (i = 0; i < old_state->num_connector; i++) {
707 struct drm_connector *connector;
708 struct drm_encoder_helper_funcs *funcs;
709 struct drm_encoder *encoder;
710
711 connector = old_state->connectors[i];
712
713 if (!connector || !connector->state->best_encoder)
714 continue;
715
716 encoder = connector->state->best_encoder;
717 funcs = encoder->helper_private;
718
719 /*
720 * Each encoder has at most one connector (since we always steal
721		 * it away), so we won't call enable hooks twice.
722 */
723 if (encoder->bridge)
724 encoder->bridge->funcs->pre_enable(encoder->bridge);
725
726 funcs->commit(encoder);
727
728 if (encoder->bridge)
729 encoder->bridge->funcs->enable(encoder->bridge);
730 }
731}
732EXPORT_SYMBOL(drm_atomic_helper_commit_post_planes);
733
734static void wait_for_fences(struct drm_device *dev,
735 struct drm_atomic_state *state)
736{
737 int nplanes = dev->mode_config.num_total_plane;
738 int i;
739
740 for (i = 0; i < nplanes; i++) {
741 struct drm_plane *plane = state->planes[i];
742
743 if (!plane || !plane->state->fence)
744 continue;
745
746 WARN_ON(!plane->state->fb);
747
748 fence_wait(plane->state->fence, false);
749 fence_put(plane->state->fence);
750 plane->state->fence = NULL;
751 }
752}
753
754static bool framebuffer_changed(struct drm_device *dev,
755 struct drm_atomic_state *old_state,
756 struct drm_crtc *crtc)
757{
758 struct drm_plane *plane;
759 struct drm_plane_state *old_plane_state;
760 int nplanes = old_state->dev->mode_config.num_total_plane;
761 int i;
762
763 for (i = 0; i < nplanes; i++) {
764 plane = old_state->planes[i];
765 old_plane_state = old_state->plane_states[i];
766
767 if (!plane)
768 continue;
769
770 if (plane->state->crtc != crtc &&
771 old_plane_state->crtc != crtc)
772 continue;
773
774 if (plane->state->fb != old_plane_state->fb)
775 return true;
776 }
777
778 return false;
779}
780
781/**
782 * drm_atomic_helper_wait_for_vblanks - wait for vblank on crtcs
783 * @dev: DRM device
784 * @old_state: atomic state object with old state structures
785 *
786 * Helper to, after atomic commit, wait for vblanks on all affected
787 * crtcs (i.e. before cleaning up old framebuffers using
788 * drm_atomic_helper_cleanup_planes()). It will only wait on crtcs where the
789 * framebuffers have actually changed to optimize for the legacy cursor and
790 * plane update use-case.
791 */
792void
793drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
794 struct drm_atomic_state *old_state)
795{
796 struct drm_crtc *crtc;
797 struct drm_crtc_state *old_crtc_state;
798 int ncrtcs = old_state->dev->mode_config.num_crtc;
799 int i, ret;
800
801 for (i = 0; i < ncrtcs; i++) {
802 crtc = old_state->crtcs[i];
803 old_crtc_state = old_state->crtc_states[i];
804
805 if (!crtc)
806 continue;
807
808 /* No one cares about the old state, so abuse it for tracking
809 * and store whether we hold a vblank reference (and should do a
810 * vblank wait) in the ->enable boolean. */
811 old_crtc_state->enable = false;
812
813 if (!crtc->state->enable)
814 continue;
815
816 if (!framebuffer_changed(dev, old_state, crtc))
817 continue;
818
819 ret = drm_crtc_vblank_get(crtc);
820 if (ret != 0)
821 continue;
822
823 old_crtc_state->enable = true;
824 old_crtc_state->last_vblank_count = drm_vblank_count(dev, i);
825 }
826
827 for (i = 0; i < ncrtcs; i++) {
828 crtc = old_state->crtcs[i];
829 old_crtc_state = old_state->crtc_states[i];
830
831 if (!crtc || !old_crtc_state->enable)
832 continue;
833
834 ret = wait_event_timeout(dev->vblank[i].queue,
835 old_crtc_state->last_vblank_count !=
836 drm_vblank_count(dev, i),
837 msecs_to_jiffies(50));
838
839 drm_crtc_vblank_put(crtc);
840 }
841}
842EXPORT_SYMBOL(drm_atomic_helper_wait_for_vblanks);
843
844/**
845 * drm_atomic_helper_commit - commit validated state object
846 * @dev: DRM device
847 * @state: the driver state object
848 * @async: asynchronous commit
849 *
850 * This function commits a state object that has been pre-validated with
851 * drm_atomic_helper_check(). This can still fail when e.g. the framebuffer
852 * reservation fails. For now this doesn't implement asynchronous commits.
853 *
854 * Returns:
855 * 0 on success, negative error code on failure.
856 */
857int drm_atomic_helper_commit(struct drm_device *dev,
858 struct drm_atomic_state *state,
859 bool async)
860{
861 int ret;
862
863 if (async)
864 return -EBUSY;
865
866 ret = drm_atomic_helper_prepare_planes(dev, state);
867 if (ret)
868 return ret;
869
870 /*
871 * This is the point of no return - everything below never fails except
872 * when the hw goes bonghits. Which means we can commit the new state on
873 * the software side now.
874 */
875
876 drm_atomic_helper_swap_state(dev, state);
877
878 /*
879 * Everything below can be run asynchronously without the need to grab
880	 * any modeset locks at all under one condition: It must be guaranteed
881 * that the asynchronous work has either been cancelled (if the driver
882 * supports it, which at least requires that the framebuffers get
883 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
884 * before the new state gets committed on the software side with
885 * drm_atomic_helper_swap_state().
886 *
887 * This scheme allows new atomic state updates to be prepared and
888 * checked in parallel to the asynchronous completion of the previous
889 * update. Which is important since compositors need to figure out the
890 * composition of the next frame right after having submitted the
891 * current layout.
892 */
893
894 wait_for_fences(dev, state);
895
896 drm_atomic_helper_commit_pre_planes(dev, state);
897
898 drm_atomic_helper_commit_planes(dev, state);
899
900 drm_atomic_helper_commit_post_planes(dev, state);
901
902 drm_atomic_helper_wait_for_vblanks(dev, state);
903
904 drm_atomic_helper_cleanup_planes(dev, state);
905
906 drm_atomic_state_free(state);
907
908 return 0;
909}
910EXPORT_SYMBOL(drm_atomic_helper_commit);
911
912/**
913 * DOC: implementing async commit
914 *
915 * For now the atomic helpers don't support async commit directly. If there is
916 * real need it could be added though, using the dma-buf fence infrastructure
917 * for generic synchronization with outstanding rendering.
918 *
919 * For now drivers have to implement async commit themselves, with the following
920 * sequence being the recommended one:
921 *
922 * 1. Run drm_atomic_helper_prepare_planes() first. This is the only function
923 * which commit needs to call which can fail, so we want to run it first and
924 * synchronously.
925 *
926 * 2. Synchronize with any outstanding asynchronous commit worker threads which
927 * might be affected by the new state update. This can be done by either cancelling
928 * or flushing the work items, depending upon whether the driver can deal with
929 * cancelled updates. Note that it is important to ensure that the framebuffer
930 * cleanup is still done when cancelling.
931 *
932 * For sufficient parallelism it is recommended to have a work item per crtc
933 * (for updates which don't touch global state) and a global one. Then we only
934 * need to synchronize with the crtc work items for changed crtcs and the global
935 * work item, which allows nice concurrent updates on disjoint sets of crtcs.
936 *
937 * 3. The software state is updated synchronously with
938 * drm_atomic_helper_swap_state. Doing this under the protection of all modeset
939 * locks means concurrent callers never see inconsistent state. And doing this
940 * while it's guaranteed that no relevant async worker runs means that async
941 * workers do not need to grab any locks. Actually they must not grab locks, for
942 * otherwise the work flushing will deadlock.
943 *
944 * 4. Schedule a work item to do all subsequent steps, using the split-out
945 * commit helpers: a) pre-plane commit b) plane commit c) post-plane commit and
946 * then cleaning up the framebuffers after the old framebuffer is no longer
947 * being displayed.
948 */
949
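A sketch of the recommended sequence above, using a single work item for simplicity (a real driver would likely use per-crtc work items as noted in step 2). All foo_* names, including the foo_commit bookkeeping struct and the foo_flush_pending_commits() synchronization step, are hypothetical:

struct foo_commit {
	struct work_struct work;
	struct drm_device *dev;
	struct drm_atomic_state *state;
};

static void foo_commit_work(struct work_struct *work)
{
	struct foo_commit *commit =
		container_of(work, struct foo_commit, work);

	/* step 4: split-out commit helpers, then framebuffer cleanup */
	drm_atomic_helper_commit_pre_planes(commit->dev, commit->state);
	drm_atomic_helper_commit_planes(commit->dev, commit->state);
	drm_atomic_helper_commit_post_planes(commit->dev, commit->state);
	drm_atomic_helper_wait_for_vblanks(commit->dev, commit->state);
	drm_atomic_helper_cleanup_planes(commit->dev, commit->state);
	drm_atomic_state_free(commit->state);
	kfree(commit);
}

static int foo_atomic_commit(struct drm_device *dev,
			     struct drm_atomic_state *state, bool async)
{
	struct foo_commit *commit;
	int ret;

	commit = kzalloc(sizeof(*commit), GFP_KERNEL);
	if (!commit)
		return -ENOMEM;

	/* step 1: the only step that can fail, done synchronously */
	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret) {
		kfree(commit);
		return ret;
	}

	/* step 2: flush workers touching the affected crtcs (hypothetical) */
	foo_flush_pending_commits(dev, state);

	/* step 3: publish the new state while all modeset locks are held */
	drm_atomic_helper_swap_state(dev, state);

	commit->dev = dev;
	commit->state = state;
	INIT_WORK(&commit->work, foo_commit_work);
	schedule_work(&commit->work);

	/* this sketch always commits asynchronously, regardless of @async */
	return 0;
}
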
950/**
951 * drm_atomic_helper_prepare_planes - prepare plane resources before commit
952 * @dev: DRM device
953 * @state: atomic state object with new state structures
954 *
955 * This function prepares plane state, specifically framebuffers, for the new
956 * configuration. If any failure is encountered this function will call
957 * ->cleanup_fb on any already successfully prepared framebuffer.
958 *
959 * Returns:
960 * 0 on success, negative error code on failure.
961 */
962int drm_atomic_helper_prepare_planes(struct drm_device *dev,
963 struct drm_atomic_state *state)
964{
965 int nplanes = dev->mode_config.num_total_plane;
966 int ret, i;
967
968 for (i = 0; i < nplanes; i++) {
969 struct drm_plane_helper_funcs *funcs;
970 struct drm_plane *plane = state->planes[i];
971 struct drm_framebuffer *fb;
972
973 if (!plane)
974 continue;
975
976 funcs = plane->helper_private;
977
978 fb = state->plane_states[i]->fb;
979
980 if (fb && funcs->prepare_fb) {
981 ret = funcs->prepare_fb(plane, fb);
982 if (ret)
983 goto fail;
984 }
985 }
986
987 return 0;
988
989fail:
990 for (i--; i >= 0; i--) {
991 struct drm_plane_helper_funcs *funcs;
992 struct drm_plane *plane = state->planes[i];
993 struct drm_framebuffer *fb;
994
995 if (!plane)
996 continue;
997
998 funcs = plane->helper_private;
999
1000 fb = state->plane_states[i]->fb;
1001
1002 if (fb && funcs->cleanup_fb)
1003 funcs->cleanup_fb(plane, fb);
1004
1005 }
1006
1007 return ret;
1008}
1009EXPORT_SYMBOL(drm_atomic_helper_prepare_planes);
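
A sketch of the matching plane ->prepare_fb/->cleanup_fb pair driven by the functions above and below. The buffer-object lookup and pinning (foo_fb_get_bo(), foo_bo_pin(), foo_bo_unpin()) are hypothetical driver internals:

static int foo_plane_prepare_fb(struct drm_plane *plane,
				struct drm_framebuffer *fb)
{
	struct foo_bo *bo = foo_fb_get_bo(fb);	/* hypothetical lookup */

	/* make the backing storage accessible to the scanout hardware */
	return foo_bo_pin(bo);
}

static void foo_plane_cleanup_fb(struct drm_plane *plane,
				 struct drm_framebuffer *fb)
{
	struct foo_bo *bo = foo_fb_get_bo(fb);	/* hypothetical lookup */

	foo_bo_unpin(bo);
}
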
1010
1011/**
1012 * drm_atomic_helper_commit_planes - commit plane state
1013 * @dev: DRM device
1014 * @old_state: atomic state object with old state structures
1015 *
1016 * This function commits the new plane state using the plane and atomic helper
1017 * functions for planes and crtcs. It assumes that the atomic state has already
1018 * been pushed into the relevant object state pointers, since this step can no
1019 * longer fail.
1020 *
1021 * It still requires the global state object @old_state to know which planes and
1022 * crtcs need to be updated though.
1023 */
1024void drm_atomic_helper_commit_planes(struct drm_device *dev,
1025 struct drm_atomic_state *old_state)
1026{
1027 int nplanes = dev->mode_config.num_total_plane;
1028 int ncrtcs = dev->mode_config.num_crtc;
1029 int i;
1030
1031 for (i = 0; i < ncrtcs; i++) {
1032 struct drm_crtc_helper_funcs *funcs;
1033 struct drm_crtc *crtc = old_state->crtcs[i];
1034
1035 if (!crtc)
1036 continue;
1037
1038 funcs = crtc->helper_private;
1039
1040 if (!funcs || !funcs->atomic_begin)
1041 continue;
1042
1043 funcs->atomic_begin(crtc);
1044 }
1045
1046 for (i = 0; i < nplanes; i++) {
1047 struct drm_plane_helper_funcs *funcs;
1048 struct drm_plane *plane = old_state->planes[i];
1049 struct drm_plane_state *old_plane_state;
1050
1051 if (!plane)
1052 continue;
1053
1054 funcs = plane->helper_private;
1055
1056 if (!funcs || !funcs->atomic_update)
1057 continue;
1058
1059 old_plane_state = old_state->plane_states[i];
1060
1061 funcs->atomic_update(plane, old_plane_state);
1062 }
1063
1064 for (i = 0; i < ncrtcs; i++) {
1065 struct drm_crtc_helper_funcs *funcs;
1066 struct drm_crtc *crtc = old_state->crtcs[i];
1067
1068 if (!crtc)
1069 continue;
1070
1071 funcs = crtc->helper_private;
1072
1073 if (!funcs || !funcs->atomic_flush)
1074 continue;
1075
1076 funcs->atomic_flush(crtc);
1077 }
1078}
1079EXPORT_SYMBOL(drm_atomic_helper_commit_planes);
1080
1081/**
1082 * drm_atomic_helper_cleanup_planes - cleanup plane resources after commit
1083 * @dev: DRM device
1084 * @old_state: atomic state object with old state structures
1085 *
1086 * This function cleans up plane state, specifically framebuffers, from the old
1087 * configuration. Hence the old configuration must be preserved in @old_state to
1088 * be able to call this function.
1089 *
1090 * This function must also be called on the new state when the atomic update
1091 * fails at any point after calling drm_atomic_helper_prepare_planes().
1092 */
1093void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
1094 struct drm_atomic_state *old_state)
1095{
1096 int nplanes = dev->mode_config.num_total_plane;
1097 int i;
1098
1099 for (i = 0; i < nplanes; i++) {
1100 struct drm_plane_helper_funcs *funcs;
1101 struct drm_plane *plane = old_state->planes[i];
1102 struct drm_framebuffer *old_fb;
1103
1104 if (!plane)
1105 continue;
1106
1107 funcs = plane->helper_private;
1108
1109 old_fb = old_state->plane_states[i]->fb;
1110
1111 if (old_fb && funcs->cleanup_fb)
1112 funcs->cleanup_fb(plane, old_fb);
1113 }
1114}
1115EXPORT_SYMBOL(drm_atomic_helper_cleanup_planes);
1116
1117/**
1118 * drm_atomic_helper_swap_state - store atomic state into current sw state
1119 * @dev: DRM device
1120 * @state: atomic state
1121 *
1122 * This function stores the atomic state into the current state pointers in all
1123 * driver objects. It should be called after all potentially failing steps
1124 * have succeeded, but before the actual hardware state is committed.
1125 *
1126 * For cleanup and error recovery the current state for all changed objects will
1127 * be swapped into @state.
1128 *
1129 * With that sequence it fits perfectly into the plane prepare/cleanup sequence:
1130 *
1131 * 1. Call drm_atomic_helper_prepare_planes() with the staged atomic state.
1132 *
1133 * 2. Do any other steps that might fail.
1134 *
1135 * 3. Put the staged state into the current state pointers with this function.
1136 *
1137 * 4. Actually commit the hardware state.
1138 *
1139 * 5. Call drm_atomic_helper_cleanup_planes with @state, which since step 3
1140 * contains the old state. Also do any other cleanup required with that state.
1141 */
1142void drm_atomic_helper_swap_state(struct drm_device *dev,
1143 struct drm_atomic_state *state)
1144{
1145 int i;
1146
1147 for (i = 0; i < dev->mode_config.num_connector; i++) {
1148 struct drm_connector *connector = state->connectors[i];
1149
1150 if (!connector)
1151 continue;
1152
1153 connector->state->state = state;
1154 swap(state->connector_states[i], connector->state);
1155 connector->state->state = NULL;
1156 }
1157
1158 for (i = 0; i < dev->mode_config.num_crtc; i++) {
1159 struct drm_crtc *crtc = state->crtcs[i];
1160
1161 if (!crtc)
1162 continue;
1163
1164 crtc->state->state = state;
1165 swap(state->crtc_states[i], crtc->state);
1166 crtc->state->state = NULL;
1167 }
1168
1169 for (i = 0; i < dev->mode_config.num_total_plane; i++) {
1170 struct drm_plane *plane = state->planes[i];
1171
1172 if (!plane)
1173 continue;
1174
1175 plane->state->state = state;
1176 swap(state->plane_states[i], plane->state);
1177 plane->state->state = NULL;
1178 }
1179}
1180EXPORT_SYMBOL(drm_atomic_helper_swap_state);
1181
1182/**
1183 * drm_atomic_helper_update_plane - Helper for primary plane update using atomic
1184 * @plane: plane object to update
1185 * @crtc: owning CRTC of the plane
1186 * @fb: framebuffer to flip onto plane
1187 * @crtc_x: x offset of primary plane on crtc
1188 * @crtc_y: y offset of primary plane on crtc
1189 * @crtc_w: width of primary plane rectangle on crtc
1190 * @crtc_h: height of primary plane rectangle on crtc
1191 * @src_x: x offset of @fb for panning
1192 * @src_y: y offset of @fb for panning
1193 * @src_w: width of source rectangle in @fb
1194 * @src_h: height of source rectangle in @fb
1195 *
1196 * Provides a default plane update handler using the atomic driver interface.
1197 *
1198 * Returns:
1199 * 0 on success, negative error code on failure.
1200 */
1201int drm_atomic_helper_update_plane(struct drm_plane *plane,
1202 struct drm_crtc *crtc,
1203 struct drm_framebuffer *fb,
1204 int crtc_x, int crtc_y,
1205 unsigned int crtc_w, unsigned int crtc_h,
1206 uint32_t src_x, uint32_t src_y,
1207 uint32_t src_w, uint32_t src_h)
1208{
1209 struct drm_atomic_state *state;
1210 struct drm_plane_state *plane_state;
1211 int ret = 0;
1212
1213 state = drm_atomic_state_alloc(plane->dev);
1214 if (!state)
1215 return -ENOMEM;
1216
1217 state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
1218retry:
1219 plane_state = drm_atomic_get_plane_state(state, plane);
1220 if (IS_ERR(plane_state)) {
1221 ret = PTR_ERR(plane_state);
1222 goto fail;
1223 }
1224
1225 ret = drm_atomic_set_crtc_for_plane(state, plane, crtc);
1226 if (ret != 0)
1227 goto fail;
1228 drm_atomic_set_fb_for_plane(plane_state, fb);
1229 plane_state->crtc_x = crtc_x;
1230 plane_state->crtc_y = crtc_y;
1231 plane_state->crtc_h = crtc_h;
1232 plane_state->crtc_w = crtc_w;
1233 plane_state->src_x = src_x;
1234 plane_state->src_y = src_y;
1235 plane_state->src_h = src_h;
1236 plane_state->src_w = src_w;
1237
1238 ret = drm_atomic_commit(state);
1239 if (ret != 0)
1240 goto fail;
1241
1242 /* Driver takes ownership of state on successful commit. */
1243 return 0;
1244fail:
1245 if (ret == -EDEADLK)
1246 goto backoff;
1247
1248 drm_atomic_state_free(state);
1249
1250 return ret;
1251backoff:
1252 drm_atomic_state_clear(state);
1253 drm_atomic_legacy_backoff(state);
1254
1255 /*
1256 * Someone might have exchanged the framebuffer while we dropped locks
1257 * in the backoff code. We need to fix up the fb refcount tracking the
1258 * core does for us.
1259 */
1260 plane->old_fb = plane->fb;
1261
1262 goto retry;
1263}
1264EXPORT_SYMBOL(drm_atomic_helper_update_plane);
1265
1266/**
1267 * drm_atomic_helper_disable_plane - Helper for primary plane disable using atomic
1268 * @plane: plane to disable
1269 *
1270 * Provides a default plane disable handler using the atomic driver interface.
1271 *
1272 * Returns:
1273 * 0 on success, negative error code on failure.
1274 */
1275int drm_atomic_helper_disable_plane(struct drm_plane *plane)
1276{
1277 struct drm_atomic_state *state;
1278 struct drm_plane_state *plane_state;
1279 int ret = 0;
1280
1281 /*
1282 * FIXME: Without plane->crtc set we can't get at the implicit legacy
1283 * acquire context. The real fix will be to wire the acquire ctx through
1284 * everywhere we need it, but meanwhile prevent chaos by just skipping
1285 * this noop. The critical case is the cursor ioctls which a) only grab
1286 * crtc/cursor-plane locks (so we need the crtc to get at the right
1287 * acquire context) and b) can try to disable the plane multiple times.
1288 */
1289 if (!plane->crtc)
1290 return 0;
1291
1292 state = drm_atomic_state_alloc(plane->dev);
1293 if (!state)
1294 return -ENOMEM;
1295
1296 state->acquire_ctx = drm_modeset_legacy_acquire_ctx(plane->crtc);
1297retry:
1298 plane_state = drm_atomic_get_plane_state(state, plane);
1299 if (IS_ERR(plane_state)) {
1300 ret = PTR_ERR(plane_state);
1301 goto fail;
1302 }
1303
1304 ret = drm_atomic_set_crtc_for_plane(state, plane, NULL);
1305 if (ret != 0)
1306 goto fail;
1307 drm_atomic_set_fb_for_plane(plane_state, NULL);
1308 plane_state->crtc_x = 0;
1309 plane_state->crtc_y = 0;
1310 plane_state->crtc_h = 0;
1311 plane_state->crtc_w = 0;
1312 plane_state->src_x = 0;
1313 plane_state->src_y = 0;
1314 plane_state->src_h = 0;
1315 plane_state->src_w = 0;
1316
1317 ret = drm_atomic_commit(state);
1318 if (ret != 0)
1319 goto fail;
1320
1321 /* Driver takes ownership of state on successful commit. */
1322 return 0;
1323fail:
1324 if (ret == -EDEADLK)
1325 goto backoff;
1326
1327 drm_atomic_state_free(state);
1328
1329 return ret;
1330backoff:
1331 drm_atomic_state_clear(state);
1332 drm_atomic_legacy_backoff(state);
1333
1334 /*
1335 * Someone might have exchanged the framebuffer while we dropped locks
1336 * in the backoff code. We need to fix up the fb refcount tracking the
1337 * core does for us.
1338 */
1339 plane->old_fb = plane->fb;
1340
1341 goto retry;
1342}
1343EXPORT_SYMBOL(drm_atomic_helper_disable_plane);
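
With the two legacy entry points above, a fully atomic driver's drm_plane_funcs can be built almost entirely from helpers; only foo_plane_destroy is a hypothetical driver function here, and the state hooks are the defaults from the end of this file:

static const struct drm_plane_funcs foo_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.set_property = drm_atomic_helper_plane_set_property,
	.destroy = foo_plane_destroy,
	.reset = drm_atomic_helper_plane_reset,
	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};
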
1344
1345static int update_output_state(struct drm_atomic_state *state,
1346 struct drm_mode_set *set)
1347{
1348 struct drm_device *dev = set->crtc->dev;
1349 struct drm_connector_state *conn_state;
1350 int ncrtcs = state->dev->mode_config.num_crtc;
1351 int ret, i, j;
1352
1353 ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
1354 state->acquire_ctx);
1355 if (ret)
1356 return ret;
1357
1358 /* First grab all affected connector/crtc states. */
1359 for (i = 0; i < set->num_connectors; i++) {
1360 conn_state = drm_atomic_get_connector_state(state,
1361 set->connectors[i]);
1362 if (IS_ERR(conn_state))
1363 return PTR_ERR(conn_state);
1364 }
1365
1366 for (i = 0; i < ncrtcs; i++) {
1367 struct drm_crtc *crtc = state->crtcs[i];
1368
1369 if (!crtc)
1370 continue;
1371
1372 ret = drm_atomic_add_affected_connectors(state, crtc);
1373 if (ret)
1374 return ret;
1375 }
1376
1377 /* Then recompute connector->crtc links and crtc enabling state. */
1378 for (i = 0; i < state->num_connector; i++) {
1379 struct drm_connector *connector;
1380
1381 connector = state->connectors[i];
1382 conn_state = state->connector_states[i];
1383
1384 if (!connector)
1385 continue;
1386
1387 if (conn_state->crtc == set->crtc) {
1388 ret = drm_atomic_set_crtc_for_connector(conn_state,
1389 NULL);
1390 if (ret)
1391 return ret;
1392 }
1393
1394 for (j = 0; j < set->num_connectors; j++) {
1395 if (set->connectors[j] == connector) {
1396 ret = drm_atomic_set_crtc_for_connector(conn_state,
1397 set->crtc);
1398 if (ret)
1399 return ret;
1400 break;
1401 }
1402 }
1403 }
1404
1405 for (i = 0; i < ncrtcs; i++) {
1406 struct drm_crtc *crtc = state->crtcs[i];
1407 struct drm_crtc_state *crtc_state = state->crtc_states[i];
1408
1409 if (!crtc)
1410 continue;
1411
1412 /* Don't update ->enable for the CRTC in the set_config request,
1413 * since a mismatch would indicate a bug in the upper layers.
1414 * The actual modeset code later on will catch any
1415 * inconsistencies here. */
1416 if (crtc == set->crtc)
1417 continue;
1418
1419 crtc_state->enable =
1420 drm_atomic_connectors_for_crtc(state, crtc);
1421 }
1422
1423 return 0;
1424}
1425
1426/**
1427 * drm_atomic_helper_set_config - set a new config from userspace
1428 * @set: mode set configuration
1429 *
1430 * Provides a default crtc set_config handler using the atomic driver interface.
1431 *
1432 * Returns:
1433 * 0 on success, negative errno numbers on failure.
1434 */
1435int drm_atomic_helper_set_config(struct drm_mode_set *set)
1436{
1437 struct drm_atomic_state *state;
1438 struct drm_crtc *crtc = set->crtc;
1439 struct drm_crtc_state *crtc_state;
1440 struct drm_plane_state *primary_state;
1441 int ret = 0;
1442
1443 state = drm_atomic_state_alloc(crtc->dev);
1444 if (!state)
1445 return -ENOMEM;
1446
1447 state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
1448retry:
1449 crtc_state = drm_atomic_get_crtc_state(state, crtc);
1450 if (IS_ERR(crtc_state)) {
1451 ret = PTR_ERR(crtc_state);
1452 goto fail;
1453 }
1454
1455 primary_state = drm_atomic_get_plane_state(state, crtc->primary);
1456 if (IS_ERR(primary_state)) {
1457 ret = PTR_ERR(primary_state);
1458 goto fail;
1459 }
1460
1461 if (!set->mode) {
1462 WARN_ON(set->fb);
1463 WARN_ON(set->num_connectors);
1464
1465 crtc_state->enable = false;
1466
1467 ret = drm_atomic_set_crtc_for_plane(state, crtc->primary, NULL);
1468 if (ret != 0)
1469 goto fail;
1470
1471 drm_atomic_set_fb_for_plane(primary_state, NULL);
1472
1473 goto commit;
1474 }
1475
1476 WARN_ON(!set->fb);
1477 WARN_ON(!set->num_connectors);
1478
1479 crtc_state->enable = true;
1480 drm_mode_copy(&crtc_state->mode, set->mode);
1481
1482 ret = drm_atomic_set_crtc_for_plane(state, crtc->primary, crtc);
1483 if (ret != 0)
1484 goto fail;
1485 drm_atomic_set_fb_for_plane(primary_state, set->fb);
1486 primary_state->crtc_x = 0;
1487 primary_state->crtc_y = 0;
1488 primary_state->crtc_h = set->mode->vdisplay;
1489 primary_state->crtc_w = set->mode->hdisplay;
1490 primary_state->src_x = set->x << 16;
1491 primary_state->src_y = set->y << 16;
1492 primary_state->src_h = set->mode->vdisplay << 16;
1493 primary_state->src_w = set->mode->hdisplay << 16;
1494
1495commit:
1496 ret = update_output_state(state, set);
1497 if (ret)
1498 goto fail;
1499
1500 ret = drm_atomic_commit(state);
1501 if (ret != 0)
1502 goto fail;
1503
1504 /* Driver takes ownership of state on successful commit. */
1505 return 0;
1506fail:
1507 if (ret == -EDEADLK)
1508 goto backoff;
1509
1510 drm_atomic_state_free(state);
1511
1512 return ret;
1513backoff:
1514 drm_atomic_state_clear(state);
1515 drm_atomic_legacy_backoff(state);
1516
1517 /*
1518 * Someone might have exchanged the framebuffer while we dropped locks
1519 * in the backoff code. We need to fix up the fb refcount tracking the
1520 * core does for us.
1521 */
1522 crtc->primary->old_fb = crtc->primary->fb;
1523
1524 goto retry;
1525}
1526EXPORT_SYMBOL(drm_atomic_helper_set_config);
1527
1528/**
1529 * drm_atomic_helper_crtc_set_property - helper for crtc properties
1530 * @crtc: DRM crtc
1531 * @property: DRM property
1532 * @val: value of property
1533 *
1534 * Provides a default crtc set_property handler using the atomic driver interface.
1535 *
1536 * Returns:
1537 * 0 on success, negative error code on failure.
1538 */
1539int
1540drm_atomic_helper_crtc_set_property(struct drm_crtc *crtc,
1541 struct drm_property *property,
1542 uint64_t val)
1543{
1544 struct drm_atomic_state *state;
1545 struct drm_crtc_state *crtc_state;
1546 int ret = 0;
1547
1548 state = drm_atomic_state_alloc(crtc->dev);
1549 if (!state)
1550 return -ENOMEM;
1551
1552 /* ->set_property is always called with all locks held. */
1553 state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
1554retry:
1555 crtc_state = drm_atomic_get_crtc_state(state, crtc);
1556 if (IS_ERR(crtc_state)) {
1557 ret = PTR_ERR(crtc_state);
1558 goto fail;
1559 }
1560
1561 ret = crtc->funcs->atomic_set_property(crtc, crtc_state,
1562 property, val);
1563 if (ret)
1564 goto fail;
1565
1566 ret = drm_atomic_commit(state);
1567 if (ret != 0)
1568 goto fail;
1569
1570 /* Driver takes ownership of state on successful commit. */
1571 return 0;
1572fail:
1573 if (ret == -EDEADLK)
1574 goto backoff;
1575
1576 drm_atomic_state_free(state);
1577
1578 return ret;
1579backoff:
1580 drm_atomic_state_clear(state);
1581 drm_atomic_legacy_backoff(state);
1582
1583 goto retry;
1584}
1585EXPORT_SYMBOL(drm_atomic_helper_crtc_set_property);
1586
1587/**
1588 * drm_atomic_helper_plane_set_property - helper for plane properties
1589 * @plane: DRM plane
1590 * @property: DRM property
1591 * @val: value of property
1592 *
1593 * Provides a default plane set_property handler using the atomic driver interface.
1594 *
1595 * Returns:
1596 * 0 on success, negative error code on failure.
1597 */
1598int
1599drm_atomic_helper_plane_set_property(struct drm_plane *plane,
1600 struct drm_property *property,
1601 uint64_t val)
1602{
1603 struct drm_atomic_state *state;
1604 struct drm_plane_state *plane_state;
1605 int ret = 0;
1606
1607 state = drm_atomic_state_alloc(plane->dev);
1608 if (!state)
1609 return -ENOMEM;
1610
1611 /* ->set_property is always called with all locks held. */
1612 state->acquire_ctx = plane->dev->mode_config.acquire_ctx;
1613retry:
1614 plane_state = drm_atomic_get_plane_state(state, plane);
1615 if (IS_ERR(plane_state)) {
1616 ret = PTR_ERR(plane_state);
1617 goto fail;
1618 }
1619
1620 ret = plane->funcs->atomic_set_property(plane, plane_state,
1621 property, val);
1622 if (ret)
1623 goto fail;
1624
1625 ret = drm_atomic_commit(state);
1626 if (ret != 0)
1627 goto fail;
1628
1629 /* Driver takes ownership of state on successful commit. */
1630 return 0;
1631fail:
1632 if (ret == -EDEADLK)
1633 goto backoff;
1634
1635 drm_atomic_state_free(state);
1636
1637 return ret;
1638backoff:
1639 drm_atomic_state_clear(state);
1640 drm_atomic_legacy_backoff(state);
1641
1642 goto retry;
1643}
1644EXPORT_SYMBOL(drm_atomic_helper_plane_set_property);
1645
1646/**
1647 * drm_atomic_helper_connector_set_property - helper for connector properties
1648 * @connector: DRM connector
1649 * @property: DRM property
1650 * @val: value of property
1651 *
1652 * Provides a default connector set_property handler using the atomic driver interface.
1653 *
1654 * Returns:
1655 * 0 on success, negative error code on failure.
1656 */
1657int
1658drm_atomic_helper_connector_set_property(struct drm_connector *connector,
1659 struct drm_property *property,
1660 uint64_t val)
1661{
1662 struct drm_atomic_state *state;
1663 struct drm_connector_state *connector_state;
1664 int ret = 0;
1665
1666 state = drm_atomic_state_alloc(connector->dev);
1667 if (!state)
1668 return -ENOMEM;
1669
1670 /* ->set_property is always called with all locks held. */
1671 state->acquire_ctx = connector->dev->mode_config.acquire_ctx;
1672retry:
1673 connector_state = drm_atomic_get_connector_state(state, connector);
1674 if (IS_ERR(connector_state)) {
1675 ret = PTR_ERR(connector_state);
1676 goto fail;
1677 }
1678
1679 ret = connector->funcs->atomic_set_property(connector, connector_state,
1680 property, val);
1681 if (ret)
1682 goto fail;
1683
1684 ret = drm_atomic_commit(state);
1685 if (ret != 0)
1686 goto fail;
1687
1688 /* Driver takes ownership of state on successful commit. */
1689 return 0;
1690fail:
1691 if (ret == -EDEADLK)
1692 goto backoff;
1693
1694 drm_atomic_state_free(state);
1695
1696 return ret;
1697backoff:
1698 drm_atomic_state_clear(state);
1699 drm_atomic_legacy_backoff(state);
1700
1701 goto retry;
1702}
1703EXPORT_SYMBOL(drm_atomic_helper_connector_set_property);
1704
1705/**
1706 * drm_atomic_helper_page_flip - execute a legacy page flip
1707 * @crtc: DRM crtc
1708 * @fb: DRM framebuffer
1709 * @event: optional DRM event to signal upon completion
1710 * @flags: flip flags for non-vblank sync'ed updates
1711 *
1712 * Provides a default page flip implementation using the atomic driver interface.
1713 *
1714 * Note that for now so-called async page flips (i.e. updates which are not
1715 * synchronized to vblank) are not supported, since the atomic interfaces have
1716 * no provisions for this yet.
1717 *
1718 * Returns:
1719 * 0 on success, negative errno numbers on failure.
1720 */
1721int drm_atomic_helper_page_flip(struct drm_crtc *crtc,
1722 struct drm_framebuffer *fb,
1723 struct drm_pending_vblank_event *event,
1724 uint32_t flags)
1725{
1726 struct drm_plane *plane = crtc->primary;
1727 struct drm_atomic_state *state;
1728 struct drm_plane_state *plane_state;
1729 struct drm_crtc_state *crtc_state;
1730 int ret = 0;
1731
1732 if (flags & DRM_MODE_PAGE_FLIP_ASYNC)
1733 return -EINVAL;
1734
1735 state = drm_atomic_state_alloc(plane->dev);
1736 if (!state)
1737 return -ENOMEM;
1738
1739 state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
1740retry:
1741 crtc_state = drm_atomic_get_crtc_state(state, crtc);
1742 if (IS_ERR(crtc_state)) {
1743 ret = PTR_ERR(crtc_state);
1744 goto fail;
1745 }
1746 crtc_state->event = event;
1747
1748 plane_state = drm_atomic_get_plane_state(state, plane);
1749 if (IS_ERR(plane_state)) {
1750 ret = PTR_ERR(plane_state);
1751 goto fail;
1752 }
1753
1754 ret = drm_atomic_set_crtc_for_plane(state, plane, crtc);
1755 if (ret != 0)
1756 goto fail;
1757 drm_atomic_set_fb_for_plane(plane_state, fb);
1758
1759 ret = drm_atomic_async_commit(state);
1760 if (ret != 0)
1761 goto fail;
1762
1763 /* TODO: ->page_flip is the only driver callback where the core
1764 * doesn't update plane->fb. For now patch it up here. */
1765 plane->fb = plane->state->fb;
1766
1767 /* Driver takes ownership of state on successful async commit. */
1768 return 0;
1769fail:
1770 if (ret == -EDEADLK)
1771 goto backoff;
1772
1773 drm_atomic_state_free(state);
1774
1775 return ret;
1776backoff:
1777 drm_atomic_state_clear(state);
1778 drm_atomic_legacy_backoff(state);
1779
1780 /*
1781 * Someone might have exchanged the framebuffer while we dropped locks
1782 * in the backoff code. We need to fix up the fb refcount tracking the
1783 * core does for us.
1784 */
1785 plane->old_fb = plane->fb;
1786
1787 goto retry;
1788}
1789EXPORT_SYMBOL(drm_atomic_helper_page_flip);
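
Analogously, the legacy crtc entry points implemented in this file slot into drm_crtc_funcs as follows; foo_crtc_destroy and foo_crtc_atomic_set_property are hypothetical driver functions:

static const struct drm_crtc_funcs foo_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.set_property = drm_atomic_helper_crtc_set_property,
	.atomic_set_property = foo_crtc_atomic_set_property,
	.destroy = foo_crtc_destroy,
	.reset = drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};
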
1790
1791/**
1792 * DOC: atomic state reset and initialization
1793 *
1794 * Both the drm core and the atomic helpers assume that there is always the full
1795 * and correct atomic software state for all connectors, CRTCs and planes
1796 * available. This is a bit of a problem on driver load and also after system
1797 * suspend. One way to solve this is to have a hardware state read-out
1798 * infrastructure which reconstructs the full software state (e.g. the i915
1799 * driver).
1800 *
1801 * The simpler solution is to just reset the software state to everything off,
1802 * which is easiest to do by calling drm_mode_config_reset(). To facilitate this
1803 * the atomic helpers provide default reset implementations for all hooks.
1804 */
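
With the default ->reset hooks below wired up (see the foo_crtc_funcs and foo_plane_funcs sketches earlier), the "reset everything to off" approach amounts to a single call at the end of driver load. A sketch for a hypothetical foo driver:

static int foo_driver_load(struct drm_device *dev, unsigned long flags)
{
	/* ... create and register crtcs, planes, encoders, connectors ... */

	/* allocate a zeroed ("everything off") state for each object */
	drm_mode_config_reset(dev);

	return 0;
}
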
1805
1806/**
1807 * drm_atomic_helper_crtc_reset - default ->reset hook for CRTCs
1808 * @crtc: drm CRTC
1809 *
1810 * Resets the atomic state for @crtc by freeing the state pointer (which might
1811 * be NULL, e.g. at driver load time) and allocating a new empty state object.
1812 */
1813void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc)
1814{
1815 kfree(crtc->state);
1816 crtc->state = kzalloc(sizeof(*crtc->state), GFP_KERNEL);
1817}
1818EXPORT_SYMBOL(drm_atomic_helper_crtc_reset);
1819
1820/**
1821 * drm_atomic_helper_crtc_duplicate_state - default state duplicate hook
1822 * @crtc: drm CRTC
1823 *
1824 * Default CRTC state duplicate hook for drivers which don't have their own
1825 * subclassed CRTC state structure.
1826 */
1827struct drm_crtc_state *
1828drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc)
1829{
1830 struct drm_crtc_state *state;
1831
1832 if (WARN_ON(!crtc->state))
1833 return NULL;
1834
1835 state = kmemdup(crtc->state, sizeof(*crtc->state), GFP_KERNEL);
1836
1837 if (state) {
1838 state->mode_changed = false;
1839 state->planes_changed = false;
1840 state->event = NULL;
1841 }
1842
1843 return state;
1844}
1845EXPORT_SYMBOL(drm_atomic_helper_crtc_duplicate_state);
1846
1847/**
1848 * drm_atomic_helper_crtc_destroy_state - default state destroy hook
1849 * @crtc: drm CRTC
1850 * @state: CRTC state object to release
1851 *
1852 * Default CRTC state destroy hook for drivers which don't have their own
1853 * subclassed CRTC state structure.
1854 */
1855void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
1856 struct drm_crtc_state *state)
1857{
1858 kfree(state);
1859}
1860EXPORT_SYMBOL(drm_atomic_helper_crtc_destroy_state);
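
For contrast with the default hooks, a driver that does subclass the CRTC state supplies its own duplicate/destroy pair. struct foo_crtc_state and its watermark member are hypothetical:

struct foo_crtc_state {
	struct drm_crtc_state base;
	u32 watermark;		/* hypothetical hw-specific state */
};

static struct drm_crtc_state *
foo_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct foo_crtc_state *cur =
		container_of(crtc->state, struct foo_crtc_state, base);
	struct foo_crtc_state *state;

	state = kmemdup(cur, sizeof(*cur), GFP_KERNEL);
	if (!state)
		return NULL;

	/* same resets as the default hook above */
	state->base.mode_changed = false;
	state->base.planes_changed = false;
	state->base.event = NULL;

	return &state->base;
}

static void foo_crtc_destroy_state(struct drm_crtc *crtc,
				   struct drm_crtc_state *state)
{
	kfree(container_of(state, struct foo_crtc_state, base));
}
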
1861
1862/**
1863 * drm_atomic_helper_plane_reset - default ->reset hook for planes
1864 * @plane: drm plane
1865 *
1866 * Resets the atomic state for @plane by freeing the state pointer (which might
1867 * be NULL, e.g. at driver load time) and allocating a new empty state object.
1868 */
1869void drm_atomic_helper_plane_reset(struct drm_plane *plane)
1870{
1871 if (plane->state && plane->state->fb)
1872 drm_framebuffer_unreference(plane->state->fb);
1873
1874 kfree(plane->state);
1875 plane->state = kzalloc(sizeof(*plane->state), GFP_KERNEL);
1876}
1877EXPORT_SYMBOL(drm_atomic_helper_plane_reset);
1878
1879/**
1880 * drm_atomic_helper_plane_duplicate_state - default state duplicate hook
1881 * @plane: drm plane
1882 *
1883 * Default plane state duplicate hook for drivers which don't have their own
1884 * subclassed plane state structure.
1885 */
1886struct drm_plane_state *
1887drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane)
1888{
1889 struct drm_plane_state *state;
1890
1891 if (WARN_ON(!plane->state))
1892 return NULL;
1893
1894 state = kmemdup(plane->state, sizeof(*plane->state), GFP_KERNEL);
1895
1896 if (state && state->fb)
1897 drm_framebuffer_reference(state->fb);
1898
1899 return state;
1900}
1901EXPORT_SYMBOL(drm_atomic_helper_plane_duplicate_state);
1902
1903/**
1904 * drm_atomic_helper_plane_destroy_state - default state destroy hook
1905 * @plane: drm plane
1906 * @state: plane state object to release
1907 *
1908 * Default plane state destroy hook for drivers which don't have their own
1909 * subclassed plane state structure.
1910 */
1911void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
1912 struct drm_plane_state *state)
1913{
1914 if (state->fb)
1915 drm_framebuffer_unreference(state->fb);
1916
1917 kfree(state);
1918}
1919EXPORT_SYMBOL(drm_atomic_helper_plane_destroy_state);
1920
1921/**
1922 * drm_atomic_helper_connector_reset - default ->reset hook for connectors
1923 * @connector: drm connector
1924 *
1925 * Resets the atomic state for @connector by freeing the state pointer (which
1926 * might be NULL, e.g. at driver load time) and allocating a new empty state
1927 * object.
1928 */
1929void drm_atomic_helper_connector_reset(struct drm_connector *connector)
1930{
1931 kfree(connector->state);
1932 connector->state = kzalloc(sizeof(*connector->state), GFP_KERNEL);
1933}
1934EXPORT_SYMBOL(drm_atomic_helper_connector_reset);
1935
1936/**
1937 * drm_atomic_helper_connector_duplicate_state - default state duplicate hook
1938 * @connector: drm connector
1939 *
1940 * Default connector state duplicate hook for drivers which don't have their own
1941 * subclassed connector state structure.
1942 */
1943struct drm_connector_state *
1944drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector)
1945{
1946 if (WARN_ON(!connector->state))
1947 return NULL;
1948
1949 return kmemdup(connector->state, sizeof(*connector->state), GFP_KERNEL);
1950}
1951EXPORT_SYMBOL(drm_atomic_helper_connector_duplicate_state);
1952
1953/**
1954 * drm_atomic_helper_connector_destroy_state - default state destroy hook
1955 * @connector: drm connector
1956 * @state: connector state object to release
1957 *
1958 * Default connector state destroy hook for drivers which don't have their own
1959 * subclassed connector state structure.
1960 */
1961void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
1962 struct drm_connector_state *state)
1963{
1964 kfree(state);
1965}
1966EXPORT_SYMBOL(drm_atomic_helper_connector_destroy_state);
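Taken together, these helpers slot straight into the object funcs tables of a driver that does not subclass the state structs. A hedged wiring sketch follows, assuming the matching CRTC reset/duplicate helpers earlier in this file; the foo_* names are placeholders and the legacy hooks are elided. Note that the drm_*_cleanup() changes further down WARN when an object still carries atomic state but has no atomic_destroy_state hook, so an atomic driver should always fill in all three callbacks.

static const struct drm_plane_funcs foo_plane_funcs = {
	/* ... legacy hooks ... */
	.reset			= drm_atomic_helper_plane_reset,
	.atomic_duplicate_state	= drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
};

static const struct drm_crtc_funcs foo_crtc_funcs = {
	/* ... legacy hooks ... */
	.reset			= drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state	= drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_crtc_destroy_state,
};

static const struct drm_connector_funcs foo_connector_funcs = {
	/* ... legacy hooks ... */
	.reset			= drm_atomic_helper_connector_reset,
	.atomic_duplicate_state	= drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_connector_destroy_state,
};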
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index e79c8d3700d8..5213da499d39 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -683,7 +683,7 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
683 drm_modeset_lock_init(&crtc->mutex); 683 drm_modeset_lock_init(&crtc->mutex);
684 ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC); 684 ret = drm_mode_object_get(dev, &crtc->base, DRM_MODE_OBJECT_CRTC);
685 if (ret) 685 if (ret)
686 goto out; 686 return ret;
687 687
688 crtc->base.properties = &crtc->properties; 688 crtc->base.properties = &crtc->properties;
689 689
@@ -697,9 +697,7 @@ int drm_crtc_init_with_planes(struct drm_device *dev, struct drm_crtc *crtc,
697 if (cursor) 697 if (cursor)
698 cursor->possible_crtcs = 1 << drm_crtc_index(crtc); 698 cursor->possible_crtcs = 1 << drm_crtc_index(crtc);
699 699
700 out: 700 return 0;
701
702 return ret;
703} 701}
704EXPORT_SYMBOL(drm_crtc_init_with_planes); 702EXPORT_SYMBOL(drm_crtc_init_with_planes);
705 703
@@ -723,6 +721,12 @@ void drm_crtc_cleanup(struct drm_crtc *crtc)
723 drm_mode_object_put(dev, &crtc->base); 721 drm_mode_object_put(dev, &crtc->base);
724 list_del(&crtc->head); 722 list_del(&crtc->head);
725 dev->mode_config.num_crtc--; 723 dev->mode_config.num_crtc--;
724
725 WARN_ON(crtc->state && !crtc->funcs->atomic_destroy_state);
726 if (crtc->state && crtc->funcs->atomic_destroy_state)
727 crtc->funcs->atomic_destroy_state(crtc, crtc->state);
728
729 memset(crtc, 0, sizeof(*crtc));
726} 730}
727EXPORT_SYMBOL(drm_crtc_cleanup); 731EXPORT_SYMBOL(drm_crtc_cleanup);
728 732
@@ -766,7 +770,6 @@ static void drm_mode_remove(struct drm_connector *connector,
766/** 770/**
767 * drm_connector_get_cmdline_mode - reads the user's cmdline mode 771 * drm_connector_get_cmdline_mode - reads the user's cmdline mode
768 * @connector: connector to query 772 * @connector: connector to query
769 * @mode: returned mode
770 * 773 *
771 * The kernel supports per-connector configuration of its consoles through 774 * The kernel supports per-connector configuration of its consoles through
772 * use of the video= parameter. This function parses that option and 775 * use of the video= parameter. This function parses that option and
@@ -870,6 +873,8 @@ int drm_connector_init(struct drm_device *dev,
870 873
871 drm_connector_get_cmdline_mode(connector); 874 drm_connector_get_cmdline_mode(connector);
872 875
876 /* We should add connectors at the end to avoid upsetting the connector
877 * index too much. */
873 list_add_tail(&connector->head, &dev->mode_config.connector_list); 878 list_add_tail(&connector->head, &dev->mode_config.connector_list);
874 dev->mode_config.num_connector++; 879 dev->mode_config.num_connector++;
875 880
@@ -905,6 +910,11 @@ void drm_connector_cleanup(struct drm_connector *connector)
905 struct drm_device *dev = connector->dev; 910 struct drm_device *dev = connector->dev;
906 struct drm_display_mode *mode, *t; 911 struct drm_display_mode *mode, *t;
907 912
913 if (connector->tile_group) {
914 drm_mode_put_tile_group(dev, connector->tile_group);
915 connector->tile_group = NULL;
916 }
917
908 list_for_each_entry_safe(mode, t, &connector->probed_modes, head) 918 list_for_each_entry_safe(mode, t, &connector->probed_modes, head)
909 drm_mode_remove(connector, mode); 919 drm_mode_remove(connector, mode);
910 920
@@ -919,6 +929,13 @@ void drm_connector_cleanup(struct drm_connector *connector)
919 connector->name = NULL; 929 connector->name = NULL;
920 list_del(&connector->head); 930 list_del(&connector->head);
921 dev->mode_config.num_connector--; 931 dev->mode_config.num_connector--;
932
933 WARN_ON(connector->state && !connector->funcs->atomic_destroy_state);
934 if (connector->state && connector->funcs->atomic_destroy_state)
935 connector->funcs->atomic_destroy_state(connector,
936 connector->state);
937
938 memset(connector, 0, sizeof(*connector));
922} 939}
923EXPORT_SYMBOL(drm_connector_cleanup); 940EXPORT_SYMBOL(drm_connector_cleanup);
924 941
@@ -933,6 +950,9 @@ unsigned int drm_connector_index(struct drm_connector *connector)
933{ 950{
934 unsigned int index = 0; 951 unsigned int index = 0;
935 struct drm_connector *tmp; 952 struct drm_connector *tmp;
953 struct drm_mode_config *config = &connector->dev->mode_config;
954
955 WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
936 956
937 list_for_each_entry(tmp, &connector->dev->mode_config.connector_list, head) { 957 list_for_each_entry(tmp, &connector->dev->mode_config.connector_list, head) {
938 if (tmp == connector) 958 if (tmp == connector)
@@ -1057,6 +1077,8 @@ void drm_bridge_cleanup(struct drm_bridge *bridge)
1057 list_del(&bridge->head); 1077 list_del(&bridge->head);
1058 dev->mode_config.num_bridge--; 1078 dev->mode_config.num_bridge--;
1059 drm_modeset_unlock_all(dev); 1079 drm_modeset_unlock_all(dev);
1080
1081 memset(bridge, 0, sizeof(*bridge));
1060} 1082}
1061EXPORT_SYMBOL(drm_bridge_cleanup); 1083EXPORT_SYMBOL(drm_bridge_cleanup);
1062 1084
@@ -1123,10 +1145,11 @@ void drm_encoder_cleanup(struct drm_encoder *encoder)
1123 drm_modeset_lock_all(dev); 1145 drm_modeset_lock_all(dev);
1124 drm_mode_object_put(dev, &encoder->base); 1146 drm_mode_object_put(dev, &encoder->base);
1125 kfree(encoder->name); 1147 kfree(encoder->name);
1126 encoder->name = NULL;
1127 list_del(&encoder->head); 1148 list_del(&encoder->head);
1128 dev->mode_config.num_encoder--; 1149 dev->mode_config.num_encoder--;
1129 drm_modeset_unlock_all(dev); 1150 drm_modeset_unlock_all(dev);
1151
1152 memset(encoder, 0, sizeof(*encoder));
1130} 1153}
1131EXPORT_SYMBOL(drm_encoder_cleanup); 1154EXPORT_SYMBOL(drm_encoder_cleanup);
1132 1155
@@ -1153,11 +1176,11 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
1153{ 1176{
1154 int ret; 1177 int ret;
1155 1178
1156 drm_modeset_lock_all(dev);
1157
1158 ret = drm_mode_object_get(dev, &plane->base, DRM_MODE_OBJECT_PLANE); 1179 ret = drm_mode_object_get(dev, &plane->base, DRM_MODE_OBJECT_PLANE);
1159 if (ret) 1180 if (ret)
1160 goto out; 1181 return ret;
1182
1183 drm_modeset_lock_init(&plane->mutex);
1161 1184
1162 plane->base.properties = &plane->properties; 1185 plane->base.properties = &plane->properties;
1163 plane->dev = dev; 1186 plane->dev = dev;
@@ -1167,8 +1190,7 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
1167 if (!plane->format_types) { 1190 if (!plane->format_types) {
1168 DRM_DEBUG_KMS("out of memory when allocating plane\n"); 1191 DRM_DEBUG_KMS("out of memory when allocating plane\n");
1169 drm_mode_object_put(dev, &plane->base); 1192 drm_mode_object_put(dev, &plane->base);
1170 ret = -ENOMEM; 1193 return -ENOMEM;
1171 goto out;
1172 } 1194 }
1173 1195
1174 memcpy(plane->format_types, formats, format_count * sizeof(uint32_t)); 1196 memcpy(plane->format_types, formats, format_count * sizeof(uint32_t));
@@ -1185,10 +1207,7 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
1185 dev->mode_config.plane_type_property, 1207 dev->mode_config.plane_type_property,
1186 plane->type); 1208 plane->type);
1187 1209
1188 out: 1210 return 0;
1189 drm_modeset_unlock_all(dev);
1190
1191 return ret;
1192} 1211}
1193EXPORT_SYMBOL(drm_universal_plane_init); 1212EXPORT_SYMBOL(drm_universal_plane_init);
1194 1213
@@ -1246,6 +1265,12 @@ void drm_plane_cleanup(struct drm_plane *plane)
1246 if (plane->type == DRM_PLANE_TYPE_OVERLAY) 1265 if (plane->type == DRM_PLANE_TYPE_OVERLAY)
1247 dev->mode_config.num_overlay_plane--; 1266 dev->mode_config.num_overlay_plane--;
1248 drm_modeset_unlock_all(dev); 1267 drm_modeset_unlock_all(dev);
1268
1269 WARN_ON(plane->state && !plane->funcs->atomic_destroy_state);
1270 if (plane->state && plane->funcs->atomic_destroy_state)
1271 plane->funcs->atomic_destroy_state(plane, plane->state);
1272
1273 memset(plane, 0, sizeof(*plane));
1249} 1274}
1250EXPORT_SYMBOL(drm_plane_cleanup); 1275EXPORT_SYMBOL(drm_plane_cleanup);
1251 1276
@@ -1328,6 +1353,11 @@ static int drm_mode_create_standard_connector_properties(struct drm_device *dev)
1328 "PATH", 0); 1353 "PATH", 0);
1329 dev->mode_config.path_property = dev_path; 1354 dev->mode_config.path_property = dev_path;
1330 1355
1356 dev->mode_config.tile_property = drm_property_create(dev,
1357 DRM_MODE_PROP_BLOB |
1358 DRM_MODE_PROP_IMMUTABLE,
1359 "TILE", 0);
1360
1331 return 0; 1361 return 0;
1332} 1362}
1333 1363
@@ -1388,12 +1418,13 @@ EXPORT_SYMBOL(drm_mode_create_dvi_i_properties);
1388 * responsible for allocating a list of format names and passing them to 1418 * responsible for allocating a list of format names and passing them to
1389 * this routine. 1419 * this routine.
1390 */ 1420 */
1391int drm_mode_create_tv_properties(struct drm_device *dev, int num_modes, 1421int drm_mode_create_tv_properties(struct drm_device *dev,
1422 unsigned int num_modes,
1392 char *modes[]) 1423 char *modes[])
1393{ 1424{
1394 struct drm_property *tv_selector; 1425 struct drm_property *tv_selector;
1395 struct drm_property *tv_subconnector; 1426 struct drm_property *tv_subconnector;
1396 int i; 1427 unsigned int i;
1397 1428
1398 if (dev->mode_config.tv_select_subconnector_property) 1429 if (dev->mode_config.tv_select_subconnector_property)
1399 return 0; 1430 return 0;
@@ -1491,7 +1522,7 @@ EXPORT_SYMBOL(drm_mode_create_scaling_mode_property);
1491 * connectors. 1522 * connectors.
1492 * 1523 *
1493 * Returns: 1524 * Returns:
1494 * Zero on success, errno on failure. 1525 * Zero on success, negative errno on failure.
1495 */ 1526 */
1496int drm_mode_create_aspect_ratio_property(struct drm_device *dev) 1527int drm_mode_create_aspect_ratio_property(struct drm_device *dev)
1497{ 1528{
@@ -1535,6 +1566,30 @@ int drm_mode_create_dirty_info_property(struct drm_device *dev)
1535} 1566}
1536EXPORT_SYMBOL(drm_mode_create_dirty_info_property); 1567EXPORT_SYMBOL(drm_mode_create_dirty_info_property);
1537 1568
1569/**
1570 * drm_mode_create_suggested_offset_properties - create suggested offset properties
1571 * @dev: DRM device
1572 *
1573 * Create the suggested x/y offset properties for connectors.
1574 */
1575int drm_mode_create_suggested_offset_properties(struct drm_device *dev)
1576{
1577 if (dev->mode_config.suggested_x_property && dev->mode_config.suggested_y_property)
1578 return 0;
1579
1580 dev->mode_config.suggested_x_property =
1581 drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE, "suggested X", 0, 0xffffffff);
1582
1583 dev->mode_config.suggested_y_property =
1584 drm_property_create_range(dev, DRM_MODE_PROP_IMMUTABLE, "suggested Y", 0, 0xffffffff);
1585
1586 if (dev->mode_config.suggested_x_property == NULL ||
1587 dev->mode_config.suggested_y_property == NULL)
1588 return -ENOMEM;
1589 return 0;
1590}
1591EXPORT_SYMBOL(drm_mode_create_suggested_offset_properties);
1592
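Usage illustration (not part of this patch): a virtual-GPU driver creates the pair once at init and attaches both to each connector, updating the values whenever the host suggests a placement. A minimal hedged sketch:

/* Error handling elided; the initial offset 0 is illustrative. */
drm_mode_create_suggested_offset_properties(dev);
drm_object_attach_property(&connector->base,
			   dev->mode_config.suggested_x_property, 0);
drm_object_attach_property(&connector->base,
			   dev->mode_config.suggested_y_property, 0);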
1538static int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group) 1593static int drm_mode_group_init(struct drm_device *dev, struct drm_mode_group *group)
1539{ 1594{
1540 uint32_t total_objects = 0; 1595 uint32_t total_objects = 0;
@@ -1651,7 +1706,7 @@ static void drm_crtc_convert_to_umode(struct drm_mode_modeinfo *out,
1651 * the caller. 1706 * the caller.
1652 * 1707 *
1653 * Returns: 1708 * Returns:
1654 * Zero on success, errno on failure. 1709 * Zero on success, negative errno on failure.
1655 */ 1710 */
1656static int drm_crtc_convert_umode(struct drm_display_mode *out, 1711static int drm_crtc_convert_umode(struct drm_display_mode *out,
1657 const struct drm_mode_modeinfo *in) 1712 const struct drm_mode_modeinfo *in)
@@ -1694,7 +1749,7 @@ static int drm_crtc_convert_umode(struct drm_display_mode *out,
1694 * Called by the user via ioctl. 1749 * Called by the user via ioctl.
1695 * 1750 *
1696 * Returns: 1751 * Returns:
1697 * Zero on success, errno on failure. 1752 * Zero on success, negative errno on failure.
1698 */ 1753 */
1699int drm_mode_getresources(struct drm_device *dev, void *data, 1754int drm_mode_getresources(struct drm_device *dev, void *data,
1700 struct drm_file *file_priv) 1755 struct drm_file *file_priv)
@@ -1745,7 +1800,9 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
1745 card_res->count_fbs = fb_count; 1800 card_res->count_fbs = fb_count;
1746 mutex_unlock(&file_priv->fbs_lock); 1801 mutex_unlock(&file_priv->fbs_lock);
1747 1802
1748 drm_modeset_lock_all(dev); 1803 /* mode_config.mutex protects the connector list against e.g. DP MST
1804 * connector hot-adding. CRTC/Plane lists are invariant. */
1805 mutex_lock(&dev->mode_config.mutex);
1749 if (!drm_is_primary_client(file_priv)) { 1806 if (!drm_is_primary_client(file_priv)) {
1750 1807
1751 mode_group = NULL; 1808 mode_group = NULL;
@@ -1865,7 +1922,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
1865 card_res->count_connectors, card_res->count_encoders); 1922 card_res->count_connectors, card_res->count_encoders);
1866 1923
1867out: 1924out:
1868 drm_modeset_unlock_all(dev); 1925 mutex_unlock(&dev->mode_config.mutex);
1869 return ret; 1926 return ret;
1870} 1927}
1871 1928
@@ -1880,26 +1937,22 @@ out:
1880 * Called by the user via ioctl. 1937 * Called by the user via ioctl.
1881 * 1938 *
1882 * Returns: 1939 * Returns:
1883 * Zero on success, errno on failure. 1940 * Zero on success, negative errno on failure.
1884 */ 1941 */
1885int drm_mode_getcrtc(struct drm_device *dev, 1942int drm_mode_getcrtc(struct drm_device *dev,
1886 void *data, struct drm_file *file_priv) 1943 void *data, struct drm_file *file_priv)
1887{ 1944{
1888 struct drm_mode_crtc *crtc_resp = data; 1945 struct drm_mode_crtc *crtc_resp = data;
1889 struct drm_crtc *crtc; 1946 struct drm_crtc *crtc;
1890 int ret = 0;
1891 1947
1892 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 1948 if (!drm_core_check_feature(dev, DRIVER_MODESET))
1893 return -EINVAL; 1949 return -EINVAL;
1894 1950
1895 drm_modeset_lock_all(dev);
1896
1897 crtc = drm_crtc_find(dev, crtc_resp->crtc_id); 1951 crtc = drm_crtc_find(dev, crtc_resp->crtc_id);
1898 if (!crtc) { 1952 if (!crtc)
1899 ret = -ENOENT; 1953 return -ENOENT;
1900 goto out;
1901 }
1902 1954
1955 drm_modeset_lock_crtc(crtc, crtc->primary);
1903 crtc_resp->x = crtc->x; 1956 crtc_resp->x = crtc->x;
1904 crtc_resp->y = crtc->y; 1957 crtc_resp->y = crtc->y;
1905 crtc_resp->gamma_size = crtc->gamma_size; 1958 crtc_resp->gamma_size = crtc->gamma_size;
@@ -1916,10 +1969,9 @@ int drm_mode_getcrtc(struct drm_device *dev,
1916 } else { 1969 } else {
1917 crtc_resp->mode_valid = 0; 1970 crtc_resp->mode_valid = 0;
1918 } 1971 }
1972 drm_modeset_unlock_crtc(crtc);
1919 1973
1920out: 1974 return 0;
1921 drm_modeset_unlock_all(dev);
1922 return ret;
1923} 1975}
1924 1976
1925static bool drm_mode_expose_to_userspace(const struct drm_display_mode *mode, 1977static bool drm_mode_expose_to_userspace(const struct drm_display_mode *mode,
@@ -1935,6 +1987,15 @@ static bool drm_mode_expose_to_userspace(const struct drm_display_mode *mode,
1935 return true; 1987 return true;
1936} 1988}
1937 1989
1990static struct drm_encoder *drm_connector_get_encoder(struct drm_connector *connector)
1991{
1992 /* For atomic drivers only state objects are synchronously updated and
1993 * protected by modeset locks, so check those first. */
1994 if (connector->state)
1995 return connector->state->best_encoder;
1996 return connector->encoder;
1997}
1998
1938/** 1999/**
1939 * drm_mode_getconnector - get connector configuration 2000 * drm_mode_getconnector - get connector configuration
1940 * @dev: drm device for the ioctl 2001 * @dev: drm device for the ioctl
@@ -1946,13 +2007,14 @@ static bool drm_mode_expose_to_userspace(const struct drm_display_mode *mode,
1946 * Called by the user via ioctl. 2007 * Called by the user via ioctl.
1947 * 2008 *
1948 * Returns: 2009 * Returns:
1949 * Zero on success, errno on failure. 2010 * Zero on success, negative errno on failure.
1950 */ 2011 */
1951int drm_mode_getconnector(struct drm_device *dev, void *data, 2012int drm_mode_getconnector(struct drm_device *dev, void *data,
1952 struct drm_file *file_priv) 2013 struct drm_file *file_priv)
1953{ 2014{
1954 struct drm_mode_get_connector *out_resp = data; 2015 struct drm_mode_get_connector *out_resp = data;
1955 struct drm_connector *connector; 2016 struct drm_connector *connector;
2017 struct drm_encoder *encoder;
1956 struct drm_display_mode *mode; 2018 struct drm_display_mode *mode;
1957 int mode_count = 0; 2019 int mode_count = 0;
1958 int props_count = 0; 2020 int props_count = 0;
@@ -2008,8 +2070,10 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
2008 out_resp->subpixel = connector->display_info.subpixel_order; 2070 out_resp->subpixel = connector->display_info.subpixel_order;
2009 out_resp->connection = connector->status; 2071 out_resp->connection = connector->status;
2010 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); 2072 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
2011 if (connector->encoder) 2073
2012 out_resp->encoder_id = connector->encoder->base.id; 2074 encoder = drm_connector_get_encoder(connector);
2075 if (encoder)
2076 out_resp->encoder_id = encoder->base.id;
2013 else 2077 else
2014 out_resp->encoder_id = 0; 2078 out_resp->encoder_id = 0;
2015 drm_modeset_unlock(&dev->mode_config.connection_mutex); 2079 drm_modeset_unlock(&dev->mode_config.connection_mutex);
@@ -2079,6 +2143,33 @@ out:
2079 return ret; 2143 return ret;
2080} 2144}
2081 2145
2146static struct drm_crtc *drm_encoder_get_crtc(struct drm_encoder *encoder)
2147{
2148 struct drm_connector *connector;
2149 struct drm_device *dev = encoder->dev;
2150 bool uses_atomic = false;
2151
2152 /* For atomic drivers only state objects are synchronously updated and
2153 * protected by modeset locks, so check those first. */
2154 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
2155 if (!connector->state)
2156 continue;
2157
2158 uses_atomic = true;
2159
2160 if (connector->state->best_encoder != encoder)
2161 continue;
2162
2163 return connector->state->crtc;
2164 }
2165
2166 /* Don't return stale data (e.g. pending async disable). */
2167 if (uses_atomic)
2168 return NULL;
2169
2170 return encoder->crtc;
2171}
2172
2082/** 2173/**
2083 * drm_mode_getencoder - get encoder configuration 2174 * drm_mode_getencoder - get encoder configuration
2084 * @dev: drm device for the ioctl 2175 * @dev: drm device for the ioctl
@@ -2090,37 +2181,38 @@ out:
2090 * Called by the user via ioctl. 2181 * Called by the user via ioctl.
2091 * 2182 *
2092 * Returns: 2183 * Returns:
2093 * Zero on success, errno on failure. 2184 * Zero on success, negative errno on failure.
2094 */ 2185 */
2095int drm_mode_getencoder(struct drm_device *dev, void *data, 2186int drm_mode_getencoder(struct drm_device *dev, void *data,
2096 struct drm_file *file_priv) 2187 struct drm_file *file_priv)
2097{ 2188{
2098 struct drm_mode_get_encoder *enc_resp = data; 2189 struct drm_mode_get_encoder *enc_resp = data;
2099 struct drm_encoder *encoder; 2190 struct drm_encoder *encoder;
2100 int ret = 0; 2191 struct drm_crtc *crtc;
2101 2192
2102 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 2193 if (!drm_core_check_feature(dev, DRIVER_MODESET))
2103 return -EINVAL; 2194 return -EINVAL;
2104 2195
2105 drm_modeset_lock_all(dev);
2106 encoder = drm_encoder_find(dev, enc_resp->encoder_id); 2196 encoder = drm_encoder_find(dev, enc_resp->encoder_id);
2107 if (!encoder) { 2197 if (!encoder)
2108 ret = -ENOENT; 2198 return -ENOENT;
2109 goto out;
2110 }
2111 2199
2112 if (encoder->crtc) 2200 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
2201 crtc = drm_encoder_get_crtc(encoder);
2202 if (crtc)
2203 enc_resp->crtc_id = crtc->base.id;
2204 else if (encoder->crtc)
2113 enc_resp->crtc_id = encoder->crtc->base.id; 2205 enc_resp->crtc_id = encoder->crtc->base.id;
2114 else 2206 else
2115 enc_resp->crtc_id = 0; 2207 enc_resp->crtc_id = 0;
2208 drm_modeset_unlock(&dev->mode_config.connection_mutex);
2209
2116 enc_resp->encoder_type = encoder->encoder_type; 2210 enc_resp->encoder_type = encoder->encoder_type;
2117 enc_resp->encoder_id = encoder->base.id; 2211 enc_resp->encoder_id = encoder->base.id;
2118 enc_resp->possible_crtcs = encoder->possible_crtcs; 2212 enc_resp->possible_crtcs = encoder->possible_crtcs;
2119 enc_resp->possible_clones = encoder->possible_clones; 2213 enc_resp->possible_clones = encoder->possible_clones;
2120 2214
2121out: 2215 return 0;
2122 drm_modeset_unlock_all(dev);
2123 return ret;
2124} 2216}
2125 2217
2126/** 2218/**
@@ -2134,7 +2226,7 @@ out:
2134 * Called by the user via ioctl. 2226 * Called by the user via ioctl.
2135 * 2227 *
2136 * Returns: 2228 * Returns:
2137 * Zero on success, errno on failure. 2229 * Zero on success, negative errno on failure.
2138 */ 2230 */
2139int drm_mode_getplane_res(struct drm_device *dev, void *data, 2231int drm_mode_getplane_res(struct drm_device *dev, void *data,
2140 struct drm_file *file_priv) 2232 struct drm_file *file_priv)
@@ -2143,13 +2235,12 @@ int drm_mode_getplane_res(struct drm_device *dev, void *data,
2143 struct drm_mode_config *config; 2235 struct drm_mode_config *config;
2144 struct drm_plane *plane; 2236 struct drm_plane *plane;
2145 uint32_t __user *plane_ptr; 2237 uint32_t __user *plane_ptr;
2146 int copied = 0, ret = 0; 2238 int copied = 0;
2147 unsigned num_planes; 2239 unsigned num_planes;
2148 2240
2149 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 2241 if (!drm_core_check_feature(dev, DRIVER_MODESET))
2150 return -EINVAL; 2242 return -EINVAL;
2151 2243
2152 drm_modeset_lock_all(dev);
2153 config = &dev->mode_config; 2244 config = &dev->mode_config;
2154 2245
2155 if (file_priv->universal_planes) 2246 if (file_priv->universal_planes)
@@ -2165,6 +2256,7 @@ int drm_mode_getplane_res(struct drm_device *dev, void *data,
2165 (plane_resp->count_planes >= num_planes)) { 2256 (plane_resp->count_planes >= num_planes)) {
2166 plane_ptr = (uint32_t __user *)(unsigned long)plane_resp->plane_id_ptr; 2257 plane_ptr = (uint32_t __user *)(unsigned long)plane_resp->plane_id_ptr;
2167 2258
2259 /* Plane lists are invariant, no locking needed. */
2168 list_for_each_entry(plane, &config->plane_list, head) { 2260 list_for_each_entry(plane, &config->plane_list, head) {
2169 /* 2261 /*
2170 * Unless userspace set the 'universal planes' 2262 * Unless userspace set the 'universal planes'
@@ -2174,18 +2266,14 @@ int drm_mode_getplane_res(struct drm_device *dev, void *data,
2174 !file_priv->universal_planes) 2266 !file_priv->universal_planes)
2175 continue; 2267 continue;
2176 2268
2177 if (put_user(plane->base.id, plane_ptr + copied)) { 2269 if (put_user(plane->base.id, plane_ptr + copied))
2178 ret = -EFAULT; 2270 return -EFAULT;
2179 goto out;
2180 }
2181 copied++; 2271 copied++;
2182 } 2272 }
2183 } 2273 }
2184 plane_resp->count_planes = num_planes; 2274 plane_resp->count_planes = num_planes;
2185 2275
2186out: 2276 return 0;
2187 drm_modeset_unlock_all(dev);
2188 return ret;
2189} 2277}
2190 2278
2191/** 2279/**
@@ -2199,7 +2287,7 @@ out:
2199 * Called by the user via ioctl. 2287 * Called by the user via ioctl.
2200 * 2288 *
2201 * Returns: 2289 * Returns:
2202 * Zero on success, errno on failure. 2290 * Zero on success, negative errno on failure.
2203 */ 2291 */
2204int drm_mode_getplane(struct drm_device *dev, void *data, 2292int drm_mode_getplane(struct drm_device *dev, void *data,
2205 struct drm_file *file_priv) 2293 struct drm_file *file_priv)
@@ -2207,18 +2295,15 @@ int drm_mode_getplane(struct drm_device *dev, void *data,
2207 struct drm_mode_get_plane *plane_resp = data; 2295 struct drm_mode_get_plane *plane_resp = data;
2208 struct drm_plane *plane; 2296 struct drm_plane *plane;
2209 uint32_t __user *format_ptr; 2297 uint32_t __user *format_ptr;
2210 int ret = 0;
2211 2298
2212 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 2299 if (!drm_core_check_feature(dev, DRIVER_MODESET))
2213 return -EINVAL; 2300 return -EINVAL;
2214 2301
2215 drm_modeset_lock_all(dev);
2216 plane = drm_plane_find(dev, plane_resp->plane_id); 2302 plane = drm_plane_find(dev, plane_resp->plane_id);
2217 if (!plane) { 2303 if (!plane)
2218 ret = -ENOENT; 2304 return -ENOENT;
2219 goto out;
2220 }
2221 2305
2306 drm_modeset_lock(&plane->mutex, NULL);
2222 if (plane->crtc) 2307 if (plane->crtc)
2223 plane_resp->crtc_id = plane->crtc->base.id; 2308 plane_resp->crtc_id = plane->crtc->base.id;
2224 else 2309 else
@@ -2228,6 +2313,7 @@ int drm_mode_getplane(struct drm_device *dev, void *data,
2228 plane_resp->fb_id = plane->fb->base.id; 2313 plane_resp->fb_id = plane->fb->base.id;
2229 else 2314 else
2230 plane_resp->fb_id = 0; 2315 plane_resp->fb_id = 0;
2316 drm_modeset_unlock(&plane->mutex);
2231 2317
2232 plane_resp->plane_id = plane->base.id; 2318 plane_resp->plane_id = plane->base.id;
2233 plane_resp->possible_crtcs = plane->possible_crtcs; 2319 plane_resp->possible_crtcs = plane->possible_crtcs;
@@ -2243,15 +2329,12 @@ int drm_mode_getplane(struct drm_device *dev, void *data,
2243 if (copy_to_user(format_ptr, 2329 if (copy_to_user(format_ptr,
2244 plane->format_types, 2330 plane->format_types,
2245 sizeof(uint32_t) * plane->format_count)) { 2331 sizeof(uint32_t) * plane->format_count)) {
2246 ret = -EFAULT; 2332 return -EFAULT;
2247 goto out;
2248 } 2333 }
2249 } 2334 }
2250 plane_resp->count_format_types = plane->format_count; 2335 plane_resp->count_format_types = plane->format_count;
2251 2336
2252out: 2337 return 0;
2253 drm_modeset_unlock_all(dev);
2254 return ret;
2255} 2338}
2256 2339
2257/* 2340/*
@@ -2274,7 +2357,7 @@ static int __setplane_internal(struct drm_plane *plane,
2274{ 2357{
2275 int ret = 0; 2358 int ret = 0;
2276 unsigned int fb_width, fb_height; 2359 unsigned int fb_width, fb_height;
2277 int i; 2360 unsigned int i;
2278 2361
2279 /* No fb means shut it down */ 2362 /* No fb means shut it down */
2280 if (!fb) { 2363 if (!fb) {
@@ -2378,13 +2461,12 @@ static int setplane_internal(struct drm_plane *plane,
2378 * valid crtc). 2461 * valid crtc).
2379 * 2462 *
2380 * Returns: 2463 * Returns:
2381 * Zero on success, errno on failure. 2464 * Zero on success, negative errno on failure.
2382 */ 2465 */
2383int drm_mode_setplane(struct drm_device *dev, void *data, 2466int drm_mode_setplane(struct drm_device *dev, void *data,
2384 struct drm_file *file_priv) 2467 struct drm_file *file_priv)
2385{ 2468{
2386 struct drm_mode_set_plane *plane_req = data; 2469 struct drm_mode_set_plane *plane_req = data;
2387 struct drm_mode_object *obj;
2388 struct drm_plane *plane; 2470 struct drm_plane *plane;
2389 struct drm_crtc *crtc = NULL; 2471 struct drm_crtc *crtc = NULL;
2390 struct drm_framebuffer *fb = NULL; 2472 struct drm_framebuffer *fb = NULL;
@@ -2407,14 +2489,12 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
2407 * First, find the plane, crtc, and fb objects. If not available, 2489 * First, find the plane, crtc, and fb objects. If not available,
2408 * we don't bother to call the driver. 2490 * we don't bother to call the driver.
2409 */ 2491 */
2410 obj = drm_mode_object_find(dev, plane_req->plane_id, 2492 plane = drm_plane_find(dev, plane_req->plane_id);
2411 DRM_MODE_OBJECT_PLANE); 2493 if (!plane) {
2412 if (!obj) {
2413 DRM_DEBUG_KMS("Unknown plane ID %d\n", 2494 DRM_DEBUG_KMS("Unknown plane ID %d\n",
2414 plane_req->plane_id); 2495 plane_req->plane_id);
2415 return -ENOENT; 2496 return -ENOENT;
2416 } 2497 }
2417 plane = obj_to_plane(obj);
2418 2498
2419 if (plane_req->fb_id) { 2499 if (plane_req->fb_id) {
2420 fb = drm_framebuffer_lookup(dev, plane_req->fb_id); 2500 fb = drm_framebuffer_lookup(dev, plane_req->fb_id);
@@ -2424,14 +2504,12 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
2424 return -ENOENT; 2504 return -ENOENT;
2425 } 2505 }
2426 2506
2427 obj = drm_mode_object_find(dev, plane_req->crtc_id, 2507 crtc = drm_crtc_find(dev, plane_req->crtc_id);
2428 DRM_MODE_OBJECT_CRTC); 2508 if (!crtc) {
2429 if (!obj) {
2430 DRM_DEBUG_KMS("Unknown crtc ID %d\n", 2509 DRM_DEBUG_KMS("Unknown crtc ID %d\n",
2431 plane_req->crtc_id); 2510 plane_req->crtc_id);
2432 return -ENOENT; 2511 return -ENOENT;
2433 } 2512 }
2434 crtc = obj_to_crtc(obj);
2435 } 2513 }
2436 2514
2437 /* 2515 /*
@@ -2453,7 +2531,7 @@ int drm_mode_setplane(struct drm_device *dev, void *data,
2453 * interface. The only thing it adds is correct refcounting dance. 2531 * interface. The only thing it adds is correct refcounting dance.
2454 * 2532 *
2455 * Returns: 2533 * Returns:
2456 * Zero on success, errno on failure. 2534 * Zero on success, negative errno on failure.
2457 */ 2535 */
2458int drm_mode_set_config_internal(struct drm_mode_set *set) 2536int drm_mode_set_config_internal(struct drm_mode_set *set)
2459{ 2537{
@@ -2546,7 +2624,7 @@ EXPORT_SYMBOL(drm_crtc_check_viewport);
2546 * Called by the user via ioctl. 2624 * Called by the user via ioctl.
2547 * 2625 *
2548 * Returns: 2626 * Returns:
2549 * Zero on success, errno on failure. 2627 * Zero on success, negative errno on failure.
2550 */ 2628 */
2551int drm_mode_setcrtc(struct drm_device *dev, void *data, 2629int drm_mode_setcrtc(struct drm_device *dev, void *data,
2552 struct drm_file *file_priv) 2630 struct drm_file *file_priv)
@@ -2709,7 +2787,7 @@ out:
2709 * userspace wants to make use of these capabilities. 2787 * userspace wants to make use of these capabilities.
2710 * 2788 *
2711 * Returns: 2789 * Returns:
2712 * Zero on success, errno on failure. 2790 * Zero on success, negative errno on failure.
2713 */ 2791 */
2714static int drm_mode_cursor_universal(struct drm_crtc *crtc, 2792static int drm_mode_cursor_universal(struct drm_crtc *crtc,
2715 struct drm_mode_cursor2 *req, 2793 struct drm_mode_cursor2 *req,
@@ -2810,7 +2888,7 @@ static int drm_mode_cursor_common(struct drm_device *dev,
2810 * If this crtc has a universal cursor plane, call that plane's update 2888 * If this crtc has a universal cursor plane, call that plane's update
2811 * handler rather than using legacy cursor handlers. 2889 * handler rather than using legacy cursor handlers.
2812 */ 2890 */
2813 drm_modeset_lock_crtc(crtc); 2891 drm_modeset_lock_crtc(crtc, crtc->cursor);
2814 if (crtc->cursor) { 2892 if (crtc->cursor) {
2815 ret = drm_mode_cursor_universal(crtc, req, file_priv); 2893 ret = drm_mode_cursor_universal(crtc, req, file_priv);
2816 goto out; 2894 goto out;
@@ -2857,7 +2935,7 @@ out:
2857 * Called by the user via ioctl. 2935 * Called by the user via ioctl.
2858 * 2936 *
2859 * Returns: 2937 * Returns:
2860 * Zero on success, errno on failure. 2938 * Zero on success, negative errno on failure.
2861 */ 2939 */
2862int drm_mode_cursor_ioctl(struct drm_device *dev, 2940int drm_mode_cursor_ioctl(struct drm_device *dev,
2863 void *data, struct drm_file *file_priv) 2941 void *data, struct drm_file *file_priv)
@@ -2884,7 +2962,7 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
2884 * Called by the user via ioctl. 2962 * Called by the user via ioctl.
2885 * 2963 *
2886 * Returns: 2964 * Returns:
2887 * Zero on success, errno on failure. 2965 * Zero on success, negative errno on failure.
2888 */ 2966 */
2889int drm_mode_cursor2_ioctl(struct drm_device *dev, 2967int drm_mode_cursor2_ioctl(struct drm_device *dev,
2890 void *data, struct drm_file *file_priv) 2968 void *data, struct drm_file *file_priv)
@@ -2943,23 +3021,21 @@ EXPORT_SYMBOL(drm_mode_legacy_fb_format);
2943 * @file_priv: drm file for the ioctl call 3021 * @file_priv: drm file for the ioctl call
2944 * 3022 *
2945 * Add a new FB to the specified CRTC, given a user request. This is the 3023 * Add a new FB to the specified CRTC, given a user request. This is the
2946 * original addfb ioclt which only supported RGB formats. 3024 * original addfb ioctl which only supported RGB formats.
2947 * 3025 *
2948 * Called by the user via ioctl. 3026 * Called by the user via ioctl.
2949 * 3027 *
2950 * Returns: 3028 * Returns:
2951 * Zero on success, errno on failure. 3029 * Zero on success, negative errno on failure.
2952 */ 3030 */
2953int drm_mode_addfb(struct drm_device *dev, 3031int drm_mode_addfb(struct drm_device *dev,
2954 void *data, struct drm_file *file_priv) 3032 void *data, struct drm_file *file_priv)
2955{ 3033{
2956 struct drm_mode_fb_cmd *or = data; 3034 struct drm_mode_fb_cmd *or = data;
2957 struct drm_mode_fb_cmd2 r = {}; 3035 struct drm_mode_fb_cmd2 r = {};
2958 struct drm_mode_config *config = &dev->mode_config; 3036 int ret;
2959 struct drm_framebuffer *fb;
2960 int ret = 0;
2961 3037
2962 /* Use new struct with format internally */ 3038 /* convert to new format and call new ioctl */
2963 r.fb_id = or->fb_id; 3039 r.fb_id = or->fb_id;
2964 r.width = or->width; 3040 r.width = or->width;
2965 r.height = or->height; 3041 r.height = or->height;
@@ -2967,28 +3043,13 @@ int drm_mode_addfb(struct drm_device *dev,
2967 r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth); 3043 r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth);
2968 r.handles[0] = or->handle; 3044 r.handles[0] = or->handle;
2969 3045
2970 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 3046 ret = drm_mode_addfb2(dev, &r, file_priv);
2971 return -EINVAL; 3047 if (ret)
2972 3048 return ret;
2973 if ((config->min_width > r.width) || (r.width > config->max_width))
2974 return -EINVAL;
2975
2976 if ((config->min_height > r.height) || (r.height > config->max_height))
2977 return -EINVAL;
2978 3049
2979 fb = dev->mode_config.funcs->fb_create(dev, file_priv, &r); 3050 or->fb_id = r.fb_id;
2980 if (IS_ERR(fb)) {
2981 DRM_DEBUG_KMS("could not create framebuffer\n");
2982 return PTR_ERR(fb);
2983 }
2984 3051
2985 mutex_lock(&file_priv->fbs_lock); 3052 return 0;
2986 or->fb_id = fb->base.id;
2987 list_add(&fb->filp_head, &file_priv->fbs);
2988 DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
2989 mutex_unlock(&file_priv->fbs_lock);
2990
2991 return ret;
2992} 3053}
2993 3054
2994static int format_check(const struct drm_mode_fb_cmd2 *r) 3055static int format_check(const struct drm_mode_fb_cmd2 *r)
@@ -3080,7 +3141,7 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r)
3080 num_planes = drm_format_num_planes(r->pixel_format); 3141 num_planes = drm_format_num_planes(r->pixel_format);
3081 3142
3082 if (r->width == 0 || r->width % hsub) { 3143 if (r->width == 0 || r->width % hsub) {
3083 DRM_DEBUG_KMS("bad framebuffer width %u\n", r->height); 3144 DRM_DEBUG_KMS("bad framebuffer width %u\n", r->width);
3084 return -EINVAL; 3145 return -EINVAL;
3085 } 3146 }
3086 3147
@@ -3170,7 +3231,7 @@ static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev,
3170 * Called by the user via ioctl. 3231 * Called by the user via ioctl.
3171 * 3232 *
3172 * Returns: 3233 * Returns:
3173 * Zero on success, errno on failure. 3234 * Zero on success, negative errno on failure.
3174 */ 3235 */
3175int drm_mode_addfb2(struct drm_device *dev, 3236int drm_mode_addfb2(struct drm_device *dev,
3176 void *data, struct drm_file *file_priv) 3237 void *data, struct drm_file *file_priv)
@@ -3198,7 +3259,7 @@ int drm_mode_addfb2(struct drm_device *dev,
3198 * Called by the user via ioctl. 3259 * Called by the user via ioctl.
3199 * 3260 *
3200 * Returns: 3261 * Returns:
3201 * Zero on success, errno on failure. 3262 * Zero on success, negative errno on failure.
3202 */ 3263 */
3203int drm_mode_rmfb(struct drm_device *dev, 3264int drm_mode_rmfb(struct drm_device *dev,
3204 void *data, struct drm_file *file_priv) 3265 void *data, struct drm_file *file_priv)
@@ -3252,7 +3313,7 @@ fail_lookup:
3252 * Called by the user via ioctl. 3313 * Called by the user via ioctl.
3253 * 3314 *
3254 * Returns: 3315 * Returns:
3255 * Zero on success, errno on failure. 3316 * Zero on success, negative errno on failure.
3256 */ 3317 */
3257int drm_mode_getfb(struct drm_device *dev, 3318int drm_mode_getfb(struct drm_device *dev,
3258 void *data, struct drm_file *file_priv) 3319 void *data, struct drm_file *file_priv)
@@ -3313,7 +3374,7 @@ int drm_mode_getfb(struct drm_device *dev,
3313 * Called by the user via ioctl. 3374 * Called by the user via ioctl.
3314 * 3375 *
3315 * Returns: 3376 * Returns:
3316 * Zero on success, errno on failure. 3377 * Zero on success, negative errno on failure.
3317 */ 3378 */
3318int drm_mode_dirtyfb_ioctl(struct drm_device *dev, 3379int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
3319 void *data, struct drm_file *file_priv) 3380 void *data, struct drm_file *file_priv)
@@ -3393,7 +3454,7 @@ out_err1:
3393 * Called by the user via ioctl. 3454 * Called by the user via ioctl.
3394 * 3455 *
3395 * Returns: 3456 * Returns:
3396 * Zero on success, errno on failure. 3457 * Zero on success, negative errno on failure.
3397 */ 3458 */
3398void drm_fb_release(struct drm_file *priv) 3459void drm_fb_release(struct drm_file *priv)
3399{ 3460{
@@ -3402,7 +3463,7 @@ void drm_fb_release(struct drm_file *priv)
3402 3463
3403 /* 3464 /*
3404 * When the file gets released that means no one else can access the fb 3465 * When the file gets released that means no one else can access the fb
3405 * list any more, so no need to grab fpriv->fbs_lock. And we need to to 3466 * list any more, so no need to grab fpriv->fbs_lock. And we need to
3406 * avoid upsetting lockdep since the universal cursor code adds a 3467 * avoid upsetting lockdep since the universal cursor code adds a
3407 * framebuffer while holding mutex locks. 3468 * framebuffer while holding mutex locks.
3408 * 3469 *
@@ -3435,6 +3496,10 @@ void drm_fb_release(struct drm_file *priv)
3435 * object with drm_object_attach_property. The returned property object must be 3496 * object with drm_object_attach_property. The returned property object must be
3436 * freed with drm_property_destroy. 3497 * freed with drm_property_destroy.
3437 * 3498 *
3499 * Note that the DRM core keeps a per-device list of properties and that, if
3500 * drm_mode_config_cleanup() is called, it will destroy all properties created
3501 * by the driver.
3502 *
3438 * Returns: 3503 * Returns:
3439 * A pointer to the newly created property on success, NULL on failure. 3504 * A pointer to the newly created property on success, NULL on failure.
3440 */ 3505 */
@@ -3462,7 +3527,7 @@ struct drm_property *drm_property_create(struct drm_device *dev, int flags,
3462 3527
3463 property->flags = flags; 3528 property->flags = flags;
3464 property->num_values = num_values; 3529 property->num_values = num_values;
3465 INIT_LIST_HEAD(&property->enum_blob_list); 3530 INIT_LIST_HEAD(&property->enum_list);
3466 3531
3467 if (name) { 3532 if (name) {
3468 strncpy(property->name, name, DRM_PROP_NAME_LEN); 3533 strncpy(property->name, name, DRM_PROP_NAME_LEN);
@@ -3611,7 +3676,7 @@ static struct drm_property *property_create_range(struct drm_device *dev,
3611 * object with drm_object_attach_property. The returned property object must be 3676 * object with drm_object_attach_property. The returned property object must be
3612 * freed with drm_property_destroy. 3677 * freed with drm_property_destroy.
3613 * 3678 *
3614 * Userspace is allowed to set any interger value in the (min, max) range 3679 * Userspace is allowed to set any integer value in the (min, max) range
3615 * inclusive. 3680 * inclusive.
3616 * 3681 *
3617 * Returns: 3682 * Returns:
@@ -3684,8 +3749,8 @@ int drm_property_add_enum(struct drm_property *property, int index,
3684 (value > 63)) 3749 (value > 63))
3685 return -EINVAL; 3750 return -EINVAL;
3686 3751
3687 if (!list_empty(&property->enum_blob_list)) { 3752 if (!list_empty(&property->enum_list)) {
3688 list_for_each_entry(prop_enum, &property->enum_blob_list, head) { 3753 list_for_each_entry(prop_enum, &property->enum_list, head) {
3689 if (prop_enum->value == value) { 3754 if (prop_enum->value == value) {
3690 strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN); 3755 strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
3691 prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0'; 3756 prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
@@ -3703,7 +3768,7 @@ int drm_property_add_enum(struct drm_property *property, int index,
3703 prop_enum->value = value; 3768 prop_enum->value = value;
3704 3769
3705 property->values[index] = value; 3770 property->values[index] = value;
3706 list_add_tail(&prop_enum->head, &property->enum_blob_list); 3771 list_add_tail(&prop_enum->head, &property->enum_list);
3707 return 0; 3772 return 0;
3708} 3773}
3709EXPORT_SYMBOL(drm_property_add_enum); 3774EXPORT_SYMBOL(drm_property_add_enum);
@@ -3720,7 +3785,7 @@ void drm_property_destroy(struct drm_device *dev, struct drm_property *property)
3720{ 3785{
3721 struct drm_property_enum *prop_enum, *pt; 3786 struct drm_property_enum *prop_enum, *pt;
3722 3787
3723 list_for_each_entry_safe(prop_enum, pt, &property->enum_blob_list, head) { 3788 list_for_each_entry_safe(prop_enum, pt, &property->enum_list, head) {
3724 list_del(&prop_enum->head); 3789 list_del(&prop_enum->head);
3725 kfree(prop_enum); 3790 kfree(prop_enum);
3726 } 3791 }
@@ -3823,17 +3888,20 @@ int drm_object_property_get_value(struct drm_mode_object *obj,
3823EXPORT_SYMBOL(drm_object_property_get_value); 3888EXPORT_SYMBOL(drm_object_property_get_value);
3824 3889
3825/** 3890/**
3826 * drm_mode_getproperty_ioctl - get the current value of a connector's property 3891 * drm_mode_getproperty_ioctl - get the property metadata
3827 * @dev: DRM device 3892 * @dev: DRM device
3828 * @data: ioctl data 3893 * @data: ioctl data
3829 * @file_priv: DRM file info 3894 * @file_priv: DRM file info
3830 * 3895 *
3831 * This function retrieves the current value for an connectors's property. 3896 * This function retrieves the metadata for a given property, like the different
3897 * possible values for an enum property or the limits for a range property.
3898 *
3899 * Blob properties are special: this ioctl only carries their metadata, and userspace reads the actual blob contents through the dedicated get-blob ioctl.
3832 * 3900 *
3833 * Called by the user via ioctl. 3901 * Called by the user via ioctl.
3834 * 3902 *
3835 * Returns: 3903 * Returns:
3836 * Zero on success, errno on failure. 3904 * Zero on success, negative errno on failure.
3837 */ 3905 */
3838int drm_mode_getproperty_ioctl(struct drm_device *dev, 3906int drm_mode_getproperty_ioctl(struct drm_device *dev,
3839 void *data, struct drm_file *file_priv) 3907 void *data, struct drm_file *file_priv)
@@ -3841,16 +3909,12 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
3841 struct drm_mode_get_property *out_resp = data; 3909 struct drm_mode_get_property *out_resp = data;
3842 struct drm_property *property; 3910 struct drm_property *property;
3843 int enum_count = 0; 3911 int enum_count = 0;
3844 int blob_count = 0;
3845 int value_count = 0; 3912 int value_count = 0;
3846 int ret = 0, i; 3913 int ret = 0, i;
3847 int copied; 3914 int copied;
3848 struct drm_property_enum *prop_enum; 3915 struct drm_property_enum *prop_enum;
3849 struct drm_mode_property_enum __user *enum_ptr; 3916 struct drm_mode_property_enum __user *enum_ptr;
3850 struct drm_property_blob *prop_blob;
3851 uint32_t __user *blob_id_ptr;
3852 uint64_t __user *values_ptr; 3917 uint64_t __user *values_ptr;
3853 uint32_t __user *blob_length_ptr;
3854 3918
3855 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 3919 if (!drm_core_check_feature(dev, DRIVER_MODESET))
3856 return -EINVAL; 3920 return -EINVAL;
@@ -3864,11 +3928,8 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
3864 3928
3865 if (drm_property_type_is(property, DRM_MODE_PROP_ENUM) || 3929 if (drm_property_type_is(property, DRM_MODE_PROP_ENUM) ||
3866 drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) { 3930 drm_property_type_is(property, DRM_MODE_PROP_BITMASK)) {
3867 list_for_each_entry(prop_enum, &property->enum_blob_list, head) 3931 list_for_each_entry(prop_enum, &property->enum_list, head)
3868 enum_count++; 3932 enum_count++;
3869 } else if (drm_property_type_is(property, DRM_MODE_PROP_BLOB)) {
3870 list_for_each_entry(prop_blob, &property->enum_blob_list, head)
3871 blob_count++;
3872 } 3933 }
3873 3934
3874 value_count = property->num_values; 3935 value_count = property->num_values;
@@ -3893,7 +3954,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
3893 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) { 3954 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
3894 copied = 0; 3955 copied = 0;
3895 enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr; 3956 enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
3896 list_for_each_entry(prop_enum, &property->enum_blob_list, head) { 3957 list_for_each_entry(prop_enum, &property->enum_list, head) {
3897 3958
3898 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) { 3959 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
3899 ret = -EFAULT; 3960 ret = -EFAULT;
@@ -3911,35 +3972,24 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
3911 out_resp->count_enum_blobs = enum_count; 3972 out_resp->count_enum_blobs = enum_count;
3912 } 3973 }
3913 3974
3914 if (drm_property_type_is(property, DRM_MODE_PROP_BLOB)) { 3975 /*
3915 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) { 3976 * NOTE: The idea seems to have been to use this to read all the blob
3916 copied = 0; 3977 * property values. But nothing ever added them to the corresponding
3917 blob_id_ptr = (uint32_t __user *)(unsigned long)out_resp->enum_blob_ptr; 3978 * list, userspace always used the special-purpose get_blob ioctl to
3918 blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr; 3979 * read the value for a blob property. It also doesn't make a lot of
3919 3980 * sense to return values here when everything else is just metadata for
3920 list_for_each_entry(prop_blob, &property->enum_blob_list, head) { 3981 * the property itself.
3921 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) { 3982 */
3922 ret = -EFAULT; 3983 if (drm_property_type_is(property, DRM_MODE_PROP_BLOB))
3923 goto done; 3984 out_resp->count_enum_blobs = 0;
3924 }
3925
3926 if (put_user(prop_blob->length, blob_length_ptr + copied)) {
3927 ret = -EFAULT;
3928 goto done;
3929 }
3930
3931 copied++;
3932 }
3933 }
3934 out_resp->count_enum_blobs = blob_count;
3935 }
3936done: 3985done:
3937 drm_modeset_unlock_all(dev); 3986 drm_modeset_unlock_all(dev);
3938 return ret; 3987 return ret;
3939} 3988}
3940 3989
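The NOTE above is the behavioural contract: this ioctl only ever returns property metadata, and for blob properties userspace fetches the actual bytes via the get-blob ioctl, typically in a two-call length-then-data pattern. A rough userspace-side sketch, where fd and blob_id are assumed valid and drmIoctl is libdrm's restarting ioctl wrapper:

#include <stdint.h>
#include <stdlib.h>
#include <xf86drm.h>

struct drm_mode_get_blob get = { .blob_id = blob_id };

/* First call with length == 0: the kernel reports the blob size. */
if (drmIoctl(fd, DRM_IOCTL_MODE_GETPROPBLOB, &get) == 0 && get.length) {
	void *buf = malloc(get.length);

	get.data = (uint64_t)(uintptr_t)buf;
	/* Second call: the kernel copies the blob contents into buf. */
	drmIoctl(fd, DRM_IOCTL_MODE_GETPROPBLOB, &get);
}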
3941static struct drm_property_blob *drm_property_create_blob(struct drm_device *dev, int length, 3990static struct drm_property_blob *
3942 void *data) 3991drm_property_create_blob(struct drm_device *dev, size_t length,
3992 const void *data)
3943{ 3993{
3944 struct drm_property_blob *blob; 3994 struct drm_property_blob *blob;
3945 int ret; 3995 int ret;
@@ -3985,7 +4035,7 @@ static void drm_property_destroy_blob(struct drm_device *dev,
3985 * Called by the user via ioctl. 4035 * Called by the user via ioctl.
3986 * 4036 *
3987 * Returns: 4037 * Returns:
3988 * Zero on success, errno on failure. 4038 * Zero on success, negative errno on failure.
3989 */ 4039 */
3990int drm_mode_getblob_ioctl(struct drm_device *dev, 4040int drm_mode_getblob_ioctl(struct drm_device *dev,
3991 void *data, struct drm_file *file_priv) 4041 void *data, struct drm_file *file_priv)
@@ -4019,12 +4069,25 @@ done:
4019 return ret; 4069 return ret;
4020} 4070}
4021 4071
4072/**
4073 * drm_mode_connector_set_path_property - set tile property on connector
4074 * @connector: connector to set property on.
4075 * @path: path to use for property.
4076 *
4077 * This creates a property to expose to userspace to specify a
4078 * connector path. This is mainly used for DisplayPort MST where
4079 * connectors have a topology and we want to allow userspace to give
4080 * them more meaningful names.
4081 *
4082 * Returns:
4083 * Zero on success, negative errno on failure.
4084 */
4022int drm_mode_connector_set_path_property(struct drm_connector *connector, 4085int drm_mode_connector_set_path_property(struct drm_connector *connector,
4023 char *path) 4086 const char *path)
4024{ 4087{
4025 struct drm_device *dev = connector->dev; 4088 struct drm_device *dev = connector->dev;
4026 int ret, size; 4089 size_t size = strlen(path) + 1;
4027 size = strlen(path) + 1; 4090 int ret;
4028 4091
4029 connector->path_blob_ptr = drm_property_create_blob(connector->dev, 4092 connector->path_blob_ptr = drm_property_create_blob(connector->dev,
4030 size, path); 4093 size, path);
@@ -4039,6 +4102,52 @@ int drm_mode_connector_set_path_property(struct drm_connector *connector,
4039EXPORT_SYMBOL(drm_mode_connector_set_path_property); 4102EXPORT_SYMBOL(drm_mode_connector_set_path_property);
4040 4103
4041/** 4104/**
4105 * drm_mode_connector_set_tile_property - set tile property on connector
4106 * @connector: connector to set property on.
4107 *
4108 * This looks up the tile information for a connector, and if any exists
4109 * creates a blob property for userspace to parse. The property takes the
4110 * form of 8 integers using ':' as a separator.
4111 *
4112 * Returns:
4113 * Zero on success, negative errno on failure.
4114 */
4115int drm_mode_connector_set_tile_property(struct drm_connector *connector)
4116{
4117 struct drm_device *dev = connector->dev;
4118 int ret, size;
4119 char tile[256];
4120
4121 if (connector->tile_blob_ptr)
4122 drm_property_destroy_blob(dev, connector->tile_blob_ptr);
4123
4124 if (!connector->has_tile) {
4125 connector->tile_blob_ptr = NULL;
4126 ret = drm_object_property_set_value(&connector->base,
4127 dev->mode_config.tile_property, 0);
4128 return ret;
4129 }
4130
4131 snprintf(tile, 256, "%d:%d:%d:%d:%d:%d:%d:%d",
4132 connector->tile_group->id, connector->tile_is_single_monitor,
4133 connector->num_h_tile, connector->num_v_tile,
4134 connector->tile_h_loc, connector->tile_v_loc,
4135 connector->tile_h_size, connector->tile_v_size);
4136 size = strlen(tile) + 1;
4137
4138 connector->tile_blob_ptr = drm_property_create_blob(connector->dev,
4139 size, tile);
4140 if (!connector->tile_blob_ptr)
4141 return -EINVAL;
4142
4143 ret = drm_object_property_set_value(&connector->base,
4144 dev->mode_config.tile_property,
4145 connector->tile_blob_ptr->base.id);
4146 return ret;
4147}
4148EXPORT_SYMBOL(drm_mode_connector_set_tile_property);
4149
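For reference, the snprintf() above fixes the decoding: userspace can recover the eight fields with a matching sscanf(), in the same order. Sketch only, with tile_blob standing in for the fetched, NUL-terminated blob contents:

int group_id, is_single, num_h, num_v, h_loc, v_loc, h_size, v_size;

if (sscanf(tile_blob, "%d:%d:%d:%d:%d:%d:%d:%d",
	   &group_id, &is_single, &num_h, &num_v,
	   &h_loc, &v_loc, &h_size, &v_size) == 8) {
	/* This connector drives the tile at (h_loc, v_loc) in a
	 * num_h x num_v grid, h_size x v_size pixels per tile. */
}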
4150/**
4042 * drm_mode_connector_update_edid_property - update the edid property of a connector 4151 * drm_mode_connector_update_edid_property - update the edid property of a connector
4043 * @connector: drm connector 4152 * @connector: drm connector
4044 * @edid: new value of the edid property 4153 * @edid: new value of the edid property
@@ -4047,13 +4156,14 @@ EXPORT_SYMBOL(drm_mode_connector_set_path_property);
4047 * connector's edid property. 4156 * connector's edid property.
4048 * 4157 *
4049 * Returns: 4158 * Returns:
4050 * Zero on success, errno on failure. 4159 * Zero on success, negative errno on failure.
4051 */ 4160 */
4052int drm_mode_connector_update_edid_property(struct drm_connector *connector, 4161int drm_mode_connector_update_edid_property(struct drm_connector *connector,
4053 struct edid *edid) 4162 const struct edid *edid)
4054{ 4163{
4055 struct drm_device *dev = connector->dev; 4164 struct drm_device *dev = connector->dev;
4056 int ret, size; 4165 size_t size;
4166 int ret;
4057 4167
4058 /* ignore requests to set edid when overridden */ 4168 /* ignore requests to set edid when overridden */
4059 if (connector->override_edid) 4169 if (connector->override_edid)
@@ -4143,7 +4253,7 @@ static bool drm_property_change_is_valid(struct drm_property *property,
4143 * Called by the user via ioctl. 4253 * Called by the user via ioctl.
4144 * 4254 *
4145 * Returns: 4255 * Returns:
4146 * Zero on success, errno on failure. 4256 * Zero on success, negative errno on failure.
4147 */ 4257 */
4148int drm_mode_connector_property_set_ioctl(struct drm_device *dev, 4258int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
4149 void *data, struct drm_file *file_priv) 4259 void *data, struct drm_file *file_priv)
@@ -4226,7 +4336,7 @@ int drm_mode_plane_set_obj_prop(struct drm_plane *plane,
4226EXPORT_SYMBOL(drm_mode_plane_set_obj_prop); 4336EXPORT_SYMBOL(drm_mode_plane_set_obj_prop);
4227 4337
4228/** 4338/**
4229 * drm_mode_getproperty_ioctl - get the current value of a object's property 4339 * drm_mode_obj_get_properties_ioctl - get the current value of an object's property
4230 * @dev: DRM device 4340 * @dev: DRM device
4231 * @data: ioctl data 4341 * @data: ioctl data
4232 * @file_priv: DRM file info 4342 * @file_priv: DRM file info
@@ -4238,7 +4348,7 @@ EXPORT_SYMBOL(drm_mode_plane_set_obj_prop);
4238 * Called by the user via ioctl. 4348 * Called by the user via ioctl.
4239 * 4349 *
4240 * Returns: 4350 * Returns:
4241 * Zero on success, errno on failure. 4351 * Zero on success, negative errno on failure.
4242 */ 4352 */
4243int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data, 4353int drm_mode_obj_get_properties_ioctl(struct drm_device *dev, void *data,
4244 struct drm_file *file_priv) 4354 struct drm_file *file_priv)
@@ -4310,7 +4420,7 @@ out:
4310 * Called by the user via ioctl. 4420 * Called by the user via ioctl.
4311 * 4421 *
4312 * Returns: 4422 * Returns:
4313 * Zero on success, errno on failure. 4423 * Zero on success, negative errno on failure.
4314 */ 4424 */
4315int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data, 4425int drm_mode_obj_set_property_ioctl(struct drm_device *dev, void *data,
4316 struct drm_file *file_priv) 4426 struct drm_file *file_priv)
@@ -4382,7 +4492,7 @@ out:
4382 * possible_clones and possible_crtcs bitmasks. 4492 * possible_clones and possible_crtcs bitmasks.
4383 * 4493 *
4384 * Returns: 4494 * Returns:
4385 * Zero on success, errno on failure. 4495 * Zero on success, negative errno on failure.
4386 */ 4496 */
4387int drm_mode_connector_attach_encoder(struct drm_connector *connector, 4497int drm_mode_connector_attach_encoder(struct drm_connector *connector,
4388 struct drm_encoder *encoder) 4498 struct drm_encoder *encoder)
@@ -4409,7 +4519,7 @@ EXPORT_SYMBOL(drm_mode_connector_attach_encoder);
4409 * fixed gamma table size. 4519 * fixed gamma table size.
4410 * 4520 *
4411 * Returns: 4521 * Returns:
4412 * Zero on success, errno on failure. 4522 * Zero on success, negative errno on failure.
4413 */ 4523 */
4414int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc, 4524int drm_mode_crtc_set_gamma_size(struct drm_crtc *crtc,
4415 int gamma_size) 4525 int gamma_size)
@@ -4438,7 +4548,7 @@ EXPORT_SYMBOL(drm_mode_crtc_set_gamma_size);
4438 * Called by the user via ioctl. 4548 * Called by the user via ioctl.
4439 * 4549 *
4440 * Returns: 4550 * Returns:
4441 * Zero on success, errno on failure. 4551 * Zero on success, negative errno on failure.
4442 */ 4552 */
4443int drm_mode_gamma_set_ioctl(struct drm_device *dev, 4553int drm_mode_gamma_set_ioctl(struct drm_device *dev,
4444 void *data, struct drm_file *file_priv) 4554 void *data, struct drm_file *file_priv)
@@ -4510,7 +4620,7 @@ out:
4510 * Called by the user via ioctl. 4620 * Called by the user via ioctl.
4511 * 4621 *
4512 * Returns: 4622 * Returns:
4513 * Zero on success, errno on failure. 4623 * Zero on success, negative errno on failure.
4514 */ 4624 */
4515int drm_mode_gamma_get_ioctl(struct drm_device *dev, 4625int drm_mode_gamma_get_ioctl(struct drm_device *dev,
4516 void *data, struct drm_file *file_priv) 4626 void *data, struct drm_file *file_priv)
@@ -4576,7 +4686,7 @@ out:
4576 * Called by the user via ioctl. 4686 * Called by the user via ioctl.
4577 * 4687 *
4578 * Returns: 4688 * Returns:
4579 * Zero on success, errno on failure. 4689 * Zero on success, negative errno on failure.
4580 */ 4690 */
4581int drm_mode_page_flip_ioctl(struct drm_device *dev, 4691int drm_mode_page_flip_ioctl(struct drm_device *dev,
4582 void *data, struct drm_file *file_priv) 4692 void *data, struct drm_file *file_priv)
@@ -4599,7 +4709,7 @@ int drm_mode_page_flip_ioctl(struct drm_device *dev,
4599 if (!crtc) 4709 if (!crtc)
4600 return -ENOENT; 4710 return -ENOENT;
4601 4711
4602 drm_modeset_lock_crtc(crtc); 4712 drm_modeset_lock_crtc(crtc, crtc->primary);
4603 if (crtc->primary->fb == NULL) { 4713 if (crtc->primary->fb == NULL) {
4604 /* The framebuffer is currently unbound, presumably 4714 /* The framebuffer is currently unbound, presumably
4605 * due to a hotplug event, that userspace has not 4715 * due to a hotplug event, that userspace has not
@@ -4742,7 +4852,7 @@ EXPORT_SYMBOL(drm_mode_config_reset);
4742 * Called by the user via ioctl. 4852 * Called by the user via ioctl.
4743 * 4853 *
4744 * Returns: 4854 * Returns:
4745 * Zero on success, errno on failure. 4855 * Zero on success, negative errno on failure.
4746 */ 4856 */
4747int drm_mode_create_dumb_ioctl(struct drm_device *dev, 4857int drm_mode_create_dumb_ioctl(struct drm_device *dev,
4748 void *data, struct drm_file *file_priv) 4858 void *data, struct drm_file *file_priv)
@@ -4769,6 +4879,16 @@ int drm_mode_create_dumb_ioctl(struct drm_device *dev,
4769 if (PAGE_ALIGN(size) == 0) 4879 if (PAGE_ALIGN(size) == 0)
4770 return -EINVAL; 4880 return -EINVAL;
4771 4881
4882 /*
4883 * handle, pitch and size are output parameters. Zero them out to
4884 * prevent drivers from accidentally using uninitialized data. Since
 4885	 * not all existing userspace is clearing these fields properly, we
 4886	 * cannot reject IOCTLs with garbage in them.
4887 */
4888 args->handle = 0;
4889 args->pitch = 0;
4890 args->size = 0;
4891
4772 return dev->driver->dumb_create(file_priv, dev, args); 4892 return dev->driver->dumb_create(file_priv, dev, args);
4773} 4893}
4774 4894
@@ -4784,7 +4904,7 @@ int drm_mode_create_dumb_ioctl(struct drm_device *dev,
4784 * Called by the user via ioctl. 4904 * Called by the user via ioctl.
4785 * 4905 *
4786 * Returns: 4906 * Returns:
4787 * Zero on success, errno on failure. 4907 * Zero on success, negative errno on failure.
4788 */ 4908 */
4789int drm_mode_mmap_dumb_ioctl(struct drm_device *dev, 4909int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
4790 void *data, struct drm_file *file_priv) 4910 void *data, struct drm_file *file_priv)
@@ -4811,7 +4931,7 @@ int drm_mode_mmap_dumb_ioctl(struct drm_device *dev,
4811 * Called by the user via ioctl. 4931 * Called by the user via ioctl.
4812 * 4932 *
4813 * Returns: 4933 * Returns:
4814 * Zero on success, errno on failure. 4934 * Zero on success, negative errno on failure.
4815 */ 4935 */
4816int drm_mode_destroy_dumb_ioctl(struct drm_device *dev, 4936int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
4817 void *data, struct drm_file *file_priv) 4937 void *data, struct drm_file *file_priv)
@@ -5097,6 +5217,7 @@ void drm_mode_config_init(struct drm_device *dev)
5097 INIT_LIST_HEAD(&dev->mode_config.property_blob_list); 5217 INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
5098 INIT_LIST_HEAD(&dev->mode_config.plane_list); 5218 INIT_LIST_HEAD(&dev->mode_config.plane_list);
5099 idr_init(&dev->mode_config.crtc_idr); 5219 idr_init(&dev->mode_config.crtc_idr);
5220 idr_init(&dev->mode_config.tile_idr);
5100 5221
5101 drm_modeset_lock_all(dev); 5222 drm_modeset_lock_all(dev);
5102 drm_mode_create_standard_connector_properties(dev); 5223 drm_mode_create_standard_connector_properties(dev);
@@ -5184,6 +5305,7 @@ void drm_mode_config_cleanup(struct drm_device *dev)
5184 crtc->funcs->destroy(crtc); 5305 crtc->funcs->destroy(crtc);
5185 } 5306 }
5186 5307
5308 idr_destroy(&dev->mode_config.tile_idr);
5187 idr_destroy(&dev->mode_config.crtc_idr); 5309 idr_destroy(&dev->mode_config.crtc_idr);
5188 drm_modeset_lock_fini(&dev->mode_config.connection_mutex); 5310 drm_modeset_lock_fini(&dev->mode_config.connection_mutex);
5189} 5311}
@@ -5206,3 +5328,100 @@ struct drm_property *drm_mode_create_rotation_property(struct drm_device *dev,
5206 supported_rotations); 5328 supported_rotations);
5207} 5329}
5208EXPORT_SYMBOL(drm_mode_create_rotation_property); 5330EXPORT_SYMBOL(drm_mode_create_rotation_property);
5331
5332/**
5333 * DOC: Tile group
5334 *
5335 * Tile groups are used to represent tiled monitors with a unique
5336 * integer identifier. Tiled monitors using DisplayID v1.3 have
 5337 * a unique 8-byte handle; we store this in a tile group so we
 5338 * have a common identifier for all tiles in a monitor group.
5339 */
5340static void drm_tile_group_free(struct kref *kref)
5341{
5342 struct drm_tile_group *tg = container_of(kref, struct drm_tile_group, refcount);
5343 struct drm_device *dev = tg->dev;
5344 mutex_lock(&dev->mode_config.idr_mutex);
5345 idr_remove(&dev->mode_config.tile_idr, tg->id);
5346 mutex_unlock(&dev->mode_config.idr_mutex);
5347 kfree(tg);
5348}
5349
5350/**
5351 * drm_mode_put_tile_group - drop a reference to a tile group.
5352 * @dev: DRM device
5353 * @tg: tile group to drop reference to.
5354 *
 5355 * Drop a reference to the tile group and free it once the refcount reaches zero.
5356 */
5357void drm_mode_put_tile_group(struct drm_device *dev,
5358 struct drm_tile_group *tg)
5359{
5360 kref_put(&tg->refcount, drm_tile_group_free);
5361}
5362
5363/**
5364 * drm_mode_get_tile_group - get a reference to an existing tile group
5365 * @dev: DRM device
5366 * @topology: 8-bytes unique per monitor.
5367 *
5368 * Use the unique bytes to get a reference to an existing tile group.
5369 *
5370 * RETURNS:
5371 * tile group or NULL if not found.
5372 */
5373struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
5374 char topology[8])
5375{
5376 struct drm_tile_group *tg;
5377 int id;
5378 mutex_lock(&dev->mode_config.idr_mutex);
5379 idr_for_each_entry(&dev->mode_config.tile_idr, tg, id) {
5380 if (!memcmp(tg->group_data, topology, 8)) {
5381 if (!kref_get_unless_zero(&tg->refcount))
5382 tg = NULL;
5383 mutex_unlock(&dev->mode_config.idr_mutex);
5384 return tg;
5385 }
5386 }
5387 mutex_unlock(&dev->mode_config.idr_mutex);
5388 return NULL;
5389}
5390
5391/**
5392 * drm_mode_create_tile_group - create a tile group from a displayid description
5393 * @dev: DRM device
5394 * @topology: 8-bytes unique per monitor.
5395 *
5396 * Create a tile group for the unique monitor, and get a unique
5397 * identifier for the tile group.
5398 *
5399 * RETURNS:
5400 * new tile group or error.
5401 */
5402struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
5403 char topology[8])
5404{
5405 struct drm_tile_group *tg;
5406 int ret;
5407
5408 tg = kzalloc(sizeof(*tg), GFP_KERNEL);
5409 if (!tg)
5410 return ERR_PTR(-ENOMEM);
5411
5412 kref_init(&tg->refcount);
5413 memcpy(tg->group_data, topology, 8);
5414 tg->dev = dev;
5415
5416 mutex_lock(&dev->mode_config.idr_mutex);
5417 ret = idr_alloc(&dev->mode_config.tile_idr, tg, 1, 0, GFP_KERNEL);
5418 if (ret >= 0) {
5419 tg->id = ret;
5420 } else {
5421 kfree(tg);
5422 tg = ERR_PTR(ret);
5423 }
5424
5425 mutex_unlock(&dev->mode_config.idr_mutex);
5426 return tg;
5427}
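
For orientation, here is a minimal sketch of how a probe path might consume this API. The example_* name is hypothetical, and the error handling assumes the ERR_PTR() convention of drm_mode_create_tile_group() shown above:

	/* Illustrative only: resolve (or create) the tile group for an
	 * 8-byte DisplayID topology identifier and park one reference in
	 * the connector.
	 */
	static int example_bind_tile_group(struct drm_connector *connector,
					   char topology[8])
	{
		struct drm_tile_group *tg;

		tg = drm_mode_get_tile_group(connector->dev, topology);
		if (!tg)
			tg = drm_mode_create_tile_group(connector->dev, topology);
		if (IS_ERR(tg))
			return PTR_ERR(tg);

		if (connector->tile_group == tg) {
			/* already bound: release the reference we just took */
			drm_mode_put_tile_group(connector->dev, tg);
			return 0;
		}
		if (connector->tile_group)
			drm_mode_put_tile_group(connector->dev,
						connector->tile_group);
		connector->tile_group = tg;
		return 0;
	}
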
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 6c65a0a28fbd..d552708409de 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -34,12 +34,35 @@
34#include <linux/moduleparam.h> 34#include <linux/moduleparam.h>
35 35
36#include <drm/drmP.h> 36#include <drm/drmP.h>
37#include <drm/drm_atomic.h>
37#include <drm/drm_crtc.h> 38#include <drm/drm_crtc.h>
38#include <drm/drm_fourcc.h> 39#include <drm/drm_fourcc.h>
39#include <drm/drm_crtc_helper.h> 40#include <drm/drm_crtc_helper.h>
40#include <drm/drm_fb_helper.h> 41#include <drm/drm_fb_helper.h>
42#include <drm/drm_plane_helper.h>
43#include <drm/drm_atomic_helper.h>
41#include <drm/drm_edid.h> 44#include <drm/drm_edid.h>
42 45
46/**
47 * DOC: overview
48 *
49 * The CRTC modeset helper library provides a default set_config implementation
 50 * in drm_crtc_helper_set_config(). It also provides a few convenience functions
 51 * using the same callbacks which drivers can use to e.g. restore the modeset
 52 * configuration on resume with drm_helper_resume_force_mode().
53 *
54 * The driver callbacks are mostly compatible with the atomic modeset helpers,
55 * except for the handling of the primary plane: Atomic helpers require that the
56 * primary plane is implemented as a real standalone plane and not directly tied
57 * to the CRTC state. For easier transition this library provides functions to
58 * implement the old semantics required by the CRTC helpers using the new plane
59 * and atomic helper callbacks.
60 *
61 * Drivers are strongly urged to convert to the atomic helpers (by way of first
62 * converting to the plane helpers). New drivers must not use these functions
63 * but need to implement the atomic interface instead, potentially using the
64 * atomic helpers for that.
65 */
43MODULE_AUTHOR("David Airlie, Jesse Barnes"); 66MODULE_AUTHOR("David Airlie, Jesse Barnes");
44MODULE_DESCRIPTION("DRM KMS helper"); 67MODULE_DESCRIPTION("DRM KMS helper");
45MODULE_LICENSE("GPL and additional rights"); 68MODULE_LICENSE("GPL and additional rights");
@@ -888,3 +911,112 @@ void drm_helper_resume_force_mode(struct drm_device *dev)
888 drm_modeset_unlock_all(dev); 911 drm_modeset_unlock_all(dev);
889} 912}
890EXPORT_SYMBOL(drm_helper_resume_force_mode); 913EXPORT_SYMBOL(drm_helper_resume_force_mode);
914
915/**
916 * drm_helper_crtc_mode_set - mode_set implementation for atomic plane helpers
917 * @crtc: DRM CRTC
918 * @mode: DRM display mode which userspace requested
919 * @adjusted_mode: DRM display mode adjusted by ->mode_fixup callbacks
920 * @x: x offset of the CRTC scanout area on the underlying framebuffer
921 * @y: y offset of the CRTC scanout area on the underlying framebuffer
922 * @old_fb: previous framebuffer
923 *
 924 * This function implements a callback usable as the ->mode_set callback
925 * required by the crtc helpers. Besides the atomic plane helper functions for
926 * the primary plane the driver must also provide the ->mode_set_nofb callback
927 * to set up the crtc.
928 *
929 * This is a transitional helper useful for converting drivers to the atomic
930 * interfaces.
931 */
932int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
933 struct drm_display_mode *adjusted_mode, int x, int y,
934 struct drm_framebuffer *old_fb)
935{
936 struct drm_crtc_state *crtc_state;
937 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
938 int ret;
939
940 if (crtc->funcs->atomic_duplicate_state)
941 crtc_state = crtc->funcs->atomic_duplicate_state(crtc);
942 else if (crtc->state)
943 crtc_state = kmemdup(crtc->state, sizeof(*crtc_state),
944 GFP_KERNEL);
945 else
946 crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
947 if (!crtc_state)
948 return -ENOMEM;
949
950 crtc_state->enable = true;
951 crtc_state->planes_changed = true;
952 crtc_state->mode_changed = true;
953 drm_mode_copy(&crtc_state->mode, mode);
954 drm_mode_copy(&crtc_state->adjusted_mode, adjusted_mode);
955
956 if (crtc_funcs->atomic_check) {
957 ret = crtc_funcs->atomic_check(crtc, crtc_state);
958 if (ret) {
959 kfree(crtc_state);
960
961 return ret;
962 }
963 }
964
965 swap(crtc->state, crtc_state);
966
967 crtc_funcs->mode_set_nofb(crtc);
968
969 if (crtc_state) {
970 if (crtc->funcs->atomic_destroy_state)
971 crtc->funcs->atomic_destroy_state(crtc, crtc_state);
972 else
973 kfree(crtc_state);
974 }
975
976 return drm_helper_crtc_mode_set_base(crtc, x, y, old_fb);
977}
978EXPORT_SYMBOL(drm_helper_crtc_mode_set);
979
980/**
981 * drm_helper_crtc_mode_set_base - mode_set_base implementation for atomic plane helpers
982 * @crtc: DRM CRTC
983 * @x: x offset of the CRTC scanout area on the underlying framebuffer
984 * @y: y offset of the CRTC scanout area on the underlying framebuffer
985 * @old_fb: previous framebuffer
986 *
 987 * This function implements a callback usable as the ->mode_set_base callback
 988 * required by the crtc helpers. The driver must provide the atomic plane helper
989 * functions for the primary plane.
990 *
991 * This is a transitional helper useful for converting drivers to the atomic
992 * interfaces.
993 */
994int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
995 struct drm_framebuffer *old_fb)
996{
997 struct drm_plane_state *plane_state;
998 struct drm_plane *plane = crtc->primary;
999
1000 if (plane->funcs->atomic_duplicate_state)
1001 plane_state = plane->funcs->atomic_duplicate_state(plane);
1002 else if (plane->state)
1003 plane_state = drm_atomic_helper_plane_duplicate_state(plane);
1004 else
1005 plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
1006 if (!plane_state)
1007 return -ENOMEM;
1008
1009 plane_state->crtc = crtc;
1010 drm_atomic_set_fb_for_plane(plane_state, crtc->primary->fb);
1011 plane_state->crtc_x = 0;
1012 plane_state->crtc_y = 0;
1013 plane_state->crtc_h = crtc->mode.vdisplay;
1014 plane_state->crtc_w = crtc->mode.hdisplay;
1015 plane_state->src_x = x << 16;
1016 plane_state->src_y = y << 16;
1017 plane_state->src_h = crtc->mode.vdisplay << 16;
1018 plane_state->src_w = crtc->mode.hdisplay << 16;
1019
1020 return drm_plane_helper_commit(plane, plane_state, old_fb);
1021}
1022EXPORT_SYMBOL(drm_helper_crtc_mode_set_base);
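
Taken together, a driver mid-conversion would plug these two helpers into its CRTC helper vtable roughly as follows; all foo_* symbols are hypothetical stand-ins for driver code:

	/* Hypothetical driver glue during the transition: the primary plane
	 * is already driven via the atomic plane helper callbacks, so the
	 * legacy ->mode_set/->mode_set_base slots can be serviced by these
	 * helpers.
	 */
	static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
		.mode_fixup	= foo_crtc_mode_fixup,
		.mode_set	= drm_helper_crtc_mode_set,
		.mode_set_nofb	= foo_crtc_mode_set_nofb,	/* required */
		.mode_set_base	= drm_helper_crtc_mode_set_base,
		.atomic_check	= foo_crtc_atomic_check,	/* optional */
	};
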
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 08e33b8b13a4..79968e39c8d0 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -39,198 +39,6 @@
39 * blocks, ... 39 * blocks, ...
40 */ 40 */
41 41
42/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */
43static int
44i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
45 uint8_t write_byte, uint8_t *read_byte)
46{
47 struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
48 int ret;
49
50 ret = (*algo_data->aux_ch)(adapter, mode,
51 write_byte, read_byte);
52 return ret;
53}
54
55/*
56 * I2C over AUX CH
57 */
58
59/*
60 * Send the address. If the I2C link is running, this 'restarts'
61 * the connection with the new address, this is used for doing
62 * a write followed by a read (as needed for DDC)
63 */
64static int
65i2c_algo_dp_aux_address(struct i2c_adapter *adapter, u16 address, bool reading)
66{
67 struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
68 int mode = MODE_I2C_START;
69 int ret;
70
71 if (reading)
72 mode |= MODE_I2C_READ;
73 else
74 mode |= MODE_I2C_WRITE;
75 algo_data->address = address;
76 algo_data->running = true;
77 ret = i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
78 return ret;
79}
80
81/*
82 * Stop the I2C transaction. This closes out the link, sending
83 * a bare address packet with the MOT bit turned off
84 */
85static void
86i2c_algo_dp_aux_stop(struct i2c_adapter *adapter, bool reading)
87{
88 struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
89 int mode = MODE_I2C_STOP;
90
91 if (reading)
92 mode |= MODE_I2C_READ;
93 else
94 mode |= MODE_I2C_WRITE;
95 if (algo_data->running) {
96 (void) i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
97 algo_data->running = false;
98 }
99}
100
101/*
102 * Write a single byte to the current I2C address, the
103 * the I2C link must be running or this returns -EIO
104 */
105static int
106i2c_algo_dp_aux_put_byte(struct i2c_adapter *adapter, u8 byte)
107{
108 struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
109 int ret;
110
111 if (!algo_data->running)
112 return -EIO;
113
114 ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_WRITE, byte, NULL);
115 return ret;
116}
117
118/*
119 * Read a single byte from the current I2C address, the
120 * I2C link must be running or this returns -EIO
121 */
122static int
123i2c_algo_dp_aux_get_byte(struct i2c_adapter *adapter, u8 *byte_ret)
124{
125 struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
126 int ret;
127
128 if (!algo_data->running)
129 return -EIO;
130
131 ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_READ, 0, byte_ret);
132 return ret;
133}
134
135static int
136i2c_algo_dp_aux_xfer(struct i2c_adapter *adapter,
137 struct i2c_msg *msgs,
138 int num)
139{
140 int ret = 0;
141 bool reading = false;
142 int m;
143 int b;
144
145 for (m = 0; m < num; m++) {
146 u16 len = msgs[m].len;
147 u8 *buf = msgs[m].buf;
148 reading = (msgs[m].flags & I2C_M_RD) != 0;
149 ret = i2c_algo_dp_aux_address(adapter, msgs[m].addr, reading);
150 if (ret < 0)
151 break;
152 if (reading) {
153 for (b = 0; b < len; b++) {
154 ret = i2c_algo_dp_aux_get_byte(adapter, &buf[b]);
155 if (ret < 0)
156 break;
157 }
158 } else {
159 for (b = 0; b < len; b++) {
160 ret = i2c_algo_dp_aux_put_byte(adapter, buf[b]);
161 if (ret < 0)
162 break;
163 }
164 }
165 if (ret < 0)
166 break;
167 }
168 if (ret >= 0)
169 ret = num;
170 i2c_algo_dp_aux_stop(adapter, reading);
171 DRM_DEBUG_KMS("dp_aux_xfer return %d\n", ret);
172 return ret;
173}
174
175static u32
176i2c_algo_dp_aux_functionality(struct i2c_adapter *adapter)
177{
178 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
179 I2C_FUNC_SMBUS_READ_BLOCK_DATA |
180 I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
181 I2C_FUNC_10BIT_ADDR;
182}
183
184static const struct i2c_algorithm i2c_dp_aux_algo = {
185 .master_xfer = i2c_algo_dp_aux_xfer,
186 .functionality = i2c_algo_dp_aux_functionality,
187};
188
189static void
190i2c_dp_aux_reset_bus(struct i2c_adapter *adapter)
191{
192 (void) i2c_algo_dp_aux_address(adapter, 0, false);
193 (void) i2c_algo_dp_aux_stop(adapter, false);
194}
195
196static int
197i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
198{
199 adapter->algo = &i2c_dp_aux_algo;
200 adapter->retries = 3;
201 i2c_dp_aux_reset_bus(adapter);
202 return 0;
203}
204
205/**
206 * i2c_dp_aux_add_bus() - register an i2c adapter using the aux ch helper
207 * @adapter: i2c adapter to register
208 *
 209 * This registers an i2c adapter that uses the dp aux channel as its underlying
210 * transport. The driver needs to fill out the &i2c_algo_dp_aux_data structure
211 * and store it in the algo_data member of the @adapter argument. This will be
212 * used by the i2c over dp aux algorithm to drive the hardware.
213 *
214 * RETURNS:
215 * 0 on success, -ERRNO on failure.
216 *
217 * IMPORTANT:
218 * This interface is deprecated, please switch to the new dp aux helpers and
219 * drm_dp_aux_register().
220 */
221int
222i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
223{
224 int error;
225
226 error = i2c_dp_aux_prepare_bus(adapter);
227 if (error)
228 return error;
229 error = i2c_add_adapter(adapter);
230 return error;
231}
232EXPORT_SYMBOL(i2c_dp_aux_add_bus);
233
234/* Helpers for DP link training */ 42/* Helpers for DP link training */
235static u8 dp_link_status(const u8 link_status[DP_LINK_STATUS_SIZE], int r) 43static u8 dp_link_status(const u8 link_status[DP_LINK_STATUS_SIZE], int r)
236{ 44{
@@ -378,10 +186,11 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
378 186
379 /* 187 /*
380 * The specification doesn't give any recommendation on how often to 188 * The specification doesn't give any recommendation on how often to
381 * retry native transactions, so retry 7 times like for I2C-over-AUX 189 * retry native transactions. We used to retry 7 times like for
 382 * transactions. 190 * aux i2c transactions, but on real world devices this wasn't
 191 * sufficient; bump to 32, which makes Dell 4k monitors happier.
383 */ 192 */
384 for (retry = 0; retry < 7; retry++) { 193 for (retry = 0; retry < 32; retry++) {
385 194
386 mutex_lock(&aux->hw_mutex); 195 mutex_lock(&aux->hw_mutex);
387 err = aux->transfer(aux, &msg); 196 err = aux->transfer(aux, &msg);
@@ -654,10 +463,12 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
654 463
655 case DP_AUX_I2C_REPLY_NACK: 464 case DP_AUX_I2C_REPLY_NACK:
656 DRM_DEBUG_KMS("I2C nack\n"); 465 DRM_DEBUG_KMS("I2C nack\n");
466 aux->i2c_nack_count++;
657 return -EREMOTEIO; 467 return -EREMOTEIO;
658 468
659 case DP_AUX_I2C_REPLY_DEFER: 469 case DP_AUX_I2C_REPLY_DEFER:
660 DRM_DEBUG_KMS("I2C defer\n"); 470 DRM_DEBUG_KMS("I2C defer\n");
471 aux->i2c_defer_count++;
661 usleep_range(400, 500); 472 usleep_range(400, 500);
662 continue; 473 continue;
663 474
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 070f913d2dba..9a5b68717ec8 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -839,6 +839,8 @@ static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
839 839
840static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt) 840static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
841{ 841{
842 struct drm_dp_mst_branch *mstb;
843
842 switch (old_pdt) { 844 switch (old_pdt) {
843 case DP_PEER_DEVICE_DP_LEGACY_CONV: 845 case DP_PEER_DEVICE_DP_LEGACY_CONV:
844 case DP_PEER_DEVICE_SST_SINK: 846 case DP_PEER_DEVICE_SST_SINK:
@@ -846,8 +848,9 @@ static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
846 drm_dp_mst_unregister_i2c_bus(&port->aux); 848 drm_dp_mst_unregister_i2c_bus(&port->aux);
847 break; 849 break;
848 case DP_PEER_DEVICE_MST_BRANCHING: 850 case DP_PEER_DEVICE_MST_BRANCHING:
849 drm_dp_put_mst_branch_device(port->mstb); 851 mstb = port->mstb;
850 port->mstb = NULL; 852 port->mstb = NULL;
853 drm_dp_put_mst_branch_device(mstb);
851 break; 854 break;
852 } 855 }
853} 856}
@@ -858,6 +861,8 @@ static void drm_dp_destroy_port(struct kref *kref)
858 struct drm_dp_mst_topology_mgr *mgr = port->mgr; 861 struct drm_dp_mst_topology_mgr *mgr = port->mgr;
859 if (!port->input) { 862 if (!port->input) {
860 port->vcpi.num_slots = 0; 863 port->vcpi.num_slots = 0;
864
865 kfree(port->cached_edid);
861 if (port->connector) 866 if (port->connector)
862 (*port->mgr->cbs->destroy_connector)(mgr, port->connector); 867 (*port->mgr->cbs->destroy_connector)(mgr, port->connector);
863 drm_dp_port_teardown_pdt(port, port->pdt); 868 drm_dp_port_teardown_pdt(port, port->pdt);
@@ -1011,19 +1016,20 @@ static void drm_dp_check_port_guid(struct drm_dp_mst_branch *mstb,
1011 1016
1012static void build_mst_prop_path(struct drm_dp_mst_port *port, 1017static void build_mst_prop_path(struct drm_dp_mst_port *port,
1013 struct drm_dp_mst_branch *mstb, 1018 struct drm_dp_mst_branch *mstb,
1014 char *proppath) 1019 char *proppath,
1020 size_t proppath_size)
1015{ 1021{
1016 int i; 1022 int i;
1017 char temp[8]; 1023 char temp[8];
1018 snprintf(proppath, 255, "mst:%d", mstb->mgr->conn_base_id); 1024 snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id);
1019 for (i = 0; i < (mstb->lct - 1); i++) { 1025 for (i = 0; i < (mstb->lct - 1); i++) {
1020 int shift = (i % 2) ? 0 : 4; 1026 int shift = (i % 2) ? 0 : 4;
1021 int port_num = mstb->rad[i / 2] >> shift; 1027 int port_num = mstb->rad[i / 2] >> shift;
1022 snprintf(temp, 8, "-%d", port_num); 1028 snprintf(temp, sizeof(temp), "-%d", port_num);
1023 strncat(proppath, temp, 255); 1029 strlcat(proppath, temp, proppath_size);
1024 } 1030 }
1025 snprintf(temp, 8, "-%d", port->port_num); 1031 snprintf(temp, sizeof(temp), "-%d", port->port_num);
1026 strncat(proppath, temp, 255); 1032 strlcat(proppath, temp, proppath_size);
1027} 1033}
1028 1034
1029static void drm_dp_add_port(struct drm_dp_mst_branch *mstb, 1035static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
@@ -1094,8 +1100,12 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
1094 1100
1095 if (created && !port->input) { 1101 if (created && !port->input) {
1096 char proppath[255]; 1102 char proppath[255];
1097 build_mst_prop_path(port, mstb, proppath); 1103 build_mst_prop_path(port, mstb, proppath, sizeof(proppath));
1098 port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath); 1104 port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
1105
1106 if (port->port_num >= 8) {
1107 port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
1108 }
1099 } 1109 }
1100 1110
1101 /* put reference to this port */ 1111 /* put reference to this port */
@@ -1798,17 +1808,27 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
1798 return 0; 1808 return 0;
1799} 1809}
1800 1810
1801static int drm_dp_get_vc_payload_bw(int dp_link_bw, int dp_link_count) 1811static bool drm_dp_get_vc_payload_bw(int dp_link_bw,
1812 int dp_link_count,
1813 int *out)
1802{ 1814{
1803 switch (dp_link_bw) { 1815 switch (dp_link_bw) {
1816 default:
1817 DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n",
1818 dp_link_bw, dp_link_count);
1819 return false;
1820
1804 case DP_LINK_BW_1_62: 1821 case DP_LINK_BW_1_62:
1805 return 3 * dp_link_count; 1822 *out = 3 * dp_link_count;
1823 break;
1806 case DP_LINK_BW_2_7: 1824 case DP_LINK_BW_2_7:
1807 return 5 * dp_link_count; 1825 *out = 5 * dp_link_count;
1826 break;
1808 case DP_LINK_BW_5_4: 1827 case DP_LINK_BW_5_4:
1809 return 10 * dp_link_count; 1828 *out = 10 * dp_link_count;
1829 break;
1810 } 1830 }
1811 BUG(); 1831 return true;
1812} 1832}
1813 1833
1814/** 1834/**
@@ -1840,7 +1860,13 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
1840 goto out_unlock; 1860 goto out_unlock;
1841 } 1861 }
1842 1862
1843 mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr->dpcd[1], mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK); 1863 if (!drm_dp_get_vc_payload_bw(mgr->dpcd[1],
1864 mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK,
1865 &mgr->pbn_div)) {
1866 ret = -EINVAL;
1867 goto out_unlock;
1868 }
1869
1844 mgr->total_pbn = 2560; 1870 mgr->total_pbn = 2560;
1845 mgr->total_slots = DIV_ROUND_UP(mgr->total_pbn, mgr->pbn_div); 1871 mgr->total_slots = DIV_ROUND_UP(mgr->total_pbn, mgr->pbn_div);
1846 mgr->avail_slots = mgr->total_slots; 1872 mgr->avail_slots = mgr->total_slots;
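
A quick worked example of the arithmetic (illustrative only, since the function is static to this file): an out-of-range dp_link_bw now fails the whole MST enable with -EINVAL instead of hitting BUG(), while a valid link computes the slot divisor as follows.

	/* Worked example, not from the patch: DP_LINK_BW_2_7, 4 lanes.
	 *   drm_dp_get_vc_payload_bw(DP_LINK_BW_2_7, 4, &pbn_div)
	 *     -> pbn_div = 5 * 4 = 20
	 *   total_slots = DIV_ROUND_UP(2560, 20) = 128
	 */
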
@@ -2150,7 +2176,8 @@ EXPORT_SYMBOL(drm_dp_mst_hpd_irq);
2150 * This returns the current connection state for a port. It validates the 2176 * This returns the current connection state for a port. It validates the
2151 * port pointer still exists so the caller doesn't require a reference 2177 * port pointer still exists so the caller doesn't require a reference
2152 */ 2178 */
2153enum drm_connector_status drm_dp_mst_detect_port(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) 2179enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector,
2180 struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
2154{ 2181{
2155 enum drm_connector_status status = connector_status_disconnected; 2182 enum drm_connector_status status = connector_status_disconnected;
2156 2183
@@ -2169,6 +2196,10 @@ enum drm_connector_status drm_dp_mst_detect_port(struct drm_dp_mst_topology_mgr
2169 2196
2170 case DP_PEER_DEVICE_SST_SINK: 2197 case DP_PEER_DEVICE_SST_SINK:
2171 status = connector_status_connected; 2198 status = connector_status_connected;
2199 /* for logical ports - cache the EDID */
2200 if (port->port_num >= 8 && !port->cached_edid) {
2201 port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
2202 }
2172 break; 2203 break;
2173 case DP_PEER_DEVICE_DP_LEGACY_CONV: 2204 case DP_PEER_DEVICE_DP_LEGACY_CONV:
2174 if (port->ldps) 2205 if (port->ldps)
@@ -2200,7 +2231,12 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_
2200 if (!port) 2231 if (!port)
2201 return NULL; 2232 return NULL;
2202 2233
2203 edid = drm_get_edid(connector, &port->aux.ddc); 2234 if (port->cached_edid)
2235 edid = drm_edid_duplicate(port->cached_edid);
2236 else
2237 edid = drm_get_edid(connector, &port->aux.ddc);
2238
2239 drm_mode_connector_set_tile_property(connector);
2204 drm_dp_put_port(port); 2240 drm_dp_put_port(port);
2205 return edid; 2241 return edid;
2206} 2242}
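
For context, a sketch of the connector callbacks a driver would implement on top of the updated signatures; the foo_* names and the container type are hypothetical:

	/* Hypothetical MST connector callbacks using the new interfaces. */
	static enum drm_connector_status
	foo_mst_detect(struct drm_connector *connector, bool force)
	{
		struct foo_mst_connector *c = to_foo_mst_connector(connector);

		return drm_dp_mst_detect_port(connector, c->mst_mgr, c->port);
	}

	static int foo_mst_get_modes(struct drm_connector *connector)
	{
		struct foo_mst_connector *c = to_foo_mst_connector(connector);
		struct edid *edid;
		int ret;

		/* returns a duplicate of the cached EDID for logical ports */
		edid = drm_dp_mst_get_edid(connector, c->mst_mgr, c->port);
		drm_mode_connector_update_edid_property(connector, edid);
		ret = drm_add_edid_modes(connector, edid);
		kfree(edid);
		return ret;
	}
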
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index bc3da32d4585..4f41377b0b80 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -56,7 +56,7 @@ static struct idr drm_minors_idr;
56struct class *drm_class; 56struct class *drm_class;
57static struct dentry *drm_debugfs_root; 57static struct dentry *drm_debugfs_root;
58 58
59void drm_err(const char *func, const char *format, ...) 59void drm_err(const char *format, ...)
60{ 60{
61 struct va_format vaf; 61 struct va_format vaf;
62 va_list args; 62 va_list args;
@@ -66,7 +66,8 @@ void drm_err(const char *func, const char *format, ...)
66 vaf.fmt = format; 66 vaf.fmt = format;
67 vaf.va = &args; 67 vaf.va = &args;
68 68
69 printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* %pV", func, &vaf); 69 printk(KERN_ERR "[" DRM_NAME ":%pf] *ERROR* %pV",
70 __builtin_return_address(0), &vaf);
70 71
71 va_end(args); 72 va_end(args);
72} 73}
@@ -534,6 +535,8 @@ static void drm_fs_inode_free(struct inode *inode)
534 * The initial ref-count of the object is 1. Use drm_dev_ref() and 535 * The initial ref-count of the object is 1. Use drm_dev_ref() and
535 * drm_dev_unref() to take and drop further ref-counts. 536 * drm_dev_unref() to take and drop further ref-counts.
536 * 537 *
538 * Note that for purely virtual devices @parent can be NULL.
539 *
537 * RETURNS: 540 * RETURNS:
538 * Pointer to new DRM device, or NULL if out of memory. 541 * Pointer to new DRM device, or NULL if out of memory.
539 */ 542 */
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 3bf999134bcc..53bc7a628909 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -34,6 +34,7 @@
34#include <linux/module.h> 34#include <linux/module.h>
35#include <drm/drmP.h> 35#include <drm/drmP.h>
36#include <drm/drm_edid.h> 36#include <drm/drm_edid.h>
37#include <drm/drm_displayid.h>
37 38
38#define version_greater(edid, maj, min) \ 39#define version_greater(edid, maj, min) \
39 (((edid)->version > (maj)) || \ 40 (((edid)->version > (maj)) || \
@@ -1014,6 +1015,27 @@ module_param_named(edid_fixup, edid_fixup, int, 0400);
1014MODULE_PARM_DESC(edid_fixup, 1015MODULE_PARM_DESC(edid_fixup,
1015 "Minimum number of valid EDID header bytes (0-8, default 6)"); 1016 "Minimum number of valid EDID header bytes (0-8, default 6)");
1016 1017
1018static void drm_get_displayid(struct drm_connector *connector,
1019 struct edid *edid);
1020
1021static int drm_edid_block_checksum(const u8 *raw_edid)
1022{
1023 int i;
1024 u8 csum = 0;
1025 for (i = 0; i < EDID_LENGTH; i++)
1026 csum += raw_edid[i];
1027
1028 return csum;
1029}
1030
1031static bool drm_edid_is_zero(const u8 *in_edid, int length)
1032{
1033 if (memchr_inv(in_edid, 0, length))
1034 return false;
1035
1036 return true;
1037}
1038
1017/** 1039/**
1018 * drm_edid_block_valid - Sanity check the EDID block (base or extension) 1040 * drm_edid_block_valid - Sanity check the EDID block (base or extension)
1019 * @raw_edid: pointer to raw EDID block 1041 * @raw_edid: pointer to raw EDID block
@@ -1027,8 +1049,7 @@ MODULE_PARM_DESC(edid_fixup,
1027 */ 1049 */
1028bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid) 1050bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid)
1029{ 1051{
1030 int i; 1052 u8 csum;
1031 u8 csum = 0;
1032 struct edid *edid = (struct edid *)raw_edid; 1053 struct edid *edid = (struct edid *)raw_edid;
1033 1054
1034 if (WARN_ON(!raw_edid)) 1055 if (WARN_ON(!raw_edid))
@@ -1048,8 +1069,7 @@ bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid)
1048 } 1069 }
1049 } 1070 }
1050 1071
1051 for (i = 0; i < EDID_LENGTH; i++) 1072 csum = drm_edid_block_checksum(raw_edid);
1052 csum += raw_edid[i];
1053 if (csum) { 1073 if (csum) {
1054 if (print_bad_edid) { 1074 if (print_bad_edid) {
1055 DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum); 1075 DRM_ERROR("EDID checksum is invalid, remainder is %d\n", csum);
@@ -1080,9 +1100,13 @@ bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid)
1080 1100
1081bad: 1101bad:
1082 if (print_bad_edid) { 1102 if (print_bad_edid) {
1083 printk(KERN_ERR "Raw EDID:\n"); 1103 if (drm_edid_is_zero(raw_edid, EDID_LENGTH)) {
1084 print_hex_dump(KERN_ERR, " \t", DUMP_PREFIX_NONE, 16, 1, 1104 printk(KERN_ERR "EDID block is all zeroes\n");
1105 } else {
1106 printk(KERN_ERR "Raw EDID:\n");
1107 print_hex_dump(KERN_ERR, " \t", DUMP_PREFIX_NONE, 16, 1,
1085 raw_edid, EDID_LENGTH, false); 1108 raw_edid, EDID_LENGTH, false);
1109 }
1086 } 1110 }
1087 return false; 1111 return false;
1088} 1112}
@@ -1115,7 +1139,7 @@ EXPORT_SYMBOL(drm_edid_is_valid);
1115#define DDC_SEGMENT_ADDR 0x30 1139#define DDC_SEGMENT_ADDR 0x30
1116/** 1140/**
1117 * drm_do_probe_ddc_edid() - get EDID information via I2C 1141 * drm_do_probe_ddc_edid() - get EDID information via I2C
1118 * @adapter: I2C device adaptor 1142 * @data: I2C device adapter
1119 * @buf: EDID data buffer to be filled 1143 * @buf: EDID data buffer to be filled
1120 * @block: 128 byte EDID block to start fetching from 1144 * @block: 128 byte EDID block to start fetching from
1121 * @len: EDID data buffer length to fetch 1145 * @len: EDID data buffer length to fetch
@@ -1125,9 +1149,9 @@ EXPORT_SYMBOL(drm_edid_is_valid);
1125 * Return: 0 on success or -1 on failure. 1149 * Return: 0 on success or -1 on failure.
1126 */ 1150 */
1127static int 1151static int
1128drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf, 1152drm_do_probe_ddc_edid(void *data, u8 *buf, unsigned int block, size_t len)
1129 int block, int len)
1130{ 1153{
1154 struct i2c_adapter *adapter = data;
1131 unsigned char start = block * EDID_LENGTH; 1155 unsigned char start = block * EDID_LENGTH;
1132 unsigned char segment = block >> 1; 1156 unsigned char segment = block >> 1;
1133 unsigned char xfers = segment ? 3 : 2; 1157 unsigned char xfers = segment ? 3 : 2;
@@ -1176,16 +1200,26 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
1176 return ret == xfers ? 0 : -1; 1200 return ret == xfers ? 0 : -1;
1177} 1201}
1178 1202
1179static bool drm_edid_is_zero(u8 *in_edid, int length) 1203/**
1180{ 1204 * drm_do_get_edid - get EDID data using a custom EDID block read function
1181 if (memchr_inv(in_edid, 0, length)) 1205 * @connector: connector we're probing
1182 return false; 1206 * @get_edid_block: EDID block read function
1183 1207 * @data: private data passed to the block read function
1184 return true; 1208 *
1185} 1209 * When the I2C adapter connected to the DDC bus is hidden behind a device that
1186 1210 * exposes a different interface to read EDID blocks this function can be used
1187static u8 * 1211 * to get EDID data using a custom block read function.
1188drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) 1212 *
 1213 * Since in the general case the DDC bus is accessible by the kernel at the I2C
1214 * level, drivers must make all reasonable efforts to expose it as an I2C
1215 * adapter and use drm_get_edid() instead of abusing this function.
1216 *
1217 * Return: Pointer to valid EDID or NULL if we couldn't find any.
1218 */
1219struct edid *drm_do_get_edid(struct drm_connector *connector,
1220 int (*get_edid_block)(void *data, u8 *buf, unsigned int block,
1221 size_t len),
1222 void *data)
1189{ 1223{
1190 int i, j = 0, valid_extensions = 0; 1224 int i, j = 0, valid_extensions = 0;
1191 u8 *block, *new; 1225 u8 *block, *new;
@@ -1196,7 +1230,7 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
1196 1230
1197 /* base block fetch */ 1231 /* base block fetch */
1198 for (i = 0; i < 4; i++) { 1232 for (i = 0; i < 4; i++) {
1199 if (drm_do_probe_ddc_edid(adapter, block, 0, EDID_LENGTH)) 1233 if (get_edid_block(data, block, 0, EDID_LENGTH))
1200 goto out; 1234 goto out;
1201 if (drm_edid_block_valid(block, 0, print_bad_edid)) 1235 if (drm_edid_block_valid(block, 0, print_bad_edid))
1202 break; 1236 break;
@@ -1210,7 +1244,7 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
1210 1244
1211 /* if there's no extensions, we're done */ 1245 /* if there's no extensions, we're done */
1212 if (block[0x7e] == 0) 1246 if (block[0x7e] == 0)
1213 return block; 1247 return (struct edid *)block;
1214 1248
1215 new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL); 1249 new = krealloc(block, (block[0x7e] + 1) * EDID_LENGTH, GFP_KERNEL);
1216 if (!new) 1250 if (!new)
@@ -1219,7 +1253,7 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
1219 1253
1220 for (j = 1; j <= block[0x7e]; j++) { 1254 for (j = 1; j <= block[0x7e]; j++) {
1221 for (i = 0; i < 4; i++) { 1255 for (i = 0; i < 4; i++) {
1222 if (drm_do_probe_ddc_edid(adapter, 1256 if (get_edid_block(data,
1223 block + (valid_extensions + 1) * EDID_LENGTH, 1257 block + (valid_extensions + 1) * EDID_LENGTH,
1224 j, EDID_LENGTH)) 1258 j, EDID_LENGTH))
1225 goto out; 1259 goto out;
@@ -1247,7 +1281,7 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
1247 block = new; 1281 block = new;
1248 } 1282 }
1249 1283
1250 return block; 1284 return (struct edid *)block;
1251 1285
1252carp: 1286carp:
1253 if (print_bad_edid) { 1287 if (print_bad_edid) {
@@ -1260,6 +1294,7 @@ out:
1260 kfree(block); 1294 kfree(block);
1261 return NULL; 1295 return NULL;
1262} 1296}
1297EXPORT_SYMBOL_GPL(drm_do_get_edid);
1263 1298
1264/** 1299/**
1265 * drm_probe_ddc() - probe DDC presence 1300 * drm_probe_ddc() - probe DDC presence
@@ -1289,11 +1324,14 @@ EXPORT_SYMBOL(drm_probe_ddc);
1289struct edid *drm_get_edid(struct drm_connector *connector, 1324struct edid *drm_get_edid(struct drm_connector *connector,
1290 struct i2c_adapter *adapter) 1325 struct i2c_adapter *adapter)
1291{ 1326{
1292 struct edid *edid = NULL; 1327 struct edid *edid;
1293 1328
1294 if (drm_probe_ddc(adapter)) 1329 if (!drm_probe_ddc(adapter))
1295 edid = (struct edid *)drm_do_get_edid(connector, adapter); 1330 return NULL;
1296 1331
1332 edid = drm_do_get_edid(connector, drm_do_probe_ddc_edid, adapter);
1333 if (edid)
1334 drm_get_displayid(connector, edid);
1297 return edid; 1335 return edid;
1298} 1336}
1299EXPORT_SYMBOL(drm_get_edid); 1337EXPORT_SYMBOL(drm_get_edid);
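
As a sketch of the intended use of the new export, consider a device that can only return EDID bytes through some vendor-specific register window rather than a plain I2C adapter; the foo_* names here are entirely hypothetical:

	/* Hypothetical block-read callback: fetch @len bytes of EDID block
	 * @block through a vendor register interface instead of raw DDC.
	 */
	static int foo_read_edid_block(void *data, u8 *buf, unsigned int block,
				       size_t len)
	{
		struct foo_device *foo = data;

		return foo_hw_edid_read(foo, block * EDID_LENGTH, buf, len);
	}

	static struct edid *foo_get_edid(struct foo_device *foo,
					 struct drm_connector *connector)
	{
		return drm_do_get_edid(connector, foo_read_edid_block, foo);
	}
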
@@ -2389,7 +2427,7 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
2389/* 2427/*
2390 * Search EDID for CEA extension block. 2428 * Search EDID for CEA extension block.
2391 */ 2429 */
2392static u8 *drm_find_cea_extension(struct edid *edid) 2430static u8 *drm_find_edid_extension(struct edid *edid, int ext_id)
2393{ 2431{
2394 u8 *edid_ext = NULL; 2432 u8 *edid_ext = NULL;
2395 int i; 2433 int i;
@@ -2401,7 +2439,7 @@ static u8 *drm_find_cea_extension(struct edid *edid)
2401 /* Find CEA extension */ 2439 /* Find CEA extension */
2402 for (i = 0; i < edid->extensions; i++) { 2440 for (i = 0; i < edid->extensions; i++) {
2403 edid_ext = (u8 *)edid + EDID_LENGTH * (i + 1); 2441 edid_ext = (u8 *)edid + EDID_LENGTH * (i + 1);
2404 if (edid_ext[0] == CEA_EXT) 2442 if (edid_ext[0] == ext_id)
2405 break; 2443 break;
2406 } 2444 }
2407 2445
@@ -2411,6 +2449,16 @@ static u8 *drm_find_cea_extension(struct edid *edid)
2411 return edid_ext; 2449 return edid_ext;
2412} 2450}
2413 2451
2452static u8 *drm_find_cea_extension(struct edid *edid)
2453{
2454 return drm_find_edid_extension(edid, CEA_EXT);
2455}
2456
2457static u8 *drm_find_displayid_extension(struct edid *edid)
2458{
2459 return drm_find_edid_extension(edid, DISPLAYID_EXT);
2460}
2461
2414/* 2462/*
2415 * Calculate the alternate clock for the CEA mode 2463 * Calculate the alternate clock for the CEA mode
2416 * (60Hz vs. 59.94Hz etc.) 2464 * (60Hz vs. 59.94Hz etc.)
@@ -3128,9 +3176,12 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
3128 } 3176 }
3129 } 3177 }
3130 eld[5] |= sad_count << 4; 3178 eld[5] |= sad_count << 4;
3131 eld[2] = (20 + mnl + sad_count * 3 + 3) / 4;
3132 3179
3133 DRM_DEBUG_KMS("ELD size %d, SAD count %d\n", (int)eld[2], sad_count); 3180 eld[DRM_ELD_BASELINE_ELD_LEN] =
3181 DIV_ROUND_UP(drm_eld_calc_baseline_block_size(eld), 4);
3182
3183 DRM_DEBUG_KMS("ELD size %d, SAD count %d\n",
3184 drm_eld_size(eld), sad_count);
3134} 3185}
3135EXPORT_SYMBOL(drm_edid_to_eld); 3186EXPORT_SYMBOL(drm_edid_to_eld);
3136 3187
@@ -3868,3 +3919,123 @@ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
3868 return 0; 3919 return 0;
3869} 3920}
3870EXPORT_SYMBOL(drm_hdmi_vendor_infoframe_from_display_mode); 3921EXPORT_SYMBOL(drm_hdmi_vendor_infoframe_from_display_mode);
3922
3923static int drm_parse_display_id(struct drm_connector *connector,
3924 u8 *displayid, int length,
3925 bool is_edid_extension)
3926{
3927 /* if this is an EDID extension the first byte will be 0x70 */
3928 int idx = 0;
3929 struct displayid_hdr *base;
3930 struct displayid_block *block;
3931 u8 csum = 0;
3932 int i;
3933
3934 if (is_edid_extension)
3935 idx = 1;
3936
3937 base = (struct displayid_hdr *)&displayid[idx];
3938
3939 DRM_DEBUG_KMS("base revision 0x%x, length %d, %d %d\n",
3940 base->rev, base->bytes, base->prod_id, base->ext_count);
3941
3942 if (base->bytes + 5 > length - idx)
3943 return -EINVAL;
3944
3945 for (i = idx; i <= base->bytes + 5; i++) {
3946 csum += displayid[i];
3947 }
3948 if (csum) {
3949 DRM_ERROR("DisplayID checksum invalid, remainder is %d\n", csum);
3950 return -EINVAL;
3951 }
3952
3953 block = (struct displayid_block *)&displayid[idx + 4];
3954 DRM_DEBUG_KMS("block id %d, rev %d, len %d\n",
3955 block->tag, block->rev, block->num_bytes);
3956
3957 switch (block->tag) {
3958 case DATA_BLOCK_TILED_DISPLAY: {
3959 struct displayid_tiled_block *tile = (struct displayid_tiled_block *)block;
3960
3961 u16 w, h;
3962 u8 tile_v_loc, tile_h_loc;
3963 u8 num_v_tile, num_h_tile;
3964 struct drm_tile_group *tg;
3965
3966 w = tile->tile_size[0] | tile->tile_size[1] << 8;
3967 h = tile->tile_size[2] | tile->tile_size[3] << 8;
3968
3969 num_v_tile = (tile->topo[0] & 0xf) | (tile->topo[2] & 0x30);
3970 num_h_tile = (tile->topo[0] >> 4) | ((tile->topo[2] >> 2) & 0x30);
3971 tile_v_loc = (tile->topo[1] & 0xf) | ((tile->topo[2] & 0x3) << 4);
3972 tile_h_loc = (tile->topo[1] >> 4) | (((tile->topo[2] >> 2) & 0x3) << 4);
3973
3974 connector->has_tile = true;
3975 if (tile->tile_cap & 0x80)
3976 connector->tile_is_single_monitor = true;
3977
3978 connector->num_h_tile = num_h_tile + 1;
3979 connector->num_v_tile = num_v_tile + 1;
3980 connector->tile_h_loc = tile_h_loc;
3981 connector->tile_v_loc = tile_v_loc;
3982 connector->tile_h_size = w + 1;
3983 connector->tile_v_size = h + 1;
3984
3985 DRM_DEBUG_KMS("tile cap 0x%x\n", tile->tile_cap);
3986 DRM_DEBUG_KMS("tile_size %d x %d\n", w + 1, h + 1);
3987 DRM_DEBUG_KMS("topo num tiles %dx%d, location %dx%d\n",
3988 num_h_tile + 1, num_v_tile + 1, tile_h_loc, tile_v_loc);
3989 DRM_DEBUG_KMS("vend %c%c%c\n", tile->topology_id[0], tile->topology_id[1], tile->topology_id[2]);
3990
3991 tg = drm_mode_get_tile_group(connector->dev, tile->topology_id);
3992 if (!tg) {
3993 tg = drm_mode_create_tile_group(connector->dev, tile->topology_id);
3994 }
3995 if (!tg)
3996 return -ENOMEM;
3997
3998 if (connector->tile_group != tg) {
 3999			/* if we don't have a pointer yet, take the
 4000			   reference and drop the ref to the old tile group */
4001 if (connector->tile_group) {
4002 drm_mode_put_tile_group(connector->dev, connector->tile_group);
4003 }
4004 connector->tile_group = tg;
4005 } else
4006 /* if same tile group, then release the ref we just took. */
4007 drm_mode_put_tile_group(connector->dev, tg);
4008 }
4009 break;
4010 default:
4011 printk("unknown displayid tag %d\n", block->tag);
4012 break;
4013 }
4014 return 0;
4015}
4016
4017static void drm_get_displayid(struct drm_connector *connector,
4018 struct edid *edid)
4019{
4020 void *displayid = NULL;
4021 int ret;
4022 connector->has_tile = false;
4023 displayid = drm_find_displayid_extension(edid);
4024 if (!displayid) {
4025 /* drop reference to any tile group we had */
4026 goto out_drop_ref;
4027 }
4028
4029 ret = drm_parse_display_id(connector, displayid, EDID_LENGTH, true);
4030 if (ret < 0)
4031 goto out_drop_ref;
4032 if (!connector->has_tile)
4033 goto out_drop_ref;
4034 return;
4035out_drop_ref:
4036 if (connector->tile_group) {
4037 drm_mode_put_tile_group(connector->dev, connector->tile_group);
4038 connector->tile_group = NULL;
4039 }
4040 return;
4041}
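
To make the bitfield unpacking above concrete, here is a worked example under the DisplayID v1.3 layout the parser assumes: a 2x2 tiled monitor reporting its top-right tile (tile counts are stored minus one).

	/* Illustrative values, not from the patch: */
	u8 topo[3] = { 0x11, 0x10, 0x00 };

	/* (topo[0] & 0xf)  -> num_v_tile = 1, i.e. 2 tile rows      */
	/* (topo[0] >> 4)   -> num_h_tile = 1, i.e. 2 tile columns   */
	/* (topo[1] & 0xf)  -> tile_v_loc = 0, top row               */
	/* (topo[1] >> 4)   -> tile_h_loc = 1, right column          */
	/* topo[2] carries the extension bits, all clear in this case */
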
diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c
index 0a235fe61c9b..732cb6f8e653 100644
--- a/drivers/gpu/drm/drm_edid_load.c
+++ b/drivers/gpu/drm/drm_edid_load.c
@@ -254,8 +254,7 @@ static void *edid_load(struct drm_connector *connector, const char *name,
254 name, connector_name); 254 name, connector_name);
255 255
256out: 256out:
257 if (fw) 257 release_firmware(fw);
258 release_firmware(fw);
259 return edid; 258 return edid;
260} 259}
261 260
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 0c0c39bac23d..52ce26d6b4fb 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -347,9 +347,18 @@ bool drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
347{ 347{
348 struct drm_device *dev = fb_helper->dev; 348 struct drm_device *dev = fb_helper->dev;
349 bool ret; 349 bool ret;
350 bool do_delayed = false;
351
350 drm_modeset_lock_all(dev); 352 drm_modeset_lock_all(dev);
351 ret = restore_fbdev_mode(fb_helper); 353 ret = restore_fbdev_mode(fb_helper);
354
355 do_delayed = fb_helper->delayed_hotplug;
356 if (do_delayed)
357 fb_helper->delayed_hotplug = false;
352 drm_modeset_unlock_all(dev); 358 drm_modeset_unlock_all(dev);
359
360 if (do_delayed)
361 drm_fb_helper_hotplug_event(fb_helper);
353 return ret; 362 return ret;
354} 363}
355EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked); 364EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode_unlocked);
@@ -888,10 +897,6 @@ int drm_fb_helper_set_par(struct fb_info *info)
888 897
889 drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper); 898 drm_fb_helper_restore_fbdev_mode_unlocked(fb_helper);
890 899
891 if (fb_helper->delayed_hotplug) {
892 fb_helper->delayed_hotplug = false;
893 drm_fb_helper_hotplug_event(fb_helper);
894 }
895 return 0; 900 return 0;
896} 901}
897EXPORT_SYMBOL(drm_fb_helper_set_par); 902EXPORT_SYMBOL(drm_fb_helper_set_par);
@@ -995,19 +1000,21 @@ static int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
995 crtc_count = 0; 1000 crtc_count = 0;
996 for (i = 0; i < fb_helper->crtc_count; i++) { 1001 for (i = 0; i < fb_helper->crtc_count; i++) {
997 struct drm_display_mode *desired_mode; 1002 struct drm_display_mode *desired_mode;
1003 int x, y;
998 desired_mode = fb_helper->crtc_info[i].desired_mode; 1004 desired_mode = fb_helper->crtc_info[i].desired_mode;
999 1005 x = fb_helper->crtc_info[i].x;
1006 y = fb_helper->crtc_info[i].y;
1000 if (desired_mode) { 1007 if (desired_mode) {
1001 if (gamma_size == 0) 1008 if (gamma_size == 0)
1002 gamma_size = fb_helper->crtc_info[i].mode_set.crtc->gamma_size; 1009 gamma_size = fb_helper->crtc_info[i].mode_set.crtc->gamma_size;
1003 if (desired_mode->hdisplay < sizes.fb_width) 1010 if (desired_mode->hdisplay + x < sizes.fb_width)
1004 sizes.fb_width = desired_mode->hdisplay; 1011 sizes.fb_width = desired_mode->hdisplay + x;
1005 if (desired_mode->vdisplay < sizes.fb_height) 1012 if (desired_mode->vdisplay + y < sizes.fb_height)
1006 sizes.fb_height = desired_mode->vdisplay; 1013 sizes.fb_height = desired_mode->vdisplay + y;
1007 if (desired_mode->hdisplay > sizes.surface_width) 1014 if (desired_mode->hdisplay + x > sizes.surface_width)
1008 sizes.surface_width = desired_mode->hdisplay; 1015 sizes.surface_width = desired_mode->hdisplay + x;
1009 if (desired_mode->vdisplay > sizes.surface_height) 1016 if (desired_mode->vdisplay + y > sizes.surface_height)
1010 sizes.surface_height = desired_mode->vdisplay; 1017 sizes.surface_height = desired_mode->vdisplay + y;
1011 crtc_count++; 1018 crtc_count++;
1012 } 1019 }
1013 } 1020 }
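
The reason the per-CRTC x/y offsets now feed the size computation is easiest to see with numbers; a hypothetical 2x1 tiled monitor, not taken from the patch:

	/*
	 * Left tile:  1920x2160 desired mode at (x=0,    y=0)
	 * Right tile: 1920x2160 desired mode at (x=1920, y=0)
	 *
	 * surface_width  = max(1920 + 0, 1920 + 1920) = 3840
	 * surface_height = max(2160 + 0, 2160 + 0)    = 2160
	 *
	 * so a single fbdev surface spans the whole tiled monitor.
	 */
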
@@ -1307,6 +1314,7 @@ static void drm_enable_connectors(struct drm_fb_helper *fb_helper,
1307 1314
1308static bool drm_target_cloned(struct drm_fb_helper *fb_helper, 1315static bool drm_target_cloned(struct drm_fb_helper *fb_helper,
1309 struct drm_display_mode **modes, 1316 struct drm_display_mode **modes,
1317 struct drm_fb_offset *offsets,
1310 bool *enabled, int width, int height) 1318 bool *enabled, int width, int height)
1311{ 1319{
1312 int count, i, j; 1320 int count, i, j;
@@ -1378,27 +1386,88 @@ static bool drm_target_cloned(struct drm_fb_helper *fb_helper,
1378 return false; 1386 return false;
1379} 1387}
1380 1388
1389static int drm_get_tile_offsets(struct drm_fb_helper *fb_helper,
1390 struct drm_display_mode **modes,
1391 struct drm_fb_offset *offsets,
1392 int idx,
1393 int h_idx, int v_idx)
1394{
1395 struct drm_fb_helper_connector *fb_helper_conn;
1396 int i;
1397 int hoffset = 0, voffset = 0;
1398
1399 for (i = 0; i < fb_helper->connector_count; i++) {
1400 fb_helper_conn = fb_helper->connector_info[i];
1401 if (!fb_helper_conn->connector->has_tile)
1402 continue;
1403
1404 if (!modes[i] && (h_idx || v_idx)) {
1405 DRM_DEBUG_KMS("no modes for connector tiled %d %d\n", i,
1406 fb_helper_conn->connector->base.id);
1407 continue;
1408 }
1409 if (fb_helper_conn->connector->tile_h_loc < h_idx)
1410 hoffset += modes[i]->hdisplay;
1411
1412 if (fb_helper_conn->connector->tile_v_loc < v_idx)
1413 voffset += modes[i]->vdisplay;
1414 }
1415 offsets[idx].x = hoffset;
1416 offsets[idx].y = voffset;
1417 DRM_DEBUG_KMS("returned %d %d for %d %d\n", hoffset, voffset, h_idx, v_idx);
1418 return 0;
1419}
1420
1381static bool drm_target_preferred(struct drm_fb_helper *fb_helper, 1421static bool drm_target_preferred(struct drm_fb_helper *fb_helper,
1382 struct drm_display_mode **modes, 1422 struct drm_display_mode **modes,
1423 struct drm_fb_offset *offsets,
1383 bool *enabled, int width, int height) 1424 bool *enabled, int width, int height)
1384{ 1425{
1385 struct drm_fb_helper_connector *fb_helper_conn; 1426 struct drm_fb_helper_connector *fb_helper_conn;
1386 int i; 1427 int i;
1387 1428 uint64_t conn_configured = 0, mask;
1429 int tile_pass = 0;
1430 mask = (1 << fb_helper->connector_count) - 1;
1431retry:
1388 for (i = 0; i < fb_helper->connector_count; i++) { 1432 for (i = 0; i < fb_helper->connector_count; i++) {
1389 fb_helper_conn = fb_helper->connector_info[i]; 1433 fb_helper_conn = fb_helper->connector_info[i];
1390 1434
1391 if (enabled[i] == false) 1435 if (conn_configured & (1 << i))
1392 continue; 1436 continue;
1393 1437
1438 if (enabled[i] == false) {
1439 conn_configured |= (1 << i);
1440 continue;
1441 }
1442
1443 /* first pass over all the untiled connectors */
1444 if (tile_pass == 0 && fb_helper_conn->connector->has_tile)
1445 continue;
1446
1447 if (tile_pass == 1) {
1448 if (fb_helper_conn->connector->tile_h_loc != 0 ||
1449 fb_helper_conn->connector->tile_v_loc != 0)
1450 continue;
1451
1452 } else {
1453 if (fb_helper_conn->connector->tile_h_loc != tile_pass -1 &&
1454 fb_helper_conn->connector->tile_v_loc != tile_pass - 1)
1455 /* if this tile_pass doesn't cover any of the tiles - keep going */
1456 continue;
1457
1458 /* find the tile offsets for this pass - need
1459 to find all tiles left and above */
1460 drm_get_tile_offsets(fb_helper, modes, offsets,
1461 i, fb_helper_conn->connector->tile_h_loc, fb_helper_conn->connector->tile_v_loc);
1462 }
1394 DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n", 1463 DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
1395 fb_helper_conn->connector->base.id); 1464 fb_helper_conn->connector->base.id);
1396 1465
 1397 /* go for command line mode first */ 1466 /* go for command line mode first */
1398 modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height); 1467 modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height);
1399 if (!modes[i]) { 1468 if (!modes[i]) {
1400 DRM_DEBUG_KMS("looking for preferred mode on connector %d\n", 1469 DRM_DEBUG_KMS("looking for preferred mode on connector %d %d\n",
1401 fb_helper_conn->connector->base.id); 1470 fb_helper_conn->connector->base.id, fb_helper_conn->connector->tile_group ? fb_helper_conn->connector->tile_group->id : 0);
1402 modes[i] = drm_has_preferred_mode(fb_helper_conn, width, height); 1471 modes[i] = drm_has_preferred_mode(fb_helper_conn, width, height);
1403 } 1472 }
1404 /* No preferred modes, pick one off the list */ 1473 /* No preferred modes, pick one off the list */
@@ -1408,6 +1477,12 @@ static bool drm_target_preferred(struct drm_fb_helper *fb_helper,
1408 } 1477 }
1409 DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name : 1478 DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
1410 "none"); 1479 "none");
1480 conn_configured |= (1 << i);
1481 }
1482
1483 if ((conn_configured & mask) != mask) {
1484 tile_pass++;
1485 goto retry;
1411 } 1486 }
1412 return true; 1487 return true;
1413} 1488}
@@ -1497,6 +1572,7 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
1497 struct drm_device *dev = fb_helper->dev; 1572 struct drm_device *dev = fb_helper->dev;
1498 struct drm_fb_helper_crtc **crtcs; 1573 struct drm_fb_helper_crtc **crtcs;
1499 struct drm_display_mode **modes; 1574 struct drm_display_mode **modes;
1575 struct drm_fb_offset *offsets;
1500 struct drm_mode_set *modeset; 1576 struct drm_mode_set *modeset;
1501 bool *enabled; 1577 bool *enabled;
1502 int width, height; 1578 int width, height;
@@ -1511,9 +1587,11 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
1511 sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL); 1587 sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
1512 modes = kcalloc(dev->mode_config.num_connector, 1588 modes = kcalloc(dev->mode_config.num_connector,
1513 sizeof(struct drm_display_mode *), GFP_KERNEL); 1589 sizeof(struct drm_display_mode *), GFP_KERNEL);
1590 offsets = kcalloc(dev->mode_config.num_connector,
1591 sizeof(struct drm_fb_offset), GFP_KERNEL);
1514 enabled = kcalloc(dev->mode_config.num_connector, 1592 enabled = kcalloc(dev->mode_config.num_connector,
1515 sizeof(bool), GFP_KERNEL); 1593 sizeof(bool), GFP_KERNEL);
1516 if (!crtcs || !modes || !enabled) { 1594 if (!crtcs || !modes || !enabled || !offsets) {
1517 DRM_ERROR("Memory allocation failed\n"); 1595 DRM_ERROR("Memory allocation failed\n");
1518 goto out; 1596 goto out;
1519 } 1597 }
@@ -1523,14 +1601,16 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
1523 1601
1524 if (!(fb_helper->funcs->initial_config && 1602 if (!(fb_helper->funcs->initial_config &&
1525 fb_helper->funcs->initial_config(fb_helper, crtcs, modes, 1603 fb_helper->funcs->initial_config(fb_helper, crtcs, modes,
1604 offsets,
1526 enabled, width, height))) { 1605 enabled, width, height))) {
1527 memset(modes, 0, dev->mode_config.num_connector*sizeof(modes[0])); 1606 memset(modes, 0, dev->mode_config.num_connector*sizeof(modes[0]));
1528 memset(crtcs, 0, dev->mode_config.num_connector*sizeof(crtcs[0])); 1607 memset(crtcs, 0, dev->mode_config.num_connector*sizeof(crtcs[0]));
1608 memset(offsets, 0, dev->mode_config.num_connector*sizeof(offsets[0]));
1529 1609
1530 if (!drm_target_cloned(fb_helper, 1610 if (!drm_target_cloned(fb_helper, modes, offsets,
1531 modes, enabled, width, height) && 1611 enabled, width, height) &&
1532 !drm_target_preferred(fb_helper, 1612 !drm_target_preferred(fb_helper, modes, offsets,
1533 modes, enabled, width, height)) 1613 enabled, width, height))
1534 DRM_ERROR("Unable to find initial modes\n"); 1614 DRM_ERROR("Unable to find initial modes\n");
1535 1615
1536 DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n", 1616 DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n",
@@ -1550,18 +1630,23 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
1550 for (i = 0; i < fb_helper->connector_count; i++) { 1630 for (i = 0; i < fb_helper->connector_count; i++) {
1551 struct drm_display_mode *mode = modes[i]; 1631 struct drm_display_mode *mode = modes[i];
1552 struct drm_fb_helper_crtc *fb_crtc = crtcs[i]; 1632 struct drm_fb_helper_crtc *fb_crtc = crtcs[i];
1633 struct drm_fb_offset *offset = &offsets[i];
1553 modeset = &fb_crtc->mode_set; 1634 modeset = &fb_crtc->mode_set;
1554 1635
1555 if (mode && fb_crtc) { 1636 if (mode && fb_crtc) {
1556 DRM_DEBUG_KMS("desired mode %s set on crtc %d\n", 1637 DRM_DEBUG_KMS("desired mode %s set on crtc %d (%d,%d)\n",
1557 mode->name, fb_crtc->mode_set.crtc->base.id); 1638 mode->name, fb_crtc->mode_set.crtc->base.id, offset->x, offset->y);
1558 fb_crtc->desired_mode = mode; 1639 fb_crtc->desired_mode = mode;
1640 fb_crtc->x = offset->x;
1641 fb_crtc->y = offset->y;
1559 if (modeset->mode) 1642 if (modeset->mode)
1560 drm_mode_destroy(dev, modeset->mode); 1643 drm_mode_destroy(dev, modeset->mode);
1561 modeset->mode = drm_mode_duplicate(dev, 1644 modeset->mode = drm_mode_duplicate(dev,
1562 fb_crtc->desired_mode); 1645 fb_crtc->desired_mode);
1563 modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector; 1646 modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector;
1564 modeset->fb = fb_helper->fb; 1647 modeset->fb = fb_helper->fb;
1648 modeset->x = offset->x;
1649 modeset->y = offset->y;
1565 } 1650 }
1566 } 1651 }
1567 1652
@@ -1570,7 +1655,6 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
1570 modeset = &fb_helper->crtc_info[i].mode_set; 1655 modeset = &fb_helper->crtc_info[i].mode_set;
1571 if (modeset->num_connectors == 0) { 1656 if (modeset->num_connectors == 0) {
1572 BUG_ON(modeset->fb); 1657 BUG_ON(modeset->fb);
1573 BUG_ON(modeset->num_connectors);
1574 if (modeset->mode) 1658 if (modeset->mode)
1575 drm_mode_destroy(dev, modeset->mode); 1659 drm_mode_destroy(dev, modeset->mode);
1576 modeset->mode = NULL; 1660 modeset->mode = NULL;
@@ -1579,6 +1663,7 @@ static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
1579out: 1663out:
1580 kfree(crtcs); 1664 kfree(crtcs);
1581 kfree(modes); 1665 kfree(modes);
1666 kfree(offsets);
1582 kfree(enabled); 1667 kfree(enabled);
1583} 1668}
1584 1669
diff --git a/drivers/gpu/drm/drm_flip_work.c b/drivers/gpu/drm/drm_flip_work.c
index f9c7fa3d0012..43d9b950ef9f 100644
--- a/drivers/gpu/drm/drm_flip_work.c
+++ b/drivers/gpu/drm/drm_flip_work.c
@@ -25,6 +25,44 @@
25#include "drm_flip_work.h" 25#include "drm_flip_work.h"
26 26
27/** 27/**
28 * drm_flip_work_allocate_task - allocate a flip-work task
29 * @data: data associated to the task
30 * @flags: allocator flags
31 *
32 * Allocate a drm_flip_task object and attach private data to it.
33 */
34struct drm_flip_task *drm_flip_work_allocate_task(void *data, gfp_t flags)
35{
36 struct drm_flip_task *task;
37
38 task = kzalloc(sizeof(*task), flags);
39 if (task)
40 task->data = data;
41
42 return task;
43}
44EXPORT_SYMBOL(drm_flip_work_allocate_task);
45
46/**
47 * drm_flip_work_queue_task - queue a specific task
48 * @work: the flip-work
49 * @task: the task to handle
50 *
51 * Queues task, that will later be run (passed back to drm_flip_func_t
52 * func) on a work queue after drm_flip_work_commit() is called.
53 */
54void drm_flip_work_queue_task(struct drm_flip_work *work,
55 struct drm_flip_task *task)
56{
57 unsigned long flags;
58
59 spin_lock_irqsave(&work->lock, flags);
60 list_add_tail(&task->node, &work->queued);
61 spin_unlock_irqrestore(&work->lock, flags);
62}
63EXPORT_SYMBOL(drm_flip_work_queue_task);
64
65/**
28 * drm_flip_work_queue - queue work 66 * drm_flip_work_queue - queue work
29 * @work: the flip-work 67 * @work: the flip-work
30 * @val: the value to queue 68 * @val: the value to queue
@@ -34,10 +72,14 @@
34 */ 72 */
35void drm_flip_work_queue(struct drm_flip_work *work, void *val) 73void drm_flip_work_queue(struct drm_flip_work *work, void *val)
36{ 74{
37 if (kfifo_put(&work->fifo, val)) { 75 struct drm_flip_task *task;
38 atomic_inc(&work->pending); 76
77 task = drm_flip_work_allocate_task(val,
78 drm_can_sleep() ? GFP_KERNEL : GFP_ATOMIC);
79 if (task) {
80 drm_flip_work_queue_task(work, task);
39 } else { 81 } else {
40 DRM_ERROR("%s fifo full!\n", work->name); 82 DRM_ERROR("%s could not allocate task!\n", work->name);
41 work->func(work, val); 83 work->func(work, val);
42 } 84 }
43} 85}
@@ -56,9 +98,12 @@ EXPORT_SYMBOL(drm_flip_work_queue);
56void drm_flip_work_commit(struct drm_flip_work *work, 98void drm_flip_work_commit(struct drm_flip_work *work,
57 struct workqueue_struct *wq) 99 struct workqueue_struct *wq)
58{ 100{
59 uint32_t pending = atomic_read(&work->pending); 101 unsigned long flags;
60 atomic_add(pending, &work->count); 102
61 atomic_sub(pending, &work->pending); 103 spin_lock_irqsave(&work->lock, flags);
104 list_splice_tail(&work->queued, &work->commited);
105 INIT_LIST_HEAD(&work->queued);
106 spin_unlock_irqrestore(&work->lock, flags);
62 queue_work(wq, &work->worker); 107 queue_work(wq, &work->worker);
63} 108}
64EXPORT_SYMBOL(drm_flip_work_commit); 109EXPORT_SYMBOL(drm_flip_work_commit);
@@ -66,47 +111,46 @@ EXPORT_SYMBOL(drm_flip_work_commit);
66static void flip_worker(struct work_struct *w) 111static void flip_worker(struct work_struct *w)
67{ 112{
68 struct drm_flip_work *work = container_of(w, struct drm_flip_work, worker); 113 struct drm_flip_work *work = container_of(w, struct drm_flip_work, worker);
69 uint32_t count = atomic_read(&work->count); 114 struct list_head tasks;
70 void *val = NULL; 115 unsigned long flags;
116
117 while (1) {
118 struct drm_flip_task *task, *tmp;
119
120 INIT_LIST_HEAD(&tasks);
121 spin_lock_irqsave(&work->lock, flags);
122 list_splice_tail(&work->commited, &tasks);
123 INIT_LIST_HEAD(&work->commited);
124 spin_unlock_irqrestore(&work->lock, flags);
71 125
72 atomic_sub(count, &work->count); 126 if (list_empty(&tasks))
127 break;
73 128
74 while(count--) 129 list_for_each_entry_safe(task, tmp, &tasks, node) {
75 if (!WARN_ON(!kfifo_get(&work->fifo, &val))) 130 work->func(work, task->data);
76 work->func(work, val); 131 kfree(task);
132 }
133 }
77} 134}
78 135
79/** 136/**
80 * drm_flip_work_init - initialize flip-work 137 * drm_flip_work_init - initialize flip-work
81 * @work: the flip-work to initialize 138 * @work: the flip-work to initialize
82 * @size: the max queue depth
83 * @name: debug name 139 * @name: debug name
84 * @func: the callback work function 140 * @func: the callback work function
85 * 141 *
86 * Initializes/allocates resources for the flip-work 142 * Initializes/allocates resources for the flip-work
87 *
88 * RETURNS:
89 * Zero on success, error code on failure.
90 */ 143 */
91int drm_flip_work_init(struct drm_flip_work *work, int size, 144void drm_flip_work_init(struct drm_flip_work *work,
92 const char *name, drm_flip_func_t func) 145 const char *name, drm_flip_func_t func)
93{ 146{
94 int ret;
95
96 work->name = name; 147 work->name = name;
97 atomic_set(&work->count, 0); 148 INIT_LIST_HEAD(&work->queued);
98 atomic_set(&work->pending, 0); 149 INIT_LIST_HEAD(&work->commited);
150 spin_lock_init(&work->lock);
99 work->func = func; 151 work->func = func;
100 152
101 ret = kfifo_alloc(&work->fifo, size, GFP_KERNEL);
102 if (ret) {
103 DRM_ERROR("could not allocate %s fifo\n", name);
104 return ret;
105 }
106
107 INIT_WORK(&work->worker, flip_worker); 153 INIT_WORK(&work->worker, flip_worker);
108
109 return 0;
110} 154}
111EXPORT_SYMBOL(drm_flip_work_init); 155EXPORT_SYMBOL(drm_flip_work_init);
112 156
@@ -118,7 +162,6 @@ EXPORT_SYMBOL(drm_flip_work_init);
118 */ 162 */
119void drm_flip_work_cleanup(struct drm_flip_work *work) 163void drm_flip_work_cleanup(struct drm_flip_work *work)
120{ 164{
121 WARN_ON(!kfifo_is_empty(&work->fifo)); 165 WARN_ON(!list_empty(&work->queued) || !list_empty(&work->commited));
122 kfifo_free(&work->fifo);
123} 166}
124EXPORT_SYMBOL(drm_flip_work_cleanup); 167EXPORT_SYMBOL(drm_flip_work_cleanup);
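
The rework above replaces the fixed-size kfifo with dynamically allocated tasks on linked lists: drm_flip_work_init() loses its queue-depth argument and return value, and drm_flip_work_queue() can no longer hit a "fifo full" condition, falling back to GFP_ATOMIC allocation when it cannot sleep. A minimal usage sketch of the reworked API follows; the unref_fb_worker callback and the framebuffer-unreference use case are illustrative, not part of the patch:

#include <drm/drmP.h>
#include <drm/drm_flip_work.h>

static struct drm_flip_work unref_work;

/* drm_flip_func_t callback, run once per committed task */
static void unref_fb_worker(struct drm_flip_work *work, void *val)
{
	drm_framebuffer_unreference(val);
}

static void example_setup(void)
{
	/* no queue-depth argument and no error to check anymore */
	drm_flip_work_init(&unref_work, "fb unref", unref_fb_worker);
}

static void example_flip(struct drm_framebuffer *old_fb,
			 struct workqueue_struct *wq)
{
	/* safe from atomic context: allocation uses GFP_ATOMIC there */
	drm_flip_work_queue(&unref_work, old_fb);

	/* later, e.g. from the flip-done interrupt handler */
	drm_flip_work_commit(&unref_work, wq);
}
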
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index ed7bc68f7e87..0b9514b6cd64 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -515,16 +515,19 @@ ssize_t drm_read(struct file *filp, char __user *buffer,
515 size_t total; 515 size_t total;
516 ssize_t ret; 516 ssize_t ret;
517 517
518 ret = wait_event_interruptible(file_priv->event_wait, 518 if ((filp->f_flags & O_NONBLOCK) == 0) {
519 !list_empty(&file_priv->event_list)); 519 ret = wait_event_interruptible(file_priv->event_wait,
520 if (ret < 0) 520 !list_empty(&file_priv->event_list));
521 return ret; 521 if (ret < 0)
522 return ret;
523 }
522 524
523 total = 0; 525 total = 0;
524 while (drm_dequeue_event(file_priv, total, count, &e)) { 526 while (drm_dequeue_event(file_priv, total, count, &e)) {
525 if (copy_to_user(buffer + total, 527 if (copy_to_user(buffer + total,
526 e->event, e->event->length)) { 528 e->event, e->event->length)) {
527 total = -EFAULT; 529 total = -EFAULT;
530 e->destroy(e);
528 break; 531 break;
529 } 532 }
530 533
@@ -532,7 +535,7 @@ ssize_t drm_read(struct file *filp, char __user *buffer,
532 e->destroy(e); 535 e->destroy(e);
533 } 536 }
534 537
535 return total; 538 return total ?: -EAGAIN;
536} 539}
537EXPORT_SYMBOL(drm_read); 540EXPORT_SYMBOL(drm_read);
538 541
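
With this change drm_read() honors O_NONBLOCK: instead of sleeping on event_wait it returns -EAGAIN when the queue is empty (the "total ?: -EAGAIN" at the end), and an event that fails copy_to_user() is now destroyed instead of leaked. A hypothetical userspace consumer of the non-blocking behavior; fd is assumed to be a DRM device node opened with O_NONBLOCK:

#include <errno.h>
#include <unistd.h>

/* drain all currently queued DRM events without ever blocking */
static int drain_drm_events(int fd, char *buf, size_t len)
{
	for (;;) {
		ssize_t n = read(fd, buf, len);

		if (n > 0)
			continue;	/* parse the events in buf here */
		if (n < 0 && errno == EAGAIN)
			return 0;	/* queue empty, poll again later */
		if (n < 0 && errno == EINTR)
			continue;
		return n ? -1 : 0;	/* error, or nothing to read */
	}
}
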
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index f6ca51259fa3..16a164770713 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -188,7 +188,7 @@ drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
188} 188}
189 189
190/** 190/**
191 * drm_gem_object_free - release resources bound to userspace handles 191 * drm_gem_object_handle_free - release resources bound to userspace handles
192 * @obj: GEM object to clean up. 192 * @obj: GEM object to clean up.
193 * 193 *
194 * Called after the last handle to the object has been closed 194 * Called after the last handle to the object has been closed
@@ -309,7 +309,7 @@ EXPORT_SYMBOL(drm_gem_dumb_destroy);
309 * drm_gem_handle_create_tail - internal functions to create a handle 309 * drm_gem_handle_create_tail - internal functions to create a handle
310 * @file_priv: drm file-private structure to register the handle for 310 * @file_priv: drm file-private structure to register the handle for
311 * @obj: object to register 311 * @obj: object to register
312 * @handlep: pionter to return the created handle to the caller 312 * @handlep: pointer to return the created handle to the caller
313 * 313 *
314 * This expects the dev->object_name_lock to be held already and will drop it 314 * This expects the dev->object_name_lock to be held already and will drop it
315 * before returning. Used to avoid races in establishing new handles when 315 * before returning. Used to avoid races in establishing new handles when
@@ -362,7 +362,7 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
362} 362}
363 363
364/** 364/**
365 * gem_handle_create - create a gem handle for an object 365 * drm_gem_handle_create - create a gem handle for an object
366 * @file_priv: drm file-private structure to register the handle for 366 * @file_priv: drm file-private structure to register the handle for
367 * @obj: object to register 367 * @obj: object to register
368 * @handlep: pointer to return the created handle to the caller 368 * @handlep: pointer to return the created handle to the caller
@@ -371,10 +371,9 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
371 * to the object, which includes a regular reference count. Callers 371 * to the object, which includes a regular reference count. Callers
372 * will likely want to dereference the object afterwards. 372 * will likely want to dereference the object afterwards.
373 */ 373 */
374int 374int drm_gem_handle_create(struct drm_file *file_priv,
375drm_gem_handle_create(struct drm_file *file_priv, 375 struct drm_gem_object *obj,
376 struct drm_gem_object *obj, 376 u32 *handlep)
377 u32 *handlep)
378{ 377{
379 mutex_lock(&obj->dev->object_name_lock); 378 mutex_lock(&obj->dev->object_name_lock);
380 379
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 0316310e2cc4..e419eedf751d 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -29,18 +29,31 @@
29#include <drm/drm_gem_cma_helper.h> 29#include <drm/drm_gem_cma_helper.h>
30#include <drm/drm_vma_manager.h> 30#include <drm/drm_vma_manager.h>
31 31
32/* 32/**
33 * DOC: cma helpers
34 *
35 * The Contiguous Memory Allocator reserves a pool of memory at early boot
36 * that is used to service requests for large blocks of contiguous memory.
37 *
38 * The DRM GEM/CMA helpers use this allocator as a means to provide buffer
39 * objects that are physically contiguous in memory. This is useful for
40 * display drivers that are unable to map scattered buffers via an IOMMU.
41 */
42
43/**
33 * __drm_gem_cma_create - Create a GEM CMA object without allocating memory 44 * __drm_gem_cma_create - Create a GEM CMA object without allocating memory
34 * @drm: The drm device 45 * @drm: DRM device
35 * @size: The GEM object size 46 * @size: size of the object to allocate
36 * 47 *
37 * This function creates and initializes a GEM CMA object of the given size, but 48 * This function creates and initializes a GEM CMA object of the given size,
38 * doesn't allocate any memory to back the object. 49 * but doesn't allocate any memory to back the object.
39 * 50 *
40 * Return a struct drm_gem_cma_object* on success or ERR_PTR values on failure. 51 * Returns:
52 * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
53 * error code on failure.
41 */ 54 */
42static struct drm_gem_cma_object * 55static struct drm_gem_cma_object *
43__drm_gem_cma_create(struct drm_device *drm, unsigned int size) 56__drm_gem_cma_create(struct drm_device *drm, size_t size)
44{ 57{
45 struct drm_gem_cma_object *cma_obj; 58 struct drm_gem_cma_object *cma_obj;
46 struct drm_gem_object *gem_obj; 59 struct drm_gem_object *gem_obj;
@@ -69,14 +82,21 @@ error:
69 return ERR_PTR(ret); 82 return ERR_PTR(ret);
70} 83}
71 84
72/* 85/**
73 * drm_gem_cma_create - allocate an object with the given size 86 * drm_gem_cma_create - allocate an object with the given size
87 * @drm: DRM device
88 * @size: size of the object to allocate
89 *
90 * This function creates a CMA GEM object and allocates a contiguous chunk of
91 * memory as backing store. The backing memory has the writecombine attribute
92 * set.
74 * 93 *
75 * returns a struct drm_gem_cma_object* on success or ERR_PTR values 94 * Returns:
76 * on failure. 95 * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
96 * error code on failure.
77 */ 97 */
78struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm, 98struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
79 unsigned int size) 99 size_t size)
80{ 100{
81 struct drm_gem_cma_object *cma_obj; 101 struct drm_gem_cma_object *cma_obj;
82 int ret; 102 int ret;
@@ -104,17 +124,26 @@ error:
104} 124}
105EXPORT_SYMBOL_GPL(drm_gem_cma_create); 125EXPORT_SYMBOL_GPL(drm_gem_cma_create);
106 126
107/* 127/**
108 * drm_gem_cma_create_with_handle - allocate an object with the given 128 * drm_gem_cma_create_with_handle - allocate an object with the given size and
109 * size and create a gem handle on it 129 * return a GEM handle to it
130 * @file_priv: DRM file-private structure to register the handle for
131 * @drm: DRM device
132 * @size: size of the object to allocate
133 * @handle: return location for the GEM handle
134 *
135 * This function creates a CMA GEM object, allocating a physically contiguous
136 * chunk of memory as backing store. The GEM object is then added to the list
137 * of objects associated with the given file and a handle to it is returned.
110 * 138 *
111 * returns a struct drm_gem_cma_object* on success or ERR_PTR values 139 * Returns:
112 * on failure. 140 * A struct drm_gem_cma_object * on success or an ERR_PTR()-encoded negative
141 * error code on failure.
113 */ 142 */
114static struct drm_gem_cma_object *drm_gem_cma_create_with_handle( 143static struct drm_gem_cma_object *
115 struct drm_file *file_priv, 144drm_gem_cma_create_with_handle(struct drm_file *file_priv,
116 struct drm_device *drm, unsigned int size, 145 struct drm_device *drm, size_t size,
117 unsigned int *handle) 146 uint32_t *handle)
118{ 147{
119 struct drm_gem_cma_object *cma_obj; 148 struct drm_gem_cma_object *cma_obj;
120 struct drm_gem_object *gem_obj; 149 struct drm_gem_object *gem_obj;
@@ -145,16 +174,19 @@ err_handle_create:
145 return ERR_PTR(ret); 174 return ERR_PTR(ret);
146} 175}
147 176
148/* 177/**
149 * drm_gem_cma_free_object - (struct drm_driver)->gem_free_object callback 178 * drm_gem_cma_free_object - free resources associated with a CMA GEM object
150 * function 179 * @gem_obj: GEM object to free
180 *
181 * This function frees the backing memory of the CMA GEM object, cleans up the
182 * GEM object state and frees the memory used to store the object itself.
183 * Drivers using the CMA helpers should set this as their DRM driver's
184 * ->gem_free_object() callback.
151 */ 185 */
152void drm_gem_cma_free_object(struct drm_gem_object *gem_obj) 186void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
153{ 187{
154 struct drm_gem_cma_object *cma_obj; 188 struct drm_gem_cma_object *cma_obj;
155 189
156 drm_gem_free_mmap_offset(gem_obj);
157
158 cma_obj = to_drm_gem_cma_obj(gem_obj); 190 cma_obj = to_drm_gem_cma_obj(gem_obj);
159 191
160 if (cma_obj->vaddr) { 192 if (cma_obj->vaddr) {
@@ -170,18 +202,26 @@ void drm_gem_cma_free_object(struct drm_gem_object *gem_obj)
170} 202}
171EXPORT_SYMBOL_GPL(drm_gem_cma_free_object); 203EXPORT_SYMBOL_GPL(drm_gem_cma_free_object);
172 204
173/* 205/**
174 * drm_gem_cma_dumb_create - (struct drm_driver)->dumb_create callback 206 * drm_gem_cma_dumb_create_internal - create a dumb buffer object
175 * function 207 * @file_priv: DRM file-private structure to create the dumb buffer for
208 * @drm: DRM device
209 * @args: IOCTL data
210 *
211 * This aligns the pitch and size arguments to the minimum required. This is
212 * an internal helper that can be wrapped by a driver to account for hardware
213 * with more specific alignment requirements. It should not be used directly
214 * as the ->dumb_create() callback in a DRM driver.
176 * 215 *
177 * This aligns the pitch and size arguments to the minimum required. wrap 216 * Returns:
178 * this into your own function if you need bigger alignment. 217 * 0 on success or a negative error code on failure.
179 */ 218 */
180int drm_gem_cma_dumb_create(struct drm_file *file_priv, 219int drm_gem_cma_dumb_create_internal(struct drm_file *file_priv,
181 struct drm_device *dev, struct drm_mode_create_dumb *args) 220 struct drm_device *drm,
221 struct drm_mode_create_dumb *args)
182{ 222{
223 unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
183 struct drm_gem_cma_object *cma_obj; 224 struct drm_gem_cma_object *cma_obj;
184 int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
185 225
186 if (args->pitch < min_pitch) 226 if (args->pitch < min_pitch)
187 args->pitch = min_pitch; 227 args->pitch = min_pitch;
@@ -189,18 +229,63 @@ int drm_gem_cma_dumb_create(struct drm_file *file_priv,
189 if (args->size < args->pitch * args->height) 229 if (args->size < args->pitch * args->height)
190 args->size = args->pitch * args->height; 230 args->size = args->pitch * args->height;
191 231
192 cma_obj = drm_gem_cma_create_with_handle(file_priv, dev, 232 cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
193 args->size, &args->handle); 233 &args->handle);
234 return PTR_ERR_OR_ZERO(cma_obj);
235}
236EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create_internal);
237
238/**
239 * drm_gem_cma_dumb_create - create a dumb buffer object
240 * @file_priv: DRM file-private structure to create the dumb buffer for
241 * @drm: DRM device
242 * @args: IOCTL data
243 *
244 * This function computes the pitch of the dumb buffer and rounds it up to an
245 * integer number of bytes per pixel. Drivers for hardware that doesn't have
246 * any additional restrictions on the pitch can directly use this function as
247 * their ->dumb_create() callback.
248 *
249 * For hardware with additional restrictions, drivers can adjust the fields
250 * set up by userspace and pass the IOCTL data along to the
251 * drm_gem_cma_dumb_create_internal() function.
252 *
253 * Returns:
254 * 0 on success or a negative error code on failure.
255 */
256int drm_gem_cma_dumb_create(struct drm_file *file_priv,
257 struct drm_device *drm,
258 struct drm_mode_create_dumb *args)
259{
260 struct drm_gem_cma_object *cma_obj;
261
262 args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
263 args->size = args->pitch * args->height;
264
265 cma_obj = drm_gem_cma_create_with_handle(file_priv, drm, args->size,
266 &args->handle);
194 return PTR_ERR_OR_ZERO(cma_obj); 267 return PTR_ERR_OR_ZERO(cma_obj);
195} 268}
196EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create); 269EXPORT_SYMBOL_GPL(drm_gem_cma_dumb_create);
197 270
198/* 271/**
199 * drm_gem_cma_dumb_map_offset - (struct drm_driver)->dumb_map_offset callback 272 * drm_gem_cma_dumb_map_offset - return the fake mmap offset for a CMA GEM
200 * function 273 * object
274 * @file_priv: DRM file-private structure containing the GEM object
275 * @drm: DRM device
276 * @handle: GEM object handle
277 * @offset: return location for the fake mmap offset
278 *
279 * This function looks up an object by its handle and returns the fake mmap
280 * offset associated with it. Drivers using the CMA helpers should set this
281 * as their DRM driver's ->dumb_map_offset() callback.
282 *
283 * Returns:
284 * 0 on success or a negative error code on failure.
201 */ 285 */
202int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv, 286int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
203 struct drm_device *drm, uint32_t handle, uint64_t *offset) 287 struct drm_device *drm, u32 handle,
288 u64 *offset)
204{ 289{
205 struct drm_gem_object *gem_obj; 290 struct drm_gem_object *gem_obj;
206 291
@@ -208,7 +293,7 @@ int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
208 293
209 gem_obj = drm_gem_object_lookup(drm, file_priv, handle); 294 gem_obj = drm_gem_object_lookup(drm, file_priv, handle);
210 if (!gem_obj) { 295 if (!gem_obj) {
211 dev_err(drm->dev, "failed to lookup gem object\n"); 296 dev_err(drm->dev, "failed to lookup GEM object\n");
212 mutex_unlock(&drm->struct_mutex); 297 mutex_unlock(&drm->struct_mutex);
213 return -EINVAL; 298 return -EINVAL;
214 } 299 }
@@ -251,8 +336,20 @@ static int drm_gem_cma_mmap_obj(struct drm_gem_cma_object *cma_obj,
251 return ret; 336 return ret;
252} 337}
253 338
254/* 339/**
255 * drm_gem_cma_mmap - (struct file_operation)->mmap callback function 340 * drm_gem_cma_mmap - memory-map a CMA GEM object
341 * @filp: file object
342 * @vma: VMA for the area to be mapped
343 *
344 * This function implements an augmented version of the GEM DRM file mmap
345 * operation for CMA objects: In addition to the usual GEM VMA setup it
346 * immediately faults in the entire object instead of using on-demand
347 * faulting. Drivers which employ the CMA helpers should use this function
348 * as their ->mmap() handler in the DRM device file's file_operations
349 * structure.
350 *
351 * Returns:
352 * 0 on success or a negative error code on failure.
256 */ 353 */
257int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma) 354int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
258{ 355{
@@ -272,7 +369,16 @@ int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma)
272EXPORT_SYMBOL_GPL(drm_gem_cma_mmap); 369EXPORT_SYMBOL_GPL(drm_gem_cma_mmap);
273 370
274#ifdef CONFIG_DEBUG_FS 371#ifdef CONFIG_DEBUG_FS
275void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj, struct seq_file *m) 372/**
373 * drm_gem_cma_describe - describe a CMA GEM object for debugfs
374 * @cma_obj: CMA GEM object
375 * @m: debugfs file handle
376 *
377 * This function can be used to dump a human-readable representation of the
378 * CMA GEM object into a synthetic file.
379 */
380void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj,
381 struct seq_file *m)
276{ 382{
277 struct drm_gem_object *obj = &cma_obj->base; 383 struct drm_gem_object *obj = &cma_obj->base;
278 struct drm_device *dev = obj->dev; 384 struct drm_device *dev = obj->dev;
@@ -291,7 +397,18 @@ void drm_gem_cma_describe(struct drm_gem_cma_object *cma_obj, struct seq_file *m
291EXPORT_SYMBOL_GPL(drm_gem_cma_describe); 397EXPORT_SYMBOL_GPL(drm_gem_cma_describe);
292#endif 398#endif
293 399
294/* low-level interface prime helpers */ 400/**
401 * drm_gem_cma_prime_get_sg_table - provide a scatter/gather table of pinned
402 * pages for a CMA GEM object
403 * @obj: GEM object
404 *
405 * This function exports a scatter/gather table suitable for PRIME usage by
406 * calling the standard DMA mapping API. Drivers using the CMA helpers should
407 * set this as their DRM driver's ->gem_prime_get_sg_table() callback.
408 *
409 * Returns:
410 * A pointer to the scatter/gather table of pinned pages or NULL on failure.
411 */
295struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj) 412struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj)
296{ 413{
297 struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj); 414 struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
@@ -315,6 +432,23 @@ out:
315} 432}
316EXPORT_SYMBOL_GPL(drm_gem_cma_prime_get_sg_table); 433EXPORT_SYMBOL_GPL(drm_gem_cma_prime_get_sg_table);
317 434
435/**
436 * drm_gem_cma_prime_import_sg_table - produce a CMA GEM object from another
437 * driver's scatter/gather table of pinned pages
438 * @dev: device to import into
439 * @attach: DMA-BUF attachment
440 * @sgt: scatter/gather table of pinned pages
441 *
442 * This function imports a scatter/gather table exported via DMA-BUF by
443 * another driver. Imported buffers must be physically contiguous in memory
444 * (i.e. the scatter/gather table must contain a single entry). Drivers that
445 * use the CMA helpers should set this as their DRM driver's
446 * ->gem_prime_import_sg_table() callback.
447 *
448 * Returns:
449 * A pointer to a newly created GEM object or an ERR_PTR()-encoded negative
450 * error code on failure.
451 */
318struct drm_gem_object * 452struct drm_gem_object *
319drm_gem_cma_prime_import_sg_table(struct drm_device *dev, 453drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
320 struct dma_buf_attachment *attach, 454 struct dma_buf_attachment *attach,
@@ -339,6 +473,18 @@ drm_gem_cma_prime_import_sg_table(struct drm_device *dev,
339} 473}
340EXPORT_SYMBOL_GPL(drm_gem_cma_prime_import_sg_table); 474EXPORT_SYMBOL_GPL(drm_gem_cma_prime_import_sg_table);
341 475
476/**
477 * drm_gem_cma_prime_mmap - memory-map an exported CMA GEM object
478 * @obj: GEM object
479 * @vma: VMA for the area to be mapped
480 *
481 * This function maps a buffer imported via DRM PRIME into a userspace
482 * process's address space. Drivers that use the CMA helpers should set this
483 * as their DRM driver's ->gem_prime_mmap() callback.
484 *
485 * Returns:
486 * 0 on success or a negative error code on failure.
487 */
342int drm_gem_cma_prime_mmap(struct drm_gem_object *obj, 488int drm_gem_cma_prime_mmap(struct drm_gem_object *obj,
343 struct vm_area_struct *vma) 489 struct vm_area_struct *vma)
344{ 490{
@@ -357,6 +503,20 @@ int drm_gem_cma_prime_mmap(struct drm_gem_object *obj,
357} 503}
358EXPORT_SYMBOL_GPL(drm_gem_cma_prime_mmap); 504EXPORT_SYMBOL_GPL(drm_gem_cma_prime_mmap);
359 505
506/**
507 * drm_gem_cma_prime_vmap - map a CMA GEM object into the kernel's virtual
508 * address space
509 * @obj: GEM object
510 *
511 * This function maps a buffer exported via DRM PRIME into the kernel's
512 * virtual address space. Since the CMA buffers are already mapped into the
513 * kernel virtual address space this simply returns the cached virtual
514 * address. Drivers using the CMA helpers should set this as their DRM
515 * driver's ->gem_prime_vmap() callback.
516 *
517 * Returns:
518 * The kernel virtual address of the CMA GEM object's backing store.
519 */
360void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj) 520void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj)
361{ 521{
362 struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj); 522 struct drm_gem_cma_object *cma_obj = to_drm_gem_cma_obj(obj);
@@ -365,6 +525,17 @@ void *drm_gem_cma_prime_vmap(struct drm_gem_object *obj)
365} 525}
366EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vmap); 526EXPORT_SYMBOL_GPL(drm_gem_cma_prime_vmap);
367 527
528/**
529 * drm_gem_cma_prime_vunmap - unmap a CMA GEM object from the kernel's virtual
530 * address space
531 * @obj: GEM object
532 * @vaddr: kernel virtual address where the CMA GEM object was mapped
533 *
534 * This function removes a buffer exported via DRM PRIME from the kernel's
535 * virtual address space. This is a no-op because CMA buffers cannot be
536 * unmapped from kernel space. Drivers using the CMA helpers should set this
537 * as their DRM driver's ->gem_prime_vunmap() callback.
538 */
368void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr) 539void drm_gem_cma_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
369{ 540{
370 /* Nothing to do */ 541 /* Nothing to do */
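
Taken together, the newly documented helpers are meant to be dropped straight into a driver's file_operations and struct drm_driver. A sketch for a hypothetical CMA-based KMS driver; all foo_* names are made up, and a driver with stricter pitch alignment would wrap drm_gem_cma_dumb_create_internal() instead of using drm_gem_cma_dumb_create() directly:

#include <drm/drmP.h>
#include <drm/drm_gem_cma_helper.h>

static const struct file_operations foo_fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.release	= drm_release,
	.unlocked_ioctl	= drm_ioctl,
	.poll		= drm_poll,
	.read		= drm_read,
	.mmap		= drm_gem_cma_mmap,	/* faults in the whole buffer */
};

static struct drm_driver foo_driver = {
	.driver_features	   = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME,
	.gem_free_object	   = drm_gem_cma_free_object,
	.dumb_create		   = drm_gem_cma_dumb_create,
	.dumb_map_offset	   = drm_gem_cma_dumb_map_offset,
	.dumb_destroy		   = drm_gem_dumb_destroy,
	.prime_handle_to_fd	   = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle	   = drm_gem_prime_fd_to_handle,
	.gem_prime_export	   = drm_gem_prime_export,
	.gem_prime_import	   = drm_gem_prime_import,
	.gem_prime_get_sg_table	   = drm_gem_cma_prime_get_sg_table,
	.gem_prime_import_sg_table = drm_gem_cma_prime_import_sg_table,
	.gem_prime_mmap		   = drm_gem_cma_prime_mmap,
	.gem_prime_vmap		   = drm_gem_cma_prime_vmap,
	.gem_prime_vunmap	   = drm_gem_cma_prime_vunmap,
	.fops			   = &foo_fops,
};
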
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 5ef03c216a27..f5a5f18efa5b 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -166,7 +166,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
166 spin_lock_irqsave(&dev->vblank_time_lock, irqflags); 166 spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
167 167
168 /* 168 /*
169 * If the vblank interrupt was already disbled update the count 169 * If the vblank interrupt was already disabled update the count
170 * and timestamp to maintain the appearance that the counter 170 * and timestamp to maintain the appearance that the counter
171 * has been ticking all along until this time. This makes the 171 * has been ticking all along until this time. This makes the
172 * count account for the entire time between drm_vblank_on() and 172 * count account for the entire time between drm_vblank_on() and
@@ -1029,7 +1029,8 @@ void drm_vblank_put(struct drm_device *dev, int crtc)
1029{ 1029{
1030 struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; 1030 struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
1031 1031
1032 BUG_ON(atomic_read(&vblank->refcount) == 0); 1032 if (WARN_ON(atomic_read(&vblank->refcount) == 0))
1033 return;
1033 1034
1034 if (WARN_ON(crtc >= dev->num_crtcs)) 1035 if (WARN_ON(crtc >= dev->num_crtcs))
1035 return; 1036 return;
@@ -1190,7 +1191,7 @@ EXPORT_SYMBOL(drm_crtc_vblank_off);
1190 * 1191 *
1191 * This functions restores the vblank interrupt state captured with 1192 * This functions restores the vblank interrupt state captured with
1192 * drm_vblank_off() again. Note that calls to drm_vblank_on() and 1193 * drm_vblank_off() again. Note that calls to drm_vblank_on() and
1193 * drm_vblank_off() can be unbalanced and so can also be unconditionaly called 1194 * drm_vblank_off() can be unbalanced and so can also be unconditionally called
1194 * in driver load code to reflect the current hardware state of the crtc. 1195 * in driver load code to reflect the current hardware state of the crtc.
1195 * 1196 *
1196 * This is the legacy version of drm_crtc_vblank_on(). 1197 * This is the legacy version of drm_crtc_vblank_on().
@@ -1237,7 +1238,7 @@ EXPORT_SYMBOL(drm_vblank_on);
1237 * 1238 *
1238 * This functions restores the vblank interrupt state captured with 1239 * This functions restores the vblank interrupt state captured with
1239 * drm_vblank_off() again. Note that calls to drm_vblank_on() and 1240 * drm_vblank_off() again. Note that calls to drm_vblank_on() and
1240 * drm_vblank_off() can be unbalanced and so can also be unconditionaly called 1241 * drm_vblank_off() can be unbalanced and so can also be unconditionally called
1241 * in driver load code to reflect the current hardware state of the crtc. 1242 * in driver load code to reflect the current hardware state of the crtc.
1242 * 1243 *
1243 * This is the native kms version of drm_vblank_on(). 1244 * This is the native kms version of drm_vblank_on().
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index eb6dfe52cab2..c0644bb865f2 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -35,6 +35,16 @@
35 35
36#include <video/mipi_display.h> 36#include <video/mipi_display.h>
37 37
38/**
39 * DOC: dsi helpers
40 *
41 * These functions contain some common logic and helpers to deal with MIPI DSI
42 * peripherals.
43 *
44 * Helpers are provided for a number of standard MIPI DSI commands as well as a
45 * subset of the MIPI DCS command set.
46 */
47
38static int mipi_dsi_device_match(struct device *dev, struct device_driver *drv) 48static int mipi_dsi_device_match(struct device *dev, struct device_driver *drv)
39{ 49{
40 return of_driver_match_device(dev, drv); 50 return of_driver_match_device(dev, drv);
@@ -57,6 +67,29 @@ static struct bus_type mipi_dsi_bus_type = {
57 .pm = &mipi_dsi_device_pm_ops, 67 .pm = &mipi_dsi_device_pm_ops,
58}; 68};
59 69
70static int of_device_match(struct device *dev, void *data)
71{
72 return dev->of_node == data;
73}
74
75/**
76 * of_find_mipi_dsi_device_by_node() - find the MIPI DSI device matching a
77 * device tree node
78 * @np: device tree node
79 *
80 * Return: A pointer to the MIPI DSI device corresponding to @np or NULL if no
81 * such device exists (or has not been registered yet).
82 */
83struct mipi_dsi_device *of_find_mipi_dsi_device_by_node(struct device_node *np)
84{
85 struct device *dev;
86
87 dev = bus_find_device(&mipi_dsi_bus_type, NULL, np, of_device_match);
88
89 return dev ? to_mipi_dsi_device(dev) : NULL;
90}
91EXPORT_SYMBOL(of_find_mipi_dsi_device_by_node);
92
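
A typical consumer of this lookup is a DSI host or bridge driver resolving its peripheral from a device-tree reference. A hedged sketch; the "panel" phandle property and the -EPROBE_DEFER policy are driver conventions, not part of this API:

#include <linux/of.h>
#include <drm/drm_mipi_dsi.h>

static struct mipi_dsi_device *foo_find_panel(struct device *dev)
{
	struct device_node *np;
	struct mipi_dsi_device *dsi;

	np = of_parse_phandle(dev->of_node, "panel", 0);
	if (!np)
		return ERR_PTR(-ENODEV);

	dsi = of_find_mipi_dsi_device_by_node(np);
	of_node_put(np);

	/* NULL means the peripheral has not been registered yet */
	return dsi ?: ERR_PTR(-EPROBE_DEFER);
}
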
60static void mipi_dsi_dev_release(struct device *dev) 93static void mipi_dsi_dev_release(struct device *dev)
61{ 94{
62 struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev); 95 struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
@@ -198,59 +231,351 @@ int mipi_dsi_detach(struct mipi_dsi_device *dsi)
198} 231}
199EXPORT_SYMBOL(mipi_dsi_detach); 232EXPORT_SYMBOL(mipi_dsi_detach);
200 233
234static ssize_t mipi_dsi_device_transfer(struct mipi_dsi_device *dsi,
235 struct mipi_dsi_msg *msg)
236{
237 const struct mipi_dsi_host_ops *ops = dsi->host->ops;
238
239 if (!ops || !ops->transfer)
240 return -ENOSYS;
241
242 if (dsi->mode_flags & MIPI_DSI_MODE_LPM)
243 msg->flags |= MIPI_DSI_MSG_USE_LPM;
244
245 return ops->transfer(dsi->host, msg);
246}
247
201/** 248/**
202 * mipi_dsi_dcs_write - send DCS write command 249 * mipi_dsi_packet_format_is_short - check if a packet is of the short format
203 * @dsi: DSI device 250 * @type: MIPI DSI data type of the packet
204 * @data: pointer to the command followed by parameters 251 *
205 * @len: length of @data 252 * Return: true if the packet for the given data type is a short packet, false
253 * otherwise.
206 */ 254 */
207ssize_t mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, const void *data, 255bool mipi_dsi_packet_format_is_short(u8 type)
208 size_t len) 256{
257 switch (type) {
258 case MIPI_DSI_V_SYNC_START:
259 case MIPI_DSI_V_SYNC_END:
260 case MIPI_DSI_H_SYNC_START:
261 case MIPI_DSI_H_SYNC_END:
262 case MIPI_DSI_END_OF_TRANSMISSION:
263 case MIPI_DSI_COLOR_MODE_OFF:
264 case MIPI_DSI_COLOR_MODE_ON:
265 case MIPI_DSI_SHUTDOWN_PERIPHERAL:
266 case MIPI_DSI_TURN_ON_PERIPHERAL:
267 case MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM:
268 case MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM:
269 case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM:
270 case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM:
271 case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM:
272 case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM:
273 case MIPI_DSI_DCS_SHORT_WRITE:
274 case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
275 case MIPI_DSI_DCS_READ:
276 case MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE:
277 return true;
278 }
279
280 return false;
281}
282EXPORT_SYMBOL(mipi_dsi_packet_format_is_short);
283
284/**
285 * mipi_dsi_packet_format_is_long - check if a packet is of the long format
286 * @type: MIPI DSI data type of the packet
287 *
288 * Return: true if the packet for the given data type is a long packet, false
289 * otherwise.
290 */
291bool mipi_dsi_packet_format_is_long(u8 type)
292{
293 switch (type) {
294 case MIPI_DSI_NULL_PACKET:
295 case MIPI_DSI_BLANKING_PACKET:
296 case MIPI_DSI_GENERIC_LONG_WRITE:
297 case MIPI_DSI_DCS_LONG_WRITE:
298 case MIPI_DSI_LOOSELY_PACKED_PIXEL_STREAM_YCBCR20:
299 case MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR24:
300 case MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR16:
301 case MIPI_DSI_PACKED_PIXEL_STREAM_30:
302 case MIPI_DSI_PACKED_PIXEL_STREAM_36:
303 case MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR12:
304 case MIPI_DSI_PACKED_PIXEL_STREAM_16:
305 case MIPI_DSI_PACKED_PIXEL_STREAM_18:
306 case MIPI_DSI_PIXEL_STREAM_3BYTE_18:
307 case MIPI_DSI_PACKED_PIXEL_STREAM_24:
308 return true;
309 }
310
311 return false;
312}
313EXPORT_SYMBOL(mipi_dsi_packet_format_is_long);
314
315/**
316 * mipi_dsi_create_packet - create a packet from a message according to the
317 * DSI protocol
318 * @packet: pointer to a DSI packet structure
319 * @msg: message to translate into a packet
320 *
321 * Return: 0 on success or a negative error code on failure.
322 */
323int mipi_dsi_create_packet(struct mipi_dsi_packet *packet,
324 const struct mipi_dsi_msg *msg)
325{
326 const u8 *tx = msg ? msg->tx_buf : NULL;
327
328 if (!packet || !msg)
329 return -EINVAL;
330
331 /* do some minimum sanity checking */
332 if (!mipi_dsi_packet_format_is_short(msg->type) &&
333 !mipi_dsi_packet_format_is_long(msg->type))
334 return -EINVAL;
335
336 if (msg->channel > 3)
337 return -EINVAL;
338
339 memset(packet, 0, sizeof(*packet));
340 packet->header[0] = ((msg->channel & 0x3) << 6) | (msg->type & 0x3f);
341
342 /* TODO: compute ECC if hardware support is not available */
343
344 /*
345 * Long write packets contain the word count in header bytes 1 and 2.
346 * The payload follows the header and is word count bytes long.
347 *
348 * Short write packets encode up to two parameters in header bytes 1
349 * and 2.
350 */
351 if (mipi_dsi_packet_format_is_long(msg->type)) {
352 packet->header[1] = (msg->tx_len >> 0) & 0xff;
353 packet->header[2] = (msg->tx_len >> 8) & 0xff;
354
355 packet->payload_length = msg->tx_len;
356 packet->payload = tx;
357 } else {
358 packet->header[1] = (msg->tx_len > 0) ? tx[0] : 0;
359 packet->header[2] = (msg->tx_len > 1) ? tx[1] : 0;
360 }
361
362 packet->size = sizeof(packet->header) + packet->payload_length;
363
364 return 0;
365}
366EXPORT_SYMBOL(mipi_dsi_create_packet);
367
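On the host side, a controller's ->transfer() implementation can lean on this helper to produce the on-the-wire bytes. A sketch, assuming a hypothetical foo_write_fifo() that pushes raw bytes into the controller's packet FIFO:

/* hypothetical hardware access helper */
static void foo_write_fifo(struct mipi_dsi_host *host,
			   const u8 *buf, size_t len);

static ssize_t foo_dsi_transfer(struct mipi_dsi_host *host,
				const struct mipi_dsi_msg *msg)
{
	struct mipi_dsi_packet packet;
	int ret;

	ret = mipi_dsi_create_packet(&packet, msg);
	if (ret < 0)
		return ret;

	/* 4-byte header first; long packets carry a payload after it */
	foo_write_fifo(host, packet.header, sizeof(packet.header));
	if (packet.payload_length)
		foo_write_fifo(host, packet.payload, packet.payload_length);

	return msg->tx_len;
}
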
368/**
369 * mipi_dsi_set_maximum_return_packet_size() - specify the maximum size of the
370 * payload in a long packet transmitted from the peripheral back to the
371 * host processor
372 * @dsi: DSI peripheral device
373 * @value: the maximum size of the payload
374 *
375 * Return: 0 on success or a negative error code on failure.
376 */
377int mipi_dsi_set_maximum_return_packet_size(struct mipi_dsi_device *dsi,
378 u16 value)
379{
380 u8 tx[2] = { value & 0xff, value >> 8 };
381 struct mipi_dsi_msg msg = {
382 .channel = dsi->channel,
383 .type = MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE,
384 .tx_len = sizeof(tx),
385 .tx_buf = tx,
386 };
387
388 return mipi_dsi_device_transfer(dsi, &msg);
389}
390EXPORT_SYMBOL(mipi_dsi_set_maximum_return_packet_size);
391
392/**
393 * mipi_dsi_generic_write() - transmit data using a generic write packet
394 * @dsi: DSI peripheral device
395 * @payload: buffer containing the payload
396 * @size: size of payload buffer
397 *
398 * This function will automatically choose the right data type depending on
399 * the payload length.
400 *
401 * Return: The number of bytes transmitted on success or a negative error code
402 * on failure.
403 */
404ssize_t mipi_dsi_generic_write(struct mipi_dsi_device *dsi, const void *payload,
405 size_t size)
406{
407 struct mipi_dsi_msg msg = {
408 .channel = dsi->channel,
409 .tx_buf = payload,
410 .tx_len = size
411 };
412
413 switch (size) {
414 case 0:
415 msg.type = MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM;
416 break;
417
418 case 1:
419 msg.type = MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM;
420 break;
421
422 case 2:
423 msg.type = MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM;
424 break;
425
426 default:
427 msg.type = MIPI_DSI_GENERIC_LONG_WRITE;
428 break;
429 }
430
431 return mipi_dsi_device_transfer(dsi, &msg);
432}
433EXPORT_SYMBOL(mipi_dsi_generic_write);
434
435/**
436 * mipi_dsi_generic_read() - receive data using a generic read packet
437 * @dsi: DSI peripheral device
438 * @params: buffer containing the request parameters
439 * @num_params: number of request parameters
440 * @data: buffer in which to return the received data
441 * @size: size of receive buffer
442 *
443 * This function will automatically choose the right data type depending on
444 * the number of parameters passed in.
445 *
446 * Return: The number of bytes successfully read or a negative error code on
447 * failure.
448 */
449ssize_t mipi_dsi_generic_read(struct mipi_dsi_device *dsi, const void *params,
450 size_t num_params, void *data, size_t size)
451{
452 struct mipi_dsi_msg msg = {
453 .channel = dsi->channel,
454 .tx_len = num_params,
455 .tx_buf = params,
456 .rx_len = size,
457 .rx_buf = data
458 };
459
460 switch (num_params) {
461 case 0:
462 msg.type = MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM;
463 break;
464
465 case 1:
466 msg.type = MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM;
467 break;
468
469 case 2:
470 msg.type = MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM;
471 break;
472
473 default:
474 return -EINVAL;
475 }
476
477 return mipi_dsi_device_transfer(dsi, &msg);
478}
479EXPORT_SYMBOL(mipi_dsi_generic_read);
480
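A peripheral driver never has to pick packet types by hand: the helpers select the short or long variant from the buffer sizes, as the switch statements above show. A usage sketch with made-up register values; 0xb0 and 0xbf are illustrative, not taken from any real panel datasheet:

static int foo_panel_read_id(struct mipi_dsi_device *dsi)
{
	static const u8 unlock[3] = { 0xb0, 0x04, 0x00 }; /* long write */
	u8 req = 0xbf;	/* one parameter => short read request */
	u8 id[4];
	ssize_t ret;

	ret = mipi_dsi_generic_write(dsi, unlock, sizeof(unlock));
	if (ret < 0)
		return ret;

	ret = mipi_dsi_generic_read(dsi, &req, 1, id, sizeof(id));
	if (ret < 0)
		return ret;

	return 0;
}
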
481/**
482 * mipi_dsi_dcs_write_buffer() - transmit a DCS command with payload
483 * @dsi: DSI peripheral device
484 * @data: buffer containing data to be transmitted
485 * @len: size of transmission buffer
486 *
487 * This function will automatically choose the right data type depending on
488 * the command payload length.
489 *
490 * Return: The number of bytes successfully transmitted or a negative error
491 * code on failure.
492 */
493ssize_t mipi_dsi_dcs_write_buffer(struct mipi_dsi_device *dsi,
494 const void *data, size_t len)
209{ 495{
210 const struct mipi_dsi_host_ops *ops = dsi->host->ops;
211 struct mipi_dsi_msg msg = { 496 struct mipi_dsi_msg msg = {
212 .channel = dsi->channel, 497 .channel = dsi->channel,
213 .tx_buf = data, 498 .tx_buf = data,
214 .tx_len = len 499 .tx_len = len
215 }; 500 };
216 501
217 if (!ops || !ops->transfer)
218 return -ENOSYS;
219
220 switch (len) { 502 switch (len) {
221 case 0: 503 case 0:
222 return -EINVAL; 504 return -EINVAL;
505
223 case 1: 506 case 1:
224 msg.type = MIPI_DSI_DCS_SHORT_WRITE; 507 msg.type = MIPI_DSI_DCS_SHORT_WRITE;
225 break; 508 break;
509
226 case 2: 510 case 2:
227 msg.type = MIPI_DSI_DCS_SHORT_WRITE_PARAM; 511 msg.type = MIPI_DSI_DCS_SHORT_WRITE_PARAM;
228 break; 512 break;
513
229 default: 514 default:
230 msg.type = MIPI_DSI_DCS_LONG_WRITE; 515 msg.type = MIPI_DSI_DCS_LONG_WRITE;
231 break; 516 break;
232 } 517 }
233 518
234 if (dsi->mode_flags & MIPI_DSI_MODE_LPM) 519 return mipi_dsi_device_transfer(dsi, &msg);
235 msg.flags = MIPI_DSI_MSG_USE_LPM; 520}
521EXPORT_SYMBOL(mipi_dsi_dcs_write_buffer);
236 522
237 return ops->transfer(dsi->host, &msg); 523/**
524 * mipi_dsi_dcs_write() - send DCS write command
525 * @dsi: DSI peripheral device
526 * @cmd: DCS command
527 * @data: buffer containing the command payload
528 * @len: command payload length
529 *
530 * This function will automatically choose the right data type depending on
531 * the command payload length.
532 *
533 * Return: The number of bytes successfully transmitted or a negative error
534 * code on failure.
535 */
536ssize_t mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, u8 cmd,
537 const void *data, size_t len)
538{
539 ssize_t err;
540 size_t size;
541 u8 *tx;
542
543 if (len > 0) {
544 size = 1 + len;
545
546 tx = kmalloc(size, GFP_KERNEL);
547 if (!tx)
548 return -ENOMEM;
549
550 /* concatenate the DCS command byte and the payload */
551 tx[0] = cmd;
552 memcpy(&tx[1], data, len);
553 } else {
554 tx = &cmd;
555 size = 1;
556 }
557
558 err = mipi_dsi_dcs_write_buffer(dsi, tx, size);
559
560 if (len > 0)
561 kfree(tx);
562
563 return err;
238} 564}
239EXPORT_SYMBOL(mipi_dsi_dcs_write); 565EXPORT_SYMBOL(mipi_dsi_dcs_write);
240 566
241/** 567/**
242 * mipi_dsi_dcs_read - send DCS read request command 568 * mipi_dsi_dcs_read() - send DCS read request command
243 * @dsi: DSI device 569 * @dsi: DSI peripheral device
244 * @cmd: DCS read command 570 * @cmd: DCS command
245 * @data: pointer to read buffer 571 * @data: buffer in which to receive data
246 * @len: length of @data 572 * @len: size of receive buffer
247 * 573 *
248 * Function returns number of read bytes or error code. 574 * Return: The number of bytes read or a negative error code on failure.
249 */ 575 */
250ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, u8 cmd, void *data, 576ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, u8 cmd, void *data,
251 size_t len) 577 size_t len)
252{ 578{
253 const struct mipi_dsi_host_ops *ops = dsi->host->ops;
254 struct mipi_dsi_msg msg = { 579 struct mipi_dsi_msg msg = {
255 .channel = dsi->channel, 580 .channel = dsi->channel,
256 .type = MIPI_DSI_DCS_READ, 581 .type = MIPI_DSI_DCS_READ,
@@ -260,15 +585,282 @@ ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, u8 cmd, void *data,
260 .rx_len = len 585 .rx_len = len
261 }; 586 };
262 587
263 if (!ops || !ops->transfer) 588 return mipi_dsi_device_transfer(dsi, &msg);
264 return -ENOSYS; 589}
590EXPORT_SYMBOL(mipi_dsi_dcs_read);
265 591
266 if (dsi->mode_flags & MIPI_DSI_MODE_LPM) 592/**
267 msg.flags = MIPI_DSI_MSG_USE_LPM; 593 * mipi_dsi_dcs_nop() - send DCS nop packet
594 * @dsi: DSI peripheral device
595 *
596 * Return: 0 on success or a negative error code on failure.
597 */
598int mipi_dsi_dcs_nop(struct mipi_dsi_device *dsi)
599{
600 ssize_t err;
601
602 err = mipi_dsi_dcs_write(dsi, MIPI_DCS_NOP, NULL, 0);
603 if (err < 0)
604 return err;
268 605
269 return ops->transfer(dsi->host, &msg); 606 return 0;
270} 607}
271EXPORT_SYMBOL(mipi_dsi_dcs_read); 608EXPORT_SYMBOL(mipi_dsi_dcs_nop);
609
610/**
611 * mipi_dsi_dcs_soft_reset() - perform a software reset of the display module
612 * @dsi: DSI peripheral device
613 *
614 * Return: 0 on success or a negative error code on failure.
615 */
616int mipi_dsi_dcs_soft_reset(struct mipi_dsi_device *dsi)
617{
618 ssize_t err;
619
620 err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SOFT_RESET, NULL, 0);
621 if (err < 0)
622 return err;
623
624 return 0;
625}
626EXPORT_SYMBOL(mipi_dsi_dcs_soft_reset);
627
628/**
629 * mipi_dsi_dcs_get_power_mode() - query the display module's current power
630 * mode
631 * @dsi: DSI peripheral device
632 * @mode: return location for the current power mode
633 *
634 * Return: 0 on success or a negative error code on failure.
635 */
636int mipi_dsi_dcs_get_power_mode(struct mipi_dsi_device *dsi, u8 *mode)
637{
638 ssize_t err;
639
640 err = mipi_dsi_dcs_read(dsi, MIPI_DCS_GET_POWER_MODE, mode,
641 sizeof(*mode));
642 if (err <= 0) {
643 if (err == 0)
644 err = -ENODATA;
645
646 return err;
647 }
648
649 return 0;
650}
651EXPORT_SYMBOL(mipi_dsi_dcs_get_power_mode);
652
653/**
654 * mipi_dsi_dcs_get_pixel_format() - gets the pixel format for the RGB image
655 * data used by the interface
656 * @dsi: DSI peripheral device
657 * @format: return location for the pixel format
658 *
659 * Return: 0 on success or a negative error code on failure.
660 */
661int mipi_dsi_dcs_get_pixel_format(struct mipi_dsi_device *dsi, u8 *format)
662{
663 ssize_t err;
664
665 err = mipi_dsi_dcs_read(dsi, MIPI_DCS_GET_PIXEL_FORMAT, format,
666 sizeof(*format));
667 if (err <= 0) {
668 if (err == 0)
669 err = -ENODATA;
670
671 return err;
672 }
673
674 return 0;
675}
676EXPORT_SYMBOL(mipi_dsi_dcs_get_pixel_format);
677
678/**
679 * mipi_dsi_dcs_enter_sleep_mode() - disable all unnecessary blocks inside the
680 * display module except interface communication
681 * @dsi: DSI peripheral device
682 *
683 * Return: 0 on success or a negative error code on failure.
684 */
685int mipi_dsi_dcs_enter_sleep_mode(struct mipi_dsi_device *dsi)
686{
687 ssize_t err;
688
689 err = mipi_dsi_dcs_write(dsi, MIPI_DCS_ENTER_SLEEP_MODE, NULL, 0);
690 if (err < 0)
691 return err;
692
693 return 0;
694}
695EXPORT_SYMBOL(mipi_dsi_dcs_enter_sleep_mode);
696
697/**
698 * mipi_dsi_dcs_exit_sleep_mode() - enable all blocks inside the display
699 * module
700 * @dsi: DSI peripheral device
701 *
702 * Return: 0 on success or a negative error code on failure.
703 */
704int mipi_dsi_dcs_exit_sleep_mode(struct mipi_dsi_device *dsi)
705{
706 ssize_t err;
707
708 err = mipi_dsi_dcs_write(dsi, MIPI_DCS_EXIT_SLEEP_MODE, NULL, 0);
709 if (err < 0)
710 return err;
711
712 return 0;
713}
714EXPORT_SYMBOL(mipi_dsi_dcs_exit_sleep_mode);
715
716/**
717 * mipi_dsi_dcs_set_display_off() - stop displaying the image data on the
718 * display device
719 * @dsi: DSI peripheral device
720 *
721 * Return: 0 on success or a negative error code on failure.
722 */
723int mipi_dsi_dcs_set_display_off(struct mipi_dsi_device *dsi)
724{
725 ssize_t err;
726
727 err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_OFF, NULL, 0);
728 if (err < 0)
729 return err;
730
731 return 0;
732}
733EXPORT_SYMBOL(mipi_dsi_dcs_set_display_off);
734
735/**
736 * mipi_dsi_dcs_set_display_on() - start displaying the image data on the
737 * display device
738 * @dsi: DSI peripheral device
739 *
740 * Return: 0 on success or a negative error code on failure.
741 */
742int mipi_dsi_dcs_set_display_on(struct mipi_dsi_device *dsi)
743{
744 ssize_t err;
745
746 err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_ON, NULL, 0);
747 if (err < 0)
748 return err;
749
750 return 0;
751}
752EXPORT_SYMBOL(mipi_dsi_dcs_set_display_on);
753
754/**
755 * mipi_dsi_dcs_set_column_address() - define the column extent of the frame
756 * memory accessed by the host processor
757 * @dsi: DSI peripheral device
758 * @start: first column of frame memory
759 * @end: last column of frame memory
760 *
761 * Return: 0 on success or a negative error code on failure.
762 */
763int mipi_dsi_dcs_set_column_address(struct mipi_dsi_device *dsi, u16 start,
764 u16 end)
765{
766 u8 payload[4] = { start >> 8, start & 0xff, end >> 8, end & 0xff };
767 ssize_t err;
768
769 err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_COLUMN_ADDRESS, payload,
770 sizeof(payload));
771 if (err < 0)
772 return err;
773
774 return 0;
775}
776EXPORT_SYMBOL(mipi_dsi_dcs_set_column_address);
777
778/**
779 * mipi_dsi_dcs_set_page_address() - define the page extent of the frame
780 * memory accessed by the host processor
781 * @dsi: DSI peripheral device
782 * @start: first page of frame memory
783 * @end: last page of frame memory
784 *
785 * Return: 0 on success or a negative error code on failure.
786 */
787int mipi_dsi_dcs_set_page_address(struct mipi_dsi_device *dsi, u16 start,
788 u16 end)
789{
790 u8 payload[4] = { start >> 8, start & 0xff, end >> 8, end & 0xff };
791 ssize_t err;
792
793 err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_PAGE_ADDRESS, payload,
794 sizeof(payload));
795 if (err < 0)
796 return err;
797
798 return 0;
799}
800EXPORT_SYMBOL(mipi_dsi_dcs_set_page_address);
801
802/**
803 * mipi_dsi_dcs_set_tear_off() - turn off the display module's Tearing Effect
804 * output signal on the TE signal line
805 * @dsi: DSI peripheral device
806 *
807 * Return: 0 on success or a negative error code on failure
808 */
809int mipi_dsi_dcs_set_tear_off(struct mipi_dsi_device *dsi)
810{
811 ssize_t err;
812
813 err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_TEAR_OFF, NULL, 0);
814 if (err < 0)
815 return err;
816
817 return 0;
818}
819EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_off);
820
821/**
822 * mipi_dsi_dcs_set_tear_on() - turn on the display module's Tearing Effect
823 * output signal on the TE signal line.
824 * @dsi: DSI peripheral device
825 * @mode: the Tearing Effect Output Line mode
826 *
827 * Return: 0 on success or a negative error code on failure
828 */
829int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi,
830 enum mipi_dsi_dcs_tear_mode mode)
831{
832 u8 value = mode;
833 ssize_t err;
834
835 err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_TEAR_ON, &value,
836 sizeof(value));
837 if (err < 0)
838 return err;
839
840 return 0;
841}
842EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_on);
843
844/**
845 * mipi_dsi_dcs_set_pixel_format() - sets the pixel format for the RGB image
846 * data used by the interface
847 * @dsi: DSI peripheral device
848 * @format: pixel format
849 *
850 * Return: 0 on success or a negative error code on failure.
851 */
852int mipi_dsi_dcs_set_pixel_format(struct mipi_dsi_device *dsi, u8 format)
853{
854 ssize_t err;
855
856 err = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_PIXEL_FORMAT, &format,
857 sizeof(format));
858 if (err < 0)
859 return err;
860
861 return 0;
862}
863EXPORT_SYMBOL(mipi_dsi_dcs_set_pixel_format);
272 864
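Chained together, these wrappers give panel drivers a readable power-on sequence. A hedged sketch of a hypothetical enable path; the 24-bit pixel format, the tear mode, and the 120 ms sleep-out delay are common but panel-specific choices:

#include <linux/delay.h>
#include <video/mipi_display.h>
#include <drm/drm_mipi_dsi.h>

static int foo_panel_prepare(struct mipi_dsi_device *dsi)
{
	int ret;

	ret = mipi_dsi_dcs_soft_reset(dsi);
	if (ret < 0)
		return ret;

	ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
	if (ret < 0)
		return ret;
	msleep(120);	/* sleep-out settle time, panel dependent */

	ret = mipi_dsi_dcs_set_pixel_format(dsi, MIPI_DCS_PIXEL_FMT_24BIT);
	if (ret < 0)
		return ret;

	ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
	if (ret < 0)
		return ret;

	return mipi_dsi_dcs_set_display_on(dsi);
}
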
273static int mipi_dsi_drv_probe(struct device *dev) 865static int mipi_dsi_drv_probe(struct device *dev)
274{ 866{
@@ -295,12 +887,18 @@ static void mipi_dsi_drv_shutdown(struct device *dev)
295} 887}
296 888
297/** 889/**
298 * mipi_dsi_driver_register - register a driver for DSI devices 890 * mipi_dsi_driver_register_full() - register a driver for DSI devices
299 * @drv: DSI driver structure 891 * @drv: DSI driver structure
892 * @owner: owner module
893 *
894 * Return: 0 on success or a negative error code on failure.
300 */ 895 */
301int mipi_dsi_driver_register(struct mipi_dsi_driver *drv) 896int mipi_dsi_driver_register_full(struct mipi_dsi_driver *drv,
897 struct module *owner)
302{ 898{
303 drv->driver.bus = &mipi_dsi_bus_type; 899 drv->driver.bus = &mipi_dsi_bus_type;
900 drv->driver.owner = owner;
901
304 if (drv->probe) 902 if (drv->probe)
305 drv->driver.probe = mipi_dsi_drv_probe; 903 drv->driver.probe = mipi_dsi_drv_probe;
306 if (drv->remove) 904 if (drv->remove)
@@ -310,11 +908,13 @@ int mipi_dsi_driver_register(struct mipi_dsi_driver *drv)
310 908
311 return driver_register(&drv->driver); 909 return driver_register(&drv->driver);
312} 910}
313EXPORT_SYMBOL(mipi_dsi_driver_register); 911EXPORT_SYMBOL(mipi_dsi_driver_register_full);
314 912
315/** 913/**
316 * mipi_dsi_driver_unregister - unregister a driver for DSI devices 914 * mipi_dsi_driver_unregister() - unregister a driver for DSI devices
317 * @drv: DSI driver structure 915 * @drv: DSI driver structure
318 */ 918 */
319void mipi_dsi_driver_unregister(struct mipi_dsi_driver *drv) 919void mipi_dsi_driver_unregister(struct mipi_dsi_driver *drv)
320{ 920{
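
With the module owner now recorded on the driver, peripheral drivers keep the familiar entry points; mipi_dsi_driver_register() is expected to forward THIS_MODULE to the _full() variant. A hedged registration sketch; the foo_* names and the match table are placeholders:

static int foo_probe(struct mipi_dsi_device *dsi);
static int foo_remove(struct mipi_dsi_device *dsi);

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "vendor,foo-panel" },
	{ /* sentinel */ }
};

static struct mipi_dsi_driver foo_dsi_driver = {
	.probe	= foo_probe,
	.remove	= foo_remove,
	.driver	= {
		.name		= "foo-panel",
		.of_match_table	= foo_of_match,
	},
};

static int __init foo_init(void)
{
	/* assumed to expand to mipi_dsi_driver_register_full(&foo_dsi_driver,
	 * THIS_MODULE), so the owner is recorded automatically */
	return mipi_dsi_driver_register(&foo_dsi_driver);
}
module_init(foo_init);

static void __exit foo_exit(void)
{
	mipi_dsi_driver_unregister(&foo_dsi_driver);
}
module_exit(foo_exit);
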
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index d1b7d2006529..6d8b941c8200 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -914,7 +914,7 @@ EXPORT_SYMBOL(drm_mode_equal_no_clocks_no_stereo);
914 * 914 *
915 * This function is a helper which can be used to validate modes against size 915 * This function is a helper which can be used to validate modes against size
916 * limitations of the DRM device/connector. If a mode is too big its status 916 * limitations of the DRM device/connector. If a mode is too big its status
917 * memeber is updated with the appropriate validation failure code. The list 917 * member is updated with the appropriate validation failure code. The list
918 * itself is not changed. 918 * itself is not changed.
919 */ 919 */
920void drm_mode_validate_size(struct drm_device *dev, 920void drm_mode_validate_size(struct drm_device *dev,
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index 474e4d12a2d8..51cc47d827d8 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -157,14 +157,20 @@ void drm_modeset_unlock_all(struct drm_device *dev)
157EXPORT_SYMBOL(drm_modeset_unlock_all); 157EXPORT_SYMBOL(drm_modeset_unlock_all);
158 158
159/** 159/**
160 * drm_modeset_lock_crtc - lock crtc with hidden acquire ctx 160 * drm_modeset_lock_crtc - lock crtc with hidden acquire ctx for a plane update
161 * @crtc: drm crtc 161 * @crtc: DRM CRTC
162 * @plane: DRM plane to be updated on @crtc
163 *
164 * This function locks the given crtc and plane (which should be either the
165 * primary or cursor plane) using a hidden acquire context. This is necessary so
166 * that drivers internally using the atomic interfaces can grab further locks
167 * with the lock acquire context.
162 * 168 *
163 * This function locks the given crtc using a hidden acquire context. This is 169 * Note that @plane can be NULL, e.g. when cursor support has not yet been
164 * necessary so that drivers internally using the atomic interfaces can grab 170 * converted to universal planes.
165 * further locks with the lock acquire context.
166 */ 171 */
167void drm_modeset_lock_crtc(struct drm_crtc *crtc) 172void drm_modeset_lock_crtc(struct drm_crtc *crtc,
173 struct drm_plane *plane)
168{ 174{
169 struct drm_modeset_acquire_ctx *ctx; 175 struct drm_modeset_acquire_ctx *ctx;
170 int ret; 176 int ret;
@@ -180,6 +186,18 @@ retry:
180 if (ret) 186 if (ret)
181 goto fail; 187 goto fail;
182 188
189 if (plane) {
190 ret = drm_modeset_lock(&plane->mutex, ctx);
191 if (ret)
192 goto fail;
193
194 if (plane->crtc) {
195 ret = drm_modeset_lock(&plane->crtc->mutex, ctx);
196 if (ret)
197 goto fail;
198 }
199 }
200
183 WARN_ON(crtc->acquire_ctx); 201 WARN_ON(crtc->acquire_ctx);
184 202
185 /* now we hold the locks, so now that it is safe, stash the 203 /* now we hold the locks, so now that it is safe, stash the
@@ -437,15 +455,14 @@ void drm_modeset_unlock(struct drm_modeset_lock *lock)
437} 455}
438EXPORT_SYMBOL(drm_modeset_unlock); 456EXPORT_SYMBOL(drm_modeset_unlock);
439 457
440/* Temporary.. until we have sufficiently fine grained locking, there 458/* In some legacy codepaths it's convenient to just grab all the crtc and plane
441 * are a couple scenarios where it is convenient to grab all crtc locks. 459 * related locks. */
442 * It is planned to remove this:
443 */
444int drm_modeset_lock_all_crtcs(struct drm_device *dev, 460int drm_modeset_lock_all_crtcs(struct drm_device *dev,
445 struct drm_modeset_acquire_ctx *ctx) 461 struct drm_modeset_acquire_ctx *ctx)
446{ 462{
447 struct drm_mode_config *config = &dev->mode_config; 463 struct drm_mode_config *config = &dev->mode_config;
448 struct drm_crtc *crtc; 464 struct drm_crtc *crtc;
465 struct drm_plane *plane;
449 int ret = 0; 466 int ret = 0;
450 467
451 list_for_each_entry(crtc, &config->crtc_list, head) { 468 list_for_each_entry(crtc, &config->crtc_list, head) {
@@ -454,6 +471,12 @@ int drm_modeset_lock_all_crtcs(struct drm_device *dev,
454 return ret; 471 return ret;
455 } 472 }
456 473
474 list_for_each_entry(plane, &config->plane_list, head) {
475 ret = drm_modeset_lock(&plane->mutex, ctx);
476 if (ret)
477 return ret;
478 }
479
457 return 0; 480 return 0;
458} 481}
459EXPORT_SYMBOL(drm_modeset_lock_all_crtcs); 482EXPORT_SYMBOL(drm_modeset_lock_all_crtcs);
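A minimal sketch of how a caller is expected to use the two-argument
drm_modeset_lock_crtc(); the function below is illustrative only and not
part of the patch (drm_modeset_unlock_crtc() is the existing unlock
counterpart):

#include <drm/drmP.h>

static void example_legacy_cursor_path(struct drm_crtc *crtc,
				       struct drm_plane *cursor)
{
	/* Takes crtc->mutex plus, when @cursor is non-NULL, the plane's
	 * mutex and that of the plane's current crtc, all under one
	 * hidden acquire context. */
	drm_modeset_lock_crtc(crtc, cursor);

	/* ... perform the legacy cursor/plane update here ... */

	drm_modeset_unlock_crtc(crtc);
}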
diff --git a/drivers/gpu/drm/drm_plane_helper.c b/drivers/gpu/drm/drm_plane_helper.c
index 827ec1a3040b..18a1ac6ac22f 100644
--- a/drivers/gpu/drm/drm_plane_helper.c
+++ b/drivers/gpu/drm/drm_plane_helper.c
@@ -27,10 +27,38 @@
27#include <drm/drmP.h> 27#include <drm/drmP.h>
28#include <drm/drm_plane_helper.h> 28#include <drm/drm_plane_helper.h>
29#include <drm/drm_rect.h> 29#include <drm/drm_rect.h>
30#include <drm/drm_plane_helper.h> 30#include <drm/drm_atomic.h>
31#include <drm/drm_crtc_helper.h>
32#include <drm/drm_atomic_helper.h>
31 33
32#define SUBPIXEL_MASK 0xffff 34#define SUBPIXEL_MASK 0xffff
33 35
36/**
37 * DOC: overview
38 *
39 * This helper library has two parts. The first part has support to implement
40 * primary plane support on top of the normal CRTC configuration interface.
 41 * Since the legacy ->set_config interface ties the primary plane together with
 42 * the CRTC state, this does not allow userspace to disable the primary plane
 43 * itself. To avoid too much duplicated code, use
 44 * drm_plane_helper_check_update(), which enforces the same restrictions that
 45 * primary planes have always had. The default primary plane only
 46 * exposes XRGB8888 and ARGB8888 as valid pixel formats for the attached
 47 * framebuffer.
48 *
49 * Drivers are highly recommended to implement proper support for primary
50 * planes, and newly merged drivers must not rely upon these transitional
51 * helpers.
52 *
 53 * The second part also implements transitional helpers which allow drivers to
 54 * gradually switch to the atomic helper infrastructure for plane updates. Once
 55 * that switch is complete, drivers shouldn't use these any longer; instead they
 56 * should use the proper legacy implementations of the update and disable plane
 57 * hooks provided by the atomic helpers.
58 *
59 * Again drivers are strongly urged to switch to the new interfaces.
60 */
61
34/* 62/*
35 * This is the minimal list of formats that seem to be safe for modeset use 63 * This is the minimal list of formats that seem to be safe for modeset use
36 * with all current DRM drivers. Most hardware can actually support more 64 * with all current DRM drivers. Most hardware can actually support more
@@ -127,6 +155,11 @@ int drm_plane_helper_check_update(struct drm_plane *plane,
127 return -ERANGE; 155 return -ERANGE;
128 } 156 }
129 157
158 if (!fb) {
159 *visible = false;
160 return 0;
161 }
162
130 *visible = drm_rect_clip_scaled(src, dest, clip, hscale, vscale); 163 *visible = drm_rect_clip_scaled(src, dest, clip, hscale, vscale);
131 if (!*visible) 164 if (!*visible)
132 /* 165 /*
@@ -369,3 +402,171 @@ int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
369 return drm_crtc_init_with_planes(dev, crtc, primary, NULL, funcs); 402 return drm_crtc_init_with_planes(dev, crtc, primary, NULL, funcs);
370} 403}
371EXPORT_SYMBOL(drm_crtc_init); 404EXPORT_SYMBOL(drm_crtc_init);
405
406int drm_plane_helper_commit(struct drm_plane *plane,
407 struct drm_plane_state *plane_state,
408 struct drm_framebuffer *old_fb)
409{
410 struct drm_plane_helper_funcs *plane_funcs;
411 struct drm_crtc *crtc[2];
412 struct drm_crtc_helper_funcs *crtc_funcs[2];
413 int i, ret = 0;
414
415 plane_funcs = plane->helper_private;
416
417 /* Since this is a transitional helper we can't assume that plane->state
418 * is always valid. Hence we need to use plane->crtc instead of
419 * plane->state->crtc as the old crtc. */
420 crtc[0] = plane->crtc;
421 crtc[1] = crtc[0] != plane_state->crtc ? plane_state->crtc : NULL;
422
423 for (i = 0; i < 2; i++)
424 crtc_funcs[i] = crtc[i] ? crtc[i]->helper_private : NULL;
425
426 if (plane_funcs->atomic_check) {
427 ret = plane_funcs->atomic_check(plane, plane_state);
428 if (ret)
429 goto out;
430 }
431
432 if (plane_funcs->prepare_fb && plane_state->fb) {
433 ret = plane_funcs->prepare_fb(plane, plane_state->fb);
434 if (ret)
435 goto out;
436 }
437
438 /* Point of no return, commit sw state. */
439 swap(plane->state, plane_state);
440
441 for (i = 0; i < 2; i++) {
442 if (crtc_funcs[i] && crtc_funcs[i]->atomic_begin)
443 crtc_funcs[i]->atomic_begin(crtc[i]);
444 }
445
446 plane_funcs->atomic_update(plane, plane_state);
447
448 for (i = 0; i < 2; i++) {
449 if (crtc_funcs[i] && crtc_funcs[i]->atomic_flush)
450 crtc_funcs[i]->atomic_flush(crtc[i]);
451 }
452
453 for (i = 0; i < 2; i++) {
454 if (!crtc[i])
455 continue;
456
457 /* There's no other way to figure out whether the crtc is running. */
458 ret = drm_crtc_vblank_get(crtc[i]);
459 if (ret == 0) {
460 drm_crtc_wait_one_vblank(crtc[i]);
461 drm_crtc_vblank_put(crtc[i]);
462 }
463
464 ret = 0;
465 }
466
467 if (plane_funcs->cleanup_fb && old_fb)
468 plane_funcs->cleanup_fb(plane, old_fb);
469out:
470 if (plane_state) {
471 if (plane->funcs->atomic_destroy_state)
472 plane->funcs->atomic_destroy_state(plane, plane_state);
473 else
474 drm_atomic_helper_plane_destroy_state(plane, plane_state);
475 }
476
477 return ret;
478}
479
480/**
481 * drm_plane_helper_update() - Helper for primary plane update
482 * @plane: plane object to update
 483 * @crtc: CRTC the plane is being attached to
484 * @fb: framebuffer to flip onto plane
485 * @crtc_x: x offset of primary plane on crtc
486 * @crtc_y: y offset of primary plane on crtc
487 * @crtc_w: width of primary plane rectangle on crtc
488 * @crtc_h: height of primary plane rectangle on crtc
489 * @src_x: x offset of @fb for panning
490 * @src_y: y offset of @fb for panning
491 * @src_w: width of source rectangle in @fb
492 * @src_h: height of source rectangle in @fb
493 *
494 * Provides a default plane update handler using the atomic plane update
495 * functions. It is fully left to the driver to check plane constraints and
496 * handle corner-cases like a fully occluded or otherwise invisible plane.
497 *
498 * This is useful for piecewise transitioning of a driver to the atomic helpers.
499 *
500 * RETURNS:
501 * Zero on success, error code on failure
502 */
503int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
504 struct drm_framebuffer *fb,
505 int crtc_x, int crtc_y,
506 unsigned int crtc_w, unsigned int crtc_h,
507 uint32_t src_x, uint32_t src_y,
508 uint32_t src_w, uint32_t src_h)
509{
510 struct drm_plane_state *plane_state;
511
512 if (plane->funcs->atomic_duplicate_state)
513 plane_state = plane->funcs->atomic_duplicate_state(plane);
514 else if (plane->state)
515 plane_state = drm_atomic_helper_plane_duplicate_state(plane);
516 else
517 plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
518 if (!plane_state)
519 return -ENOMEM;
520
521 plane_state->crtc = crtc;
522 drm_atomic_set_fb_for_plane(plane_state, fb);
523 plane_state->crtc_x = crtc_x;
524 plane_state->crtc_y = crtc_y;
525 plane_state->crtc_h = crtc_h;
526 plane_state->crtc_w = crtc_w;
527 plane_state->src_x = src_x;
528 plane_state->src_y = src_y;
529 plane_state->src_h = src_h;
530 plane_state->src_w = src_w;
531
532 return drm_plane_helper_commit(plane, plane_state, plane->fb);
533}
534EXPORT_SYMBOL(drm_plane_helper_update);
535
536/**
537 * drm_plane_helper_disable() - Helper for primary plane disable
538 * @plane: plane to disable
539 *
540 * Provides a default plane disable handler using the atomic plane update
541 * functions. It is fully left to the driver to check plane constraints and
542 * handle corner-cases like a fully occluded or otherwise invisible plane.
543 *
544 * This is useful for piecewise transitioning of a driver to the atomic helpers.
545 *
546 * RETURNS:
547 * Zero on success, error code on failure
548 */
549int drm_plane_helper_disable(struct drm_plane *plane)
550{
551 struct drm_plane_state *plane_state;
552
 553	/* crtc helpers love to call the disable hooks even for hw that is
 554	 * already disabled. So cope with that. */
555 if (!plane->crtc)
556 return 0;
557
558 if (plane->funcs->atomic_duplicate_state)
559 plane_state = plane->funcs->atomic_duplicate_state(plane);
560 else if (plane->state)
561 plane_state = drm_atomic_helper_plane_duplicate_state(plane);
562 else
563 plane_state = kzalloc(sizeof(*plane_state), GFP_KERNEL);
564 if (!plane_state)
565 return -ENOMEM;
566
567 plane_state->crtc = NULL;
568 drm_atomic_set_fb_for_plane(plane_state, NULL);
569
570 return drm_plane_helper_commit(plane, plane_state, plane->fb);
571}
572EXPORT_SYMBOL(drm_plane_helper_disable);
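For illustration (not part of the patch), a driver midway through its
atomic conversion would typically back the legacy plane ops with these
transitional helpers; the struct name is a placeholder:

#include <drm/drm_plane_helper.h>

static const struct drm_plane_funcs example_primary_plane_funcs = {
	.update_plane	= drm_plane_helper_update,
	.disable_plane	= drm_plane_helper_disable,
	.destroy	= drm_plane_cleanup,
};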
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index 78ca30808422..7482b06cd08f 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -328,7 +328,7 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = {
328 */ 328 */
329 329
330/** 330/**
331 * drm_gem_prime_export - helper library implemention of the export callback 331 * drm_gem_prime_export - helper library implementation of the export callback
332 * @dev: drm_device to export from 332 * @dev: drm_device to export from
333 * @obj: GEM object to export 333 * @obj: GEM object to export
334 * @flags: flags like DRM_CLOEXEC 334 * @flags: flags like DRM_CLOEXEC
@@ -483,7 +483,7 @@ out_unlock:
483EXPORT_SYMBOL(drm_gem_prime_handle_to_fd); 483EXPORT_SYMBOL(drm_gem_prime_handle_to_fd);
484 484
485/** 485/**
486 * drm_gem_prime_import - helper library implemention of the import callback 486 * drm_gem_prime_import - helper library implementation of the import callback
487 * @dev: drm_device to import into 487 * @dev: drm_device to import into
488 * @dma_buf: dma-buf object to import 488 * @dma_buf: dma-buf object to import
489 * 489 *
@@ -669,7 +669,7 @@ int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data,
669 * the driver is responsible for mapping the pages into the 669 * the driver is responsible for mapping the pages into the
 670 * importer's address space for use with dma_buf itself. 670 * importer's address space for use with dma_buf itself.
671 */ 671 */
672struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages) 672struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages)
673{ 673{
674 struct sg_table *sg = NULL; 674 struct sg_table *sg = NULL;
675 int ret; 675 int ret;
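A sketch of a typical caller of drm_prime_pages_to_sg() with the widened
nr_pages parameter; the GEM object layout here is a hypothetical example,
not part of the patch:

#include <drm/drmP.h>

struct example_gem_object {
	struct drm_gem_object base;
	struct page **pages;		/* one entry per backing page */
};

static struct sg_table *
example_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct example_gem_object *bo =
		container_of(obj, struct example_gem_object, base);
	unsigned int npages = obj->size >> PAGE_SHIFT;

	return drm_prime_pages_to_sg(bo->pages, npages);
}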
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 6857e9ad6339..7483a47de8e4 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -118,7 +118,8 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connect
118 mode->status = MODE_UNVERIFIED; 118 mode->status = MODE_UNVERIFIED;
119 119
120 if (connector->force) { 120 if (connector->force) {
121 if (connector->force == DRM_FORCE_ON) 121 if (connector->force == DRM_FORCE_ON ||
122 connector->force == DRM_FORCE_ON_DIGITAL)
122 connector->status = connector_status_connected; 123 connector->status = connector_status_connected;
123 else 124 else
124 connector->status = connector_status_disconnected; 125 connector->status = connector_status_disconnected;
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c
index 6adb1e5cfb08..34d46aa75416 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.c
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.c
@@ -30,12 +30,17 @@
30#include <drm/drm_panel.h> 30#include <drm/drm_panel.h>
31#include <drm/bridge/ptn3460.h> 31#include <drm/bridge/ptn3460.h>
32 32
33#include "exynos_drm_drv.h"
34#include "exynos_dp_core.h" 33#include "exynos_dp_core.h"
35 34
36#define ctx_from_connector(c) container_of(c, struct exynos_dp_device, \ 35#define ctx_from_connector(c) container_of(c, struct exynos_dp_device, \
37 connector) 36 connector)
38 37
38static inline struct exynos_dp_device *
39display_to_dp(struct exynos_drm_display *d)
40{
41 return container_of(d, struct exynos_dp_device, display);
42}
43
39struct bridge_init { 44struct bridge_init {
40 struct i2c_client *client; 45 struct i2c_client *client;
41 struct device_node *node; 46 struct device_node *node;
@@ -882,7 +887,7 @@ static void exynos_dp_hotplug(struct work_struct *work)
882 887
883static void exynos_dp_commit(struct exynos_drm_display *display) 888static void exynos_dp_commit(struct exynos_drm_display *display)
884{ 889{
885 struct exynos_dp_device *dp = display->ctx; 890 struct exynos_dp_device *dp = display_to_dp(display);
886 int ret; 891 int ret;
887 892
888 /* Keep the panel disabled while we configure video */ 893 /* Keep the panel disabled while we configure video */
@@ -1020,7 +1025,7 @@ static int exynos_drm_attach_lcd_bridge(struct drm_device *dev,
1020static int exynos_dp_create_connector(struct exynos_drm_display *display, 1025static int exynos_dp_create_connector(struct exynos_drm_display *display,
1021 struct drm_encoder *encoder) 1026 struct drm_encoder *encoder)
1022{ 1027{
1023 struct exynos_dp_device *dp = display->ctx; 1028 struct exynos_dp_device *dp = display_to_dp(display);
1024 struct drm_connector *connector = &dp->connector; 1029 struct drm_connector *connector = &dp->connector;
1025 int ret; 1030 int ret;
1026 1031
@@ -1052,33 +1057,19 @@ static int exynos_dp_create_connector(struct exynos_drm_display *display,
1052 1057
1053static void exynos_dp_phy_init(struct exynos_dp_device *dp) 1058static void exynos_dp_phy_init(struct exynos_dp_device *dp)
1054{ 1059{
1055 if (dp->phy) { 1060 if (dp->phy)
1056 phy_power_on(dp->phy); 1061 phy_power_on(dp->phy);
1057 } else if (dp->phy_addr) {
1058 u32 reg;
1059
1060 reg = __raw_readl(dp->phy_addr);
1061 reg |= dp->enable_mask;
1062 __raw_writel(reg, dp->phy_addr);
1063 }
1064} 1062}
1065 1063
1066static void exynos_dp_phy_exit(struct exynos_dp_device *dp) 1064static void exynos_dp_phy_exit(struct exynos_dp_device *dp)
1067{ 1065{
1068 if (dp->phy) { 1066 if (dp->phy)
1069 phy_power_off(dp->phy); 1067 phy_power_off(dp->phy);
1070 } else if (dp->phy_addr) {
1071 u32 reg;
1072
1073 reg = __raw_readl(dp->phy_addr);
1074 reg &= ~(dp->enable_mask);
1075 __raw_writel(reg, dp->phy_addr);
1076 }
1077} 1068}
1078 1069
1079static void exynos_dp_poweron(struct exynos_drm_display *display) 1070static void exynos_dp_poweron(struct exynos_drm_display *display)
1080{ 1071{
1081 struct exynos_dp_device *dp = display->ctx; 1072 struct exynos_dp_device *dp = display_to_dp(display);
1082 1073
1083 if (dp->dpms_mode == DRM_MODE_DPMS_ON) 1074 if (dp->dpms_mode == DRM_MODE_DPMS_ON)
1084 return; 1075 return;
@@ -1099,7 +1090,7 @@ static void exynos_dp_poweron(struct exynos_drm_display *display)
1099 1090
1100static void exynos_dp_poweroff(struct exynos_drm_display *display) 1091static void exynos_dp_poweroff(struct exynos_drm_display *display)
1101{ 1092{
1102 struct exynos_dp_device *dp = display->ctx; 1093 struct exynos_dp_device *dp = display_to_dp(display);
1103 1094
1104 if (dp->dpms_mode != DRM_MODE_DPMS_ON) 1095 if (dp->dpms_mode != DRM_MODE_DPMS_ON)
1105 return; 1096 return;
@@ -1124,7 +1115,7 @@ static void exynos_dp_poweroff(struct exynos_drm_display *display)
1124 1115
1125static void exynos_dp_dpms(struct exynos_drm_display *display, int mode) 1116static void exynos_dp_dpms(struct exynos_drm_display *display, int mode)
1126{ 1117{
1127 struct exynos_dp_device *dp = display->ctx; 1118 struct exynos_dp_device *dp = display_to_dp(display);
1128 1119
1129 switch (mode) { 1120 switch (mode) {
1130 case DRM_MODE_DPMS_ON: 1121 case DRM_MODE_DPMS_ON:
@@ -1147,11 +1138,6 @@ static struct exynos_drm_display_ops exynos_dp_display_ops = {
1147 .commit = exynos_dp_commit, 1138 .commit = exynos_dp_commit,
1148}; 1139};
1149 1140
1150static struct exynos_drm_display exynos_dp_display = {
1151 .type = EXYNOS_DISPLAY_TYPE_LCD,
1152 .ops = &exynos_dp_display_ops,
1153};
1154
1155static struct video_info *exynos_dp_dt_parse_pdata(struct device *dev) 1141static struct video_info *exynos_dp_dt_parse_pdata(struct device *dev)
1156{ 1142{
1157 struct device_node *dp_node = dev->of_node; 1143 struct device_node *dp_node = dev->of_node;
@@ -1210,44 +1196,6 @@ static struct video_info *exynos_dp_dt_parse_pdata(struct device *dev)
1210 return dp_video_config; 1196 return dp_video_config;
1211} 1197}
1212 1198
1213static int exynos_dp_dt_parse_phydata(struct exynos_dp_device *dp)
1214{
1215 struct device_node *dp_phy_node = of_node_get(dp->dev->of_node);
1216 u32 phy_base;
1217 int ret = 0;
1218
1219 dp_phy_node = of_find_node_by_name(dp_phy_node, "dptx-phy");
1220 if (!dp_phy_node) {
1221 dp->phy = devm_phy_get(dp->dev, "dp");
1222 return PTR_ERR_OR_ZERO(dp->phy);
1223 }
1224
1225 if (of_property_read_u32(dp_phy_node, "reg", &phy_base)) {
1226 dev_err(dp->dev, "failed to get reg for dptx-phy\n");
1227 ret = -EINVAL;
1228 goto err;
1229 }
1230
1231 if (of_property_read_u32(dp_phy_node, "samsung,enable-mask",
1232 &dp->enable_mask)) {
1233 dev_err(dp->dev, "failed to get enable-mask for dptx-phy\n");
1234 ret = -EINVAL;
1235 goto err;
1236 }
1237
1238 dp->phy_addr = ioremap(phy_base, SZ_4);
1239 if (!dp->phy_addr) {
1240 dev_err(dp->dev, "failed to ioremap dp-phy\n");
1241 ret = -ENOMEM;
1242 goto err;
1243 }
1244
1245err:
1246 of_node_put(dp_phy_node);
1247
1248 return ret;
1249}
1250
1251static int exynos_dp_dt_parse_panel(struct exynos_dp_device *dp) 1199static int exynos_dp_dt_parse_panel(struct exynos_dp_device *dp)
1252{ 1200{
1253 int ret; 1201 int ret;
@@ -1263,10 +1211,10 @@ static int exynos_dp_dt_parse_panel(struct exynos_dp_device *dp)
1263 1211
1264static int exynos_dp_bind(struct device *dev, struct device *master, void *data) 1212static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
1265{ 1213{
1214 struct exynos_dp_device *dp = dev_get_drvdata(dev);
1266 struct platform_device *pdev = to_platform_device(dev); 1215 struct platform_device *pdev = to_platform_device(dev);
1267 struct drm_device *drm_dev = data; 1216 struct drm_device *drm_dev = data;
1268 struct resource *res; 1217 struct resource *res;
1269 struct exynos_dp_device *dp = exynos_dp_display.ctx;
1270 unsigned int irq_flags; 1218 unsigned int irq_flags;
1271 int ret = 0; 1219 int ret = 0;
1272 1220
@@ -1277,9 +1225,21 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
1277 if (IS_ERR(dp->video_info)) 1225 if (IS_ERR(dp->video_info))
1278 return PTR_ERR(dp->video_info); 1226 return PTR_ERR(dp->video_info);
1279 1227
1280 ret = exynos_dp_dt_parse_phydata(dp); 1228 dp->phy = devm_phy_get(dp->dev, "dp");
1281 if (ret) 1229 if (IS_ERR(dp->phy)) {
1282 return ret; 1230 dev_err(dp->dev, "no DP phy configured\n");
1231 ret = PTR_ERR(dp->phy);
1232 if (ret) {
1233 /*
1234 * phy itself is not enabled, so we can move forward
1235 * assigning NULL to phy pointer.
1236 */
1237 if (ret == -ENOSYS || ret == -ENODEV)
1238 dp->phy = NULL;
1239 else
1240 return ret;
1241 }
1242 }
1283 1243
1284 if (!dp->panel) { 1244 if (!dp->panel) {
1285 ret = exynos_dp_dt_parse_panel(dp); 1245 ret = exynos_dp_dt_parse_panel(dp);
@@ -1346,17 +1306,15 @@ static int exynos_dp_bind(struct device *dev, struct device *master, void *data)
1346 1306
1347 dp->drm_dev = drm_dev; 1307 dp->drm_dev = drm_dev;
1348 1308
1349 platform_set_drvdata(pdev, &exynos_dp_display); 1309 return exynos_drm_create_enc_conn(drm_dev, &dp->display);
1350
1351 return exynos_drm_create_enc_conn(drm_dev, &exynos_dp_display);
1352} 1310}
1353 1311
1354static void exynos_dp_unbind(struct device *dev, struct device *master, 1312static void exynos_dp_unbind(struct device *dev, struct device *master,
1355 void *data) 1313 void *data)
1356{ 1314{
1357 struct exynos_drm_display *display = dev_get_drvdata(dev); 1315 struct exynos_dp_device *dp = dev_get_drvdata(dev);
1358 1316
1359 exynos_dp_dpms(display, DRM_MODE_DPMS_OFF); 1317 exynos_dp_dpms(&dp->display, DRM_MODE_DPMS_OFF);
1360} 1318}
1361 1319
1362static const struct component_ops exynos_dp_ops = { 1320static const struct component_ops exynos_dp_ops = {
@@ -1371,16 +1329,20 @@ static int exynos_dp_probe(struct platform_device *pdev)
1371 struct exynos_dp_device *dp; 1329 struct exynos_dp_device *dp;
1372 int ret; 1330 int ret;
1373 1331
1374 ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR,
1375 exynos_dp_display.type);
1376 if (ret)
1377 return ret;
1378
1379 dp = devm_kzalloc(&pdev->dev, sizeof(struct exynos_dp_device), 1332 dp = devm_kzalloc(&pdev->dev, sizeof(struct exynos_dp_device),
1380 GFP_KERNEL); 1333 GFP_KERNEL);
1381 if (!dp) 1334 if (!dp)
1382 return -ENOMEM; 1335 return -ENOMEM;
1383 1336
1337 dp->display.type = EXYNOS_DISPLAY_TYPE_LCD;
1338 dp->display.ops = &exynos_dp_display_ops;
1339 platform_set_drvdata(pdev, dp);
1340
1341 ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR,
1342 dp->display.type);
1343 if (ret)
1344 return ret;
1345
1384 panel_node = of_parse_phandle(dev->of_node, "panel", 0); 1346 panel_node = of_parse_phandle(dev->of_node, "panel", 0);
1385 if (panel_node) { 1347 if (panel_node) {
1386 dp->panel = of_drm_find_panel(panel_node); 1348 dp->panel = of_drm_find_panel(panel_node);
@@ -1389,8 +1351,6 @@ static int exynos_dp_probe(struct platform_device *pdev)
1389 return -EPROBE_DEFER; 1351 return -EPROBE_DEFER;
1390 } 1352 }
1391 1353
1392 exynos_dp_display.ctx = dp;
1393
1394 ret = component_add(&pdev->dev, &exynos_dp_ops); 1354 ret = component_add(&pdev->dev, &exynos_dp_ops);
1395 if (ret) 1355 if (ret)
1396 exynos_drm_component_del(&pdev->dev, 1356 exynos_drm_component_del(&pdev->dev,
@@ -1410,19 +1370,17 @@ static int exynos_dp_remove(struct platform_device *pdev)
1410#ifdef CONFIG_PM_SLEEP 1370#ifdef CONFIG_PM_SLEEP
1411static int exynos_dp_suspend(struct device *dev) 1371static int exynos_dp_suspend(struct device *dev)
1412{ 1372{
1413 struct platform_device *pdev = to_platform_device(dev); 1373 struct exynos_dp_device *dp = dev_get_drvdata(dev);
1414 struct exynos_drm_display *display = platform_get_drvdata(pdev);
1415 1374
1416 exynos_dp_dpms(display, DRM_MODE_DPMS_OFF); 1375 exynos_dp_dpms(&dp->display, DRM_MODE_DPMS_OFF);
1417 return 0; 1376 return 0;
1418} 1377}
1419 1378
1420static int exynos_dp_resume(struct device *dev) 1379static int exynos_dp_resume(struct device *dev)
1421{ 1380{
1422 struct platform_device *pdev = to_platform_device(dev); 1381 struct exynos_dp_device *dp = dev_get_drvdata(dev);
1423 struct exynos_drm_display *display = platform_get_drvdata(pdev);
1424 1382
1425 exynos_dp_dpms(display, DRM_MODE_DPMS_ON); 1383 exynos_dp_dpms(&dp->display, DRM_MODE_DPMS_ON);
1426 return 0; 1384 return 0;
1427} 1385}
1428#endif 1386#endif
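The optional-phy idiom the bind path adopts above, reduced to a standalone
sketch (names are illustrative, not part of the patch): a missing phy
(-ENODEV or -ENOSYS) is tolerated by clearing the pointer, so the
phy_power_on()/phy_power_off() calls behind the NULL checks simply become
no-ops.

#include <linux/err.h>
#include <linux/phy/phy.h>

static int example_get_optional_phy(struct device *dev, struct phy **out)
{
	struct phy *phy = devm_phy_get(dev, "dp");

	if (IS_ERR(phy)) {
		int err = PTR_ERR(phy);

		/* Real failures (e.g. -EPROBE_DEFER) must be propagated. */
		if (err != -ENOSYS && err != -ENODEV)
			return err;
		phy = NULL;	/* phy genuinely absent: run without it */
	}

	*out = phy;
	return 0;
}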
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.h b/drivers/gpu/drm/exynos/exynos_dp_core.h
index a1aee6931bd7..164f171168e7 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.h
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.h
@@ -17,6 +17,8 @@
17#include <drm/drm_dp_helper.h> 17#include <drm/drm_dp_helper.h>
18#include <drm/exynos_drm.h> 18#include <drm/exynos_drm.h>
19 19
20#include "exynos_drm_drv.h"
21
20#define DP_TIMEOUT_LOOP_COUNT 100 22#define DP_TIMEOUT_LOOP_COUNT 100
21#define MAX_CR_LOOP 5 23#define MAX_CR_LOOP 5
22#define MAX_EQ_LOOP 5 24#define MAX_EQ_LOOP 5
@@ -145,6 +147,7 @@ struct link_train {
145}; 147};
146 148
147struct exynos_dp_device { 149struct exynos_dp_device {
150 struct exynos_drm_display display;
148 struct device *dev; 151 struct device *dev;
149 struct drm_device *drm_dev; 152 struct drm_device *drm_dev;
150 struct drm_connector connector; 153 struct drm_connector connector;
@@ -153,8 +156,6 @@ struct exynos_dp_device {
153 struct clk *clock; 156 struct clk *clock;
154 unsigned int irq; 157 unsigned int irq;
155 void __iomem *reg_base; 158 void __iomem *reg_base;
156 void __iomem *phy_addr;
157 unsigned int enable_mask;
158 159
159 struct video_info *video_info; 160 struct video_info *video_info;
160 struct link_train link_train; 161 struct link_train link_train;
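The embedding pattern used throughout these exynos changes, shown as a
generic sketch (all names below are placeholders, not part of the patch):
with the common structure embedded as a member, callbacks recover the
driver state through a type-checked container_of() instead of the old
void *ctx field.

#include <linux/kernel.h>

struct example_device {
	struct exynos_drm_display display;	/* embedded common part */
	void __iomem *regs;			/* driver-private state */
};

static inline struct example_device *
display_to_example(struct exynos_drm_display *d)
{
	return container_of(d, struct example_device, display);
}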
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index 690dcddab725..e353d353836f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -15,10 +15,7 @@
15#ifndef _EXYNOS_DRM_CRTC_H_ 15#ifndef _EXYNOS_DRM_CRTC_H_
16#define _EXYNOS_DRM_CRTC_H_ 16#define _EXYNOS_DRM_CRTC_H_
17 17
18struct drm_device; 18#include "exynos_drm_drv.h"
19struct drm_crtc;
20struct exynos_drm_manager;
21struct exynos_drm_overlay;
22 19
23int exynos_drm_crtc_create(struct exynos_drm_manager *manager); 20int exynos_drm_crtc_create(struct exynos_drm_manager *manager);
24int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe); 21int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index 3dc678ed9949..37678cf4425a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -22,6 +22,7 @@
22#include "exynos_drm_drv.h" 22#include "exynos_drm_drv.h"
23 23
24struct exynos_dpi { 24struct exynos_dpi {
25 struct exynos_drm_display display;
25 struct device *dev; 26 struct device *dev;
26 struct device_node *panel_node; 27 struct device_node *panel_node;
27 28
@@ -35,6 +36,11 @@ struct exynos_dpi {
35 36
36#define connector_to_dpi(c) container_of(c, struct exynos_dpi, connector) 37#define connector_to_dpi(c) container_of(c, struct exynos_dpi, connector)
37 38
39static inline struct exynos_dpi *display_to_dpi(struct exynos_drm_display *d)
40{
41 return container_of(d, struct exynos_dpi, display);
42}
43
38static enum drm_connector_status 44static enum drm_connector_status
39exynos_dpi_detect(struct drm_connector *connector, bool force) 45exynos_dpi_detect(struct drm_connector *connector, bool force)
40{ 46{
@@ -100,7 +106,7 @@ static struct drm_connector_helper_funcs exynos_dpi_connector_helper_funcs = {
100static int exynos_dpi_create_connector(struct exynos_drm_display *display, 106static int exynos_dpi_create_connector(struct exynos_drm_display *display,
101 struct drm_encoder *encoder) 107 struct drm_encoder *encoder)
102{ 108{
103 struct exynos_dpi *ctx = display->ctx; 109 struct exynos_dpi *ctx = display_to_dpi(display);
104 struct drm_connector *connector = &ctx->connector; 110 struct drm_connector *connector = &ctx->connector;
105 int ret; 111 int ret;
106 112
@@ -141,7 +147,7 @@ static void exynos_dpi_poweroff(struct exynos_dpi *ctx)
141 147
142static void exynos_dpi_dpms(struct exynos_drm_display *display, int mode) 148static void exynos_dpi_dpms(struct exynos_drm_display *display, int mode)
143{ 149{
144 struct exynos_dpi *ctx = display->ctx; 150 struct exynos_dpi *ctx = display_to_dpi(display);
145 151
146 switch (mode) { 152 switch (mode) {
147 case DRM_MODE_DPMS_ON: 153 case DRM_MODE_DPMS_ON:
@@ -165,11 +171,6 @@ static struct exynos_drm_display_ops exynos_dpi_display_ops = {
165 .dpms = exynos_dpi_dpms 171 .dpms = exynos_dpi_dpms
166}; 172};
167 173
168static struct exynos_drm_display exynos_dpi_display = {
169 .type = EXYNOS_DISPLAY_TYPE_LCD,
170 .ops = &exynos_dpi_display_ops,
171};
172
173/* of_* functions will be removed after merge of of_graph patches */ 174/* of_* functions will be removed after merge of of_graph patches */
174static struct device_node * 175static struct device_node *
175of_get_child_by_name_reg(struct device_node *parent, const char *name, u32 reg) 176of_get_child_by_name_reg(struct device_node *parent, const char *name, u32 reg)
@@ -299,20 +300,21 @@ struct exynos_drm_display *exynos_dpi_probe(struct device *dev)
299 struct exynos_dpi *ctx; 300 struct exynos_dpi *ctx;
300 int ret; 301 int ret;
301 302
302 ret = exynos_drm_component_add(dev,
303 EXYNOS_DEVICE_TYPE_CONNECTOR,
304 exynos_dpi_display.type);
305 if (ret)
306 return ERR_PTR(ret);
307
308 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); 303 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
309 if (!ctx) 304 if (!ctx)
310 goto err_del_component; 305 return ERR_PTR(-ENOMEM);
311 306
307 ctx->display.type = EXYNOS_DISPLAY_TYPE_LCD;
308 ctx->display.ops = &exynos_dpi_display_ops;
312 ctx->dev = dev; 309 ctx->dev = dev;
313 exynos_dpi_display.ctx = ctx;
314 ctx->dpms_mode = DRM_MODE_DPMS_OFF; 310 ctx->dpms_mode = DRM_MODE_DPMS_OFF;
315 311
312 ret = exynos_drm_component_add(dev,
313 EXYNOS_DEVICE_TYPE_CONNECTOR,
314 ctx->display.type);
315 if (ret)
316 return ERR_PTR(ret);
317
316 ret = exynos_dpi_parse_dt(ctx); 318 ret = exynos_dpi_parse_dt(ctx);
317 if (ret < 0) { 319 if (ret < 0) {
318 devm_kfree(dev, ctx); 320 devm_kfree(dev, ctx);
@@ -328,7 +330,7 @@ struct exynos_drm_display *exynos_dpi_probe(struct device *dev)
328 } 330 }
329 } 331 }
330 332
331 return &exynos_dpi_display; 333 return &ctx->display;
332 334
333err_del_component: 335err_del_component:
334 exynos_drm_component_del(dev, EXYNOS_DEVICE_TYPE_CONNECTOR); 336 exynos_drm_component_del(dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
@@ -336,16 +338,16 @@ err_del_component:
336 return NULL; 338 return NULL;
337} 339}
338 340
339int exynos_dpi_remove(struct device *dev) 341int exynos_dpi_remove(struct exynos_drm_display *display)
340{ 342{
341 struct exynos_dpi *ctx = exynos_dpi_display.ctx; 343 struct exynos_dpi *ctx = display_to_dpi(display);
342 344
343 exynos_dpi_dpms(&exynos_dpi_display, DRM_MODE_DPMS_OFF); 345 exynos_dpi_dpms(&ctx->display, DRM_MODE_DPMS_OFF);
344 346
345 if (ctx->panel) 347 if (ctx->panel)
346 drm_panel_detach(ctx->panel); 348 drm_panel_detach(ctx->panel);
347 349
348 exynos_drm_component_del(dev, EXYNOS_DEVICE_TYPE_CONNECTOR); 350 exynos_drm_component_del(ctx->dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
349 351
350 return 0; 352 return 0;
351} 353}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index e277d4f12812..121470a83d1a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -203,8 +203,6 @@ static int exynos_drm_resume(struct drm_device *dev)
203 } 203 }
204 drm_modeset_unlock_all(dev); 204 drm_modeset_unlock_all(dev);
205 205
206 drm_helper_resume_force_mode(dev);
207
208 return 0; 206 return 0;
209} 207}
210 208
@@ -475,8 +473,6 @@ void exynos_drm_component_del(struct device *dev,
475 list_del(&cdev->list); 473 list_del(&cdev->list);
476 kfree(cdev); 474 kfree(cdev);
477 } 475 }
478
479 break;
480 } 476 }
481 477
482 mutex_unlock(&drm_component_lock); 478 mutex_unlock(&drm_component_lock);
@@ -556,182 +552,68 @@ static const struct component_master_ops exynos_drm_ops = {
556 .unbind = exynos_drm_unbind, 552 .unbind = exynos_drm_unbind,
557}; 553};
558 554
559static int exynos_drm_platform_probe(struct platform_device *pdev) 555static struct platform_driver *const exynos_drm_kms_drivers[] = {
560{
561 struct component_match *match;
562 int ret;
563
564 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
565 exynos_drm_driver.num_ioctls = ARRAY_SIZE(exynos_ioctls);
566
567#ifdef CONFIG_DRM_EXYNOS_FIMD 556#ifdef CONFIG_DRM_EXYNOS_FIMD
568 ret = platform_driver_register(&fimd_driver); 557 &fimd_driver,
569 if (ret < 0)
570 return ret;
571#endif 558#endif
572
573#ifdef CONFIG_DRM_EXYNOS_DP 559#ifdef CONFIG_DRM_EXYNOS_DP
574 ret = platform_driver_register(&dp_driver); 560 &dp_driver,
575 if (ret < 0)
576 goto err_unregister_fimd_drv;
577#endif 561#endif
578
579#ifdef CONFIG_DRM_EXYNOS_DSI 562#ifdef CONFIG_DRM_EXYNOS_DSI
580 ret = platform_driver_register(&dsi_driver); 563 &dsi_driver,
581 if (ret < 0)
582 goto err_unregister_dp_drv;
583#endif 564#endif
584
585#ifdef CONFIG_DRM_EXYNOS_HDMI 565#ifdef CONFIG_DRM_EXYNOS_HDMI
586 ret = platform_driver_register(&mixer_driver); 566 &mixer_driver,
587 if (ret < 0) 567 &hdmi_driver,
588 goto err_unregister_dsi_drv;
589 ret = platform_driver_register(&hdmi_driver);
590 if (ret < 0)
591 goto err_unregister_mixer_drv;
592#endif 568#endif
569};
593 570
594 match = exynos_drm_match_add(&pdev->dev); 571static struct platform_driver *const exynos_drm_non_kms_drivers[] = {
595 if (IS_ERR(match)) {
596 ret = PTR_ERR(match);
597 goto err_unregister_hdmi_drv;
598 }
599
600 ret = component_master_add_with_match(&pdev->dev, &exynos_drm_ops,
601 match);
602 if (ret < 0)
603 goto err_unregister_hdmi_drv;
604
605#ifdef CONFIG_DRM_EXYNOS_G2D 572#ifdef CONFIG_DRM_EXYNOS_G2D
606 ret = platform_driver_register(&g2d_driver); 573 &g2d_driver,
607 if (ret < 0)
608 goto err_del_component_master;
609#endif 574#endif
610
611#ifdef CONFIG_DRM_EXYNOS_FIMC 575#ifdef CONFIG_DRM_EXYNOS_FIMC
612 ret = platform_driver_register(&fimc_driver); 576 &fimc_driver,
613 if (ret < 0)
614 goto err_unregister_g2d_drv;
615#endif 577#endif
616
617#ifdef CONFIG_DRM_EXYNOS_ROTATOR 578#ifdef CONFIG_DRM_EXYNOS_ROTATOR
618 ret = platform_driver_register(&rotator_driver); 579 &rotator_driver,
619 if (ret < 0)
620 goto err_unregister_fimc_drv;
621#endif 580#endif
622
623#ifdef CONFIG_DRM_EXYNOS_GSC 581#ifdef CONFIG_DRM_EXYNOS_GSC
624 ret = platform_driver_register(&gsc_driver); 582 &gsc_driver,
625 if (ret < 0)
626 goto err_unregister_rotator_drv;
627#endif
628
629#ifdef CONFIG_DRM_EXYNOS_IPP
630 ret = platform_driver_register(&ipp_driver);
631 if (ret < 0)
632 goto err_unregister_gsc_drv;
633
634 ret = exynos_platform_device_ipp_register();
635 if (ret < 0)
636 goto err_unregister_ipp_drv;
637#endif 583#endif
638
639 return ret;
640
641#ifdef CONFIG_DRM_EXYNOS_IPP 584#ifdef CONFIG_DRM_EXYNOS_IPP
642err_unregister_ipp_drv: 585 &ipp_driver,
643 platform_driver_unregister(&ipp_driver);
644err_unregister_gsc_drv:
645#endif
646
647#ifdef CONFIG_DRM_EXYNOS_GSC
648 platform_driver_unregister(&gsc_driver);
649err_unregister_rotator_drv:
650#endif 586#endif
587};
651 588
652#ifdef CONFIG_DRM_EXYNOS_ROTATOR 589static int exynos_drm_platform_probe(struct platform_device *pdev)
653 platform_driver_unregister(&rotator_driver); 590{
654err_unregister_fimc_drv: 591 struct component_match *match;
655#endif
656
657#ifdef CONFIG_DRM_EXYNOS_FIMC
658 platform_driver_unregister(&fimc_driver);
659err_unregister_g2d_drv:
660#endif
661
662#ifdef CONFIG_DRM_EXYNOS_G2D
663 platform_driver_unregister(&g2d_driver);
664err_del_component_master:
665#endif
666 component_master_del(&pdev->dev, &exynos_drm_ops);
667
668err_unregister_hdmi_drv:
669#ifdef CONFIG_DRM_EXYNOS_HDMI
670 platform_driver_unregister(&hdmi_driver);
671err_unregister_mixer_drv:
672 platform_driver_unregister(&mixer_driver);
673err_unregister_dsi_drv:
674#endif
675 592
676#ifdef CONFIG_DRM_EXYNOS_DSI 593 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
677 platform_driver_unregister(&dsi_driver); 594 exynos_drm_driver.num_ioctls = ARRAY_SIZE(exynos_ioctls);
678err_unregister_dp_drv:
679#endif
680 595
681#ifdef CONFIG_DRM_EXYNOS_DP 596 match = exynos_drm_match_add(&pdev->dev);
682 platform_driver_unregister(&dp_driver); 597 if (IS_ERR(match)) {
683err_unregister_fimd_drv: 598 return PTR_ERR(match);
684#endif 599 }
685 600
686#ifdef CONFIG_DRM_EXYNOS_FIMD 601 return component_master_add_with_match(&pdev->dev, &exynos_drm_ops,
687 platform_driver_unregister(&fimd_driver); 602 match);
688#endif
689 return ret;
690} 603}
691 604
692static int exynos_drm_platform_remove(struct platform_device *pdev) 605static int exynos_drm_platform_remove(struct platform_device *pdev)
693{ 606{
694#ifdef CONFIG_DRM_EXYNOS_IPP
695 exynos_platform_device_ipp_unregister();
696 platform_driver_unregister(&ipp_driver);
697#endif
698
699#ifdef CONFIG_DRM_EXYNOS_GSC
700 platform_driver_unregister(&gsc_driver);
701#endif
702
703#ifdef CONFIG_DRM_EXYNOS_ROTATOR
704 platform_driver_unregister(&rotator_driver);
705#endif
706
707#ifdef CONFIG_DRM_EXYNOS_FIMC
708 platform_driver_unregister(&fimc_driver);
709#endif
710
711#ifdef CONFIG_DRM_EXYNOS_G2D
712 platform_driver_unregister(&g2d_driver);
713#endif
714
715#ifdef CONFIG_DRM_EXYNOS_HDMI
716 platform_driver_unregister(&mixer_driver);
717 platform_driver_unregister(&hdmi_driver);
718#endif
719
720#ifdef CONFIG_DRM_EXYNOS_FIMD
721 platform_driver_unregister(&fimd_driver);
722#endif
723
724#ifdef CONFIG_DRM_EXYNOS_DSI
725 platform_driver_unregister(&dsi_driver);
726#endif
727
728#ifdef CONFIG_DRM_EXYNOS_DP
729 platform_driver_unregister(&dp_driver);
730#endif
731 component_master_del(&pdev->dev, &exynos_drm_ops); 607 component_master_del(&pdev->dev, &exynos_drm_ops);
732 return 0; 608 return 0;
733} 609}
734 610
611static const char * const strings[] = {
612 "samsung,exynos3",
613 "samsung,exynos4",
614 "samsung,exynos5",
615};
616
735static struct platform_driver exynos_drm_platform_driver = { 617static struct platform_driver exynos_drm_platform_driver = {
736 .probe = exynos_drm_platform_probe, 618 .probe = exynos_drm_platform_probe,
737 .remove = exynos_drm_platform_remove, 619 .remove = exynos_drm_platform_remove,
@@ -743,7 +625,25 @@ static struct platform_driver exynos_drm_platform_driver = {
743 625
744static int exynos_drm_init(void) 626static int exynos_drm_init(void)
745{ 627{
746 int ret; 628 bool is_exynos = false;
629 int ret, i, j;
630
631 /*
632 * Register device object only in case of Exynos SoC.
633 *
634	 * The code below temporarily works around an infinite loop issue hit
635	 * by the Exynos drm driver when running a multi-platform kernel. It
636	 * will be replaced with a more generic mechanism later.
637 */
638 for (i = 0; i < ARRAY_SIZE(strings); i++) {
639 if (of_machine_is_compatible(strings[i])) {
640 is_exynos = true;
641 break;
642 }
643 }
644
645 if (!is_exynos)
646 return -ENODEV;
747 647
748 /* 648 /*
749 * Register device object only in case of Exynos SoC. 649 * Register device object only in case of Exynos SoC.
@@ -762,24 +662,50 @@ static int exynos_drm_init(void)
762 if (IS_ERR(exynos_drm_pdev)) 662 if (IS_ERR(exynos_drm_pdev))
763 return PTR_ERR(exynos_drm_pdev); 663 return PTR_ERR(exynos_drm_pdev);
764 664
765#ifdef CONFIG_DRM_EXYNOS_VIDI
766 ret = exynos_drm_probe_vidi(); 665 ret = exynos_drm_probe_vidi();
767 if (ret < 0) 666 if (ret < 0)
768 goto err_unregister_pd; 667 goto err_unregister_pd;
668
669 for (i = 0; i < ARRAY_SIZE(exynos_drm_kms_drivers); ++i) {
670 ret = platform_driver_register(exynos_drm_kms_drivers[i]);
671 if (ret < 0)
672 goto err_unregister_kms_drivers;
673 }
674
675 for (j = 0; j < ARRAY_SIZE(exynos_drm_non_kms_drivers); ++j) {
676 ret = platform_driver_register(exynos_drm_non_kms_drivers[j]);
677 if (ret < 0)
678 goto err_unregister_non_kms_drivers;
679 }
680
681#ifdef CONFIG_DRM_EXYNOS_IPP
682 ret = exynos_platform_device_ipp_register();
683 if (ret < 0)
684 goto err_unregister_non_kms_drivers;
769#endif 685#endif
770 686
771 ret = platform_driver_register(&exynos_drm_platform_driver); 687 ret = platform_driver_register(&exynos_drm_platform_driver);
772 if (ret) 688 if (ret)
773 goto err_remove_vidi; 689 goto err_unregister_resources;
774 690
775 return 0; 691 return 0;
776 692
777err_remove_vidi: 693err_unregister_resources:
778#ifdef CONFIG_DRM_EXYNOS_VIDI 694#ifdef CONFIG_DRM_EXYNOS_IPP
695 exynos_platform_device_ipp_unregister();
696#endif
697
698err_unregister_non_kms_drivers:
699 while (--j >= 0)
700 platform_driver_unregister(exynos_drm_non_kms_drivers[j]);
701
702err_unregister_kms_drivers:
703 while (--i >= 0)
704 platform_driver_unregister(exynos_drm_kms_drivers[i]);
705
779 exynos_drm_remove_vidi(); 706 exynos_drm_remove_vidi();
780 707
781err_unregister_pd: 708err_unregister_pd:
782#endif
783 platform_device_unregister(exynos_drm_pdev); 709 platform_device_unregister(exynos_drm_pdev);
784 710
785 return ret; 711 return ret;
@@ -787,10 +713,22 @@ err_unregister_pd:
787 713
788static void exynos_drm_exit(void) 714static void exynos_drm_exit(void)
789{ 715{
716 int i;
717
718#ifdef CONFIG_DRM_EXYNOS_IPP
719 exynos_platform_device_ipp_unregister();
720#endif
721
722 for (i = ARRAY_SIZE(exynos_drm_non_kms_drivers) - 1; i >= 0; --i)
723 platform_driver_unregister(exynos_drm_non_kms_drivers[i]);
724
725 for (i = ARRAY_SIZE(exynos_drm_kms_drivers) - 1; i >= 0; --i)
726 platform_driver_unregister(exynos_drm_kms_drivers[i]);
727
790 platform_driver_unregister(&exynos_drm_platform_driver); 728 platform_driver_unregister(&exynos_drm_platform_driver);
791#ifdef CONFIG_DRM_EXYNOS_VIDI 729
792 exynos_drm_remove_vidi(); 730 exynos_drm_remove_vidi();
793#endif 731
794 platform_device_unregister(exynos_drm_pdev); 732 platform_device_unregister(exynos_drm_pdev);
795} 733}
796 734
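The array-plus-loop registration scheme the init path switches to above,
reduced to a generic sketch with the usual reverse-order unwind on
failure; the driver names are placeholders, not part of the patch:

#include <linux/platform_device.h>

static struct platform_driver *const example_drivers[] = {
	&first_driver,
	&second_driver,
};

static int example_register_drivers(void)
{
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(example_drivers); i++) {
		ret = platform_driver_register(example_drivers[i]);
		if (ret < 0)
			goto unwind;
	}

	return 0;

unwind:
	/* Unregister, in reverse order, everything registered so far. */
	while (--i >= 0)
		platform_driver_unregister(example_drivers[i]);

	return ret;
}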
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index d22e640f59a0..2e5063488c50 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -15,6 +15,7 @@
15#ifndef _EXYNOS_DRM_DRV_H_ 15#ifndef _EXYNOS_DRM_DRV_H_
16#define _EXYNOS_DRM_DRV_H_ 16#define _EXYNOS_DRM_DRV_H_
17 17
18#include <drm/drmP.h>
18#include <linux/module.h> 19#include <linux/module.h>
19 20
20#define MAX_CRTC 3 21#define MAX_CRTC 3
@@ -22,24 +23,6 @@
22#define MAX_FB_BUFFER 4 23#define MAX_FB_BUFFER 4
23#define DEFAULT_ZPOS -1 24#define DEFAULT_ZPOS -1
24 25
25#define _wait_for(COND, MS) ({ \
26 unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
27 int ret__ = 0; \
28 while (!(COND)) { \
29 if (time_after(jiffies, timeout__)) { \
30 ret__ = -ETIMEDOUT; \
31 break; \
32 } \
33 } \
34 ret__; \
35})
36
37#define wait_for(COND, MS) _wait_for(COND, MS)
38
39struct drm_device;
40struct exynos_drm_overlay;
41struct drm_connector;
42
43/* This enumerates device type. */ 26/* This enumerates device type. */
44enum exynos_drm_device_type { 27enum exynos_drm_device_type {
45 EXYNOS_DEVICE_TYPE_NONE, 28 EXYNOS_DEVICE_TYPE_NONE,
@@ -83,10 +66,10 @@ enum exynos_drm_output_type {
83 * @dma_addr: array of bus(accessed by dma) address to the memory region 66 * @dma_addr: array of bus(accessed by dma) address to the memory region
84 * allocated for a overlay. 67 * allocated for a overlay.
85 * @zpos: order of overlay layer(z position). 68 * @zpos: order of overlay layer(z position).
86 * @default_win: a window to be enabled.
87 * @color_key: color key on or off.
88 * @index_color: if using color key feature then this value would be used 69 * @index_color: if using color key feature then this value would be used
89 * as index color. 70 * as index color.
71 * @default_win: a window to be enabled.
72 * @color_key: color key on or off.
90 * @local_path: in case of lcd type, local path mode on or off. 73 * @local_path: in case of lcd type, local path mode on or off.
91 * @transparency: transparency on or off. 74 * @transparency: transparency on or off.
92 * @activated: activated or not. 75 * @activated: activated or not.
@@ -114,19 +97,20 @@ struct exynos_drm_overlay {
114 uint32_t pixel_format; 97 uint32_t pixel_format;
115 dma_addr_t dma_addr[MAX_FB_BUFFER]; 98 dma_addr_t dma_addr[MAX_FB_BUFFER];
116 int zpos; 99 int zpos;
117
118 bool default_win;
119 bool color_key;
120 unsigned int index_color; 100 unsigned int index_color;
121 bool local_path; 101
122 bool transparency; 102 bool default_win:1;
123 bool activated; 103 bool color_key:1;
104 bool local_path:1;
105 bool transparency:1;
106 bool activated:1;
124}; 107};
125 108
126/* 109/*
127 * Exynos DRM Display Structure. 110 * Exynos DRM Display Structure.
128 * - this structure is common to analog tv, digital tv and lcd panel. 111 * - this structure is common to analog tv, digital tv and lcd panel.
129 * 112 *
113 * @create_connector: initialize and register a new connector
130 * @remove: cleans up the display for removal 114 * @remove: cleans up the display for removal
 131 * @mode_fixup: fix up mode data against the hw specific display mode. 115 * @mode_fixup: fix up mode data against the hw specific display mode.
132 * @mode_set: convert drm_display_mode to hw specific display mode and 116 * @mode_set: convert drm_display_mode to hw specific display mode and
@@ -168,7 +152,6 @@ struct exynos_drm_display {
168 struct drm_encoder *encoder; 152 struct drm_encoder *encoder;
169 struct drm_connector *connector; 153 struct drm_connector *connector;
170 struct exynos_drm_display_ops *ops; 154 struct exynos_drm_display_ops *ops;
171 void *ctx;
172}; 155};
173 156
174/* 157/*
@@ -227,7 +210,6 @@ struct exynos_drm_manager {
227 struct drm_crtc *crtc; 210 struct drm_crtc *crtc;
228 int pipe; 211 int pipe;
229 struct exynos_drm_manager_ops *ops; 212 struct exynos_drm_manager_ops *ops;
230 void *ctx;
231}; 213};
232 214
233struct exynos_drm_g2d_private { 215struct exynos_drm_g2d_private {
@@ -279,8 +261,6 @@ struct exynos_drm_private {
279 * @dev: pointer to device object for subdrv device driver. 261 * @dev: pointer to device object for subdrv device driver.
280 * @drm_dev: pointer to drm_device and this pointer would be set 262 * @drm_dev: pointer to drm_device and this pointer would be set
281 * when sub driver calls exynos_drm_subdrv_register(). 263 * when sub driver calls exynos_drm_subdrv_register().
282 * @manager: subdrv has its own manager to control a hardware appropriately
283 * and we can access a hardware drawing on this manager.
284 * @probe: this callback would be called by exynos drm driver after 264 * @probe: this callback would be called by exynos drm driver after
285 * subdrv is registered to it. 265 * subdrv is registered to it.
286 * @remove: this callback is used to release resources created 266 * @remove: this callback is used to release resources created
@@ -312,45 +292,34 @@ int exynos_drm_device_subdrv_remove(struct drm_device *dev);
312int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file); 292int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file);
313void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file); 293void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file);
314 294
315/* 295#ifdef CONFIG_DRM_EXYNOS_IPP
316 * this function registers exynos drm hdmi platform device. It ensures only one
317 * instance of the device is created.
318 */
319int exynos_platform_device_hdmi_register(void);
320
321/*
322 * this function unregisters exynos drm hdmi platform device if it exists.
323 */
324void exynos_platform_device_hdmi_unregister(void);
325
326/*
327 * this function registers exynos drm ipp platform device.
328 */
329int exynos_platform_device_ipp_register(void); 296int exynos_platform_device_ipp_register(void);
330
331/*
332 * this function unregisters exynos drm ipp platform device if it exists.
333 */
334void exynos_platform_device_ipp_unregister(void); 297void exynos_platform_device_ipp_unregister(void);
298#else
299static inline int exynos_platform_device_ipp_register(void) { return 0; }
300static inline void exynos_platform_device_ipp_unregister(void) {}
301#endif
302
335 303
336#ifdef CONFIG_DRM_EXYNOS_DPI 304#ifdef CONFIG_DRM_EXYNOS_DPI
337struct exynos_drm_display * exynos_dpi_probe(struct device *dev); 305struct exynos_drm_display * exynos_dpi_probe(struct device *dev);
338int exynos_dpi_remove(struct device *dev); 306int exynos_dpi_remove(struct exynos_drm_display *display);
339#else 307#else
340static inline struct exynos_drm_display * 308static inline struct exynos_drm_display *
341exynos_dpi_probe(struct device *dev) { return NULL; } 309exynos_dpi_probe(struct device *dev) { return NULL; }
342static inline int exynos_dpi_remove(struct device *dev) { return 0; } 310static inline int exynos_dpi_remove(struct exynos_drm_display *display)
311{
312 return 0;
313}
343#endif 314#endif
344 315
345/* 316#ifdef CONFIG_DRM_EXYNOS_VIDI
346 * this function registers exynos drm vidi platform device/driver.
347 */
348int exynos_drm_probe_vidi(void); 317int exynos_drm_probe_vidi(void);
349
350/*
351 * this function unregister exynos drm vidi platform device/driver.
352 */
353void exynos_drm_remove_vidi(void); 318void exynos_drm_remove_vidi(void);
319#else
320static inline int exynos_drm_probe_vidi(void) { return 0; }
321static inline void exynos_drm_remove_vidi(void) {}
322#endif
354 323
355/* This function creates an encoder and a connector, and initializes them. */ 324/* This function creates an encoder and a connector, and initializes them. */
356int exynos_drm_create_enc_conn(struct drm_device *dev, 325int exynos_drm_create_enc_conn(struct drm_device *dev,
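The static-inline stub pattern the header now applies to the IPP and VIDI
entry points, shown generically (the config symbol and function names are
placeholders, not part of the patch) so that callers never need #ifdef
guards of their own:

#ifdef CONFIG_EXAMPLE_FEATURE
int example_feature_register(void);
void example_feature_unregister(void);
#else
static inline int example_feature_register(void) { return 0; }
static inline void example_feature_unregister(void) {}
#endif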
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index acf7e9e39dcd..05fe93dc57a8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -268,9 +268,9 @@ struct exynos_dsi_driver_data {
268}; 268};
269 269
270struct exynos_dsi { 270struct exynos_dsi {
271 struct exynos_drm_display display;
271 struct mipi_dsi_host dsi_host; 272 struct mipi_dsi_host dsi_host;
272 struct drm_connector connector; 273 struct drm_connector connector;
273 struct drm_encoder *encoder;
274 struct device_node *panel_node; 274 struct device_node *panel_node;
275 struct drm_panel *panel; 275 struct drm_panel *panel;
276 struct device *dev; 276 struct device *dev;
@@ -304,6 +304,11 @@ struct exynos_dsi {
304#define host_to_dsi(host) container_of(host, struct exynos_dsi, dsi_host) 304#define host_to_dsi(host) container_of(host, struct exynos_dsi, dsi_host)
305#define connector_to_dsi(c) container_of(c, struct exynos_dsi, connector) 305#define connector_to_dsi(c) container_of(c, struct exynos_dsi, connector)
306 306
307static inline struct exynos_dsi *display_to_dsi(struct exynos_drm_display *d)
308{
309 return container_of(d, struct exynos_dsi, display);
310}
311
307static struct exynos_dsi_driver_data exynos3_dsi_driver_data = { 312static struct exynos_dsi_driver_data exynos3_dsi_driver_data = {
308 .plltmr_reg = 0x50, 313 .plltmr_reg = 0x50,
309 .has_freqband = 1, 314 .has_freqband = 1,
@@ -316,6 +321,11 @@ static struct exynos_dsi_driver_data exynos4_dsi_driver_data = {
316 .has_clklane_stop = 1, 321 .has_clklane_stop = 1,
317}; 322};
318 323
324static struct exynos_dsi_driver_data exynos4415_dsi_driver_data = {
325 .plltmr_reg = 0x58,
326 .has_clklane_stop = 1,
327};
328
319static struct exynos_dsi_driver_data exynos5_dsi_driver_data = { 329static struct exynos_dsi_driver_data exynos5_dsi_driver_data = {
320 .plltmr_reg = 0x58, 330 .plltmr_reg = 0x58,
321}; 331};
@@ -325,6 +335,8 @@ static struct of_device_id exynos_dsi_of_match[] = {
325 .data = &exynos3_dsi_driver_data }, 335 .data = &exynos3_dsi_driver_data },
326 { .compatible = "samsung,exynos4210-mipi-dsi", 336 { .compatible = "samsung,exynos4210-mipi-dsi",
327 .data = &exynos4_dsi_driver_data }, 337 .data = &exynos4_dsi_driver_data },
338 { .compatible = "samsung,exynos4415-mipi-dsi",
339 .data = &exynos4415_dsi_driver_data },
328 { .compatible = "samsung,exynos5410-mipi-dsi", 340 { .compatible = "samsung,exynos5410-mipi-dsi",
329 .data = &exynos5_dsi_driver_data }, 341 .data = &exynos5_dsi_driver_data },
330 { } 342 { }
@@ -1104,7 +1116,7 @@ static irqreturn_t exynos_dsi_irq(int irq, void *dev_id)
1104static irqreturn_t exynos_dsi_te_irq_handler(int irq, void *dev_id) 1116static irqreturn_t exynos_dsi_te_irq_handler(int irq, void *dev_id)
1105{ 1117{
1106 struct exynos_dsi *dsi = (struct exynos_dsi *)dev_id; 1118 struct exynos_dsi *dsi = (struct exynos_dsi *)dev_id;
1107 struct drm_encoder *encoder = dsi->encoder; 1119 struct drm_encoder *encoder = dsi->display.encoder;
1108 1120
1109 if (dsi->state & DSIM_STATE_ENABLED) 1121 if (dsi->state & DSIM_STATE_ENABLED)
1110 exynos_drm_crtc_te_handler(encoder->crtc); 1122 exynos_drm_crtc_te_handler(encoder->crtc);
@@ -1143,6 +1155,7 @@ static int exynos_dsi_init(struct exynos_dsi *dsi)
1143static int exynos_dsi_register_te_irq(struct exynos_dsi *dsi) 1155static int exynos_dsi_register_te_irq(struct exynos_dsi *dsi)
1144{ 1156{
1145 int ret; 1157 int ret;
1158 int te_gpio_irq;
1146 1159
1147 dsi->te_gpio = of_get_named_gpio(dsi->panel_node, "te-gpios", 0); 1160 dsi->te_gpio = of_get_named_gpio(dsi->panel_node, "te-gpios", 0);
1148 if (!gpio_is_valid(dsi->te_gpio)) { 1161 if (!gpio_is_valid(dsi->te_gpio)) {
@@ -1157,14 +1170,10 @@ static int exynos_dsi_register_te_irq(struct exynos_dsi *dsi)
1157 goto out; 1170 goto out;
1158 } 1171 }
1159 1172
1160 /* 1173 te_gpio_irq = gpio_to_irq(dsi->te_gpio);
1161 * This TE GPIO IRQ should not be set to IRQ_NOAUTOEN, because panel 1174
1162 * calls drm_panel_init() first then calls mipi_dsi_attach() in probe(). 1175 irq_set_status_flags(te_gpio_irq, IRQ_NOAUTOEN);
1163 * It means that te_gpio is invalid when exynos_dsi_enable_irq() is 1176 ret = request_threaded_irq(te_gpio_irq, exynos_dsi_te_irq_handler, NULL,
1164 * called by drm_panel_init() before panel is attached.
1165 */
1166 ret = request_threaded_irq(gpio_to_irq(dsi->te_gpio),
1167 exynos_dsi_te_irq_handler, NULL,
1168 IRQF_TRIGGER_RISING, "TE", dsi); 1177 IRQF_TRIGGER_RISING, "TE", dsi);
1169 if (ret) { 1178 if (ret) {
1170 dev_err(dsi->dev, "request interrupt failed with %d\n", ret); 1179 dev_err(dsi->dev, "request interrupt failed with %d\n", ret);
@@ -1195,9 +1204,6 @@ static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
1195 dsi->mode_flags = device->mode_flags; 1204 dsi->mode_flags = device->mode_flags;
1196 dsi->panel_node = device->dev.of_node; 1205 dsi->panel_node = device->dev.of_node;
1197 1206
1198 if (dsi->connector.dev)
1199 drm_helper_hpd_irq_event(dsi->connector.dev);
1200
1201 /* 1207 /*
1202 * This is a temporary solution and should be made by more generic way. 1208 * This is a temporary solution and should be made by more generic way.
1203 * 1209 *
@@ -1211,6 +1217,9 @@ static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
1211 return ret; 1217 return ret;
1212 } 1218 }
1213 1219
1220 if (dsi->connector.dev)
1221 drm_helper_hpd_irq_event(dsi->connector.dev);
1222
1214 return 0; 1223 return 0;
1215} 1224}
1216 1225
@@ -1236,7 +1245,7 @@ static bool exynos_dsi_is_short_dsi_type(u8 type)
1236} 1245}
1237 1246
1238static ssize_t exynos_dsi_host_transfer(struct mipi_dsi_host *host, 1247static ssize_t exynos_dsi_host_transfer(struct mipi_dsi_host *host,
1239 struct mipi_dsi_msg *msg) 1248 const struct mipi_dsi_msg *msg)
1240{ 1249{
1241 struct exynos_dsi *dsi = host_to_dsi(host); 1250 struct exynos_dsi *dsi = host_to_dsi(host);
1242 struct exynos_dsi_transfer xfer; 1251 struct exynos_dsi_transfer xfer;
@@ -1369,16 +1378,17 @@ static int exynos_dsi_enable(struct exynos_dsi *dsi)
1369 exynos_dsi_set_display_mode(dsi); 1378 exynos_dsi_set_display_mode(dsi);
1370 exynos_dsi_set_display_enable(dsi, true); 1379 exynos_dsi_set_display_enable(dsi, true);
1371 1380
1381 dsi->state |= DSIM_STATE_ENABLED;
1382
1372 ret = drm_panel_enable(dsi->panel); 1383 ret = drm_panel_enable(dsi->panel);
1373 if (ret < 0) { 1384 if (ret < 0) {
1385 dsi->state &= ~DSIM_STATE_ENABLED;
1374 exynos_dsi_set_display_enable(dsi, false); 1386 exynos_dsi_set_display_enable(dsi, false);
1375 drm_panel_unprepare(dsi->panel); 1387 drm_panel_unprepare(dsi->panel);
1376 exynos_dsi_poweroff(dsi); 1388 exynos_dsi_poweroff(dsi);
1377 return ret; 1389 return ret;
1378 } 1390 }
1379 1391
1380 dsi->state |= DSIM_STATE_ENABLED;
1381
1382 return 0; 1392 return 0;
1383} 1393}
1384 1394
@@ -1397,7 +1407,7 @@ static void exynos_dsi_disable(struct exynos_dsi *dsi)
1397 1407
1398static void exynos_dsi_dpms(struct exynos_drm_display *display, int mode) 1408static void exynos_dsi_dpms(struct exynos_drm_display *display, int mode)
1399{ 1409{
1400 struct exynos_dsi *dsi = display->ctx; 1410 struct exynos_dsi *dsi = display_to_dsi(display);
1401 1411
1402 if (dsi->panel) { 1412 if (dsi->panel) {
1403 switch (mode) { 1413 switch (mode) {
@@ -1474,7 +1484,7 @@ exynos_dsi_best_encoder(struct drm_connector *connector)
1474{ 1484{
1475 struct exynos_dsi *dsi = connector_to_dsi(connector); 1485 struct exynos_dsi *dsi = connector_to_dsi(connector);
1476 1486
1477 return dsi->encoder; 1487 return dsi->display.encoder;
1478} 1488}
1479 1489
1480static struct drm_connector_helper_funcs exynos_dsi_connector_helper_funcs = { 1490static struct drm_connector_helper_funcs exynos_dsi_connector_helper_funcs = {
@@ -1486,12 +1496,10 @@ static struct drm_connector_helper_funcs exynos_dsi_connector_helper_funcs = {
1486static int exynos_dsi_create_connector(struct exynos_drm_display *display, 1496static int exynos_dsi_create_connector(struct exynos_drm_display *display,
1487 struct drm_encoder *encoder) 1497 struct drm_encoder *encoder)
1488{ 1498{
1489 struct exynos_dsi *dsi = display->ctx; 1499 struct exynos_dsi *dsi = display_to_dsi(display);
1490 struct drm_connector *connector = &dsi->connector; 1500 struct drm_connector *connector = &dsi->connector;
1491 int ret; 1501 int ret;
1492 1502
1493 dsi->encoder = encoder;
1494
1495 connector->polled = DRM_CONNECTOR_POLL_HPD; 1503 connector->polled = DRM_CONNECTOR_POLL_HPD;
1496 1504
1497 ret = drm_connector_init(encoder->dev, connector, 1505 ret = drm_connector_init(encoder->dev, connector,
@@ -1512,7 +1520,7 @@ static int exynos_dsi_create_connector(struct exynos_drm_display *display,
1512static void exynos_dsi_mode_set(struct exynos_drm_display *display, 1520static void exynos_dsi_mode_set(struct exynos_drm_display *display,
1513 struct drm_display_mode *mode) 1521 struct drm_display_mode *mode)
1514{ 1522{
1515 struct exynos_dsi *dsi = display->ctx; 1523 struct exynos_dsi *dsi = display_to_dsi(display);
1516 struct videomode *vm = &dsi->vm; 1524 struct videomode *vm = &dsi->vm;
1517 1525
1518 vm->hactive = mode->hdisplay; 1526 vm->hactive = mode->hdisplay;
@@ -1531,10 +1539,6 @@ static struct exynos_drm_display_ops exynos_dsi_display_ops = {
1531 .dpms = exynos_dsi_dpms 1539 .dpms = exynos_dsi_dpms
1532}; 1540};
1533 1541
1534static struct exynos_drm_display exynos_dsi_display = {
1535 .type = EXYNOS_DISPLAY_TYPE_LCD,
1536 .ops = &exynos_dsi_display_ops,
1537};
1538MODULE_DEVICE_TABLE(of, exynos_dsi_of_match); 1542MODULE_DEVICE_TABLE(of, exynos_dsi_of_match);
1539 1543
1540/* of_* functions will be removed after merge of of_graph patches */ 1544/* of_* functions will be removed after merge of of_graph patches */
@@ -1640,28 +1644,28 @@ end:
1640static int exynos_dsi_bind(struct device *dev, struct device *master, 1644static int exynos_dsi_bind(struct device *dev, struct device *master,
1641 void *data) 1645 void *data)
1642{ 1646{
1647 struct exynos_drm_display *display = dev_get_drvdata(dev);
1648 struct exynos_dsi *dsi = display_to_dsi(display);
1643 struct drm_device *drm_dev = data; 1649 struct drm_device *drm_dev = data;
1644 struct exynos_dsi *dsi;
1645 int ret; 1650 int ret;
1646 1651
1647 ret = exynos_drm_create_enc_conn(drm_dev, &exynos_dsi_display); 1652 ret = exynos_drm_create_enc_conn(drm_dev, display);
1648 if (ret) { 1653 if (ret) {
1649 DRM_ERROR("Encoder create [%d] failed with %d\n", 1654 DRM_ERROR("Encoder create [%d] failed with %d\n",
1650 exynos_dsi_display.type, ret); 1655 display->type, ret);
1651 return ret; 1656 return ret;
1652 } 1657 }
1653 1658
1654 dsi = exynos_dsi_display.ctx;
1655
1656 return mipi_dsi_host_register(&dsi->dsi_host); 1659 return mipi_dsi_host_register(&dsi->dsi_host);
1657} 1660}
1658 1661
1659static void exynos_dsi_unbind(struct device *dev, struct device *master, 1662static void exynos_dsi_unbind(struct device *dev, struct device *master,
1660 void *data) 1663 void *data)
1661{ 1664{
1662 struct exynos_dsi *dsi = exynos_dsi_display.ctx; 1665 struct exynos_drm_display *display = dev_get_drvdata(dev);
1666 struct exynos_dsi *dsi = display_to_dsi(display);
1663 1667
1664 exynos_dsi_dpms(&exynos_dsi_display, DRM_MODE_DPMS_OFF); 1668 exynos_dsi_dpms(display, DRM_MODE_DPMS_OFF);
1665 1669
1666 mipi_dsi_host_unregister(&dsi->dsi_host); 1670 mipi_dsi_host_unregister(&dsi->dsi_host);
1667} 1671}
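These bind/unbind changes replace the file-scope exynos_dsi_display singleton with per-device data fetched via dev_get_drvdata(), so a second controller instance no longer clobbers the first. A minimal userspace sketch of the pattern follows; struct device and the two accessors are simplified stand-ins for the kernel's driver-data API.

#include <stdio.h>

/* Stand-ins for struct device and the drvdata accessors. */
struct device { void *driver_data; };
static void dev_set_drvdata(struct device *dev, void *data) { dev->driver_data = data; }
static void *dev_get_drvdata(const struct device *dev) { return dev->driver_data; }

struct dsi_ctx { int id; };

static void bind(struct device *dev)
{
    struct dsi_ctx *ctx = dev_get_drvdata(dev); /* per-device, not a global */
    printf("bound dsi instance %d\n", ctx->id);
}

int main(void)
{
    struct device dev0, dev1;
    struct dsi_ctx a = { 0 }, b = { 1 };

    dev_set_drvdata(&dev0, &a);  /* what probe() does via platform_set_drvdata() */
    dev_set_drvdata(&dev1, &b);

    bind(&dev0);
    bind(&dev1);  /* a second instance works without touching the first */
    return 0;
}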
@@ -1673,22 +1677,23 @@ static const struct component_ops exynos_dsi_component_ops = {
1673 1677
1674static int exynos_dsi_probe(struct platform_device *pdev) 1678static int exynos_dsi_probe(struct platform_device *pdev)
1675{ 1679{
1680 struct device *dev = &pdev->dev;
1676 struct resource *res; 1681 struct resource *res;
1677 struct exynos_dsi *dsi; 1682 struct exynos_dsi *dsi;
1678 int ret; 1683 int ret;
1679 1684
1680 ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR, 1685 dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
1681 exynos_dsi_display.type); 1686 if (!dsi)
1687 return -ENOMEM;
1688
1689 dsi->display.type = EXYNOS_DISPLAY_TYPE_LCD;
1690 dsi->display.ops = &exynos_dsi_display_ops;
1691
1692 ret = exynos_drm_component_add(dev, EXYNOS_DEVICE_TYPE_CONNECTOR,
1693 dsi->display.type);
1682 if (ret) 1694 if (ret)
1683 return ret; 1695 return ret;
1684 1696
1685 dsi = devm_kzalloc(&pdev->dev, sizeof(*dsi), GFP_KERNEL);
1686 if (!dsi) {
1687 dev_err(&pdev->dev, "failed to allocate dsi object.\n");
1688 ret = -ENOMEM;
1689 goto err_del_component;
1690 }
1691
1692 /* Set to an invalid value, to be checked later */ 1697 /* Set to an invalid value, to be checked later */
1693 dsi->te_gpio = -ENOENT; 1698 dsi->te_gpio = -ENOENT;
1694 1699
@@ -1697,9 +1702,9 @@ static int exynos_dsi_probe(struct platform_device *pdev)
1697 INIT_LIST_HEAD(&dsi->transfer_list); 1702 INIT_LIST_HEAD(&dsi->transfer_list);
1698 1703
1699 dsi->dsi_host.ops = &exynos_dsi_ops; 1704 dsi->dsi_host.ops = &exynos_dsi_ops;
1700 dsi->dsi_host.dev = &pdev->dev; 1705 dsi->dsi_host.dev = dev;
1701 1706
1702 dsi->dev = &pdev->dev; 1707 dsi->dev = dev;
1703 dsi->driver_data = exynos_dsi_get_driver_data(pdev); 1708 dsi->driver_data = exynos_dsi_get_driver_data(pdev);
1704 1709
1705 ret = exynos_dsi_parse_dt(dsi); 1710 ret = exynos_dsi_parse_dt(dsi);
@@ -1708,70 +1713,68 @@ static int exynos_dsi_probe(struct platform_device *pdev)
1708 1713
1709 dsi->supplies[0].supply = "vddcore"; 1714 dsi->supplies[0].supply = "vddcore";
1710 dsi->supplies[1].supply = "vddio"; 1715 dsi->supplies[1].supply = "vddio";
1711 ret = devm_regulator_bulk_get(&pdev->dev, ARRAY_SIZE(dsi->supplies), 1716 ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(dsi->supplies),
1712 dsi->supplies); 1717 dsi->supplies);
1713 if (ret) { 1718 if (ret) {
1714 dev_info(&pdev->dev, "failed to get regulators: %d\n", ret); 1719 dev_info(dev, "failed to get regulators: %d\n", ret);
1715 return -EPROBE_DEFER; 1720 return -EPROBE_DEFER;
1716 } 1721 }
1717 1722
1718 dsi->pll_clk = devm_clk_get(&pdev->dev, "pll_clk"); 1723 dsi->pll_clk = devm_clk_get(dev, "pll_clk");
1719 if (IS_ERR(dsi->pll_clk)) { 1724 if (IS_ERR(dsi->pll_clk)) {
1720 dev_info(&pdev->dev, "failed to get dsi pll input clock\n"); 1725 dev_info(dev, "failed to get dsi pll input clock\n");
1721 ret = PTR_ERR(dsi->pll_clk); 1726 ret = PTR_ERR(dsi->pll_clk);
1722 goto err_del_component; 1727 goto err_del_component;
1723 } 1728 }
1724 1729
1725 dsi->bus_clk = devm_clk_get(&pdev->dev, "bus_clk"); 1730 dsi->bus_clk = devm_clk_get(dev, "bus_clk");
1726 if (IS_ERR(dsi->bus_clk)) { 1731 if (IS_ERR(dsi->bus_clk)) {
1727 dev_info(&pdev->dev, "failed to get dsi bus clock\n"); 1732 dev_info(dev, "failed to get dsi bus clock\n");
1728 ret = PTR_ERR(dsi->bus_clk); 1733 ret = PTR_ERR(dsi->bus_clk);
1729 goto err_del_component; 1734 goto err_del_component;
1730 } 1735 }
1731 1736
1732 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1737 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1733 dsi->reg_base = devm_ioremap_resource(&pdev->dev, res); 1738 dsi->reg_base = devm_ioremap_resource(dev, res);
1734 if (IS_ERR(dsi->reg_base)) { 1739 if (IS_ERR(dsi->reg_base)) {
1735 dev_err(&pdev->dev, "failed to remap io region\n"); 1740 dev_err(dev, "failed to remap io region\n");
1736 ret = PTR_ERR(dsi->reg_base); 1741 ret = PTR_ERR(dsi->reg_base);
1737 goto err_del_component; 1742 goto err_del_component;
1738 } 1743 }
1739 1744
1740 dsi->phy = devm_phy_get(&pdev->dev, "dsim"); 1745 dsi->phy = devm_phy_get(dev, "dsim");
1741 if (IS_ERR(dsi->phy)) { 1746 if (IS_ERR(dsi->phy)) {
1742 dev_info(&pdev->dev, "failed to get dsim phy\n"); 1747 dev_info(dev, "failed to get dsim phy\n");
1743 ret = PTR_ERR(dsi->phy); 1748 ret = PTR_ERR(dsi->phy);
1744 goto err_del_component; 1749 goto err_del_component;
1745 } 1750 }
1746 1751
1747 dsi->irq = platform_get_irq(pdev, 0); 1752 dsi->irq = platform_get_irq(pdev, 0);
1748 if (dsi->irq < 0) { 1753 if (dsi->irq < 0) {
1749 dev_err(&pdev->dev, "failed to request dsi irq resource\n"); 1754 dev_err(dev, "failed to request dsi irq resource\n");
1750 ret = dsi->irq; 1755 ret = dsi->irq;
1751 goto err_del_component; 1756 goto err_del_component;
1752 } 1757 }
1753 1758
1754 irq_set_status_flags(dsi->irq, IRQ_NOAUTOEN); 1759 irq_set_status_flags(dsi->irq, IRQ_NOAUTOEN);
1755 ret = devm_request_threaded_irq(&pdev->dev, dsi->irq, NULL, 1760 ret = devm_request_threaded_irq(dev, dsi->irq, NULL,
1756 exynos_dsi_irq, IRQF_ONESHOT, 1761 exynos_dsi_irq, IRQF_ONESHOT,
1757 dev_name(&pdev->dev), dsi); 1762 dev_name(dev), dsi);
1758 if (ret) { 1763 if (ret) {
1759 dev_err(&pdev->dev, "failed to request dsi irq\n"); 1764 dev_err(dev, "failed to request dsi irq\n");
1760 goto err_del_component; 1765 goto err_del_component;
1761 } 1766 }
1762 1767
1763 exynos_dsi_display.ctx = dsi; 1768 platform_set_drvdata(pdev, &dsi->display);
1764
1765 platform_set_drvdata(pdev, &exynos_dsi_display);
1766 1769
1767 ret = component_add(&pdev->dev, &exynos_dsi_component_ops); 1770 ret = component_add(dev, &exynos_dsi_component_ops);
1768 if (ret) 1771 if (ret)
1769 goto err_del_component; 1772 goto err_del_component;
1770 1773
1771 return ret; 1774 return ret;
1772 1775
1773err_del_component: 1776err_del_component:
1774 exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR); 1777 exynos_drm_component_del(dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
1775 return ret; 1778 return ret;
1776} 1779}
1777 1780
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.h b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
index b7a1620a7e79..26305d8dd93a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
@@ -14,8 +14,6 @@
14#ifndef _EXYNOS_DRM_ENCODER_H_ 14#ifndef _EXYNOS_DRM_ENCODER_H_
15#define _EXYNOS_DRM_ENCODER_H_ 15#define _EXYNOS_DRM_ENCODER_H_
16 16
17struct exynos_drm_manager;
18
19void exynos_drm_encoder_setup(struct drm_device *dev); 17void exynos_drm_encoder_setup(struct drm_device *dev);
20struct drm_encoder *exynos_drm_encoder_create(struct drm_device *dev, 18struct drm_encoder *exynos_drm_encoder_create(struct drm_device *dev,
21 struct exynos_drm_display *mgr, 19 struct exynos_drm_display *mgr,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 085b066a9993..e5810d13bf9c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -84,8 +84,6 @@
84/* FIMD has a total of five hardware windows. */ 84/* FIMD has a total of five hardware windows. */
85#define WINDOWS_NR 5 85#define WINDOWS_NR 5
86 86
87#define get_fimd_manager(mgr) platform_get_drvdata(to_platform_device(dev))
88
89struct fimd_driver_data { 87struct fimd_driver_data {
90 unsigned int timing_base; 88 unsigned int timing_base;
91 unsigned int lcdblk_offset; 89 unsigned int lcdblk_offset;
@@ -96,6 +94,7 @@ struct fimd_driver_data {
96 unsigned int has_clksel:1; 94 unsigned int has_clksel:1;
97 unsigned int has_limited_fmt:1; 95 unsigned int has_limited_fmt:1;
98 unsigned int has_vidoutcon:1; 96 unsigned int has_vidoutcon:1;
97 unsigned int has_vtsel:1;
99}; 98};
100 99
101static struct fimd_driver_data s3c64xx_fimd_driver_data = { 100static struct fimd_driver_data s3c64xx_fimd_driver_data = {
@@ -118,6 +117,17 @@ static struct fimd_driver_data exynos4_fimd_driver_data = {
118 .lcdblk_vt_shift = 10, 117 .lcdblk_vt_shift = 10,
119 .lcdblk_bypass_shift = 1, 118 .lcdblk_bypass_shift = 1,
120 .has_shadowcon = 1, 119 .has_shadowcon = 1,
120 .has_vtsel = 1,
121};
122
123static struct fimd_driver_data exynos4415_fimd_driver_data = {
124 .timing_base = 0x20000,
125 .lcdblk_offset = 0x210,
126 .lcdblk_vt_shift = 10,
127 .lcdblk_bypass_shift = 1,
128 .has_shadowcon = 1,
129 .has_vidoutcon = 1,
130 .has_vtsel = 1,
121}; 131};
122 132
123static struct fimd_driver_data exynos5_fimd_driver_data = { 133static struct fimd_driver_data exynos5_fimd_driver_data = {
@@ -127,6 +137,7 @@ static struct fimd_driver_data exynos5_fimd_driver_data = {
127 .lcdblk_bypass_shift = 15, 137 .lcdblk_bypass_shift = 15,
128 .has_shadowcon = 1, 138 .has_shadowcon = 1,
129 .has_vidoutcon = 1, 139 .has_vidoutcon = 1,
140 .has_vtsel = 1,
130}; 141};
131 142
132struct fimd_win_data { 143struct fimd_win_data {
@@ -146,6 +157,7 @@ struct fimd_win_data {
146}; 157};
147 158
148struct fimd_context { 159struct fimd_context {
160 struct exynos_drm_manager manager;
149 struct device *dev; 161 struct device *dev;
150 struct drm_device *drm_dev; 162 struct drm_device *drm_dev;
151 struct clk *bus_clk; 163 struct clk *bus_clk;
@@ -173,6 +185,11 @@ struct fimd_context {
173 struct exynos_drm_display *display; 185 struct exynos_drm_display *display;
174}; 186};
175 187
188static inline struct fimd_context *mgr_to_fimd(struct exynos_drm_manager *mgr)
189{
190 return container_of(mgr, struct fimd_context, manager);
191}
192
176static const struct of_device_id fimd_driver_dt_match[] = { 193static const struct of_device_id fimd_driver_dt_match[] = {
177 { .compatible = "samsung,s3c6400-fimd", 194 { .compatible = "samsung,s3c6400-fimd",
178 .data = &s3c64xx_fimd_driver_data }, 195 .data = &s3c64xx_fimd_driver_data },
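The new mgr_to_fimd() helper relies on the kernel's container_of() to recover the enclosing fimd_context from the embedded exynos_drm_manager, which is what lets this series drop the old void *ctx back-pointer throughout. A self-contained sketch of the idiom, with the macro spelled out and deliberately simplified struct layouts:

#include <stddef.h>
#include <stdio.h>

/* Simplified container_of(), as found in <linux/kernel.h>. */
#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct exynos_drm_manager { int type; };

struct fimd_context {
    struct exynos_drm_manager manager;  /* embedded, not pointed to */
    int suspended;
};

static struct fimd_context *mgr_to_fimd(struct exynos_drm_manager *mgr)
{
    return container_of(mgr, struct fimd_context, manager);
}

int main(void)
{
    struct fimd_context ctx = { .manager = { .type = 1 }, .suspended = 0 };
    struct exynos_drm_manager *mgr = &ctx.manager;  /* what callbacks receive */

    /* Round trip: from the embedded member back to its container. */
    printf("recovered ctx: %p == %p\n",
           (void *)mgr_to_fimd(mgr), (void *)&ctx);
    return 0;
}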
@@ -180,6 +197,8 @@ static const struct of_device_id fimd_driver_dt_match[] = {
180 .data = &exynos3_fimd_driver_data }, 197 .data = &exynos3_fimd_driver_data },
181 { .compatible = "samsung,exynos4210-fimd", 198 { .compatible = "samsung,exynos4210-fimd",
182 .data = &exynos4_fimd_driver_data }, 199 .data = &exynos4_fimd_driver_data },
200 { .compatible = "samsung,exynos4415-fimd",
201 .data = &exynos4415_fimd_driver_data },
183 { .compatible = "samsung,exynos5250-fimd", 202 { .compatible = "samsung,exynos5250-fimd",
184 .data = &exynos5_fimd_driver_data }, 203 .data = &exynos5_fimd_driver_data },
185 {}, 204 {},
@@ -197,7 +216,7 @@ static inline struct fimd_driver_data *drm_fimd_get_driver_data(
197 216
198static void fimd_wait_for_vblank(struct exynos_drm_manager *mgr) 217static void fimd_wait_for_vblank(struct exynos_drm_manager *mgr)
199{ 218{
200 struct fimd_context *ctx = mgr->ctx; 219 struct fimd_context *ctx = mgr_to_fimd(mgr);
201 220
202 if (ctx->suspended) 221 if (ctx->suspended)
203 return; 222 return;
@@ -214,9 +233,35 @@ static void fimd_wait_for_vblank(struct exynos_drm_manager *mgr)
214 DRM_DEBUG_KMS("vblank wait timed out.\n"); 233 DRM_DEBUG_KMS("vblank wait timed out.\n");
215} 234}
216 235
236static void fimd_enable_video_output(struct fimd_context *ctx, int win,
237 bool enable)
238{
239 u32 val = readl(ctx->regs + WINCON(win));
240
241 if (enable)
242 val |= WINCONx_ENWIN;
243 else
244 val &= ~WINCONx_ENWIN;
245
246 writel(val, ctx->regs + WINCON(win));
247}
248
249static void fimd_enable_shadow_channel_path(struct fimd_context *ctx, int win,
250 bool enable)
251{
252 u32 val = readl(ctx->regs + SHADOWCON);
253
254 if (enable)
255 val |= SHADOWCON_CHx_ENABLE(win);
256 else
257 val &= ~SHADOWCON_CHx_ENABLE(win);
258
259 writel(val, ctx->regs + SHADOWCON);
260}
261
217static void fimd_clear_channel(struct exynos_drm_manager *mgr) 262static void fimd_clear_channel(struct exynos_drm_manager *mgr)
218{ 263{
219 struct fimd_context *ctx = mgr->ctx; 264 struct fimd_context *ctx = mgr_to_fimd(mgr);
220 int win, ch_enabled = 0; 265 int win, ch_enabled = 0;
221 266
222 DRM_DEBUG_KMS("%s\n", __FILE__); 267 DRM_DEBUG_KMS("%s\n", __FILE__);
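fimd_enable_video_output() and fimd_enable_shadow_channel_path() factor the repeated read-modify-write of WINCON/SHADOWCON into single helpers instead of open-coding it at every call site. The sketch below simulates the pattern against a fake register file in ordinary memory; readl()/writel() are shims and the bit layout is a placeholder, not the real FIMD one.

#include <stdint.h>
#include <stdio.h>

#define ENWIN (1u << 0)          /* placeholder for WINCONx_ENWIN */

static uint32_t fake_regs[8];    /* stands in for ioremapped MMIO */

static uint32_t readl(const uint32_t *addr) { return *addr; }
static void writel(uint32_t val, uint32_t *addr) { *addr = val; }

/* One helper owns the read-modify-write sequence. */
static void enable_video_output(int win, int enable)
{
    uint32_t val = readl(&fake_regs[win]);

    if (enable)
        val |= ENWIN;
    else
        val &= ~ENWIN;

    writel(val, &fake_regs[win]);
}

int main(void)
{
    enable_video_output(2, 1);
    printf("win2 = %#x\n", fake_regs[2]);   /* 0x1 */
    enable_video_output(2, 0);
    printf("win2 = %#x\n", fake_regs[2]);   /* 0x0 */
    return 0;
}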
@@ -226,16 +271,12 @@ static void fimd_clear_channel(struct exynos_drm_manager *mgr)
226 u32 val = readl(ctx->regs + WINCON(win)); 271 u32 val = readl(ctx->regs + WINCON(win));
227 272
228 if (val & WINCONx_ENWIN) { 273 if (val & WINCONx_ENWIN) {
229 /* wincon */ 274 fimd_enable_video_output(ctx, win, false);
230 val &= ~WINCONx_ENWIN; 275
231 writel(val, ctx->regs + WINCON(win)); 276 if (ctx->driver_data->has_shadowcon)
232 277 fimd_enable_shadow_channel_path(ctx, win,
233 /* unprotect windows */ 278 false);
234 if (ctx->driver_data->has_shadowcon) { 279
235 val = readl(ctx->regs + SHADOWCON);
236 val &= ~SHADOWCON_CHx_ENABLE(win);
237 writel(val, ctx->regs + SHADOWCON);
238 }
239 ch_enabled = 1; 280 ch_enabled = 1;
240 } 281 }
241 } 282 }
@@ -253,7 +294,7 @@ static void fimd_clear_channel(struct exynos_drm_manager *mgr)
253static int fimd_mgr_initialize(struct exynos_drm_manager *mgr, 294static int fimd_mgr_initialize(struct exynos_drm_manager *mgr,
254 struct drm_device *drm_dev) 295 struct drm_device *drm_dev)
255{ 296{
256 struct fimd_context *ctx = mgr->ctx; 297 struct fimd_context *ctx = mgr_to_fimd(mgr);
257 struct exynos_drm_private *priv; 298 struct exynos_drm_private *priv;
258 priv = drm_dev->dev_private; 299 priv = drm_dev->dev_private;
259 300
@@ -275,7 +316,7 @@ static int fimd_mgr_initialize(struct exynos_drm_manager *mgr,
275 316
276static void fimd_mgr_remove(struct exynos_drm_manager *mgr) 317static void fimd_mgr_remove(struct exynos_drm_manager *mgr)
277{ 318{
278 struct fimd_context *ctx = mgr->ctx; 319 struct fimd_context *ctx = mgr_to_fimd(mgr);
279 320
280 /* detach this sub driver from iommu mapping if supported. */ 321 /* detach this sub driver from iommu mapping if supported. */
281 if (is_drm_iommu_supported(ctx->drm_dev)) 322 if (is_drm_iommu_supported(ctx->drm_dev))
@@ -315,14 +356,14 @@ static bool fimd_mode_fixup(struct exynos_drm_manager *mgr,
315static void fimd_mode_set(struct exynos_drm_manager *mgr, 356static void fimd_mode_set(struct exynos_drm_manager *mgr,
316 const struct drm_display_mode *in_mode) 357 const struct drm_display_mode *in_mode)
317{ 358{
318 struct fimd_context *ctx = mgr->ctx; 359 struct fimd_context *ctx = mgr_to_fimd(mgr);
319 360
320 drm_mode_copy(&ctx->mode, in_mode); 361 drm_mode_copy(&ctx->mode, in_mode);
321} 362}
322 363
323static void fimd_commit(struct exynos_drm_manager *mgr) 364static void fimd_commit(struct exynos_drm_manager *mgr)
324{ 365{
325 struct fimd_context *ctx = mgr->ctx; 366 struct fimd_context *ctx = mgr_to_fimd(mgr);
326 struct drm_display_mode *mode = &ctx->mode; 367 struct drm_display_mode *mode = &ctx->mode;
327 struct fimd_driver_data *driver_data = ctx->driver_data; 368 struct fimd_driver_data *driver_data = ctx->driver_data;
328 void *timing_base = ctx->regs + driver_data->timing_base; 369 void *timing_base = ctx->regs + driver_data->timing_base;
@@ -343,7 +384,8 @@ static void fimd_commit(struct exynos_drm_manager *mgr)
343 writel(0, timing_base + I80IFCONFBx(0)); 384 writel(0, timing_base + I80IFCONFBx(0));
344 385
345 /* set video type selection to I80 interface */ 386 /* set video type selection to I80 interface */
346 if (ctx->sysreg && regmap_update_bits(ctx->sysreg, 387 if (driver_data->has_vtsel && ctx->sysreg &&
388 regmap_update_bits(ctx->sysreg,
347 driver_data->lcdblk_offset, 389 driver_data->lcdblk_offset,
348 0x3 << driver_data->lcdblk_vt_shift, 390 0x3 << driver_data->lcdblk_vt_shift,
349 0x1 << driver_data->lcdblk_vt_shift)) { 391 0x1 << driver_data->lcdblk_vt_shift)) {
@@ -421,7 +463,7 @@ static void fimd_commit(struct exynos_drm_manager *mgr)
421 463
422static int fimd_enable_vblank(struct exynos_drm_manager *mgr) 464static int fimd_enable_vblank(struct exynos_drm_manager *mgr)
423{ 465{
424 struct fimd_context *ctx = mgr->ctx; 466 struct fimd_context *ctx = mgr_to_fimd(mgr);
425 u32 val; 467 u32 val;
426 468
427 if (ctx->suspended) 469 if (ctx->suspended)
@@ -431,12 +473,19 @@ static int fimd_enable_vblank(struct exynos_drm_manager *mgr)
431 val = readl(ctx->regs + VIDINTCON0); 473 val = readl(ctx->regs + VIDINTCON0);
432 474
433 val |= VIDINTCON0_INT_ENABLE; 475 val |= VIDINTCON0_INT_ENABLE;
434 val |= VIDINTCON0_INT_FRAME;
435 476
436 val &= ~VIDINTCON0_FRAMESEL0_MASK; 477 if (ctx->i80_if) {
437 val |= VIDINTCON0_FRAMESEL0_VSYNC; 478 val |= VIDINTCON0_INT_I80IFDONE;
438 val &= ~VIDINTCON0_FRAMESEL1_MASK; 479 val |= VIDINTCON0_INT_SYSMAINCON;
439 val |= VIDINTCON0_FRAMESEL1_NONE; 480 val &= ~VIDINTCON0_INT_SYSSUBCON;
481 } else {
482 val |= VIDINTCON0_INT_FRAME;
483
484 val &= ~VIDINTCON0_FRAMESEL0_MASK;
485 val |= VIDINTCON0_FRAMESEL0_VSYNC;
486 val &= ~VIDINTCON0_FRAMESEL1_MASK;
487 val |= VIDINTCON0_FRAMESEL1_NONE;
488 }
440 489
441 writel(val, ctx->regs + VIDINTCON0); 490 writel(val, ctx->regs + VIDINTCON0);
442 } 491 }
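The hunk above selects different VIDINTCON0 interrupt sources depending on whether the panel is driven over the i80 command-mode bus or a conventional RGB video interface. Written as a pure function it looks like the sketch below; the bit values are illustrative placeholders only, not the actual Exynos register encoding.

#include <stdint.h>
#include <stdio.h>

/* Illustrative placeholder bits -- not the real VIDINTCON0 layout. */
#define INT_ENABLE       (1u << 0)
#define INT_FRAME        (1u << 1)
#define INT_I80IFDONE    (1u << 2)
#define INT_SYSMAINCON   (1u << 3)
#define INT_SYSSUBCON    (1u << 4)
#define FRAMESEL0_MASK   (3u << 5)
#define FRAMESEL0_VSYNC  (1u << 5)

static uint32_t vblank_int_bits(uint32_t val, int i80_if)
{
    val |= INT_ENABLE;

    if (i80_if) {
        /* Command mode: completion of an i80 transfer is the event. */
        val |= INT_I80IFDONE | INT_SYSMAINCON;
        val &= ~INT_SYSSUBCON;
    } else {
        /* Video mode: a per-frame interrupt aligned to vsync. */
        val |= INT_FRAME;
        val = (val & ~FRAMESEL0_MASK) | FRAMESEL0_VSYNC;
    }
    return val;
}

int main(void)
{
    printf("i80: %#x, rgb: %#x\n",
           vblank_int_bits(0, 1), vblank_int_bits(0, 0));
    return 0;
}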
@@ -446,7 +495,7 @@ static int fimd_enable_vblank(struct exynos_drm_manager *mgr)
446 495
447static void fimd_disable_vblank(struct exynos_drm_manager *mgr) 496static void fimd_disable_vblank(struct exynos_drm_manager *mgr)
448{ 497{
449 struct fimd_context *ctx = mgr->ctx; 498 struct fimd_context *ctx = mgr_to_fimd(mgr);
450 u32 val; 499 u32 val;
451 500
452 if (ctx->suspended) 501 if (ctx->suspended)
@@ -455,9 +504,15 @@ static void fimd_disable_vblank(struct exynos_drm_manager *mgr)
455 if (test_and_clear_bit(0, &ctx->irq_flags)) { 504 if (test_and_clear_bit(0, &ctx->irq_flags)) {
456 val = readl(ctx->regs + VIDINTCON0); 505 val = readl(ctx->regs + VIDINTCON0);
457 506
458 val &= ~VIDINTCON0_INT_FRAME;
459 val &= ~VIDINTCON0_INT_ENABLE; 507 val &= ~VIDINTCON0_INT_ENABLE;
460 508
509 if (ctx->i80_if) {
510 val &= ~VIDINTCON0_INT_I80IFDONE;
511 val &= ~VIDINTCON0_INT_SYSMAINCON;
512 val &= ~VIDINTCON0_INT_SYSSUBCON;
513 } else
514 val &= ~VIDINTCON0_INT_FRAME;
515
461 writel(val, ctx->regs + VIDINTCON0); 516 writel(val, ctx->regs + VIDINTCON0);
462 } 517 }
463} 518}
@@ -465,7 +520,7 @@ static void fimd_disable_vblank(struct exynos_drm_manager *mgr)
465static void fimd_win_mode_set(struct exynos_drm_manager *mgr, 520static void fimd_win_mode_set(struct exynos_drm_manager *mgr,
466 struct exynos_drm_overlay *overlay) 521 struct exynos_drm_overlay *overlay)
467{ 522{
468 struct fimd_context *ctx = mgr->ctx; 523 struct fimd_context *ctx = mgr_to_fimd(mgr);
469 struct fimd_win_data *win_data; 524 struct fimd_win_data *win_data;
470 int win; 525 int win;
471 unsigned long offset; 526 unsigned long offset;
@@ -623,7 +678,7 @@ static void fimd_shadow_protect_win(struct fimd_context *ctx,
623 678
624static void fimd_win_commit(struct exynos_drm_manager *mgr, int zpos) 679static void fimd_win_commit(struct exynos_drm_manager *mgr, int zpos)
625{ 680{
626 struct fimd_context *ctx = mgr->ctx; 681 struct fimd_context *ctx = mgr_to_fimd(mgr);
627 struct fimd_win_data *win_data; 682 struct fimd_win_data *win_data;
628 int win = zpos; 683 int win = zpos;
629 unsigned long val, alpha, size; 684 unsigned long val, alpha, size;
@@ -730,20 +785,14 @@ static void fimd_win_commit(struct exynos_drm_manager *mgr, int zpos)
730 if (win != 0) 785 if (win != 0)
731 fimd_win_set_colkey(ctx, win); 786 fimd_win_set_colkey(ctx, win);
732 787
733 /* wincon */ 788 fimd_enable_video_output(ctx, win, true);
734 val = readl(ctx->regs + WINCON(win)); 789
735 val |= WINCONx_ENWIN; 790 if (ctx->driver_data->has_shadowcon)
736 writel(val, ctx->regs + WINCON(win)); 791 fimd_enable_shadow_channel_path(ctx, win, true);
737 792
738 /* Enable DMA channel and unprotect windows */ 793 /* Enable DMA channel and unprotect windows */
739 fimd_shadow_protect_win(ctx, win, false); 794 fimd_shadow_protect_win(ctx, win, false);
740 795
741 if (ctx->driver_data->has_shadowcon) {
742 val = readl(ctx->regs + SHADOWCON);
743 val |= SHADOWCON_CHx_ENABLE(win);
744 writel(val, ctx->regs + SHADOWCON);
745 }
746
747 win_data->enabled = true; 796 win_data->enabled = true;
748 797
749 if (ctx->i80_if) 798 if (ctx->i80_if)
@@ -752,10 +801,9 @@ static void fimd_win_commit(struct exynos_drm_manager *mgr, int zpos)
752 801
753static void fimd_win_disable(struct exynos_drm_manager *mgr, int zpos) 802static void fimd_win_disable(struct exynos_drm_manager *mgr, int zpos)
754{ 803{
755 struct fimd_context *ctx = mgr->ctx; 804 struct fimd_context *ctx = mgr_to_fimd(mgr);
756 struct fimd_win_data *win_data; 805 struct fimd_win_data *win_data;
757 int win = zpos; 806 int win = zpos;
758 u32 val;
759 807
760 if (win == DEFAULT_ZPOS) 808 if (win == DEFAULT_ZPOS)
761 win = ctx->default_win; 809 win = ctx->default_win;
@@ -774,18 +822,12 @@ static void fimd_win_disable(struct exynos_drm_manager *mgr, int zpos)
774 /* protect windows */ 822 /* protect windows */
775 fimd_shadow_protect_win(ctx, win, true); 823 fimd_shadow_protect_win(ctx, win, true);
776 824
777 /* wincon */ 825 fimd_enable_video_output(ctx, win, false);
778 val = readl(ctx->regs + WINCON(win));
779 val &= ~WINCONx_ENWIN;
780 writel(val, ctx->regs + WINCON(win));
781 826
782 /* unprotect windows */ 827 if (ctx->driver_data->has_shadowcon)
783 if (ctx->driver_data->has_shadowcon) { 828 fimd_enable_shadow_channel_path(ctx, win, false);
784 val = readl(ctx->regs + SHADOWCON);
785 val &= ~SHADOWCON_CHx_ENABLE(win);
786 writel(val, ctx->regs + SHADOWCON);
787 }
788 829
830 /* unprotect windows */
789 fimd_shadow_protect_win(ctx, win, false); 831 fimd_shadow_protect_win(ctx, win, false);
790 832
791 win_data->enabled = false; 833 win_data->enabled = false;
@@ -793,7 +835,7 @@ static void fimd_win_disable(struct exynos_drm_manager *mgr, int zpos)
793 835
794static void fimd_window_suspend(struct exynos_drm_manager *mgr) 836static void fimd_window_suspend(struct exynos_drm_manager *mgr)
795{ 837{
796 struct fimd_context *ctx = mgr->ctx; 838 struct fimd_context *ctx = mgr_to_fimd(mgr);
797 struct fimd_win_data *win_data; 839 struct fimd_win_data *win_data;
798 int i; 840 int i;
799 841
@@ -803,12 +845,11 @@ static void fimd_window_suspend(struct exynos_drm_manager *mgr)
803 if (win_data->enabled) 845 if (win_data->enabled)
804 fimd_win_disable(mgr, i); 846 fimd_win_disable(mgr, i);
805 } 847 }
806 fimd_wait_for_vblank(mgr);
807} 848}
808 849
809static void fimd_window_resume(struct exynos_drm_manager *mgr) 850static void fimd_window_resume(struct exynos_drm_manager *mgr)
810{ 851{
811 struct fimd_context *ctx = mgr->ctx; 852 struct fimd_context *ctx = mgr_to_fimd(mgr);
812 struct fimd_win_data *win_data; 853 struct fimd_win_data *win_data;
813 int i; 854 int i;
814 855
@@ -821,7 +862,7 @@ static void fimd_window_resume(struct exynos_drm_manager *mgr)
821 862
822static void fimd_apply(struct exynos_drm_manager *mgr) 863static void fimd_apply(struct exynos_drm_manager *mgr)
823{ 864{
824 struct fimd_context *ctx = mgr->ctx; 865 struct fimd_context *ctx = mgr_to_fimd(mgr);
825 struct fimd_win_data *win_data; 866 struct fimd_win_data *win_data;
826 int i; 867 int i;
827 868
@@ -838,7 +879,7 @@ static void fimd_apply(struct exynos_drm_manager *mgr)
838 879
839static int fimd_poweron(struct exynos_drm_manager *mgr) 880static int fimd_poweron(struct exynos_drm_manager *mgr)
840{ 881{
841 struct fimd_context *ctx = mgr->ctx; 882 struct fimd_context *ctx = mgr_to_fimd(mgr);
842 int ret; 883 int ret;
843 884
844 if (!ctx->suspended) 885 if (!ctx->suspended)
@@ -886,7 +927,7 @@ bus_clk_err:
886 927
887static int fimd_poweroff(struct exynos_drm_manager *mgr) 928static int fimd_poweroff(struct exynos_drm_manager *mgr)
888{ 929{
889 struct fimd_context *ctx = mgr->ctx; 930 struct fimd_context *ctx = mgr_to_fimd(mgr);
890 931
891 if (ctx->suspended) 932 if (ctx->suspended)
892 return 0; 933 return 0;
@@ -928,39 +969,41 @@ static void fimd_dpms(struct exynos_drm_manager *mgr, int mode)
928 969
929static void fimd_trigger(struct device *dev) 970static void fimd_trigger(struct device *dev)
930{ 971{
931 struct exynos_drm_manager *mgr = get_fimd_manager(dev); 972 struct fimd_context *ctx = dev_get_drvdata(dev);
932 struct fimd_context *ctx = mgr->ctx;
933 struct fimd_driver_data *driver_data = ctx->driver_data; 973 struct fimd_driver_data *driver_data = ctx->driver_data;
934 void *timing_base = ctx->regs + driver_data->timing_base; 974 void *timing_base = ctx->regs + driver_data->timing_base;
935 u32 reg; 975 u32 reg;
936 976
937 atomic_set(&ctx->triggering, 1); 977 /*
978 * Skip triggering if we are already in the triggering state, because
979 * multiple trigger requests can cause a panel reset.
980 */
981 if (atomic_read(&ctx->triggering))
982 return;
938 983
939 reg = readl(ctx->regs + VIDINTCON0); 984 /* Enters triggering mode */
940 reg |= (VIDINTCON0_INT_ENABLE | VIDINTCON0_INT_I80IFDONE | 985 atomic_set(&ctx->triggering, 1);
941 VIDINTCON0_INT_SYSMAINCON);
942 writel(reg, ctx->regs + VIDINTCON0);
943 986
944 reg = readl(timing_base + TRIGCON); 987 reg = readl(timing_base + TRIGCON);
945 reg |= (TRGMODE_I80_RGB_ENABLE_I80 | SWTRGCMD_I80_RGB_ENABLE); 988 reg |= (TRGMODE_I80_RGB_ENABLE_I80 | SWTRGCMD_I80_RGB_ENABLE);
946 writel(reg, timing_base + TRIGCON); 989 writel(reg, timing_base + TRIGCON);
990
991 /*
992 * Exit triggering mode if vblank is not enabled yet; when the
993 * VIDINTCON0 register is not set, the hardware cannot leave triggering mode.
994 */
995 if (!test_bit(0, &ctx->irq_flags))
996 atomic_set(&ctx->triggering, 0);
947} 997}
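Moving the "already triggering" check into fimd_trigger() itself makes every caller safe against double triggering, which the comment notes can reset the panel. The kernel code uses a plain atomic_read()/atomic_set() pair and relies on callers being serialized; the userspace sketch below uses C11 atomic_exchange() instead, so the guard is race-free on its own.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int triggering;

static void hw_kick_trigger(void) { puts("TRIGCON kicked"); }

static void trigger(int vblank_enabled)
{
    /* Refuse re-entry: a second trigger while one is in flight
     * could reset the panel. */
    if (atomic_exchange(&triggering, 1))
        return;

    hw_kick_trigger();

    /* Without a vblank interrupt armed there is no completion event
     * to clear the flag later, so drop it immediately. */
    if (!vblank_enabled)
        atomic_store(&triggering, 0);
}

/* What the frame-done interrupt handler would do. */
static void irq_frame_done(void) { atomic_store(&triggering, 0); }

int main(void)
{
    trigger(1);   /* kicks */
    trigger(1);   /* suppressed: still in flight */
    irq_frame_done();
    trigger(1);   /* kicks again */
    return 0;
}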
948 998
949static void fimd_te_handler(struct exynos_drm_manager *mgr) 999static void fimd_te_handler(struct exynos_drm_manager *mgr)
950{ 1000{
951 struct fimd_context *ctx = mgr->ctx; 1001 struct fimd_context *ctx = mgr_to_fimd(mgr);
952 1002
953 /* Check whether the crtc has already been detached from the encoder */ 1003 /* Check whether the crtc has already been detached from the encoder */
954 if (ctx->pipe < 0 || !ctx->drm_dev) 1004 if (ctx->pipe < 0 || !ctx->drm_dev)
955 return; 1005 return;
956 1006
957 /*
958 * Skips to trigger if in triggering state, because multiple triggering
959 * requests can cause panel reset.
960 */
961 if (atomic_read(&ctx->triggering))
962 return;
963
964 /* 1007 /*
965 * If there is a page flip request, triggers and handles the page flip 1008 * If there is a page flip request, triggers and handles the page flip
966 * event so that current fb can be updated into panel GRAM. 1009 * event so that current fb can be updated into panel GRAM.
@@ -972,10 +1015,10 @@ static void fimd_te_handler(struct exynos_drm_manager *mgr)
972 if (atomic_read(&ctx->wait_vsync_event)) { 1015 if (atomic_read(&ctx->wait_vsync_event)) {
973 atomic_set(&ctx->wait_vsync_event, 0); 1016 atomic_set(&ctx->wait_vsync_event, 0);
974 wake_up(&ctx->wait_vsync_queue); 1017 wake_up(&ctx->wait_vsync_queue);
975
976 if (!atomic_read(&ctx->triggering))
977 drm_handle_vblank(ctx->drm_dev, ctx->pipe);
978 } 1018 }
1019
1020 if (test_bit(0, &ctx->irq_flags))
1021 drm_handle_vblank(ctx->drm_dev, ctx->pipe);
979} 1022}
980 1023
981static struct exynos_drm_manager_ops fimd_manager_ops = { 1024static struct exynos_drm_manager_ops fimd_manager_ops = {
@@ -992,11 +1035,6 @@ static struct exynos_drm_manager_ops fimd_manager_ops = {
992 .te_handler = fimd_te_handler, 1035 .te_handler = fimd_te_handler,
993}; 1036};
994 1037
995static struct exynos_drm_manager fimd_manager = {
996 .type = EXYNOS_DISPLAY_TYPE_LCD,
997 .ops = &fimd_manager_ops,
998};
999
1000static irqreturn_t fimd_irq_handler(int irq, void *dev_id) 1038static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
1001{ 1039{
1002 struct fimd_context *ctx = (struct fimd_context *)dev_id; 1040 struct fimd_context *ctx = (struct fimd_context *)dev_id;
@@ -1013,16 +1051,10 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
1013 goto out; 1051 goto out;
1014 1052
1015 if (ctx->i80_if) { 1053 if (ctx->i80_if) {
1016 /* unset I80 frame done interrupt */ 1054 exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
1017 val = readl(ctx->regs + VIDINTCON0);
1018 val &= ~(VIDINTCON0_INT_I80IFDONE | VIDINTCON0_INT_SYSMAINCON);
1019 writel(val, ctx->regs + VIDINTCON0);
1020 1055
1021 /* exit triggering mode */ 1056 /* Exits triggering mode */
1022 atomic_set(&ctx->triggering, 0); 1057 atomic_set(&ctx->triggering, 0);
1023
1024 drm_handle_vblank(ctx->drm_dev, ctx->pipe);
1025 exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
1026 } else { 1058 } else {
1027 drm_handle_vblank(ctx->drm_dev, ctx->pipe); 1059 drm_handle_vblank(ctx->drm_dev, ctx->pipe);
1028 exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe); 1060 exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe);
@@ -1040,11 +1072,11 @@ out:
1040 1072
1041static int fimd_bind(struct device *dev, struct device *master, void *data) 1073static int fimd_bind(struct device *dev, struct device *master, void *data)
1042{ 1074{
1043 struct fimd_context *ctx = fimd_manager.ctx; 1075 struct fimd_context *ctx = dev_get_drvdata(dev);
1044 struct drm_device *drm_dev = data; 1076 struct drm_device *drm_dev = data;
1045 1077
1046 fimd_mgr_initialize(&fimd_manager, drm_dev); 1078 fimd_mgr_initialize(&ctx->manager, drm_dev);
1047 exynos_drm_crtc_create(&fimd_manager); 1079 exynos_drm_crtc_create(&ctx->manager);
1048 if (ctx->display) 1080 if (ctx->display)
1049 exynos_drm_create_enc_conn(drm_dev, ctx->display); 1081 exynos_drm_create_enc_conn(drm_dev, ctx->display);
1050 1082
@@ -1055,15 +1087,14 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
1055static void fimd_unbind(struct device *dev, struct device *master, 1087static void fimd_unbind(struct device *dev, struct device *master,
1056 void *data) 1088 void *data)
1057{ 1089{
1058 struct exynos_drm_manager *mgr = dev_get_drvdata(dev); 1090 struct fimd_context *ctx = dev_get_drvdata(dev);
1059 struct fimd_context *ctx = fimd_manager.ctx;
1060 1091
1061 fimd_dpms(mgr, DRM_MODE_DPMS_OFF); 1092 fimd_dpms(&ctx->manager, DRM_MODE_DPMS_OFF);
1062 1093
1063 if (ctx->display) 1094 if (ctx->display)
1064 exynos_dpi_remove(dev); 1095 exynos_dpi_remove(ctx->display);
1065 1096
1066 fimd_mgr_remove(mgr); 1097 fimd_mgr_remove(&ctx->manager);
1067} 1098}
1068 1099
1069static const struct component_ops fimd_component_ops = { 1100static const struct component_ops fimd_component_ops = {
@@ -1079,21 +1110,20 @@ static int fimd_probe(struct platform_device *pdev)
1079 struct resource *res; 1110 struct resource *res;
1080 int ret = -EINVAL; 1111 int ret = -EINVAL;
1081 1112
1082 ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC, 1113 if (!dev->of_node)
1083 fimd_manager.type); 1114 return -ENODEV;
1084 if (ret)
1085 return ret;
1086
1087 if (!dev->of_node) {
1088 ret = -ENODEV;
1089 goto err_del_component;
1090 }
1091 1115
1092 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); 1116 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
1093 if (!ctx) { 1117 if (!ctx)
1094 ret = -ENOMEM; 1118 return -ENOMEM;
1095 goto err_del_component; 1119
1096 } 1120 ctx->manager.type = EXYNOS_DISPLAY_TYPE_LCD;
1121 ctx->manager.ops = &fimd_manager_ops;
1122
1123 ret = exynos_drm_component_add(dev, EXYNOS_DEVICE_TYPE_CRTC,
1124 ctx->manager.type);
1125 if (ret)
1126 return ret;
1097 1127
1098 ctx->dev = dev; 1128 ctx->dev = dev;
1099 ctx->suspended = true; 1129 ctx->suspended = true;
@@ -1182,27 +1212,27 @@ static int fimd_probe(struct platform_device *pdev)
1182 init_waitqueue_head(&ctx->wait_vsync_queue); 1212 init_waitqueue_head(&ctx->wait_vsync_queue);
1183 atomic_set(&ctx->wait_vsync_event, 0); 1213 atomic_set(&ctx->wait_vsync_event, 0);
1184 1214
1185 platform_set_drvdata(pdev, &fimd_manager); 1215 platform_set_drvdata(pdev, ctx);
1186
1187 fimd_manager.ctx = ctx;
1188 1216
1189 ctx->display = exynos_dpi_probe(dev); 1217 ctx->display = exynos_dpi_probe(dev);
1190 if (IS_ERR(ctx->display)) 1218 if (IS_ERR(ctx->display)) {
1191 return PTR_ERR(ctx->display); 1219 ret = PTR_ERR(ctx->display);
1220 goto err_del_component;
1221 }
1192 1222
1193 pm_runtime_enable(&pdev->dev); 1223 pm_runtime_enable(dev);
1194 1224
1195 ret = component_add(&pdev->dev, &fimd_component_ops); 1225 ret = component_add(dev, &fimd_component_ops);
1196 if (ret) 1226 if (ret)
1197 goto err_disable_pm_runtime; 1227 goto err_disable_pm_runtime;
1198 1228
1199 return ret; 1229 return ret;
1200 1230
1201err_disable_pm_runtime: 1231err_disable_pm_runtime:
1202 pm_runtime_disable(&pdev->dev); 1232 pm_runtime_disable(dev);
1203 1233
1204err_del_component: 1234err_del_component:
1205 exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC); 1235 exynos_drm_component_del(dev, EXYNOS_DEVICE_TYPE_CRTC);
1206 return ret; 1236 return ret;
1207} 1237}
1208 1238
diff --git a/drivers/gpu/drm/exynos/exynos_drm_iommu.h b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
index 72376d41c512..35d25889b476 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_iommu.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_iommu.h
@@ -40,7 +40,6 @@ static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
40 40
41#else 41#else
42 42
43struct dma_iommu_mapping;
44static inline int drm_create_iommu_mapping(struct drm_device *drm_dev) 43static inline int drm_create_iommu_mapping(struct drm_device *drm_dev)
45{ 44{
46 return 0; 45 return 0;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index 00d74b18f7cb..d5ad17dfc24d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -426,18 +426,21 @@ int exynos_drm_ipp_set_property(struct drm_device *drm_dev, void *data,
426 c_node->start_work = ipp_create_cmd_work(); 426 c_node->start_work = ipp_create_cmd_work();
427 if (IS_ERR(c_node->start_work)) { 427 if (IS_ERR(c_node->start_work)) {
428 DRM_ERROR("failed to create start work.\n"); 428 DRM_ERROR("failed to create start work.\n");
429 ret = PTR_ERR(c_node->start_work);
429 goto err_remove_id; 430 goto err_remove_id;
430 } 431 }
431 432
432 c_node->stop_work = ipp_create_cmd_work(); 433 c_node->stop_work = ipp_create_cmd_work();
433 if (IS_ERR(c_node->stop_work)) { 434 if (IS_ERR(c_node->stop_work)) {
434 DRM_ERROR("failed to create stop work.\n"); 435 DRM_ERROR("failed to create stop work.\n");
436 ret = PTR_ERR(c_node->stop_work);
435 goto err_free_start; 437 goto err_free_start;
436 } 438 }
437 439
438 c_node->event_work = ipp_create_event_work(); 440 c_node->event_work = ipp_create_event_work();
439 if (IS_ERR(c_node->event_work)) { 441 if (IS_ERR(c_node->event_work)) {
440 DRM_ERROR("failed to create event work.\n"); 442 DRM_ERROR("failed to create event work.\n");
443 ret = PTR_ERR(c_node->event_work);
441 goto err_free_stop; 444 goto err_free_stop;
442 } 445 }
443 446
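Before this ipp fix, the goto paths left ret holding the value from an earlier call, so a failed ipp_create_cmd_work() could be reported as success. The sketch below mimics the kernel's ERR_PTR()/PTR_ERR()/IS_ERR() encoding in userspace to show the corrected pattern; the helpers are simplified stand-ins for <linux/err.h>.

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

/* Simplified stand-ins for the <linux/err.h> helpers. */
static void *ERR_PTR(long error) { return (void *)error; }
static long PTR_ERR(const void *ptr) { return (long)ptr; }
static int IS_ERR(const void *ptr)
{
    return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *create_work(int fail)
{
    static int dummy;
    return fail ? ERR_PTR(-ENOMEM) : (void *)&dummy;
}

static int set_property(void)
{
    void *start_work, *stop_work;
    int ret = 0;

    start_work = create_work(0);
    if (IS_ERR(start_work))
        return PTR_ERR(start_work);

    stop_work = create_work(1);
    if (IS_ERR(stop_work)) {
        ret = PTR_ERR(stop_work);  /* the assignment the fix adds */
        goto err_free_start;
    }
    return 0;

err_free_start:
    /* the real driver frees start_work here */
    return ret;
}

int main(void)
{
    printf("set_property() = %d (expect %d)\n", set_property(), -ENOMEM);
    return 0;
}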
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 50faf913e574..45899fb63272 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -14,6 +14,7 @@
14 14
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/platform_device.h> 16#include <linux/platform_device.h>
17#include <linux/component.h>
17 18
18#include <drm/exynos_drm.h> 19#include <drm/exynos_drm.h>
19 20
@@ -28,7 +29,6 @@
28/* vidi has a total of three virtual windows. */ 29/* vidi has a total of three virtual windows. */
29#define WINDOWS_NR 3 30#define WINDOWS_NR 3
30 31
31#define get_vidi_mgr(dev) platform_get_drvdata(to_platform_device(dev))
32#define ctx_from_connector(c) container_of(c, struct vidi_context, \ 32#define ctx_from_connector(c) container_of(c, struct vidi_context, \
33 connector) 33 connector)
34 34
@@ -47,11 +47,13 @@ struct vidi_win_data {
47}; 47};
48 48
49struct vidi_context { 49struct vidi_context {
50 struct exynos_drm_manager manager;
51 struct exynos_drm_display display;
52 struct platform_device *pdev;
50 struct drm_device *drm_dev; 53 struct drm_device *drm_dev;
51 struct drm_crtc *crtc; 54 struct drm_crtc *crtc;
52 struct drm_encoder *encoder; 55 struct drm_encoder *encoder;
53 struct drm_connector connector; 56 struct drm_connector connector;
54 struct exynos_drm_subdrv subdrv;
55 struct vidi_win_data win_data[WINDOWS_NR]; 57 struct vidi_win_data win_data[WINDOWS_NR];
56 struct edid *raw_edid; 58 struct edid *raw_edid;
57 unsigned int clkdiv; 59 unsigned int clkdiv;
@@ -66,6 +68,16 @@ struct vidi_context {
66 int pipe; 68 int pipe;
67}; 69};
68 70
71static inline struct vidi_context *manager_to_vidi(struct exynos_drm_manager *m)
72{
73 return container_of(m, struct vidi_context, manager);
74}
75
76static inline struct vidi_context *display_to_vidi(struct exynos_drm_display *d)
77{
78 return container_of(d, struct vidi_context, display);
79}
80
69static const char fake_edid_info[] = { 81static const char fake_edid_info[] = {
70 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x4c, 0x2d, 0x05, 0x05, 82 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x4c, 0x2d, 0x05, 0x05,
71 0x00, 0x00, 0x00, 0x00, 0x30, 0x12, 0x01, 0x03, 0x80, 0x10, 0x09, 0x78, 83 0x00, 0x00, 0x00, 0x00, 0x30, 0x12, 0x01, 0x03, 0x80, 0x10, 0x09, 0x78,
@@ -93,7 +105,7 @@ static const char fake_edid_info[] = {
93 105
94static void vidi_apply(struct exynos_drm_manager *mgr) 106static void vidi_apply(struct exynos_drm_manager *mgr)
95{ 107{
96 struct vidi_context *ctx = mgr->ctx; 108 struct vidi_context *ctx = manager_to_vidi(mgr);
97 struct exynos_drm_manager_ops *mgr_ops = mgr->ops; 109 struct exynos_drm_manager_ops *mgr_ops = mgr->ops;
98 struct vidi_win_data *win_data; 110 struct vidi_win_data *win_data;
99 int i; 111 int i;
@@ -110,7 +122,7 @@ static void vidi_apply(struct exynos_drm_manager *mgr)
110 122
111static void vidi_commit(struct exynos_drm_manager *mgr) 123static void vidi_commit(struct exynos_drm_manager *mgr)
112{ 124{
113 struct vidi_context *ctx = mgr->ctx; 125 struct vidi_context *ctx = manager_to_vidi(mgr);
114 126
115 if (ctx->suspended) 127 if (ctx->suspended)
116 return; 128 return;
@@ -118,7 +130,7 @@ static void vidi_commit(struct exynos_drm_manager *mgr)
118 130
119static int vidi_enable_vblank(struct exynos_drm_manager *mgr) 131static int vidi_enable_vblank(struct exynos_drm_manager *mgr)
120{ 132{
121 struct vidi_context *ctx = mgr->ctx; 133 struct vidi_context *ctx = manager_to_vidi(mgr);
122 134
123 if (ctx->suspended) 135 if (ctx->suspended)
124 return -EPERM; 136 return -EPERM;
@@ -140,7 +152,7 @@ static int vidi_enable_vblank(struct exynos_drm_manager *mgr)
140 152
141static void vidi_disable_vblank(struct exynos_drm_manager *mgr) 153static void vidi_disable_vblank(struct exynos_drm_manager *mgr)
142{ 154{
143 struct vidi_context *ctx = mgr->ctx; 155 struct vidi_context *ctx = manager_to_vidi(mgr);
144 156
145 if (ctx->suspended) 157 if (ctx->suspended)
146 return; 158 return;
@@ -152,7 +164,7 @@ static void vidi_disable_vblank(struct exynos_drm_manager *mgr)
152static void vidi_win_mode_set(struct exynos_drm_manager *mgr, 164static void vidi_win_mode_set(struct exynos_drm_manager *mgr,
153 struct exynos_drm_overlay *overlay) 165 struct exynos_drm_overlay *overlay)
154{ 166{
155 struct vidi_context *ctx = mgr->ctx; 167 struct vidi_context *ctx = manager_to_vidi(mgr);
156 struct vidi_win_data *win_data; 168 struct vidi_win_data *win_data;
157 int win; 169 int win;
158 unsigned long offset; 170 unsigned long offset;
@@ -204,7 +216,7 @@ static void vidi_win_mode_set(struct exynos_drm_manager *mgr,
204 216
205static void vidi_win_commit(struct exynos_drm_manager *mgr, int zpos) 217static void vidi_win_commit(struct exynos_drm_manager *mgr, int zpos)
206{ 218{
207 struct vidi_context *ctx = mgr->ctx; 219 struct vidi_context *ctx = manager_to_vidi(mgr);
208 struct vidi_win_data *win_data; 220 struct vidi_win_data *win_data;
209 int win = zpos; 221 int win = zpos;
210 222
@@ -229,7 +241,7 @@ static void vidi_win_commit(struct exynos_drm_manager *mgr, int zpos)
229 241
230static void vidi_win_disable(struct exynos_drm_manager *mgr, int zpos) 242static void vidi_win_disable(struct exynos_drm_manager *mgr, int zpos)
231{ 243{
232 struct vidi_context *ctx = mgr->ctx; 244 struct vidi_context *ctx = manager_to_vidi(mgr);
233 struct vidi_win_data *win_data; 245 struct vidi_win_data *win_data;
234 int win = zpos; 246 int win = zpos;
235 247
@@ -247,7 +259,7 @@ static void vidi_win_disable(struct exynos_drm_manager *mgr, int zpos)
247 259
248static int vidi_power_on(struct exynos_drm_manager *mgr, bool enable) 260static int vidi_power_on(struct exynos_drm_manager *mgr, bool enable)
249{ 261{
250 struct vidi_context *ctx = mgr->ctx; 262 struct vidi_context *ctx = manager_to_vidi(mgr);
251 263
252 DRM_DEBUG_KMS("%s\n", __FILE__); 264 DRM_DEBUG_KMS("%s\n", __FILE__);
253 265
@@ -271,7 +283,7 @@ static int vidi_power_on(struct exynos_drm_manager *mgr, bool enable)
271 283
272static void vidi_dpms(struct exynos_drm_manager *mgr, int mode) 284static void vidi_dpms(struct exynos_drm_manager *mgr, int mode)
273{ 285{
274 struct vidi_context *ctx = mgr->ctx; 286 struct vidi_context *ctx = manager_to_vidi(mgr);
275 287
276 DRM_DEBUG_KMS("%d\n", mode); 288 DRM_DEBUG_KMS("%d\n", mode);
277 289
@@ -297,7 +309,7 @@ static void vidi_dpms(struct exynos_drm_manager *mgr, int mode)
297static int vidi_mgr_initialize(struct exynos_drm_manager *mgr, 309static int vidi_mgr_initialize(struct exynos_drm_manager *mgr,
298 struct drm_device *drm_dev) 310 struct drm_device *drm_dev)
299{ 311{
300 struct vidi_context *ctx = mgr->ctx; 312 struct vidi_context *ctx = manager_to_vidi(mgr);
301 struct exynos_drm_private *priv = drm_dev->dev_private; 313 struct exynos_drm_private *priv = drm_dev->dev_private;
302 314
303 mgr->drm_dev = ctx->drm_dev = drm_dev; 315 mgr->drm_dev = ctx->drm_dev = drm_dev;
@@ -316,11 +328,6 @@ static struct exynos_drm_manager_ops vidi_manager_ops = {
316 .win_disable = vidi_win_disable, 328 .win_disable = vidi_win_disable,
317}; 329};
318 330
319static struct exynos_drm_manager vidi_manager = {
320 .type = EXYNOS_DISPLAY_TYPE_VIDI,
321 .ops = &vidi_manager_ops,
322};
323
324static void vidi_fake_vblank_handler(struct work_struct *work) 331static void vidi_fake_vblank_handler(struct work_struct *work)
325{ 332{
326 struct vidi_context *ctx = container_of(work, struct vidi_context, 333 struct vidi_context *ctx = container_of(work, struct vidi_context,
@@ -349,9 +356,8 @@ static void vidi_fake_vblank_handler(struct work_struct *work)
349static int vidi_show_connection(struct device *dev, 356static int vidi_show_connection(struct device *dev,
350 struct device_attribute *attr, char *buf) 357 struct device_attribute *attr, char *buf)
351{ 358{
359 struct vidi_context *ctx = dev_get_drvdata(dev);
352 int rc; 360 int rc;
353 struct exynos_drm_manager *mgr = get_vidi_mgr(dev);
354 struct vidi_context *ctx = mgr->ctx;
355 361
356 mutex_lock(&ctx->lock); 362 mutex_lock(&ctx->lock);
357 363
@@ -366,8 +372,7 @@ static int vidi_store_connection(struct device *dev,
366 struct device_attribute *attr, 372 struct device_attribute *attr,
367 const char *buf, size_t len) 373 const char *buf, size_t len)
368{ 374{
369 struct exynos_drm_manager *mgr = get_vidi_mgr(dev); 375 struct vidi_context *ctx = dev_get_drvdata(dev);
370 struct vidi_context *ctx = mgr->ctx;
371 int ret; 376 int ret;
372 377
373 ret = kstrtoint(buf, 0, &ctx->connected); 378 ret = kstrtoint(buf, 0, &ctx->connected);
@@ -420,7 +425,7 @@ int vidi_connection_ioctl(struct drm_device *drm_dev, void *data,
420 display = exynos_drm_get_display(encoder); 425 display = exynos_drm_get_display(encoder);
421 426
422 if (display->type == EXYNOS_DISPLAY_TYPE_VIDI) { 427 if (display->type == EXYNOS_DISPLAY_TYPE_VIDI) {
423 ctx = display->ctx; 428 ctx = display_to_vidi(display);
424 break; 429 break;
425 } 430 }
426 } 431 }
@@ -530,7 +535,7 @@ static struct drm_connector_helper_funcs vidi_connector_helper_funcs = {
530static int vidi_create_connector(struct exynos_drm_display *display, 535static int vidi_create_connector(struct exynos_drm_display *display,
531 struct drm_encoder *encoder) 536 struct drm_encoder *encoder)
532{ 537{
533 struct vidi_context *ctx = display->ctx; 538 struct vidi_context *ctx = display_to_vidi(display);
534 struct drm_connector *connector = &ctx->connector; 539 struct drm_connector *connector = &ctx->connector;
535 int ret; 540 int ret;
536 541
@@ -556,27 +561,22 @@ static struct exynos_drm_display_ops vidi_display_ops = {
556 .create_connector = vidi_create_connector, 561 .create_connector = vidi_create_connector,
557}; 562};
558 563
559static struct exynos_drm_display vidi_display = { 564static int vidi_bind(struct device *dev, struct device *master, void *data)
560 .type = EXYNOS_DISPLAY_TYPE_VIDI,
561 .ops = &vidi_display_ops,
562};
563
564static int vidi_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
565{ 565{
566 struct exynos_drm_manager *mgr = get_vidi_mgr(dev); 566 struct vidi_context *ctx = dev_get_drvdata(dev);
567 struct vidi_context *ctx = mgr->ctx; 567 struct drm_device *drm_dev = data;
568 struct drm_crtc *crtc = ctx->crtc; 568 struct drm_crtc *crtc = ctx->crtc;
569 int ret; 569 int ret;
570 570
571 vidi_mgr_initialize(mgr, drm_dev); 571 vidi_mgr_initialize(&ctx->manager, drm_dev);
572 572
573 ret = exynos_drm_crtc_create(&vidi_manager); 573 ret = exynos_drm_crtc_create(&ctx->manager);
574 if (ret) { 574 if (ret) {
575 DRM_ERROR("failed to create crtc.\n"); 575 DRM_ERROR("failed to create crtc.\n");
576 return ret; 576 return ret;
577 } 577 }
578 578
579 ret = exynos_drm_create_enc_conn(drm_dev, &vidi_display); 579 ret = exynos_drm_create_enc_conn(drm_dev, &ctx->display);
580 if (ret) { 580 if (ret) {
581 crtc->funcs->destroy(crtc); 581 crtc->funcs->destroy(crtc);
582 DRM_ERROR("failed to create encoder and connector.\n"); 582 DRM_ERROR("failed to create encoder and connector.\n");
@@ -586,9 +586,18 @@ static int vidi_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
586 return 0; 586 return 0;
587} 587}
588 588
589
590static void vidi_unbind(struct device *dev, struct device *master, void *data)
591{
592}
593
594static const struct component_ops vidi_component_ops = {
595 .bind = vidi_bind,
596 .unbind = vidi_unbind,
597};
598
589static int vidi_probe(struct platform_device *pdev) 599static int vidi_probe(struct platform_device *pdev)
590{ 600{
591 struct exynos_drm_subdrv *subdrv;
592 struct vidi_context *ctx; 601 struct vidi_context *ctx;
593 int ret; 602 int ret;
594 603
@@ -596,40 +605,54 @@ static int vidi_probe(struct platform_device *pdev)
596 if (!ctx) 605 if (!ctx)
597 return -ENOMEM; 606 return -ENOMEM;
598 607
608 ctx->manager.type = EXYNOS_DISPLAY_TYPE_VIDI;
609 ctx->manager.ops = &vidi_manager_ops;
610 ctx->display.type = EXYNOS_DISPLAY_TYPE_VIDI;
611 ctx->display.ops = &vidi_display_ops;
599 ctx->default_win = 0; 612 ctx->default_win = 0;
613 ctx->pdev = pdev;
600 614
601 INIT_WORK(&ctx->work, vidi_fake_vblank_handler); 615 ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC,
602 616 ctx->manager.type);
603 vidi_manager.ctx = ctx; 617 if (ret)
604 vidi_display.ctx = ctx; 618 return ret;
605 619
606 mutex_init(&ctx->lock); 620 ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR,
621 ctx->display.type);
622 if (ret)
623 goto err_del_crtc_component;
607 624
608 platform_set_drvdata(pdev, &vidi_manager); 625 INIT_WORK(&ctx->work, vidi_fake_vblank_handler);
609 626
610 subdrv = &ctx->subdrv; 627 mutex_init(&ctx->lock);
611 subdrv->dev = &pdev->dev;
612 subdrv->probe = vidi_subdrv_probe;
613 628
614 ret = exynos_drm_subdrv_register(subdrv); 629 platform_set_drvdata(pdev, ctx);
615 if (ret < 0) {
616 dev_err(&pdev->dev, "failed to register drm vidi device\n");
617 return ret;
618 }
619 630
620 ret = device_create_file(&pdev->dev, &dev_attr_connection); 631 ret = device_create_file(&pdev->dev, &dev_attr_connection);
621 if (ret < 0) { 632 if (ret < 0) {
622 exynos_drm_subdrv_unregister(subdrv); 633 DRM_ERROR("failed to create connection sysfs.\n");
623 DRM_INFO("failed to create connection sysfs.\n"); 634 goto err_del_conn_component;
624 } 635 }
625 636
626 return 0; 637 ret = component_add(&pdev->dev, &vidi_component_ops);
638 if (ret)
639 goto err_remove_file;
640
641 return ret;
642
643err_remove_file:
644 device_remove_file(&pdev->dev, &dev_attr_connection);
645err_del_conn_component:
646 exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
647err_del_crtc_component:
648 exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC);
649
650 return ret;
627} 651}
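The reworked vidi_probe() registers its CRTC component, its connector component, the sysfs file, and finally the component itself, and its error ladder undoes exactly the steps already completed, in reverse order. That is the standard kernel unwinding idiom; a compact userspace sketch with stand-in register/unregister pairs:

#include <stdio.h>

static int step(const char *what, int fail)
{
    if (fail) { printf("FAIL %s\n", what); return -1; }
    printf("did  %s\n", what);
    return 0;
}

static void undo(const char *what) { printf("undo %s\n", what); }

/* fail_at selects which step fails: 1..4, or 0 for full success. */
static int probe(int fail_at)
{
    int ret;

    if ((ret = step("crtc component", fail_at == 1)))
        return ret;                    /* nothing to unwind yet */
    if ((ret = step("connector component", fail_at == 2)))
        goto err_del_crtc;
    if ((ret = step("sysfs file", fail_at == 3)))
        goto err_del_conn;
    if ((ret = step("component_add", fail_at == 4)))
        goto err_remove_file;
    return 0;

err_remove_file:
    undo("sysfs file");
err_del_conn:
    undo("connector component");
err_del_crtc:
    undo("crtc component");
    return ret;
}

int main(void)
{
    probe(3);   /* sysfs creation fails: both components are removed */
    return 0;
}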
628 652
629static int vidi_remove(struct platform_device *pdev) 653static int vidi_remove(struct platform_device *pdev)
630{ 654{
631 struct exynos_drm_manager *mgr = platform_get_drvdata(pdev); 655 struct vidi_context *ctx = platform_get_drvdata(pdev);
632 struct vidi_context *ctx = mgr->ctx;
633 656
634 if (ctx->raw_edid != (struct edid *)fake_edid_info) { 657 if (ctx->raw_edid != (struct edid *)fake_edid_info) {
635 kfree(ctx->raw_edid); 658 kfree(ctx->raw_edid);
@@ -638,6 +661,10 @@ static int vidi_remove(struct platform_device *pdev)
638 return -EINVAL; 661 return -EINVAL;
639 } 662 }
640 663
664 component_del(&pdev->dev, &vidi_component_ops);
665 exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR);
666 exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC);
667
641 return 0; 668 return 0;
642} 669}
643 670
@@ -668,12 +695,19 @@ int exynos_drm_probe_vidi(void)
668 return ret; 695 return ret;
669} 696}
670 697
698static int exynos_drm_remove_vidi_device(struct device *dev, void *data)
699{
700 platform_device_unregister(to_platform_device(dev));
701
702 return 0;
703}
704
671void exynos_drm_remove_vidi(void) 705void exynos_drm_remove_vidi(void)
672{ 706{
673 struct vidi_context *ctx = vidi_manager.ctx; 707 int ret = driver_for_each_device(&vidi_driver.driver, NULL, NULL,
674 struct exynos_drm_subdrv *subdrv = &ctx->subdrv; 708 exynos_drm_remove_vidi_device);
675 struct platform_device *pdev = to_platform_device(subdrv->dev); 709 /* silence compiler warning */
710 (void)ret;
676 711
677 platform_driver_unregister(&vidi_driver); 712 platform_driver_unregister(&vidi_driver);
678 platform_device_unregister(pdev);
679} 713}
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 563a19e62eb2..5765a161abdd 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -49,7 +49,6 @@
49#include <linux/gpio.h> 49#include <linux/gpio.h>
50#include <media/s5p_hdmi.h> 50#include <media/s5p_hdmi.h>
51 51
52#define get_hdmi_display(dev) platform_get_drvdata(to_platform_device(dev))
53#define ctx_from_connector(c) container_of(c, struct hdmi_context, connector) 52#define ctx_from_connector(c) container_of(c, struct hdmi_context, connector)
54 53
55#define HOTPLUG_DEBOUNCE_MS 1100 54#define HOTPLUG_DEBOUNCE_MS 1100
@@ -182,6 +181,7 @@ struct hdmi_conf_regs {
182}; 181};
183 182
 struct hdmi_context {
+	struct exynos_drm_display display;
 	struct device *dev;
 	struct drm_device *drm_dev;
 	struct drm_connector connector;
@@ -213,6 +213,11 @@ struct hdmi_context {
 	enum hdmi_type type;
 };
 
+static inline struct hdmi_context *display_to_hdmi(struct exynos_drm_display *d)
+{
+	return container_of(d, struct hdmi_context, display);
+}
+
 struct hdmiphy_config {
 	int pixel_clock;
 	u8 conf[32];
@@ -1123,7 +1128,7 @@ static struct drm_connector_helper_funcs hdmi_connector_helper_funcs = {
 static int hdmi_create_connector(struct exynos_drm_display *display,
 			struct drm_encoder *encoder)
 {
-	struct hdmi_context *hdata = display->ctx;
+	struct hdmi_context *hdata = display_to_hdmi(display);
 	struct drm_connector *connector = &hdata->connector;
 	int ret;
 
@@ -2000,7 +2005,7 @@ static void hdmi_v14_mode_set(struct hdmi_context *hdata,
 static void hdmi_mode_set(struct exynos_drm_display *display,
 			struct drm_display_mode *mode)
 {
-	struct hdmi_context *hdata = display->ctx;
+	struct hdmi_context *hdata = display_to_hdmi(display);
 	struct drm_display_mode *m = mode;
 
 	DRM_DEBUG_KMS("xres=%d, yres=%d, refresh=%d, intl=%s\n",
@@ -2019,7 +2024,7 @@ static void hdmi_mode_set(struct exynos_drm_display *display,
 
 static void hdmi_commit(struct exynos_drm_display *display)
 {
-	struct hdmi_context *hdata = display->ctx;
+	struct hdmi_context *hdata = display_to_hdmi(display);
 
 	mutex_lock(&hdata->hdmi_mutex);
 	if (!hdata->powered) {
@@ -2033,7 +2038,7 @@ static void hdmi_commit(struct exynos_drm_display *display)
 
 static void hdmi_poweron(struct exynos_drm_display *display)
 {
-	struct hdmi_context *hdata = display->ctx;
+	struct hdmi_context *hdata = display_to_hdmi(display);
 	struct hdmi_resources *res = &hdata->res;
 
 	mutex_lock(&hdata->hdmi_mutex);
@@ -2064,7 +2069,7 @@ static void hdmi_poweron(struct exynos_drm_display *display)
 
 static void hdmi_poweroff(struct exynos_drm_display *display)
 {
-	struct hdmi_context *hdata = display->ctx;
+	struct hdmi_context *hdata = display_to_hdmi(display);
 	struct hdmi_resources *res = &hdata->res;
 
 	mutex_lock(&hdata->hdmi_mutex);
@@ -2099,7 +2104,7 @@ out:
 
 static void hdmi_dpms(struct exynos_drm_display *display, int mode)
 {
-	struct hdmi_context *hdata = display->ctx;
+	struct hdmi_context *hdata = display_to_hdmi(display);
 	struct drm_encoder *encoder = hdata->encoder;
 	struct drm_crtc *crtc = encoder->crtc;
 	struct drm_crtc_helper_funcs *funcs = NULL;
@@ -2143,11 +2148,6 @@ static struct exynos_drm_display_ops hdmi_display_ops = {
 	.commit = hdmi_commit,
 };
 
-static struct exynos_drm_display hdmi_display = {
-	.type = EXYNOS_DISPLAY_TYPE_HDMI,
-	.ops = &hdmi_display_ops,
-};
-
 static void hdmi_hotplug_work_func(struct work_struct *work)
 {
 	struct hdmi_context *hdata;
@@ -2302,12 +2302,11 @@ MODULE_DEVICE_TABLE (of, hdmi_match_types);
 static int hdmi_bind(struct device *dev, struct device *master, void *data)
 {
 	struct drm_device *drm_dev = data;
-	struct hdmi_context *hdata;
+	struct hdmi_context *hdata = dev_get_drvdata(dev);
 
-	hdata = hdmi_display.ctx;
 	hdata->drm_dev = drm_dev;
 
-	return exynos_drm_create_enc_conn(drm_dev, &hdmi_display);
+	return exynos_drm_create_enc_conn(drm_dev, &hdata->display);
 }
 
 static void hdmi_unbind(struct device *dev, struct device *master, void *data)
@@ -2349,31 +2348,28 @@ static int hdmi_probe(struct platform_device *pdev)
 	struct resource *res;
 	int ret;
 
-	ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR,
-					hdmi_display.type);
-	if (ret)
-		return ret;
-
-	if (!dev->of_node) {
-		ret = -ENODEV;
-		goto err_del_component;
-	}
+	if (!dev->of_node)
+		return -ENODEV;
 
 	pdata = drm_hdmi_dt_parse_pdata(dev);
-	if (!pdata) {
-		ret = -EINVAL;
-		goto err_del_component;
-	}
+	if (!pdata)
+		return -EINVAL;
 
 	hdata = devm_kzalloc(dev, sizeof(struct hdmi_context), GFP_KERNEL);
-	if (!hdata) {
-		ret = -ENOMEM;
-		goto err_del_component;
-	}
+	if (!hdata)
+		return -ENOMEM;
+
+	hdata->display.type = EXYNOS_DISPLAY_TYPE_HDMI;
+	hdata->display.ops = &hdmi_display_ops;
+
+	ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CONNECTOR,
+					hdata->display.type);
+	if (ret)
+		return ret;
 
 	mutex_init(&hdata->hdmi_mutex);
 
-	platform_set_drvdata(pdev, &hdmi_display);
+	platform_set_drvdata(pdev, hdata);
 
 	match = of_match_node(hdmi_match_types, dev->of_node);
 	if (!match) {
@@ -2485,7 +2481,6 @@ out_get_phy_port:
 	}
 
 	pm_runtime_enable(dev);
-	hdmi_display.ctx = hdata;
 
 	ret = component_add(&pdev->dev, &hdmi_component_ops);
 	if (ret)
@@ -2510,7 +2505,7 @@ err_del_component:
 
 static int hdmi_remove(struct platform_device *pdev)
 {
-	struct hdmi_context *hdata = hdmi_display.ctx;
+	struct hdmi_context *hdata = platform_get_drvdata(pdev);
 
 	cancel_delayed_work_sync(&hdata->hotplug_work);
 
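The exynos_hdmi conversion above replaces the file-scope hdmi_display object and its ->ctx back-pointer with an exynos_drm_display embedded in hdmi_context and recovered through container_of(). A minimal standalone sketch of that pattern, with generic stand-in names rather than the driver's own types:

	#include <stddef.h>

	/* Simplified container_of(); the kernel's version adds type checking. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct display {			/* stands in for exynos_drm_display */
		int type;
	};

	struct context {			/* stands in for hdmi_context */
		struct display display;		/* embedded: no void *ctx needed */
		int powered;
	};

	static inline struct context *display_to_context(struct display *d)
	{
		return container_of(d, struct context, display);
	}

Embedding the core-visible object also lets each device instance own its own display, instead of every instance sharing one static object.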
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index a41c84ee3a2d..820b76234ef4 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -40,8 +40,6 @@
 #include "exynos_drm_iommu.h"
 #include "exynos_mixer.h"
 
-#define get_mixer_manager(dev)	platform_get_drvdata(to_platform_device(dev))
-
 #define MIXER_WIN_NR		3
 #define MIXER_DEFAULT_WIN	0
 
@@ -86,6 +84,7 @@ enum mixer_version_id {
 };
 
 struct mixer_context {
+	struct exynos_drm_manager manager;
 	struct platform_device *pdev;
 	struct device *dev;
 	struct drm_device *drm_dev;
@@ -104,6 +103,11 @@ struct mixer_context {
 	atomic_t wait_vsync_event;
 };
 
+static inline struct mixer_context *mgr_to_mixer(struct exynos_drm_manager *mgr)
+{
+	return container_of(mgr, struct mixer_context, manager);
+}
+
 struct mixer_drv_data {
 	enum mixer_version_id version;
 	bool is_vp_enabled;
@@ -854,7 +858,7 @@ static int mixer_initialize(struct exynos_drm_manager *mgr,
 			struct drm_device *drm_dev)
 {
 	int ret;
-	struct mixer_context *mixer_ctx = mgr->ctx;
+	struct mixer_context *mixer_ctx = mgr_to_mixer(mgr);
 	struct exynos_drm_private *priv;
 	priv = drm_dev->dev_private;
 
@@ -885,7 +889,7 @@ static int mixer_initialize(struct exynos_drm_manager *mgr,
 
 static void mixer_mgr_remove(struct exynos_drm_manager *mgr)
 {
-	struct mixer_context *mixer_ctx = mgr->ctx;
+	struct mixer_context *mixer_ctx = mgr_to_mixer(mgr);
 
 	if (is_drm_iommu_supported(mixer_ctx->drm_dev))
 		drm_iommu_detach_device(mixer_ctx->drm_dev, mixer_ctx->dev);
@@ -893,7 +897,7 @@ static void mixer_mgr_remove(struct exynos_drm_manager *mgr)
 
 static int mixer_enable_vblank(struct exynos_drm_manager *mgr)
 {
-	struct mixer_context *mixer_ctx = mgr->ctx;
+	struct mixer_context *mixer_ctx = mgr_to_mixer(mgr);
 	struct mixer_resources *res = &mixer_ctx->mixer_res;
 
 	if (!mixer_ctx->powered) {
@@ -910,7 +914,7 @@ static int mixer_enable_vblank(struct exynos_drm_manager *mgr)
 
 static void mixer_disable_vblank(struct exynos_drm_manager *mgr)
 {
-	struct mixer_context *mixer_ctx = mgr->ctx;
+	struct mixer_context *mixer_ctx = mgr_to_mixer(mgr);
 	struct mixer_resources *res = &mixer_ctx->mixer_res;
 
 	/* disable vsync interrupt */
@@ -920,7 +924,7 @@ static void mixer_disable_vblank(struct exynos_drm_manager *mgr)
 static void mixer_win_mode_set(struct exynos_drm_manager *mgr,
 			struct exynos_drm_overlay *overlay)
 {
-	struct mixer_context *mixer_ctx = mgr->ctx;
+	struct mixer_context *mixer_ctx = mgr_to_mixer(mgr);
 	struct hdmi_win_data *win_data;
 	int win;
 
@@ -971,7 +975,7 @@ static void mixer_win_mode_set(struct exynos_drm_manager *mgr,
 
 static void mixer_win_commit(struct exynos_drm_manager *mgr, int zpos)
 {
-	struct mixer_context *mixer_ctx = mgr->ctx;
+	struct mixer_context *mixer_ctx = mgr_to_mixer(mgr);
 	int win = zpos == DEFAULT_ZPOS ? MIXER_DEFAULT_WIN : zpos;
 
 	DRM_DEBUG_KMS("win: %d\n", win);
@@ -993,7 +997,7 @@ static void mixer_win_commit(struct exynos_drm_manager *mgr, int zpos)
 
 static void mixer_win_disable(struct exynos_drm_manager *mgr, int zpos)
 {
-	struct mixer_context *mixer_ctx = mgr->ctx;
+	struct mixer_context *mixer_ctx = mgr_to_mixer(mgr);
 	struct mixer_resources *res = &mixer_ctx->mixer_res;
 	int win = zpos == DEFAULT_ZPOS ? MIXER_DEFAULT_WIN : zpos;
 	unsigned long flags;
@@ -1021,7 +1025,7 @@ static void mixer_win_disable(struct exynos_drm_manager *mgr, int zpos)
 
 static void mixer_wait_for_vblank(struct exynos_drm_manager *mgr)
 {
-	struct mixer_context *mixer_ctx = mgr->ctx;
+	struct mixer_context *mixer_ctx = mgr_to_mixer(mgr);
 
 	mutex_lock(&mixer_ctx->mixer_mutex);
 	if (!mixer_ctx->powered) {
@@ -1048,7 +1052,7 @@ static void mixer_wait_for_vblank(struct exynos_drm_manager *mgr)
 
 static void mixer_window_suspend(struct exynos_drm_manager *mgr)
 {
-	struct mixer_context *ctx = mgr->ctx;
+	struct mixer_context *ctx = mgr_to_mixer(mgr);
 	struct hdmi_win_data *win_data;
 	int i;
 
@@ -1062,7 +1066,7 @@ static void mixer_window_suspend(struct exynos_drm_manager *mgr)
 
 static void mixer_window_resume(struct exynos_drm_manager *mgr)
 {
-	struct mixer_context *ctx = mgr->ctx;
+	struct mixer_context *ctx = mgr_to_mixer(mgr);
 	struct hdmi_win_data *win_data;
 	int i;
 
@@ -1077,7 +1081,7 @@ static void mixer_window_resume(struct exynos_drm_manager *mgr)
 
 static void mixer_poweron(struct exynos_drm_manager *mgr)
 {
-	struct mixer_context *ctx = mgr->ctx;
+	struct mixer_context *ctx = mgr_to_mixer(mgr);
 	struct mixer_resources *res = &ctx->mixer_res;
 
 	mutex_lock(&ctx->mixer_mutex);
@@ -1111,7 +1115,7 @@ static void mixer_poweron(struct exynos_drm_manager *mgr)
 
 static void mixer_poweroff(struct exynos_drm_manager *mgr)
 {
-	struct mixer_context *ctx = mgr->ctx;
+	struct mixer_context *ctx = mgr_to_mixer(mgr);
 	struct mixer_resources *res = &ctx->mixer_res;
 
 	mutex_lock(&ctx->mixer_mutex);
@@ -1187,11 +1191,6 @@ static struct exynos_drm_manager_ops mixer_manager_ops = {
 	.win_disable = mixer_win_disable,
 };
 
-static struct exynos_drm_manager mixer_manager = {
-	.type = EXYNOS_DISPLAY_TYPE_HDMI,
-	.ops = &mixer_manager_ops,
-};
-
 static struct mixer_drv_data exynos5420_mxr_drv_data = {
 	.version = MXR_VER_128_0_0_184,
 	.is_vp_enabled = 0,
@@ -1249,48 +1248,17 @@ MODULE_DEVICE_TABLE(of, mixer_match_types);
 
 static int mixer_bind(struct device *dev, struct device *manager, void *data)
 {
-	struct platform_device *pdev = to_platform_device(dev);
+	struct mixer_context *ctx = dev_get_drvdata(dev);
 	struct drm_device *drm_dev = data;
-	struct mixer_context *ctx;
-	struct mixer_drv_data *drv;
 	int ret;
 
-	dev_info(dev, "probe start\n");
-
-	ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
-	if (!ctx) {
-		DRM_ERROR("failed to alloc mixer context.\n");
-		return -ENOMEM;
-	}
-
-	mutex_init(&ctx->mixer_mutex);
-
-	if (dev->of_node) {
-		const struct of_device_id *match;
-		match = of_match_node(mixer_match_types, dev->of_node);
-		drv = (struct mixer_drv_data *)match->data;
-	} else {
-		drv = (struct mixer_drv_data *)
-			platform_get_device_id(pdev)->driver_data;
-	}
-
-	ctx->pdev = pdev;
-	ctx->dev = dev;
-	ctx->vp_enabled = drv->is_vp_enabled;
-	ctx->has_sclk = drv->has_sclk;
-	ctx->mxr_ver = drv->version;
-	init_waitqueue_head(&ctx->wait_vsync_queue);
-	atomic_set(&ctx->wait_vsync_event, 0);
-
-	mixer_manager.ctx = ctx;
-	ret = mixer_initialize(&mixer_manager, drm_dev);
+	ret = mixer_initialize(&ctx->manager, drm_dev);
 	if (ret)
 		return ret;
 
-	platform_set_drvdata(pdev, &mixer_manager);
-	ret = exynos_drm_crtc_create(&mixer_manager);
+	ret = exynos_drm_crtc_create(&ctx->manager);
 	if (ret) {
-		mixer_mgr_remove(&mixer_manager);
+		mixer_mgr_remove(&ctx->manager);
 		return ret;
 	}
 
@@ -1301,11 +1269,9 @@ static int mixer_bind(struct device *dev, struct device *manager, void *data)
 
 static void mixer_unbind(struct device *dev, struct device *master, void *data)
 {
-	struct exynos_drm_manager *mgr = dev_get_drvdata(dev);
+	struct mixer_context *ctx = dev_get_drvdata(dev);
 
-	dev_info(dev, "remove successful\n");
-
-	mixer_mgr_remove(mgr);
+	mixer_mgr_remove(&ctx->manager);
 
 	pm_runtime_disable(dev);
 }
@@ -1317,22 +1283,62 @@ static const struct component_ops mixer_component_ops = {
 
 static int mixer_probe(struct platform_device *pdev)
 {
+	struct device *dev = &pdev->dev;
+	struct mixer_drv_data *drv;
+	struct mixer_context *ctx;
 	int ret;
 
+	ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
+	if (!ctx) {
+		DRM_ERROR("failed to alloc mixer context.\n");
+		return -ENOMEM;
+	}
+
+	mutex_init(&ctx->mixer_mutex);
+
+	ctx->manager.type = EXYNOS_DISPLAY_TYPE_HDMI;
+	ctx->manager.ops = &mixer_manager_ops;
+
+	if (dev->of_node) {
+		const struct of_device_id *match;
+
+		match = of_match_node(mixer_match_types, dev->of_node);
+		drv = (struct mixer_drv_data *)match->data;
+	} else {
+		drv = (struct mixer_drv_data *)
+			platform_get_device_id(pdev)->driver_data;
+	}
+
+	ctx->pdev = pdev;
+	ctx->dev = dev;
+	ctx->vp_enabled = drv->is_vp_enabled;
+	ctx->has_sclk = drv->has_sclk;
+	ctx->mxr_ver = drv->version;
+	init_waitqueue_head(&ctx->wait_vsync_queue);
+	atomic_set(&ctx->wait_vsync_event, 0);
+
+	platform_set_drvdata(pdev, ctx);
+
 	ret = exynos_drm_component_add(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC,
-					mixer_manager.type);
+					ctx->manager.type);
 	if (ret)
 		return ret;
 
 	ret = component_add(&pdev->dev, &mixer_component_ops);
-	if (ret)
+	if (ret) {
 		exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC);
+		return ret;
+	}
+
+	pm_runtime_enable(dev);
 
 	return ret;
 }
 
 static int mixer_remove(struct platform_device *pdev)
 {
+	pm_runtime_disable(&pdev->dev);
+
 	component_del(&pdev->dev, &mixer_component_ops);
 	exynos_drm_component_del(&pdev->dev, EXYNOS_DEVICE_TYPE_CRTC);
 
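The mixer conversion follows the same container_of() shape and additionally moves everything that does not need a struct drm_device out of the component bind callback into platform probe, storing the context itself (not a global manager) as drvdata. A hedged sketch of that probe/bind split, using a hypothetical driver "foo" rather than the mixer's real symbols:

	/* probe: allocate and initialize, then register with the component core */
	static int foo_probe(struct platform_device *pdev)
	{
		struct foo_context *ctx;

		ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
		if (!ctx)
			return -ENOMEM;

		platform_set_drvdata(pdev, ctx);	/* context, not a global */
		return component_add(&pdev->dev, &foo_component_ops);
	}

	/* bind: only the drm_device-dependent setup remains here */
	static int foo_bind(struct device *dev, struct device *master, void *data)
	{
		struct foo_context *ctx = dev_get_drvdata(dev);
		struct drm_device *drm = data;

		return foo_register_with_drm(ctx, drm);	/* hypothetical helper */
	}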
diff --git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile
index b15315576376..190e55f2f891 100644
--- a/drivers/gpu/drm/gma500/Makefile
+++ b/drivers/gpu/drm/gma500/Makefile
@@ -39,6 +39,7 @@ gma500_gfx-$(CONFIG_DRM_GMA3600) += cdv_device.o \
 gma500_gfx-$(CONFIG_DRM_GMA600) += oaktrail_device.o \
	  oaktrail_crtc.o \
	  oaktrail_lvds.o \
+	  oaktrail_lvds_i2c.o \
	  oaktrail_hdmi.o \
	  oaktrail_hdmi_i2c.o
 
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index 9f158eab517a..0fafb8e2483a 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -37,6 +37,201 @@
 #include "gma_display.h"
 #include <drm/drm_dp_helper.h>
 
+/**
+ * struct i2c_algo_dp_aux_data - driver interface structure for i2c over dp
+ *				 aux algorithm
+ * @running: set by the algo indicating whether an i2c is ongoing or whether
+ *	     the i2c bus is quiescent
+ * @address: i2c target address for the currently ongoing transfer
+ * @aux_ch: driver callback to transfer a single byte of the i2c payload
+ */
+struct i2c_algo_dp_aux_data {
+	bool running;
+	u16 address;
+	int (*aux_ch) (struct i2c_adapter *adapter,
+		       int mode, uint8_t write_byte,
+		       uint8_t *read_byte);
+};
+
+/* Run a single AUX_CH I2C transaction, writing/reading data as necessary */
+static int
+i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
+			    uint8_t write_byte, uint8_t *read_byte)
+{
+	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+	int ret;
+
+	ret = (*algo_data->aux_ch)(adapter, mode,
+				   write_byte, read_byte);
+	return ret;
+}
+
+/*
+ * I2C over AUX CH
+ */
+
+/*
+ * Send the address. If the I2C link is running, this 'restarts'
+ * the connection with the new address, this is used for doing
+ * a write followed by a read (as needed for DDC)
+ */
+static int
+i2c_algo_dp_aux_address(struct i2c_adapter *adapter, u16 address, bool reading)
+{
+	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+	int mode = MODE_I2C_START;
+	int ret;
+
+	if (reading)
+		mode |= MODE_I2C_READ;
+	else
+		mode |= MODE_I2C_WRITE;
+	algo_data->address = address;
+	algo_data->running = true;
+	ret = i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
+	return ret;
+}
+
+/*
+ * Stop the I2C transaction. This closes out the link, sending
+ * a bare address packet with the MOT bit turned off
+ */
+static void
+i2c_algo_dp_aux_stop(struct i2c_adapter *adapter, bool reading)
+{
+	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+	int mode = MODE_I2C_STOP;
+
+	if (reading)
+		mode |= MODE_I2C_READ;
+	else
+		mode |= MODE_I2C_WRITE;
+	if (algo_data->running) {
+		(void) i2c_algo_dp_aux_transaction(adapter, mode, 0, NULL);
+		algo_data->running = false;
+	}
+}
+
+/*
+ * Write a single byte to the current I2C address, the
+ * the I2C link must be running or this returns -EIO
+ */
+static int
+i2c_algo_dp_aux_put_byte(struct i2c_adapter *adapter, u8 byte)
+{
+	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+	int ret;
+
+	if (!algo_data->running)
+		return -EIO;
+
+	ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_WRITE, byte, NULL);
+	return ret;
+}
+
+/*
+ * Read a single byte from the current I2C address, the
+ * I2C link must be running or this returns -EIO
+ */
+static int
+i2c_algo_dp_aux_get_byte(struct i2c_adapter *adapter, u8 *byte_ret)
+{
+	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+	int ret;
+
+	if (!algo_data->running)
+		return -EIO;
+
+	ret = i2c_algo_dp_aux_transaction(adapter, MODE_I2C_READ, 0, byte_ret);
+	return ret;
+}
+
+static int
+i2c_algo_dp_aux_xfer(struct i2c_adapter *adapter,
+		     struct i2c_msg *msgs,
+		     int num)
+{
+	int ret = 0;
+	bool reading = false;
+	int m;
+	int b;
+
+	for (m = 0; m < num; m++) {
+		u16 len = msgs[m].len;
+		u8 *buf = msgs[m].buf;
+		reading = (msgs[m].flags & I2C_M_RD) != 0;
+		ret = i2c_algo_dp_aux_address(adapter, msgs[m].addr, reading);
+		if (ret < 0)
+			break;
+		if (reading) {
+			for (b = 0; b < len; b++) {
+				ret = i2c_algo_dp_aux_get_byte(adapter, &buf[b]);
+				if (ret < 0)
+					break;
+			}
+		} else {
+			for (b = 0; b < len; b++) {
+				ret = i2c_algo_dp_aux_put_byte(adapter, buf[b]);
+				if (ret < 0)
+					break;
+			}
+		}
+		if (ret < 0)
+			break;
+	}
+	if (ret >= 0)
+		ret = num;
+	i2c_algo_dp_aux_stop(adapter, reading);
+	DRM_DEBUG_KMS("dp_aux_xfer return %d\n", ret);
+	return ret;
+}
+
+static u32
+i2c_algo_dp_aux_functionality(struct i2c_adapter *adapter)
+{
+	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+	       I2C_FUNC_SMBUS_READ_BLOCK_DATA |
+	       I2C_FUNC_SMBUS_BLOCK_PROC_CALL |
+	       I2C_FUNC_10BIT_ADDR;
+}
+
+static const struct i2c_algorithm i2c_dp_aux_algo = {
+	.master_xfer	= i2c_algo_dp_aux_xfer,
+	.functionality	= i2c_algo_dp_aux_functionality,
+};
+
+static void
+i2c_dp_aux_reset_bus(struct i2c_adapter *adapter)
+{
+	(void) i2c_algo_dp_aux_address(adapter, 0, false);
+	(void) i2c_algo_dp_aux_stop(adapter, false);
+}
+
+static int
+i2c_dp_aux_prepare_bus(struct i2c_adapter *adapter)
+{
+	adapter->algo = &i2c_dp_aux_algo;
+	adapter->retries = 3;
+	i2c_dp_aux_reset_bus(adapter);
+	return 0;
+}
+
+/*
+ * FIXME: This is the old dp aux helper, gma500 is the last driver that needs to
+ * be ported over to the new helper code in drm_dp_helper.c like i915 or radeon.
+ */
+static int __deprecated
+i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
+{
+	int error;
+
+	error = i2c_dp_aux_prepare_bus(adapter);
+	if (error)
+		return error;
+	error = i2c_add_adapter(adapter);
+	return error;
+}
+
 #define _wait_for(COND, MS, W) ({ \
	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);	\
	int ret__ = 0;							\
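The block copied into cdv_intel_dp.c above is the legacy i2c-over-AUX algorithm; as the FIXME notes, gma500 keeps a private copy until it is ported to the newer drm_dp_helper code. A driver only has to supply the aux_ch single-byte callback and register the adapter. An illustrative wiring (not gma500's actual code):

	/* Hypothetical aux_ch implementation: translate one MODE_I2C_* step
	 * into a hardware AUX channel transaction. */
	static int example_aux_ch(struct i2c_adapter *adapter, int mode,
				  uint8_t write_byte, uint8_t *read_byte)
	{
		/* ...program AUX registers, return bytes transferred or -errno */
		return 0;
	}

	static struct i2c_algo_dp_aux_data example_algo = {
		.running = false,
		.address = 0,
		.aux_ch	 = example_aux_ch,
	};

	static int example_register(struct i2c_adapter *adapter)
	{
		adapter->algo_data = &example_algo;
		/* installs i2c_dp_aux_algo, sets retries, resets the bus,
		 * then calls i2c_add_adapter() */
		return i2c_dp_aux_add_bus(adapter);
	}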
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
index 87885d8c06e8..6b43ae3ffd73 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.c
@@ -25,6 +25,7 @@
 */
 
 #include <linux/freezer.h>
+#include <video/mipi_display.h>
 
 #include "mdfld_dsi_output.h"
 #include "mdfld_dsi_pkg_sender.h"
@@ -32,20 +33,6 @@
 
 #define MDFLD_DSI_READ_MAX_COUNT	5000
 
-enum data_type {
-	DSI_DT_GENERIC_SHORT_WRITE_0	= 0x03,
-	DSI_DT_GENERIC_SHORT_WRITE_1	= 0x13,
-	DSI_DT_GENERIC_SHORT_WRITE_2	= 0x23,
-	DSI_DT_GENERIC_READ_0		= 0x04,
-	DSI_DT_GENERIC_READ_1		= 0x14,
-	DSI_DT_GENERIC_READ_2		= 0x24,
-	DSI_DT_GENERIC_LONG_WRITE	= 0x29,
-	DSI_DT_DCS_SHORT_WRITE_0	= 0x05,
-	DSI_DT_DCS_SHORT_WRITE_1	= 0x15,
-	DSI_DT_DCS_READ			= 0x06,
-	DSI_DT_DCS_LONG_WRITE		= 0x39,
-};
-
 enum {
 	MDFLD_DSI_PANEL_MODE_SLEEP = 0x1,
 };
@@ -321,9 +308,9 @@ static int send_pkg_prepare(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
 	u8 cmd;
 
 	switch (data_type) {
-	case DSI_DT_DCS_SHORT_WRITE_0:
-	case DSI_DT_DCS_SHORT_WRITE_1:
-	case DSI_DT_DCS_LONG_WRITE:
+	case MIPI_DSI_DCS_SHORT_WRITE:
+	case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
+	case MIPI_DSI_DCS_LONG_WRITE:
 		cmd = *data;
 		break;
 	default:
@@ -334,12 +321,12 @@ static int send_pkg_prepare(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
 	sender->status = MDFLD_DSI_PKG_SENDER_BUSY;
 
 	/*wait for 120 milliseconds in case exit_sleep_mode just be sent*/
-	if (unlikely(cmd == DCS_ENTER_SLEEP_MODE)) {
+	if (unlikely(cmd == MIPI_DCS_ENTER_SLEEP_MODE)) {
 		/*TODO: replace it with msleep later*/
 		mdelay(120);
 	}
 
-	if (unlikely(cmd == DCS_EXIT_SLEEP_MODE)) {
+	if (unlikely(cmd == MIPI_DCS_EXIT_SLEEP_MODE)) {
 		/*TODO: replace it with msleep later*/
 		mdelay(120);
 	}
@@ -352,9 +339,9 @@ static int send_pkg_done(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
 	u8 cmd;
 
 	switch (data_type) {
-	case DSI_DT_DCS_SHORT_WRITE_0:
-	case DSI_DT_DCS_SHORT_WRITE_1:
-	case DSI_DT_DCS_LONG_WRITE:
+	case MIPI_DSI_DCS_SHORT_WRITE:
+	case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
+	case MIPI_DSI_DCS_LONG_WRITE:
 		cmd = *data;
 		break;
 	default:
@@ -362,15 +349,15 @@ static int send_pkg_done(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
 	}
 
 	/*update panel status*/
-	if (unlikely(cmd == DCS_ENTER_SLEEP_MODE)) {
+	if (unlikely(cmd == MIPI_DCS_ENTER_SLEEP_MODE)) {
 		sender->panel_mode |= MDFLD_DSI_PANEL_MODE_SLEEP;
 		/*TODO: replace it with msleep later*/
 		mdelay(120);
-	} else if (unlikely(cmd == DCS_EXIT_SLEEP_MODE)) {
+	} else if (unlikely(cmd == MIPI_DCS_EXIT_SLEEP_MODE)) {
 		sender->panel_mode &= ~MDFLD_DSI_PANEL_MODE_SLEEP;
 		/*TODO: replace it with msleep later*/
 		mdelay(120);
-	} else if (unlikely(cmd == DCS_SOFT_RESET)) {
+	} else if (unlikely(cmd == MIPI_DCS_SOFT_RESET)) {
 		/*TODO: replace it with msleep later*/
 		mdelay(5);
 	}
@@ -405,19 +392,19 @@ static int send_pkg(struct mdfld_dsi_pkg_sender *sender, u8 data_type,
 	}
 
 	switch (data_type) {
-	case DSI_DT_GENERIC_SHORT_WRITE_0:
-	case DSI_DT_GENERIC_SHORT_WRITE_1:
-	case DSI_DT_GENERIC_SHORT_WRITE_2:
-	case DSI_DT_GENERIC_READ_0:
-	case DSI_DT_GENERIC_READ_1:
-	case DSI_DT_GENERIC_READ_2:
-	case DSI_DT_DCS_SHORT_WRITE_0:
-	case DSI_DT_DCS_SHORT_WRITE_1:
-	case DSI_DT_DCS_READ:
+	case MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM:
+	case MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM:
+	case MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM:
+	case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM:
+	case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM:
+	case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM:
+	case MIPI_DSI_DCS_SHORT_WRITE:
+	case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
+	case MIPI_DSI_DCS_READ:
 		ret = send_short_pkg(sender, data_type, data[0], data[1], hs);
 		break;
-	case DSI_DT_GENERIC_LONG_WRITE:
-	case DSI_DT_DCS_LONG_WRITE:
+	case MIPI_DSI_GENERIC_LONG_WRITE:
+	case MIPI_DSI_DCS_LONG_WRITE:
 		ret = send_long_pkg(sender, data_type, data, len, hs);
 		break;
 	}
@@ -440,7 +427,7 @@ int mdfld_dsi_send_mcs_long(struct mdfld_dsi_pkg_sender *sender, u8 *data,
 	}
 
 	spin_lock_irqsave(&sender->lock, flags);
-	send_pkg(sender, DSI_DT_DCS_LONG_WRITE, data, len, hs);
+	send_pkg(sender, MIPI_DSI_DCS_LONG_WRITE, data, len, hs);
 	spin_unlock_irqrestore(&sender->lock, flags);
 
 	return 0;
@@ -461,10 +448,10 @@ int mdfld_dsi_send_mcs_short(struct mdfld_dsi_pkg_sender *sender, u8 cmd,
 	data[0] = cmd;
 
 	if (param_num) {
-		data_type = DSI_DT_DCS_SHORT_WRITE_1;
+		data_type = MIPI_DSI_DCS_SHORT_WRITE_PARAM;
 		data[1] = param;
 	} else {
-		data_type = DSI_DT_DCS_SHORT_WRITE_0;
+		data_type = MIPI_DSI_DCS_SHORT_WRITE;
 		data[1] = 0;
 	}
 
@@ -489,17 +476,17 @@ int mdfld_dsi_send_gen_short(struct mdfld_dsi_pkg_sender *sender, u8 param0,
 
 	switch (param_num) {
 	case 0:
-		data_type = DSI_DT_GENERIC_SHORT_WRITE_0;
+		data_type = MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM;
 		data[0] = 0;
 		data[1] = 0;
 		break;
 	case 1:
-		data_type = DSI_DT_GENERIC_SHORT_WRITE_1;
+		data_type = MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM;
 		data[0] = param0;
 		data[1] = 0;
 		break;
 	case 2:
-		data_type = DSI_DT_GENERIC_SHORT_WRITE_2;
+		data_type = MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM;
 		data[0] = param0;
 		data[1] = param1;
 		break;
@@ -523,7 +510,7 @@ int mdfld_dsi_send_gen_long(struct mdfld_dsi_pkg_sender *sender, u8 *data,
 	}
 
 	spin_lock_irqsave(&sender->lock, flags);
-	send_pkg(sender, DSI_DT_GENERIC_LONG_WRITE, data, len, hs);
+	send_pkg(sender, MIPI_DSI_GENERIC_LONG_WRITE, data, len, hs);
 	spin_unlock_irqrestore(&sender->lock, flags);
 
 	return 0;
@@ -594,7 +581,7 @@ int mdfld_dsi_read_mcs(struct mdfld_dsi_pkg_sender *sender, u8 cmd,
 		return -EINVAL;
 	}
 
-	return __read_panel_data(sender, DSI_DT_DCS_READ, &cmd, 1,
+	return __read_panel_data(sender, MIPI_DSI_DCS_READ, &cmd, 1,
 				data, len, hs);
 }
 
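The switch above is purely mechanical: the private DSI_DT_* enum duplicated the MIPI DSI data-type values that <video/mipi_display.h> already provides, so each case maps one-to-one (0x05 becomes MIPI_DSI_DCS_SHORT_WRITE, 0x15 becomes MIPI_DSI_DCS_SHORT_WRITE_PARAM, and so on). A compile-time cross-check sketch, with the expected values taken from the removed enum:

	#include <linux/bug.h>
	#include <video/mipi_display.h>

	/* Not part of the patch: asserts that the generic constants carry the
	 * same spec-defined values as the deleted private enum. */
	static inline void dsi_data_type_value_check(void)
	{
		BUILD_BUG_ON(MIPI_DSI_GENERIC_SHORT_WRITE_0_PARAM  != 0x03);
		BUILD_BUG_ON(MIPI_DSI_GENERIC_SHORT_WRITE_1_PARAM  != 0x13);
		BUILD_BUG_ON(MIPI_DSI_GENERIC_SHORT_WRITE_2_PARAM  != 0x23);
		BUILD_BUG_ON(MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM != 0x04);
		BUILD_BUG_ON(MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM != 0x14);
		BUILD_BUG_ON(MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM != 0x24);
		BUILD_BUG_ON(MIPI_DSI_GENERIC_LONG_WRITE           != 0x29);
		BUILD_BUG_ON(MIPI_DSI_DCS_SHORT_WRITE              != 0x05);
		BUILD_BUG_ON(MIPI_DSI_DCS_SHORT_WRITE_PARAM        != 0x15);
		BUILD_BUG_ON(MIPI_DSI_DCS_READ                     != 0x06);
		BUILD_BUG_ON(MIPI_DSI_DCS_LONG_WRITE               != 0x39);
	}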
diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.h b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.h
index 459cd7ea8b81..0478a21c15d5 100644
--- a/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.h
+++ b/drivers/gpu/drm/gma500/mdfld_dsi_pkg_sender.h
@@ -62,18 +62,6 @@ struct mdfld_dsi_pkg_sender {
 	u32 mipi_cmd_len_reg;
 };
 
-/* DCS definitions */
-#define DCS_SOFT_RESET			0x01
-#define DCS_ENTER_SLEEP_MODE		0x10
-#define DCS_EXIT_SLEEP_MODE		0x11
-#define DCS_SET_DISPLAY_OFF		0x28
-#define DCS_SET_DISPLAY_ON		0x29
-#define DCS_SET_COLUMN_ADDRESS		0x2a
-#define DCS_SET_PAGE_ADDRESS		0x2b
-#define DCS_WRITE_MEM_START		0x2c
-#define DCS_SET_TEAR_OFF		0x34
-#define DCS_SET_TEAR_ON			0x35
-
 extern int mdfld_dsi_pkg_sender_init(struct mdfld_dsi_connector *dsi_connector,
 			int pipe);
 extern void mdfld_dsi_pkg_sender_destroy(struct mdfld_dsi_pkg_sender *sender);
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index 0d39da6e8b7a..83bbc271bcfb 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -359,22 +359,26 @@ void oaktrail_lvds_init(struct drm_device *dev,
 	 * if closed, act like it's not there for now
 	 */
 
+	edid = NULL;
 	mutex_lock(&dev->mode_config.mutex);
 	i2c_adap = i2c_get_adapter(dev_priv->ops->i2c_bus);
-	if (i2c_adap == NULL)
-		dev_err(dev->dev, "No ddc adapter available!\n");
+	if (i2c_adap)
+		edid = drm_get_edid(connector, i2c_adap);
+	if (edid == NULL && dev_priv->lpc_gpio_base) {
+		oaktrail_lvds_i2c_init(encoder);
+		if (gma_encoder->ddc_bus != NULL) {
+			i2c_adap = &gma_encoder->ddc_bus->adapter;
+			edid = drm_get_edid(connector, i2c_adap);
+		}
+	}
 	/*
 	 * Attempt to get the fixed panel mode from DDC. Assume that the
 	 * preferred mode is the right one.
 	 */
-	if (i2c_adap) {
-		edid = drm_get_edid(connector, i2c_adap);
-		if (edid) {
-			drm_mode_connector_update_edid_property(connector,
-							edid);
-			drm_add_edid_modes(connector, edid);
-			kfree(edid);
-		}
+	if (edid) {
+		drm_mode_connector_update_edid_property(connector, edid);
+		drm_add_edid_modes(connector, edid);
+		kfree(edid);
 
 		list_for_each_entry(scan, &connector->probed_modes, head) {
 			if (scan->type & DRM_MODE_TYPE_PREFERRED) {
@@ -383,7 +387,8 @@ void oaktrail_lvds_init(struct drm_device *dev,
 				goto out;	/* FIXME: check for quirks */
 			}
 		}
-	}
+	} else
+		dev_err(dev->dev, "No ddc adapter available!\n");
 	/*
 	 * If we didn't get EDID, try geting panel timing
 	 * from configuration data
@@ -411,8 +416,10 @@ failed_find:
 	mutex_unlock(&dev->mode_config.mutex);
 
 	dev_dbg(dev->dev, "No LVDS modes found, disabling.\n");
-	if (gma_encoder->ddc_bus)
+	if (gma_encoder->ddc_bus) {
 		psb_intel_i2c_destroy(gma_encoder->ddc_bus);
+		gma_encoder->ddc_bus = NULL;
+	}
 
 /* failed_ddc: */
 
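After this change edid is initialized up front and the EDID acquisition becomes a two-stage fallback, with the error message reserved for the case where neither bus produced an EDID. A descriptive restatement:

	/*
	 * Illustrative restatement of the new probe order (not driver code):
	 * 1. try the stock DDC adapter from dev_priv->ops->i2c_bus;
	 * 2. if that yields no EDID and an LPC GPIO base was found at load,
	 *    create the bit-banged adapter (oaktrail_lvds_i2c.c) and retry;
	 * 3. only if both attempts fail, print "No ddc adapter available!".
	 */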
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c b/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c
new file mode 100644
index 000000000000..f913a62eee5f
--- /dev/null
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2002-2010, Intel Corporation.
+ * Copyright (c) 2014 ATRON electronic GmbH
+ * Author: Jan Safrata <jan.nikitenko@gmail.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+
+#include <drm/drmP.h>
+#include "psb_drv.h"
+#include "psb_intel_reg.h"
+
+
+/*
+ * LPC GPIO based I2C bus for LVDS of Atom E6xx
+ */
+
+/*-----------------------------------------------------------------------------
+ * LPC Register Offsets. Used for LVDS GPIO Bit Bashing. Registers are part
+ * Atom E6xx [D31:F0]
+ ----------------------------------------------------------------------------*/
+#define RGEN	0x20
+#define RGIO	0x24
+#define RGLVL	0x28
+#define RGTPE	0x2C
+#define RGTNE	0x30
+#define RGGPE	0x34
+#define RGSMI	0x38
+#define RGTS	0x3C
+
+/* The LVDS GPIO clock lines are GPIOSUS[3]
+ * The LVDS GPIO data lines are GPIOSUS[4]
+ */
+#define GPIO_CLOCK	0x08
+#define GPIO_DATA	0x10
+
+#define LPC_READ_REG(chan, r) inl((chan)->reg + (r))
+#define LPC_WRITE_REG(chan, r, val) outl((val), (chan)->reg + (r))
+
+static int get_clock(void *data)
+{
+	struct psb_intel_i2c_chan *chan = data;
+	u32 val, tmp;
+
+	val = LPC_READ_REG(chan, RGIO);
+	val |= GPIO_CLOCK;
+	LPC_WRITE_REG(chan, RGIO, val);
+	tmp = LPC_READ_REG(chan, RGLVL);
+	val = (LPC_READ_REG(chan, RGLVL) & GPIO_CLOCK) ? 1 : 0;
+
+	return val;
+}
+
+static int get_data(void *data)
+{
+	struct psb_intel_i2c_chan *chan = data;
+	u32 val, tmp;
+
+	val = LPC_READ_REG(chan, RGIO);
+	val |= GPIO_DATA;
+	LPC_WRITE_REG(chan, RGIO, val);
+	tmp = LPC_READ_REG(chan, RGLVL);
+	val = (LPC_READ_REG(chan, RGLVL) & GPIO_DATA) ? 1 : 0;
+
+	return val;
+}
+
+static void set_clock(void *data, int state_high)
+{
+	struct psb_intel_i2c_chan *chan = data;
+	u32 val;
+
+	if (state_high) {
+		val = LPC_READ_REG(chan, RGIO);
+		val |= GPIO_CLOCK;
+		LPC_WRITE_REG(chan, RGIO, val);
+	} else {
+		val = LPC_READ_REG(chan, RGIO);
+		val &= ~GPIO_CLOCK;
+		LPC_WRITE_REG(chan, RGIO, val);
+		val = LPC_READ_REG(chan, RGLVL);
+		val &= ~GPIO_CLOCK;
+		LPC_WRITE_REG(chan, RGLVL, val);
+	}
+}
+
+static void set_data(void *data, int state_high)
+{
+	struct psb_intel_i2c_chan *chan = data;
+	u32 val;
+
+	if (state_high) {
+		val = LPC_READ_REG(chan, RGIO);
+		val |= GPIO_DATA;
+		LPC_WRITE_REG(chan, RGIO, val);
+	} else {
+		val = LPC_READ_REG(chan, RGIO);
+		val &= ~GPIO_DATA;
+		LPC_WRITE_REG(chan, RGIO, val);
+		val = LPC_READ_REG(chan, RGLVL);
+		val &= ~GPIO_DATA;
+		LPC_WRITE_REG(chan, RGLVL, val);
+	}
+}
+
+void oaktrail_lvds_i2c_init(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct psb_intel_i2c_chan *chan;
+
+	chan = kzalloc(sizeof(struct psb_intel_i2c_chan), GFP_KERNEL);
+	if (!chan)
+		return;
+
+	chan->drm_dev = dev;
+	chan->reg = dev_priv->lpc_gpio_base;
+	strncpy(chan->adapter.name, "gma500 LPC", I2C_NAME_SIZE - 1);
+	chan->adapter.owner = THIS_MODULE;
+	chan->adapter.algo_data = &chan->algo;
+	chan->adapter.dev.parent = &dev->pdev->dev;
+	chan->algo.setsda = set_data;
+	chan->algo.setscl = set_clock;
+	chan->algo.getsda = get_data;
+	chan->algo.getscl = get_clock;
+	chan->algo.udelay = 100;
+	chan->algo.timeout = usecs_to_jiffies(2200);
+	chan->algo.data = chan;
+
+	i2c_set_adapdata(&chan->adapter, chan);
+
+	set_data(chan, 1);
+	set_clock(chan, 1);
+	udelay(50);
+
+	if (i2c_bit_add_bus(&chan->adapter)) {
+		kfree(chan);
+		return;
+	}
+
+	gma_encoder->ddc_bus = chan;
+}
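The new bus emulates open-drain signalling: get_clock()/get_data() first set the line's RGIO bit (switching it to input, i.e. released) and then sample RGLVL, while driving low clears both the direction and the level bit. Once i2c_bit_add_bus() succeeds the adapter behaves like any other I2C master; an illustrative raw transfer over it (the driver itself simply calls drm_get_edid()):

	/* Hypothetical helper: read one byte from register 'reg' of an I2C
	 * device at address 'addr' via the freshly registered adapter. */
	static int example_read_reg(struct i2c_adapter *adap, u8 addr, u8 reg,
				    u8 *val)
	{
		struct i2c_msg msgs[] = {
			{ .addr = addr, .flags = 0,        .len = 1, .buf = &reg },
			{ .addr = addr, .flags = I2C_M_RD, .len = 1, .buf = val  },
		};

		return i2c_transfer(adap, msgs, 2) == 2 ? 0 : -EIO;
	}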
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 6ec3a905fdd2..92e7e5795398 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -212,6 +212,8 @@ static int psb_driver_unload(struct drm_device *dev)
 	}
 	if (dev_priv->aux_pdev)
 		pci_dev_put(dev_priv->aux_pdev);
+	if (dev_priv->lpc_pdev)
+		pci_dev_put(dev_priv->lpc_pdev);
 
 	/* Destroy VBT data */
 	psb_intel_destroy_bios(dev);
@@ -280,6 +282,24 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
 			DRM_DEBUG_KMS("Couldn't find aux pci device");
 		}
 		dev_priv->gmbus_reg = dev_priv->aux_reg;
+
+		dev_priv->lpc_pdev = pci_get_bus_and_slot(0, PCI_DEVFN(31, 0));
+		if (dev_priv->lpc_pdev) {
+			pci_read_config_word(dev_priv->lpc_pdev, PSB_LPC_GBA,
+				&dev_priv->lpc_gpio_base);
+			pci_write_config_dword(dev_priv->lpc_pdev, PSB_LPC_GBA,
+				(u32)dev_priv->lpc_gpio_base | (1L<<31));
+			pci_read_config_word(dev_priv->lpc_pdev, PSB_LPC_GBA,
+				&dev_priv->lpc_gpio_base);
+			dev_priv->lpc_gpio_base &= 0xffc0;
+			if (dev_priv->lpc_gpio_base)
+				DRM_DEBUG_KMS("Found LPC GPIO at 0x%04x\n",
+						dev_priv->lpc_gpio_base);
+			else {
+				pci_dev_put(dev_priv->lpc_pdev);
+				dev_priv->lpc_pdev = NULL;
+			}
+		}
 	} else {
 		dev_priv->gmbus_reg = dev_priv->vdc_reg;
 	}
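The load path claims the LPC bridge at PCI 0000:00:1f.0, reads the GPIO base address register (PSB_LPC_GBA, config offset 0x44), sets bit 31 to enable decode, and masks off the low six bits to recover the I/O port base used by the inl()/outl() accessors in oaktrail_lvds_i2c.c. A worked example of the decode, with a made-up register value:

	/* Hypothetical: config word at offset 0x44 reads back as 0x1041. */
	u16 gba  = 0x1041;
	u16 base = gba & 0xffc0;	/* == 0x1040; the masked-off low bits
					 * are not part of the port base */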
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 55ebe2bd88dd..e38057b91865 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -83,6 +83,7 @@ enum {
 #define PSB_PGETBL_CTL		0x2020
 #define _PSB_PGETBL_ENABLED	0x00000001
 #define PSB_SGX_2D_SLAVE_PORT	0x4000
+#define PSB_LPC_GBA		0x44
 
 /* TODO: To get rid of */
 #define PSB_TT_PRIV0_LIMIT	(256*1024*1024)
@@ -441,6 +442,7 @@ struct psb_ops;
 struct drm_psb_private {
 	struct drm_device *dev;
 	struct pci_dev *aux_pdev; /* Currently only used by mrst */
+	struct pci_dev *lpc_pdev; /* Currently only used by mrst */
 	const struct psb_ops *ops;
 	const struct psb_offset *regmap;
 
@@ -470,6 +472,7 @@ struct drm_psb_private {
 	uint8_t __iomem *sgx_reg;
 	uint8_t __iomem *vdc_reg;
 	uint8_t __iomem *aux_reg; /* Auxillary vdc pipe regs */
+	uint16_t lpc_gpio_base;
 	uint32_t gatt_free_offset;
 
 	/* Fencing / irq */
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 87b50ba64ed4..b21a09451d1d 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -21,6 +21,7 @@
 #include <linux/i2c.h>
 
 #include <drm/drmP.h>
+#include <drm/drm_plane_helper.h>
 #include "framebuffer.h"
 #include "psb_drv.h"
 #include "psb_intel_drv.h"
diff --git a/drivers/gpu/drm/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index 336bd3aa1a06..860dd2177ca1 100644
--- a/drivers/gpu/drm/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -223,6 +223,7 @@ extern void oaktrail_lvds_init(struct drm_device *dev,
 extern void oaktrail_wait_for_INTR_PKT_SENT(struct drm_device *dev);
 extern void oaktrail_dsi_init(struct drm_device *dev,
			struct psb_intel_mode_device *mode_dev);
+extern void oaktrail_lvds_i2c_init(struct drm_encoder *encoder);
 extern void mid_dsi_init(struct drm_device *dev,
			struct psb_intel_mode_device *mode_dev, int dsi_num);
 
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index 0be96fdb5e28..58529cea575d 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -1631,57 +1631,8 @@ static int psb_intel_sdvo_get_modes(struct drm_connector *connector)
 	return !list_empty(&connector->probed_modes);
 }
 
-static void
-psb_intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
-{
-	struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
-	struct drm_device *dev = connector->dev;
-
-	if (psb_intel_sdvo_connector->left)
-		drm_property_destroy(dev, psb_intel_sdvo_connector->left);
-	if (psb_intel_sdvo_connector->right)
-		drm_property_destroy(dev, psb_intel_sdvo_connector->right);
-	if (psb_intel_sdvo_connector->top)
-		drm_property_destroy(dev, psb_intel_sdvo_connector->top);
-	if (psb_intel_sdvo_connector->bottom)
-		drm_property_destroy(dev, psb_intel_sdvo_connector->bottom);
-	if (psb_intel_sdvo_connector->hpos)
-		drm_property_destroy(dev, psb_intel_sdvo_connector->hpos);
-	if (psb_intel_sdvo_connector->vpos)
-		drm_property_destroy(dev, psb_intel_sdvo_connector->vpos);
-	if (psb_intel_sdvo_connector->saturation)
-		drm_property_destroy(dev, psb_intel_sdvo_connector->saturation);
-	if (psb_intel_sdvo_connector->contrast)
-		drm_property_destroy(dev, psb_intel_sdvo_connector->contrast);
-	if (psb_intel_sdvo_connector->hue)
-		drm_property_destroy(dev, psb_intel_sdvo_connector->hue);
-	if (psb_intel_sdvo_connector->sharpness)
-		drm_property_destroy(dev, psb_intel_sdvo_connector->sharpness);
-	if (psb_intel_sdvo_connector->flicker_filter)
-		drm_property_destroy(dev, psb_intel_sdvo_connector->flicker_filter);
-	if (psb_intel_sdvo_connector->flicker_filter_2d)
-		drm_property_destroy(dev, psb_intel_sdvo_connector->flicker_filter_2d);
-	if (psb_intel_sdvo_connector->flicker_filter_adaptive)
-		drm_property_destroy(dev, psb_intel_sdvo_connector->flicker_filter_adaptive);
-	if (psb_intel_sdvo_connector->tv_luma_filter)
-		drm_property_destroy(dev, psb_intel_sdvo_connector->tv_luma_filter);
-	if (psb_intel_sdvo_connector->tv_chroma_filter)
-		drm_property_destroy(dev, psb_intel_sdvo_connector->tv_chroma_filter);
-	if (psb_intel_sdvo_connector->dot_crawl)
-		drm_property_destroy(dev, psb_intel_sdvo_connector->dot_crawl);
-	if (psb_intel_sdvo_connector->brightness)
-		drm_property_destroy(dev, psb_intel_sdvo_connector->brightness);
-}
-
 static void psb_intel_sdvo_destroy(struct drm_connector *connector)
 {
-	struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
-
-	if (psb_intel_sdvo_connector->tv_format)
-		drm_property_destroy(connector->dev,
-				     psb_intel_sdvo_connector->tv_format);
-
-	psb_intel_sdvo_destroy_enhance_property(connector);
 	drm_connector_unregister(connector);
 	drm_connector_cleanup(connector);
 	kfree(connector);
diff --git a/drivers/gpu/drm/i2c/Kconfig b/drivers/gpu/drm/i2c/Kconfig
index 4d341db462a2..22c7ed63a001 100644
--- a/drivers/gpu/drm/i2c/Kconfig
+++ b/drivers/gpu/drm/i2c/Kconfig
@@ -1,6 +1,12 @@
 menu "I2C encoder or helper chips"
	depends on DRM && DRM_KMS_HELPER && I2C
 
+config DRM_I2C_ADV7511
+	tristate "AV7511 encoder"
+	select REGMAP_I2C
+	help
+	  Support for the Analog Device ADV7511(W) and ADV7513 HDMI encoders.
+
 config DRM_I2C_CH7006
 	tristate "Chrontel ch7006 TV encoder"
 	default m if DRM_NOUVEAU
diff --git a/drivers/gpu/drm/i2c/Makefile b/drivers/gpu/drm/i2c/Makefile
index 43aa33baebed..2c72eb584ab7 100644
--- a/drivers/gpu/drm/i2c/Makefile
+++ b/drivers/gpu/drm/i2c/Makefile
@@ -1,5 +1,7 @@
 ccflags-y := -Iinclude/drm
 
+obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511.o
+
 ch7006-y := ch7006_drv.o ch7006_mode.o
 obj-$(CONFIG_DRM_I2C_CH7006) += ch7006.o
 
5 7
diff --git a/drivers/gpu/drm/i2c/adv7511.c b/drivers/gpu/drm/i2c/adv7511.c
new file mode 100644
index 000000000000..faf1c0c5ab2e
--- /dev/null
+++ b/drivers/gpu/drm/i2c/adv7511.c
@@ -0,0 +1,1010 @@
1/*
2 * Analog Devices ADV7511 HDMI transmitter driver
3 *
4 * Copyright 2012 Analog Devices Inc.
5 *
6 * Licensed under the GPL-2.
7 */
8
9#include <linux/device.h>
10#include <linux/gpio/consumer.h>
11#include <linux/i2c.h>
12#include <linux/module.h>
13#include <linux/regmap.h>
14#include <linux/slab.h>
15
16#include <drm/drmP.h>
17#include <drm/drm_crtc_helper.h>
18#include <drm/drm_edid.h>
19#include <drm/drm_encoder_slave.h>
20
21#include "adv7511.h"
22
23struct adv7511 {
24 struct i2c_client *i2c_main;
25 struct i2c_client *i2c_edid;
26
27 struct regmap *regmap;
28 struct regmap *packet_memory_regmap;
29 enum drm_connector_status status;
30 int dpms_mode;
31
32 unsigned int f_tmds;
33
34 unsigned int current_edid_segment;
35 uint8_t edid_buf[256];
36
37 wait_queue_head_t wq;
38 struct drm_encoder *encoder;
39
40 bool embedded_sync;
41 enum adv7511_sync_polarity vsync_polarity;
42 enum adv7511_sync_polarity hsync_polarity;
43 bool rgb;
44
45 struct edid *edid;
46
47 struct gpio_desc *gpio_pd;
48};
49
50static struct adv7511 *encoder_to_adv7511(struct drm_encoder *encoder)
51{
52 return to_encoder_slave(encoder)->slave_priv;
53}
54
55/* ADI recommended values for proper operation. */
56static const struct reg_default adv7511_fixed_registers[] = {
57 { 0x98, 0x03 },
58 { 0x9a, 0xe0 },
59 { 0x9c, 0x30 },
60 { 0x9d, 0x61 },
61 { 0xa2, 0xa4 },
62 { 0xa3, 0xa4 },
63 { 0xe0, 0xd0 },
64 { 0xf9, 0x00 },
65 { 0x55, 0x02 },
66};
67
68/* -----------------------------------------------------------------------------
69 * Register access
70 */
71
72static const uint8_t adv7511_register_defaults[] = {
73 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 00 */
74 0x00, 0x00, 0x01, 0x0e, 0xbc, 0x18, 0x01, 0x13,
75 0x25, 0x37, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 10 */
76 0x46, 0x62, 0x04, 0xa8, 0x00, 0x00, 0x1c, 0x84,
77 0x1c, 0xbf, 0x04, 0xa8, 0x1e, 0x70, 0x02, 0x1e, /* 20 */
78 0x00, 0x00, 0x04, 0xa8, 0x08, 0x12, 0x1b, 0xac,
79 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 30 */
80 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0xb0,
81 0x00, 0x50, 0x90, 0x7e, 0x79, 0x70, 0x00, 0x00, /* 40 */
82 0x00, 0xa8, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00,
83 0x00, 0x00, 0x02, 0x0d, 0x00, 0x00, 0x00, 0x00, /* 50 */
84 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
85 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 60 */
86 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
87 0x01, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 70 */
88 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
89 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 80 */
90 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
91 0x00, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, /* 90 */
92 0x0b, 0x02, 0x00, 0x18, 0x5a, 0x60, 0x00, 0x00,
93 0x00, 0x00, 0x80, 0x80, 0x08, 0x04, 0x00, 0x00, /* a0 */
94 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x40, 0x14,
95 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* b0 */
96 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
97 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* c0 */
98 0x00, 0x03, 0x00, 0x00, 0x02, 0x00, 0x01, 0x04,
99 0x30, 0xff, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00, /* d0 */
100 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x01,
101 0x80, 0x75, 0x00, 0x00, 0x60, 0x00, 0x00, 0x00, /* e0 */
102 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
103 0x00, 0x00, 0x00, 0x00, 0x00, 0x75, 0x11, 0x00, /* f0 */
104 0x00, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
105};
106
107static bool adv7511_register_volatile(struct device *dev, unsigned int reg)
108{
109 switch (reg) {
110 case ADV7511_REG_CHIP_REVISION:
111 case ADV7511_REG_SPDIF_FREQ:
112 case ADV7511_REG_CTS_AUTOMATIC1:
113 case ADV7511_REG_CTS_AUTOMATIC2:
114 case ADV7511_REG_VIC_DETECTED:
115 case ADV7511_REG_VIC_SEND:
116 case ADV7511_REG_AUX_VIC_DETECTED:
117 case ADV7511_REG_STATUS:
118 case ADV7511_REG_GC(1):
119 case ADV7511_REG_INT(0):
120 case ADV7511_REG_INT(1):
121 case ADV7511_REG_PLL_STATUS:
122 case ADV7511_REG_AN(0):
123 case ADV7511_REG_AN(1):
124 case ADV7511_REG_AN(2):
125 case ADV7511_REG_AN(3):
126 case ADV7511_REG_AN(4):
127 case ADV7511_REG_AN(5):
128 case ADV7511_REG_AN(6):
129 case ADV7511_REG_AN(7):
130 case ADV7511_REG_HDCP_STATUS:
131 case ADV7511_REG_BCAPS:
132 case ADV7511_REG_BKSV(0):
133 case ADV7511_REG_BKSV(1):
134 case ADV7511_REG_BKSV(2):
135 case ADV7511_REG_BKSV(3):
136 case ADV7511_REG_BKSV(4):
137 case ADV7511_REG_DDC_STATUS:
138 case ADV7511_REG_BSTATUS(0):
139 case ADV7511_REG_BSTATUS(1):
140 case ADV7511_REG_CHIP_ID_HIGH:
141 case ADV7511_REG_CHIP_ID_LOW:
142 return true;
143 }
144
145 return false;
146}
147
148static const struct regmap_config adv7511_regmap_config = {
149 .reg_bits = 8,
150 .val_bits = 8,
151
152 .max_register = 0xff,
153 .cache_type = REGCACHE_RBTREE,
154 .reg_defaults_raw = adv7511_register_defaults,
155 .num_reg_defaults_raw = ARRAY_SIZE(adv7511_register_defaults),
156
157 .volatile_reg = adv7511_register_volatile,
158};
159
160/* -----------------------------------------------------------------------------
161 * Hardware configuration
162 */
163
164static void adv7511_set_colormap(struct adv7511 *adv7511, bool enable,
165 const uint16_t *coeff,
166 unsigned int scaling_factor)
167{
168 unsigned int i;
169
170 regmap_update_bits(adv7511->regmap, ADV7511_REG_CSC_UPPER(1),
171 ADV7511_CSC_UPDATE_MODE, ADV7511_CSC_UPDATE_MODE);
172
173 if (enable) {
174 for (i = 0; i < 12; ++i) {
175 regmap_update_bits(adv7511->regmap,
176 ADV7511_REG_CSC_UPPER(i),
177 0x1f, coeff[i] >> 8);
178 regmap_write(adv7511->regmap,
179 ADV7511_REG_CSC_LOWER(i),
180 coeff[i] & 0xff);
181 }
182 }
183
184 if (enable)
185 regmap_update_bits(adv7511->regmap, ADV7511_REG_CSC_UPPER(0),
186 0xe0, 0x80 | (scaling_factor << 5));
187 else
188 regmap_update_bits(adv7511->regmap, ADV7511_REG_CSC_UPPER(0),
189 0x80, 0x00);
190
191 regmap_update_bits(adv7511->regmap, ADV7511_REG_CSC_UPPER(1),
192 ADV7511_CSC_UPDATE_MODE, 0);
193}
194
195static int adv7511_packet_enable(struct adv7511 *adv7511, unsigned int packet)
196{
197 if (packet & 0xff)
198 regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE0,
199 packet, 0xff);
200
201 if (packet & 0xff00) {
202 packet >>= 8;
203 regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE1,
204 packet, 0xff);
205 }
206
207 return 0;
208}
209
210static int adv7511_packet_disable(struct adv7511 *adv7511, unsigned int packet)
211{
212 if (packet & 0xff)
213 regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE0,
214 packet, 0x00);
215
216 if (packet & 0xff00) {
217 packet >>= 8;
218 regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE1,
219 packet, 0x00);
220 }
221
222 return 0;
223}
224
225/* Coefficients for adv7511 color space conversion */
226static const uint16_t adv7511_csc_ycbcr_to_rgb[] = {
227 0x0734, 0x04ad, 0x0000, 0x1c1b,
228 0x1ddc, 0x04ad, 0x1f24, 0x0135,
229 0x0000, 0x04ad, 0x087c, 0x1b77,
230};
231
232static void adv7511_set_config_csc(struct adv7511 *adv7511,
233 struct drm_connector *connector,
234 bool rgb)
235{
236 struct adv7511_video_config config;
237 bool output_format_422, output_format_ycbcr;
238 unsigned int mode;
239 uint8_t infoframe[17];
240
241 if (adv7511->edid)
242 config.hdmi_mode = drm_detect_hdmi_monitor(adv7511->edid);
243 else
244 config.hdmi_mode = false;
245
246 hdmi_avi_infoframe_init(&config.avi_infoframe);
247
248 config.avi_infoframe.scan_mode = HDMI_SCAN_MODE_UNDERSCAN;
249
250 if (rgb) {
251 config.csc_enable = false;
252 config.avi_infoframe.colorspace = HDMI_COLORSPACE_RGB;
253 } else {
254 config.csc_scaling_factor = ADV7511_CSC_SCALING_4;
255 config.csc_coefficents = adv7511_csc_ycbcr_to_rgb;
256
257 if ((connector->display_info.color_formats &
258 DRM_COLOR_FORMAT_YCRCB422) &&
259 config.hdmi_mode) {
260 config.csc_enable = false;
261 config.avi_infoframe.colorspace =
262 HDMI_COLORSPACE_YUV422;
263 } else {
264 config.csc_enable = true;
265 config.avi_infoframe.colorspace = HDMI_COLORSPACE_RGB;
266 }
267 }
268
269 if (config.hdmi_mode) {
270 mode = ADV7511_HDMI_CFG_MODE_HDMI;
271
272 switch (config.avi_infoframe.colorspace) {
273 case HDMI_COLORSPACE_YUV444:
274 output_format_422 = false;
275 output_format_ycbcr = true;
276 break;
277 case HDMI_COLORSPACE_YUV422:
278 output_format_422 = true;
279 output_format_ycbcr = true;
280 break;
281 default:
282 output_format_422 = false;
283 output_format_ycbcr = false;
284 break;
285 }
286 } else {
287 mode = ADV7511_HDMI_CFG_MODE_DVI;
288 output_format_422 = false;
289 output_format_ycbcr = false;
290 }
291
292 adv7511_packet_disable(adv7511, ADV7511_PACKET_ENABLE_AVI_INFOFRAME);
293
294 adv7511_set_colormap(adv7511, config.csc_enable,
295 config.csc_coefficents,
296 config.csc_scaling_factor);
297
298 regmap_update_bits(adv7511->regmap, ADV7511_REG_VIDEO_INPUT_CFG1, 0x81,
299 (output_format_422 << 7) | output_format_ycbcr);
300
301 regmap_update_bits(adv7511->regmap, ADV7511_REG_HDCP_HDMI_CFG,
302 ADV7511_HDMI_CFG_MODE_MASK, mode);
303
304 hdmi_avi_infoframe_pack(&config.avi_infoframe, infoframe,
305 sizeof(infoframe));
306
307 /* The AVI infoframe id is not configurable */
308 regmap_bulk_write(adv7511->regmap, ADV7511_REG_AVI_INFOFRAME_VERSION,
309 infoframe + 1, sizeof(infoframe) - 1);
310
311 adv7511_packet_enable(adv7511, ADV7511_PACKET_ENABLE_AVI_INFOFRAME);
312}
313
314static void adv7511_set_link_config(struct adv7511 *adv7511,
315 const struct adv7511_link_config *config)
316{
317 /*
318 * The input style values documented in the datasheet don't match the
319 * hardware register field values :-(
320 */
321 static const unsigned int input_styles[4] = { 0, 2, 1, 3 };
322
323 unsigned int clock_delay;
324 unsigned int color_depth;
325 unsigned int input_id;
326
327 clock_delay = (config->clock_delay + 1200) / 400;
328 color_depth = config->input_color_depth == 8 ? 3
329 : (config->input_color_depth == 10 ? 1 : 2);
330
331 /* TODO Support input ID 6 */
332 if (config->input_colorspace != HDMI_COLORSPACE_YUV422)
333 input_id = config->input_clock == ADV7511_INPUT_CLOCK_DDR
334 ? 5 : 0;
335 else if (config->input_clock == ADV7511_INPUT_CLOCK_DDR)
336 input_id = config->embedded_sync ? 8 : 7;
337 else if (config->input_clock == ADV7511_INPUT_CLOCK_2X)
338 input_id = config->embedded_sync ? 4 : 3;
339 else
340 input_id = config->embedded_sync ? 2 : 1;
341
342 regmap_update_bits(adv7511->regmap, ADV7511_REG_I2C_FREQ_ID_CFG, 0xf,
343 input_id);
344 regmap_update_bits(adv7511->regmap, ADV7511_REG_VIDEO_INPUT_CFG1, 0x7e,
345 (color_depth << 4) |
346 (input_styles[config->input_style] << 2));
347 regmap_write(adv7511->regmap, ADV7511_REG_VIDEO_INPUT_CFG2,
348 config->input_justification << 3);
349 regmap_write(adv7511->regmap, ADV7511_REG_TIMING_GEN_SEQ,
350 config->sync_pulse << 2);
351
352 regmap_write(adv7511->regmap, 0xba, clock_delay << 5);
353
354 adv7511->embedded_sync = config->embedded_sync;
355 adv7511->hsync_polarity = config->hsync_polarity;
356 adv7511->vsync_polarity = config->vsync_polarity;
357 adv7511->rgb = config->input_colorspace == HDMI_COLORSPACE_RGB;
358}
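adv7511_set_link_config() compresses the input clock delay into a 3-bit code: the device tree range of -1200 to 1600 ps (validated in adv7511_parse_dt() below) is offset and divided into 400 ps steps, then shifted into bits [7:5] of register 0xba. A standalone sketch of the encoding:

#include <stdio.h>

int main(void)
{
	int delay_ps;

	/* -1200..1600 ps in 400 ps steps -> 3-bit code in reg 0xba[7:5] */
	for (delay_ps = -1200; delay_ps <= 1600; delay_ps += 400) {
		unsigned int code = (delay_ps + 1200) / 400;

		printf("%5d ps -> code %u -> reg 0xba = 0x%02x\n",
		       delay_ps, code, code << 5);
	}
	return 0;
}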
359
360/* -----------------------------------------------------------------------------
361 * Interrupt and hotplug detection
362 */
363
364static bool adv7511_hpd(struct adv7511 *adv7511)
365{
366 unsigned int irq0;
367 int ret;
368
369 ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(0), &irq0);
370 if (ret < 0)
371 return false;
372
373 if (irq0 & ADV7511_INT0_HDP) {
374 regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
375 ADV7511_INT0_HDP);
376 return true;
377 }
378
379 return false;
380}
381
382static irqreturn_t adv7511_irq_handler(int irq, void *devid)
383{
384 struct adv7511 *adv7511 = devid;
385
386 if (adv7511_hpd(adv7511))
387 drm_helper_hpd_irq_event(adv7511->encoder->dev);
388
389 wake_up_all(&adv7511->wq);
390
391 return IRQ_HANDLED;
392}
393
394static unsigned int adv7511_is_interrupt_pending(struct adv7511 *adv7511,
395 unsigned int irq)
396{
397 unsigned int irq0, irq1;
398 unsigned int pending;
399 int ret;
400
401 ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(0), &irq0);
402 if (ret < 0)
403 return 0;
404 ret = regmap_read(adv7511->regmap, ADV7511_REG_INT(1), &irq1);
405 if (ret < 0)
406 return 0;
407
408 pending = (irq1 << 8) | irq0;
409
410 return pending & irq;
411}
412
413static int adv7511_wait_for_interrupt(struct adv7511 *adv7511, int irq,
414 int timeout)
415{
416 unsigned int pending;
417 int ret;
418
419 if (adv7511->i2c_main->irq) {
420 ret = wait_event_interruptible_timeout(adv7511->wq,
421 adv7511_is_interrupt_pending(adv7511, irq),
422 msecs_to_jiffies(timeout));
423 if (ret <= 0)
424 return 0;
425 pending = adv7511_is_interrupt_pending(adv7511, irq);
426 } else {
427 if (timeout < 25)
428 timeout = 25;
429 do {
430 pending = adv7511_is_interrupt_pending(adv7511, irq);
431 if (pending)
432 break;
433 msleep(25);
434 timeout -= 25;
435 } while (timeout >= 25);
436 }
437
438 return pending;
439}
440
441/* -----------------------------------------------------------------------------
442 * EDID retrieval
443 */
444
445static int adv7511_get_edid_block(void *data, u8 *buf, unsigned int block,
446 size_t len)
447{
448 struct adv7511 *adv7511 = data;
449 struct i2c_msg xfer[2];
450 uint8_t offset;
451 unsigned int i;
452 int ret;
453
454 if (len > 128)
455 return -EINVAL;
456
457 if (adv7511->current_edid_segment != block / 2) {
458 unsigned int status;
459
460 ret = regmap_read(adv7511->regmap, ADV7511_REG_DDC_STATUS,
461 &status);
462 if (ret < 0)
463 return ret;
464
465 if (status != 2) {
466 regmap_write(adv7511->regmap, ADV7511_REG_EDID_SEGMENT,
467 block);
468 ret = adv7511_wait_for_interrupt(adv7511,
469 ADV7511_INT0_EDID_READY |
470 ADV7511_INT1_DDC_ERROR, 200);
471
472 if (!(ret & ADV7511_INT0_EDID_READY))
473 return -EIO;
474 }
475
476 regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
477 ADV7511_INT0_EDID_READY | ADV7511_INT1_DDC_ERROR);
478
479 /* Do the read in 64-byte chunks; more I2C controllers are likely
480 * to support 64-byte transfers than 256-byte transfers.
481 */
482
483 xfer[0].addr = adv7511->i2c_edid->addr;
484 xfer[0].flags = 0;
485 xfer[0].len = 1;
486 xfer[0].buf = &offset;
487 xfer[1].addr = adv7511->i2c_edid->addr;
488 xfer[1].flags = I2C_M_RD;
489 xfer[1].len = 64;
490 xfer[1].buf = adv7511->edid_buf;
491
492 offset = 0;
493
494 for (i = 0; i < 4; ++i) {
495 ret = i2c_transfer(adv7511->i2c_edid->adapter, xfer,
496 ARRAY_SIZE(xfer));
497 if (ret < 0)
498 return ret;
499 else if (ret != 2)
500 return -EIO;
501
502 xfer[1].buf += 64;
503 offset += 64;
504 }
505
506 adv7511->current_edid_segment = block / 2;
507 }
508
509 if (block % 2 == 0)
510 memcpy(buf, adv7511->edid_buf, len);
511 else
512 memcpy(buf, adv7511->edid_buf + 128, len);
513
514 return 0;
515}
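The bookkeeping above follows E-DDC addressing: a segment covers two 128-byte EDID blocks, so the driver caches one 256-byte segment in edid_buf and copies even-numbered blocks from offset 0 and odd-numbered blocks from offset 128. A standalone sketch of that arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int block;

	/* An E-DDC segment holds two 128-byte EDID blocks; the driver
	 * caches one segment and picks the half matching block % 2. */
	for (block = 0; block < 4; block++)
		printf("block %u -> segment %u, offset %u\n",
		       block, block / 2, (block % 2) * 128);
	return 0;
}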
516
517/* -----------------------------------------------------------------------------
518 * Encoder operations
519 */
520
521static int adv7511_get_modes(struct drm_encoder *encoder,
522 struct drm_connector *connector)
523{
524 struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
525 struct edid *edid;
526 unsigned int count;
527
528 /* Reading the EDID only works if the device is powered */
529 if (adv7511->dpms_mode != DRM_MODE_DPMS_ON) {
530 regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
531 ADV7511_INT0_EDID_READY | ADV7511_INT1_DDC_ERROR);
532 regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
533 ADV7511_POWER_POWER_DOWN, 0);
534 adv7511->current_edid_segment = -1;
535 }
536
537 edid = drm_do_get_edid(connector, adv7511_get_edid_block, adv7511);
538
539 if (adv7511->dpms_mode != DRM_MODE_DPMS_ON)
540 regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
541 ADV7511_POWER_POWER_DOWN,
542 ADV7511_POWER_POWER_DOWN);
543
544 kfree(adv7511->edid);
545 adv7511->edid = edid;
546 if (!edid)
547 return 0;
548
549 drm_mode_connector_update_edid_property(connector, edid);
550 count = drm_add_edid_modes(connector, edid);
551
552 adv7511_set_config_csc(adv7511, connector, adv7511->rgb);
553
554 return count;
555}
556
557static void adv7511_encoder_dpms(struct drm_encoder *encoder, int mode)
558{
559 struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
560
561 switch (mode) {
562 case DRM_MODE_DPMS_ON:
563 adv7511->current_edid_segment = -1;
564
565 regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
566 ADV7511_INT0_EDID_READY | ADV7511_INT1_DDC_ERROR);
567 regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
568 ADV7511_POWER_POWER_DOWN, 0);
569 /*
570 * Per the HDMI spec, the HPD signal may be pulsed to indicate
571 * that the EDID information has changed. Some monitors do this
572 * when they wake up from standby or are enabled. When HPD goes
573 * low the adv7511 is reset and its outputs are disabled, which
574 * might cause the monitor to go back to standby. To avoid this,
575 * ignore the HPD pin for the first few seconds after enabling
576 * the output.
577 */
578 regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
579 ADV7511_REG_POWER2_HDP_SRC_MASK,
580 ADV7511_REG_POWER2_HDP_SRC_NONE);
581 /* Most of the registers are reset during power down or
582 * when HPD is low
583 */
584 regcache_sync(adv7511->regmap);
585 break;
586 default:
587 /* TODO: setup additional power down modes */
588 regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
589 ADV7511_POWER_POWER_DOWN,
590 ADV7511_POWER_POWER_DOWN);
591 regcache_mark_dirty(adv7511->regmap);
592 break;
593 }
594
595 adv7511->dpms_mode = mode;
596}
597
598static enum drm_connector_status
599adv7511_encoder_detect(struct drm_encoder *encoder,
600 struct drm_connector *connector)
601{
602 struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
603 enum drm_connector_status status;
604 unsigned int val;
605 bool hpd;
606 int ret;
607
608 ret = regmap_read(adv7511->regmap, ADV7511_REG_STATUS, &val);
609 if (ret < 0)
610 return connector_status_disconnected;
611
612 if (val & ADV7511_STATUS_HPD)
613 status = connector_status_connected;
614 else
615 status = connector_status_disconnected;
616
617 hpd = adv7511_hpd(adv7511);
618
619 /* The chip resets itself when the cable is disconnected. So if an
620 * HPD interrupt is pending while the cable is connected, there has
621 * been at least one disconnected-to-connected transition and the
622 * chip has to be reinitialized. */
623 if (status == connector_status_connected && hpd &&
624 adv7511->dpms_mode == DRM_MODE_DPMS_ON) {
625 regcache_mark_dirty(adv7511->regmap);
626 adv7511_encoder_dpms(encoder, adv7511->dpms_mode);
627 adv7511_get_modes(encoder, connector);
628 if (adv7511->status == connector_status_connected)
629 status = connector_status_disconnected;
630 } else {
631 /* Re-enable HPD sensing */
632 regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER2,
633 ADV7511_REG_POWER2_HDP_SRC_MASK,
634 ADV7511_REG_POWER2_HDP_SRC_BOTH);
635 }
636
637 adv7511->status = status;
638 return status;
639}
640
641static int adv7511_encoder_mode_valid(struct drm_encoder *encoder,
642 struct drm_display_mode *mode)
643{
644 if (mode->clock > 165000)
645 return MODE_CLOCK_HIGH;
646
647 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
648 return MODE_NO_INTERLACE;
649
650 return MODE_OK;
651}
652
653static void adv7511_encoder_mode_set(struct drm_encoder *encoder,
654 struct drm_display_mode *mode,
655 struct drm_display_mode *adj_mode)
656{
657 struct adv7511 *adv7511 = encoder_to_adv7511(encoder);
658 unsigned int low_refresh_rate;
659 unsigned int hsync_polarity = 0;
660 unsigned int vsync_polarity = 0;
661
662 if (adv7511->embedded_sync) {
663 unsigned int hsync_offset, hsync_len;
664 unsigned int vsync_offset, vsync_len;
665
666 hsync_offset = adj_mode->crtc_hsync_start -
667 adj_mode->crtc_hdisplay;
668 vsync_offset = adj_mode->crtc_vsync_start -
669 adj_mode->crtc_vdisplay;
670 hsync_len = adj_mode->crtc_hsync_end -
671 adj_mode->crtc_hsync_start;
672 vsync_len = adj_mode->crtc_vsync_end -
673 adj_mode->crtc_vsync_start;
674
675 /* The hardware vsync generator has an off-by-one bug */
676 vsync_offset += 1;
677
678 regmap_write(adv7511->regmap, ADV7511_REG_HSYNC_PLACEMENT_MSB,
679 ((hsync_offset >> 10) & 0x7) << 5);
680 regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(0),
681 (hsync_offset >> 2) & 0xff);
682 regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(1),
683 ((hsync_offset & 0x3) << 6) |
684 ((hsync_len >> 4) & 0x3f));
685 regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(2),
686 ((hsync_len & 0xf) << 4) |
687 ((vsync_offset >> 6) & 0xf));
688 regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(3),
689 ((vsync_offset & 0x3f) << 2) |
690 ((vsync_len >> 8) & 0x3));
691 regmap_write(adv7511->regmap, ADV7511_REG_SYNC_DECODER(4),
692 vsync_len & 0xff);
693
694 hsync_polarity = !(adj_mode->flags & DRM_MODE_FLAG_PHSYNC);
695 vsync_polarity = !(adj_mode->flags & DRM_MODE_FLAG_PVSYNC);
696 } else {
697 enum adv7511_sync_polarity mode_hsync_polarity;
698 enum adv7511_sync_polarity mode_vsync_polarity;
699
700 /*
701 * If the input signal is always low or always high, either invert
702 * it or pass it through, depending on the polarity of the current
703 * mode.
704 */
705 if (adj_mode->flags & DRM_MODE_FLAG_NHSYNC)
706 mode_hsync_polarity = ADV7511_SYNC_POLARITY_LOW;
707 else
708 mode_hsync_polarity = ADV7511_SYNC_POLARITY_HIGH;
709
710 if (adj_mode->flags & DRM_MODE_FLAG_NVSYNC)
711 mode_vsync_polarity = ADV7511_SYNC_POLARITY_LOW;
712 else
713 mode_vsync_polarity = ADV7511_SYNC_POLARITY_HIGH;
714
715 if (adv7511->hsync_polarity != mode_hsync_polarity &&
716 adv7511->hsync_polarity !=
717 ADV7511_SYNC_POLARITY_PASSTHROUGH)
718 hsync_polarity = 1;
719
720 if (adv7511->vsync_polarity != mode_vsync_polarity &&
721 adv7511->vsync_polarity !=
722 ADV7511_SYNC_POLARITY_PASSTHROUGH)
723 vsync_polarity = 1;
724 }
725
726 if (mode->vrefresh <= 24)
727 low_refresh_rate = ADV7511_LOW_REFRESH_RATE_24HZ;
728 else if (mode->vrefresh <= 25)
729 low_refresh_rate = ADV7511_LOW_REFRESH_RATE_25HZ;
730 else if (mode->vrefresh <= 30)
731 low_refresh_rate = ADV7511_LOW_REFRESH_RATE_30HZ;
732 else
733 low_refresh_rate = ADV7511_LOW_REFRESH_RATE_NONE;
734
735 regmap_update_bits(adv7511->regmap, 0xfb,
736 0x6, low_refresh_rate << 1);
737 regmap_update_bits(adv7511->regmap, 0x17,
738 0x60, (vsync_polarity << 6) | (hsync_polarity << 5));
739
740 /*
741 * TODO Test first order 4:2:2 to 4:4:4 up conversion method, which is
742 * supposed to give better results.
743 */
744
745 adv7511->f_tmds = mode->clock;
746}
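In embedded-sync mode the sync timings are spread across ADV7511_REG_HSYNC_PLACEMENT_MSB and the five SYNC_DECODER registers using the bit layout above. A standalone sketch reproducing the packing for standard CEA-861 1920x1080p timings (hsync 2008-2052, vsync 1084-1089; these mode values come from the CEA standard, not from this patch):

#include <stdio.h>

int main(void)
{
	/* CEA-861 1920x1080p: values a DRM mode would provide */
	unsigned int hsync_offset = 2008 - 1920;	/* 88 */
	unsigned int hsync_len = 2052 - 2008;		/* 44 */
	unsigned int vsync_offset = (1084 - 1080) + 1;	/* off-by-one fixup */
	unsigned int vsync_len = 1089 - 1084;		/* 5 */

	printf("HSYNC_PLACEMENT_MSB: 0x%02x\n",
	       ((hsync_offset >> 10) & 0x7) << 5);
	printf("SYNC_DECODER0: 0x%02x\n", (hsync_offset >> 2) & 0xff);
	printf("SYNC_DECODER1: 0x%02x\n",
	       ((hsync_offset & 0x3) << 6) | ((hsync_len >> 4) & 0x3f));
	printf("SYNC_DECODER2: 0x%02x\n",
	       ((hsync_len & 0xf) << 4) | ((vsync_offset >> 6) & 0xf));
	printf("SYNC_DECODER3: 0x%02x\n",
	       ((vsync_offset & 0x3f) << 2) | ((vsync_len >> 8) & 0x3));
	printf("SYNC_DECODER4: 0x%02x\n", vsync_len & 0xff);
	return 0;
}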
747
748static struct drm_encoder_slave_funcs adv7511_encoder_funcs = {
749 .dpms = adv7511_encoder_dpms,
750 .mode_valid = adv7511_encoder_mode_valid,
751 .mode_set = adv7511_encoder_mode_set,
752 .detect = adv7511_encoder_detect,
753 .get_modes = adv7511_get_modes,
754};
755
756/* -----------------------------------------------------------------------------
757 * Probe & remove
758 */
759
760static int adv7511_parse_dt(struct device_node *np,
761 struct adv7511_link_config *config)
762{
763 const char *str;
764 int ret;
765
766 memset(config, 0, sizeof(*config));
767
768 of_property_read_u32(np, "adi,input-depth", &config->input_color_depth);
769 if (config->input_color_depth != 8 && config->input_color_depth != 10 &&
770 config->input_color_depth != 12)
771 return -EINVAL;
772
773 ret = of_property_read_string(np, "adi,input-colorspace", &str);
774 if (ret < 0)
775 return ret;
776
777 if (!strcmp(str, "rgb"))
778 config->input_colorspace = HDMI_COLORSPACE_RGB;
779 else if (!strcmp(str, "yuv422"))
780 config->input_colorspace = HDMI_COLORSPACE_YUV422;
781 else if (!strcmp(str, "yuv444"))
782 config->input_colorspace = HDMI_COLORSPACE_YUV444;
783 else
784 return -EINVAL;
785
786 ret = of_property_read_string(np, "adi,input-clock", &str);
787 if (ret < 0)
788 return ret;
789
790 if (!strcmp(str, "1x"))
791 config->input_clock = ADV7511_INPUT_CLOCK_1X;
792 else if (!strcmp(str, "2x"))
793 config->input_clock = ADV7511_INPUT_CLOCK_2X;
794 else if (!strcmp(str, "ddr"))
795 config->input_clock = ADV7511_INPUT_CLOCK_DDR;
796 else
797 return -EINVAL;
798
799 if (config->input_colorspace == HDMI_COLORSPACE_YUV422 ||
800 config->input_clock != ADV7511_INPUT_CLOCK_1X) {
801 ret = of_property_read_u32(np, "adi,input-style",
802 &config->input_style);
803 if (ret)
804 return ret;
805
806 if (config->input_style < 1 || config->input_style > 3)
807 return -EINVAL;
808
809 ret = of_property_read_string(np, "adi,input-justification",
810 &str);
811 if (ret < 0)
812 return ret;
813
814 if (!strcmp(str, "left"))
815 config->input_justification =
816 ADV7511_INPUT_JUSTIFICATION_LEFT;
817 else if (!strcmp(str, "evenly"))
818 config->input_justification =
819 ADV7511_INPUT_JUSTIFICATION_EVENLY;
820 else if (!strcmp(str, "right"))
821 config->input_justification =
822 ADV7511_INPUT_JUSTIFICATION_RIGHT;
823 else
824 return -EINVAL;
825
826 } else {
827 config->input_style = 1;
828 config->input_justification = ADV7511_INPUT_JUSTIFICATION_LEFT;
829 }
830
831 of_property_read_u32(np, "adi,clock-delay", &config->clock_delay);
832 if (config->clock_delay < -1200 || config->clock_delay > 1600)
833 return -EINVAL;
834
835 config->embedded_sync = of_property_read_bool(np, "adi,embedded-sync");
836
837 /* Hardcode the sync pulse configurations for now. */
838 config->sync_pulse = ADV7511_INPUT_SYNC_PULSE_NONE;
839 config->vsync_polarity = ADV7511_SYNC_POLARITY_PASSTHROUGH;
840 config->hsync_polarity = ADV7511_SYNC_POLARITY_PASSTHROUGH;
841
842 return 0;
843}
844
845static const int edid_i2c_addr = 0x7e;
846static const int packet_i2c_addr = 0x70;
847static const int cec_i2c_addr = 0x78;
848
849static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
850{
851 struct adv7511_link_config link_config;
852 struct adv7511 *adv7511;
853 struct device *dev = &i2c->dev;
854 unsigned int val;
855 int ret;
856
857 if (!dev->of_node)
858 return -EINVAL;
859
860 adv7511 = devm_kzalloc(dev, sizeof(*adv7511), GFP_KERNEL);
861 if (!adv7511)
862 return -ENOMEM;
863
864 adv7511->dpms_mode = DRM_MODE_DPMS_OFF;
865 adv7511->status = connector_status_disconnected;
866
867 ret = adv7511_parse_dt(dev->of_node, &link_config);
868 if (ret)
869 return ret;
870
871 /*
872 * The power down GPIO is optional. If present, toggle it from active to
873 * inactive to wake up the encoder.
874 */
875 adv7511->gpio_pd = devm_gpiod_get_optional(dev, "pd", GPIOD_OUT_HIGH);
876 if (IS_ERR(adv7511->gpio_pd))
877 return PTR_ERR(adv7511->gpio_pd);
878
879 if (adv7511->gpio_pd) {
880 mdelay(5);
881 gpiod_set_value_cansleep(adv7511->gpio_pd, 0);
882 }
883
884 adv7511->regmap = devm_regmap_init_i2c(i2c, &adv7511_regmap_config);
885 if (IS_ERR(adv7511->regmap))
886 return PTR_ERR(adv7511->regmap);
887
888 ret = regmap_read(adv7511->regmap, ADV7511_REG_CHIP_REVISION, &val);
889 if (ret)
890 return ret;
891 dev_dbg(dev, "Rev. %d\n", val);
892
893 ret = regmap_register_patch(adv7511->regmap, adv7511_fixed_registers,
894 ARRAY_SIZE(adv7511_fixed_registers));
895 if (ret)
896 return ret;
897
898 regmap_write(adv7511->regmap, ADV7511_REG_EDID_I2C_ADDR, edid_i2c_addr);
899 regmap_write(adv7511->regmap, ADV7511_REG_PACKET_I2C_ADDR,
900 packet_i2c_addr);
901 regmap_write(adv7511->regmap, ADV7511_REG_CEC_I2C_ADDR, cec_i2c_addr);
902 adv7511_packet_disable(adv7511, 0xffff);
903
904 adv7511->i2c_main = i2c;
905 adv7511->i2c_edid = i2c_new_dummy(i2c->adapter, edid_i2c_addr >> 1);
906 if (!adv7511->i2c_edid)
907 return -ENOMEM;
908
909 if (i2c->irq) {
910 init_waitqueue_head(&adv7511->wq);
911
912 ret = devm_request_threaded_irq(dev, i2c->irq, NULL,
913 adv7511_irq_handler,
914 IRQF_ONESHOT, dev_name(dev),
915 adv7511);
916 if (ret)
917 goto err_i2c_unregister_device;
918 }
919
920 /* CEC is unused for now */
921 regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL,
922 ADV7511_CEC_CTRL_POWER_DOWN);
923
924 regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
925 ADV7511_POWER_POWER_DOWN, ADV7511_POWER_POWER_DOWN);
926
927 adv7511->current_edid_segment = -1;
928
929 i2c_set_clientdata(i2c, adv7511);
930
931 adv7511_set_link_config(adv7511, &link_config);
932
933 return 0;
934
935err_i2c_unregister_device:
936 i2c_unregister_device(adv7511->i2c_edid);
937
938 return ret;
939}
940
941static int adv7511_remove(struct i2c_client *i2c)
942{
943 struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
944
945 i2c_unregister_device(adv7511->i2c_edid);
946
947 kfree(adv7511->edid);
948
949 return 0;
950}
951
952static int adv7511_encoder_init(struct i2c_client *i2c, struct drm_device *dev,
953 struct drm_encoder_slave *encoder)
954{
955
956 struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
957
958 encoder->slave_priv = adv7511;
959 encoder->slave_funcs = &adv7511_encoder_funcs;
960
961 adv7511->encoder = &encoder->base;
962
963 return 0;
964}
965
966static const struct i2c_device_id adv7511_i2c_ids[] = {
967 { "adv7511", 0 },
968 { "adv7511w", 0 },
969 { "adv7513", 0 },
970 { }
971};
972MODULE_DEVICE_TABLE(i2c, adv7511_i2c_ids);
973
974static const struct of_device_id adv7511_of_ids[] = {
975 { .compatible = "adi,adv7511", },
976 { .compatible = "adi,adv7511w", },
977 { .compatible = "adi,adv7513", },
978 { }
979};
980MODULE_DEVICE_TABLE(of, adv7511_of_ids);
981
982static struct drm_i2c_encoder_driver adv7511_driver = {
983 .i2c_driver = {
984 .driver = {
985 .name = "adv7511",
986 .of_match_table = adv7511_of_ids,
987 },
988 .id_table = adv7511_i2c_ids,
989 .probe = adv7511_probe,
990 .remove = adv7511_remove,
991 },
992
993 .encoder_init = adv7511_encoder_init,
994};
995
996static int __init adv7511_init(void)
997{
998 return drm_i2c_encoder_register(THIS_MODULE, &adv7511_driver);
999}
1000module_init(adv7511_init);
1001
1002static void __exit adv7511_exit(void)
1003{
1004 drm_i2c_encoder_unregister(&adv7511_driver);
1005}
1006module_exit(adv7511_exit);
1007
1008MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
1009MODULE_DESCRIPTION("ADV7511 HDMI transmitter driver");
1010MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/i2c/adv7511.h b/drivers/gpu/drm/i2c/adv7511.h
new file mode 100644
index 000000000000..6599ed538426
--- /dev/null
+++ b/drivers/gpu/drm/i2c/adv7511.h
@@ -0,0 +1,289 @@
1/*
2 * Analog Devices ADV7511 HDMI transmitter driver
3 *
4 * Copyright 2012 Analog Devices Inc.
5 *
6 * Licensed under the GPL-2.
7 */
8
9#ifndef __DRM_I2C_ADV7511_H__
10#define __DRM_I2C_ADV7511_H__
11
12#include <linux/hdmi.h>
13
14#define ADV7511_REG_CHIP_REVISION 0x00
15#define ADV7511_REG_N0 0x01
16#define ADV7511_REG_N1 0x02
17#define ADV7511_REG_N2 0x03
18#define ADV7511_REG_SPDIF_FREQ 0x04
19#define ADV7511_REG_CTS_AUTOMATIC1 0x05
20#define ADV7511_REG_CTS_AUTOMATIC2 0x06
21#define ADV7511_REG_CTS_MANUAL0 0x07
22#define ADV7511_REG_CTS_MANUAL1 0x08
23#define ADV7511_REG_CTS_MANUAL2 0x09
24#define ADV7511_REG_AUDIO_SOURCE 0x0a
25#define ADV7511_REG_AUDIO_CONFIG 0x0b
26#define ADV7511_REG_I2S_CONFIG 0x0c
27#define ADV7511_REG_I2S_WIDTH 0x0d
28#define ADV7511_REG_AUDIO_SUB_SRC0 0x0e
29#define ADV7511_REG_AUDIO_SUB_SRC1 0x0f
30#define ADV7511_REG_AUDIO_SUB_SRC2 0x10
31#define ADV7511_REG_AUDIO_SUB_SRC3 0x11
32#define ADV7511_REG_AUDIO_CFG1 0x12
33#define ADV7511_REG_AUDIO_CFG2 0x13
34#define ADV7511_REG_AUDIO_CFG3 0x14
35#define ADV7511_REG_I2C_FREQ_ID_CFG 0x15
36#define ADV7511_REG_VIDEO_INPUT_CFG1 0x16
37#define ADV7511_REG_CSC_UPPER(x) (0x18 + (x) * 2)
38#define ADV7511_REG_CSC_LOWER(x) (0x19 + (x) * 2)
39#define ADV7511_REG_SYNC_DECODER(x) (0x30 + (x))
40#define ADV7511_REG_DE_GENERATOR(x) (0x35 + (x))
41#define ADV7511_REG_PIXEL_REPETITION 0x3b
42#define ADV7511_REG_VIC_MANUAL 0x3c
43#define ADV7511_REG_VIC_SEND 0x3d
44#define ADV7511_REG_VIC_DETECTED 0x3e
45#define ADV7511_REG_AUX_VIC_DETECTED 0x3f
46#define ADV7511_REG_PACKET_ENABLE0 0x40
47#define ADV7511_REG_POWER 0x41
48#define ADV7511_REG_STATUS 0x42
49#define ADV7511_REG_EDID_I2C_ADDR 0x43
50#define ADV7511_REG_PACKET_ENABLE1 0x44
51#define ADV7511_REG_PACKET_I2C_ADDR 0x45
52#define ADV7511_REG_DSD_ENABLE 0x46
53#define ADV7511_REG_VIDEO_INPUT_CFG2 0x48
54#define ADV7511_REG_INFOFRAME_UPDATE 0x4a
55#define ADV7511_REG_GC(x) (0x4b + (x)) /* 0x4b - 0x51 */
56#define ADV7511_REG_AVI_INFOFRAME_VERSION 0x52
57#define ADV7511_REG_AVI_INFOFRAME_LENGTH 0x53
58#define ADV7511_REG_AVI_INFOFRAME_CHECKSUM 0x54
59#define ADV7511_REG_AVI_INFOFRAME(x) (0x55 + (x)) /* 0x55 - 0x6f */
60#define ADV7511_REG_AUDIO_INFOFRAME_VERSION 0x70
61#define ADV7511_REG_AUDIO_INFOFRAME_LENGTH 0x71
62#define ADV7511_REG_AUDIO_INFOFRAME_CHECKSUM 0x72
63#define ADV7511_REG_AUDIO_INFOFRAME(x) (0x73 + (x)) /* 0x73 - 0x7c */
64#define ADV7511_REG_INT_ENABLE(x) (0x94 + (x))
65#define ADV7511_REG_INT(x) (0x96 + (x))
66#define ADV7511_REG_INPUT_CLK_DIV 0x9d
67#define ADV7511_REG_PLL_STATUS 0x9e
68#define ADV7511_REG_HDMI_POWER 0xa1
69#define ADV7511_REG_HDCP_HDMI_CFG 0xaf
70#define ADV7511_REG_AN(x) (0xb0 + (x)) /* 0xb0 - 0xb7 */
71#define ADV7511_REG_HDCP_STATUS 0xb8
72#define ADV7511_REG_BCAPS 0xbe
73#define ADV7511_REG_BKSV(x) (0xc0 + (x)) /* 0xc0 - 0xc3 */
74#define ADV7511_REG_EDID_SEGMENT 0xc4
75#define ADV7511_REG_DDC_STATUS 0xc8
76#define ADV7511_REG_EDID_READ_CTRL 0xc9
77#define ADV7511_REG_BSTATUS(x) (0xca + (x)) /* 0xca - 0xcb */
78#define ADV7511_REG_TIMING_GEN_SEQ 0xd0
79#define ADV7511_REG_POWER2 0xd6
80#define ADV7511_REG_HSYNC_PLACEMENT_MSB 0xfa
81
82#define ADV7511_REG_SYNC_ADJUSTMENT(x) (0xd7 + (x)) /* 0xd7 - 0xdc */
83#define ADV7511_REG_TMDS_CLOCK_INV 0xde
84#define ADV7511_REG_ARC_CTRL 0xdf
85#define ADV7511_REG_CEC_I2C_ADDR 0xe1
86#define ADV7511_REG_CEC_CTRL 0xe2
87#define ADV7511_REG_CHIP_ID_HIGH 0xf5
88#define ADV7511_REG_CHIP_ID_LOW 0xf6
89
90#define ADV7511_CSC_ENABLE BIT(7)
91#define ADV7511_CSC_UPDATE_MODE BIT(5)
92
93#define ADV7511_INT0_HDP BIT(7)
94#define ADV7511_INT0_VSYNC BIT(5)
95#define ADV7511_INT0_AUDIO_FIFO_FULL BIT(4)
96#define ADV7511_INT0_EDID_READY BIT(2)
97#define ADV7511_INT0_HDCP_AUTHENTICATED BIT(1)
98
99#define ADV7511_INT1_DDC_ERROR BIT(7)
100#define ADV7511_INT1_BKSV BIT(6)
101#define ADV7511_INT1_CEC_TX_READY BIT(5)
102#define ADV7511_INT1_CEC_TX_ARBIT_LOST BIT(4)
103#define ADV7511_INT1_CEC_TX_RETRY_TIMEOUT BIT(3)
104#define ADV7511_INT1_CEC_RX_READY3 BIT(2)
105#define ADV7511_INT1_CEC_RX_READY2 BIT(1)
106#define ADV7511_INT1_CEC_RX_READY1 BIT(0)
107
108#define ADV7511_ARC_CTRL_POWER_DOWN BIT(0)
109
110#define ADV7511_CEC_CTRL_POWER_DOWN BIT(0)
111
112#define ADV7511_POWER_POWER_DOWN BIT(6)
113
114#define ADV7511_HDMI_CFG_MODE_MASK 0x2
115#define ADV7511_HDMI_CFG_MODE_DVI 0x0
116#define ADV7511_HDMI_CFG_MODE_HDMI 0x2
117
118#define ADV7511_AUDIO_SELECT_I2C 0x0
119#define ADV7511_AUDIO_SELECT_SPDIF 0x1
120#define ADV7511_AUDIO_SELECT_DSD 0x2
121#define ADV7511_AUDIO_SELECT_HBR 0x3
122#define ADV7511_AUDIO_SELECT_DST 0x4
123
124#define ADV7511_I2S_SAMPLE_LEN_16 0x2
125#define ADV7511_I2S_SAMPLE_LEN_20 0x3
126#define ADV7511_I2S_SAMPLE_LEN_18 0x4
127#define ADV7511_I2S_SAMPLE_LEN_22 0x5
128#define ADV7511_I2S_SAMPLE_LEN_19 0x8
129#define ADV7511_I2S_SAMPLE_LEN_23 0x9
130#define ADV7511_I2S_SAMPLE_LEN_24 0xb
131#define ADV7511_I2S_SAMPLE_LEN_17 0xc
132#define ADV7511_I2S_SAMPLE_LEN_21 0xd
133
134#define ADV7511_SAMPLE_FREQ_44100 0x0
135#define ADV7511_SAMPLE_FREQ_48000 0x2
136#define ADV7511_SAMPLE_FREQ_32000 0x3
137#define ADV7511_SAMPLE_FREQ_88200 0x8
138#define ADV7511_SAMPLE_FREQ_96000 0xa
139#define ADV7511_SAMPLE_FREQ_176400 0xc
140#define ADV7511_SAMPLE_FREQ_192000 0xe
141
142#define ADV7511_STATUS_POWER_DOWN_POLARITY BIT(7)
143#define ADV7511_STATUS_HPD BIT(6)
144#define ADV7511_STATUS_MONITOR_SENSE BIT(5)
145#define ADV7511_STATUS_I2S_32BIT_MODE BIT(3)
146
147#define ADV7511_PACKET_ENABLE_N_CTS BIT(8+6)
148#define ADV7511_PACKET_ENABLE_AUDIO_SAMPLE BIT(8+5)
149#define ADV7511_PACKET_ENABLE_AVI_INFOFRAME BIT(8+4)
150#define ADV7511_PACKET_ENABLE_AUDIO_INFOFRAME BIT(8+3)
151#define ADV7511_PACKET_ENABLE_GC BIT(7)
152#define ADV7511_PACKET_ENABLE_SPD BIT(6)
153#define ADV7511_PACKET_ENABLE_MPEG BIT(5)
154#define ADV7511_PACKET_ENABLE_ACP BIT(4)
155#define ADV7511_PACKET_ENABLE_ISRC BIT(3)
156#define ADV7511_PACKET_ENABLE_GM BIT(2)
157#define ADV7511_PACKET_ENABLE_SPARE2 BIT(1)
158#define ADV7511_PACKET_ENABLE_SPARE1 BIT(0)
159
160#define ADV7511_REG_POWER2_HDP_SRC_MASK 0xc0
161#define ADV7511_REG_POWER2_HDP_SRC_BOTH 0x00
162#define ADV7511_REG_POWER2_HDP_SRC_HDP 0x40
163#define ADV7511_REG_POWER2_HDP_SRC_CEC 0x80
164#define ADV7511_REG_POWER2_HDP_SRC_NONE 0xc0
165#define ADV7511_REG_POWER2_TDMS_ENABLE BIT(4)
166#define ADV7511_REG_POWER2_GATE_INPUT_CLK BIT(0)
167
168#define ADV7511_LOW_REFRESH_RATE_NONE 0x0
169#define ADV7511_LOW_REFRESH_RATE_24HZ 0x1
170#define ADV7511_LOW_REFRESH_RATE_25HZ 0x2
171#define ADV7511_LOW_REFRESH_RATE_30HZ 0x3
172
173#define ADV7511_AUDIO_CFG3_LEN_MASK 0x0f
174#define ADV7511_I2C_FREQ_ID_CFG_RATE_MASK 0xf0
175
176#define ADV7511_AUDIO_SOURCE_I2S 0
177#define ADV7511_AUDIO_SOURCE_SPDIF 1
178
179#define ADV7511_I2S_FORMAT_I2S 0
180#define ADV7511_I2S_FORMAT_RIGHT_J 1
181#define ADV7511_I2S_FORMAT_LEFT_J 2
182
183#define ADV7511_PACKET(p, x) ((p) * 0x20 + (x))
184#define ADV7511_PACKET_SDP(x) ADV7511_PACKET(0, x)
185#define ADV7511_PACKET_MPEG(x) ADV7511_PACKET(1, x)
186#define ADV7511_PACKET_ACP(x) ADV7511_PACKET(2, x)
187#define ADV7511_PACKET_ISRC1(x) ADV7511_PACKET(3, x)
188#define ADV7511_PACKET_ISRC2(x) ADV7511_PACKET(4, x)
189#define ADV7511_PACKET_GM(x) ADV7511_PACKET(5, x)
190#define ADV7511_PACKET_SPARE(x) ADV7511_PACKET(6, x)
191
192enum adv7511_input_clock {
193 ADV7511_INPUT_CLOCK_1X,
194 ADV7511_INPUT_CLOCK_2X,
195 ADV7511_INPUT_CLOCK_DDR,
196};
197
198enum adv7511_input_justification {
199 ADV7511_INPUT_JUSTIFICATION_EVENLY = 0,
200 ADV7511_INPUT_JUSTIFICATION_RIGHT = 1,
201 ADV7511_INPUT_JUSTIFICATION_LEFT = 2,
202};
203
204enum adv7511_input_sync_pulse {
205 ADV7511_INPUT_SYNC_PULSE_DE = 0,
206 ADV7511_INPUT_SYNC_PULSE_HSYNC = 1,
207 ADV7511_INPUT_SYNC_PULSE_VSYNC = 2,
208 ADV7511_INPUT_SYNC_PULSE_NONE = 3,
209};
210
211/**
212 * enum adv7511_sync_polarity - Polarity for the input sync signals
213 * @ADV7511_SYNC_POLARITY_PASSTHROUGH: Sync polarity matches that of
214 * the currently configured mode.
215 * @ADV7511_SYNC_POLARITY_LOW: Sync polarity is low
216 * @ADV7511_SYNC_POLARITY_HIGH: Sync polarity is high
217 *
218 * If the polarity is set to either LOW or HIGH the driver will configure the
219 * ADV7511 to internally invert the sync signal if required to match the sync
220 * polarity setting for the currently selected output mode.
221 *
222 * If the polarity is set to PASSTHROUGH, the ADV7511 will route the signal
223 * unchanged. This is used when the upstream graphics core already generates
224 * the sync signals with the correct polarity.
225 */
226enum adv7511_sync_polarity {
227 ADV7511_SYNC_POLARITY_PASSTHROUGH,
228 ADV7511_SYNC_POLARITY_LOW,
229 ADV7511_SYNC_POLARITY_HIGH,
230};
231
232/**
233 * struct adv7511_link_config - Describes adv7511 hardware configuration
234 * @input_color_depth: Number of bits per color component (8, 10 or 12)
235 * @input_colorspace: The input colorspace (RGB, YUV444, YUV422)
236 * @input_clock: The input video clock style (1x, 2x, DDR)
237 * @input_style: The input component arrangement variant
238 * @input_justification: Video input format bit justification
239 * @clock_delay: Clock delay for the input clock (in ps)
240 * @embedded_sync: Video input uses BT.656-style embedded sync
241 * @sync_pulse: Select the sync pulse
242 * @vsync_polarity: vsync input signal configuration
243 * @hsync_polarity: hsync input signal configuration
244 */
245struct adv7511_link_config {
246 unsigned int input_color_depth;
247 enum hdmi_colorspace input_colorspace;
248 enum adv7511_input_clock input_clock;
249 unsigned int input_style;
250 enum adv7511_input_justification input_justification;
251
252 int clock_delay;
253
254 bool embedded_sync;
255 enum adv7511_input_sync_pulse sync_pulse;
256 enum adv7511_sync_polarity vsync_polarity;
257 enum adv7511_sync_polarity hsync_polarity;
258};
259
260/**
261 * enum adv7511_csc_scaling - Scaling factor for the ADV7511 CSC
262 * @ADV7511_CSC_SCALING_1: CSC results are not scaled
263 * @ADV7511_CSC_SCALING_2: CSC results are scaled by a factor of two
264 * @ADV7511_CSC_SCALING_4: CSC results are scaled by a factor of four
265 */
266enum adv7511_csc_scaling {
267 ADV7511_CSC_SCALING_1 = 0,
268 ADV7511_CSC_SCALING_2 = 1,
269 ADV7511_CSC_SCALING_4 = 2,
270};
271
272/**
273 * struct adv7511_video_config - Describes adv7511 hardware configuration
274 * @csc_enable: Whether to enable color space conversion
275 * @csc_scaling_factor: Color space conversion scaling factor
276 * @csc_coefficents: Color space conversion coefficients
277 * @hdmi_mode: Whether to use HDMI or DVI output mode
278 * @avi_infoframe: HDMI infoframe
279 */
280struct adv7511_video_config {
281 bool csc_enable;
282 enum adv7511_csc_scaling csc_scaling_factor;
283 const uint16_t *csc_coefficents;
284
285 bool hdmi_mode;
286 struct hdmi_avi_infoframe avi_infoframe;
287};
288
289#endif /* __DRM_I2C_ADV7511_H__ */
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index c1dd485aeb6c..e4083e41a600 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -11,7 +11,9 @@ i915-y := i915_drv.o \
11 i915_params.o \ 11 i915_params.o \
12 i915_suspend.o \ 12 i915_suspend.o \
13 i915_sysfs.o \ 13 i915_sysfs.o \
14 intel_pm.o 14 intel_pm.o \
15 intel_runtime_pm.o
16
15i915-$(CONFIG_COMPAT) += i915_ioc32.o 17i915-$(CONFIG_COMPAT) += i915_ioc32.o
16i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o 18i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
17 19
@@ -38,13 +40,18 @@ i915-y += i915_cmd_parser.o \
38# autogenerated null render state 40# autogenerated null render state
39i915-y += intel_renderstate_gen6.o \ 41i915-y += intel_renderstate_gen6.o \
40 intel_renderstate_gen7.o \ 42 intel_renderstate_gen7.o \
41 intel_renderstate_gen8.o 43 intel_renderstate_gen8.o \
44 intel_renderstate_gen9.o
42 45
43# modesetting core code 46# modesetting core code
44i915-y += intel_bios.o \ 47i915-y += intel_audio.o \
48 intel_bios.o \
45 intel_display.o \ 49 intel_display.o \
50 intel_fifo_underrun.o \
51 intel_frontbuffer.o \
46 intel_modes.o \ 52 intel_modes.o \
47 intel_overlay.o \ 53 intel_overlay.o \
54 intel_psr.o \
48 intel_sideband.o \ 55 intel_sideband.o \
49 intel_sprite.o 56 intel_sprite.o
50i915-$(CONFIG_ACPI) += intel_acpi.o intel_opregion.o 57i915-$(CONFIG_ACPI) += intel_acpi.o intel_opregion.o
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index 593b657d3e59..22c992a78ac6 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -73,7 +73,7 @@
73 * those commands required by the parser. This generally works because command 73 * those commands required by the parser. This generally works because command
74 * opcode ranges have standard command length encodings. So for commands that 74 * opcode ranges have standard command length encodings. So for commands that
75 * the parser does not need to check, it can easily skip them. This is 75 * the parser does not need to check, it can easily skip them. This is
76 * implementated via a per-ring length decoding vfunc. 76 * implemented via a per-ring length decoding vfunc.
77 * 77 *
78 * Unfortunately, there are a number of commands that do not follow the standard 78 * Unfortunately, there are a number of commands that do not follow the standard
79 * length encoding for their opcode range, primarily amongst the MI_* commands. 79 * length encoding for their opcode range, primarily amongst the MI_* commands.
@@ -138,6 +138,11 @@ static const struct drm_i915_cmd_descriptor common_cmds[] = {
138 .mask = MI_GLOBAL_GTT, 138 .mask = MI_GLOBAL_GTT,
139 .expected = 0, 139 .expected = 0,
140 }}, ), 140 }}, ),
141 /*
142 * MI_BATCH_BUFFER_START requires some special handling. It's not
143 * really a 'skip' action but it doesn't seem like it's worth adding
144 * a new action. See i915_parse_cmds().
145 */
141 CMD( MI_BATCH_BUFFER_START, SMI, !F, 0xFF, S ), 146 CMD( MI_BATCH_BUFFER_START, SMI, !F, 0xFF, S ),
142}; 147};
143 148
@@ -408,6 +413,8 @@ static const u32 gen7_render_regs[] = {
408 REG64(PS_INVOCATION_COUNT), 413 REG64(PS_INVOCATION_COUNT),
409 REG64(PS_DEPTH_COUNT), 414 REG64(PS_DEPTH_COUNT),
410 OACONTROL, /* Only allowed for LRI and SRM. See below. */ 415 OACONTROL, /* Only allowed for LRI and SRM. See below. */
416 REG64(MI_PREDICATE_SRC0),
417 REG64(MI_PREDICATE_SRC1),
411 GEN7_3DPRIM_END_OFFSET, 418 GEN7_3DPRIM_END_OFFSET,
412 GEN7_3DPRIM_START_VERTEX, 419 GEN7_3DPRIM_START_VERTEX,
413 GEN7_3DPRIM_VERTEX_COUNT, 420 GEN7_3DPRIM_VERTEX_COUNT,
@@ -838,7 +845,7 @@ finish:
838 * @ring: the ring in question 845 * @ring: the ring in question
839 * 846 *
840 * Only certain platforms require software batch buffer command parsing, and 847 * Only certain platforms require software batch buffer command parsing, and
841 * only when enabled via module paramter. 848 * only when enabled via module parameter.
842 * 849 *
843 * Return: true if the ring requires software command parsing 850 * Return: true if the ring requires software command parsing
844 */ 851 */
@@ -847,12 +854,7 @@ bool i915_needs_cmd_parser(struct intel_engine_cs *ring)
847 if (!ring->needs_cmd_parser) 854 if (!ring->needs_cmd_parser)
848 return false; 855 return false;
849 856
850 /* 857 if (!USES_PPGTT(ring->dev))
851 * XXX: VLV is Gen7 and therefore has cmd_tables, but has PPGTT
852 * disabled. That will cause all of the parser's PPGTT checks to
853 * fail. For now, disable parsing when PPGTT is off.
854 */
855 if (USES_PPGTT(ring->dev))
856 return false; 858 return false;
857 859
858 return (i915.enable_cmd_parser == 1); 860 return (i915.enable_cmd_parser == 1);
@@ -888,8 +890,10 @@ static bool check_cmd(const struct intel_engine_cs *ring,
888 * OACONTROL writes to only MI_LOAD_REGISTER_IMM commands. 890 * OACONTROL writes to only MI_LOAD_REGISTER_IMM commands.
889 */ 891 */
890 if (reg_addr == OACONTROL) { 892 if (reg_addr == OACONTROL) {
891 if (desc->cmd.value == MI_LOAD_REGISTER_MEM) 893 if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
894 DRM_DEBUG_DRIVER("CMD: Rejected LRM to OACONTROL\n");
892 return false; 895 return false;
896 }
893 897
894 if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1)) 898 if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1))
895 *oacontrol_set = (cmd[2] != 0); 899 *oacontrol_set = (cmd[2] != 0);
@@ -958,7 +962,8 @@ static bool check_cmd(const struct intel_engine_cs *ring,
958 * Parses the specified batch buffer looking for privilege violations as 962 * Parses the specified batch buffer looking for privilege violations as
959 * described in the overview. 963 * described in the overview.
960 * 964 *
961 * Return: non-zero if the parser finds violations or otherwise fails 965 * Return: non-zero if the parser finds violations or otherwise fails; -EACCES
966 * if the batch appears legal but should use hardware parsing
962 */ 967 */
963int i915_parse_cmds(struct intel_engine_cs *ring, 968int i915_parse_cmds(struct intel_engine_cs *ring,
964 struct drm_i915_gem_object *batch_obj, 969 struct drm_i915_gem_object *batch_obj,
@@ -1005,6 +1010,16 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
1005 break; 1010 break;
1006 } 1011 }
1007 1012
1013 /*
1014 * If the batch buffer contains a chained batch, return an
1015 * error that tells the caller to abort and dispatch the
1016 * workload as a non-secure batch.
1017 */
1018 if (desc->cmd.value == MI_BATCH_BUFFER_START) {
1019 ret = -EACCES;
1020 break;
1021 }
1022
1008 if (desc->flags & CMD_DESC_FIXED) 1023 if (desc->flags & CMD_DESC_FIXED)
1009 length = desc->length.fixed; 1024 length = desc->length.fixed;
1010 else 1025 else
@@ -1059,6 +1074,8 @@ int i915_cmd_parser_get_version(void)
1059 * 1074 *
1060 * 1. Initial version. Checks batches and reports violations, but leaves 1075 * 1. Initial version. Checks batches and reports violations, but leaves
1061 * hardware parsing enabled (so does not allow new use cases). 1076 * hardware parsing enabled (so does not allow new use cases).
1077 * 2. Allow access to the MI_PREDICATE_SRC0 and
1078 * MI_PREDICATE_SRC1 registers.
1062 */ 1079 */
1063 return 1; 1080 return 2;
1064} 1081}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 063b44817e08..779a275eb1fd 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -116,7 +116,7 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
116 116
117static inline const char *get_global_flag(struct drm_i915_gem_object *obj) 117static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
118{ 118{
119 return obj->has_global_gtt_mapping ? "g" : " "; 119 return i915_gem_obj_to_ggtt(obj) ? "g" : " ";
120} 120}
121 121
122static void 122static void
@@ -516,7 +516,6 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
516 struct drm_info_node *node = m->private; 516 struct drm_info_node *node = m->private;
517 struct drm_device *dev = node->minor->dev; 517 struct drm_device *dev = node->minor->dev;
518 struct drm_i915_private *dev_priv = dev->dev_private; 518 struct drm_i915_private *dev_priv = dev->dev_private;
519 unsigned long flags;
520 struct intel_crtc *crtc; 519 struct intel_crtc *crtc;
521 int ret; 520 int ret;
522 521
@@ -529,7 +528,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
529 const char plane = plane_name(crtc->plane); 528 const char plane = plane_name(crtc->plane);
530 struct intel_unpin_work *work; 529 struct intel_unpin_work *work;
531 530
532 spin_lock_irqsave(&dev->event_lock, flags); 531 spin_lock_irq(&dev->event_lock);
533 work = crtc->unpin_work; 532 work = crtc->unpin_work;
534 if (work == NULL) { 533 if (work == NULL) {
535 seq_printf(m, "No flip due on pipe %c (plane %c)\n", 534 seq_printf(m, "No flip due on pipe %c (plane %c)\n",
@@ -575,7 +574,7 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
575 seq_printf(m, "MMIO update completed? %d\n", addr == work->gtt_offset); 574 seq_printf(m, "MMIO update completed? %d\n", addr == work->gtt_offset);
576 } 575 }
577 } 576 }
578 spin_unlock_irqrestore(&dev->event_lock, flags); 577 spin_unlock_irq(&dev->event_lock);
579 } 578 }
580 579
581 mutex_unlock(&dev->struct_mutex); 580 mutex_unlock(&dev->struct_mutex);
@@ -717,7 +716,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
717 } 716 }
718 717
719 for_each_pipe(dev_priv, pipe) { 718 for_each_pipe(dev_priv, pipe) {
720 if (!intel_display_power_enabled(dev_priv, 719 if (!intel_display_power_is_enabled(dev_priv,
721 POWER_DOMAIN_PIPE(pipe))) { 720 POWER_DOMAIN_PIPE(pipe))) {
722 seq_printf(m, "Pipe %c power disabled\n", 721 seq_printf(m, "Pipe %c power disabled\n",
723 pipe_name(pipe)); 722 pipe_name(pipe));
@@ -1241,11 +1240,12 @@ static int vlv_drpc_info(struct seq_file *m)
1241 struct drm_info_node *node = m->private; 1240 struct drm_info_node *node = m->private;
1242 struct drm_device *dev = node->minor->dev; 1241 struct drm_device *dev = node->minor->dev;
1243 struct drm_i915_private *dev_priv = dev->dev_private; 1242 struct drm_i915_private *dev_priv = dev->dev_private;
1244 u32 rpmodectl1, rcctl1; 1243 u32 rpmodectl1, rcctl1, pw_status;
1245 unsigned fw_rendercount = 0, fw_mediacount = 0; 1244 unsigned fw_rendercount = 0, fw_mediacount = 0;
1246 1245
1247 intel_runtime_pm_get(dev_priv); 1246 intel_runtime_pm_get(dev_priv);
1248 1247
1248 pw_status = I915_READ(VLV_GTLC_PW_STATUS);
1249 rpmodectl1 = I915_READ(GEN6_RP_CONTROL); 1249 rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
1250 rcctl1 = I915_READ(GEN6_RC_CONTROL); 1250 rcctl1 = I915_READ(GEN6_RC_CONTROL);
1251 1251
@@ -1264,11 +1264,9 @@ static int vlv_drpc_info(struct seq_file *m)
1264 yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE | 1264 yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
1265 GEN6_RC_CTL_EI_MODE(1)))); 1265 GEN6_RC_CTL_EI_MODE(1))));
1266 seq_printf(m, "Render Power Well: %s\n", 1266 seq_printf(m, "Render Power Well: %s\n",
1267 (I915_READ(VLV_GTLC_PW_STATUS) & 1267 (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
1268 VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
1269 seq_printf(m, "Media Power Well: %s\n", 1268 seq_printf(m, "Media Power Well: %s\n",
1270 (I915_READ(VLV_GTLC_PW_STATUS) & 1269 (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
1271 VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
1272 1270
1273 seq_printf(m, "Render RC6 residency since boot: %u\n", 1271 seq_printf(m, "Render RC6 residency since boot: %u\n",
1274 I915_READ(VLV_GT_RENDER_RC6)); 1272 I915_READ(VLV_GT_RENDER_RC6));
@@ -1774,6 +1772,50 @@ static int i915_context_status(struct seq_file *m, void *unused)
1774 return 0; 1772 return 0;
1775} 1773}
1776 1774
1775static void i915_dump_lrc_obj(struct seq_file *m,
1776 struct intel_engine_cs *ring,
1777 struct drm_i915_gem_object *ctx_obj)
1778{
1779 struct page *page;
1780 uint32_t *reg_state;
1781 int j;
1782 unsigned long ggtt_offset = 0;
1783
1784 if (ctx_obj == NULL) {
1785 seq_printf(m, "Context on %s with no gem object\n",
1786 ring->name);
1787 return;
1788 }
1789
1790 seq_printf(m, "CONTEXT: %s %u\n", ring->name,
1791 intel_execlists_ctx_id(ctx_obj));
1792
1793 if (!i915_gem_obj_ggtt_bound(ctx_obj))
1794 seq_puts(m, "\tNot bound in GGTT\n");
1795 else
1796 ggtt_offset = i915_gem_obj_ggtt_offset(ctx_obj);
1797
1798 if (i915_gem_object_get_pages(ctx_obj)) {
1799 seq_puts(m, "\tFailed to get pages for context object\n");
1800 return;
1801 }
1802
1803 page = i915_gem_object_get_page(ctx_obj, 1);
1804 if (!WARN_ON(page == NULL)) {
1805 reg_state = kmap_atomic(page);
1806
1807 for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
1808 seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n",
1809 ggtt_offset + 4096 + (j * 4),
1810 reg_state[j], reg_state[j + 1],
1811 reg_state[j + 2], reg_state[j + 3]);
1812 }
1813 kunmap_atomic(reg_state);
1814 }
1815
1816 seq_putc(m, '\n');
1817}
1818
1777static int i915_dump_lrc(struct seq_file *m, void *unused) 1819static int i915_dump_lrc(struct seq_file *m, void *unused)
1778{ 1820{
1779 struct drm_info_node *node = (struct drm_info_node *) m->private; 1821 struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -1794,29 +1836,9 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
1794 1836
1795 list_for_each_entry(ctx, &dev_priv->context_list, link) { 1837 list_for_each_entry(ctx, &dev_priv->context_list, link) {
1796 for_each_ring(ring, dev_priv, i) { 1838 for_each_ring(ring, dev_priv, i) {
1797 struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state; 1839 if (ring->default_context != ctx)
1798 1840 i915_dump_lrc_obj(m, ring,
1799 if (ring->default_context == ctx) 1841 ctx->engine[i].state);
1800 continue;
1801
1802 if (ctx_obj) {
1803 struct page *page = i915_gem_object_get_page(ctx_obj, 1);
1804 uint32_t *reg_state = kmap_atomic(page);
1805 int j;
1806
1807 seq_printf(m, "CONTEXT: %s %u\n", ring->name,
1808 intel_execlists_ctx_id(ctx_obj));
1809
1810 for (j = 0; j < 0x600 / sizeof(u32) / 4; j += 4) {
1811 seq_printf(m, "\t[0x%08lx] 0x%08x 0x%08x 0x%08x 0x%08x\n",
1812 i915_gem_obj_ggtt_offset(ctx_obj) + 4096 + (j * 4),
1813 reg_state[j], reg_state[j + 1],
1814 reg_state[j + 2], reg_state[j + 3]);
1815 }
1816 kunmap_atomic(reg_state);
1817
1818 seq_putc(m, '\n');
1819 }
1820 } 1842 }
1821 } 1843 }
1822 1844
@@ -1849,6 +1871,8 @@ static int i915_execlists(struct seq_file *m, void *data)
1849 if (ret) 1871 if (ret)
1850 return ret; 1872 return ret;
1851 1873
1874 intel_runtime_pm_get(dev_priv);
1875
1852 for_each_ring(ring, dev_priv, ring_id) { 1876 for_each_ring(ring, dev_priv, ring_id) {
1853 struct intel_ctx_submit_request *head_req = NULL; 1877 struct intel_ctx_submit_request *head_req = NULL;
1854 int count = 0; 1878 int count = 0;
@@ -1900,6 +1924,7 @@ static int i915_execlists(struct seq_file *m, void *data)
1900 seq_putc(m, '\n'); 1924 seq_putc(m, '\n');
1901 } 1925 }
1902 1926
1927 intel_runtime_pm_put(dev_priv);
1903 mutex_unlock(&dev->struct_mutex); 1928 mutex_unlock(&dev->struct_mutex);
1904 1929
1905 return 0; 1930 return 0;
@@ -1973,6 +1998,8 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
1973 if (IS_GEN3(dev) || IS_GEN4(dev)) { 1998 if (IS_GEN3(dev) || IS_GEN4(dev)) {
1974 seq_printf(m, "DDC = 0x%08x\n", 1999 seq_printf(m, "DDC = 0x%08x\n",
1975 I915_READ(DCC)); 2000 I915_READ(DCC));
2001 seq_printf(m, "DDC2 = 0x%08x\n",
2002 I915_READ(DCC2));
1976 seq_printf(m, "C0DRB3 = 0x%04x\n", 2003 seq_printf(m, "C0DRB3 = 0x%04x\n",
1977 I915_READ16(C0DRB3)); 2004 I915_READ16(C0DRB3));
1978 seq_printf(m, "C1DRB3 = 0x%04x\n", 2005 seq_printf(m, "C1DRB3 = 0x%04x\n",
@@ -1986,7 +2013,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
1986 I915_READ(MAD_DIMM_C2)); 2013 I915_READ(MAD_DIMM_C2));
1987 seq_printf(m, "TILECTL = 0x%08x\n", 2014 seq_printf(m, "TILECTL = 0x%08x\n",
1988 I915_READ(TILECTL)); 2015 I915_READ(TILECTL));
1989 if (IS_GEN8(dev)) 2016 if (INTEL_INFO(dev)->gen >= 8)
1990 seq_printf(m, "GAMTARBMODE = 0x%08x\n", 2017 seq_printf(m, "GAMTARBMODE = 0x%08x\n",
1991 I915_READ(GAMTARBMODE)); 2018 I915_READ(GAMTARBMODE));
1992 else 2019 else
@@ -1995,6 +2022,10 @@ static int i915_swizzle_info(struct seq_file *m, void *data)
1995 seq_printf(m, "DISP_ARB_CTL = 0x%08x\n", 2022 seq_printf(m, "DISP_ARB_CTL = 0x%08x\n",
1996 I915_READ(DISP_ARB_CTL)); 2023 I915_READ(DISP_ARB_CTL));
1997 } 2024 }
2025
2026 if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
2027 seq_puts(m, "L-shaped memory detected\n");
2028
1998 intel_runtime_pm_put(dev_priv); 2029 intel_runtime_pm_put(dev_priv);
1999 mutex_unlock(&dev->struct_mutex); 2030 mutex_unlock(&dev->struct_mutex);
2000 2031
@@ -2628,14 +2659,15 @@ static int i915_shared_dplls_info(struct seq_file *m, void *unused)
2628 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; 2659 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
2629 2660
2630 seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id); 2661 seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->name, pll->id);
2631 seq_printf(m, " refcount: %i, active: %i, on: %s\n", pll->refcount, 2662 seq_printf(m, " crtc_mask: 0x%08x, active: %d, on: %s\n",
2632 pll->active, yesno(pll->on)); 2663 pll->config.crtc_mask, pll->active, yesno(pll->on));
2633 seq_printf(m, " tracked hardware state:\n"); 2664 seq_printf(m, " tracked hardware state:\n");
2634 seq_printf(m, " dpll: 0x%08x\n", pll->hw_state.dpll); 2665 seq_printf(m, " dpll: 0x%08x\n", pll->config.hw_state.dpll);
2635 seq_printf(m, " dpll_md: 0x%08x\n", pll->hw_state.dpll_md); 2666 seq_printf(m, " dpll_md: 0x%08x\n",
2636 seq_printf(m, " fp0: 0x%08x\n", pll->hw_state.fp0); 2667 pll->config.hw_state.dpll_md);
2637 seq_printf(m, " fp1: 0x%08x\n", pll->hw_state.fp1); 2668 seq_printf(m, " fp0: 0x%08x\n", pll->config.hw_state.fp0);
2638 seq_printf(m, " wrpll: 0x%08x\n", pll->hw_state.wrpll); 2669 seq_printf(m, " fp1: 0x%08x\n", pll->config.hw_state.fp1);
2670 seq_printf(m, " wrpll: 0x%08x\n", pll->config.hw_state.wrpll);
2639 } 2671 }
2640 drm_modeset_unlock_all(dev); 2672 drm_modeset_unlock_all(dev);
2641 2673
@@ -2656,18 +2688,18 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
2656 2688
2657 intel_runtime_pm_get(dev_priv); 2689 intel_runtime_pm_get(dev_priv);
2658 2690
2659 seq_printf(m, "Workarounds applied: %d\n", dev_priv->num_wa_regs); 2691 seq_printf(m, "Workarounds applied: %d\n", dev_priv->workarounds.count);
2660 for (i = 0; i < dev_priv->num_wa_regs; ++i) { 2692 for (i = 0; i < dev_priv->workarounds.count; ++i) {
2661 u32 addr, mask; 2693 u32 addr, mask, value, read;
2662 2694 bool ok;
2663 addr = dev_priv->intel_wa_regs[i].addr; 2695
2664 mask = dev_priv->intel_wa_regs[i].mask; 2696 addr = dev_priv->workarounds.reg[i].addr;
2665 dev_priv->intel_wa_regs[i].value = I915_READ(addr) | mask; 2697 mask = dev_priv->workarounds.reg[i].mask;
2666 if (dev_priv->intel_wa_regs[i].addr) 2698 value = dev_priv->workarounds.reg[i].value;
2667 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n", 2699 read = I915_READ(addr);
2668 dev_priv->intel_wa_regs[i].addr, 2700 ok = (value & mask) == (read & mask);
2669 dev_priv->intel_wa_regs[i].value, 2701 seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X, read: 0x%08x, status: %s\n",
2670 dev_priv->intel_wa_regs[i].mask); 2702 addr, value, mask, read, ok ? "OK" : "FAIL");
2671 } 2703 }
2672 2704
2673 intel_runtime_pm_put(dev_priv); 2705 intel_runtime_pm_put(dev_priv);
@@ -2676,6 +2708,42 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
2676 return 0; 2708 return 0;
2677} 2709}
2678 2710
2711static int i915_ddb_info(struct seq_file *m, void *unused)
2712{
2713 struct drm_info_node *node = m->private;
2714 struct drm_device *dev = node->minor->dev;
2715 struct drm_i915_private *dev_priv = dev->dev_private;
2716 struct skl_ddb_allocation *ddb;
2717 struct skl_ddb_entry *entry;
2718 enum pipe pipe;
2719 int plane;
2720
2721 drm_modeset_lock_all(dev);
2722
2723 ddb = &dev_priv->wm.skl_hw.ddb;
2724
2725 seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
2726
2727 for_each_pipe(dev_priv, pipe) {
2728 seq_printf(m, "Pipe %c\n", pipe_name(pipe));
2729
2730 for_each_plane(pipe, plane) {
2731 entry = &ddb->plane[pipe][plane];
2732 seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane + 1,
2733 entry->start, entry->end,
2734 skl_ddb_entry_size(entry));
2735 }
2736
2737 entry = &ddb->cursor[pipe];
2738 seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start,
2739 entry->end, skl_ddb_entry_size(entry));
2740 }
2741
2742 drm_modeset_unlock_all(dev);
2743
2744 return 0;
2745}
2746
2679struct pipe_crc_info { 2747struct pipe_crc_info {
2680 const char *name; 2748 const char *name;
2681 struct drm_device *dev; 2749 struct drm_device *dev;
@@ -2969,6 +3037,8 @@ static int i9xx_pipe_crc_auto_source(struct drm_device *dev, enum pipe pipe,
2969 break; 3037 break;
2970 } 3038 }
2971 break; 3039 break;
3040 default:
3041 break;
2972 } 3042 }
2973 } 3043 }
2974 drm_modeset_unlock_all(dev); 3044 drm_modeset_unlock_all(dev);
@@ -3256,6 +3326,8 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
3256{ 3326{
3257 struct drm_i915_private *dev_priv = dev->dev_private; 3327 struct drm_i915_private *dev_priv = dev->dev_private;
3258 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; 3328 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
3329 struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev,
3330 pipe));
3259 u32 val = 0; /* shut up gcc */ 3331 u32 val = 0; /* shut up gcc */
3260 int ret; 3332 int ret;
3261 3333
@@ -3266,6 +3338,11 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
3266 if (pipe_crc->source && source) 3338 if (pipe_crc->source && source)
3267 return -EINVAL; 3339 return -EINVAL;
3268 3340
3341 if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe))) {
3342 DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
3343 return -EIO;
3344 }
3345
3269 if (IS_GEN2(dev)) 3346 if (IS_GEN2(dev))
3270 ret = i8xx_pipe_crc_ctl_reg(&source, &val); 3347 ret = i8xx_pipe_crc_ctl_reg(&source, &val);
3271 else if (INTEL_INFO(dev)->gen < 5) 3348 else if (INTEL_INFO(dev)->gen < 5)
@@ -3291,6 +3368,14 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
3291 if (!pipe_crc->entries) 3368 if (!pipe_crc->entries)
3292 return -ENOMEM; 3369 return -ENOMEM;
3293 3370
3371 /*
3372 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
3373 * enabled and disabled dynamically based on package C states,
3374 * user space can't make reliable use of the CRCs, so let's just
3375 * completely disable it.
3376 */
3377 hsw_disable_ips(crtc);
3378
3294 spin_lock_irq(&pipe_crc->lock); 3379 spin_lock_irq(&pipe_crc->lock);
3295 pipe_crc->head = 0; 3380 pipe_crc->head = 0;
3296 pipe_crc->tail = 0; 3381 pipe_crc->tail = 0;
@@ -3329,6 +3414,8 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
3329 vlv_undo_pipe_scramble_reset(dev, pipe); 3414 vlv_undo_pipe_scramble_reset(dev, pipe);
3330 else if (IS_HASWELL(dev) && pipe == PIPE_A) 3415 else if (IS_HASWELL(dev) && pipe == PIPE_A)
3331 hsw_undo_trans_edp_pipe_A_crc_wa(dev); 3416 hsw_undo_trans_edp_pipe_A_crc_wa(dev);
3417
3418 hsw_enable_ips(crtc);
3332 } 3419 }
3333 3420
3334 return 0; 3421 return 0;
@@ -3506,7 +3593,7 @@ static const struct file_operations i915_display_crc_ctl_fops = {
3506 .write = display_crc_ctl_write 3593 .write = display_crc_ctl_write
3507}; 3594};
3508 3595
3509static void wm_latency_show(struct seq_file *m, const uint16_t wm[5]) 3596static void wm_latency_show(struct seq_file *m, const uint16_t wm[8])
3510{ 3597{
3511 struct drm_device *dev = m->private; 3598 struct drm_device *dev = m->private;
3512 int num_levels = ilk_wm_max_level(dev) + 1; 3599 int num_levels = ilk_wm_max_level(dev) + 1;
@@ -3517,13 +3604,17 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[5])
3517 for (level = 0; level < num_levels; level++) { 3604 for (level = 0; level < num_levels; level++) {
3518 unsigned int latency = wm[level]; 3605 unsigned int latency = wm[level];
3519 3606
3520 /* WM1+ latency values in 0.5us units */ 3607 /*
3521 if (level > 0) 3608 * - WM1+ latency values in 0.5us units
3609 * - latencies are in us on gen9
3610 */
3611 if (INTEL_INFO(dev)->gen >= 9)
3612 latency *= 10;
3613 else if (level > 0)
3522 latency *= 5; 3614 latency *= 5;
3523 3615
3524 seq_printf(m, "WM%d %u (%u.%u usec)\n", 3616 seq_printf(m, "WM%d %u (%u.%u usec)\n",
3525 level, wm[level], 3617 level, wm[level], latency / 10, latency % 10);
3526 latency / 10, latency % 10);
3527 } 3618 }
3528 3619
3529 drm_modeset_unlock_all(dev); 3620 drm_modeset_unlock_all(dev);
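The unit handling above is per-generation: gen9 reports watermark latencies in whole microseconds, earlier generations report WM1+ in 0.5 µs steps, and pre-gen9 WM0 is treated as already being in 0.1 µs units, which is why it is printed unscaled. A standalone sketch of the conversion, mirroring the hunk above:

/* Convert a raw watermark latency to tenths of a microsecond for
 * display; e.g. a gen9 raw value of 4 prints as "4.0 usec", while an
 * ILK-era WM1 value of 4 prints as "2.0 usec". */
static unsigned int wm_latency_tenths_us(int gen, int level, unsigned int raw)
{
	unsigned int latency = raw;

	if (gen >= 9)
		latency *= 10;	/* gen9: raw value is in us */
	else if (level > 0)
		latency *= 5;	/* WM1+: raw value is in 0.5 us units */
	/* pre-gen9 WM0: raw value is already in 0.1 us units */

	return latency;
}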
@@ -3532,8 +3623,15 @@ static void wm_latency_show(struct seq_file *m, const uint16_t wm[5])
3532static int pri_wm_latency_show(struct seq_file *m, void *data) 3623static int pri_wm_latency_show(struct seq_file *m, void *data)
3533{ 3624{
3534 struct drm_device *dev = m->private; 3625 struct drm_device *dev = m->private;
3626 struct drm_i915_private *dev_priv = dev->dev_private;
3627 const uint16_t *latencies;
3628
3629 if (INTEL_INFO(dev)->gen >= 9)
3630 latencies = dev_priv->wm.skl_latency;
3631 else
3632 latencies = to_i915(dev)->wm.pri_latency;
3535 3633
3536 wm_latency_show(m, to_i915(dev)->wm.pri_latency); 3634 wm_latency_show(m, latencies);
3537 3635
3538 return 0; 3636 return 0;
3539} 3637}
@@ -3541,8 +3639,15 @@ static int pri_wm_latency_show(struct seq_file *m, void *data)
3541static int spr_wm_latency_show(struct seq_file *m, void *data) 3639static int spr_wm_latency_show(struct seq_file *m, void *data)
3542{ 3640{
3543 struct drm_device *dev = m->private; 3641 struct drm_device *dev = m->private;
3642 struct drm_i915_private *dev_priv = dev->dev_private;
3643 const uint16_t *latencies;
3644
3645 if (INTEL_INFO(dev)->gen >= 9)
3646 latencies = dev_priv->wm.skl_latency;
3647 else
3648 latencies = to_i915(dev)->wm.spr_latency;
3544 3649
3545 wm_latency_show(m, to_i915(dev)->wm.spr_latency); 3650 wm_latency_show(m, latencies);
3546 3651
3547 return 0; 3652 return 0;
3548} 3653}
@@ -3550,8 +3655,15 @@ static int spr_wm_latency_show(struct seq_file *m, void *data)
3550static int cur_wm_latency_show(struct seq_file *m, void *data) 3655static int cur_wm_latency_show(struct seq_file *m, void *data)
3551{ 3656{
3552 struct drm_device *dev = m->private; 3657 struct drm_device *dev = m->private;
3658 struct drm_i915_private *dev_priv = dev->dev_private;
3659 const uint16_t *latencies;
3660
3661 if (INTEL_INFO(dev)->gen >= 9)
3662 latencies = dev_priv->wm.skl_latency;
3663 else
3664 latencies = to_i915(dev)->wm.cur_latency;
3553 3665
3554 wm_latency_show(m, to_i915(dev)->wm.cur_latency); 3666 wm_latency_show(m, latencies);
3555 3667
3556 return 0; 3668 return 0;
3557} 3669}
@@ -3587,11 +3699,11 @@ static int cur_wm_latency_open(struct inode *inode, struct file *file)
3587} 3699}
3588 3700
3589static ssize_t wm_latency_write(struct file *file, const char __user *ubuf, 3701static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
3590 size_t len, loff_t *offp, uint16_t wm[5]) 3702 size_t len, loff_t *offp, uint16_t wm[8])
3591{ 3703{
3592 struct seq_file *m = file->private_data; 3704 struct seq_file *m = file->private_data;
3593 struct drm_device *dev = m->private; 3705 struct drm_device *dev = m->private;
3594 uint16_t new[5] = { 0 }; 3706 uint16_t new[8] = { 0 };
3595 int num_levels = ilk_wm_max_level(dev) + 1; 3707 int num_levels = ilk_wm_max_level(dev) + 1;
3596 int level; 3708 int level;
3597 int ret; 3709 int ret;
@@ -3605,7 +3717,9 @@ static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
3605 3717
3606 tmp[len] = '\0'; 3718 tmp[len] = '\0';
3607 3719
3608 ret = sscanf(tmp, "%hu %hu %hu %hu %hu", &new[0], &new[1], &new[2], &new[3], &new[4]); 3720 ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
3721 &new[0], &new[1], &new[2], &new[3],
3722 &new[4], &new[5], &new[6], &new[7]);
3609 if (ret != num_levels) 3723 if (ret != num_levels)
3610 return -EINVAL; 3724 return -EINVAL;
3611 3725
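Note why the widened sscanf() still works on platforms with fewer than eight watermark levels: sscanf() converts as many fields as the input supplies and returns that count, and the ret != num_levels check then enforces exactly the expected number. A small user-space illustration of that behaviour:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t new[8] = { 0 };
	/* Five numbers in, eight conversions requested: sscanf() stops at
	 * the end of the input and reports 5, so a 5-level platform's
	 * write is accepted only when exactly 5 values are given. */
	int ret = sscanf("4 8 12 16 20", "%hu %hu %hu %hu %hu %hu %hu %hu",
			 &new[0], &new[1], &new[2], &new[3],
			 &new[4], &new[5], &new[6], &new[7]);
	printf("converted %d fields\n", ret);	/* prints: converted 5 fields */
	return 0;
}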
@@ -3625,8 +3739,15 @@ static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
3625{ 3739{
3626 struct seq_file *m = file->private_data; 3740 struct seq_file *m = file->private_data;
3627 struct drm_device *dev = m->private; 3741 struct drm_device *dev = m->private;
3742 struct drm_i915_private *dev_priv = dev->dev_private;
3743 uint16_t *latencies;
3628 3744
3629 return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.pri_latency); 3745 if (INTEL_INFO(dev)->gen >= 9)
3746 latencies = dev_priv->wm.skl_latency;
3747 else
3748 latencies = to_i915(dev)->wm.pri_latency;
3749
3750 return wm_latency_write(file, ubuf, len, offp, latencies);
3630} 3751}
3631 3752
3632static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf, 3753static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
@@ -3634,8 +3755,15 @@ static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
3634{ 3755{
3635 struct seq_file *m = file->private_data; 3756 struct seq_file *m = file->private_data;
3636 struct drm_device *dev = m->private; 3757 struct drm_device *dev = m->private;
3758 struct drm_i915_private *dev_priv = dev->dev_private;
3759 uint16_t *latencies;
3637 3760
3638 return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.spr_latency); 3761 if (INTEL_INFO(dev)->gen >= 9)
3762 latencies = dev_priv->wm.skl_latency;
3763 else
3764 latencies = to_i915(dev)->wm.spr_latency;
3765
3766 return wm_latency_write(file, ubuf, len, offp, latencies);
3639} 3767}
3640 3768
3641static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf, 3769static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
@@ -3643,8 +3771,15 @@ static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
3643{ 3771{
3644 struct seq_file *m = file->private_data; 3772 struct seq_file *m = file->private_data;
3645 struct drm_device *dev = m->private; 3773 struct drm_device *dev = m->private;
3774 struct drm_i915_private *dev_priv = dev->dev_private;
3775 uint16_t *latencies;
3776
3777 if (INTEL_INFO(dev)->gen >= 9)
3778 latencies = dev_priv->wm.skl_latency;
3779 else
3780 latencies = to_i915(dev)->wm.cur_latency;
3646 3781
3647 return wm_latency_write(file, ubuf, len, offp, to_i915(dev)->wm.cur_latency); 3782 return wm_latency_write(file, ubuf, len, offp, latencies);
3648} 3783}
3649 3784
3650static const struct file_operations i915_pri_wm_latency_fops = { 3785static const struct file_operations i915_pri_wm_latency_fops = {
@@ -4187,6 +4322,7 @@ static const struct drm_info_list i915_debugfs_list[] = {
4187 {"i915_shared_dplls_info", i915_shared_dplls_info, 0}, 4322 {"i915_shared_dplls_info", i915_shared_dplls_info, 0},
4188 {"i915_dp_mst_info", i915_dp_mst_info, 0}, 4323 {"i915_dp_mst_info", i915_dp_mst_info, 0},
4189 {"i915_wa_registers", i915_wa_registers, 0}, 4324 {"i915_wa_registers", i915_wa_registers, 0},
4325 {"i915_ddb_info", i915_ddb_info, 0},
4190}; 4326};
4191#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) 4327#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
4192 4328
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 318ade9bb5af..ecee3bcc8772 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -50,884 +50,6 @@
50#include <linux/pm_runtime.h> 50#include <linux/pm_runtime.h>
51#include <linux/oom.h> 51#include <linux/oom.h>
52 52
53#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
54
55#define BEGIN_LP_RING(n) \
56 intel_ring_begin(LP_RING(dev_priv), (n))
57
58#define OUT_RING(x) \
59 intel_ring_emit(LP_RING(dev_priv), x)
60
61#define ADVANCE_LP_RING() \
62 __intel_ring_advance(LP_RING(dev_priv))
63
64/**
65 * Lock test for when it's just for synchronization of ring access.
66 *
67 * In that case, we don't need to do it when GEM is initialized as nobody else
68 * has access to the ring.
69 */
70#define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \
71 if (LP_RING(dev->dev_private)->buffer->obj == NULL) \
72 LOCK_TEST_WITH_RETURN(dev, file); \
73} while (0)
74
75static inline u32
76intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
77{
78 if (I915_NEED_GFX_HWS(dev_priv->dev))
79 return ioread32(dev_priv->dri1.gfx_hws_cpu_addr + reg);
80 else
81 return intel_read_status_page(LP_RING(dev_priv), reg);
82}
83
84#define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg)
85#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
86#define I915_BREADCRUMB_INDEX 0x21
87
88void i915_update_dri1_breadcrumb(struct drm_device *dev)
89{
90 struct drm_i915_private *dev_priv = dev->dev_private;
91 struct drm_i915_master_private *master_priv;
92
93 /*
94 * The dri breadcrumb update races against the drm master disappearing.
95 * Instead of trying to fix this (this is by far not the only ums issue)
96 * just don't do the update in kms mode.
97 */
98 if (drm_core_check_feature(dev, DRIVER_MODESET))
99 return;
100
101 if (dev->primary->master) {
102 master_priv = dev->primary->master->driver_priv;
103 if (master_priv->sarea_priv)
104 master_priv->sarea_priv->last_dispatch =
105 READ_BREADCRUMB(dev_priv);
106 }
107}
108
109static void i915_write_hws_pga(struct drm_device *dev)
110{
111 struct drm_i915_private *dev_priv = dev->dev_private;
112 u32 addr;
113
114 addr = dev_priv->status_page_dmah->busaddr;
115 if (INTEL_INFO(dev)->gen >= 4)
116 addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
117 I915_WRITE(HWS_PGA, addr);
118}
119
120/**
121 * Frees the hardware status page, whether it's a physical address or a virtual
122 * address set up by the X Server.
123 */
124static void i915_free_hws(struct drm_device *dev)
125{
126 struct drm_i915_private *dev_priv = dev->dev_private;
127 struct intel_engine_cs *ring = LP_RING(dev_priv);
128
129 if (dev_priv->status_page_dmah) {
130 drm_pci_free(dev, dev_priv->status_page_dmah);
131 dev_priv->status_page_dmah = NULL;
132 }
133
134 if (ring->status_page.gfx_addr) {
135 ring->status_page.gfx_addr = 0;
136 iounmap(dev_priv->dri1.gfx_hws_cpu_addr);
137 }
138
139 /* Need to rewrite hardware status page */
140 I915_WRITE(HWS_PGA, 0x1ffff000);
141}
142
143void i915_kernel_lost_context(struct drm_device *dev)
144{
145 struct drm_i915_private *dev_priv = dev->dev_private;
146 struct drm_i915_master_private *master_priv;
147 struct intel_engine_cs *ring = LP_RING(dev_priv);
148 struct intel_ringbuffer *ringbuf = ring->buffer;
149
150 /*
151 * We should never lose context on the ring with modesetting
152 * as we don't expose it to userspace
153 */
154 if (drm_core_check_feature(dev, DRIVER_MODESET))
155 return;
156
157 ringbuf->head = I915_READ_HEAD(ring) & HEAD_ADDR;
158 ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
159 ringbuf->space = ringbuf->head - (ringbuf->tail + I915_RING_FREE_SPACE);
160 if (ringbuf->space < 0)
161 ringbuf->space += ringbuf->size;
162
163 if (!dev->primary->master)
164 return;
165
166 master_priv = dev->primary->master->driver_priv;
167 if (ringbuf->head == ringbuf->tail && master_priv->sarea_priv)
168 master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
169}
170
171static int i915_dma_cleanup(struct drm_device *dev)
172{
173 struct drm_i915_private *dev_priv = dev->dev_private;
174 int i;
175
176 /* Make sure interrupts are disabled here because the uninstall ioctl
177 * may not have been called from userspace and after dev_private
178 * is freed, it's too late.
179 */
180 if (dev->irq_enabled)
181 drm_irq_uninstall(dev);
182
183 mutex_lock(&dev->struct_mutex);
184 for (i = 0; i < I915_NUM_RINGS; i++)
185 intel_cleanup_ring_buffer(&dev_priv->ring[i]);
186 mutex_unlock(&dev->struct_mutex);
187
188 /* Clear the HWS virtual address at teardown */
189 if (I915_NEED_GFX_HWS(dev))
190 i915_free_hws(dev);
191
192 return 0;
193}
194
195static int i915_initialize(struct drm_device *dev, drm_i915_init_t *init)
196{
197 struct drm_i915_private *dev_priv = dev->dev_private;
198 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
199 int ret;
200
201 master_priv->sarea = drm_legacy_getsarea(dev);
202 if (master_priv->sarea) {
203 master_priv->sarea_priv = (drm_i915_sarea_t *)
204 ((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
205 } else {
206 DRM_DEBUG_DRIVER("sarea not found, assuming DRI2 userspace\n");
207 }
208
209 if (init->ring_size != 0) {
210 if (LP_RING(dev_priv)->buffer->obj != NULL) {
211 i915_dma_cleanup(dev);
212 DRM_ERROR("Client tried to initialize ringbuffer in "
213 "GEM mode\n");
214 return -EINVAL;
215 }
216
217 ret = intel_render_ring_init_dri(dev,
218 init->ring_start,
219 init->ring_size);
220 if (ret) {
221 i915_dma_cleanup(dev);
222 return ret;
223 }
224 }
225
226 dev_priv->dri1.cpp = init->cpp;
227 dev_priv->dri1.back_offset = init->back_offset;
228 dev_priv->dri1.front_offset = init->front_offset;
229 dev_priv->dri1.current_page = 0;
230 if (master_priv->sarea_priv)
231 master_priv->sarea_priv->pf_current_page = 0;
232
233 /* Allow hardware batchbuffers unless told otherwise.
234 */
235 dev_priv->dri1.allow_batchbuffer = 1;
236
237 return 0;
238}
239
240static int i915_dma_resume(struct drm_device *dev)
241{
242 struct drm_i915_private *dev_priv = dev->dev_private;
243 struct intel_engine_cs *ring = LP_RING(dev_priv);
244
245 DRM_DEBUG_DRIVER("%s\n", __func__);
246
247 if (ring->buffer->virtual_start == NULL) {
248 DRM_ERROR("cannot ioremap virtual address for"
249 " ring buffer\n");
250 return -ENOMEM;
251 }
252
253 /* Program Hardware Status Page */
254 if (!ring->status_page.page_addr) {
255 DRM_ERROR("Cannot find hardware status page\n");
256 return -EINVAL;
257 }
258 DRM_DEBUG_DRIVER("hw status page @ %p\n",
259 ring->status_page.page_addr);
260 if (ring->status_page.gfx_addr != 0)
261 intel_ring_setup_status_page(ring);
262 else
263 i915_write_hws_pga(dev);
264
265 DRM_DEBUG_DRIVER("Enabled hardware status page\n");
266
267 return 0;
268}
269
270static int i915_dma_init(struct drm_device *dev, void *data,
271 struct drm_file *file_priv)
272{
273 drm_i915_init_t *init = data;
274 int retcode = 0;
275
276 if (drm_core_check_feature(dev, DRIVER_MODESET))
277 return -ENODEV;
278
279 switch (init->func) {
280 case I915_INIT_DMA:
281 retcode = i915_initialize(dev, init);
282 break;
283 case I915_CLEANUP_DMA:
284 retcode = i915_dma_cleanup(dev);
285 break;
286 case I915_RESUME_DMA:
287 retcode = i915_dma_resume(dev);
288 break;
289 default:
290 retcode = -EINVAL;
291 break;
292 }
293
294 return retcode;
295}
296
297/* Implement basically the same security restrictions as hardware does
298 * for MI_BATCH_NON_SECURE. These can be made stricter at any time.
299 *
300 * Most of the calculations below involve calculating the size of a
301 * particular instruction. It's important to get the size right as
302 * that tells us where the next instruction to check is. Any illegal
303 * instruction detected will be given a size of zero, which is a
304 * signal to abort the rest of the buffer.
305 */
306static int validate_cmd(int cmd)
307{
308 switch (((cmd >> 29) & 0x7)) {
309 case 0x0:
310 switch ((cmd >> 23) & 0x3f) {
311 case 0x0:
312 return 1; /* MI_NOOP */
313 case 0x4:
314 return 1; /* MI_FLUSH */
315 default:
316 return 0; /* disallow everything else */
317 }
318 break;
319 case 0x1:
320 return 0; /* reserved */
321 case 0x2:
322 return (cmd & 0xff) + 2; /* 2d commands */
323 case 0x3:
324 if (((cmd >> 24) & 0x1f) <= 0x18)
325 return 1;
326
327 switch ((cmd >> 24) & 0x1f) {
328 case 0x1c:
329 return 1;
330 case 0x1d:
331 switch ((cmd >> 16) & 0xff) {
332 case 0x3:
333 return (cmd & 0x1f) + 2;
334 case 0x4:
335 return (cmd & 0xf) + 2;
336 default:
337 return (cmd & 0xffff) + 2;
338 }
339 case 0x1e:
340 if (cmd & (1 << 23))
341 return (cmd & 0xffff) + 1;
342 else
343 return 1;
344 case 0x1f:
345 if ((cmd & (1 << 23)) == 0) /* inline vertices */
346 return (cmd & 0x1ffff) + 2;
347 else if (cmd & (1 << 17)) /* indirect random */
348 if ((cmd & 0xffff) == 0)
349 return 0; /* unknown length, too hard */
350 else
351 return (((cmd & 0xffff) + 1) / 2) + 1;
352 else
353 return 2; /* indirect sequential */
354 default:
355 return 0;
356 }
357 default:
358 return 0;
359 }
360
361 return 0;
362}
363
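A few worked examples may help with validate_cmd() above (illustrative decodes, following the bit tests in the function):

/*
 *   0x00000000  client 0x0, opcode 0x00      -> MI_NOOP, size 1 dword
 *   0x54000004  client 0x2 (2D command)      -> size (cmd & 0xff) + 2 = 6
 *   0x20000000  client 0x1 (reserved)        -> size 0, scan aborts
 */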
364static int i915_emit_cmds(struct drm_device *dev, int *buffer, int dwords)
365{
366 struct drm_i915_private *dev_priv = dev->dev_private;
367 int i, ret;
368
369 if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->buffer->size - 8)
370 return -EINVAL;
371
372 for (i = 0; i < dwords;) {
373 int sz = validate_cmd(buffer[i]);
374
375 if (sz == 0 || i + sz > dwords)
376 return -EINVAL;
377 i += sz;
378 }
379
380 ret = BEGIN_LP_RING((dwords+1)&~1);
381 if (ret)
382 return ret;
383
384 for (i = 0; i < dwords; i++)
385 OUT_RING(buffer[i]);
386 if (dwords & 1)
387 OUT_RING(0);
388
389 ADVANCE_LP_RING();
390
391 return 0;
392}
393
394int
395i915_emit_box(struct drm_device *dev,
396 struct drm_clip_rect *box,
397 int DR1, int DR4)
398{
399 struct drm_i915_private *dev_priv = dev->dev_private;
400 int ret;
401
402 if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
403 box->y2 <= 0 || box->x2 <= 0) {
404 DRM_ERROR("Bad box %d,%d..%d,%d\n",
405 box->x1, box->y1, box->x2, box->y2);
406 return -EINVAL;
407 }
408
409 if (INTEL_INFO(dev)->gen >= 4) {
410 ret = BEGIN_LP_RING(4);
411 if (ret)
412 return ret;
413
414 OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
415 OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
416 OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
417 OUT_RING(DR4);
418 } else {
419 ret = BEGIN_LP_RING(6);
420 if (ret)
421 return ret;
422
423 OUT_RING(GFX_OP_DRAWRECT_INFO);
424 OUT_RING(DR1);
425 OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
426 OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
427 OUT_RING(DR4);
428 OUT_RING(0);
429 }
430 ADVANCE_LP_RING();
431
432 return 0;
433}
434
435/* XXX: Emitting the counter should really be moved to part of the IRQ
436 * emit. For now, do it in both places:
437 */
438
439static void i915_emit_breadcrumb(struct drm_device *dev)
440{
441 struct drm_i915_private *dev_priv = dev->dev_private;
442 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
443
444 dev_priv->dri1.counter++;
445 if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
446 dev_priv->dri1.counter = 0;
447 if (master_priv->sarea_priv)
448 master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
449
450 if (BEGIN_LP_RING(4) == 0) {
451 OUT_RING(MI_STORE_DWORD_INDEX);
452 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
453 OUT_RING(dev_priv->dri1.counter);
454 OUT_RING(0);
455 ADVANCE_LP_RING();
456 }
457}
458
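The dword stored here is the producer half of the legacy breadcrumb protocol: it lands at index 0x21 (I915_BREADCRUMB_INDEX) of the hardware status page, which READ_BREADCRUMB() reads back. A minimal sketch of the consumer side, assuming the macros defined at the top of this file:

/* Completion test used by the wait path below: the status page dword
 * has caught up with (or passed) the sequence number we emitted. */
static bool breadcrumb_passed(struct drm_i915_private *dev_priv, int seqno)
{
	return READ_BREADCRUMB(dev_priv) >= seqno;
}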
459static int i915_dispatch_cmdbuffer(struct drm_device *dev,
460 drm_i915_cmdbuffer_t *cmd,
461 struct drm_clip_rect *cliprects,
462 void *cmdbuf)
463{
464 int nbox = cmd->num_cliprects;
465 int i = 0, count, ret;
466
467 if (cmd->sz & 0x3) {
468 DRM_ERROR("alignment");
469 return -EINVAL;
470 }
471
472 i915_kernel_lost_context(dev);
473
474 count = nbox ? nbox : 1;
475
476 for (i = 0; i < count; i++) {
477 if (i < nbox) {
478 ret = i915_emit_box(dev, &cliprects[i],
479 cmd->DR1, cmd->DR4);
480 if (ret)
481 return ret;
482 }
483
484 ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
485 if (ret)
486 return ret;
487 }
488
489 i915_emit_breadcrumb(dev);
490 return 0;
491}
492
493static int i915_dispatch_batchbuffer(struct drm_device *dev,
494 drm_i915_batchbuffer_t *batch,
495 struct drm_clip_rect *cliprects)
496{
497 struct drm_i915_private *dev_priv = dev->dev_private;
498 int nbox = batch->num_cliprects;
499 int i, count, ret;
500
501 if ((batch->start | batch->used) & 0x7) {
502 DRM_ERROR("alignment");
503 return -EINVAL;
504 }
505
506 i915_kernel_lost_context(dev);
507
508 count = nbox ? nbox : 1;
509 for (i = 0; i < count; i++) {
510 if (i < nbox) {
511 ret = i915_emit_box(dev, &cliprects[i],
512 batch->DR1, batch->DR4);
513 if (ret)
514 return ret;
515 }
516
517 if (!IS_I830(dev) && !IS_845G(dev)) {
518 ret = BEGIN_LP_RING(2);
519 if (ret)
520 return ret;
521
522 if (INTEL_INFO(dev)->gen >= 4) {
523 OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
524 OUT_RING(batch->start);
525 } else {
526 OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
527 OUT_RING(batch->start | MI_BATCH_NON_SECURE);
528 }
529 } else {
530 ret = BEGIN_LP_RING(4);
531 if (ret)
532 return ret;
533
534 OUT_RING(MI_BATCH_BUFFER);
535 OUT_RING(batch->start | MI_BATCH_NON_SECURE);
536 OUT_RING(batch->start + batch->used - 4);
537 OUT_RING(0);
538 }
539 ADVANCE_LP_RING();
540 }
541
542
543 if (IS_G4X(dev) || IS_GEN5(dev)) {
544 if (BEGIN_LP_RING(2) == 0) {
545 OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
546 OUT_RING(MI_NOOP);
547 ADVANCE_LP_RING();
548 }
549 }
550
551 i915_emit_breadcrumb(dev);
552 return 0;
553}
554
555static int i915_dispatch_flip(struct drm_device *dev)
556{
557 struct drm_i915_private *dev_priv = dev->dev_private;
558 struct drm_i915_master_private *master_priv =
559 dev->primary->master->driver_priv;
560 int ret;
561
562 if (!master_priv->sarea_priv)
563 return -EINVAL;
564
565 DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
566 __func__,
567 dev_priv->dri1.current_page,
568 master_priv->sarea_priv->pf_current_page);
569
570 i915_kernel_lost_context(dev);
571
572 ret = BEGIN_LP_RING(10);
573 if (ret)
574 return ret;
575
576 OUT_RING(MI_FLUSH | MI_READ_FLUSH);
577 OUT_RING(0);
578
579 OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
580 OUT_RING(0);
581 if (dev_priv->dri1.current_page == 0) {
582 OUT_RING(dev_priv->dri1.back_offset);
583 dev_priv->dri1.current_page = 1;
584 } else {
585 OUT_RING(dev_priv->dri1.front_offset);
586 dev_priv->dri1.current_page = 0;
587 }
588 OUT_RING(0);
589
590 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
591 OUT_RING(0);
592
593 ADVANCE_LP_RING();
594
595 master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter++;
596
597 if (BEGIN_LP_RING(4) == 0) {
598 OUT_RING(MI_STORE_DWORD_INDEX);
599 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
600 OUT_RING(dev_priv->dri1.counter);
601 OUT_RING(0);
602 ADVANCE_LP_RING();
603 }
604
605 master_priv->sarea_priv->pf_current_page = dev_priv->dri1.current_page;
606 return 0;
607}
608
609static int i915_quiescent(struct drm_device *dev)
610{
611 i915_kernel_lost_context(dev);
612 return intel_ring_idle(LP_RING(dev->dev_private));
613}
614
615static int i915_flush_ioctl(struct drm_device *dev, void *data,
616 struct drm_file *file_priv)
617{
618 int ret;
619
620 if (drm_core_check_feature(dev, DRIVER_MODESET))
621 return -ENODEV;
622
623 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
624
625 mutex_lock(&dev->struct_mutex);
626 ret = i915_quiescent(dev);
627 mutex_unlock(&dev->struct_mutex);
628
629 return ret;
630}
631
632static int i915_batchbuffer(struct drm_device *dev, void *data,
633 struct drm_file *file_priv)
634{
635 struct drm_i915_private *dev_priv = dev->dev_private;
636 struct drm_i915_master_private *master_priv;
637 drm_i915_sarea_t *sarea_priv;
638 drm_i915_batchbuffer_t *batch = data;
639 int ret;
640 struct drm_clip_rect *cliprects = NULL;
641
642 if (drm_core_check_feature(dev, DRIVER_MODESET))
643 return -ENODEV;
644
645 master_priv = dev->primary->master->driver_priv;
646 sarea_priv = (drm_i915_sarea_t *) master_priv->sarea_priv;
647
648 if (!dev_priv->dri1.allow_batchbuffer) {
649 DRM_ERROR("Batchbuffer ioctl disabled\n");
650 return -EINVAL;
651 }
652
653 DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
654 batch->start, batch->used, batch->num_cliprects);
655
656 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
657
658 if (batch->num_cliprects < 0)
659 return -EINVAL;
660
661 if (batch->num_cliprects) {
662 cliprects = kcalloc(batch->num_cliprects,
663 sizeof(*cliprects),
664 GFP_KERNEL);
665 if (cliprects == NULL)
666 return -ENOMEM;
667
668 ret = copy_from_user(cliprects, batch->cliprects,
669 batch->num_cliprects *
670 sizeof(struct drm_clip_rect));
671 if (ret != 0) {
672 ret = -EFAULT;
673 goto fail_free;
674 }
675 }
676
677 mutex_lock(&dev->struct_mutex);
678 ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
679 mutex_unlock(&dev->struct_mutex);
680
681 if (sarea_priv)
682 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
683
684fail_free:
685 kfree(cliprects);
686
687 return ret;
688}
689
690static int i915_cmdbuffer(struct drm_device *dev, void *data,
691 struct drm_file *file_priv)
692{
693 struct drm_i915_private *dev_priv = dev->dev_private;
694 struct drm_i915_master_private *master_priv;
695 drm_i915_sarea_t *sarea_priv;
696 drm_i915_cmdbuffer_t *cmdbuf = data;
697 struct drm_clip_rect *cliprects = NULL;
698 void *batch_data;
699 int ret;
700
701 DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
702 cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
703
704 if (drm_core_check_feature(dev, DRIVER_MODESET))
705 return -ENODEV;
706
707 master_priv = dev->primary->master->driver_priv;
708 sarea_priv = (drm_i915_sarea_t *) master_priv->sarea_priv;
709
710 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
711
712 if (cmdbuf->num_cliprects < 0)
713 return -EINVAL;
714
715 batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL);
716 if (batch_data == NULL)
717 return -ENOMEM;
718
719 ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
720 if (ret != 0) {
721 ret = -EFAULT;
722 goto fail_batch_free;
723 }
724
725 if (cmdbuf->num_cliprects) {
726 cliprects = kcalloc(cmdbuf->num_cliprects,
727 sizeof(*cliprects), GFP_KERNEL);
728 if (cliprects == NULL) {
729 ret = -ENOMEM;
730 goto fail_batch_free;
731 }
732
733 ret = copy_from_user(cliprects, cmdbuf->cliprects,
734 cmdbuf->num_cliprects *
735 sizeof(struct drm_clip_rect));
736 if (ret != 0) {
737 ret = -EFAULT;
738 goto fail_clip_free;
739 }
740 }
741
742 mutex_lock(&dev->struct_mutex);
743 ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
744 mutex_unlock(&dev->struct_mutex);
745 if (ret) {
746 DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
747 goto fail_clip_free;
748 }
749
750 if (sarea_priv)
751 sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
752
753fail_clip_free:
754 kfree(cliprects);
755fail_batch_free:
756 kfree(batch_data);
757
758 return ret;
759}
760
761static int i915_emit_irq(struct drm_device *dev)
762{
763 struct drm_i915_private *dev_priv = dev->dev_private;
764 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
765
766 i915_kernel_lost_context(dev);
767
768 DRM_DEBUG_DRIVER("\n");
769
770 dev_priv->dri1.counter++;
771 if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
772 dev_priv->dri1.counter = 1;
773 if (master_priv->sarea_priv)
774 master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;
775
776 if (BEGIN_LP_RING(4) == 0) {
777 OUT_RING(MI_STORE_DWORD_INDEX);
778 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
779 OUT_RING(dev_priv->dri1.counter);
780 OUT_RING(MI_USER_INTERRUPT);
781 ADVANCE_LP_RING();
782 }
783
784 return dev_priv->dri1.counter;
785}
786
787static int i915_wait_irq(struct drm_device *dev, int irq_nr)
788{
789 struct drm_i915_private *dev_priv = dev->dev_private;
790 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
791 int ret = 0;
792 struct intel_engine_cs *ring = LP_RING(dev_priv);
793
794 DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
795 READ_BREADCRUMB(dev_priv));
796
797 if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
798 if (master_priv->sarea_priv)
799 master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
800 return 0;
801 }
802
803 if (master_priv->sarea_priv)
804 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
805
806 if (ring->irq_get(ring)) {
807 DRM_WAIT_ON(ret, ring->irq_queue, 3 * HZ,
808 READ_BREADCRUMB(dev_priv) >= irq_nr);
809 ring->irq_put(ring);
810 } else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
811 ret = -EBUSY;
812
813 if (ret == -EBUSY) {
814 DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
815 READ_BREADCRUMB(dev_priv), (int)dev_priv->dri1.counter);
816 }
817
818 return ret;
819}
820
821/* Needs the lock as it touches the ring.
822 */
823static int i915_irq_emit(struct drm_device *dev, void *data,
824 struct drm_file *file_priv)
825{
826 struct drm_i915_private *dev_priv = dev->dev_private;
827 drm_i915_irq_emit_t *emit = data;
828 int result;
829
830 if (drm_core_check_feature(dev, DRIVER_MODESET))
831 return -ENODEV;
832
833 if (!dev_priv || !LP_RING(dev_priv)->buffer->virtual_start) {
834 DRM_ERROR("called with no initialization\n");
835 return -EINVAL;
836 }
837
838 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
839
840 mutex_lock(&dev->struct_mutex);
841 result = i915_emit_irq(dev);
842 mutex_unlock(&dev->struct_mutex);
843
844 if (copy_to_user(emit->irq_seq, &result, sizeof(int))) {
845 DRM_ERROR("copy_to_user\n");
846 return -EFAULT;
847 }
848
849 return 0;
850}
851
852/* Doesn't need the hardware lock.
853 */
854static int i915_irq_wait(struct drm_device *dev, void *data,
855 struct drm_file *file_priv)
856{
857 struct drm_i915_private *dev_priv = dev->dev_private;
858 drm_i915_irq_wait_t *irqwait = data;
859
860 if (drm_core_check_feature(dev, DRIVER_MODESET))
861 return -ENODEV;
862
863 if (!dev_priv) {
864 DRM_ERROR("called with no initialization\n");
865 return -EINVAL;
866 }
867
868 return i915_wait_irq(dev, irqwait->irq_seq);
869}
870
871static int i915_vblank_pipe_get(struct drm_device *dev, void *data,
872 struct drm_file *file_priv)
873{
874 struct drm_i915_private *dev_priv = dev->dev_private;
875 drm_i915_vblank_pipe_t *pipe = data;
876
877 if (drm_core_check_feature(dev, DRIVER_MODESET))
878 return -ENODEV;
879
880 if (!dev_priv) {
881 DRM_ERROR("called with no initialization\n");
882 return -EINVAL;
883 }
884
885 pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
886
887 return 0;
888}
889
890/**
891 * Schedule buffer swap at given vertical blank.
892 */
893static int i915_vblank_swap(struct drm_device *dev, void *data,
894 struct drm_file *file_priv)
895{
896 /* The delayed swap mechanism was fundamentally racy, and has been
897 * removed. The model was that the client requested a delayed flip/swap
898 * from the kernel, then waited for vblank before continuing to perform
899 * rendering. The problem was that the kernel might wake the client
900 * up before it dispatched the vblank swap (since the lock has to be
901 * held while touching the ringbuffer), in which case the client would
902 * clear and start the next frame before the swap occurred, and
903 * flicker would occur in addition to likely missing the vblank.
904 *
905 * In the absence of this ioctl, userland falls back to a correct path
906 * of waiting for a vblank, then dispatching the swap on its own.
907 * Context switching to userland and back is plenty fast enough for
908 * meeting the requirements of vblank swapping.
909 */
910 return -EINVAL;
911}
912
913static int i915_flip_bufs(struct drm_device *dev, void *data,
914 struct drm_file *file_priv)
915{
916 int ret;
917
918 if (drm_core_check_feature(dev, DRIVER_MODESET))
919 return -ENODEV;
920
921 DRM_DEBUG_DRIVER("%s\n", __func__);
922
923 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
924
925 mutex_lock(&dev->struct_mutex);
926 ret = i915_dispatch_flip(dev);
927 mutex_unlock(&dev->struct_mutex);
928
929 return ret;
930}
931 53
932static int i915_getparam(struct drm_device *dev, void *data, 54static int i915_getparam(struct drm_device *dev, void *data,
933 struct drm_file *file_priv) 55 struct drm_file *file_priv)
@@ -936,21 +58,12 @@ static int i915_getparam(struct drm_device *dev, void *data,
936 drm_i915_getparam_t *param = data; 58 drm_i915_getparam_t *param = data;
937 int value; 59 int value;
938 60
939 if (!dev_priv) {
940 DRM_ERROR("called with no initialization\n");
941 return -EINVAL;
942 }
943
944 switch (param->param) { 61 switch (param->param) {
945 case I915_PARAM_IRQ_ACTIVE: 62 case I915_PARAM_IRQ_ACTIVE:
946 value = dev->pdev->irq ? 1 : 0;
947 break;
948 case I915_PARAM_ALLOW_BATCHBUFFER: 63 case I915_PARAM_ALLOW_BATCHBUFFER:
949 value = dev_priv->dri1.allow_batchbuffer ? 1 : 0;
950 break;
951 case I915_PARAM_LAST_DISPATCH: 64 case I915_PARAM_LAST_DISPATCH:
952 value = READ_BREADCRUMB(dev_priv); 65 /* Reject all old ums/dri params. */
953 break; 66 return -ENODEV;
954 case I915_PARAM_CHIPSET_ID: 67 case I915_PARAM_CHIPSET_ID:
955 value = dev->pdev->device; 68 value = dev->pdev->device;
956 break; 69 break;
@@ -1027,6 +140,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
1027 case I915_PARAM_CMD_PARSER_VERSION: 140 case I915_PARAM_CMD_PARSER_VERSION:
1028 value = i915_cmd_parser_get_version(); 141 value = i915_cmd_parser_get_version();
1029 break; 142 break;
143 case I915_PARAM_HAS_COHERENT_PHYS_GTT:
144 value = 1;
145 break;
1030 default: 146 default:
1031 DRM_DEBUG("Unknown parameter %d\n", param->param); 147 DRM_DEBUG("Unknown parameter %d\n", param->param);
1032 return -EINVAL; 148 return -EINVAL;
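After this change a legacy UMS query fails cleanly rather than returning stale state. An illustrative user-space probe (assuming libdrm's xf86drm.h/i915_drm.h headers and an already-open device fd):

#include <errno.h>
#include <xf86drm.h>
#include <i915_drm.h>

static int query_last_dispatch(int fd)
{
	int value = 0;
	drm_i915_getparam_t gp = {
		.param = I915_PARAM_LAST_DISPATCH,
		.value = &value,
	};

	/* On a kernel with this patch, drmIoctl() fails and errno is
	 * ENODEV; older kernels returned the last breadcrumb value. */
	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) < 0)
		return -errno;
	return value;
}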
@@ -1046,19 +162,13 @@ static int i915_setparam(struct drm_device *dev, void *data,
1046 struct drm_i915_private *dev_priv = dev->dev_private; 162 struct drm_i915_private *dev_priv = dev->dev_private;
1047 drm_i915_setparam_t *param = data; 163 drm_i915_setparam_t *param = data;
1048 164
1049 if (!dev_priv) {
1050 DRM_ERROR("called with no initialization\n");
1051 return -EINVAL;
1052 }
1053
1054 switch (param->param) { 165 switch (param->param) {
1055 case I915_SETPARAM_USE_MI_BATCHBUFFER_START: 166 case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
1056 break;
1057 case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY: 167 case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
1058 break;
1059 case I915_SETPARAM_ALLOW_BATCHBUFFER: 168 case I915_SETPARAM_ALLOW_BATCHBUFFER:
1060 dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0; 169 /* Reject all old ums/dri params. */
1061 break; 170 return -ENODEV;
171
1062 case I915_SETPARAM_NUM_USED_FENCES: 172 case I915_SETPARAM_NUM_USED_FENCES:
1063 if (param->value > dev_priv->num_fence_regs || 173 if (param->value > dev_priv->num_fence_regs ||
1064 param->value < 0) 174 param->value < 0)
@@ -1075,54 +185,6 @@ static int i915_setparam(struct drm_device *dev, void *data,
1075 return 0; 185 return 0;
1076} 186}
1077 187
1078static int i915_set_status_page(struct drm_device *dev, void *data,
1079 struct drm_file *file_priv)
1080{
1081 struct drm_i915_private *dev_priv = dev->dev_private;
1082 drm_i915_hws_addr_t *hws = data;
1083 struct intel_engine_cs *ring;
1084
1085 if (drm_core_check_feature(dev, DRIVER_MODESET))
1086 return -ENODEV;
1087
1088 if (!I915_NEED_GFX_HWS(dev))
1089 return -EINVAL;
1090
1091 if (!dev_priv) {
1092 DRM_ERROR("called with no initialization\n");
1093 return -EINVAL;
1094 }
1095
1096 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1097 WARN(1, "tried to set status page when mode setting active\n");
1098 return 0;
1099 }
1100
1101 DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);
1102
1103 ring = LP_RING(dev_priv);
1104 ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
1105
1106 dev_priv->dri1.gfx_hws_cpu_addr =
1107 ioremap_wc(dev_priv->gtt.mappable_base + hws->addr, 4096);
1108 if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
1109 i915_dma_cleanup(dev);
1110 ring->status_page.gfx_addr = 0;
1111 DRM_ERROR("cannot ioremap virtual address for"
1112 " G33 hw status page\n");
1113 return -ENOMEM;
1114 }
1115
1116 memset_io(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE);
1117 I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
1118
1119 DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
1120 ring->status_page.gfx_addr);
1121 DRM_DEBUG_DRIVER("load hws at %p\n",
1122 ring->status_page.page_addr);
1123 return 0;
1124}
1125
1126static int i915_get_bridge_dev(struct drm_device *dev) 188static int i915_get_bridge_dev(struct drm_device *dev)
1127{ 189{
1128 struct drm_i915_private *dev_priv = dev->dev_private; 190 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1275,12 +337,12 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_
1275 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 337 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1276 /* i915 resume handler doesn't set to D0 */ 338 /* i915 resume handler doesn't set to D0 */
1277 pci_set_power_state(dev->pdev, PCI_D0); 339 pci_set_power_state(dev->pdev, PCI_D0);
1278 i915_resume(dev); 340 i915_resume_legacy(dev);
1279 dev->switch_power_state = DRM_SWITCH_POWER_ON; 341 dev->switch_power_state = DRM_SWITCH_POWER_ON;
1280 } else { 342 } else {
1281 pr_err("switched off\n"); 343 pr_err("switched off\n");
1282 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; 344 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1283 i915_suspend(dev, pmm); 345 i915_suspend_legacy(dev, pmm);
1284 dev->switch_power_state = DRM_SWITCH_POWER_OFF; 346 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1285 } 347 }
1286} 348}
@@ -1338,14 +400,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
1338 400
1339 intel_power_domains_init_hw(dev_priv); 401 intel_power_domains_init_hw(dev_priv);
1340 402
1341 /* 403 ret = intel_irq_install(dev_priv);
1342 * We enable some interrupt sources in our postinstall hooks, so mark
1343 * interrupts as enabled _before_ actually enabling them to avoid
1344 * special cases in our ordering checks.
1345 */
1346 dev_priv->pm._irqs_disabled = false;
1347
1348 ret = drm_irq_install(dev, dev->pdev->irq);
1349 if (ret) 404 if (ret)
1350 goto cleanup_gem_stolen; 405 goto cleanup_gem_stolen;
1351 406
@@ -1370,7 +425,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
1370 goto cleanup_gem; 425 goto cleanup_gem;
1371 426
1372 /* Only enable hotplug handling once the fbdev is fully set up. */ 427 /* Only enable hotplug handling once the fbdev is fully set up. */
1373 intel_hpd_init(dev); 428 intel_hpd_init(dev_priv);
1374 429
1375 /* 430 /*
1376 * Some ports require correctly set-up hpd registers for detection to 431 * Some ports require correctly set-up hpd registers for detection to
@@ -1405,30 +460,6 @@ out:
1405 return ret; 460 return ret;
1406} 461}
1407 462
1408int i915_master_create(struct drm_device *dev, struct drm_master *master)
1409{
1410 struct drm_i915_master_private *master_priv;
1411
1412 master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL);
1413 if (!master_priv)
1414 return -ENOMEM;
1415
1416 master->driver_priv = master_priv;
1417 return 0;
1418}
1419
1420void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
1421{
1422 struct drm_i915_master_private *master_priv = master->driver_priv;
1423
1424 if (!master_priv)
1425 return;
1426
1427 kfree(master_priv);
1428
1429 master->driver_priv = NULL;
1430}
1431
1432#if IS_ENABLED(CONFIG_FB) 463#if IS_ENABLED(CONFIG_FB)
1433static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) 464static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
1434{ 465{
@@ -1534,7 +565,7 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
1534 565
1535 info = (struct intel_device_info *)&dev_priv->info; 566 info = (struct intel_device_info *)&dev_priv->info;
1536 567
1537 if (IS_VALLEYVIEW(dev)) 568 if (IS_VALLEYVIEW(dev) || INTEL_INFO(dev)->gen == 9)
1538 for_each_pipe(dev_priv, pipe) 569 for_each_pipe(dev_priv, pipe)
1539 info->num_sprites[pipe] = 2; 570 info->num_sprites[pipe] = 2;
1540 else 571 else
@@ -1614,7 +645,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1614 645
1615 spin_lock_init(&dev_priv->irq_lock); 646 spin_lock_init(&dev_priv->irq_lock);
1616 spin_lock_init(&dev_priv->gpu_error.lock); 647 spin_lock_init(&dev_priv->gpu_error.lock);
1617 spin_lock_init(&dev_priv->backlight_lock); 648 mutex_init(&dev_priv->backlight_lock);
1618 spin_lock_init(&dev_priv->uncore.lock); 649 spin_lock_init(&dev_priv->uncore.lock);
1619 spin_lock_init(&dev_priv->mm.object_stat_lock); 650 spin_lock_init(&dev_priv->mm.object_stat_lock);
1620 spin_lock_init(&dev_priv->mmio_flip_lock); 651 spin_lock_init(&dev_priv->mmio_flip_lock);
@@ -1742,7 +773,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1742 goto out_freewq; 773 goto out_freewq;
1743 } 774 }
1744 775
1745 intel_irq_init(dev); 776 intel_irq_init(dev_priv);
1746 intel_uncore_sanitize(dev); 777 intel_uncore_sanitize(dev);
1747 778
1748 /* Try to make sure MCHBAR is enabled before poking at it */ 779 /* Try to make sure MCHBAR is enabled before poking at it */
@@ -1784,9 +815,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1784 DRM_ERROR("failed to init modeset\n"); 815 DRM_ERROR("failed to init modeset\n");
1785 goto out_power_well; 816 goto out_power_well;
1786 } 817 }
1787 } else {
1788 /* Start out suspended in ums mode. */
1789 dev_priv->ums.mm_suspended = 1;
1790 } 818 }
1791 819
1792 i915_setup_sysfs(dev); 820 i915_setup_sysfs(dev);
@@ -1800,12 +828,12 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1800 if (IS_GEN5(dev)) 828 if (IS_GEN5(dev))
1801 intel_gpu_ips_init(dev_priv); 829 intel_gpu_ips_init(dev_priv);
1802 830
1803 intel_init_runtime_pm(dev_priv); 831 intel_runtime_pm_enable(dev_priv);
1804 832
1805 return 0; 833 return 0;
1806 834
1807out_power_well: 835out_power_well:
1808 intel_power_domains_remove(dev_priv); 836 intel_power_domains_fini(dev_priv);
1809 drm_vblank_cleanup(dev); 837 drm_vblank_cleanup(dev);
1810out_gem_unload: 838out_gem_unload:
1811 WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier)); 839 WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
@@ -1848,16 +876,10 @@ int i915_driver_unload(struct drm_device *dev)
1848 return ret; 876 return ret;
1849 } 877 }
1850 878
1851 intel_fini_runtime_pm(dev_priv); 879 intel_power_domains_fini(dev_priv);
1852 880
1853 intel_gpu_ips_teardown(); 881 intel_gpu_ips_teardown();
1854 882
1855 /* The i915.ko module is still not prepared to be loaded when
1856 * the power well is not enabled, so just enable it in case
1857 * we're going to unload/reload. */
1858 intel_display_set_init_power(dev_priv, true);
1859 intel_power_domains_remove(dev_priv);
1860
1861 i915_teardown_sysfs(dev); 883 i915_teardown_sysfs(dev);
1862 884
1863 WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier)); 885 WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
@@ -1868,8 +890,12 @@ int i915_driver_unload(struct drm_device *dev)
1868 890
1869 acpi_video_unregister(); 891 acpi_video_unregister();
1870 892
1871 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 893 if (drm_core_check_feature(dev, DRIVER_MODESET))
1872 intel_fbdev_fini(dev); 894 intel_fbdev_fini(dev);
895
896 drm_vblank_cleanup(dev);
897
898 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1873 intel_modeset_cleanup(dev); 899 intel_modeset_cleanup(dev);
1874 900
1875 /* 901 /*
@@ -1905,13 +931,8 @@ int i915_driver_unload(struct drm_device *dev)
1905 i915_gem_context_fini(dev); 931 i915_gem_context_fini(dev);
1906 mutex_unlock(&dev->struct_mutex); 932 mutex_unlock(&dev->struct_mutex);
1907 i915_gem_cleanup_stolen(dev); 933 i915_gem_cleanup_stolen(dev);
1908
1909 if (!I915_NEED_GFX_HWS(dev))
1910 i915_free_hws(dev);
1911 } 934 }
1912 935
1913 drm_vblank_cleanup(dev);
1914
1915 intel_teardown_gmbus(dev); 936 intel_teardown_gmbus(dev);
1916 intel_teardown_mchbar(dev); 937 intel_teardown_mchbar(dev);
1917 938
@@ -1959,23 +980,8 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file)
1959 */ 980 */
1960void i915_driver_lastclose(struct drm_device *dev) 981void i915_driver_lastclose(struct drm_device *dev)
1961{ 982{
1962 struct drm_i915_private *dev_priv = dev->dev_private; 983 intel_fbdev_restore_mode(dev);
1963 984 vga_switcheroo_process_delayed_switch();
1964 /* On gen6+ we refuse to init without kms enabled, but then the drm core
1965 * goes right around and calls lastclose. Check for this and don't clean
1966 * up anything. */
1967 if (!dev_priv)
1968 return;
1969
1970 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1971 intel_fbdev_restore_mode(dev);
1972 vga_switcheroo_process_delayed_switch();
1973 return;
1974 }
1975
1976 i915_gem_lastclose(dev);
1977
1978 i915_dma_cleanup(dev);
1979} 985}
1980 986
1981void i915_driver_preclose(struct drm_device *dev, struct drm_file *file) 987void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
@@ -1999,24 +1005,24 @@ void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
1999} 1005}
2000 1006
2001const struct drm_ioctl_desc i915_ioctls[] = { 1007const struct drm_ioctl_desc i915_ioctls[] = {
2002 DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1008 DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2003 DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH), 1009 DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
2004 DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH), 1010 DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
2005 DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH), 1011 DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
2006 DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH), 1012 DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
2007 DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH), 1013 DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
2008 DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW), 1014 DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
2009 DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1015 DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2010 DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH), 1016 DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
2011 DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH), 1017 DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
2012 DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1018 DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2013 DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH), 1019 DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
2014 DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1020 DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2015 DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1021 DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2016 DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH), 1022 DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH),
2017 DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH), 1023 DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
2018 DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 1024 DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
2019 DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), 1025 DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
2020 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED), 1026 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
2021 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), 1027 DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
2022 DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED), 1028 DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
@@ -2025,8 +1031,8 @@ const struct drm_ioctl_desc i915_ioctls[] = {
2025 DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), 1031 DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
2026 DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), 1032 DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
2027 DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW), 1033 DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
2028 DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), 1034 DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
2029 DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED), 1035 DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
2030 DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), 1036 DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
2031 DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), 1037 DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
2032 DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW), 1038 DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 2318b4c7a8f8..f990ab4c3efb 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -356,6 +356,19 @@ static const struct intel_device_info intel_cherryview_info = {
356 CURSOR_OFFSETS, 356 CURSOR_OFFSETS,
357}; 357};
358 358
359static const struct intel_device_info intel_skylake_info = {
360 .is_preliminary = 1,
361 .is_skylake = 1,
362 .gen = 9, .num_pipes = 3,
363 .need_gfx_hws = 1, .has_hotplug = 1,
364 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
365 .has_llc = 1,
366 .has_ddi = 1,
367 .has_fbc = 1,
368 GEN_DEFAULT_PIPEOFFSETS,
369 IVB_CURSOR_OFFSETS,
370};
371
359/* 372/*
360 * Make sure any device matches here are from most specific to most 373 * Make sure any device matches here are from most specific to most
361 * general. For example, since the Quanta match is based on the subsystem 374 * general. For example, since the Quanta match is based on the subsystem
@@ -392,7 +405,8 @@ static const struct intel_device_info intel_cherryview_info = {
392 INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info), \ 405 INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info), \
393 INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), \ 406 INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), \
394 INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \ 407 INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \
395 INTEL_CHV_IDS(&intel_cherryview_info) 408 INTEL_CHV_IDS(&intel_cherryview_info), \
409 INTEL_SKL_IDS(&intel_skylake_info)
396 410
397static const struct pci_device_id pciidlist[] = { /* aka */ 411static const struct pci_device_id pciidlist[] = { /* aka */
398 INTEL_PCI_IDS, 412 INTEL_PCI_IDS,
@@ -449,7 +463,7 @@ void intel_detect_pch(struct drm_device *dev)
449 dev_priv->pch_type = PCH_LPT; 463 dev_priv->pch_type = PCH_LPT;
450 DRM_DEBUG_KMS("Found LynxPoint PCH\n"); 464 DRM_DEBUG_KMS("Found LynxPoint PCH\n");
451 WARN_ON(!IS_HASWELL(dev)); 465 WARN_ON(!IS_HASWELL(dev));
452 WARN_ON(IS_ULT(dev)); 466 WARN_ON(IS_HSW_ULT(dev));
453 } else if (IS_BROADWELL(dev)) { 467 } else if (IS_BROADWELL(dev)) {
454 dev_priv->pch_type = PCH_LPT; 468 dev_priv->pch_type = PCH_LPT;
455 dev_priv->pch_id = 469 dev_priv->pch_id =
@@ -460,7 +474,15 @@ void intel_detect_pch(struct drm_device *dev)
460 dev_priv->pch_type = PCH_LPT; 474 dev_priv->pch_type = PCH_LPT;
461 DRM_DEBUG_KMS("Found LynxPoint LP PCH\n"); 475 DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
462 WARN_ON(!IS_HASWELL(dev)); 476 WARN_ON(!IS_HASWELL(dev));
463 WARN_ON(!IS_ULT(dev)); 477 WARN_ON(!IS_HSW_ULT(dev));
478 } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
479 dev_priv->pch_type = PCH_SPT;
480 DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
481 WARN_ON(!IS_SKYLAKE(dev));
482 } else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) {
483 dev_priv->pch_type = PCH_SPT;
484 DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
485 WARN_ON(!IS_SKYLAKE(dev));
464 } else 486 } else
465 continue; 487 continue;
466 488
@@ -529,10 +551,10 @@ static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
529} 551}
530 552
531static int intel_suspend_complete(struct drm_i915_private *dev_priv); 553static int intel_suspend_complete(struct drm_i915_private *dev_priv);
532static int intel_resume_prepare(struct drm_i915_private *dev_priv, 554static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
533 bool rpm_resume); 555 bool rpm_resume);
534 556
535static int i915_drm_freeze(struct drm_device *dev) 557static int i915_drm_suspend(struct drm_device *dev)
536{ 558{
537 struct drm_i915_private *dev_priv = dev->dev_private; 559 struct drm_i915_private *dev_priv = dev->dev_private;
538 struct drm_crtc *crtc; 560 struct drm_crtc *crtc;
@@ -562,6 +584,8 @@ static int i915_drm_freeze(struct drm_device *dev)
562 return error; 584 return error;
563 } 585 }
564 586
587 intel_suspend_gt_powersave(dev);
588
565 /* 589 /*
566 * Disable CRTCs directly since we want to preserve sw state 590 * Disable CRTCs directly since we want to preserve sw state
567 * for _thaw. Also, power gate the CRTC power wells. 591 * for _thaw. Also, power gate the CRTC power wells.
@@ -573,16 +597,12 @@ static int i915_drm_freeze(struct drm_device *dev)
573 597
574 intel_dp_mst_suspend(dev); 598 intel_dp_mst_suspend(dev);
575 599
576 flush_delayed_work(&dev_priv->rps.delayed_resume_work); 600 intel_runtime_pm_disable_interrupts(dev_priv);
577
578 intel_runtime_pm_disable_interrupts(dev);
579 intel_hpd_cancel_work(dev_priv); 601 intel_hpd_cancel_work(dev_priv);
580 602
581 intel_suspend_encoders(dev_priv); 603 intel_suspend_encoders(dev_priv);
582 604
583 intel_suspend_gt_powersave(dev); 605 intel_suspend_hw(dev);
584
585 intel_modeset_suspend_hw(dev);
586 } 606 }
587 607
588 i915_gem_suspend_gtt_mappings(dev); 608 i915_gem_suspend_gtt_mappings(dev);
@@ -608,7 +628,26 @@ static int i915_drm_freeze(struct drm_device *dev)
608 return 0; 628 return 0;
609} 629}
610 630
611int i915_suspend(struct drm_device *dev, pm_message_t state) 631static int i915_drm_suspend_late(struct drm_device *drm_dev)
632{
633 struct drm_i915_private *dev_priv = drm_dev->dev_private;
634 int ret;
635
636 ret = intel_suspend_complete(dev_priv);
637
638 if (ret) {
639 DRM_ERROR("Suspend complete failed: %d\n", ret);
640
641 return ret;
642 }
643
644 pci_disable_device(drm_dev->pdev);
645 pci_set_power_state(drm_dev->pdev, PCI_D3hot);
646
647 return 0;
648}
649
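For orientation: the suspend path is now split into an early phase (i915_drm_suspend(), in the hunk above) and the late phase just defined, and i915_suspend_legacy() below chains the two. In outline:

/*
 *   i915_suspend_legacy()
 *     -> i915_drm_suspend()        modeset, GEM and interrupt teardown
 *     -> i915_drm_suspend_late()   intel_suspend_complete(), then
 *                                  pci_disable_device() + PCI_D3hot
 */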
650int i915_suspend_legacy(struct drm_device *dev, pm_message_t state)
612{ 651{
613 int error; 652 int error;
614 653
@@ -618,48 +657,25 @@ int i915_suspend(struct drm_device *dev, pm_message_t state)
618 return -ENODEV; 657 return -ENODEV;
619 } 658 }
620 659
621 if (state.event == PM_EVENT_PRETHAW) 660 if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
622 return 0; 661 state.event != PM_EVENT_FREEZE))
623 662 return -EINVAL;
624 663
625 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 664 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
626 return 0; 665 return 0;
627 666
628 error = i915_drm_freeze(dev); 667 error = i915_drm_suspend(dev);
629 if (error) 668 if (error)
630 return error; 669 return error;
631 670
632 if (state.event == PM_EVENT_SUSPEND) { 671 return i915_drm_suspend_late(dev);
633 /* Shut down the device */
634 pci_disable_device(dev->pdev);
635 pci_set_power_state(dev->pdev, PCI_D3hot);
636 }
637
638 return 0;
639} 672}
640 673
641static int i915_drm_thaw_early(struct drm_device *dev) 674static int i915_drm_resume(struct drm_device *dev)
642{ 675{
643 struct drm_i915_private *dev_priv = dev->dev_private; 676 struct drm_i915_private *dev_priv = dev->dev_private;
644 int ret;
645
646 ret = intel_resume_prepare(dev_priv, false);
647 if (ret)
648		DRM_ERROR("Resume prepare failed: %d, continuing resume\n", ret);
649 677
650 intel_uncore_early_sanitize(dev, true); 678 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
651 intel_uncore_sanitize(dev);
652 intel_power_domains_init_hw(dev_priv);
653
654 return ret;
655}
656
657static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
658{
659 struct drm_i915_private *dev_priv = dev->dev_private;
660
661 if (drm_core_check_feature(dev, DRIVER_MODESET) &&
662 restore_gtt_mappings) {
663 mutex_lock(&dev->struct_mutex); 679 mutex_lock(&dev->struct_mutex);
664 i915_gem_restore_gtt_mappings(dev); 680 i915_gem_restore_gtt_mappings(dev);
665 mutex_unlock(&dev->struct_mutex); 681 mutex_unlock(&dev->struct_mutex);
@@ -680,30 +696,29 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
680 } 696 }
681 mutex_unlock(&dev->struct_mutex); 697 mutex_unlock(&dev->struct_mutex);
682 698
683 intel_runtime_pm_restore_interrupts(dev); 699 /* We need working interrupts for modeset enabling ... */
700 intel_runtime_pm_enable_interrupts(dev_priv);
684 701
685 intel_modeset_init_hw(dev); 702 intel_modeset_init_hw(dev);
686 703
687 { 704 spin_lock_irq(&dev_priv->irq_lock);
688 unsigned long irqflags; 705 if (dev_priv->display.hpd_irq_setup)
689 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 706 dev_priv->display.hpd_irq_setup(dev);
690 if (dev_priv->display.hpd_irq_setup) 707 spin_unlock_irq(&dev_priv->irq_lock);
691 dev_priv->display.hpd_irq_setup(dev);
692 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
693 }
694 708
695 intel_dp_mst_resume(dev);
696 drm_modeset_lock_all(dev); 709 drm_modeset_lock_all(dev);
697 intel_modeset_setup_hw_state(dev, true); 710 intel_modeset_setup_hw_state(dev, true);
698 drm_modeset_unlock_all(dev); 711 drm_modeset_unlock_all(dev);
699 712
713 intel_dp_mst_resume(dev);
714
700 /* 715 /*
701 * ... but also need to make sure that hotplug processing 716 * ... but also need to make sure that hotplug processing
702 * doesn't cause havoc. Like in the driver load code we don't 717 * doesn't cause havoc. Like in the driver load code we don't
703 * bother with the tiny race here where we might lose hotplug 718
704 * notifications. 719 * notifications.
705 * */ 720 * */
706 intel_hpd_init(dev); 721 intel_hpd_init(dev_priv);
707 /* Config may have changed between suspend and resume */ 722 /* Config may have changed between suspend and resume */
708 drm_helper_hpd_irq_event(dev); 723 drm_helper_hpd_irq_event(dev);
709 } 724 }
@@ -718,21 +733,15 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
718 733
719 intel_opregion_notify_adapter(dev, PCI_D0); 734 intel_opregion_notify_adapter(dev, PCI_D0);
720 735
721 return 0; 736 drm_kms_helper_poll_enable(dev);
722}
723
724static int i915_drm_thaw(struct drm_device *dev)
725{
726 if (drm_core_check_feature(dev, DRIVER_MODESET))
727 i915_check_and_clear_faults(dev);
728 737
729 return __i915_drm_thaw(dev, true); 738 return 0;
730} 739}
731 740
732static int i915_resume_early(struct drm_device *dev) 741static int i915_drm_resume_early(struct drm_device *dev)
733{ 742{
734 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) 743 struct drm_i915_private *dev_priv = dev->dev_private;
735 return 0; 744 int ret = 0;
736 745
737 /* 746 /*
738 * We have a resume ordering issue with the snd-hda driver also 747 * We have a resume ordering issue with the snd-hda driver also
@@ -748,33 +757,34 @@ static int i915_resume_early(struct drm_device *dev)
748 757
749 pci_set_master(dev->pdev); 758 pci_set_master(dev->pdev);
750 759
751 return i915_drm_thaw_early(dev); 760 if (IS_VALLEYVIEW(dev_priv))
761 ret = vlv_resume_prepare(dev_priv, false);
762 if (ret)
763		DRM_ERROR("Resume prepare failed: %d, continuing resume\n", ret);
764
765 intel_uncore_early_sanitize(dev, true);
766
767 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
768 hsw_disable_pc8(dev_priv);
769
770 intel_uncore_sanitize(dev);
771 intel_power_domains_init_hw(dev_priv);
772
773 return ret;
752} 774}
753 775
754int i915_resume(struct drm_device *dev) 776int i915_resume_legacy(struct drm_device *dev)
755{ 777{
756 struct drm_i915_private *dev_priv = dev->dev_private;
757 int ret; 778 int ret;
758 779
759 /* 780 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
760 * Platforms with opregion should have sane BIOS, older ones (gen3 and 781 return 0;
761 * earlier) need to restore the GTT mappings since the BIOS might clear 782
762 * all our scratch PTEs. 783 ret = i915_drm_resume_early(dev);
763 */
764 ret = __i915_drm_thaw(dev, !dev_priv->opregion.header);
765 if (ret) 784 if (ret)
766 return ret; 785 return ret;
767 786
768 drm_kms_helper_poll_enable(dev); 787 return i915_drm_resume(dev);
769 return 0;
770}
771
772static int i915_resume_legacy(struct drm_device *dev)
773{
774 i915_resume_early(dev);
775 i915_resume(dev);
776
777 return 0;
778} 788}
779 789
780/** 790/**
@@ -820,6 +830,9 @@ int i915_reset(struct drm_device *dev)
820 } 830 }
821 } 831 }
822 832
833 if (i915_stop_ring_allow_warn(dev_priv))
834 pr_notice("drm/i915: Resetting chip after gpu hang\n");
835
823 if (ret) { 836 if (ret) {
824 DRM_ERROR("Failed to reset chip: %i\n", ret); 837 DRM_ERROR("Failed to reset chip: %i\n", ret);
825 mutex_unlock(&dev->struct_mutex); 838 mutex_unlock(&dev->struct_mutex);
@@ -840,10 +853,7 @@ int i915_reset(struct drm_device *dev)
840 * was running at the time of the reset (i.e. we weren't VT 853 * was running at the time of the reset (i.e. we weren't VT
841 * switched away). 854 * switched away).
842 */ 855 */
843 if (drm_core_check_feature(dev, DRIVER_MODESET) || 856 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
844 !dev_priv->ums.mm_suspended) {
845 dev_priv->ums.mm_suspended = 0;
846
847 /* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */ 857 /* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
848 dev_priv->gpu_error.reload_in_reset = true; 858 dev_priv->gpu_error.reload_in_reset = true;
849 859
@@ -923,15 +933,13 @@ static int i915_pm_suspend(struct device *dev)
923 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) 933 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
924 return 0; 934 return 0;
925 935
926 return i915_drm_freeze(drm_dev); 936 return i915_drm_suspend(drm_dev);
927} 937}
928 938
929static int i915_pm_suspend_late(struct device *dev) 939static int i915_pm_suspend_late(struct device *dev)
930{ 940{
931 struct pci_dev *pdev = to_pci_dev(dev); 941 struct pci_dev *pdev = to_pci_dev(dev);
932 struct drm_device *drm_dev = pci_get_drvdata(pdev); 942 struct drm_device *drm_dev = pci_get_drvdata(pdev);
933 struct drm_i915_private *dev_priv = drm_dev->dev_private;
934 int ret;
935 943
936 /* 944 /*
937 * We have a suspend ordering issue with the snd-hda driver also 945
@@ -945,16 +953,7 @@ static int i915_pm_suspend_late(struct device *dev)
945 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) 953 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
946 return 0; 954 return 0;
947 955
948 ret = intel_suspend_complete(dev_priv); 956 return i915_drm_suspend_late(drm_dev);
949
950 if (ret)
951 DRM_ERROR("Suspend complete failed: %d\n", ret);
952 else {
953 pci_disable_device(pdev);
954 pci_set_power_state(pdev, PCI_D3hot);
955 }
956
957 return ret;
958} 957}
959 958
960static int i915_pm_resume_early(struct device *dev) 959static int i915_pm_resume_early(struct device *dev)
@@ -962,61 +961,21 @@ static int i915_pm_resume_early(struct device *dev)
962 struct pci_dev *pdev = to_pci_dev(dev); 961 struct pci_dev *pdev = to_pci_dev(dev);
963 struct drm_device *drm_dev = pci_get_drvdata(pdev); 962 struct drm_device *drm_dev = pci_get_drvdata(pdev);
964 963
965 return i915_resume_early(drm_dev); 964 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
966} 965 return 0;
967
968static int i915_pm_resume(struct device *dev)
969{
970 struct pci_dev *pdev = to_pci_dev(dev);
971 struct drm_device *drm_dev = pci_get_drvdata(pdev);
972
973 return i915_resume(drm_dev);
974}
975
976static int i915_pm_freeze(struct device *dev)
977{
978 struct pci_dev *pdev = to_pci_dev(dev);
979 struct drm_device *drm_dev = pci_get_drvdata(pdev);
980
981 if (!drm_dev || !drm_dev->dev_private) {
982 dev_err(dev, "DRM not initialized, aborting suspend.\n");
983 return -ENODEV;
984 }
985
986 return i915_drm_freeze(drm_dev);
987}
988
989static int i915_pm_freeze_late(struct device *dev)
990{
991 struct pci_dev *pdev = to_pci_dev(dev);
992 struct drm_device *drm_dev = pci_get_drvdata(pdev);
993 struct drm_i915_private *dev_priv = drm_dev->dev_private;
994
995 return intel_suspend_complete(dev_priv);
996}
997
998static int i915_pm_thaw_early(struct device *dev)
999{
1000 struct pci_dev *pdev = to_pci_dev(dev);
1001 struct drm_device *drm_dev = pci_get_drvdata(pdev);
1002 966
1003 return i915_drm_thaw_early(drm_dev); 967 return i915_drm_resume_early(drm_dev);
1004} 968}
1005 969
1006static int i915_pm_thaw(struct device *dev) 970static int i915_pm_resume(struct device *dev)
1007{ 971{
1008 struct pci_dev *pdev = to_pci_dev(dev); 972 struct pci_dev *pdev = to_pci_dev(dev);
1009 struct drm_device *drm_dev = pci_get_drvdata(pdev); 973 struct drm_device *drm_dev = pci_get_drvdata(pdev);
1010 974
1011 return i915_drm_thaw(drm_dev); 975 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1012} 976 return 0;
1013
1014static int i915_pm_poweroff(struct device *dev)
1015{
1016 struct pci_dev *pdev = to_pci_dev(dev);
1017 struct drm_device *drm_dev = pci_get_drvdata(pdev);
1018 977
1019 return i915_drm_freeze(drm_dev); 978 return i915_drm_resume(drm_dev);
1020} 979}
1021 980
1022static int hsw_suspend_complete(struct drm_i915_private *dev_priv) 981static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
@@ -1026,25 +985,6 @@ static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
1026 return 0; 985 return 0;
1027} 986}
1028 987
1029static int snb_resume_prepare(struct drm_i915_private *dev_priv,
1030 bool rpm_resume)
1031{
1032 struct drm_device *dev = dev_priv->dev;
1033
1034 if (rpm_resume)
1035 intel_init_pch_refclk(dev);
1036
1037 return 0;
1038}
1039
1040static int hsw_resume_prepare(struct drm_i915_private *dev_priv,
1041 bool rpm_resume)
1042{
1043 hsw_disable_pc8(dev_priv);
1044
1045 return 0;
1046}
1047
1048/* 988/*
1049 * Save all Gunit registers that may be lost after a D3 and a subsequent 989 * Save all Gunit registers that may be lost after a D3 and a subsequent
1050 * S0i[R123] transition. The list of registers needing a save/restore is 990 * S0i[R123] transition. The list of registers needing a save/restore is
@@ -1449,18 +1389,13 @@ static int intel_runtime_suspend(struct device *device)
1449 i915_gem_release_all_mmaps(dev_priv); 1389 i915_gem_release_all_mmaps(dev_priv);
1450 mutex_unlock(&dev->struct_mutex); 1390 mutex_unlock(&dev->struct_mutex);
1451 1391
1452 /* 1392 intel_suspend_gt_powersave(dev);
1453 * rps.work can't be rearmed here, since we get here only after making 1393 intel_runtime_pm_disable_interrupts(dev_priv);
1454 * sure the GPU is idle and the RPS freq is set to the minimum. See
1455 * intel_mark_idle().
1456 */
1457 cancel_work_sync(&dev_priv->rps.work);
1458 intel_runtime_pm_disable_interrupts(dev);
1459 1394
1460 ret = intel_suspend_complete(dev_priv); 1395 ret = intel_suspend_complete(dev_priv);
1461 if (ret) { 1396 if (ret) {
1462 DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret); 1397 DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
1463 intel_runtime_pm_restore_interrupts(dev); 1398 intel_runtime_pm_enable_interrupts(dev_priv);
1464 1399
1465 return ret; 1400 return ret;
1466 } 1401 }
@@ -1502,7 +1437,7 @@ static int intel_runtime_resume(struct device *device)
1502 struct pci_dev *pdev = to_pci_dev(device); 1437 struct pci_dev *pdev = to_pci_dev(device);
1503 struct drm_device *dev = pci_get_drvdata(pdev); 1438 struct drm_device *dev = pci_get_drvdata(pdev);
1504 struct drm_i915_private *dev_priv = dev->dev_private; 1439 struct drm_i915_private *dev_priv = dev->dev_private;
1505 int ret; 1440 int ret = 0;
1506 1441
1507 if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev))) 1442 if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev)))
1508 return -ENODEV; 1443 return -ENODEV;
@@ -1512,7 +1447,13 @@ static int intel_runtime_resume(struct device *device)
1512 intel_opregion_notify_adapter(dev, PCI_D0); 1447 intel_opregion_notify_adapter(dev, PCI_D0);
1513 dev_priv->pm.suspended = false; 1448 dev_priv->pm.suspended = false;
1514 1449
1515 ret = intel_resume_prepare(dev_priv, true); 1450 if (IS_GEN6(dev_priv))
1451 intel_init_pch_refclk(dev);
1452 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
1453 hsw_disable_pc8(dev_priv);
1454 else if (IS_VALLEYVIEW(dev_priv))
1455 ret = vlv_resume_prepare(dev_priv, true);
1456
1516 /* 1457 /*
1517 * No point of rolling back things in case of an error, as the best 1458 * No point of rolling back things in case of an error, as the best
1518 * we can do is to hope that things will still work (and disable RPM). 1459 * we can do is to hope that things will still work (and disable RPM).
@@ -1520,8 +1461,8 @@ static int intel_runtime_resume(struct device *device)
1520 i915_gem_init_swizzling(dev); 1461 i915_gem_init_swizzling(dev);
1521 gen6_update_ring_freq(dev); 1462 gen6_update_ring_freq(dev);
1522 1463
1523 intel_runtime_pm_restore_interrupts(dev); 1464 intel_runtime_pm_enable_interrupts(dev_priv);
1524 intel_reset_gt_powersave(dev); 1465 intel_enable_gt_powersave(dev);
1525 1466
1526 if (ret) 1467 if (ret)
1527 DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret); 1468 DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
@@ -1550,41 +1491,41 @@ static int intel_suspend_complete(struct drm_i915_private *dev_priv)
1550 return ret; 1491 return ret;
1551} 1492}
1552 1493
1553/*
1554 * This function implements common functionality of runtime and system
1555 * resume sequence. Variable rpm_resume used for implementing different
1556 * code paths.
1557 */
1558static int intel_resume_prepare(struct drm_i915_private *dev_priv,
1559 bool rpm_resume)
1560{
1561 struct drm_device *dev = dev_priv->dev;
1562 int ret;
1563
1564 if (IS_GEN6(dev))
1565 ret = snb_resume_prepare(dev_priv, rpm_resume);
1566 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1567 ret = hsw_resume_prepare(dev_priv, rpm_resume);
1568 else if (IS_VALLEYVIEW(dev))
1569 ret = vlv_resume_prepare(dev_priv, rpm_resume);
1570 else
1571 ret = 0;
1572
1573 return ret;
1574}
1575
1576static const struct dev_pm_ops i915_pm_ops = { 1494static const struct dev_pm_ops i915_pm_ops = {
1495 /*
1496 * S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
1497 * PMSG_RESUME]
1498 */
1577 .suspend = i915_pm_suspend, 1499 .suspend = i915_pm_suspend,
1578 .suspend_late = i915_pm_suspend_late, 1500 .suspend_late = i915_pm_suspend_late,
1579 .resume_early = i915_pm_resume_early, 1501 .resume_early = i915_pm_resume_early,
1580 .resume = i915_pm_resume, 1502 .resume = i915_pm_resume,
1581 .freeze = i915_pm_freeze, 1503
1582 .freeze_late = i915_pm_freeze_late, 1504 /*
1583 .thaw_early = i915_pm_thaw_early, 1505 * S4 event handlers
1584 .thaw = i915_pm_thaw, 1506 * @freeze, @freeze_late : called (1) before creating the
1585 .poweroff = i915_pm_poweroff, 1507 * hibernation image [PMSG_FREEZE] and
1508 * (2) after rebooting, before restoring
1509 * the image [PMSG_QUIESCE]
1510 * @thaw, @thaw_early : called (1) after creating the hibernation
1511 * image, before writing it [PMSG_THAW]
1512 * and (2) after failing to create or
1513 * restore the image [PMSG_RECOVER]
1514 * @poweroff, @poweroff_late: called after writing the hibernation
1515 * image, before rebooting [PMSG_HIBERNATE]
1516 * @restore, @restore_early : called after rebooting and restoring the
1517 * hibernation image [PMSG_RESTORE]
1518 */
1519 .freeze = i915_pm_suspend,
1520 .freeze_late = i915_pm_suspend_late,
1521 .thaw_early = i915_pm_resume_early,
1522 .thaw = i915_pm_resume,
1523 .poweroff = i915_pm_suspend,
1524 .poweroff_late = i915_pm_suspend_late,
1586 .restore_early = i915_pm_resume_early, 1525 .restore_early = i915_pm_resume_early,
1587 .restore = i915_pm_resume, 1526 .restore = i915_pm_resume,
1527
1528 /* S0ix (via runtime suspend) event handlers */
1588 .runtime_suspend = intel_runtime_suspend, 1529 .runtime_suspend = intel_runtime_suspend,
1589 .runtime_resume = intel_runtime_resume, 1530 .runtime_resume = intel_runtime_resume,
1590}; 1531};
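The callback table above maps every system-sleep phase onto the same two suspend/resume paths. As a minimal sketch of the same pattern (the foo_* names are hypothetical, not from the driver), a device whose hardware handling is identical across S3 and S4 can wire its dev_pm_ops like this:

	#include <linux/pm.h>

	/* Hypothetical stand-ins for a driver's real suspend/resume helpers. */
	static int foo_pm_suspend(struct device *dev) { return 0; }
	static int foo_pm_resume(struct device *dev) { return 0; }

	/*
	 * Same shape as i915_pm_ops above: the hibernation (S4) phases
	 * reuse the ordinary suspend/resume paths, since the hardware
	 * work is the same and only the PM core's bookkeeping differs.
	 */
	static const struct dev_pm_ops foo_pm_ops = {
		.suspend  = foo_pm_suspend,	/* PMSG_SUSPEND */
		.resume   = foo_pm_resume,	/* PMSG_RESUME */
		.freeze   = foo_pm_suspend,	/* PMSG_FREEZE */
		.thaw     = foo_pm_resume,	/* PMSG_THAW */
		.poweroff = foo_pm_suspend,	/* PMSG_HIBERNATE */
		.restore  = foo_pm_resume,	/* PMSG_RESTORE */
	};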
@@ -1626,12 +1567,10 @@ static struct drm_driver driver = {
1626 .set_busid = drm_pci_set_busid, 1567 .set_busid = drm_pci_set_busid,
1627 1568
1628 /* Used in place of i915_pm_ops for non-DRIVER_MODESET */ 1569 /* Used in place of i915_pm_ops for non-DRIVER_MODESET */
1629 .suspend = i915_suspend, 1570 .suspend = i915_suspend_legacy,
1630 .resume = i915_resume_legacy, 1571 .resume = i915_resume_legacy,
1631 1572
1632 .device_is_agp = i915_driver_device_is_agp, 1573 .device_is_agp = i915_driver_device_is_agp,
1633 .master_create = i915_master_create,
1634 .master_destroy = i915_master_destroy,
1635#if defined(CONFIG_DEBUG_FS) 1574#if defined(CONFIG_DEBUG_FS)
1636 .debugfs_init = i915_debugfs_init, 1575 .debugfs_init = i915_debugfs_init,
1637 .debugfs_cleanup = i915_debugfs_cleanup, 1576 .debugfs_cleanup = i915_debugfs_cleanup,
@@ -1645,7 +1584,7 @@ static struct drm_driver driver = {
1645 .gem_prime_import = i915_gem_prime_import, 1584 .gem_prime_import = i915_gem_prime_import,
1646 1585
1647 .dumb_create = i915_gem_dumb_create, 1586 .dumb_create = i915_gem_dumb_create,
1648 .dumb_map_offset = i915_gem_mmap_gtt, 1587 .dumb_map_offset = i915_gem_dumb_map_offset,
1649 .dumb_destroy = drm_gem_dumb_destroy, 1588 .dumb_destroy = drm_gem_dumb_destroy,
1650 .ioctls = i915_ioctls, 1589 .ioctls = i915_ioctls,
1651 .fops = &i915_driver_fops, 1590 .fops = &i915_driver_fops,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 16a6f6d187a1..63bcda5541ec 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -55,7 +55,10 @@
55 55
56#define DRIVER_NAME "i915" 56#define DRIVER_NAME "i915"
57#define DRIVER_DESC "Intel Graphics" 57#define DRIVER_DESC "Intel Graphics"
58#define DRIVER_DATE "20140905" 58#define DRIVER_DATE "20141121"
59
60#undef WARN_ON
61#define WARN_ON(x) WARN(x, "WARN_ON(" #x ")")
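The override above stringifies the condition, so the warning banner names the expression that failed rather than only a file and line. A rough sketch of what a call site expands to under this definition (the NULL check is arbitrary):

	static void foo_check(struct drm_i915_private *dev_priv)
	{
		/*
		 * With the override above this expands to
		 *   WARN(dev_priv == NULL, "WARN_ON(dev_priv == NULL)"),
		 * so the splat carries the failed expression.
		 */
		WARN_ON(dev_priv == NULL);
	}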
59 62
60enum pipe { 63enum pipe {
61 INVALID_PIPE = -1, 64 INVALID_PIPE = -1,
@@ -76,6 +79,14 @@ enum transcoder {
76}; 79};
77#define transcoder_name(t) ((t) + 'A') 80#define transcoder_name(t) ((t) + 'A')
78 81
82/*
83 * This is the maximum (across all platforms) number of planes (primary +
84 * sprites) that can be active at the same time on one pipe.
85 *
86 * This value doesn't count the cursor plane.
87 */
88#define I915_MAX_PLANES 3
89
79enum plane { 90enum plane {
80 PLANE_A = 0, 91 PLANE_A = 0,
81 PLANE_B, 92 PLANE_B,
@@ -202,10 +213,15 @@ enum intel_dpll_id {
202 /* real shared dpll ids must be >= 0 */ 213 /* real shared dpll ids must be >= 0 */
203 DPLL_ID_PCH_PLL_A = 0, 214 DPLL_ID_PCH_PLL_A = 0,
204 DPLL_ID_PCH_PLL_B = 1, 215 DPLL_ID_PCH_PLL_B = 1,
216 /* hsw/bdw */
205 DPLL_ID_WRPLL1 = 0, 217 DPLL_ID_WRPLL1 = 0,
206 DPLL_ID_WRPLL2 = 1, 218 DPLL_ID_WRPLL2 = 1,
219 /* skl */
220 DPLL_ID_SKL_DPLL1 = 0,
221 DPLL_ID_SKL_DPLL2 = 1,
222 DPLL_ID_SKL_DPLL3 = 2,
207}; 223};
208#define I915_NUM_PLLS 2 224#define I915_NUM_PLLS 3
209 225
210struct intel_dpll_hw_state { 226struct intel_dpll_hw_state {
211 /* i9xx, pch plls */ 227 /* i9xx, pch plls */
@@ -216,16 +232,33 @@ struct intel_dpll_hw_state {
216 232
217 /* hsw, bdw */ 233 /* hsw, bdw */
218 uint32_t wrpll; 234 uint32_t wrpll;
235
236 /* skl */
237 /*
238 * DPLL_CTRL1 has 6 bits for each DPLL. We store those in the
239 * lower part of ctrl1 and they get shifted into position when
240 * writing the register. This allows us to easily compare the
241 * state when deciding whether the DPLL can be shared.
242 */
243 uint32_t ctrl1;
244 /* HDMI only, 0 when used for DP */
245 uint32_t cfgcr1, cfgcr2;
246};
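Given that layout, writing the register amounts to shifting the stored low bits into this DPLL's 6-bit field. A sketch under that assumption; DPLL_CTRL1 is the real register name, but the helper and the 0x3f field mask here are illustrative:

	static void skl_write_dpll_ctrl1(struct drm_i915_private *dev_priv,
					 const struct intel_dpll_hw_state *hw_state,
					 enum intel_dpll_id id)
	{
		u32 val = I915_READ(DPLL_CTRL1);

		val &= ~(0x3f << (id * 6));		/* clear this PLL's field */
		val |= hw_state->ctrl1 << (id * 6);	/* shift stored bits into place */
		I915_WRITE(DPLL_CTRL1, val);
	}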
247
248struct intel_shared_dpll_config {
249 unsigned crtc_mask; /* mask of CRTCs sharing this PLL */
250 struct intel_dpll_hw_state hw_state;
219}; 251};
220 252
221struct intel_shared_dpll { 253struct intel_shared_dpll {
222 int refcount; /* count of number of CRTCs sharing this PLL */ 254 struct intel_shared_dpll_config config;
255 struct intel_shared_dpll_config *new_config;
256
223 int active; /* count of number of active CRTCs (i.e. DPMS on) */ 257 int active; /* count of number of active CRTCs (i.e. DPMS on) */
224 bool on; /* is the PLL actually active? Disabled during modeset */ 258 bool on; /* is the PLL actually active? Disabled during modeset */
225 const char *name; 259 const char *name;
226 /* should match the index in the dev_priv->shared_dplls array */ 260 /* should match the index in the dev_priv->shared_dplls array */
227 enum intel_dpll_id id; 261 enum intel_dpll_id id;
228 struct intel_dpll_hw_state hw_state;
229 /* The mode_set hook is optional and should be used together with the 262 /* The mode_set hook is optional and should be used together with the
230 * intel_prepare_shared_dpll function. */ 263 * intel_prepare_shared_dpll function. */
231 void (*mode_set)(struct drm_i915_private *dev_priv, 264 void (*mode_set)(struct drm_i915_private *dev_priv,
@@ -239,6 +272,11 @@ struct intel_shared_dpll {
239 struct intel_dpll_hw_state *hw_state); 272 struct intel_dpll_hw_state *hw_state);
240}; 273};
241 274
275#define SKL_DPLL0 0
276#define SKL_DPLL1 1
277#define SKL_DPLL2 2
278#define SKL_DPLL3 3
279
242/* Used by dp and fdi links */ 280/* Used by dp and fdi links */
243struct intel_link_m_n { 281struct intel_link_m_n {
244 uint32_t tu; 282 uint32_t tu;
@@ -267,7 +305,6 @@ void intel_link_compute_m_n(int bpp, int nlanes,
267#define DRIVER_PATCHLEVEL 0 305#define DRIVER_PATCHLEVEL 0
268 306
269#define WATCH_LISTS 0 307#define WATCH_LISTS 0
270#define WATCH_GTT 0
271 308
272struct opregion_header; 309struct opregion_header;
273struct opregion_acpi; 310struct opregion_acpi;
@@ -290,12 +327,6 @@ struct intel_opregion {
290struct intel_overlay; 327struct intel_overlay;
291struct intel_overlay_error_state; 328struct intel_overlay_error_state;
292 329
293struct drm_local_map;
294
295struct drm_i915_master_private {
296 struct drm_local_map *sarea;
297 struct _drm_i915_sarea *sarea_priv;
298};
299#define I915_FENCE_REG_NONE -1 330#define I915_FENCE_REG_NONE -1
300#define I915_MAX_NUM_FENCES 32 331#define I915_MAX_NUM_FENCES 32
301/* 32 fences + sign bit for FENCE_REG_NONE */ 332/* 32 fences + sign bit for FENCE_REG_NONE */
@@ -426,6 +457,7 @@ struct drm_i915_error_state {
426}; 457};
427 458
428struct intel_connector; 459struct intel_connector;
460struct intel_encoder;
429struct intel_crtc_config; 461struct intel_crtc_config;
430struct intel_plane_config; 462struct intel_plane_config;
431struct intel_crtc; 463struct intel_crtc;
@@ -452,7 +484,7 @@ struct drm_i915_display_funcs {
452 * Returns true on success, false on failure. 484 * Returns true on success, false on failure.
453 */ 485 */
454 bool (*find_dpll)(const struct intel_limit *limit, 486 bool (*find_dpll)(const struct intel_limit *limit,
455 struct drm_crtc *crtc, 487 struct intel_crtc *crtc,
456 int target, int refclk, 488 int target, int refclk,
457 struct dpll *match_clock, 489 struct dpll *match_clock,
458 struct dpll *best_clock); 490 struct dpll *best_clock);
@@ -468,15 +500,14 @@ struct drm_i915_display_funcs {
468 struct intel_crtc_config *); 500 struct intel_crtc_config *);
469 void (*get_plane_config)(struct intel_crtc *, 501 void (*get_plane_config)(struct intel_crtc *,
470 struct intel_plane_config *); 502 struct intel_plane_config *);
471 int (*crtc_mode_set)(struct drm_crtc *crtc, 503 int (*crtc_compute_clock)(struct intel_crtc *crtc);
472 int x, int y,
473 struct drm_framebuffer *old_fb);
474 void (*crtc_enable)(struct drm_crtc *crtc); 504 void (*crtc_enable)(struct drm_crtc *crtc);
475 void (*crtc_disable)(struct drm_crtc *crtc); 505 void (*crtc_disable)(struct drm_crtc *crtc);
476 void (*off)(struct drm_crtc *crtc); 506 void (*off)(struct drm_crtc *crtc);
477 void (*write_eld)(struct drm_connector *connector, 507 void (*audio_codec_enable)(struct drm_connector *connector,
478 struct drm_crtc *crtc, 508 struct intel_encoder *encoder,
479 struct drm_display_mode *mode); 509 struct drm_display_mode *mode);
510 void (*audio_codec_disable)(struct intel_encoder *encoder);
480 void (*fdi_link_train)(struct drm_crtc *crtc); 511 void (*fdi_link_train)(struct drm_crtc *crtc);
481 void (*init_clock_gating)(struct drm_device *dev); 512 void (*init_clock_gating)(struct drm_device *dev);
482 int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc, 513 int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
@@ -494,7 +525,7 @@ struct drm_i915_display_funcs {
494 /* display clock increase/decrease */ 525 /* display clock increase/decrease */
495 /* pll clock increase/decrease */ 526 /* pll clock increase/decrease */
496 527
497 int (*setup_backlight)(struct intel_connector *connector); 528 int (*setup_backlight)(struct intel_connector *connector, enum pipe pipe);
498 uint32_t (*get_backlight)(struct intel_connector *connector); 529 uint32_t (*get_backlight)(struct intel_connector *connector);
499 void (*set_backlight)(struct intel_connector *connector, 530 void (*set_backlight)(struct intel_connector *connector,
500 uint32_t level); 531 uint32_t level);
@@ -533,6 +564,7 @@ struct intel_uncore {
533 564
534 unsigned fw_rendercount; 565 unsigned fw_rendercount;
535 unsigned fw_mediacount; 566 unsigned fw_mediacount;
567 unsigned fw_blittercount;
536 568
537 struct timer_list force_wake_timer; 569 struct timer_list force_wake_timer;
538}; 570};
@@ -551,6 +583,7 @@ struct intel_uncore {
551 func(is_ivybridge) sep \ 583 func(is_ivybridge) sep \
552 func(is_valleyview) sep \ 584 func(is_valleyview) sep \
553 func(is_haswell) sep \ 585 func(is_haswell) sep \
586 func(is_skylake) sep \
554 func(is_preliminary) sep \ 587 func(is_preliminary) sep \
555 func(has_fbc) sep \ 588 func(has_fbc) sep \
556 func(has_pipe_cxsr) sep \ 589 func(has_pipe_cxsr) sep \
@@ -646,6 +679,7 @@ struct intel_context {
646 struct { 679 struct {
647 struct drm_i915_gem_object *state; 680 struct drm_i915_gem_object *state;
648 struct intel_ringbuffer *ringbuf; 681 struct intel_ringbuffer *ringbuf;
682 int unpin_count;
649 } engine[I915_NUM_RINGS]; 683 } engine[I915_NUM_RINGS];
650 684
651 struct list_head link; 685 struct list_head link;
@@ -663,6 +697,18 @@ struct i915_fbc {
663 697
664 bool false_color; 698 bool false_color;
665 699
700 /* Tracks whether the HW is actually enabled, not whether the feature is
701 * possible. */
702 bool enabled;
703
 704 /* On gen8 some rings cannot perform the fbc clean operation, so for
 705 * now we do it in software via mmio.
 706 * This variable works in the opposite direction of ring->fbc_dirty:
 707 * it tells the frontbuffer tracking code to perform the cache
 708 * clean on the software side.
709 */
710 bool need_sw_cache_clean;
711
666 struct intel_fbc_work { 712 struct intel_fbc_work {
667 struct delayed_work work; 713 struct delayed_work work;
668 struct drm_crtc *crtc; 714 struct drm_crtc *crtc;
@@ -704,6 +750,7 @@ enum intel_pch {
704 PCH_IBX, /* Ibexpeak PCH */ 750 PCH_IBX, /* Ibexpeak PCH */
705 PCH_CPT, /* Cougarpoint PCH */ 751 PCH_CPT, /* Cougarpoint PCH */
706 PCH_LPT, /* Lynxpoint PCH */ 752 PCH_LPT, /* Lynxpoint PCH */
753 PCH_SPT, /* Sunrisepoint PCH */
707 PCH_NOP, 754 PCH_NOP,
708}; 755};
709 756
@@ -717,6 +764,7 @@ enum intel_sbi_destination {
717#define QUIRK_INVERT_BRIGHTNESS (1<<2) 764#define QUIRK_INVERT_BRIGHTNESS (1<<2)
718#define QUIRK_BACKLIGHT_PRESENT (1<<3) 765#define QUIRK_BACKLIGHT_PRESENT (1<<3)
719#define QUIRK_PIPEB_FORCE (1<<4) 766#define QUIRK_PIPEB_FORCE (1<<4)
767#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
720 768
721struct intel_fbdev; 769struct intel_fbdev;
722struct intel_fbc_work; 770struct intel_fbc_work;
@@ -768,7 +816,6 @@ struct i915_suspend_saved_registers {
768 u32 saveBLC_HIST_CTL; 816 u32 saveBLC_HIST_CTL;
769 u32 saveBLC_PWM_CTL; 817 u32 saveBLC_PWM_CTL;
770 u32 saveBLC_PWM_CTL2; 818 u32 saveBLC_PWM_CTL2;
771 u32 saveBLC_HIST_CTL_B;
772 u32 saveBLC_CPU_PWM_CTL; 819 u32 saveBLC_CPU_PWM_CTL;
773 u32 saveBLC_CPU_PWM_CTL2; 820 u32 saveBLC_CPU_PWM_CTL2;
774 u32 saveFPB0; 821 u32 saveFPB0;
@@ -877,6 +924,7 @@ struct i915_suspend_saved_registers {
877 u32 savePIPEB_LINK_N1; 924 u32 savePIPEB_LINK_N1;
878 u32 saveMCHBAR_RENDER_STANDBY; 925 u32 saveMCHBAR_RENDER_STANDBY;
879 u32 savePCH_PORT_HOTPLUG; 926 u32 savePCH_PORT_HOTPLUG;
927 u16 saveGCDGMBUS;
880}; 928};
881 929
882struct vlv_s0ix_state { 930struct vlv_s0ix_state {
@@ -947,8 +995,12 @@ struct intel_rps_ei {
947}; 995};
948 996
949struct intel_gen6_power_mgmt { 997struct intel_gen6_power_mgmt {
950 /* work and pm_iir are protected by dev_priv->irq_lock */ 998 /*
999 * work, interrupts_enabled and pm_iir are protected by
1000 * dev_priv->irq_lock
1001 */
951 struct work_struct work; 1002 struct work_struct work;
1003 bool interrupts_enabled;
952 u32 pm_iir; 1004 u32 pm_iir;
953 1005
954 /* Frequencies are stored in potentially platform dependent multiples. 1006 /* Frequencies are stored in potentially platform dependent multiples.
@@ -1071,31 +1123,6 @@ struct i915_power_domains {
1071 struct i915_power_well *power_wells; 1123 struct i915_power_well *power_wells;
1072}; 1124};
1073 1125
1074struct i915_dri1_state {
1075 unsigned allow_batchbuffer : 1;
1076 u32 __iomem *gfx_hws_cpu_addr;
1077
1078 unsigned int cpp;
1079 int back_offset;
1080 int front_offset;
1081 int current_page;
1082 int page_flipping;
1083
1084 uint32_t counter;
1085};
1086
1087struct i915_ums_state {
1088 /**
1089 * Flag if the X Server, and thus DRM, is not currently in
1090 * control of the device.
1091 *
1092 * This is set between LeaveVT and EnterVT. It needs to be
1093 * replaced with a semaphore. It also needs to be
1094 * transitioned away from for kernel modesetting.
1095 */
1096 int mm_suspended;
1097};
1098
1099#define MAX_L3_SLICES 2 1126#define MAX_L3_SLICES 2
1100struct intel_l3_parity { 1127struct intel_l3_parity {
1101 u32 *remap_info[MAX_L3_SLICES]; 1128 u32 *remap_info[MAX_L3_SLICES];
@@ -1357,6 +1384,49 @@ struct ilk_wm_values {
1357 enum intel_ddb_partitioning partitioning; 1384 enum intel_ddb_partitioning partitioning;
1358}; 1385};
1359 1386
1387struct skl_ddb_entry {
1388 uint16_t start, end; /* in number of blocks, 'end' is exclusive */
1389};
1390
1391static inline uint16_t skl_ddb_entry_size(const struct skl_ddb_entry *entry)
1392{
1393 return entry->end - entry->start;
1394}
1395
1396static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
1397 const struct skl_ddb_entry *e2)
1398{
1399 if (e1->start == e2->start && e1->end == e2->end)
1400 return true;
1401
1402 return false;
1403}
1404
1405struct skl_ddb_allocation {
1406 struct skl_ddb_entry pipe[I915_MAX_PIPES];
1407 struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES];
1408 struct skl_ddb_entry cursor[I915_MAX_PIPES];
1409};
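Because 'end' is exclusive, the helpers above turn interval checks on DDB entries into one-liners. For instance, a hedged sketch of an overlap test between two entries (the function name is illustrative, not from the driver):

	static bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
					    const struct skl_ddb_entry *b)
	{
		/* An empty entry overlaps nothing. */
		if (skl_ddb_entry_size(a) == 0 || skl_ddb_entry_size(b) == 0)
			return false;

		/* Half-open intervals overlap iff each starts before the other ends. */
		return a->start < b->end && b->start < a->end;
	}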
1410
1411struct skl_wm_values {
1412 bool dirty[I915_MAX_PIPES];
1413 struct skl_ddb_allocation ddb;
1414 uint32_t wm_linetime[I915_MAX_PIPES];
1415 uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8];
1416 uint32_t cursor[I915_MAX_PIPES][8];
1417 uint32_t plane_trans[I915_MAX_PIPES][I915_MAX_PLANES];
1418 uint32_t cursor_trans[I915_MAX_PIPES];
1419};
1420
1421struct skl_wm_level {
1422 bool plane_en[I915_MAX_PLANES];
1423 bool cursor_en;
1424 uint16_t plane_res_b[I915_MAX_PLANES];
1425 uint8_t plane_res_l[I915_MAX_PLANES];
1426 uint16_t cursor_res_b;
1427 uint8_t cursor_res_l;
1428};
1429
1360/* 1430/*
1361 * This struct helps tracking the state needed for runtime PM, which puts the 1431 * This struct helps tracking the state needed for runtime PM, which puts the
1362 * device in PCI D3 state. Notice that when this happens, nothing on the 1432 * device in PCI D3 state. Notice that when this happens, nothing on the
@@ -1369,7 +1439,7 @@ struct ilk_wm_values {
1369 * 1439 *
1370 * Our driver uses the autosuspend delay feature, which means we'll only really 1440 * Our driver uses the autosuspend delay feature, which means we'll only really
1371 * suspend if we stay with zero refcount for a certain amount of time. The 1441 * suspend if we stay with zero refcount for a certain amount of time. The
1372 * default value is currently very conservative (see intel_init_runtime_pm), but 1442 * default value is currently very conservative (see intel_runtime_pm_enable), but
1373 * it can be changed with the standard runtime PM files from sysfs. 1443 * it can be changed with the standard runtime PM files from sysfs.
1374 * 1444 *
1375 * The irqs_disabled variable becomes true exactly after we disable the IRQs and 1445 * The irqs_disabled variable becomes true exactly after we disable the IRQs and
@@ -1382,7 +1452,7 @@ struct ilk_wm_values {
1382 */ 1452 */
1383struct i915_runtime_pm { 1453struct i915_runtime_pm {
1384 bool suspended; 1454 bool suspended;
1385 bool _irqs_disabled; 1455 bool irqs_enabled;
1386}; 1456};
1387 1457
1388enum intel_pipe_crc_source { 1458enum intel_pipe_crc_source {
@@ -1426,6 +1496,20 @@ struct i915_frontbuffer_tracking {
1426 unsigned flip_bits; 1496 unsigned flip_bits;
1427}; 1497};
1428 1498
1499struct i915_wa_reg {
1500 u32 addr;
1501 u32 value;
1502 /* bitmask representing WA bits */
1503 u32 mask;
1504};
1505
1506#define I915_MAX_WA_REGS 16
1507
1508struct i915_workarounds {
1509 struct i915_wa_reg reg[I915_MAX_WA_REGS];
1510 u32 count;
1511};
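Each entry records a register address, a value, and the mask of bits the workaround owns. As an illustrative use of the table (not the driver's own routine), a pass verifying after a reset that every recorded workaround bit still holds could look like:

	static bool foo_workarounds_hold(struct drm_i915_private *dev_priv)
	{
		struct i915_workarounds *w = &dev_priv->workarounds;
		u32 i;

		for (i = 0; i < w->count; i++) {
			u32 cur = I915_READ(w->reg[i].addr);

			/* Only the masked bits belong to the workaround. */
			if ((cur & w->reg[i].mask) !=
			    (w->reg[i].value & w->reg[i].mask))
				return false;
		}

		return true;
	}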
1512
1429struct drm_i915_private { 1513struct drm_i915_private {
1430 struct drm_device *dev; 1514 struct drm_device *dev;
1431 struct kmem_cache *slab; 1515 struct kmem_cache *slab;
@@ -1505,11 +1589,13 @@ struct drm_i915_private {
1505 struct intel_opregion opregion; 1589 struct intel_opregion opregion;
1506 struct intel_vbt_data vbt; 1590 struct intel_vbt_data vbt;
1507 1591
1592 bool preserve_bios_swizzle;
1593
1508 /* overlay */ 1594 /* overlay */
1509 struct intel_overlay *overlay; 1595 struct intel_overlay *overlay;
1510 1596
1511 /* backlight registers and fields in struct intel_panel */ 1597 /* backlight registers and fields in struct intel_panel */
1512 spinlock_t backlight_lock; 1598 struct mutex backlight_lock;
1513 1599
1514 /* LVDS info */ 1600 /* LVDS info */
1515 bool no_aux_handshake; 1601 bool no_aux_handshake;
@@ -1523,6 +1609,7 @@ struct drm_i915_private {
1523 1609
1524 unsigned int fsb_freq, mem_freq, is_ddr3; 1610 unsigned int fsb_freq, mem_freq, is_ddr3;
1525 unsigned int vlv_cdclk_freq; 1611 unsigned int vlv_cdclk_freq;
1612 unsigned int hpll_freq;
1526 1613
1527 /** 1614 /**
1528 * wq - Driver workqueue for GEM. 1615 * wq - Driver workqueue for GEM.
@@ -1568,19 +1655,7 @@ struct drm_i915_private {
1568 struct intel_shared_dpll shared_dplls[I915_NUM_PLLS]; 1655 struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
1569 int dpio_phy_iosf_port[I915_NUM_PHYS_VLV]; 1656 int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
1570 1657
1571 /* 1658 struct i915_workarounds workarounds;
1572 * workarounds are currently applied at different places and
1573 * changes are being done to consolidate them so exact count is
1574 * not clear at this point, use a max value for now.
1575 */
1576#define I915_MAX_WA_REGS 16
1577 struct {
1578 u32 addr;
1579 u32 value;
1580 /* bitmask representing WA bits */
1581 u32 mask;
1582 } intel_wa_regs[I915_MAX_WA_REGS];
1583 u32 num_wa_regs;
1584 1659
1585 /* Reclocking support */ 1660 /* Reclocking support */
1586 bool render_reclock_avail; 1661 bool render_reclock_avail;
@@ -1644,9 +1719,25 @@ struct drm_i915_private {
1644 uint16_t spr_latency[5]; 1719 uint16_t spr_latency[5];
1645 /* cursor */ 1720 /* cursor */
1646 uint16_t cur_latency[5]; 1721 uint16_t cur_latency[5];
1722 /*
1723 * Raw watermark memory latency values
1724 * for SKL for all 8 levels
1725 * in 1us units.
1726 */
1727 uint16_t skl_latency[8];
1728
1729 /*
1730 * The skl_wm_values structure is a bit too big for stack
1731	 * allocation, so we keep a staging struct for intermediate
1732	 * results here instead.
1733 */
1734 struct skl_wm_values skl_results;
1647 1735
1648 /* current hardware state */ 1736 /* current hardware state */
1649 struct ilk_wm_values hw; 1737 union {
1738 struct ilk_wm_values hw;
1739 struct skl_wm_values skl_hw;
1740 };
1650 } wm; 1741 } wm;
1651 1742
1652 struct i915_runtime_pm pm; 1743 struct i915_runtime_pm pm;
@@ -1667,12 +1758,6 @@ struct drm_i915_private {
1667 1758
1668 uint32_t bios_vgacntr; 1759 uint32_t bios_vgacntr;
1669 1760
1670 /* Old dri1 support infrastructure, beware the dragons ya fools entering
1671 * here! */
1672 struct i915_dri1_state dri1;
1673 /* Old ums support infrastructure, same warning applies. */
1674 struct i915_ums_state ums;
1675
1676 /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */ 1761 /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
1677 struct { 1762 struct {
1678 int (*do_execbuf)(struct drm_device *dev, struct drm_file *file, 1763 int (*do_execbuf)(struct drm_device *dev, struct drm_file *file,
@@ -1830,8 +1915,6 @@ struct drm_i915_gem_object {
1830 unsigned long gt_ro:1; 1915 unsigned long gt_ro:1;
1831 unsigned int cache_level:3; 1916 unsigned int cache_level:3;
1832 1917
1833 unsigned int has_aliasing_ppgtt_mapping:1;
1834 unsigned int has_global_gtt_mapping:1;
1835 unsigned int has_dma_mapping:1; 1918 unsigned int has_dma_mapping:1;
1836 1919
1837 unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS; 1920 unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
@@ -1864,10 +1947,10 @@ struct drm_i915_gem_object {
1864 unsigned long user_pin_count; 1947 unsigned long user_pin_count;
1865 struct drm_file *pin_filp; 1948 struct drm_file *pin_filp;
1866 1949
1867 /** for phy allocated objects */
1868 struct drm_dma_handle *phys_handle;
1869
1870 union { 1950 union {
1951 /** for phy allocated objects */
1952 struct drm_dma_handle *phys_handle;
1953
1871 struct i915_gem_userptr { 1954 struct i915_gem_userptr {
1872 uintptr_t ptr; 1955 uintptr_t ptr;
1873 unsigned read_only :1; 1956 unsigned read_only :1;
@@ -2073,6 +2156,7 @@ struct drm_i915_cmd_table {
2073#define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev)) 2156#define IS_CHERRYVIEW(dev) (INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
2074#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell) 2157#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
2075#define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev)) 2158#define IS_BROADWELL(dev) (!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))
2159#define IS_SKYLAKE(dev) (INTEL_INFO(dev)->is_skylake)
2076#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile) 2160#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
2077#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \ 2161#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
2078 (INTEL_DEVID(dev) & 0xFF00) == 0x0C00) 2162 (INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
@@ -2080,9 +2164,10 @@ struct drm_i915_cmd_table {
2080 ((INTEL_DEVID(dev) & 0xf) == 0x2 || \ 2164 ((INTEL_DEVID(dev) & 0xf) == 0x2 || \
2081 (INTEL_DEVID(dev) & 0xf) == 0x6 || \ 2165 (INTEL_DEVID(dev) & 0xf) == 0x6 || \
2082 (INTEL_DEVID(dev) & 0xf) == 0xe)) 2166 (INTEL_DEVID(dev) & 0xf) == 0xe))
2167#define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \
2168 (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
2083#define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \ 2169#define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \
2084 (INTEL_DEVID(dev) & 0xFF00) == 0x0A00) 2170 (INTEL_DEVID(dev) & 0xFF00) == 0x0A00)
2085#define IS_ULT(dev) (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
2086#define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \ 2171#define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \
2087 (INTEL_DEVID(dev) & 0x00F0) == 0x0020) 2172 (INTEL_DEVID(dev) & 0x00F0) == 0x0020)
2088/* ULX machines are also considered ULT. */ 2173/* ULX machines are also considered ULT. */
@@ -2103,6 +2188,7 @@ struct drm_i915_cmd_table {
2103#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6) 2188#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
2104#define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7) 2189#define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7)
2105#define IS_GEN8(dev) (INTEL_INFO(dev)->gen == 8) 2190#define IS_GEN8(dev) (INTEL_INFO(dev)->gen == 8)
2191#define IS_GEN9(dev) (INTEL_INFO(dev)->gen == 9)
2106 2192
2107#define RENDER_RING (1<<RCS) 2193#define RENDER_RING (1<<RCS)
2108#define BSD_RING (1<<VCS) 2194#define BSD_RING (1<<VCS)
@@ -2115,13 +2201,11 @@ struct drm_i915_cmd_table {
2115#define HAS_VEBOX(dev) (INTEL_INFO(dev)->ring_mask & VEBOX_RING) 2201#define HAS_VEBOX(dev) (INTEL_INFO(dev)->ring_mask & VEBOX_RING)
2116#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc) 2202#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
2117#define HAS_WT(dev) ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \ 2203#define HAS_WT(dev) ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
2118 to_i915(dev)->ellc_size) 2204 __I915__(dev)->ellc_size)
2119#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws) 2205#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
2120 2206
2121#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6) 2207#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
2122#define HAS_LOGICAL_RING_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 8) 2208#define HAS_LOGICAL_RING_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 8)
2123#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6)
2124#define HAS_PPGTT(dev) (INTEL_INFO(dev)->gen >= 7 && !IS_GEN8(dev))
2125#define USES_PPGTT(dev) (i915.enable_ppgtt) 2209#define USES_PPGTT(dev) (i915.enable_ppgtt)
2126#define USES_FULL_PPGTT(dev) (i915.enable_ppgtt == 2) 2210#define USES_FULL_PPGTT(dev) (i915.enable_ppgtt == 2)
2127 2211
@@ -2154,13 +2238,15 @@ struct drm_i915_cmd_table {
2154#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) 2238#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
2155#define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) 2239#define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
2156 2240
2157#define HAS_IPS(dev) (IS_ULT(dev) || IS_BROADWELL(dev)) 2241#define HAS_IPS(dev) (IS_HSW_ULT(dev) || IS_BROADWELL(dev))
2158 2242
2159#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi) 2243#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
2160#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg) 2244#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
2161#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev)) 2245#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev))
2162#define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \ 2246#define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \
2163 IS_BROADWELL(dev) || IS_VALLEYVIEW(dev)) 2247 IS_BROADWELL(dev) || IS_VALLEYVIEW(dev))
2248#define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6)
2249#define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
2164 2250
2165#define INTEL_PCH_DEVICE_ID_MASK 0xff00 2251#define INTEL_PCH_DEVICE_ID_MASK 0xff00
2166#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 2252#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
@@ -2168,8 +2254,11 @@ struct drm_i915_cmd_table {
2168#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00 2254#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
2169#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00 2255#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
2170#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00 2256#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00
2257#define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100
2258#define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00
2171 2259
2172#define INTEL_PCH_TYPE(dev) (to_i915(dev)->pch_type) 2260#define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
2261#define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
2173#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT) 2262#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
2174#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) 2263#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
2175#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) 2264#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
@@ -2189,8 +2278,8 @@ struct drm_i915_cmd_table {
2189extern const struct drm_ioctl_desc i915_ioctls[]; 2278extern const struct drm_ioctl_desc i915_ioctls[];
2190extern int i915_max_ioctl; 2279extern int i915_max_ioctl;
2191 2280
2192extern int i915_suspend(struct drm_device *dev, pm_message_t state); 2281extern int i915_suspend_legacy(struct drm_device *dev, pm_message_t state);
2193extern int i915_resume(struct drm_device *dev); 2282extern int i915_resume_legacy(struct drm_device *dev);
2194extern int i915_master_create(struct drm_device *dev, struct drm_master *master); 2283extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
2195extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master); 2284extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);
2196 2285
@@ -2227,8 +2316,6 @@ struct i915_params {
2227extern struct i915_params i915 __read_mostly; 2316extern struct i915_params i915 __read_mostly;
2228 2317
2229 /* i915_dma.c */ 2318 /* i915_dma.c */
2230void i915_update_dri1_breadcrumb(struct drm_device *dev);
2231extern void i915_kernel_lost_context(struct drm_device * dev);
2232extern int i915_driver_load(struct drm_device *, unsigned long flags); 2319extern int i915_driver_load(struct drm_device *, unsigned long flags);
2233extern int i915_driver_unload(struct drm_device *); 2320extern int i915_driver_unload(struct drm_device *);
2234extern int i915_driver_open(struct drm_device *dev, struct drm_file *file); 2321extern int i915_driver_open(struct drm_device *dev, struct drm_file *file);
@@ -2242,9 +2329,6 @@ extern int i915_driver_device_is_agp(struct drm_device * dev);
2242extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, 2329extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
2243 unsigned long arg); 2330 unsigned long arg);
2244#endif 2331#endif
2245extern int i915_emit_box(struct drm_device *dev,
2246 struct drm_clip_rect *box,
2247 int DR1, int DR4);
2248extern int intel_gpu_reset(struct drm_device *dev); 2332extern int intel_gpu_reset(struct drm_device *dev);
2249extern int i915_reset(struct drm_device *dev); 2333extern int i915_reset(struct drm_device *dev);
2250extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); 2334extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
@@ -2260,10 +2344,10 @@ __printf(3, 4)
2260void i915_handle_error(struct drm_device *dev, bool wedged, 2344void i915_handle_error(struct drm_device *dev, bool wedged,
2261 const char *fmt, ...); 2345 const char *fmt, ...);
2262 2346
2263void gen6_set_pm_mask(struct drm_i915_private *dev_priv, u32 pm_iir, 2347extern void intel_irq_init(struct drm_i915_private *dev_priv);
2264 int new_delay); 2348extern void intel_hpd_init(struct drm_i915_private *dev_priv);
2265extern void intel_irq_init(struct drm_device *dev); 2349int intel_irq_install(struct drm_i915_private *dev_priv);
2266extern void intel_hpd_init(struct drm_device *dev); 2350void intel_irq_uninstall(struct drm_i915_private *dev_priv);
2267 2351
2268extern void intel_uncore_sanitize(struct drm_device *dev); 2352extern void intel_uncore_sanitize(struct drm_device *dev);
2269extern void intel_uncore_early_sanitize(struct drm_device *dev, 2353extern void intel_uncore_early_sanitize(struct drm_device *dev,
@@ -2283,10 +2367,19 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
2283 2367
2284void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv); 2368void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
2285void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv); 2369void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
2370void
2371ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask);
2372void
2373ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask);
2374void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
2375 uint32_t interrupt_mask,
2376 uint32_t enabled_irq_mask);
2377#define ibx_enable_display_interrupt(dev_priv, bits) \
2378 ibx_display_interrupt_update((dev_priv), (bits), (bits))
2379#define ibx_disable_display_interrupt(dev_priv, bits) \
2380 ibx_display_interrupt_update((dev_priv), (bits), 0)
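The two macros are thin wrappers that pass the same bits as both the interrupt mask and the enabled set (or zero, to disable). A usage sketch, assuming the caller must hold dev_priv->irq_lock as the other irq helpers here do; the wrapper and its hotplug_bits parameter are hypothetical:

	static void foo_toggle_sde_irqs(struct drm_i915_private *dev_priv,
					u32 hotplug_bits, bool enable)
	{
		spin_lock_irq(&dev_priv->irq_lock);
		if (enable)
			ibx_enable_display_interrupt(dev_priv, hotplug_bits);
		else
			ibx_disable_display_interrupt(dev_priv, hotplug_bits);
		spin_unlock_irq(&dev_priv->irq_lock);
	}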
2286 2381
2287/* i915_gem.c */ 2382/* i915_gem.c */
2288int i915_gem_init_ioctl(struct drm_device *dev, void *data,
2289 struct drm_file *file_priv);
2290int i915_gem_create_ioctl(struct drm_device *dev, void *data, 2383int i915_gem_create_ioctl(struct drm_device *dev, void *data,
2291 struct drm_file *file_priv); 2384 struct drm_file *file_priv);
2292int i915_gem_pread_ioctl(struct drm_device *dev, void *data, 2385int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
@@ -2333,10 +2426,6 @@ int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
2333 struct drm_file *file_priv); 2426 struct drm_file *file_priv);
2334int i915_gem_madvise_ioctl(struct drm_device *dev, void *data, 2427int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
2335 struct drm_file *file_priv); 2428 struct drm_file *file_priv);
2336int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
2337 struct drm_file *file_priv);
2338int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
2339 struct drm_file *file_priv);
2340int i915_gem_set_tiling(struct drm_device *dev, void *data, 2429int i915_gem_set_tiling(struct drm_device *dev, void *data,
2341 struct drm_file *file_priv); 2430 struct drm_file *file_priv);
2342int i915_gem_get_tiling(struct drm_device *dev, void *data, 2431int i915_gem_get_tiling(struct drm_device *dev, void *data,
@@ -2379,7 +2468,6 @@ int __must_check i915_vma_unbind(struct i915_vma *vma);
2379int i915_gem_object_put_pages(struct drm_i915_gem_object *obj); 2468int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
2380void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv); 2469void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
2381void i915_gem_release_mmap(struct drm_i915_gem_object *obj); 2470void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
2382void i915_gem_lastclose(struct drm_device *dev);
2383 2471
2384int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj, 2472int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
2385 int *needs_clflush); 2473 int *needs_clflush);
@@ -2413,8 +2501,9 @@ void i915_vma_move_to_active(struct i915_vma *vma,
2413int i915_gem_dumb_create(struct drm_file *file_priv, 2501int i915_gem_dumb_create(struct drm_file *file_priv,
2414 struct drm_device *dev, 2502 struct drm_device *dev,
2415 struct drm_mode_create_dumb *args); 2503 struct drm_mode_create_dumb *args);
2416int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev, 2504int i915_gem_dumb_map_offset(struct drm_file *file_priv,
2417 uint32_t handle, uint64_t *offset); 2505 struct drm_device *dev, uint32_t handle,
2506 uint64_t *offset);
2418/** 2507/**
2419 * Returns true if seq1 is later than seq2. 2508 * Returns true if seq1 is later than seq2.
2420 */ 2509 */
@@ -2486,6 +2575,11 @@ int __i915_add_request(struct intel_engine_cs *ring,
2486 u32 *seqno); 2575 u32 *seqno);
2487#define i915_add_request(ring, seqno) \ 2576#define i915_add_request(ring, seqno) \
2488 __i915_add_request(ring, NULL, NULL, seqno) 2577 __i915_add_request(ring, NULL, NULL, seqno)
2578int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
2579 unsigned reset_counter,
2580 bool interruptible,
2581 s64 *timeout,
2582 struct drm_i915_file_private *file_priv);
2489int __must_check i915_wait_seqno(struct intel_engine_cs *ring, 2583int __must_check i915_wait_seqno(struct intel_engine_cs *ring,
2490 uint32_t seqno); 2584 uint32_t seqno);
2491int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); 2585int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
@@ -2755,7 +2849,6 @@ static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
2755extern void intel_i2c_reset(struct drm_device *dev); 2849extern void intel_i2c_reset(struct drm_device *dev);
2756 2850
2757/* intel_opregion.c */ 2851/* intel_opregion.c */
2758struct intel_encoder;
2759#ifdef CONFIG_ACPI 2852#ifdef CONFIG_ACPI
2760extern int intel_opregion_setup(struct drm_device *dev); 2853extern int intel_opregion_setup(struct drm_device *dev);
2761extern void intel_opregion_init(struct drm_device *dev); 2854extern void intel_opregion_init(struct drm_device *dev);
@@ -2793,7 +2886,6 @@ static inline void intel_unregister_dsm_handler(void) { return; }
2793 2886
2794/* modesetting */ 2887/* modesetting */
2795extern void intel_modeset_init_hw(struct drm_device *dev); 2888extern void intel_modeset_init_hw(struct drm_device *dev);
2796extern void intel_modeset_suspend_hw(struct drm_device *dev);
2797extern void intel_modeset_init(struct drm_device *dev); 2889extern void intel_modeset_init(struct drm_device *dev);
2798extern void intel_modeset_gem_init(struct drm_device *dev); 2890extern void intel_modeset_gem_init(struct drm_device *dev);
2799extern void intel_modeset_cleanup(struct drm_device *dev); 2891extern void intel_modeset_cleanup(struct drm_device *dev);
@@ -2804,7 +2896,7 @@ extern void intel_modeset_setup_hw_state(struct drm_device *dev,
2804extern void i915_redisable_vga(struct drm_device *dev); 2896extern void i915_redisable_vga(struct drm_device *dev);
2805extern void i915_redisable_vga_power_on(struct drm_device *dev); 2897extern void i915_redisable_vga_power_on(struct drm_device *dev);
2806extern bool intel_fbc_enabled(struct drm_device *dev); 2898extern bool intel_fbc_enabled(struct drm_device *dev);
2807extern void gen8_fbc_sw_flush(struct drm_device *dev, u32 value); 2899extern void bdw_fbc_sw_flush(struct drm_device *dev, u32 value);
2808extern void intel_disable_fbc(struct drm_device *dev); 2900extern void intel_disable_fbc(struct drm_device *dev);
2809extern bool ironlake_set_drps(struct drm_device *dev, u8 val); 2901extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
2810extern void intel_init_pch_refclk(struct drm_device *dev); 2902extern void intel_init_pch_refclk(struct drm_device *dev);
@@ -2842,8 +2934,8 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
2842void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine); 2934void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
2843void assert_force_wake_inactive(struct drm_i915_private *dev_priv); 2935void assert_force_wake_inactive(struct drm_i915_private *dev_priv);
2844 2936
2845int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val); 2937int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
2846int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val); 2938int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);
2847 2939
2848/* intel_sideband.c */ 2940/* intel_sideband.c */
2849u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr); 2941u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr);
@@ -2873,7 +2965,9 @@ int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val);
2873 2965
2874#define FORCEWAKE_RENDER (1 << 0) 2966#define FORCEWAKE_RENDER (1 << 0)
2875#define FORCEWAKE_MEDIA (1 << 1) 2967#define FORCEWAKE_MEDIA (1 << 1)
2876#define FORCEWAKE_ALL (FORCEWAKE_RENDER | FORCEWAKE_MEDIA) 2968#define FORCEWAKE_BLITTER (1 << 2)
2969#define FORCEWAKE_ALL (FORCEWAKE_RENDER | FORCEWAKE_MEDIA | \
2970 FORCEWAKE_BLITTER)
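With the blitter domain added, FORCEWAKE_ALL now covers three engines. A minimal sketch of the usual bracket around a raw register access, built on the gen6_gt_force_wake_get/put declarations above (the wrapper itself is illustrative):

	static u32 foo_read_fw(struct drm_i915_private *dev_priv, u32 reg)
	{
		u32 val;

		gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
		val = I915_READ(reg);
		gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);

		return val;
	}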
2877 2971
2878 2972
2879#define I915_READ8(reg) dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true) 2973#define I915_READ8(reg) dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
@@ -2939,6 +3033,11 @@ static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
2939 return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1); 3033 return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
2940} 3034}
2941 3035
3036static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
3037{
3038 return min_t(u64, MAX_JIFFY_OFFSET, nsecs_to_jiffies64(n) + 1);
3039}
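Like msecs_to_jiffies_timeout above it, this clamps to MAX_JIFFY_OFFSET and adds one jiffy, so a small but nonzero timeout never rounds down to no wait at all. A hypothetical caller converting a user-supplied nanosecond timeout (assumes <linux/wait.h>):

	static long foo_wait_done(wait_queue_head_t *wq, bool *done, u64 timeout_ns)
	{
		/* Guaranteed >= 1 jiffy, so even a 1ns request still waits. */
		return wait_event_timeout(*wq, *done,
					  nsecs_to_jiffies_timeout(timeout_ns));
	}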
3040
2942static inline unsigned long 3041static inline unsigned long
2943timespec_to_jiffies_timeout(const struct timespec *value) 3042timespec_to_jiffies_timeout(const struct timespec *value)
2944{ 3043{
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 28f91df2604d..4a9faea626db 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -160,33 +160,6 @@ i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
 }
 
 int
-i915_gem_init_ioctl(struct drm_device *dev, void *data,
-		    struct drm_file *file)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_init *args = data;
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return -ENODEV;
-
-	if (args->gtt_start >= args->gtt_end ||
-	    (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
-		return -EINVAL;
-
-	/* GEM with user mode setting was never supported on ilk and later. */
-	if (INTEL_INFO(dev)->gen >= 5)
-		return -ENODEV;
-
-	mutex_lock(&dev->struct_mutex);
-	i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
-				  args->gtt_end);
-	dev_priv->gtt.mappable_end = args->gtt_end;
-	mutex_unlock(&dev->struct_mutex);
-
-	return 0;
-}
-
-int
 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 			    struct drm_file *file)
 {
@@ -208,40 +181,137 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 	return 0;
 }
 
-static void i915_gem_object_detach_phys(struct drm_i915_gem_object *obj)
+static int
+i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 {
-	drm_dma_handle_t *phys = obj->phys_handle;
+	struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
+	char *vaddr = obj->phys_handle->vaddr;
+	struct sg_table *st;
+	struct scatterlist *sg;
+	int i;
 
-	if (!phys)
-		return;
+	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
+		return -EINVAL;
 
-	if (obj->madv == I915_MADV_WILLNEED) {
+	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
+		struct page *page;
+		char *src;
+
+		page = shmem_read_mapping_page(mapping, i);
+		if (IS_ERR(page))
+			return PTR_ERR(page);
+
+		src = kmap_atomic(page);
+		memcpy(vaddr, src, PAGE_SIZE);
+		drm_clflush_virt_range(vaddr, PAGE_SIZE);
+		kunmap_atomic(src);
+
+		page_cache_release(page);
+		vaddr += PAGE_SIZE;
+	}
+
+	i915_gem_chipset_flush(obj->base.dev);
+
+	st = kmalloc(sizeof(*st), GFP_KERNEL);
+	if (st == NULL)
+		return -ENOMEM;
+
+	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
+		kfree(st);
+		return -ENOMEM;
+	}
+
+	sg = st->sgl;
+	sg->offset = 0;
+	sg->length = obj->base.size;
+
+	sg_dma_address(sg) = obj->phys_handle->busaddr;
+	sg_dma_len(sg) = obj->base.size;
+
+	obj->pages = st;
+	obj->has_dma_mapping = true;
+	return 0;
+}
+
+static void
+i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
+{
+	int ret;
+
+	BUG_ON(obj->madv == __I915_MADV_PURGED);
+
+	ret = i915_gem_object_set_to_cpu_domain(obj, true);
+	if (ret) {
+		/* In the event of a disaster, abandon all caches and
+		 * hope for the best.
+		 */
+		WARN_ON(ret != -EIO);
+		obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+	}
+
+	if (obj->madv == I915_MADV_DONTNEED)
+		obj->dirty = 0;
+
+	if (obj->dirty) {
 		struct address_space *mapping = file_inode(obj->base.filp)->i_mapping;
-		char *vaddr = phys->vaddr;
+		char *vaddr = obj->phys_handle->vaddr;
 		int i;
 
 		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
-			struct page *page = shmem_read_mapping_page(mapping, i);
-			if (!IS_ERR(page)) {
-				char *dst = kmap_atomic(page);
-				memcpy(dst, vaddr, PAGE_SIZE);
-				drm_clflush_virt_range(dst, PAGE_SIZE);
-				kunmap_atomic(dst);
+			struct page *page;
+			char *dst;
+
+			page = shmem_read_mapping_page(mapping, i);
+			if (IS_ERR(page))
+				continue;
 
-				set_page_dirty(page);
+			dst = kmap_atomic(page);
+			drm_clflush_virt_range(vaddr, PAGE_SIZE);
+			memcpy(dst, vaddr, PAGE_SIZE);
+			kunmap_atomic(dst);
+
+			set_page_dirty(page);
+			if (obj->madv == I915_MADV_WILLNEED)
 				mark_page_accessed(page);
 			page_cache_release(page);
-			}
 			vaddr += PAGE_SIZE;
 		}
-		i915_gem_chipset_flush(obj->base.dev);
+		obj->dirty = 0;
 	}
 
-#ifdef CONFIG_X86
-	set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
-#endif
-	drm_pci_free(obj->base.dev, phys);
-	obj->phys_handle = NULL;
+	sg_free_table(obj->pages);
+	kfree(obj->pages);
+
+	obj->has_dma_mapping = false;
+}
+
+static void
+i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
+{
+	drm_pci_free(obj->base.dev, obj->phys_handle);
+}
+
+static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
+	.get_pages = i915_gem_object_get_pages_phys,
+	.put_pages = i915_gem_object_put_pages_phys,
+	.release = i915_gem_object_release_phys,
+};
+
+static int
+drop_pages(struct drm_i915_gem_object *obj)
+{
+	struct i915_vma *vma, *next;
+	int ret;
+
+	drm_gem_object_reference(&obj->base);
+	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link)
+		if (i915_vma_unbind(vma))
+			break;
+
+	ret = i915_gem_object_put_pages(obj);
+	drm_gem_object_unreference(&obj->base);
+
+	return ret;
 }
 
 int
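The hunks above replace the ad-hoc attach/detach helpers with a drm_i915_gem_object_ops vtable (i915_gem_phys_ops), so phys objects reuse the generic get_pages/put_pages machinery. A self-contained sketch of that ops-table pattern, with stand-in names rather than the real i915 structures:

/* Sketch (not i915 code) of per-object function pointers: generic
 * code drives the vtable, the "phys" backend supplies the hooks. */
#include <stdio.h>

struct obj;

struct obj_ops {
	int  (*get_pages)(struct obj *o);
	void (*put_pages)(struct obj *o);
};

struct obj {
	const struct obj_ops *ops;
	const char *name;
};

static int phys_get_pages(struct obj *o)
{
	printf("%s: snapshot shmem pages into the contiguous buffer\n", o->name);
	return 0;
}

static void phys_put_pages(struct obj *o)
{
	printf("%s: write dirty data back to shmem\n", o->name);
}

static const struct obj_ops phys_ops = {
	.get_pages = phys_get_pages,
	.put_pages = phys_put_pages,
};

int main(void)
{
	struct obj o = { .ops = &phys_ops, .name = "phys-obj" };

	o.ops->get_pages(&o);	/* generic code only sees the vtable */
	o.ops->put_pages(&o);
	return 0;
}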
@@ -249,9 +319,7 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
 		    int align)
 {
 	drm_dma_handle_t *phys;
-	struct address_space *mapping;
-	char *vaddr;
-	int i;
+	int ret;
 
 	if (obj->phys_handle) {
 		if ((unsigned long)obj->phys_handle->vaddr & (align -1))
@@ -266,41 +334,19 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
 	if (obj->base.filp == NULL)
 		return -EINVAL;
 
+	ret = drop_pages(obj);
+	if (ret)
+		return ret;
+
 	/* create a new object */
 	phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
 	if (!phys)
 		return -ENOMEM;
 
-	vaddr = phys->vaddr;
-#ifdef CONFIG_X86
-	set_memory_wc((unsigned long)vaddr, phys->size / PAGE_SIZE);
-#endif
-	mapping = file_inode(obj->base.filp)->i_mapping;
-	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
-		struct page *page;
-		char *src;
-
-		page = shmem_read_mapping_page(mapping, i);
-		if (IS_ERR(page)) {
-#ifdef CONFIG_X86
-			set_memory_wb((unsigned long)phys->vaddr, phys->size / PAGE_SIZE);
-#endif
-			drm_pci_free(obj->base.dev, phys);
-			return PTR_ERR(page);
-		}
-
-		src = kmap_atomic(page);
-		memcpy(vaddr, src, PAGE_SIZE);
-		kunmap_atomic(src);
-
-		mark_page_accessed(page);
-		page_cache_release(page);
-
-		vaddr += PAGE_SIZE;
-	}
-
 	obj->phys_handle = phys;
-	return 0;
+	obj->ops = &i915_gem_phys_ops;
+
+	return i915_gem_object_get_pages(obj);
 }
 
 static int
@@ -311,6 +357,14 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
 	struct drm_device *dev = obj->base.dev;
 	void *vaddr = obj->phys_handle->vaddr + args->offset;
 	char __user *user_data = to_user_ptr(args->data_ptr);
+	int ret;
+
+	/* We manually control the domain here and pretend that it
+	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
+	 */
+	ret = i915_gem_object_wait_rendering(obj, false);
+	if (ret)
+		return ret;
 
 	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
 		unsigned long unwritten;
@@ -326,6 +380,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
 		return -EFAULT;
 	}
 
+	drm_clflush_virt_range(vaddr, args->size);
 	i915_gem_chipset_flush(dev);
 	return 0;
 }
@@ -346,6 +401,7 @@ static int
 i915_gem_create(struct drm_file *file,
 		struct drm_device *dev,
 		uint64_t size,
+		bool dumb,
 		uint32_t *handle_p)
 {
 	struct drm_i915_gem_object *obj;
@@ -361,6 +417,7 @@ i915_gem_create(struct drm_file *file,
 	if (obj == NULL)
 		return -ENOMEM;
 
+	obj->base.dumb = dumb;
 	ret = drm_gem_handle_create(file, &obj->base, &handle);
 	/* drop reference from allocate - handle holds it now */
 	drm_gem_object_unreference_unlocked(&obj->base);
@@ -380,7 +437,7 @@ i915_gem_dumb_create(struct drm_file *file,
 	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
 	args->size = args->pitch * args->height;
 	return i915_gem_create(file, dev,
-			       args->size, &args->handle);
+			       args->size, true, &args->handle);
 }
 
 /**
@@ -393,7 +450,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
 	struct drm_i915_gem_create *args = data;
 
 	return i915_gem_create(file, dev,
-			       args->size, &args->handle);
+			       args->size, false, &args->handle);
 }
 
 static inline int
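Both creation ioctls now funnel through i915_gem_create() with an extra dumb flag recorded on the object, so later paths can tell scanout-only dumb buffers from accelerated ones. A minimal model of that plumbing, simplified and not the driver code itself:

/* Sketch of threading a "dumb" flag through two creation paths. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct gem_obj { bool dumb; };

static int gem_create(uint64_t size, bool dumb, struct gem_obj *obj)
{
	(void)size;
	obj->dumb = dumb; /* recorded once, at allocation time */
	return 0;
}

int main(void)
{
	struct gem_obj scanout, render;

	gem_create(4096, true, &scanout);  /* dumb_create ioctl path */
	gem_create(4096, false, &render);  /* regular create ioctl path */
	printf("%d %d\n", scanout.dumb, render.dumb); /* 1 0 */
	return 0;
}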
@@ -1046,11 +1103,6 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 	 * pread/pwrite currently are reading and writing from the CPU
 	 * perspective, requiring manual detiling by the client.
 	 */
-	if (obj->phys_handle) {
-		ret = i915_gem_phys_pwrite(obj, args, file);
-		goto out;
-	}
-
 	if (obj->tiling_mode == I915_TILING_NONE &&
 	    obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
 	    cpu_write_needs_clflush(obj)) {
@@ -1060,8 +1112,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 		 * textures). Fallback to the shmem path in that case. */
 	}
 
-	if (ret == -EFAULT || ret == -ENOSPC)
-		ret = i915_gem_shmem_pwrite(dev, obj, args, file);
+	if (ret == -EFAULT || ret == -ENOSPC) {
+		if (obj->phys_handle)
+			ret = i915_gem_phys_pwrite(obj, args, file);
+		else
+			ret = i915_gem_shmem_pwrite(dev, obj, args, file);
+	}
 
 out:
 	drm_gem_object_unreference(&obj->base);
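With the hunk above, phys pwrite is no longer a special case taken up front; it becomes one of the slow-path fallbacks tried only when the GTT fast path returns -EFAULT or -ENOSPC. A compact sketch of the reordered dispatch, with stub functions and illustrative return values:

/* Sketch of the fallback ordering; the stubs are assumptions. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int gtt_pwrite(void)   { return -EFAULT; } /* pretend it faulted */
static int phys_pwrite(void)  { return 0; }
static int shmem_pwrite(void) { return 0; }

int main(void)
{
	bool has_phys_handle = true;
	int ret = gtt_pwrite();	/* fast path always tried first */

	if (ret == -EFAULT || ret == -ENOSPC)
		ret = has_phys_handle ? phys_pwrite() : shmem_pwrite();
	printf("ret=%d\n", ret);
	return 0;
}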
@@ -1134,7 +1190,7 @@ static bool can_wait_boost(struct drm_i915_file_private *file_priv)
 }
 
 /**
- * __wait_seqno - wait until execution of seqno has finished
+ * __i915_wait_seqno - wait until execution of seqno has finished
  * @ring: the ring expected to report seqno
  * @seqno: duh!
  * @reset_counter: reset sequence associated with the given seqno
@@ -1151,7 +1207,7 @@ static bool can_wait_boost(struct drm_i915_file_private *file_priv)
  * Returns 0 if the seqno was found within the allotted time. Else returns the
  * errno with remaining time filled in timeout argument.
  */
-static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
+int __i915_wait_seqno(struct intel_engine_cs *ring, u32 seqno,
 			unsigned reset_counter,
 			bool interruptible,
 			s64 *timeout,
@@ -1171,7 +1227,8 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
 	if (i915_seqno_passed(ring->get_seqno(ring, true), seqno))
 		return 0;
 
-	timeout_expire = timeout ? jiffies + nsecs_to_jiffies((u64)*timeout) : 0;
+	timeout_expire = timeout ?
+		jiffies + nsecs_to_jiffies_timeout((u64)*timeout) : 0;
 
 	if (INTEL_INFO(dev)->gen >= 6 && ring->id == RCS && can_wait_boost(file_priv)) {
 		gen6_rps_boost(dev_priv);
@@ -1247,6 +1304,16 @@ static int __wait_seqno(struct intel_engine_cs *ring, u32 seqno,
 		s64 tres = *timeout - (now - before);
 
 		*timeout = tres < 0 ? 0 : tres;
+
+		/*
+		 * Apparently ktime isn't accurate enough and occasionally has a
+		 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
+		 * things up to make the test happy. We allow up to 1 jiffy.
+		 *
+		 * This is a regression from the timespec->ktime conversion.
+		 */
+		if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000)
+			*timeout = 0;
 	}
 
 	return ret;
@@ -1262,6 +1329,7 @@ i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno)
 	struct drm_device *dev = ring->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	bool interruptible = dev_priv->mm.interruptible;
+	unsigned reset_counter;
 	int ret;
 
 	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -1275,14 +1343,13 @@ i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno)
 	if (ret)
 		return ret;
 
-	return __wait_seqno(ring, seqno,
-			    atomic_read(&dev_priv->gpu_error.reset_counter),
-			    interruptible, NULL, NULL);
+	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
+	return __i915_wait_seqno(ring, seqno, reset_counter, interruptible,
+				 NULL, NULL);
 }
 
 static int
-i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj,
-				     struct intel_engine_cs *ring)
+i915_gem_object_wait_rendering__tail(struct drm_i915_gem_object *obj)
 {
 	if (!obj->active)
 		return 0;
@@ -1319,7 +1386,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
 	if (ret)
 		return ret;
 
-	return i915_gem_object_wait_rendering__tail(obj, ring);
+	return i915_gem_object_wait_rendering__tail(obj);
 }
 
 /* A nonblocking variant of the above wait. This is a highly dangerous routine
@@ -1354,12 +1421,13 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 
 	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
 	mutex_unlock(&dev->struct_mutex);
-	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, file_priv);
+	ret = __i915_wait_seqno(ring, seqno, reset_counter, true, NULL,
+				file_priv);
 	mutex_lock(&dev->struct_mutex);
 	if (ret)
 		return ret;
 
-	return i915_gem_object_wait_rendering__tail(obj, ring);
+	return i915_gem_object_wait_rendering__tail(obj);
 }
 
 /**
@@ -1466,6 +1534,16 @@ unlock:
  *
  * While the mapping holds a reference on the contents of the object, it doesn't
  * imply a ref on the object itself.
+ *
+ * IMPORTANT:
+ *
+ * DRM driver writers who look at this function as an example for how to do GEM
+ * mmap support, please don't implement mmap support like here. The modern way
+ * to implement DRM mmap support is with an mmap offset ioctl (like
+ * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
+ * That way debug tooling like valgrind will understand what's going on, hiding
+ * the mmap call in a driver private ioctl will break that. The i915 driver only
+ * does cpu mmaps this way because we didn't know better.
  */
 int
 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
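The new comment recommends the mmap-offset pattern over driver-private CPU mmap ioctls. A hedged user-space sketch of that pattern against the i915 GTT-mmap ioctl follows; error handling is trimmed, and it assumes a valid GEM handle and an open DRM fd:

/* User-space sketch of the mmap-offset pattern: ask the driver for a
 * fake offset, then mmap the DRM fd itself so tools like valgrind can
 * see the mapping. map_bo_gtt() is an illustrative helper name. */
#include <stddef.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <drm/i915_drm.h>

void *map_bo_gtt(int drm_fd, uint32_t handle, size_t size)
{
	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };

	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
		return MAP_FAILED;

	/* the returned offset is a token, not a real file offset */
	return mmap(NULL, size, PROT_READ | PROT_WRITE,
		    MAP_SHARED, drm_fd, arg.offset);
}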
@@ -1762,10 +1840,10 @@ static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
 	drm_gem_free_mmap_offset(&obj->base);
 }
 
-int
+static int
 i915_gem_mmap_gtt(struct drm_file *file,
 		  struct drm_device *dev,
-		  uint32_t handle,
+		  uint32_t handle, bool dumb,
 		  uint64_t *offset)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1782,6 +1860,13 @@ i915_gem_mmap_gtt(struct drm_file *file,
 		goto unlock;
 	}
 
+	/*
+	 * We don't allow dumb mmaps on objects created using another
+	 * interface.
+	 */
+	WARN_ONCE(dumb && !(obj->base.dumb || obj->base.import_attach),
+		  "Illegal dumb map of accelerated buffer.\n");
+
 	if (obj->base.size > dev_priv->gtt.mappable_end) {
 		ret = -E2BIG;
 		goto out;
@@ -1806,6 +1891,15 @@ unlock:
 	return ret;
 }
 
+int
+i915_gem_dumb_map_offset(struct drm_file *file,
+			 struct drm_device *dev,
+			 uint32_t handle,
+			 uint64_t *offset)
+{
+	return i915_gem_mmap_gtt(file, dev, handle, true, offset);
+}
+
 /**
  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
  * @dev: DRM device
@@ -1827,7 +1921,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 {
 	struct drm_i915_gem_mmap_gtt *args = data;
 
-	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
+	return i915_gem_mmap_gtt(file, dev, args->handle, false, &args->offset);
 }
 
 static inline int
@@ -1945,7 +2039,14 @@ unsigned long
 i915_gem_shrink(struct drm_i915_private *dev_priv,
 		long target, unsigned flags)
 {
-	const bool purgeable_only = flags & I915_SHRINK_PURGEABLE;
+	const struct {
+		struct list_head *list;
+		unsigned int bit;
+	} phases[] = {
+		{ &dev_priv->mm.unbound_list, I915_SHRINK_UNBOUND },
+		{ &dev_priv->mm.bound_list, I915_SHRINK_BOUND },
+		{ NULL, 0 },
+	}, *phase;
 	unsigned long count = 0;
 
 	/*
@@ -1967,48 +2068,30 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 	 * dev->struct_mutex and so we won't ever be able to observe an
 	 * object on the bound_list with a reference count equals 0.
 	 */
-	if (flags & I915_SHRINK_UNBOUND) {
+	for (phase = phases; phase->list; phase++) {
 		struct list_head still_in_list;
 
-		INIT_LIST_HEAD(&still_in_list);
-		while (count < target && !list_empty(&dev_priv->mm.unbound_list)) {
-			struct drm_i915_gem_object *obj;
-
-			obj = list_first_entry(&dev_priv->mm.unbound_list,
-					       typeof(*obj), global_list);
-			list_move_tail(&obj->global_list, &still_in_list);
-
-			if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
-				continue;
-
-			drm_gem_object_reference(&obj->base);
-
-			if (i915_gem_object_put_pages(obj) == 0)
-				count += obj->base.size >> PAGE_SHIFT;
-
-			drm_gem_object_unreference(&obj->base);
-		}
-		list_splice(&still_in_list, &dev_priv->mm.unbound_list);
-	}
-
-	if (flags & I915_SHRINK_BOUND) {
-		struct list_head still_in_list;
+		if ((flags & phase->bit) == 0)
+			continue;
 
 		INIT_LIST_HEAD(&still_in_list);
-		while (count < target && !list_empty(&dev_priv->mm.bound_list)) {
+		while (count < target && !list_empty(phase->list)) {
 			struct drm_i915_gem_object *obj;
 			struct i915_vma *vma, *v;
 
-			obj = list_first_entry(&dev_priv->mm.bound_list,
+			obj = list_first_entry(phase->list,
 					       typeof(*obj), global_list);
 			list_move_tail(&obj->global_list, &still_in_list);
 
-			if (!i915_gem_object_is_purgeable(obj) && purgeable_only)
+			if (flags & I915_SHRINK_PURGEABLE &&
+			    !i915_gem_object_is_purgeable(obj))
 				continue;
 
 			drm_gem_object_reference(&obj->base);
 
-			list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link)
+			/* For the unbound phase, this should be a no-op! */
+			list_for_each_entry_safe(vma, v,
+						 &obj->vma_list, vma_link)
 				if (i915_vma_unbind(vma))
 					break;
 
@@ -2017,7 +2100,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 
 			drm_gem_object_unreference(&obj->base);
 		}
-		list_splice(&still_in_list, &dev_priv->mm.bound_list);
+		list_splice(&still_in_list, phase->list);
 	}
 
 	return count;
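The shrinker rewrite above folds two nearly identical list walks into one loop driven by a small phase table. The same data-driven pattern in a stand-alone form, with strings standing in for the real list heads:

/* Self-contained sketch (not i915 code) of the table-driven loop. */
#include <stdio.h>

#define SHRINK_UNBOUND (1 << 0)
#define SHRINK_BOUND   (1 << 1)

int main(void)
{
	const struct {
		const char *list;
		unsigned int bit;
	} phases[] = {
		{ "unbound", SHRINK_UNBOUND },
		{ "bound",   SHRINK_BOUND },
		{ NULL, 0 },
	}, *phase;
	unsigned flags = SHRINK_BOUND; /* caller selects the phases */

	for (phase = phases; phase->list; phase++) {
		if ((flags & phase->bit) == 0)
			continue;	/* phase not requested */
		printf("scanning %s list\n", phase->list);
	}
	return 0;
}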
@@ -2122,6 +2205,10 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 	if (i915_gem_object_needs_bit17_swizzle(obj))
 		i915_gem_object_do_bit_17_swizzle(obj);
 
+	if (obj->tiling_mode != I915_TILING_NONE &&
+	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
+		i915_gem_object_pin_pages(obj);
+
 	return 0;
 
 err_pages:
@@ -2420,15 +2507,13 @@ int __i915_add_request(struct intel_engine_cs *ring,
 	ring->outstanding_lazy_seqno = 0;
 	ring->preallocated_lazy_request = NULL;
 
-	if (!dev_priv->ums.mm_suspended) {
-		i915_queue_hangcheck(ring->dev);
+	i915_queue_hangcheck(ring->dev);
 
-		cancel_delayed_work_sync(&dev_priv->mm.idle_work);
-		queue_delayed_work(dev_priv->wq,
-				   &dev_priv->mm.retire_work,
-				   round_jiffies_up_relative(HZ));
-		intel_mark_busy(dev_priv->dev);
-	}
+	cancel_delayed_work_sync(&dev_priv->mm.idle_work);
+	queue_delayed_work(dev_priv->wq,
+			   &dev_priv->mm.retire_work,
+			   round_jiffies_up_relative(HZ));
+	intel_mark_busy(dev_priv->dev);
 
 	if (out_seqno)
 		*out_seqno = request->seqno;
@@ -2495,12 +2580,20 @@ static void i915_set_reset_status(struct drm_i915_private *dev_priv,
 
 static void i915_gem_free_request(struct drm_i915_gem_request *request)
 {
+	struct intel_context *ctx = request->ctx;
+
 	list_del(&request->list);
 	i915_gem_request_remove_from_client(request);
 
-	if (request->ctx)
-		i915_gem_context_unreference(request->ctx);
+	if (ctx) {
+		if (i915.enable_execlists) {
+			struct intel_engine_cs *ring = request->ring;
 
+			if (ctx != ring->default_context)
+				intel_lr_context_unpin(ring, ctx);
+		}
+		i915_gem_context_unreference(ctx);
+	}
 	kfree(request);
 }
 
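The free-request hunk above establishes an ordering that matters for execlists: a pinned, non-default context is unpinned before the request's final context reference is dropped. A toy model of that ordering, where the fields and helpers are stand-ins:

/* Sketch of the unpin-before-unref ordering; not i915 structures. */
#include <stdio.h>

struct ctx { int pin_count; int refcount; };

static void ctx_unpin(struct ctx *c) { c->pin_count--; }
static void ctx_unref(struct ctx *c) { c->refcount--; }

static void free_request(struct ctx *c, int execlists, int is_default)
{
	if (execlists && !is_default)
		ctx_unpin(c);	/* drop the pin taken at submission */
	ctx_unref(c);		/* then drop the request's reference */
}

int main(void)
{
	struct ctx c = { .pin_count = 1, .refcount = 2 };

	free_request(&c, 1, 0);
	printf("pin=%d ref=%d\n", c.pin_count, c.refcount); /* pin=0 ref=1 */
	return 0;
}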
@@ -2555,6 +2648,23 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
 	}
 
 	/*
+	 * Clear the execlists queue up before freeing the requests, as those
+	 * are the ones that keep the context and ringbuffer backing objects
+	 * pinned in place.
+	 */
+	while (!list_empty(&ring->execlist_queue)) {
+		struct intel_ctx_submit_request *submit_req;
+
+		submit_req = list_first_entry(&ring->execlist_queue,
+					      struct intel_ctx_submit_request,
+					      execlist_link);
+		list_del(&submit_req->execlist_link);
+		intel_runtime_pm_put(dev_priv);
+		i915_gem_context_unreference(submit_req->ctx);
+		kfree(submit_req);
+	}
+
+	/*
 	 * We must free the requests after all the corresponding objects have
 	 * been moved off active lists. Which is the same order as the normal
 	 * retire_requests function does. This is important if object hold
@@ -2571,18 +2681,6 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
 		i915_gem_free_request(request);
 	}
 
-	while (!list_empty(&ring->execlist_queue)) {
-		struct intel_ctx_submit_request *submit_req;
-
-		submit_req = list_first_entry(&ring->execlist_queue,
-					      struct intel_ctx_submit_request,
-					      execlist_link);
-		list_del(&submit_req->execlist_link);
-		intel_runtime_pm_put(dev_priv);
-		i915_gem_context_unreference(submit_req->ctx);
-		kfree(submit_req);
-	}
-
 	/* These may not have been flushed before the reset, do so now */
 	kfree(ring->preallocated_lazy_request);
 	ring->preallocated_lazy_request = NULL;
@@ -2719,6 +2817,15 @@ i915_gem_retire_requests(struct drm_device *dev)
 	for_each_ring(ring, dev_priv, i) {
 		i915_gem_retire_requests_ring(ring);
 		idle &= list_empty(&ring->request_list);
+		if (i915.enable_execlists) {
+			unsigned long flags;
+
+			spin_lock_irqsave(&ring->execlist_lock, flags);
+			idle &= list_empty(&ring->execlist_queue);
+			spin_unlock_irqrestore(&ring->execlist_lock, flags);
+
+			intel_execlists_retire_requests(ring);
+		}
 	}
 
 	if (idle)
@@ -2811,6 +2918,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	u32 seqno = 0;
 	int ret = 0;
 
+	if (args->flags != 0)
+		return -EINVAL;
+
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		return ret;
@@ -2846,8 +2956,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
 	mutex_unlock(&dev->struct_mutex);
 
-	return __wait_seqno(ring, seqno, reset_counter, true, &args->timeout_ns,
-			    file->driver_priv);
+	return __i915_wait_seqno(ring, seqno, reset_counter, true,
+				 &args->timeout_ns, file->driver_priv);
 
 out:
 	drm_gem_object_unreference(&obj->base);
@@ -3166,6 +3276,7 @@ static void i915_gem_write_fence(struct drm_device *dev, int reg,
 	       obj->stride, obj->tiling_mode);
 
 	switch (INTEL_INFO(dev)->gen) {
+	case 9:
 	case 8:
 	case 7:
 	case 6:
@@ -3384,46 +3495,6 @@ static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
 	return true;
 }
 
-static void i915_gem_verify_gtt(struct drm_device *dev)
-{
-#if WATCH_GTT
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj;
-	int err = 0;
-
-	list_for_each_entry(obj, &dev_priv->mm.gtt_list, global_list) {
-		if (obj->gtt_space == NULL) {
-			printk(KERN_ERR "object found on GTT list with no space reserved\n");
-			err++;
-			continue;
-		}
-
-		if (obj->cache_level != obj->gtt_space->color) {
-			printk(KERN_ERR "object reserved space [%08lx, %08lx] with wrong color, cache_level=%x, color=%lx\n",
-			       i915_gem_obj_ggtt_offset(obj),
-			       i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
-			       obj->cache_level,
-			       obj->gtt_space->color);
-			err++;
-			continue;
-		}
-
-		if (!i915_gem_valid_gtt_space(dev,
-					      obj->gtt_space,
-					      obj->cache_level)) {
-			printk(KERN_ERR "invalid GTT space found at [%08lx, %08lx] - color=%x\n",
-			       i915_gem_obj_ggtt_offset(obj),
-			       i915_gem_obj_ggtt_offset(obj) + i915_gem_obj_ggtt_size(obj),
-			       obj->cache_level);
-			err++;
-			continue;
-		}
-	}
-
-	WARN_ON(err);
-#endif
-}
-
 /**
  * Finds free space in the GTT aperture and binds the object there.
  */
@@ -3514,25 +3585,10 @@ search_free:
 	list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
 	list_add_tail(&vma->mm_list, &vm->inactive_list);
 
-	if (i915_is_ggtt(vm)) {
-		bool mappable, fenceable;
-
-		fenceable = (vma->node.size == fence_size &&
-			     (vma->node.start & (fence_alignment - 1)) == 0);
-
-		mappable = (vma->node.start + obj->base.size <=
-			    dev_priv->gtt.mappable_end);
-
-		obj->map_and_fenceable = mappable && fenceable;
-	}
-
-	WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
-
 	trace_i915_vma_bind(vma, flags);
 	vma->bind_vma(vma, obj->cache_level,
-		      flags & (PIN_MAPPABLE | PIN_GLOBAL) ? GLOBAL_BIND : 0);
+		      flags & PIN_GLOBAL ? GLOBAL_BIND : 0);
 
-	i915_gem_verify_gtt(dev);
 	return vma;
 
 err_remove_node:
@@ -3560,7 +3616,7 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj,
 	 * Stolen memory is always coherent with the GPU as it is explicitly
 	 * marked as wc by the system, or the system is cache-coherent.
 	 */
-	if (obj->stolen)
+	if (obj->stolen || obj->phys_handle)
 		return false;
 
 	/* If the GPU is snooping the contents of the CPU cache,
@@ -3739,7 +3795,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 		list_for_each_entry(vma, &obj->vma_list, vma_link)
 			if (drm_mm_node_allocated(&vma->node))
 				vma->bind_vma(vma, cache_level,
-					      obj->has_global_gtt_mapping ? GLOBAL_BIND : 0);
+					      vma->bound & GLOBAL_BIND);
 	}
 
 	list_for_each_entry(vma, &obj->vma_list, vma_link)
@@ -3769,7 +3825,6 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 					    old_write_domain);
 	}
 
-	i915_gem_verify_gtt(dev);
 	return 0;
 }
 
@@ -4067,7 +4122,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 	if (seqno == 0)
 		return 0;
 
-	ret = __wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
+	ret = __i915_wait_seqno(ring, seqno, reset_counter, true, NULL, NULL);
 	if (ret == 0)
 		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
 
@@ -4101,6 +4156,7 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 {
 	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 	struct i915_vma *vma;
+	unsigned bound;
 	int ret;
 
 	if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
@@ -4109,6 +4165,9 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 	if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
 		return -EINVAL;
 
+	if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
+		return -EINVAL;
+
 	vma = i915_gem_obj_to_vma(obj, vm);
 	if (vma) {
 		if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
@@ -4130,15 +4189,39 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 		}
 	}
 
+	bound = vma ? vma->bound : 0;
 	if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
 		vma = i915_gem_object_bind_to_vm(obj, vm, alignment, flags);
 		if (IS_ERR(vma))
 			return PTR_ERR(vma);
 	}
 
-	if (flags & PIN_GLOBAL && !obj->has_global_gtt_mapping)
+	if (flags & PIN_GLOBAL && !(vma->bound & GLOBAL_BIND))
 		vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
 
+	if ((bound ^ vma->bound) & GLOBAL_BIND) {
+		bool mappable, fenceable;
+		u32 fence_size, fence_alignment;
+
+		fence_size = i915_gem_get_gtt_size(obj->base.dev,
+						   obj->base.size,
+						   obj->tiling_mode);
+		fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev,
+							     obj->base.size,
+							     obj->tiling_mode,
+							     true);
+
+		fenceable = (vma->node.size == fence_size &&
+			     (vma->node.start & (fence_alignment - 1)) == 0);
+
+		mappable = (vma->node.start + obj->base.size <=
+			    dev_priv->gtt.mappable_end);
+
+		obj->map_and_fenceable = mappable && fenceable;
+	}
+
+	WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
+
 	vma->pin_count++;
 	if (flags & PIN_MAPPABLE)
 		obj->pin_mappable |= true;
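map_and_fenceable is now recomputed inside i915_gem_object_pin() whenever the global binding changes. The predicate itself is simple; a stand-alone sketch with purely illustrative numbers:

/* Sketch of the fenceable/mappable test; all values are made up. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t node_start = 0x10000, node_size = 0x8000;
	uint64_t obj_size = 0x8000, mappable_end = 0x40000000;
	uint64_t fence_size = 0x8000, fence_alignment = 0x1000;

	/* exact fence-sized node, aligned to the fence requirement */
	bool fenceable = node_size == fence_size &&
			 (node_start & (fence_alignment - 1)) == 0;
	/* entirely inside the CPU-mappable aperture */
	bool mappable = node_start + obj_size <= mappable_end;

	printf("map_and_fenceable = %d\n", fenceable && mappable);
	return 0;
}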
@@ -4193,7 +4276,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
 	struct drm_i915_gem_object *obj;
 	int ret;
 
-	if (INTEL_INFO(dev)->gen >= 6)
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return -ENODEV;
 
 	ret = i915_mutex_lock_interruptible(dev);
@@ -4249,6 +4332,9 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
 	struct drm_i915_gem_object *obj;
 	int ret;
 
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return -ENODEV;
+
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		return ret;
@@ -4326,6 +4412,7 @@ int
 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
 		       struct drm_file *file_priv)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_madvise *args = data;
 	struct drm_i915_gem_object *obj;
 	int ret;
@@ -4353,6 +4440,15 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
 		goto out;
 	}
 
+	if (obj->pages &&
+	    obj->tiling_mode != I915_TILING_NONE &&
+	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
+		if (obj->madv == I915_MADV_WILLNEED)
+			i915_gem_object_unpin_pages(obj);
+		if (args->madv == I915_MADV_WILLNEED)
+			i915_gem_object_pin_pages(obj);
+	}
+
 	if (obj->madv != __I915_MADV_PURGED)
 		obj->madv = args->madv;
 
@@ -4495,8 +4591,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 		}
 	}
 
-	i915_gem_object_detach_phys(obj);
-
 	/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
 	 * before progressing. */
 	if (obj->stolen)
@@ -4504,6 +4598,11 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 
 	WARN_ON(obj->frontbuffer_bits);
 
+	if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
+	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
+	    obj->tiling_mode != I915_TILING_NONE)
+		i915_gem_object_unpin_pages(obj);
+
 	if (WARN_ON(obj->pages_pin_count))
 		obj->pages_pin_count = 0;
 	if (discard_backing_storage(obj))
@@ -4576,9 +4675,6 @@ i915_gem_suspend(struct drm_device *dev)
 	int ret = 0;
 
 	mutex_lock(&dev->struct_mutex);
-	if (dev_priv->ums.mm_suspended)
-		goto err;
-
 	ret = i915_gpu_idle(dev);
 	if (ret)
 		goto err;
@@ -4589,15 +4685,7 @@ i915_gem_suspend(struct drm_device *dev)
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		i915_gem_evict_everything(dev);
 
-	i915_kernel_lost_context(dev);
 	i915_gem_stop_ringbuffers(dev);
-
-	/* Hack! Don't let anybody do execbuf while we don't control the chip.
-	 * We need to replace this with a semaphore, or something.
-	 * And not confound ums.mm_suspended!
-	 */
-	dev_priv->ums.mm_suspended = !drm_core_check_feature(dev,
-							     DRIVER_MODESET);
 	mutex_unlock(&dev->struct_mutex);
 
 	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
@@ -4888,9 +4976,6 @@ int i915_gem_init(struct drm_device *dev)
 	}
 	mutex_unlock(&dev->struct_mutex);
 
-	/* Allow hardware batchbuffers unless told otherwise, but not for KMS. */
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		dev_priv->dri1.allow_batchbuffer = 1;
 	return ret;
 }
 
@@ -4905,74 +4990,6 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
 		dev_priv->gt.cleanup_ring(ring);
 }
 
-int
-i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
-		       struct drm_file *file_priv)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret;
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return 0;
-
-	if (i915_reset_in_progress(&dev_priv->gpu_error)) {
-		DRM_ERROR("Reenabling wedged hardware, good luck\n");
-		atomic_set(&dev_priv->gpu_error.reset_counter, 0);
-	}
-
-	mutex_lock(&dev->struct_mutex);
-	dev_priv->ums.mm_suspended = 0;
-
-	ret = i915_gem_init_hw(dev);
-	if (ret != 0) {
-		mutex_unlock(&dev->struct_mutex);
-		return ret;
-	}
-
-	BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
-
-	ret = drm_irq_install(dev, dev->pdev->irq);
-	if (ret)
-		goto cleanup_ringbuffer;
-	mutex_unlock(&dev->struct_mutex);
-
-	return 0;
-
-cleanup_ringbuffer:
-	i915_gem_cleanup_ringbuffer(dev);
-	dev_priv->ums.mm_suspended = 1;
-	mutex_unlock(&dev->struct_mutex);
-
-	return ret;
-}
-
-int
-i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
-		       struct drm_file *file_priv)
-{
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return 0;
-
-	mutex_lock(&dev->struct_mutex);
-	drm_irq_uninstall(dev);
-	mutex_unlock(&dev->struct_mutex);
-
-	return i915_gem_suspend(dev);
-}
-
-void
-i915_gem_lastclose(struct drm_device *dev)
-{
-	int ret;
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return;
-
-	ret = i915_gem_suspend(dev);
-	if (ret)
-		DRM_ERROR("failed to idle hardware: %d\n", ret);
-}
-
 static void
 init_ring_lists(struct intel_engine_cs *ring)
 {
@@ -5119,6 +5136,15 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
 	return ret;
 }
 
+/**
+ * i915_gem_track_fb - update frontbuffer tracking
+ * old: current GEM buffer for the frontbuffer slots
+ * new: new GEM buffer for the frontbuffer slots
+ * frontbuffer_bits: bitmask of frontbuffer slots
+ *
+ * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
+ * from @old and setting them in @new. Both @old and @new can be NULL.
+ */
 void i915_gem_track_fb(struct drm_i915_gem_object *old,
 		       struct drm_i915_gem_object *new,
 		       unsigned frontbuffer_bits)
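The new kerneldoc describes a pure bitmask handover. A minimal model of i915_gem_track_fb(), simplified from the documented behaviour (either buffer may be NULL):

/* Sketch of the frontbuffer-bit handover; struct/field names are
 * stand-ins for the i915 objects. */
#include <stdio.h>

struct bo { unsigned frontbuffer_bits; };

static void track_fb(struct bo *old, struct bo *new, unsigned bits)
{
	if (old)
		old->frontbuffer_bits &= ~bits;	/* clear on the old buffer */
	if (new)
		new->frontbuffer_bits |= bits;	/* set on the new buffer */
}

int main(void)
{
	struct bo a = { 0x1 }, b = { 0 };

	track_fb(&a, &b, 0x1); /* page flip: slot 0 moves from a to b */
	printf("%x %x\n", a.frontbuffer_bits, b.frontbuffer_bits); /* 0 1 */
	return 0;
}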
@@ -5302,7 +5328,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
 	struct drm_device *dev = dev_priv->dev;
 	struct drm_i915_gem_object *obj;
 	unsigned long timeout = msecs_to_jiffies(5000) + 1;
-	unsigned long pinned, bound, unbound, freed;
+	unsigned long pinned, bound, unbound, freed_pages;
 	bool was_interruptible;
 	bool unlock;
 
@@ -5319,7 +5345,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
 	was_interruptible = dev_priv->mm.interruptible;
 	dev_priv->mm.interruptible = false;
 
-	freed = i915_gem_shrink_all(dev_priv);
+	freed_pages = i915_gem_shrink_all(dev_priv);
 
 	dev_priv->mm.interruptible = was_interruptible;
 
@@ -5350,14 +5376,15 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
 	if (unlock)
 		mutex_unlock(&dev->struct_mutex);
 
-	pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
-		freed, pinned);
+	if (freed_pages || unbound || bound)
+		pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
+			freed_pages << PAGE_SHIFT, pinned);
 	if (unbound || bound)
 		pr_err("%lu and %lu bytes still available in the "
 		       "bound and unbound GPU page lists.\n",
 		       bound, unbound);
 
-	*(unsigned long *)ptr += freed;
+	*(unsigned long *)ptr += freed_pages;
 	return NOTIFY_DONE;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index a5221d8f1580..d17ff435f276 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -88,6 +88,7 @@
 #include <drm/drmP.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
+#include "i915_trace.h"
 
 /* This is a HW constraint. The value below is the largest known requirement
  * I've seen in a spec to date, and that was a workaround for a non-shipping
@@ -137,6 +138,8 @@ void i915_gem_context_free(struct kref *ctx_ref)
 	struct intel_context *ctx = container_of(ctx_ref,
 						 typeof(*ctx), ref);
 
+	trace_i915_context_free(ctx);
+
 	if (i915.enable_execlists)
 		intel_lr_context_free(ctx);
 
@@ -274,6 +277,8 @@ i915_gem_create_context(struct drm_device *dev,
 		ctx->ppgtt = ppgtt;
 	}
 
+	trace_i915_context_create(ctx);
+
 	return ctx;
 
 err_unpin:
@@ -522,6 +527,7 @@ static int do_switch(struct intel_engine_cs *ring,
 	struct intel_context *from = ring->last_context;
 	u32 hw_flags = 0;
 	bool uninitialized = false;
+	struct i915_vma *vma;
 	int ret, i;
 
 	if (from != NULL && ring == &dev_priv->ring[RCS]) {
@@ -548,6 +554,7 @@ static int do_switch(struct intel_engine_cs *ring,
 	from = ring->last_context;
 
 	if (to->ppgtt) {
+		trace_switch_mm(ring, to);
 		ret = to->ppgtt->switch_mm(to->ppgtt, ring);
 		if (ret)
 			goto unpin_out;
@@ -571,11 +578,10 @@ static int do_switch(struct intel_engine_cs *ring,
 	if (ret)
 		goto unpin_out;
 
-	if (!to->legacy_hw_ctx.rcs_state->has_global_gtt_mapping) {
-		struct i915_vma *vma = i915_gem_obj_to_vma(to->legacy_hw_ctx.rcs_state,
-							   &dev_priv->gtt.base);
-		vma->bind_vma(vma, to->legacy_hw_ctx.rcs_state->cache_level, GLOBAL_BIND);
-	}
+	vma = i915_gem_obj_to_ggtt(to->legacy_hw_ctx.rcs_state);
+	if (!(vma->bound & GLOBAL_BIND))
+		vma->bind_vma(vma, to->legacy_hw_ctx.rcs_state->cache_level,
+			      GLOBAL_BIND);
 
 	if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
 		hw_flags |= MI_RESTORE_INHIBIT;
@@ -629,7 +635,7 @@ done:
 
 	if (uninitialized) {
 		if (ring->init_context) {
-			ret = ring->init_context(ring);
+			ret = ring->init_context(ring, to);
 			if (ret)
 				DRM_ERROR("ring init context: %d\n", ret);
 		}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 1a0611bb576b..f06027ba3ee5 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -121,6 +121,9 @@ eb_lookup_vmas(struct eb_vmas *eb,
 			goto err;
 		}
 
+		WARN_ONCE(obj->base.dumb,
+			  "GPU use of dumb buffer is illegal.\n");
+
 		drm_gem_object_reference(&obj->base);
 		list_add_tail(&obj->obj_exec_link, &objects);
 	}
@@ -357,12 +360,9 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 	 * through the ppgtt for non_secure batchbuffers. */
 	if (unlikely(IS_GEN6(dev) &&
 	    reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
-	    !target_i915_obj->has_global_gtt_mapping)) {
-		struct i915_vma *vma =
-			list_first_entry(&target_i915_obj->vma_list,
-					 typeof(*vma), vma_link);
-		vma->bind_vma(vma, target_i915_obj->cache_level, GLOBAL_BIND);
-	}
+	    !(target_vma->bound & GLOBAL_BIND)))
+		target_vma->bind_vma(target_vma, target_i915_obj->cache_level,
+				     GLOBAL_BIND);
 
 	/* Validate that the target is in a valid r/w GPU domain */
 	if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
@@ -531,7 +531,7 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 
 	flags = 0;
 	if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
-		flags |= PIN_MAPPABLE;
+		flags |= PIN_GLOBAL | PIN_MAPPABLE;
 	if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
 		flags |= PIN_GLOBAL;
 	if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
@@ -1023,6 +1023,47 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
 	return 0;
 }
 
+static int
+i915_emit_box(struct intel_engine_cs *ring,
+	      struct drm_clip_rect *box,
+	      int DR1, int DR4)
+{
+	int ret;
+
+	if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
+	    box->y2 <= 0 || box->x2 <= 0) {
+		DRM_ERROR("Bad box %d,%d..%d,%d\n",
+			  box->x1, box->y1, box->x2, box->y2);
+		return -EINVAL;
+	}
+
+	if (INTEL_INFO(ring->dev)->gen >= 4) {
+		ret = intel_ring_begin(ring, 4);
+		if (ret)
+			return ret;
+
+		intel_ring_emit(ring, GFX_OP_DRAWRECT_INFO_I965);
+		intel_ring_emit(ring, (box->x1 & 0xffff) | box->y1 << 16);
+		intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16);
+		intel_ring_emit(ring, DR4);
+	} else {
+		ret = intel_ring_begin(ring, 6);
+		if (ret)
+			return ret;
+
+		intel_ring_emit(ring, GFX_OP_DRAWRECT_INFO);
+		intel_ring_emit(ring, DR1);
+		intel_ring_emit(ring, (box->x1 & 0xffff) | box->y1 << 16);
+		intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16);
+		intel_ring_emit(ring, DR4);
+		intel_ring_emit(ring, 0);
+	}
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
+
 int
 i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
 			       struct intel_engine_cs *ring,
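i915_emit_box() moves into the execbuffer path and now takes the ring directly. Its up-front clip-rectangle validation is self-contained enough to sketch on its own (stand-alone, not driver code; drm_clip_rect is modelled with a local struct):

/* Sketch of the clip-rect sanity check performed before emitting
 * GFX_OP_DRAWRECT_INFO packets. */
#include <stdio.h>

struct clip_rect { int x1, y1, x2, y2; };

static int box_valid(const struct clip_rect *box)
{
	/* reject empty or negative boxes, as the hunk above does */
	if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
	    box->y2 <= 0 || box->x2 <= 0)
		return 0;
	return 1;
}

int main(void)
{
	struct clip_rect ok = { 0, 0, 640, 480 }, bad = { 10, 10, 10, 480 };

	printf("%d %d\n", box_valid(&ok), box_valid(&bad)); /* 1 0 */
	return 0;
}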
@@ -1151,7 +1192,7 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
 	exec_len = args->batch_len;
 	if (cliprects) {
 		for (i = 0; i < args->num_cliprects; i++) {
-			ret = i915_emit_box(dev, &cliprects[i],
+			ret = i915_emit_box(ring, &cliprects[i],
 					    args->DR1, args->DR4);
 			if (ret)
 				goto error;
@@ -1300,12 +1341,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	if (ret)
 		goto pre_mutex_err;
 
-	if (dev_priv->ums.mm_suspended) {
-		mutex_unlock(&dev->struct_mutex);
-		ret = -EBUSY;
-		goto pre_mutex_err;
-	}
-
 	ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
 	if (IS_ERR(ctx)) {
 		mutex_unlock(&dev->struct_mutex);
@@ -1368,17 +1403,19 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 						      batch_obj,
 						      args->batch_start_offset,
 						      file->is_master);
-		if (ret)
-			goto err;
-
-		/*
-		 * XXX: Actually do this when enabling batch copy...
-		 *
-		 * Set the DISPATCH_SECURE bit to remove the NON_SECURE bit
-		 * from MI_BATCH_BUFFER_START commands issued in the
-		 * dispatch_execbuffer implementations. We specifically don't
-		 * want that set when the command parser is enabled.
-		 */
+		if (ret) {
+			if (ret != -EACCES)
+				goto err;
+		} else {
+			/*
+			 * XXX: Actually do this when enabling batch copy...
+			 *
+			 * Set the DISPATCH_SECURE bit to remove the NON_SECURE bit
+			 * from MI_BATCH_BUFFER_START commands issued in the
+			 * dispatch_execbuffer implementations. We specifically don't
+			 * want that set when the command parser is enabled.
			 */
+		}
 	}
 
 	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 728938f02341..171f6eafdeee 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -35,13 +35,26 @@ static void chv_setup_private_ppat(struct drm_i915_private *dev_priv);
35 35
36static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt) 36static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
37{ 37{
38 if (enable_ppgtt == 0 || !HAS_ALIASING_PPGTT(dev)) 38 bool has_aliasing_ppgtt;
39 bool has_full_ppgtt;
40
41 has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6;
42 has_full_ppgtt = INTEL_INFO(dev)->gen >= 7;
43 if (IS_GEN8(dev))
44 has_full_ppgtt = false; /* XXX why? */
45
46 /*
47 * We don't allow disabling PPGTT for gen9+ as it's a requirement for
48 * execlists, the sole mechanism available to submit work.
49 */
50 if (INTEL_INFO(dev)->gen < 9 &&
51 (enable_ppgtt == 0 || !has_aliasing_ppgtt))
39 return 0; 52 return 0;
40 53
41 if (enable_ppgtt == 1) 54 if (enable_ppgtt == 1)
42 return 1; 55 return 1;
43 56
44 if (enable_ppgtt == 2 && HAS_PPGTT(dev)) 57 if (enable_ppgtt == 2 && has_full_ppgtt)
45 return 2; 58 return 2;
46 59
47#ifdef CONFIG_INTEL_IOMMU 60#ifdef CONFIG_INTEL_IOMMU
@@ -59,7 +72,7 @@ static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
59 return 0; 72 return 0;
60 } 73 }
61 74
62 return HAS_ALIASING_PPGTT(dev) ? 1 : 0; 75 return has_aliasing_ppgtt ? 1 : 0;
63} 76}
64 77
65 78
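
A compact model of the reworked sanitizing above, keeping the same option
semantics (0 = off, 1 = aliasing, 2 = full, anything else = auto) but omitting
the CONFIG_INTEL_IOMMU special case; the capability booleans are derived the
way the hunk derives them, including the gen8 full-PPGTT opt-out.

#include <stdbool.h>
#include <stdio.h>

static int sanitize_enable_ppgtt(int gen, int enable_ppgtt)
{
        bool has_aliasing_ppgtt = gen >= 6;
        bool has_full_ppgtt = gen >= 7 && gen != 8;     /* gen8: disabled */

        /* gen9+ needs PPGTT for execlists, so never report it off */
        if (gen < 9 && (enable_ppgtt == 0 || !has_aliasing_ppgtt))
                return 0;
        if (enable_ppgtt == 1)
                return 1;
        if (enable_ppgtt == 2 && has_full_ppgtt)
                return 2;
        return has_aliasing_ppgtt ? 1 : 0;              /* auto */
}

int main(void)
{
        printf("gen7 auto -> %d\n", sanitize_enable_ppgtt(7, -1));  /* 1 */
        printf("gen8 full -> %d\n", sanitize_enable_ppgtt(8, 2));   /* 1 */
        printf("gen9 off  -> %d\n", sanitize_enable_ppgtt(9, 0));   /* 1 */
        return 0;
}
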
@@ -156,9 +169,6 @@ static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
156 gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0; 169 gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
157 pte |= GEN6_PTE_ADDR_ENCODE(addr); 170 pte |= GEN6_PTE_ADDR_ENCODE(addr);
158 171
159 /* Mark the page as writeable. Other platforms don't have a
160 * setting for read-only/writable, so this matches that behavior.
161 */
162 if (!(flags & PTE_READ_ONLY)) 172 if (!(flags & PTE_READ_ONLY))
163 pte |= BYT_PTE_WRITEABLE; 173 pte |= BYT_PTE_WRITEABLE;
164 174
@@ -1092,7 +1102,7 @@ static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
1092 1102
1093 if (INTEL_INFO(dev)->gen < 8) 1103 if (INTEL_INFO(dev)->gen < 8)
1094 return gen6_ppgtt_init(ppgtt); 1104 return gen6_ppgtt_init(ppgtt);
1095 else if (IS_GEN8(dev)) 1105 else if (IS_GEN8(dev) || IS_GEN9(dev))
1096 return gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total); 1106 return gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
1097 else 1107 else
1098 BUG(); 1108 BUG();
@@ -1166,6 +1176,8 @@ i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv)
1166 1176
1167 ppgtt->file_priv = fpriv; 1177 ppgtt->file_priv = fpriv;
1168 1178
1179 trace_i915_ppgtt_create(&ppgtt->base);
1180
1169 return ppgtt; 1181 return ppgtt;
1170} 1182}
1171 1183
@@ -1174,6 +1186,8 @@ void i915_ppgtt_release(struct kref *kref)
1174 struct i915_hw_ppgtt *ppgtt = 1186 struct i915_hw_ppgtt *ppgtt =
1175 container_of(kref, struct i915_hw_ppgtt, ref); 1187 container_of(kref, struct i915_hw_ppgtt, ref);
1176 1188
1189 trace_i915_ppgtt_release(&ppgtt->base);
1190
1177 /* vmas should already be unbound */ 1191 /* vmas should already be unbound */
1178 WARN_ON(!list_empty(&ppgtt->base.active_list)); 1192 WARN_ON(!list_empty(&ppgtt->base.active_list));
1179 WARN_ON(!list_empty(&ppgtt->base.inactive_list)); 1193 WARN_ON(!list_empty(&ppgtt->base.inactive_list));
@@ -1258,7 +1272,7 @@ void i915_check_and_clear_faults(struct drm_device *dev)
1258 fault_reg = I915_READ(RING_FAULT_REG(ring)); 1272 fault_reg = I915_READ(RING_FAULT_REG(ring));
1259 if (fault_reg & RING_FAULT_VALID) { 1273 if (fault_reg & RING_FAULT_VALID) {
1260 DRM_DEBUG_DRIVER("Unexpected fault\n" 1274 DRM_DEBUG_DRIVER("Unexpected fault\n"
1261 "\tAddr: 0x%08lx\\n" 1275 "\tAddr: 0x%08lx\n"
1262 "\tAddress space: %s\n" 1276 "\tAddress space: %s\n"
1263 "\tSource ID: %d\n" 1277 "\tSource ID: %d\n"
1264 "\tType: %d\n", 1278 "\tType: %d\n",
@@ -1328,7 +1342,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
1328 * Unfortunately above, we've just wiped out the mappings 1342 * Unfortunately above, we've just wiped out the mappings
1329 * without telling our object about it. So we need to fake it. 1343 * without telling our object about it. So we need to fake it.
1330 */ 1344 */
1331 obj->has_global_gtt_mapping = 0; 1345 vma->bound &= ~GLOBAL_BIND;
1332 vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND); 1346 vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
1333 } 1347 }
1334 1348
@@ -1525,7 +1539,7 @@ static void i915_ggtt_bind_vma(struct i915_vma *vma,
1525 1539
1526 BUG_ON(!i915_is_ggtt(vma->vm)); 1540 BUG_ON(!i915_is_ggtt(vma->vm));
1527 intel_gtt_insert_sg_entries(vma->obj->pages, entry, flags); 1541 intel_gtt_insert_sg_entries(vma->obj->pages, entry, flags);
1528 vma->obj->has_global_gtt_mapping = 1; 1542 vma->bound = GLOBAL_BIND;
1529} 1543}
1530 1544
1531static void i915_ggtt_clear_range(struct i915_address_space *vm, 1545static void i915_ggtt_clear_range(struct i915_address_space *vm,
@@ -1544,7 +1558,7 @@ static void i915_ggtt_unbind_vma(struct i915_vma *vma)
1544 const unsigned int size = vma->obj->base.size >> PAGE_SHIFT; 1558 const unsigned int size = vma->obj->base.size >> PAGE_SHIFT;
1545 1559
1546 BUG_ON(!i915_is_ggtt(vma->vm)); 1560 BUG_ON(!i915_is_ggtt(vma->vm));
1547 vma->obj->has_global_gtt_mapping = 0; 1561 vma->bound = 0;
1548 intel_gtt_clear_range(first, size); 1562 intel_gtt_clear_range(first, size);
1549} 1563}
1550 1564
@@ -1572,24 +1586,24 @@ static void ggtt_bind_vma(struct i915_vma *vma,
1572 * flags. At all other times, the GPU will use the aliasing PPGTT. 1586 * flags. At all other times, the GPU will use the aliasing PPGTT.
1573 */ 1587 */
1574 if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) { 1588 if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) {
1575 if (!obj->has_global_gtt_mapping || 1589 if (!(vma->bound & GLOBAL_BIND) ||
1576 (cache_level != obj->cache_level)) { 1590 (cache_level != obj->cache_level)) {
1577 vma->vm->insert_entries(vma->vm, obj->pages, 1591 vma->vm->insert_entries(vma->vm, obj->pages,
1578 vma->node.start, 1592 vma->node.start,
1579 cache_level, flags); 1593 cache_level, flags);
1580 obj->has_global_gtt_mapping = 1; 1594 vma->bound |= GLOBAL_BIND;
1581 } 1595 }
1582 } 1596 }
1583 1597
1584 if (dev_priv->mm.aliasing_ppgtt && 1598 if (dev_priv->mm.aliasing_ppgtt &&
1585 (!obj->has_aliasing_ppgtt_mapping || 1599 (!(vma->bound & LOCAL_BIND) ||
1586 (cache_level != obj->cache_level))) { 1600 (cache_level != obj->cache_level))) {
1587 struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt; 1601 struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
1588 appgtt->base.insert_entries(&appgtt->base, 1602 appgtt->base.insert_entries(&appgtt->base,
1589 vma->obj->pages, 1603 vma->obj->pages,
1590 vma->node.start, 1604 vma->node.start,
1591 cache_level, flags); 1605 cache_level, flags);
1592 vma->obj->has_aliasing_ppgtt_mapping = 1; 1606 vma->bound |= LOCAL_BIND;
1593 } 1607 }
1594} 1608}
1595 1609
@@ -1599,21 +1613,21 @@ static void ggtt_unbind_vma(struct i915_vma *vma)
1599 struct drm_i915_private *dev_priv = dev->dev_private; 1613 struct drm_i915_private *dev_priv = dev->dev_private;
1600 struct drm_i915_gem_object *obj = vma->obj; 1614 struct drm_i915_gem_object *obj = vma->obj;
1601 1615
1602 if (obj->has_global_gtt_mapping) { 1616 if (vma->bound & GLOBAL_BIND) {
1603 vma->vm->clear_range(vma->vm, 1617 vma->vm->clear_range(vma->vm,
1604 vma->node.start, 1618 vma->node.start,
1605 obj->base.size, 1619 obj->base.size,
1606 true); 1620 true);
1607 obj->has_global_gtt_mapping = 0; 1621 vma->bound &= ~GLOBAL_BIND;
1608 } 1622 }
1609 1623
1610 if (obj->has_aliasing_ppgtt_mapping) { 1624 if (vma->bound & LOCAL_BIND) {
1611 struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt; 1625 struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
1612 appgtt->base.clear_range(&appgtt->base, 1626 appgtt->base.clear_range(&appgtt->base,
1613 vma->node.start, 1627 vma->node.start,
1614 obj->base.size, 1628 obj->base.size,
1615 true); 1629 true);
1616 obj->has_aliasing_ppgtt_mapping = 0; 1630 vma->bound &= ~LOCAL_BIND;
1617 } 1631 }
1618} 1632}
1619 1633
@@ -1650,10 +1664,10 @@ static void i915_gtt_color_adjust(struct drm_mm_node *node,
1650 } 1664 }
1651} 1665}
1652 1666
1653int i915_gem_setup_global_gtt(struct drm_device *dev, 1667static int i915_gem_setup_global_gtt(struct drm_device *dev,
1654 unsigned long start, 1668 unsigned long start,
1655 unsigned long mappable_end, 1669 unsigned long mappable_end,
1656 unsigned long end) 1670 unsigned long end)
1657{ 1671{
1658 /* Let GEM Manage all of the aperture. 1672 /* Let GEM Manage all of the aperture.
1659 * 1673 *
@@ -1691,7 +1705,7 @@ int i915_gem_setup_global_gtt(struct drm_device *dev,
1691 DRM_DEBUG_KMS("Reservation failed: %i\n", ret); 1705 DRM_DEBUG_KMS("Reservation failed: %i\n", ret);
1692 return ret; 1706 return ret;
1693 } 1707 }
1694 obj->has_global_gtt_mapping = 1; 1708 vma->bound |= GLOBAL_BIND;
1695 } 1709 }
1696 1710
1697 dev_priv->gtt.base.start = start; 1711 dev_priv->gtt.base.start = start;
@@ -1764,7 +1778,6 @@ static int setup_scratch_page(struct drm_device *dev)
1764 page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO); 1778 page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
1765 if (page == NULL) 1779 if (page == NULL)
1766 return -ENOMEM; 1780 return -ENOMEM;
1767 get_page(page);
1768 set_pages_uc(page, 1); 1781 set_pages_uc(page, 1);
1769 1782
1770#ifdef CONFIG_INTEL_IOMMU 1783#ifdef CONFIG_INTEL_IOMMU
@@ -1789,7 +1802,6 @@ static void teardown_scratch_page(struct drm_device *dev)
1789 set_pages_wb(page, 1); 1802 set_pages_wb(page, 1);
1790 pci_unmap_page(dev->pdev, dev_priv->gtt.base.scratch.addr, 1803 pci_unmap_page(dev->pdev, dev_priv->gtt.base.scratch.addr,
1791 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 1804 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
1792 put_page(page);
1793 __free_page(page); 1805 __free_page(page);
1794} 1806}
1795 1807
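
A toy refcount model of the cleanup in the two hunks above: alloc_page()
already hands back a page holding one reference, so wrapping a private page in
an extra get_page()/put_page() pair was redundant. Illustrative sketch only,
not kernel code.

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct page { int refcount; };

static struct page *alloc_page_(void)
{
        struct page *p = malloc(sizeof(*p));

        if (!p)
                abort();
        p->refcount = 1;        /* the allocation owns the first reference */
        return p;
}

static void free_page_(struct page *p)
{
        assert(p->refcount == 1);       /* caller holds the last reference */
        free(p);
}

int main(void)
{
        struct page *scratch = alloc_page_();

        /* no extra get/put needed for a page the driver owns outright */
        free_page_(scratch);
        puts("balanced");
        return 0;
}
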
@@ -1859,6 +1871,18 @@ static size_t chv_get_stolen_size(u16 gmch_ctrl)
1859 return (gmch_ctrl - 0x17 + 9) << 22; 1871 return (gmch_ctrl - 0x17 + 9) << 22;
1860} 1872}
1861 1873
1874static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
1875{
1876 gen9_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
1877 gen9_gmch_ctl &= BDW_GMCH_GMS_MASK;
1878
1879 if (gen9_gmch_ctl < 0xf0)
1880 return gen9_gmch_ctl << 25; /* 32 MB units */
1881 else
1882 /* 4MB increments starting at 0xf0 for 4MB */
1883 return (gen9_gmch_ctl - 0xf0 + 1) << 22;
1884}
1885
1862static int ggtt_probe_common(struct drm_device *dev, 1886static int ggtt_probe_common(struct drm_device *dev,
1863 size_t gtt_size) 1887 size_t gtt_size)
1864{ 1888{
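
A standalone sketch of the gen9 stolen-size decode added above, with
GMS_SHIFT/GMS_MASK standing in for BDW_GMCH_GMS_SHIFT/BDW_GMCH_GMS_MASK
(assumed to be 8 and 0xff here): encodings below 0xf0 count 32 MB units,
encodings from 0xf0 upward count 4 MB increments.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define GMS_SHIFT 8     /* assumed register layout */
#define GMS_MASK  0xff

static size_t gen9_get_stolen_size(uint16_t gmch_ctl)
{
        gmch_ctl >>= GMS_SHIFT;
        gmch_ctl &= GMS_MASK;

        if (gmch_ctl < 0xf0)
                return (size_t)gmch_ctl << 25;          /* 32 MB units */
        return (size_t)(gmch_ctl - 0xf0 + 1) << 22;     /* 4 MB units */
}

int main(void)
{
        printf("%zu MiB\n", gen9_get_stolen_size(0x02 << GMS_SHIFT) >> 20); /* 64 */
        printf("%zu MiB\n", gen9_get_stolen_size(0xf1 << GMS_SHIFT) >> 20); /* 8 */
        return 0;
}
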
@@ -1934,9 +1958,17 @@ static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
1934 * Only the snoop bit has meaning for CHV, the rest is 1958 * Only the snoop bit has meaning for CHV, the rest is
1935 * ignored. 1959 * ignored.
1936 * 1960 *
1937 * Note that the harware enforces snooping for all page 1961 * The hardware will never snoop for certain types of accesses:
1938 * table accesses. The snoop bit is actually ignored for 1962 * - CPU GTT (GMADR->GGTT->no snoop->memory)
1939 * PDEs. 1963 * - PPGTT page tables
1964 * - some other special cycles
1965 *
1966 * As with BDW, we also need to consider the following for GT accesses:
1967 * "For GGTT, there is NO pat_sel[2:0] from the entry,
1968 * so RTL will always use the value corresponding to
1969 * pat_sel = 000".
1970 * Which means we must set the snoop bit in PAT entry 0
1971 * in order to keep the global status page working.
1940 */ 1972 */
1941 pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) | 1973 pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
1942 GEN8_PPAT(1, 0) | 1974 GEN8_PPAT(1, 0) |
@@ -1971,7 +2003,10 @@ static int gen8_gmch_probe(struct drm_device *dev,
1971 2003
1972 pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); 2004 pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
1973 2005
1974 if (IS_CHERRYVIEW(dev)) { 2006 if (INTEL_INFO(dev)->gen >= 9) {
2007 *stolen = gen9_get_stolen_size(snb_gmch_ctl);
2008 gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
2009 } else if (IS_CHERRYVIEW(dev)) {
1975 *stolen = chv_get_stolen_size(snb_gmch_ctl); 2010 *stolen = chv_get_stolen_size(snb_gmch_ctl);
1976 gtt_size = chv_get_total_gtt_size(snb_gmch_ctl); 2011 gtt_size = chv_get_total_gtt_size(snb_gmch_ctl);
1977 } else { 2012 } else {
@@ -2143,6 +2178,7 @@ static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
2143 vma->obj = obj; 2178 vma->obj = obj;
2144 2179
2145 switch (INTEL_INFO(vm->dev)->gen) { 2180 switch (INTEL_INFO(vm->dev)->gen) {
2181 case 9:
2146 case 8: 2182 case 8:
2147 case 7: 2183 case 7:
2148 case 6: 2184 case 6:
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index d5c14af51e99..beaf4bcfdac8 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -123,6 +123,12 @@ struct i915_vma {
123 struct drm_i915_gem_object *obj; 123 struct drm_i915_gem_object *obj;
124 struct i915_address_space *vm; 124 struct i915_address_space *vm;
125 125
126 /** Flags and address space this VMA is bound to */
127#define GLOBAL_BIND (1<<0)
128#define LOCAL_BIND (1<<1)
129#define PTE_READ_ONLY (1<<2)
130 unsigned int bound : 4;
131
126 /** This object's place on the active/inactive lists */ 132 /** This object's place on the active/inactive lists */
127 struct list_head mm_list; 133 struct list_head mm_list;
128 134
@@ -155,8 +161,6 @@ struct i915_vma {
155 * setting the valid PTE entries to a reserved scratch page. */ 161 * setting the valid PTE entries to a reserved scratch page. */
156 void (*unbind_vma)(struct i915_vma *vma); 162 void (*unbind_vma)(struct i915_vma *vma);
157 /* Map an object into an address space with the given cache flags. */ 163 /* Map an object into an address space with the given cache flags. */
158#define GLOBAL_BIND (1<<0)
159#define PTE_READ_ONLY (1<<1)
160 void (*bind_vma)(struct i915_vma *vma, 164 void (*bind_vma)(struct i915_vma *vma,
161 enum i915_cache_level cache_level, 165 enum i915_cache_level cache_level,
162 u32 flags); 166 u32 flags);
@@ -270,8 +274,6 @@ struct i915_hw_ppgtt {
270 274
271int i915_gem_gtt_init(struct drm_device *dev); 275int i915_gem_gtt_init(struct drm_device *dev);
272void i915_gem_init_global_gtt(struct drm_device *dev); 276void i915_gem_init_global_gtt(struct drm_device *dev);
273int i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
274 unsigned long mappable_end, unsigned long end);
275void i915_global_gtt_cleanup(struct drm_device *dev); 277void i915_global_gtt_cleanup(struct drm_device *dev);
276 278
277 279
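
A minimal model of the refactor this header change completes: the per-object
booleans (has_global_gtt_mapping, has_aliasing_ppgtt_mapping) collapse into
one small per-VMA flag word, so each mapping of an object tracks its own
binding state. The struct below is illustrative, not the kernel's.

#include <stdio.h>

#define GLOBAL_BIND     (1 << 0)
#define LOCAL_BIND      (1 << 1)
#define PTE_READ_ONLY   (1 << 2)

struct vma {
        unsigned int bound : 4;
};

static void bind_global(struct vma *v)   { v->bound |= GLOBAL_BIND; }
static void unbind_global(struct vma *v) { v->bound &= ~GLOBAL_BIND; }

int main(void)
{
        struct vma v = { 0 };

        bind_global(&v);
        printf("global bound: %d\n", !!(v.bound & GLOBAL_BIND));  /* 1 */
        unbind_global(&v);
        printf("global bound: %d\n", !!(v.bound & GLOBAL_BIND));  /* 0 */
        return 0;
}
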
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index a9a62d75aa57..98dcd94acba8 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -38,6 +38,8 @@ render_state_get_rodata(struct drm_device *dev, const int gen)
38 return &gen7_null_state; 38 return &gen7_null_state;
39 case 8: 39 case 8:
40 return &gen8_null_state; 40 return &gen8_null_state;
41 case 9:
42 return &gen9_null_state;
41 } 43 }
42 44
43 return NULL; 45 return NULL;
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 85fda6b803e4..a2045848bd1a 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -137,7 +137,11 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
137 r = devm_request_mem_region(dev->dev, base + 1, 137 r = devm_request_mem_region(dev->dev, base + 1,
138 dev_priv->gtt.stolen_size - 1, 138 dev_priv->gtt.stolen_size - 1,
139 "Graphics Stolen Memory"); 139 "Graphics Stolen Memory");
140 if (r == NULL) { 140 /*
141 * GEN3 firmware likes to smash pci bridges into the stolen
142 * range. Apparently this works.
143 */
144 if (r == NULL && !IS_GEN3(dev)) {
141 DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n", 145 DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
142 base, base + (uint32_t)dev_priv->gtt.stolen_size); 146 base, base + (uint32_t)dev_priv->gtt.stolen_size);
143 base = 0; 147 base = 0;
@@ -533,7 +537,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
533 } 537 }
534 } 538 }
535 539
536 obj->has_global_gtt_mapping = 1; 540 vma->bound |= GLOBAL_BIND;
537 541
538 list_add_tail(&obj->global_list, &dev_priv->mm.bound_list); 542 list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
539 list_add_tail(&vma->mm_list, &ggtt->inactive_list); 543 list_add_tail(&vma->mm_list, &ggtt->inactive_list);
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 2b1eaa29ada4..4727a4e2c87c 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -102,22 +102,33 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
102 swizzle_x = I915_BIT_6_SWIZZLE_NONE; 102 swizzle_x = I915_BIT_6_SWIZZLE_NONE;
103 swizzle_y = I915_BIT_6_SWIZZLE_NONE; 103 swizzle_y = I915_BIT_6_SWIZZLE_NONE;
104 } else if (INTEL_INFO(dev)->gen >= 6) { 104 } else if (INTEL_INFO(dev)->gen >= 6) {
105 uint32_t dimm_c0, dimm_c1; 105 if (dev_priv->preserve_bios_swizzle) {
106 dimm_c0 = I915_READ(MAD_DIMM_C0); 106 if (I915_READ(DISP_ARB_CTL) &
107 dimm_c1 = I915_READ(MAD_DIMM_C1); 107 DISP_TILE_SURFACE_SWIZZLING) {
108 dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK; 108 swizzle_x = I915_BIT_6_SWIZZLE_9_10;
109 dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK; 109 swizzle_y = I915_BIT_6_SWIZZLE_9;
110 /* Enable swizzling when the channels are populated with 110 } else {
111 * identically sized dimms. We don't need to check the 3rd 111 swizzle_x = I915_BIT_6_SWIZZLE_NONE;
112 * channel because no cpu with gpu attached ships in that 112 swizzle_y = I915_BIT_6_SWIZZLE_NONE;
113 * configuration. Also, swizzling only makes sense for 2 113 }
114 * channels anyway. */
115 if (dimm_c0 == dimm_c1) {
116 swizzle_x = I915_BIT_6_SWIZZLE_9_10;
117 swizzle_y = I915_BIT_6_SWIZZLE_9;
118 } else { 114 } else {
119 swizzle_x = I915_BIT_6_SWIZZLE_NONE; 115 uint32_t dimm_c0, dimm_c1;
120 swizzle_y = I915_BIT_6_SWIZZLE_NONE; 116 dimm_c0 = I915_READ(MAD_DIMM_C0);
117 dimm_c1 = I915_READ(MAD_DIMM_C1);
118 dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
119 dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
120 /* Enable swizzling when the channels are populated
121 * with identically sized dimms. We don't need to check
122 * the 3rd channel because no cpu with gpu attached
123 * ships in that configuration. Also, swizzling only
124 * makes sense for 2 channels anyway. */
125 if (dimm_c0 == dimm_c1) {
126 swizzle_x = I915_BIT_6_SWIZZLE_9_10;
127 swizzle_y = I915_BIT_6_SWIZZLE_9;
128 } else {
129 swizzle_x = I915_BIT_6_SWIZZLE_NONE;
130 swizzle_y = I915_BIT_6_SWIZZLE_NONE;
131 }
121 } 132 }
122 } else if (IS_GEN5(dev)) { 133 } else if (IS_GEN5(dev)) {
123 /* On Ironlake whatever DRAM config, GPU always do 134 /* On Ironlake whatever DRAM config, GPU always do
@@ -167,6 +178,15 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
167 } 178 }
168 break; 179 break;
169 } 180 }
181
182 /* check for L-shaped memory aka modified enhanced addressing */
183 if (IS_GEN4(dev)) {
184 uint32_t ddc2 = I915_READ(DCC2);
185
186 if (!(ddc2 & DCC2_MODIFIED_ENHANCED_DISABLE))
187 dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
188 }
189
170 if (dcc == 0xffffffff) { 190 if (dcc == 0xffffffff) {
171 DRM_ERROR("Couldn't read from MCHBAR. " 191 DRM_ERROR("Couldn't read from MCHBAR. "
172 "Disabling tiling.\n"); 192 "Disabling tiling.\n");
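
A sketch of the gen4 L-shaped-memory detection added above: read a DRAM
configuration register and latch a driver quirk whenever the "modified
enhanced addressing disable" bit is clear. The bit position used below is an
assumption for illustration, not the real DCC2 layout.

#include <stdint.h>
#include <stdio.h>

#define DCC2_MODIFIED_ENHANCED_DISABLE  (1u << 20)      /* assumed bit */
#define QUIRK_PIN_SWIZZLED_PAGES        (1u << 0)

static unsigned int detect_quirks(uint32_t dcc2)
{
        unsigned int quirks = 0;

        /* bit clear => L-shaped memory in use => quirk applies */
        if (!(dcc2 & DCC2_MODIFIED_ENHANCED_DISABLE))
                quirks |= QUIRK_PIN_SWIZZLED_PAGES;
        return quirks;
}

int main(void)
{
        printf("quirks: %#x\n", detect_quirks(0));                              /* 0x1 */
        printf("quirks: %#x\n", detect_quirks(DCC2_MODIFIED_ENHANCED_DISABLE)); /* 0 */
        return 0;
}
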
@@ -369,6 +389,15 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
369 ret = i915_gem_object_ggtt_unbind(obj); 389 ret = i915_gem_object_ggtt_unbind(obj);
370 390
371 if (ret == 0) { 391 if (ret == 0) {
392 if (obj->pages &&
393 obj->madv == I915_MADV_WILLNEED &&
394 dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
395 if (args->tiling_mode == I915_TILING_NONE)
396 i915_gem_object_unpin_pages(obj);
397 if (obj->tiling_mode == I915_TILING_NONE)
398 i915_gem_object_pin_pages(obj);
399 }
400
372 obj->fence_dirty = 401 obj->fence_dirty =
373 obj->last_fenced_seqno || 402 obj->last_fenced_seqno ||
374 obj->fence_reg != I915_FENCE_REG_NONE; 403 obj->fence_reg != I915_FENCE_REG_NONE;
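
A toy model of the pin/unpin balance added above: under the swizzle quirk,
pages stay pinned while an object is tiled, so a tiling change adjusts the pin
count in the matching direction (a none-to-none change nets out to zero, as in
the hunk). All names are illustrative.

#include <assert.h>
#include <stdio.h>

enum tiling { TILING_NONE, TILING_X };

struct object {
        enum tiling tiling;
        int pin_count;
};

static void set_tiling(struct object *obj, enum tiling new_tiling)
{
        if (new_tiling == TILING_NONE)
                obj->pin_count--;       /* going untiled drops the quirk pin */
        if (obj->tiling == TILING_NONE)
                obj->pin_count++;       /* leaving untiled takes the pin */
        obj->tiling = new_tiling;
}

int main(void)
{
        struct object obj = { TILING_NONE, 1 };

        set_tiling(&obj, TILING_X);
        assert(obj.pin_count == 2);
        set_tiling(&obj, TILING_NONE);
        assert(obj.pin_count == 1);
        printf("pin count balanced: %d\n", obj.pin_count);
        return 0;
}
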
@@ -434,6 +463,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
434 } 463 }
435 464
436 /* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */ 465 /* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
466 args->phys_swizzle_mode = args->swizzle_mode;
437 if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17) 467 if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
438 args->swizzle_mode = I915_BIT_6_SWIZZLE_9; 468 args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
439 if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17) 469 if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 2c87a797213f..cdaee6ce05f8 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -242,11 +242,15 @@ static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
242 242
243static void i915_ring_error_state(struct drm_i915_error_state_buf *m, 243static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
244 struct drm_device *dev, 244 struct drm_device *dev,
245 struct drm_i915_error_ring *ring) 245 struct drm_i915_error_state *error,
246 int ring_idx)
246{ 247{
248 struct drm_i915_error_ring *ring = &error->ring[ring_idx];
249
247 if (!ring->valid) 250 if (!ring->valid)
248 return; 251 return;
249 252
253 err_printf(m, "%s command stream:\n", ring_str(ring_idx));
250 err_printf(m, " HEAD: 0x%08x\n", ring->head); 254 err_printf(m, " HEAD: 0x%08x\n", ring->head);
251 err_printf(m, " TAIL: 0x%08x\n", ring->tail); 255 err_printf(m, " TAIL: 0x%08x\n", ring->tail);
252 err_printf(m, " CTL: 0x%08x\n", ring->ctl); 256 err_printf(m, " CTL: 0x%08x\n", ring->ctl);
@@ -388,10 +392,8 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
388 if (INTEL_INFO(dev)->gen == 7) 392 if (INTEL_INFO(dev)->gen == 7)
389 err_printf(m, "ERR_INT: 0x%08x\n", error->err_int); 393 err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
390 394
391 for (i = 0; i < ARRAY_SIZE(error->ring); i++) { 395 for (i = 0; i < ARRAY_SIZE(error->ring); i++)
392 err_printf(m, "%s command stream:\n", ring_str(i)); 396 i915_ring_error_state(m, dev, error, i);
393 i915_ring_error_state(m, dev, &error->ring[i]);
394 }
395 397
396 for (i = 0; i < error->vm_count; i++) { 398 for (i = 0; i < error->vm_count; i++) {
397 err_printf(m, "vm[%d]\n", i); 399 err_printf(m, "vm[%d]\n", i);
@@ -565,6 +567,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
565 struct i915_address_space *vm) 567 struct i915_address_space *vm)
566{ 568{
567 struct drm_i915_error_object *dst; 569 struct drm_i915_error_object *dst;
570 struct i915_vma *vma = NULL;
568 int num_pages; 571 int num_pages;
569 bool use_ggtt; 572 bool use_ggtt;
570 int i = 0; 573 int i = 0;
@@ -585,16 +588,17 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
585 dst->gtt_offset = -1; 588 dst->gtt_offset = -1;
586 589
587 reloc_offset = dst->gtt_offset; 590 reloc_offset = dst->gtt_offset;
591 if (i915_is_ggtt(vm))
592 vma = i915_gem_obj_to_ggtt(src);
588 use_ggtt = (src->cache_level == I915_CACHE_NONE && 593 use_ggtt = (src->cache_level == I915_CACHE_NONE &&
589 i915_is_ggtt(vm) && 594 vma && (vma->bound & GLOBAL_BIND) &&
590 src->has_global_gtt_mapping && 595 reloc_offset + num_pages * PAGE_SIZE <= dev_priv->gtt.mappable_end);
591 reloc_offset + num_pages * PAGE_SIZE <= dev_priv->gtt.mappable_end);
592 596
593 /* Cannot access stolen address directly, try to use the aperture */ 597 /* Cannot access stolen address directly, try to use the aperture */
594 if (src->stolen) { 598 if (src->stolen) {
595 use_ggtt = true; 599 use_ggtt = true;
596 600
597 if (!src->has_global_gtt_mapping) 601 if (!(vma && vma->bound & GLOBAL_BIND))
598 goto unwind; 602 goto unwind;
599 603
600 reloc_offset = i915_gem_obj_ggtt_offset(src); 604 reloc_offset = i915_gem_obj_ggtt_offset(src);
@@ -765,6 +769,7 @@ static void i915_gem_record_fences(struct drm_device *dev,
765 769
766 /* Fences */ 770 /* Fences */
767 switch (INTEL_INFO(dev)->gen) { 771 switch (INTEL_INFO(dev)->gen) {
772 case 9:
768 case 8: 773 case 8:
769 case 7: 774 case 7:
770 case 6: 775 case 6:
@@ -804,9 +809,8 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
804 809
805 if (!error->semaphore_obj) 810 if (!error->semaphore_obj)
806 error->semaphore_obj = 811 error->semaphore_obj =
807 i915_error_object_create(dev_priv, 812 i915_error_ggtt_object_create(dev_priv,
808 dev_priv->semaphore_obj, 813 dev_priv->semaphore_obj);
809 &dev_priv->gtt.base);
810 814
811 for_each_ring(to, dev_priv, i) { 815 for_each_ring(to, dev_priv, i) {
812 int idx; 816 int idx;
@@ -923,6 +927,7 @@ static void i915_record_ring_state(struct drm_device *dev,
923 ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring)); 927 ering->vm_info.gfx_mode = I915_READ(RING_MODE_GEN7(ring));
924 928
925 switch (INTEL_INFO(dev)->gen) { 929 switch (INTEL_INFO(dev)->gen) {
930 case 9:
926 case 8: 931 case 8:
927 for (i = 0; i < 4; i++) { 932 for (i = 0; i < 4; i++) {
928 ering->vm_info.pdp[i] = 933 ering->vm_info.pdp[i] =
@@ -1238,7 +1243,8 @@ static void i915_error_capture_msg(struct drm_device *dev,
1238 ecode = i915_error_generate_code(dev_priv, error, &ring_id); 1243 ecode = i915_error_generate_code(dev_priv, error, &ring_id);
1239 1244
1240 len = scnprintf(error->error_msg, sizeof(error->error_msg), 1245 len = scnprintf(error->error_msg, sizeof(error->error_msg),
1241 "GPU HANG: ecode %d:0x%08x", ring_id, ecode); 1246 "GPU HANG: ecode %d:%d:0x%08x",
1247 INTEL_INFO(dev)->gen, ring_id, ecode);
1242 1248
1243 if (ring_id != -1 && error->ring[ring_id].pid != -1) 1249 if (ring_id != -1 && error->ring[ring_id].pid != -1)
1244 len += scnprintf(error->error_msg + len, 1250 len += scnprintf(error->error_msg + len,
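
The hang message now carries a gen:ring:ecode triple; a trivial rendering of
the new format, with snprintf standing in for the kernel's scnprintf:

#include <stdio.h>

int main(void)
{
        char msg[64];
        int gen = 9, ring_id = 0;
        unsigned int ecode = 0xdeadbeef;

        snprintf(msg, sizeof(msg), "GPU HANG: ecode %d:%d:0x%08x",
                 gen, ring_id, ecode);
        puts(msg);      /* GPU HANG: ecode 9:0:0xdeadbeef */
        return 0;
}
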
@@ -1326,13 +1332,12 @@ void i915_error_state_get(struct drm_device *dev,
1326 struct i915_error_state_file_priv *error_priv) 1332 struct i915_error_state_file_priv *error_priv)
1327{ 1333{
1328 struct drm_i915_private *dev_priv = dev->dev_private; 1334 struct drm_i915_private *dev_priv = dev->dev_private;
1329 unsigned long flags;
1330 1335
1331 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); 1336 spin_lock_irq(&dev_priv->gpu_error.lock);
1332 error_priv->error = dev_priv->gpu_error.first_error; 1337 error_priv->error = dev_priv->gpu_error.first_error;
1333 if (error_priv->error) 1338 if (error_priv->error)
1334 kref_get(&error_priv->error->ref); 1339 kref_get(&error_priv->error->ref);
1335 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); 1340 spin_unlock_irq(&dev_priv->gpu_error.lock);
1336 1341
1337} 1342}
1338 1343
@@ -1346,12 +1351,11 @@ void i915_destroy_error_state(struct drm_device *dev)
1346{ 1351{
1347 struct drm_i915_private *dev_priv = dev->dev_private; 1352 struct drm_i915_private *dev_priv = dev->dev_private;
1348 struct drm_i915_error_state *error; 1353 struct drm_i915_error_state *error;
1349 unsigned long flags;
1350 1354
1351 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); 1355 spin_lock_irq(&dev_priv->gpu_error.lock);
1352 error = dev_priv->gpu_error.first_error; 1356 error = dev_priv->gpu_error.first_error;
1353 dev_priv->gpu_error.first_error = NULL; 1357 dev_priv->gpu_error.first_error = NULL;
1354 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); 1358 spin_unlock_irq(&dev_priv->gpu_error.lock);
1355 1359
1356 if (error) 1360 if (error)
1357 kref_put(&error->ref, i915_error_state_free); 1361 kref_put(&error->ref, i915_error_state_free);
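
A userspace model of why the spin_lock_irqsave() to spin_lock_irq()
conversions in this file are safe: the irqsave variant preserves the caller's
interrupt state, while the plain variant unconditionally re-enables interrupts
on unlock, which is only correct in contexts known to run with interrupts on
(process context, as here). This is a sketch of the semantics, not of the
locking primitives themselves.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static bool irqs_enabled = true;

static void lock_irqsave(bool *flags)     { *flags = irqs_enabled; irqs_enabled = false; }
static void unlock_irqrestore(bool flags) { irqs_enabled = flags; }
static void lock_irq(void)                { irqs_enabled = false; }
static void unlock_irq(void)              { irqs_enabled = true; }

int main(void)
{
        bool flags;

        /* from process context both variants leave interrupts enabled... */
        lock_irq();
        unlock_irq();
        assert(irqs_enabled);

        /* ...but only irqsave/irqrestore is correct if they were off */
        irqs_enabled = false;
        lock_irqsave(&flags);
        unlock_irqrestore(flags);
        assert(!irqs_enabled);          /* prior state preserved */
        printf("ok\n");
        return 0;
}
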
@@ -1389,6 +1393,7 @@ void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone)
1389 WARN_ONCE(1, "Unsupported platform\n"); 1393 WARN_ONCE(1, "Unsupported platform\n");
1390 case 7: 1394 case 7:
1391 case 8: 1395 case 8:
1396 case 9:
1392 instdone[0] = I915_READ(GEN7_INSTDONE_1); 1397 instdone[0] = I915_READ(GEN7_INSTDONE_1);
1393 instdone[1] = I915_READ(GEN7_SC_INSTDONE); 1398 instdone[1] = I915_READ(GEN7_SC_INSTDONE);
1394 instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE); 1399 instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
index 2e0613e26251..176de6322e4d 100644
--- a/drivers/gpu/drm/i915/i915_ioc32.c
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
@@ -189,7 +189,6 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
189 [DRM_I915_ALLOC] = compat_i915_alloc 189 [DRM_I915_ALLOC] = compat_i915_alloc
190}; 190};
191 191
192#ifdef CONFIG_COMPAT
193/** 192/**
194 * Called whenever a 32-bit process running under a 64-bit kernel 193 * Called whenever a 32-bit process running under a 64-bit kernel
195 * performs an ioctl on /dev/dri/card<n>. 194 * performs an ioctl on /dev/dri/card<n>.
@@ -218,4 +217,3 @@ long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
218 217
219 return ret; 218 return ret;
220} 219}
221#endif
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index f66392b6e287..981834b0f9b6 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -37,6 +37,14 @@
37#include "i915_trace.h" 37#include "i915_trace.h"
38#include "intel_drv.h" 38#include "intel_drv.h"
39 39
40/**
41 * DOC: interrupt handling
42 *
 43 * These functions provide the basic support for enabling and disabling
 44 * interrupt handling. There's a lot more functionality in i915_irq.c
45 * and related files, but that will be described in separate chapters.
46 */
47
40static const u32 hpd_ibx[] = { 48static const u32 hpd_ibx[] = {
41 [HPD_CRT] = SDE_CRT_HOTPLUG, 49 [HPD_CRT] = SDE_CRT_HOTPLUG,
42 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG, 50 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
@@ -118,20 +126,22 @@ static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
118 126
119#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \ 127#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
120 GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \ 128 GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
121 I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
122 I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \ 129 I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
123 POSTING_READ(GEN8_##type##_IER(which)); \ 130 I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
131 POSTING_READ(GEN8_##type##_IMR(which)); \
124} while (0) 132} while (0)
125 133
126#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \ 134#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
127 GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \ 135 GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
128 I915_WRITE(type##IMR, (imr_val)); \
129 I915_WRITE(type##IER, (ier_val)); \ 136 I915_WRITE(type##IER, (ier_val)); \
130 POSTING_READ(type##IER); \ 137 I915_WRITE(type##IMR, (imr_val)); \
138 POSTING_READ(type##IMR); \
131} while (0) 139} while (0)
132 140
141static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);
142
133/* For display hotplug interrupt */ 143/* For display hotplug interrupt */
134static void 144void
135ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask) 145ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
136{ 146{
137 assert_spin_locked(&dev_priv->irq_lock); 147 assert_spin_locked(&dev_priv->irq_lock);
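
A minimal model of the init-ordering change in the hunk above: enable the
interrupt sources (IER) before unmasking them (IMR), and put the posting read
on the last register written so both writes have reached the hardware before
an interrupt can fire. The register array below is a stand-in for MMIO.

#include <stdint.h>
#include <stdio.h>

enum { IER, IMR };
static uint32_t regs[2];

static void write_reg(int r, uint32_t v) { regs[r] = v; }
static uint32_t posting_read(int r)      { return regs[r]; }

static void irq_init(uint32_t imr_val, uint32_t ier_val)
{
        write_reg(IER, ier_val);        /* enable sources first */
        write_reg(IMR, imr_val);        /* then unmask */
        (void)posting_read(IMR);        /* flush the last write */
}

int main(void)
{
        irq_init(~0x1u, 0x1u);
        printf("IER=%#x IMR=%#x\n", regs[IER], regs[IMR]);
        return 0;
}
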
@@ -146,7 +156,7 @@ ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
146 } 156 }
147} 157}
148 158
149static void 159void
150ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask) 160ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
151{ 161{
152 assert_spin_locked(&dev_priv->irq_lock); 162 assert_spin_locked(&dev_priv->irq_lock);
@@ -192,71 +202,28 @@ void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
192 ilk_update_gt_irq(dev_priv, mask, 0); 202 ilk_update_gt_irq(dev_priv, mask, 0);
193} 203}
194 204
195/** 205static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
196 * snb_update_pm_irq - update GEN6_PMIMR
197 * @dev_priv: driver private
198 * @interrupt_mask: mask of interrupt bits to update
199 * @enabled_irq_mask: mask of interrupt bits to enable
200 */
201static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
202 uint32_t interrupt_mask,
203 uint32_t enabled_irq_mask)
204{
205 uint32_t new_val;
206
207 assert_spin_locked(&dev_priv->irq_lock);
208
209 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
210 return;
211
212 new_val = dev_priv->pm_irq_mask;
213 new_val &= ~interrupt_mask;
214 new_val |= (~enabled_irq_mask & interrupt_mask);
215
216 if (new_val != dev_priv->pm_irq_mask) {
217 dev_priv->pm_irq_mask = new_val;
218 I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
219 POSTING_READ(GEN6_PMIMR);
220 }
221}
222
223void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
224{ 206{
225 snb_update_pm_irq(dev_priv, mask, mask); 207 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
226} 208}
227 209
228void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) 210static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
229{ 211{
230 snb_update_pm_irq(dev_priv, mask, 0); 212 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
231} 213}
232 214
233static bool ivb_can_enable_err_int(struct drm_device *dev) 215static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
234{ 216{
235 struct drm_i915_private *dev_priv = dev->dev_private; 217 return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
236 struct intel_crtc *crtc;
237 enum pipe pipe;
238
239 assert_spin_locked(&dev_priv->irq_lock);
240
241 for_each_pipe(dev_priv, pipe) {
242 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
243
244 if (crtc->cpu_fifo_underrun_disabled)
245 return false;
246 }
247
248 return true;
249} 218}
250 219
251/** 220/**
252 * bdw_update_pm_irq - update GT interrupt 2 221 * snb_update_pm_irq - update GEN6_PMIMR
253 * @dev_priv: driver private 222 * @dev_priv: driver private
254 * @interrupt_mask: mask of interrupt bits to update 223 * @interrupt_mask: mask of interrupt bits to update
255 * @enabled_irq_mask: mask of interrupt bits to enable 224 * @enabled_irq_mask: mask of interrupt bits to enable
256 *
257 * Copied from the snb function, updated with relevant register offsets
258 */ 225 */
259static void bdw_update_pm_irq(struct drm_i915_private *dev_priv, 226static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
260 uint32_t interrupt_mask, 227 uint32_t interrupt_mask,
261 uint32_t enabled_irq_mask) 228 uint32_t enabled_irq_mask)
262{ 229{
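
A sketch of the unification the hunk above performs: instead of a copy-pasted
bdw_update_pm_irq(), small helpers pick the per-generation register and a
single update routine serves both gen6 and gen8+. The register names below are
tokens, not real MMIO offsets.

#include <stdio.h>

enum pm_reg { REG_GEN6_PMIMR, REG_GEN8_GT_IMR2 };

struct dev { int gen; };

static enum pm_reg pm_imr(const struct dev *d)
{
        return d->gen >= 8 ? REG_GEN8_GT_IMR2 : REG_GEN6_PMIMR;
}

static void update_pm_irq(const struct dev *d, unsigned int mask)
{
        printf("gen%d: write %s = %#x\n", d->gen,
               pm_imr(d) == REG_GEN8_GT_IMR2 ? "GEN8_GT_IMR(2)" : "GEN6_PMIMR",
               mask);
}

int main(void)
{
        struct dev snb = { 6 }, bdw = { 8 };

        update_pm_irq(&snb, 0xff);
        update_pm_irq(&bdw, 0xff);
        return 0;
}
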
@@ -264,144 +231,87 @@ static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
264 231
265 assert_spin_locked(&dev_priv->irq_lock); 232 assert_spin_locked(&dev_priv->irq_lock);
266 233
267 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
268 return;
269
270 new_val = dev_priv->pm_irq_mask; 234 new_val = dev_priv->pm_irq_mask;
271 new_val &= ~interrupt_mask; 235 new_val &= ~interrupt_mask;
272 new_val |= (~enabled_irq_mask & interrupt_mask); 236 new_val |= (~enabled_irq_mask & interrupt_mask);
273 237
274 if (new_val != dev_priv->pm_irq_mask) { 238 if (new_val != dev_priv->pm_irq_mask) {
275 dev_priv->pm_irq_mask = new_val; 239 dev_priv->pm_irq_mask = new_val;
276 I915_WRITE(GEN8_GT_IMR(2), dev_priv->pm_irq_mask); 240 I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
277 POSTING_READ(GEN8_GT_IMR(2)); 241 POSTING_READ(gen6_pm_imr(dev_priv));
278 } 242 }
279} 243}
280 244
281void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) 245void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
282{ 246{
283 bdw_update_pm_irq(dev_priv, mask, mask); 247 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
284} 248 return;
285 249
286void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask) 250 snb_update_pm_irq(dev_priv, mask, mask);
287{
288 bdw_update_pm_irq(dev_priv, mask, 0);
289} 251}
290 252
291static bool cpt_can_enable_serr_int(struct drm_device *dev) 253static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
254 uint32_t mask)
292{ 255{
293 struct drm_i915_private *dev_priv = dev->dev_private; 256 snb_update_pm_irq(dev_priv, mask, 0);
294 enum pipe pipe;
295 struct intel_crtc *crtc;
296
297 assert_spin_locked(&dev_priv->irq_lock);
298
299 for_each_pipe(dev_priv, pipe) {
300 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
301
302 if (crtc->pch_fifo_underrun_disabled)
303 return false;
304 }
305
306 return true;
307} 257}
308 258
309void i9xx_check_fifo_underruns(struct drm_device *dev) 259void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
310{ 260{
311 struct drm_i915_private *dev_priv = dev->dev_private; 261 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
312 struct intel_crtc *crtc; 262 return;
313 unsigned long flags;
314
315 spin_lock_irqsave(&dev_priv->irq_lock, flags);
316
317 for_each_intel_crtc(dev, crtc) {
318 u32 reg = PIPESTAT(crtc->pipe);
319 u32 pipestat;
320
321 if (crtc->cpu_fifo_underrun_disabled)
322 continue;
323
324 pipestat = I915_READ(reg) & 0xffff0000;
325 if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
326 continue;
327
328 I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
329 POSTING_READ(reg);
330
331 DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
332 }
333 263
334 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 264 __gen6_disable_pm_irq(dev_priv, mask);
335} 265}
336 266
337static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev, 267void gen6_reset_rps_interrupts(struct drm_device *dev)
338 enum pipe pipe,
339 bool enable, bool old)
340{ 268{
341 struct drm_i915_private *dev_priv = dev->dev_private; 269 struct drm_i915_private *dev_priv = dev->dev_private;
342 u32 reg = PIPESTAT(pipe); 270 uint32_t reg = gen6_pm_iir(dev_priv);
343 u32 pipestat = I915_READ(reg) & 0xffff0000;
344
345 assert_spin_locked(&dev_priv->irq_lock);
346 271
347 if (enable) { 272 spin_lock_irq(&dev_priv->irq_lock);
348 I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS); 273 I915_WRITE(reg, dev_priv->pm_rps_events);
349 POSTING_READ(reg); 274 I915_WRITE(reg, dev_priv->pm_rps_events);
350 } else { 275 POSTING_READ(reg);
351 if (old && pipestat & PIPE_FIFO_UNDERRUN_STATUS) 276 spin_unlock_irq(&dev_priv->irq_lock);
352 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
353 }
354} 277}
355 278
356static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev, 279void gen6_enable_rps_interrupts(struct drm_device *dev)
357 enum pipe pipe, bool enable)
358{ 280{
359 struct drm_i915_private *dev_priv = dev->dev_private; 281 struct drm_i915_private *dev_priv = dev->dev_private;
360 uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
361 DE_PIPEB_FIFO_UNDERRUN;
362 282
363 if (enable) 283 spin_lock_irq(&dev_priv->irq_lock);
364 ironlake_enable_display_irq(dev_priv, bit); 284 WARN_ON(dev_priv->rps.pm_iir);
365 else 285 WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
366 ironlake_disable_display_irq(dev_priv, bit); 286 dev_priv->rps.interrupts_enabled = true;
287 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
288 spin_unlock_irq(&dev_priv->irq_lock);
367} 289}
368 290
369static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev, 291void gen6_disable_rps_interrupts(struct drm_device *dev)
370 enum pipe pipe,
371 bool enable, bool old)
372{ 292{
373 struct drm_i915_private *dev_priv = dev->dev_private; 293 struct drm_i915_private *dev_priv = dev->dev_private;
374 if (enable) {
375 I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
376 294
377 if (!ivb_can_enable_err_int(dev)) 295 spin_lock_irq(&dev_priv->irq_lock);
378 return; 296 dev_priv->rps.interrupts_enabled = false;
297 spin_unlock_irq(&dev_priv->irq_lock);
379 298
380 ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB); 299 cancel_work_sync(&dev_priv->rps.work);
381 } else {
382 ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
383 300
384 if (old && 301 spin_lock_irq(&dev_priv->irq_lock);
385 I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
386 DRM_ERROR("uncleared fifo underrun on pipe %c\n",
387 pipe_name(pipe));
388 }
389 }
390}
391 302
392static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev, 303 I915_WRITE(GEN6_PMINTRMSK, INTEL_INFO(dev_priv)->gen >= 8 ?
393 enum pipe pipe, bool enable) 304 ~GEN8_PMINTR_REDIRECT_TO_NON_DISP : ~0);
394{
395 struct drm_i915_private *dev_priv = dev->dev_private;
396 305
397 assert_spin_locked(&dev_priv->irq_lock); 306 __gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
307 I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
308 ~dev_priv->pm_rps_events);
309 I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);
310 I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);
398 311
399 if (enable) 312 dev_priv->rps.pm_iir = 0;
400 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN; 313
401 else 314 spin_unlock_irq(&dev_priv->irq_lock);
402 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
403 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
404 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
405} 315}
406 316
407/** 317/**
@@ -410,9 +320,9 @@ static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
410 * @interrupt_mask: mask of interrupt bits to update 320 * @interrupt_mask: mask of interrupt bits to update
411 * @enabled_irq_mask: mask of interrupt bits to enable 321 * @enabled_irq_mask: mask of interrupt bits to enable
412 */ 322 */
413static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv, 323void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
414 uint32_t interrupt_mask, 324 uint32_t interrupt_mask,
415 uint32_t enabled_irq_mask) 325 uint32_t enabled_irq_mask)
416{ 326{
417 uint32_t sdeimr = I915_READ(SDEIMR); 327 uint32_t sdeimr = I915_READ(SDEIMR);
418 sdeimr &= ~interrupt_mask; 328 sdeimr &= ~interrupt_mask;
@@ -426,160 +336,6 @@ static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
426 I915_WRITE(SDEIMR, sdeimr); 336 I915_WRITE(SDEIMR, sdeimr);
427 POSTING_READ(SDEIMR); 337 POSTING_READ(SDEIMR);
428} 338}
429#define ibx_enable_display_interrupt(dev_priv, bits) \
430 ibx_display_interrupt_update((dev_priv), (bits), (bits))
431#define ibx_disable_display_interrupt(dev_priv, bits) \
432 ibx_display_interrupt_update((dev_priv), (bits), 0)
433
434static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
435 enum transcoder pch_transcoder,
436 bool enable)
437{
438 struct drm_i915_private *dev_priv = dev->dev_private;
439 uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
440 SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
441
442 if (enable)
443 ibx_enable_display_interrupt(dev_priv, bit);
444 else
445 ibx_disable_display_interrupt(dev_priv, bit);
446}
447
448static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
449 enum transcoder pch_transcoder,
450 bool enable, bool old)
451{
452 struct drm_i915_private *dev_priv = dev->dev_private;
453
454 if (enable) {
455 I915_WRITE(SERR_INT,
456 SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
457
458 if (!cpt_can_enable_serr_int(dev))
459 return;
460
461 ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
462 } else {
463 ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
464
465 if (old && I915_READ(SERR_INT) &
466 SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
467 DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
468 transcoder_name(pch_transcoder));
469 }
470 }
471}
472
473/**
474 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
475 * @dev: drm device
476 * @pipe: pipe
477 * @enable: true if we want to report FIFO underrun errors, false otherwise
478 *
479 * This function makes us disable or enable CPU fifo underruns for a specific
480 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
481 * reporting for one pipe may also disable all the other CPU error interruts for
482 * the other pipes, due to the fact that there's just one interrupt mask/enable
483 * bit for all the pipes.
484 *
485 * Returns the previous state of underrun reporting.
486 */
487static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
488 enum pipe pipe, bool enable)
489{
490 struct drm_i915_private *dev_priv = dev->dev_private;
491 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
492 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
493 bool old;
494
495 assert_spin_locked(&dev_priv->irq_lock);
496
497 old = !intel_crtc->cpu_fifo_underrun_disabled;
498 intel_crtc->cpu_fifo_underrun_disabled = !enable;
499
500 if (HAS_GMCH_DISPLAY(dev))
501 i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
502 else if (IS_GEN5(dev) || IS_GEN6(dev))
503 ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
504 else if (IS_GEN7(dev))
505 ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
506 else if (IS_GEN8(dev))
507 broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
508
509 return old;
510}
511
512bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
513 enum pipe pipe, bool enable)
514{
515 struct drm_i915_private *dev_priv = dev->dev_private;
516 unsigned long flags;
517 bool ret;
518
519 spin_lock_irqsave(&dev_priv->irq_lock, flags);
520 ret = __intel_set_cpu_fifo_underrun_reporting(dev, pipe, enable);
521 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
522
523 return ret;
524}
525
526static bool __cpu_fifo_underrun_reporting_enabled(struct drm_device *dev,
527 enum pipe pipe)
528{
529 struct drm_i915_private *dev_priv = dev->dev_private;
530 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
531 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
532
533 return !intel_crtc->cpu_fifo_underrun_disabled;
534}
535
536/**
537 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
538 * @dev: drm device
539 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
540 * @enable: true if we want to report FIFO underrun errors, false otherwise
541 *
542 * This function makes us disable or enable PCH fifo underruns for a specific
543 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
544 * underrun reporting for one transcoder may also disable all the other PCH
545 * error interruts for the other transcoders, due to the fact that there's just
546 * one interrupt mask/enable bit for all the transcoders.
547 *
548 * Returns the previous state of underrun reporting.
549 */
550bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
551 enum transcoder pch_transcoder,
552 bool enable)
553{
554 struct drm_i915_private *dev_priv = dev->dev_private;
555 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
556 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
557 unsigned long flags;
558 bool old;
559
560 /*
561 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
562 * has only one pch transcoder A that all pipes can use. To avoid racy
563 * pch transcoder -> pipe lookups from interrupt code simply store the
564 * underrun statistics in crtc A. Since we never expose this anywhere
565 * nor use it outside of the fifo underrun code here using the "wrong"
566 * crtc on LPT won't cause issues.
567 */
568
569 spin_lock_irqsave(&dev_priv->irq_lock, flags);
570
571 old = !intel_crtc->pch_fifo_underrun_disabled;
572 intel_crtc->pch_fifo_underrun_disabled = !enable;
573
574 if (HAS_PCH_IBX(dev))
575 ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
576 else
577 cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable, old);
578
579 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
580 return old;
581}
582
583 339
584static void 340static void
585__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe, 341__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
@@ -589,6 +345,7 @@ __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
589 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; 345 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
590 346
591 assert_spin_locked(&dev_priv->irq_lock); 347 assert_spin_locked(&dev_priv->irq_lock);
348 WARN_ON(!intel_irqs_enabled(dev_priv));
592 349
593 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK || 350 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
594 status_mask & ~PIPESTAT_INT_STATUS_MASK, 351 status_mask & ~PIPESTAT_INT_STATUS_MASK,
@@ -615,6 +372,7 @@ __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
615 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK; 372 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
616 373
617 assert_spin_locked(&dev_priv->irq_lock); 374 assert_spin_locked(&dev_priv->irq_lock);
375 WARN_ON(!intel_irqs_enabled(dev_priv));
618 376
619 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK || 377 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
620 status_mask & ~PIPESTAT_INT_STATUS_MASK, 378 status_mask & ~PIPESTAT_INT_STATUS_MASK,
@@ -694,19 +452,18 @@ i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
694static void i915_enable_asle_pipestat(struct drm_device *dev) 452static void i915_enable_asle_pipestat(struct drm_device *dev)
695{ 453{
696 struct drm_i915_private *dev_priv = dev->dev_private; 454 struct drm_i915_private *dev_priv = dev->dev_private;
697 unsigned long irqflags;
698 455
699 if (!dev_priv->opregion.asle || !IS_MOBILE(dev)) 456 if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
700 return; 457 return;
701 458
702 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 459 spin_lock_irq(&dev_priv->irq_lock);
703 460
704 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS); 461 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
705 if (INTEL_INFO(dev)->gen >= 4) 462 if (INTEL_INFO(dev)->gen >= 4)
706 i915_enable_pipestat(dev_priv, PIPE_A, 463 i915_enable_pipestat(dev_priv, PIPE_A,
707 PIPE_LEGACY_BLC_EVENT_STATUS); 464 PIPE_LEGACY_BLC_EVENT_STATUS);
708 465
709 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 466 spin_unlock_irq(&dev_priv->irq_lock);
710} 467}
711 468
712/** 469/**
@@ -1094,18 +851,17 @@ static void i915_digport_work_func(struct work_struct *work)
1094{ 851{
1095 struct drm_i915_private *dev_priv = 852 struct drm_i915_private *dev_priv =
1096 container_of(work, struct drm_i915_private, dig_port_work); 853 container_of(work, struct drm_i915_private, dig_port_work);
1097 unsigned long irqflags;
1098 u32 long_port_mask, short_port_mask; 854 u32 long_port_mask, short_port_mask;
1099 struct intel_digital_port *intel_dig_port; 855 struct intel_digital_port *intel_dig_port;
1100 int i, ret; 856 int i, ret;
1101 u32 old_bits = 0; 857 u32 old_bits = 0;
1102 858
1103 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 859 spin_lock_irq(&dev_priv->irq_lock);
1104 long_port_mask = dev_priv->long_hpd_port_mask; 860 long_port_mask = dev_priv->long_hpd_port_mask;
1105 dev_priv->long_hpd_port_mask = 0; 861 dev_priv->long_hpd_port_mask = 0;
1106 short_port_mask = dev_priv->short_hpd_port_mask; 862 short_port_mask = dev_priv->short_hpd_port_mask;
1107 dev_priv->short_hpd_port_mask = 0; 863 dev_priv->short_hpd_port_mask = 0;
1108 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 864 spin_unlock_irq(&dev_priv->irq_lock);
1109 865
1110 for (i = 0; i < I915_MAX_PORTS; i++) { 866 for (i = 0; i < I915_MAX_PORTS; i++) {
1111 bool valid = false; 867 bool valid = false;
@@ -1130,9 +886,9 @@ static void i915_digport_work_func(struct work_struct *work)
1130 } 886 }
1131 887
1132 if (old_bits) { 888 if (old_bits) {
1133 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 889 spin_lock_irq(&dev_priv->irq_lock);
1134 dev_priv->hpd_event_bits |= old_bits; 890 dev_priv->hpd_event_bits |= old_bits;
1135 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 891 spin_unlock_irq(&dev_priv->irq_lock);
1136 schedule_work(&dev_priv->hotplug_work); 892 schedule_work(&dev_priv->hotplug_work);
1137 } 893 }
1138} 894}
@@ -1151,7 +907,6 @@ static void i915_hotplug_work_func(struct work_struct *work)
1151 struct intel_connector *intel_connector; 907 struct intel_connector *intel_connector;
1152 struct intel_encoder *intel_encoder; 908 struct intel_encoder *intel_encoder;
1153 struct drm_connector *connector; 909 struct drm_connector *connector;
1154 unsigned long irqflags;
1155 bool hpd_disabled = false; 910 bool hpd_disabled = false;
1156 bool changed = false; 911 bool changed = false;
1157 u32 hpd_event_bits; 912 u32 hpd_event_bits;
@@ -1159,7 +914,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
1159 mutex_lock(&mode_config->mutex); 914 mutex_lock(&mode_config->mutex);
1160 DRM_DEBUG_KMS("running encoder hotplug functions\n"); 915 DRM_DEBUG_KMS("running encoder hotplug functions\n");
1161 916
1162 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 917 spin_lock_irq(&dev_priv->irq_lock);
1163 918
1164 hpd_event_bits = dev_priv->hpd_event_bits; 919 hpd_event_bits = dev_priv->hpd_event_bits;
1165 dev_priv->hpd_event_bits = 0; 920 dev_priv->hpd_event_bits = 0;
@@ -1193,7 +948,7 @@ static void i915_hotplug_work_func(struct work_struct *work)
1193 msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY)); 948 msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
1194 } 949 }
1195 950
1196 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 951 spin_unlock_irq(&dev_priv->irq_lock);
1197 952
1198 list_for_each_entry(connector, &mode_config->connector_list, head) { 953 list_for_each_entry(connector, &mode_config->connector_list, head) {
1199 intel_connector = to_intel_connector(connector); 954 intel_connector = to_intel_connector(connector);
@@ -1260,11 +1015,7 @@ static void notify_ring(struct drm_device *dev,
1260 1015
1261 trace_i915_gem_request_complete(ring); 1016 trace_i915_gem_request_complete(ring);
1262 1017
1263 if (drm_core_check_feature(dev, DRIVER_MODESET))
1264 intel_notify_mmio_flip(ring);
1265
1266 wake_up_all(&ring->irq_queue); 1018 wake_up_all(&ring->irq_queue);
1267 i915_queue_hangcheck(dev);
1268} 1019}
1269 1020
1270static u32 vlv_c0_residency(struct drm_i915_private *dev_priv, 1021static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
@@ -1400,14 +1151,15 @@ static void gen6_pm_rps_work(struct work_struct *work)
1400 int new_delay, adj; 1151 int new_delay, adj;
1401 1152
1402 spin_lock_irq(&dev_priv->irq_lock); 1153 spin_lock_irq(&dev_priv->irq_lock);
 1154 /* Speed up work cancellation while disabling rps interrupts. */
1155 if (!dev_priv->rps.interrupts_enabled) {
1156 spin_unlock_irq(&dev_priv->irq_lock);
1157 return;
1158 }
1403 pm_iir = dev_priv->rps.pm_iir; 1159 pm_iir = dev_priv->rps.pm_iir;
1404 dev_priv->rps.pm_iir = 0; 1160 dev_priv->rps.pm_iir = 0;
1405 if (INTEL_INFO(dev_priv->dev)->gen >= 8) 1161 /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
1406 gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events); 1162 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
1407 else {
1408 /* Make sure not to corrupt PMIMR state used by ringbuffer */
1409 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
1410 }
1411 spin_unlock_irq(&dev_priv->irq_lock); 1163 spin_unlock_irq(&dev_priv->irq_lock);
1412 1164
1413 /* Make sure we didn't queue anything we're not going to process. */ 1165 /* Make sure we didn't queue anything we're not going to process. */
@@ -1488,7 +1240,6 @@ static void ivybridge_parity_work(struct work_struct *work)
1488 u32 error_status, row, bank, subbank; 1240 u32 error_status, row, bank, subbank;
1489 char *parity_event[6]; 1241 char *parity_event[6];
1490 uint32_t misccpctl; 1242 uint32_t misccpctl;
1491 unsigned long flags;
1492 uint8_t slice = 0; 1243 uint8_t slice = 0;
1493 1244
1494 /* We must turn off DOP level clock gating to access the L3 registers. 1245 /* We must turn off DOP level clock gating to access the L3 registers.
@@ -1547,9 +1298,9 @@ static void ivybridge_parity_work(struct work_struct *work)
1547 1298
1548out: 1299out:
1549 WARN_ON(dev_priv->l3_parity.which_slice); 1300 WARN_ON(dev_priv->l3_parity.which_slice);
1550 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1301 spin_lock_irq(&dev_priv->irq_lock);
1551 gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev)); 1302 gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
1552 spin_unlock_irqrestore(&dev_priv->irq_lock, flags); 1303 spin_unlock_irq(&dev_priv->irq_lock);
1553 1304
1554 mutex_unlock(&dev_priv->dev->struct_mutex); 1305 mutex_unlock(&dev_priv->dev->struct_mutex);
1555} 1306}
@@ -1601,28 +1352,13 @@ static void snb_gt_irq_handler(struct drm_device *dev,
1601 1352
1602 if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | 1353 if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
1603 GT_BSD_CS_ERROR_INTERRUPT | 1354 GT_BSD_CS_ERROR_INTERRUPT |
1604 GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) { 1355 GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
1605 i915_handle_error(dev, false, "GT error interrupt 0x%08x", 1356 DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
1606 gt_iir);
1607 }
1608 1357
1609 if (gt_iir & GT_PARITY_ERROR(dev)) 1358 if (gt_iir & GT_PARITY_ERROR(dev))
1610 ivybridge_parity_error_irq_handler(dev, gt_iir); 1359 ivybridge_parity_error_irq_handler(dev, gt_iir);
1611} 1360}
1612 1361
1613static void gen8_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1614{
1615 if ((pm_iir & dev_priv->pm_rps_events) == 0)
1616 return;
1617
1618 spin_lock(&dev_priv->irq_lock);
1619 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
1620 gen8_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1621 spin_unlock(&dev_priv->irq_lock);
1622
1623 queue_work(dev_priv->wq, &dev_priv->rps.work);
1624}
1625
1626static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, 1362static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
1627 struct drm_i915_private *dev_priv, 1363 struct drm_i915_private *dev_priv,
1628 u32 master_ctl) 1364 u32 master_ctl)
@@ -1684,7 +1420,7 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
1684 I915_WRITE(GEN8_GT_IIR(2), 1420 I915_WRITE(GEN8_GT_IIR(2),
1685 tmp & dev_priv->pm_rps_events); 1421 tmp & dev_priv->pm_rps_events);
1686 ret = IRQ_HANDLED; 1422 ret = IRQ_HANDLED;
1687 gen8_rps_irq_handler(dev_priv, tmp); 1423 gen6_rps_irq_handler(dev_priv, tmp);
1688 } else 1424 } else
1689 DRM_ERROR("The master control interrupt lied (PM)!\n"); 1425 DRM_ERROR("The master control interrupt lied (PM)!\n");
1690 } 1426 }
@@ -1898,7 +1634,7 @@ static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1898 1634
1899 if (!pipe_crc->entries) { 1635 if (!pipe_crc->entries) {
1900 spin_unlock(&pipe_crc->lock); 1636 spin_unlock(&pipe_crc->lock);
1901 DRM_ERROR("spurious interrupt\n"); 1637 DRM_DEBUG_KMS("spurious interrupt\n");
1902 return; 1638 return;
1903 } 1639 }
1904 1640
@@ -1984,24 +1720,30 @@ static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1984 * the work queue. */ 1720 * the work queue. */
1985static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) 1721static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1986{ 1722{
1723 /* TODO: RPS on GEN9+ is not supported yet. */
1724 if (WARN_ONCE(INTEL_INFO(dev_priv)->gen >= 9,
1725 "GEN9+: unexpected RPS IRQ\n"))
1726 return;
1727
1987 if (pm_iir & dev_priv->pm_rps_events) { 1728 if (pm_iir & dev_priv->pm_rps_events) {
1988 spin_lock(&dev_priv->irq_lock); 1729 spin_lock(&dev_priv->irq_lock);
1989 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
1990 gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); 1730 gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1731 if (dev_priv->rps.interrupts_enabled) {
1732 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
1733 queue_work(dev_priv->wq, &dev_priv->rps.work);
1734 }
1991 spin_unlock(&dev_priv->irq_lock); 1735 spin_unlock(&dev_priv->irq_lock);
1992
1993 queue_work(dev_priv->wq, &dev_priv->rps.work);
1994 } 1736 }
1995 1737
1738 if (INTEL_INFO(dev_priv)->gen >= 8)
1739 return;
1740
1996 if (HAS_VEBOX(dev_priv->dev)) { 1741 if (HAS_VEBOX(dev_priv->dev)) {
1997 if (pm_iir & PM_VEBOX_USER_INTERRUPT) 1742 if (pm_iir & PM_VEBOX_USER_INTERRUPT)
1998 notify_ring(dev_priv->dev, &dev_priv->ring[VECS]); 1743 notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
1999 1744
2000 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) { 1745 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
2001 i915_handle_error(dev_priv->dev, false, 1746 DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
2002 "VEBOX CS error interrupt 0x%08x",
2003 pm_iir);
2004 }
2005 } 1747 }
2006} 1748}
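With gen8_rps_irq_handler() deleted above, gen6_rps_irq_handler() now serves gen6 through gen8 and forms the other half of the handshake with gen6_pm_rps_work(): the hard-irq side masks the RPS sources, latches the status bits, and queues the worker only while rps.interrupts_enabled is set, so no new work can appear once the disable path has cleared the flag. The core of the pattern, condensed from the hunk above:

    spin_lock(&dev_priv->irq_lock);     /* already in hard-irq context */
    /* Mask further RPS interrupts until the worker re-enables them. */
    gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
    if (dev_priv->rps.interrupts_enabled) {
            dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
            queue_work(dev_priv->wq, &dev_priv->rps.work);
    }
    spin_unlock(&dev_priv->irq_lock);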
2007 1749
@@ -2031,9 +1773,9 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
2031 * we need to be careful that we only handle what we want to 1773 * we need to be careful that we only handle what we want to
2032 * handle. 1774 * handle.
2033 */ 1775 */
2034 mask = 0; 1776
2035 if (__cpu_fifo_underrun_reporting_enabled(dev, pipe)) 1777 /* fifo underruns are filtered in the underrun handler. */
2036 mask |= PIPE_FIFO_UNDERRUN_STATUS; 1778 mask = PIPE_FIFO_UNDERRUN_STATUS;
2037 1779
2038 switch (pipe) { 1780 switch (pipe) {
2039 case PIPE_A: 1781 case PIPE_A:
@@ -2078,9 +1820,8 @@ static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
2078 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 1820 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
2079 i9xx_pipe_crc_irq_handler(dev, pipe); 1821 i9xx_pipe_crc_irq_handler(dev, pipe);
2080 1822
2081 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS && 1823 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
2082 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) 1824 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2083 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
2084 } 1825 }
2085 1826
2086 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) 1827 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
@@ -2247,14 +1988,10 @@ static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
2247 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); 1988 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
2248 1989
2249 if (pch_iir & SDE_TRANSA_FIFO_UNDER) 1990 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
2250 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, 1991 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
2251 false))
2252 DRM_ERROR("PCH transcoder A FIFO underrun\n");
2253 1992
2254 if (pch_iir & SDE_TRANSB_FIFO_UNDER) 1993 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
2255 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, 1994 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
2256 false))
2257 DRM_ERROR("PCH transcoder B FIFO underrun\n");
2258} 1995}
2259 1996
2260static void ivb_err_int_handler(struct drm_device *dev) 1997static void ivb_err_int_handler(struct drm_device *dev)
@@ -2267,12 +2004,8 @@ static void ivb_err_int_handler(struct drm_device *dev)
2267 DRM_ERROR("Poison interrupt\n"); 2004 DRM_ERROR("Poison interrupt\n");
2268 2005
2269 for_each_pipe(dev_priv, pipe) { 2006 for_each_pipe(dev_priv, pipe) {
2270 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) { 2007 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
2271 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, 2008 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2272 false))
2273 DRM_ERROR("Pipe %c FIFO underrun\n",
2274 pipe_name(pipe));
2275 }
2276 2009
2277 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { 2010 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
2278 if (IS_IVYBRIDGE(dev)) 2011 if (IS_IVYBRIDGE(dev))
@@ -2294,19 +2027,13 @@ static void cpt_serr_int_handler(struct drm_device *dev)
2294 DRM_ERROR("PCH poison interrupt\n"); 2027 DRM_ERROR("PCH poison interrupt\n");
2295 2028
2296 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN) 2029 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
2297 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, 2030 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
2298 false))
2299 DRM_ERROR("PCH transcoder A FIFO underrun\n");
2300 2031
2301 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN) 2032 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
2302 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, 2033 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
2303 false))
2304 DRM_ERROR("PCH transcoder B FIFO underrun\n");
2305 2034
2306 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN) 2035 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
2307 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C, 2036 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);
2308 false))
2309 DRM_ERROR("PCH transcoder C FIFO underrun\n");
2310 2037
2311 I915_WRITE(SERR_INT, serr_int); 2038 I915_WRITE(SERR_INT, serr_int);
2312} 2039}
@@ -2372,9 +2099,7 @@ static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
2372 intel_check_page_flip(dev, pipe); 2099 intel_check_page_flip(dev, pipe);
2373 2100
2374 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) 2101 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2375 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) 2102 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2376 DRM_ERROR("Pipe %c FIFO underrun\n",
2377 pipe_name(pipe));
2378 2103
2379 if (de_iir & DE_PIPE_CRC_DONE(pipe)) 2104 if (de_iir & DE_PIPE_CRC_DONE(pipe))
2380 i9xx_pipe_crc_irq_handler(dev, pipe); 2105 i9xx_pipe_crc_irq_handler(dev, pipe);
@@ -2524,6 +2249,11 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
2524 irqreturn_t ret = IRQ_NONE; 2249 irqreturn_t ret = IRQ_NONE;
2525 uint32_t tmp = 0; 2250 uint32_t tmp = 0;
2526 enum pipe pipe; 2251 enum pipe pipe;
2252 u32 aux_mask = GEN8_AUX_CHANNEL_A;
2253
2254 if (IS_GEN9(dev))
2255 aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
2256 GEN9_AUX_CHANNEL_D;
2527 2257
2528 master_ctl = I915_READ(GEN8_MASTER_IRQ); 2258 master_ctl = I915_READ(GEN8_MASTER_IRQ);
2529 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; 2259 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
@@ -2556,7 +2286,8 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
2556 if (tmp) { 2286 if (tmp) {
2557 I915_WRITE(GEN8_DE_PORT_IIR, tmp); 2287 I915_WRITE(GEN8_DE_PORT_IIR, tmp);
2558 ret = IRQ_HANDLED; 2288 ret = IRQ_HANDLED;
2559 if (tmp & GEN8_AUX_CHANNEL_A) 2289
2290 if (tmp & aux_mask)
2560 dp_aux_irq_handler(dev); 2291 dp_aux_irq_handler(dev);
2561 else 2292 else
2562 DRM_ERROR("Unexpected DE Port interrupt\n"); 2293 DRM_ERROR("Unexpected DE Port interrupt\n");
@@ -2566,7 +2297,7 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
2566 } 2297 }
2567 2298
2568 for_each_pipe(dev_priv, pipe) { 2299 for_each_pipe(dev_priv, pipe) {
2569 uint32_t pipe_iir; 2300 uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
2570 2301
2571 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) 2302 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2572 continue; 2303 continue;
@@ -2575,11 +2306,17 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
2575 if (pipe_iir) { 2306 if (pipe_iir) {
2576 ret = IRQ_HANDLED; 2307 ret = IRQ_HANDLED;
2577 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir); 2308 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
2309
2578 if (pipe_iir & GEN8_PIPE_VBLANK && 2310 if (pipe_iir & GEN8_PIPE_VBLANK &&
2579 intel_pipe_handle_vblank(dev, pipe)) 2311 intel_pipe_handle_vblank(dev, pipe))
2580 intel_check_page_flip(dev, pipe); 2312 intel_check_page_flip(dev, pipe);
2581 2313
2582 if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) { 2314 if (IS_GEN9(dev))
2315 flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
2316 else
2317 flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
2318
2319 if (flip_done) {
2583 intel_prepare_page_flip(dev, pipe); 2320 intel_prepare_page_flip(dev, pipe);
2584 intel_finish_page_flip_plane(dev, pipe); 2321 intel_finish_page_flip_plane(dev, pipe);
2585 } 2322 }
@@ -2587,18 +2324,20 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
2587 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE) 2324 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
2588 hsw_pipe_crc_irq_handler(dev, pipe); 2325 hsw_pipe_crc_irq_handler(dev, pipe);
2589 2326
2590 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) { 2327 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
2591 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, 2328 intel_cpu_fifo_underrun_irq_handler(dev_priv,
2592 false)) 2329 pipe);
2593 DRM_ERROR("Pipe %c FIFO underrun\n", 2330
2594 pipe_name(pipe));
2595 }
2596 2331
2597 if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) { 2332 if (IS_GEN9(dev))
2333 fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2334 else
2335 fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2336
2337 if (fault_errors)
2598 DRM_ERROR("Fault errors on pipe %c: 0x%08x\n", 2338 DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
2599 pipe_name(pipe), 2339 pipe_name(pipe),
2600 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS); 2340 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
2601 }
2602 } else 2341 } else
2603 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); 2342 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2604 } 2343 }
@@ -2697,6 +2436,9 @@ static void i915_error_work_func(struct work_struct *work)
2697 * simulated reset via debugs, so get an RPM reference. 2436 * simulated reset via debugs, so get an RPM reference.
2698 */ 2437 */
2699 intel_runtime_pm_get(dev_priv); 2438 intel_runtime_pm_get(dev_priv);
2439
2440 intel_prepare_reset(dev);
2441
2700 /* 2442 /*
2701 * All state reset _must_ be completed before we update the 2443 * All state reset _must_ be completed before we update the
2702 * reset counter, for otherwise waiters might miss the reset 2444 * reset counter, for otherwise waiters might miss the reset
@@ -2705,7 +2447,7 @@ static void i915_error_work_func(struct work_struct *work)
2705 */ 2447 */
2706 ret = i915_reset(dev); 2448 ret = i915_reset(dev);
2707 2449
2708 intel_display_handle_reset(dev); 2450 intel_finish_reset(dev);
2709 2451
2710 intel_runtime_pm_put(dev_priv); 2452 intel_runtime_pm_put(dev_priv);
2711 2453
@@ -3330,10 +3072,15 @@ static void i915_hangcheck_elapsed(unsigned long data)
3330void i915_queue_hangcheck(struct drm_device *dev) 3072void i915_queue_hangcheck(struct drm_device *dev)
3331{ 3073{
3332 struct drm_i915_private *dev_priv = dev->dev_private; 3074 struct drm_i915_private *dev_priv = dev->dev_private;
3075 struct timer_list *timer = &dev_priv->gpu_error.hangcheck_timer;
3076
3333 if (!i915.enable_hangcheck) 3077 if (!i915.enable_hangcheck)
3334 return; 3078 return;
3335 3079
3336 mod_timer(&dev_priv->gpu_error.hangcheck_timer, 3080 /* Don't continually defer the hangcheck, but make sure it is active */
3081 if (timer_pending(timer))
3082 return;
3083 mod_timer(timer,
3337 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); 3084 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
3338} 3085}
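The timer_pending() guard changes the semantics of i915_queue_hangcheck(): previously every call re-armed the timer, so a steady stream of submissions kept pushing the deadline out and the check could be deferred indefinitely. A minimal contrast of the two behaviours:

    /* Before: every caller moves the deadline, so frequent callers
     * can defer the hangcheck forever. */
    mod_timer(timer, round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));

    /* After: arm only when idle; a pending timer keeps its deadline. */
    if (timer_pending(timer))
            return;
    mod_timer(timer, round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));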
3339 3086
@@ -3396,10 +3143,22 @@ static void ironlake_irq_reset(struct drm_device *dev)
3396 ibx_irq_reset(dev); 3143 ibx_irq_reset(dev);
3397} 3144}
3398 3145
3146static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
3147{
3148 enum pipe pipe;
3149
3150 I915_WRITE(PORT_HOTPLUG_EN, 0);
3151 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3152
3153 for_each_pipe(dev_priv, pipe)
3154 I915_WRITE(PIPESTAT(pipe), 0xffff);
3155
3156 GEN5_IRQ_RESET(VLV_);
3157}
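vlv_display_irq_reset() folds the display-side teardown that valleyview_irq_preinstall() and cherryview_irq_preinstall() used to duplicate into one helper, finishing with GEN5_IRQ_RESET(VLV_). As a sketch of what that reset idiom amounts to, assuming the macro shape defined earlier in i915_irq.c (mask everything, disable delivery, then clear the sticky status twice with posting reads):

    I915_WRITE(VLV_IMR, 0xffffffff);    /* mask every source */
    POSTING_READ(VLV_IMR);
    I915_WRITE(VLV_IER, 0);             /* disable delivery */
    I915_WRITE(VLV_IIR, 0xffffffff);    /* clear latched status... */
    POSTING_READ(VLV_IIR);
    I915_WRITE(VLV_IIR, 0xffffffff);    /* ...twice, for any queued event */
    POSTING_READ(VLV_IIR);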
3158
3399static void valleyview_irq_preinstall(struct drm_device *dev) 3159static void valleyview_irq_preinstall(struct drm_device *dev)
3400{ 3160{
3401 struct drm_i915_private *dev_priv = dev->dev_private; 3161 struct drm_i915_private *dev_priv = dev->dev_private;
3402 int pipe;
3403 3162
3404 /* VLV magic */ 3163 /* VLV magic */
3405 I915_WRITE(VLV_IMR, 0); 3164 I915_WRITE(VLV_IMR, 0);
@@ -3407,22 +3166,11 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
3407 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); 3166 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
3408 I915_WRITE(RING_IMR(BLT_RING_BASE), 0); 3167 I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
3409 3168
3410 /* and GT */
3411 I915_WRITE(GTIIR, I915_READ(GTIIR));
3412 I915_WRITE(GTIIR, I915_READ(GTIIR));
3413
3414 gen5_gt_irq_reset(dev); 3169 gen5_gt_irq_reset(dev);
3415 3170
3416 I915_WRITE(DPINVGTT, 0xff); 3171 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3417 3172
3418 I915_WRITE(PORT_HOTPLUG_EN, 0); 3173 vlv_display_irq_reset(dev_priv);
3419 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3420 for_each_pipe(dev_priv, pipe)
3421 I915_WRITE(PIPESTAT(pipe), 0xffff);
3422 I915_WRITE(VLV_IIR, 0xffffffff);
3423 I915_WRITE(VLV_IMR, 0xffffffff);
3424 I915_WRITE(VLV_IER, 0x0);
3425 POSTING_READ(VLV_IER);
3426} 3174}
3427 3175
3428static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv) 3176static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
@@ -3444,8 +3192,8 @@ static void gen8_irq_reset(struct drm_device *dev)
3444 gen8_gt_irq_reset(dev_priv); 3192 gen8_gt_irq_reset(dev_priv);
3445 3193
3446 for_each_pipe(dev_priv, pipe) 3194 for_each_pipe(dev_priv, pipe)
3447 if (intel_display_power_enabled(dev_priv, 3195 if (intel_display_power_is_enabled(dev_priv,
3448 POWER_DOMAIN_PIPE(pipe))) 3196 POWER_DOMAIN_PIPE(pipe)))
3449 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); 3197 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3450 3198
3451 GEN5_IRQ_RESET(GEN8_DE_PORT_); 3199 GEN5_IRQ_RESET(GEN8_DE_PORT_);
@@ -3457,21 +3205,19 @@ static void gen8_irq_reset(struct drm_device *dev)
3457 3205
3458void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv) 3206void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
3459{ 3207{
3460 unsigned long irqflags;
3461 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; 3208 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
3462 3209
3463 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3210 spin_lock_irq(&dev_priv->irq_lock);
3464 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B], 3211 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
3465 ~dev_priv->de_irq_mask[PIPE_B] | extra_ier); 3212 ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
3466 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C], 3213 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
3467 ~dev_priv->de_irq_mask[PIPE_C] | extra_ier); 3214 ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
3468 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3215 spin_unlock_irq(&dev_priv->irq_lock);
3469} 3216}
3470 3217
3471static void cherryview_irq_preinstall(struct drm_device *dev) 3218static void cherryview_irq_preinstall(struct drm_device *dev)
3472{ 3219{
3473 struct drm_i915_private *dev_priv = dev->dev_private; 3220 struct drm_i915_private *dev_priv = dev->dev_private;
3474 int pipe;
3475 3221
3476 I915_WRITE(GEN8_MASTER_IRQ, 0); 3222 I915_WRITE(GEN8_MASTER_IRQ, 0);
3477 POSTING_READ(GEN8_MASTER_IRQ); 3223 POSTING_READ(GEN8_MASTER_IRQ);
@@ -3480,20 +3226,9 @@ static void cherryview_irq_preinstall(struct drm_device *dev)
3480 3226
3481 GEN5_IRQ_RESET(GEN8_PCU_); 3227 GEN5_IRQ_RESET(GEN8_PCU_);
3482 3228
3483 POSTING_READ(GEN8_PCU_IIR);
3484
3485 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV); 3229 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3486 3230
3487 I915_WRITE(PORT_HOTPLUG_EN, 0); 3231 vlv_display_irq_reset(dev_priv);
3488 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3489
3490 for_each_pipe(dev_priv, pipe)
3491 I915_WRITE(PIPESTAT(pipe), 0xffff);
3492
3493 I915_WRITE(VLV_IMR, 0xffffffff);
3494 I915_WRITE(VLV_IER, 0x0);
3495 I915_WRITE(VLV_IIR, 0xffffffff);
3496 POSTING_READ(VLV_IIR);
3497} 3232}
3498 3233
3499static void ibx_hpd_irq_setup(struct drm_device *dev) 3234static void ibx_hpd_irq_setup(struct drm_device *dev)
@@ -3584,7 +3319,6 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
3584 3319
3585static int ironlake_irq_postinstall(struct drm_device *dev) 3320static int ironlake_irq_postinstall(struct drm_device *dev)
3586{ 3321{
3587 unsigned long irqflags;
3588 struct drm_i915_private *dev_priv = dev->dev_private; 3322 struct drm_i915_private *dev_priv = dev->dev_private;
3589 u32 display_mask, extra_mask; 3323 u32 display_mask, extra_mask;
3590 3324
@@ -3623,9 +3357,9 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
3623 * spinlocking not required here for correctness since interrupt 3357 * spinlocking not required here for correctness since interrupt
3624 * setup is guaranteed to run in single-threaded context. But we 3358 * setup is guaranteed to run in single-threaded context. But we
3625 * need it to make the assert_spin_locked happy. */ 3359 * need it to make the assert_spin_locked happy. */
3626 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3360 spin_lock_irq(&dev_priv->irq_lock);
3627 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); 3361 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
3628 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3362 spin_unlock_irq(&dev_priv->irq_lock);
3629 } 3363 }
3630 3364
3631 return 0; 3365 return 0;
@@ -3635,45 +3369,51 @@ static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
3635{ 3369{
3636 u32 pipestat_mask; 3370 u32 pipestat_mask;
3637 u32 iir_mask; 3371 u32 iir_mask;
3372 enum pipe pipe;
3638 3373
3639 pipestat_mask = PIPESTAT_INT_STATUS_MASK | 3374 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3640 PIPE_FIFO_UNDERRUN_STATUS; 3375 PIPE_FIFO_UNDERRUN_STATUS;
3641 3376
3642 I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask); 3377 for_each_pipe(dev_priv, pipe)
3643 I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask); 3378 I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3644 POSTING_READ(PIPESTAT(PIPE_A)); 3379 POSTING_READ(PIPESTAT(PIPE_A));
3645 3380
3646 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | 3381 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3647 PIPE_CRC_DONE_INTERRUPT_STATUS; 3382 PIPE_CRC_DONE_INTERRUPT_STATUS;
3648 3383
3649 i915_enable_pipestat(dev_priv, PIPE_A, pipestat_mask | 3384 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3650 PIPE_GMBUS_INTERRUPT_STATUS); 3385 for_each_pipe(dev_priv, pipe)
3651 i915_enable_pipestat(dev_priv, PIPE_B, pipestat_mask); 3386 i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
3652 3387
3653 iir_mask = I915_DISPLAY_PORT_INTERRUPT | 3388 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3654 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3389 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3655 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 3390 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3391 if (IS_CHERRYVIEW(dev_priv))
3392 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3656 dev_priv->irq_mask &= ~iir_mask; 3393 dev_priv->irq_mask &= ~iir_mask;
3657 3394
3658 I915_WRITE(VLV_IIR, iir_mask); 3395 I915_WRITE(VLV_IIR, iir_mask);
3659 I915_WRITE(VLV_IIR, iir_mask); 3396 I915_WRITE(VLV_IIR, iir_mask);
3660 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3661 I915_WRITE(VLV_IER, ~dev_priv->irq_mask); 3397 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3662 POSTING_READ(VLV_IER); 3398 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3399 POSTING_READ(VLV_IMR);
3663} 3400}
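The reordered tail of valleyview_display_irqs_install() makes the enable sequence the exact mirror of the teardown in valleyview_display_irqs_uninstall() below, which now masks in IMR first, trims IER, and only then clears IIR. Condensed, the enable order is:

    I915_WRITE(VLV_IIR, iir_mask);              /* clear stale status... */
    I915_WRITE(VLV_IIR, iir_mask);              /* ...twice, as in the reset path */
    I915_WRITE(VLV_IER, ~dev_priv->irq_mask);   /* enable delivery */
    I915_WRITE(VLV_IMR, dev_priv->irq_mask);    /* unmask last */
    POSTING_READ(VLV_IMR);                      /* flush the posted writes */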
3664 3401
3665static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv) 3402static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
3666{ 3403{
3667 u32 pipestat_mask; 3404 u32 pipestat_mask;
3668 u32 iir_mask; 3405 u32 iir_mask;
3406 enum pipe pipe;
3669 3407
3670 iir_mask = I915_DISPLAY_PORT_INTERRUPT | 3408 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3671 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | 3409 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3672 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; 3410 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3411 if (IS_CHERRYVIEW(dev_priv))
3412 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3673 3413
3674 dev_priv->irq_mask |= iir_mask; 3414 dev_priv->irq_mask |= iir_mask;
3675 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3676 I915_WRITE(VLV_IMR, dev_priv->irq_mask); 3415 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3416 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3677 I915_WRITE(VLV_IIR, iir_mask); 3417 I915_WRITE(VLV_IIR, iir_mask);
3678 I915_WRITE(VLV_IIR, iir_mask); 3418 I915_WRITE(VLV_IIR, iir_mask);
3679 POSTING_READ(VLV_IIR); 3419 POSTING_READ(VLV_IIR);
@@ -3681,14 +3421,15 @@ static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
3681 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | 3421 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3682 PIPE_CRC_DONE_INTERRUPT_STATUS; 3422 PIPE_CRC_DONE_INTERRUPT_STATUS;
3683 3423
3684 i915_disable_pipestat(dev_priv, PIPE_A, pipestat_mask | 3424 i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3685 PIPE_GMBUS_INTERRUPT_STATUS); 3425 for_each_pipe(dev_priv, pipe)
3686 i915_disable_pipestat(dev_priv, PIPE_B, pipestat_mask); 3426 i915_disable_pipestat(dev_priv, pipe, pipestat_mask);
3687 3427
3688 pipestat_mask = PIPESTAT_INT_STATUS_MASK | 3428 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3689 PIPE_FIFO_UNDERRUN_STATUS; 3429 PIPE_FIFO_UNDERRUN_STATUS;
3690 I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask); 3430
3691 I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask); 3431 for_each_pipe(dev_priv, pipe)
3432 I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3692 POSTING_READ(PIPESTAT(PIPE_A)); 3433 POSTING_READ(PIPESTAT(PIPE_A));
3693} 3434}
3694 3435
@@ -3701,7 +3442,7 @@ void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3701 3442
3702 dev_priv->display_irqs_enabled = true; 3443 dev_priv->display_irqs_enabled = true;
3703 3444
3704 if (dev_priv->dev->irq_enabled) 3445 if (intel_irqs_enabled(dev_priv))
3705 valleyview_display_irqs_install(dev_priv); 3446 valleyview_display_irqs_install(dev_priv);
3706} 3447}
3707 3448
@@ -3714,34 +3455,36 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3714 3455
3715 dev_priv->display_irqs_enabled = false; 3456 dev_priv->display_irqs_enabled = false;
3716 3457
3717 if (dev_priv->dev->irq_enabled) 3458 if (intel_irqs_enabled(dev_priv))
3718 valleyview_display_irqs_uninstall(dev_priv); 3459 valleyview_display_irqs_uninstall(dev_priv);
3719} 3460}
3720 3461
3721static int valleyview_irq_postinstall(struct drm_device *dev) 3462static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3722{ 3463{
3723 struct drm_i915_private *dev_priv = dev->dev_private;
3724 unsigned long irqflags;
3725
3726 dev_priv->irq_mask = ~0; 3464 dev_priv->irq_mask = ~0;
3727 3465
3728 I915_WRITE(PORT_HOTPLUG_EN, 0); 3466 I915_WRITE(PORT_HOTPLUG_EN, 0);
3729 POSTING_READ(PORT_HOTPLUG_EN); 3467 POSTING_READ(PORT_HOTPLUG_EN);
3730 3468
3731 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3732 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3733 I915_WRITE(VLV_IIR, 0xffffffff); 3469 I915_WRITE(VLV_IIR, 0xffffffff);
3734 POSTING_READ(VLV_IER); 3470 I915_WRITE(VLV_IIR, 0xffffffff);
3471 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3472 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3473 POSTING_READ(VLV_IMR);
3735 3474
3736 /* Interrupt setup is already guaranteed to be single-threaded, this is 3475 /* Interrupt setup is already guaranteed to be single-threaded, this is
3737 * just to make the assert_spin_locked check happy. */ 3476 * just to make the assert_spin_locked check happy. */
3738 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3477 spin_lock_irq(&dev_priv->irq_lock);
3739 if (dev_priv->display_irqs_enabled) 3478 if (dev_priv->display_irqs_enabled)
3740 valleyview_display_irqs_install(dev_priv); 3479 valleyview_display_irqs_install(dev_priv);
3741 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3480 spin_unlock_irq(&dev_priv->irq_lock);
3481}
3742 3482
3743 I915_WRITE(VLV_IIR, 0xffffffff); 3483static int valleyview_irq_postinstall(struct drm_device *dev)
3744 I915_WRITE(VLV_IIR, 0xffffffff); 3484{
3485 struct drm_i915_private *dev_priv = dev->dev_private;
3486
3487 vlv_display_irq_postinstall(dev_priv);
3745 3488
3746 gen5_gt_irq_postinstall(dev); 3489 gen5_gt_irq_postinstall(dev);
3747 3490
@@ -3783,24 +3526,35 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3783 3526
3784static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) 3527static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3785{ 3528{
3786 uint32_t de_pipe_masked = GEN8_PIPE_PRIMARY_FLIP_DONE | 3529 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3787 GEN8_PIPE_CDCLK_CRC_DONE | 3530 uint32_t de_pipe_enables;
3788 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3789 uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3790 GEN8_PIPE_FIFO_UNDERRUN;
3791 int pipe; 3531 int pipe;
3532 u32 aux_en = GEN8_AUX_CHANNEL_A;
3533
3534 if (IS_GEN9(dev_priv)) {
3535 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
3536 GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
3537 aux_en |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
3538 GEN9_AUX_CHANNEL_D;
3539 } else
3540 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
3541 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3542
3543 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3544 GEN8_PIPE_FIFO_UNDERRUN;
3545
3792 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked; 3546 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3793 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; 3547 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3794 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked; 3548 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3795 3549
3796 for_each_pipe(dev_priv, pipe) 3550 for_each_pipe(dev_priv, pipe)
3797 if (intel_display_power_enabled(dev_priv, 3551 if (intel_display_power_is_enabled(dev_priv,
3798 POWER_DOMAIN_PIPE(pipe))) 3552 POWER_DOMAIN_PIPE(pipe)))
3799 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, 3553 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3800 dev_priv->de_irq_mask[pipe], 3554 dev_priv->de_irq_mask[pipe],
3801 de_pipe_enables); 3555 de_pipe_enables);
3802 3556
3803 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A); 3557 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~aux_en, aux_en);
3804} 3558}
3805 3559
3806static int gen8_irq_postinstall(struct drm_device *dev) 3560static int gen8_irq_postinstall(struct drm_device *dev)
@@ -3823,33 +3577,8 @@ static int gen8_irq_postinstall(struct drm_device *dev)
3823static int cherryview_irq_postinstall(struct drm_device *dev) 3577static int cherryview_irq_postinstall(struct drm_device *dev)
3824{ 3578{
3825 struct drm_i915_private *dev_priv = dev->dev_private; 3579 struct drm_i915_private *dev_priv = dev->dev_private;
3826 u32 enable_mask = I915_DISPLAY_PORT_INTERRUPT |
3827 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3828 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3829 I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3830 u32 pipestat_enable = PLANE_FLIP_DONE_INT_STATUS_VLV |
3831 PIPE_CRC_DONE_INTERRUPT_STATUS;
3832 unsigned long irqflags;
3833 int pipe;
3834 3580
3835 /* 3581 vlv_display_irq_postinstall(dev_priv);
3836 * Leave vblank interrupts masked initially. enable/disable will
3837 * toggle them based on usage.
3838 */
3839 dev_priv->irq_mask = ~enable_mask;
3840
3841 for_each_pipe(dev_priv, pipe)
3842 I915_WRITE(PIPESTAT(pipe), 0xffff);
3843
3844 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3845 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3846 for_each_pipe(dev_priv, pipe)
3847 i915_enable_pipestat(dev_priv, pipe, pipestat_enable);
3848 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3849
3850 I915_WRITE(VLV_IIR, 0xffffffff);
3851 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3852 I915_WRITE(VLV_IER, enable_mask);
3853 3582
3854 gen8_gt_irq_postinstall(dev_priv); 3583 gen8_gt_irq_postinstall(dev_priv);
3855 3584
@@ -3869,41 +3598,39 @@ static void gen8_irq_uninstall(struct drm_device *dev)
3869 gen8_irq_reset(dev); 3598 gen8_irq_reset(dev);
3870} 3599}
3871 3600
3601static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
3602{
3603 /* Interrupt setup is already guaranteed to be single-threaded, this is
3604 * just to make the assert_spin_locked check happy. */
3605 spin_lock_irq(&dev_priv->irq_lock);
3606 if (dev_priv->display_irqs_enabled)
3607 valleyview_display_irqs_uninstall(dev_priv);
3608 spin_unlock_irq(&dev_priv->irq_lock);
3609
3610 vlv_display_irq_reset(dev_priv);
3611
3612 dev_priv->irq_mask = 0;
3613}
3614
3872static void valleyview_irq_uninstall(struct drm_device *dev) 3615static void valleyview_irq_uninstall(struct drm_device *dev)
3873{ 3616{
3874 struct drm_i915_private *dev_priv = dev->dev_private; 3617 struct drm_i915_private *dev_priv = dev->dev_private;
3875 unsigned long irqflags;
3876 int pipe;
3877 3618
3878 if (!dev_priv) 3619 if (!dev_priv)
3879 return; 3620 return;
3880 3621
3881 I915_WRITE(VLV_MASTER_IER, 0); 3622 I915_WRITE(VLV_MASTER_IER, 0);
3882 3623
3883 for_each_pipe(dev_priv, pipe) 3624 gen5_gt_irq_reset(dev);
3884 I915_WRITE(PIPESTAT(pipe), 0xffff);
3885 3625
3886 I915_WRITE(HWSTAM, 0xffffffff); 3626 I915_WRITE(HWSTAM, 0xffffffff);
3887 I915_WRITE(PORT_HOTPLUG_EN, 0);
3888 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3889
3890 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3891 if (dev_priv->display_irqs_enabled)
3892 valleyview_display_irqs_uninstall(dev_priv);
3893 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3894 3627
3895 dev_priv->irq_mask = 0; 3628 vlv_display_irq_uninstall(dev_priv);
3896
3897 I915_WRITE(VLV_IIR, 0xffffffff);
3898 I915_WRITE(VLV_IMR, 0xffffffff);
3899 I915_WRITE(VLV_IER, 0x0);
3900 POSTING_READ(VLV_IER);
3901} 3629}
3902 3630
3903static void cherryview_irq_uninstall(struct drm_device *dev) 3631static void cherryview_irq_uninstall(struct drm_device *dev)
3904{ 3632{
3905 struct drm_i915_private *dev_priv = dev->dev_private; 3633 struct drm_i915_private *dev_priv = dev->dev_private;
3906 int pipe;
3907 3634
3908 if (!dev_priv) 3635 if (!dev_priv)
3909 return; 3636 return;
@@ -3911,44 +3638,11 @@ static void cherryview_irq_uninstall(struct drm_device *dev)
3911 I915_WRITE(GEN8_MASTER_IRQ, 0); 3638 I915_WRITE(GEN8_MASTER_IRQ, 0);
3912 POSTING_READ(GEN8_MASTER_IRQ); 3639 POSTING_READ(GEN8_MASTER_IRQ);
3913 3640
3914#define GEN8_IRQ_FINI_NDX(type, which) \ 3641 gen8_gt_irq_reset(dev_priv);
3915do { \
3916 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
3917 I915_WRITE(GEN8_##type##_IER(which), 0); \
3918 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
3919 POSTING_READ(GEN8_##type##_IIR(which)); \
3920 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
3921} while (0)
3922
3923#define GEN8_IRQ_FINI(type) \
3924do { \
3925 I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
3926 I915_WRITE(GEN8_##type##_IER, 0); \
3927 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
3928 POSTING_READ(GEN8_##type##_IIR); \
3929 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
3930} while (0)
3931
3932 GEN8_IRQ_FINI_NDX(GT, 0);
3933 GEN8_IRQ_FINI_NDX(GT, 1);
3934 GEN8_IRQ_FINI_NDX(GT, 2);
3935 GEN8_IRQ_FINI_NDX(GT, 3);
3936
3937 GEN8_IRQ_FINI(PCU);
3938
3939#undef GEN8_IRQ_FINI
3940#undef GEN8_IRQ_FINI_NDX
3941
3942 I915_WRITE(PORT_HOTPLUG_EN, 0);
3943 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3944 3642
3945 for_each_pipe(dev_priv, pipe) 3643 GEN5_IRQ_RESET(GEN8_PCU_);
3946 I915_WRITE(PIPESTAT(pipe), 0xffff);
3947 3644
3948 I915_WRITE(VLV_IMR, 0xffffffff); 3645 vlv_display_irq_uninstall(dev_priv);
3949 I915_WRITE(VLV_IER, 0x0);
3950 I915_WRITE(VLV_IIR, 0xffffffff);
3951 POSTING_READ(VLV_IIR);
3952} 3646}
3953 3647
3954static void ironlake_irq_uninstall(struct drm_device *dev) 3648static void ironlake_irq_uninstall(struct drm_device *dev)
@@ -3976,7 +3670,6 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
3976static int i8xx_irq_postinstall(struct drm_device *dev) 3670static int i8xx_irq_postinstall(struct drm_device *dev)
3977{ 3671{
3978 struct drm_i915_private *dev_priv = dev->dev_private; 3672 struct drm_i915_private *dev_priv = dev->dev_private;
3979 unsigned long irqflags;
3980 3673
3981 I915_WRITE16(EMR, 3674 I915_WRITE16(EMR,
3982 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3675 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
@@ -3999,10 +3692,10 @@ static int i8xx_irq_postinstall(struct drm_device *dev)
3999 3692
4000 /* Interrupt setup is already guaranteed to be single-threaded, this is 3693 /* Interrupt setup is already guaranteed to be single-threaded, this is
4001 * just to make the assert_spin_locked check happy. */ 3694 * just to make the assert_spin_locked check happy. */
4002 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3695 spin_lock_irq(&dev_priv->irq_lock);
4003 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 3696 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4004 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 3697 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4005 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3698 spin_unlock_irq(&dev_priv->irq_lock);
4006 3699
4007 return 0; 3700 return 0;
4008} 3701}
@@ -4047,7 +3740,6 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
4047 struct drm_i915_private *dev_priv = dev->dev_private; 3740 struct drm_i915_private *dev_priv = dev->dev_private;
4048 u16 iir, new_iir; 3741 u16 iir, new_iir;
4049 u32 pipe_stats[2]; 3742 u32 pipe_stats[2];
4050 unsigned long irqflags;
4051 int pipe; 3743 int pipe;
4052 u16 flip_mask = 3744 u16 flip_mask =
4053 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3745 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
@@ -4063,11 +3755,9 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
4063 * It doesn't set the bit in iir again, but it still produces 3755 * It doesn't set the bit in iir again, but it still produces
4064 * interrupts (for non-MSI). 3756 * interrupts (for non-MSI).
4065 */ 3757 */
4066 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3758 spin_lock(&dev_priv->irq_lock);
4067 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3759 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4068 i915_handle_error(dev, false, 3760 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4069 "Command parser error, iir 0x%08x",
4070 iir);
4071 3761
4072 for_each_pipe(dev_priv, pipe) { 3762 for_each_pipe(dev_priv, pipe) {
4073 int reg = PIPESTAT(pipe); 3763 int reg = PIPESTAT(pipe);
@@ -4079,13 +3769,11 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
4079 if (pipe_stats[pipe] & 0x8000ffff) 3769 if (pipe_stats[pipe] & 0x8000ffff)
4080 I915_WRITE(reg, pipe_stats[pipe]); 3770 I915_WRITE(reg, pipe_stats[pipe]);
4081 } 3771 }
4082 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3772 spin_unlock(&dev_priv->irq_lock);
4083 3773
4084 I915_WRITE16(IIR, iir & ~flip_mask); 3774 I915_WRITE16(IIR, iir & ~flip_mask);
4085 new_iir = I915_READ16(IIR); /* Flush posted writes */ 3775 new_iir = I915_READ16(IIR); /* Flush posted writes */
4086 3776
4087 i915_update_dri1_breadcrumb(dev);
4088
4089 if (iir & I915_USER_INTERRUPT) 3777 if (iir & I915_USER_INTERRUPT)
4090 notify_ring(dev, &dev_priv->ring[RCS]); 3778 notify_ring(dev, &dev_priv->ring[RCS]);
4091 3779
@@ -4101,9 +3789,9 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
4101 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3789 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4102 i9xx_pipe_crc_irq_handler(dev, pipe); 3790 i9xx_pipe_crc_irq_handler(dev, pipe);
4103 3791
4104 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS && 3792 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4105 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) 3793 intel_cpu_fifo_underrun_irq_handler(dev_priv,
4106 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe)); 3794 pipe);
4107 } 3795 }
4108 3796
4109 iir = new_iir; 3797 iir = new_iir;
@@ -4149,7 +3837,6 @@ static int i915_irq_postinstall(struct drm_device *dev)
4149{ 3837{
4150 struct drm_i915_private *dev_priv = dev->dev_private; 3838 struct drm_i915_private *dev_priv = dev->dev_private;
4151 u32 enable_mask; 3839 u32 enable_mask;
4152 unsigned long irqflags;
4153 3840
4154 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); 3841 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
4155 3842
@@ -4187,10 +3874,10 @@ static int i915_irq_postinstall(struct drm_device *dev)
4187 3874
4188 /* Interrupt setup is already guaranteed to be single-threaded, this is 3875 /* Interrupt setup is already guaranteed to be single-threaded, this is
4189 * just to make the assert_spin_locked check happy. */ 3876 * just to make the assert_spin_locked check happy. */
4190 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3877 spin_lock_irq(&dev_priv->irq_lock);
4191 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 3878 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4192 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 3879 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4193 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3880 spin_unlock_irq(&dev_priv->irq_lock);
4194 3881
4195 return 0; 3882 return 0;
4196} 3883}
@@ -4234,7 +3921,6 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
4234 struct drm_device *dev = arg; 3921 struct drm_device *dev = arg;
4235 struct drm_i915_private *dev_priv = dev->dev_private; 3922 struct drm_i915_private *dev_priv = dev->dev_private;
4236 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; 3923 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
4237 unsigned long irqflags;
4238 u32 flip_mask = 3924 u32 flip_mask =
4239 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 3925 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4240 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; 3926 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
@@ -4250,11 +3936,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
4250 * It doesn't set the bit in iir again, but it still produces 3936 * It doesn't set the bit in iir again, but it still produces
4251 * interrupts (for non-MSI). 3937 * interrupts (for non-MSI).
4252 */ 3938 */
4253 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3939 spin_lock(&dev_priv->irq_lock);
4254 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 3940 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4255 i915_handle_error(dev, false, 3941 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4256 "Command parser error, iir 0x%08x",
4257 iir);
4258 3942
4259 for_each_pipe(dev_priv, pipe) { 3943 for_each_pipe(dev_priv, pipe) {
4260 int reg = PIPESTAT(pipe); 3944 int reg = PIPESTAT(pipe);
@@ -4266,7 +3950,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
4266 irq_received = true; 3950 irq_received = true;
4267 } 3951 }
4268 } 3952 }
4269 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3953 spin_unlock(&dev_priv->irq_lock);
4270 3954
4271 if (!irq_received) 3955 if (!irq_received)
4272 break; 3956 break;
@@ -4297,9 +3981,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
4297 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 3981 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4298 i9xx_pipe_crc_irq_handler(dev, pipe); 3982 i9xx_pipe_crc_irq_handler(dev, pipe);
4299 3983
4300 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS && 3984 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4301 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) 3985 intel_cpu_fifo_underrun_irq_handler(dev_priv,
4302 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe)); 3986 pipe);
4303 } 3987 }
4304 3988
4305 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 3989 if (blc_event || (iir & I915_ASLE_INTERRUPT))
@@ -4324,8 +4008,6 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
4324 iir = new_iir; 4008 iir = new_iir;
4325 } while (iir & ~flip_mask); 4009 } while (iir & ~flip_mask);
4326 4010
4327 i915_update_dri1_breadcrumb(dev);
4328
4329 return ret; 4011 return ret;
4330} 4012}
4331 4013
@@ -4372,7 +4054,6 @@ static int i965_irq_postinstall(struct drm_device *dev)
4372 struct drm_i915_private *dev_priv = dev->dev_private; 4054 struct drm_i915_private *dev_priv = dev->dev_private;
4373 u32 enable_mask; 4055 u32 enable_mask;
4374 u32 error_mask; 4056 u32 error_mask;
4375 unsigned long irqflags;
4376 4057
4377 /* Unmask the interrupts that we always want on. */ 4058 /* Unmask the interrupts that we always want on. */
4378 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | 4059 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
@@ -4393,11 +4074,11 @@ static int i965_irq_postinstall(struct drm_device *dev)
4393 4074
4394 /* Interrupt setup is already guaranteed to be single-threaded, this is 4075 /* Interrupt setup is already guaranteed to be single-threaded, this is
4395 * just to make the assert_spin_locked check happy. */ 4076 * just to make the assert_spin_locked check happy. */
4396 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 4077 spin_lock_irq(&dev_priv->irq_lock);
4397 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); 4078 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4398 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); 4079 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4399 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); 4080 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4400 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 4081 spin_unlock_irq(&dev_priv->irq_lock);
4401 4082
4402 /* 4083 /*
4403 * Enable some error detection, note the instruction error mask 4084 * Enable some error detection, note the instruction error mask
@@ -4462,7 +4143,6 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
4462 struct drm_i915_private *dev_priv = dev->dev_private; 4143 struct drm_i915_private *dev_priv = dev->dev_private;
4463 u32 iir, new_iir; 4144 u32 iir, new_iir;
4464 u32 pipe_stats[I915_MAX_PIPES]; 4145 u32 pipe_stats[I915_MAX_PIPES];
4465 unsigned long irqflags;
4466 int ret = IRQ_NONE, pipe; 4146 int ret = IRQ_NONE, pipe;
4467 u32 flip_mask = 4147 u32 flip_mask =
4468 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | 4148 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
@@ -4479,11 +4159,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
4479 * It doesn't set the bit in iir again, but it still produces 4159 * It doesn't set the bit in iir again, but it still produces
4480 * interrupts (for non-MSI). 4160 * interrupts (for non-MSI).
4481 */ 4161 */
4482 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 4162 spin_lock(&dev_priv->irq_lock);
4483 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) 4163 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4484 i915_handle_error(dev, false, 4164 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4485 "Command parser error, iir 0x%08x",
4486 iir);
4487 4165
4488 for_each_pipe(dev_priv, pipe) { 4166 for_each_pipe(dev_priv, pipe) {
4489 int reg = PIPESTAT(pipe); 4167 int reg = PIPESTAT(pipe);
@@ -4497,7 +4175,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
4497 irq_received = true; 4175 irq_received = true;
4498 } 4176 }
4499 } 4177 }
4500 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 4178 spin_unlock(&dev_priv->irq_lock);
4501 4179
4502 if (!irq_received) 4180 if (!irq_received)
4503 break; 4181 break;
@@ -4527,9 +4205,8 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
4527 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) 4205 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4528 i9xx_pipe_crc_irq_handler(dev, pipe); 4206 i9xx_pipe_crc_irq_handler(dev, pipe);
4529 4207
4530 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS && 4208 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4531 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false)) 4209 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
4532 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
4533 } 4210 }
4534 4211
4535 if (blc_event || (iir & I915_ASLE_INTERRUPT)) 4212 if (blc_event || (iir & I915_ASLE_INTERRUPT))
@@ -4556,8 +4233,6 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
4556 iir = new_iir; 4233 iir = new_iir;
4557 } 4234 }
4558 4235
4559 i915_update_dri1_breadcrumb(dev);
4560
4561 return ret; 4236 return ret;
4562} 4237}
4563 4238
@@ -4584,19 +4259,18 @@ static void i965_irq_uninstall(struct drm_device * dev)
4584 I915_WRITE(IIR, I915_READ(IIR)); 4259 I915_WRITE(IIR, I915_READ(IIR));
4585} 4260}
4586 4261
4587static void intel_hpd_irq_reenable(struct work_struct *work) 4262static void intel_hpd_irq_reenable_work(struct work_struct *work)
4588{ 4263{
4589 struct drm_i915_private *dev_priv = 4264 struct drm_i915_private *dev_priv =
4590 container_of(work, typeof(*dev_priv), 4265 container_of(work, typeof(*dev_priv),
4591 hotplug_reenable_work.work); 4266 hotplug_reenable_work.work);
4592 struct drm_device *dev = dev_priv->dev; 4267 struct drm_device *dev = dev_priv->dev;
4593 struct drm_mode_config *mode_config = &dev->mode_config; 4268 struct drm_mode_config *mode_config = &dev->mode_config;
4594 unsigned long irqflags;
4595 int i; 4269 int i;
4596 4270
4597 intel_runtime_pm_get(dev_priv); 4271 intel_runtime_pm_get(dev_priv);
4598 4272
4599 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 4273 spin_lock_irq(&dev_priv->irq_lock);
4600 for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) { 4274 for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
4601 struct drm_connector *connector; 4275 struct drm_connector *connector;
4602 4276
@@ -4620,14 +4294,21 @@ static void intel_hpd_irq_reenable(struct work_struct *work)
4620 } 4294 }
4621 if (dev_priv->display.hpd_irq_setup) 4295 if (dev_priv->display.hpd_irq_setup)
4622 dev_priv->display.hpd_irq_setup(dev); 4296 dev_priv->display.hpd_irq_setup(dev);
4623 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 4297 spin_unlock_irq(&dev_priv->irq_lock);
4624 4298
4625 intel_runtime_pm_put(dev_priv); 4299 intel_runtime_pm_put(dev_priv);
4626} 4300}
4627 4301
4628void intel_irq_init(struct drm_device *dev) 4302/**
4303 * intel_irq_init - initializes irq support
4304 * @dev_priv: i915 device instance
4305 *
4306 * This function initializes all the irq support including work items, timers
4307 * and all the vtables. It does not set up the interrupt itself, though.
4308 */
4309void intel_irq_init(struct drm_i915_private *dev_priv)
4629{ 4310{
4630 struct drm_i915_private *dev_priv = dev->dev_private; 4311 struct drm_device *dev = dev_priv->dev;
4631 4312
4632 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); 4313 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
4633 INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func); 4314 INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
@@ -4636,7 +4317,7 @@ void intel_irq_init(struct drm_device *dev)
4636 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); 4317 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4637 4318
4638 /* Let's track the enabled rps events */ 4319 /* Let's track the enabled rps events */
4639 if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) 4320 if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
4640 /* WaGsvRC0ResidencyMethod:vlv */ 4321 /* WaGsvRC0ResidencyMethod:vlv */
4641 dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED; 4322 dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
4642 else 4323 else
@@ -4646,17 +4327,14 @@ void intel_irq_init(struct drm_device *dev)
4646 i915_hangcheck_elapsed, 4327 i915_hangcheck_elapsed,
4647 (unsigned long) dev); 4328 (unsigned long) dev);
4648 INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work, 4329 INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
4649 intel_hpd_irq_reenable); 4330 intel_hpd_irq_reenable_work);
4650 4331
4651 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); 4332 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
4652 4333
4653 /* Haven't installed the IRQ handler yet */ 4334 if (IS_GEN2(dev_priv)) {
4654 dev_priv->pm._irqs_disabled = true;
4655
4656 if (IS_GEN2(dev)) {
4657 dev->max_vblank_count = 0; 4335 dev->max_vblank_count = 0;
4658 dev->driver->get_vblank_counter = i8xx_get_vblank_counter; 4336 dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
4659 } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { 4337 } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
4660 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ 4338 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4661 dev->driver->get_vblank_counter = gm45_get_vblank_counter; 4339 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
4662 } else { 4340 } else {
@@ -4669,7 +4347,7 @@ void intel_irq_init(struct drm_device *dev)
4669 * Gen2 doesn't have a hardware frame counter and so depends on 4347 * Gen2 doesn't have a hardware frame counter and so depends on
4670 * vblank interrupts to produce sane vblank sequence numbers. 4348 * vblank interrupts to produce sane vblank sequence numbers.
4671 */ 4349 */
4672 if (!IS_GEN2(dev)) 4350 if (!IS_GEN2(dev_priv))
4673 dev->vblank_disable_immediate = true; 4351 dev->vblank_disable_immediate = true;
4674 4352
4675 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 4353 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
@@ -4677,7 +4355,7 @@ void intel_irq_init(struct drm_device *dev)
4677 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; 4355 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4678 } 4356 }
4679 4357
4680 if (IS_CHERRYVIEW(dev)) { 4358 if (IS_CHERRYVIEW(dev_priv)) {
4681 dev->driver->irq_handler = cherryview_irq_handler; 4359 dev->driver->irq_handler = cherryview_irq_handler;
4682 dev->driver->irq_preinstall = cherryview_irq_preinstall; 4360 dev->driver->irq_preinstall = cherryview_irq_preinstall;
4683 dev->driver->irq_postinstall = cherryview_irq_postinstall; 4361 dev->driver->irq_postinstall = cherryview_irq_postinstall;
@@ -4685,7 +4363,7 @@ void intel_irq_init(struct drm_device *dev)
4685 dev->driver->enable_vblank = valleyview_enable_vblank; 4363 dev->driver->enable_vblank = valleyview_enable_vblank;
4686 dev->driver->disable_vblank = valleyview_disable_vblank; 4364 dev->driver->disable_vblank = valleyview_disable_vblank;
4687 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4365 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4688 } else if (IS_VALLEYVIEW(dev)) { 4366 } else if (IS_VALLEYVIEW(dev_priv)) {
4689 dev->driver->irq_handler = valleyview_irq_handler; 4367 dev->driver->irq_handler = valleyview_irq_handler;
4690 dev->driver->irq_preinstall = valleyview_irq_preinstall; 4368 dev->driver->irq_preinstall = valleyview_irq_preinstall;
4691 dev->driver->irq_postinstall = valleyview_irq_postinstall; 4369 dev->driver->irq_postinstall = valleyview_irq_postinstall;
@@ -4693,7 +4371,7 @@ void intel_irq_init(struct drm_device *dev)
4693 dev->driver->enable_vblank = valleyview_enable_vblank; 4371 dev->driver->enable_vblank = valleyview_enable_vblank;
4694 dev->driver->disable_vblank = valleyview_disable_vblank; 4372 dev->driver->disable_vblank = valleyview_disable_vblank;
4695 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; 4373 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4696 } else if (IS_GEN8(dev)) { 4374 } else if (INTEL_INFO(dev_priv)->gen >= 8) {
4697 dev->driver->irq_handler = gen8_irq_handler; 4375 dev->driver->irq_handler = gen8_irq_handler;
4698 dev->driver->irq_preinstall = gen8_irq_reset; 4376 dev->driver->irq_preinstall = gen8_irq_reset;
4699 dev->driver->irq_postinstall = gen8_irq_postinstall; 4377 dev->driver->irq_postinstall = gen8_irq_postinstall;
@@ -4710,12 +4388,12 @@ void intel_irq_init(struct drm_device *dev)
4710 dev->driver->disable_vblank = ironlake_disable_vblank; 4388 dev->driver->disable_vblank = ironlake_disable_vblank;
4711 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup; 4389 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
4712 } else { 4390 } else {
4713 if (INTEL_INFO(dev)->gen == 2) { 4391 if (INTEL_INFO(dev_priv)->gen == 2) {
4714 dev->driver->irq_preinstall = i8xx_irq_preinstall; 4392 dev->driver->irq_preinstall = i8xx_irq_preinstall;
4715 dev->driver->irq_postinstall = i8xx_irq_postinstall; 4393 dev->driver->irq_postinstall = i8xx_irq_postinstall;
4716 dev->driver->irq_handler = i8xx_irq_handler; 4394 dev->driver->irq_handler = i8xx_irq_handler;
4717 dev->driver->irq_uninstall = i8xx_irq_uninstall; 4395 dev->driver->irq_uninstall = i8xx_irq_uninstall;
4718 } else if (INTEL_INFO(dev)->gen == 3) { 4396 } else if (INTEL_INFO(dev_priv)->gen == 3) {
4719 dev->driver->irq_preinstall = i915_irq_preinstall; 4397 dev->driver->irq_preinstall = i915_irq_preinstall;
4720 dev->driver->irq_postinstall = i915_irq_postinstall; 4398 dev->driver->irq_postinstall = i915_irq_postinstall;
4721 dev->driver->irq_uninstall = i915_irq_uninstall; 4399 dev->driver->irq_uninstall = i915_irq_uninstall;
@@ -4733,12 +4411,23 @@ void intel_irq_init(struct drm_device *dev)
4733 } 4411 }
4734} 4412}
4735 4413
4736void intel_hpd_init(struct drm_device *dev) 4414/**
4415 * intel_hpd_init - initializes and enables hpd support
4416 * @dev_priv: i915 device instance
4417 *
4418 * This function enables the hotplug support. It requires that interrupts have
4419 * already been enabled with intel_irq_init_hw(). From this point on hotplug and
4420 * poll request can run concurrently to other code, so locking rules must be
4421 * obeyed.
4422 *
4423 * This is a separate step from interrupt enabling to simplify the locking rules
4424 * in the driver load and resume code.
4425 */
4426void intel_hpd_init(struct drm_i915_private *dev_priv)
4737{ 4427{
4738 struct drm_i915_private *dev_priv = dev->dev_private; 4428 struct drm_device *dev = dev_priv->dev;
4739 struct drm_mode_config *mode_config = &dev->mode_config; 4429 struct drm_mode_config *mode_config = &dev->mode_config;
4740 struct drm_connector *connector; 4430 struct drm_connector *connector;
4741 unsigned long irqflags;
4742 int i; 4431 int i;
4743 4432
4744 for (i = 1; i < HPD_NUM_PINS; i++) { 4433 for (i = 1; i < HPD_NUM_PINS; i++) {
@@ -4756,27 +4445,72 @@ void intel_hpd_init(struct drm_device *dev)
4756 4445
4757 /* Interrupt setup is already guaranteed to be single-threaded, this is 4446 /* Interrupt setup is already guaranteed to be single-threaded, this is
4758 * just to make the assert_spin_locked checks happy. */ 4447 * just to make the assert_spin_locked checks happy. */
4759 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 4448 spin_lock_irq(&dev_priv->irq_lock);
4760 if (dev_priv->display.hpd_irq_setup) 4449 if (dev_priv->display.hpd_irq_setup)
4761 dev_priv->display.hpd_irq_setup(dev); 4450 dev_priv->display.hpd_irq_setup(dev);
4762 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 4451 spin_unlock_irq(&dev_priv->irq_lock);
4763} 4452}
4764 4453
4765/* Disable interrupts so we can allow runtime PM. */ 4454/**
4766void intel_runtime_pm_disable_interrupts(struct drm_device *dev) 4455 * intel_irq_install - enables the hardware interrupt
4456 * @dev_priv: i915 device instance
4457 *
4458 * This function enables the hardware interrupt handling, but leaves the hotplug
4459 * handling disabled. It is called after intel_irq_init().
4460 *
4461 * In the driver load and resume code we need working interrupts in a few places
4462 * but don't want to deal with the hassle of concurrent probe and hotplug
4463 * workers. Hence the split into this two-stage approach.
4464 */
4465int intel_irq_install(struct drm_i915_private *dev_priv)
4767{ 4466{
4768 struct drm_i915_private *dev_priv = dev->dev_private; 4467 /*
4468 * We enable some interrupt sources in our postinstall hooks, so mark
4469 * interrupts as enabled _before_ actually enabling them to avoid
4470 * special cases in our ordering checks.
4471 */
4472 dev_priv->pm.irqs_enabled = true;
4769 4473
4770 dev->driver->irq_uninstall(dev); 4474 return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
4771 dev_priv->pm._irqs_disabled = true;
4772} 4475}
4773 4476
4774/* Restore interrupts so we can recover from runtime PM. */ 4477/**
4775void intel_runtime_pm_restore_interrupts(struct drm_device *dev) 4478 * intel_irq_uninstall - finalizes all irq handling
4479 * @dev_priv: i915 device instance
4480 *
4481 * This stops interrupt and hotplug handling and unregisters and frees all
4482 * resources acquired in the init functions.
4483 */
4484void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4776{ 4485{
4777 struct drm_i915_private *dev_priv = dev->dev_private; 4486 drm_irq_uninstall(dev_priv->dev);
4487 intel_hpd_cancel_work(dev_priv);
4488 dev_priv->pm.irqs_enabled = false;
4489}
4778 4490
4779 dev_priv->pm._irqs_disabled = false; 4491/**
4780 dev->driver->irq_preinstall(dev); 4492 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4781 dev->driver->irq_postinstall(dev); 4493 * @dev_priv: i915 device instance
4494 *
4495 * This function is used to disable interrupts at runtime, both in the runtime
4496 * pm and the system suspend/resume code.
4497 */
4498void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4499{
4500 dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
4501 dev_priv->pm.irqs_enabled = false;
4502}
4503
4504/**
4505 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4506 * @dev_priv: i915 device instance
4507 *
4508 * This function is used to enable interrupts at runtime, both in the runtime
4509 * pm and the system suspend/resume code.
4510 */
4511void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4512{
4513 dev_priv->pm.irqs_enabled = true;
4514 dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
4515 dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
4782} 4516}
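The two-stage contract spelled out in the kernel-doc above — interrupts first, hotplug on top — can be modeled in isolation. The following is a toy userspace sketch, not driver code; all names in it are illustrative, and it only mirrors the ordering rule and the "mark enabled before the hardware enable" trick described in intel_irq_install().

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static bool irqs_enabled;
static bool hotplug_enabled;

static void toy_irq_install(void)
{
	/* Mirror the driver: mark interrupts enabled _before_ the hardware
	 * enable so the ordering checks need no special cases. */
	irqs_enabled = true;
	/* ... drm_irq_install() would run here in the real driver ... */
}

static void toy_hpd_init(void)
{
	assert(irqs_enabled); /* hotplug depends on working interrupts */
	hotplug_enabled = true;
}

int main(void)
{
	toy_irq_install(); /* stage 1: bare interrupt handling */
	toy_hpd_init();    /* stage 2: hotplug/poll on top */
	printf("irqs=%d hotplug=%d\n", irqs_enabled, hotplug_enabled);
	return 0;
}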
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index c01e5f31430e..eefdc238f70b 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -26,14 +26,25 @@
26#define _I915_REG_H_ 26#define _I915_REG_H_
27 27
28#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a))) 28#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
29#define _PLANE(plane, a, b) _PIPE(plane, a, b)
29#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a))) 30#define _TRANSCODER(tran, a, b) ((a) + (tran)*((b)-(a)))
30
31#define _PORT(port, a, b) ((a) + (port)*((b)-(a))) 31#define _PORT(port, a, b) ((a) + (port)*((b)-(a)))
32#define _PIPE3(pipe, a, b, c) ((pipe) == PIPE_A ? (a) : \ 32#define _PIPE3(pipe, a, b, c) ((pipe) == PIPE_A ? (a) : \
33 (pipe) == PIPE_B ? (b) : (c)) 33 (pipe) == PIPE_B ? (b) : (c))
34 34
35#define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a)) 35#define _MASKED_FIELD(mask, value) ({ \
36#define _MASKED_BIT_DISABLE(a) ((a) << 16) 36 if (__builtin_constant_p(mask)) \
37 BUILD_BUG_ON_MSG(((mask) & 0xffff0000), "Incorrect mask"); \
38 if (__builtin_constant_p(value)) \
39 BUILD_BUG_ON_MSG((value) & 0xffff0000, "Incorrect value"); \
40 if (__builtin_constant_p(mask) && __builtin_constant_p(value)) \
41 BUILD_BUG_ON_MSG((value) & ~(mask), \
42 "Incorrect value for mask"); \
43 (mask) << 16 | (value); })
44#define _MASKED_BIT_ENABLE(a) ({ typeof(a) _a = (a); _MASKED_FIELD(_a, _a); })
45#define _MASKED_BIT_DISABLE(a) (_MASKED_FIELD((a), 0))
46
47
37 48
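The new _MASKED_FIELD adds compile-time checks, but the encoding itself is simple: the top 16 bits of the written value select which of the bottom 16 bits the hardware updates, leaving the rest untouched. A standalone sketch of that encoding (simplified copies of the macros above, minus the BUILD_BUG_ON checks) together with the _PIPE-style two-instance address interpolation used throughout this file; the register pair in the example is invented:

#include <stdio.h>
#include <stdint.h>

/* mask in bits 31:16, new bit values in bits 15:0 */
#define MASKED_FIELD(mask, value) (((uint32_t)(mask) << 16) | (value))
#define MASKED_BIT_ENABLE(a)      MASKED_FIELD((a), (a))
#define MASKED_BIT_DISABLE(a)     MASKED_FIELD((a), 0)

/* two-instance registers interpolate between the A and B addresses */
#define PIPE_REG(pipe, a, b) ((a) + (pipe) * ((b) - (a)))

int main(void)
{
	uint32_t bit = 1u << 5;

	printf("enable:  0x%08x\n", MASKED_BIT_ENABLE(bit));  /* 0x00200020 */
	printf("disable: 0x%08x\n", MASKED_BIT_DISABLE(bit)); /* 0x00200000 */
	/* pipe B instance of a hypothetical 0x70180/0x71180 pair */
	printf("pipe B:  0x%08x\n", PIPE_REG(1, 0x70180, 0x71180));
	return 0;
}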
38/* PCI config space */ 49/* PCI config space */
39 50
@@ -74,15 +85,17 @@
74#define I915_GC_RENDER_CLOCK_166_MHZ (0 << 0) 85#define I915_GC_RENDER_CLOCK_166_MHZ (0 << 0)
75#define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0) 86#define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0)
76#define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0) 87#define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0)
88#define GCDGMBUS 0xcc
77#define PCI_LBPC 0xf4 /* legacy/combination backlight modes, also called LBB */ 89#define PCI_LBPC 0xf4 /* legacy/combination backlight modes, also called LBB */
78 90
79 91
80/* Graphics reset regs */ 92/* Graphics reset regs */
81#define I965_GDRST 0xc0 /* PCI config register */ 93#define I915_GDRST 0xc0 /* PCI config register */
82#define GRDOM_FULL (0<<2) 94#define GRDOM_FULL (0<<2)
83#define GRDOM_RENDER (1<<2) 95#define GRDOM_RENDER (1<<2)
84#define GRDOM_MEDIA (3<<2) 96#define GRDOM_MEDIA (3<<2)
85#define GRDOM_MASK (3<<2) 97#define GRDOM_MASK (3<<2)
98#define GRDOM_RESET_STATUS (1<<1)
86#define GRDOM_RESET_ENABLE (1<<0) 99#define GRDOM_RESET_ENABLE (1<<0)
87 100
88#define ILK_GDSR 0x2ca4 /* MCHBAR offset */ 101#define ILK_GDSR 0x2ca4 /* MCHBAR offset */
@@ -248,6 +261,16 @@
248#define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19) 261#define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19)
249#define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19) 262#define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19)
250#define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19) 263#define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19)
264/* SKL ones */
265#define MI_DISPLAY_FLIP_SKL_PLANE_1_A (0 << 8)
266#define MI_DISPLAY_FLIP_SKL_PLANE_1_B (1 << 8)
267#define MI_DISPLAY_FLIP_SKL_PLANE_1_C (2 << 8)
268#define MI_DISPLAY_FLIP_SKL_PLANE_2_A (4 << 8)
269#define MI_DISPLAY_FLIP_SKL_PLANE_2_B (5 << 8)
270#define MI_DISPLAY_FLIP_SKL_PLANE_2_C (6 << 8)
271#define MI_DISPLAY_FLIP_SKL_PLANE_3_A (7 << 8)
272#define MI_DISPLAY_FLIP_SKL_PLANE_3_B (8 << 8)
273#define MI_DISPLAY_FLIP_SKL_PLANE_3_C (9 << 8)
251#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6, gen7 */ 274#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6, gen7 */
252#define MI_SEMAPHORE_GLOBAL_GTT (1<<22) 275#define MI_SEMAPHORE_GLOBAL_GTT (1<<22)
253#define MI_SEMAPHORE_UPDATE (1<<21) 276#define MI_SEMAPHORE_UPDATE (1<<21)
@@ -314,6 +337,8 @@
314#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */ 337#define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */
315#define MI_BATCH_BUFFER_START_GEN8 MI_INSTR(0x31, 1) 338#define MI_BATCH_BUFFER_START_GEN8 MI_INSTR(0x31, 1)
316 339
340#define MI_PREDICATE_SRC0 (0x2400)
341#define MI_PREDICATE_SRC1 (0x2408)
317 342
318#define MI_PREDICATE_RESULT_2 (0x2214) 343#define MI_PREDICATE_RESULT_2 (0x2214)
319#define LOWER_SLICE_ENABLED (1<<0) 344#define LOWER_SLICE_ENABLED (1<<0)
@@ -564,6 +589,7 @@ enum punit_power_well {
564#define PUNIT_REG_GPU_LFM 0xd3 589#define PUNIT_REG_GPU_LFM 0xd3
565#define PUNIT_REG_GPU_FREQ_REQ 0xd4 590#define PUNIT_REG_GPU_FREQ_REQ 0xd4
566#define PUNIT_REG_GPU_FREQ_STS 0xd8 591#define PUNIT_REG_GPU_FREQ_STS 0xd8
592#define GPLLENABLE (1<<4)
567#define GENFREQSTATUS (1<<0) 593#define GENFREQSTATUS (1<<0)
568#define PUNIT_REG_MEDIA_TURBO_FREQ_REQ 0xdc 594#define PUNIT_REG_MEDIA_TURBO_FREQ_REQ 0xdc
569#define PUNIT_REG_CZ_TIMESTAMP 0xce 595#define PUNIT_REG_CZ_TIMESTAMP 0xce
@@ -672,7 +698,7 @@ enum punit_power_well {
672 * need to be accessed during AUX communication, 698 * need to be accessed during AUX communication,
673 * 699 *
674 * Generally the common lane corresponds to the pipe and 700 * Generally the common lane corresponds to the pipe and
675 * the spline (PCS/TX) correponds to the port. 701 * the spline (PCS/TX) corresponds to the port.
676 * 702 *
677 * For dual channel PHY (VLV/CHV): 703 * For dual channel PHY (VLV/CHV):
678 * 704 *
@@ -796,6 +822,8 @@ enum punit_power_well {
796#define _VLV_PCS_DW0_CH1 0x8400 822#define _VLV_PCS_DW0_CH1 0x8400
797#define DPIO_PCS_TX_LANE2_RESET (1<<16) 823#define DPIO_PCS_TX_LANE2_RESET (1<<16)
798#define DPIO_PCS_TX_LANE1_RESET (1<<7) 824#define DPIO_PCS_TX_LANE1_RESET (1<<7)
825#define DPIO_LEFT_TXFIFO_RST_MASTER2 (1<<4)
826#define DPIO_RIGHT_TXFIFO_RST_MASTER2 (1<<3)
799#define VLV_PCS_DW0(ch) _PORT(ch, _VLV_PCS_DW0_CH0, _VLV_PCS_DW0_CH1) 827#define VLV_PCS_DW0(ch) _PORT(ch, _VLV_PCS_DW0_CH0, _VLV_PCS_DW0_CH1)
800 828
801#define _VLV_PCS01_DW0_CH0 0x200 829#define _VLV_PCS01_DW0_CH0 0x200
@@ -836,12 +864,31 @@ enum punit_power_well {
836 864
837#define _VLV_PCS_DW9_CH0 0x8224 865#define _VLV_PCS_DW9_CH0 0x8224
838#define _VLV_PCS_DW9_CH1 0x8424 866#define _VLV_PCS_DW9_CH1 0x8424
867#define DPIO_PCS_TX2MARGIN_MASK (0x7<<13)
868#define DPIO_PCS_TX2MARGIN_000 (0<<13)
869#define DPIO_PCS_TX2MARGIN_101 (1<<13)
870#define DPIO_PCS_TX1MARGIN_MASK (0x7<<10)
871#define DPIO_PCS_TX1MARGIN_000 (0<<10)
872#define DPIO_PCS_TX1MARGIN_101 (1<<10)
839#define VLV_PCS_DW9(ch) _PORT(ch, _VLV_PCS_DW9_CH0, _VLV_PCS_DW9_CH1) 873#define VLV_PCS_DW9(ch) _PORT(ch, _VLV_PCS_DW9_CH0, _VLV_PCS_DW9_CH1)
840 874
875#define _VLV_PCS01_DW9_CH0 0x224
876#define _VLV_PCS23_DW9_CH0 0x424
877#define _VLV_PCS01_DW9_CH1 0x2624
878#define _VLV_PCS23_DW9_CH1 0x2824
879#define VLV_PCS01_DW9(ch) _PORT(ch, _VLV_PCS01_DW9_CH0, _VLV_PCS01_DW9_CH1)
880#define VLV_PCS23_DW9(ch) _PORT(ch, _VLV_PCS23_DW9_CH0, _VLV_PCS23_DW9_CH1)
881
841#define _CHV_PCS_DW10_CH0 0x8228 882#define _CHV_PCS_DW10_CH0 0x8228
842#define _CHV_PCS_DW10_CH1 0x8428 883#define _CHV_PCS_DW10_CH1 0x8428
843#define DPIO_PCS_SWING_CALC_TX0_TX2 (1<<30) 884#define DPIO_PCS_SWING_CALC_TX0_TX2 (1<<30)
844#define DPIO_PCS_SWING_CALC_TX1_TX3 (1<<31) 885#define DPIO_PCS_SWING_CALC_TX1_TX3 (1<<31)
886#define DPIO_PCS_TX2DEEMP_MASK (0xf<<24)
887#define DPIO_PCS_TX2DEEMP_9P5 (0<<24)
888#define DPIO_PCS_TX2DEEMP_6P0 (2<<24)
889#define DPIO_PCS_TX1DEEMP_MASK (0xf<<16)
890#define DPIO_PCS_TX1DEEMP_9P5 (0<<16)
891#define DPIO_PCS_TX1DEEMP_6P0 (2<<16)
845#define CHV_PCS_DW10(ch) _PORT(ch, _CHV_PCS_DW10_CH0, _CHV_PCS_DW10_CH1) 892#define CHV_PCS_DW10(ch) _PORT(ch, _CHV_PCS_DW10_CH0, _CHV_PCS_DW10_CH1)
846 893
847#define _VLV_PCS01_DW10_CH0 0x0228 894#define _VLV_PCS01_DW10_CH0 0x0228
@@ -853,8 +900,18 @@ enum punit_power_well {
853 900
854#define _VLV_PCS_DW11_CH0 0x822c 901#define _VLV_PCS_DW11_CH0 0x822c
855#define _VLV_PCS_DW11_CH1 0x842c 902#define _VLV_PCS_DW11_CH1 0x842c
903#define DPIO_LANEDESKEW_STRAP_OVRD (1<<3)
904#define DPIO_LEFT_TXFIFO_RST_MASTER (1<<1)
905#define DPIO_RIGHT_TXFIFO_RST_MASTER (1<<0)
856#define VLV_PCS_DW11(ch) _PORT(ch, _VLV_PCS_DW11_CH0, _VLV_PCS_DW11_CH1) 906#define VLV_PCS_DW11(ch) _PORT(ch, _VLV_PCS_DW11_CH0, _VLV_PCS_DW11_CH1)
857 907
908#define _VLV_PCS01_DW11_CH0 0x022c
909#define _VLV_PCS23_DW11_CH0 0x042c
910#define _VLV_PCS01_DW11_CH1 0x262c
911#define _VLV_PCS23_DW11_CH1 0x282c
912#define VLV_PCS01_DW11(ch) _PORT(ch, _VLV_PCS01_DW11_CH0, _VLV_PCS01_DW11_CH1)
913#define VLV_PCS23_DW11(ch) _PORT(ch, _VLV_PCS23_DW11_CH0, _VLV_PCS23_DW11_CH1)
914
858#define _VLV_PCS_DW12_CH0 0x8230 915#define _VLV_PCS_DW12_CH0 0x8230
859#define _VLV_PCS_DW12_CH1 0x8430 916#define _VLV_PCS_DW12_CH1 0x8430
860#define VLV_PCS_DW12(ch) _PORT(ch, _VLV_PCS_DW12_CH0, _VLV_PCS_DW12_CH1) 917#define VLV_PCS_DW12(ch) _PORT(ch, _VLV_PCS_DW12_CH0, _VLV_PCS_DW12_CH1)
@@ -1237,7 +1294,7 @@ enum punit_power_well {
1237#define GEN6_WIZ_HASHING_8x8 GEN6_WIZ_HASHING(0, 0) 1294#define GEN6_WIZ_HASHING_8x8 GEN6_WIZ_HASHING(0, 0)
1238#define GEN6_WIZ_HASHING_8x4 GEN6_WIZ_HASHING(0, 1) 1295#define GEN6_WIZ_HASHING_8x4 GEN6_WIZ_HASHING(0, 1)
1239#define GEN6_WIZ_HASHING_16x4 GEN6_WIZ_HASHING(1, 0) 1296#define GEN6_WIZ_HASHING_16x4 GEN6_WIZ_HASHING(1, 0)
1240#define GEN6_WIZ_HASHING_MASK (GEN6_WIZ_HASHING(1, 1) << 16) 1297#define GEN6_WIZ_HASHING_MASK GEN6_WIZ_HASHING(1, 1)
1241#define GEN6_TD_FOUR_ROW_DISPATCH_DISABLE (1 << 5) 1298#define GEN6_TD_FOUR_ROW_DISPATCH_DISABLE (1 << 5)
1242 1299
1243#define GFX_MODE 0x02520 1300#define GFX_MODE 0x02520
@@ -1999,6 +2056,8 @@ enum punit_power_well {
1999#define DCC_ADDRESSING_MODE_MASK (3 << 0) 2056#define DCC_ADDRESSING_MODE_MASK (3 << 0)
2000#define DCC_CHANNEL_XOR_DISABLE (1 << 10) 2057#define DCC_CHANNEL_XOR_DISABLE (1 << 10)
2001#define DCC_CHANNEL_XOR_BIT_17 (1 << 9) 2058#define DCC_CHANNEL_XOR_BIT_17 (1 << 9)
2059#define DCC2 0x10204
2060#define DCC2_MODIFIED_ENHANCED_DISABLE (1 << 20)
2002 2061
2003/* Pineview MCH register contains DDR3 setting */ 2062/* Pineview MCH register contains DDR3 setting */
2004#define CSHRDDR3CTL 0x101a8 2063#define CSHRDDR3CTL 0x101a8
@@ -2282,7 +2341,6 @@ enum punit_power_well {
2282 2341
2283#define GEN6_GT_THREAD_STATUS_REG 0x13805c 2342#define GEN6_GT_THREAD_STATUS_REG 0x13805c
2284#define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7 2343#define GEN6_GT_THREAD_STATUS_CORE_MASK 0x7
2285#define GEN6_GT_THREAD_STATUS_CORE_MASK_HSW (0x7 | (0x07 << 16))
2286 2344
2287#define GEN6_GT_PERF_STATUS (MCHBAR_MIRROR_BASE_SNB + 0x5948) 2345#define GEN6_GT_PERF_STATUS (MCHBAR_MIRROR_BASE_SNB + 0x5948)
2288#define GEN6_RP_STATE_LIMITS (MCHBAR_MIRROR_BASE_SNB + 0x5994) 2346#define GEN6_RP_STATE_LIMITS (MCHBAR_MIRROR_BASE_SNB + 0x5994)
@@ -2506,9 +2564,7 @@ enum punit_power_well {
2506 2564
2507#define EDP_PSR_AUX_CTL(dev) (EDP_PSR_BASE(dev) + 0x10) 2565#define EDP_PSR_AUX_CTL(dev) (EDP_PSR_BASE(dev) + 0x10)
2508#define EDP_PSR_AUX_DATA1(dev) (EDP_PSR_BASE(dev) + 0x14) 2566#define EDP_PSR_AUX_DATA1(dev) (EDP_PSR_BASE(dev) + 0x14)
2509#define EDP_PSR_DPCD_COMMAND 0x80060000
2510#define EDP_PSR_AUX_DATA2(dev) (EDP_PSR_BASE(dev) + 0x18) 2567#define EDP_PSR_AUX_DATA2(dev) (EDP_PSR_BASE(dev) + 0x18)
2511#define EDP_PSR_DPCD_NORMAL_OPERATION (1<<24)
2512#define EDP_PSR_AUX_DATA3(dev) (EDP_PSR_BASE(dev) + 0x1c) 2568#define EDP_PSR_AUX_DATA3(dev) (EDP_PSR_BASE(dev) + 0x1c)
2513#define EDP_PSR_AUX_DATA4(dev) (EDP_PSR_BASE(dev) + 0x20) 2569#define EDP_PSR_AUX_DATA4(dev) (EDP_PSR_BASE(dev) + 0x20)
2514#define EDP_PSR_AUX_DATA5(dev) (EDP_PSR_BASE(dev) + 0x24) 2570#define EDP_PSR_AUX_DATA5(dev) (EDP_PSR_BASE(dev) + 0x24)
@@ -3645,6 +3701,7 @@ enum punit_power_well {
3645#define DP_AUX_CH_CTL_PRECHARGE_TEST (1 << 11) 3701#define DP_AUX_CH_CTL_PRECHARGE_TEST (1 << 11)
3646#define DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK (0x7ff) 3702#define DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK (0x7ff)
3647#define DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT 0 3703#define DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT 0
3704#define DP_AUX_CH_CTL_SYNC_PULSE_SKL(c) ((c) - 1)
3648 3705
3649/* 3706/*
3650 * Computing GMCH M and N values for the Display Port link 3707 * Computing GMCH M and N values for the Display Port link
@@ -4024,17 +4081,18 @@ enum punit_power_well {
4024#define DSPFW_PLANEA_WM1_HI_MASK (1<<0) 4081#define DSPFW_PLANEA_WM1_HI_MASK (1<<0)
4025 4082
4026/* drain latency register values*/ 4083/* drain latency register values*/
4084#define DRAIN_LATENCY_PRECISION_16 16
4027#define DRAIN_LATENCY_PRECISION_32 32 4085#define DRAIN_LATENCY_PRECISION_32 32
4028#define DRAIN_LATENCY_PRECISION_64 64 4086#define DRAIN_LATENCY_PRECISION_64 64
4029#define VLV_DDL(pipe) (VLV_DISPLAY_BASE + 0x70050 + 4 * (pipe)) 4087#define VLV_DDL(pipe) (VLV_DISPLAY_BASE + 0x70050 + 4 * (pipe))
4030#define DDL_CURSOR_PRECISION_64 (1<<31) 4088#define DDL_CURSOR_PRECISION_HIGH (1<<31)
4031#define DDL_CURSOR_PRECISION_32 (0<<31) 4089#define DDL_CURSOR_PRECISION_LOW (0<<31)
4032#define DDL_CURSOR_SHIFT 24 4090#define DDL_CURSOR_SHIFT 24
4033#define DDL_SPRITE_PRECISION_64(sprite) (1<<(15+8*(sprite))) 4091#define DDL_SPRITE_PRECISION_HIGH(sprite) (1<<(15+8*(sprite)))
4034#define DDL_SPRITE_PRECISION_32(sprite) (0<<(15+8*(sprite))) 4092#define DDL_SPRITE_PRECISION_LOW(sprite) (0<<(15+8*(sprite)))
4035#define DDL_SPRITE_SHIFT(sprite) (8+8*(sprite)) 4093#define DDL_SPRITE_SHIFT(sprite) (8+8*(sprite))
4036#define DDL_PLANE_PRECISION_64 (1<<7) 4094#define DDL_PLANE_PRECISION_HIGH (1<<7)
4037#define DDL_PLANE_PRECISION_32 (0<<7) 4095#define DDL_PLANE_PRECISION_LOW (0<<7)
4038#define DDL_PLANE_SHIFT 0 4096#define DDL_PLANE_SHIFT 0
4039#define DRAIN_LATENCY_MASK 0x7f 4097#define DRAIN_LATENCY_MASK 0x7f
4040 4098
@@ -4071,6 +4129,41 @@ enum punit_power_well {
4071#define I965_CURSOR_MAX_WM 32 4129#define I965_CURSOR_MAX_WM 32
4072#define I965_CURSOR_DFT_WM 8 4130#define I965_CURSOR_DFT_WM 8
4073 4131
4132/* Watermark register definitions for SKL */
4133#define CUR_WM_A_0 0x70140
4134#define CUR_WM_B_0 0x71140
4135#define PLANE_WM_1_A_0 0x70240
4136#define PLANE_WM_1_B_0 0x71240
4137#define PLANE_WM_2_A_0 0x70340
4138#define PLANE_WM_2_B_0 0x71340
4139#define PLANE_WM_TRANS_1_A_0 0x70268
4140#define PLANE_WM_TRANS_1_B_0 0x71268
4141#define PLANE_WM_TRANS_2_A_0 0x70368
4142#define PLANE_WM_TRANS_2_B_0 0x71368
4143#define CUR_WM_TRANS_A_0 0x70168
4144#define CUR_WM_TRANS_B_0 0x71168
4145#define PLANE_WM_EN (1 << 31)
4146#define PLANE_WM_LINES_SHIFT 14
4147#define PLANE_WM_LINES_MASK 0x1f
4148#define PLANE_WM_BLOCKS_MASK 0x3ff
4149
4150#define CUR_WM_0(pipe) _PIPE(pipe, CUR_WM_A_0, CUR_WM_B_0)
4151#define CUR_WM(pipe, level) (CUR_WM_0(pipe) + ((4) * (level)))
4152#define CUR_WM_TRANS(pipe) _PIPE(pipe, CUR_WM_TRANS_A_0, CUR_WM_TRANS_B_0)
4153
4154#define _PLANE_WM_1(pipe) _PIPE(pipe, PLANE_WM_1_A_0, PLANE_WM_1_B_0)
4155#define _PLANE_WM_2(pipe) _PIPE(pipe, PLANE_WM_2_A_0, PLANE_WM_2_B_0)
4156#define _PLANE_WM_BASE(pipe, plane) \
4157 _PLANE(plane, _PLANE_WM_1(pipe), _PLANE_WM_2(pipe))
4158#define PLANE_WM(pipe, plane, level) \
4159 (_PLANE_WM_BASE(pipe, plane) + ((4) * (level)))
4160#define _PLANE_WM_TRANS_1(pipe) \
4161 _PIPE(pipe, PLANE_WM_TRANS_1_A_0, PLANE_WM_TRANS_1_B_0)
4162#define _PLANE_WM_TRANS_2(pipe) \
4163 _PIPE(pipe, PLANE_WM_TRANS_2_A_0, PLANE_WM_TRANS_2_B_0)
4164#define PLANE_WM_TRANS(pipe, plane) \
4165 _PLANE(plane, _PLANE_WM_TRANS_1(pipe), _PLANE_WM_TRANS_2(pipe))
4166
4074/* define the Watermark register on Ironlake */ 4167/* define the Watermark register on Ironlake */
4075#define WM0_PIPEA_ILK 0x45100 4168#define WM0_PIPEA_ILK 0x45100
4076#define WM0_PIPE_PLANE_MASK (0xffff<<16) 4169#define WM0_PIPE_PLANE_MASK (0xffff<<16)
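The SKL watermark words above pack three fields per level (enable bit, line count, block count), and PLANE_WM()/CUR_WM() step through the eight levels four bytes apart. A minimal decode sketch using the masks defined above; the register value is made up for illustration:

#include <stdio.h>
#include <stdint.h>

#define PLANE_WM_EN          (1u << 31)
#define PLANE_WM_LINES_SHIFT 14
#define PLANE_WM_LINES_MASK  0x1f
#define PLANE_WM_BLOCKS_MASK 0x3ff

int main(void)
{
	/* invented example: enabled, 3 lines, 40 blocks */
	uint32_t val = PLANE_WM_EN | (3u << PLANE_WM_LINES_SHIFT) | 40u;

	printf("enabled=%u lines=%u blocks=%u\n",
	       (unsigned)!!(val & PLANE_WM_EN),
	       (val >> PLANE_WM_LINES_SHIFT) & PLANE_WM_LINES_MASK,
	       val & PLANE_WM_BLOCKS_MASK);
	return 0;
}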
@@ -4177,6 +4270,7 @@ enum punit_power_well {
4177#define MCURSOR_PIPE_A 0x00 4270#define MCURSOR_PIPE_A 0x00
4178#define MCURSOR_PIPE_B (1 << 28) 4271#define MCURSOR_PIPE_B (1 << 28)
4179#define MCURSOR_GAMMA_ENABLE (1 << 26) 4272#define MCURSOR_GAMMA_ENABLE (1 << 26)
4273#define CURSOR_ROTATE_180 (1<<15)
4180#define CURSOR_TRICKLE_FEED_DISABLE (1 << 14) 4274#define CURSOR_TRICKLE_FEED_DISABLE (1 << 14)
4181#define _CURABASE 0x70084 4275#define _CURABASE 0x70084
4182#define _CURAPOS 0x70088 4276#define _CURAPOS 0x70088
@@ -4240,9 +4334,11 @@ enum punit_power_well {
4240#define DISPPLANE_NO_LINE_DOUBLE 0 4334#define DISPPLANE_NO_LINE_DOUBLE 0
4241#define DISPPLANE_STEREO_POLARITY_FIRST 0 4335#define DISPPLANE_STEREO_POLARITY_FIRST 0
4242#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18) 4336#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
4243#define DISPPLANE_ROTATE_180 (1<<15) 4337#define DISPPLANE_ALPHA_PREMULTIPLY (1<<16) /* CHV pipe B */
4338#define DISPPLANE_ROTATE_180 (1<<15)
4244#define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */ 4339#define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */
4245#define DISPPLANE_TILED (1<<10) 4340#define DISPPLANE_TILED (1<<10)
4341#define DISPPLANE_MIRROR (1<<8) /* CHV pipe B */
4246#define _DSPAADDR 0x70184 4342#define _DSPAADDR 0x70184
4247#define _DSPASTRIDE 0x70188 4343#define _DSPASTRIDE 0x70188
4248#define _DSPAPOS 0x7018C /* reserved */ 4344#define _DSPAPOS 0x7018C /* reserved */
@@ -4263,6 +4359,24 @@ enum punit_power_well {
4263#define DSPOFFSET(plane) _PIPE2(plane, _DSPAOFFSET) 4359#define DSPOFFSET(plane) _PIPE2(plane, _DSPAOFFSET)
4264#define DSPSURFLIVE(plane) _PIPE2(plane, _DSPASURFLIVE) 4360#define DSPSURFLIVE(plane) _PIPE2(plane, _DSPASURFLIVE)
4265 4361
4362/* CHV pipe B blender and primary plane */
4363#define _CHV_BLEND_A 0x60a00
4364#define CHV_BLEND_LEGACY (0<<30)
4365#define CHV_BLEND_ANDROID (1<<30)
4366#define CHV_BLEND_MPO (2<<30)
4367#define CHV_BLEND_MASK (3<<30)
4368#define _CHV_CANVAS_A 0x60a04
4369#define _PRIMPOS_A 0x60a08
4370#define _PRIMSIZE_A 0x60a0c
4371#define _PRIMCNSTALPHA_A 0x60a10
4372#define PRIM_CONST_ALPHA_ENABLE (1<<31)
4373
4374#define CHV_BLEND(pipe) _TRANSCODER2(pipe, _CHV_BLEND_A)
4375#define CHV_CANVAS(pipe) _TRANSCODER2(pipe, _CHV_CANVAS_A)
4376#define PRIMPOS(plane) _TRANSCODER2(plane, _PRIMPOS_A)
4377#define PRIMSIZE(plane) _TRANSCODER2(plane, _PRIMSIZE_A)
4378#define PRIMCNSTALPHA(plane) _TRANSCODER2(plane, _PRIMCNSTALPHA_A)
4379
4266/* Display/Sprite base address macros */ 4380/* Display/Sprite base address macros */
4267#define DISP_BASEADDR_MASK (0xfffff000) 4381#define DISP_BASEADDR_MASK (0xfffff000)
4268#define I915_LO_DISPBASE(val) (val & ~DISP_BASEADDR_MASK) 4382#define I915_LO_DISPBASE(val) (val & ~DISP_BASEADDR_MASK)
@@ -4464,6 +4578,7 @@ enum punit_power_well {
4464#define SP_FORMAT_RGBA1010102 (9<<26) 4578#define SP_FORMAT_RGBA1010102 (9<<26)
4465#define SP_FORMAT_RGBX8888 (0xe<<26) 4579#define SP_FORMAT_RGBX8888 (0xe<<26)
4466#define SP_FORMAT_RGBA8888 (0xf<<26) 4580#define SP_FORMAT_RGBA8888 (0xf<<26)
4581#define SP_ALPHA_PREMULTIPLY (1<<23) /* CHV pipe B */
4467#define SP_SOURCE_KEY (1<<22) 4582#define SP_SOURCE_KEY (1<<22)
4468#define SP_YUV_BYTE_ORDER_MASK (3<<16) 4583#define SP_YUV_BYTE_ORDER_MASK (3<<16)
4469#define SP_YUV_ORDER_YUYV (0<<16) 4584#define SP_YUV_ORDER_YUYV (0<<16)
@@ -4472,6 +4587,7 @@ enum punit_power_well {
4472#define SP_YUV_ORDER_VYUY (3<<16) 4587#define SP_YUV_ORDER_VYUY (3<<16)
4473#define SP_ROTATE_180 (1<<15) 4588#define SP_ROTATE_180 (1<<15)
4474#define SP_TILED (1<<10) 4589#define SP_TILED (1<<10)
4590#define SP_MIRROR (1<<8) /* CHV pipe B */
4475#define _SPALINOFF (VLV_DISPLAY_BASE + 0x72184) 4591#define _SPALINOFF (VLV_DISPLAY_BASE + 0x72184)
4476#define _SPASTRIDE (VLV_DISPLAY_BASE + 0x72188) 4592#define _SPASTRIDE (VLV_DISPLAY_BASE + 0x72188)
4477#define _SPAPOS (VLV_DISPLAY_BASE + 0x7218c) 4593#define _SPAPOS (VLV_DISPLAY_BASE + 0x7218c)
@@ -4482,6 +4598,7 @@ enum punit_power_well {
4482#define _SPAKEYMAXVAL (VLV_DISPLAY_BASE + 0x721a0) 4598#define _SPAKEYMAXVAL (VLV_DISPLAY_BASE + 0x721a0)
4483#define _SPATILEOFF (VLV_DISPLAY_BASE + 0x721a4) 4599#define _SPATILEOFF (VLV_DISPLAY_BASE + 0x721a4)
4484#define _SPACONSTALPHA (VLV_DISPLAY_BASE + 0x721a8) 4600#define _SPACONSTALPHA (VLV_DISPLAY_BASE + 0x721a8)
4601#define SP_CONST_ALPHA_ENABLE (1<<31)
4485#define _SPAGAMC (VLV_DISPLAY_BASE + 0x721f4) 4602#define _SPAGAMC (VLV_DISPLAY_BASE + 0x721f4)
4486 4603
4487#define _SPBCNTR (VLV_DISPLAY_BASE + 0x72280) 4604#define _SPBCNTR (VLV_DISPLAY_BASE + 0x72280)
@@ -4510,6 +4627,195 @@ enum punit_power_well {
4510#define SPCONSTALPHA(pipe, plane) _PIPE(pipe * 2 + plane, _SPACONSTALPHA, _SPBCONSTALPHA) 4627#define SPCONSTALPHA(pipe, plane) _PIPE(pipe * 2 + plane, _SPACONSTALPHA, _SPBCONSTALPHA)
4511#define SPGAMC(pipe, plane) _PIPE(pipe * 2 + plane, _SPAGAMC, _SPBGAMC) 4628#define SPGAMC(pipe, plane) _PIPE(pipe * 2 + plane, _SPAGAMC, _SPBGAMC)
4512 4629
4630/*
4631 * CHV pipe B sprite CSC
4632 *
4633 * |cr| |c0 c1 c2| |cr + cr_ioff| |cr_ooff|
4634 * |yg| = |c3 c4 c5| x |yg + yg_ioff| + |yg_ooff|
4635 * |cb| |c6 c7 c8| |cb + cb_ioff| |cb_ooff|
4636 */
4637#define SPCSCYGOFF(sprite) (VLV_DISPLAY_BASE + 0x6d900 + (sprite) * 0x1000)
4638#define SPCSCCBOFF(sprite) (VLV_DISPLAY_BASE + 0x6d904 + (sprite) * 0x1000)
4639#define SPCSCCROFF(sprite) (VLV_DISPLAY_BASE + 0x6d908 + (sprite) * 0x1000)
4640#define SPCSC_OOFF(x) (((x) & 0x7ff) << 16) /* s11 */
4641#define SPCSC_IOFF(x) (((x) & 0x7ff) << 0) /* s11 */
4642
4643#define SPCSCC01(sprite) (VLV_DISPLAY_BASE + 0x6d90c + (sprite) * 0x1000)
4644#define SPCSCC23(sprite) (VLV_DISPLAY_BASE + 0x6d910 + (sprite) * 0x1000)
4645#define SPCSCC45(sprite) (VLV_DISPLAY_BASE + 0x6d914 + (sprite) * 0x1000)
4646#define SPCSCC67(sprite) (VLV_DISPLAY_BASE + 0x6d918 + (sprite) * 0x1000)
4647#define SPCSCC8(sprite) (VLV_DISPLAY_BASE + 0x6d91c + (sprite) * 0x1000)
4648#define SPCSC_C1(x) (((x) & 0x7fff) << 16) /* s3.12 */
4649#define SPCSC_C0(x) (((x) & 0x7fff) << 0) /* s3.12 */
4650
4651#define SPCSCYGICLAMP(sprite) (VLV_DISPLAY_BASE + 0x6d920 + (sprite) * 0x1000)
4652#define SPCSCCBICLAMP(sprite) (VLV_DISPLAY_BASE + 0x6d924 + (sprite) * 0x1000)
4653#define SPCSCCRICLAMP(sprite) (VLV_DISPLAY_BASE + 0x6d928 + (sprite) * 0x1000)
4654#define SPCSC_IMAX(x) (((x) & 0x7ff) << 16) /* s11 */
4655#define SPCSC_IMIN(x) (((x) & 0x7ff) << 0) /* s11 */
4656
4657#define SPCSCYGOCLAMP(sprite) (VLV_DISPLAY_BASE + 0x6d92c + (sprite) * 0x1000)
4658#define SPCSCCBOCLAMP(sprite) (VLV_DISPLAY_BASE + 0x6d930 + (sprite) * 0x1000)
4659#define SPCSCCROCLAMP(sprite) (VLV_DISPLAY_BASE + 0x6d934 + (sprite) * 0x1000)
4660#define SPCSC_OMAX(x) ((x) << 16) /* u10 */
4661#define SPCSC_OMIN(x) ((x) << 0) /* u10 */
4662
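The CSC comment above describes an input offset, a 3x3 matrix multiply, and an output offset, with the coefficients in s3.12 fixed point per the SPCSC_C0/C1 comments. A toy model of one output channel — illustrative only, and not a claim about the hardware's exact rounding or clamping behavior:

#include <stdio.h>
#include <stdint.h>

/* one output channel: (row . (in + ioff)) in s3.12, plus ooff */
static int32_t csc_channel(const int32_t row[3], const int32_t in[3],
			   const int32_t ioff[3], int32_t ooff)
{
	int64_t acc = 0;
	int i;

	for (i = 0; i < 3; i++)
		acc += (int64_t)row[i] * (in[i] + ioff[i]);
	return (int32_t)(acc >> 12) + ooff; /* drop the 12 fraction bits */
}

int main(void)
{
	const int32_t identity[3] = { 1 << 12, 0, 0 }; /* 1.0 in s3.12 */
	const int32_t in[3] = { 100, 0, 0 };
	const int32_t ioff[3] = { 0, 0, 0 };

	printf("%d\n", csc_channel(identity, in, ioff, 0)); /* prints 100 */
	return 0;
}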
4663/* Skylake plane registers */
4664
4665#define _PLANE_CTL_1_A 0x70180
4666#define _PLANE_CTL_2_A 0x70280
4667#define _PLANE_CTL_3_A 0x70380
4668#define PLANE_CTL_ENABLE (1 << 31)
4669#define PLANE_CTL_PIPE_GAMMA_ENABLE (1 << 30)
4670#define PLANE_CTL_FORMAT_MASK (0xf << 24)
4671#define PLANE_CTL_FORMAT_YUV422 ( 0 << 24)
4672#define PLANE_CTL_FORMAT_NV12 ( 1 << 24)
4673#define PLANE_CTL_FORMAT_XRGB_2101010 ( 2 << 24)
4674#define PLANE_CTL_FORMAT_XRGB_8888 ( 4 << 24)
4675#define PLANE_CTL_FORMAT_XRGB_16161616F ( 6 << 24)
4676#define PLANE_CTL_FORMAT_AYUV ( 8 << 24)
4677#define PLANE_CTL_FORMAT_INDEXED ( 12 << 24)
4678#define PLANE_CTL_FORMAT_RGB_565 ( 14 << 24)
4679#define PLANE_CTL_PIPE_CSC_ENABLE (1 << 23)
4680#define PLANE_CTL_KEY_ENABLE_MASK (0x3 << 21)
4681#define PLANE_CTL_KEY_ENABLE_SOURCE ( 1 << 21)
4682#define PLANE_CTL_KEY_ENABLE_DESTINATION ( 2 << 21)
4683#define PLANE_CTL_ORDER_BGRX (0 << 20)
4684#define PLANE_CTL_ORDER_RGBX (1 << 20)
4685#define PLANE_CTL_YUV422_ORDER_MASK (0x3 << 16)
4686#define PLANE_CTL_YUV422_YUYV ( 0 << 16)
4687#define PLANE_CTL_YUV422_UYVY ( 1 << 16)
4688#define PLANE_CTL_YUV422_YVYU ( 2 << 16)
4689#define PLANE_CTL_YUV422_VYUY ( 3 << 16)
4690#define PLANE_CTL_DECOMPRESSION_ENABLE (1 << 15)
4691#define PLANE_CTL_TRICKLE_FEED_DISABLE (1 << 14)
4692#define PLANE_CTL_PLANE_GAMMA_DISABLE (1 << 13)
4693#define PLANE_CTL_TILED_MASK (0x7 << 10)
4694#define PLANE_CTL_TILED_LINEAR ( 0 << 10)
4695#define PLANE_CTL_TILED_X ( 1 << 10)
4696#define PLANE_CTL_TILED_Y ( 4 << 10)
4697#define PLANE_CTL_TILED_YF ( 5 << 10)
4698#define PLANE_CTL_ALPHA_MASK (0x3 << 4)
4699#define PLANE_CTL_ALPHA_DISABLE ( 0 << 4)
4700#define PLANE_CTL_ALPHA_SW_PREMULTIPLY ( 2 << 4)
4701#define PLANE_CTL_ALPHA_HW_PREMULTIPLY ( 3 << 4)
4702#define PLANE_CTL_ROTATE_MASK 0x3
4703#define PLANE_CTL_ROTATE_0 0x0
4704#define PLANE_CTL_ROTATE_180 0x2
4705#define _PLANE_STRIDE_1_A 0x70188
4706#define _PLANE_STRIDE_2_A 0x70288
4707#define _PLANE_STRIDE_3_A 0x70388
4708#define _PLANE_POS_1_A 0x7018c
4709#define _PLANE_POS_2_A 0x7028c
4710#define _PLANE_POS_3_A 0x7038c
4711#define _PLANE_SIZE_1_A 0x70190
4712#define _PLANE_SIZE_2_A 0x70290
4713#define _PLANE_SIZE_3_A 0x70390
4714#define _PLANE_SURF_1_A 0x7019c
4715#define _PLANE_SURF_2_A 0x7029c
4716#define _PLANE_SURF_3_A 0x7039c
4717#define _PLANE_OFFSET_1_A 0x701a4
4718#define _PLANE_OFFSET_2_A 0x702a4
4719#define _PLANE_OFFSET_3_A 0x703a4
4720#define _PLANE_KEYVAL_1_A 0x70194
4721#define _PLANE_KEYVAL_2_A 0x70294
4722#define _PLANE_KEYMSK_1_A 0x70198
4723#define _PLANE_KEYMSK_2_A 0x70298
4724#define _PLANE_KEYMAX_1_A 0x701a0
4725#define _PLANE_KEYMAX_2_A 0x702a0
4726#define _PLANE_BUF_CFG_1_A 0x7027c
4727#define _PLANE_BUF_CFG_2_A 0x7037c
4728
4729#define _PLANE_CTL_1_B 0x71180
4730#define _PLANE_CTL_2_B 0x71280
4731#define _PLANE_CTL_3_B 0x71380
4732#define _PLANE_CTL_1(pipe) _PIPE(pipe, _PLANE_CTL_1_A, _PLANE_CTL_1_B)
4733#define _PLANE_CTL_2(pipe) _PIPE(pipe, _PLANE_CTL_2_A, _PLANE_CTL_2_B)
4734#define _PLANE_CTL_3(pipe) _PIPE(pipe, _PLANE_CTL_3_A, _PLANE_CTL_3_B)
4735#define PLANE_CTL(pipe, plane) \
4736 _PLANE(plane, _PLANE_CTL_1(pipe), _PLANE_CTL_2(pipe))
4737
4738#define _PLANE_STRIDE_1_B 0x71188
4739#define _PLANE_STRIDE_2_B 0x71288
4740#define _PLANE_STRIDE_3_B 0x71388
4741#define _PLANE_STRIDE_1(pipe) \
4742 _PIPE(pipe, _PLANE_STRIDE_1_A, _PLANE_STRIDE_1_B)
4743#define _PLANE_STRIDE_2(pipe) \
4744 _PIPE(pipe, _PLANE_STRIDE_2_A, _PLANE_STRIDE_2_B)
4745#define _PLANE_STRIDE_3(pipe) \
4746 _PIPE(pipe, _PLANE_STRIDE_3_A, _PLANE_STRIDE_3_B)
4747#define PLANE_STRIDE(pipe, plane) \
4748 _PLANE(plane, _PLANE_STRIDE_1(pipe), _PLANE_STRIDE_2(pipe))
4749
4750#define _PLANE_POS_1_B 0x7118c
4751#define _PLANE_POS_2_B 0x7128c
4752#define _PLANE_POS_3_B 0x7138c
4753#define _PLANE_POS_1(pipe) _PIPE(pipe, _PLANE_POS_1_A, _PLANE_POS_1_B)
4754#define _PLANE_POS_2(pipe) _PIPE(pipe, _PLANE_POS_2_A, _PLANE_POS_2_B)
4755#define _PLANE_POS_3(pipe) _PIPE(pipe, _PLANE_POS_3_A, _PLANE_POS_3_B)
4756#define PLANE_POS(pipe, plane) \
4757 _PLANE(plane, _PLANE_POS_1(pipe), _PLANE_POS_2(pipe))
4758
4759#define _PLANE_SIZE_1_B 0x71190
4760#define _PLANE_SIZE_2_B 0x71290
4761#define _PLANE_SIZE_3_B 0x71390
4762#define _PLANE_SIZE_1(pipe) _PIPE(pipe, _PLANE_SIZE_1_A, _PLANE_SIZE_1_B)
4763#define _PLANE_SIZE_2(pipe) _PIPE(pipe, _PLANE_SIZE_2_A, _PLANE_SIZE_2_B)
4764#define _PLANE_SIZE_3(pipe) _PIPE(pipe, _PLANE_SIZE_3_A, _PLANE_SIZE_3_B)
4765#define PLANE_SIZE(pipe, plane) \
4766 _PLANE(plane, _PLANE_SIZE_1(pipe), _PLANE_SIZE_2(pipe))
4767
4768#define _PLANE_SURF_1_B 0x7119c
4769#define _PLANE_SURF_2_B 0x7129c
4770#define _PLANE_SURF_3_B 0x7139c
4771#define _PLANE_SURF_1(pipe) _PIPE(pipe, _PLANE_SURF_1_A, _PLANE_SURF_1_B)
4772#define _PLANE_SURF_2(pipe) _PIPE(pipe, _PLANE_SURF_2_A, _PLANE_SURF_2_B)
4773#define _PLANE_SURF_3(pipe) _PIPE(pipe, _PLANE_SURF_3_A, _PLANE_SURF_3_B)
4774#define PLANE_SURF(pipe, plane) \
4775 _PLANE(plane, _PLANE_SURF_1(pipe), _PLANE_SURF_2(pipe))
4776
4777#define _PLANE_OFFSET_1_B 0x711a4
4778#define _PLANE_OFFSET_2_B 0x712a4
4779#define _PLANE_OFFSET_1(pipe) _PIPE(pipe, _PLANE_OFFSET_1_A, _PLANE_OFFSET_1_B)
4780#define _PLANE_OFFSET_2(pipe) _PIPE(pipe, _PLANE_OFFSET_2_A, _PLANE_OFFSET_2_B)
4781#define PLANE_OFFSET(pipe, plane) \
4782 _PLANE(plane, _PLANE_OFFSET_1(pipe), _PLANE_OFFSET_2(pipe))
4783
4784#define _PLANE_KEYVAL_1_B 0x71194
4785#define _PLANE_KEYVAL_2_B 0x71294
4786#define _PLANE_KEYVAL_1(pipe) _PIPE(pipe, _PLANE_KEYVAL_1_A, _PLANE_KEYVAL_1_B)
4787#define _PLANE_KEYVAL_2(pipe) _PIPE(pipe, _PLANE_KEYVAL_2_A, _PLANE_KEYVAL_2_B)
4788#define PLANE_KEYVAL(pipe, plane) \
4789 _PLANE(plane, _PLANE_KEYVAL_1(pipe), _PLANE_KEYVAL_2(pipe))
4790
4791#define _PLANE_KEYMSK_1_B 0x71198
4792#define _PLANE_KEYMSK_2_B 0x71298
4793#define _PLANE_KEYMSK_1(pipe) _PIPE(pipe, _PLANE_KEYMSK_1_A, _PLANE_KEYMSK_1_B)
4794#define _PLANE_KEYMSK_2(pipe) _PIPE(pipe, _PLANE_KEYMSK_2_A, _PLANE_KEYMSK_2_B)
4795#define PLANE_KEYMSK(pipe, plane) \
4796 _PLANE(plane, _PLANE_KEYMSK_1(pipe), _PLANE_KEYMSK_2(pipe))
4797
4798#define _PLANE_KEYMAX_1_B 0x711a0
4799#define _PLANE_KEYMAX_2_B 0x712a0
4800#define _PLANE_KEYMAX_1(pipe) _PIPE(pipe, _PLANE_KEYMAX_1_A, _PLANE_KEYMAX_1_B)
4801#define _PLANE_KEYMAX_2(pipe) _PIPE(pipe, _PLANE_KEYMAX_2_A, _PLANE_KEYMAX_2_B)
4802#define PLANE_KEYMAX(pipe, plane) \
4803 _PLANE(plane, _PLANE_KEYMAX_1(pipe), _PLANE_KEYMAX_2(pipe))
4804
4805#define _PLANE_BUF_CFG_1_B 0x7127c
4806#define _PLANE_BUF_CFG_2_B 0x7137c
4807#define _PLANE_BUF_CFG_1(pipe) \
4808 _PIPE(pipe, _PLANE_BUF_CFG_1_A, _PLANE_BUF_CFG_1_B)
4809#define _PLANE_BUF_CFG_2(pipe) \
4810 _PIPE(pipe, _PLANE_BUF_CFG_2_A, _PLANE_BUF_CFG_2_B)
4811#define PLANE_BUF_CFG(pipe, plane) \
4812 _PLANE(plane, _PLANE_BUF_CFG_1(pipe), _PLANE_BUF_CFG_2(pipe))
4813
4814/* SKL new cursor registers */
4815#define _CUR_BUF_CFG_A 0x7017c
4816#define _CUR_BUF_CFG_B 0x7117c
4817#define CUR_BUF_CFG(pipe) _PIPE(pipe, _CUR_BUF_CFG_A, _CUR_BUF_CFG_B)
4818
4513/* VBIOS regs */ 4819/* VBIOS regs */
4514#define VGACNTRL 0x71400 4820#define VGACNTRL 0x71400
4515# define VGA_DISP_DISABLE (1 << 31) 4821# define VGA_DISP_DISABLE (1 << 31)
@@ -4625,6 +4931,18 @@ enum punit_power_well {
4625#define PF_VSCALE(pipe) _PIPE(pipe, _PFA_VSCALE, _PFB_VSCALE) 4931#define PF_VSCALE(pipe) _PIPE(pipe, _PFA_VSCALE, _PFB_VSCALE)
4626#define PF_HSCALE(pipe) _PIPE(pipe, _PFA_HSCALE, _PFB_HSCALE) 4932#define PF_HSCALE(pipe) _PIPE(pipe, _PFA_HSCALE, _PFB_HSCALE)
4627 4933
4934#define _PSA_CTL 0x68180
4935#define _PSB_CTL 0x68980
4936#define PS_ENABLE (1<<31)
4937#define _PSA_WIN_SZ 0x68174
4938#define _PSB_WIN_SZ 0x68974
4939#define _PSA_WIN_POS 0x68170
4940#define _PSB_WIN_POS 0x68970
4941
4942#define PS_CTL(pipe) _PIPE(pipe, _PSA_CTL, _PSB_CTL)
4943#define PS_WIN_SZ(pipe) _PIPE(pipe, _PSA_WIN_SZ, _PSB_WIN_SZ)
4944#define PS_WIN_POS(pipe) _PIPE(pipe, _PSA_WIN_POS, _PSB_WIN_POS)
4945
4628/* legacy palette */ 4946/* legacy palette */
4629#define _LGC_PALETTE_A 0x4a000 4947#define _LGC_PALETTE_A 0x4a000
4630#define _LGC_PALETTE_B 0x4a800 4948#define _LGC_PALETTE_B 0x4a800
@@ -4746,16 +5064,32 @@ enum punit_power_well {
4746#define GEN8_PIPE_SCAN_LINE_EVENT (1 << 2) 5064#define GEN8_PIPE_SCAN_LINE_EVENT (1 << 2)
4747#define GEN8_PIPE_VSYNC (1 << 1) 5065#define GEN8_PIPE_VSYNC (1 << 1)
4748#define GEN8_PIPE_VBLANK (1 << 0) 5066#define GEN8_PIPE_VBLANK (1 << 0)
5067#define GEN9_PIPE_CURSOR_FAULT (1 << 11)
5068#define GEN9_PIPE_PLANE3_FAULT (1 << 9)
5069#define GEN9_PIPE_PLANE2_FAULT (1 << 8)
5070#define GEN9_PIPE_PLANE1_FAULT (1 << 7)
5071#define GEN9_PIPE_PLANE3_FLIP_DONE (1 << 5)
5072#define GEN9_PIPE_PLANE2_FLIP_DONE (1 << 4)
5073#define GEN9_PIPE_PLANE1_FLIP_DONE (1 << 3)
5074#define GEN9_PIPE_PLANE_FLIP_DONE(p) (1 << (3 + p))
4749#define GEN8_DE_PIPE_IRQ_FAULT_ERRORS \ 5075#define GEN8_DE_PIPE_IRQ_FAULT_ERRORS \
4750 (GEN8_PIPE_CURSOR_FAULT | \ 5076 (GEN8_PIPE_CURSOR_FAULT | \
4751 GEN8_PIPE_SPRITE_FAULT | \ 5077 GEN8_PIPE_SPRITE_FAULT | \
4752 GEN8_PIPE_PRIMARY_FAULT) 5078 GEN8_PIPE_PRIMARY_FAULT)
5079#define GEN9_DE_PIPE_IRQ_FAULT_ERRORS \
5080 (GEN9_PIPE_CURSOR_FAULT | \
5081 GEN9_PIPE_PLANE3_FAULT | \
5082 GEN9_PIPE_PLANE2_FAULT | \
5083 GEN9_PIPE_PLANE1_FAULT)
4753 5084
4754#define GEN8_DE_PORT_ISR 0x44440 5085#define GEN8_DE_PORT_ISR 0x44440
4755#define GEN8_DE_PORT_IMR 0x44444 5086#define GEN8_DE_PORT_IMR 0x44444
4756#define GEN8_DE_PORT_IIR 0x44448 5087#define GEN8_DE_PORT_IIR 0x44448
4757#define GEN8_DE_PORT_IER 0x4444c 5088#define GEN8_DE_PORT_IER 0x4444c
4758#define GEN8_PORT_DP_A_HOTPLUG (1 << 3) 5089#define GEN8_PORT_DP_A_HOTPLUG (1 << 3)
5090#define GEN9_AUX_CHANNEL_D (1 << 27)
5091#define GEN9_AUX_CHANNEL_C (1 << 26)
5092#define GEN9_AUX_CHANNEL_B (1 << 25)
4759#define GEN8_AUX_CHANNEL_A (1 << 0) 5093#define GEN8_AUX_CHANNEL_A (1 << 0)
4760 5094
4761#define GEN8_DE_MISC_ISR 0x44460 5095#define GEN8_DE_MISC_ISR 0x44460
@@ -4839,6 +5173,8 @@ enum punit_power_well {
4839/* GEN8 chicken */ 5173/* GEN8 chicken */
4840#define HDC_CHICKEN0 0x7300 5174#define HDC_CHICKEN0 0x7300
4841#define HDC_FORCE_NON_COHERENT (1<<4) 5175#define HDC_FORCE_NON_COHERENT (1<<4)
5176#define HDC_DONOT_FETCH_MEM_WHEN_MASKED (1<<11)
5177#define HDC_FENCE_DEST_SLM_DISABLE (1<<14)
4842 5178
4843/* WaCatErrorRejectionIssue */ 5179/* WaCatErrorRejectionIssue */
4844#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030 5180#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030
@@ -5540,6 +5876,12 @@ enum punit_power_well {
5540#define VLV_GTLC_PW_MEDIA_STATUS_MASK (1 << 5) 5876#define VLV_GTLC_PW_MEDIA_STATUS_MASK (1 << 5)
5541#define VLV_GTLC_PW_RENDER_STATUS_MASK (1 << 7) 5877#define VLV_GTLC_PW_RENDER_STATUS_MASK (1 << 7)
5542#define FORCEWAKE_MT 0xa188 /* multi-threaded */ 5878#define FORCEWAKE_MT 0xa188 /* multi-threaded */
5879#define FORCEWAKE_MEDIA_GEN9 0xa270
5880#define FORCEWAKE_RENDER_GEN9 0xa278
5881#define FORCEWAKE_BLITTER_GEN9 0xa188
5882#define FORCEWAKE_ACK_MEDIA_GEN9 0x0D88
5883#define FORCEWAKE_ACK_RENDER_GEN9 0x0D84
5884#define FORCEWAKE_ACK_BLITTER_GEN9 0x130044
5543#define FORCEWAKE_KERNEL 0x1 5885#define FORCEWAKE_KERNEL 0x1
5544#define FORCEWAKE_USER 0x2 5886#define FORCEWAKE_USER 0x2
5545#define FORCEWAKE_MT_ACK 0x130040 5887#define FORCEWAKE_MT_ACK 0x130040
@@ -5711,9 +6053,17 @@ enum punit_power_well {
5711#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5) 6053#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5)
5712#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245) 6054#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245)
5713#define DISPLAY_IPS_CONTROL 0x19 6055#define DISPLAY_IPS_CONTROL 0x19
6056#define HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL 0x1A
5714#define GEN6_PCODE_DATA 0x138128 6057#define GEN6_PCODE_DATA 0x138128
5715#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8 6058#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
5716#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16 6059#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16
6060#define GEN6_PCODE_DATA1 0x13812C
6061
6062#define GEN9_PCODE_READ_MEM_LATENCY 0x6
6063#define GEN9_MEM_LATENCY_LEVEL_MASK 0xFF
6064#define GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT 8
6065#define GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT 16
6066#define GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT 24
5717 6067
5718#define GEN6_GT_CORE_STATUS 0x138060 6068#define GEN6_GT_CORE_STATUS 0x138060
5719#define GEN6_CORE_CPD_STATE_MASK (7<<4) 6069#define GEN6_CORE_CPD_STATE_MASK (7<<4)
@@ -5751,6 +6101,9 @@ enum punit_power_well {
5751#define GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE (1<<10) 6101#define GEN7_SINGLE_SUBSCAN_DISPATCH_ENABLE (1<<10)
5752#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3) 6102#define GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE (1<<3)
5753 6103
6104#define GEN9_HALF_SLICE_CHICKEN5 0xe188
6105#define GEN9_DG_MIRROR_FIX_ENABLE (1<<5)
6106
5754#define GEN8_ROW_CHICKEN 0xe4f0 6107#define GEN8_ROW_CHICKEN 0xe4f0
5755#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1<<8) 6108#define PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE (1<<8)
5756#define STALL_DOP_GATING_DISABLE (1<<5) 6109#define STALL_DOP_GATING_DISABLE (1<<5)
@@ -5766,57 +6119,58 @@ enum punit_power_well {
5766#define GEN8_CENTROID_PIXEL_OPT_DIS (1<<8) 6119#define GEN8_CENTROID_PIXEL_OPT_DIS (1<<8)
5767#define GEN8_SAMPLER_POWER_BYPASS_DIS (1<<1) 6120#define GEN8_SAMPLER_POWER_BYPASS_DIS (1<<1)
5768 6121
6122/* Audio */
5769#define G4X_AUD_VID_DID (dev_priv->info.display_mmio_offset + 0x62020) 6123#define G4X_AUD_VID_DID (dev_priv->info.display_mmio_offset + 0x62020)
5770#define INTEL_AUDIO_DEVCL 0x808629FB 6124#define INTEL_AUDIO_DEVCL 0x808629FB
5771#define INTEL_AUDIO_DEVBLC 0x80862801 6125#define INTEL_AUDIO_DEVBLC 0x80862801
5772#define INTEL_AUDIO_DEVCTG 0x80862802 6126#define INTEL_AUDIO_DEVCTG 0x80862802
5773 6127
5774#define G4X_AUD_CNTL_ST 0x620B4 6128#define G4X_AUD_CNTL_ST 0x620B4
5775#define G4X_ELDV_DEVCL_DEVBLC (1 << 13) 6129#define G4X_ELDV_DEVCL_DEVBLC (1 << 13)
5776#define G4X_ELDV_DEVCTG (1 << 14) 6130#define G4X_ELDV_DEVCTG (1 << 14)
5777#define G4X_ELD_ADDR (0xf << 5) 6131#define G4X_ELD_ADDR_MASK (0xf << 5)
5778#define G4X_ELD_ACK (1 << 4) 6132#define G4X_ELD_ACK (1 << 4)
5779#define G4X_HDMIW_HDMIEDID 0x6210C 6133#define G4X_HDMIW_HDMIEDID 0x6210C
5780 6134
5781#define IBX_HDMIW_HDMIEDID_A 0xE2050 6135#define _IBX_HDMIW_HDMIEDID_A 0xE2050
5782#define IBX_HDMIW_HDMIEDID_B 0xE2150 6136#define _IBX_HDMIW_HDMIEDID_B 0xE2150
5783#define IBX_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \ 6137#define IBX_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
5784 IBX_HDMIW_HDMIEDID_A, \ 6138 _IBX_HDMIW_HDMIEDID_A, \
5785 IBX_HDMIW_HDMIEDID_B) 6139 _IBX_HDMIW_HDMIEDID_B)
5786#define IBX_AUD_CNTL_ST_A 0xE20B4 6140#define _IBX_AUD_CNTL_ST_A 0xE20B4
5787#define IBX_AUD_CNTL_ST_B 0xE21B4 6141#define _IBX_AUD_CNTL_ST_B 0xE21B4
5788#define IBX_AUD_CNTL_ST(pipe) _PIPE(pipe, \ 6142#define IBX_AUD_CNTL_ST(pipe) _PIPE(pipe, \
5789 IBX_AUD_CNTL_ST_A, \ 6143 _IBX_AUD_CNTL_ST_A, \
5790 IBX_AUD_CNTL_ST_B) 6144 _IBX_AUD_CNTL_ST_B)
5791#define IBX_ELD_BUFFER_SIZE (0x1f << 10) 6145#define IBX_ELD_BUFFER_SIZE_MASK (0x1f << 10)
5792#define IBX_ELD_ADDRESS (0x1f << 5) 6146#define IBX_ELD_ADDRESS_MASK (0x1f << 5)
5793#define IBX_ELD_ACK (1 << 4) 6147#define IBX_ELD_ACK (1 << 4)
5794#define IBX_AUD_CNTL_ST2 0xE20C0 6148#define IBX_AUD_CNTL_ST2 0xE20C0
5795#define IBX_ELD_VALIDB (1 << 0) 6149#define IBX_CP_READY(port) ((1 << 1) << (((port) - 1) * 4))
5796#define IBX_CP_READYB (1 << 1) 6150#define IBX_ELD_VALID(port) ((1 << 0) << (((port) - 1) * 4))
5797 6151
5798#define CPT_HDMIW_HDMIEDID_A 0xE5050 6152#define _CPT_HDMIW_HDMIEDID_A 0xE5050
5799#define CPT_HDMIW_HDMIEDID_B 0xE5150 6153#define _CPT_HDMIW_HDMIEDID_B 0xE5150
5800#define CPT_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \ 6154#define CPT_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
5801 CPT_HDMIW_HDMIEDID_A, \ 6155 _CPT_HDMIW_HDMIEDID_A, \
5802 CPT_HDMIW_HDMIEDID_B) 6156 _CPT_HDMIW_HDMIEDID_B)
5803#define CPT_AUD_CNTL_ST_A 0xE50B4 6157#define _CPT_AUD_CNTL_ST_A 0xE50B4
5804#define CPT_AUD_CNTL_ST_B 0xE51B4 6158#define _CPT_AUD_CNTL_ST_B 0xE51B4
5805#define CPT_AUD_CNTL_ST(pipe) _PIPE(pipe, \ 6159#define CPT_AUD_CNTL_ST(pipe) _PIPE(pipe, \
5806 CPT_AUD_CNTL_ST_A, \ 6160 _CPT_AUD_CNTL_ST_A, \
5807 CPT_AUD_CNTL_ST_B) 6161 _CPT_AUD_CNTL_ST_B)
5808#define CPT_AUD_CNTRL_ST2 0xE50C0 6162#define CPT_AUD_CNTRL_ST2 0xE50C0
5809 6163
5810#define VLV_HDMIW_HDMIEDID_A (VLV_DISPLAY_BASE + 0x62050) 6164#define _VLV_HDMIW_HDMIEDID_A (VLV_DISPLAY_BASE + 0x62050)
5811#define VLV_HDMIW_HDMIEDID_B (VLV_DISPLAY_BASE + 0x62150) 6165#define _VLV_HDMIW_HDMIEDID_B (VLV_DISPLAY_BASE + 0x62150)
5812#define VLV_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \ 6166#define VLV_HDMIW_HDMIEDID(pipe) _PIPE(pipe, \
5813 VLV_HDMIW_HDMIEDID_A, \ 6167 _VLV_HDMIW_HDMIEDID_A, \
5814 VLV_HDMIW_HDMIEDID_B) 6168 _VLV_HDMIW_HDMIEDID_B)
5815#define VLV_AUD_CNTL_ST_A (VLV_DISPLAY_BASE + 0x620B4) 6169#define _VLV_AUD_CNTL_ST_A (VLV_DISPLAY_BASE + 0x620B4)
5816#define VLV_AUD_CNTL_ST_B (VLV_DISPLAY_BASE + 0x621B4) 6170#define _VLV_AUD_CNTL_ST_B (VLV_DISPLAY_BASE + 0x621B4)
5817#define VLV_AUD_CNTL_ST(pipe) _PIPE(pipe, \ 6171#define VLV_AUD_CNTL_ST(pipe) _PIPE(pipe, \
5818 VLV_AUD_CNTL_ST_A, \ 6172 _VLV_AUD_CNTL_ST_A, \
5819 VLV_AUD_CNTL_ST_B) 6173 _VLV_AUD_CNTL_ST_B)
5820#define VLV_AUD_CNTL_ST2 (VLV_DISPLAY_BASE + 0x620C0) 6174#define VLV_AUD_CNTL_ST2 (VLV_DISPLAY_BASE + 0x620C0)
5821 6175
5822/* These are the 4 32-bit write offset registers for each stream 6176/* These are the 4 32-bit write offset registers for each stream
@@ -5825,28 +6179,28 @@ enum punit_power_well {
5825 */ 6179 */
5826#define GEN7_SO_WRITE_OFFSET(n) (0x5280 + (n) * 4) 6180#define GEN7_SO_WRITE_OFFSET(n) (0x5280 + (n) * 4)
5827 6181
5828#define IBX_AUD_CONFIG_A 0xe2000 6182#define _IBX_AUD_CONFIG_A 0xe2000
5829#define IBX_AUD_CONFIG_B 0xe2100 6183#define _IBX_AUD_CONFIG_B 0xe2100
5830#define IBX_AUD_CFG(pipe) _PIPE(pipe, \ 6184#define IBX_AUD_CFG(pipe) _PIPE(pipe, \
5831 IBX_AUD_CONFIG_A, \ 6185 _IBX_AUD_CONFIG_A, \
5832 IBX_AUD_CONFIG_B) 6186 _IBX_AUD_CONFIG_B)
5833#define CPT_AUD_CONFIG_A 0xe5000 6187#define _CPT_AUD_CONFIG_A 0xe5000
5834#define CPT_AUD_CONFIG_B 0xe5100 6188#define _CPT_AUD_CONFIG_B 0xe5100
5835#define CPT_AUD_CFG(pipe) _PIPE(pipe, \ 6189#define CPT_AUD_CFG(pipe) _PIPE(pipe, \
5836 CPT_AUD_CONFIG_A, \ 6190 _CPT_AUD_CONFIG_A, \
5837 CPT_AUD_CONFIG_B) 6191 _CPT_AUD_CONFIG_B)
5838#define VLV_AUD_CONFIG_A (VLV_DISPLAY_BASE + 0x62000) 6192#define _VLV_AUD_CONFIG_A (VLV_DISPLAY_BASE + 0x62000)
5839#define VLV_AUD_CONFIG_B (VLV_DISPLAY_BASE + 0x62100) 6193#define _VLV_AUD_CONFIG_B (VLV_DISPLAY_BASE + 0x62100)
5840#define VLV_AUD_CFG(pipe) _PIPE(pipe, \ 6194#define VLV_AUD_CFG(pipe) _PIPE(pipe, \
5841 VLV_AUD_CONFIG_A, \ 6195 _VLV_AUD_CONFIG_A, \
5842 VLV_AUD_CONFIG_B) 6196 _VLV_AUD_CONFIG_B)
5843 6197
5844#define AUD_CONFIG_N_VALUE_INDEX (1 << 29) 6198#define AUD_CONFIG_N_VALUE_INDEX (1 << 29)
5845#define AUD_CONFIG_N_PROG_ENABLE (1 << 28) 6199#define AUD_CONFIG_N_PROG_ENABLE (1 << 28)
5846#define AUD_CONFIG_UPPER_N_SHIFT 20 6200#define AUD_CONFIG_UPPER_N_SHIFT 20
5847#define AUD_CONFIG_UPPER_N_VALUE (0xff << 20) 6201#define AUD_CONFIG_UPPER_N_MASK (0xff << 20)
5848#define AUD_CONFIG_LOWER_N_SHIFT 4 6202#define AUD_CONFIG_LOWER_N_SHIFT 4
5849#define AUD_CONFIG_LOWER_N_VALUE (0xfff << 4) 6203#define AUD_CONFIG_LOWER_N_MASK (0xfff << 4)
5850#define AUD_CONFIG_PIXEL_CLOCK_HDMI_SHIFT 16 6204#define AUD_CONFIG_PIXEL_CLOCK_HDMI_SHIFT 16
5851#define AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK (0xf << 16) 6205#define AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK (0xf << 16)
5852#define AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 (0 << 16) 6206#define AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 (0 << 16)
@@ -5862,52 +6216,44 @@ enum punit_power_well {
5862#define AUD_CONFIG_DISABLE_NCTS (1 << 3) 6216#define AUD_CONFIG_DISABLE_NCTS (1 << 3)
5863 6217
5864/* HSW Audio */ 6218/* HSW Audio */
5865#define HSW_AUD_CONFIG_A 0x65000 /* Audio Configuration Transcoder A */ 6219#define _HSW_AUD_CONFIG_A 0x65000
5866#define HSW_AUD_CONFIG_B 0x65100 /* Audio Configuration Transcoder B */ 6220#define _HSW_AUD_CONFIG_B 0x65100
5867#define HSW_AUD_CFG(pipe) _PIPE(pipe, \ 6221#define HSW_AUD_CFG(pipe) _PIPE(pipe, \
5868 HSW_AUD_CONFIG_A, \ 6222 _HSW_AUD_CONFIG_A, \
5869 HSW_AUD_CONFIG_B) 6223 _HSW_AUD_CONFIG_B)
5870 6224
5871#define HSW_AUD_MISC_CTRL_A 0x65010 /* Audio Misc Control Convert 1 */ 6225#define _HSW_AUD_MISC_CTRL_A 0x65010
5872#define HSW_AUD_MISC_CTRL_B 0x65110 /* Audio Misc Control Convert 2 */ 6226#define _HSW_AUD_MISC_CTRL_B 0x65110
5873#define HSW_AUD_MISC_CTRL(pipe) _PIPE(pipe, \ 6227#define HSW_AUD_MISC_CTRL(pipe) _PIPE(pipe, \
5874 HSW_AUD_MISC_CTRL_A, \ 6228 _HSW_AUD_MISC_CTRL_A, \
5875 HSW_AUD_MISC_CTRL_B) 6229 _HSW_AUD_MISC_CTRL_B)
5876 6230
5877#define HSW_AUD_DIP_ELD_CTRL_ST_A 0x650b4 /* Audio DIP and ELD Control State Transcoder A */ 6231#define _HSW_AUD_DIP_ELD_CTRL_ST_A 0x650b4
5878#define HSW_AUD_DIP_ELD_CTRL_ST_B 0x651b4 /* Audio DIP and ELD Control State Transcoder B */ 6232#define _HSW_AUD_DIP_ELD_CTRL_ST_B 0x651b4
5879#define HSW_AUD_DIP_ELD_CTRL(pipe) _PIPE(pipe, \ 6233#define HSW_AUD_DIP_ELD_CTRL(pipe) _PIPE(pipe, \
5880 HSW_AUD_DIP_ELD_CTRL_ST_A, \ 6234 _HSW_AUD_DIP_ELD_CTRL_ST_A, \
5881 HSW_AUD_DIP_ELD_CTRL_ST_B) 6235 _HSW_AUD_DIP_ELD_CTRL_ST_B)
5882 6236
5883/* Audio Digital Converter */ 6237/* Audio Digital Converter */
5884#define HSW_AUD_DIG_CNVT_1 0x65080 /* Audio Converter 1 */ 6238#define _HSW_AUD_DIG_CNVT_1 0x65080
5885#define HSW_AUD_DIG_CNVT_2 0x65180 /* Audio Converter 1 */ 6239#define _HSW_AUD_DIG_CNVT_2 0x65180
5886#define AUD_DIG_CNVT(pipe) _PIPE(pipe, \ 6240#define AUD_DIG_CNVT(pipe) _PIPE(pipe, \
5887 HSW_AUD_DIG_CNVT_1, \ 6241 _HSW_AUD_DIG_CNVT_1, \
5888 HSW_AUD_DIG_CNVT_2) 6242 _HSW_AUD_DIG_CNVT_2)
5889#define DIP_PORT_SEL_MASK 0x3 6243#define DIP_PORT_SEL_MASK 0x3
5890 6244
5891#define HSW_AUD_EDID_DATA_A 0x65050 6245#define _HSW_AUD_EDID_DATA_A 0x65050
5892#define HSW_AUD_EDID_DATA_B 0x65150 6246#define _HSW_AUD_EDID_DATA_B 0x65150
5893#define HSW_AUD_EDID_DATA(pipe) _PIPE(pipe, \ 6247#define HSW_AUD_EDID_DATA(pipe) _PIPE(pipe, \
5894 HSW_AUD_EDID_DATA_A, \ 6248 _HSW_AUD_EDID_DATA_A, \
5895 HSW_AUD_EDID_DATA_B) 6249 _HSW_AUD_EDID_DATA_B)
5896 6250
5897#define HSW_AUD_PIPE_CONV_CFG 0x6507c /* Audio pipe and converter configs */ 6251#define HSW_AUD_PIPE_CONV_CFG 0x6507c
5898#define HSW_AUD_PIN_ELD_CP_VLD 0x650c0 /* Audio ELD and CP Ready Status */ 6252#define HSW_AUD_PIN_ELD_CP_VLD 0x650c0
5899#define AUDIO_INACTIVE_C (1<<11) 6253#define AUDIO_INACTIVE(trans) ((1 << 3) << ((trans) * 4))
5900#define AUDIO_INACTIVE_B (1<<7) 6254#define AUDIO_OUTPUT_ENABLE(trans) ((1 << 2) << ((trans) * 4))
5901#define AUDIO_INACTIVE_A (1<<3) 6255#define AUDIO_CP_READY(trans) ((1 << 1) << ((trans) * 4))
5902#define AUDIO_OUTPUT_ENABLE_A (1<<2) 6256#define AUDIO_ELD_VALID(trans) ((1 << 0) << ((trans) * 4))
5903#define AUDIO_OUTPUT_ENABLE_B (1<<6)
5904#define AUDIO_OUTPUT_ENABLE_C (1<<10)
5905#define AUDIO_ELD_VALID_A (1<<0)
5906#define AUDIO_ELD_VALID_B (1<<4)
5907#define AUDIO_ELD_VALID_C (1<<8)
5908#define AUDIO_CP_READY_A (1<<1)
5909#define AUDIO_CP_READY_B (1<<5)
5910#define AUDIO_CP_READY_C (1<<9)
5911 6257
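The parameterized AUDIO_*(trans) macros above replace twelve hand-written per-transcoder constants with one nibble-per-transcoder pattern. A quick standalone check that the new form reproduces the old values (e.g. AUDIO_ELD_VALID_B was (1<<4), AUDIO_CP_READY_C was (1<<9)):

#include <stdio.h>

#define AUDIO_INACTIVE(t)      ((1 << 3) << ((t) * 4))
#define AUDIO_OUTPUT_ENABLE(t) ((1 << 2) << ((t) * 4))
#define AUDIO_CP_READY(t)      ((1 << 1) << ((t) * 4))
#define AUDIO_ELD_VALID(t)     ((1 << 0) << ((t) * 4))

int main(void)
{
	int t;

	/* transcoder A/B/C = 0/1/2 */
	for (t = 0; t < 3; t++)
		printf("trans %d: eld=0x%x cp=0x%x out=0x%x inactive=0x%x\n",
		       t, AUDIO_ELD_VALID(t), AUDIO_CP_READY(t),
		       AUDIO_OUTPUT_ENABLE(t), AUDIO_INACTIVE(t));
	return 0;
}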
5912/* HSW Power Wells */ 6258/* HSW Power Wells */
5913#define HSW_PWR_WELL_BIOS 0x45400 /* CTL1 */ 6259#define HSW_PWR_WELL_BIOS 0x45400 /* CTL1 */
@@ -6125,6 +6471,83 @@ enum punit_power_well {
6125#define LCPLL_CD_SOURCE_FCLK (1<<21) 6471#define LCPLL_CD_SOURCE_FCLK (1<<21)
6126#define LCPLL_CD_SOURCE_FCLK_DONE (1<<19) 6472#define LCPLL_CD_SOURCE_FCLK_DONE (1<<19)
6127 6473
6474/*
6475 * SKL Clocks
6476 */
6477
6478/* CDCLK_CTL */
6479#define CDCLK_CTL 0x46000
6480#define CDCLK_FREQ_SEL_MASK (3<<26)
6481#define CDCLK_FREQ_450_432 (0<<26)
6482#define CDCLK_FREQ_540 (1<<26)
6483#define CDCLK_FREQ_337_308 (2<<26)
6484#define CDCLK_FREQ_675_617 (3<<26)
6485#define CDCLK_FREQ_DECIMAL_MASK (0x7ff)
6486
6487/* LCPLL_CTL */
6488#define LCPLL1_CTL 0x46010
6489#define LCPLL2_CTL 0x46014
6490#define LCPLL_PLL_ENABLE (1<<31)
6491
6492/* DPLL control1 */
6493#define DPLL_CTRL1 0x6C058
6494#define DPLL_CTRL1_HDMI_MODE(id) (1<<((id)*6+5))
6495#define DPLL_CTRL1_SSC(id) (1<<((id)*6+4))
6496#define DPLL_CRTL1_LINK_RATE_MASK(id) (7<<((id)*6+1))
6497#define DPLL_CRTL1_LINK_RATE_SHIFT(id) ((id)*6+1)
6498#define DPLL_CRTL1_LINK_RATE(linkrate, id) ((linkrate)<<((id)*6+1))
6499#define DPLL_CTRL1_OVERRIDE(id) (1<<((id)*6))
6500#define DPLL_CRTL1_LINK_RATE_2700 0
6501#define DPLL_CRTL1_LINK_RATE_1350 1
6502#define DPLL_CRTL1_LINK_RATE_810 2
6503#define DPLL_CRTL1_LINK_RATE_1620 3
6504#define DPLL_CRTL1_LINK_RATE_1080 4
6505#define DPLL_CRTL1_LINK_RATE_2160 5
6506
6507/* DPLL control2 */
6508#define DPLL_CTRL2 0x6C05C
6509#define DPLL_CTRL2_DDI_CLK_OFF(port) (1<<(port+15))
6510#define DPLL_CTRL2_DDI_CLK_SEL_MASK(port) (3<<((port)*3+1))
6511#define DPLL_CTRL2_DDI_CLK_SEL_SHIFT(port) ((port)*3+1)
6512#define DPLL_CTRL2_DDI_CLK_SEL(clk, port) (clk<<((port)*3+1))
6513#define DPLL_CTRL2_DDI_SEL_OVERRIDE(port) (1<<((port)*3))
6514
6515/* DPLL Status */
6516#define DPLL_STATUS 0x6C060
6517#define DPLL_LOCK(id) (1<<((id)*8))
6518
6519/* DPLL cfg */
6520#define DPLL1_CFGCR1 0x6C040
6521#define DPLL2_CFGCR1 0x6C048
6522#define DPLL3_CFGCR1 0x6C050
6523#define DPLL_CFGCR1_FREQ_ENABLE (1<<31)
6524#define DPLL_CFGCR1_DCO_FRACTION_MASK (0x7fff<<9)
6525#define DPLL_CFGCR1_DCO_FRACTION(x) (x<<9)
6526#define DPLL_CFGCR1_DCO_INTEGER_MASK (0x1ff)
6527
6528#define DPLL1_CFGCR2 0x6C044
6529#define DPLL2_CFGCR2 0x6C04C
6530#define DPLL3_CFGCR2 0x6C054
6531#define DPLL_CFGCR2_QDIV_RATIO_MASK (0xff<<8)
6532#define DPLL_CFGCR2_QDIV_RATIO(x) (x<<8)
6533#define DPLL_CFGCR2_QDIV_MODE(x) (x<<7)
6534#define DPLL_CFGCR2_KDIV_MASK (3<<5)
6535#define DPLL_CFGCR2_KDIV(x) (x<<5)
6536#define DPLL_CFGCR2_KDIV_5 (0<<5)
6537#define DPLL_CFGCR2_KDIV_2 (1<<5)
6538#define DPLL_CFGCR2_KDIV_3 (2<<5)
6539#define DPLL_CFGCR2_KDIV_1 (3<<5)
6540#define DPLL_CFGCR2_PDIV_MASK (7<<2)
6541#define DPLL_CFGCR2_PDIV(x) (x<<2)
6542#define DPLL_CFGCR2_PDIV_1 (0<<2)
6543#define DPLL_CFGCR2_PDIV_2 (1<<2)
6544#define DPLL_CFGCR2_PDIV_3 (2<<2)
6545#define DPLL_CFGCR2_PDIV_7 (4<<2)
6546#define DPLL_CFGCR2_CENTRAL_FREQ_MASK (3)
6547
6548#define GET_CFG_CR1_REG(id) (DPLL1_CFGCR1 + (id - SKL_DPLL1) * 8)
6549#define GET_CFG_CR2_REG(id) (DPLL1_CFGCR2 + (id - SKL_DPLL1) * 8)
6550
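For the DPLL_CFGCR1 fields above, the DCO fraction in bits 23:9 appears to be a 15-bit binary fraction of the integer part. Assuming the usual 24 MHz SKL reference clock — an assumption, not something this patch states — the DCO frequency would decode roughly as below; the example value is invented:

#include <stdio.h>
#include <stdint.h>

#define DCO_FRACTION(x)  ((uint32_t)(x) << 9)
#define DCO_INTEGER_MASK 0x1ff

int main(void)
{
	/* hypothetical 8100 MHz DCO: 24 * (337 + 0x4000/32768) = 24 * 337.5 */
	uint32_t cfgcr1 = DCO_FRACTION(0x4000) | 337u;
	uint32_t integer = cfgcr1 & DCO_INTEGER_MASK;
	uint32_t frac = (cfgcr1 >> 9) & 0x7fff;

	printf("DCO = %.1f MHz\n", 24.0 * (integer + frac / 32768.0));
	return 0;
}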
6128/* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register, 6551/* Please see hsw_read_dcomp() and hsw_write_dcomp() before using this register,
6129 * since on HSW we can't write to it using I915_WRITE. */ 6552 * since on HSW we can't write to it using I915_WRITE. */
6130#define D_COMP_HSW (MCHBAR_MIRROR_BASE_SNB + 0x5F0C) 6553#define D_COMP_HSW (MCHBAR_MIRROR_BASE_SNB + 0x5F0C)
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 043123c77a1f..26368822a33f 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -203,34 +203,19 @@ static void i915_save_display(struct drm_device *dev)
203 i915_save_display_reg(dev); 203 i915_save_display_reg(dev);
204 204
205 /* LVDS state */ 205 /* LVDS state */
206 if (HAS_PCH_SPLIT(dev)) { 206 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
207 dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL); 207 dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS);
208 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) 208 else if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev))
209 dev_priv->regfile.saveLVDS = I915_READ(PCH_LVDS); 209 dev_priv->regfile.saveLVDS = I915_READ(LVDS);
210 } else if (IS_VALLEYVIEW(dev)) {
211 dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
212 dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
213
214 dev_priv->regfile.saveBLC_HIST_CTL =
215 I915_READ(VLV_BLC_HIST_CTL(PIPE_A));
216 dev_priv->regfile.saveBLC_HIST_CTL_B =
217 I915_READ(VLV_BLC_HIST_CTL(PIPE_B));
218 } else {
219 dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
220 dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
221 dev_priv->regfile.saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
222 if (IS_MOBILE(dev) && !IS_I830(dev))
223 dev_priv->regfile.saveLVDS = I915_READ(LVDS);
224 }
225
-	if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
-		dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
 
+	/* Panel power sequencer */
 	if (HAS_PCH_SPLIT(dev)) {
+		dev_priv->regfile.savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
 		dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
 		dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
 		dev_priv->regfile.savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
-	} else {
+	} else if (!IS_VALLEYVIEW(dev)) {
+		dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
 		dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
 		dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
 		dev_priv->regfile.savePP_DIVISOR = I915_READ(PP_DIVISOR);
@@ -259,29 +244,19 @@ static void i915_restore_display(struct drm_device *dev)
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		mask = ~LVDS_PORT_EN;
 
+	/* LVDS state */
 	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
 		I915_WRITE(PCH_LVDS, dev_priv->regfile.saveLVDS & mask);
 	else if (INTEL_INFO(dev)->gen <= 4 && IS_MOBILE(dev) && !IS_I830(dev))
 		I915_WRITE(LVDS, dev_priv->regfile.saveLVDS & mask);
 
-	if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev))
-		I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL);
-
+	/* Panel power sequencer */
 	if (HAS_PCH_SPLIT(dev)) {
 		I915_WRITE(PCH_PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
 		I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
 		I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
 		I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
-		I915_WRITE(RSTDBYCTL,
-			   dev_priv->regfile.saveMCHBAR_RENDER_STANDBY);
-	} else if (IS_VALLEYVIEW(dev)) {
-		I915_WRITE(VLV_BLC_HIST_CTL(PIPE_A),
-			   dev_priv->regfile.saveBLC_HIST_CTL);
-		I915_WRITE(VLV_BLC_HIST_CTL(PIPE_B),
-			   dev_priv->regfile.saveBLC_HIST_CTL);
-	} else {
-		I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS);
-		I915_WRITE(BLC_HIST_CTL, dev_priv->regfile.saveBLC_HIST_CTL);
+	} else if (!IS_VALLEYVIEW(dev)) {
 		I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
 		I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
 		I915_WRITE(PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
@@ -328,6 +303,10 @@ int i915_save_state(struct drm_device *dev)
 		}
 	}
 
+	if (IS_GEN4(dev))
+		pci_read_config_word(dev->pdev, GCDGMBUS,
+				     &dev_priv->regfile.saveGCDGMBUS);
+
 	/* Cache mode state */
 	if (INTEL_INFO(dev)->gen < 7)
 		dev_priv->regfile.saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
@@ -356,6 +335,10 @@ int i915_restore_state(struct drm_device *dev)
 	mutex_lock(&dev->struct_mutex);
 
 	i915_gem_restore_fences(dev);
+
+	if (IS_GEN4(dev))
+		pci_write_config_word(dev->pdev, GCDGMBUS,
+				      dev_priv->regfile.saveGCDGMBUS);
 	i915_restore_display(dev);
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
@@ -368,6 +351,8 @@ int i915_restore_state(struct drm_device *dev)
 		I915_WRITE(_FDI_RXA_IMR, dev_priv->regfile.saveFDI_RXA_IMR);
 		I915_WRITE(_FDI_RXB_IMR, dev_priv->regfile.saveFDI_RXB_IMR);
 		I915_WRITE(PCH_PORT_HOTPLUG, dev_priv->regfile.savePCH_PORT_HOTPLUG);
+		I915_WRITE(RSTDBYCTL,
+			   dev_priv->regfile.saveMCHBAR_RENDER_STANDBY);
 	} else {
 		I915_WRITE(IER, dev_priv->regfile.saveIER);
 		I915_WRITE(IMR, dev_priv->regfile.saveIMR);
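
Every register the suspend path captures above has a mirror-image write in the restore path, and the new GCDGMBUS handling extends the same pairing to PCI config space. A minimal stand-alone sketch of that regfile pattern, with a fake MMIO array and made-up register names standing in for the real I915_READ/I915_WRITE machinery:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the MMIO helpers; not the real i915 macros. */
    static uint32_t fake_mmio[2];
    #define REG_READ(reg)       (fake_mmio[(reg)])
    #define REG_WRITE(reg, val) (fake_mmio[(reg)] = (val))

    enum { PP_CONTROL = 0, PP_DIVISOR = 1 };

    struct regfile {
    	uint32_t savePP_CONTROL;
    	uint32_t savePP_DIVISOR;
    };

    static void save_state(struct regfile *rf)
    {
    	rf->savePP_CONTROL = REG_READ(PP_CONTROL);
    	rf->savePP_DIVISOR = REG_READ(PP_DIVISOR);
    }

    static void restore_state(const struct regfile *rf)
    {
    	/* Restore so the panel power sequencer control is re-armed last. */
    	REG_WRITE(PP_DIVISOR, rf->savePP_DIVISOR);
    	REG_WRITE(PP_CONTROL, rf->savePP_CONTROL);
    }

    int main(void)
    {
    	struct regfile rf;

    	fake_mmio[PP_CONTROL] = 0xabcd0001;
    	fake_mmio[PP_DIVISOR] = 0x00270f04;
    	save_state(&rf);
    	fake_mmio[PP_CONTROL] = 0;	/* "suspend" clobbers the register */
    	restore_state(&rf);
    	printf("PP_CONTROL after resume: 0x%08x\n", REG_READ(PP_CONTROL));
    	return 0;
    }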
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 503847f18fdd..4a5af695307e 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -139,8 +139,6 @@ static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);
 static struct attribute *rc6_attrs[] = {
 	&dev_attr_rc6_enable.attr,
 	&dev_attr_rc6_residency_ms.attr,
-	&dev_attr_rc6p_residency_ms.attr,
-	&dev_attr_rc6pp_residency_ms.attr,
 	NULL
 };
 
@@ -148,6 +146,17 @@ static struct attribute_group rc6_attr_group = {
 	.name = power_group_name,
 	.attrs = rc6_attrs
 };
+
+static struct attribute *rc6p_attrs[] = {
+	&dev_attr_rc6p_residency_ms.attr,
+	&dev_attr_rc6pp_residency_ms.attr,
+	NULL
+};
+
+static struct attribute_group rc6p_attr_group = {
+	.name = power_group_name,
+	.attrs = rc6p_attrs
+};
 #endif
 
 static int l3_access_valid(struct drm_device *dev, loff_t offset)
@@ -595,12 +604,18 @@ void i915_setup_sysfs(struct drm_device *dev)
 	int ret;
 
 #ifdef CONFIG_PM
-	if (INTEL_INFO(dev)->gen >= 6) {
+	if (HAS_RC6(dev)) {
 		ret = sysfs_merge_group(&dev->primary->kdev->kobj,
 					&rc6_attr_group);
 		if (ret)
 			DRM_ERROR("RC6 residency sysfs setup failed\n");
 	}
+	if (HAS_RC6p(dev)) {
+		ret = sysfs_merge_group(&dev->primary->kdev->kobj,
+					&rc6p_attr_group);
+		if (ret)
+			DRM_ERROR("RC6p residency sysfs setup failed\n");
+	}
 #endif
 	if (HAS_L3_DPF(dev)) {
 		ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs);
@@ -640,5 +655,6 @@ void i915_teardown_sysfs(struct drm_device *dev)
 	device_remove_bin_file(dev->primary->kdev, &dpf_attrs);
 #ifdef CONFIG_PM
 	sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6_attr_group);
+	sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6p_attr_group);
 #endif
 }
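
Both attribute groups above deliberately share power_group_name, so sysfs_merge_group() folds their files into the same power/ directory; splitting them only changes which files exist on a given platform, not where they live. A hedged module-style sketch of the same merge/unmerge pairing, with a placeholder device, group name, and attribute rather than the i915 ones:

    #include <linux/device.h>
    #include <linux/sysfs.h>

    /* Hypothetical attribute; stands in for rc6p_residency_ms. */
    static ssize_t demo_show(struct device *dev,
    			 struct device_attribute *attr, char *buf)
    {
    	return sprintf(buf, "42\n");
    }
    static DEVICE_ATTR(demo_residency_ms, S_IRUGO, demo_show, NULL);

    static struct attribute *demo_attrs[] = {
    	&dev_attr_demo_residency_ms.attr,
    	NULL
    };

    /* Same .name as another group => attributes merge into one directory. */
    static const struct attribute_group demo_group = {
    	.name  = "power",
    	.attrs = demo_attrs,
    };

    /* On some struct device *dev (assumed to exist): */
    static int demo_setup(struct device *dev)
    {
    	return sysfs_merge_group(&dev->kobj, &demo_group);
    }

    static void demo_teardown(struct device *dev)
    {
    	sysfs_unmerge_group(&dev->kobj, &demo_group);
    }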
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index f5aa0067755a..751d4ad14d62 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -587,6 +587,110 @@ TRACE_EVENT(intel_gpu_freq_change,
 	    TP_printk("new_freq=%u", __entry->freq)
 );
 
+/**
+ * DOC: i915_ppgtt_create and i915_ppgtt_release tracepoints
+ *
+ * With full ppgtt enabled each process using drm will allocate at least one
+ * translation table. With these traces it is possible to keep track of the
+ * allocation and of the lifetime of the tables; this can be used during
+ * testing/debug to verify that we are not leaking ppgtts.
+ * These traces identify the ppgtt through the vm pointer, which is also printed
+ * by the i915_vma_bind and i915_vma_unbind tracepoints.
+ */
+DECLARE_EVENT_CLASS(i915_ppgtt,
+	TP_PROTO(struct i915_address_space *vm),
+	TP_ARGS(vm),
+
+	TP_STRUCT__entry(
+			__field(struct i915_address_space *, vm)
+			__field(u32, dev)
+	),
+
+	TP_fast_assign(
+			__entry->vm = vm;
+			__entry->dev = vm->dev->primary->index;
+	),
+
+	TP_printk("dev=%u, vm=%p", __entry->dev, __entry->vm)
+)
+
+DEFINE_EVENT(i915_ppgtt, i915_ppgtt_create,
+	TP_PROTO(struct i915_address_space *vm),
+	TP_ARGS(vm)
+);
+
+DEFINE_EVENT(i915_ppgtt, i915_ppgtt_release,
+	TP_PROTO(struct i915_address_space *vm),
+	TP_ARGS(vm)
+);
+
+/**
+ * DOC: i915_context_create and i915_context_free tracepoints
+ *
+ * These tracepoints are used to track creation and deletion of contexts.
+ * If full ppgtt is enabled, they also print the address of the vm assigned to
+ * the context.
+ */
+DECLARE_EVENT_CLASS(i915_context,
+	TP_PROTO(struct intel_context *ctx),
+	TP_ARGS(ctx),
+
+	TP_STRUCT__entry(
+			__field(u32, dev)
+			__field(struct intel_context *, ctx)
+			__field(struct i915_address_space *, vm)
+	),
+
+	TP_fast_assign(
+			__entry->ctx = ctx;
+			__entry->vm = ctx->ppgtt ? &ctx->ppgtt->base : NULL;
+			__entry->dev = ctx->file_priv->dev_priv->dev->primary->index;
+	),
+
+	TP_printk("dev=%u, ctx=%p, ctx_vm=%p",
+		  __entry->dev, __entry->ctx, __entry->vm)
+)
+
+DEFINE_EVENT(i915_context, i915_context_create,
+	TP_PROTO(struct intel_context *ctx),
+	TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(i915_context, i915_context_free,
+	TP_PROTO(struct intel_context *ctx),
+	TP_ARGS(ctx)
+);
+
+/**
+ * DOC: switch_mm tracepoint
+ *
+ * This tracepoint allows tracking of the mm switch, which is an important point
+ * in the lifetime of the vm in the legacy submission path. This tracepoint is
+ * called only if full ppgtt is enabled.
+ */
+TRACE_EVENT(switch_mm,
+	TP_PROTO(struct intel_engine_cs *ring, struct intel_context *to),
+
+	TP_ARGS(ring, to),
+
+	TP_STRUCT__entry(
+			__field(u32, ring)
+			__field(struct intel_context *, to)
+			__field(struct i915_address_space *, vm)
+			__field(u32, dev)
+	),
+
+	TP_fast_assign(
+			__entry->ring = ring->id;
+			__entry->to = to;
+			__entry->vm = to->ppgtt ? &to->ppgtt->base : NULL;
+			__entry->dev = ring->dev->primary->index;
+	),
+
+	TP_printk("dev=%u, ring=%u, ctx=%p, ctx_vm=%p",
+		  __entry->dev, __entry->ring, __entry->to, __entry->vm)
+);
+
 #endif /* _I915_TRACE_H_ */
 
 /* This part must be outside protection */
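
DECLARE_EVENT_CLASS factors the entry layout, assignment, and format string out of the individual tracepoints, so each DEFINE_EVENT above only restates the prototype. A hypothetical sketch of extending the class, not part of the patch:

    /* Reuses i915_ppgtt's TP_STRUCT__entry/TP_fast_assign/TP_printk wholesale;
     * only the event name is new. A driver would emit it with
     * trace_i915_ppgtt_example(vm); and it would appear under
     * events/i915/i915_ppgtt_example/ in tracefs.
     */
    DEFINE_EVENT(i915_ppgtt, i915_ppgtt_example,
    	TP_PROTO(struct i915_address_space *vm),
    	TP_ARGS(vm)
    );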
diff --git a/drivers/gpu/drm/i915/i915_ums.c b/drivers/gpu/drm/i915/i915_ums.c
index 480da593e6c0..d10fe3e9c49f 100644
--- a/drivers/gpu/drm/i915/i915_ums.c
+++ b/drivers/gpu/drm/i915/i915_ums.c
@@ -270,6 +270,12 @@ void i915_save_display_reg(struct drm_device *dev)
 	}
 	/* FIXME: regfile.save TV & SDVO state */
 
+	/* Panel fitter */
+	if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) {
+		dev_priv->regfile.savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
+		dev_priv->regfile.savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
+	}
+
 	/* Backlight */
 	if (INTEL_INFO(dev)->gen <= 4)
 		pci_read_config_byte(dev->pdev, PCI_LBPC,
@@ -284,6 +290,7 @@ void i915_save_display_reg(struct drm_device *dev)
 		dev_priv->regfile.saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
 		if (INTEL_INFO(dev)->gen >= 4)
 			dev_priv->regfile.saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
+		dev_priv->regfile.saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
 	}
 
 	return;
@@ -313,6 +320,13 @@ void i915_restore_display_reg(struct drm_device *dev)
 		if (INTEL_INFO(dev)->gen >= 4)
 			I915_WRITE(BLC_PWM_CTL2, dev_priv->regfile.saveBLC_PWM_CTL2);
 		I915_WRITE(BLC_PWM_CTL, dev_priv->regfile.saveBLC_PWM_CTL);
+		I915_WRITE(BLC_HIST_CTL, dev_priv->regfile.saveBLC_HIST_CTL);
+	}
+
+	/* Panel fitter */
+	if (!IS_I830(dev) && !IS_845G(dev) && !HAS_PCH_SPLIT(dev)) {
+		I915_WRITE(PFIT_PGM_RATIOS, dev_priv->regfile.savePFIT_PGM_RATIOS);
+		I915_WRITE(PFIT_CONTROL, dev_priv->regfile.savePFIT_CONTROL);
 	}
 
 	/* Display port ratios (must be done before clock is set) */
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
new file mode 100644
index 000000000000..2c7ed5cb29c0
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -0,0 +1,463 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_edid.h>
+#include "intel_drv.h"
+#include "i915_drv.h"
+
+/**
+ * DOC: High Definition Audio over HDMI and Display Port
+ *
+ * The graphics and audio drivers together support High Definition Audio over
+ * HDMI and Display Port. The audio programming sequences are divided into audio
+ * codec and controller enable and disable sequences. The graphics driver
+ * handles the audio codec sequences, while the audio driver handles the audio
+ * controller sequences.
+ *
+ * The disable sequences must be performed before disabling the transcoder or
+ * port. The enable sequences may only be performed after enabling the
+ * transcoder and port, and after completed link training.
+ *
+ * The codec and controller sequences could be done either parallel or serial,
+ * but generally the ELDV/PD change in the codec sequence indicates to the audio
+ * driver that the controller sequence should start. Indeed, most of the
+ * co-operation between the graphics and audio drivers is handled via audio
+ * related registers. (The notable exception is the power management, not
+ * covered here.)
+ */
+
+static const struct {
+	int clock;
+	u32 config;
+} hdmi_audio_clock[] = {
+	{ DIV_ROUND_UP(25200 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 },
+	{ 25200, AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 }, /* default per bspec */
+	{ 27000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 },
+	{ 27000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 },
+	{ 54000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 },
+	{ 54000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 },
+	{ DIV_ROUND_UP(74250 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 },
+	{ 74250, AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 },
+	{ DIV_ROUND_UP(148500 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 },
+	{ 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 },
+};
+
+/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
+static u32 audio_config_hdmi_pixel_clock(struct drm_display_mode *mode)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) {
+		if (mode->clock == hdmi_audio_clock[i].clock)
+			break;
+	}
+
+	if (i == ARRAY_SIZE(hdmi_audio_clock)) {
+		DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n", mode->clock);
+		i = 1;
+	}
+
+	DRM_DEBUG_KMS("Configuring HDMI audio for pixel clock %d (0x%08x)\n",
+		      hdmi_audio_clock[i].clock,
+		      hdmi_audio_clock[i].config);
+
+	return hdmi_audio_clock[i].config;
+}
+
+static bool intel_eld_uptodate(struct drm_connector *connector,
+			       int reg_eldv, uint32_t bits_eldv,
+			       int reg_elda, uint32_t bits_elda,
+			       int reg_edid)
+{
+	struct drm_i915_private *dev_priv = connector->dev->dev_private;
+	uint8_t *eld = connector->eld;
+	uint32_t tmp;
+	int i;
+
+	tmp = I915_READ(reg_eldv);
+	tmp &= bits_eldv;
+
+	if (!tmp)
+		return false;
+
+	tmp = I915_READ(reg_elda);
+	tmp &= ~bits_elda;
+	I915_WRITE(reg_elda, tmp);
+
+	for (i = 0; i < drm_eld_size(eld) / 4; i++)
+		if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
+			return false;
+
+	return true;
+}
+
+static void g4x_audio_codec_disable(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	uint32_t eldv, tmp;
+
+	DRM_DEBUG_KMS("Disable audio codec\n");
+
+	tmp = I915_READ(G4X_AUD_VID_DID);
+	if (tmp == INTEL_AUDIO_DEVBLC || tmp == INTEL_AUDIO_DEVCL)
+		eldv = G4X_ELDV_DEVCL_DEVBLC;
+	else
+		eldv = G4X_ELDV_DEVCTG;
+
+	/* Invalidate ELD */
+	tmp = I915_READ(G4X_AUD_CNTL_ST);
+	tmp &= ~eldv;
+	I915_WRITE(G4X_AUD_CNTL_ST, tmp);
+}
+
+static void g4x_audio_codec_enable(struct drm_connector *connector,
+				   struct intel_encoder *encoder,
+				   struct drm_display_mode *mode)
+{
+	struct drm_i915_private *dev_priv = connector->dev->dev_private;
+	uint8_t *eld = connector->eld;
+	uint32_t eldv;
+	uint32_t tmp;
+	int len, i;
+
+	DRM_DEBUG_KMS("Enable audio codec, %u bytes ELD\n", eld[2]);
+
+	tmp = I915_READ(G4X_AUD_VID_DID);
+	if (tmp == INTEL_AUDIO_DEVBLC || tmp == INTEL_AUDIO_DEVCL)
+		eldv = G4X_ELDV_DEVCL_DEVBLC;
+	else
+		eldv = G4X_ELDV_DEVCTG;
+
+	if (intel_eld_uptodate(connector,
+			       G4X_AUD_CNTL_ST, eldv,
+			       G4X_AUD_CNTL_ST, G4X_ELD_ADDR_MASK,
+			       G4X_HDMIW_HDMIEDID))
+		return;
+
+	tmp = I915_READ(G4X_AUD_CNTL_ST);
+	tmp &= ~(eldv | G4X_ELD_ADDR_MASK);
+	len = (tmp >> 9) & 0x1f;		/* ELD buffer size */
+	I915_WRITE(G4X_AUD_CNTL_ST, tmp);
+
+	len = min(drm_eld_size(eld) / 4, len);
+	DRM_DEBUG_DRIVER("ELD size %d\n", len);
+	for (i = 0; i < len; i++)
+		I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));
+
+	tmp = I915_READ(G4X_AUD_CNTL_ST);
+	tmp |= eldv;
+	I915_WRITE(G4X_AUD_CNTL_ST, tmp);
+}
+
+static void hsw_audio_codec_disable(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+	enum pipe pipe = intel_crtc->pipe;
+	uint32_t tmp;
+
+	DRM_DEBUG_KMS("Disable audio codec on pipe %c\n", pipe_name(pipe));
+
+	/* Disable timestamps */
+	tmp = I915_READ(HSW_AUD_CFG(pipe));
+	tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
+	tmp |= AUD_CONFIG_N_PROG_ENABLE;
+	tmp &= ~AUD_CONFIG_UPPER_N_MASK;
+	tmp &= ~AUD_CONFIG_LOWER_N_MASK;
+	if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
+		tmp |= AUD_CONFIG_N_VALUE_INDEX;
+	I915_WRITE(HSW_AUD_CFG(pipe), tmp);
+
+	/* Invalidate ELD */
+	tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+	tmp &= ~AUDIO_ELD_VALID(pipe);
+	tmp &= ~AUDIO_OUTPUT_ENABLE(pipe);
+	I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+}
+
+static void hsw_audio_codec_enable(struct drm_connector *connector,
+				   struct intel_encoder *encoder,
+				   struct drm_display_mode *mode)
+{
+	struct drm_i915_private *dev_priv = connector->dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+	enum pipe pipe = intel_crtc->pipe;
+	const uint8_t *eld = connector->eld;
+	uint32_t tmp;
+	int len, i;
+
+	DRM_DEBUG_KMS("Enable audio codec on pipe %c, %u bytes ELD\n",
+		      pipe_name(pipe), drm_eld_size(eld));
+
+	/* Enable audio presence detect, invalidate ELD */
+	tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+	tmp |= AUDIO_OUTPUT_ENABLE(pipe);
+	tmp &= ~AUDIO_ELD_VALID(pipe);
+	I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+
+	/*
+	 * FIXME: We're supposed to wait for vblank here, but we have vblanks
+	 * disabled during the mode set. The proper fix would be to push the
+	 * rest of the setup into a vblank work item, queued here, but the
+	 * infrastructure is not there yet.
+	 */
+
+	/* Reset ELD write address */
+	tmp = I915_READ(HSW_AUD_DIP_ELD_CTRL(pipe));
+	tmp &= ~IBX_ELD_ADDRESS_MASK;
+	I915_WRITE(HSW_AUD_DIP_ELD_CTRL(pipe), tmp);
+
+	/* Up to 84 bytes of hw ELD buffer */
+	len = min(drm_eld_size(eld), 84);
+	for (i = 0; i < len / 4; i++)
+		I915_WRITE(HSW_AUD_EDID_DATA(pipe), *((uint32_t *)eld + i));
+
+	/* ELD valid */
+	tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+	tmp |= AUDIO_ELD_VALID(pipe);
+	I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+
+	/* Enable timestamps */
+	tmp = I915_READ(HSW_AUD_CFG(pipe));
+	tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
+	tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
+	tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
+	if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
+		tmp |= AUD_CONFIG_N_VALUE_INDEX;
+	else
+		tmp |= audio_config_hdmi_pixel_clock(mode);
+	I915_WRITE(HSW_AUD_CFG(pipe), tmp);
+}
+
+static void ilk_audio_codec_disable(struct intel_encoder *encoder)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+	struct intel_digital_port *intel_dig_port =
+		enc_to_dig_port(&encoder->base);
+	enum port port = intel_dig_port->port;
+	enum pipe pipe = intel_crtc->pipe;
+	uint32_t tmp, eldv;
+	int aud_config;
+	int aud_cntrl_st2;
+
+	DRM_DEBUG_KMS("Disable audio codec on port %c, pipe %c\n",
+		      port_name(port), pipe_name(pipe));
+
+	if (HAS_PCH_IBX(dev_priv->dev)) {
+		aud_config = IBX_AUD_CFG(pipe);
+		aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
+	} else if (IS_VALLEYVIEW(dev_priv)) {
+		aud_config = VLV_AUD_CFG(pipe);
+		aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
+	} else {
+		aud_config = CPT_AUD_CFG(pipe);
+		aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
+	}
+
+	/* Disable timestamps */
+	tmp = I915_READ(aud_config);
+	tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
+	tmp |= AUD_CONFIG_N_PROG_ENABLE;
+	tmp &= ~AUD_CONFIG_UPPER_N_MASK;
+	tmp &= ~AUD_CONFIG_LOWER_N_MASK;
+	if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
+		tmp |= AUD_CONFIG_N_VALUE_INDEX;
+	I915_WRITE(aud_config, tmp);
+
+	if (WARN_ON(!port)) {
+		eldv = IBX_ELD_VALID(PORT_B) | IBX_ELD_VALID(PORT_C) |
+			IBX_ELD_VALID(PORT_D);
+	} else {
+		eldv = IBX_ELD_VALID(port);
+	}
+
+	/* Invalidate ELD */
+	tmp = I915_READ(aud_cntrl_st2);
+	tmp &= ~eldv;
+	I915_WRITE(aud_cntrl_st2, tmp);
+}
+
+static void ilk_audio_codec_enable(struct drm_connector *connector,
+				   struct intel_encoder *encoder,
+				   struct drm_display_mode *mode)
+{
+	struct drm_i915_private *dev_priv = connector->dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
+	struct intel_digital_port *intel_dig_port =
+		enc_to_dig_port(&encoder->base);
+	enum port port = intel_dig_port->port;
+	enum pipe pipe = intel_crtc->pipe;
+	uint8_t *eld = connector->eld;
+	uint32_t eldv;
+	uint32_t tmp;
+	int len, i;
+	int hdmiw_hdmiedid;
+	int aud_config;
+	int aud_cntl_st;
+	int aud_cntrl_st2;
+
+	DRM_DEBUG_KMS("Enable audio codec on port %c, pipe %c, %u bytes ELD\n",
+		      port_name(port), pipe_name(pipe), drm_eld_size(eld));
+
+	/*
+	 * FIXME: We're supposed to wait for vblank here, but we have vblanks
+	 * disabled during the mode set. The proper fix would be to push the
+	 * rest of the setup into a vblank work item, queued here, but the
+	 * infrastructure is not there yet.
+	 */
+
+	if (HAS_PCH_IBX(connector->dev)) {
+		hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
+		aud_config = IBX_AUD_CFG(pipe);
+		aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
+		aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
+	} else if (IS_VALLEYVIEW(connector->dev)) {
+		hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
+		aud_config = VLV_AUD_CFG(pipe);
+		aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
+		aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
+	} else {
+		hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
+		aud_config = CPT_AUD_CFG(pipe);
+		aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
+		aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
+	}
+
+	if (WARN_ON(!port)) {
+		eldv = IBX_ELD_VALID(PORT_B) | IBX_ELD_VALID(PORT_C) |
+			IBX_ELD_VALID(PORT_D);
+	} else {
+		eldv = IBX_ELD_VALID(port);
+	}
+
+	/* Invalidate ELD */
+	tmp = I915_READ(aud_cntrl_st2);
+	tmp &= ~eldv;
+	I915_WRITE(aud_cntrl_st2, tmp);
+
+	/* Reset ELD write address */
+	tmp = I915_READ(aud_cntl_st);
+	tmp &= ~IBX_ELD_ADDRESS_MASK;
+	I915_WRITE(aud_cntl_st, tmp);
+
+	/* Up to 84 bytes of hw ELD buffer */
+	len = min(drm_eld_size(eld), 84);
+	for (i = 0; i < len / 4; i++)
+		I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
+
+	/* ELD valid */
+	tmp = I915_READ(aud_cntrl_st2);
+	tmp |= eldv;
+	I915_WRITE(aud_cntrl_st2, tmp);
+
+	/* Enable timestamps */
+	tmp = I915_READ(aud_config);
+	tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
+	tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
+	tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
+	if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT))
+		tmp |= AUD_CONFIG_N_VALUE_INDEX;
+	else
+		tmp |= audio_config_hdmi_pixel_clock(mode);
+	I915_WRITE(aud_config, tmp);
+}
+
+/**
+ * intel_audio_codec_enable - Enable the audio codec for HD audio
+ * @intel_encoder: encoder on which to enable audio
+ *
+ * The enable sequences may only be performed after enabling the transcoder and
+ * port, and after completed link training.
+ */
+void intel_audio_codec_enable(struct intel_encoder *intel_encoder)
+{
+	struct drm_encoder *encoder = &intel_encoder->base;
+	struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
+	struct drm_display_mode *mode = &crtc->config.adjusted_mode;
+	struct drm_connector *connector;
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	connector = drm_select_eld(encoder, mode);
+	if (!connector)
+		return;
+
+	DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
+			 connector->base.id,
+			 connector->name,
+			 connector->encoder->base.id,
+			 connector->encoder->name);
+
+	/* ELD Conn_Type */
+	connector->eld[5] &= ~(3 << 2);
+	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
+		connector->eld[5] |= (1 << 2);
+
+	connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
+
+	if (dev_priv->display.audio_codec_enable)
+		dev_priv->display.audio_codec_enable(connector, intel_encoder, mode);
+}
+
+/**
+ * intel_audio_codec_disable - Disable the audio codec for HD audio
+ * @encoder: encoder on which to disable audio
+ *
+ * The disable sequences must be performed before disabling the transcoder or
+ * port.
+ */
+void intel_audio_codec_disable(struct intel_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->display.audio_codec_disable)
+		dev_priv->display.audio_codec_disable(encoder);
+}
+
+/**
+ * intel_init_audio - Set up chip specific audio functions
+ * @dev: drm device
+ */
+void intel_init_audio(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (IS_G4X(dev)) {
+		dev_priv->display.audio_codec_enable = g4x_audio_codec_enable;
+		dev_priv->display.audio_codec_disable = g4x_audio_codec_disable;
+	} else if (IS_VALLEYVIEW(dev)) {
+		dev_priv->display.audio_codec_enable = ilk_audio_codec_enable;
+		dev_priv->display.audio_codec_disable = ilk_audio_codec_disable;
+	} else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) {
+		dev_priv->display.audio_codec_enable = hsw_audio_codec_enable;
+		dev_priv->display.audio_codec_disable = hsw_audio_codec_disable;
+	} else if (HAS_PCH_SPLIT(dev)) {
+		dev_priv->display.audio_codec_enable = ilk_audio_codec_enable;
+		dev_priv->display.audio_codec_disable = ilk_audio_codec_disable;
+	}
+}
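
The fallback in audio_config_hdmi_pixel_clock() is what makes the table safe for unlisted dot clocks: a miss lands on index 1, the 25.2 MHz default named by bspec. A stand-alone sketch of the same walk-then-fallback idiom; the table here keeps only the clock column, not the register encodings:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    /* kHz entries mirroring hdmi_audio_clock[]; 25175, 74176 etc. are the
     * 1000/1001 "NTSC" rates, hence DIV_ROUND_UP on the exact fractions. */
    static const int hdmi_audio_clock[] = {
    	DIV_ROUND_UP(25200 * 1000, 1001), 25200, 27000, 27000 * 1001 / 1000,
    	54000, 54000 * 1001 / 1000, DIV_ROUND_UP(74250 * 1000, 1001), 74250,
    	DIV_ROUND_UP(148500 * 1000, 1001), 148500,
    };

    static int audio_pixel_clock_index(int clock)
    {
    	unsigned int i;

    	for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++)
    		if (clock == hdmi_audio_clock[i])
    			return i;

    	return 1; /* default per bspec: 25.2 MHz */
    }

    int main(void)
    {
    	printf("74176 kHz -> idx %d\n", audio_pixel_clock_index(74176));
    	printf("99999 kHz -> idx %d (fallback)\n", audio_pixel_clock_index(99999));
    	return 0;
    }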
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 905999bee2ac..7603765c91fc 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -46,7 +46,7 @@ struct bdb_header {
 	u16 version;			/**< decimal */
 	u16 header_size;		/**< in bytes */
 	u16 bdb_size;			/**< in bytes */
-};
+} __packed;
 
 /* strictly speaking, this is a "skip" block, but it has interesting info */
 struct vbios_data {
@@ -252,7 +252,7 @@ union child_device_config {
 	/* This one should also be safe to use anywhere, even without version
 	 * checks. */
 	struct common_child_dev_config common;
-};
+} __packed;
 
 struct bdb_general_definitions {
 	/* DDC GPIO */
@@ -888,12 +888,12 @@ struct mipi_pps_data {
 	u16 bl_disable_delay;
 	u16 panel_off_delay;
 	u16 panel_power_cycle_delay;
-};
+} __packed;
 
 struct bdb_mipi_config {
 	struct mipi_config config[MAX_MIPI_CONFIGURATIONS];
 	struct mipi_pps_data pps[MAX_MIPI_CONFIGURATIONS];
-};
+} __packed;
 
 /* Block 53 contains MIPI sequences as needed by the panel
 * for enabling it. This block can be variable in size and
@@ -902,7 +902,7 @@ struct bdb_mipi_config {
 struct bdb_mipi_sequence {
 	u8 version;
 	u8 data[0];
-};
+} __packed;
 
 /* MIPI Sequnece Block definitions */
 enum mipi_seq {
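
Adding __packed matters because these structs are overlaid directly onto raw VBT bytes; without it the compiler may insert padding for alignment and shift every following field. A stand-alone illustration using the gcc/clang attribute behind the kernel's __packed macro; the struct is a made-up stand-in, not a real VBT block:

    #include <stdio.h>
    #include <stdint.h>

    struct unpacked {
    	uint8_t  version;
    	uint16_t header_size;	/* may get a padding byte before it */
    };

    struct packed_variant {
    	uint8_t  version;
    	uint16_t header_size;
    } __attribute__((packed));

    int main(void)
    {
    	/* Typically prints 4 vs 3: padded layout vs the exact byte layout. */
    	printf("unpacked=%zu packed=%zu\n",
    	       sizeof(struct unpacked), sizeof(struct packed_variant));
    	return 0;
    }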
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 9212e6504e0f..a9af9a4866db 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -72,7 +72,7 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
 	u32 tmp;
 
 	power_domain = intel_display_port_power_domain(encoder);
-	if (!intel_display_power_enabled(dev_priv, power_domain))
+	if (!intel_display_power_is_enabled(dev_priv, power_domain))
 		return false;
 
 	tmp = I915_READ(crt->adpa_reg);
@@ -775,7 +775,7 @@ static void intel_crt_reset(struct drm_connector *connector)
 		I915_WRITE(crt->adpa_reg, adpa);
 		POSTING_READ(crt->adpa_reg);
 
-		DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
+		DRM_DEBUG_KMS("crt adpa set to 0x%x\n", adpa);
 		crt->force_hotplug_required = 1;
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index b63d4fa204a3..e6b45cd150d3 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -95,8 +95,8 @@ static const struct ddi_buf_trans bdw_ddi_translations_dp[] = {
 	{ 0x00BEFFFF, 0x00140006 },
 	{ 0x80B2CFFF, 0x001B0002 },
 	{ 0x00FFFFFF, 0x000E000A },
-	{ 0x00D75FFF, 0x00180004 },
-	{ 0x80CB2FFF, 0x001B0002 },
+	{ 0x00DB6FFF, 0x00160005 },
+	{ 0x80C71FFF, 0x001A0002 },
 	{ 0x00F7DFFF, 0x00180004 },
 	{ 0x80D75FFF, 0x001B0002 },
 };
@@ -127,6 +127,32 @@ static const struct ddi_buf_trans bdw_ddi_translations_hdmi[] = {
 	{ 0x80FFFFFF, 0x001B0002 },	/* 9:	1000	1000	0   */
 };
 
+static const struct ddi_buf_trans skl_ddi_translations_dp[] = {
+	{ 0x00000018, 0x000000a0 },
+	{ 0x00004014, 0x00000098 },
+	{ 0x00006012, 0x00000088 },
+	{ 0x00008010, 0x00000080 },
+	{ 0x00000018, 0x00000098 },
+	{ 0x00004014, 0x00000088 },
+	{ 0x00006012, 0x00000080 },
+	{ 0x00000018, 0x00000088 },
+	{ 0x00004014, 0x00000080 },
+};
+
+static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = {
+					/* Idx	NT mV   T mV    db  */
+	{ 0x00000018, 0x000000a0 },	/* 0:	400	400	0   */
+	{ 0x00004014, 0x00000098 },	/* 1:	400	600	3.5 */
+	{ 0x00006012, 0x00000088 },	/* 2:	400	800	6   */
+	{ 0x00000018, 0x0000003c },	/* 3:	450	450	0   */
+	{ 0x00000018, 0x00000098 },	/* 4:	600	600	0   */
+	{ 0x00003015, 0x00000088 },	/* 5:	600	800	2.5 */
+	{ 0x00005013, 0x00000080 },	/* 6:	600	1000	4.5 */
+	{ 0x00000018, 0x00000088 },	/* 7:	800	800	0   */
+	{ 0x00000096, 0x00000080 },	/* 8:	800	1000	2   */
+	{ 0x00000018, 0x00000080 },	/* 9:	1200	1200	0   */
+};
+
 enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder)
 {
 	struct drm_encoder *encoder = &intel_encoder->base;
@@ -169,7 +195,14 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
 	const struct ddi_buf_trans *ddi_translations_hdmi;
 	const struct ddi_buf_trans *ddi_translations;
 
-	if (IS_BROADWELL(dev)) {
+	if (IS_SKYLAKE(dev)) {
+		ddi_translations_fdi = NULL;
+		ddi_translations_dp = skl_ddi_translations_dp;
+		ddi_translations_edp = skl_ddi_translations_dp;
+		ddi_translations_hdmi = skl_ddi_translations_hdmi;
+		n_hdmi_entries = ARRAY_SIZE(skl_ddi_translations_hdmi);
+		hdmi_800mV_0dB = 7;
+	} else if (IS_BROADWELL(dev)) {
 		ddi_translations_fdi = bdw_ddi_translations_fdi;
 		ddi_translations_dp = bdw_ddi_translations_dp;
 		ddi_translations_edp = bdw_ddi_translations_edp;
@@ -208,7 +241,10 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port)
 		ddi_translations = ddi_translations_dp;
 		break;
 	case PORT_E:
-		ddi_translations = ddi_translations_fdi;
+		if (ddi_translations_fdi)
+			ddi_translations = ddi_translations_fdi;
+		else
+			ddi_translations = ddi_translations_dp;
 		break;
 	default:
 		BUG();
@@ -423,6 +459,27 @@ intel_ddi_get_crtc_encoder(struct drm_crtc *crtc)
 	return ret;
 }
 
+static struct intel_encoder *
+intel_ddi_get_crtc_new_encoder(struct intel_crtc *crtc)
+{
+	struct drm_device *dev = crtc->base.dev;
+	struct intel_encoder *intel_encoder, *ret = NULL;
+	int num_encoders = 0;
+
+	for_each_intel_encoder(dev, intel_encoder) {
+		if (intel_encoder->new_crtc == crtc) {
+			ret = intel_encoder;
+			num_encoders++;
+		}
+	}
+
+	WARN(num_encoders != 1, "%d encoders on crtc for pipe %c\n", num_encoders,
+	     pipe_name(crtc->pipe));
+
+	BUG_ON(ret == NULL);
+	return ret;
+}
+
 #define LC_FREQ 2700
 #define LC_FREQ_2K U64_C(LC_FREQ * 2000)
 
@@ -613,6 +670,111 @@ static int intel_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
 	return (refclk * n * 100) / (p * r);
 }
 
+static int skl_calc_wrpll_link(struct drm_i915_private *dev_priv,
+			       uint32_t dpll)
+{
+	uint32_t cfgcr1_reg, cfgcr2_reg;
+	uint32_t cfgcr1_val, cfgcr2_val;
+	uint32_t p0, p1, p2, dco_freq;
+
+	cfgcr1_reg = GET_CFG_CR1_REG(dpll);
+	cfgcr2_reg = GET_CFG_CR2_REG(dpll);
+
+	cfgcr1_val = I915_READ(cfgcr1_reg);
+	cfgcr2_val = I915_READ(cfgcr2_reg);
+
+	p0 = cfgcr2_val & DPLL_CFGCR2_PDIV_MASK;
+	p2 = cfgcr2_val & DPLL_CFGCR2_KDIV_MASK;
+
+	if (cfgcr2_val & DPLL_CFGCR2_QDIV_MODE(1))
+		p1 = (cfgcr2_val & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
+	else
+		p1 = 1;
+
+
+	switch (p0) {
+	case DPLL_CFGCR2_PDIV_1:
+		p0 = 1;
+		break;
+	case DPLL_CFGCR2_PDIV_2:
+		p0 = 2;
+		break;
+	case DPLL_CFGCR2_PDIV_3:
+		p0 = 3;
+		break;
+	case DPLL_CFGCR2_PDIV_7:
+		p0 = 7;
+		break;
+	}
+
+	switch (p2) {
+	case DPLL_CFGCR2_KDIV_5:
+		p2 = 5;
+		break;
+	case DPLL_CFGCR2_KDIV_2:
+		p2 = 2;
+		break;
+	case DPLL_CFGCR2_KDIV_3:
+		p2 = 3;
+		break;
+	case DPLL_CFGCR2_KDIV_1:
+		p2 = 1;
+		break;
+	}
+
+	dco_freq = (cfgcr1_val & DPLL_CFGCR1_DCO_INTEGER_MASK) * 24 * 1000;
+
+	dco_freq += (((cfgcr1_val & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) * 24 *
+		1000) / 0x8000;
+
+	return dco_freq / (p0 * p1 * p2 * 5);
+}
+
+
+static void skl_ddi_clock_get(struct intel_encoder *encoder,
+			      struct intel_crtc_config *pipe_config)
+{
+	struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+	int link_clock = 0;
+	uint32_t dpll_ctl1, dpll;
+
+	dpll = pipe_config->ddi_pll_sel;
+
+	dpll_ctl1 = I915_READ(DPLL_CTRL1);
+
+	if (dpll_ctl1 & DPLL_CTRL1_HDMI_MODE(dpll)) {
+		link_clock = skl_calc_wrpll_link(dev_priv, dpll);
+	} else {
+		link_clock = dpll_ctl1 & DPLL_CRTL1_LINK_RATE_MASK(dpll);
+		link_clock >>= DPLL_CRTL1_LINK_RATE_SHIFT(dpll);
+
+		switch (link_clock) {
+		case DPLL_CRTL1_LINK_RATE_810:
+			link_clock = 81000;
+			break;
+		case DPLL_CRTL1_LINK_RATE_1350:
+			link_clock = 135000;
+			break;
+		case DPLL_CRTL1_LINK_RATE_2700:
+			link_clock = 270000;
+			break;
+		default:
+			WARN(1, "Unsupported link rate\n");
+			break;
+		}
+		link_clock *= 2;
+	}
+
+	pipe_config->port_clock = link_clock;
+
+	if (pipe_config->has_dp_encoder)
+		pipe_config->adjusted_mode.crtc_clock =
+			intel_dotclock_calculate(pipe_config->port_clock,
+						 &pipe_config->dp_m_n);
+	else
+		pipe_config->adjusted_mode.crtc_clock = pipe_config->port_clock;
+}
+
 static void hsw_ddi_clock_get(struct intel_encoder *encoder,
 			      struct intel_crtc_config *pipe_config)
 {
@@ -756,7 +918,7 @@ hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
 		      WRPLL_DIVIDER_REFERENCE(r2) | WRPLL_DIVIDER_FEEDBACK(n2) |
 		      WRPLL_DIVIDER_POST(p);
 
-		intel_crtc->config.dpll_hw_state.wrpll = val;
+		intel_crtc->new_config->dpll_hw_state.wrpll = val;
 
 		pll = intel_get_shared_dpll(intel_crtc);
 		if (pll == NULL) {
@@ -765,12 +927,234 @@ hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
 			return false;
 		}
 
-		intel_crtc->config.ddi_pll_sel = PORT_CLK_SEL_WRPLL(pll->id);
+		intel_crtc->new_config->ddi_pll_sel = PORT_CLK_SEL_WRPLL(pll->id);
 	}
 
 	return true;
 }
 
+struct skl_wrpll_params {
+	uint32_t dco_fraction;
+	uint32_t dco_integer;
+	uint32_t qdiv_ratio;
+	uint32_t qdiv_mode;
+	uint32_t kdiv;
+	uint32_t pdiv;
+	uint32_t central_freq;
+};
+
+static void
+skl_ddi_calculate_wrpll(int clock /* in Hz */,
+			struct skl_wrpll_params *wrpll_params)
+{
+	uint64_t afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
+	uint64_t dco_central_freq[3] = {8400000000ULL,
+					9000000000ULL,
+					9600000000ULL};
+	uint32_t min_dco_deviation = 400;
+	uint32_t min_dco_index = 3;
+	uint32_t P0[4] = {1, 2, 3, 7};
+	uint32_t P2[4] = {1, 2, 3, 5};
+	bool found = false;
+	uint32_t candidate_p = 0;
+	uint32_t candidate_p0[3] = {0}, candidate_p1[3] = {0};
+	uint32_t candidate_p2[3] = {0};
+	uint32_t dco_central_freq_deviation[3];
+	uint32_t i, P1, k, dco_count;
+	bool retry_with_odd = false;
+	uint64_t dco_freq;
+
+	/* Determine P0, P1 or P2 */
+	for (dco_count = 0; dco_count < 3; dco_count++) {
+		found = false;
+		candidate_p =
+			div64_u64(dco_central_freq[dco_count], afe_clock);
+		if (retry_with_odd == false)
+			candidate_p = (candidate_p % 2 == 0 ?
+				candidate_p : candidate_p + 1);
+
+		for (P1 = 1; P1 < candidate_p; P1++) {
+			for (i = 0; i < 4; i++) {
+				if (!(P0[i] != 1 || P1 == 1))
+					continue;
+
+				for (k = 0; k < 4; k++) {
+					if (P1 != 1 && P2[k] != 2)
+						continue;
+
+					if (candidate_p == P0[i] * P1 * P2[k]) {
+						/* Found possible P0, P1, P2 */
+						found = true;
+						candidate_p0[dco_count] = P0[i];
+						candidate_p1[dco_count] = P1;
+						candidate_p2[dco_count] = P2[k];
+						goto found;
+					}
+
+				}
+			}
+		}
+
+found:
+		if (found) {
+			dco_central_freq_deviation[dco_count] =
+				div64_u64(10000 *
+					  abs_diff((candidate_p * afe_clock),
+						   dco_central_freq[dco_count]),
+					  dco_central_freq[dco_count]);
+
+			if (dco_central_freq_deviation[dco_count] <
+				min_dco_deviation) {
+				min_dco_deviation =
+					dco_central_freq_deviation[dco_count];
+				min_dco_index = dco_count;
+			}
+		}
+
+		if (min_dco_index > 2 && dco_count == 2) {
+			retry_with_odd = true;
+			dco_count = 0;
+		}
+	}
+
+	if (min_dco_index > 2) {
+		WARN(1, "No valid values found for the given pixel clock\n");
+	} else {
+		wrpll_params->central_freq = dco_central_freq[min_dco_index];
+
+		switch (dco_central_freq[min_dco_index]) {
+		case 9600000000ULL:
+			wrpll_params->central_freq = 0;
+			break;
+		case 9000000000ULL:
+			wrpll_params->central_freq = 1;
+			break;
+		case 8400000000ULL:
+			wrpll_params->central_freq = 3;
+		}
+
+		switch (candidate_p0[min_dco_index]) {
+		case 1:
+			wrpll_params->pdiv = 0;
+			break;
+		case 2:
+			wrpll_params->pdiv = 1;
+			break;
+		case 3:
+			wrpll_params->pdiv = 2;
+			break;
+		case 7:
+			wrpll_params->pdiv = 4;
+			break;
+		default:
+			WARN(1, "Incorrect PDiv\n");
+		}
+
+		switch (candidate_p2[min_dco_index]) {
+		case 5:
+			wrpll_params->kdiv = 0;
+			break;
+		case 2:
+			wrpll_params->kdiv = 1;
+			break;
+		case 3:
+			wrpll_params->kdiv = 2;
+			break;
+		case 1:
+			wrpll_params->kdiv = 3;
+			break;
+		default:
+			WARN(1, "Incorrect KDiv\n");
+		}
+
+		wrpll_params->qdiv_ratio = candidate_p1[min_dco_index];
+		wrpll_params->qdiv_mode =
+			(wrpll_params->qdiv_ratio == 1) ? 0 : 1;
+
+		dco_freq = candidate_p0[min_dco_index] *
+			candidate_p1[min_dco_index] *
+			candidate_p2[min_dco_index] * afe_clock;
+
+		/*
+		 * Intermediate values are in Hz.
+		 * Divide by MHz to match bspec
+		 */
+		wrpll_params->dco_integer = div_u64(dco_freq, (24 * MHz(1)));
+		wrpll_params->dco_fraction =
+			div_u64(((div_u64(dco_freq, 24) -
+				  wrpll_params->dco_integer * MHz(1)) * 0x8000), MHz(1));
+
+	}
+}
+
+
+static bool
+skl_ddi_pll_select(struct intel_crtc *intel_crtc,
+		   struct intel_encoder *intel_encoder,
+		   int clock)
+{
+	struct intel_shared_dpll *pll;
+	uint32_t ctrl1, cfgcr1, cfgcr2;
+
+	/*
+	 * See comment in intel_dpll_hw_state to understand why we always use 0
+	 * as the DPLL id in this function.
+	 */
+
+	ctrl1 = DPLL_CTRL1_OVERRIDE(0);
+
+	if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
+		struct skl_wrpll_params wrpll_params = { 0, };
+
+		ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
+
+		skl_ddi_calculate_wrpll(clock * 1000, &wrpll_params);
+
+		cfgcr1 = DPLL_CFGCR1_FREQ_ENABLE |
+			 DPLL_CFGCR1_DCO_FRACTION(wrpll_params.dco_fraction) |
+			 wrpll_params.dco_integer;
+
+		cfgcr2 = DPLL_CFGCR2_QDIV_RATIO(wrpll_params.qdiv_ratio) |
+			 DPLL_CFGCR2_QDIV_MODE(wrpll_params.qdiv_mode) |
+			 DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
+			 DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
+			 wrpll_params.central_freq;
+	} else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
+		struct drm_encoder *encoder = &intel_encoder->base;
+		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+		switch (intel_dp->link_bw) {
+		case DP_LINK_BW_1_62:
+			ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810, 0);
+			break;
+		case DP_LINK_BW_2_7:
+			ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350, 0);
+			break;
+		case DP_LINK_BW_5_4:
+			ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700, 0);
+			break;
+		}
+
+		cfgcr1 = cfgcr2 = 0;
+	} else /* eDP */
+		return true;
+
+	intel_crtc->new_config->dpll_hw_state.ctrl1 = ctrl1;
+	intel_crtc->new_config->dpll_hw_state.cfgcr1 = cfgcr1;
+	intel_crtc->new_config->dpll_hw_state.cfgcr2 = cfgcr2;
+
+	pll = intel_get_shared_dpll(intel_crtc);
+	if (pll == NULL) {
+		DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
+				 pipe_name(intel_crtc->pipe));
+		return false;
+	}
+
+	/* shared DPLL id 0 is DPLL 1 */
+	intel_crtc->new_config->ddi_pll_sel = pll->id + 1;
+
+	return true;
+}
 
 /*
  * Tries to find a *shared* PLL for the CRTC and store it in
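
To make the WRPLL search above concrete with one worked case: a 148.5 MHz HDMI pixel clock gives an AFE clock of 742.5 MHz, the 9 GHz DCO center yields an even candidate P of 12 (e.g. P0=2, P1=3, P2=2, within the allowed 400/10000 deviation), and the DCO bookkeeping lands on clean values. A stand-alone sketch of just that final arithmetic, with plain C division standing in for div_u64/MHz():

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	/* 148.5 MHz pixel clock, the common 1080p HDMI case. */
    	uint64_t clock = 148500000ULL;		/* Hz */
    	uint64_t afe_clock = clock * 5;		/* AFE clock is 5x pixel clock */

    	/* Assume the search settled on P = P0*P1*P2 = 12 against the 9 GHz
    	 * DCO center (|8.91 GHz - 9 GHz| / 9 GHz = 100/10000 < 400/10000). */
    	uint64_t p = 12;
    	uint64_t dco_freq = p * afe_clock;	/* 8.91 GHz */

    	/* Same bookkeeping as skl_ddi_calculate_wrpll(): integer part in
    	 * 24 MHz units, fraction in 1/0x8000 steps of a MHz. */
    	uint64_t dco_integer = dco_freq / (24 * 1000000ULL);
    	uint64_t dco_fraction = ((dco_freq / 24) - dco_integer * 1000000ULL) *
    				0x8000 / 1000000ULL;

    	printf("dco_integer=%llu dco_fraction=%llu\n",	/* 371 and 8192 */
    	       (unsigned long long)dco_integer,
    	       (unsigned long long)dco_fraction);
    	return 0;
    }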
@@ -781,13 +1165,15 @@ hsw_ddi_pll_select(struct intel_crtc *intel_crtc,
  */
 bool intel_ddi_pll_select(struct intel_crtc *intel_crtc)
 {
-	struct drm_crtc *crtc = &intel_crtc->base;
-	struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc);
-	int clock = intel_crtc->config.port_clock;
-
-	intel_put_shared_dpll(intel_crtc);
+	struct drm_device *dev = intel_crtc->base.dev;
+	struct intel_encoder *intel_encoder =
+		intel_ddi_get_crtc_new_encoder(intel_crtc);
+	int clock = intel_crtc->new_config->port_clock;
 
-	return hsw_ddi_pll_select(intel_crtc, intel_encoder, clock);
+	if (IS_SKYLAKE(dev))
+		return skl_ddi_pll_select(intel_crtc, intel_encoder, clock);
+	else
+		return hsw_ddi_pll_select(intel_crtc, intel_encoder, clock);
 }
 
 void intel_ddi_set_pipe_settings(struct drm_crtc *crtc)
@@ -962,7 +1348,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
 	uint32_t tmp;
 
 	power_domain = intel_display_port_power_domain(intel_encoder);
-	if (!intel_display_power_enabled(dev_priv, power_domain))
+	if (!intel_display_power_is_enabled(dev_priv, power_domain))
 		return false;
 
 	if (!intel_encoder->get_hw_state(intel_encoder, &pipe))
@@ -1008,7 +1394,7 @@ bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
 	int i;
 
 	power_domain = intel_display_port_power_domain(encoder);
-	if (!intel_display_power_enabled(dev_priv, power_domain))
+	if (!intel_display_power_is_enabled(dev_priv, power_domain))
 		return false;
 
 	tmp = I915_READ(DDI_BUF_CTL(port));
@@ -1079,27 +1465,53 @@ void intel_ddi_disable_pipe_clock(struct intel_crtc *intel_crtc)
 static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
 {
 	struct drm_encoder *encoder = &intel_encoder->base;
-	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
 	enum port port = intel_ddi_get_encoder_port(intel_encoder);
 	int type = intel_encoder->type;
 
-	if (crtc->config.has_audio) {
-		DRM_DEBUG_DRIVER("Audio on pipe %c on DDI\n",
-				 pipe_name(crtc->pipe));
-
-		/* write eld */
-		DRM_DEBUG_DRIVER("DDI audio: write eld information\n");
-		intel_write_eld(encoder, &crtc->config.adjusted_mode);
-	}
-
 	if (type == INTEL_OUTPUT_EDP) {
 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 		intel_edp_panel_on(intel_dp);
 	}
 
-	WARN_ON(crtc->config.ddi_pll_sel == PORT_CLK_SEL_NONE);
-	I915_WRITE(PORT_CLK_SEL(port), crtc->config.ddi_pll_sel);
+	if (IS_SKYLAKE(dev)) {
+		uint32_t dpll = crtc->config.ddi_pll_sel;
+		uint32_t val;
+
+		/*
+		 * DPLL0 is used for eDP and is the only "private" DPLL (as
+		 * opposed to shared) on SKL
+		 */
+		if (type == INTEL_OUTPUT_EDP) {
+			WARN_ON(dpll != SKL_DPLL0);
+
+			val = I915_READ(DPLL_CTRL1);
+
+			val &= ~(DPLL_CTRL1_HDMI_MODE(dpll) |
+				 DPLL_CTRL1_SSC(dpll) |
+				 DPLL_CRTL1_LINK_RATE_MASK(dpll));
+			val |= crtc->config.dpll_hw_state.ctrl1 << (dpll * 6);
+
+			I915_WRITE(DPLL_CTRL1, val);
+			POSTING_READ(DPLL_CTRL1);
+		}
+
+		/* DDI -> PLL mapping */
+		val = I915_READ(DPLL_CTRL2);
+
+		val &= ~(DPLL_CTRL2_DDI_CLK_OFF(port) |
+			 DPLL_CTRL2_DDI_CLK_SEL_MASK(port));
+		val |= (DPLL_CTRL2_DDI_CLK_SEL(dpll, port) |
+			DPLL_CTRL2_DDI_SEL_OVERRIDE(port));
+
+		I915_WRITE(DPLL_CTRL2, val);
+
+	} else {
+		WARN_ON(crtc->config.ddi_pll_sel == PORT_CLK_SEL_NONE);
+		I915_WRITE(PORT_CLK_SEL(port), crtc->config.ddi_pll_sel);
+	}
 
 	if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -1109,7 +1521,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
 		intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
 		intel_dp_start_link_train(intel_dp);
 		intel_dp_complete_link_train(intel_dp);
-		if (port != PORT_A)
+		if (port != PORT_A || INTEL_INFO(dev)->gen >= 9)
 			intel_dp_stop_link_train(intel_dp);
 	} else if (type == INTEL_OUTPUT_HDMI) {
 		struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
@@ -1123,7 +1535,8 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
 static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
 {
 	struct drm_encoder *encoder = &intel_encoder->base;
-	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	enum port port = intel_ddi_get_encoder_port(intel_encoder);
 	int type = intel_encoder->type;
 	uint32_t val;
@@ -1151,7 +1564,11 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
 		intel_edp_panel_off(intel_dp);
 	}
 
-	I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
+	if (IS_SKYLAKE(dev))
+		I915_WRITE(DPLL_CTRL2, (I915_READ(DPLL_CTRL2) |
+					DPLL_CTRL2_DDI_CLK_OFF(port)));
+	else
+		I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
 }
 
 static void intel_enable_ddi(struct intel_encoder *intel_encoder)
@@ -1159,12 +1576,10 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
 	struct drm_encoder *encoder = &intel_encoder->base;
 	struct drm_crtc *crtc = encoder->crtc;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	int pipe = intel_crtc->pipe;
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	enum port port = intel_ddi_get_encoder_port(intel_encoder);
 	int type = intel_encoder->type;
-	uint32_t tmp;
 
 	if (type == INTEL_OUTPUT_HDMI) {
 		struct intel_digital_port *intel_dig_port =
@@ -1180,18 +1595,16 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
 	} else if (type == INTEL_OUTPUT_EDP) {
 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
-		if (port == PORT_A)
+		if (port == PORT_A && INTEL_INFO(dev)->gen < 9)
 			intel_dp_stop_link_train(intel_dp);
 
 		intel_edp_backlight_on(intel_dp);
-		intel_edp_psr_enable(intel_dp);
+		intel_psr_enable(intel_dp);
 	}
 
 	if (intel_crtc->config.has_audio) {
 		intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
-		tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
-		tmp |= ((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4));
-		I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+		intel_audio_codec_enable(intel_encoder);
 	}
 }
 
@@ -1200,30 +1613,71 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
 	struct drm_encoder *encoder = &intel_encoder->base;
 	struct drm_crtc *crtc = encoder->crtc;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	int pipe = intel_crtc->pipe;
 	int type = intel_encoder->type;
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	uint32_t tmp;
 
-	/* We can't touch HSW_AUD_PIN_ELD_CP_VLD uncionditionally because this
-	 * register is part of the power well on Haswell. */
 	if (intel_crtc->config.has_audio) {
-		tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
-		tmp &= ~((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) <<
-			 (pipe * 4));
-		I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+		intel_audio_codec_disable(intel_encoder);
 		intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
 	}
 
 	if (type == INTEL_OUTPUT_EDP) {
 		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
-		intel_edp_psr_disable(intel_dp);
+		intel_psr_disable(intel_dp);
 		intel_edp_backlight_off(intel_dp);
 	}
 }
 
+static int skl_get_cdclk_freq(struct drm_i915_private *dev_priv)
+{
+	uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
+	uint32_t cdctl = I915_READ(CDCLK_CTL);
+	uint32_t linkrate;
+
+	if (!(lcpll1 & LCPLL_PLL_ENABLE)) {
+		WARN(1, "LCPLL1 not enabled\n");
+		return 24000; /* 24MHz is the cd freq with NSSC ref */
+	}
+
+	if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540)
+		return 540000;
+
+	linkrate = (I915_READ(DPLL_CTRL1) &
+		    DPLL_CRTL1_LINK_RATE_MASK(SKL_DPLL0)) >> 1;
+
+	if (linkrate == DPLL_CRTL1_LINK_RATE_2160 ||
+	    linkrate == DPLL_CRTL1_LINK_RATE_1080) {
+		/* vco 8640 */
+		switch (cdctl & CDCLK_FREQ_SEL_MASK) {
+		case CDCLK_FREQ_450_432:
+			return 432000;
+		case CDCLK_FREQ_337_308:
+			return 308570;
+		case CDCLK_FREQ_675_617:
+			return 617140;
+		default:
+			WARN(1, "Unknown cd freq selection\n");
+		}
+	} else {
+		/* vco 8100 */
+		switch (cdctl & CDCLK_FREQ_SEL_MASK) {
+		case CDCLK_FREQ_450_432:
+			return 450000;
+		case CDCLK_FREQ_337_308:
+			return 337500;
+		case CDCLK_FREQ_675_617:
+			return 675000;
+		default:
+			WARN(1, "Unknown cd freq selection\n");
+		}
+	}
+
+	/* error case, do as if DPLL0 isn't enabled */
+	return 24000;
+}
+
 static int bdw_get_cdclk_freq(struct drm_i915_private *dev_priv)
 {
 	uint32_t lcpll = I915_READ(LCPLL_CTL);
@@ -1255,7 +1709,7 @@ static int hsw_get_cdclk_freq(struct drm_i915_private *dev_priv)
 		return 450000;
 	else if (freq == LCPLL_CLK_FREQ_450)
 		return 450000;
-	else if (IS_ULT(dev))
+	else if (IS_HSW_ULT(dev))
 		return 337500;
 	else
 		return 540000;
@@ -1265,6 +1719,9 @@ int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
 {
 	struct drm_device *dev = dev_priv->dev;
 
+	if (IS_SKYLAKE(dev))
+		return skl_get_cdclk_freq(dev_priv);
+
 	if (IS_BROADWELL(dev))
 		return bdw_get_cdclk_freq(dev_priv);
 
@@ -1275,7 +1732,7 @@ int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)
1275static void hsw_ddi_pll_enable(struct drm_i915_private *dev_priv, 1732static void hsw_ddi_pll_enable(struct drm_i915_private *dev_priv,
1276 struct intel_shared_dpll *pll) 1733 struct intel_shared_dpll *pll)
1277{ 1734{
1278 I915_WRITE(WRPLL_CTL(pll->id), pll->hw_state.wrpll); 1735 I915_WRITE(WRPLL_CTL(pll->id), pll->config.hw_state.wrpll);
1279 POSTING_READ(WRPLL_CTL(pll->id)); 1736 POSTING_READ(WRPLL_CTL(pll->id));
1280 udelay(20); 1737 udelay(20);
1281} 1738}
@@ -1296,7 +1753,7 @@ static bool hsw_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
1296{ 1753{
1297 uint32_t val; 1754 uint32_t val;
1298 1755
1299 if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_PLLS)) 1756 if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
1300 return false; 1757 return false;
1301 1758
1302 val = I915_READ(WRPLL_CTL(pll->id)); 1759 val = I915_READ(WRPLL_CTL(pll->id));
@@ -1326,26 +1783,156 @@ static void hsw_shared_dplls_init(struct drm_i915_private *dev_priv)
1326 } 1783 }
1327} 1784}
1328 1785
1786static const char * const skl_ddi_pll_names[] = {
1787 "DPLL 1",
1788 "DPLL 2",
1789 "DPLL 3",
1790};
1791
1792struct skl_dpll_regs {
1793 u32 ctl, cfgcr1, cfgcr2;
1794};
1795
1796/* this array is indexed by the *shared* pll id */
1797static const struct skl_dpll_regs skl_dpll_regs[3] = {
1798 {
1799 /* DPLL 1 */
1800 .ctl = LCPLL2_CTL,
1801 .cfgcr1 = DPLL1_CFGCR1,
1802 .cfgcr2 = DPLL1_CFGCR2,
1803 },
1804 {
1805 /* DPLL 2 */
1806 .ctl = WRPLL_CTL1,
1807 .cfgcr1 = DPLL2_CFGCR1,
1808 .cfgcr2 = DPLL2_CFGCR2,
1809 },
1810 {
1811 /* DPLL 3 */
1812 .ctl = WRPLL_CTL2,
1813 .cfgcr1 = DPLL3_CFGCR1,
1814 .cfgcr2 = DPLL3_CFGCR2,
1815 },
1816};
1817
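Note the indexing split here: skl_dpll_regs is indexed by the shared-PLL id, while the DPLL_CTRL1/DPLL_STATUS bitfields below use the hardware DPLL number, offset by one because DPLL0 (the CDCLK source read by skl_get_cdclk_freq() above) is not part of the shared array. A one-line sketch of that translation:

/* sketch: shared id 0 -> hardware DPLL1, id 1 -> DPLL2, id 2 -> DPLL3;
 * DPLL0 is excluded from the shared set, per the comment above */
static unsigned int skl_hw_dpll(unsigned int shared_id)
{
	return shared_id + 1;
}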
1818static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
1819 struct intel_shared_dpll *pll)
1820{
1821 uint32_t val;
1822 unsigned int dpll;
1823 const struct skl_dpll_regs *regs = skl_dpll_regs;
1824
1825 /* DPLL0 is not part of the shared DPLLs, so pll->id is 0 for DPLL1 */
1826 dpll = pll->id + 1;
1827
1828 val = I915_READ(DPLL_CTRL1);
1829
1830 val &= ~(DPLL_CTRL1_HDMI_MODE(dpll) | DPLL_CTRL1_SSC(dpll) |
1831 DPLL_CRTL1_LINK_RATE_MASK(dpll));
1832 val |= pll->config.hw_state.ctrl1 << (dpll * 6);
1833
1834 I915_WRITE(DPLL_CTRL1, val);
1835 POSTING_READ(DPLL_CTRL1);
1836
1837 I915_WRITE(regs[pll->id].cfgcr1, pll->config.hw_state.cfgcr1);
1838 I915_WRITE(regs[pll->id].cfgcr2, pll->config.hw_state.cfgcr2);
1839 POSTING_READ(regs[pll->id].cfgcr1);
1840 POSTING_READ(regs[pll->id].cfgcr2);
1841
1842 /* the enable bit is always bit 31 */
1843 I915_WRITE(regs[pll->id].ctl,
1844 I915_READ(regs[pll->id].ctl) | LCPLL_PLL_ENABLE);
1845
1846 if (wait_for(I915_READ(DPLL_STATUS) & DPLL_LOCK(dpll), 5))
1847 DRM_ERROR("DPLL %d not locked\n", dpll);
1848}
1849
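The DPLL_CTRL1 update works because each DPLL owns six consecutive bits starting at bit dpll*6, so hw_state.ctrl1 can be shifted straight into position once the old field is masked out. A standalone sketch of that arithmetic, matching the readback in skl_ddi_pll_get_hw_state() below (the driver clears only the mode/SSC/link-rate bits; this sketch clears the full six-bit field for brevity):

#include <stdint.h>

/* write a 6-bit per-DPLL field into a DPLL_CTRL1-style register value */
static uint32_t dpll_ctrl1_set(uint32_t reg, unsigned int dpll, uint32_t field)
{
	reg &= ~(0x3fu << (dpll * 6));       /* clear this DPLL's field */
	return reg | (field << (dpll * 6));  /* shift the new one in    */
}

/* read the same field back, as skl_ddi_pll_get_hw_state() does */
static uint32_t dpll_ctrl1_get(uint32_t reg, unsigned int dpll)
{
	return (reg >> (dpll * 6)) & 0x3f;
}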
1850static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
1851 struct intel_shared_dpll *pll)
1852{
1853 const struct skl_dpll_regs *regs = skl_dpll_regs;
1854
1855 /* the enable bit is always bit 31 */
1856 I915_WRITE(regs[pll->id].ctl,
1857 I915_READ(regs[pll->id].ctl) & ~LCPLL_PLL_ENABLE);
1858 POSTING_READ(regs[pll->id].ctl);
1859}
1860
1861static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
1862 struct intel_shared_dpll *pll,
1863 struct intel_dpll_hw_state *hw_state)
1864{
1865 uint32_t val;
1866 unsigned int dpll;
1867 const struct skl_dpll_regs *regs = skl_dpll_regs;
1868
1869 if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
1870 return false;
1871
1872 /* DPLL0 is not part of the shared DPLLs, so pll->id is 0 for DPLL1 */
1873 dpll = pll->id + 1;
1874
1875 val = I915_READ(regs[pll->id].ctl);
1876 if (!(val & LCPLL_PLL_ENABLE))
1877 return false;
1878
1879 val = I915_READ(DPLL_CTRL1);
1880 hw_state->ctrl1 = (val >> (dpll * 6)) & 0x3f;
1881
1882 /* avoid reading back stale values if HDMI mode is not enabled */
1883 if (val & DPLL_CTRL1_HDMI_MODE(dpll)) {
1884 hw_state->cfgcr1 = I915_READ(regs[pll->id].cfgcr1);
1885 hw_state->cfgcr2 = I915_READ(regs[pll->id].cfgcr2);
1886 }
1887
1888 return true;
1889}
1890
1891static void skl_shared_dplls_init(struct drm_i915_private *dev_priv)
1892{
1893 int i;
1894
1895 dev_priv->num_shared_dpll = 3;
1896
1897 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
1898 dev_priv->shared_dplls[i].id = i;
1899 dev_priv->shared_dplls[i].name = skl_ddi_pll_names[i];
1900 dev_priv->shared_dplls[i].disable = skl_ddi_pll_disable;
1901 dev_priv->shared_dplls[i].enable = skl_ddi_pll_enable;
1902 dev_priv->shared_dplls[i].get_hw_state =
1903 skl_ddi_pll_get_hw_state;
1904 }
1905}
1906
1329void intel_ddi_pll_init(struct drm_device *dev) 1907void intel_ddi_pll_init(struct drm_device *dev)
1330{ 1908{
1331 struct drm_i915_private *dev_priv = dev->dev_private; 1909 struct drm_i915_private *dev_priv = dev->dev_private;
1332 uint32_t val = I915_READ(LCPLL_CTL); 1910 uint32_t val = I915_READ(LCPLL_CTL);
1333 1911
1334 hsw_shared_dplls_init(dev_priv); 1912 if (IS_SKYLAKE(dev))
1335 1913 skl_shared_dplls_init(dev_priv);
1336 /* The LCPLL register should be turned on by the BIOS. For now let's 1914 else
1337 * just check its state and print errors in case something is wrong. 1915 hsw_shared_dplls_init(dev_priv);
1338 * Don't even try to turn it on.
1339 */
1340 1916
1341 DRM_DEBUG_KMS("CDCLK running at %dKHz\n", 1917 DRM_DEBUG_KMS("CDCLK running at %dKHz\n",
1342 intel_ddi_get_cdclk_freq(dev_priv)); 1918 intel_ddi_get_cdclk_freq(dev_priv));
1343 1919
1344 if (val & LCPLL_CD_SOURCE_FCLK) 1920 if (IS_SKYLAKE(dev)) {
1345 DRM_ERROR("CDCLK source is not LCPLL\n"); 1921 if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE))
1922 DRM_ERROR("LCPLL1 is disabled\n");
1923 } else {
1924 /*
1925 * The LCPLL register should be turned on by the BIOS. For now
1926 * let's just check its state and print errors in case
1927 * something is wrong. Don't even try to turn it on.
1928 */
1346 1929
1347 if (val & LCPLL_PLL_DISABLE) 1930 if (val & LCPLL_CD_SOURCE_FCLK)
1348 DRM_ERROR("LCPLL is disabled\n"); 1931 DRM_ERROR("CDCLK source is not LCPLL\n");
1932
1933 if (val & LCPLL_PLL_DISABLE)
1934 DRM_ERROR("LCPLL is disabled\n");
1935 }
1349} 1936}
1350 1937
1351void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder) 1938void intel_ddi_prepare_link_retrain(struct drm_encoder *encoder)
@@ -1440,7 +2027,9 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
1440 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 2027 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
1441 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); 2028 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
1442 enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder; 2029 enum transcoder cpu_transcoder = intel_crtc->config.cpu_transcoder;
2030 struct intel_hdmi *intel_hdmi;
1443 u32 temp, flags = 0; 2031 u32 temp, flags = 0;
2032 struct drm_device *dev = dev_priv->dev;
1444 2033
1445 temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)); 2034 temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
1446 if (temp & TRANS_DDI_PHSYNC) 2035 if (temp & TRANS_DDI_PHSYNC)
@@ -1474,6 +2063,11 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
1474 switch (temp & TRANS_DDI_MODE_SELECT_MASK) { 2063 switch (temp & TRANS_DDI_MODE_SELECT_MASK) {
1475 case TRANS_DDI_MODE_SELECT_HDMI: 2064 case TRANS_DDI_MODE_SELECT_HDMI:
1476 pipe_config->has_hdmi_sink = true; 2065 pipe_config->has_hdmi_sink = true;
2066 intel_hdmi = enc_to_intel_hdmi(&encoder->base);
2067
2068 if (intel_hdmi->infoframe_enabled(&encoder->base))
2069 pipe_config->has_infoframe = true;
2070 break;
1477 case TRANS_DDI_MODE_SELECT_DVI: 2071 case TRANS_DDI_MODE_SELECT_DVI:
1478 case TRANS_DDI_MODE_SELECT_FDI: 2072 case TRANS_DDI_MODE_SELECT_FDI:
1479 break; 2073 break;
@@ -1486,9 +2080,9 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
1486 break; 2080 break;
1487 } 2081 }
1488 2082
1489 if (intel_display_power_enabled(dev_priv, POWER_DOMAIN_AUDIO)) { 2083 if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
1490 temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD); 2084 temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
1491 if (temp & (AUDIO_OUTPUT_ENABLE_A << (intel_crtc->pipe * 4))) 2085 if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe))
1492 pipe_config->has_audio = true; 2086 pipe_config->has_audio = true;
1493 } 2087 }
1494 2088
@@ -1512,7 +2106,10 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
1512 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp; 2106 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
1513 } 2107 }
1514 2108
1515 hsw_ddi_clock_get(encoder, pipe_config); 2109 if (INTEL_INFO(dev)->gen <= 8)
2110 hsw_ddi_clock_get(encoder, pipe_config);
2111 else
2112 skl_ddi_clock_get(encoder, pipe_config);
1516} 2113}
1517 2114
1518static void intel_ddi_destroy(struct drm_encoder *encoder) 2115static void intel_ddi_destroy(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 9cb5c95d5898..fb3e3d429191 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -73,8 +73,6 @@ static const uint32_t intel_cursor_formats[] = {
73 DRM_FORMAT_ARGB8888, 73 DRM_FORMAT_ARGB8888,
74}; 74};
75 75
76static void intel_increase_pllclock(struct drm_device *dev,
77 enum pipe pipe);
78static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); 76static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
79 77
80static void i9xx_crtc_clock_get(struct intel_crtc *crtc, 78static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
@@ -96,8 +94,10 @@ static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
96static void ironlake_set_pipeconf(struct drm_crtc *crtc); 94static void ironlake_set_pipeconf(struct drm_crtc *crtc);
97static void haswell_set_pipeconf(struct drm_crtc *crtc); 95static void haswell_set_pipeconf(struct drm_crtc *crtc);
98static void intel_set_pipe_csc(struct drm_crtc *crtc); 96static void intel_set_pipe_csc(struct drm_crtc *crtc);
99static void vlv_prepare_pll(struct intel_crtc *crtc); 97static void vlv_prepare_pll(struct intel_crtc *crtc,
100static void chv_prepare_pll(struct intel_crtc *crtc); 98 const struct intel_crtc_config *pipe_config);
99static void chv_prepare_pll(struct intel_crtc *crtc,
100 const struct intel_crtc_config *pipe_config);
101 101
102static struct intel_encoder *intel_find_encoder(struct intel_connector *connector, int pipe) 102static struct intel_encoder *intel_find_encoder(struct intel_connector *connector, int pipe)
103{ 103{
@@ -408,25 +408,43 @@ static void vlv_clock(int refclk, intel_clock_t *clock)
408/** 408/**
409 * Returns whether any output on the specified pipe is of the specified type 409 * Returns whether any output on the specified pipe is of the specified type
410 */ 410 */
411static bool intel_pipe_has_type(struct drm_crtc *crtc, int type) 411bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type)
412{ 412{
413 struct drm_device *dev = crtc->dev; 413 struct drm_device *dev = crtc->base.dev;
414 struct intel_encoder *encoder; 414 struct intel_encoder *encoder;
415 415
416 for_each_encoder_on_crtc(dev, crtc, encoder) 416 for_each_encoder_on_crtc(dev, &crtc->base, encoder)
417 if (encoder->type == type) 417 if (encoder->type == type)
418 return true; 418 return true;
419 419
420 return false; 420 return false;
421} 421}
422 422
423static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc, 423/**
424 * Returns whether any output on the specified pipe will have the specified
425 * type after a staged modeset is complete, i.e., the same as
426 * intel_pipe_has_type() but looking at encoder->new_crtc instead of
427 * encoder->crtc.
428 */
429static bool intel_pipe_will_have_type(struct intel_crtc *crtc, int type)
430{
431 struct drm_device *dev = crtc->base.dev;
432 struct intel_encoder *encoder;
433
434 for_each_intel_encoder(dev, encoder)
435 if (encoder->new_crtc == crtc && encoder->type == type)
436 return true;
437
438 return false;
439}
440
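intel_pipe_will_have_type() exists because, mid-modeset, encoder->crtc still reflects the current hardware while encoder->new_crtc holds the staged assignment; the clock-limit lookups below are switched to the staged variant so they validate the configuration actually being computed. A self-contained sketch of the contrast (simplified types, not driver code):

#include <stdbool.h>

struct crtc;

struct encoder {
	const struct crtc *crtc;     /* current attachment */
	const struct crtc *new_crtc; /* staged attachment  */
	int type;
};

/* the two predicates differ only in which pointer they compare */
static bool pipe_has_type(const struct encoder *e, const struct crtc *c, int t)
{
	return e->crtc == c && e->type == t;
}

static bool pipe_will_have_type(const struct encoder *e, const struct crtc *c, int t)
{
	return e->new_crtc == c && e->type == t;
}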
441static const intel_limit_t *intel_ironlake_limit(struct intel_crtc *crtc,
424 int refclk) 442 int refclk)
425{ 443{
426 struct drm_device *dev = crtc->dev; 444 struct drm_device *dev = crtc->base.dev;
427 const intel_limit_t *limit; 445 const intel_limit_t *limit;
428 446
429 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 447 if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
430 if (intel_is_dual_link_lvds(dev)) { 448 if (intel_is_dual_link_lvds(dev)) {
431 if (refclk == 100000) 449 if (refclk == 100000)
432 limit = &intel_limits_ironlake_dual_lvds_100m; 450 limit = &intel_limits_ironlake_dual_lvds_100m;
@@ -444,20 +462,20 @@ static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
444 return limit; 462 return limit;
445} 463}
446 464
447static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc) 465static const intel_limit_t *intel_g4x_limit(struct intel_crtc *crtc)
448{ 466{
449 struct drm_device *dev = crtc->dev; 467 struct drm_device *dev = crtc->base.dev;
450 const intel_limit_t *limit; 468 const intel_limit_t *limit;
451 469
452 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 470 if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
453 if (intel_is_dual_link_lvds(dev)) 471 if (intel_is_dual_link_lvds(dev))
454 limit = &intel_limits_g4x_dual_channel_lvds; 472 limit = &intel_limits_g4x_dual_channel_lvds;
455 else 473 else
456 limit = &intel_limits_g4x_single_channel_lvds; 474 limit = &intel_limits_g4x_single_channel_lvds;
457 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) || 475 } else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_HDMI) ||
458 intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) { 476 intel_pipe_will_have_type(crtc, INTEL_OUTPUT_ANALOG)) {
459 limit = &intel_limits_g4x_hdmi; 477 limit = &intel_limits_g4x_hdmi;
460 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) { 478 } else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_SDVO)) {
461 limit = &intel_limits_g4x_sdvo; 479 limit = &intel_limits_g4x_sdvo;
462 } else /* The option is for other outputs */ 480 } else /* The option is for other outputs */
463 limit = &intel_limits_i9xx_sdvo; 481 limit = &intel_limits_i9xx_sdvo;
@@ -465,9 +483,9 @@ static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
465 return limit; 483 return limit;
466} 484}
467 485
468static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk) 486static const intel_limit_t *intel_limit(struct intel_crtc *crtc, int refclk)
469{ 487{
470 struct drm_device *dev = crtc->dev; 488 struct drm_device *dev = crtc->base.dev;
471 const intel_limit_t *limit; 489 const intel_limit_t *limit;
472 490
473 if (HAS_PCH_SPLIT(dev)) 491 if (HAS_PCH_SPLIT(dev))
@@ -475,7 +493,7 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
475 else if (IS_G4X(dev)) { 493 else if (IS_G4X(dev)) {
476 limit = intel_g4x_limit(crtc); 494 limit = intel_g4x_limit(crtc);
477 } else if (IS_PINEVIEW(dev)) { 495 } else if (IS_PINEVIEW(dev)) {
478 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 496 if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
479 limit = &intel_limits_pineview_lvds; 497 limit = &intel_limits_pineview_lvds;
480 else 498 else
481 limit = &intel_limits_pineview_sdvo; 499 limit = &intel_limits_pineview_sdvo;
@@ -484,14 +502,14 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
484 } else if (IS_VALLEYVIEW(dev)) { 502 } else if (IS_VALLEYVIEW(dev)) {
485 limit = &intel_limits_vlv; 503 limit = &intel_limits_vlv;
486 } else if (!IS_GEN2(dev)) { 504 } else if (!IS_GEN2(dev)) {
487 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 505 if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
488 limit = &intel_limits_i9xx_lvds; 506 limit = &intel_limits_i9xx_lvds;
489 else 507 else
490 limit = &intel_limits_i9xx_sdvo; 508 limit = &intel_limits_i9xx_sdvo;
491 } else { 509 } else {
492 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 510 if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
493 limit = &intel_limits_i8xx_lvds; 511 limit = &intel_limits_i8xx_lvds;
494 else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO)) 512 else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_DVO))
495 limit = &intel_limits_i8xx_dvo; 513 limit = &intel_limits_i8xx_dvo;
496 else 514 else
497 limit = &intel_limits_i8xx_dac; 515 limit = &intel_limits_i8xx_dac;
@@ -578,15 +596,15 @@ static bool intel_PLL_is_valid(struct drm_device *dev,
578} 596}
579 597
580static bool 598static bool
581i9xx_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc, 599i9xx_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
582 int target, int refclk, intel_clock_t *match_clock, 600 int target, int refclk, intel_clock_t *match_clock,
583 intel_clock_t *best_clock) 601 intel_clock_t *best_clock)
584{ 602{
585 struct drm_device *dev = crtc->dev; 603 struct drm_device *dev = crtc->base.dev;
586 intel_clock_t clock; 604 intel_clock_t clock;
587 int err = target; 605 int err = target;
588 606
589 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 607 if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
590 /* 608 /*
591 * For LVDS just rely on its current settings for dual-channel. 609 * For LVDS just rely on its current settings for dual-channel.
592 * We haven't figured out how to reliably set up different 610 * We haven't figured out how to reliably set up different
@@ -639,15 +657,15 @@ i9xx_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
639} 657}
640 658
641static bool 659static bool
642pnv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc, 660pnv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
643 int target, int refclk, intel_clock_t *match_clock, 661 int target, int refclk, intel_clock_t *match_clock,
644 intel_clock_t *best_clock) 662 intel_clock_t *best_clock)
645{ 663{
646 struct drm_device *dev = crtc->dev; 664 struct drm_device *dev = crtc->base.dev;
647 intel_clock_t clock; 665 intel_clock_t clock;
648 int err = target; 666 int err = target;
649 667
650 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 668 if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
651 /* 669 /*
652 * For LVDS just rely on its current settings for dual-channel. 670 * For LVDS just rely on its current settings for dual-channel.
653 * We haven't figured out how to reliably set up different 671 * We haven't figured out how to reliably set up different
@@ -698,11 +716,11 @@ pnv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
698} 716}
699 717
700static bool 718static bool
701g4x_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc, 719g4x_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
702 int target, int refclk, intel_clock_t *match_clock, 720 int target, int refclk, intel_clock_t *match_clock,
703 intel_clock_t *best_clock) 721 intel_clock_t *best_clock)
704{ 722{
705 struct drm_device *dev = crtc->dev; 723 struct drm_device *dev = crtc->base.dev;
706 intel_clock_t clock; 724 intel_clock_t clock;
707 int max_n; 725 int max_n;
708 bool found; 726 bool found;
@@ -710,7 +728,7 @@ g4x_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
710 int err_most = (target >> 8) + (target >> 9); 728 int err_most = (target >> 8) + (target >> 9);
711 found = false; 729 found = false;
712 730
713 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 731 if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
714 if (intel_is_dual_link_lvds(dev)) 732 if (intel_is_dual_link_lvds(dev))
715 clock.p2 = limit->p2.p2_fast; 733 clock.p2 = limit->p2.p2_fast;
716 else 734 else
@@ -755,11 +773,11 @@ g4x_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
755} 773}
756 774
757static bool 775static bool
758vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc, 776vlv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
759 int target, int refclk, intel_clock_t *match_clock, 777 int target, int refclk, intel_clock_t *match_clock,
760 intel_clock_t *best_clock) 778 intel_clock_t *best_clock)
761{ 779{
762 struct drm_device *dev = crtc->dev; 780 struct drm_device *dev = crtc->base.dev;
763 intel_clock_t clock; 781 intel_clock_t clock;
764 unsigned int bestppm = 1000000; 782 unsigned int bestppm = 1000000;
765 /* min update 19.2 MHz */ 783 /* min update 19.2 MHz */
@@ -812,11 +830,11 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
812} 830}
813 831
814static bool 832static bool
815chv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc, 833chv_find_best_dpll(const intel_limit_t *limit, struct intel_crtc *crtc,
816 int target, int refclk, intel_clock_t *match_clock, 834 int target, int refclk, intel_clock_t *match_clock,
817 intel_clock_t *best_clock) 835 intel_clock_t *best_clock)
818{ 836{
819 struct drm_device *dev = crtc->dev; 837 struct drm_device *dev = crtc->base.dev;
820 intel_clock_t clock; 838 intel_clock_t clock;
821 uint64_t m2; 839 uint64_t m2;
822 int found = false; 840 int found = false;
@@ -889,60 +907,6 @@ enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
889 return intel_crtc->config.cpu_transcoder; 907 return intel_crtc->config.cpu_transcoder;
890} 908}
891 909
892static void g4x_wait_for_vblank(struct drm_device *dev, int pipe)
893{
894 struct drm_i915_private *dev_priv = dev->dev_private;
895 u32 frame, frame_reg = PIPE_FRMCOUNT_GM45(pipe);
896
897 frame = I915_READ(frame_reg);
898
899 if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
900 WARN(1, "vblank wait on pipe %c timed out\n",
901 pipe_name(pipe));
902}
903
904/**
905 * intel_wait_for_vblank - wait for vblank on a given pipe
906 * @dev: drm device
907 * @pipe: pipe to wait for
908 *
909 * Wait for vblank to occur on a given pipe. Needed for various bits of
910 * mode setting code.
911 */
912void intel_wait_for_vblank(struct drm_device *dev, int pipe)
913{
914 struct drm_i915_private *dev_priv = dev->dev_private;
915 int pipestat_reg = PIPESTAT(pipe);
916
917 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
918 g4x_wait_for_vblank(dev, pipe);
919 return;
920 }
921
922 /* Clear existing vblank status. Note this will clear any other
923 * sticky status fields as well.
924 *
925 * This races with i915_driver_irq_handler() with the result
926 * that either function could miss a vblank event. Here it is not
927 * fatal, as we will either wait upon the next vblank interrupt or
928 * timeout. Generally speaking intel_wait_for_vblank() is only
929 * called during modeset at which time the GPU should be idle and
930 * should *not* be performing page flips and thus not waiting on
931 * vblanks...
932 * Currently, the result of us stealing a vblank from the irq
933 * handler is that a single frame will be skipped during swapbuffers.
934 */
935 I915_WRITE(pipestat_reg,
936 I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
937
938 /* Wait for vblank interrupt bit to set */
939 if (wait_for(I915_READ(pipestat_reg) &
940 PIPE_VBLANK_INTERRUPT_STATUS,
941 50))
942 DRM_DEBUG_KMS("vblank wait on pipe %c timed out\n",
943 pipe_name(pipe));
944}
945
946static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe) 910static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
947{ 911{
948 struct drm_i915_private *dev_priv = dev->dev_private; 912 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1189,8 +1153,8 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1189 state_string(state), state_string(cur_state)); 1153 state_string(state), state_string(cur_state));
1190} 1154}
1191 1155
1192static void assert_panel_unlocked(struct drm_i915_private *dev_priv, 1156void assert_panel_unlocked(struct drm_i915_private *dev_priv,
1193 enum pipe pipe) 1157 enum pipe pipe)
1194{ 1158{
1195 struct drm_device *dev = dev_priv->dev; 1159 struct drm_device *dev = dev_priv->dev;
1196 int pp_reg; 1160 int pp_reg;
@@ -1263,7 +1227,7 @@ void assert_pipe(struct drm_i915_private *dev_priv,
1263 (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)) 1227 (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
1264 state = true; 1228 state = true;
1265 1229
1266 if (!intel_display_power_enabled(dev_priv, 1230 if (!intel_display_power_is_enabled(dev_priv,
1267 POWER_DOMAIN_TRANSCODER(cpu_transcoder))) { 1231 POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
1268 cur_state = false; 1232 cur_state = false;
1269 } else { 1233 } else {
@@ -1332,7 +1296,14 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
1332 int reg, sprite; 1296 int reg, sprite;
1333 u32 val; 1297 u32 val;
1334 1298
1335 if (IS_VALLEYVIEW(dev)) { 1299 if (INTEL_INFO(dev)->gen >= 9) {
1300 for_each_sprite(pipe, sprite) {
1301 val = I915_READ(PLANE_CTL(pipe, sprite));
1302 WARN(val & PLANE_CTL_ENABLE,
1303 "plane %d assertion failure, should be off on pipe %c but is still active\n",
1304 sprite, pipe_name(pipe));
1305 }
1306 } else if (IS_VALLEYVIEW(dev)) {
1336 for_each_sprite(pipe, sprite) { 1307 for_each_sprite(pipe, sprite) {
1337 reg = SPCNTR(pipe, sprite); 1308 reg = SPCNTR(pipe, sprite);
1338 val = I915_READ(reg); 1309 val = I915_READ(reg);
@@ -1533,12 +1504,13 @@ static void intel_init_dpio(struct drm_device *dev)
1533 } 1504 }
1534} 1505}
1535 1506
1536static void vlv_enable_pll(struct intel_crtc *crtc) 1507static void vlv_enable_pll(struct intel_crtc *crtc,
1508 const struct intel_crtc_config *pipe_config)
1537{ 1509{
1538 struct drm_device *dev = crtc->base.dev; 1510 struct drm_device *dev = crtc->base.dev;
1539 struct drm_i915_private *dev_priv = dev->dev_private; 1511 struct drm_i915_private *dev_priv = dev->dev_private;
1540 int reg = DPLL(crtc->pipe); 1512 int reg = DPLL(crtc->pipe);
1541 u32 dpll = crtc->config.dpll_hw_state.dpll; 1513 u32 dpll = pipe_config->dpll_hw_state.dpll;
1542 1514
1543 assert_pipe_disabled(dev_priv, crtc->pipe); 1515 assert_pipe_disabled(dev_priv, crtc->pipe);
1544 1516
@@ -1556,7 +1528,7 @@ static void vlv_enable_pll(struct intel_crtc *crtc)
1556 if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1)) 1528 if (wait_for(((I915_READ(reg) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
1557 DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe); 1529 DRM_ERROR("DPLL %d failed to lock\n", crtc->pipe);
1558 1530
1559 I915_WRITE(DPLL_MD(crtc->pipe), crtc->config.dpll_hw_state.dpll_md); 1531 I915_WRITE(DPLL_MD(crtc->pipe), pipe_config->dpll_hw_state.dpll_md);
1560 POSTING_READ(DPLL_MD(crtc->pipe)); 1532 POSTING_READ(DPLL_MD(crtc->pipe));
1561 1533
1562 /* We do this three times for luck */ 1534 /* We do this three times for luck */
@@ -1571,7 +1543,8 @@ static void vlv_enable_pll(struct intel_crtc *crtc)
1571 udelay(150); /* wait for warmup */ 1543 udelay(150); /* wait for warmup */
1572} 1544}
1573 1545
1574static void chv_enable_pll(struct intel_crtc *crtc) 1546static void chv_enable_pll(struct intel_crtc *crtc,
1547 const struct intel_crtc_config *pipe_config)
1575{ 1548{
1576 struct drm_device *dev = crtc->base.dev; 1549 struct drm_device *dev = crtc->base.dev;
1577 struct drm_i915_private *dev_priv = dev->dev_private; 1550 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1596,14 +1569,14 @@ static void chv_enable_pll(struct intel_crtc *crtc)
1596 udelay(1); 1569 udelay(1);
1597 1570
1598 /* Enable PLL */ 1571 /* Enable PLL */
1599 I915_WRITE(DPLL(pipe), crtc->config.dpll_hw_state.dpll); 1572 I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
1600 1573
1601 /* Check PLL is locked */ 1574 /* Check PLL is locked */
1602 if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1)) 1575 if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
1603 DRM_ERROR("PLL %d failed to lock\n", pipe); 1576 DRM_ERROR("PLL %d failed to lock\n", pipe);
1604 1577
1605 /* not sure when this should be written */ 1578 /* not sure when this should be written */
1606 I915_WRITE(DPLL_MD(pipe), crtc->config.dpll_hw_state.dpll_md); 1579 I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
1607 POSTING_READ(DPLL_MD(pipe)); 1580 POSTING_READ(DPLL_MD(pipe));
1608 1581
1609 mutex_unlock(&dev_priv->dpio_lock); 1582 mutex_unlock(&dev_priv->dpio_lock);
@@ -1616,7 +1589,7 @@ static int intel_num_dvo_pipes(struct drm_device *dev)
1616 1589
1617 for_each_intel_crtc(dev, crtc) 1590 for_each_intel_crtc(dev, crtc)
1618 count += crtc->active && 1591 count += crtc->active &&
1619 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO); 1592 intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO);
1620 1593
1621 return count; 1594 return count;
1622} 1595}
@@ -1695,7 +1668,7 @@ static void i9xx_disable_pll(struct intel_crtc *crtc)
1695 1668
1696 /* Disable DVO 2x clock on both PLLs if necessary */ 1669 /* Disable DVO 2x clock on both PLLs if necessary */
1697 if (IS_I830(dev) && 1670 if (IS_I830(dev) &&
1698 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO) && 1671 intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) &&
1699 intel_num_dvo_pipes(dev) == 1) { 1672 intel_num_dvo_pipes(dev) == 1) {
1700 I915_WRITE(DPLL(PIPE_B), 1673 I915_WRITE(DPLL(PIPE_B),
1701 I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE); 1674 I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
@@ -1806,7 +1779,7 @@ static void intel_prepare_shared_dpll(struct intel_crtc *crtc)
1806 if (WARN_ON(pll == NULL)) 1779 if (WARN_ON(pll == NULL))
1807 return; 1780 return;
1808 1781
1809 WARN_ON(!pll->refcount); 1782 WARN_ON(!pll->config.crtc_mask);
1810 if (pll->active == 0) { 1783 if (pll->active == 0) {
1811 DRM_DEBUG_DRIVER("setting up %s\n", pll->name); 1784 DRM_DEBUG_DRIVER("setting up %s\n", pll->name);
1812 WARN_ON(pll->on); 1785 WARN_ON(pll->on);
@@ -1833,7 +1806,7 @@ static void intel_enable_shared_dpll(struct intel_crtc *crtc)
1833 if (WARN_ON(pll == NULL)) 1806 if (WARN_ON(pll == NULL))
1834 return; 1807 return;
1835 1808
1836 if (WARN_ON(pll->refcount == 0)) 1809 if (WARN_ON(pll->config.crtc_mask == 0))
1837 return; 1810 return;
1838 1811
1839 DRM_DEBUG_KMS("enable %s (active %d, on? %d) for crtc %d\n", 1812 DRM_DEBUG_KMS("enable %s (active %d, on? %d) for crtc %d\n",
@@ -1865,7 +1838,7 @@ static void intel_disable_shared_dpll(struct intel_crtc *crtc)
1865 if (WARN_ON(pll == NULL)) 1838 if (WARN_ON(pll == NULL))
1866 return; 1839 return;
1867 1840
1868 if (WARN_ON(pll->refcount == 0)) 1841 if (WARN_ON(pll->config.crtc_mask == 0))
1869 return; 1842 return;
1870 1843
1871 DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n", 1844 DRM_DEBUG_KMS("disable %s (active %d, on? %d) for crtc %d\n",
@@ -1933,7 +1906,7 @@ static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1933 val &= ~TRANS_INTERLACE_MASK; 1906 val &= ~TRANS_INTERLACE_MASK;
1934 if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) 1907 if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
1935 if (HAS_PCH_IBX(dev_priv->dev) && 1908 if (HAS_PCH_IBX(dev_priv->dev) &&
1936 intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) 1909 intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
1937 val |= TRANS_LEGACY_INTERLACED_ILK; 1910 val |= TRANS_LEGACY_INTERLACED_ILK;
1938 else 1911 else
1939 val |= TRANS_INTERLACED; 1912 val |= TRANS_INTERLACED;
@@ -2056,7 +2029,7 @@ static void intel_enable_pipe(struct intel_crtc *crtc)
2056 * need the check. 2029 * need the check.
2057 */ 2030 */
2058 if (!HAS_PCH_SPLIT(dev_priv->dev)) 2031 if (!HAS_PCH_SPLIT(dev_priv->dev))
2059 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DSI)) 2032 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI))
2060 assert_dsi_pll_enabled(dev_priv); 2033 assert_dsi_pll_enabled(dev_priv);
2061 else 2034 else
2062 assert_pll_enabled(dev_priv, pipe); 2035 assert_pll_enabled(dev_priv, pipe);
@@ -2221,11 +2194,13 @@ static int intel_align_height(struct drm_device *dev, int height, bool tiled)
2221} 2194}
2222 2195
2223int 2196int
2224intel_pin_and_fence_fb_obj(struct drm_device *dev, 2197intel_pin_and_fence_fb_obj(struct drm_plane *plane,
2225 struct drm_i915_gem_object *obj, 2198 struct drm_framebuffer *fb,
2226 struct intel_engine_cs *pipelined) 2199 struct intel_engine_cs *pipelined)
2227{ 2200{
2201 struct drm_device *dev = fb->dev;
2228 struct drm_i915_private *dev_priv = dev->dev_private; 2202 struct drm_i915_private *dev_priv = dev->dev_private;
2203 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2229 u32 alignment; 2204 u32 alignment;
2230 int ret; 2205 int ret;
2231 2206
@@ -2233,7 +2208,9 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
2233 2208
2234 switch (obj->tiling_mode) { 2209 switch (obj->tiling_mode) {
2235 case I915_TILING_NONE: 2210 case I915_TILING_NONE:
2236 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) 2211 if (INTEL_INFO(dev)->gen >= 9)
2212 alignment = 256 * 1024;
2213 else if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
2237 alignment = 128 * 1024; 2214 alignment = 128 * 1024;
2238 else if (INTEL_INFO(dev)->gen >= 4) 2215 else if (INTEL_INFO(dev)->gen >= 4)
2239 alignment = 4 * 1024; 2216 alignment = 4 * 1024;
@@ -2241,8 +2218,12 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
2241 alignment = 64 * 1024; 2218 alignment = 64 * 1024;
2242 break; 2219 break;
2243 case I915_TILING_X: 2220 case I915_TILING_X:
2244 /* pin() will align the object as required by fence */ 2221 if (INTEL_INFO(dev)->gen >= 9)
2245 alignment = 0; 2222 alignment = 256 * 1024;
2223 else {
2224 /* pin() will align the object as required by fence */
2225 alignment = 0;
2226 }
2246 break; 2227 break;
2247 case I915_TILING_Y: 2228 case I915_TILING_Y:
2248 WARN(1, "Y tiled bo slipped through, driver bug!\n"); 2229 WARN(1, "Y tiled bo slipped through, driver bug!\n");
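Gen9 tightens the framebuffer alignment to 256 KiB for both linear and X-tiled buffers, where older parts used 128 KiB (Broadwater/Crestline), 4 KiB (gen4+) or 64 KiB for linear and left X-tiled alignment to the fence code. A condensed sketch of the decision tree in this hunk (the Broadwater/Crestline check is reduced to a flag since this sketch has no platform helpers):

#include <stdbool.h>

enum tiling { TILING_NONE, TILING_X };

/* returns the required GGTT alignment in bytes; 0 means "defer to the
 * fence alignment in pin()", as in the pre-gen9 X-tiled branch above */
static unsigned int fb_alignment(int gen, bool is_bw_or_cl, enum tiling t)
{
	if (gen >= 9)
		return 256 * 1024;
	if (t == TILING_X)
		return 0;
	if (is_bw_or_cl)
		return 128 * 1024;
	if (gen >= 4)
		return 4 * 1024;
	return 64 * 1024;
}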
@@ -2402,6 +2383,7 @@ static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
2402 struct intel_plane_config *plane_config) 2383 struct intel_plane_config *plane_config)
2403{ 2384{
2404 struct drm_device *dev = intel_crtc->base.dev; 2385 struct drm_device *dev = intel_crtc->base.dev;
2386 struct drm_i915_private *dev_priv = dev->dev_private;
2405 struct drm_crtc *c; 2387 struct drm_crtc *c;
2406 struct intel_crtc *i; 2388 struct intel_crtc *i;
2407 struct drm_i915_gem_object *obj; 2389 struct drm_i915_gem_object *obj;
@@ -2433,6 +2415,9 @@ static void intel_find_plane_obj(struct intel_crtc *intel_crtc,
2433 continue; 2415 continue;
2434 2416
2435 if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) { 2417 if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
2418 if (obj->tiling_mode != I915_TILING_NONE)
2419 dev_priv->preserve_bios_swizzle = true;
2420
2436 drm_framebuffer_reference(c->primary->fb); 2421 drm_framebuffer_reference(c->primary->fb);
2437 intel_crtc->base.primary->fb = c->primary->fb; 2422 intel_crtc->base.primary->fb = c->primary->fb;
2438 obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe); 2423 obj->frontbuffer_bits |= INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe);
@@ -2486,6 +2471,12 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc,
2486 ((intel_crtc->config.pipe_src_h - 1) << 16) | 2471 ((intel_crtc->config.pipe_src_h - 1) << 16) |
2487 (intel_crtc->config.pipe_src_w - 1)); 2472 (intel_crtc->config.pipe_src_w - 1));
2488 I915_WRITE(DSPPOS(plane), 0); 2473 I915_WRITE(DSPPOS(plane), 0);
2474 } else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
2475 I915_WRITE(PRIMSIZE(plane),
2476 ((intel_crtc->config.pipe_src_h - 1) << 16) |
2477 (intel_crtc->config.pipe_src_w - 1));
2478 I915_WRITE(PRIMPOS(plane), 0);
2479 I915_WRITE(PRIMCNSTALPHA(plane), 0);
2489 } 2480 }
2490 2481
2491 switch (fb->pixel_format) { 2482 switch (fb->pixel_format) {
@@ -2672,6 +2663,92 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc,
2672 POSTING_READ(reg); 2663 POSTING_READ(reg);
2673} 2664}
2674 2665
2666static void skylake_update_primary_plane(struct drm_crtc *crtc,
2667 struct drm_framebuffer *fb,
2668 int x, int y)
2669{
2670 struct drm_device *dev = crtc->dev;
2671 struct drm_i915_private *dev_priv = dev->dev_private;
2672 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2673 struct intel_framebuffer *intel_fb;
2674 struct drm_i915_gem_object *obj;
2675 int pipe = intel_crtc->pipe;
2676 u32 plane_ctl, stride;
2677
2678 if (!intel_crtc->primary_enabled) {
2679 I915_WRITE(PLANE_CTL(pipe, 0), 0);
2680 I915_WRITE(PLANE_SURF(pipe, 0), 0);
2681 POSTING_READ(PLANE_CTL(pipe, 0));
2682 return;
2683 }
2684
2685 plane_ctl = PLANE_CTL_ENABLE |
2686 PLANE_CTL_PIPE_GAMMA_ENABLE |
2687 PLANE_CTL_PIPE_CSC_ENABLE;
2688
2689 switch (fb->pixel_format) {
2690 case DRM_FORMAT_RGB565:
2691 plane_ctl |= PLANE_CTL_FORMAT_RGB_565;
2692 break;
2693 case DRM_FORMAT_XRGB8888:
2694 plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
2695 break;
2696 case DRM_FORMAT_XBGR8888:
2697 plane_ctl |= PLANE_CTL_ORDER_RGBX;
2698 plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
2699 break;
2700 case DRM_FORMAT_XRGB2101010:
2701 plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
2702 break;
2703 case DRM_FORMAT_XBGR2101010:
2704 plane_ctl |= PLANE_CTL_ORDER_RGBX;
2705 plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010;
2706 break;
2707 default:
2708 BUG();
2709 }
2710
2711 intel_fb = to_intel_framebuffer(fb);
2712 obj = intel_fb->obj;
2713
2714 /*
2715 * The stride is expressed either as a multiple of 64-byte chunks for
2716 * linear buffers or as a number of tiles for tiled buffers.
2717 */
2718 switch (obj->tiling_mode) {
2719 case I915_TILING_NONE:
2720 stride = fb->pitches[0] >> 6;
2721 break;
2722 case I915_TILING_X:
2723 plane_ctl |= PLANE_CTL_TILED_X;
2724 stride = fb->pitches[0] >> 9;
2725 break;
2726 default:
2727 BUG();
2728 }
2729
2730 plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
2731 if (to_intel_plane(crtc->primary)->rotation == BIT(DRM_ROTATE_180))
2732 plane_ctl |= PLANE_CTL_ROTATE_180;
2733
2734 I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
2735
2736 DRM_DEBUG_KMS("Writing base %08lX %d,%d,%d,%d pitch=%d\n",
2737 i915_gem_obj_ggtt_offset(obj),
2738 x, y, fb->width, fb->height,
2739 fb->pitches[0]);
2740
2741 I915_WRITE(PLANE_POS(pipe, 0), 0);
2742 I915_WRITE(PLANE_OFFSET(pipe, 0), (y << 16) | x);
2743 I915_WRITE(PLANE_SIZE(pipe, 0),
2744 (intel_crtc->config.pipe_src_h - 1) << 16 |
2745 (intel_crtc->config.pipe_src_w - 1));
2746 I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
2747 I915_WRITE(PLANE_SURF(pipe, 0), i915_gem_obj_ggtt_offset(obj));
2748
2749 POSTING_READ(PLANE_SURF(pipe, 0));
2750}
2751
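The stride conversion in skylake_update_primary_plane() is easy to misread: PLANE_STRIDE takes units rather than bytes, so the byte pitch is divided by 64 for linear buffers and by 512 (the byte width of an X tile) for X-tiled ones. A standalone sketch:

#include <stdint.h>

/* convert a byte pitch to SKL PLANE_STRIDE units (sketch) */
static uint32_t skl_plane_stride(uint32_t pitch_bytes, int x_tiled)
{
	return x_tiled ? pitch_bytes >> 9  /* 512-byte X-tile rows */
	               : pitch_bytes >> 6; /* 64-byte chunks       */
}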
2675/* Assume fb object is pinned & idle & fenced and just update base pointers */ 2752/* Assume fb object is pinned & idle & fenced and just update base pointers */
2676static int 2753static int
2677intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, 2754intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
@@ -2682,32 +2759,16 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2682 2759
2683 if (dev_priv->display.disable_fbc) 2760 if (dev_priv->display.disable_fbc)
2684 dev_priv->display.disable_fbc(dev); 2761 dev_priv->display.disable_fbc(dev);
2685 intel_increase_pllclock(dev, to_intel_crtc(crtc)->pipe);
2686 2762
2687 dev_priv->display.update_primary_plane(crtc, fb, x, y); 2763 dev_priv->display.update_primary_plane(crtc, fb, x, y);
2688 2764
2689 return 0; 2765 return 0;
2690} 2766}
2691 2767
2692void intel_display_handle_reset(struct drm_device *dev) 2768static void intel_complete_page_flips(struct drm_device *dev)
2693{ 2769{
2694 struct drm_i915_private *dev_priv = dev->dev_private;
2695 struct drm_crtc *crtc; 2770 struct drm_crtc *crtc;
2696 2771
2697 /*
2698 * Flips in the rings have been nuked by the reset,
2699 * so complete all pending flips so that user space
2700 * will get its events and not get stuck.
2701 *
2702 * Also update the base address of all primary
2703 * planes to the last fb to make sure we're
2704 * showing the correct fb after a reset.
2705 *
2706 * Need to make two loops over the crtcs so that we
2707 * don't try to grab a crtc mutex before the
2708 * pending_flip_queue really got woken up.
2709 */
2710
2711 for_each_crtc(dev, crtc) { 2772 for_each_crtc(dev, crtc) {
2712 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2773 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2713 enum plane plane = intel_crtc->plane; 2774 enum plane plane = intel_crtc->plane;
@@ -2715,6 +2776,12 @@ void intel_display_handle_reset(struct drm_device *dev)
2715 intel_prepare_page_flip(dev, plane); 2776 intel_prepare_page_flip(dev, plane);
2716 intel_finish_page_flip_plane(dev, plane); 2777 intel_finish_page_flip_plane(dev, plane);
2717 } 2778 }
2779}
2780
2781static void intel_update_primary_planes(struct drm_device *dev)
2782{
2783 struct drm_i915_private *dev_priv = dev->dev_private;
2784 struct drm_crtc *crtc;
2718 2785
2719 for_each_crtc(dev, crtc) { 2786 for_each_crtc(dev, crtc) {
2720 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2787 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -2734,6 +2801,79 @@ void intel_display_handle_reset(struct drm_device *dev)
2734 } 2801 }
2735} 2802}
2736 2803
2804void intel_prepare_reset(struct drm_device *dev)
2805{
2806 struct drm_i915_private *dev_priv = to_i915(dev);
2807 struct intel_crtc *crtc;
2808
2809 /* no reset support for gen2 */
2810 if (IS_GEN2(dev))
2811 return;
2812
2813 /* reset doesn't touch the display */
2814 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
2815 return;
2816
2817 drm_modeset_lock_all(dev);
2818
2819 /*
2820 * Disabling the crtcs gracefully seems nicer. Also the
2821 * g33 docs say we should at least disable all the planes.
2822 */
2823 for_each_intel_crtc(dev, crtc) {
2824 if (crtc->active)
2825 dev_priv->display.crtc_disable(&crtc->base);
2826 }
2827}
2828
2829void intel_finish_reset(struct drm_device *dev)
2830{
2831 struct drm_i915_private *dev_priv = to_i915(dev);
2832
2833 /*
2834 * Flips in the rings will be nuked by the reset,
2835 * so complete all pending flips so that user space
2836 * will get its events and not get stuck.
2837 */
2838 intel_complete_page_flips(dev);
2839
2840 /* no reset support for gen2 */
2841 if (IS_GEN2(dev))
2842 return;
2843
2844 /* reset doesn't touch the display */
2845 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) {
2846 /*
2847 * Flips in the rings have been nuked by the reset,
2848 * so update the base address of all primary
2849 * planes to the last fb to make sure we're
2850 * showing the correct fb after a reset.
2851 */
2852 intel_update_primary_planes(dev);
2853 return;
2854 }
2855
2856 /*
2857 * The display has been reset as well,
2858 * so we need a full re-initialization.
2859 */
2860 intel_runtime_pm_disable_interrupts(dev_priv);
2861 intel_runtime_pm_enable_interrupts(dev_priv);
2862
2863 intel_modeset_init_hw(dev);
2864
2865 spin_lock_irq(&dev_priv->irq_lock);
2866 if (dev_priv->display.hpd_irq_setup)
2867 dev_priv->display.hpd_irq_setup(dev);
2868 spin_unlock_irq(&dev_priv->irq_lock);
2869
2870 intel_modeset_setup_hw_state(dev, true);
2871
2872 intel_hpd_init(dev_priv);
2873
2874 drm_modeset_unlock_all(dev);
2875}
2876
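intel_prepare_reset()/intel_finish_reset() split the old intel_display_handle_reset() into a bracket around the GPU reset itself, so pre-gen5 non-G4X parts can shut their CRTCs down before the reset clobbers the display and rebuild afterwards. A hypothetical caller fragment, assuming this bracketing; do_gpu_reset() is a placeholder, not a function in this diff:

intel_prepare_reset(dev);  /* gen3/4 (non-G4X): disable active crtcs      */
do_gpu_reset(dev);         /* placeholder for the platform reset sequence */
intel_finish_reset(dev);   /* complete flips; restore planes or reinit hw */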
2737static int 2877static int
2738intel_finish_fb(struct drm_framebuffer *old_fb) 2878intel_finish_fb(struct drm_framebuffer *old_fb)
2739{ 2879{
@@ -2762,20 +2902,58 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
2762 struct drm_device *dev = crtc->dev; 2902 struct drm_device *dev = crtc->dev;
2763 struct drm_i915_private *dev_priv = dev->dev_private; 2903 struct drm_i915_private *dev_priv = dev->dev_private;
2764 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2904 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2765 unsigned long flags;
2766 bool pending; 2905 bool pending;
2767 2906
2768 if (i915_reset_in_progress(&dev_priv->gpu_error) || 2907 if (i915_reset_in_progress(&dev_priv->gpu_error) ||
2769 intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) 2908 intel_crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
2770 return false; 2909 return false;
2771 2910
2772 spin_lock_irqsave(&dev->event_lock, flags); 2911 spin_lock_irq(&dev->event_lock);
2773 pending = to_intel_crtc(crtc)->unpin_work != NULL; 2912 pending = to_intel_crtc(crtc)->unpin_work != NULL;
2774 spin_unlock_irqrestore(&dev->event_lock, flags); 2913 spin_unlock_irq(&dev->event_lock);
2775 2914
2776 return pending; 2915 return pending;
2777} 2916}
2778 2917
2918static void intel_update_pipe_size(struct intel_crtc *crtc)
2919{
2920 struct drm_device *dev = crtc->base.dev;
2921 struct drm_i915_private *dev_priv = dev->dev_private;
2922 const struct drm_display_mode *adjusted_mode;
2923
2924 if (!i915.fastboot)
2925 return;
2926
2927 /*
2928 * Update pipe size and adjust fitter if needed: the reason for this is
2929 * that in compute_mode_changes we check the native mode (not the pfit
2930 * mode) to see if we can flip rather than do a full mode set. In the
2931 * fastboot case, we'll flip, but if we don't update the pipesrc and
2932 * pfit state, we'll end up with a big fb scanned out into the wrong
2933 * sized surface.
2934 *
2935 * To fix this properly, we need to hoist the checks up into
2936 * compute_mode_changes (or above), check the actual pfit state and
2937 * whether the platform allows pfit disable with pipe active, and only
2938 * then update the pipesrc and pfit state, even on the flip path.
2939 */
2940
2941 adjusted_mode = &crtc->config.adjusted_mode;
2942
2943 I915_WRITE(PIPESRC(crtc->pipe),
2944 ((adjusted_mode->crtc_hdisplay - 1) << 16) |
2945 (adjusted_mode->crtc_vdisplay - 1));
2946 if (!crtc->config.pch_pfit.enabled &&
2947 (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
2948 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
2949 I915_WRITE(PF_CTL(crtc->pipe), 0);
2950 I915_WRITE(PF_WIN_POS(crtc->pipe), 0);
2951 I915_WRITE(PF_WIN_SZ(crtc->pipe), 0);
2952 }
2953 crtc->config.pipe_src_w = adjusted_mode->crtc_hdisplay;
2954 crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
2955}
2956
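This helper hoists the fastboot pipe-size update out of intel_pipe_set_base() (the inline copy is deleted further down). The PIPESRC write packs the source size as width-1 in the high half and height-1 in the low half. A sketch of that encoding:

#include <stdint.h>

/* PIPESRC encoding: bits 31:16 = width-1, bits 15:0 = height-1 (sketch) */
static uint32_t pipesrc(uint32_t width, uint32_t height)
{
	return ((width - 1) << 16) | (height - 1);
}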
2779static int 2957static int
2780intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, 2958intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2781 struct drm_framebuffer *fb) 2959 struct drm_framebuffer *fb)
@@ -2785,7 +2963,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2785 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2963 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2786 enum pipe pipe = intel_crtc->pipe; 2964 enum pipe pipe = intel_crtc->pipe;
2787 struct drm_framebuffer *old_fb = crtc->primary->fb; 2965 struct drm_framebuffer *old_fb = crtc->primary->fb;
2788 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2789 struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb); 2966 struct drm_i915_gem_object *old_obj = intel_fb_obj(old_fb);
2790 int ret; 2967 int ret;
2791 2968
@@ -2808,9 +2985,9 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2808 } 2985 }
2809 2986
2810 mutex_lock(&dev->struct_mutex); 2987 mutex_lock(&dev->struct_mutex);
2811 ret = intel_pin_and_fence_fb_obj(dev, obj, NULL); 2988 ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, NULL);
2812 if (ret == 0) 2989 if (ret == 0)
2813 i915_gem_track_fb(old_obj, obj, 2990 i915_gem_track_fb(old_obj, intel_fb_obj(fb),
2814 INTEL_FRONTBUFFER_PRIMARY(pipe)); 2991 INTEL_FRONTBUFFER_PRIMARY(pipe));
2815 mutex_unlock(&dev->struct_mutex); 2992 mutex_unlock(&dev->struct_mutex);
2816 if (ret != 0) { 2993 if (ret != 0) {
@@ -2818,37 +2995,6 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2818 return ret; 2995 return ret;
2819 } 2996 }
2820 2997
2821 /*
2822 * Update pipe size and adjust fitter if needed: the reason for this is
2823 * that in compute_mode_changes we check the native mode (not the pfit
2824 * mode) to see if we can flip rather than do a full mode set. In the
2825 * fastboot case, we'll flip, but if we don't update the pipesrc and
2826 * pfit state, we'll end up with a big fb scanned out into the wrong
2827 * sized surface.
2828 *
2829 * To fix this properly, we need to hoist the checks up into
2830 * compute_mode_changes (or above), check the actual pfit state and
2831 * whether the platform allows pfit disable with pipe active, and only
2832 * then update the pipesrc and pfit state, even on the flip path.
2833 */
2834 if (i915.fastboot) {
2835 const struct drm_display_mode *adjusted_mode =
2836 &intel_crtc->config.adjusted_mode;
2837
2838 I915_WRITE(PIPESRC(intel_crtc->pipe),
2839 ((adjusted_mode->crtc_hdisplay - 1) << 16) |
2840 (adjusted_mode->crtc_vdisplay - 1));
2841 if (!intel_crtc->config.pch_pfit.enabled &&
2842 (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) ||
2843 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
2844 I915_WRITE(PF_CTL(intel_crtc->pipe), 0);
2845 I915_WRITE(PF_WIN_POS(intel_crtc->pipe), 0);
2846 I915_WRITE(PF_WIN_SZ(intel_crtc->pipe), 0);
2847 }
2848 intel_crtc->config.pipe_src_w = adjusted_mode->crtc_hdisplay;
2849 intel_crtc->config.pipe_src_h = adjusted_mode->crtc_vdisplay;
2850 }
2851
2852 dev_priv->display.update_primary_plane(crtc, fb, x, y); 2998 dev_priv->display.update_primary_plane(crtc, fb, x, y);
2853 2999
2854 if (intel_crtc->active) 3000 if (intel_crtc->active)
@@ -3472,14 +3618,13 @@ void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
3472 !intel_crtc_has_pending_flip(crtc), 3618 !intel_crtc_has_pending_flip(crtc),
3473 60*HZ) == 0)) { 3619 60*HZ) == 0)) {
3474 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3620 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3475 unsigned long flags;
3476 3621
3477 spin_lock_irqsave(&dev->event_lock, flags); 3622 spin_lock_irq(&dev->event_lock);
3478 if (intel_crtc->unpin_work) { 3623 if (intel_crtc->unpin_work) {
3479 WARN_ONCE(1, "Removing stuck page flip\n"); 3624 WARN_ONCE(1, "Removing stuck page flip\n");
3480 page_flip_completed(intel_crtc); 3625 page_flip_completed(intel_crtc);
3481 } 3626 }
3482 spin_unlock_irqrestore(&dev->event_lock, flags); 3627 spin_unlock_irq(&dev->event_lock);
3483 } 3628 }
3484 3629
3485 if (crtc->primary->fb) { 3630 if (crtc->primary->fb) {
@@ -3704,9 +3849,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
3704 intel_fdi_normal_train(crtc); 3849 intel_fdi_normal_train(crtc);
3705 3850
3706 /* For PCH DP, enable TRANS_DP_CTL */ 3851 /* For PCH DP, enable TRANS_DP_CTL */
3707 if (HAS_PCH_CPT(dev) && 3852 if (HAS_PCH_CPT(dev) && intel_crtc->config.has_dp_encoder) {
3708 (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
3709 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
3710 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5; 3853 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
3711 reg = TRANS_DP_CTL(pipe); 3854 reg = TRANS_DP_CTL(pipe);
3712 temp = I915_READ(reg); 3855 temp = I915_READ(reg);
@@ -3766,12 +3909,13 @@ void intel_put_shared_dpll(struct intel_crtc *crtc)
3766 if (pll == NULL) 3909 if (pll == NULL)
3767 return; 3910 return;
3768 3911
3769 if (pll->refcount == 0) { 3912 if (!(pll->config.crtc_mask & (1 << crtc->pipe))) {
3770 WARN(1, "bad %s refcount\n", pll->name); 3913 WARN(1, "bad %s crtc mask\n", pll->name);
3771 return; 3914 return;
3772 } 3915 }
3773 3916
3774 if (--pll->refcount == 0) { 3917 pll->config.crtc_mask &= ~(1 << crtc->pipe);
3918 if (pll->config.crtc_mask == 0) {
3775 WARN_ON(pll->on); 3919 WARN_ON(pll->on);
3776 WARN_ON(pll->active); 3920 WARN_ON(pll->active);
3777 } 3921 }
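The refcount becomes a crtc_mask because a bare counter cannot say which pipes hold the reference; with one bit per pipe, release is an AND-NOT and "unused" is simply mask == 0. A standalone sketch of the bookkeeping (GCC/Clang builtin used for the popcount):

#include <stdint.h>

/* drop one pipe's reference, as intel_put_shared_dpll() now does */
static void pll_put(uint32_t *crtc_mask, unsigned int pipe)
{
	*crtc_mask &= ~(1u << pipe);
}

/* the old refcount is recoverable as the number of set bits */
static int pll_users(uint32_t crtc_mask)
{
	return __builtin_popcount(crtc_mask);
}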
@@ -3782,15 +3926,9 @@ void intel_put_shared_dpll(struct intel_crtc *crtc)
3782struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc) 3926struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
3783{ 3927{
3784 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 3928 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
3785 struct intel_shared_dpll *pll = intel_crtc_to_shared_dpll(crtc); 3929 struct intel_shared_dpll *pll;
3786 enum intel_dpll_id i; 3930 enum intel_dpll_id i;
3787 3931
3788 if (pll) {
3789 DRM_DEBUG_KMS("CRTC:%d dropping existing %s\n",
3790 crtc->base.base.id, pll->name);
3791 intel_put_shared_dpll(crtc);
3792 }
3793
3794 if (HAS_PCH_IBX(dev_priv->dev)) { 3932 if (HAS_PCH_IBX(dev_priv->dev)) {
3795 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */ 3933 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
3796 i = (enum intel_dpll_id) crtc->pipe; 3934 i = (enum intel_dpll_id) crtc->pipe;
@@ -3799,7 +3937,7 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
3799 DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n", 3937 DRM_DEBUG_KMS("CRTC:%d using pre-allocated %s\n",
3800 crtc->base.base.id, pll->name); 3938 crtc->base.base.id, pll->name);
3801 3939
3802 WARN_ON(pll->refcount); 3940 WARN_ON(pll->new_config->crtc_mask);
3803 3941
3804 goto found; 3942 goto found;
3805 } 3943 }
@@ -3808,15 +3946,16 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
3808 pll = &dev_priv->shared_dplls[i]; 3946 pll = &dev_priv->shared_dplls[i];
3809 3947
3810 /* Only want to check enabled timings first */ 3948 /* Only want to check enabled timings first */
3811 if (pll->refcount == 0) 3949 if (pll->new_config->crtc_mask == 0)
3812 continue; 3950 continue;
3813 3951
3814 if (memcmp(&crtc->config.dpll_hw_state, &pll->hw_state, 3952 if (memcmp(&crtc->new_config->dpll_hw_state,
3815 sizeof(pll->hw_state)) == 0) { 3953 &pll->new_config->hw_state,
3816 DRM_DEBUG_KMS("CRTC:%d sharing existing %s (refcount %d, active %d)\n", 3954 sizeof(pll->new_config->hw_state)) == 0) {
3817 crtc->base.base.id, 3955 DRM_DEBUG_KMS("CRTC:%d sharing existing %s (crtc mask 0x%08x, active %d)\n",
3818 pll->name, pll->refcount, pll->active); 3956 crtc->base.base.id, pll->name,
3819 3957 pll->new_config->crtc_mask,
3958 pll->active);
3820 goto found; 3959 goto found;
3821 } 3960 }
3822 } 3961 }
@@ -3824,7 +3963,7 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
3824 /* Ok no matching timings, maybe there's a free one? */ 3963 /* Ok no matching timings, maybe there's a free one? */
3825 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 3964 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
3826 pll = &dev_priv->shared_dplls[i]; 3965 pll = &dev_priv->shared_dplls[i];
3827 if (pll->refcount == 0) { 3966 if (pll->new_config->crtc_mask == 0) {
3828 DRM_DEBUG_KMS("CRTC:%d allocated %s\n", 3967 DRM_DEBUG_KMS("CRTC:%d allocated %s\n",
3829 crtc->base.base.id, pll->name); 3968 crtc->base.base.id, pll->name);
3830 goto found; 3969 goto found;
@@ -3834,18 +3973,86 @@ struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc)
3834 return NULL; 3973 return NULL;
3835 3974
3836found: 3975found:
3837 if (pll->refcount == 0) 3976 if (pll->new_config->crtc_mask == 0)
3838 pll->hw_state = crtc->config.dpll_hw_state; 3977 pll->new_config->hw_state = crtc->new_config->dpll_hw_state;
3839 3978
3840 crtc->config.shared_dpll = i; 3979 crtc->new_config->shared_dpll = i;
3841 DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name, 3980 DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->name,
3842 pipe_name(crtc->pipe)); 3981 pipe_name(crtc->pipe));
3843 3982
3844 pll->refcount++; 3983 pll->new_config->crtc_mask |= 1 << crtc->pipe;
3845 3984
3846 return pll; 3985 return pll;
3847} 3986}
3848 3987
3988/**
3989 * intel_shared_dpll_start_config - start a new PLL staged config
3990 * @dev_priv: DRM device
3991 * @clear_pipes: mask of pipes that will have their PLLs freed
3992 *
3993 * Starts a new PLL staged config, copying the current config but
3994 * releasing the references of pipes specified in clear_pipes.
3995 */
3996static int intel_shared_dpll_start_config(struct drm_i915_private *dev_priv,
3997 unsigned clear_pipes)
3998{
3999 struct intel_shared_dpll *pll;
4000 enum intel_dpll_id i;
4001
4002 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
4003 pll = &dev_priv->shared_dplls[i];
4004
4005 pll->new_config = kmemdup(&pll->config, sizeof pll->config,
4006 GFP_KERNEL);
4007 if (!pll->new_config)
4008 goto cleanup;
4009
4010 pll->new_config->crtc_mask &= ~clear_pipes;
4011 }
4012
4013 return 0;
4014
4015cleanup:
4016 while (--i >= 0) {
4017 pll = &dev_priv->shared_dplls[i];
4018 kfree(pll->new_config);
4019 pll->new_config = NULL;
4020 }
4021
4022 return -ENOMEM;
4023}
4024
4025static void intel_shared_dpll_commit(struct drm_i915_private *dev_priv)
4026{
4027 struct intel_shared_dpll *pll;
4028 enum intel_dpll_id i;
4029
4030 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
4031 pll = &dev_priv->shared_dplls[i];
4032
4033 WARN_ON(pll->new_config == &pll->config);
4034
4035 pll->config = *pll->new_config;
4036 kfree(pll->new_config);
4037 pll->new_config = NULL;
4038 }
4039}
4040
4041static void intel_shared_dpll_abort_config(struct drm_i915_private *dev_priv)
4042{
4043 struct intel_shared_dpll *pll;
4044 enum intel_dpll_id i;
4045
4046 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
4047 pll = &dev_priv->shared_dplls[i];
4048
4049 WARN_ON(pll->new_config == &pll->config);
4050
4051 kfree(pll->new_config);
4052 pll->new_config = NULL;
4053 }
4054}
4055
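Taken together, start/commit/abort give the shared-DPLL state a small transaction: stage copies, mutate only the copies, then either publish or discard them. A sketch of the intended calling sequence, reusing the dummy types from the sketch above (start_config, recompute_plls, commit_config and abort_config are stand-ins that mirror the diff's functions, not the i915 entry points):

/* Edits land in new_config only; pll->config stays live until commit. */
static int do_modeset(struct pll *plls, int num, unsigned int clear_pipes)
{
    int ret = start_config(plls, num, clear_pipes);
    if (ret)
        return ret;                       /* nothing staged, nothing to undo */

    if (recompute_plls(plls, num) < 0) {  /* touches new_config only */
        abort_config(plls, num);          /* throw the copies away */
        return -EINVAL;
    }

    commit_config(plls, num);             /* new_config becomes config */
    return 0;
}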
3849static void cpt_verify_modeset(struct drm_device *dev, int pipe) 4056static void cpt_verify_modeset(struct drm_device *dev, int pipe)
3850{ 4057{
3851 struct drm_i915_private *dev_priv = dev->dev_private; 4058 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -3860,6 +4067,19 @@ static void cpt_verify_modeset(struct drm_device *dev, int pipe)
3860 } 4067 }
3861} 4068}
3862 4069
4070static void skylake_pfit_enable(struct intel_crtc *crtc)
4071{
4072 struct drm_device *dev = crtc->base.dev;
4073 struct drm_i915_private *dev_priv = dev->dev_private;
4074 int pipe = crtc->pipe;
4075
4076 if (crtc->config.pch_pfit.enabled) {
4077 I915_WRITE(PS_CTL(pipe), PS_ENABLE);
4078 I915_WRITE(PS_WIN_POS(pipe), crtc->config.pch_pfit.pos);
4079 I915_WRITE(PS_WIN_SZ(pipe), crtc->config.pch_pfit.size);
4080 }
4081}
4082
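skylake_pfit_enable() programs the gen9 panel fitter through the new PS_* registers instead of the ILK-era PF_* set; pos and size are single registers with x/width packed in the high 16 bits and y/height in the low 16, matching how i915 builds pch_pfit.pos and pch_pfit.size elsewhere. A small sketch of the packing (the macro name is made up; only the layout is taken from the driver):

#include <stdint.h>
#include <stdio.h>

/* high half: x or width; low half: y or height */
#define PFIT_PACK(hi, lo)   (((uint32_t)(hi) << 16) | (uint16_t)(lo))

int main(void)
{
    /* center a 1280x1024 source on a 1920x1200 panel */
    uint32_t pos  = PFIT_PACK((1920 - 1280) / 2, (1200 - 1024) / 2);
    uint32_t size = PFIT_PACK(1280, 1024);

    printf("PS_WIN_POS = 0x%08x, PS_WIN_SZ = 0x%08x\n", pos, size);
    return 0;
}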
3863static void ironlake_pfit_enable(struct intel_crtc *crtc) 4083static void ironlake_pfit_enable(struct intel_crtc *crtc)
3864{ 4084{
3865 struct drm_device *dev = crtc->base.dev; 4085 struct drm_device *dev = crtc->base.dev;
@@ -3983,7 +4203,7 @@ static void intel_crtc_load_lut(struct drm_crtc *crtc)
3983 return; 4203 return;
3984 4204
3985 if (!HAS_PCH_SPLIT(dev_priv->dev)) { 4205 if (!HAS_PCH_SPLIT(dev_priv->dev)) {
3986 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI)) 4206 if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI))
3987 assert_dsi_pll_enabled(dev_priv); 4207 assert_dsi_pll_enabled(dev_priv);
3988 else 4208 else
3989 assert_pll_enabled(dev_priv, pipe); 4209 assert_pll_enabled(dev_priv, pipe);
@@ -4038,10 +4258,6 @@ static void intel_crtc_enable_planes(struct drm_crtc *crtc)
4038 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4258 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4039 int pipe = intel_crtc->pipe; 4259 int pipe = intel_crtc->pipe;
4040 4260
4041 assert_vblank_disabled(crtc);
4042
4043 drm_vblank_on(dev, pipe);
4044
4045 intel_enable_primary_hw_plane(crtc->primary, crtc); 4261 intel_enable_primary_hw_plane(crtc->primary, crtc);
4046 intel_enable_planes(crtc); 4262 intel_enable_planes(crtc);
4047 intel_crtc_update_cursor(crtc, true); 4263 intel_crtc_update_cursor(crtc, true);
@@ -4087,10 +4303,6 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc)
4087 * consider this a flip to a NULL plane. 4303 * consider this a flip to a NULL plane.
4088 */ 4304 */
4089 intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe)); 4305 intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
4090
4091 drm_vblank_off(dev, pipe);
4092
4093 assert_vblank_disabled(crtc);
4094} 4306}
4095 4307
4096static void ironlake_crtc_enable(struct drm_crtc *crtc) 4308static void ironlake_crtc_enable(struct drm_crtc *crtc)
@@ -4123,8 +4335,8 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
4123 4335
4124 intel_crtc->active = true; 4336 intel_crtc->active = true;
4125 4337
4126 intel_set_cpu_fifo_underrun_reporting(dev, pipe, true); 4338 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4127 intel_set_pch_fifo_underrun_reporting(dev, pipe, true); 4339 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
4128 4340
4129 for_each_encoder_on_crtc(dev, crtc, encoder) 4341 for_each_encoder_on_crtc(dev, crtc, encoder)
4130 if (encoder->pre_enable) 4342 if (encoder->pre_enable)
@@ -4160,6 +4372,9 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
4160 if (HAS_PCH_CPT(dev)) 4372 if (HAS_PCH_CPT(dev))
4161 cpt_verify_modeset(dev, intel_crtc->pipe); 4373 cpt_verify_modeset(dev, intel_crtc->pipe);
4162 4374
4375 assert_vblank_disabled(crtc);
4376 drm_crtc_vblank_on(crtc);
4377
4163 intel_crtc_enable_planes(crtc); 4378 intel_crtc_enable_planes(crtc);
4164} 4379}
4165 4380
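Note the ordering change running through these hunks: drm_vblank_on()/off() used to live inside the plane enable/disable helpers, and now each crtc hook brackets them explicitly with the drm_crtc_* variants. Enable asserts vblanks are off, switches them on, then enables planes; disable tears the planes down first, switches vblanks off, then disables the encoders. A toy sketch of the bracketing (stubs, not the DRM API):

#include <assert.h>
#include <stdbool.h>

static bool vblank_on;  /* stand-in for the per-pipe vblank state */

static void vblank_enable(void)  { vblank_on = true;  }
static void vblank_disable(void) { vblank_on = false; }

static void crtc_enable(void)
{
    /* ... PLL, pipe and encoder pre-enable work ... */
    assert(!vblank_on);      /* mirrors assert_vblank_disabled() */
    vblank_enable();         /* mirrors drm_crtc_vblank_on() */
    /* enable planes last: they may already rely on vblank events */
}

static void crtc_disable(void)
{
    /* disable planes first: pending flips still need vblanks */
    vblank_disable();        /* mirrors drm_crtc_vblank_off() */
    assert(!vblank_on);
    /* ... encoder disable, pipe off ... */
}

int main(void)
{
    crtc_enable();
    crtc_disable();
    return 0;
}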
@@ -4235,19 +4450,23 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
4235 4450
4236 intel_crtc->active = true; 4451 intel_crtc->active = true;
4237 4452
4238 intel_set_cpu_fifo_underrun_reporting(dev, pipe, true); 4453 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4239 for_each_encoder_on_crtc(dev, crtc, encoder) 4454 for_each_encoder_on_crtc(dev, crtc, encoder)
4240 if (encoder->pre_enable) 4455 if (encoder->pre_enable)
4241 encoder->pre_enable(encoder); 4456 encoder->pre_enable(encoder);
4242 4457
4243 if (intel_crtc->config.has_pch_encoder) { 4458 if (intel_crtc->config.has_pch_encoder) {
4244 intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true); 4459 intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
4460 true);
4245 dev_priv->display.fdi_link_train(crtc); 4461 dev_priv->display.fdi_link_train(crtc);
4246 } 4462 }
4247 4463
4248 intel_ddi_enable_pipe_clock(intel_crtc); 4464 intel_ddi_enable_pipe_clock(intel_crtc);
4249 4465
4250 ironlake_pfit_enable(intel_crtc); 4466 if (IS_SKYLAKE(dev))
4467 skylake_pfit_enable(intel_crtc);
4468 else
4469 ironlake_pfit_enable(intel_crtc);
4251 4470
4252 /* 4471 /*
4253 * On ILK+ LUT must be loaded before the pipe is running but with 4472 * On ILK+ LUT must be loaded before the pipe is running but with
@@ -4272,12 +4491,30 @@ static void haswell_crtc_enable(struct drm_crtc *crtc)
4272 intel_opregion_notify_encoder(encoder, true); 4491 intel_opregion_notify_encoder(encoder, true);
4273 } 4492 }
4274 4493
4494 assert_vblank_disabled(crtc);
4495 drm_crtc_vblank_on(crtc);
4496
4275 /* If we change the relative order between pipe/planes enabling, we need 4497 /* If we change the relative order between pipe/planes enabling, we need
4276 * to change the workaround. */ 4498 * to change the workaround. */
4277 haswell_mode_set_planes_workaround(intel_crtc); 4499 haswell_mode_set_planes_workaround(intel_crtc);
4278 intel_crtc_enable_planes(crtc); 4500 intel_crtc_enable_planes(crtc);
4279} 4501}
4280 4502
4503static void skylake_pfit_disable(struct intel_crtc *crtc)
4504{
4505 struct drm_device *dev = crtc->base.dev;
4506 struct drm_i915_private *dev_priv = dev->dev_private;
4507 int pipe = crtc->pipe;
4508
 4509 /* To avoid upsetting the power well on haswell, only disable the pfit if
4510 * it's in use. The hw state code will make sure we get this right. */
4511 if (crtc->config.pch_pfit.enabled) {
4512 I915_WRITE(PS_CTL(pipe), 0);
4513 I915_WRITE(PS_WIN_POS(pipe), 0);
4514 I915_WRITE(PS_WIN_SZ(pipe), 0);
4515 }
4516}
4517
4281static void ironlake_pfit_disable(struct intel_crtc *crtc) 4518static void ironlake_pfit_disable(struct intel_crtc *crtc)
4282{ 4519{
4283 struct drm_device *dev = crtc->base.dev; 4520 struct drm_device *dev = crtc->base.dev;
@@ -4307,11 +4544,14 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
4307 4544
4308 intel_crtc_disable_planes(crtc); 4545 intel_crtc_disable_planes(crtc);
4309 4546
4547 drm_crtc_vblank_off(crtc);
4548 assert_vblank_disabled(crtc);
4549
4310 for_each_encoder_on_crtc(dev, crtc, encoder) 4550 for_each_encoder_on_crtc(dev, crtc, encoder)
4311 encoder->disable(encoder); 4551 encoder->disable(encoder);
4312 4552
4313 if (intel_crtc->config.has_pch_encoder) 4553 if (intel_crtc->config.has_pch_encoder)
4314 intel_set_pch_fifo_underrun_reporting(dev, pipe, false); 4554 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
4315 4555
4316 intel_disable_pipe(intel_crtc); 4556 intel_disable_pipe(intel_crtc);
4317 4557
@@ -4368,13 +4608,17 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
4368 4608
4369 intel_crtc_disable_planes(crtc); 4609 intel_crtc_disable_planes(crtc);
4370 4610
4611 drm_crtc_vblank_off(crtc);
4612 assert_vblank_disabled(crtc);
4613
4371 for_each_encoder_on_crtc(dev, crtc, encoder) { 4614 for_each_encoder_on_crtc(dev, crtc, encoder) {
4372 intel_opregion_notify_encoder(encoder, false); 4615 intel_opregion_notify_encoder(encoder, false);
4373 encoder->disable(encoder); 4616 encoder->disable(encoder);
4374 } 4617 }
4375 4618
4376 if (intel_crtc->config.has_pch_encoder) 4619 if (intel_crtc->config.has_pch_encoder)
4377 intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, false); 4620 intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
4621 false);
4378 intel_disable_pipe(intel_crtc); 4622 intel_disable_pipe(intel_crtc);
4379 4623
4380 if (intel_crtc->config.dp_encoder_is_mst) 4624 if (intel_crtc->config.dp_encoder_is_mst)
@@ -4382,7 +4626,10 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
4382 4626
4383 intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder); 4627 intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
4384 4628
4385 ironlake_pfit_disable(intel_crtc); 4629 if (IS_SKYLAKE(dev))
4630 skylake_pfit_disable(intel_crtc);
4631 else
4632 ironlake_pfit_disable(intel_crtc);
4386 4633
4387 intel_ddi_disable_pipe_clock(intel_crtc); 4634 intel_ddi_disable_pipe_clock(intel_crtc);
4388 4635
@@ -4508,20 +4755,6 @@ static unsigned long get_crtc_power_domains(struct drm_crtc *crtc)
4508 return mask; 4755 return mask;
4509} 4756}
4510 4757
4511void intel_display_set_init_power(struct drm_i915_private *dev_priv,
4512 bool enable)
4513{
4514 if (dev_priv->power_domains.init_power_on == enable)
4515 return;
4516
4517 if (enable)
4518 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
4519 else
4520 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
4521
4522 dev_priv->power_domains.init_power_on = enable;
4523}
4524
4525static void modeset_update_crtc_power_domains(struct drm_device *dev) 4758static void modeset_update_crtc_power_domains(struct drm_device *dev)
4526{ 4759{
4527 struct drm_i915_private *dev_priv = dev->dev_private; 4760 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4544,6 +4777,9 @@ static void modeset_update_crtc_power_domains(struct drm_device *dev)
4544 intel_display_power_get(dev_priv, domain); 4777 intel_display_power_get(dev_priv, domain);
4545 } 4778 }
4546 4779
4780 if (dev_priv->display.modeset_global_resources)
4781 dev_priv->display.modeset_global_resources(dev);
4782
4547 for_each_intel_crtc(dev, crtc) { 4783 for_each_intel_crtc(dev, crtc) {
4548 enum intel_display_power_domain domain; 4784 enum intel_display_power_domain domain;
4549 4785
@@ -4575,7 +4811,7 @@ static void vlv_update_cdclk(struct drm_device *dev)
4575 struct drm_i915_private *dev_priv = dev->dev_private; 4811 struct drm_i915_private *dev_priv = dev->dev_private;
4576 4812
4577 dev_priv->vlv_cdclk_freq = dev_priv->display.get_display_clock_speed(dev); 4813 dev_priv->vlv_cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
4578 DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz", 4814 DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
4579 dev_priv->vlv_cdclk_freq); 4815 dev_priv->vlv_cdclk_freq);
4580 4816
4581 /* 4817 /*
@@ -4614,10 +4850,9 @@ static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
4614 mutex_unlock(&dev_priv->rps.hw_lock); 4850 mutex_unlock(&dev_priv->rps.hw_lock);
4615 4851
4616 if (cdclk == 400000) { 4852 if (cdclk == 400000) {
4617 u32 divider, vco; 4853 u32 divider;
4618 4854
4619 vco = valleyview_get_vco(dev_priv); 4855 divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
4620 divider = DIV_ROUND_CLOSEST(vco << 1, cdclk) - 1;
4621 4856
4622 mutex_lock(&dev_priv->dpio_lock); 4857 mutex_lock(&dev_priv->dpio_lock);
4623 /* adjust cdclk divider */ 4858 /* adjust cdclk divider */
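The 400 MHz branch now derives the CCK divider from the cached HPLL frequency instead of re-reading the VCO each time. DIV_ROUND_CLOSEST(a, b) is (a + b/2) / b for positive operands, and the register field holds the ratio minus one. A worked example (the 1.6 GHz HPLL value is illustrative only):

#include <stdio.h>

/* the kernel's DIV_ROUND_CLOSEST, reduced to positive integers */
#define DIV_ROUND_CLOSEST(a, b) (((a) + (b) / 2) / (b))

int main(void)
{
    int hpll_freq = 1600000;    /* kHz; assumed value for illustration */
    int cdclk = 400000;         /* kHz */

    /* field = 2*hpll/cdclk - 1, rounded to the nearest integer */
    int divider = DIV_ROUND_CLOSEST(hpll_freq << 1, cdclk) - 1;

    printf("divider = %d\n", divider);  /* 3200000/400000 - 1 = 7 */
    return 0;
}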
@@ -4696,8 +4931,7 @@ static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
4696static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv, 4931static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
4697 int max_pixclk) 4932 int max_pixclk)
4698{ 4933{
4699 int vco = valleyview_get_vco(dev_priv); 4934 int freq_320 = (dev_priv->hpll_freq << 1) % 320000 != 0 ? 333333 : 320000;
4700 int freq_320 = (vco << 1) % 320000 != 0 ? 333333 : 320000;
4701 4935
4702 /* FIXME: Punit isn't quite ready yet */ 4936 /* FIXME: Punit isn't quite ready yet */
4703 if (IS_CHERRYVIEW(dev_priv->dev)) 4937 if (IS_CHERRYVIEW(dev_priv->dev))
@@ -4766,18 +5000,30 @@ static void valleyview_modeset_global_resources(struct drm_device *dev)
4766 int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk); 5000 int req_cdclk = valleyview_calc_cdclk(dev_priv, max_pixclk);
4767 5001
4768 if (req_cdclk != dev_priv->vlv_cdclk_freq) { 5002 if (req_cdclk != dev_priv->vlv_cdclk_freq) {
5003 /*
5004 * FIXME: We can end up here with all power domains off, yet
5005 * with a CDCLK frequency other than the minimum. To account
5006 * for this take the PIPE-A power domain, which covers the HW
5007 * blocks needed for the following programming. This can be
5008 * removed once it's guaranteed that we get here either with
5009 * the minimum CDCLK set, or the required power domains
5010 * enabled.
5011 */
5012 intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
5013
4769 if (IS_CHERRYVIEW(dev)) 5014 if (IS_CHERRYVIEW(dev))
4770 cherryview_set_cdclk(dev, req_cdclk); 5015 cherryview_set_cdclk(dev, req_cdclk);
4771 else 5016 else
4772 valleyview_set_cdclk(dev, req_cdclk); 5017 valleyview_set_cdclk(dev, req_cdclk);
4773 }
4774 5018
4775 modeset_update_crtc_power_domains(dev); 5019 intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
5020 }
4776} 5021}
4777 5022
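The FIXME above spells out why the CDCLK write gets bracketed with a PIPE-A power-domain reference: the hardware touched during reprogramming must be powered even if every pipe is otherwise off. The get/put bracket is plain reference counting; a compilable sketch with stand-ins for intel_display_power_get()/put():

#include <stdio.h>

enum domain { DOMAIN_PIPE_A, NUM_DOMAINS };
struct dev_state { int refs[NUM_DOMAINS]; int cdclk; };

static void power_get(struct dev_state *st, enum domain d) { st->refs[d]++; }
static void power_put(struct dev_state *st, enum domain d) { st->refs[d]--; }

static void set_cdclk(struct dev_state *st, int freq)
{
    power_get(st, DOMAIN_PIPE_A);   /* keep the needed blocks powered */
    st->cdclk = freq;               /* reprogram while the domain is held */
    power_put(st, DOMAIN_PIPE_A);   /* drop the temporary reference */
}

int main(void)
{
    struct dev_state st = { { 0 }, 320000 };

    set_cdclk(&st, 400000);
    printf("cdclk=%d, pipe-A refs=%d\n", st.cdclk, st.refs[DOMAIN_PIPE_A]);
    return 0;
}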
4778static void valleyview_crtc_enable(struct drm_crtc *crtc) 5023static void valleyview_crtc_enable(struct drm_crtc *crtc)
4779{ 5024{
4780 struct drm_device *dev = crtc->dev; 5025 struct drm_device *dev = crtc->dev;
5026 struct drm_i915_private *dev_priv = to_i915(dev);
4781 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5027 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4782 struct intel_encoder *encoder; 5028 struct intel_encoder *encoder;
4783 int pipe = intel_crtc->pipe; 5029 int pipe = intel_crtc->pipe;
@@ -4788,13 +5034,13 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
4788 if (intel_crtc->active) 5034 if (intel_crtc->active)
4789 return; 5035 return;
4790 5036
4791 is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI); 5037 is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI);
4792 5038
4793 if (!is_dsi) { 5039 if (!is_dsi) {
4794 if (IS_CHERRYVIEW(dev)) 5040 if (IS_CHERRYVIEW(dev))
4795 chv_prepare_pll(intel_crtc); 5041 chv_prepare_pll(intel_crtc, &intel_crtc->config);
4796 else 5042 else
4797 vlv_prepare_pll(intel_crtc); 5043 vlv_prepare_pll(intel_crtc, &intel_crtc->config);
4798 } 5044 }
4799 5045
4800 if (intel_crtc->config.has_dp_encoder) 5046 if (intel_crtc->config.has_dp_encoder)
@@ -4802,11 +5048,18 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
4802 5048
4803 intel_set_pipe_timings(intel_crtc); 5049 intel_set_pipe_timings(intel_crtc);
4804 5050
5051 if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) {
5052 struct drm_i915_private *dev_priv = dev->dev_private;
5053
5054 I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
5055 I915_WRITE(CHV_CANVAS(pipe), 0);
5056 }
5057
4805 i9xx_set_pipeconf(intel_crtc); 5058 i9xx_set_pipeconf(intel_crtc);
4806 5059
4807 intel_crtc->active = true; 5060 intel_crtc->active = true;
4808 5061
4809 intel_set_cpu_fifo_underrun_reporting(dev, pipe, true); 5062 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4810 5063
4811 for_each_encoder_on_crtc(dev, crtc, encoder) 5064 for_each_encoder_on_crtc(dev, crtc, encoder)
4812 if (encoder->pre_pll_enable) 5065 if (encoder->pre_pll_enable)
@@ -4814,9 +5067,9 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
4814 5067
4815 if (!is_dsi) { 5068 if (!is_dsi) {
4816 if (IS_CHERRYVIEW(dev)) 5069 if (IS_CHERRYVIEW(dev))
4817 chv_enable_pll(intel_crtc); 5070 chv_enable_pll(intel_crtc, &intel_crtc->config);
4818 else 5071 else
4819 vlv_enable_pll(intel_crtc); 5072 vlv_enable_pll(intel_crtc, &intel_crtc->config);
4820 } 5073 }
4821 5074
4822 for_each_encoder_on_crtc(dev, crtc, encoder) 5075 for_each_encoder_on_crtc(dev, crtc, encoder)
@@ -4833,10 +5086,13 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
4833 for_each_encoder_on_crtc(dev, crtc, encoder) 5086 for_each_encoder_on_crtc(dev, crtc, encoder)
4834 encoder->enable(encoder); 5087 encoder->enable(encoder);
4835 5088
5089 assert_vblank_disabled(crtc);
5090 drm_crtc_vblank_on(crtc);
5091
4836 intel_crtc_enable_planes(crtc); 5092 intel_crtc_enable_planes(crtc);
4837 5093
4838 /* Underruns don't raise interrupts, so check manually. */ 5094 /* Underruns don't raise interrupts, so check manually. */
4839 i9xx_check_fifo_underruns(dev); 5095 i9xx_check_fifo_underruns(dev_priv);
4840} 5096}
4841 5097
4842static void i9xx_set_pll_dividers(struct intel_crtc *crtc) 5098static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
@@ -4851,6 +5107,7 @@ static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
4851static void i9xx_crtc_enable(struct drm_crtc *crtc) 5107static void i9xx_crtc_enable(struct drm_crtc *crtc)
4852{ 5108{
4853 struct drm_device *dev = crtc->dev; 5109 struct drm_device *dev = crtc->dev;
5110 struct drm_i915_private *dev_priv = to_i915(dev);
4854 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5111 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4855 struct intel_encoder *encoder; 5112 struct intel_encoder *encoder;
4856 int pipe = intel_crtc->pipe; 5113 int pipe = intel_crtc->pipe;
@@ -4872,7 +5129,7 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
4872 intel_crtc->active = true; 5129 intel_crtc->active = true;
4873 5130
4874 if (!IS_GEN2(dev)) 5131 if (!IS_GEN2(dev))
4875 intel_set_cpu_fifo_underrun_reporting(dev, pipe, true); 5132 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4876 5133
4877 for_each_encoder_on_crtc(dev, crtc, encoder) 5134 for_each_encoder_on_crtc(dev, crtc, encoder)
4878 if (encoder->pre_enable) 5135 if (encoder->pre_enable)
@@ -4890,6 +5147,9 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
4890 for_each_encoder_on_crtc(dev, crtc, encoder) 5147 for_each_encoder_on_crtc(dev, crtc, encoder)
4891 encoder->enable(encoder); 5148 encoder->enable(encoder);
4892 5149
5150 assert_vblank_disabled(crtc);
5151 drm_crtc_vblank_on(crtc);
5152
4893 intel_crtc_enable_planes(crtc); 5153 intel_crtc_enable_planes(crtc);
4894 5154
4895 /* 5155 /*
@@ -4900,10 +5160,10 @@ static void i9xx_crtc_enable(struct drm_crtc *crtc)
4900 * but leave the pipe running. 5160 * but leave the pipe running.
4901 */ 5161 */
4902 if (IS_GEN2(dev)) 5162 if (IS_GEN2(dev))
4903 intel_set_cpu_fifo_underrun_reporting(dev, pipe, true); 5163 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4904 5164
4905 /* Underruns don't raise interrupts, so check manually. */ 5165 /* Underruns don't raise interrupts, so check manually. */
4906 i9xx_check_fifo_underruns(dev); 5166 i9xx_check_fifo_underruns(dev_priv);
4907} 5167}
4908 5168
4909static void i9xx_pfit_disable(struct intel_crtc *crtc) 5169static void i9xx_pfit_disable(struct intel_crtc *crtc)
@@ -4939,7 +5199,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
4939 * but leave the pipe running. 5199 * but leave the pipe running.
4940 */ 5200 */
4941 if (IS_GEN2(dev)) 5201 if (IS_GEN2(dev))
4942 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false); 5202 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
4943 5203
4944 /* 5204 /*
4945 * Vblank time updates from the shadow to live plane control register 5205 * Vblank time updates from the shadow to live plane control register
@@ -4953,9 +5213,6 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
4953 intel_set_memory_cxsr(dev_priv, false); 5213 intel_set_memory_cxsr(dev_priv, false);
4954 intel_crtc_disable_planes(crtc); 5214 intel_crtc_disable_planes(crtc);
4955 5215
4956 for_each_encoder_on_crtc(dev, crtc, encoder)
4957 encoder->disable(encoder);
4958
4959 /* 5216 /*
4960 * On gen2 planes are double buffered but the pipe isn't, so we must 5217 * On gen2 planes are double buffered but the pipe isn't, so we must
4961 * wait for planes to fully turn off before disabling the pipe. 5218 * wait for planes to fully turn off before disabling the pipe.
@@ -4964,6 +5221,12 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
4964 */ 5221 */
4965 intel_wait_for_vblank(dev, pipe); 5222 intel_wait_for_vblank(dev, pipe);
4966 5223
5224 drm_crtc_vblank_off(crtc);
5225 assert_vblank_disabled(crtc);
5226
5227 for_each_encoder_on_crtc(dev, crtc, encoder)
5228 encoder->disable(encoder);
5229
4967 intel_disable_pipe(intel_crtc); 5230 intel_disable_pipe(intel_crtc);
4968 5231
4969 i9xx_pfit_disable(intel_crtc); 5232 i9xx_pfit_disable(intel_crtc);
@@ -4972,7 +5235,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
4972 if (encoder->post_disable) 5235 if (encoder->post_disable)
4973 encoder->post_disable(encoder); 5236 encoder->post_disable(encoder);
4974 5237
4975 if (!intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI)) { 5238 if (!intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI)) {
4976 if (IS_CHERRYVIEW(dev)) 5239 if (IS_CHERRYVIEW(dev))
4977 chv_disable_pll(dev_priv, pipe); 5240 chv_disable_pll(dev_priv, pipe);
4978 else if (IS_VALLEYVIEW(dev)) 5241 else if (IS_VALLEYVIEW(dev))
@@ -4982,7 +5245,7 @@ static void i9xx_crtc_disable(struct drm_crtc *crtc)
4982 } 5245 }
4983 5246
4984 if (!IS_GEN2(dev)) 5247 if (!IS_GEN2(dev))
4985 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false); 5248 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
4986 5249
4987 intel_crtc->active = false; 5250 intel_crtc->active = false;
4988 intel_update_watermarks(crtc); 5251 intel_update_watermarks(crtc);
@@ -4996,36 +5259,6 @@ static void i9xx_crtc_off(struct drm_crtc *crtc)
4996{ 5259{
4997} 5260}
4998 5261
4999static void intel_crtc_update_sarea(struct drm_crtc *crtc,
5000 bool enabled)
5001{
5002 struct drm_device *dev = crtc->dev;
5003 struct drm_i915_master_private *master_priv;
5004 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5005 int pipe = intel_crtc->pipe;
5006
5007 if (!dev->primary->master)
5008 return;
5009
5010 master_priv = dev->primary->master->driver_priv;
5011 if (!master_priv->sarea_priv)
5012 return;
5013
5014 switch (pipe) {
5015 case 0:
5016 master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
5017 master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
5018 break;
5019 case 1:
5020 master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
5021 master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
5022 break;
5023 default:
5024 DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
5025 break;
5026 }
5027}
5028
5029/* Master function to enable/disable CRTC and corresponding power wells */ 5262/* Master function to enable/disable CRTC and corresponding power wells */
5030void intel_crtc_control(struct drm_crtc *crtc, bool enable) 5263void intel_crtc_control(struct drm_crtc *crtc, bool enable)
5031{ 5264{
@@ -5069,8 +5302,6 @@ void intel_crtc_update_dpms(struct drm_crtc *crtc)
5069 enable |= intel_encoder->connectors_active; 5302 enable |= intel_encoder->connectors_active;
5070 5303
5071 intel_crtc_control(crtc, enable); 5304 intel_crtc_control(crtc, enable);
5072
5073 intel_crtc_update_sarea(crtc, enable);
5074} 5305}
5075 5306
5076static void intel_crtc_disable(struct drm_crtc *crtc) 5307static void intel_crtc_disable(struct drm_crtc *crtc)
@@ -5085,7 +5316,6 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
5085 WARN_ON(!crtc->enabled); 5316 WARN_ON(!crtc->enabled);
5086 5317
5087 dev_priv->display.crtc_disable(crtc); 5318 dev_priv->display.crtc_disable(crtc);
5088 intel_crtc_update_sarea(crtc, false);
5089 dev_priv->display.off(crtc); 5319 dev_priv->display.off(crtc);
5090 5320
5091 if (crtc->primary->fb) { 5321 if (crtc->primary->fb) {
@@ -5324,11 +5554,11 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
5324 struct intel_crtc_config *pipe_config) 5554 struct intel_crtc_config *pipe_config)
5325{ 5555{
5326 struct drm_device *dev = crtc->base.dev; 5556 struct drm_device *dev = crtc->base.dev;
5557 struct drm_i915_private *dev_priv = dev->dev_private;
5327 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode; 5558 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
5328 5559
5329 /* FIXME should check pixel clock limits on all platforms */ 5560 /* FIXME should check pixel clock limits on all platforms */
5330 if (INTEL_INFO(dev)->gen < 4) { 5561 if (INTEL_INFO(dev)->gen < 4) {
5331 struct drm_i915_private *dev_priv = dev->dev_private;
5332 int clock_limit = 5562 int clock_limit =
5333 dev_priv->display.get_display_clock_speed(dev); 5563 dev_priv->display.get_display_clock_speed(dev);
5334 5564
@@ -5355,7 +5585,7 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
5355 * - LVDS dual channel mode 5585 * - LVDS dual channel mode
5356 * - Double wide pipe 5586 * - Double wide pipe
5357 */ 5587 */
5358 if ((intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) && 5588 if ((intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
5359 intel_is_dual_link_lvds(dev)) || pipe_config->double_wide) 5589 intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
5360 pipe_config->pipe_src_w &= ~1; 5590 pipe_config->pipe_src_w &= ~1;
5361 5591
@@ -5377,13 +5607,6 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
5377 if (HAS_IPS(dev)) 5607 if (HAS_IPS(dev))
5378 hsw_compute_ips_config(crtc, pipe_config); 5608 hsw_compute_ips_config(crtc, pipe_config);
5379 5609
5380 /*
5381 * XXX: PCH/WRPLL clock sharing is done in ->mode_set, so make sure the
5382 * old clock survives for now.
5383 */
5384 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev) || HAS_DDI(dev))
5385 pipe_config->shared_dpll = crtc->config.shared_dpll;
5386
5387 if (pipe_config->has_pch_encoder) 5610 if (pipe_config->has_pch_encoder)
5388 return ironlake_fdi_compute_config(crtc, pipe_config); 5611 return ironlake_fdi_compute_config(crtc, pipe_config);
5389 5612
@@ -5393,7 +5616,6 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
5393static int valleyview_get_display_clock_speed(struct drm_device *dev) 5616static int valleyview_get_display_clock_speed(struct drm_device *dev)
5394{ 5617{
5395 struct drm_i915_private *dev_priv = dev->dev_private; 5618 struct drm_i915_private *dev_priv = dev->dev_private;
5396 int vco = valleyview_get_vco(dev_priv);
5397 u32 val; 5619 u32 val;
5398 int divider; 5620 int divider;
5399 5621
@@ -5401,6 +5623,9 @@ static int valleyview_get_display_clock_speed(struct drm_device *dev)
5401 if (IS_CHERRYVIEW(dev)) 5623 if (IS_CHERRYVIEW(dev))
5402 return 400000; 5624 return 400000;
5403 5625
5626 if (dev_priv->hpll_freq == 0)
5627 dev_priv->hpll_freq = valleyview_get_vco(dev_priv);
5628
5404 mutex_lock(&dev_priv->dpio_lock); 5629 mutex_lock(&dev_priv->dpio_lock);
5405 val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL); 5630 val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
5406 mutex_unlock(&dev_priv->dpio_lock); 5631 mutex_unlock(&dev_priv->dpio_lock);
@@ -5411,7 +5636,7 @@ static int valleyview_get_display_clock_speed(struct drm_device *dev)
5411 (divider << DISPLAY_FREQUENCY_STATUS_SHIFT), 5636 (divider << DISPLAY_FREQUENCY_STATUS_SHIFT),
5412 "cdclk change in progress\n"); 5637 "cdclk change in progress\n");
5413 5638
5414 return DIV_ROUND_CLOSEST(vco << 1, divider + 1); 5639 return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, divider + 1);
5415} 5640}
5416 5641
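valleyview_get_display_clock_speed() now lazily caches the HPLL VCO: hpll_freq == 0 means "not read yet", and the first caller fills it in from hardware so later calls skip the query. A single-threaded sketch of the idiom (read_vco_from_hw() and its return value are stand-ins):

#include <stdio.h>

static int hpll_freq;   /* 0 == not yet read from hardware */

static int read_vco_from_hw(void)
{
    return 1600000;     /* stand-in for valleyview_get_vco() */
}

static int get_hpll_freq(void)
{
    if (hpll_freq == 0)             /* first call: query and cache */
        hpll_freq = read_vco_from_hw();

    return hpll_freq;               /* later calls: cached value */
}

int main(void)
{
    printf("%d %d\n", get_hpll_freq(), get_hpll_freq());
    return 0;
}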
5417static int i945_get_display_clock_speed(struct drm_device *dev) 5642static int i945_get_display_clock_speed(struct drm_device *dev)
@@ -5543,15 +5768,15 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
5543 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); 5768 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
5544} 5769}
5545 5770
5546static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors) 5771static int i9xx_get_refclk(struct intel_crtc *crtc, int num_connectors)
5547{ 5772{
5548 struct drm_device *dev = crtc->dev; 5773 struct drm_device *dev = crtc->base.dev;
5549 struct drm_i915_private *dev_priv = dev->dev_private; 5774 struct drm_i915_private *dev_priv = dev->dev_private;
5550 int refclk; 5775 int refclk;
5551 5776
5552 if (IS_VALLEYVIEW(dev)) { 5777 if (IS_VALLEYVIEW(dev)) {
5553 refclk = 100000; 5778 refclk = 100000;
5554 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && 5779 } else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
5555 intel_panel_use_ssc(dev_priv) && num_connectors < 2) { 5780 intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
5556 refclk = dev_priv->vbt.lvds_ssc_freq; 5781 refclk = dev_priv->vbt.lvds_ssc_freq;
5557 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk); 5782 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
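i9xx_get_refclk() now takes an intel_crtc and asks intel_pipe_will_have_type() rather than intel_pipe_has_type(): has_type inspects the committed encoder->crtc links, will_have_type the staged encoder->new_crtc links, so the refclk choice tracks the configuration being computed instead of whatever is currently on screen. A sketch of the two predicates over the same encoder list (generic types, not the driver's):

#include <stdbool.h>
#include <stdio.h>

enum output_type { OUTPUT_LVDS, OUTPUT_HDMI };

struct crtc { int id; };
struct encoder {
    enum output_type type;
    struct crtc *crtc;      /* committed link */
    struct crtc *new_crtc;  /* staged link for the pending modeset */
};

/* is an encoder of @t on @c right now? */
static bool pipe_has_type(const struct encoder *e, int n,
                          const struct crtc *c, enum output_type t)
{
    for (int i = 0; i < n; i++)
        if (e[i].crtc == c && e[i].type == t)
            return true;
    return false;
}

/* will an encoder of @t be on @c after the pending modeset? */
static bool pipe_will_have_type(const struct encoder *e, int n,
                                const struct crtc *c, enum output_type t)
{
    for (int i = 0; i < n; i++)
        if (e[i].new_crtc == c && e[i].type == t)
            return true;
    return false;
}

int main(void)
{
    struct crtc a = { 0 }, b = { 1 };
    struct encoder lvds = { OUTPUT_LVDS, &a, &b };  /* moving from A to B */

    printf("has on A: %d, will have on B: %d\n",
           pipe_has_type(&lvds, 1, &a, OUTPUT_LVDS),
           pipe_will_have_type(&lvds, 1, &b, OUTPUT_LVDS));
    return 0;
}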
@@ -5581,24 +5806,24 @@ static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
5581 u32 fp, fp2 = 0; 5806 u32 fp, fp2 = 0;
5582 5807
5583 if (IS_PINEVIEW(dev)) { 5808 if (IS_PINEVIEW(dev)) {
5584 fp = pnv_dpll_compute_fp(&crtc->config.dpll); 5809 fp = pnv_dpll_compute_fp(&crtc->new_config->dpll);
5585 if (reduced_clock) 5810 if (reduced_clock)
5586 fp2 = pnv_dpll_compute_fp(reduced_clock); 5811 fp2 = pnv_dpll_compute_fp(reduced_clock);
5587 } else { 5812 } else {
5588 fp = i9xx_dpll_compute_fp(&crtc->config.dpll); 5813 fp = i9xx_dpll_compute_fp(&crtc->new_config->dpll);
5589 if (reduced_clock) 5814 if (reduced_clock)
5590 fp2 = i9xx_dpll_compute_fp(reduced_clock); 5815 fp2 = i9xx_dpll_compute_fp(reduced_clock);
5591 } 5816 }
5592 5817
5593 crtc->config.dpll_hw_state.fp0 = fp; 5818 crtc->new_config->dpll_hw_state.fp0 = fp;
5594 5819
5595 crtc->lowfreq_avail = false; 5820 crtc->lowfreq_avail = false;
5596 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) && 5821 if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
5597 reduced_clock && i915.powersave) { 5822 reduced_clock && i915.powersave) {
5598 crtc->config.dpll_hw_state.fp1 = fp2; 5823 crtc->new_config->dpll_hw_state.fp1 = fp2;
5599 crtc->lowfreq_avail = true; 5824 crtc->lowfreq_avail = true;
5600 } else { 5825 } else {
5601 crtc->config.dpll_hw_state.fp1 = fp; 5826 crtc->new_config->dpll_hw_state.fp1 = fp;
5602 } 5827 }
5603} 5828}
5604 5829
@@ -5687,7 +5912,8 @@ void intel_dp_set_m_n(struct intel_crtc *crtc)
5687 &crtc->config.dp_m2_n2); 5912 &crtc->config.dp_m2_n2);
5688} 5913}
5689 5914
5690static void vlv_update_pll(struct intel_crtc *crtc) 5915static void vlv_update_pll(struct intel_crtc *crtc,
5916 struct intel_crtc_config *pipe_config)
5691{ 5917{
5692 u32 dpll, dpll_md; 5918 u32 dpll, dpll_md;
5693 5919
@@ -5702,14 +5928,15 @@ static void vlv_update_pll(struct intel_crtc *crtc)
5702 if (crtc->pipe == PIPE_B) 5928 if (crtc->pipe == PIPE_B)
5703 dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; 5929 dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
5704 dpll |= DPLL_VCO_ENABLE; 5930 dpll |= DPLL_VCO_ENABLE;
5705 crtc->config.dpll_hw_state.dpll = dpll; 5931 pipe_config->dpll_hw_state.dpll = dpll;
5706 5932
5707 dpll_md = (crtc->config.pixel_multiplier - 1) 5933 dpll_md = (pipe_config->pixel_multiplier - 1)
5708 << DPLL_MD_UDI_MULTIPLIER_SHIFT; 5934 << DPLL_MD_UDI_MULTIPLIER_SHIFT;
5709 crtc->config.dpll_hw_state.dpll_md = dpll_md; 5935 pipe_config->dpll_hw_state.dpll_md = dpll_md;
5710} 5936}
5711 5937
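vlv_update_pll() (and, below, vlv_prepare_pll(), chv_update_pll() and chv_prepare_pll()) stop reading crtc->config implicitly and take the config to program as a parameter. That decoupling is what lets vlv_force_pll_on() further down feed them a throwaway on-stack config. The shape of the refactor, in a generic compilable sketch:

struct pll_cfg { int n, m1, m2, p1, p2; };
struct crtc { struct pll_cfg config; };

static void program_dividers(const struct pll_cfg *cfg) { (void)cfg; }

/* before: hard-wired to the crtc's committed state */
static void update_pll_old(struct crtc *c)
{
    program_dividers(&c->config);
}

/* after: the caller picks the config -- committed state, staged
 * state, or a temporary built on the stack */
static void update_pll_new(struct crtc *c, const struct pll_cfg *cfg)
{
    (void)c;                /* the crtc still identifies the pipe */
    program_dividers(cfg);
}

int main(void)
{
    struct crtc c = { { 2, 10, 8, 2, 5 } };
    struct pll_cfg tmp = { 1, 2, 3, 2, 2 };   /* throwaway config */

    update_pll_old(&c);
    update_pll_new(&c, &tmp);
    return 0;
}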
5712static void vlv_prepare_pll(struct intel_crtc *crtc) 5938static void vlv_prepare_pll(struct intel_crtc *crtc,
5939 const struct intel_crtc_config *pipe_config)
5713{ 5940{
5714 struct drm_device *dev = crtc->base.dev; 5941 struct drm_device *dev = crtc->base.dev;
5715 struct drm_i915_private *dev_priv = dev->dev_private; 5942 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5720,11 +5947,11 @@ static void vlv_prepare_pll(struct intel_crtc *crtc)
5720 5947
5721 mutex_lock(&dev_priv->dpio_lock); 5948 mutex_lock(&dev_priv->dpio_lock);
5722 5949
5723 bestn = crtc->config.dpll.n; 5950 bestn = pipe_config->dpll.n;
5724 bestm1 = crtc->config.dpll.m1; 5951 bestm1 = pipe_config->dpll.m1;
5725 bestm2 = crtc->config.dpll.m2; 5952 bestm2 = pipe_config->dpll.m2;
5726 bestp1 = crtc->config.dpll.p1; 5953 bestp1 = pipe_config->dpll.p1;
5727 bestp2 = crtc->config.dpll.p2; 5954 bestp2 = pipe_config->dpll.p2;
5728 5955
5729 /* See eDP HDMI DPIO driver vbios notes doc */ 5956 /* See eDP HDMI DPIO driver vbios notes doc */
5730 5957
@@ -5761,17 +5988,16 @@ static void vlv_prepare_pll(struct intel_crtc *crtc)
5761 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv); 5988 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
5762 5989
5763 /* Set HBR and RBR LPF coefficients */ 5990 /* Set HBR and RBR LPF coefficients */
5764 if (crtc->config.port_clock == 162000 || 5991 if (pipe_config->port_clock == 162000 ||
5765 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_ANALOG) || 5992 intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG) ||
5766 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI)) 5993 intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
5767 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 5994 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
5768 0x009f0003); 5995 0x009f0003);
5769 else 5996 else
5770 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe), 5997 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
5771 0x00d0000f); 5998 0x00d0000f);
5772 5999
5773 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP) || 6000 if (crtc->config.has_dp_encoder) {
5774 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) {
5775 /* Use SSC source */ 6001 /* Use SSC source */
5776 if (pipe == PIPE_A) 6002 if (pipe == PIPE_A)
5777 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe), 6003 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
@@ -5791,8 +6017,8 @@ static void vlv_prepare_pll(struct intel_crtc *crtc)
5791 6017
5792 coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe)); 6018 coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
5793 coreclk = (coreclk & 0x0000ff00) | 0x01c00000; 6019 coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
5794 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT) || 6020 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
5795 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_EDP)) 6021 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
5796 coreclk |= 0x01000000; 6022 coreclk |= 0x01000000;
5797 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk); 6023 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
5798 6024
@@ -5800,19 +6026,21 @@ static void vlv_prepare_pll(struct intel_crtc *crtc)
5800 mutex_unlock(&dev_priv->dpio_lock); 6026 mutex_unlock(&dev_priv->dpio_lock);
5801} 6027}
5802 6028
5803static void chv_update_pll(struct intel_crtc *crtc) 6029static void chv_update_pll(struct intel_crtc *crtc,
6030 struct intel_crtc_config *pipe_config)
5804{ 6031{
5805 crtc->config.dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV | 6032 pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLOCK_CHV |
5806 DPLL_REFA_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS | 6033 DPLL_REFA_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS |
5807 DPLL_VCO_ENABLE; 6034 DPLL_VCO_ENABLE;
5808 if (crtc->pipe != PIPE_A) 6035 if (crtc->pipe != PIPE_A)
5809 crtc->config.dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV; 6036 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
5810 6037
5811 crtc->config.dpll_hw_state.dpll_md = 6038 pipe_config->dpll_hw_state.dpll_md =
5812 (crtc->config.pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; 6039 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
5813} 6040}
5814 6041
5815static void chv_prepare_pll(struct intel_crtc *crtc) 6042static void chv_prepare_pll(struct intel_crtc *crtc,
6043 const struct intel_crtc_config *pipe_config)
5816{ 6044{
5817 struct drm_device *dev = crtc->base.dev; 6045 struct drm_device *dev = crtc->base.dev;
5818 struct drm_i915_private *dev_priv = dev->dev_private; 6046 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5823,18 +6051,18 @@ static void chv_prepare_pll(struct intel_crtc *crtc)
5823 u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac; 6051 u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
5824 int refclk; 6052 int refclk;
5825 6053
5826 bestn = crtc->config.dpll.n; 6054 bestn = pipe_config->dpll.n;
5827 bestm2_frac = crtc->config.dpll.m2 & 0x3fffff; 6055 bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
5828 bestm1 = crtc->config.dpll.m1; 6056 bestm1 = pipe_config->dpll.m1;
5829 bestm2 = crtc->config.dpll.m2 >> 22; 6057 bestm2 = pipe_config->dpll.m2 >> 22;
5830 bestp1 = crtc->config.dpll.p1; 6058 bestp1 = pipe_config->dpll.p1;
5831 bestp2 = crtc->config.dpll.p2; 6059 bestp2 = pipe_config->dpll.p2;
5832 6060
5833 /* 6061 /*
5834 * Enable Refclk and SSC 6062 * Enable Refclk and SSC
5835 */ 6063 */
5836 I915_WRITE(dpll_reg, 6064 I915_WRITE(dpll_reg,
5837 crtc->config.dpll_hw_state.dpll & ~DPLL_VCO_ENABLE); 6065 pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
5838 6066
5839 mutex_lock(&dev_priv->dpio_lock); 6067 mutex_lock(&dev_priv->dpio_lock);
5840 6068
@@ -5862,7 +6090,7 @@ static void chv_prepare_pll(struct intel_crtc *crtc)
5862 (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT)); 6090 (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT));
5863 6091
5864 /* Loop filter */ 6092 /* Loop filter */
5865 refclk = i9xx_get_refclk(&crtc->base, 0); 6093 refclk = i9xx_get_refclk(crtc, 0);
5866 loopfilter = 5 << DPIO_CHV_PROP_COEFF_SHIFT | 6094 loopfilter = 5 << DPIO_CHV_PROP_COEFF_SHIFT |
5867 2 << DPIO_CHV_GAIN_CTRL_SHIFT; 6095 2 << DPIO_CHV_GAIN_CTRL_SHIFT;
5868 if (refclk == 100000) 6096 if (refclk == 100000)
@@ -5882,6 +6110,53 @@ static void chv_prepare_pll(struct intel_crtc *crtc)
5882 mutex_unlock(&dev_priv->dpio_lock); 6110 mutex_unlock(&dev_priv->dpio_lock);
5883} 6111}
5884 6112
6113/**
6114 * vlv_force_pll_on - forcibly enable just the PLL
 6115 * @dev: drm device
6116 * @pipe: pipe PLL to enable
6117 * @dpll: PLL configuration
6118 *
6119 * Enable the PLL for @pipe using the supplied @dpll config. To be used
6120 * in cases where we need the PLL enabled even when @pipe is not going to
6121 * be enabled.
6122 */
6123void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
6124 const struct dpll *dpll)
6125{
6126 struct intel_crtc *crtc =
6127 to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
6128 struct intel_crtc_config pipe_config = {
6129 .pixel_multiplier = 1,
6130 .dpll = *dpll,
6131 };
6132
6133 if (IS_CHERRYVIEW(dev)) {
6134 chv_update_pll(crtc, &pipe_config);
6135 chv_prepare_pll(crtc, &pipe_config);
6136 chv_enable_pll(crtc, &pipe_config);
6137 } else {
6138 vlv_update_pll(crtc, &pipe_config);
6139 vlv_prepare_pll(crtc, &pipe_config);
6140 vlv_enable_pll(crtc, &pipe_config);
6141 }
6142}
6143
6144/**
6145 * vlv_force_pll_off - forcibly disable just the PLL
 6146 * @dev: drm device
6147 * @pipe: pipe PLL to disable
6148 *
 6149 * Disable the PLL for @pipe. To be used in cases where the PLL was
 6150 * force-enabled with vlv_force_pll_on() and @pipe is not going to be enabled.
6151 */
6152void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe)
6153{
6154 if (IS_CHERRYVIEW(dev))
6155 chv_disable_pll(to_i915(dev), pipe);
6156 else
6157 vlv_disable_pll(to_i915(dev), pipe);
6158}
6159
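The force on/off pair is meant for callers that need the PLL running while the pipe itself stays off. A sketch of the calling pattern (the caller and the divider values are hypothetical; only vlv_force_pll_on()/vlv_force_pll_off() come from this patch):

/* Hypothetical caller; dividers are made-up placeholder values. */
static void poke_hw_needing_running_pll(struct drm_device *dev,
                                        enum pipe pipe)
{
    const struct dpll tmp = {
        .n = 1, .m1 = 2, .m2 = 3,
        .p1 = 2, .p2 = 2,
    };

    vlv_force_pll_on(dev, pipe, &tmp);  /* PLL runs, pipe stays off */

    /* ... touch hardware that needs a running PLL ... */

    vlv_force_pll_off(dev, pipe);       /* release it again */
}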
5885static void i9xx_update_pll(struct intel_crtc *crtc, 6160static void i9xx_update_pll(struct intel_crtc *crtc,
5886 intel_clock_t *reduced_clock, 6161 intel_clock_t *reduced_clock,
5887 int num_connectors) 6162 int num_connectors)
@@ -5890,29 +6165,29 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
5890 struct drm_i915_private *dev_priv = dev->dev_private; 6165 struct drm_i915_private *dev_priv = dev->dev_private;
5891 u32 dpll; 6166 u32 dpll;
5892 bool is_sdvo; 6167 bool is_sdvo;
5893 struct dpll *clock = &crtc->config.dpll; 6168 struct dpll *clock = &crtc->new_config->dpll;
5894 6169
5895 i9xx_update_pll_dividers(crtc, reduced_clock); 6170 i9xx_update_pll_dividers(crtc, reduced_clock);
5896 6171
5897 is_sdvo = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_SDVO) || 6172 is_sdvo = intel_pipe_will_have_type(crtc, INTEL_OUTPUT_SDVO) ||
5898 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI); 6173 intel_pipe_will_have_type(crtc, INTEL_OUTPUT_HDMI);
5899 6174
5900 dpll = DPLL_VGA_MODE_DIS; 6175 dpll = DPLL_VGA_MODE_DIS;
5901 6176
5902 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS)) 6177 if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS))
5903 dpll |= DPLLB_MODE_LVDS; 6178 dpll |= DPLLB_MODE_LVDS;
5904 else 6179 else
5905 dpll |= DPLLB_MODE_DAC_SERIAL; 6180 dpll |= DPLLB_MODE_DAC_SERIAL;
5906 6181
5907 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) { 6182 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
5908 dpll |= (crtc->config.pixel_multiplier - 1) 6183 dpll |= (crtc->new_config->pixel_multiplier - 1)
5909 << SDVO_MULTIPLIER_SHIFT_HIRES; 6184 << SDVO_MULTIPLIER_SHIFT_HIRES;
5910 } 6185 }
5911 6186
5912 if (is_sdvo) 6187 if (is_sdvo)
5913 dpll |= DPLL_SDVO_HIGH_SPEED; 6188 dpll |= DPLL_SDVO_HIGH_SPEED;
5914 6189
5915 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DISPLAYPORT)) 6190 if (crtc->new_config->has_dp_encoder)
5916 dpll |= DPLL_SDVO_HIGH_SPEED; 6191 dpll |= DPLL_SDVO_HIGH_SPEED;
5917 6192
5918 /* compute bitmask from p1 value */ 6193 /* compute bitmask from p1 value */
@@ -5940,21 +6215,21 @@ static void i9xx_update_pll(struct intel_crtc *crtc,
5940 if (INTEL_INFO(dev)->gen >= 4) 6215 if (INTEL_INFO(dev)->gen >= 4)
5941 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); 6216 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
5942 6217
5943 if (crtc->config.sdvo_tv_clock) 6218 if (crtc->new_config->sdvo_tv_clock)
5944 dpll |= PLL_REF_INPUT_TVCLKINBC; 6219 dpll |= PLL_REF_INPUT_TVCLKINBC;
5945 else if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) && 6220 else if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
5946 intel_panel_use_ssc(dev_priv) && num_connectors < 2) 6221 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
5947 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 6222 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
5948 else 6223 else
5949 dpll |= PLL_REF_INPUT_DREFCLK; 6224 dpll |= PLL_REF_INPUT_DREFCLK;
5950 6225
5951 dpll |= DPLL_VCO_ENABLE; 6226 dpll |= DPLL_VCO_ENABLE;
5952 crtc->config.dpll_hw_state.dpll = dpll; 6227 crtc->new_config->dpll_hw_state.dpll = dpll;
5953 6228
5954 if (INTEL_INFO(dev)->gen >= 4) { 6229 if (INTEL_INFO(dev)->gen >= 4) {
5955 u32 dpll_md = (crtc->config.pixel_multiplier - 1) 6230 u32 dpll_md = (crtc->new_config->pixel_multiplier - 1)
5956 << DPLL_MD_UDI_MULTIPLIER_SHIFT; 6231 << DPLL_MD_UDI_MULTIPLIER_SHIFT;
5957 crtc->config.dpll_hw_state.dpll_md = dpll_md; 6232 crtc->new_config->dpll_hw_state.dpll_md = dpll_md;
5958 } 6233 }
5959} 6234}
5960 6235
@@ -5965,13 +6240,13 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
5965 struct drm_device *dev = crtc->base.dev; 6240 struct drm_device *dev = crtc->base.dev;
5966 struct drm_i915_private *dev_priv = dev->dev_private; 6241 struct drm_i915_private *dev_priv = dev->dev_private;
5967 u32 dpll; 6242 u32 dpll;
5968 struct dpll *clock = &crtc->config.dpll; 6243 struct dpll *clock = &crtc->new_config->dpll;
5969 6244
5970 i9xx_update_pll_dividers(crtc, reduced_clock); 6245 i9xx_update_pll_dividers(crtc, reduced_clock);
5971 6246
5972 dpll = DPLL_VGA_MODE_DIS; 6247 dpll = DPLL_VGA_MODE_DIS;
5973 6248
5974 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS)) { 6249 if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS)) {
5975 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 6250 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
5976 } else { 6251 } else {
5977 if (clock->p1 == 2) 6252 if (clock->p1 == 2)
@@ -5982,17 +6257,17 @@ static void i8xx_update_pll(struct intel_crtc *crtc,
5982 dpll |= PLL_P2_DIVIDE_BY_4; 6257 dpll |= PLL_P2_DIVIDE_BY_4;
5983 } 6258 }
5984 6259
5985 if (!IS_I830(dev) && intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_DVO)) 6260 if (!IS_I830(dev) && intel_pipe_will_have_type(crtc, INTEL_OUTPUT_DVO))
5986 dpll |= DPLL_DVO_2X_MODE; 6261 dpll |= DPLL_DVO_2X_MODE;
5987 6262
5988 if (intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_LVDS) && 6263 if (intel_pipe_will_have_type(crtc, INTEL_OUTPUT_LVDS) &&
5989 intel_panel_use_ssc(dev_priv) && num_connectors < 2) 6264 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
5990 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; 6265 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
5991 else 6266 else
5992 dpll |= PLL_REF_INPUT_DREFCLK; 6267 dpll |= PLL_REF_INPUT_DREFCLK;
5993 6268
5994 dpll |= DPLL_VCO_ENABLE; 6269 dpll |= DPLL_VCO_ENABLE;
5995 crtc->config.dpll_hw_state.dpll = dpll; 6270 crtc->new_config->dpll_hw_state.dpll = dpll;
5996} 6271}
5997 6272
5998static void intel_set_pipe_timings(struct intel_crtc *intel_crtc) 6273static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
@@ -6016,7 +6291,7 @@ static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
6016 crtc_vtotal -= 1; 6291 crtc_vtotal -= 1;
6017 crtc_vblank_end -= 1; 6292 crtc_vblank_end -= 1;
6018 6293
6019 if (intel_pipe_has_type(&intel_crtc->base, INTEL_OUTPUT_SDVO)) 6294 if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
6020 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2; 6295 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
6021 else 6296 else
6022 vsyncshift = adjusted_mode->crtc_hsync_start - 6297 vsyncshift = adjusted_mode->crtc_hsync_start -
@@ -6174,7 +6449,7 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
6174 6449
6175 if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) { 6450 if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
6176 if (INTEL_INFO(dev)->gen < 4 || 6451 if (INTEL_INFO(dev)->gen < 4 ||
6177 intel_pipe_has_type(&intel_crtc->base, INTEL_OUTPUT_SDVO)) 6452 intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
6178 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; 6453 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
6179 else 6454 else
6180 pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT; 6455 pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
@@ -6188,13 +6463,10 @@ static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
6188 POSTING_READ(PIPECONF(intel_crtc->pipe)); 6463 POSTING_READ(PIPECONF(intel_crtc->pipe));
6189} 6464}
6190 6465
6191static int i9xx_crtc_mode_set(struct drm_crtc *crtc, 6466static int i9xx_crtc_compute_clock(struct intel_crtc *crtc)
6192 int x, int y,
6193 struct drm_framebuffer *fb)
6194{ 6467{
6195 struct drm_device *dev = crtc->dev; 6468 struct drm_device *dev = crtc->base.dev;
6196 struct drm_i915_private *dev_priv = dev->dev_private; 6469 struct drm_i915_private *dev_priv = dev->dev_private;
6197 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6198 int refclk, num_connectors = 0; 6470 int refclk, num_connectors = 0;
6199 intel_clock_t clock, reduced_clock; 6471 intel_clock_t clock, reduced_clock;
6200 bool ok, has_reduced_clock = false; 6472 bool ok, has_reduced_clock = false;
@@ -6202,7 +6474,10 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
6202 struct intel_encoder *encoder; 6474 struct intel_encoder *encoder;
6203 const intel_limit_t *limit; 6475 const intel_limit_t *limit;
6204 6476
6205 for_each_encoder_on_crtc(dev, crtc, encoder) { 6477 for_each_intel_encoder(dev, encoder) {
6478 if (encoder->new_crtc != crtc)
6479 continue;
6480
6206 switch (encoder->type) { 6481 switch (encoder->type) {
6207 case INTEL_OUTPUT_LVDS: 6482 case INTEL_OUTPUT_LVDS:
6208 is_lvds = true; 6483 is_lvds = true;
@@ -6210,6 +6485,8 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
6210 case INTEL_OUTPUT_DSI: 6485 case INTEL_OUTPUT_DSI:
6211 is_dsi = true; 6486 is_dsi = true;
6212 break; 6487 break;
6488 default:
6489 break;
6213 } 6490 }
6214 6491
6215 num_connectors++; 6492 num_connectors++;
@@ -6218,7 +6495,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
6218 if (is_dsi) 6495 if (is_dsi)
6219 return 0; 6496 return 0;
6220 6497
6221 if (!intel_crtc->config.clock_set) { 6498 if (!crtc->new_config->clock_set) {
6222 refclk = i9xx_get_refclk(crtc, num_connectors); 6499 refclk = i9xx_get_refclk(crtc, num_connectors);
6223 6500
6224 /* 6501 /*
@@ -6229,7 +6506,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
6229 */ 6506 */
6230 limit = intel_limit(crtc, refclk); 6507 limit = intel_limit(crtc, refclk);
6231 ok = dev_priv->display.find_dpll(limit, crtc, 6508 ok = dev_priv->display.find_dpll(limit, crtc,
6232 intel_crtc->config.port_clock, 6509 crtc->new_config->port_clock,
6233 refclk, NULL, &clock); 6510 refclk, NULL, &clock);
6234 if (!ok) { 6511 if (!ok) {
6235 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 6512 DRM_ERROR("Couldn't find PLL settings for mode!\n");
@@ -6250,23 +6527,23 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
6250 &reduced_clock); 6527 &reduced_clock);
6251 } 6528 }
6252 /* Compat-code for transition, will disappear. */ 6529 /* Compat-code for transition, will disappear. */
6253 intel_crtc->config.dpll.n = clock.n; 6530 crtc->new_config->dpll.n = clock.n;
6254 intel_crtc->config.dpll.m1 = clock.m1; 6531 crtc->new_config->dpll.m1 = clock.m1;
6255 intel_crtc->config.dpll.m2 = clock.m2; 6532 crtc->new_config->dpll.m2 = clock.m2;
6256 intel_crtc->config.dpll.p1 = clock.p1; 6533 crtc->new_config->dpll.p1 = clock.p1;
6257 intel_crtc->config.dpll.p2 = clock.p2; 6534 crtc->new_config->dpll.p2 = clock.p2;
6258 } 6535 }
6259 6536
6260 if (IS_GEN2(dev)) { 6537 if (IS_GEN2(dev)) {
6261 i8xx_update_pll(intel_crtc, 6538 i8xx_update_pll(crtc,
6262 has_reduced_clock ? &reduced_clock : NULL, 6539 has_reduced_clock ? &reduced_clock : NULL,
6263 num_connectors); 6540 num_connectors);
6264 } else if (IS_CHERRYVIEW(dev)) { 6541 } else if (IS_CHERRYVIEW(dev)) {
6265 chv_update_pll(intel_crtc); 6542 chv_update_pll(crtc, crtc->new_config);
6266 } else if (IS_VALLEYVIEW(dev)) { 6543 } else if (IS_VALLEYVIEW(dev)) {
6267 vlv_update_pll(intel_crtc); 6544 vlv_update_pll(crtc, crtc->new_config);
6268 } else { 6545 } else {
6269 i9xx_update_pll(intel_crtc, 6546 i9xx_update_pll(crtc,
6270 has_reduced_clock ? &reduced_clock : NULL, 6547 has_reduced_clock ? &reduced_clock : NULL,
6271 num_connectors); 6548 num_connectors);
6272 } 6549 }
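The rename from i9xx_crtc_mode_set(crtc, x, y, fb) to i9xx_crtc_compute_clock(crtc) is the larger point of this hunk: clock computation loses its scanout arguments and operates purely on new_config, so it can run before anything is committed to the hardware. A generic sketch of the split (stand-in types and placeholder divider math):

struct cfg { int port_clock, n, m1, m2, p1, p2; };
struct crtc { struct cfg config; struct cfg *new_config; };

/* after the split: reads and writes ONLY the staged config; no x/y,
 * no framebuffer, nothing touched on the hardware */
static int crtc_compute_clock(struct crtc *c)
{
    struct cfg *cfg = c->new_config;

    if (cfg->port_clock <= 0)
        return -1;      /* stand-in for -EINVAL */

    /* placeholder divider math; the driver runs find_dpll() here */
    cfg->n = 2; cfg->m1 = 10; cfg->m2 = 8; cfg->p1 = 2; cfg->p2 = 5;
    return 0;
}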
@@ -6432,8 +6709,8 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
6432 struct drm_i915_private *dev_priv = dev->dev_private; 6709 struct drm_i915_private *dev_priv = dev->dev_private;
6433 uint32_t tmp; 6710 uint32_t tmp;
6434 6711
6435 if (!intel_display_power_enabled(dev_priv, 6712 if (!intel_display_power_is_enabled(dev_priv,
6436 POWER_DOMAIN_PIPE(crtc->pipe))) 6713 POWER_DOMAIN_PIPE(crtc->pipe)))
6437 return false; 6714 return false;
6438 6715
6439 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 6716 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
@@ -6538,6 +6815,8 @@ static void ironlake_init_pch_refclk(struct drm_device *dev)
6538 if (enc_to_dig_port(&encoder->base)->port == PORT_A) 6815 if (enc_to_dig_port(&encoder->base)->port == PORT_A)
6539 has_cpu_edp = true; 6816 has_cpu_edp = true;
6540 break; 6817 break;
6818 default:
6819 break;
6541 } 6820 }
6542 } 6821 }
6543 6822
@@ -6842,6 +7121,8 @@ static void lpt_init_pch_refclk(struct drm_device *dev)
6842 case INTEL_OUTPUT_ANALOG: 7121 case INTEL_OUTPUT_ANALOG:
6843 has_vga = true; 7122 has_vga = true;
6844 break; 7123 break;
7124 default:
7125 break;
6845 } 7126 }
6846 } 7127 }
6847 7128
@@ -6870,11 +7151,16 @@ static int ironlake_get_refclk(struct drm_crtc *crtc)
6870 int num_connectors = 0; 7151 int num_connectors = 0;
6871 bool is_lvds = false; 7152 bool is_lvds = false;
6872 7153
6873 for_each_encoder_on_crtc(dev, crtc, encoder) { 7154 for_each_intel_encoder(dev, encoder) {
7155 if (encoder->new_crtc != to_intel_crtc(crtc))
7156 continue;
7157
6874 switch (encoder->type) { 7158 switch (encoder->type) {
6875 case INTEL_OUTPUT_LVDS: 7159 case INTEL_OUTPUT_LVDS:
6876 is_lvds = true; 7160 is_lvds = true;
6877 break; 7161 break;
7162 default:
7163 break;
6878 } 7164 }
6879 num_connectors++; 7165 num_connectors++;
6880 } 7166 }
@@ -7019,7 +7305,7 @@ static void haswell_set_pipeconf(struct drm_crtc *crtc)
7019 I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT); 7305 I915_WRITE(GAMMA_MODE(intel_crtc->pipe), GAMMA_MODE_MODE_8BIT);
7020 POSTING_READ(GAMMA_MODE(intel_crtc->pipe)); 7306 POSTING_READ(GAMMA_MODE(intel_crtc->pipe));
7021 7307
7022 if (IS_BROADWELL(dev)) { 7308 if (IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
7023 val = 0; 7309 val = 0;
7024 7310
7025 switch (intel_crtc->config.pipe_bpp) { 7311 switch (intel_crtc->config.pipe_bpp) {
@@ -7054,18 +7340,12 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
7054{ 7340{
7055 struct drm_device *dev = crtc->dev; 7341 struct drm_device *dev = crtc->dev;
7056 struct drm_i915_private *dev_priv = dev->dev_private; 7342 struct drm_i915_private *dev_priv = dev->dev_private;
7057 struct intel_encoder *intel_encoder; 7343 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7058 int refclk; 7344 int refclk;
7059 const intel_limit_t *limit; 7345 const intel_limit_t *limit;
7060 bool ret, is_lvds = false; 7346 bool ret, is_lvds = false;
7061 7347
7062 for_each_encoder_on_crtc(dev, crtc, intel_encoder) { 7348 is_lvds = intel_pipe_will_have_type(intel_crtc, INTEL_OUTPUT_LVDS);
7063 switch (intel_encoder->type) {
7064 case INTEL_OUTPUT_LVDS:
7065 is_lvds = true;
7066 break;
7067 }
7068 }
7069 7349
7070 refclk = ironlake_get_refclk(crtc); 7350 refclk = ironlake_get_refclk(crtc);
7071 7351
@@ -7074,9 +7354,9 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
7074 * refclk, or FALSE. The returned values represent the clock equation: 7354 * refclk, or FALSE. The returned values represent the clock equation:
7075 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 7355 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
7076 */ 7356 */
7077 limit = intel_limit(crtc, refclk); 7357 limit = intel_limit(intel_crtc, refclk);
7078 ret = dev_priv->display.find_dpll(limit, crtc, 7358 ret = dev_priv->display.find_dpll(limit, intel_crtc,
7079 to_intel_crtc(crtc)->config.port_clock, 7359 intel_crtc->new_config->port_clock,
7080 refclk, NULL, clock); 7360 refclk, NULL, clock);
7081 if (!ret) 7361 if (!ret)
7082 return false; 7362 return false;
@@ -7089,7 +7369,7 @@ static bool ironlake_compute_clocks(struct drm_crtc *crtc,
7089 * downclock feature. 7369 * downclock feature.
7090 */ 7370 */
7091 *has_reduced_clock = 7371 *has_reduced_clock =
7092 dev_priv->display.find_dpll(limit, crtc, 7372 dev_priv->display.find_dpll(limit, intel_crtc,
7093 dev_priv->lvds_downclock, 7373 dev_priv->lvds_downclock,
7094 refclk, clock, 7374 refclk, clock,
7095 reduced_clock); 7375 reduced_clock);
@@ -7126,7 +7406,10 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
7126 int factor, num_connectors = 0; 7406 int factor, num_connectors = 0;
7127 bool is_lvds = false, is_sdvo = false; 7407 bool is_lvds = false, is_sdvo = false;
7128 7408
7129 for_each_encoder_on_crtc(dev, crtc, intel_encoder) { 7409 for_each_intel_encoder(dev, intel_encoder) {
7410 if (intel_encoder->new_crtc != to_intel_crtc(crtc))
7411 continue;
7412
7130 switch (intel_encoder->type) { 7413 switch (intel_encoder->type) {
7131 case INTEL_OUTPUT_LVDS: 7414 case INTEL_OUTPUT_LVDS:
7132 is_lvds = true; 7415 is_lvds = true;
@@ -7135,6 +7418,8 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
7135 case INTEL_OUTPUT_HDMI: 7418 case INTEL_OUTPUT_HDMI:
7136 is_sdvo = true; 7419 is_sdvo = true;
7137 break; 7420 break;
7421 default:
7422 break;
7138 } 7423 }
7139 7424
7140 num_connectors++; 7425 num_connectors++;
@@ -7147,10 +7432,10 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
7147 dev_priv->vbt.lvds_ssc_freq == 100000) || 7432 dev_priv->vbt.lvds_ssc_freq == 100000) ||
7148 (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev))) 7433 (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
7149 factor = 25; 7434 factor = 25;
7150 } else if (intel_crtc->config.sdvo_tv_clock) 7435 } else if (intel_crtc->new_config->sdvo_tv_clock)
7151 factor = 20; 7436 factor = 20;
7152 7437
7153 if (ironlake_needs_fb_cb_tune(&intel_crtc->config.dpll, factor)) 7438 if (ironlake_needs_fb_cb_tune(&intel_crtc->new_config->dpll, factor))
7154 *fp |= FP_CB_TUNE; 7439 *fp |= FP_CB_TUNE;
7155 7440
7156 if (fp2 && (reduced_clock->m < factor * reduced_clock->n)) 7441 if (fp2 && (reduced_clock->m < factor * reduced_clock->n))
@@ -7163,20 +7448,20 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
7163 else 7448 else
7164 dpll |= DPLLB_MODE_DAC_SERIAL; 7449 dpll |= DPLLB_MODE_DAC_SERIAL;
7165 7450
7166 dpll |= (intel_crtc->config.pixel_multiplier - 1) 7451 dpll |= (intel_crtc->new_config->pixel_multiplier - 1)
7167 << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; 7452 << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
7168 7453
7169 if (is_sdvo) 7454 if (is_sdvo)
7170 dpll |= DPLL_SDVO_HIGH_SPEED; 7455 dpll |= DPLL_SDVO_HIGH_SPEED;
7171 if (intel_crtc->config.has_dp_encoder) 7456 if (intel_crtc->new_config->has_dp_encoder)
7172 dpll |= DPLL_SDVO_HIGH_SPEED; 7457 dpll |= DPLL_SDVO_HIGH_SPEED;
7173 7458
7174 /* compute bitmask from p1 value */ 7459 /* compute bitmask from p1 value */
7175 dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 7460 dpll |= (1 << (intel_crtc->new_config->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7176 /* also FPA1 */ 7461 /* also FPA1 */
7177 dpll |= (1 << (intel_crtc->config.dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; 7462 dpll |= (1 << (intel_crtc->new_config->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
7178 7463
7179 switch (intel_crtc->config.dpll.p2) { 7464 switch (intel_crtc->new_config->dpll.p2) {
7180 case 5: 7465 case 5:
7181 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; 7466 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
7182 break; 7467 break;
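
Note the one-hot encoding above: the P1 post divider does not go into the register as a binary number; divider N sets bit N-1, duplicated into the FPA0 and FPA1 field positions. Worked out for p1 == 3 (illustrative value):

	u32 bits = 1 << (3 - 1);			/* 0b100 */
	dpll |= bits << DPLL_FPA01_P1_POST_DIV_SHIFT;	/* FPA0 copy */
	dpll |= bits << DPLL_FPA1_P1_POST_DIV_SHIFT;	/* FPA1 copy */

The P2 divider, by contrast, selects one of a few discrete register encodings, hence the switch.
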
@@ -7199,78 +7484,64 @@ static uint32_t ironlake_compute_dpll(struct intel_crtc *intel_crtc,
7199 return dpll | DPLL_VCO_ENABLE; 7484 return dpll | DPLL_VCO_ENABLE;
7200} 7485}
7201 7486
7202static int ironlake_crtc_mode_set(struct drm_crtc *crtc, 7487static int ironlake_crtc_compute_clock(struct intel_crtc *crtc)
7203 int x, int y,
7204 struct drm_framebuffer *fb)
7205{ 7488{
7206 struct drm_device *dev = crtc->dev; 7489 struct drm_device *dev = crtc->base.dev;
7207 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
7208 int num_connectors = 0;
7209 intel_clock_t clock, reduced_clock; 7490 intel_clock_t clock, reduced_clock;
7210 u32 dpll = 0, fp = 0, fp2 = 0; 7491 u32 dpll = 0, fp = 0, fp2 = 0;
7211 bool ok, has_reduced_clock = false; 7492 bool ok, has_reduced_clock = false;
7212 bool is_lvds = false; 7493 bool is_lvds = false;
7213 struct intel_encoder *encoder;
7214 struct intel_shared_dpll *pll; 7494 struct intel_shared_dpll *pll;
7215 7495
7216 for_each_encoder_on_crtc(dev, crtc, encoder) { 7496 is_lvds = intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS);
7217 switch (encoder->type) {
7218 case INTEL_OUTPUT_LVDS:
7219 is_lvds = true;
7220 break;
7221 }
7222
7223 num_connectors++;
7224 }
7225 7497
7226 WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)), 7498 WARN(!(HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)),
7227 "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev)); 7499 "Unexpected PCH type %d\n", INTEL_PCH_TYPE(dev));
7228 7500
7229 ok = ironlake_compute_clocks(crtc, &clock, 7501 ok = ironlake_compute_clocks(&crtc->base, &clock,
7230 &has_reduced_clock, &reduced_clock); 7502 &has_reduced_clock, &reduced_clock);
7231 if (!ok && !intel_crtc->config.clock_set) { 7503 if (!ok && !crtc->new_config->clock_set) {
7232 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 7504 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7233 return -EINVAL; 7505 return -EINVAL;
7234 } 7506 }
7235 /* Compat-code for transition, will disappear. */ 7507 /* Compat-code for transition, will disappear. */
7236 if (!intel_crtc->config.clock_set) { 7508 if (!crtc->new_config->clock_set) {
7237 intel_crtc->config.dpll.n = clock.n; 7509 crtc->new_config->dpll.n = clock.n;
7238 intel_crtc->config.dpll.m1 = clock.m1; 7510 crtc->new_config->dpll.m1 = clock.m1;
7239 intel_crtc->config.dpll.m2 = clock.m2; 7511 crtc->new_config->dpll.m2 = clock.m2;
7240 intel_crtc->config.dpll.p1 = clock.p1; 7512 crtc->new_config->dpll.p1 = clock.p1;
7241 intel_crtc->config.dpll.p2 = clock.p2; 7513 crtc->new_config->dpll.p2 = clock.p2;
7242 } 7514 }
7243 7515
7244 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */ 7516 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
7245 if (intel_crtc->config.has_pch_encoder) { 7517 if (crtc->new_config->has_pch_encoder) {
7246 fp = i9xx_dpll_compute_fp(&intel_crtc->config.dpll); 7518 fp = i9xx_dpll_compute_fp(&crtc->new_config->dpll);
7247 if (has_reduced_clock) 7519 if (has_reduced_clock)
7248 fp2 = i9xx_dpll_compute_fp(&reduced_clock); 7520 fp2 = i9xx_dpll_compute_fp(&reduced_clock);
7249 7521
7250 dpll = ironlake_compute_dpll(intel_crtc, 7522 dpll = ironlake_compute_dpll(crtc,
7251 &fp, &reduced_clock, 7523 &fp, &reduced_clock,
7252 has_reduced_clock ? &fp2 : NULL); 7524 has_reduced_clock ? &fp2 : NULL);
7253 7525
7254 intel_crtc->config.dpll_hw_state.dpll = dpll; 7526 crtc->new_config->dpll_hw_state.dpll = dpll;
7255 intel_crtc->config.dpll_hw_state.fp0 = fp; 7527 crtc->new_config->dpll_hw_state.fp0 = fp;
7256 if (has_reduced_clock) 7528 if (has_reduced_clock)
7257 intel_crtc->config.dpll_hw_state.fp1 = fp2; 7529 crtc->new_config->dpll_hw_state.fp1 = fp2;
7258 else 7530 else
7259 intel_crtc->config.dpll_hw_state.fp1 = fp; 7531 crtc->new_config->dpll_hw_state.fp1 = fp;
7260 7532
7261 pll = intel_get_shared_dpll(intel_crtc); 7533 pll = intel_get_shared_dpll(crtc);
7262 if (pll == NULL) { 7534 if (pll == NULL) {
7263 DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n", 7535 DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
7264 pipe_name(intel_crtc->pipe)); 7536 pipe_name(crtc->pipe));
7265 return -EINVAL; 7537 return -EINVAL;
7266 } 7538 }
7267 } else 7539 }
7268 intel_put_shared_dpll(intel_crtc);
7269 7540
7270 if (is_lvds && has_reduced_clock && i915.powersave) 7541 if (is_lvds && has_reduced_clock && i915.powersave)
7271 intel_crtc->lowfreq_avail = true; 7542 crtc->lowfreq_avail = true;
7272 else 7543 else
7273 intel_crtc->lowfreq_avail = false; 7544 crtc->lowfreq_avail = false;
7274 7545
7275 return 0; 7546 return 0;
7276} 7547}
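
This is the heart of the refactor: ironlake_crtc_mode_set(), whose x/y/fb parameters were never needed for clock setup, becomes ironlake_crtc_compute_clock(), which only validates dividers, fills in new_config and claims a shared DPLL; no registers are touched. A sketch of how a per-platform hook like this is driven from the common path (the call site below is an assumption, modeled on the other dev_priv->display callbacks):

	/* compute phase: staged state only, safe to fail */
	if (dev_priv->display.crtc_compute_clock) {
		ret = dev_priv->display.crtc_compute_clock(intel_crtc);
		if (ret) {
			DRM_DEBUG_KMS("CRTC clock computation failed\n");
			return ret;	/* mode rejected before touching hw */
		}
	}
	/* PLL registers are written later, when the config commits */
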
@@ -7351,6 +7622,22 @@ static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
7351 &pipe_config->fdi_m_n, NULL); 7622 &pipe_config->fdi_m_n, NULL);
7352} 7623}
7353 7624
7625static void skylake_get_pfit_config(struct intel_crtc *crtc,
7626 struct intel_crtc_config *pipe_config)
7627{
7628 struct drm_device *dev = crtc->base.dev;
7629 struct drm_i915_private *dev_priv = dev->dev_private;
7630 uint32_t tmp;
7631
7632 tmp = I915_READ(PS_CTL(crtc->pipe));
7633
7634 if (tmp & PS_ENABLE) {
7635 pipe_config->pch_pfit.enabled = true;
7636 pipe_config->pch_pfit.pos = I915_READ(PS_WIN_POS(crtc->pipe));
7637 pipe_config->pch_pfit.size = I915_READ(PS_WIN_SZ(crtc->pipe));
7638 }
7639}
7640
7354static void ironlake_get_pfit_config(struct intel_crtc *crtc, 7641static void ironlake_get_pfit_config(struct intel_crtc *crtc,
7355 struct intel_crtc_config *pipe_config) 7642 struct intel_crtc_config *pipe_config)
7356{ 7643{
@@ -7442,8 +7729,8 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
7442 struct drm_i915_private *dev_priv = dev->dev_private; 7729 struct drm_i915_private *dev_priv = dev->dev_private;
7443 uint32_t tmp; 7730 uint32_t tmp;
7444 7731
7445 if (!intel_display_power_enabled(dev_priv, 7732 if (!intel_display_power_is_enabled(dev_priv,
7446 POWER_DOMAIN_PIPE(crtc->pipe))) 7733 POWER_DOMAIN_PIPE(crtc->pipe)))
7447 return false; 7734 return false;
7448 7735
7449 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; 7736 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
@@ -7636,7 +7923,6 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
7636static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) 7923static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
7637{ 7924{
7638 uint32_t val; 7925 uint32_t val;
7639 unsigned long irqflags;
7640 7926
7641 val = I915_READ(LCPLL_CTL); 7927 val = I915_READ(LCPLL_CTL);
7642 7928
@@ -7656,10 +7942,10 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
7656 * to call special forcewake code that doesn't touch runtime PM and 7942 * to call special forcewake code that doesn't touch runtime PM and
7657 * doesn't enable the forcewake delayed work. 7943 * doesn't enable the forcewake delayed work.
7658 */ 7944 */
7659 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 7945 spin_lock_irq(&dev_priv->uncore.lock);
7660 if (dev_priv->uncore.forcewake_count++ == 0) 7946 if (dev_priv->uncore.forcewake_count++ == 0)
7661 dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL); 7947 dev_priv->uncore.funcs.force_wake_get(dev_priv, FORCEWAKE_ALL);
7662 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 7948 spin_unlock_irq(&dev_priv->uncore.lock);
7663 7949
7664 if (val & LCPLL_POWER_DOWN_ALLOW) { 7950 if (val & LCPLL_POWER_DOWN_ALLOW) {
7665 val &= ~LCPLL_POWER_DOWN_ALLOW; 7951 val &= ~LCPLL_POWER_DOWN_ALLOW;
@@ -7690,10 +7976,10 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
7690 } 7976 }
7691 7977
7692 /* See the big comment above. */ 7978 /* See the big comment above. */
7693 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); 7979 spin_lock_irq(&dev_priv->uncore.lock);
7694 if (--dev_priv->uncore.forcewake_count == 0) 7980 if (--dev_priv->uncore.forcewake_count == 0)
7695 dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL); 7981 dev_priv->uncore.funcs.force_wake_put(dev_priv, FORCEWAKE_ALL);
7696 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 7982 spin_unlock_irq(&dev_priv->uncore.lock);
7697} 7983}
7698 7984
7699/* 7985/*
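
Both LCPLL hunks drop the irqsave/irqrestore pair (and the now-unused irqflags local removed at old line 7639): hsw_restore_lcpll() only runs in process context with interrupts enabled, so there is no prior IRQ state worth saving. The rule of thumb, sketched:

	/* known process context, IRQs on: the cheap form suffices */
	spin_lock_irq(&dev_priv->uncore.lock);
	/* ... critical section ... */
	spin_unlock_irq(&dev_priv->uncore.lock);	/* re-enables IRQs unconditionally */

	/* caller context unknown, or IRQs possibly already off
	 * (irq handler, reset path): must save and restore */
	unsigned long flags;
	spin_lock_irqsave(&dev_priv->uncore.lock, flags);
	/* ... */
	spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);

The page-flip hunks further down keep irqsave on dev->event_lock for exactly the second reason, and gain comments saying so.
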
@@ -7755,28 +8041,36 @@ void hsw_disable_pc8(struct drm_i915_private *dev_priv)
7755 intel_prepare_ddi(dev); 8041 intel_prepare_ddi(dev);
7756} 8042}
7757 8043
7758static void snb_modeset_global_resources(struct drm_device *dev) 8044static int haswell_crtc_compute_clock(struct intel_crtc *crtc)
7759{ 8045{
7760 modeset_update_crtc_power_domains(dev); 8046 if (!intel_ddi_pll_select(crtc))
7761} 8047 return -EINVAL;
7762 8048
7763static void haswell_modeset_global_resources(struct drm_device *dev) 8049 crtc->lowfreq_avail = false;
7764{ 8050
7765 modeset_update_crtc_power_domains(dev); 8051 return 0;
7766} 8052}
7767 8053
7768static int haswell_crtc_mode_set(struct drm_crtc *crtc, 8054static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
7769 int x, int y, 8055 enum port port,
7770 struct drm_framebuffer *fb) 8056 struct intel_crtc_config *pipe_config)
7771{ 8057{
7772 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 8058 u32 temp;
7773
7774 if (!intel_ddi_pll_select(intel_crtc))
7775 return -EINVAL;
7776 8059
7777 intel_crtc->lowfreq_avail = false; 8060 temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
8061 pipe_config->ddi_pll_sel = temp >> (port * 3 + 1);
7778 8062
7779 return 0; 8063 switch (pipe_config->ddi_pll_sel) {
8064 case SKL_DPLL1:
8065 pipe_config->shared_dpll = DPLL_ID_SKL_DPLL1;
8066 break;
8067 case SKL_DPLL2:
8068 pipe_config->shared_dpll = DPLL_ID_SKL_DPLL2;
8069 break;
8070 case SKL_DPLL3:
8071 pipe_config->shared_dpll = DPLL_ID_SKL_DPLL3;
8072 break;
8073 }
7780} 8074}
7781 8075
7782static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv, 8076static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
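
skylake_get_ddi_pll() decodes DPLL_CTRL2, in which each DDI port appears to own a three-bit field: an override bit at position port * 3 followed by a two-bit DPLL selector, which is why the selector is recovered with a shift of port * 3 + 1 (the layout is inferred from the mask and shift used here). Worked out for port B:

	/* port B == 1: selector occupies bits [5:4] of DPLL_CTRL2 */
	u32 raw = 0x20;					/* example register value */
	u32 sel = (raw & (3 << (1 * 3 + 1))) >> (1 * 3 + 1);
	/* sel == 2, i.e. SKL_DPLL2 -> DPLL_ID_SKL_DPLL2 above */
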
@@ -7808,7 +8102,10 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
7808 8102
7809 port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT; 8103 port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
7810 8104
7811 haswell_get_ddi_pll(dev_priv, port, pipe_config); 8105 if (IS_SKYLAKE(dev))
8106 skylake_get_ddi_pll(dev_priv, port, pipe_config);
8107 else
8108 haswell_get_ddi_pll(dev_priv, port, pipe_config);
7812 8109
7813 if (pipe_config->shared_dpll >= 0) { 8110 if (pipe_config->shared_dpll >= 0) {
7814 pll = &dev_priv->shared_dplls[pipe_config->shared_dpll]; 8111 pll = &dev_priv->shared_dplls[pipe_config->shared_dpll];
@@ -7822,7 +8119,8 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
7822 * DDI E. So just check whether this pipe is wired to DDI E and whether 8119 * DDI E. So just check whether this pipe is wired to DDI E and whether
7823 * the PCH transcoder is on. 8120 * the PCH transcoder is on.
7824 */ 8121 */
7825 if ((port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) { 8122 if (INTEL_INFO(dev)->gen < 9 &&
8123 (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
7826 pipe_config->has_pch_encoder = true; 8124 pipe_config->has_pch_encoder = true;
7827 8125
7828 tmp = I915_READ(FDI_RX_CTL(PIPE_A)); 8126 tmp = I915_READ(FDI_RX_CTL(PIPE_A));
@@ -7841,7 +8139,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
7841 enum intel_display_power_domain pfit_domain; 8139 enum intel_display_power_domain pfit_domain;
7842 uint32_t tmp; 8140 uint32_t tmp;
7843 8141
7844 if (!intel_display_power_enabled(dev_priv, 8142 if (!intel_display_power_is_enabled(dev_priv,
7845 POWER_DOMAIN_PIPE(crtc->pipe))) 8143 POWER_DOMAIN_PIPE(crtc->pipe)))
7846 return false; 8144 return false;
7847 8145
@@ -7870,7 +8168,7 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
7870 pipe_config->cpu_transcoder = TRANSCODER_EDP; 8168 pipe_config->cpu_transcoder = TRANSCODER_EDP;
7871 } 8169 }
7872 8170
7873 if (!intel_display_power_enabled(dev_priv, 8171 if (!intel_display_power_is_enabled(dev_priv,
7874 POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder))) 8172 POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
7875 return false; 8173 return false;
7876 8174
@@ -7883,8 +8181,12 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
7883 intel_get_pipe_timings(crtc, pipe_config); 8181 intel_get_pipe_timings(crtc, pipe_config);
7884 8182
7885 pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe); 8183 pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
7886 if (intel_display_power_enabled(dev_priv, pfit_domain)) 8184 if (intel_display_power_is_enabled(dev_priv, pfit_domain)) {
7887 ironlake_get_pfit_config(crtc, pipe_config); 8185 if (IS_SKYLAKE(dev))
8186 skylake_get_pfit_config(crtc, pipe_config);
8187 else
8188 ironlake_get_pfit_config(crtc, pipe_config);
8189 }
7888 8190
7889 if (IS_HASWELL(dev)) 8191 if (IS_HASWELL(dev))
7890 pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) && 8192 pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
@@ -7900,314 +8202,6 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc,
7900 return true; 8202 return true;
7901} 8203}
7902 8204
7903static struct {
7904 int clock;
7905 u32 config;
7906} hdmi_audio_clock[] = {
7907 { DIV_ROUND_UP(25200 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_25175 },
7908 { 25200, AUD_CONFIG_PIXEL_CLOCK_HDMI_25200 }, /* default per bspec */
7909 { 27000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27000 },
7910 { 27000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_27027 },
7911 { 54000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54000 },
7912 { 54000 * 1001 / 1000, AUD_CONFIG_PIXEL_CLOCK_HDMI_54054 },
7913 { DIV_ROUND_UP(74250 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_74176 },
7914 { 74250, AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 },
7915 { DIV_ROUND_UP(148500 * 1000, 1001), AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 },
7916 { 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 },
7917};
7918
7919/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
7920static u32 audio_config_hdmi_pixel_clock(struct drm_display_mode *mode)
7921{
7922 int i;
7923
7924 for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) {
7925 if (mode->clock == hdmi_audio_clock[i].clock)
7926 break;
7927 }
7928
7929 if (i == ARRAY_SIZE(hdmi_audio_clock)) {
7930 DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n", mode->clock);
7931 i = 1;
7932 }
7933
7934 DRM_DEBUG_KMS("Configuring HDMI audio for pixel clock %d (0x%08x)\n",
7935 hdmi_audio_clock[i].clock,
7936 hdmi_audio_clock[i].config);
7937
7938 return hdmi_audio_clock[i].config;
7939}
7940
7941static bool intel_eld_uptodate(struct drm_connector *connector,
7942 int reg_eldv, uint32_t bits_eldv,
7943 int reg_elda, uint32_t bits_elda,
7944 int reg_edid)
7945{
7946 struct drm_i915_private *dev_priv = connector->dev->dev_private;
7947 uint8_t *eld = connector->eld;
7948 uint32_t i;
7949
7950 i = I915_READ(reg_eldv);
7951 i &= bits_eldv;
7952
7953 if (!eld[0])
7954 return !i;
7955
7956 if (!i)
7957 return false;
7958
7959 i = I915_READ(reg_elda);
7960 i &= ~bits_elda;
7961 I915_WRITE(reg_elda, i);
7962
7963 for (i = 0; i < eld[2]; i++)
7964 if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
7965 return false;
7966
7967 return true;
7968}
7969
7970static void g4x_write_eld(struct drm_connector *connector,
7971 struct drm_crtc *crtc,
7972 struct drm_display_mode *mode)
7973{
7974 struct drm_i915_private *dev_priv = connector->dev->dev_private;
7975 uint8_t *eld = connector->eld;
7976 uint32_t eldv;
7977 uint32_t len;
7978 uint32_t i;
7979
7980 i = I915_READ(G4X_AUD_VID_DID);
7981
7982 if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
7983 eldv = G4X_ELDV_DEVCL_DEVBLC;
7984 else
7985 eldv = G4X_ELDV_DEVCTG;
7986
7987 if (intel_eld_uptodate(connector,
7988 G4X_AUD_CNTL_ST, eldv,
7989 G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
7990 G4X_HDMIW_HDMIEDID))
7991 return;
7992
7993 i = I915_READ(G4X_AUD_CNTL_ST);
7994 i &= ~(eldv | G4X_ELD_ADDR);
7995 len = (i >> 9) & 0x1f; /* ELD buffer size */
7996 I915_WRITE(G4X_AUD_CNTL_ST, i);
7997
7998 if (!eld[0])
7999 return;
8000
8001 len = min_t(uint8_t, eld[2], len);
8002 DRM_DEBUG_DRIVER("ELD size %d\n", len);
8003 for (i = 0; i < len; i++)
8004 I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));
8005
8006 i = I915_READ(G4X_AUD_CNTL_ST);
8007 i |= eldv;
8008 I915_WRITE(G4X_AUD_CNTL_ST, i);
8009}
8010
8011static void haswell_write_eld(struct drm_connector *connector,
8012 struct drm_crtc *crtc,
8013 struct drm_display_mode *mode)
8014{
8015 struct drm_i915_private *dev_priv = connector->dev->dev_private;
8016 uint8_t *eld = connector->eld;
8017 uint32_t eldv;
8018 uint32_t i;
8019 int len;
8020 int pipe = to_intel_crtc(crtc)->pipe;
8021 int tmp;
8022
8023 int hdmiw_hdmiedid = HSW_AUD_EDID_DATA(pipe);
8024 int aud_cntl_st = HSW_AUD_DIP_ELD_CTRL(pipe);
8025 int aud_config = HSW_AUD_CFG(pipe);
8026 int aud_cntrl_st2 = HSW_AUD_PIN_ELD_CP_VLD;
8027
8028 /* Audio output enable */
8029 DRM_DEBUG_DRIVER("HDMI audio: enable codec\n");
8030 tmp = I915_READ(aud_cntrl_st2);
8031 tmp |= (AUDIO_OUTPUT_ENABLE_A << (pipe * 4));
8032 I915_WRITE(aud_cntrl_st2, tmp);
8033 POSTING_READ(aud_cntrl_st2);
8034
8035 assert_pipe_disabled(dev_priv, to_intel_crtc(crtc)->pipe);
8036
8037 /* Set ELD valid state */
8038 tmp = I915_READ(aud_cntrl_st2);
8039 DRM_DEBUG_DRIVER("HDMI audio: pin eld vld status=0x%08x\n", tmp);
8040 tmp |= (AUDIO_ELD_VALID_A << (pipe * 4));
8041 I915_WRITE(aud_cntrl_st2, tmp);
8042 tmp = I915_READ(aud_cntrl_st2);
8043 DRM_DEBUG_DRIVER("HDMI audio: eld vld status=0x%08x\n", tmp);
8044
8045 /* Enable HDMI mode */
8046 tmp = I915_READ(aud_config);
8047 DRM_DEBUG_DRIVER("HDMI audio: audio conf: 0x%08x\n", tmp);
8048 /* clear N_programing_enable and N_value_index */
8049 tmp &= ~(AUD_CONFIG_N_VALUE_INDEX | AUD_CONFIG_N_PROG_ENABLE);
8050 I915_WRITE(aud_config, tmp);
8051
8052 DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
8053
8054 eldv = AUDIO_ELD_VALID_A << (pipe * 4);
8055
8056 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
8057 DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
8058 eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
8059 I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
8060 } else {
8061 I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode));
8062 }
8063
8064 if (intel_eld_uptodate(connector,
8065 aud_cntrl_st2, eldv,
8066 aud_cntl_st, IBX_ELD_ADDRESS,
8067 hdmiw_hdmiedid))
8068 return;
8069
8070 i = I915_READ(aud_cntrl_st2);
8071 i &= ~eldv;
8072 I915_WRITE(aud_cntrl_st2, i);
8073
8074 if (!eld[0])
8075 return;
8076
8077 i = I915_READ(aud_cntl_st);
8078 i &= ~IBX_ELD_ADDRESS;
8079 I915_WRITE(aud_cntl_st, i);
8080 i = (i >> 29) & DIP_PORT_SEL_MASK; /* DIP_Port_Select, 0x1 = PortB */
8081 DRM_DEBUG_DRIVER("port num:%d\n", i);
8082
8083 len = min_t(uint8_t, eld[2], 21); /* 84 bytes of hw ELD buffer */
8084 DRM_DEBUG_DRIVER("ELD size %d\n", len);
8085 for (i = 0; i < len; i++)
8086 I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
8087
8088 i = I915_READ(aud_cntrl_st2);
8089 i |= eldv;
8090 I915_WRITE(aud_cntrl_st2, i);
8091
8092}
8093
8094static void ironlake_write_eld(struct drm_connector *connector,
8095 struct drm_crtc *crtc,
8096 struct drm_display_mode *mode)
8097{
8098 struct drm_i915_private *dev_priv = connector->dev->dev_private;
8099 uint8_t *eld = connector->eld;
8100 uint32_t eldv;
8101 uint32_t i;
8102 int len;
8103 int hdmiw_hdmiedid;
8104 int aud_config;
8105 int aud_cntl_st;
8106 int aud_cntrl_st2;
8107 int pipe = to_intel_crtc(crtc)->pipe;
8108
8109 if (HAS_PCH_IBX(connector->dev)) {
8110 hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID(pipe);
8111 aud_config = IBX_AUD_CFG(pipe);
8112 aud_cntl_st = IBX_AUD_CNTL_ST(pipe);
8113 aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
8114 } else if (IS_VALLEYVIEW(connector->dev)) {
8115 hdmiw_hdmiedid = VLV_HDMIW_HDMIEDID(pipe);
8116 aud_config = VLV_AUD_CFG(pipe);
8117 aud_cntl_st = VLV_AUD_CNTL_ST(pipe);
8118 aud_cntrl_st2 = VLV_AUD_CNTL_ST2;
8119 } else {
8120 hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID(pipe);
8121 aud_config = CPT_AUD_CFG(pipe);
8122 aud_cntl_st = CPT_AUD_CNTL_ST(pipe);
8123 aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
8124 }
8125
8126 DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
8127
8128 if (IS_VALLEYVIEW(connector->dev)) {
8129 struct intel_encoder *intel_encoder;
8130 struct intel_digital_port *intel_dig_port;
8131
8132 intel_encoder = intel_attached_encoder(connector);
8133 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
8134 i = intel_dig_port->port;
8135 } else {
8136 i = I915_READ(aud_cntl_st);
8137 i = (i >> 29) & DIP_PORT_SEL_MASK;
8138 /* DIP_Port_Select, 0x1 = PortB */
8139 }
8140
8141 if (!i) {
8142 DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
8143 /* operate blindly on all ports */
8144 eldv = IBX_ELD_VALIDB;
8145 eldv |= IBX_ELD_VALIDB << 4;
8146 eldv |= IBX_ELD_VALIDB << 8;
8147 } else {
8148 DRM_DEBUG_DRIVER("ELD on port %c\n", port_name(i));
8149 eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
8150 }
8151
8152 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
8153 DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
8154 eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
8155 I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
8156 } else {
8157 I915_WRITE(aud_config, audio_config_hdmi_pixel_clock(mode));
8158 }
8159
8160 if (intel_eld_uptodate(connector,
8161 aud_cntrl_st2, eldv,
8162 aud_cntl_st, IBX_ELD_ADDRESS,
8163 hdmiw_hdmiedid))
8164 return;
8165
8166 i = I915_READ(aud_cntrl_st2);
8167 i &= ~eldv;
8168 I915_WRITE(aud_cntrl_st2, i);
8169
8170 if (!eld[0])
8171 return;
8172
8173 i = I915_READ(aud_cntl_st);
8174 i &= ~IBX_ELD_ADDRESS;
8175 I915_WRITE(aud_cntl_st, i);
8176
8177 len = min_t(uint8_t, eld[2], 21); /* 84 bytes of hw ELD buffer */
8178 DRM_DEBUG_DRIVER("ELD size %d\n", len);
8179 for (i = 0; i < len; i++)
8180 I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
8181
8182 i = I915_READ(aud_cntrl_st2);
8183 i |= eldv;
8184 I915_WRITE(aud_cntrl_st2, i);
8185}
8186
8187void intel_write_eld(struct drm_encoder *encoder,
8188 struct drm_display_mode *mode)
8189{
8190 struct drm_crtc *crtc = encoder->crtc;
8191 struct drm_connector *connector;
8192 struct drm_device *dev = encoder->dev;
8193 struct drm_i915_private *dev_priv = dev->dev_private;
8194
8195 connector = drm_select_eld(encoder, mode);
8196 if (!connector)
8197 return;
8198
8199 DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
8200 connector->base.id,
8201 connector->name,
8202 connector->encoder->base.id,
8203 connector->encoder->name);
8204
8205 connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
8206
8207 if (dev_priv->display.write_eld)
8208 dev_priv->display.write_eld(connector, crtc, mode);
8209}
8210
8211static void i845_update_cursor(struct drm_crtc *crtc, u32 base) 8205static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
8212{ 8206{
8213 struct drm_device *dev = crtc->dev; 8207 struct drm_device *dev = crtc->dev;
@@ -8253,8 +8247,10 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
8253 intel_crtc->cursor_cntl = 0; 8247 intel_crtc->cursor_cntl = 0;
8254 } 8248 }
8255 8249
8256 if (intel_crtc->cursor_base != base) 8250 if (intel_crtc->cursor_base != base) {
8257 I915_WRITE(_CURABASE, base); 8251 I915_WRITE(_CURABASE, base);
8252 intel_crtc->cursor_base = base;
8253 }
8258 8254
8259 if (intel_crtc->cursor_size != size) { 8255 if (intel_crtc->cursor_size != size) {
8260 I915_WRITE(CURSIZE, size); 8256 I915_WRITE(CURSIZE, size);
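
This hunk, together with its i9xx counterpart two hunks down, moves the cursor_base bookkeeping next to the register write it shadows; previously the caller updated it after the fact (see the removal in the intel_crtc_update_cursor hunk below). The pattern in miniature:

	/* write-through shadow: skip redundant MMIO, but keep the
	 * shadow in step with what was actually written */
	if (intel_crtc->cursor_base != base) {
		I915_WRITE(_CURABASE, base);
		intel_crtc->cursor_base = base;
	}
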
@@ -8294,9 +8290,13 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
8294 return; 8290 return;
8295 } 8291 }
8296 cntl |= pipe << 28; /* Connect to correct pipe */ 8292 cntl |= pipe << 28; /* Connect to correct pipe */
8293
8294 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
8295 cntl |= CURSOR_PIPE_CSC_ENABLE;
8297 } 8296 }
8298 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 8297
8299 cntl |= CURSOR_PIPE_CSC_ENABLE; 8298 if (to_intel_plane(crtc->cursor)->rotation == BIT(DRM_ROTATE_180))
8299 cntl |= CURSOR_ROTATE_180;
8300 8300
8301 if (intel_crtc->cursor_cntl != cntl) { 8301 if (intel_crtc->cursor_cntl != cntl) {
8302 I915_WRITE(CURCNTR(pipe), cntl); 8302 I915_WRITE(CURCNTR(pipe), cntl);
@@ -8307,6 +8307,8 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
8307 /* and commit changes on next vblank */ 8307 /* and commit changes on next vblank */
8308 I915_WRITE(CURBASE(pipe), base); 8308 I915_WRITE(CURBASE(pipe), base);
8309 POSTING_READ(CURBASE(pipe)); 8309 POSTING_READ(CURBASE(pipe));
8310
8311 intel_crtc->cursor_base = base;
8310} 8312}
8311 8313
 8312 /* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */ 8314
@@ -8353,11 +8355,17 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
8353 8355
8354 I915_WRITE(CURPOS(pipe), pos); 8356 I915_WRITE(CURPOS(pipe), pos);
8355 8357
8358 /* ILK+ do this automagically */
8359 if (HAS_GMCH_DISPLAY(dev) &&
8360 to_intel_plane(crtc->cursor)->rotation == BIT(DRM_ROTATE_180)) {
8361 base += (intel_crtc->cursor_height *
8362 intel_crtc->cursor_width - 1) * 4;
8363 }
8364
8356 if (IS_845G(dev) || IS_I865G(dev)) 8365 if (IS_845G(dev) || IS_I865G(dev))
8357 i845_update_cursor(crtc, base); 8366 i845_update_cursor(crtc, base);
8358 else 8367 else
8359 i9xx_update_cursor(crtc, base); 8368 i9xx_update_cursor(crtc, base);
8360 intel_crtc->cursor_base = base;
8361} 8369}
8362 8370
8363static bool cursor_size_ok(struct drm_device *dev, 8371static bool cursor_size_ok(struct drm_device *dev,
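
The new GMCH branch implements 180-degree cursor rotation by pointing the base address at the last pixel of the image, since only ILK+ derives that address in hardware ("automagically"). The offset math for the common 64x64 ARGB cursor:

	/* 4 bytes per pixel; offset of the final pixel: */
	base += (64 * 64 - 1) * 4;	/* == 16380 bytes */

With CURSOR_ROTATE_180 set in CURCNTR (previous hunk), scanout then presumably walks the buffer backwards from that address; the exact hardware behaviour is inferred from the offset arithmetic.
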
@@ -8397,22 +8405,15 @@ static bool cursor_size_ok(struct drm_device *dev,
8397 return true; 8405 return true;
8398} 8406}
8399 8407
8400/*
8401 * intel_crtc_cursor_set_obj - Set cursor to specified GEM object
8402 *
8403 * Note that the object's reference will be consumed if the update fails. If
8404 * the update succeeds, the reference of the old object (if any) will be
8405 * consumed.
8406 */
8407static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc, 8408static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
8408 struct drm_i915_gem_object *obj, 8409 struct drm_i915_gem_object *obj,
8409 uint32_t width, uint32_t height) 8410 uint32_t width, uint32_t height)
8410{ 8411{
8411 struct drm_device *dev = crtc->dev; 8412 struct drm_device *dev = crtc->dev;
8412 struct drm_i915_private *dev_priv = dev->dev_private; 8413 struct drm_i915_private *dev_priv = to_i915(dev);
8413 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 8414 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8414 enum pipe pipe = intel_crtc->pipe; 8415 enum pipe pipe = intel_crtc->pipe;
8415 unsigned old_width, stride; 8416 unsigned old_width;
8416 uint32_t addr; 8417 uint32_t addr;
8417 int ret; 8418 int ret;
8418 8419
@@ -8424,30 +8425,11 @@ static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
8424 goto finish; 8425 goto finish;
8425 } 8426 }
8426 8427
8427 /* Check for which cursor types we support */
8428 if (!cursor_size_ok(dev, width, height)) {
8429 DRM_DEBUG("Cursor dimension not supported\n");
8430 return -EINVAL;
8431 }
8432
8433 stride = roundup_pow_of_two(width) * 4;
8434 if (obj->base.size < stride * height) {
8435 DRM_DEBUG_KMS("buffer is too small\n");
8436 ret = -ENOMEM;
8437 goto fail;
8438 }
8439
8440 /* we only need to pin inside GTT if cursor is non-phy */ 8428 /* we only need to pin inside GTT if cursor is non-phy */
8441 mutex_lock(&dev->struct_mutex); 8429 mutex_lock(&dev->struct_mutex);
8442 if (!INTEL_INFO(dev)->cursor_needs_physical) { 8430 if (!INTEL_INFO(dev)->cursor_needs_physical) {
8443 unsigned alignment; 8431 unsigned alignment;
8444 8432
8445 if (obj->tiling_mode) {
8446 DRM_DEBUG_KMS("cursor cannot be tiled\n");
8447 ret = -EINVAL;
8448 goto fail_locked;
8449 }
8450
8451 /* 8433 /*
8452 * Global gtt pte registers are special registers which actually 8434 * Global gtt pte registers are special registers which actually
8453 * forward writes to a chunk of system memory. Which means that 8435 * forward writes to a chunk of system memory. Which means that
@@ -8514,17 +8496,15 @@ static int intel_crtc_cursor_set_obj(struct drm_crtc *crtc,
8514 if (old_width != width) 8496 if (old_width != width)
8515 intel_update_watermarks(crtc); 8497 intel_update_watermarks(crtc);
8516 intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL); 8498 intel_crtc_update_cursor(crtc, intel_crtc->cursor_bo != NULL);
8517 }
8518 8499
8519 intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_CURSOR(pipe)); 8500 intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_CURSOR(pipe));
8501 }
8520 8502
8521 return 0; 8503 return 0;
8522fail_unpin: 8504fail_unpin:
8523 i915_gem_object_unpin_from_display_plane(obj); 8505 i915_gem_object_unpin_from_display_plane(obj);
8524fail_locked: 8506fail_locked:
8525 mutex_unlock(&dev->struct_mutex); 8507 mutex_unlock(&dev->struct_mutex);
8526fail:
8527 drm_gem_object_unreference_unlocked(&obj->base);
8528 return ret; 8508 return ret;
8529} 8509}
8530 8510
@@ -8559,7 +8539,7 @@ __intel_framebuffer_create(struct drm_device *dev,
8559 8539
8560 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); 8540 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
8561 if (!intel_fb) { 8541 if (!intel_fb) {
8562 drm_gem_object_unreference_unlocked(&obj->base); 8542 drm_gem_object_unreference(&obj->base);
8563 return ERR_PTR(-ENOMEM); 8543 return ERR_PTR(-ENOMEM);
8564 } 8544 }
8565 8545
@@ -8569,7 +8549,7 @@ __intel_framebuffer_create(struct drm_device *dev,
8569 8549
8570 return &intel_fb->base; 8550 return &intel_fb->base;
8571err: 8551err:
8572 drm_gem_object_unreference_unlocked(&obj->base); 8552 drm_gem_object_unreference(&obj->base);
8573 kfree(intel_fb); 8553 kfree(intel_fb);
8574 8554
8575 return ERR_PTR(ret); 8555 return ERR_PTR(ret);
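
Both __intel_framebuffer_create() error paths switch to the locked unreference, implying the caller is now expected to hold struct_mutex; the _unlocked variant would try to take that same mutex when dropping the last reference and deadlock. The convention of this era, sketched:

	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(&obj->base);		/* lock already held */
	mutex_unlock(&dev->struct_mutex);

	/* from a path that does not hold struct_mutex: */
	drm_gem_object_unreference_unlocked(&obj->base);
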
@@ -8702,6 +8682,9 @@ retry:
8702 ret = drm_modeset_lock(&crtc->mutex, ctx); 8682 ret = drm_modeset_lock(&crtc->mutex, ctx);
8703 if (ret) 8683 if (ret)
8704 goto fail_unlock; 8684 goto fail_unlock;
8685 ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
8686 if (ret)
8687 goto fail_unlock;
8705 8688
8706 old->dpms_mode = connector->dpms; 8689 old->dpms_mode = connector->dpms;
8707 old->load_detect_temp = false; 8690 old->load_detect_temp = false;
@@ -8739,6 +8722,9 @@ retry:
8739 ret = drm_modeset_lock(&crtc->mutex, ctx); 8722 ret = drm_modeset_lock(&crtc->mutex, ctx);
8740 if (ret) 8723 if (ret)
8741 goto fail_unlock; 8724 goto fail_unlock;
8725 ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
8726 if (ret)
8727 goto fail_unlock;
8742 intel_encoder->new_crtc = to_intel_crtc(crtc); 8728 intel_encoder->new_crtc = to_intel_crtc(crtc);
8743 to_intel_connector(connector)->new_encoder = intel_encoder; 8729 to_intel_connector(connector)->new_encoder = intel_encoder;
8744 8730
@@ -9021,35 +9007,6 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
9021 return mode; 9007 return mode;
9022} 9008}
9023 9009
9024static void intel_increase_pllclock(struct drm_device *dev,
9025 enum pipe pipe)
9026{
9027 struct drm_i915_private *dev_priv = dev->dev_private;
9028 int dpll_reg = DPLL(pipe);
9029 int dpll;
9030
9031 if (!HAS_GMCH_DISPLAY(dev))
9032 return;
9033
9034 if (!dev_priv->lvds_downclock_avail)
9035 return;
9036
9037 dpll = I915_READ(dpll_reg);
9038 if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
9039 DRM_DEBUG_DRIVER("upclocking LVDS\n");
9040
9041 assert_panel_unlocked(dev_priv, pipe);
9042
9043 dpll &= ~DISPLAY_RATE_SELECT_FPA1;
9044 I915_WRITE(dpll_reg, dpll);
9045 intel_wait_for_vblank(dev, pipe);
9046
9047 dpll = I915_READ(dpll_reg);
9048 if (dpll & DISPLAY_RATE_SELECT_FPA1)
9049 DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
9050 }
9051}
9052
9053static void intel_decrease_pllclock(struct drm_crtc *crtc) 9010static void intel_decrease_pllclock(struct drm_crtc *crtc)
9054{ 9011{
9055 struct drm_device *dev = crtc->dev; 9012 struct drm_device *dev = crtc->dev;
@@ -9125,199 +9082,16 @@ out:
9125 intel_runtime_pm_put(dev_priv); 9082 intel_runtime_pm_put(dev_priv);
9126} 9083}
9127 9084
9128
9129/**
9130 * intel_mark_fb_busy - mark given planes as busy
9131 * @dev: DRM device
9132 * @frontbuffer_bits: bits for the affected planes
9133 * @ring: optional ring for asynchronous commands
9134 *
9135 * This function gets called every time the screen contents change. It can be
9136 * used to keep e.g. the update rate at the nominal refresh rate with DRRS.
9137 */
9138static void intel_mark_fb_busy(struct drm_device *dev,
9139 unsigned frontbuffer_bits,
9140 struct intel_engine_cs *ring)
9141{
9142 struct drm_i915_private *dev_priv = dev->dev_private;
9143 enum pipe pipe;
9144
9145 if (!i915.powersave)
9146 return;
9147
9148 for_each_pipe(dev_priv, pipe) {
9149 if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)))
9150 continue;
9151
9152 intel_increase_pllclock(dev, pipe);
9153 if (ring && intel_fbc_enabled(dev))
9154 ring->fbc_dirty = true;
9155 }
9156}
9157
9158/**
9159 * intel_fb_obj_invalidate - invalidate frontbuffer object
9160 * @obj: GEM object to invalidate
9161 * @ring: set for asynchronous rendering
9162 *
9163 * This function gets called every time rendering on the given object starts and
9164 * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
9165 * be invalidated. If @ring is non-NULL any subsequent invalidation will be delayed
9166 * until the rendering completes or a flip on this frontbuffer plane is
9167 * scheduled.
9168 */
9169void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
9170 struct intel_engine_cs *ring)
9171{
9172 struct drm_device *dev = obj->base.dev;
9173 struct drm_i915_private *dev_priv = dev->dev_private;
9174
9175 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
9176
9177 if (!obj->frontbuffer_bits)
9178 return;
9179
9180 if (ring) {
9181 mutex_lock(&dev_priv->fb_tracking.lock);
9182 dev_priv->fb_tracking.busy_bits
9183 |= obj->frontbuffer_bits;
9184 dev_priv->fb_tracking.flip_bits
9185 &= ~obj->frontbuffer_bits;
9186 mutex_unlock(&dev_priv->fb_tracking.lock);
9187 }
9188
9189 intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);
9190
9191 intel_edp_psr_invalidate(dev, obj->frontbuffer_bits);
9192}
9193
9194/**
9195 * intel_frontbuffer_flush - flush frontbuffer
9196 * @dev: DRM device
9197 * @frontbuffer_bits: frontbuffer plane tracking bits
9198 *
9199 * This function gets called every time rendering on the given planes has
9200 * completed and frontbuffer caching can be started again. Flushes will get
 9201 * delayed if they're blocked by some outstanding asynchronous rendering.
9202 *
9203 * Can be called without any locks held.
9204 */
9205void intel_frontbuffer_flush(struct drm_device *dev,
9206 unsigned frontbuffer_bits)
9207{
9208 struct drm_i915_private *dev_priv = dev->dev_private;
9209
9210 /* Delay flushing when rings are still busy.*/
9211 mutex_lock(&dev_priv->fb_tracking.lock);
9212 frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
9213 mutex_unlock(&dev_priv->fb_tracking.lock);
9214
9215 intel_mark_fb_busy(dev, frontbuffer_bits, NULL);
9216
9217 intel_edp_psr_flush(dev, frontbuffer_bits);
9218
9219 /*
9220 * FIXME: Unconditional fbc flushing here is a rather gross hack and
9221 * needs to be reworked into a proper frontbuffer tracking scheme like
9222 * psr employs.
9223 */
9224 if (IS_BROADWELL(dev))
9225 gen8_fbc_sw_flush(dev, FBC_REND_CACHE_CLEAN);
9226}
9227
9228/**
9229 * intel_fb_obj_flush - flush frontbuffer object
9230 * @obj: GEM object to flush
9231 * @retire: set when retiring asynchronous rendering
9232 *
9233 * This function gets called every time rendering on the given object has
9234 * completed and frontbuffer caching can be started again. If @retire is true
9235 * then any delayed flushes will be unblocked.
9236 */
9237void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
9238 bool retire)
9239{
9240 struct drm_device *dev = obj->base.dev;
9241 struct drm_i915_private *dev_priv = dev->dev_private;
9242 unsigned frontbuffer_bits;
9243
9244 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
9245
9246 if (!obj->frontbuffer_bits)
9247 return;
9248
9249 frontbuffer_bits = obj->frontbuffer_bits;
9250
9251 if (retire) {
9252 mutex_lock(&dev_priv->fb_tracking.lock);
9253 /* Filter out new bits since rendering started. */
9254 frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;
9255
9256 dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
9257 mutex_unlock(&dev_priv->fb_tracking.lock);
9258 }
9259
9260 intel_frontbuffer_flush(dev, frontbuffer_bits);
9261}
9262
9263/**
 9264 * intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip
9265 * @dev: DRM device
9266 * @frontbuffer_bits: frontbuffer plane tracking bits
9267 *
9268 * This function gets called after scheduling a flip on @obj. The actual
9269 * frontbuffer flushing will be delayed until completion is signalled with
9270 * intel_frontbuffer_flip_complete. If an invalidate happens in between this
9271 * flush will be cancelled.
9272 *
9273 * Can be called without any locks held.
9274 */
9275void intel_frontbuffer_flip_prepare(struct drm_device *dev,
9276 unsigned frontbuffer_bits)
9277{
9278 struct drm_i915_private *dev_priv = dev->dev_private;
9279
9280 mutex_lock(&dev_priv->fb_tracking.lock);
9281 dev_priv->fb_tracking.flip_bits
9282 |= frontbuffer_bits;
9283 mutex_unlock(&dev_priv->fb_tracking.lock);
9284}
9285
9286/**
9287 * intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flush
9288 * @dev: DRM device
9289 * @frontbuffer_bits: frontbuffer plane tracking bits
9290 *
9291 * This function gets called after the flip has been latched and will complete
 9292 * on the next vblank. It will execute the flush if it hasn't been cancelled yet.
9293 *
9294 * Can be called without any locks held.
9295 */
9296void intel_frontbuffer_flip_complete(struct drm_device *dev,
9297 unsigned frontbuffer_bits)
9298{
9299 struct drm_i915_private *dev_priv = dev->dev_private;
9300
9301 mutex_lock(&dev_priv->fb_tracking.lock);
9302 /* Mask any cancelled flips. */
9303 frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
9304 dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
9305 mutex_unlock(&dev_priv->fb_tracking.lock);
9306
9307 intel_frontbuffer_flush(dev, frontbuffer_bits);
9308}
9309
9310static void intel_crtc_destroy(struct drm_crtc *crtc) 9085static void intel_crtc_destroy(struct drm_crtc *crtc)
9311{ 9086{
9312 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 9087 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9313 struct drm_device *dev = crtc->dev; 9088 struct drm_device *dev = crtc->dev;
9314 struct intel_unpin_work *work; 9089 struct intel_unpin_work *work;
9315 unsigned long flags;
9316 9090
9317 spin_lock_irqsave(&dev->event_lock, flags); 9091 spin_lock_irq(&dev->event_lock);
9318 work = intel_crtc->unpin_work; 9092 work = intel_crtc->unpin_work;
9319 intel_crtc->unpin_work = NULL; 9093 intel_crtc->unpin_work = NULL;
9320 spin_unlock_irqrestore(&dev->event_lock, flags); 9094 spin_unlock_irq(&dev->event_lock);
9321 9095
9322 if (work) { 9096 if (work) {
9323 cancel_work_sync(&work->work); 9097 cancel_work_sync(&work->work);
@@ -9363,6 +9137,10 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
9363 if (intel_crtc == NULL) 9137 if (intel_crtc == NULL)
9364 return; 9138 return;
9365 9139
9140 /*
9141 * This is called both by irq handlers and the reset code (to complete
9142 * lost pageflips) so needs the full irqsave spinlocks.
9143 */
9366 spin_lock_irqsave(&dev->event_lock, flags); 9144 spin_lock_irqsave(&dev->event_lock, flags);
9367 work = intel_crtc->unpin_work; 9145 work = intel_crtc->unpin_work;
9368 9146
@@ -9448,7 +9226,12 @@ void intel_prepare_page_flip(struct drm_device *dev, int plane)
9448 to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]); 9226 to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
9449 unsigned long flags; 9227 unsigned long flags;
9450 9228
9451 /* NB: An MMIO update of the plane base pointer will also 9229
9230 /*
9231 * This is called both by irq handlers and the reset code (to complete
9232 * lost pageflips) so needs the full irqsave spinlocks.
9233 *
9234 * NB: An MMIO update of the plane base pointer will also
9452 * generate a page-flip completion irq, i.e. every modeset 9235 * generate a page-flip completion irq, i.e. every modeset
9453 * is also accompanied by a spurious intel_prepare_page_flip(). 9236 * is also accompanied by a spurious intel_prepare_page_flip().
9454 */ 9237 */
@@ -9738,115 +9521,128 @@ static void intel_do_mmio_flip(struct intel_crtc *intel_crtc)
9738 struct intel_framebuffer *intel_fb = 9521 struct intel_framebuffer *intel_fb =
9739 to_intel_framebuffer(intel_crtc->base.primary->fb); 9522 to_intel_framebuffer(intel_crtc->base.primary->fb);
9740 struct drm_i915_gem_object *obj = intel_fb->obj; 9523 struct drm_i915_gem_object *obj = intel_fb->obj;
9524 bool atomic_update;
9525 u32 start_vbl_count;
9741 u32 dspcntr; 9526 u32 dspcntr;
9742 u32 reg; 9527 u32 reg;
9743 9528
9744 intel_mark_page_flip_active(intel_crtc); 9529 intel_mark_page_flip_active(intel_crtc);
9745 9530
9531 atomic_update = intel_pipe_update_start(intel_crtc, &start_vbl_count);
9532
9746 reg = DSPCNTR(intel_crtc->plane); 9533 reg = DSPCNTR(intel_crtc->plane);
9747 dspcntr = I915_READ(reg); 9534 dspcntr = I915_READ(reg);
9748 9535
9749 if (INTEL_INFO(dev)->gen >= 4) { 9536 if (obj->tiling_mode != I915_TILING_NONE)
9750 if (obj->tiling_mode != I915_TILING_NONE) 9537 dspcntr |= DISPPLANE_TILED;
9751 dspcntr |= DISPPLANE_TILED; 9538 else
9752 else 9539 dspcntr &= ~DISPPLANE_TILED;
9753 dspcntr &= ~DISPPLANE_TILED; 9540
9754 }
9755 I915_WRITE(reg, dspcntr); 9541 I915_WRITE(reg, dspcntr);
9756 9542
9757 I915_WRITE(DSPSURF(intel_crtc->plane), 9543 I915_WRITE(DSPSURF(intel_crtc->plane),
9758 intel_crtc->unpin_work->gtt_offset); 9544 intel_crtc->unpin_work->gtt_offset);
9759 POSTING_READ(DSPSURF(intel_crtc->plane)); 9545 POSTING_READ(DSPSURF(intel_crtc->plane));
9546
9547 if (atomic_update)
9548 intel_pipe_update_end(intel_crtc, start_vbl_count);
9760} 9549}
9761 9550
9762static int intel_postpone_flip(struct drm_i915_gem_object *obj) 9551static void intel_mmio_flip_work_func(struct work_struct *work)
9763{ 9552{
9553 struct intel_crtc *intel_crtc =
9554 container_of(work, struct intel_crtc, mmio_flip.work);
9764 struct intel_engine_cs *ring; 9555 struct intel_engine_cs *ring;
9765 int ret; 9556 uint32_t seqno;
9766 9557
9767 lockdep_assert_held(&obj->base.dev->struct_mutex); 9558 seqno = intel_crtc->mmio_flip.seqno;
9559 ring = intel_crtc->mmio_flip.ring;
9768 9560
9769 if (!obj->last_write_seqno) 9561 if (seqno)
9770 return 0; 9562 WARN_ON(__i915_wait_seqno(ring, seqno,
9563 intel_crtc->reset_counter,
9564 false, NULL, NULL) != 0);
9771 9565
9772 ring = obj->ring; 9566 intel_do_mmio_flip(intel_crtc);
9773
9774 if (i915_seqno_passed(ring->get_seqno(ring, true),
9775 obj->last_write_seqno))
9776 return 0;
9777
9778 ret = i915_gem_check_olr(ring, obj->last_write_seqno);
9779 if (ret)
9780 return ret;
9781
9782 if (WARN_ON(!ring->irq_get(ring)))
9783 return 0;
9784
9785 return 1;
9786} 9567}
9787 9568
9788void intel_notify_mmio_flip(struct intel_engine_cs *ring) 9569static int intel_queue_mmio_flip(struct drm_device *dev,
9570 struct drm_crtc *crtc,
9571 struct drm_framebuffer *fb,
9572 struct drm_i915_gem_object *obj,
9573 struct intel_engine_cs *ring,
9574 uint32_t flags)
9789{ 9575{
9790 struct drm_i915_private *dev_priv = to_i915(ring->dev); 9576 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9791 struct intel_crtc *intel_crtc;
9792 unsigned long irq_flags;
9793 u32 seqno;
9794
9795 seqno = ring->get_seqno(ring, false);
9796
9797 spin_lock_irqsave(&dev_priv->mmio_flip_lock, irq_flags);
9798 for_each_intel_crtc(ring->dev, intel_crtc) {
9799 struct intel_mmio_flip *mmio_flip;
9800 9577
9801 mmio_flip = &intel_crtc->mmio_flip; 9578 intel_crtc->mmio_flip.seqno = obj->last_write_seqno;
9802 if (mmio_flip->seqno == 0) 9579 intel_crtc->mmio_flip.ring = obj->ring;
9803 continue;
9804 9580
9805 if (ring->id != mmio_flip->ring_id) 9581 schedule_work(&intel_crtc->mmio_flip.work);
9806 continue;
9807 9582
9808 if (i915_seqno_passed(seqno, mmio_flip->seqno)) { 9583 return 0;
9809 intel_do_mmio_flip(intel_crtc);
9810 mmio_flip->seqno = 0;
9811 ring->irq_put(ring);
9812 }
9813 }
9814 spin_unlock_irqrestore(&dev_priv->mmio_flip_lock, irq_flags);
9815} 9584}
9816 9585
9817static int intel_queue_mmio_flip(struct drm_device *dev, 9586static int intel_gen9_queue_flip(struct drm_device *dev,
9818 struct drm_crtc *crtc, 9587 struct drm_crtc *crtc,
9819 struct drm_framebuffer *fb, 9588 struct drm_framebuffer *fb,
9820 struct drm_i915_gem_object *obj, 9589 struct drm_i915_gem_object *obj,
9821 struct intel_engine_cs *ring, 9590 struct intel_engine_cs *ring,
9822 uint32_t flags) 9591 uint32_t flags)
9823{ 9592{
9824 struct drm_i915_private *dev_priv = dev->dev_private;
9825 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 9593 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9826 unsigned long irq_flags; 9594 uint32_t plane = 0, stride;
9827 int ret; 9595 int ret;
9828 9596
 9829 if (WARN_ON(intel_crtc->mmio_flip.seqno)) 9597 switch (intel_crtc->pipe) {
9830 return -EBUSY; 9598 case PIPE_A:
9599 plane = MI_DISPLAY_FLIP_SKL_PLANE_1_A;
9600 break;
9601 case PIPE_B:
9602 plane = MI_DISPLAY_FLIP_SKL_PLANE_1_B;
9603 break;
9604 case PIPE_C:
9605 plane = MI_DISPLAY_FLIP_SKL_PLANE_1_C;
9606 break;
9607 default:
9608 WARN_ONCE(1, "unknown plane in flip command\n");
9609 return -ENODEV;
9610 }
9831 9611
9832 ret = intel_postpone_flip(obj); 9612 switch (obj->tiling_mode) {
9833 if (ret < 0) 9613 case I915_TILING_NONE:
9834 return ret; 9614 stride = fb->pitches[0] >> 6;
9835 if (ret == 0) { 9615 break;
9836 intel_do_mmio_flip(intel_crtc); 9616 case I915_TILING_X:
9837 return 0; 9617 stride = fb->pitches[0] >> 9;
9618 break;
9619 default:
9620 WARN_ONCE(1, "unknown tiling in flip command\n");
9621 return -ENODEV;
9838 } 9622 }
9839 9623
9840 spin_lock_irqsave(&dev_priv->mmio_flip_lock, irq_flags); 9624 ret = intel_ring_begin(ring, 10);
9841 intel_crtc->mmio_flip.seqno = obj->last_write_seqno; 9625 if (ret)
9842 intel_crtc->mmio_flip.ring_id = obj->ring->id; 9626 return ret;
9843 spin_unlock_irqrestore(&dev_priv->mmio_flip_lock, irq_flags); 9627
9628 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
9629 intel_ring_emit(ring, DERRMR);
9630 intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
9631 DERRMR_PIPEB_PRI_FLIP_DONE |
9632 DERRMR_PIPEC_PRI_FLIP_DONE));
9633 intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8(1) |
9634 MI_SRM_LRM_GLOBAL_GTT);
9635 intel_ring_emit(ring, DERRMR);
9636 intel_ring_emit(ring, ring->scratch.gtt_offset + 256);
9637 intel_ring_emit(ring, 0);
9638
9639 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane);
9640 intel_ring_emit(ring, stride << 6 | obj->tiling_mode);
9641 intel_ring_emit(ring, intel_crtc->unpin_work->gtt_offset);
9642
9643 intel_mark_page_flip_active(intel_crtc);
9644 __intel_ring_advance(ring);
9844 9645
9845 /*
9846 * Double check to catch cases where irq fired before
9847 * mmio flip data was ready
9848 */
9849 intel_notify_mmio_flip(obj->ring);
9850 return 0; 9646 return 0;
9851} 9647}
9852 9648
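
intel_gen9_queue_flip() is new for Skylake. Two details worth calling out: the MI_DISPLAY_FLIP stride field is expressed in hardware units rather than bytes, 64-byte units for linear buffers and 512-byte X-tile widths for X-tiled ones (hence the two shifts), and the command sequence first masks the primary-plane flip-done bits in DERRMR, storing the register to the ring's scratch page before the flip itself. Worked stride example (illustrative geometry):

	/* linear 1920x1080 XRGB: pitch = 1920 * 4 = 7680 bytes */
	stride = 7680 >> 6;	/* == 120 units of 64 bytes */

	/* X-tiled buffer with the same byte pitch: */
	stride = 7680 >> 9;	/* == 15 X-tile widths of 512 bytes */

	/* emitted as: stride << 6 | obj->tiling_mode */
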
@@ -9905,18 +9701,19 @@ void intel_check_page_flip(struct drm_device *dev, int pipe)
9905 struct drm_i915_private *dev_priv = dev->dev_private; 9701 struct drm_i915_private *dev_priv = dev->dev_private;
9906 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 9702 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
9907 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 9703 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9908 unsigned long flags; 9704
9705 WARN_ON(!in_irq());
9909 9706
9910 if (crtc == NULL) 9707 if (crtc == NULL)
9911 return; 9708 return;
9912 9709
9913 spin_lock_irqsave(&dev->event_lock, flags); 9710 spin_lock(&dev->event_lock);
9914 if (intel_crtc->unpin_work && __intel_pageflip_stall_check(dev, crtc)) { 9711 if (intel_crtc->unpin_work && __intel_pageflip_stall_check(dev, crtc)) {
9915 WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n", 9712 WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n",
9916 intel_crtc->unpin_work->flip_queued_vblank, drm_vblank_count(dev, pipe)); 9713 intel_crtc->unpin_work->flip_queued_vblank, drm_vblank_count(dev, pipe));
9917 page_flip_completed(intel_crtc); 9714 page_flip_completed(intel_crtc);
9918 } 9715 }
9919 spin_unlock_irqrestore(&dev->event_lock, flags); 9716 spin_unlock(&dev->event_lock);
9920} 9717}
9921 9718
9922static int intel_crtc_page_flip(struct drm_crtc *crtc, 9719static int intel_crtc_page_flip(struct drm_crtc *crtc,
@@ -9932,7 +9729,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
9932 enum pipe pipe = intel_crtc->pipe; 9729 enum pipe pipe = intel_crtc->pipe;
9933 struct intel_unpin_work *work; 9730 struct intel_unpin_work *work;
9934 struct intel_engine_cs *ring; 9731 struct intel_engine_cs *ring;
9935 unsigned long flags;
9936 int ret; 9732 int ret;
9937 9733
9938 /* 9734 /*
@@ -9973,7 +9769,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
9973 goto free_work; 9769 goto free_work;
9974 9770
9975 /* We borrow the event spin lock for protecting unpin_work */ 9771 /* We borrow the event spin lock for protecting unpin_work */
9976 spin_lock_irqsave(&dev->event_lock, flags); 9772 spin_lock_irq(&dev->event_lock);
9977 if (intel_crtc->unpin_work) { 9773 if (intel_crtc->unpin_work) {
9978 /* Before declaring the flip queue wedged, check if 9774 /* Before declaring the flip queue wedged, check if
9979 * the hardware completed the operation behind our backs. 9775 * the hardware completed the operation behind our backs.
@@ -9983,7 +9779,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
9983 page_flip_completed(intel_crtc); 9779 page_flip_completed(intel_crtc);
9984 } else { 9780 } else {
9985 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); 9781 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
9986 spin_unlock_irqrestore(&dev->event_lock, flags); 9782 spin_unlock_irq(&dev->event_lock);
9987 9783
9988 drm_crtc_vblank_put(crtc); 9784 drm_crtc_vblank_put(crtc);
9989 kfree(work); 9785 kfree(work);
@@ -9991,7 +9787,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
9991 } 9787 }
9992 } 9788 }
9993 intel_crtc->unpin_work = work; 9789 intel_crtc->unpin_work = work;
9994 spin_unlock_irqrestore(&dev->event_lock, flags); 9790 spin_unlock_irq(&dev->event_lock);
9995 9791
9996 if (atomic_read(&intel_crtc->unpin_work_count) >= 2) 9792 if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
9997 flush_workqueue(dev_priv->wq); 9793 flush_workqueue(dev_priv->wq);
@@ -10029,7 +9825,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
10029 ring = &dev_priv->ring[RCS]; 9825 ring = &dev_priv->ring[RCS];
10030 } 9826 }
10031 9827
10032 ret = intel_pin_and_fence_fb_obj(dev, obj, ring); 9828 ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, ring);
10033 if (ret) 9829 if (ret)
10034 goto cleanup_pending; 9830 goto cleanup_pending;
10035 9831
@@ -10078,9 +9874,9 @@ cleanup_pending:
10078 mutex_unlock(&dev->struct_mutex); 9874 mutex_unlock(&dev->struct_mutex);
10079 9875
10080cleanup: 9876cleanup:
10081 spin_lock_irqsave(&dev->event_lock, flags); 9877 spin_lock_irq(&dev->event_lock);
10082 intel_crtc->unpin_work = NULL; 9878 intel_crtc->unpin_work = NULL;
10083 spin_unlock_irqrestore(&dev->event_lock, flags); 9879 spin_unlock_irq(&dev->event_lock);
10084 9880
10085 drm_crtc_vblank_put(crtc); 9881 drm_crtc_vblank_put(crtc);
10086free_work: 9882free_work:
@@ -10091,9 +9887,9 @@ out_hang:
10091 intel_crtc_wait_for_pending_flips(crtc); 9887 intel_crtc_wait_for_pending_flips(crtc);
10092 ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb); 9888 ret = intel_pipe_set_base(crtc, crtc->x, crtc->y, fb);
10093 if (ret == 0 && event) { 9889 if (ret == 0 && event) {
10094 spin_lock_irqsave(&dev->event_lock, flags); 9890 spin_lock_irq(&dev->event_lock);
10095 drm_send_vblank_event(dev, pipe, event); 9891 drm_send_vblank_event(dev, pipe, event);
10096 spin_unlock_irqrestore(&dev->event_lock, flags); 9892 spin_unlock_irq(&dev->event_lock);
10097 } 9893 }
10098 } 9894 }
10099 return ret; 9895 return ret;
@@ -10289,6 +10085,10 @@ static void intel_dump_pipe_config(struct intel_crtc *crtc,
10289 pipe_config->dp_m2_n2.link_n, 10085 pipe_config->dp_m2_n2.link_n,
10290 pipe_config->dp_m2_n2.tu); 10086 pipe_config->dp_m2_n2.tu);
10291 10087
10088 DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
10089 pipe_config->has_audio,
10090 pipe_config->has_infoframe);
10091
10292 DRM_DEBUG_KMS("requested mode:\n"); 10092 DRM_DEBUG_KMS("requested mode:\n");
10293 drm_mode_debug_printmodeline(&pipe_config->requested_mode); 10093 drm_mode_debug_printmodeline(&pipe_config->requested_mode);
10294 DRM_DEBUG_KMS("adjusted mode:\n"); 10094 DRM_DEBUG_KMS("adjusted mode:\n");
@@ -10350,6 +10150,48 @@ static bool check_encoder_cloning(struct intel_crtc *crtc)
10350 return true; 10150 return true;
10351} 10151}
10352 10152
10153static bool check_digital_port_conflicts(struct drm_device *dev)
10154{
10155 struct intel_connector *connector;
10156 unsigned int used_ports = 0;
10157
10158 /*
10159 * Walk the connector list instead of the encoder
10160 * list to detect the problem on ddi platforms
10161 * where there's just one encoder per digital port.
10162 */
10163 list_for_each_entry(connector,
10164 &dev->mode_config.connector_list, base.head) {
10165 struct intel_encoder *encoder = connector->new_encoder;
10166
10167 if (!encoder)
10168 continue;
10169
10170 WARN_ON(!encoder->new_crtc);
10171
10172 switch (encoder->type) {
10173 unsigned int port_mask;
10174 case INTEL_OUTPUT_UNKNOWN:
10175 if (WARN_ON(!HAS_DDI(dev)))
10176 break;
10177 case INTEL_OUTPUT_DISPLAYPORT:
10178 case INTEL_OUTPUT_HDMI:
10179 case INTEL_OUTPUT_EDP:
10180 port_mask = 1 << enc_to_dig_port(&encoder->base)->port;
10181
10182 /* the same port mustn't appear more than once */
10183 if (used_ports & port_mask)
10184 return false;
10185
10186 used_ports |= port_mask;
10187 default:
10188 break;
10189 }
10190 }
10191
10192 return true;
10193}
10194
10353static struct intel_crtc_config * 10195static struct intel_crtc_config *
10354intel_modeset_pipe_config(struct drm_crtc *crtc, 10196intel_modeset_pipe_config(struct drm_crtc *crtc,
10355 struct drm_framebuffer *fb, 10197 struct drm_framebuffer *fb,
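
check_digital_port_conflicts() rejects staged configurations in which two encoders would drive the same digital port; note the deliberate fall-throughs, from INTEL_OUTPUT_UNKNOWN (a DDI-only state, hence the WARN) into the digital-port cases, and from those into default. The duplicate test in miniature:

	unsigned int used_ports = 0;
	unsigned int port_mask = 1 << port;	/* PORT_A == 0, PORT_B == 1, ... */

	if (used_ports & port_mask)
		return false;			/* port claimed twice */
	used_ports |= port_mask;		/* first claim wins */
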
@@ -10366,6 +10208,11 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
10366 return ERR_PTR(-EINVAL); 10208 return ERR_PTR(-EINVAL);
10367 } 10209 }
10368 10210
10211 if (!check_digital_port_conflicts(dev)) {
10212 DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
10213 return ERR_PTR(-EINVAL);
10214 }
10215
10369 pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL); 10216 pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
10370 if (!pipe_config) 10217 if (!pipe_config)
10371 return ERR_PTR(-ENOMEM); 10218 return ERR_PTR(-ENOMEM);
@@ -10571,10 +10418,13 @@ static bool intel_crtc_in_use(struct drm_crtc *crtc)
10571static void 10418static void
10572intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes) 10419intel_modeset_update_state(struct drm_device *dev, unsigned prepare_pipes)
10573{ 10420{
10421 struct drm_i915_private *dev_priv = dev->dev_private;
10574 struct intel_encoder *intel_encoder; 10422 struct intel_encoder *intel_encoder;
10575 struct intel_crtc *intel_crtc; 10423 struct intel_crtc *intel_crtc;
10576 struct drm_connector *connector; 10424 struct drm_connector *connector;
10577 10425
10426 intel_shared_dpll_commit(dev_priv);
10427
10578 for_each_intel_encoder(dev, intel_encoder) { 10428 for_each_intel_encoder(dev, intel_encoder) {
10579 if (!intel_encoder->base.crtc) 10429 if (!intel_encoder->base.crtc)
10580 continue; 10430 continue;
@@ -10754,6 +10604,7 @@ intel_pipe_config_compare(struct drm_device *dev,
10754 if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) || 10604 if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
10755 IS_VALLEYVIEW(dev)) 10605 IS_VALLEYVIEW(dev))
10756 PIPE_CONF_CHECK_I(limited_color_range); 10606 PIPE_CONF_CHECK_I(limited_color_range);
10607 PIPE_CONF_CHECK_I(has_infoframe);
10757 10608
10758 PIPE_CONF_CHECK_I(has_audio); 10609 PIPE_CONF_CHECK_I(has_audio);
10759 10610
@@ -10810,6 +10661,9 @@ intel_pipe_config_compare(struct drm_device *dev,
10810 PIPE_CONF_CHECK_X(dpll_hw_state.fp0); 10661 PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
10811 PIPE_CONF_CHECK_X(dpll_hw_state.fp1); 10662 PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
10812 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll); 10663 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
10664 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
10665 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
10666 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
10813 10667
10814 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) 10668 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
10815 PIPE_CONF_CHECK_I(pipe_bpp); 10669 PIPE_CONF_CHECK_I(pipe_bpp);
@@ -10827,6 +10681,56 @@ intel_pipe_config_compare(struct drm_device *dev,
10827 return true; 10681 return true;
10828} 10682}
10829 10683
10684static void check_wm_state(struct drm_device *dev)
10685{
10686 struct drm_i915_private *dev_priv = dev->dev_private;
10687 struct skl_ddb_allocation hw_ddb, *sw_ddb;
10688 struct intel_crtc *intel_crtc;
10689 int plane;
10690
10691 if (INTEL_INFO(dev)->gen < 9)
10692 return;
10693
10694 skl_ddb_get_hw_state(dev_priv, &hw_ddb);
10695 sw_ddb = &dev_priv->wm.skl_hw.ddb;
10696
10697 for_each_intel_crtc(dev, intel_crtc) {
10698 struct skl_ddb_entry *hw_entry, *sw_entry;
10699 const enum pipe pipe = intel_crtc->pipe;
10700
10701 if (!intel_crtc->active)
10702 continue;
10703
10704 /* planes */
10705 for_each_plane(pipe, plane) {
10706 hw_entry = &hw_ddb.plane[pipe][plane];
10707 sw_entry = &sw_ddb->plane[pipe][plane];
10708
10709 if (skl_ddb_entry_equal(hw_entry, sw_entry))
10710 continue;
10711
10712 DRM_ERROR("mismatch in DDB state pipe %c plane %d "
10713 "(expected (%u,%u), found (%u,%u))\n",
10714 pipe_name(pipe), plane + 1,
10715 sw_entry->start, sw_entry->end,
10716 hw_entry->start, hw_entry->end);
10717 }
10718
10719 /* cursor */
10720 hw_entry = &hw_ddb.cursor[pipe];
10721 sw_entry = &sw_ddb->cursor[pipe];
10722
10723 if (skl_ddb_entry_equal(hw_entry, sw_entry))
10724 continue;
10725
10726 DRM_ERROR("mismatch in DDB state pipe %c cursor "
10727 "(expected (%u,%u), found (%u,%u))\n",
10728 pipe_name(pipe),
10729 sw_entry->start, sw_entry->end,
10730 hw_entry->start, hw_entry->end);
10731 }
10732}
10733
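check_wm_state() follows the driver's usual cross-checking pattern: read the DDB allocation back from the hardware, compare it entry by entry against the software tracking copy, and complain loudly on any mismatch. A toy version of that pattern, with a made-up ddb_entry type standing in for skl_ddb_entry:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct ddb_entry { uint16_t start, end; };

    static int ddb_entry_equal(const struct ddb_entry *a, const struct ddb_entry *b)
    {
            return memcmp(a, b, sizeof(*a)) == 0;
    }

    /* Compare a hardware readout against the software tracking copy. */
    static void check_entry(const struct ddb_entry *sw, const struct ddb_entry *hw)
    {
            if (ddb_entry_equal(sw, hw))
                    return;
            fprintf(stderr, "mismatch (expected (%u,%u), found (%u,%u))\n",
                    sw->start, sw->end, hw->start, hw->end);
    }

    int main(void)
    {
            struct ddb_entry sw = { 0, 64 }, hw = { 0, 48 };

            check_entry(&sw, &hw);  /* prints the mismatch */
            return 0;
    }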
10830static void 10734static void
10831check_connector_state(struct drm_device *dev) 10735check_connector_state(struct drm_device *dev)
10832{ 10736{
@@ -10993,9 +10897,9 @@ check_shared_dpll_state(struct drm_device *dev)
10993 10897
10994 active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state); 10898 active = pll->get_hw_state(dev_priv, pll, &dpll_hw_state);
10995 10899
10996 WARN(pll->active > pll->refcount, 10900 WARN(pll->active > hweight32(pll->config.crtc_mask),
10997 "more active pll users than references: %i vs %i\n", 10901 "more active pll users than references: %i vs %i\n",
10998 pll->active, pll->refcount); 10902 pll->active, hweight32(pll->config.crtc_mask));
10999 WARN(pll->active && !pll->on, 10903 WARN(pll->active && !pll->on,
11000 "pll in active use but not on in sw tracking\n"); 10904 "pll in active use but not on in sw tracking\n");
11001 WARN(pll->on && !pll->active, 10905 WARN(pll->on && !pll->active,
@@ -11013,11 +10917,11 @@ check_shared_dpll_state(struct drm_device *dev)
11013 WARN(pll->active != active_crtcs, 10917 WARN(pll->active != active_crtcs,
11014 "pll active crtcs mismatch (expected %i, found %i)\n", 10918 "pll active crtcs mismatch (expected %i, found %i)\n",
11015 pll->active, active_crtcs); 10919 pll->active, active_crtcs);
11016 WARN(pll->refcount != enabled_crtcs, 10920 WARN(hweight32(pll->config.crtc_mask) != enabled_crtcs,
11017 "pll enabled crtcs mismatch (expected %i, found %i)\n", 10921 "pll enabled crtcs mismatch (expected %i, found %i)\n",
11018 pll->refcount, enabled_crtcs); 10922 hweight32(pll->config.crtc_mask), enabled_crtcs);
11019 10923
11020 WARN(pll->on && memcmp(&pll->hw_state, &dpll_hw_state, 10924 WARN(pll->on && memcmp(&pll->config.hw_state, &dpll_hw_state,
11021 sizeof(dpll_hw_state)), 10925 sizeof(dpll_hw_state)),
11022 "pll hw state mismatch\n"); 10926 "pll hw state mismatch\n");
11023 } 10927 }
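The refcount field on shared DPLLs is gone here; the user count is now derived on demand from the crtc_mask bitmask via hweight32(), the kernel's 32-bit population count. Outside the kernel the same value comes from a compiler builtin; a quick sketch of the equivalence (hweight32 below is a local stand-in, not the kernel symbol):

    #include <stdint.h>
    #include <stdio.h>

    /* Userspace stand-in for the kernel's hweight32(): count set bits. */
    static unsigned int hweight32(uint32_t w)
    {
            return (unsigned int)__builtin_popcount(w);
    }

    int main(void)
    {
            uint32_t crtc_mask = (1u << 0) | (1u << 2);     /* pipes A and C */

            /* Two CRTCs reference the PLL, so its "refcount" is 2. */
            printf("users: %u\n", hweight32(crtc_mask));
            return 0;
    }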
@@ -11026,6 +10930,7 @@ check_shared_dpll_state(struct drm_device *dev)
11026void 10930void
11027intel_modeset_check_state(struct drm_device *dev) 10931intel_modeset_check_state(struct drm_device *dev)
11028{ 10932{
10933 check_wm_state(dev);
11029 check_connector_state(dev); 10934 check_connector_state(dev);
11030 check_encoder_state(dev); 10935 check_encoder_state(dev);
11031 check_crtc_state(dev); 10936 check_crtc_state(dev);
@@ -11076,50 +10981,67 @@ static void update_scanline_offset(struct intel_crtc *crtc)
11076 10981
11077 crtc->scanline_offset = vtotal - 1; 10982 crtc->scanline_offset = vtotal - 1;
11078 } else if (HAS_DDI(dev) && 10983 } else if (HAS_DDI(dev) &&
11079 intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI)) { 10984 intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
11080 crtc->scanline_offset = 2; 10985 crtc->scanline_offset = 2;
11081 } else 10986 } else
11082 crtc->scanline_offset = 1; 10987 crtc->scanline_offset = 1;
11083} 10988}
11084 10989
10990static struct intel_crtc_config *
10991intel_modeset_compute_config(struct drm_crtc *crtc,
10992 struct drm_display_mode *mode,
10993 struct drm_framebuffer *fb,
10994 unsigned *modeset_pipes,
10995 unsigned *prepare_pipes,
10996 unsigned *disable_pipes)
10997{
10998 struct intel_crtc_config *pipe_config = NULL;
10999
11000 intel_modeset_affected_pipes(crtc, modeset_pipes,
11001 prepare_pipes, disable_pipes);
11002
11003 if ((*modeset_pipes) == 0)
11004 goto out;
11005
11006 /*
11007 * Note this needs changes when we start tracking multiple modes
11008 * and crtcs. At that point we'll need to compute the whole config
11009 * (i.e. one pipe_config for each crtc) rather than just the one
11010 * for this crtc.
11011 */
11012 pipe_config = intel_modeset_pipe_config(crtc, fb, mode);
11013 if (IS_ERR(pipe_config)) {
11014 goto out;
11015 }
11016 intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
11017 "[modeset]");
11018
11019out:
11020 return pipe_config;
11021}
11022
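intel_modeset_compute_config() above uses the kernel's single-exit goto style: every early-return path funnels through one label so the function always leaves through the same point, returning NULL, an ERR_PTR, or the computed config. The shape of that idiom, reduced to a standalone sketch:

    #include <stdio.h>
    #include <stdlib.h>

    static int *compute_thing(int want)
    {
            int *result = NULL;

            if (!want)
                    goto out;       /* nothing to do, return NULL */

            result = malloc(sizeof(*result));
            if (!result)
                    goto out;       /* allocation failed, return NULL */

            *result = 42;
    out:
            return result;          /* single exit point */
    }

    int main(void)
    {
            int *p = compute_thing(1);

            if (p)
                    printf("%d\n", *p);
            free(p);
            return 0;
    }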
11085static int __intel_set_mode(struct drm_crtc *crtc, 11023static int __intel_set_mode(struct drm_crtc *crtc,
11086 struct drm_display_mode *mode, 11024 struct drm_display_mode *mode,
11087 int x, int y, struct drm_framebuffer *fb) 11025 int x, int y, struct drm_framebuffer *fb,
11026 struct intel_crtc_config *pipe_config,
11027 unsigned modeset_pipes,
11028 unsigned prepare_pipes,
11029 unsigned disable_pipes)
11088{ 11030{
11089 struct drm_device *dev = crtc->dev; 11031 struct drm_device *dev = crtc->dev;
11090 struct drm_i915_private *dev_priv = dev->dev_private; 11032 struct drm_i915_private *dev_priv = dev->dev_private;
11091 struct drm_display_mode *saved_mode; 11033 struct drm_display_mode *saved_mode;
11092 struct intel_crtc_config *pipe_config = NULL;
11093 struct intel_crtc *intel_crtc; 11034 struct intel_crtc *intel_crtc;
11094 unsigned disable_pipes, prepare_pipes, modeset_pipes;
11095 int ret = 0; 11035 int ret = 0;
11096 11036
11097 saved_mode = kmalloc(sizeof(*saved_mode), GFP_KERNEL); 11037 saved_mode = kmalloc(sizeof(*saved_mode), GFP_KERNEL);
11098 if (!saved_mode) 11038 if (!saved_mode)
11099 return -ENOMEM; 11039 return -ENOMEM;
11100 11040
11101 intel_modeset_affected_pipes(crtc, &modeset_pipes,
11102 &prepare_pipes, &disable_pipes);
11103
11104 *saved_mode = crtc->mode; 11041 *saved_mode = crtc->mode;
11105 11042
11106 /* Hack: Because we don't (yet) support global modeset on multiple 11043 if (modeset_pipes)
11107 * crtcs, we don't keep track of the new mode for more than one crtc.
11108 * Hence simply check whether any bit is set in modeset_pipes in all the
11109 * pieces of code that are not yet converted to deal with multiple crtcs
11110 * changing their mode at the same time. */
11111 if (modeset_pipes) {
11112 pipe_config = intel_modeset_pipe_config(crtc, fb, mode);
11113 if (IS_ERR(pipe_config)) {
11114 ret = PTR_ERR(pipe_config);
11115 pipe_config = NULL;
11116
11117 goto out;
11118 }
11119 intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
11120 "[modeset]");
11121 to_intel_crtc(crtc)->new_config = pipe_config; 11044 to_intel_crtc(crtc)->new_config = pipe_config;
11122 }
11123 11045
11124 /* 11046 /*
11125 * See if the config requires any additional preparation, e.g. 11047 * See if the config requires any additional preparation, e.g.
@@ -11135,6 +11057,22 @@ static int __intel_set_mode(struct drm_crtc *crtc,
11135 prepare_pipes &= ~disable_pipes; 11057 prepare_pipes &= ~disable_pipes;
11136 } 11058 }
11137 11059
11060 if (dev_priv->display.crtc_compute_clock) {
11061 unsigned clear_pipes = modeset_pipes | disable_pipes;
11062
11063 ret = intel_shared_dpll_start_config(dev_priv, clear_pipes);
11064 if (ret)
11065 goto done;
11066
11067 for_each_intel_crtc_masked(dev, modeset_pipes, intel_crtc) {
11068 ret = dev_priv->display.crtc_compute_clock(intel_crtc);
11069 if (ret) {
11070 intel_shared_dpll_abort_config(dev_priv);
11071 goto done;
11072 }
11073 }
11074 }
11075
11138 for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc) 11076 for_each_intel_crtc_masked(dev, disable_pipes, intel_crtc)
11139 intel_crtc_disable(&intel_crtc->base); 11077 intel_crtc_disable(&intel_crtc->base);
11140 11078
@@ -11145,6 +11083,10 @@ static int __intel_set_mode(struct drm_crtc *crtc,
11145 11083
11146 /* crtc->mode is already used by the ->mode_set callbacks, hence we need 11084 /* crtc->mode is already used by the ->mode_set callbacks, hence we need
11147 * to set it here already despite that we pass it down the callchain. 11085 * to set it here already despite that we pass it down the callchain.
11086 *
11087 * Note we'll need to fix this up when we start tracking multiple
11088 * pipes; here we assume a single modeset_pipe and only track the
11089 * single crtc and mode.
11148 */ 11090 */
11149 if (modeset_pipes) { 11091 if (modeset_pipes) {
11150 crtc->mode = *mode; 11092 crtc->mode = *mode;
@@ -11166,8 +11108,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
11166 * update the output configuration. */ 11108 * update the output configuration. */
11167 intel_modeset_update_state(dev, prepare_pipes); 11109 intel_modeset_update_state(dev, prepare_pipes);
11168 11110
11169 if (dev_priv->display.modeset_global_resources) 11111 modeset_update_crtc_power_domains(dev);
11170 dev_priv->display.modeset_global_resources(dev);
11171 11112
11172 /* Set up the DPLL and any encoders state that needs to adjust or depend 11113 /* Set up the DPLL and any encoders state that needs to adjust or depend
11173 * on the DPLL. 11114 * on the DPLL.
@@ -11178,9 +11119,7 @@ static int __intel_set_mode(struct drm_crtc *crtc,
11178 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 11119 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11179 11120
11180 mutex_lock(&dev->struct_mutex); 11121 mutex_lock(&dev->struct_mutex);
11181 ret = intel_pin_and_fence_fb_obj(dev, 11122 ret = intel_pin_and_fence_fb_obj(crtc->primary, fb, NULL);
11182 obj,
11183 NULL);
11184 if (ret != 0) { 11123 if (ret != 0) {
11185 DRM_ERROR("pin & fence failed\n"); 11124 DRM_ERROR("pin & fence failed\n");
11186 mutex_unlock(&dev->struct_mutex); 11125 mutex_unlock(&dev->struct_mutex);
@@ -11195,11 +11134,6 @@ static int __intel_set_mode(struct drm_crtc *crtc,
11195 crtc->primary->fb = fb; 11134 crtc->primary->fb = fb;
11196 crtc->x = x; 11135 crtc->x = x;
11197 crtc->y = y; 11136 crtc->y = y;
11198
11199 ret = dev_priv->display.crtc_mode_set(&intel_crtc->base,
11200 x, y, fb);
11201 if (ret)
11202 goto done;
11203 } 11137 }
11204 11138
11205 /* Now enable the clocks, plane, pipe, and connectors that we set up. */ 11139 /* Now enable the clocks, plane, pipe, and connectors that we set up. */
@@ -11214,19 +11148,23 @@ done:
11214 if (ret && crtc->enabled) 11148 if (ret && crtc->enabled)
11215 crtc->mode = *saved_mode; 11149 crtc->mode = *saved_mode;
11216 11150
11217out:
11218 kfree(pipe_config); 11151 kfree(pipe_config);
11219 kfree(saved_mode); 11152 kfree(saved_mode);
11220 return ret; 11153 return ret;
11221} 11154}
11222 11155
11223static int intel_set_mode(struct drm_crtc *crtc, 11156static int intel_set_mode_pipes(struct drm_crtc *crtc,
11224 struct drm_display_mode *mode, 11157 struct drm_display_mode *mode,
11225 int x, int y, struct drm_framebuffer *fb) 11158 int x, int y, struct drm_framebuffer *fb,
11159 struct intel_crtc_config *pipe_config,
11160 unsigned modeset_pipes,
11161 unsigned prepare_pipes,
11162 unsigned disable_pipes)
11226{ 11163{
11227 int ret; 11164 int ret;
11228 11165
11229 ret = __intel_set_mode(crtc, mode, x, y, fb); 11166 ret = __intel_set_mode(crtc, mode, x, y, fb, pipe_config, modeset_pipes,
11167 prepare_pipes, disable_pipes);
11230 11168
11231 if (ret == 0) 11169 if (ret == 0)
11232 intel_modeset_check_state(crtc->dev); 11170 intel_modeset_check_state(crtc->dev);
@@ -11234,6 +11172,26 @@ static int intel_set_mode(struct drm_crtc *crtc,
11234 return ret; 11172 return ret;
11235} 11173}
11236 11174
11175static int intel_set_mode(struct drm_crtc *crtc,
11176 struct drm_display_mode *mode,
11177 int x, int y, struct drm_framebuffer *fb)
11178{
11179 struct intel_crtc_config *pipe_config;
11180 unsigned modeset_pipes, prepare_pipes, disable_pipes;
11181
11182 pipe_config = intel_modeset_compute_config(crtc, mode, fb,
11183 &modeset_pipes,
11184 &prepare_pipes,
11185 &disable_pipes);
11186
11187 if (IS_ERR(pipe_config))
11188 return PTR_ERR(pipe_config);
11189
11190 return intel_set_mode_pipes(crtc, mode, x, y, fb, pipe_config,
11191 modeset_pipes, prepare_pipes,
11192 disable_pipes);
11193}
11194
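The new intel_set_mode() wrapper relies on the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() convention to return errors through the pointer itself: a small negative errno is cast into the unmapped top of the address space, so a single return value can carry either a valid pointer or an error code. A self-contained model of that encoding (these are re-implementations for illustration, not the kernel headers):

    #include <stdio.h>

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error)    { return (void *)error; }
    static inline long  PTR_ERR(const void *p) { return (long)p; }
    static inline int   IS_ERR(const void *p)
    {
            return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    static void *get_config(int fail)
    {
            static int config = 1;

            return fail ? ERR_PTR(-22 /* -EINVAL */) : &config;
    }

    int main(void)
    {
            void *cfg = get_config(1);

            if (IS_ERR(cfg))
                    printf("error %ld\n", PTR_ERR(cfg));    /* error -22 */
            return 0;
    }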
11237void intel_crtc_restore_mode(struct drm_crtc *crtc) 11195void intel_crtc_restore_mode(struct drm_crtc *crtc)
11238{ 11196{
11239 intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->primary->fb); 11197 intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, crtc->primary->fb);
@@ -11562,6 +11520,8 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
11562 struct drm_device *dev; 11520 struct drm_device *dev;
11563 struct drm_mode_set save_set; 11521 struct drm_mode_set save_set;
11564 struct intel_set_config *config; 11522 struct intel_set_config *config;
11523 struct intel_crtc_config *pipe_config;
11524 unsigned modeset_pipes, prepare_pipes, disable_pipes;
11565 int ret; 11525 int ret;
11566 11526
11567 BUG_ON(!set); 11527 BUG_ON(!set);
@@ -11607,9 +11567,38 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
11607 if (ret) 11567 if (ret)
11608 goto fail; 11568 goto fail;
11609 11569
11570 pipe_config = intel_modeset_compute_config(set->crtc, set->mode,
11571 set->fb,
11572 &modeset_pipes,
11573 &prepare_pipes,
11574 &disable_pipes);
11575 if (IS_ERR(pipe_config)) {
11576 ret = PTR_ERR(pipe_config);
11577 goto fail;
11578 } else if (pipe_config) {
11579 if (pipe_config->has_audio !=
11580 to_intel_crtc(set->crtc)->config.has_audio)
11581 config->mode_changed = true;
11582
11583 /*
11584 * Note we have an issue here with infoframes: current code
11585 * only updates them on the full mode set path per hw
11586 * requirements. So here we should be checking for any
11587 * required changes and forcing a mode set.
11588 */
11589 }
11590
11591 /* set_mode will free it in the mode_changed case */
11592 if (!config->mode_changed)
11593 kfree(pipe_config);
11594
11595 intel_update_pipe_size(to_intel_crtc(set->crtc));
11596
11610 if (config->mode_changed) { 11597 if (config->mode_changed) {
11611 ret = intel_set_mode(set->crtc, set->mode, 11598 ret = intel_set_mode_pipes(set->crtc, set->mode,
11612 set->x, set->y, set->fb); 11599 set->x, set->y, set->fb, pipe_config,
11600 modeset_pipes, prepare_pipes,
11601 disable_pipes);
11613 } else if (config->fb_changed) { 11602 } else if (config->fb_changed) {
11614 struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc); 11603 struct intel_crtc *intel_crtc = to_intel_crtc(set->crtc);
11615 11604
@@ -11679,7 +11668,7 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
11679{ 11668{
11680 uint32_t val; 11669 uint32_t val;
11681 11670
11682 if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_PLLS)) 11671 if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
11683 return false; 11672 return false;
11684 11673
11685 val = I915_READ(PCH_DPLL(pll->id)); 11674 val = I915_READ(PCH_DPLL(pll->id));
@@ -11693,8 +11682,8 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
11693static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv, 11682static void ibx_pch_dpll_mode_set(struct drm_i915_private *dev_priv,
11694 struct intel_shared_dpll *pll) 11683 struct intel_shared_dpll *pll)
11695{ 11684{
11696 I915_WRITE(PCH_FP0(pll->id), pll->hw_state.fp0); 11685 I915_WRITE(PCH_FP0(pll->id), pll->config.hw_state.fp0);
11697 I915_WRITE(PCH_FP1(pll->id), pll->hw_state.fp1); 11686 I915_WRITE(PCH_FP1(pll->id), pll->config.hw_state.fp1);
11698} 11687}
11699 11688
11700static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv, 11689static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
@@ -11703,7 +11692,7 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
11703 /* PCH refclock must be enabled first */ 11692 /* PCH refclock must be enabled first */
11704 ibx_assert_pch_refclk_enabled(dev_priv); 11693 ibx_assert_pch_refclk_enabled(dev_priv);
11705 11694
11706 I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll); 11695 I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
11707 11696
11708 /* Wait for the clocks to stabilize. */ 11697 /* Wait for the clocks to stabilize. */
11709 POSTING_READ(PCH_DPLL(pll->id)); 11698 POSTING_READ(PCH_DPLL(pll->id));
@@ -11714,7 +11703,7 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
11714 * 11703 *
11715 * So write it again. 11704 * So write it again.
11716 */ 11705 */
11717 I915_WRITE(PCH_DPLL(pll->id), pll->hw_state.dpll); 11706 I915_WRITE(PCH_DPLL(pll->id), pll->config.hw_state.dpll);
11718 POSTING_READ(PCH_DPLL(pll->id)); 11707 POSTING_READ(PCH_DPLL(pll->id));
11719 udelay(200); 11708 udelay(200);
11720} 11709}
@@ -11813,161 +11802,195 @@ disable_unpin:
11813} 11802}
11814 11803
11815static int 11804static int
11816intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc, 11805intel_check_primary_plane(struct drm_plane *plane,
11817 struct drm_framebuffer *fb, int crtc_x, int crtc_y, 11806 struct intel_plane_state *state)
11818 unsigned int crtc_w, unsigned int crtc_h,
11819 uint32_t src_x, uint32_t src_y,
11820 uint32_t src_w, uint32_t src_h)
11821{ 11807{
11808 struct drm_crtc *crtc = state->crtc;
11809 struct drm_framebuffer *fb = state->fb;
11810 struct drm_rect *dest = &state->dst;
11811 struct drm_rect *src = &state->src;
11812 const struct drm_rect *clip = &state->clip;
11813
11814 return drm_plane_helper_check_update(plane, crtc, fb,
11815 src, dest, clip,
11816 DRM_PLANE_HELPER_NO_SCALING,
11817 DRM_PLANE_HELPER_NO_SCALING,
11818 false, true, &state->visible);
11819}
11820
11821static int
11822intel_prepare_primary_plane(struct drm_plane *plane,
11823 struct intel_plane_state *state)
11824{
11825 struct drm_crtc *crtc = state->crtc;
11826 struct drm_framebuffer *fb = state->fb;
11822 struct drm_device *dev = crtc->dev; 11827 struct drm_device *dev = crtc->dev;
11823 struct drm_i915_private *dev_priv = dev->dev_private;
11824 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 11828 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11829 enum pipe pipe = intel_crtc->pipe;
11825 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 11830 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11826 struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb); 11831 struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
11827 struct drm_rect dest = {
11828 /* integer pixels */
11829 .x1 = crtc_x,
11830 .y1 = crtc_y,
11831 .x2 = crtc_x + crtc_w,
11832 .y2 = crtc_y + crtc_h,
11833 };
11834 struct drm_rect src = {
11835 /* 16.16 fixed point */
11836 .x1 = src_x,
11837 .y1 = src_y,
11838 .x2 = src_x + src_w,
11839 .y2 = src_y + src_h,
11840 };
11841 const struct drm_rect clip = {
11842 /* integer pixels */
11843 .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
11844 .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
11845 };
11846 const struct {
11847 int crtc_x, crtc_y;
11848 unsigned int crtc_w, crtc_h;
11849 uint32_t src_x, src_y, src_w, src_h;
11850 } orig = {
11851 .crtc_x = crtc_x,
11852 .crtc_y = crtc_y,
11853 .crtc_w = crtc_w,
11854 .crtc_h = crtc_h,
11855 .src_x = src_x,
11856 .src_y = src_y,
11857 .src_w = src_w,
11858 .src_h = src_h,
11859 };
11860 struct intel_plane *intel_plane = to_intel_plane(plane);
11861 bool visible;
11862 int ret; 11832 int ret;
11863 11833
11864 ret = drm_plane_helper_check_update(plane, crtc, fb, 11834 intel_crtc_wait_for_pending_flips(crtc);
11865 &src, &dest, &clip,
11866 DRM_PLANE_HELPER_NO_SCALING,
11867 DRM_PLANE_HELPER_NO_SCALING,
11868 false, true, &visible);
11869 11835
11870 if (ret) 11836 if (intel_crtc_has_pending_flip(crtc)) {
11871 return ret; 11837 DRM_ERROR("pipe is still busy with an old pageflip\n");
11838 return -EBUSY;
11839 }
11872 11840
11873 /* 11841 if (old_obj != obj) {
11874 * If the CRTC isn't enabled, we're just pinning the framebuffer,
11875 * updating the fb pointer, and returning without touching the
11876 * hardware. This allows us to later do a drmModeSetCrtc with fb=-1 to
11877 * turn on the display with all planes setup as desired.
11878 */
11879 if (!crtc->enabled) {
11880 mutex_lock(&dev->struct_mutex); 11842 mutex_lock(&dev->struct_mutex);
11881 11843 ret = intel_pin_and_fence_fb_obj(plane, fb, NULL);
11882 /* 11844 if (ret == 0)
11883 * If we already called setplane while the crtc was disabled, 11845 i915_gem_track_fb(old_obj, obj,
11884 * we may have an fb pinned; unpin it. 11846 INTEL_FRONTBUFFER_PRIMARY(pipe));
11885 */
11886 if (plane->fb)
11887 intel_unpin_fb_obj(old_obj);
11888
11889 i915_gem_track_fb(old_obj, obj,
11890 INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe));
11891
11892 /* Pin and return without programming hardware */
11893 ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
11894 mutex_unlock(&dev->struct_mutex); 11847 mutex_unlock(&dev->struct_mutex);
11895 11848 if (ret != 0) {
11896 return ret; 11849 DRM_DEBUG_KMS("pin & fence failed\n");
11850 return ret;
11851 }
11897 } 11852 }
11898 11853
11899 intel_crtc_wait_for_pending_flips(crtc); 11854 return 0;
11855}
11900 11856
11901 /* 11857static void
11902 * If clipping results in a non-visible primary plane, we'll disable 11858intel_commit_primary_plane(struct drm_plane *plane,
11903 * the primary plane. Note that this is a bit different than what 11859 struct intel_plane_state *state)
11904 * happens if userspace explicitly disables the plane by passing fb=0 11860{
11905 * because plane->fb still gets set and pinned. 11861 struct drm_crtc *crtc = state->crtc;
11906 */ 11862 struct drm_framebuffer *fb = state->fb;
11907 if (!visible) { 11863 struct drm_device *dev = crtc->dev;
11908 mutex_lock(&dev->struct_mutex); 11864 struct drm_i915_private *dev_priv = dev->dev_private;
11865 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11866 enum pipe pipe = intel_crtc->pipe;
11867 struct drm_framebuffer *old_fb = plane->fb;
11868 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11869 struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->fb);
11870 struct intel_plane *intel_plane = to_intel_plane(plane);
11871 struct drm_rect *src = &state->src;
11909 11872
11873 crtc->primary->fb = fb;
11874 crtc->x = src->x1 >> 16;
11875 crtc->y = src->y1 >> 16;
11876
11877 intel_plane->crtc_x = state->orig_dst.x1;
11878 intel_plane->crtc_y = state->orig_dst.y1;
11879 intel_plane->crtc_w = drm_rect_width(&state->orig_dst);
11880 intel_plane->crtc_h = drm_rect_height(&state->orig_dst);
11881 intel_plane->src_x = state->orig_src.x1;
11882 intel_plane->src_y = state->orig_src.y1;
11883 intel_plane->src_w = drm_rect_width(&state->orig_src);
11884 intel_plane->src_h = drm_rect_height(&state->orig_src);
11885 intel_plane->obj = obj;
11886
11887 if (intel_crtc->active) {
11910 /* 11888 /*
11911 * Try to pin the new fb first so that we can bail out if we 11889 * FBC does not work on some platforms for rotated
11912 * fail. 11890 * planes, so disable it when rotation is not 0 and
11891 * update it when rotation is set back to 0.
11892 *
11893 * FIXME: This is redundant with the fbc update done in
11894 * the primary plane enable function except that that
11895 * one is done too late. We eventually need to unify
11896 * this.
11913 */ 11897 */
11914 if (plane->fb != fb) { 11898 if (intel_crtc->primary_enabled &&
11915 ret = intel_pin_and_fence_fb_obj(dev, obj, NULL); 11899 INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) &&
11916 if (ret) { 11900 dev_priv->fbc.plane == intel_crtc->plane &&
11917 mutex_unlock(&dev->struct_mutex); 11901 intel_plane->rotation != BIT(DRM_ROTATE_0)) {
11918 return ret; 11902 intel_disable_fbc(dev);
11919 }
11920 } 11903 }
11921 11904
11922 i915_gem_track_fb(old_obj, obj, 11905 if (state->visible) {
11923 INTEL_FRONTBUFFER_PRIMARY(intel_crtc->pipe)); 11906 bool was_enabled = intel_crtc->primary_enabled;
11924
11925 if (intel_crtc->primary_enabled)
11926 intel_disable_primary_hw_plane(plane, crtc);
11927 11907
11908 /* FIXME: kill this fastboot hack */
11909 intel_update_pipe_size(intel_crtc);
11928 11910
11929 if (plane->fb != fb) 11911 intel_crtc->primary_enabled = true;
11930 if (plane->fb)
11931 intel_unpin_fb_obj(old_obj);
11932 11912
11933 mutex_unlock(&dev->struct_mutex); 11913 dev_priv->display.update_primary_plane(crtc, plane->fb,
11914 crtc->x, crtc->y);
11934 11915
11935 } else {
11936 if (intel_crtc && intel_crtc->active &&
11937 intel_crtc->primary_enabled) {
11938 /* 11916 /*
11939 * FBC does not work on some platforms for rotated 11917 * BDW signals flip done immediately if the plane
11940 * planes, so disable it when rotation is not 0 and 11918 * is disabled, even if the plane enable is already
11941 * update it when rotation is set back to 0. 11919 * armed to occur at the next vblank :(
11942 *
11943 * FIXME: This is redundant with the fbc update done in
11944 * the primary plane enable function except that that
11945 * one is done too late. We eventually need to unify
11946 * this.
11947 */ 11920 */
11948 if (INTEL_INFO(dev)->gen <= 4 && !IS_G4X(dev) && 11921 if (IS_BROADWELL(dev) && !was_enabled)
11949 dev_priv->fbc.plane == intel_crtc->plane && 11922 intel_wait_for_vblank(dev, intel_crtc->pipe);
11950 intel_plane->rotation != BIT(DRM_ROTATE_0)) { 11923 } else {
11951 intel_disable_fbc(dev); 11924 /*
11952 } 11925 * If clipping results in a non-visible primary plane,
11926 * we'll disable the primary plane. Note that this is
11927 * a bit different than what happens if userspace
11928 * explicitly disables the plane by passing fb=0
11929 * because plane->fb still gets set and pinned.
11930 */
11931 intel_disable_primary_hw_plane(plane, crtc);
11953 } 11932 }
11954 ret = intel_pipe_set_base(crtc, src.x1, src.y1, fb);
11955 if (ret)
11956 return ret;
11957 11933
11958 if (!intel_crtc->primary_enabled) 11934 intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_PRIMARY(pipe));
11959 intel_enable_primary_hw_plane(plane, crtc); 11935
11936 mutex_lock(&dev->struct_mutex);
11937 intel_update_fbc(dev);
11938 mutex_unlock(&dev->struct_mutex);
11960 } 11939 }
11961 11940
11962 intel_plane->crtc_x = orig.crtc_x; 11941 if (old_fb && old_fb != fb) {
11963 intel_plane->crtc_y = orig.crtc_y; 11942 if (intel_crtc->active)
11964 intel_plane->crtc_w = orig.crtc_w; 11943 intel_wait_for_vblank(dev, intel_crtc->pipe);
11965 intel_plane->crtc_h = orig.crtc_h; 11944
11966 intel_plane->src_x = orig.src_x; 11945 mutex_lock(&dev->struct_mutex);
11967 intel_plane->src_y = orig.src_y; 11946 intel_unpin_fb_obj(old_obj);
11968 intel_plane->src_w = orig.src_w; 11947 mutex_unlock(&dev->struct_mutex);
11969 intel_plane->src_h = orig.src_h; 11948 }
11970 intel_plane->obj = obj; 11949}
11950
11951static int
11952intel_primary_plane_setplane(struct drm_plane *plane, struct drm_crtc *crtc,
11953 struct drm_framebuffer *fb, int crtc_x, int crtc_y,
11954 unsigned int crtc_w, unsigned int crtc_h,
11955 uint32_t src_x, uint32_t src_y,
11956 uint32_t src_w, uint32_t src_h)
11957{
11958 struct intel_plane_state state;
11959 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11960 int ret;
11961
11962 state.crtc = crtc;
11963 state.fb = fb;
11964
11965 /* sample coordinates in 16.16 fixed point */
11966 state.src.x1 = src_x;
11967 state.src.x2 = src_x + src_w;
11968 state.src.y1 = src_y;
11969 state.src.y2 = src_y + src_h;
11970
11971 /* integer pixels */
11972 state.dst.x1 = crtc_x;
11973 state.dst.x2 = crtc_x + crtc_w;
11974 state.dst.y1 = crtc_y;
11975 state.dst.y2 = crtc_y + crtc_h;
11976
11977 state.clip.x1 = 0;
11978 state.clip.y1 = 0;
11979 state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0;
11980 state.clip.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0;
11981
11982 state.orig_src = state.src;
11983 state.orig_dst = state.dst;
11984
11985 ret = intel_check_primary_plane(plane, &state);
11986 if (ret)
11987 return ret;
11988
11989 ret = intel_prepare_primary_plane(plane, &state);
11990 if (ret)
11991 return ret;
11992
11993 intel_commit_primary_plane(plane, &state);
11971 11994
11972 return 0; 11995 return 0;
11973} 11996}
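The setplane path is now split into the three phases the atomic helpers expect: check (validate and clip, may fail), prepare (pin buffers, may fail), and commit (touch hardware, must not fail). A skeletal version of that structure, with a hypothetical plane_state type standing in for intel_plane_state:

    #include <stdio.h>

    struct plane_state { int x, y, visible; };

    static int check_plane(struct plane_state *s)
    {
            if (s->x < 0 || s->y < 0)
                    return -1;              /* reject bad coordinates */
            s->visible = 1;                 /* clipping result */
            return 0;
    }

    static int prepare_plane(struct plane_state *s)
    {
            (void)s;                        /* pin the framebuffer here; may fail */
            return 0;
    }

    static void commit_plane(const struct plane_state *s)
    {
            /* Point of no return: program the hardware. */
            printf("plane at %d,%d visible=%d\n", s->x, s->y, s->visible);
    }

    int main(void)
    {
            struct plane_state s = { 10, 20, 0 };

            if (check_plane(&s) || prepare_plane(&s))
                    return 1;
            commit_plane(&s);
            return 0;
    }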
@@ -12046,51 +12069,92 @@ intel_cursor_plane_disable(struct drm_plane *plane)
12046} 12069}
12047 12070
12048static int 12071static int
12049intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc, 12072intel_check_cursor_plane(struct drm_plane *plane,
12050 struct drm_framebuffer *fb, int crtc_x, int crtc_y, 12073 struct intel_plane_state *state)
12051 unsigned int crtc_w, unsigned int crtc_h,
12052 uint32_t src_x, uint32_t src_y,
12053 uint32_t src_w, uint32_t src_h)
12054{ 12074{
12055 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 12075 struct drm_crtc *crtc = state->crtc;
12056 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 12076 struct drm_device *dev = crtc->dev;
12057 struct drm_i915_gem_object *obj = intel_fb->obj; 12077 struct drm_framebuffer *fb = state->fb;
12058 struct drm_rect dest = { 12078 struct drm_rect *dest = &state->dst;
12059 /* integer pixels */ 12079 struct drm_rect *src = &state->src;
12060 .x1 = crtc_x, 12080 const struct drm_rect *clip = &state->clip;
12061 .y1 = crtc_y, 12081 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
12062 .x2 = crtc_x + crtc_w, 12082 int crtc_w, crtc_h;
12063 .y2 = crtc_y + crtc_h, 12083 unsigned stride;
12064 };
12065 struct drm_rect src = {
12066 /* 16.16 fixed point */
12067 .x1 = src_x,
12068 .y1 = src_y,
12069 .x2 = src_x + src_w,
12070 .y2 = src_y + src_h,
12071 };
12072 const struct drm_rect clip = {
12073 /* integer pixels */
12074 .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
12075 .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
12076 };
12077 bool visible;
12078 int ret; 12084 int ret;
12079 12085
12080 ret = drm_plane_helper_check_update(plane, crtc, fb, 12086 ret = drm_plane_helper_check_update(plane, crtc, fb,
12081 &src, &dest, &clip, 12087 src, dest, clip,
12082 DRM_PLANE_HELPER_NO_SCALING, 12088 DRM_PLANE_HELPER_NO_SCALING,
12083 DRM_PLANE_HELPER_NO_SCALING, 12089 DRM_PLANE_HELPER_NO_SCALING,
12084 true, true, &visible); 12090 true, true, &state->visible);
12085 if (ret) 12091 if (ret)
12086 return ret; 12092 return ret;
12087 12093
12088 crtc->cursor_x = crtc_x; 12094
12089 crtc->cursor_y = crtc_y; 12095 /* if we want to turn off the cursor ignore width and height */
12096 if (!obj)
12097 return 0;
12098
12099 /* Check for which cursor types we support */
12100 crtc_w = drm_rect_width(&state->orig_dst);
12101 crtc_h = drm_rect_height(&state->orig_dst);
12102 if (!cursor_size_ok(dev, crtc_w, crtc_h)) {
12103 DRM_DEBUG("Cursor dimension not supported\n");
12104 return -EINVAL;
12105 }
12106
12107 stride = roundup_pow_of_two(crtc_w) * 4;
12108 if (obj->base.size < stride * crtc_h) {
12109 DRM_DEBUG_KMS("buffer is too small\n");
12110 return -ENOMEM;
12111 }
12112
12113 if (fb == crtc->cursor->fb)
12114 return 0;
12115
12116 /* we only need to pin inside GTT if cursor is non-phy */
12117 mutex_lock(&dev->struct_mutex);
12118 if (!INTEL_INFO(dev)->cursor_needs_physical && obj->tiling_mode) {
12119 DRM_DEBUG_KMS("cursor cannot be tiled\n");
12120 ret = -EINVAL;
12121 }
12122 mutex_unlock(&dev->struct_mutex);
12123
12124 return ret;
12125}
12126
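The new cursor check rounds the cursor width up to a power of two and multiplies by 4 (bytes per ARGB pixel) to get the hardware stride, then verifies the GEM object is big enough for stride * height. The arithmetic in standalone form (roundup_pow_of_two here is a simple local version of the kernel helper):

    #include <stdint.h>
    #include <stdio.h>

    /* Local stand-in for the kernel's roundup_pow_of_two(). */
    static uint32_t roundup_pow_of_two(uint32_t n)
    {
            uint32_t p = 1;

            while (p < n)
                    p <<= 1;
            return p;
    }

    int main(void)
    {
            unsigned int crtc_w = 100, crtc_h = 100;
            size_t obj_size = 48 * 1024;

            /* 100 rounds up to 128; 4 bytes per ARGB8888 pixel. */
            uint32_t stride = roundup_pow_of_two(crtc_w) * 4;

            if (obj_size < (size_t)stride * crtc_h)
                    fprintf(stderr, "buffer is too small\n");
            else
                    printf("stride %u, need %u bytes\n",
                           stride, stride * crtc_h);
            return 0;
    }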
12127static int
12128intel_commit_cursor_plane(struct drm_plane *plane,
12129 struct intel_plane_state *state)
12130{
12131 struct drm_crtc *crtc = state->crtc;
12132 struct drm_framebuffer *fb = state->fb;
12133 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12134 struct intel_plane *intel_plane = to_intel_plane(plane);
12135 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
12136 struct drm_i915_gem_object *obj = intel_fb->obj;
12137 int crtc_w, crtc_h;
12138
12139 crtc->cursor_x = state->orig_dst.x1;
12140 crtc->cursor_y = state->orig_dst.y1;
12141
12142 intel_plane->crtc_x = state->orig_dst.x1;
12143 intel_plane->crtc_y = state->orig_dst.y1;
12144 intel_plane->crtc_w = drm_rect_width(&state->orig_dst);
12145 intel_plane->crtc_h = drm_rect_height(&state->orig_dst);
12146 intel_plane->src_x = state->orig_src.x1;
12147 intel_plane->src_y = state->orig_src.y1;
12148 intel_plane->src_w = drm_rect_width(&state->orig_src);
12149 intel_plane->src_h = drm_rect_height(&state->orig_src);
12150 intel_plane->obj = obj;
12151
12090 if (fb != crtc->cursor->fb) { 12152 if (fb != crtc->cursor->fb) {
12153 crtc_w = drm_rect_width(&state->orig_dst);
12154 crtc_h = drm_rect_height(&state->orig_dst);
12091 return intel_crtc_cursor_set_obj(crtc, obj, crtc_w, crtc_h); 12155 return intel_crtc_cursor_set_obj(crtc, obj, crtc_w, crtc_h);
12092 } else { 12156 } else {
12093 intel_crtc_update_cursor(crtc, visible); 12157 intel_crtc_update_cursor(crtc, state->visible);
12094 12158
12095 intel_frontbuffer_flip(crtc->dev, 12159 intel_frontbuffer_flip(crtc->dev,
12096 INTEL_FRONTBUFFER_CURSOR(intel_crtc->pipe)); 12160 INTEL_FRONTBUFFER_CURSOR(intel_crtc->pipe));
@@ -12098,10 +12162,53 @@ intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
12098 return 0; 12162 return 0;
12099 } 12163 }
12100} 12164}
12165
12166static int
12167intel_cursor_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
12168 struct drm_framebuffer *fb, int crtc_x, int crtc_y,
12169 unsigned int crtc_w, unsigned int crtc_h,
12170 uint32_t src_x, uint32_t src_y,
12171 uint32_t src_w, uint32_t src_h)
12172{
12173 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12174 struct intel_plane_state state;
12175 int ret;
12176
12177 state.crtc = crtc;
12178 state.fb = fb;
12179
12180 /* sample coordinates in 16.16 fixed point */
12181 state.src.x1 = src_x;
12182 state.src.x2 = src_x + src_w;
12183 state.src.y1 = src_y;
12184 state.src.y2 = src_y + src_h;
12185
12186 /* integer pixels */
12187 state.dst.x1 = crtc_x;
12188 state.dst.x2 = crtc_x + crtc_w;
12189 state.dst.y1 = crtc_y;
12190 state.dst.y2 = crtc_y + crtc_h;
12191
12192 state.clip.x1 = 0;
12193 state.clip.y1 = 0;
12194 state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0;
12195 state.clip.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0;
12196
12197 state.orig_src = state.src;
12198 state.orig_dst = state.dst;
12199
12200 ret = intel_check_cursor_plane(plane, &state);
12201 if (ret)
12202 return ret;
12203
12204 return intel_commit_cursor_plane(plane, &state);
12205}
12206
12101static const struct drm_plane_funcs intel_cursor_plane_funcs = { 12207static const struct drm_plane_funcs intel_cursor_plane_funcs = {
12102 .update_plane = intel_cursor_plane_update, 12208 .update_plane = intel_cursor_plane_update,
12103 .disable_plane = intel_cursor_plane_disable, 12209 .disable_plane = intel_cursor_plane_disable,
12104 .destroy = intel_plane_destroy, 12210 .destroy = intel_plane_destroy,
12211 .set_property = intel_plane_set_property,
12105}; 12212};
12106 12213
12107static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev, 12214static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
@@ -12117,12 +12224,26 @@ static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
12117 cursor->max_downscale = 1; 12224 cursor->max_downscale = 1;
12118 cursor->pipe = pipe; 12225 cursor->pipe = pipe;
12119 cursor->plane = pipe; 12226 cursor->plane = pipe;
12227 cursor->rotation = BIT(DRM_ROTATE_0);
12120 12228
12121 drm_universal_plane_init(dev, &cursor->base, 0, 12229 drm_universal_plane_init(dev, &cursor->base, 0,
12122 &intel_cursor_plane_funcs, 12230 &intel_cursor_plane_funcs,
12123 intel_cursor_formats, 12231 intel_cursor_formats,
12124 ARRAY_SIZE(intel_cursor_formats), 12232 ARRAY_SIZE(intel_cursor_formats),
12125 DRM_PLANE_TYPE_CURSOR); 12233 DRM_PLANE_TYPE_CURSOR);
12234
12235 if (INTEL_INFO(dev)->gen >= 4) {
12236 if (!dev->mode_config.rotation_property)
12237 dev->mode_config.rotation_property =
12238 drm_mode_create_rotation_property(dev,
12239 BIT(DRM_ROTATE_0) |
12240 BIT(DRM_ROTATE_180));
12241 if (dev->mode_config.rotation_property)
12242 drm_object_attach_property(&cursor->base.base,
12243 dev->mode_config.rotation_property,
12244 cursor->rotation);
12245 }
12246
12126 return &cursor->base; 12247 return &cursor->base;
12127} 12248}
12128 12249
@@ -12178,6 +12299,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
12178 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base; 12299 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
12179 dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base; 12300 dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
12180 12301
12302 INIT_WORK(&intel_crtc->mmio_flip.work, intel_mmio_flip_work_func);
12303
12181 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); 12304 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
12182 12305
12183 WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe); 12306 WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
@@ -12198,7 +12321,7 @@ enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
12198 12321
12199 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); 12322 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
12200 12323
12201 if (!encoder) 12324 if (!encoder || WARN_ON(!encoder->crtc))
12202 return INVALID_PIPE; 12325 return INVALID_PIPE;
12203 12326
12204 return to_intel_crtc(encoder->crtc)->pipe; 12327 return to_intel_crtc(encoder->crtc)->pipe;
@@ -12286,7 +12409,10 @@ static bool intel_crt_present(struct drm_device *dev)
12286{ 12409{
12287 struct drm_i915_private *dev_priv = dev->dev_private; 12410 struct drm_i915_private *dev_priv = dev->dev_private;
12288 12411
12289 if (IS_ULT(dev)) 12412 if (INTEL_INFO(dev)->gen >= 9)
12413 return false;
12414
12415 if (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
12290 return false; 12416 return false;
12291 12417
12292 if (IS_CHERRYVIEW(dev)) 12418 if (IS_CHERRYVIEW(dev))
@@ -12430,7 +12556,7 @@ static void intel_setup_outputs(struct drm_device *dev)
12430 if (SUPPORTS_TV(dev)) 12556 if (SUPPORTS_TV(dev))
12431 intel_tv_init(dev); 12557 intel_tv_init(dev);
12432 12558
12433 intel_edp_psr_init(dev); 12559 intel_psr_init(dev);
12434 12560
12435 for_each_intel_encoder(dev, encoder) { 12561 for_each_intel_encoder(dev, encoder) {
12436 encoder->base.possible_crtcs = encoder->crtc_mask; 12562 encoder->base.possible_crtcs = encoder->crtc_mask;
@@ -12634,16 +12760,22 @@ static void intel_init_display(struct drm_device *dev)
12634 if (HAS_DDI(dev)) { 12760 if (HAS_DDI(dev)) {
12635 dev_priv->display.get_pipe_config = haswell_get_pipe_config; 12761 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
12636 dev_priv->display.get_plane_config = ironlake_get_plane_config; 12762 dev_priv->display.get_plane_config = ironlake_get_plane_config;
12637 dev_priv->display.crtc_mode_set = haswell_crtc_mode_set; 12763 dev_priv->display.crtc_compute_clock =
12764 haswell_crtc_compute_clock;
12638 dev_priv->display.crtc_enable = haswell_crtc_enable; 12765 dev_priv->display.crtc_enable = haswell_crtc_enable;
12639 dev_priv->display.crtc_disable = haswell_crtc_disable; 12766 dev_priv->display.crtc_disable = haswell_crtc_disable;
12640 dev_priv->display.off = ironlake_crtc_off; 12767 dev_priv->display.off = ironlake_crtc_off;
12641 dev_priv->display.update_primary_plane = 12768 if (INTEL_INFO(dev)->gen >= 9)
12642 ironlake_update_primary_plane; 12769 dev_priv->display.update_primary_plane =
12770 skylake_update_primary_plane;
12771 else
12772 dev_priv->display.update_primary_plane =
12773 ironlake_update_primary_plane;
12643 } else if (HAS_PCH_SPLIT(dev)) { 12774 } else if (HAS_PCH_SPLIT(dev)) {
12644 dev_priv->display.get_pipe_config = ironlake_get_pipe_config; 12775 dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
12645 dev_priv->display.get_plane_config = ironlake_get_plane_config; 12776 dev_priv->display.get_plane_config = ironlake_get_plane_config;
12646 dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set; 12777 dev_priv->display.crtc_compute_clock =
12778 ironlake_crtc_compute_clock;
12647 dev_priv->display.crtc_enable = ironlake_crtc_enable; 12779 dev_priv->display.crtc_enable = ironlake_crtc_enable;
12648 dev_priv->display.crtc_disable = ironlake_crtc_disable; 12780 dev_priv->display.crtc_disable = ironlake_crtc_disable;
12649 dev_priv->display.off = ironlake_crtc_off; 12781 dev_priv->display.off = ironlake_crtc_off;
@@ -12652,7 +12784,7 @@ static void intel_init_display(struct drm_device *dev)
12652 } else if (IS_VALLEYVIEW(dev)) { 12784 } else if (IS_VALLEYVIEW(dev)) {
12653 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 12785 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
12654 dev_priv->display.get_plane_config = i9xx_get_plane_config; 12786 dev_priv->display.get_plane_config = i9xx_get_plane_config;
12655 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set; 12787 dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
12656 dev_priv->display.crtc_enable = valleyview_crtc_enable; 12788 dev_priv->display.crtc_enable = valleyview_crtc_enable;
12657 dev_priv->display.crtc_disable = i9xx_crtc_disable; 12789 dev_priv->display.crtc_disable = i9xx_crtc_disable;
12658 dev_priv->display.off = i9xx_crtc_off; 12790 dev_priv->display.off = i9xx_crtc_off;
@@ -12661,7 +12793,7 @@ static void intel_init_display(struct drm_device *dev)
12661 } else { 12793 } else {
12662 dev_priv->display.get_pipe_config = i9xx_get_pipe_config; 12794 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
12663 dev_priv->display.get_plane_config = i9xx_get_plane_config; 12795 dev_priv->display.get_plane_config = i9xx_get_plane_config;
12664 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set; 12796 dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
12665 dev_priv->display.crtc_enable = i9xx_crtc_enable; 12797 dev_priv->display.crtc_enable = i9xx_crtc_enable;
12666 dev_priv->display.crtc_disable = i9xx_crtc_disable; 12798 dev_priv->display.crtc_disable = i9xx_crtc_disable;
12667 dev_priv->display.off = i9xx_crtc_off; 12799 dev_priv->display.off = i9xx_crtc_off;
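These hunks retire the per-platform crtc_mode_set hook in favour of a narrower crtc_compute_clock hook, but the dispatch mechanism is unchanged: probe-time code fills a struct of function pointers once, and the rest of the driver calls through it without platform checks. The idiom in miniature (the platform names and hooks here are invented for the example):

    #include <stdio.h>

    struct display_funcs {
            int (*crtc_compute_clock)(int pipe);
    };

    static int gen9_compute_clock(int pipe) { printf("gen9 pipe %d\n", pipe); return 0; }
    static int gen4_compute_clock(int pipe) { printf("gen4 pipe %d\n", pipe); return 0; }

    static void init_display(struct display_funcs *f, int gen)
    {
            /* Pick the implementation once, at init time. */
            f->crtc_compute_clock = (gen >= 9) ? gen9_compute_clock
                                               : gen4_compute_clock;
    }

    int main(void)
    {
            struct display_funcs funcs;

            init_display(&funcs, 9);
            return funcs.crtc_compute_clock(0);     /* no platform checks here */
    }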
@@ -12698,31 +12830,20 @@ static void intel_init_display(struct drm_device *dev)
12698 dev_priv->display.get_display_clock_speed = 12830 dev_priv->display.get_display_clock_speed =
12699 i830_get_display_clock_speed; 12831 i830_get_display_clock_speed;
12700 12832
12701 if (IS_G4X(dev)) { 12833 if (IS_GEN5(dev)) {
12702 dev_priv->display.write_eld = g4x_write_eld;
12703 } else if (IS_GEN5(dev)) {
12704 dev_priv->display.fdi_link_train = ironlake_fdi_link_train; 12834 dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
12705 dev_priv->display.write_eld = ironlake_write_eld;
12706 } else if (IS_GEN6(dev)) { 12835 } else if (IS_GEN6(dev)) {
12707 dev_priv->display.fdi_link_train = gen6_fdi_link_train; 12836 dev_priv->display.fdi_link_train = gen6_fdi_link_train;
12708 dev_priv->display.write_eld = ironlake_write_eld;
12709 dev_priv->display.modeset_global_resources =
12710 snb_modeset_global_resources;
12711 } else if (IS_IVYBRIDGE(dev)) { 12837 } else if (IS_IVYBRIDGE(dev)) {
12712 /* FIXME: detect B0+ stepping and use auto training */ 12838 /* FIXME: detect B0+ stepping and use auto training */
12713 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; 12839 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
12714 dev_priv->display.write_eld = ironlake_write_eld;
12715 dev_priv->display.modeset_global_resources = 12840 dev_priv->display.modeset_global_resources =
12716 ivb_modeset_global_resources; 12841 ivb_modeset_global_resources;
12717 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { 12842 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
12718 dev_priv->display.fdi_link_train = hsw_fdi_link_train; 12843 dev_priv->display.fdi_link_train = hsw_fdi_link_train;
12719 dev_priv->display.write_eld = haswell_write_eld;
12720 dev_priv->display.modeset_global_resources =
12721 haswell_modeset_global_resources;
12722 } else if (IS_VALLEYVIEW(dev)) { 12844 } else if (IS_VALLEYVIEW(dev)) {
12723 dev_priv->display.modeset_global_resources = 12845 dev_priv->display.modeset_global_resources =
12724 valleyview_modeset_global_resources; 12846 valleyview_modeset_global_resources;
12725 dev_priv->display.write_eld = ironlake_write_eld;
12726 } 12847 }
12727 12848
12728 /* Default just returns -ENODEV to indicate unsupported */ 12849 /* Default just returns -ENODEV to indicate unsupported */
@@ -12749,6 +12870,9 @@ static void intel_init_display(struct drm_device *dev)
12749 case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */ 12870 case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
12750 dev_priv->display.queue_flip = intel_gen7_queue_flip; 12871 dev_priv->display.queue_flip = intel_gen7_queue_flip;
12751 break; 12872 break;
12873 case 9:
12874 dev_priv->display.queue_flip = intel_gen9_queue_flip;
12875 break;
12752 } 12876 }
12753 12877
12754 intel_panel_init_backlight_funcs(dev); 12878 intel_panel_init_backlight_funcs(dev);
@@ -12953,11 +13077,6 @@ void intel_modeset_init_hw(struct drm_device *dev)
12953 intel_enable_gt_powersave(dev); 13077 intel_enable_gt_powersave(dev);
12954} 13078}
12955 13079
12956void intel_modeset_suspend_hw(struct drm_device *dev)
12957{
12958 intel_suspend_hw(dev);
12959}
12960
12961void intel_modeset_init(struct drm_device *dev) 13080void intel_modeset_init(struct drm_device *dev)
12962{ 13081{
12963 struct drm_i915_private *dev_priv = dev->dev_private; 13082 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -12983,6 +13102,7 @@ void intel_modeset_init(struct drm_device *dev)
12983 return; 13102 return;
12984 13103
12985 intel_init_display(dev); 13104 intel_init_display(dev);
13105 intel_init_audio(dev);
12986 13106
12987 if (IS_GEN2(dev)) { 13107 if (IS_GEN2(dev)) {
12988 dev->mode_config.max_width = 2048; 13108 dev->mode_config.max_width = 2048;
@@ -13293,7 +13413,7 @@ void i915_redisable_vga(struct drm_device *dev)
13293 * level, just check if the power well is enabled instead of trying to 13413 * level, just check if the power well is enabled instead of trying to
13294 * follow the "don't touch the power well if we don't need it" policy 13414 * follow the "don't touch the power well if we don't need it" policy
13295 * the rest of the driver uses. */ 13415 * the rest of the driver uses. */
13296 if (!intel_display_power_enabled(dev_priv, POWER_DOMAIN_VGA)) 13416 if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_VGA))
13297 return; 13417 return;
13298 13418
13299 i915_redisable_vga_power_on(dev); 13419 i915_redisable_vga_power_on(dev);
@@ -13337,18 +13457,21 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
13337 for (i = 0; i < dev_priv->num_shared_dpll; i++) { 13457 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
13338 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i]; 13458 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
13339 13459
13340 pll->on = pll->get_hw_state(dev_priv, pll, &pll->hw_state); 13460 pll->on = pll->get_hw_state(dev_priv, pll,
13461 &pll->config.hw_state);
13341 pll->active = 0; 13462 pll->active = 0;
13463 pll->config.crtc_mask = 0;
13342 for_each_intel_crtc(dev, crtc) { 13464 for_each_intel_crtc(dev, crtc) {
13343 if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll) 13465 if (crtc->active && intel_crtc_to_shared_dpll(crtc) == pll) {
13344 pll->active++; 13466 pll->active++;
13467 pll->config.crtc_mask |= 1 << crtc->pipe;
13468 }
13345 } 13469 }
13346 pll->refcount = pll->active;
13347 13470
13348 DRM_DEBUG_KMS("%s hw state readout: refcount %i, on %i\n", 13471 DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
13349 pll->name, pll->refcount, pll->on); 13472 pll->name, pll->config.crtc_mask, pll->on);
13350 13473
13351 if (pll->refcount) 13474 if (pll->config.crtc_mask)
13352 intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS); 13475 intel_display_power_get(dev_priv, POWER_DOMAIN_PLLS);
13353 } 13476 }
13354 13477
@@ -13438,7 +13561,9 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
13438 pll->on = false; 13561 pll->on = false;
13439 } 13562 }
13440 13563
13441 if (HAS_PCH_SPLIT(dev)) 13564 if (IS_GEN9(dev))
13565 skl_wm_get_hw_state(dev);
13566 else if (HAS_PCH_SPLIT(dev))
13442 ilk_wm_get_hw_state(dev); 13567 ilk_wm_get_hw_state(dev);
13443 13568
13444 if (force_restore) { 13569 if (force_restore) {
@@ -13452,8 +13577,8 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
13452 struct drm_crtc *crtc = 13577 struct drm_crtc *crtc =
13453 dev_priv->pipe_to_crtc_mapping[pipe]; 13578 dev_priv->pipe_to_crtc_mapping[pipe];
13454 13579
13455 __intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y, 13580 intel_set_mode(crtc, &crtc->mode, crtc->x, crtc->y,
13456 crtc->primary->fb); 13581 crtc->primary->fb);
13457 } 13582 }
13458 } else { 13583 } else {
13459 intel_modeset_update_staged_output_state(dev); 13584 intel_modeset_update_staged_output_state(dev);
@@ -13464,6 +13589,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
13464 13589
13465void intel_modeset_gem_init(struct drm_device *dev) 13590void intel_modeset_gem_init(struct drm_device *dev)
13466{ 13591{
13592 struct drm_i915_private *dev_priv = dev->dev_private;
13467 struct drm_crtc *c; 13593 struct drm_crtc *c;
13468 struct drm_i915_gem_object *obj; 13594 struct drm_i915_gem_object *obj;
13469 13595
@@ -13471,6 +13597,16 @@ void intel_modeset_gem_init(struct drm_device *dev)
13471 intel_init_gt_powersave(dev); 13597 intel_init_gt_powersave(dev);
13472 mutex_unlock(&dev->struct_mutex); 13598 mutex_unlock(&dev->struct_mutex);
13473 13599
13600 /*
13601 * There may be no VBT; and if the BIOS enabled SSC we can
13602 * just keep using it to avoid unnecessary flicker. Whereas if the
13603 * BIOS isn't using it, don't assume it will work even if the VBT
13604 * indicates as much.
13605 */
13606 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
13607 dev_priv->vbt.lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
13608 DREF_SSC1_ENABLE);
13609
13474 intel_modeset_init_hw(dev); 13610 intel_modeset_init_hw(dev);
13475 13611
13476 intel_setup_overlay(dev); 13612 intel_setup_overlay(dev);
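The SSC readout added above uses the common !!(reg & MASK) spelling to collapse a masked register value into a clean 0/1 before storing it in what is effectively a boolean field. In isolation (the bit position below is illustrative, not the real register layout):

    #include <stdint.h>
    #include <stdio.h>

    #define DREF_SSC1_ENABLE (1u << 1)      /* illustrative bit position */

    int main(void)
    {
            uint32_t reg = 0x0002;          /* pretend register readout */

            /* Without !!, the stored value would be 2, not 1. */
            int use_ssc = !!(reg & DREF_SSC1_ENABLE);

            printf("use_ssc = %d\n", use_ssc);
            return 0;
    }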
@@ -13486,7 +13622,9 @@ void intel_modeset_gem_init(struct drm_device *dev)
13486 if (obj == NULL) 13622 if (obj == NULL)
13487 continue; 13623 continue;
13488 13624
13489 if (intel_pin_and_fence_fb_obj(dev, obj, NULL)) { 13625 if (intel_pin_and_fence_fb_obj(c->primary,
13626 c->primary->fb,
13627 NULL)) {
13490 DRM_ERROR("failed to pin boot fb on pipe %d\n", 13628 DRM_ERROR("failed to pin boot fb on pipe %d\n",
13491 to_intel_crtc(c)->pipe); 13629 to_intel_crtc(c)->pipe);
13492 drm_framebuffer_unreference(c->primary->fb); 13630 drm_framebuffer_unreference(c->primary->fb);
@@ -13494,6 +13632,8 @@ void intel_modeset_gem_init(struct drm_device *dev)
13494 } 13632 }
13495 } 13633 }
13496 mutex_unlock(&dev->struct_mutex); 13634 mutex_unlock(&dev->struct_mutex);
13635
13636 intel_backlight_register(dev);
13497} 13637}
13498 13638
13499void intel_connector_unregister(struct intel_connector *intel_connector) 13639void intel_connector_unregister(struct intel_connector *intel_connector)
@@ -13509,14 +13649,16 @@ void intel_modeset_cleanup(struct drm_device *dev)
13509 struct drm_i915_private *dev_priv = dev->dev_private; 13649 struct drm_i915_private *dev_priv = dev->dev_private;
13510 struct drm_connector *connector; 13650 struct drm_connector *connector;
13511 13651
13652 intel_disable_gt_powersave(dev);
13653
13654 intel_backlight_unregister(dev);
13655
13512 /* 13656 /*
13513 * Interrupts and polling as the first thing to avoid creating havoc. 13657 * Interrupts and polling as the first thing to avoid creating havoc.
13514 * Too much stuff here (turning off rps, connectors, ...) would 13658 * Too much stuff here (turning off connectors, ...) would
13515 * experience fancy races otherwise. 13659 * experience fancy races otherwise.
13516 */ 13660 */
13517 drm_irq_uninstall(dev); 13661 intel_irq_uninstall(dev_priv);
13518 intel_hpd_cancel_work(dev_priv);
13519 dev_priv->pm._irqs_disabled = true;
13520 13662
13521 /* 13663 /*
13522 * Due to the hpd irq storm handling the hotplug work can re-arm the 13664 * Due to the hpd irq storm handling the hotplug work can re-arm the
@@ -13530,8 +13672,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
13530 13672
13531 intel_disable_fbc(dev); 13673 intel_disable_fbc(dev);
13532 13674
13533 intel_disable_gt_powersave(dev);
13534
13535 ironlake_teardown_rc6(dev); 13675 ironlake_teardown_rc6(dev);
13536 13676
13537 mutex_unlock(&dev->struct_mutex); 13677 mutex_unlock(&dev->struct_mutex);
@@ -13671,8 +13811,8 @@ intel_display_capture_error_state(struct drm_device *dev)
13671 13811
13672 for_each_pipe(dev_priv, i) { 13812 for_each_pipe(dev_priv, i) {
13673 error->pipe[i].power_domain_on = 13813 error->pipe[i].power_domain_on =
13674 intel_display_power_enabled_unlocked(dev_priv, 13814 __intel_display_power_is_enabled(dev_priv,
13675 POWER_DOMAIN_PIPE(i)); 13815 POWER_DOMAIN_PIPE(i));
13676 if (!error->pipe[i].power_domain_on) 13816 if (!error->pipe[i].power_domain_on)
13677 continue; 13817 continue;
13678 13818
@@ -13707,7 +13847,7 @@ intel_display_capture_error_state(struct drm_device *dev)
13707 enum transcoder cpu_transcoder = transcoders[i]; 13847 enum transcoder cpu_transcoder = transcoders[i];
13708 13848
13709 error->transcoder[i].power_domain_on = 13849 error->transcoder[i].power_domain_on =
13710 intel_display_power_enabled_unlocked(dev_priv, 13850 __intel_display_power_is_enabled(dev_priv,
13711 POWER_DOMAIN_TRANSCODER(cpu_transcoder)); 13851 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
13712 if (!error->transcoder[i].power_domain_on) 13852 if (!error->transcoder[i].power_domain_on)
13713 continue; 13853 continue;
@@ -13791,9 +13931,8 @@ void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file)
13791 13931
13792 for_each_intel_crtc(dev, crtc) { 13932 for_each_intel_crtc(dev, crtc) {
13793 struct intel_unpin_work *work; 13933 struct intel_unpin_work *work;
13794 unsigned long irqflags;
13795 13934
13796 spin_lock_irqsave(&dev->event_lock, irqflags); 13935 spin_lock_irq(&dev->event_lock);
13797 13936
13798 work = crtc->unpin_work; 13937 work = crtc->unpin_work;
13799 13938
@@ -13803,6 +13942,6 @@ void intel_modeset_preclose(struct drm_device *dev, struct drm_file *file)
13803 work->event = NULL; 13942 work->event = NULL;
13804 } 13943 }
13805 13944
13806 spin_unlock_irqrestore(&dev->event_lock, irqflags); 13945 spin_unlock_irq(&dev->event_lock);
13807 } 13946 }
13808} 13947}
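The last hunk replaces spin_lock_irqsave() with spin_lock_irq(): the preclose path always runs in process context with interrupts enabled, so there is no prior interrupt state worth saving and restoring. A toy model of the difference, tracking interrupt state in a plain variable (this illustrates only the save/restore semantics, not real locking):

    #include <stdbool.h>
    #include <stdio.h>

    static bool irqs_enabled = true;

    /* irqsave: remember the old state, then disable. */
    static void lock_irqsave(bool *flags)
    {
            *flags = irqs_enabled;
            irqs_enabled = false;
    }

    static void unlock_irqrestore(bool flags)
    {
            irqs_enabled = flags;
    }

    /* irq: unconditionally disable, unconditionally re-enable. */
    static void lock_irq(void)   { irqs_enabled = false; }
    static void unlock_irq(void) { irqs_enabled = true; }

    int main(void)
    {
            bool flags;

            lock_irqsave(&flags);   /* safe even if already disabled */
            unlock_irqrestore(flags);

            lock_irq();             /* correct only if enabled on entry */
            unlock_irq();

            printf("irqs_enabled = %d\n", irqs_enabled);
            return 0;
    }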
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 4bcd91757321..5cecc20efa71 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -113,6 +113,9 @@ static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
113static void intel_dp_link_down(struct intel_dp *intel_dp); 113static void intel_dp_link_down(struct intel_dp *intel_dp);
114static bool edp_panel_vdd_on(struct intel_dp *intel_dp); 114static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
115static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync); 115static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
116static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
117static void vlv_steal_power_sequencer(struct drm_device *dev,
118 enum pipe pipe);
116 119
117int 120int
118intel_dp_max_link_bw(struct intel_dp *intel_dp) 121intel_dp_max_link_bw(struct intel_dp *intel_dp)
@@ -224,8 +227,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
224 return MODE_OK; 227 return MODE_OK;
225} 228}
226 229
227static uint32_t 230uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
228pack_aux(uint8_t *src, int src_bytes)
229{ 231{
230 int i; 232 int i;
231 uint32_t v = 0; 233 uint32_t v = 0;
@@ -237,8 +239,7 @@ pack_aux(uint8_t *src, int src_bytes)
237 return v; 239 return v;
238} 240}
239 241
240static void 242void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
241unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
242{ 243{
243 int i; 244 int i;
244 if (dst_bytes > 4) 245 if (dst_bytes > 4)
@@ -283,12 +284,10 @@ intel_hrawclk(struct drm_device *dev)
283 284
284static void 285static void
285intel_dp_init_panel_power_sequencer(struct drm_device *dev, 286intel_dp_init_panel_power_sequencer(struct drm_device *dev,
286 struct intel_dp *intel_dp, 287 struct intel_dp *intel_dp);
287 struct edp_power_seq *out);
288static void 288static void
289intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, 289intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
290 struct intel_dp *intel_dp, 290 struct intel_dp *intel_dp);
291 struct edp_power_seq *out);
292 291
293static void pps_lock(struct intel_dp *intel_dp) 292static void pps_lock(struct intel_dp *intel_dp)
294{ 293{
@@ -322,6 +321,66 @@ static void pps_unlock(struct intel_dp *intel_dp)
322 intel_display_power_put(dev_priv, power_domain); 321 intel_display_power_put(dev_priv, power_domain);
323} 322}
324 323
324static void
325vlv_power_sequencer_kick(struct intel_dp *intel_dp)
326{
327 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
328 struct drm_device *dev = intel_dig_port->base.base.dev;
329 struct drm_i915_private *dev_priv = dev->dev_private;
330 enum pipe pipe = intel_dp->pps_pipe;
331 bool pll_enabled;
332 uint32_t DP;
333
334 if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
335 		 "skipping pipe %c power sequencer kick due to port %c being active\n",
336 pipe_name(pipe), port_name(intel_dig_port->port)))
337 return;
338
339 DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
340 pipe_name(pipe), port_name(intel_dig_port->port));
341
342 /* Preserve the BIOS-computed detected bit. This is
343 * supposed to be read-only.
344 */
345 DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
346 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
347 DP |= DP_PORT_WIDTH(1);
348 DP |= DP_LINK_TRAIN_PAT_1;
349
350 if (IS_CHERRYVIEW(dev))
351 DP |= DP_PIPE_SELECT_CHV(pipe);
352 else if (pipe == PIPE_B)
353 DP |= DP_PIPEB_SELECT;
354
355 pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
356
357 /*
358 * The DPLL for the pipe must be enabled for this to work.
359	 * So enable it temporarily if it's not already enabled.
360 */
361 if (!pll_enabled)
362 vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
363 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
364
365 /*
366 * Similar magic as in intel_dp_enable_port().
367 * We _must_ do this port enable + disable trick
368	 * to make this power sequencer lock onto the port.
369	 * Otherwise even the VDD force bit won't work.
370 */
371 I915_WRITE(intel_dp->output_reg, DP);
372 POSTING_READ(intel_dp->output_reg);
373
374 I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
375 POSTING_READ(intel_dp->output_reg);
376
377 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
378 POSTING_READ(intel_dp->output_reg);
379
380 if (!pll_enabled)
381 vlv_force_pll_off(dev, pipe);
382}
383
325static enum pipe 384static enum pipe
326vlv_power_sequencer_pipe(struct intel_dp *intel_dp) 385vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
327{ 386{
@@ -330,10 +389,13 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
330 struct drm_i915_private *dev_priv = dev->dev_private; 389 struct drm_i915_private *dev_priv = dev->dev_private;
331 struct intel_encoder *encoder; 390 struct intel_encoder *encoder;
332 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B); 391 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
333 struct edp_power_seq power_seq; 392 enum pipe pipe;
334 393
335 lockdep_assert_held(&dev_priv->pps_mutex); 394 lockdep_assert_held(&dev_priv->pps_mutex);
336 395
396 /* We should never land here with regular DP ports */
397 WARN_ON(!is_edp(intel_dp));
398
337 if (intel_dp->pps_pipe != INVALID_PIPE) 399 if (intel_dp->pps_pipe != INVALID_PIPE)
338 return intel_dp->pps_pipe; 400 return intel_dp->pps_pipe;
339 401
@@ -359,18 +421,26 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
359 * are two power sequencers and up to two eDP ports. 421 * are two power sequencers and up to two eDP ports.
360 */ 422 */
361 if (WARN_ON(pipes == 0)) 423 if (WARN_ON(pipes == 0))
362 return PIPE_A; 424 pipe = PIPE_A;
425 else
426 pipe = ffs(pipes) - 1;
363 427
364 intel_dp->pps_pipe = ffs(pipes) - 1; 428 vlv_steal_power_sequencer(dev, pipe);
429 intel_dp->pps_pipe = pipe;
365 430
366 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n", 431 DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
367 pipe_name(intel_dp->pps_pipe), 432 pipe_name(intel_dp->pps_pipe),
368 port_name(intel_dig_port->port)); 433 port_name(intel_dig_port->port));
369 434
370 /* init power sequencer on this pipe and port */ 435 /* init power sequencer on this pipe and port */
371 intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq); 436 intel_dp_init_panel_power_sequencer(dev, intel_dp);
372 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, 437 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
373 &power_seq); 438
439 /*
440 * Even vdd force doesn't work until we've made
441	 * the power sequencer lock onto the port.
442 */
443 vlv_power_sequencer_kick(intel_dp);
374 444
375 return intel_dp->pps_pipe; 445 return intel_dp->pps_pipe;
376} 446}
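
A side note on the pipe-selection logic in the hunk above: vlv_power_sequencer_pipe() picks the lowest-numbered pipe whose power sequencer is still free by taking ffs() of a bitmask. Below is a minimal standalone sketch of that convention; the enum values and helper name are illustrative, not the driver's.

	#include <stdio.h>
	#include <strings.h>	/* ffs() */

	enum pipe { PIPE_A, PIPE_B, INVALID_PIPE = -1 };

	/* Pick the lowest-numbered pipe whose power sequencer is unused;
	 * bit n set in the mask means pipe n is still free. */
	static enum pipe pick_pps_pipe(unsigned int free_pipes)
	{
		if (free_pipes == 0)		/* mirrors the WARN_ON() fallback above */
			return PIPE_A;
		return ffs(free_pipes) - 1;	/* ffs() is 1-based, hence the -1 */
	}

	int main(void)
	{
		unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);

		printf("picked pipe %d\n", pick_pps_pipe(pipes));	/* 0 == PIPE_A */
		pipes &= ~(1 << PIPE_A);				/* pipe A taken */
		printf("picked pipe %d\n", pick_pps_pipe(pipes));	/* 1 == PIPE_B */
		return 0;
	}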
@@ -425,7 +495,6 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
425 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 495 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
426 struct drm_device *dev = intel_dig_port->base.base.dev; 496 struct drm_device *dev = intel_dig_port->base.base.dev;
427 struct drm_i915_private *dev_priv = dev->dev_private; 497 struct drm_i915_private *dev_priv = dev->dev_private;
428 struct edp_power_seq power_seq;
429 enum port port = intel_dig_port->port; 498 enum port port = intel_dig_port->port;
430 499
431 lockdep_assert_held(&dev_priv->pps_mutex); 500 lockdep_assert_held(&dev_priv->pps_mutex);
@@ -453,9 +522,8 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
453 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n", 522 DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
454 port_name(port), pipe_name(intel_dp->pps_pipe)); 523 port_name(port), pipe_name(intel_dp->pps_pipe));
455 524
456 intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq); 525 intel_dp_init_panel_power_sequencer(dev, intel_dp);
457 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, 526 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
458 &power_seq);
459} 527}
460 528
461void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv) 529void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
@@ -550,6 +618,10 @@ static bool edp_have_panel_power(struct intel_dp *intel_dp)
550 618
551 lockdep_assert_held(&dev_priv->pps_mutex); 619 lockdep_assert_held(&dev_priv->pps_mutex);
552 620
621 if (IS_VALLEYVIEW(dev) &&
622 intel_dp->pps_pipe == INVALID_PIPE)
623 return false;
624
553 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0; 625 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
554} 626}
555 627
@@ -560,6 +632,10 @@ static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
560 632
561 lockdep_assert_held(&dev_priv->pps_mutex); 633 lockdep_assert_held(&dev_priv->pps_mutex);
562 634
635 if (IS_VALLEYVIEW(dev) &&
636 intel_dp->pps_pipe == INVALID_PIPE)
637 return false;
638
563 return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD; 639 return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
564} 640}
565 641
@@ -661,6 +737,16 @@ static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
661 return index ? 0 : 100; 737 return index ? 0 : 100;
662} 738}
663 739
740static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
741{
742 /*
743 * SKL doesn't need us to program the AUX clock divider (Hardware will
744 * derive the clock from CDCLK automatically). We still implement the
745	 * get_aux_clock_divider vfunc to plug into the existing code.
746 */
747 return index ? 0 : 1;
748}
749
664static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp, 750static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
665 bool has_aux_irq, 751 bool has_aux_irq,
666 int send_bytes, 752 int send_bytes,
@@ -691,9 +777,24 @@ static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
691 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT); 777 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
692} 778}
693 779
780static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
781 bool has_aux_irq,
782 int send_bytes,
783 uint32_t unused)
784{
785 return DP_AUX_CH_CTL_SEND_BUSY |
786 DP_AUX_CH_CTL_DONE |
787 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
788 DP_AUX_CH_CTL_TIME_OUT_ERROR |
789 DP_AUX_CH_CTL_TIME_OUT_1600us |
790 DP_AUX_CH_CTL_RECEIVE_ERROR |
791 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
792 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
793}
794
694static int 795static int
695intel_dp_aux_ch(struct intel_dp *intel_dp, 796intel_dp_aux_ch(struct intel_dp *intel_dp,
696 uint8_t *send, int send_bytes, 797 const uint8_t *send, int send_bytes,
697 uint8_t *recv, int recv_size) 798 uint8_t *recv, int recv_size)
698{ 799{
699 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 800 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
@@ -760,7 +861,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
760 /* Load the send data into the aux channel data registers */ 861 /* Load the send data into the aux channel data registers */
761 for (i = 0; i < send_bytes; i += 4) 862 for (i = 0; i < send_bytes; i += 4)
762 I915_WRITE(ch_data + i, 863 I915_WRITE(ch_data + i,
763 pack_aux(send + i, send_bytes - i)); 864 intel_dp_pack_aux(send + i,
865 send_bytes - i));
764 866
765 /* Send the command and wait for it to complete */ 867 /* Send the command and wait for it to complete */
766 I915_WRITE(ch_ctl, send_ctl); 868 I915_WRITE(ch_ctl, send_ctl);
@@ -814,8 +916,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
814 recv_bytes = recv_size; 916 recv_bytes = recv_size;
815 917
816 for (i = 0; i < recv_bytes; i += 4) 918 for (i = 0; i < recv_bytes; i += 4)
817 unpack_aux(I915_READ(ch_data + i), 919 intel_dp_unpack_aux(I915_READ(ch_data + i),
818 recv + i, recv_bytes - i); 920 recv + i, recv_bytes - i);
819 921
820 ret = recv_bytes; 922 ret = recv_bytes;
821out: 923out:
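
The intel_dp_pack_aux()/intel_dp_unpack_aux() helpers renamed above implement the DP AUX channel's byte layout: message bytes are packed most-significant-byte-first into 32-bit data registers, four bytes per register word. A self-contained userspace sketch of that convention follows; the clamp to 4 bytes is assumed to match the elided helper bodies.

	#include <stdint.h>
	#include <stdio.h>

	/* Pack up to 4 message bytes into one 32-bit AUX data word, MSB first. */
	static uint32_t pack_aux(const uint8_t *src, int src_bytes)
	{
		uint32_t v = 0;
		int i;

		if (src_bytes > 4)
			src_bytes = 4;
		for (i = 0; i < src_bytes; i++)
			v |= (uint32_t)src[i] << ((3 - i) * 8);
		return v;
	}

	/* Inverse: scatter one AUX data word back into up to 4 bytes. */
	static void unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
	{
		int i;

		if (dst_bytes > 4)
			dst_bytes = 4;
		for (i = 0; i < dst_bytes; i++)
			dst[i] = src >> ((3 - i) * 8);
	}

	int main(void)
	{
		const uint8_t msg[3] = { 0x10, 0x20, 0x30 };
		uint8_t out[3];

		uint32_t word = pack_aux(msg, 3);	/* 0x10203000 */
		unpack_aux(word, out, 3);
		printf("0x%08x %02x %02x %02x\n", word, out[0], out[1], out[2]);
		return 0;
	}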
@@ -925,7 +1027,16 @@ intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
925 BUG(); 1027 BUG();
926 } 1028 }
927 1029
928 if (!HAS_DDI(dev)) 1030 /*
1031 * The AUX_CTL register is usually DP_CTL + 0x10.
1032 *
1033 * On Haswell and Broadwell though:
1034 * - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
1035 * - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
1036 *
1037 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
1038 */
1039 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
929 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10; 1040 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
930 1041
931 intel_dp->aux.name = name; 1042 intel_dp->aux.name = name;
@@ -963,6 +1074,33 @@ intel_dp_connector_unregister(struct intel_connector *intel_connector)
963} 1074}
964 1075
965static void 1076static void
1077skl_edp_set_pll_config(struct intel_crtc_config *pipe_config, int link_bw)
1078{
1079 u32 ctrl1;
1080
1081 pipe_config->ddi_pll_sel = SKL_DPLL0;
1082 pipe_config->dpll_hw_state.cfgcr1 = 0;
1083 pipe_config->dpll_hw_state.cfgcr2 = 0;
1084
1085 ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
1086 switch (link_bw) {
1087 case DP_LINK_BW_1_62:
1088 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810,
1089 SKL_DPLL0);
1090 break;
1091 case DP_LINK_BW_2_7:
1092 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350,
1093 SKL_DPLL0);
1094 break;
1095 case DP_LINK_BW_5_4:
1096 ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700,
1097 SKL_DPLL0);
1098 break;
1099 }
1100 pipe_config->dpll_hw_state.ctrl1 = ctrl1;
1101}
1102
1103static void
966hsw_dp_set_ddi_pll_sel(struct intel_crtc_config *pipe_config, int link_bw) 1104hsw_dp_set_ddi_pll_sel(struct intel_crtc_config *pipe_config, int link_bw)
967{ 1105{
968 switch (link_bw) { 1106 switch (link_bw) {
@@ -1139,7 +1277,9 @@ found:
1139 &pipe_config->dp_m2_n2); 1277 &pipe_config->dp_m2_n2);
1140 } 1278 }
1141 1279
1142 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 1280 if (IS_SKYLAKE(dev) && is_edp(intel_dp))
1281 skl_edp_set_pll_config(pipe_config, intel_dp->link_bw);
1282 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
1143 hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw); 1283 hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw);
1144 else 1284 else
1145 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw); 1285 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
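
On the skl_edp_set_pll_config() hunk above: the DPCD link-bandwidth codes it switches on encode the per-lane rate in units of 0.27 Gbps, and the DPLL_CRTL1_LINK_RATE_810/1350/2700 selections appear to be the corresponding half-rate DPLL frequencies in MHz, since the DPLL runs at half the link rate. A small sketch of the code-to-rate arithmetic, with the standard DPCD values written out:

	#include <stdio.h>

	/* DPCD link-bandwidth codes (DP spec): units of 0.27 Gbps per lane. */
	#define DP_LINK_BW_1_62  0x06
	#define DP_LINK_BW_2_7   0x0a
	#define DP_LINK_BW_5_4   0x14

	/* Link symbol rate in kHz for a given bandwidth code. */
	static int link_bw_to_rate_khz(int link_bw)
	{
		return link_bw * 27000;	/* 0x06 -> 162000, 0x0a -> 270000, 0x14 -> 540000 */
	}

	int main(void)
	{
		printf("%d %d %d\n",
		       link_bw_to_rate_khz(DP_LINK_BW_1_62),
		       link_bw_to_rate_khz(DP_LINK_BW_2_7),
		       link_bw_to_rate_khz(DP_LINK_BW_5_4));
		return 0;
	}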
@@ -1212,12 +1352,8 @@ static void intel_dp_prepare(struct intel_encoder *encoder)
1212 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; 1352 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
1213 intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count); 1353 intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
1214 1354
1215 if (crtc->config.has_audio) { 1355 if (crtc->config.has_audio)
1216 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
1217 pipe_name(crtc->pipe));
1218 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; 1356 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
1219 intel_write_eld(&encoder->base, adjusted_mode);
1220 }
1221 1357
1222 /* Split out the IBX/CPU vs CPT settings */ 1358 /* Split out the IBX/CPU vs CPT settings */
1223 1359
@@ -1367,6 +1503,7 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
1367 if (!is_edp(intel_dp)) 1503 if (!is_edp(intel_dp))
1368 return false; 1504 return false;
1369 1505
1506 cancel_delayed_work(&intel_dp->panel_vdd_work);
1370 intel_dp->want_panel_vdd = true; 1507 intel_dp->want_panel_vdd = true;
1371 1508
1372 if (edp_have_panel_vdd(intel_dp)) 1509 if (edp_have_panel_vdd(intel_dp))
@@ -1375,7 +1512,8 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
1375 power_domain = intel_display_port_power_domain(intel_encoder); 1512 power_domain = intel_display_port_power_domain(intel_encoder);
1376 intel_display_power_get(dev_priv, power_domain); 1513 intel_display_power_get(dev_priv, power_domain);
1377 1514
1378 DRM_DEBUG_KMS("Turning eDP VDD on\n"); 1515 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1516 port_name(intel_dig_port->port));
1379 1517
1380 if (!edp_have_panel_power(intel_dp)) 1518 if (!edp_have_panel_power(intel_dp))
1381 wait_panel_power_cycle(intel_dp); 1519 wait_panel_power_cycle(intel_dp);
@@ -1394,7 +1532,8 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
1394 * If the panel wasn't on, delay before accessing aux channel 1532 * If the panel wasn't on, delay before accessing aux channel
1395 */ 1533 */
1396 if (!edp_have_panel_power(intel_dp)) { 1534 if (!edp_have_panel_power(intel_dp)) {
1397 DRM_DEBUG_KMS("eDP was not running\n"); 1535 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1536 port_name(intel_dig_port->port));
1398 msleep(intel_dp->panel_power_up_delay); 1537 msleep(intel_dp->panel_power_up_delay);
1399 } 1538 }
1400 1539
@@ -1419,7 +1558,8 @@ void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
1419 vdd = edp_panel_vdd_on(intel_dp); 1558 vdd = edp_panel_vdd_on(intel_dp);
1420 pps_unlock(intel_dp); 1559 pps_unlock(intel_dp);
1421 1560
1422 WARN(!vdd, "eDP VDD already requested on\n"); 1561 WARN(!vdd, "eDP port %c VDD already requested on\n",
1562 port_name(dp_to_dig_port(intel_dp)->port));
1423} 1563}
1424 1564
1425static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp) 1565static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
@@ -1440,7 +1580,8 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
1440 if (!edp_have_panel_vdd(intel_dp)) 1580 if (!edp_have_panel_vdd(intel_dp))
1441 return; 1581 return;
1442 1582
1443 DRM_DEBUG_KMS("Turning eDP VDD off\n"); 1583 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1584 port_name(intel_dig_port->port));
1444 1585
1445 pp = ironlake_get_pp_control(intel_dp); 1586 pp = ironlake_get_pp_control(intel_dp);
1446 pp &= ~EDP_FORCE_VDD; 1587 pp &= ~EDP_FORCE_VDD;
@@ -1501,7 +1642,8 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1501 if (!is_edp(intel_dp)) 1642 if (!is_edp(intel_dp))
1502 return; 1643 return;
1503 1644
1504 WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on"); 1645 WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
1646 port_name(dp_to_dig_port(intel_dp)->port));
1505 1647
1506 intel_dp->want_panel_vdd = false; 1648 intel_dp->want_panel_vdd = false;
1507 1649
@@ -1511,40 +1653,25 @@ static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1511 edp_panel_vdd_schedule_off(intel_dp); 1653 edp_panel_vdd_schedule_off(intel_dp);
1512} 1654}
1513 1655
1514/* 1656static void edp_panel_on(struct intel_dp *intel_dp)
1515 * Must be paired with intel_edp_panel_vdd_on().
1516 * Nested calls to these functions are not allowed since
1517 * we drop the lock. Caller must use some higher level
1518 * locking to prevent nested calls from other threads.
1519 */
1520static void intel_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
1521{
1522 if (!is_edp(intel_dp))
1523 return;
1524
1525 pps_lock(intel_dp);
1526 edp_panel_vdd_off(intel_dp, sync);
1527 pps_unlock(intel_dp);
1528}
1529
1530void intel_edp_panel_on(struct intel_dp *intel_dp)
1531{ 1657{
1532 struct drm_device *dev = intel_dp_to_dev(intel_dp); 1658 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1533 struct drm_i915_private *dev_priv = dev->dev_private; 1659 struct drm_i915_private *dev_priv = dev->dev_private;
1534 u32 pp; 1660 u32 pp;
1535 u32 pp_ctrl_reg; 1661 u32 pp_ctrl_reg;
1536 1662
1663 lockdep_assert_held(&dev_priv->pps_mutex);
1664
1537 if (!is_edp(intel_dp)) 1665 if (!is_edp(intel_dp))
1538 return; 1666 return;
1539 1667
1540 DRM_DEBUG_KMS("Turn eDP power on\n"); 1668 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1669 port_name(dp_to_dig_port(intel_dp)->port));
1541 1670
1542 pps_lock(intel_dp); 1671 if (WARN(edp_have_panel_power(intel_dp),
1543 1672 "eDP port %c panel power already on\n",
1544 if (edp_have_panel_power(intel_dp)) { 1673 port_name(dp_to_dig_port(intel_dp)->port)))
1545 DRM_DEBUG_KMS("eDP power already on\n"); 1674 return;
1546 goto out;
1547 }
1548 1675
1549 wait_panel_power_cycle(intel_dp); 1676 wait_panel_power_cycle(intel_dp);
1550 1677
@@ -1572,12 +1699,20 @@ void intel_edp_panel_on(struct intel_dp *intel_dp)
1572 I915_WRITE(pp_ctrl_reg, pp); 1699 I915_WRITE(pp_ctrl_reg, pp);
1573 POSTING_READ(pp_ctrl_reg); 1700 POSTING_READ(pp_ctrl_reg);
1574 } 1701 }
1702}
1703
1704void intel_edp_panel_on(struct intel_dp *intel_dp)
1705{
1706 if (!is_edp(intel_dp))
1707 return;
1575 1708
1576 out: 1709 pps_lock(intel_dp);
1710 edp_panel_on(intel_dp);
1577 pps_unlock(intel_dp); 1711 pps_unlock(intel_dp);
1578} 1712}
1579 1713
1580void intel_edp_panel_off(struct intel_dp *intel_dp) 1714
1715static void edp_panel_off(struct intel_dp *intel_dp)
1581{ 1716{
1582 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 1717 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1583 struct intel_encoder *intel_encoder = &intel_dig_port->base; 1718 struct intel_encoder *intel_encoder = &intel_dig_port->base;
@@ -1587,14 +1722,16 @@ void intel_edp_panel_off(struct intel_dp *intel_dp)
1587 u32 pp; 1722 u32 pp;
1588 u32 pp_ctrl_reg; 1723 u32 pp_ctrl_reg;
1589 1724
1725 lockdep_assert_held(&dev_priv->pps_mutex);
1726
1590 if (!is_edp(intel_dp)) 1727 if (!is_edp(intel_dp))
1591 return; 1728 return;
1592 1729
1593 DRM_DEBUG_KMS("Turn eDP power off\n"); 1730 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
1594 1731 port_name(dp_to_dig_port(intel_dp)->port));
1595 pps_lock(intel_dp);
1596 1732
1597 WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n"); 1733 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
1734 port_name(dp_to_dig_port(intel_dp)->port));
1598 1735
1599 pp = ironlake_get_pp_control(intel_dp); 1736 pp = ironlake_get_pp_control(intel_dp);
1600 /* We need to switch off panel power _and_ force vdd, for otherwise some 1737 /* We need to switch off panel power _and_ force vdd, for otherwise some
@@ -1615,7 +1752,15 @@ void intel_edp_panel_off(struct intel_dp *intel_dp)
1615 /* We got a reference when we enabled the VDD. */ 1752 /* We got a reference when we enabled the VDD. */
1616 power_domain = intel_display_port_power_domain(intel_encoder); 1753 power_domain = intel_display_port_power_domain(intel_encoder);
1617 intel_display_power_put(dev_priv, power_domain); 1754 intel_display_power_put(dev_priv, power_domain);
1755}
1756
1757void intel_edp_panel_off(struct intel_dp *intel_dp)
1758{
1759 if (!is_edp(intel_dp))
1760 return;
1618 1761
1762 pps_lock(intel_dp);
1763 edp_panel_off(intel_dp);
1619 pps_unlock(intel_dp); 1764 pps_unlock(intel_dp);
1620} 1765}
1621 1766
@@ -1819,7 +1964,7 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
1819 u32 tmp; 1964 u32 tmp;
1820 1965
1821 power_domain = intel_display_port_power_domain(encoder); 1966 power_domain = intel_display_port_power_domain(encoder);
1822 if (!intel_display_power_enabled(dev_priv, power_domain)) 1967 if (!intel_display_power_is_enabled(dev_priv, power_domain))
1823 return false; 1968 return false;
1824 1969
1825 tmp = I915_READ(intel_dp->output_reg); 1970 tmp = I915_READ(intel_dp->output_reg);
@@ -1951,368 +2096,14 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
1951 } 2096 }
1952} 2097}
1953 2098
1954static bool is_edp_psr(struct intel_dp *intel_dp)
1955{
1956 return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
1957}
1958
1959static bool intel_edp_is_psr_enabled(struct drm_device *dev)
1960{
1961 struct drm_i915_private *dev_priv = dev->dev_private;
1962
1963 if (!HAS_PSR(dev))
1964 return false;
1965
1966 return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
1967}
1968
1969static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp,
1970 struct edp_vsc_psr *vsc_psr)
1971{
1972 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1973 struct drm_device *dev = dig_port->base.base.dev;
1974 struct drm_i915_private *dev_priv = dev->dev_private;
1975 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1976 u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder);
1977 u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder);
1978 uint32_t *data = (uint32_t *) vsc_psr;
1979 unsigned int i;
1980
1981	/* As per BSpec (Pipe Video Data Island Packet), we need to disable
1982	   the video DIP before programming the video DIP data buffer
1983	   registers for the DIP being updated. */
1984 I915_WRITE(ctl_reg, 0);
1985 POSTING_READ(ctl_reg);
1986
1987 for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) {
1988 if (i < sizeof(struct edp_vsc_psr))
1989 I915_WRITE(data_reg + i, *data++);
1990 else
1991 I915_WRITE(data_reg + i, 0);
1992 }
1993
1994 I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
1995 POSTING_READ(ctl_reg);
1996}
1997
1998static void intel_edp_psr_setup(struct intel_dp *intel_dp)
1999{
2000 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2001 struct drm_i915_private *dev_priv = dev->dev_private;
2002 struct edp_vsc_psr psr_vsc;
2003
2004 /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
2005 memset(&psr_vsc, 0, sizeof(psr_vsc));
2006 psr_vsc.sdp_header.HB0 = 0;
2007 psr_vsc.sdp_header.HB1 = 0x7;
2008 psr_vsc.sdp_header.HB2 = 0x2;
2009 psr_vsc.sdp_header.HB3 = 0x8;
2010 intel_edp_psr_write_vsc(intel_dp, &psr_vsc);
2011
2012 /* Avoid continuous PSR exit by masking memup and hpd */
2013 I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
2014 EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
2015}
2016
2017static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
2018{
2019 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2020 struct drm_device *dev = dig_port->base.base.dev;
2021 struct drm_i915_private *dev_priv = dev->dev_private;
2022 uint32_t aux_clock_divider;
2023 int precharge = 0x3;
2024 int msg_size = 5; /* Header(4) + Message(1) */
2025 bool only_standby = false;
2026
2027 aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
2028
2029 if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
2030 only_standby = true;
2031
2032 /* Enable PSR in sink */
2033 if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby)
2034 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
2035 DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
2036 else
2037 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
2038 DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
2039
2040 /* Setup AUX registers */
2041 I915_WRITE(EDP_PSR_AUX_DATA1(dev), EDP_PSR_DPCD_COMMAND);
2042 I915_WRITE(EDP_PSR_AUX_DATA2(dev), EDP_PSR_DPCD_NORMAL_OPERATION);
2043 I915_WRITE(EDP_PSR_AUX_CTL(dev),
2044 DP_AUX_CH_CTL_TIME_OUT_400us |
2045 (msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
2046 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
2047 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
2048}
2049
2050static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
2051{
2052 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2053 struct drm_device *dev = dig_port->base.base.dev;
2054 struct drm_i915_private *dev_priv = dev->dev_private;
2055 uint32_t max_sleep_time = 0x1f;
2056 uint32_t idle_frames = 1;
2057 uint32_t val = 0x0;
2058 const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
2059 bool only_standby = false;
2060
2061 if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
2062 only_standby = true;
2063
2064 if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) {
2065 val |= EDP_PSR_LINK_STANDBY;
2066 val |= EDP_PSR_TP2_TP3_TIME_0us;
2067 val |= EDP_PSR_TP1_TIME_0us;
2068 val |= EDP_PSR_SKIP_AUX_EXIT;
2069 val |= IS_BROADWELL(dev) ? BDW_PSR_SINGLE_FRAME : 0;
2070 } else
2071 val |= EDP_PSR_LINK_DISABLE;
2072
2073 I915_WRITE(EDP_PSR_CTL(dev), val |
2074 (IS_BROADWELL(dev) ? 0 : link_entry_time) |
2075 max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
2076 idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
2077 EDP_PSR_ENABLE);
2078}
2079
2080static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
2081{
2082 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2083 struct drm_device *dev = dig_port->base.base.dev;
2084 struct drm_i915_private *dev_priv = dev->dev_private;
2085 struct drm_crtc *crtc = dig_port->base.base.crtc;
2086 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2087
2088 lockdep_assert_held(&dev_priv->psr.lock);
2089 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
2090 WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
2091
2092 dev_priv->psr.source_ok = false;
2093
2094 if (IS_HASWELL(dev) && dig_port->port != PORT_A) {
2095 DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
2096 return false;
2097 }
2098
2099 if (!i915.enable_psr) {
2100 DRM_DEBUG_KMS("PSR disable by flag\n");
2101 return false;
2102 }
2103
2104	/* The limitations below don't apply to Broadwell */
2105 if (IS_BROADWELL(dev))
2106 goto out;
2107
2108 if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
2109 S3D_ENABLE) {
2110 DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
2111 return false;
2112 }
2113
2114 if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
2115 DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
2116 return false;
2117 }
2118
2119 out:
2120 dev_priv->psr.source_ok = true;
2121 return true;
2122}
2123
2124static void intel_edp_psr_do_enable(struct intel_dp *intel_dp)
2125{
2126 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2127 struct drm_device *dev = intel_dig_port->base.base.dev;
2128 struct drm_i915_private *dev_priv = dev->dev_private;
2129
2130 WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
2131 WARN_ON(dev_priv->psr.active);
2132 lockdep_assert_held(&dev_priv->psr.lock);
2133
2134 /* Enable PSR on the panel */
2135 intel_edp_psr_enable_sink(intel_dp);
2136
2137 /* Enable PSR on the host */
2138 intel_edp_psr_enable_source(intel_dp);
2139
2140 dev_priv->psr.active = true;
2141}
2142
2143void intel_edp_psr_enable(struct intel_dp *intel_dp)
2144{
2145 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2146 struct drm_i915_private *dev_priv = dev->dev_private;
2147
2148 if (!HAS_PSR(dev)) {
2149 DRM_DEBUG_KMS("PSR not supported on this platform\n");
2150 return;
2151 }
2152
2153 if (!is_edp_psr(intel_dp)) {
2154 DRM_DEBUG_KMS("PSR not supported by this panel\n");
2155 return;
2156 }
2157
2158 mutex_lock(&dev_priv->psr.lock);
2159 if (dev_priv->psr.enabled) {
2160 DRM_DEBUG_KMS("PSR already in use\n");
2161 mutex_unlock(&dev_priv->psr.lock);
2162 return;
2163 }
2164
2165 dev_priv->psr.busy_frontbuffer_bits = 0;
2166
2167 /* Setup PSR once */
2168 intel_edp_psr_setup(intel_dp);
2169
2170 if (intel_edp_psr_match_conditions(intel_dp))
2171 dev_priv->psr.enabled = intel_dp;
2172 mutex_unlock(&dev_priv->psr.lock);
2173}
2174
2175void intel_edp_psr_disable(struct intel_dp *intel_dp)
2176{
2177 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2178 struct drm_i915_private *dev_priv = dev->dev_private;
2179
2180 mutex_lock(&dev_priv->psr.lock);
2181 if (!dev_priv->psr.enabled) {
2182 mutex_unlock(&dev_priv->psr.lock);
2183 return;
2184 }
2185
2186 if (dev_priv->psr.active) {
2187 I915_WRITE(EDP_PSR_CTL(dev),
2188 I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
2189
2190 /* Wait till PSR is idle */
2191 if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
2192 EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
2193 DRM_ERROR("Timed out waiting for PSR Idle State\n");
2194
2195 dev_priv->psr.active = false;
2196 } else {
2197 WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
2198 }
2199
2200 dev_priv->psr.enabled = NULL;
2201 mutex_unlock(&dev_priv->psr.lock);
2202
2203 cancel_delayed_work_sync(&dev_priv->psr.work);
2204}
2205
2206static void intel_edp_psr_work(struct work_struct *work)
2207{
2208 struct drm_i915_private *dev_priv =
2209 container_of(work, typeof(*dev_priv), psr.work.work);
2210 struct intel_dp *intel_dp = dev_priv->psr.enabled;
2211
2212 mutex_lock(&dev_priv->psr.lock);
2213 intel_dp = dev_priv->psr.enabled;
2214
2215 if (!intel_dp)
2216 goto unlock;
2217
2218 /*
2219	 * The delayed work can race with an invalidate, hence we need to
2220 * recheck. Since psr_flush first clears this and then reschedules we
2221 * won't ever miss a flush when bailing out here.
2222 */
2223 if (dev_priv->psr.busy_frontbuffer_bits)
2224 goto unlock;
2225
2226 intel_edp_psr_do_enable(intel_dp);
2227unlock:
2228 mutex_unlock(&dev_priv->psr.lock);
2229}
2230
2231static void intel_edp_psr_do_exit(struct drm_device *dev)
2232{
2233 struct drm_i915_private *dev_priv = dev->dev_private;
2234
2235 if (dev_priv->psr.active) {
2236 u32 val = I915_READ(EDP_PSR_CTL(dev));
2237
2238 WARN_ON(!(val & EDP_PSR_ENABLE));
2239
2240 I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);
2241
2242 dev_priv->psr.active = false;
2243 }
2244
2245}
2246
2247void intel_edp_psr_invalidate(struct drm_device *dev,
2248 unsigned frontbuffer_bits)
2249{
2250 struct drm_i915_private *dev_priv = dev->dev_private;
2251 struct drm_crtc *crtc;
2252 enum pipe pipe;
2253
2254 mutex_lock(&dev_priv->psr.lock);
2255 if (!dev_priv->psr.enabled) {
2256 mutex_unlock(&dev_priv->psr.lock);
2257 return;
2258 }
2259
2260 crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
2261 pipe = to_intel_crtc(crtc)->pipe;
2262
2263 intel_edp_psr_do_exit(dev);
2264
2265 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
2266
2267 dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
2268 mutex_unlock(&dev_priv->psr.lock);
2269}
2270
2271void intel_edp_psr_flush(struct drm_device *dev,
2272 unsigned frontbuffer_bits)
2273{
2274 struct drm_i915_private *dev_priv = dev->dev_private;
2275 struct drm_crtc *crtc;
2276 enum pipe pipe;
2277
2278 mutex_lock(&dev_priv->psr.lock);
2279 if (!dev_priv->psr.enabled) {
2280 mutex_unlock(&dev_priv->psr.lock);
2281 return;
2282 }
2283
2284 crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
2285 pipe = to_intel_crtc(crtc)->pipe;
2286 dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
2287
2288 /*
2289	 * On Haswell, sprite plane updates don't result in a PSR invalidating
2290	 * signal in the hardware, which means we need to manually fake this in
2291 * software for all flushes, not just when we've seen a preceding
2292 * invalidation through frontbuffer rendering.
2293 */
2294 if (IS_HASWELL(dev) &&
2295 (frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
2296 intel_edp_psr_do_exit(dev);
2297
2298 if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
2299 schedule_delayed_work(&dev_priv->psr.work,
2300 msecs_to_jiffies(100));
2301 mutex_unlock(&dev_priv->psr.lock);
2302}
2303
2304void intel_edp_psr_init(struct drm_device *dev)
2305{
2306 struct drm_i915_private *dev_priv = dev->dev_private;
2307
2308 INIT_DELAYED_WORK(&dev_priv->psr.work, intel_edp_psr_work);
2309 mutex_init(&dev_priv->psr.lock);
2310}
2311
2312static void intel_disable_dp(struct intel_encoder *encoder) 2099static void intel_disable_dp(struct intel_encoder *encoder)
2313{ 2100{
2314 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 2101 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2315 struct drm_device *dev = encoder->base.dev; 2102 struct drm_device *dev = encoder->base.dev;
2103 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2104
2105 if (crtc->config.has_audio)
2106 intel_audio_codec_disable(encoder);
2316 2107
2317 /* Make sure the panel is off before trying to change the mode. But also 2108 /* Make sure the panel is off before trying to change the mode. But also
2318 * ensure that we have vdd while we switch off the panel. */ 2109 * ensure that we have vdd while we switch off the panel. */
@@ -2467,14 +2258,23 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp)
2467 struct drm_device *dev = intel_dp_to_dev(intel_dp); 2258 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2468 struct drm_i915_private *dev_priv = dev->dev_private; 2259 struct drm_i915_private *dev_priv = dev->dev_private;
2469 2260
2470 intel_dp->DP |= DP_PORT_EN;
2471
2472 /* enable with pattern 1 (as per spec) */ 2261 /* enable with pattern 1 (as per spec) */
2473 _intel_dp_set_link_train(intel_dp, &intel_dp->DP, 2262 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2474 DP_TRAINING_PATTERN_1); 2263 DP_TRAINING_PATTERN_1);
2475 2264
2476 I915_WRITE(intel_dp->output_reg, intel_dp->DP); 2265 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2477 POSTING_READ(intel_dp->output_reg); 2266 POSTING_READ(intel_dp->output_reg);
2267
2268 /*
2269 * Magic for VLV/CHV. We _must_ first set up the register
2270 * without actually enabling the port, and then do another
2271 * write to enable the port. Otherwise link training will
2272 * fail when the power sequencer is freshly used for this port.
2273 */
2274 intel_dp->DP |= DP_PORT_EN;
2275
2276 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2277 POSTING_READ(intel_dp->output_reg);
2478} 2278}
2479 2279
2480static void intel_enable_dp(struct intel_encoder *encoder) 2280static void intel_enable_dp(struct intel_encoder *encoder)
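
The two-step enable in intel_dp_enable_port() above relies on a common MMIO idiom: write the full configuration with the enable bit clear, flush it with a posting read, then write again with only the enable bit added. A toy model of that sequence, with a fake register standing in for the real MMIO accessors; the DP_PORT_EN value here is assumed, for illustration only.

	#include <stdint.h>
	#include <stdio.h>

	#define DP_PORT_EN (1u << 31)	/* assumed bit position, illustrative */

	/* Fake register standing in for I915_WRITE()/POSTING_READ(). */
	static uint32_t fake_reg;
	static void reg_write(uint32_t val) { fake_reg = val; }
	static uint32_t reg_read(void) { return fake_reg; }

	int main(void)
	{
		uint32_t dp = 0x00000140;	/* arbitrary link configuration bits */

		reg_write(dp);			/* 1st write: configured but disabled */
		(void)reg_read();		/* posting read flushes the write */

		reg_write(dp | DP_PORT_EN);	/* 2nd write: flip only the enable bit */
		(void)reg_read();

		printf("reg = 0x%08x\n", reg_read());
		return 0;
	}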
@@ -2482,19 +2282,38 @@ static void intel_enable_dp(struct intel_encoder *encoder)
2482 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 2282 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2483 struct drm_device *dev = encoder->base.dev; 2283 struct drm_device *dev = encoder->base.dev;
2484 struct drm_i915_private *dev_priv = dev->dev_private; 2284 struct drm_i915_private *dev_priv = dev->dev_private;
2285 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2485 uint32_t dp_reg = I915_READ(intel_dp->output_reg); 2286 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
2486 2287
2487 if (WARN_ON(dp_reg & DP_PORT_EN)) 2288 if (WARN_ON(dp_reg & DP_PORT_EN))
2488 return; 2289 return;
2489 2290
2291 pps_lock(intel_dp);
2292
2293 if (IS_VALLEYVIEW(dev))
2294 vlv_init_panel_power_sequencer(intel_dp);
2295
2490 intel_dp_enable_port(intel_dp); 2296 intel_dp_enable_port(intel_dp);
2491 intel_edp_panel_vdd_on(intel_dp); 2297
2492 intel_edp_panel_on(intel_dp); 2298 edp_panel_vdd_on(intel_dp);
2493 intel_edp_panel_vdd_off(intel_dp, true); 2299 edp_panel_on(intel_dp);
2300 edp_panel_vdd_off(intel_dp, true);
2301
2302 pps_unlock(intel_dp);
2303
2304 if (IS_VALLEYVIEW(dev))
2305 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp));
2306
2494 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); 2307 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2495 intel_dp_start_link_train(intel_dp); 2308 intel_dp_start_link_train(intel_dp);
2496 intel_dp_complete_link_train(intel_dp); 2309 intel_dp_complete_link_train(intel_dp);
2497 intel_dp_stop_link_train(intel_dp); 2310 intel_dp_stop_link_train(intel_dp);
2311
2312 if (crtc->config.has_audio) {
2313 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2314 pipe_name(crtc->pipe));
2315 intel_audio_codec_enable(encoder);
2316 }
2498} 2317}
2499 2318
2500static void g4x_enable_dp(struct intel_encoder *encoder) 2319static void g4x_enable_dp(struct intel_encoder *encoder)
@@ -2526,6 +2345,32 @@ static void g4x_pre_enable_dp(struct intel_encoder *encoder)
2526 } 2345 }
2527} 2346}
2528 2347
2348static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2349{
2350 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2351 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2352 enum pipe pipe = intel_dp->pps_pipe;
2353 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2354
2355 edp_panel_vdd_off_sync(intel_dp);
2356
2357 /*
2358	 * VLV seems to get confused when multiple power sequencers
2359	 * have the same port selected (even if only one has power/vdd
2360	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
2361	 * CHV, on the other hand, doesn't seem to mind having the same port
2362	 * selected in multiple power sequencers, but let's always clear the
2363	 * port select when logically disconnecting a power sequencer
2364 * from a port.
2365 */
2366 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2367 pipe_name(pipe), port_name(intel_dig_port->port));
2368 I915_WRITE(pp_on_reg, 0);
2369 POSTING_READ(pp_on_reg);
2370
2371 intel_dp->pps_pipe = INVALID_PIPE;
2372}
2373
2529static void vlv_steal_power_sequencer(struct drm_device *dev, 2374static void vlv_steal_power_sequencer(struct drm_device *dev,
2530 enum pipe pipe) 2375 enum pipe pipe)
2531{ 2376{
@@ -2534,6 +2379,9 @@ static void vlv_steal_power_sequencer(struct drm_device *dev,
2534 2379
2535 lockdep_assert_held(&dev_priv->pps_mutex); 2380 lockdep_assert_held(&dev_priv->pps_mutex);
2536 2381
2382 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2383 return;
2384
2537 list_for_each_entry(encoder, &dev->mode_config.encoder_list, 2385 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2538 base.head) { 2386 base.head) {
2539 struct intel_dp *intel_dp; 2387 struct intel_dp *intel_dp;
@@ -2551,10 +2399,12 @@ static void vlv_steal_power_sequencer(struct drm_device *dev,
2551 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n", 2399 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
2552 pipe_name(pipe), port_name(port)); 2400 pipe_name(pipe), port_name(port));
2553 2401
2554 /* make sure vdd is off before we steal it */ 2402 WARN(encoder->connectors_active,
2555 edp_panel_vdd_off_sync(intel_dp); 2403 "stealing pipe %c power sequencer from active eDP port %c\n",
2404 pipe_name(pipe), port_name(port));
2556 2405
2557 intel_dp->pps_pipe = INVALID_PIPE; 2406 /* make sure vdd is off before we steal it */
2407 vlv_detach_power_sequencer(intel_dp);
2558 } 2408 }
2559} 2409}
2560 2410
@@ -2565,10 +2415,12 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2565 struct drm_device *dev = encoder->base.dev; 2415 struct drm_device *dev = encoder->base.dev;
2566 struct drm_i915_private *dev_priv = dev->dev_private; 2416 struct drm_i915_private *dev_priv = dev->dev_private;
2567 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 2417 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2568 struct edp_power_seq power_seq;
2569 2418
2570 lockdep_assert_held(&dev_priv->pps_mutex); 2419 lockdep_assert_held(&dev_priv->pps_mutex);
2571 2420
2421 if (!is_edp(intel_dp))
2422 return;
2423
2572 if (intel_dp->pps_pipe == crtc->pipe) 2424 if (intel_dp->pps_pipe == crtc->pipe)
2573 return; 2425 return;
2574 2426
@@ -2578,7 +2430,7 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2578 * we still have control of it. 2430 * we still have control of it.
2579 */ 2431 */
2580 if (intel_dp->pps_pipe != INVALID_PIPE) 2432 if (intel_dp->pps_pipe != INVALID_PIPE)
2581 edp_panel_vdd_off_sync(intel_dp); 2433 vlv_detach_power_sequencer(intel_dp);
2582 2434
2583 /* 2435 /*
2584 * We may be stealing the power 2436 * We may be stealing the power
@@ -2593,9 +2445,8 @@ static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2593 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port)); 2445 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2594 2446
2595 /* init power sequencer on this pipe and port */ 2447 /* init power sequencer on this pipe and port */
2596 intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq); 2448 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2597 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, 2449 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
2598 &power_seq);
2599} 2450}
2600 2451
2601static void vlv_pre_enable_dp(struct intel_encoder *encoder) 2452static void vlv_pre_enable_dp(struct intel_encoder *encoder)
@@ -2624,15 +2475,7 @@ static void vlv_pre_enable_dp(struct intel_encoder *encoder)
2624 2475
2625 mutex_unlock(&dev_priv->dpio_lock); 2476 mutex_unlock(&dev_priv->dpio_lock);
2626 2477
2627 if (is_edp(intel_dp)) {
2628 pps_lock(intel_dp);
2629 vlv_init_panel_power_sequencer(intel_dp);
2630 pps_unlock(intel_dp);
2631 }
2632
2633 intel_enable_dp(encoder); 2478 intel_enable_dp(encoder);
2634
2635 vlv_wait_port_ready(dev_priv, dport);
2636} 2479}
2637 2480
2638static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder) 2481static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
@@ -2680,6 +2523,15 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder)
2680 2523
2681 mutex_lock(&dev_priv->dpio_lock); 2524 mutex_lock(&dev_priv->dpio_lock);
2682 2525
2526 /* allow hardware to manage TX FIFO reset source */
2527 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2528 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2529 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2530
2531 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2532 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2533 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2534
2683 /* Deassert soft data lane reset*/ 2535 /* Deassert soft data lane reset*/
2684 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch)); 2536 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
2685 val |= CHV_PCS_REQ_SOFTRESET_EN; 2537 val |= CHV_PCS_REQ_SOFTRESET_EN;
@@ -2715,15 +2567,7 @@ static void chv_pre_enable_dp(struct intel_encoder *encoder)
2715 2567
2716 mutex_unlock(&dev_priv->dpio_lock); 2568 mutex_unlock(&dev_priv->dpio_lock);
2717 2569
2718 if (is_edp(intel_dp)) {
2719 pps_lock(intel_dp);
2720 vlv_init_panel_power_sequencer(intel_dp);
2721 pps_unlock(intel_dp);
2722 }
2723
2724 intel_enable_dp(encoder); 2570 intel_enable_dp(encoder);
2725
2726 vlv_wait_port_ready(dev_priv, dport);
2727} 2571}
2728 2572
2729static void chv_dp_pre_pll_enable(struct intel_encoder *encoder) 2573static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
@@ -2843,7 +2687,9 @@ intel_dp_voltage_max(struct intel_dp *intel_dp)
2843 struct drm_device *dev = intel_dp_to_dev(intel_dp); 2687 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2844 enum port port = dp_to_dig_port(intel_dp)->port; 2688 enum port port = dp_to_dig_port(intel_dp)->port;
2845 2689
2846 if (IS_VALLEYVIEW(dev)) 2690 if (INTEL_INFO(dev)->gen >= 9)
2691 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
2692 else if (IS_VALLEYVIEW(dev))
2847 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3; 2693 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
2848 else if (IS_GEN7(dev) && port == PORT_A) 2694 else if (IS_GEN7(dev) && port == PORT_A)
2849 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2; 2695 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
@@ -2859,7 +2705,18 @@ intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
2859 struct drm_device *dev = intel_dp_to_dev(intel_dp); 2705 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2860 enum port port = dp_to_dig_port(intel_dp)->port; 2706 enum port port = dp_to_dig_port(intel_dp)->port;
2861 2707
2862 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { 2708 if (INTEL_INFO(dev)->gen >= 9) {
2709 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2710 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2711 return DP_TRAIN_PRE_EMPH_LEVEL_3;
2712 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
2713 return DP_TRAIN_PRE_EMPH_LEVEL_2;
2714 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
2715 return DP_TRAIN_PRE_EMPH_LEVEL_1;
2716 default:
2717 return DP_TRAIN_PRE_EMPH_LEVEL_0;
2718 }
2719 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2863 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) { 2720 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
2864 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: 2721 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
2865 return DP_TRAIN_PRE_EMPH_LEVEL_3; 2722 return DP_TRAIN_PRE_EMPH_LEVEL_3;
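
A note on the gen9 mapping added above: each extra step of voltage swing lowers the maximum allowed pre-emphasis by one level, so the table collapses to a simple subtraction. The helper below is only an observation about the table, not a function that exists in the driver.

	#include <stdio.h>

	/* Maximum pre-emphasis level for a given voltage-swing level,
	 * matching the gen9 switch above: 0->3, 1->2, 2->1, 3+->0. */
	static int max_pre_emphasis(int voltage_swing_level)
	{
		if (voltage_swing_level >= 3)
			return 0;
		return 3 - voltage_swing_level;
	}

	int main(void)
	{
		int swing;

		for (swing = 0; swing <= 3; swing++)
			printf("swing %d -> max pre-emphasis %d\n",
			       swing, max_pre_emphasis(swing));
		return 0;
	}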
@@ -3095,12 +2952,26 @@ static uint32_t intel_chv_signal_levels(struct intel_dp *intel_dp)
3095 /* Clear calc init */ 2952 /* Clear calc init */
3096 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch)); 2953 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3097 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3); 2954 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
2955 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
2956 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3098 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val); 2957 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3099 2958
3100 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch)); 2959 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3101 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3); 2960 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
2961 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
2962 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3102 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val); 2963 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3103 2964
2965 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
2966 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
2967 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
2968 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
2969
2970 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
2971 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
2972 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
2973 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
2974
3104 /* Program swing deemph */ 2975 /* Program swing deemph */
3105 for (i = 0; i < 4; i++) { 2976 for (i = 0; i < 4; i++) {
3106 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i)); 2977 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
@@ -3341,7 +3212,7 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3341 uint32_t signal_levels, mask; 3212 uint32_t signal_levels, mask;
3342 uint8_t train_set = intel_dp->train_set[0]; 3213 uint8_t train_set = intel_dp->train_set[0];
3343 3214
3344 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { 3215 if (IS_HASWELL(dev) || IS_BROADWELL(dev) || INTEL_INFO(dev)->gen >= 9) {
3345 signal_levels = intel_hsw_signal_levels(train_set); 3216 signal_levels = intel_hsw_signal_levels(train_set);
3346 mask = DDI_BUF_EMP_MASK; 3217 mask = DDI_BUF_EMP_MASK;
3347 } else if (IS_CHERRYVIEW(dev)) { 3218 } else if (IS_CHERRYVIEW(dev)) {
@@ -3605,7 +3476,6 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
3605 3476
3606 /* Try 5 times, then try clock recovery if that fails */ 3477 /* Try 5 times, then try clock recovery if that fails */
3607 if (tries > 5) { 3478 if (tries > 5) {
3608 intel_dp_link_down(intel_dp);
3609 intel_dp_start_link_train(intel_dp); 3479 intel_dp_start_link_train(intel_dp);
3610 intel_dp_set_link_train(intel_dp, &DP, 3480 intel_dp_set_link_train(intel_dp, &DP,
3611 training_pattern | 3481 training_pattern |
@@ -3763,8 +3633,6 @@ intel_dp_probe_oui(struct intel_dp *intel_dp)
3763 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT)) 3633 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
3764 return; 3634 return;
3765 3635
3766 intel_edp_panel_vdd_on(intel_dp);
3767
3768 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3) 3636 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
3769 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n", 3637 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
3770 buf[0], buf[1], buf[2]); 3638 buf[0], buf[1], buf[2]);
@@ -3772,8 +3640,6 @@ intel_dp_probe_oui(struct intel_dp *intel_dp)
3772 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3) 3640 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
3773 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n", 3641 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
3774 buf[0], buf[1], buf[2]); 3642 buf[0], buf[1], buf[2]);
3775
3776 intel_edp_panel_vdd_off(intel_dp, false);
3777} 3643}
3778 3644
3779static bool 3645static bool
@@ -3787,7 +3653,6 @@ intel_dp_probe_mst(struct intel_dp *intel_dp)
3787 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12) 3653 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3788 return false; 3654 return false;
3789 3655
3790 intel_edp_panel_vdd_on(intel_dp);
3791 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) { 3656 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
3792 if (buf[0] & DP_MST_CAP) { 3657 if (buf[0] & DP_MST_CAP) {
3793 DRM_DEBUG_KMS("Sink is MST capable\n"); 3658 DRM_DEBUG_KMS("Sink is MST capable\n");
@@ -3797,7 +3662,6 @@ intel_dp_probe_mst(struct intel_dp *intel_dp)
3797 intel_dp->is_mst = false; 3662 intel_dp->is_mst = false;
3798 } 3663 }
3799 } 3664 }
3800 intel_edp_panel_vdd_off(intel_dp, false);
3801 3665
3802 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst); 3666 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
3803 return intel_dp->is_mst; 3667 return intel_dp->is_mst;
@@ -3809,26 +3673,48 @@ int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
3809 struct drm_device *dev = intel_dig_port->base.base.dev; 3673 struct drm_device *dev = intel_dig_port->base.base.dev;
3810 struct intel_crtc *intel_crtc = 3674 struct intel_crtc *intel_crtc =
3811 to_intel_crtc(intel_dig_port->base.base.crtc); 3675 to_intel_crtc(intel_dig_port->base.base.crtc);
3812 u8 buf[1]; 3676 u8 buf;
3677 int test_crc_count;
3678 int attempts = 6;
3813 3679
3814 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, buf) < 0) 3680 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
3815 return -EIO; 3681 return -EIO;
3816 3682
3817 if (!(buf[0] & DP_TEST_CRC_SUPPORTED)) 3683 if (!(buf & DP_TEST_CRC_SUPPORTED))
3818 return -ENOTTY; 3684 return -ENOTTY;
3819 3685
3686 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3687 return -EIO;
3688
3820 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK, 3689 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3821 DP_TEST_SINK_START) < 0) 3690 buf | DP_TEST_SINK_START) < 0)
3691 return -EIO;
3692
3693 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
3822 return -EIO; 3694 return -EIO;
3695 test_crc_count = buf & DP_TEST_COUNT_MASK;
3823 3696
3824 /* Wait 2 vblanks to be sure we will have the correct CRC value */ 3697 do {
3825 intel_wait_for_vblank(dev, intel_crtc->pipe); 3698 if (drm_dp_dpcd_readb(&intel_dp->aux,
3826 intel_wait_for_vblank(dev, intel_crtc->pipe); 3699 DP_TEST_SINK_MISC, &buf) < 0)
3700 return -EIO;
3701 intel_wait_for_vblank(dev, intel_crtc->pipe);
3702 } while (--attempts && (buf & DP_TEST_COUNT_MASK) == test_crc_count);
3703
3704 if (attempts == 0) {
3705 DRM_DEBUG_KMS("Panel is unable to calculate CRC after 6 vblanks\n");
3706 return -ETIMEDOUT;
3707 }
3827 3708
3828 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) 3709 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0)
3829 return -EIO; 3710 return -EIO;
3830 3711
3831 drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK, 0); 3712 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3713 return -EIO;
3714 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3715 buf & ~DP_TEST_SINK_START) < 0)
3716 return -EIO;
3717
3832 return 0; 3718 return 0;
3833} 3719}
3834 3720
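
The reworked intel_dp_sink_crc() above replaces a fixed two-vblank wait with a bounded poll: re-read the sink's CRC count once per vblank and give up after six attempts. Stripped of the DPCD details, the retry shape looks like this; the names and the fake counter are illustrative.

	#include <stdbool.h>
	#include <stdio.h>

	static int fake_count;
	static int read_count(void) { return ++fake_count / 3; }	/* changes on 3rd read */

	/* Poll until the counter moves past 'old', giving up after 'attempts'
	 * tries; in the driver the loop body also waits for a vblank. */
	static bool poll_until_changed(int old, int attempts)
	{
		do {
			if (read_count() != old)
				return true;
		} while (--attempts);
		return false;
	}

	int main(void)
	{
		printf("changed: %s\n", poll_until_changed(0, 6) ? "yes" : "no");
		return 0;
	}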
@@ -4456,9 +4342,52 @@ static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4456 pps_unlock(intel_dp); 4342 pps_unlock(intel_dp);
4457} 4343}
4458 4344
4345static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4346{
4347 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4348 struct drm_device *dev = intel_dig_port->base.base.dev;
4349 struct drm_i915_private *dev_priv = dev->dev_private;
4350 enum intel_display_power_domain power_domain;
4351
4352 lockdep_assert_held(&dev_priv->pps_mutex);
4353
4354 if (!edp_have_panel_vdd(intel_dp))
4355 return;
4356
4357 /*
4358 * The VDD bit needs a power domain reference, so if the bit is
4359 * already enabled when we boot or resume, grab this reference and
4360 * schedule a vdd off, so we don't hold on to the reference
4361 * indefinitely.
4362 */
4363 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4364 power_domain = intel_display_port_power_domain(&intel_dig_port->base);
4365 intel_display_power_get(dev_priv, power_domain);
4366
4367 edp_panel_vdd_schedule_off(intel_dp);
4368}
4369
4459static void intel_dp_encoder_reset(struct drm_encoder *encoder) 4370static void intel_dp_encoder_reset(struct drm_encoder *encoder)
4460{ 4371{
4461 intel_edp_panel_vdd_sanitize(to_intel_encoder(encoder)); 4372 struct intel_dp *intel_dp;
4373
4374 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
4375 return;
4376
4377 intel_dp = enc_to_intel_dp(encoder);
4378
4379 pps_lock(intel_dp);
4380
4381 /*
4382 * Read out the current power sequencer assignment,
4383 * in case the BIOS did something with it.
4384 */
4385 if (IS_VALLEYVIEW(encoder->dev))
4386 vlv_initial_power_sequencer_setup(intel_dp);
4387
4388 intel_edp_panel_vdd_sanitize(intel_dp);
4389
4390 pps_unlock(intel_dp);
4462} 4391}
4463 4392
4464static const struct drm_connector_funcs intel_dp_connector_funcs = { 4393static const struct drm_connector_funcs intel_dp_connector_funcs = {
@@ -4645,16 +4574,20 @@ static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
4645 4574
4646static void 4575static void
4647intel_dp_init_panel_power_sequencer(struct drm_device *dev, 4576intel_dp_init_panel_power_sequencer(struct drm_device *dev,
4648 struct intel_dp *intel_dp, 4577 struct intel_dp *intel_dp)
4649 struct edp_power_seq *out)
4650{ 4578{
4651 struct drm_i915_private *dev_priv = dev->dev_private; 4579 struct drm_i915_private *dev_priv = dev->dev_private;
4652 struct edp_power_seq cur, vbt, spec, final; 4580 struct edp_power_seq cur, vbt, spec,
4581 *final = &intel_dp->pps_delays;
4653 u32 pp_on, pp_off, pp_div, pp; 4582 u32 pp_on, pp_off, pp_div, pp;
4654 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg; 4583 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg;
4655 4584
4656 lockdep_assert_held(&dev_priv->pps_mutex); 4585 lockdep_assert_held(&dev_priv->pps_mutex);
4657 4586
4587 /* already initialized? */
4588 if (final->t11_t12 != 0)
4589 return;
4590
4658 if (HAS_PCH_SPLIT(dev)) { 4591 if (HAS_PCH_SPLIT(dev)) {
4659 pp_ctrl_reg = PCH_PP_CONTROL; 4592 pp_ctrl_reg = PCH_PP_CONTROL;
4660 pp_on_reg = PCH_PP_ON_DELAYS; 4593 pp_on_reg = PCH_PP_ON_DELAYS;
@@ -4716,7 +4649,7 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
4716 4649
4717 /* Use the max of the register settings and vbt. If both are 4650 /* Use the max of the register settings and vbt. If both are
4718 * unset, fall back to the spec limits. */ 4651 * unset, fall back to the spec limits. */
4719#define assign_final(field) final.field = (max(cur.field, vbt.field) == 0 ? \ 4652#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
4720 spec.field : \ 4653 spec.field : \
4721 max(cur.field, vbt.field)) 4654 max(cur.field, vbt.field))
4722 assign_final(t1_t3); 4655 assign_final(t1_t3);
@@ -4726,7 +4659,7 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
4726 assign_final(t11_t12); 4659 assign_final(t11_t12);
4727#undef assign_final 4660#undef assign_final
4728 4661
4729#define get_delay(field) (DIV_ROUND_UP(final.field, 10)) 4662#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
4730 intel_dp->panel_power_up_delay = get_delay(t1_t3); 4663 intel_dp->panel_power_up_delay = get_delay(t1_t3);
4731 intel_dp->backlight_on_delay = get_delay(t8); 4664 intel_dp->backlight_on_delay = get_delay(t8);
4732 intel_dp->backlight_off_delay = get_delay(t9); 4665 intel_dp->backlight_off_delay = get_delay(t9);
@@ -4740,21 +4673,18 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev,
4740 4673
4741 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n", 4674 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
4742 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); 4675 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
4743
4744 if (out)
4745 *out = final;
4746} 4676}
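
The assign_final()/get_delay() macros in the hunk above encode the panel power sequencer delay policy: each delay becomes the larger of the current register value and the VBT value, with the eDP spec limit used only when both are zero, and the result is converted from 100us units to milliseconds by rounding up. A minimal stand-alone sketch of that selection rule (pick_delay() and delay_to_ms() are hypothetical names, not driver functions; they mirror the max()/DIV_ROUND_UP() logic of the macros):

	/* Delay fields are in 100us units, as in struct edp_power_seq. */
	static unsigned int pick_delay(unsigned int cur, unsigned int vbt,
				       unsigned int spec)
	{
		unsigned int m = cur > vbt ? cur : vbt;

		/* Registers and VBT both unset: fall back to the spec limit. */
		return m == 0 ? spec : m;
	}

	/* Mirror get_delay(): round 100us units up to whole milliseconds. */
	static unsigned int delay_to_ms(unsigned int field)
	{
		return (field + 9) / 10;
	}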
4747 4677
4748static void 4678static void
4749intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev, 4679intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
4750 struct intel_dp *intel_dp, 4680 struct intel_dp *intel_dp)
4751 struct edp_power_seq *seq)
4752{ 4681{
4753 struct drm_i915_private *dev_priv = dev->dev_private; 4682 struct drm_i915_private *dev_priv = dev->dev_private;
4754 u32 pp_on, pp_off, pp_div, port_sel = 0; 4683 u32 pp_on, pp_off, pp_div, port_sel = 0;
4755 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev); 4684 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
4756 int pp_on_reg, pp_off_reg, pp_div_reg; 4685 int pp_on_reg, pp_off_reg, pp_div_reg;
4757 enum port port = dp_to_dig_port(intel_dp)->port; 4686 enum port port = dp_to_dig_port(intel_dp)->port;
4687 const struct edp_power_seq *seq = &intel_dp->pps_delays;
4758 4688
4759 lockdep_assert_held(&dev_priv->pps_mutex); 4689 lockdep_assert_held(&dev_priv->pps_mutex);
4760 4690
@@ -4837,7 +4767,7 @@ void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
4837 * hard to tell without seeing the eventual users of this code. 4767 * hard to tell without seeing the eventual users of this code.
4838 * Check locking and ordering once that lands. 4768 * Check locking and ordering once that lands.
4839 */ 4769 */
4840 if (INTEL_INFO(dev)->gen < 8 && intel_edp_is_psr_enabled(dev)) { 4770 if (INTEL_INFO(dev)->gen < 8 && intel_psr_is_enabled(dev)) {
4841 DRM_DEBUG_KMS("DRRS is disabled as PSR is enabled\n"); 4771 DRM_DEBUG_KMS("DRRS is disabled as PSR is enabled\n");
4842 return; 4772 return;
4843 } 4773 }
@@ -4940,40 +4870,8 @@ intel_dp_drrs_init(struct intel_digital_port *intel_dig_port,
4940 return downclock_mode; 4870 return downclock_mode;
4941} 4871}
4942 4872
4943void intel_edp_panel_vdd_sanitize(struct intel_encoder *intel_encoder)
4944{
4945 struct drm_device *dev = intel_encoder->base.dev;
4946 struct drm_i915_private *dev_priv = dev->dev_private;
4947 struct intel_dp *intel_dp;
4948 enum intel_display_power_domain power_domain;
4949
4950 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4951 return;
4952
4953 intel_dp = enc_to_intel_dp(&intel_encoder->base);
4954
4955 pps_lock(intel_dp);
4956
4957 if (!edp_have_panel_vdd(intel_dp))
4958 goto out;
4959 /*
4960 * The VDD bit needs a power domain reference, so if the bit is
4961 * already enabled when we boot or resume, grab this reference and
4962 * schedule a vdd off, so we don't hold on to the reference
4963 * indefinitely.
4964 */
4965 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4966 power_domain = intel_display_port_power_domain(intel_encoder);
4967 intel_display_power_get(dev_priv, power_domain);
4968
4969 edp_panel_vdd_schedule_off(intel_dp);
4970 out:
4971 pps_unlock(intel_dp);
4972}
4973
4974static bool intel_edp_init_connector(struct intel_dp *intel_dp, 4873static bool intel_edp_init_connector(struct intel_dp *intel_dp,
4975 struct intel_connector *intel_connector, 4874 struct intel_connector *intel_connector)
4976 struct edp_power_seq *power_seq)
4977{ 4875{
4978 struct drm_connector *connector = &intel_connector->base; 4876 struct drm_connector *connector = &intel_connector->base;
4979 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 4877 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
@@ -4985,18 +4883,19 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
4985 bool has_dpcd; 4883 bool has_dpcd;
4986 struct drm_display_mode *scan; 4884 struct drm_display_mode *scan;
4987 struct edid *edid; 4885 struct edid *edid;
4886 enum pipe pipe = INVALID_PIPE;
4988 4887
4989 intel_dp->drrs_state.type = DRRS_NOT_SUPPORTED; 4888 intel_dp->drrs_state.type = DRRS_NOT_SUPPORTED;
4990 4889
4991 if (!is_edp(intel_dp)) 4890 if (!is_edp(intel_dp))
4992 return true; 4891 return true;
4993 4892
4994 intel_edp_panel_vdd_sanitize(intel_encoder); 4893 pps_lock(intel_dp);
4894 intel_edp_panel_vdd_sanitize(intel_dp);
4895 pps_unlock(intel_dp);
4995 4896
4996 /* Cache DPCD and EDID for edp. */ 4897 /* Cache DPCD and EDID for edp. */
4997 intel_edp_panel_vdd_on(intel_dp);
4998 has_dpcd = intel_dp_get_dpcd(intel_dp); 4898 has_dpcd = intel_dp_get_dpcd(intel_dp);
4999 intel_edp_panel_vdd_off(intel_dp, false);
5000 4899
5001 if (has_dpcd) { 4900 if (has_dpcd) {
5002 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) 4901 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
@@ -5011,7 +4910,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
5011 4910
5012 /* We now know it's not a ghost, init power sequence regs. */ 4911 /* We now know it's not a ghost, init power sequence regs. */
5013 pps_lock(intel_dp); 4912 pps_lock(intel_dp);
5014 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, power_seq); 4913 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
5015 pps_unlock(intel_dp); 4914 pps_unlock(intel_dp);
5016 4915
5017 mutex_lock(&dev->mode_config.mutex); 4916 mutex_lock(&dev->mode_config.mutex);
@@ -5053,11 +4952,30 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
5053 if (IS_VALLEYVIEW(dev)) { 4952 if (IS_VALLEYVIEW(dev)) {
5054 intel_dp->edp_notifier.notifier_call = edp_notify_handler; 4953 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5055 register_reboot_notifier(&intel_dp->edp_notifier); 4954 register_reboot_notifier(&intel_dp->edp_notifier);
4955
4956 /*
4957 * Figure out the current pipe for the initial backlight setup.
4958 * If the current pipe isn't valid, try the PPS pipe, and if that
4959 * fails just assume pipe A.
4960 */
4961 if (IS_CHERRYVIEW(dev))
4962 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
4963 else
4964 pipe = PORT_TO_PIPE(intel_dp->DP);
4965
4966 if (pipe != PIPE_A && pipe != PIPE_B)
4967 pipe = intel_dp->pps_pipe;
4968
4969 if (pipe != PIPE_A && pipe != PIPE_B)
4970 pipe = PIPE_A;
4971
4972 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
4973 pipe_name(pipe));
5056 } 4974 }
5057 4975
5058 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode); 4976 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
5059 intel_connector->panel.backlight_power = intel_edp_backlight_power; 4977 intel_connector->panel.backlight_power = intel_edp_backlight_power;
5060 intel_panel_setup_backlight(connector); 4978 intel_panel_setup_backlight(connector, pipe);
5061 4979
5062 return true; 4980 return true;
5063} 4981}
@@ -5072,13 +4990,14 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5072 struct drm_device *dev = intel_encoder->base.dev; 4990 struct drm_device *dev = intel_encoder->base.dev;
5073 struct drm_i915_private *dev_priv = dev->dev_private; 4991 struct drm_i915_private *dev_priv = dev->dev_private;
5074 enum port port = intel_dig_port->port; 4992 enum port port = intel_dig_port->port;
5075 struct edp_power_seq power_seq = { 0 };
5076 int type; 4993 int type;
5077 4994
5078 intel_dp->pps_pipe = INVALID_PIPE; 4995 intel_dp->pps_pipe = INVALID_PIPE;
5079 4996
5080 /* intel_dp vfuncs */ 4997 /* intel_dp vfuncs */
5081 if (IS_VALLEYVIEW(dev)) 4998 if (INTEL_INFO(dev)->gen >= 9)
4999 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5000 else if (IS_VALLEYVIEW(dev))
5082 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider; 5001 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
5083 else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 5002 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
5084 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider; 5003 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
@@ -5087,7 +5006,10 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5087 else 5006 else
5088 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider; 5007 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
5089 5008
5090 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl; 5009 if (INTEL_INFO(dev)->gen >= 9)
5010 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5011 else
5012 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
5091 5013
5092 /* Preserve the current hw state. */ 5014 /* Preserve the current hw state. */
5093 intel_dp->DP = I915_READ(intel_dp->output_reg); 5015 intel_dp->DP = I915_READ(intel_dp->output_reg);
@@ -5106,6 +5028,11 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5106 if (type == DRM_MODE_CONNECTOR_eDP) 5028 if (type == DRM_MODE_CONNECTOR_eDP)
5107 intel_encoder->type = INTEL_OUTPUT_EDP; 5029 intel_encoder->type = INTEL_OUTPUT_EDP;
5108 5030
5031 /* eDP only on port B and/or C on vlv/chv */
5032 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
5033 port != PORT_B && port != PORT_C))
5034 return false;
5035
5109 DRM_DEBUG_KMS("Adding %s connector on port %c\n", 5036 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
5110 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP", 5037 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
5111 port_name(port)); 5038 port_name(port));
@@ -5148,13 +5075,11 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5148 5075
5149 if (is_edp(intel_dp)) { 5076 if (is_edp(intel_dp)) {
5150 pps_lock(intel_dp); 5077 pps_lock(intel_dp);
5151 if (IS_VALLEYVIEW(dev)) { 5078 intel_dp_init_panel_power_timestamps(intel_dp);
5079 if (IS_VALLEYVIEW(dev))
5152 vlv_initial_power_sequencer_setup(intel_dp); 5080 vlv_initial_power_sequencer_setup(intel_dp);
5153 } else { 5081 else
5154 intel_dp_init_panel_power_timestamps(intel_dp); 5082 intel_dp_init_panel_power_sequencer(dev, intel_dp);
5155 intel_dp_init_panel_power_sequencer(dev, intel_dp,
5156 &power_seq);
5157 }
5158 pps_unlock(intel_dp); 5083 pps_unlock(intel_dp);
5159 } 5084 }
5160 5085
@@ -5168,7 +5093,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5168 } 5093 }
5169 } 5094 }
5170 5095
5171 if (!intel_edp_init_connector(intel_dp, intel_connector, &power_seq)) { 5096 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
5172 drm_dp_aux_unregister(&intel_dp->aux); 5097 drm_dp_aux_unregister(&intel_dp->aux);
5173 if (is_edp(intel_dp)) { 5098 if (is_edp(intel_dp)) {
5174 cancel_delayed_work_sync(&intel_dp->panel_vdd_work); 5099 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index d9a7a7865f66..7f8c6a66680a 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -278,20 +278,12 @@ static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
278} 278}
279 279
280static enum drm_connector_status 280static enum drm_connector_status
281intel_mst_port_dp_detect(struct drm_connector *connector) 281intel_dp_mst_detect(struct drm_connector *connector, bool force)
282{ 282{
283 struct intel_connector *intel_connector = to_intel_connector(connector); 283 struct intel_connector *intel_connector = to_intel_connector(connector);
284 struct intel_dp *intel_dp = intel_connector->mst_port; 284 struct intel_dp *intel_dp = intel_connector->mst_port;
285 285
286 return drm_dp_mst_detect_port(&intel_dp->mst_mgr, intel_connector->port); 286 return drm_dp_mst_detect_port(connector, &intel_dp->mst_mgr, intel_connector->port);
287}
288
289static enum drm_connector_status
290intel_dp_mst_detect(struct drm_connector *connector, bool force)
291{
292 enum drm_connector_status status;
293 status = intel_mst_port_dp_detect(connector);
294 return status;
295} 287}
296 288
297static int 289static int
@@ -393,7 +385,7 @@ static void intel_connector_remove_from_fbdev(struct intel_connector *connector)
393#endif 385#endif
394} 386}
395 387
396static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, char *pathprop) 388static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *pathprop)
397{ 389{
398 struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr); 390 struct intel_dp *intel_dp = container_of(mgr, struct intel_dp, mst_mgr);
399 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); 391 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
@@ -422,6 +414,8 @@ static struct drm_connector *intel_dp_add_mst_connector(struct drm_dp_mst_topolo
422 intel_dp_add_properties(intel_dp, connector); 414 intel_dp_add_properties(intel_dp, connector);
423 415
424 drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0); 416 drm_object_attach_property(&connector->base, dev->mode_config.path_property, 0);
417 drm_object_attach_property(&connector->base, dev->mode_config.tile_property, 0);
418
425 drm_mode_connector_set_path_property(connector, pathprop); 419 drm_mode_connector_set_path_property(connector, pathprop);
426 drm_reinit_primary_mode_group(dev); 420 drm_reinit_primary_mode_group(dev);
427 mutex_lock(&dev->mode_config.mutex); 421 mutex_lock(&dev->mode_config.mutex);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index ba715229a540..25fdbb16d4e0 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -34,6 +34,7 @@
34#include <drm/drm_crtc_helper.h> 34#include <drm/drm_crtc_helper.h>
35#include <drm/drm_fb_helper.h> 35#include <drm/drm_fb_helper.h>
36#include <drm/drm_dp_mst_helper.h> 36#include <drm/drm_dp_mst_helper.h>
37#include <drm/drm_rect.h>
37 38
38#define DIV_ROUND_CLOSEST_ULL(ll, d) \ 39#define DIV_ROUND_CLOSEST_ULL(ll, d) \
39({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; }) 40({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
@@ -93,18 +94,20 @@
93 94
94/* these are outputs from the chip - integrated only 95/* these are outputs from the chip - integrated only
95 external chips are via DVO or SDVO output */ 96 external chips are via DVO or SDVO output */
96#define INTEL_OUTPUT_UNUSED 0 97enum intel_output_type {
97#define INTEL_OUTPUT_ANALOG 1 98 INTEL_OUTPUT_UNUSED = 0,
98#define INTEL_OUTPUT_DVO 2 99 INTEL_OUTPUT_ANALOG = 1,
99#define INTEL_OUTPUT_SDVO 3 100 INTEL_OUTPUT_DVO = 2,
100#define INTEL_OUTPUT_LVDS 4 101 INTEL_OUTPUT_SDVO = 3,
101#define INTEL_OUTPUT_TVOUT 5 102 INTEL_OUTPUT_LVDS = 4,
102#define INTEL_OUTPUT_HDMI 6 103 INTEL_OUTPUT_TVOUT = 5,
103#define INTEL_OUTPUT_DISPLAYPORT 7 104 INTEL_OUTPUT_HDMI = 6,
104#define INTEL_OUTPUT_EDP 8 105 INTEL_OUTPUT_DISPLAYPORT = 7,
105#define INTEL_OUTPUT_DSI 9 106 INTEL_OUTPUT_EDP = 8,
106#define INTEL_OUTPUT_UNKNOWN 10 107 INTEL_OUTPUT_DSI = 9,
107#define INTEL_OUTPUT_DP_MST 11 108 INTEL_OUTPUT_UNKNOWN = 10,
109 INTEL_OUTPUT_DP_MST = 11,
110};
108 111
109#define INTEL_DVO_CHIP_NONE 0 112#define INTEL_DVO_CHIP_NONE 0
110#define INTEL_DVO_CHIP_LVDS 1 113#define INTEL_DVO_CHIP_LVDS 1
@@ -135,7 +138,7 @@ struct intel_encoder {
135 */ 138 */
136 struct intel_crtc *new_crtc; 139 struct intel_crtc *new_crtc;
137 140
138 int type; 141 enum intel_output_type type;
139 unsigned int cloneable; 142 unsigned int cloneable;
140 bool connectors_active; 143 bool connectors_active;
141 void (*hot_plug)(struct intel_encoder *); 144 void (*hot_plug)(struct intel_encoder *);
@@ -240,6 +243,17 @@ typedef struct dpll {
240 int p; 243 int p;
241} intel_clock_t; 244} intel_clock_t;
242 245
246struct intel_plane_state {
247 struct drm_crtc *crtc;
248 struct drm_framebuffer *fb;
249 struct drm_rect src;
250 struct drm_rect dst;
251 struct drm_rect clip;
252 struct drm_rect orig_src;
253 struct drm_rect orig_dst;
254 bool visible;
255};
256
243struct intel_plane_config { 257struct intel_plane_config {
244 bool tiled; 258 bool tiled;
245 int size; 259 int size;
@@ -278,6 +292,9 @@ struct intel_crtc_config {
278 * between pch encoders and cpu encoders. */ 292 * between pch encoders and cpu encoders. */
279 bool has_pch_encoder; 293 bool has_pch_encoder;
280 294
295 /* Are we sending infoframes on the attached port */
296 bool has_infoframe;
297
281 /* CPU Transcoder for the pipe. Currently this can only differ from the 298 /* CPU Transcoder for the pipe. Currently this can only differ from the
282 * pipe on Haswell (where we have a special eDP transcoder). */ 299 * pipe on Haswell (where we have a special eDP transcoder). */
283 enum transcoder cpu_transcoder; 300 enum transcoder cpu_transcoder;
@@ -326,7 +343,10 @@ struct intel_crtc_config {
326 /* Selected dpll when shared or DPLL_ID_PRIVATE. */ 343 /* Selected dpll when shared or DPLL_ID_PRIVATE. */
327 enum intel_dpll_id shared_dpll; 344 enum intel_dpll_id shared_dpll;
328 345
329 /* PORT_CLK_SEL for DDI ports. */ 346 /*
347 * - PORT_CLK_SEL for DDI ports on HSW/BDW.
348 * - enum skl_dpll on SKL
349 */
330 uint32_t ddi_pll_sel; 350 uint32_t ddi_pll_sel;
331 351
332 /* Actual register state of the dpll, for shared dpll cross-checking. */ 352 /* Actual register state of the dpll, for shared dpll cross-checking. */
@@ -387,7 +407,14 @@ struct intel_pipe_wm {
387 407
388struct intel_mmio_flip { 408struct intel_mmio_flip {
389 u32 seqno; 409 u32 seqno;
390 u32 ring_id; 410 struct intel_engine_cs *ring;
411 struct work_struct work;
412};
413
414struct skl_pipe_wm {
415 struct skl_wm_level wm[8];
416 struct skl_wm_level trans_wm;
417 uint32_t linetime;
391}; 418};
392 419
393struct intel_crtc { 420struct intel_crtc {
@@ -437,6 +464,8 @@ struct intel_crtc {
437 struct { 464 struct {
438 /* watermarks currently being used */ 465 /* watermarks currently being used */
439 struct intel_pipe_wm active; 466 struct intel_pipe_wm active;
467 /* SKL wm values currently in use */
468 struct skl_pipe_wm skl_active;
440 } wm; 469 } wm;
441 470
442 int scanline_offset; 471 int scanline_offset;
@@ -529,6 +558,7 @@ struct intel_hdmi {
529 void (*set_infoframes)(struct drm_encoder *encoder, 558 void (*set_infoframes)(struct drm_encoder *encoder,
530 bool enable, 559 bool enable,
531 struct drm_display_mode *adjusted_mode); 560 struct drm_display_mode *adjusted_mode);
561 bool (*infoframe_enabled)(struct drm_encoder *encoder);
532}; 562};
533 563
534struct intel_dp_mst_encoder; 564struct intel_dp_mst_encoder;
@@ -578,6 +608,7 @@ struct intel_dp {
578 * this port. Only relevant on VLV/CHV. 608 * this port. Only relevant on VLV/CHV.
579 */ 609 */
580 enum pipe pps_pipe; 610 enum pipe pps_pipe;
611 struct edp_power_seq pps_delays;
581 612
582 bool use_tps3; 613 bool use_tps3;
583 bool can_mst; /* this port supports mst */ 614 bool can_mst; /* this port supports mst */
@@ -734,32 +765,47 @@ hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
734 return container_of(intel_hdmi, struct intel_digital_port, hdmi); 765 return container_of(intel_hdmi, struct intel_digital_port, hdmi);
735} 766}
736 767
768/*
 769 * Returns the number of planes for this pipe: the number of sprites plus
 770 * one for the primary plane. The cursor plane is not counted.
771 */
772static inline unsigned int intel_num_planes(struct intel_crtc *crtc)
773{
774 return INTEL_INFO(crtc->base.dev)->num_sprites[crtc->pipe] + 1;
775}
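
For example, on a pipe with two sprite planes intel_num_planes() returns 3 (the primary plus both sprites); the cursor plane is deliberately left out. A hypothetical caller walking every non-cursor plane:

	/* Sketch only: plane 0 is the primary, planes 1..n-1 are sprites. */
	static void for_each_noncursor_plane(struct intel_crtc *crtc)
	{
		unsigned int i;

		for (i = 0; i < intel_num_planes(crtc); i++) {
			/* ... per-plane work for plane i ... */
		}
	}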
737 776
738/* i915_irq.c */ 777/* intel_fifo_underrun.c */
739bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev, 778bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
740 enum pipe pipe, bool enable); 779 enum pipe pipe, bool enable);
741bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev, 780bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
742 enum transcoder pch_transcoder, 781 enum transcoder pch_transcoder,
743 bool enable); 782 bool enable);
783void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
784 enum pipe pipe);
785void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
786 enum transcoder pch_transcoder);
787void i9xx_check_fifo_underruns(struct drm_i915_private *dev_priv);
788
789/* i915_irq.c */
744void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); 790void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
745void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask); 791void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask);
746void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); 792void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
747void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); 793void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
748void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); 794void gen6_reset_rps_interrupts(struct drm_device *dev);
749void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); 795void gen6_enable_rps_interrupts(struct drm_device *dev);
750void intel_runtime_pm_disable_interrupts(struct drm_device *dev); 796void gen6_disable_rps_interrupts(struct drm_device *dev);
751void intel_runtime_pm_restore_interrupts(struct drm_device *dev); 797void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv);
798void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv);
752static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv) 799static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
753{ 800{
754 /* 801 /*
755 * We only use drm_irq_uninstall() at unload and VT switch, so 802 * We only use drm_irq_uninstall() at unload and VT switch, so
756 * this is the only thing we need to check. 803 * this is the only thing we need to check.
757 */ 804 */
758 return !dev_priv->pm._irqs_disabled; 805 return dev_priv->pm.irqs_enabled;
759} 806}
760 807
761int intel_get_crtc_scanline(struct intel_crtc *crtc); 808int intel_get_crtc_scanline(struct intel_crtc *crtc);
762void i9xx_check_fifo_underruns(struct drm_device *dev);
763void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv); 809void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv);
764 810
765/* intel_crt.c */ 811/* intel_crt.c */
@@ -792,11 +838,7 @@ void intel_ddi_clock_get(struct intel_encoder *encoder,
792 struct intel_crtc_config *pipe_config); 838 struct intel_crtc_config *pipe_config);
793void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state); 839void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);
794 840
795/* intel_display.c */ 841/* intel_frontbuffer.c */
796const char *intel_output_name(int output);
797bool intel_has_pending_fb_unpin(struct drm_device *dev);
798int intel_pch_rawclk(struct drm_device *dev);
799void intel_mark_busy(struct drm_device *dev);
800void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj, 842void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
801 struct intel_engine_cs *ring); 843 struct intel_engine_cs *ring);
802void intel_frontbuffer_flip_prepare(struct drm_device *dev, 844void intel_frontbuffer_flip_prepare(struct drm_device *dev,
@@ -806,7 +848,7 @@ void intel_frontbuffer_flip_complete(struct drm_device *dev,
806void intel_frontbuffer_flush(struct drm_device *dev, 848void intel_frontbuffer_flush(struct drm_device *dev,
807 unsigned frontbuffer_bits); 849 unsigned frontbuffer_bits);
808/** 850/**
809 * intel_frontbuffer_flip - prepare frontbuffer flip 851 * intel_frontbuffer_flip - synchronous frontbuffer flip
810 * @dev: DRM device 852 * @dev: DRM device
811 * @frontbuffer_bits: frontbuffer plane tracking bits 853 * @frontbuffer_bits: frontbuffer plane tracking bits
812 * 854 *
@@ -824,6 +866,18 @@ void intel_frontbuffer_flip(struct drm_device *dev,
824} 866}
825 867
826void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire); 868void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire);
869
870
871/* intel_audio.c */
872void intel_init_audio(struct drm_device *dev);
873void intel_audio_codec_enable(struct intel_encoder *encoder);
874void intel_audio_codec_disable(struct intel_encoder *encoder);
875
876/* intel_display.c */
877const char *intel_output_name(int output);
878bool intel_has_pending_fb_unpin(struct drm_device *dev);
879int intel_pch_rawclk(struct drm_device *dev);
880void intel_mark_busy(struct drm_device *dev);
827void intel_mark_idle(struct drm_device *dev); 881void intel_mark_idle(struct drm_device *dev);
828void intel_crtc_restore_mode(struct drm_crtc *crtc); 882void intel_crtc_restore_mode(struct drm_crtc *crtc);
829void intel_crtc_control(struct drm_crtc *crtc, bool enable); 883void intel_crtc_control(struct drm_crtc *crtc, bool enable);
@@ -844,7 +898,12 @@ int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
844 struct drm_file *file_priv); 898 struct drm_file *file_priv);
845enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, 899enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
846 enum pipe pipe); 900 enum pipe pipe);
847void intel_wait_for_vblank(struct drm_device *dev, int pipe); 901bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type);
902static inline void
903intel_wait_for_vblank(struct drm_device *dev, int pipe)
904{
905 drm_wait_one_vblank(dev, pipe);
906}
848int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp); 907int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp);
849void vlv_wait_port_ready(struct drm_i915_private *dev_priv, 908void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
850 struct intel_digital_port *dport); 909 struct intel_digital_port *dport);
@@ -854,8 +913,8 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
854 struct drm_modeset_acquire_ctx *ctx); 913 struct drm_modeset_acquire_ctx *ctx);
855void intel_release_load_detect_pipe(struct drm_connector *connector, 914void intel_release_load_detect_pipe(struct drm_connector *connector,
856 struct intel_load_detect_pipe *old); 915 struct intel_load_detect_pipe *old);
857int intel_pin_and_fence_fb_obj(struct drm_device *dev, 916int intel_pin_and_fence_fb_obj(struct drm_plane *plane,
858 struct drm_i915_gem_object *obj, 917 struct drm_framebuffer *fb,
859 struct intel_engine_cs *pipelined); 918 struct intel_engine_cs *pipelined);
860void intel_unpin_fb_obj(struct drm_i915_gem_object *obj); 919void intel_unpin_fb_obj(struct drm_i915_gem_object *obj);
861struct drm_framebuffer * 920struct drm_framebuffer *
@@ -877,7 +936,13 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
877struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc); 936struct intel_shared_dpll *intel_get_shared_dpll(struct intel_crtc *crtc);
878void intel_put_shared_dpll(struct intel_crtc *crtc); 937void intel_put_shared_dpll(struct intel_crtc *crtc);
879 938
939void vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
940 const struct dpll *dpll);
941void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe);
942
880/* modesetting asserts */ 943/* modesetting asserts */
944void assert_panel_unlocked(struct drm_i915_private *dev_priv,
945 enum pipe pipe);
881void assert_pll(struct drm_i915_private *dev_priv, 946void assert_pll(struct drm_i915_private *dev_priv,
882 enum pipe pipe, bool state); 947 enum pipe pipe, bool state);
883#define assert_pll_enabled(d, p) assert_pll(d, p, true) 948#define assert_pll_enabled(d, p) assert_pll(d, p, true)
@@ -889,13 +954,12 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
889void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state); 954void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, bool state);
890#define assert_pipe_enabled(d, p) assert_pipe(d, p, true) 955#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
891#define assert_pipe_disabled(d, p) assert_pipe(d, p, false) 956#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
892void intel_write_eld(struct drm_encoder *encoder,
893 struct drm_display_mode *mode);
894unsigned long intel_gen4_compute_page_offset(int *x, int *y, 957unsigned long intel_gen4_compute_page_offset(int *x, int *y,
895 unsigned int tiling_mode, 958 unsigned int tiling_mode,
896 unsigned int bpp, 959 unsigned int bpp,
897 unsigned int pitch); 960 unsigned int pitch);
898void intel_display_handle_reset(struct drm_device *dev); 961void intel_prepare_reset(struct drm_device *dev);
962void intel_finish_reset(struct drm_device *dev);
899void hsw_enable_pc8(struct drm_i915_private *dev_priv); 963void hsw_enable_pc8(struct drm_i915_private *dev_priv);
900void hsw_disable_pc8(struct drm_i915_private *dev_priv); 964void hsw_disable_pc8(struct drm_i915_private *dev_priv);
901void intel_dp_get_m_n(struct intel_crtc *crtc, 965void intel_dp_get_m_n(struct intel_crtc *crtc,
@@ -908,7 +972,6 @@ ironlake_check_encoder_dotclock(const struct intel_crtc_config *pipe_config,
908bool intel_crtc_active(struct drm_crtc *crtc); 972bool intel_crtc_active(struct drm_crtc *crtc);
909void hsw_enable_ips(struct intel_crtc *crtc); 973void hsw_enable_ips(struct intel_crtc *crtc);
910void hsw_disable_ips(struct intel_crtc *crtc); 974void hsw_disable_ips(struct intel_crtc *crtc);
911void intel_display_set_init_power(struct drm_i915_private *dev, bool enable);
912enum intel_display_power_domain 975enum intel_display_power_domain
913intel_display_port_power_domain(struct intel_encoder *intel_encoder); 976intel_display_port_power_domain(struct intel_encoder *intel_encoder);
914void intel_mode_from_pipe_config(struct drm_display_mode *mode, 977void intel_mode_from_pipe_config(struct drm_display_mode *mode,
@@ -936,25 +999,18 @@ bool intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port,
936void intel_edp_backlight_on(struct intel_dp *intel_dp); 999void intel_edp_backlight_on(struct intel_dp *intel_dp);
937void intel_edp_backlight_off(struct intel_dp *intel_dp); 1000void intel_edp_backlight_off(struct intel_dp *intel_dp);
938void intel_edp_panel_vdd_on(struct intel_dp *intel_dp); 1001void intel_edp_panel_vdd_on(struct intel_dp *intel_dp);
939void intel_edp_panel_vdd_sanitize(struct intel_encoder *intel_encoder);
940void intel_edp_panel_on(struct intel_dp *intel_dp); 1002void intel_edp_panel_on(struct intel_dp *intel_dp);
941void intel_edp_panel_off(struct intel_dp *intel_dp); 1003void intel_edp_panel_off(struct intel_dp *intel_dp);
942void intel_edp_psr_enable(struct intel_dp *intel_dp);
943void intel_edp_psr_disable(struct intel_dp *intel_dp);
944void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate); 1004void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate);
945void intel_edp_psr_invalidate(struct drm_device *dev,
946 unsigned frontbuffer_bits);
947void intel_edp_psr_flush(struct drm_device *dev,
948 unsigned frontbuffer_bits);
949void intel_edp_psr_init(struct drm_device *dev);
950
951int intel_dp_handle_hpd_irq(struct intel_digital_port *digport, bool long_hpd);
952void intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector); 1005void intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector);
953void intel_dp_mst_suspend(struct drm_device *dev); 1006void intel_dp_mst_suspend(struct drm_device *dev);
954void intel_dp_mst_resume(struct drm_device *dev); 1007void intel_dp_mst_resume(struct drm_device *dev);
955int intel_dp_max_link_bw(struct intel_dp *intel_dp); 1008int intel_dp_max_link_bw(struct intel_dp *intel_dp);
956void intel_dp_hot_plug(struct intel_encoder *intel_encoder); 1009void intel_dp_hot_plug(struct intel_encoder *intel_encoder);
957void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv); 1010void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv);
1011uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes);
1012void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes);
1013
958/* intel_dp_mst.c */ 1014/* intel_dp_mst.c */
959int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id); 1015int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_id);
960void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port); 1016void intel_dp_mst_encoder_cleanup(struct intel_digital_port *intel_dig_port);
@@ -1044,7 +1100,7 @@ void intel_gmch_panel_fitting(struct intel_crtc *crtc,
1044 int fitting_mode); 1100 int fitting_mode);
1045void intel_panel_set_backlight_acpi(struct intel_connector *connector, 1101void intel_panel_set_backlight_acpi(struct intel_connector *connector,
1046 u32 level, u32 max); 1102 u32 level, u32 max);
1047int intel_panel_setup_backlight(struct drm_connector *connector); 1103int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe);
1048void intel_panel_enable_backlight(struct intel_connector *connector); 1104void intel_panel_enable_backlight(struct intel_connector *connector);
1049void intel_panel_disable_backlight(struct intel_connector *connector); 1105void intel_panel_disable_backlight(struct intel_connector *connector);
1050void intel_panel_destroy_backlight(struct drm_connector *connector); 1106void intel_panel_destroy_backlight(struct drm_connector *connector);
@@ -1054,6 +1110,41 @@ extern struct drm_display_mode *intel_find_panel_downclock(
1054 struct drm_device *dev, 1110 struct drm_device *dev,
1055 struct drm_display_mode *fixed_mode, 1111 struct drm_display_mode *fixed_mode,
1056 struct drm_connector *connector); 1112 struct drm_connector *connector);
1113void intel_backlight_register(struct drm_device *dev);
1114void intel_backlight_unregister(struct drm_device *dev);
1115
1116
1117/* intel_psr.c */
1118bool intel_psr_is_enabled(struct drm_device *dev);
1119void intel_psr_enable(struct intel_dp *intel_dp);
1120void intel_psr_disable(struct intel_dp *intel_dp);
1121void intel_psr_invalidate(struct drm_device *dev,
1122 unsigned frontbuffer_bits);
1123void intel_psr_flush(struct drm_device *dev,
1124 unsigned frontbuffer_bits);
1125void intel_psr_init(struct drm_device *dev);
1126
1127/* intel_runtime_pm.c */
1128int intel_power_domains_init(struct drm_i915_private *);
1129void intel_power_domains_fini(struct drm_i915_private *);
1130void intel_power_domains_init_hw(struct drm_i915_private *dev_priv);
1131void intel_runtime_pm_enable(struct drm_i915_private *dev_priv);
1132
1133bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
1134 enum intel_display_power_domain domain);
1135bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
1136 enum intel_display_power_domain domain);
1137void intel_display_power_get(struct drm_i915_private *dev_priv,
1138 enum intel_display_power_domain domain);
1139void intel_display_power_put(struct drm_i915_private *dev_priv,
1140 enum intel_display_power_domain domain);
1141void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
1142void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
1143void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
1144void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
1145void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
1146
1147void intel_display_set_init_power(struct drm_i915_private *dev, bool enable);
1057 1148
1058/* intel_pm.c */ 1149/* intel_pm.c */
1059void intel_init_clock_gating(struct drm_device *dev); 1150void intel_init_clock_gating(struct drm_device *dev);
@@ -1072,17 +1163,6 @@ bool intel_fbc_enabled(struct drm_device *dev);
1072void intel_update_fbc(struct drm_device *dev); 1163void intel_update_fbc(struct drm_device *dev);
1073void intel_gpu_ips_init(struct drm_i915_private *dev_priv); 1164void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
1074void intel_gpu_ips_teardown(void); 1165void intel_gpu_ips_teardown(void);
1075int intel_power_domains_init(struct drm_i915_private *);
1076void intel_power_domains_remove(struct drm_i915_private *);
1077bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
1078 enum intel_display_power_domain domain);
1079bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv,
1080 enum intel_display_power_domain domain);
1081void intel_display_power_get(struct drm_i915_private *dev_priv,
1082 enum intel_display_power_domain domain);
1083void intel_display_power_put(struct drm_i915_private *dev_priv,
1084 enum intel_display_power_domain domain);
1085void intel_power_domains_init_hw(struct drm_i915_private *dev_priv);
1086void intel_init_gt_powersave(struct drm_device *dev); 1166void intel_init_gt_powersave(struct drm_device *dev);
1087void intel_cleanup_gt_powersave(struct drm_device *dev); 1167void intel_cleanup_gt_powersave(struct drm_device *dev);
1088void intel_enable_gt_powersave(struct drm_device *dev); 1168void intel_enable_gt_powersave(struct drm_device *dev);
@@ -1093,14 +1173,10 @@ void ironlake_teardown_rc6(struct drm_device *dev);
1093void gen6_update_ring_freq(struct drm_device *dev); 1173void gen6_update_ring_freq(struct drm_device *dev);
1094void gen6_rps_idle(struct drm_i915_private *dev_priv); 1174void gen6_rps_idle(struct drm_i915_private *dev_priv);
1095void gen6_rps_boost(struct drm_i915_private *dev_priv); 1175void gen6_rps_boost(struct drm_i915_private *dev_priv);
1096void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
1097void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
1098void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
1099void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
1100void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
1101void intel_init_runtime_pm(struct drm_i915_private *dev_priv);
1102void intel_fini_runtime_pm(struct drm_i915_private *dev_priv);
1103void ilk_wm_get_hw_state(struct drm_device *dev); 1176void ilk_wm_get_hw_state(struct drm_device *dev);
1177void skl_wm_get_hw_state(struct drm_device *dev);
1178void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
1179 struct skl_ddb_allocation *ddb /* out */);
1104 1180
1105 1181
1106/* intel_sdvo.c */ 1182/* intel_sdvo.c */
@@ -1120,7 +1196,9 @@ int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
1120 struct drm_file *file_priv); 1196 struct drm_file *file_priv);
1121int intel_sprite_get_colorkey(struct drm_device *dev, void *data, 1197int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
1122 struct drm_file *file_priv); 1198 struct drm_file *file_priv);
1123 1199bool intel_pipe_update_start(struct intel_crtc *crtc,
1200 uint32_t *start_vbl_count);
1201void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count);
1124 1202
1125/* intel_tv.c */ 1203/* intel_tv.c */
1126void intel_tv_init(struct drm_device *dev); 1204void intel_tv_init(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 5bd9e09ad3c5..0b184079de14 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -344,7 +344,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
344 DRM_DEBUG_KMS("\n"); 344 DRM_DEBUG_KMS("\n");
345 345
346 power_domain = intel_display_port_power_domain(encoder); 346 power_domain = intel_display_port_power_domain(encoder);
347 if (!intel_display_power_enabled(dev_priv, power_domain)) 347 if (!intel_display_power_is_enabled(dev_priv, power_domain))
348 return false; 348 return false;
349 349
350 /* XXX: this only works for one DSI output */ 350 /* XXX: this only works for one DSI output */
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 9b584f3fbb99..850cf7d6578c 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -119,25 +119,25 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
119 goto out; 119 goto out;
120 } 120 }
121 121
122 /* Flush everything out, we'll be doing GTT only from now on */
123 ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
124 if (ret) {
125 DRM_ERROR("failed to pin obj: %d\n", ret);
126 goto out_unref;
127 }
128
129 fb = __intel_framebuffer_create(dev, &mode_cmd, obj); 122 fb = __intel_framebuffer_create(dev, &mode_cmd, obj);
130 if (IS_ERR(fb)) { 123 if (IS_ERR(fb)) {
131 ret = PTR_ERR(fb); 124 ret = PTR_ERR(fb);
132 goto out_unpin; 125 goto out_unref;
126 }
127
128 /* Flush everything out, we'll be doing GTT only from now on */
129 ret = intel_pin_and_fence_fb_obj(NULL, fb, NULL);
130 if (ret) {
131 DRM_ERROR("failed to pin obj: %d\n", ret);
132 goto out_fb;
133 } 133 }
134 134
135 ifbdev->fb = to_intel_framebuffer(fb); 135 ifbdev->fb = to_intel_framebuffer(fb);
136 136
137 return 0; 137 return 0;
138 138
139out_unpin: 139out_fb:
140 i915_gem_object_ggtt_unpin(obj); 140 drm_framebuffer_remove(fb);
141out_unref: 141out_unref:
142 drm_gem_object_unreference(&obj->base); 142 drm_gem_object_unreference(&obj->base);
143out: 143out:
@@ -324,6 +324,7 @@ intel_fb_helper_crtc(struct drm_fb_helper *fb_helper, struct drm_crtc *crtc)
324static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper, 324static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
325 struct drm_fb_helper_crtc **crtcs, 325 struct drm_fb_helper_crtc **crtcs,
326 struct drm_display_mode **modes, 326 struct drm_display_mode **modes,
327 struct drm_fb_offset *offsets,
327 bool *enabled, int width, int height) 328 bool *enabled, int width, int height)
328{ 329{
329 struct drm_device *dev = fb_helper->dev; 330 struct drm_device *dev = fb_helper->dev;
@@ -332,6 +333,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
332 bool fallback = true; 333 bool fallback = true;
333 int num_connectors_enabled = 0; 334 int num_connectors_enabled = 0;
334 int num_connectors_detected = 0; 335 int num_connectors_detected = 0;
336 uint64_t conn_configured = 0, mask;
337 int pass = 0;
335 338
336 save_enabled = kcalloc(dev->mode_config.num_connector, sizeof(bool), 339 save_enabled = kcalloc(dev->mode_config.num_connector, sizeof(bool),
337 GFP_KERNEL); 340 GFP_KERNEL);
@@ -339,7 +342,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
339 return false; 342 return false;
340 343
341 memcpy(save_enabled, enabled, dev->mode_config.num_connector); 344 memcpy(save_enabled, enabled, dev->mode_config.num_connector);
342 345 mask = (1 << fb_helper->connector_count) - 1;
346retry:
343 for (i = 0; i < fb_helper->connector_count; i++) { 347 for (i = 0; i < fb_helper->connector_count; i++) {
344 struct drm_fb_helper_connector *fb_conn; 348 struct drm_fb_helper_connector *fb_conn;
345 struct drm_connector *connector; 349 struct drm_connector *connector;
@@ -349,12 +353,19 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
349 fb_conn = fb_helper->connector_info[i]; 353 fb_conn = fb_helper->connector_info[i];
350 connector = fb_conn->connector; 354 connector = fb_conn->connector;
351 355
356 if (conn_configured & (1 << i))
357 continue;
358
359 if (pass == 0 && !connector->has_tile)
360 continue;
361
352 if (connector->status == connector_status_connected) 362 if (connector->status == connector_status_connected)
353 num_connectors_detected++; 363 num_connectors_detected++;
354 364
355 if (!enabled[i]) { 365 if (!enabled[i]) {
356 DRM_DEBUG_KMS("connector %s not enabled, skipping\n", 366 DRM_DEBUG_KMS("connector %s not enabled, skipping\n",
357 connector->name); 367 connector->name);
368 conn_configured |= (1 << i);
358 continue; 369 continue;
359 } 370 }
360 371
@@ -373,6 +384,7 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
373 DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n", 384 DRM_DEBUG_KMS("connector %s has no encoder or crtc, skipping\n",
374 connector->name); 385 connector->name);
375 enabled[i] = false; 386 enabled[i] = false;
387 conn_configured |= (1 << i);
376 continue; 388 continue;
377 } 389 }
378 390
@@ -400,8 +412,8 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
400 412
401 /* try for preferred next */ 413 /* try for preferred next */
402 if (!modes[i]) { 414 if (!modes[i]) {
403 DRM_DEBUG_KMS("looking for preferred mode on connector %s\n", 415 DRM_DEBUG_KMS("looking for preferred mode on connector %s %d\n",
404 connector->name); 416 connector->name, connector->has_tile);
405 modes[i] = drm_has_preferred_mode(fb_conn, width, 417 modes[i] = drm_has_preferred_mode(fb_conn, width,
406 height); 418 height);
407 } 419 }
@@ -444,6 +456,12 @@ static bool intel_fb_initial_config(struct drm_fb_helper *fb_helper,
444 modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" :""); 456 modes[i]->flags & DRM_MODE_FLAG_INTERLACE ? "i" :"");
445 457
446 fallback = false; 458 fallback = false;
459 conn_configured |= (1 << i);
460 }
461
462 if ((conn_configured & mask) != mask) {
463 pass++;
464 goto retry;
447 } 465 }
448 466
449 /* 467 /*
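
The retry loop added to intel_fb_initial_config() above works in two passes over the connectors, tracked by the conn_configured bitmask: pass 0 only considers connectors that belong to a tiled display (connector->has_tile) so they get first pick of the crtcs, pass 1 then handles the rest, and the loop repeats until every bit under the mask is set. A detached sketch of the pattern, assuming fewer than 64 items and a placeholder configure step:

	#include <stdbool.h>
	#include <stdint.h>

	static void two_pass_configure(int n, const bool *is_tiled)
	{
		uint64_t configured = 0;
		uint64_t mask = (1ULL << n) - 1;
		int pass = 0, i;

	retry:
		for (i = 0; i < n; i++) {
			if (configured & (1ULL << i))
				continue;
			/* Pass 0: tiled items only; everything else waits. */
			if (pass == 0 && !is_tiled[i])
				continue;

			/* ... pick a crtc/mode for item i ... */
			configured |= 1ULL << i;
		}

		if ((configured & mask) != mask) {
			pass++;
			goto retry;
		}
	}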
diff --git a/drivers/gpu/drm/i915/intel_fifo_underrun.c b/drivers/gpu/drm/i915/intel_fifo_underrun.c
new file mode 100644
index 000000000000..77af512d2d35
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_fifo_underrun.c
@@ -0,0 +1,381 @@
1/*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Daniel Vetter <daniel.vetter@ffwll.ch>
25 *
26 */
27
28#include "i915_drv.h"
29#include "intel_drv.h"
30
31/**
32 * DOC: fifo underrun handling
33 *
34 * The i915 driver checks for display fifo underruns using the interrupt signals
35 * provided by the hardware. This is enabled by default and fairly useful to
36 * debug display issues, especially watermark settings.
37 *
 38 * If an underrun is detected it is logged to dmesg. To avoid flooding the log
 39 * and occupying the cpu, underrun interrupts are disabled after the first
 40 * occurrence until the next modeset on the affected pipe.
41 *
 42 * Note that underrun detection on gmch platforms is a bit uglier since there
 43 * is no interrupt (even though the signalling bit is in the PIPESTAT pipe
 44 * interrupt register). Also, on some platforms underrun interrupts are
 45 * shared, which means that if we detect an underrun on one pipe we need to
 46 * disable underrun reporting on all pipes.
47 *
48 * The code also supports underrun detection on the PCH transcoder.
49 */
50
51static bool ivb_can_enable_err_int(struct drm_device *dev)
52{
53 struct drm_i915_private *dev_priv = dev->dev_private;
54 struct intel_crtc *crtc;
55 enum pipe pipe;
56
57 assert_spin_locked(&dev_priv->irq_lock);
58
59 for_each_pipe(dev_priv, pipe) {
60 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
61
62 if (crtc->cpu_fifo_underrun_disabled)
63 return false;
64 }
65
66 return true;
67}
68
69static bool cpt_can_enable_serr_int(struct drm_device *dev)
70{
71 struct drm_i915_private *dev_priv = dev->dev_private;
72 enum pipe pipe;
73 struct intel_crtc *crtc;
74
75 assert_spin_locked(&dev_priv->irq_lock);
76
77 for_each_pipe(dev_priv, pipe) {
78 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
79
80 if (crtc->pch_fifo_underrun_disabled)
81 return false;
82 }
83
84 return true;
85}
86
87/**
88 * i9xx_check_fifo_underruns - check for fifo underruns
89 * @dev_priv: i915 device instance
90 *
91 * This function checks for fifo underruns on GMCH platforms. This needs to be
92 * done manually on modeset to make sure that we catch all underruns since they
93 * do not generate an interrupt by themselves on these platforms.
94 */
95void i9xx_check_fifo_underruns(struct drm_i915_private *dev_priv)
96{
97 struct intel_crtc *crtc;
98
99 spin_lock_irq(&dev_priv->irq_lock);
100
101 for_each_intel_crtc(dev_priv->dev, crtc) {
102 u32 reg = PIPESTAT(crtc->pipe);
103 u32 pipestat;
104
105 if (crtc->cpu_fifo_underrun_disabled)
106 continue;
107
108 pipestat = I915_READ(reg) & 0xffff0000;
109 if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
110 continue;
111
112 I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
113 POSTING_READ(reg);
114
115 DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
116 }
117
118 spin_unlock_irq(&dev_priv->irq_lock);
119}
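
Because GMCH hardware latches the underrun bit in PIPESTAT without raising an interrupt, the function above is only useful if the modeset path polls it, as its kernel-doc says. A hypothetical call site (the hook name is illustrative):

	/* Hypothetical end-of-modeset hook: poll for underruns on GMCH,
	 * where they never generate an interrupt on their own. */
	static void modeset_check_underruns(struct drm_i915_private *dev_priv)
	{
		if (HAS_GMCH_DISPLAY(dev_priv->dev))
			i9xx_check_fifo_underruns(dev_priv);
	}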
120
121static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
122 enum pipe pipe,
123 bool enable, bool old)
124{
125 struct drm_i915_private *dev_priv = dev->dev_private;
126 u32 reg = PIPESTAT(pipe);
127 u32 pipestat = I915_READ(reg) & 0xffff0000;
128
129 assert_spin_locked(&dev_priv->irq_lock);
130
131 if (enable) {
132 I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
133 POSTING_READ(reg);
134 } else {
135 if (old && pipestat & PIPE_FIFO_UNDERRUN_STATUS)
136 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
137 }
138}
139
140static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
141 enum pipe pipe, bool enable)
142{
143 struct drm_i915_private *dev_priv = dev->dev_private;
144 uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
145 DE_PIPEB_FIFO_UNDERRUN;
146
147 if (enable)
148 ironlake_enable_display_irq(dev_priv, bit);
149 else
150 ironlake_disable_display_irq(dev_priv, bit);
151}
152
153static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
154 enum pipe pipe,
155 bool enable, bool old)
156{
157 struct drm_i915_private *dev_priv = dev->dev_private;
158 if (enable) {
159 I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
160
161 if (!ivb_can_enable_err_int(dev))
162 return;
163
164 ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
165 } else {
166 ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
167
168 if (old &&
169 I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
170 DRM_ERROR("uncleared fifo underrun on pipe %c\n",
171 pipe_name(pipe));
172 }
173 }
174}
175
176static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
177 enum pipe pipe, bool enable)
178{
179 struct drm_i915_private *dev_priv = dev->dev_private;
180
181 assert_spin_locked(&dev_priv->irq_lock);
182
183 if (enable)
184 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
185 else
186 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
187 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
188 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
189}
190
191static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
192 enum transcoder pch_transcoder,
193 bool enable)
194{
195 struct drm_i915_private *dev_priv = dev->dev_private;
196 uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
197 SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
198
199 if (enable)
200 ibx_enable_display_interrupt(dev_priv, bit);
201 else
202 ibx_disable_display_interrupt(dev_priv, bit);
203}
204
205static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
206 enum transcoder pch_transcoder,
207 bool enable, bool old)
208{
209 struct drm_i915_private *dev_priv = dev->dev_private;
210
211 if (enable) {
212 I915_WRITE(SERR_INT,
213 SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
214
215 if (!cpt_can_enable_serr_int(dev))
216 return;
217
218 ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
219 } else {
220 ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
221
222 if (old && I915_READ(SERR_INT) &
223 SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
224 DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
225 transcoder_name(pch_transcoder));
226 }
227 }
228}
229
230static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
231 enum pipe pipe, bool enable)
232{
233 struct drm_i915_private *dev_priv = dev->dev_private;
234 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
235 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
236 bool old;
237
238 assert_spin_locked(&dev_priv->irq_lock);
239
240 old = !intel_crtc->cpu_fifo_underrun_disabled;
241 intel_crtc->cpu_fifo_underrun_disabled = !enable;
242
243 if (HAS_GMCH_DISPLAY(dev))
244 i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
245 else if (IS_GEN5(dev) || IS_GEN6(dev))
246 ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
247 else if (IS_GEN7(dev))
248 ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
249 else if (IS_GEN8(dev) || IS_GEN9(dev))
250 broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
251
252 return old;
253}
254
255/**
256 * intel_set_cpu_fifo_underrun_reporting - set cpu fifo underrun reporting state
257 * @dev_priv: i915 device instance
258 * @pipe: (CPU) pipe to set state for
259 * @enable: whether underruns should be reported or not
260 *
261 * This function sets the fifo underrun reporting state for @pipe. It is used in the
262 * modeset code to avoid false positives since on many platforms underruns are
263 * expected when disabling or enabling the pipe.
264 *
265 * Notice that on some platforms disabling underrun reports for one pipe
266 * disables them for all pipes due to shared interrupts. Actual reporting is
267 * still tracked per-pipe, though.
268 *
269 * Returns the previous state of underrun reporting.
270 */
271bool intel_set_cpu_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
272 enum pipe pipe, bool enable)
273{
274 unsigned long flags;
275 bool ret;
276
277 spin_lock_irqsave(&dev_priv->irq_lock, flags);
278 ret = __intel_set_cpu_fifo_underrun_reporting(dev_priv->dev, pipe,
279 enable);
280 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
281
282 return ret;
283}
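
Per the kernel-doc above, modeset code calls this helper to mask underruns that are expected while a pipe is being reconfigured, re-arming the one-shot reporting described in the DOC comment once the pipe is stable. A hedged sketch of such a call site (the pipe-disable body is elided and the function name is illustrative):

	static void example_disable_pipe(struct drm_i915_private *dev_priv,
					 enum pipe pipe)
	{
		/* Underruns are expected while the pipe shuts down: mute them. */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

		/* ... disable planes, then the pipe itself ... */

		/* Pipe is off; arm underrun reporting for the next enable. */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	}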
284
285static bool
286__cpu_fifo_underrun_reporting_enabled(struct drm_i915_private *dev_priv,
287 enum pipe pipe)
288{
289 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
290 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
291
292 return !intel_crtc->cpu_fifo_underrun_disabled;
293}
294
295/**
296 * intel_set_pch_fifo_underrun_reporting - set PCH fifo underrun reporting state
297 * @dev_priv: i915 device instance
298 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
299 * @enable: whether underruns should be reported or not
300 *
301 * This function makes us disable or enable PCH fifo underruns for a specific
302 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
303 * underrun reporting for one transcoder may also disable all the other PCH
304 * error interrupts for the other transcoders, because there is just one
305 * interrupt mask/enable bit for all the transcoders.
306 *
307 * Returns the previous state of underrun reporting.
308 */
309bool intel_set_pch_fifo_underrun_reporting(struct drm_i915_private *dev_priv,
310 enum transcoder pch_transcoder,
311 bool enable)
312{
313 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
314 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
315 unsigned long flags;
316 bool old;
317
318 /*
319 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
320 * has only one pch transcoder A that all pipes can use. To avoid racy
321 * pch transcoder -> pipe lookups from interrupt code simply store the
322 * underrun statistics in crtc A. Since we never expose this anywhere
323 * nor use it outside of the fifo underrun code here using the "wrong"
324 * crtc on LPT won't cause issues.
325 */
326
327 spin_lock_irqsave(&dev_priv->irq_lock, flags);
328
329 old = !intel_crtc->pch_fifo_underrun_disabled;
330 intel_crtc->pch_fifo_underrun_disabled = !enable;
331
332 if (HAS_PCH_IBX(dev_priv->dev))
333 ibx_set_fifo_underrun_reporting(dev_priv->dev, pch_transcoder,
334 enable);
335 else
336 cpt_set_fifo_underrun_reporting(dev_priv->dev, pch_transcoder,
337 enable, old);
338
339 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
340 return old;
341}
342
343/**
344 * intel_cpu_fifo_underrun_irq_handler - handle CPU fifo underrun interrupt
345 * @dev_priv: i915 device instance
346 * @pipe: (CPU) pipe where the underrun occurred
347 *
348 * This handles a CPU fifo underrun interrupt, generating an underrun warning
349 * into dmesg if underrun reporting is enabled and then disables the underrun
350 * interrupt to avoid an irq storm.
351 */
352void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
353 enum pipe pipe)
354{
355 /* GMCH can't disable fifo underruns, filter them. */
356 if (HAS_GMCH_DISPLAY(dev_priv->dev) &&
357 !__cpu_fifo_underrun_reporting_enabled(dev_priv, pipe))
358 return;
359
360 if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false))
361 DRM_ERROR("CPU pipe %c FIFO underrun\n",
362 pipe_name(pipe));
363}
364
365/**
366 * intel_pch_fifo_underrun_irq_handler - handle PCH fifo underrun interrupt
367 * @dev_priv: i915 device instance
368 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
369 *
370 * This handles a PCH fifo underrun interrupt, generating an underrun warning
371 * into dmesg if underrun reporting is enabled and then disables the underrun
372 * interrupt to avoid an irq storm.
373 */
374void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
375 enum transcoder pch_transcoder)
376{
377 if (intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder,
378 false))
379 DRM_ERROR("PCH transcoder %c FIFO underrun\n",
380 transcoder_name(pch_transcoder));
381}
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
new file mode 100644
index 000000000000..79f6d72179c5
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -0,0 +1,279 @@
1/*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Daniel Vetter <daniel.vetter@ffwll.ch>
25 */
26
27/**
28 * DOC: frontbuffer tracking
29 *
30 * Many features require us to track changes to the currently active
31 * frontbuffer, especially rendering targeted at the frontbuffer.
32 *
33 * To be able to do so GEM tracks frontbuffers using a bitmask for all possible
34 * frontbuffer slots through i915_gem_track_fb(). The functions in this file are
35 * then called when the contents of the frontbuffer are invalidated, when
36 * frontbuffer rendering has stopped again to flush out all the changes and when
37 * the frontbuffer is exchanged with a flip. Subsystems interested in
38 * frontbuffer changes (e.g. PSR, FBC, DRRS) should directly put their callbacks
39 * into the relevant places and filter for the frontbuffer slots that they are
40 * interested in.
41 *
42 * On a high level there are two types of powersaving features. The first type
43 * works like a special cache (FBC and PSR) and needs to know when to stop
44 * caching and when to restart it. This is done by placing callbacks into the
45 * invalidate and the flush functions: at invalidate time the caching must be
46 * stopped and at flush time it can be restarted. Such features may also need to
47 * know when the frontbuffer changes (e.g. when the hw doesn't initiate an
48 * invalidate and flush on its own), which can be achieved by placing callbacks
49 * into the flip functions.
50 *
51 * The other type of display power saving feature only cares about busyness
52 * (e.g. DRRS). In that case all three (invalidate, flush and flip) indicate
53 * busyness. There is no direct way to detect idleness. Instead an idle timer
54 * delayed work should be started from the flush and flip functions and
55 * cancelled as soon as busyness is detected.
56 *
57 * Note that there's also an older frontbuffer activity tracking scheme which
58 * just tracks general activity. This is done by the various mark_busy and
59 * mark_idle functions. For display power management features using these
60 * functions is deprecated and should be avoided.
61 */
62
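
The busyness-only scheme described above maps naturally onto a delayed work item: (re)arm it from the flush and flip hooks, cancel it on invalidate, and treat the work function firing as the idle signal. A minimal sketch under those assumptions; drrs_work, drrs_idle_fn() and IDLE_DELAY_MS are hypothetical names, not part of this file:

	#define IDLE_DELAY_MS	1000	/* hypothetical idle threshold */

	static struct delayed_work drrs_work;	/* INIT_DELAYED_WORK(&drrs_work,
						 * drrs_idle_fn) at init time */

	static void drrs_idle_fn(struct work_struct *work)
	{
		/* No frontbuffer activity for IDLE_DELAY_MS: drop the refresh
		 * rate (or whatever the feature does when idle). */
	}

	/* Called from the flush and flip callbacks: activity seen, rearm. */
	static void drrs_mark_flush_or_flip(void)
	{
		mod_delayed_work(system_wq, &drrs_work,
				 msecs_to_jiffies(IDLE_DELAY_MS));
	}

	/* Called from the invalidate callback: busy, so idleness is ruled out. */
	static void drrs_mark_invalidate(void)
	{
		cancel_delayed_work(&drrs_work);
	}
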
63#include <drm/drmP.h>
64
65#include "intel_drv.h"
66#include "i915_drv.h"
67
68static void intel_increase_pllclock(struct drm_device *dev,
69 enum pipe pipe)
70{
71 struct drm_i915_private *dev_priv = dev->dev_private;
72 int dpll_reg = DPLL(pipe);
73 int dpll;
74
75 if (!HAS_GMCH_DISPLAY(dev))
76 return;
77
78 if (!dev_priv->lvds_downclock_avail)
79 return;
80
81 dpll = I915_READ(dpll_reg);
82 if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
83 DRM_DEBUG_DRIVER("upclocking LVDS\n");
84
85 assert_panel_unlocked(dev_priv, pipe);
86
87 dpll &= ~DISPLAY_RATE_SELECT_FPA1;
88 I915_WRITE(dpll_reg, dpll);
89 intel_wait_for_vblank(dev, pipe);
90
91 dpll = I915_READ(dpll_reg);
92 if (dpll & DISPLAY_RATE_SELECT_FPA1)
93 DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
94 }
95}
96
97/**
98 * intel_mark_fb_busy - mark given planes as busy
99 * @dev: DRM device
100 * @frontbuffer_bits: bits for the affected planes
101 * @ring: optional ring for asynchronous commands
102 *
103 * This function gets called every time the screen contents change. It can be
104 * used to keep e.g. the update rate at the nominal refresh rate with DRRS.
105 */
106static void intel_mark_fb_busy(struct drm_device *dev,
107 unsigned frontbuffer_bits,
108 struct intel_engine_cs *ring)
109{
110 struct drm_i915_private *dev_priv = dev->dev_private;
111 enum pipe pipe;
112
113 if (!i915.powersave)
114 return;
115
116 for_each_pipe(dev_priv, pipe) {
117 if (!(frontbuffer_bits & INTEL_FRONTBUFFER_ALL_MASK(pipe)))
118 continue;
119
120 intel_increase_pllclock(dev, pipe);
121 if (ring && intel_fbc_enabled(dev))
122 ring->fbc_dirty = true;
123 }
124}
125
126/**
127 * intel_fb_obj_invalidate - invalidate frontbuffer object
128 * @obj: GEM object to invalidate
129 * @ring: set for asynchronous rendering
130 *
131 * This function gets called every time rendering on the given object starts and
132 * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
133 * be invalidated. If @ring is non-NULL any subsequent flush will be delayed
134 * until the rendering completes or a flip on this frontbuffer plane is
135 * scheduled.
136 */
137void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
138 struct intel_engine_cs *ring)
139{
140 struct drm_device *dev = obj->base.dev;
141 struct drm_i915_private *dev_priv = dev->dev_private;
142
143 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
144
145 if (!obj->frontbuffer_bits)
146 return;
147
148 if (ring) {
149 mutex_lock(&dev_priv->fb_tracking.lock);
150 dev_priv->fb_tracking.busy_bits
151 |= obj->frontbuffer_bits;
152 dev_priv->fb_tracking.flip_bits
153 &= ~obj->frontbuffer_bits;
154 mutex_unlock(&dev_priv->fb_tracking.lock);
155 }
156
157 intel_mark_fb_busy(dev, obj->frontbuffer_bits, ring);
158
159 intel_psr_invalidate(dev, obj->frontbuffer_bits);
160}
161
162/**
163 * intel_frontbuffer_flush - flush frontbuffer
164 * @dev: DRM device
165 * @frontbuffer_bits: frontbuffer plane tracking bits
166 *
167 * This function gets called every time rendering on the given planes has
168 * completed and frontbuffer caching can be started again. Flushes will get
169 * delayed if they're blocked by some outstanding asynchronous rendering.
170 *
171 * Can be called without any locks held.
172 */
173void intel_frontbuffer_flush(struct drm_device *dev,
174 unsigned frontbuffer_bits)
175{
176 struct drm_i915_private *dev_priv = dev->dev_private;
177
178 /* Delay flushing when rings are still busy. */
179 mutex_lock(&dev_priv->fb_tracking.lock);
180 frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
181 mutex_unlock(&dev_priv->fb_tracking.lock);
182
183 intel_mark_fb_busy(dev, frontbuffer_bits, NULL);
184
185 intel_psr_flush(dev, frontbuffer_bits);
186
187 /*
188 * FIXME: Unconditional fbc flushing here is a rather gross hack and
189 * needs to be reworked into a proper frontbuffer tracking scheme like
190 * psr employs.
191 */
192 if (dev_priv->fbc.need_sw_cache_clean) {
193 dev_priv->fbc.need_sw_cache_clean = false;
194 bdw_fbc_sw_flush(dev, FBC_REND_CACHE_CLEAN);
195 }
196}
197
198/**
199 * intel_fb_obj_flush - flush frontbuffer object
200 * @obj: GEM object to flush
201 * @retire: set when retiring asynchronous rendering
202 *
203 * This function gets called every time rendering on the given object has
204 * completed and frontbuffer caching can be started again. If @retire is true
205 * then any delayed flushes will be unblocked.
206 */
207void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
208 bool retire)
209{
210 struct drm_device *dev = obj->base.dev;
211 struct drm_i915_private *dev_priv = dev->dev_private;
212 unsigned frontbuffer_bits;
213
214 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
215
216 if (!obj->frontbuffer_bits)
217 return;
218
219 frontbuffer_bits = obj->frontbuffer_bits;
220
221 if (retire) {
222 mutex_lock(&dev_priv->fb_tracking.lock);
223 /* Filter out new bits since rendering started. */
224 frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;
225
226 dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
227 mutex_unlock(&dev_priv->fb_tracking.lock);
228 }
229
230 intel_frontbuffer_flush(dev, frontbuffer_bits);
231}
232
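
Taken together, the two object-level entry points bracket a rendering operation: invalidate when something starts writing to a frontbuffer object, flush once the results are guaranteed to land. A condensed sketch of the expected calling sequence; do_gpu_rendering() is hypothetical, and both tracking calls assume dev->struct_mutex is held, as the WARN_ONs above enforce:

	/* CPU rendering: synchronous, so no ring is passed and nothing
	 * needs to retire. */
	intel_fb_obj_invalidate(obj, NULL);
	/* ... CPU writes hit the object ... */
	intel_fb_obj_flush(obj, false);

	/* GPU rendering: the flush is held back by busy_bits until the
	 * request retires. */
	intel_fb_obj_invalidate(obj, ring);
	do_gpu_rendering(obj, ring);		/* hypothetical */
	/* ... later, from the request-retirement path ... */
	intel_fb_obj_flush(obj, true);
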
233/**
234 * intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip
235 * @dev: DRM device
236 * @frontbuffer_bits: frontbuffer plane tracking bits
237 *
238 * This function gets called after scheduling a flip on the given planes. The
239 * actual frontbuffer flushing will be delayed until completion is signalled
240 * with intel_frontbuffer_flip_complete. If an invalidate happens in between,
241 * this flush will be cancelled.
242 *
243 * Can be called without any locks held.
244 */
245void intel_frontbuffer_flip_prepare(struct drm_device *dev,
246 unsigned frontbuffer_bits)
247{
248 struct drm_i915_private *dev_priv = dev->dev_private;
249
250 mutex_lock(&dev_priv->fb_tracking.lock);
251 dev_priv->fb_tracking.flip_bits |= frontbuffer_bits;
252 /* Remove stale busy bits due to the old buffer. */
253 dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
254 mutex_unlock(&dev_priv->fb_tracking.lock);
255}
256
257/**
258 * intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flip
259 * @dev: DRM device
260 * @frontbuffer_bits: frontbuffer plane tracking bits
261 *
262 * This function gets called after the flip has been latched and will complete
263 * on the next vblank. It will execute the flush if it hasn't been cancelled yet.
264 *
265 * Can be called without any locks held.
266 */
267void intel_frontbuffer_flip_complete(struct drm_device *dev,
268 unsigned frontbuffer_bits)
269{
270 struct drm_i915_private *dev_priv = dev->dev_private;
271
272 mutex_lock(&dev_priv->fb_tracking.lock);
273 /* Mask any cancelled flips. */
274 frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
275 dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
276 mutex_unlock(&dev_priv->fb_tracking.lock);
277
278 intel_frontbuffer_flush(dev, frontbuffer_bits);
279}
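
The two flip entry points are meant to bracket the asynchronous flip path: prepare when the flip is queued to the hardware, complete once it has been latched. A sketch of the pairing; queue_flip_to_hw() stands in for the real flip submission and is hypothetical:

	static void example_page_flip(struct drm_device *dev,
				      unsigned frontbuffer_bits)
	{
		intel_frontbuffer_flip_prepare(dev, frontbuffer_bits);

		queue_flip_to_hw();	/* hypothetical */

		/*
		 * ... later, typically from the flip-done interrupt
		 * handler, once the new buffer is latched:
		 */
		intel_frontbuffer_flip_complete(dev, frontbuffer_bits);
	}
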
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 29ec1535992d..3abc2000fce9 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -166,6 +166,19 @@ static void g4x_write_infoframe(struct drm_encoder *encoder,
166 POSTING_READ(VIDEO_DIP_CTL); 166 POSTING_READ(VIDEO_DIP_CTL);
167} 167}
168 168
169static bool g4x_infoframe_enabled(struct drm_encoder *encoder)
170{
171 struct drm_device *dev = encoder->dev;
172 struct drm_i915_private *dev_priv = dev->dev_private;
173 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
174 u32 val = I915_READ(VIDEO_DIP_CTL);
175
176 if (VIDEO_DIP_PORT(intel_dig_port->port) == (val & VIDEO_DIP_PORT_MASK))
177 return val & VIDEO_DIP_ENABLE;
178
179 return false;
180}
181
169static void ibx_write_infoframe(struct drm_encoder *encoder, 182static void ibx_write_infoframe(struct drm_encoder *encoder,
170 enum hdmi_infoframe_type type, 183 enum hdmi_infoframe_type type,
171 const void *frame, ssize_t len) 184 const void *frame, ssize_t len)
@@ -204,6 +217,17 @@ static void ibx_write_infoframe(struct drm_encoder *encoder,
204 POSTING_READ(reg); 217 POSTING_READ(reg);
205} 218}
206 219
220static bool ibx_infoframe_enabled(struct drm_encoder *encoder)
221{
222 struct drm_device *dev = encoder->dev;
223 struct drm_i915_private *dev_priv = dev->dev_private;
224 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
225 int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
226 u32 val = I915_READ(reg);
227
228 return val & VIDEO_DIP_ENABLE;
229}
230
207static void cpt_write_infoframe(struct drm_encoder *encoder, 231static void cpt_write_infoframe(struct drm_encoder *encoder,
208 enum hdmi_infoframe_type type, 232 enum hdmi_infoframe_type type,
209 const void *frame, ssize_t len) 233 const void *frame, ssize_t len)
@@ -245,6 +269,17 @@ static void cpt_write_infoframe(struct drm_encoder *encoder,
245 POSTING_READ(reg); 269 POSTING_READ(reg);
246} 270}
247 271
272static bool cpt_infoframe_enabled(struct drm_encoder *encoder)
273{
274 struct drm_device *dev = encoder->dev;
275 struct drm_i915_private *dev_priv = dev->dev_private;
276 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
277 int reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
278 u32 val = I915_READ(reg);
279
280 return val & VIDEO_DIP_ENABLE;
281}
282
248static void vlv_write_infoframe(struct drm_encoder *encoder, 283static void vlv_write_infoframe(struct drm_encoder *encoder,
249 enum hdmi_infoframe_type type, 284 enum hdmi_infoframe_type type,
250 const void *frame, ssize_t len) 285 const void *frame, ssize_t len)
@@ -283,6 +318,17 @@ static void vlv_write_infoframe(struct drm_encoder *encoder,
283 POSTING_READ(reg); 318 POSTING_READ(reg);
284} 319}
285 320
321static bool vlv_infoframe_enabled(struct drm_encoder *encoder)
322{
323 struct drm_device *dev = encoder->dev;
324 struct drm_i915_private *dev_priv = dev->dev_private;
325 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
326 int reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
327 u32 val = I915_READ(reg);
328
329 return val & VIDEO_DIP_ENABLE;
330}
331
286static void hsw_write_infoframe(struct drm_encoder *encoder, 332static void hsw_write_infoframe(struct drm_encoder *encoder,
287 enum hdmi_infoframe_type type, 333 enum hdmi_infoframe_type type,
288 const void *frame, ssize_t len) 334 const void *frame, ssize_t len)
@@ -320,6 +366,18 @@ static void hsw_write_infoframe(struct drm_encoder *encoder,
320 POSTING_READ(ctl_reg); 366 POSTING_READ(ctl_reg);
321} 367}
322 368
369static bool hsw_infoframe_enabled(struct drm_encoder *encoder)
370{
371 struct drm_device *dev = encoder->dev;
372 struct drm_i915_private *dev_priv = dev->dev_private;
373 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
374 u32 ctl_reg = HSW_TVIDEO_DIP_CTL(intel_crtc->config.cpu_transcoder);
375 u32 val = I915_READ(ctl_reg);
376
377 return val & (VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_SPD_HSW |
378 VIDEO_DIP_ENABLE_VS_HSW);
379}
380
323/* 381/*
324 * The data we write to the DIP data buffer registers is 1 byte bigger than the 382 * The data we write to the DIP data buffer registers is 1 byte bigger than the
325 * HDMI infoframe size because of an ECC/reserved byte at position 3 (starting 383 * HDMI infoframe size because of an ECC/reserved byte at position 3 (starting
@@ -661,14 +719,6 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder)
661 if (crtc->config.has_hdmi_sink) 719 if (crtc->config.has_hdmi_sink)
662 hdmi_val |= HDMI_MODE_SELECT_HDMI; 720 hdmi_val |= HDMI_MODE_SELECT_HDMI;
663 721
664 if (crtc->config.has_audio) {
665 WARN_ON(!crtc->config.has_hdmi_sink);
666 DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
667 pipe_name(crtc->pipe));
668 hdmi_val |= SDVO_AUDIO_ENABLE;
669 intel_write_eld(&encoder->base, adjusted_mode);
670 }
671
672 if (HAS_PCH_CPT(dev)) 722 if (HAS_PCH_CPT(dev))
673 hdmi_val |= SDVO_PIPE_SEL_CPT(crtc->pipe); 723 hdmi_val |= SDVO_PIPE_SEL_CPT(crtc->pipe);
674 else if (IS_CHERRYVIEW(dev)) 724 else if (IS_CHERRYVIEW(dev))
@@ -690,7 +740,7 @@ static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
690 u32 tmp; 740 u32 tmp;
691 741
692 power_domain = intel_display_port_power_domain(encoder); 742 power_domain = intel_display_port_power_domain(encoder);
693 if (!intel_display_power_enabled(dev_priv, power_domain)) 743 if (!intel_display_power_is_enabled(dev_priv, power_domain))
694 return false; 744 return false;
695 745
696 tmp = I915_READ(intel_hdmi->hdmi_reg); 746 tmp = I915_READ(intel_hdmi->hdmi_reg);
@@ -732,6 +782,9 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
732 if (tmp & HDMI_MODE_SELECT_HDMI) 782 if (tmp & HDMI_MODE_SELECT_HDMI)
733 pipe_config->has_hdmi_sink = true; 783 pipe_config->has_hdmi_sink = true;
734 784
785 if (intel_hdmi->infoframe_enabled(&encoder->base))
786 pipe_config->has_infoframe = true;
787
735 if (tmp & SDVO_AUDIO_ENABLE) 788 if (tmp & SDVO_AUDIO_ENABLE)
736 pipe_config->has_audio = true; 789 pipe_config->has_audio = true;
737 790
@@ -791,6 +844,13 @@ static void intel_enable_hdmi(struct intel_encoder *encoder)
791 I915_WRITE(intel_hdmi->hdmi_reg, temp); 844 I915_WRITE(intel_hdmi->hdmi_reg, temp);
792 POSTING_READ(intel_hdmi->hdmi_reg); 845 POSTING_READ(intel_hdmi->hdmi_reg);
793 } 846 }
847
848 if (intel_crtc->config.has_audio) {
849 WARN_ON(!intel_crtc->config.has_hdmi_sink);
850 DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
851 pipe_name(intel_crtc->pipe));
852 intel_audio_codec_enable(encoder);
853 }
794} 854}
795 855
796static void vlv_enable_hdmi(struct intel_encoder *encoder) 856static void vlv_enable_hdmi(struct intel_encoder *encoder)
@@ -802,9 +862,13 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
802 struct drm_device *dev = encoder->base.dev; 862 struct drm_device *dev = encoder->base.dev;
803 struct drm_i915_private *dev_priv = dev->dev_private; 863 struct drm_i915_private *dev_priv = dev->dev_private;
804 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); 864 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
865 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
805 u32 temp; 866 u32 temp;
806 u32 enable_bits = SDVO_ENABLE | SDVO_AUDIO_ENABLE; 867 u32 enable_bits = SDVO_ENABLE | SDVO_AUDIO_ENABLE;
807 868
869 if (crtc->config.has_audio)
870 intel_audio_codec_disable(encoder);
871
808 temp = I915_READ(intel_hdmi->hdmi_reg); 872 temp = I915_READ(intel_hdmi->hdmi_reg);
809 873
810 /* HW workaround for IBX, we need to move the port to transcoder A 874 /* HW workaround for IBX, we need to move the port to transcoder A
@@ -922,6 +986,9 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
922 986
923 pipe_config->has_hdmi_sink = intel_hdmi->has_hdmi_sink; 987 pipe_config->has_hdmi_sink = intel_hdmi->has_hdmi_sink;
924 988
989 if (pipe_config->has_hdmi_sink)
990 pipe_config->has_infoframe = true;
991
925 if (intel_hdmi->color_range_auto) { 992 if (intel_hdmi->color_range_auto) {
926 /* See CEA-861-E - 5.1 Default Encoding Parameters */ 993 /* See CEA-861-E - 5.1 Default Encoding Parameters */
927 if (pipe_config->has_hdmi_sink && 994 if (pipe_config->has_hdmi_sink &&
@@ -1394,10 +1461,13 @@ static void chv_hdmi_post_disable(struct intel_encoder *encoder)
1394static void chv_hdmi_pre_enable(struct intel_encoder *encoder) 1461static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
1395{ 1462{
1396 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base); 1463 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
1464 struct intel_hdmi *intel_hdmi = &dport->hdmi;
1397 struct drm_device *dev = encoder->base.dev; 1465 struct drm_device *dev = encoder->base.dev;
1398 struct drm_i915_private *dev_priv = dev->dev_private; 1466 struct drm_i915_private *dev_priv = dev->dev_private;
1399 struct intel_crtc *intel_crtc = 1467 struct intel_crtc *intel_crtc =
1400 to_intel_crtc(encoder->base.crtc); 1468 to_intel_crtc(encoder->base.crtc);
1469 struct drm_display_mode *adjusted_mode =
1470 &intel_crtc->config.adjusted_mode;
1401 enum dpio_channel ch = vlv_dport_to_channel(dport); 1471 enum dpio_channel ch = vlv_dport_to_channel(dport);
1402 int pipe = intel_crtc->pipe; 1472 int pipe = intel_crtc->pipe;
1403 int data, i; 1473 int data, i;
@@ -1405,6 +1475,15 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
1405 1475
1406 mutex_lock(&dev_priv->dpio_lock); 1476 mutex_lock(&dev_priv->dpio_lock);
1407 1477
1478 /* allow hardware to manage TX FIFO reset source */
1479 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
1480 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
1481 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
1482
1483 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
1484 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
1485 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
1486
1408 /* Deassert soft data lane reset*/ 1487 /* Deassert soft data lane reset*/
1409 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch)); 1488 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
1410 val |= CHV_PCS_REQ_SOFTRESET_EN; 1489 val |= CHV_PCS_REQ_SOFTRESET_EN;
@@ -1441,12 +1520,26 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
1441 /* Clear calc init */ 1520 /* Clear calc init */
1442 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch)); 1521 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
1443 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3); 1522 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
1523 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
1524 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1444 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val); 1525 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
1445 1526
1446 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch)); 1527 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
1447 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3); 1528 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
1529 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
1530 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1448 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val); 1531 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
1449 1532
1533 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
1534 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
1535 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
1536 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
1537
1538 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
1539 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
1540 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
1541 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
1542
1450 /* FIXME: Program the support xxx V-dB */ 1543 /* FIXME: Program the support xxx V-dB */
1451 /* Use 800mV-0dB */ 1544 /* Use 800mV-0dB */
1452 for (i = 0; i < 4; i++) { 1545 for (i = 0; i < 4; i++) {
@@ -1499,6 +1592,10 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
1499 1592
1500 mutex_unlock(&dev_priv->dpio_lock); 1593 mutex_unlock(&dev_priv->dpio_lock);
1501 1594
1595 intel_hdmi->set_infoframes(&encoder->base,
1596 intel_crtc->config.has_hdmi_sink,
1597 adjusted_mode);
1598
1502 intel_enable_hdmi(encoder); 1599 intel_enable_hdmi(encoder);
1503 1600
1504 vlv_wait_port_ready(dev_priv, dport); 1601 vlv_wait_port_ready(dev_priv, dport);
@@ -1593,18 +1690,23 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
1593 if (IS_VALLEYVIEW(dev)) { 1690 if (IS_VALLEYVIEW(dev)) {
1594 intel_hdmi->write_infoframe = vlv_write_infoframe; 1691 intel_hdmi->write_infoframe = vlv_write_infoframe;
1595 intel_hdmi->set_infoframes = vlv_set_infoframes; 1692 intel_hdmi->set_infoframes = vlv_set_infoframes;
1693 intel_hdmi->infoframe_enabled = vlv_infoframe_enabled;
1596 } else if (IS_G4X(dev)) { 1694 } else if (IS_G4X(dev)) {
1597 intel_hdmi->write_infoframe = g4x_write_infoframe; 1695 intel_hdmi->write_infoframe = g4x_write_infoframe;
1598 intel_hdmi->set_infoframes = g4x_set_infoframes; 1696 intel_hdmi->set_infoframes = g4x_set_infoframes;
1697 intel_hdmi->infoframe_enabled = g4x_infoframe_enabled;
1599 } else if (HAS_DDI(dev)) { 1698 } else if (HAS_DDI(dev)) {
1600 intel_hdmi->write_infoframe = hsw_write_infoframe; 1699 intel_hdmi->write_infoframe = hsw_write_infoframe;
1601 intel_hdmi->set_infoframes = hsw_set_infoframes; 1700 intel_hdmi->set_infoframes = hsw_set_infoframes;
1701 intel_hdmi->infoframe_enabled = hsw_infoframe_enabled;
1602 } else if (HAS_PCH_IBX(dev)) { 1702 } else if (HAS_PCH_IBX(dev)) {
1603 intel_hdmi->write_infoframe = ibx_write_infoframe; 1703 intel_hdmi->write_infoframe = ibx_write_infoframe;
1604 intel_hdmi->set_infoframes = ibx_set_infoframes; 1704 intel_hdmi->set_infoframes = ibx_set_infoframes;
1705 intel_hdmi->infoframe_enabled = ibx_infoframe_enabled;
1605 } else { 1706 } else {
1606 intel_hdmi->write_infoframe = cpt_write_infoframe; 1707 intel_hdmi->write_infoframe = cpt_write_infoframe;
1607 intel_hdmi->set_infoframes = cpt_set_infoframes; 1708 intel_hdmi->set_infoframes = cpt_set_infoframes;
1709 intel_hdmi->infoframe_enabled = cpt_infoframe_enabled;
1608 } 1710 }
1609 1711
1610 if (HAS_DDI(dev)) 1712 if (HAS_DDI(dev))
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index bafd38b5703e..e588376227ea 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -136,11 +136,10 @@
136#include <drm/i915_drm.h> 136#include <drm/i915_drm.h>
137#include "i915_drv.h" 137#include "i915_drv.h"
138 138
139#define GEN9_LR_CONTEXT_RENDER_SIZE (22 * PAGE_SIZE)
139#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE) 140#define GEN8_LR_CONTEXT_RENDER_SIZE (20 * PAGE_SIZE)
140#define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE) 141#define GEN8_LR_CONTEXT_OTHER_SIZE (2 * PAGE_SIZE)
141 142
142#define GEN8_LR_CONTEXT_ALIGN 4096
143
144#define RING_EXECLIST_QFULL (1 << 0x2) 143#define RING_EXECLIST_QFULL (1 << 0x2)
145#define RING_EXECLIST1_VALID (1 << 0x3) 144#define RING_EXECLIST1_VALID (1 << 0x3)
146#define RING_EXECLIST0_VALID (1 << 0x4) 145#define RING_EXECLIST0_VALID (1 << 0x4)
@@ -204,6 +203,9 @@ enum {
204}; 203};
205#define GEN8_CTX_ID_SHIFT 32 204#define GEN8_CTX_ID_SHIFT 32
206 205
206static int intel_lr_context_pin(struct intel_engine_cs *ring,
207 struct intel_context *ctx);
208
207/** 209/**
208 * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists 210 * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
209 * @dev: DRM device. 211 * @dev: DRM device.
@@ -219,6 +221,9 @@ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists
219{ 221{
220 WARN_ON(i915.enable_ppgtt == -1); 222 WARN_ON(i915.enable_ppgtt == -1);
221 223
224 if (INTEL_INFO(dev)->gen >= 9)
225 return 1;
226
222 if (enable_execlists == 0) 227 if (enable_execlists == 0)
223 return 0; 228 return 0;
224 229
@@ -275,7 +280,8 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
275 struct drm_i915_gem_object *ctx_obj0, 280 struct drm_i915_gem_object *ctx_obj0,
276 struct drm_i915_gem_object *ctx_obj1) 281 struct drm_i915_gem_object *ctx_obj1)
277{ 282{
278 struct drm_i915_private *dev_priv = ring->dev->dev_private; 283 struct drm_device *dev = ring->dev;
284 struct drm_i915_private *dev_priv = dev->dev_private;
279 uint64_t temp = 0; 285 uint64_t temp = 0;
280 uint32_t desc[4]; 286 uint32_t desc[4];
281 unsigned long flags; 287 unsigned long flags;
@@ -300,13 +306,18 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
300 * Instead, we do the runtime_pm_get/put when creating/destroying requests. 306 * Instead, we do the runtime_pm_get/put when creating/destroying requests.
301 */ 307 */
302 spin_lock_irqsave(&dev_priv->uncore.lock, flags); 308 spin_lock_irqsave(&dev_priv->uncore.lock, flags);
303 if (IS_CHERRYVIEW(dev_priv->dev)) { 309 if (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen >= 9) {
304 if (dev_priv->uncore.fw_rendercount++ == 0) 310 if (dev_priv->uncore.fw_rendercount++ == 0)
305 dev_priv->uncore.funcs.force_wake_get(dev_priv, 311 dev_priv->uncore.funcs.force_wake_get(dev_priv,
306 FORCEWAKE_RENDER); 312 FORCEWAKE_RENDER);
307 if (dev_priv->uncore.fw_mediacount++ == 0) 313 if (dev_priv->uncore.fw_mediacount++ == 0)
308 dev_priv->uncore.funcs.force_wake_get(dev_priv, 314 dev_priv->uncore.funcs.force_wake_get(dev_priv,
309 FORCEWAKE_MEDIA); 315 FORCEWAKE_MEDIA);
316 if (INTEL_INFO(dev)->gen >= 9) {
317 if (dev_priv->uncore.fw_blittercount++ == 0)
318 dev_priv->uncore.funcs.force_wake_get(dev_priv,
319 FORCEWAKE_BLITTER);
320 }
310 } else { 321 } else {
311 if (dev_priv->uncore.forcewake_count++ == 0) 322 if (dev_priv->uncore.forcewake_count++ == 0)
312 dev_priv->uncore.funcs.force_wake_get(dev_priv, 323 dev_priv->uncore.funcs.force_wake_get(dev_priv,
@@ -325,13 +336,18 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
325 336
326 /* Release Force Wakeup (see the big comment above). */ 337 /* Release Force Wakeup (see the big comment above). */
327 spin_lock_irqsave(&dev_priv->uncore.lock, flags); 338 spin_lock_irqsave(&dev_priv->uncore.lock, flags);
328 if (IS_CHERRYVIEW(dev_priv->dev)) { 339 if (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen >= 9) {
329 if (--dev_priv->uncore.fw_rendercount == 0) 340 if (--dev_priv->uncore.fw_rendercount == 0)
330 dev_priv->uncore.funcs.force_wake_put(dev_priv, 341 dev_priv->uncore.funcs.force_wake_put(dev_priv,
331 FORCEWAKE_RENDER); 342 FORCEWAKE_RENDER);
332 if (--dev_priv->uncore.fw_mediacount == 0) 343 if (--dev_priv->uncore.fw_mediacount == 0)
333 dev_priv->uncore.funcs.force_wake_put(dev_priv, 344 dev_priv->uncore.funcs.force_wake_put(dev_priv,
334 FORCEWAKE_MEDIA); 345 FORCEWAKE_MEDIA);
346 if (INTEL_INFO(dev)->gen >= 9) {
347 if (--dev_priv->uncore.fw_blittercount == 0)
348 dev_priv->uncore.funcs.force_wake_put(dev_priv,
349 FORCEWAKE_BLITTER);
350 }
335 } else { 351 } else {
336 if (--dev_priv->uncore.forcewake_count == 0) 352 if (--dev_priv->uncore.forcewake_count == 0)
337 dev_priv->uncore.funcs.force_wake_put(dev_priv, 353 dev_priv->uncore.funcs.force_wake_put(dev_priv,
@@ -341,7 +357,9 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
341 spin_unlock_irqrestore(&dev_priv->uncore.lock, flags); 357 spin_unlock_irqrestore(&dev_priv->uncore.lock, flags);
342} 358}
343 359
344static int execlists_ctx_write_tail(struct drm_i915_gem_object *ctx_obj, u32 tail) 360static int execlists_update_context(struct drm_i915_gem_object *ctx_obj,
361 struct drm_i915_gem_object *ring_obj,
362 u32 tail)
345{ 363{
346 struct page *page; 364 struct page *page;
347 uint32_t *reg_state; 365 uint32_t *reg_state;
@@ -350,43 +368,45 @@ static int execlists_ctx_write_tail(struct drm_i915_gem_object *ctx_obj, u32 tai
350 reg_state = kmap_atomic(page); 368 reg_state = kmap_atomic(page);
351 369
352 reg_state[CTX_RING_TAIL+1] = tail; 370 reg_state[CTX_RING_TAIL+1] = tail;
371 reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ring_obj);
353 372
354 kunmap_atomic(reg_state); 373 kunmap_atomic(reg_state);
355 374
356 return 0; 375 return 0;
357} 376}
358 377
359static int execlists_submit_context(struct intel_engine_cs *ring, 378static void execlists_submit_contexts(struct intel_engine_cs *ring,
360 struct intel_context *to0, u32 tail0, 379 struct intel_context *to0, u32 tail0,
361 struct intel_context *to1, u32 tail1) 380 struct intel_context *to1, u32 tail1)
362{ 381{
363 struct drm_i915_gem_object *ctx_obj0; 382 struct drm_i915_gem_object *ctx_obj0 = to0->engine[ring->id].state;
383 struct intel_ringbuffer *ringbuf0 = to0->engine[ring->id].ringbuf;
364 struct drm_i915_gem_object *ctx_obj1 = NULL; 384 struct drm_i915_gem_object *ctx_obj1 = NULL;
385 struct intel_ringbuffer *ringbuf1 = NULL;
365 386
366 ctx_obj0 = to0->engine[ring->id].state;
367 BUG_ON(!ctx_obj0); 387 BUG_ON(!ctx_obj0);
368 WARN_ON(!i915_gem_obj_is_pinned(ctx_obj0)); 388 WARN_ON(!i915_gem_obj_is_pinned(ctx_obj0));
389 WARN_ON(!i915_gem_obj_is_pinned(ringbuf0->obj));
369 390
370 execlists_ctx_write_tail(ctx_obj0, tail0); 391 execlists_update_context(ctx_obj0, ringbuf0->obj, tail0);
371 392
372 if (to1) { 393 if (to1) {
394 ringbuf1 = to1->engine[ring->id].ringbuf;
373 ctx_obj1 = to1->engine[ring->id].state; 395 ctx_obj1 = to1->engine[ring->id].state;
374 BUG_ON(!ctx_obj1); 396 BUG_ON(!ctx_obj1);
375 WARN_ON(!i915_gem_obj_is_pinned(ctx_obj1)); 397 WARN_ON(!i915_gem_obj_is_pinned(ctx_obj1));
398 WARN_ON(!i915_gem_obj_is_pinned(ringbuf1->obj));
376 399
377 execlists_ctx_write_tail(ctx_obj1, tail1); 400 execlists_update_context(ctx_obj1, ringbuf1->obj, tail1);
378 } 401 }
379 402
380 execlists_elsp_write(ring, ctx_obj0, ctx_obj1); 403 execlists_elsp_write(ring, ctx_obj0, ctx_obj1);
381
382 return 0;
383} 404}
384 405
385static void execlists_context_unqueue(struct intel_engine_cs *ring) 406static void execlists_context_unqueue(struct intel_engine_cs *ring)
386{ 407{
387 struct intel_ctx_submit_request *req0 = NULL, *req1 = NULL; 408 struct intel_ctx_submit_request *req0 = NULL, *req1 = NULL;
388 struct intel_ctx_submit_request *cursor = NULL, *tmp = NULL; 409 struct intel_ctx_submit_request *cursor = NULL, *tmp = NULL;
389 struct drm_i915_private *dev_priv = ring->dev->dev_private;
390 410
391 assert_spin_locked(&ring->execlist_lock); 411 assert_spin_locked(&ring->execlist_lock);
392 412
@@ -403,7 +423,8 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
403 * will update tail past first request's workload */ 423 * will update tail past first request's workload */
404 cursor->elsp_submitted = req0->elsp_submitted; 424 cursor->elsp_submitted = req0->elsp_submitted;
405 list_del(&req0->execlist_link); 425 list_del(&req0->execlist_link);
406 queue_work(dev_priv->wq, &req0->work); 426 list_add_tail(&req0->execlist_link,
427 &ring->execlist_retired_req_list);
407 req0 = cursor; 428 req0 = cursor;
408 } else { 429 } else {
409 req1 = cursor; 430 req1 = cursor;
@@ -413,9 +434,9 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
413 434
414 WARN_ON(req1 && req1->elsp_submitted); 435 WARN_ON(req1 && req1->elsp_submitted);
415 436
416 WARN_ON(execlists_submit_context(ring, req0->ctx, req0->tail, 437 execlists_submit_contexts(ring, req0->ctx, req0->tail,
417 req1 ? req1->ctx : NULL, 438 req1 ? req1->ctx : NULL,
418 req1 ? req1->tail : 0)); 439 req1 ? req1->tail : 0);
419 440
420 req0->elsp_submitted++; 441 req0->elsp_submitted++;
421 if (req1) 442 if (req1)
@@ -425,7 +446,6 @@ static void execlists_context_unqueue(struct intel_engine_cs *ring)
425static bool execlists_check_remove_request(struct intel_engine_cs *ring, 446static bool execlists_check_remove_request(struct intel_engine_cs *ring,
426 u32 request_id) 447 u32 request_id)
427{ 448{
428 struct drm_i915_private *dev_priv = ring->dev->dev_private;
429 struct intel_ctx_submit_request *head_req; 449 struct intel_ctx_submit_request *head_req;
430 450
431 assert_spin_locked(&ring->execlist_lock); 451 assert_spin_locked(&ring->execlist_lock);
@@ -443,7 +463,8 @@ static bool execlists_check_remove_request(struct intel_engine_cs *ring,
443 463
444 if (--head_req->elsp_submitted <= 0) { 464 if (--head_req->elsp_submitted <= 0) {
445 list_del(&head_req->execlist_link); 465 list_del(&head_req->execlist_link);
446 queue_work(dev_priv->wq, &head_req->work); 466 list_add_tail(&head_req->execlist_link,
467 &ring->execlist_retired_req_list);
447 return true; 468 return true;
448 } 469 }
449 } 470 }
@@ -512,22 +533,6 @@ void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring)
512 ((u32)ring->next_context_status_buffer & 0x07) << 8); 533 ((u32)ring->next_context_status_buffer & 0x07) << 8);
513} 534}
514 535
515static void execlists_free_request_task(struct work_struct *work)
516{
517 struct intel_ctx_submit_request *req =
518 container_of(work, struct intel_ctx_submit_request, work);
519 struct drm_device *dev = req->ring->dev;
520 struct drm_i915_private *dev_priv = dev->dev_private;
521
522 intel_runtime_pm_put(dev_priv);
523
524 mutex_lock(&dev->struct_mutex);
525 i915_gem_context_unreference(req->ctx);
526 mutex_unlock(&dev->struct_mutex);
527
528 kfree(req);
529}
530
531static int execlists_context_queue(struct intel_engine_cs *ring, 536static int execlists_context_queue(struct intel_engine_cs *ring,
532 struct intel_context *to, 537 struct intel_context *to,
533 u32 tail) 538 u32 tail)
@@ -542,9 +547,12 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
542 return -ENOMEM; 547 return -ENOMEM;
543 req->ctx = to; 548 req->ctx = to;
544 i915_gem_context_reference(req->ctx); 549 i915_gem_context_reference(req->ctx);
550
551 if (to != ring->default_context)
552 intel_lr_context_pin(ring, to);
553
545 req->ring = ring; 554 req->ring = ring;
546 req->tail = tail; 555 req->tail = tail;
547 INIT_WORK(&req->work, execlists_free_request_task);
548 556
549 intel_runtime_pm_get(dev_priv); 557 intel_runtime_pm_get(dev_priv);
550 558
@@ -563,9 +571,10 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
563 571
564 if (to == tail_req->ctx) { 572 if (to == tail_req->ctx) {
565 WARN(tail_req->elsp_submitted != 0, 573 WARN(tail_req->elsp_submitted != 0,
566 "More than 2 already-submitted reqs queued\n"); 574 "More than 2 already-submitted reqs queued\n");
567 list_del(&tail_req->execlist_link); 575 list_del(&tail_req->execlist_link);
568 queue_work(dev_priv->wq, &tail_req->work); 576 list_add_tail(&tail_req->execlist_link,
577 &ring->execlist_retired_req_list);
569 } 578 }
570 } 579 }
571 580
@@ -733,6 +742,36 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
733 return 0; 742 return 0;
734} 743}
735 744
745void intel_execlists_retire_requests(struct intel_engine_cs *ring)
746{
747 struct intel_ctx_submit_request *req, *tmp;
748 struct drm_i915_private *dev_priv = ring->dev->dev_private;
749 unsigned long flags;
750 struct list_head retired_list;
751
752 WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
753 if (list_empty(&ring->execlist_retired_req_list))
754 return;
755
756 INIT_LIST_HEAD(&retired_list);
757 spin_lock_irqsave(&ring->execlist_lock, flags);
758 list_replace_init(&ring->execlist_retired_req_list, &retired_list);
759 spin_unlock_irqrestore(&ring->execlist_lock, flags);
760
761 list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) {
762 struct intel_context *ctx = req->ctx;
763 struct drm_i915_gem_object *ctx_obj =
764 ctx->engine[ring->id].state;
765
766 if (ctx_obj && (ctx != ring->default_context))
767 intel_lr_context_unpin(ring, ctx);
768 intel_runtime_pm_put(dev_priv);
769 i915_gem_context_unreference(req->ctx);
770 list_del(&req->execlist_link);
771 kfree(req);
772 }
773}
774
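
With the per-request work items gone, finished requests are parked on execlist_retired_req_list under the execlist spinlock and reaped here under struct_mutex; list_replace_init() hands the whole list over in O(1) so the sleeping cleanup (runtime pm put, context unref, kfree) runs outside the irq-disabled section. A plausible call site, assuming the GEM retire path drains each ring (the loop below is an illustrative sketch, not a quote from this patch):

	/* e.g. once per ring from the request-retirement path: */
	for_each_ring(ring, dev_priv, i)
		intel_execlists_retire_requests(ring);
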
736void intel_logical_ring_stop(struct intel_engine_cs *ring) 775void intel_logical_ring_stop(struct intel_engine_cs *ring)
737{ 776{
738 struct drm_i915_private *dev_priv = ring->dev->dev_private; 777 struct drm_i915_private *dev_priv = ring->dev->dev_private;
@@ -793,9 +832,55 @@ void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf)
793 execlists_context_queue(ring, ctx, ringbuf->tail); 832 execlists_context_queue(ring, ctx, ringbuf->tail);
794} 833}
795 834
835static int intel_lr_context_pin(struct intel_engine_cs *ring,
836 struct intel_context *ctx)
837{
838 struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
839 struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
840 int ret = 0;
841
842 WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
843 if (ctx->engine[ring->id].unpin_count++ == 0) {
844 ret = i915_gem_obj_ggtt_pin(ctx_obj,
845 GEN8_LR_CONTEXT_ALIGN, 0);
846 if (ret)
847 goto reset_unpin_count;
848
849 ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
850 if (ret)
851 goto unpin_ctx_obj;
852 }
853
854 return ret;
855
856unpin_ctx_obj:
857 i915_gem_object_ggtt_unpin(ctx_obj);
858reset_unpin_count:
859 ctx->engine[ring->id].unpin_count = 0;
860
861 return ret;
862}
863
864void intel_lr_context_unpin(struct intel_engine_cs *ring,
865 struct intel_context *ctx)
866{
867 struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
868 struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
869
870 if (ctx_obj) {
871 WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
872 if (--ctx->engine[ring->id].unpin_count == 0) {
873 intel_unpin_ringbuffer_obj(ringbuf);
874 i915_gem_object_ggtt_unpin(ctx_obj);
875 }
876 }
877}
878
796static int logical_ring_alloc_seqno(struct intel_engine_cs *ring, 879static int logical_ring_alloc_seqno(struct intel_engine_cs *ring,
797 struct intel_context *ctx) 880 struct intel_context *ctx)
798{ 881{
882 int ret;
883
799 if (ring->outstanding_lazy_seqno) 884 if (ring->outstanding_lazy_seqno)
800 return 0; 885 return 0;
801 886
@@ -806,6 +891,14 @@ static int logical_ring_alloc_seqno(struct intel_engine_cs *ring,
806 if (request == NULL) 891 if (request == NULL)
807 return -ENOMEM; 892 return -ENOMEM;
808 893
894 if (ctx != ring->default_context) {
895 ret = intel_lr_context_pin(ring, ctx);
896 if (ret) {
897 kfree(request);
898 return ret;
899 }
900 }
901
809 /* Hold a reference to the context this request belongs to 902 /* Hold a reference to the context this request belongs to
810 * (we will need it when the time comes to emit/retire the 903 * (we will need it when the time comes to emit/retire the
811 * request). 904 * request).
@@ -991,6 +1084,44 @@ int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords)
991 return 0; 1084 return 0;
992} 1085}
993 1086
1087static int intel_logical_ring_workarounds_emit(struct intel_engine_cs *ring,
1088 struct intel_context *ctx)
1089{
1090 int ret, i;
1091 struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
1092 struct drm_device *dev = ring->dev;
1093 struct drm_i915_private *dev_priv = dev->dev_private;
1094 struct i915_workarounds *w = &dev_priv->workarounds;
1095
1096 if (WARN_ON(w->count == 0))
1097 return 0;
1098
1099 ring->gpu_caches_dirty = true;
1100 ret = logical_ring_flush_all_caches(ringbuf);
1101 if (ret)
1102 return ret;
1103
1104 ret = intel_logical_ring_begin(ringbuf, w->count * 2 + 2);
1105 if (ret)
1106 return ret;
1107
1108 intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count));
1109 for (i = 0; i < w->count; i++) {
1110 intel_logical_ring_emit(ringbuf, w->reg[i].addr);
1111 intel_logical_ring_emit(ringbuf, w->reg[i].value);
1112 }
1113 intel_logical_ring_emit(ringbuf, MI_NOOP);
1114
1115 intel_logical_ring_advance(ringbuf);
1116
1117 ring->gpu_caches_dirty = true;
1118 ret = logical_ring_flush_all_caches(ringbuf);
1119 if (ret)
1120 return ret;
1121
1122 return 0;
1123}
1124
994static int gen8_init_common_ring(struct intel_engine_cs *ring) 1125static int gen8_init_common_ring(struct intel_engine_cs *ring)
995{ 1126{
996 struct drm_device *dev = ring->dev; 1127 struct drm_device *dev = ring->dev;
@@ -1034,7 +1165,7 @@ static int gen8_init_render_ring(struct intel_engine_cs *ring)
1034 1165
1035 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); 1166 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
1036 1167
1037 return ret; 1168 return init_workarounds_ring(ring);
1038} 1169}
1039 1170
1040static int gen8_emit_bb_start(struct intel_ringbuffer *ringbuf, 1171static int gen8_emit_bb_start(struct intel_ringbuffer *ringbuf,
@@ -1063,7 +1194,7 @@ static bool gen8_logical_ring_get_irq(struct intel_engine_cs *ring)
1063 struct drm_i915_private *dev_priv = dev->dev_private; 1194 struct drm_i915_private *dev_priv = dev->dev_private;
1064 unsigned long flags; 1195 unsigned long flags;
1065 1196
1066 if (!dev->irq_enabled) 1197 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
1067 return false; 1198 return false;
1068 1199
1069 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1200 spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1214,11 +1345,13 @@ static int gen8_emit_request(struct intel_ringbuffer *ringbuf)
1214 */ 1345 */
1215void intel_logical_ring_cleanup(struct intel_engine_cs *ring) 1346void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
1216{ 1347{
1217 struct drm_i915_private *dev_priv = ring->dev->dev_private; 1348 struct drm_i915_private *dev_priv;
1218 1349
1219 if (!intel_ring_initialized(ring)) 1350 if (!intel_ring_initialized(ring))
1220 return; 1351 return;
1221 1352
1353 dev_priv = ring->dev->dev_private;
1354
1222 intel_logical_ring_stop(ring); 1355 intel_logical_ring_stop(ring);
1223 WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0); 1356 WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
1224 ring->preallocated_lazy_request = NULL; 1357 ring->preallocated_lazy_request = NULL;
@@ -1248,6 +1381,7 @@ static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *rin
1248 init_waitqueue_head(&ring->irq_queue); 1381 init_waitqueue_head(&ring->irq_queue);
1249 1382
1250 INIT_LIST_HEAD(&ring->execlist_queue); 1383 INIT_LIST_HEAD(&ring->execlist_queue);
1384 INIT_LIST_HEAD(&ring->execlist_retired_req_list);
1251 spin_lock_init(&ring->execlist_lock); 1385 spin_lock_init(&ring->execlist_lock);
1252 ring->next_context_status_buffer = 0; 1386 ring->next_context_status_buffer = 0;
1253 1387
@@ -1282,6 +1416,7 @@ static int logical_render_ring_init(struct drm_device *dev)
1282 ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; 1416 ring->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
1283 1417
1284 ring->init = gen8_init_render_ring; 1418 ring->init = gen8_init_render_ring;
1419 ring->init_context = intel_logical_ring_workarounds_emit;
1285 ring->cleanup = intel_fini_pipe_control; 1420 ring->cleanup = intel_fini_pipe_control;
1286 ring->get_seqno = gen8_get_seqno; 1421 ring->get_seqno = gen8_get_seqno;
1287 ring->set_seqno = gen8_set_seqno; 1422 ring->set_seqno = gen8_set_seqno;
@@ -1495,7 +1630,6 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
1495{ 1630{
1496 struct drm_device *dev = ring->dev; 1631 struct drm_device *dev = ring->dev;
1497 struct drm_i915_private *dev_priv = dev->dev_private; 1632 struct drm_i915_private *dev_priv = dev->dev_private;
1498 struct drm_i915_gem_object *ring_obj = ringbuf->obj;
1499 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt; 1633 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
1500 struct page *page; 1634 struct page *page;
1501 uint32_t *reg_state; 1635 uint32_t *reg_state;
@@ -1541,7 +1675,9 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
1541 reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base); 1675 reg_state[CTX_RING_TAIL] = RING_TAIL(ring->mmio_base);
1542 reg_state[CTX_RING_TAIL+1] = 0; 1676 reg_state[CTX_RING_TAIL+1] = 0;
1543 reg_state[CTX_RING_BUFFER_START] = RING_START(ring->mmio_base); 1677 reg_state[CTX_RING_BUFFER_START] = RING_START(ring->mmio_base);
1544 reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ring_obj); 1678 /* Ring buffer start address is not known until the buffer is pinned.
1679 * It is written to the context image in execlists_update_context()
1680 */
1545 reg_state[CTX_RING_BUFFER_CONTROL] = RING_CTL(ring->mmio_base); 1681 reg_state[CTX_RING_BUFFER_CONTROL] = RING_CTL(ring->mmio_base);
1546 reg_state[CTX_RING_BUFFER_CONTROL+1] = 1682 reg_state[CTX_RING_BUFFER_CONTROL+1] =
1547 ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID; 1683 ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID;
@@ -1617,12 +1753,18 @@ void intel_lr_context_free(struct intel_context *ctx)
1617 1753
1618 for (i = 0; i < I915_NUM_RINGS; i++) { 1754 for (i = 0; i < I915_NUM_RINGS; i++) {
1619 struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state; 1755 struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
1620 struct intel_ringbuffer *ringbuf = ctx->engine[i].ringbuf;
1621 1756
1622 if (ctx_obj) { 1757 if (ctx_obj) {
1758 struct intel_ringbuffer *ringbuf =
1759 ctx->engine[i].ringbuf;
1760 struct intel_engine_cs *ring = ringbuf->ring;
1761
1762 if (ctx == ring->default_context) {
1763 intel_unpin_ringbuffer_obj(ringbuf);
1764 i915_gem_object_ggtt_unpin(ctx_obj);
1765 }
1623 intel_destroy_ringbuffer_obj(ringbuf); 1766 intel_destroy_ringbuffer_obj(ringbuf);
1624 kfree(ringbuf); 1767 kfree(ringbuf);
1625 i915_gem_object_ggtt_unpin(ctx_obj);
1626 drm_gem_object_unreference(&ctx_obj->base); 1768 drm_gem_object_unreference(&ctx_obj->base);
1627 } 1769 }
1628 } 1770 }
@@ -1632,11 +1774,14 @@ static uint32_t get_lr_context_size(struct intel_engine_cs *ring)
1632{ 1774{
1633 int ret = 0; 1775 int ret = 0;
1634 1776
1635 WARN_ON(INTEL_INFO(ring->dev)->gen != 8); 1777 WARN_ON(INTEL_INFO(ring->dev)->gen < 8);
1636 1778
1637 switch (ring->id) { 1779 switch (ring->id) {
1638 case RCS: 1780 case RCS:
1639 ret = GEN8_LR_CONTEXT_RENDER_SIZE; 1781 if (INTEL_INFO(ring->dev)->gen >= 9)
1782 ret = GEN9_LR_CONTEXT_RENDER_SIZE;
1783 else
1784 ret = GEN8_LR_CONTEXT_RENDER_SIZE;
1640 break; 1785 break;
1641 case VCS: 1786 case VCS:
1642 case BCS: 1787 case BCS:
@@ -1649,6 +1794,23 @@ static uint32_t get_lr_context_size(struct intel_engine_cs *ring)
1649 return ret; 1794 return ret;
1650} 1795}
1651 1796
1797static void lrc_setup_hardware_status_page(struct intel_engine_cs *ring,
1798 struct drm_i915_gem_object *default_ctx_obj)
1799{
1800 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1801
1802 /* The status page is offset 0 from the default context object
1803 * in LRC mode. */
1804 ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(default_ctx_obj);
1805 ring->status_page.page_addr =
1806 kmap(sg_page(default_ctx_obj->pages->sgl));
1807 ring->status_page.obj = default_ctx_obj;
1808
1809 I915_WRITE(RING_HWS_PGA(ring->mmio_base),
1810 (u32)ring->status_page.gfx_addr);
1811 POSTING_READ(RING_HWS_PGA(ring->mmio_base));
1812}
1813
1652/** 1814/**
1653 * intel_lr_context_deferred_create() - create the LRC specific bits of a context 1815 * intel_lr_context_deferred_create() - create the LRC specific bits of a context
1654 * @ctx: LR context to create. 1816 * @ctx: LR context to create.
@@ -1660,11 +1822,12 @@ static uint32_t get_lr_context_size(struct intel_engine_cs *ring)
1660 * the creation is a deferred call: it's better to make sure first that we need to use 1822 * the creation is a deferred call: it's better to make sure first that we need to use
1661 * a given ring with the context. 1823 * a given ring with the context.
1662 * 1824 *
1663 * Return: non-zero on eror. 1825 * Return: non-zero on error.
1664 */ 1826 */
1665int intel_lr_context_deferred_create(struct intel_context *ctx, 1827int intel_lr_context_deferred_create(struct intel_context *ctx,
1666 struct intel_engine_cs *ring) 1828 struct intel_engine_cs *ring)
1667{ 1829{
1830 const bool is_global_default_ctx = (ctx == ring->default_context);
1668 struct drm_device *dev = ring->dev; 1831 struct drm_device *dev = ring->dev;
1669 struct drm_i915_gem_object *ctx_obj; 1832 struct drm_i915_gem_object *ctx_obj;
1670 uint32_t context_size; 1833 uint32_t context_size;
@@ -1684,21 +1847,22 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
1684 return ret; 1847 return ret;
1685 } 1848 }
1686 1849
1687 ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 0); 1850 if (is_global_default_ctx) {
1688 if (ret) { 1851 ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 0);
1689 DRM_DEBUG_DRIVER("Pin LRC backing obj failed: %d\n", ret); 1852 if (ret) {
1690 drm_gem_object_unreference(&ctx_obj->base); 1853 DRM_DEBUG_DRIVER("Pin LRC backing obj failed: %d\n",
1691 return ret; 1854 ret);
1855 drm_gem_object_unreference(&ctx_obj->base);
1856 return ret;
1857 }
1692 } 1858 }
1693 1859
1694 ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL); 1860 ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
1695 if (!ringbuf) { 1861 if (!ringbuf) {
1696 DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n", 1862 DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",
1697 ring->name); 1863 ring->name);
1698 i915_gem_object_ggtt_unpin(ctx_obj);
1699 drm_gem_object_unreference(&ctx_obj->base);
1700 ret = -ENOMEM; 1864 ret = -ENOMEM;
1701 return ret; 1865 goto error_unpin_ctx;
1702 } 1866 }
1703 1867
1704 ringbuf->ring = ring; 1868 ringbuf->ring = ring;
@@ -1711,46 +1875,51 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
1711 ringbuf->space = ringbuf->size; 1875 ringbuf->space = ringbuf->size;
1712 ringbuf->last_retired_head = -1; 1876 ringbuf->last_retired_head = -1;
1713 1877
1714 /* TODO: For now we put this in the mappable region so that we can reuse 1878 if (ringbuf->obj == NULL) {
1715 * the existing ringbuffer code which ioremaps it. When we start 1879 ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
1716 * creating many contexts, this will no longer work and we must switch 1880 if (ret) {
1717 * to a kmapish interface. 1881 DRM_DEBUG_DRIVER(
1718 */ 1882 "Failed to allocate ringbuffer obj %s: %d\n",
1719 ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
1720 if (ret) {
1721 DRM_DEBUG_DRIVER("Failed to allocate ringbuffer obj %s: %d\n",
1722 ring->name, ret); 1883 ring->name, ret);
1723 goto error; 1884 goto error_free_rbuf;
1885 }
1886
1887 if (is_global_default_ctx) {
1888 ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
1889 if (ret) {
1890 DRM_ERROR(
1891 "Failed to pin and map ringbuffer %s: %d\n",
1892 ring->name, ret);
1893 goto error_destroy_rbuf;
1894 }
1895 }
1896
1724 } 1897 }
1725 1898
1726 ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf); 1899 ret = populate_lr_context(ctx, ctx_obj, ring, ringbuf);
1727 if (ret) { 1900 if (ret) {
1728 DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret); 1901 DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
1729 intel_destroy_ringbuffer_obj(ringbuf);
1730 goto error; 1902 goto error;
1731 } 1903 }
1732 1904
1733 ctx->engine[ring->id].ringbuf = ringbuf; 1905 ctx->engine[ring->id].ringbuf = ringbuf;
1734 ctx->engine[ring->id].state = ctx_obj; 1906 ctx->engine[ring->id].state = ctx_obj;
1735 1907
1736 if (ctx == ring->default_context) { 1908 if (ctx == ring->default_context)
1737 /* The status page is offset 0 from the default context object 1909 lrc_setup_hardware_status_page(ring, ctx_obj);
1738 * in LRC mode. */
1739 ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(ctx_obj);
1740 ring->status_page.page_addr =
1741 kmap(sg_page(ctx_obj->pages->sgl));
1742 if (ring->status_page.page_addr == NULL)
1743 return -ENOMEM;
1744 ring->status_page.obj = ctx_obj;
1745 }
1746 1910
1747 if (ring->id == RCS && !ctx->rcs_initialized) { 1911 if (ring->id == RCS && !ctx->rcs_initialized) {
1912 if (ring->init_context) {
1913 ret = ring->init_context(ring, ctx);
1914 if (ret)
1915 DRM_ERROR("ring init context: %d\n", ret);
1916 }
1917
1748 ret = intel_lr_context_render_state_init(ring, ctx); 1918 ret = intel_lr_context_render_state_init(ring, ctx);
1749 if (ret) { 1919 if (ret) {
1750 DRM_ERROR("Init render state failed: %d\n", ret); 1920 DRM_ERROR("Init render state failed: %d\n", ret);
1751 ctx->engine[ring->id].ringbuf = NULL; 1921 ctx->engine[ring->id].ringbuf = NULL;
1752 ctx->engine[ring->id].state = NULL; 1922 ctx->engine[ring->id].state = NULL;
1753 intel_destroy_ringbuffer_obj(ringbuf);
1754 goto error; 1923 goto error;
1755 } 1924 }
1756 ctx->rcs_initialized = true; 1925 ctx->rcs_initialized = true;
@@ -1759,8 +1928,15 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
1759 return 0; 1928 return 0;
1760 1929
1761error: 1930error:
1931 if (is_global_default_ctx)
1932 intel_unpin_ringbuffer_obj(ringbuf);
1933error_destroy_rbuf:
1934 intel_destroy_ringbuffer_obj(ringbuf);
1935error_free_rbuf:
1762 kfree(ringbuf); 1936 kfree(ringbuf);
1763 i915_gem_object_ggtt_unpin(ctx_obj); 1937error_unpin_ctx:
1938 if (is_global_default_ctx)
1939 i915_gem_object_ggtt_unpin(ctx_obj);
1764 drm_gem_object_unreference(&ctx_obj->base); 1940 drm_gem_object_unreference(&ctx_obj->base);
1765 return ret; 1941 return ret;
1766} 1942}
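The restructured error paths above unwind strictly in reverse order of setup, and the context pin/unpin steps are guarded by the same is_global_default_ctx predicate on both the setup and the error side. A minimal userspace sketch of that goto-ladder shape (hypothetical acquire/release helpers, not the driver code):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the pinned context object and the
 * ringbuffer; each step can fail independently. */
static int acquire(const char *what, int fail)
{
    printf("acquire %s\n", what);
    return fail ? -1 : 0;
}

static void release(const char *what)
{
    printf("release %s\n", what);
}

/* Unwind in strict reverse order of acquisition; conditional steps are
 * guarded on both paths, mirroring the shape of the new
 * error_unpin_ctx/error_free_rbuf/error_destroy_rbuf ladder. */
static int create(bool is_global_default_ctx, int failing_step)
{
    int ret;

    if (is_global_default_ctx) {
        ret = acquire("ctx pin", failing_step == 0);
        if (ret)
            return ret;
    }
    ret = acquire("ringbuf struct", failing_step == 1);
    if (ret)
        goto err_unpin_ctx;
    ret = acquire("ringbuf obj", failing_step == 2);
    if (ret)
        goto err_free_rbuf;
    return 0;

err_free_rbuf:
    release("ringbuf struct");
err_unpin_ctx:
    if (is_global_default_ctx)
        release("ctx pin");
    return ret;
}

int main(void)
{
    return create(true, 2) ? 1 : 0; /* fails at step 2, releases 1 then 0 */
}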
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 33c3b4bf28c5..14b216b9be7f 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -24,6 +24,8 @@
24#ifndef _INTEL_LRC_H_ 24#ifndef _INTEL_LRC_H_
25#define _INTEL_LRC_H_ 25#define _INTEL_LRC_H_
26 26
27#define GEN8_LR_CONTEXT_ALIGN 4096
28
27/* Execlists regs */ 29/* Execlists regs */
28#define RING_ELSP(ring) ((ring)->mmio_base+0x230) 30#define RING_ELSP(ring) ((ring)->mmio_base+0x230)
29#define RING_EXECLIST_STATUS(ring) ((ring)->mmio_base+0x234) 31#define RING_EXECLIST_STATUS(ring) ((ring)->mmio_base+0x234)
@@ -67,6 +69,8 @@ int intel_lr_context_render_state_init(struct intel_engine_cs *ring,
67void intel_lr_context_free(struct intel_context *ctx); 69void intel_lr_context_free(struct intel_context *ctx);
68int intel_lr_context_deferred_create(struct intel_context *ctx, 70int intel_lr_context_deferred_create(struct intel_context *ctx,
69 struct intel_engine_cs *ring); 71 struct intel_engine_cs *ring);
72void intel_lr_context_unpin(struct intel_engine_cs *ring,
73 struct intel_context *ctx);
70 74
71/* Execlists */ 75/* Execlists */
72int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists); 76int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
@@ -104,11 +108,11 @@ struct intel_ctx_submit_request {
104 u32 tail; 108 u32 tail;
105 109
106 struct list_head execlist_link; 110 struct list_head execlist_link;
107 struct work_struct work;
108 111
109 int elsp_submitted; 112 int elsp_submitted;
110}; 113};
111 114
112void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring); 115void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring);
116void intel_execlists_retire_requests(struct intel_engine_cs *ring);
113 117
114#endif /* _INTEL_LRC_H_ */ 118#endif /* _INTEL_LRC_H_ */
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index c0bbf2172446..14654d628ca4 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -76,7 +76,7 @@ static bool intel_lvds_get_hw_state(struct intel_encoder *encoder,
76 u32 tmp; 76 u32 tmp;
77 77
78 power_domain = intel_display_port_power_domain(encoder); 78 power_domain = intel_display_port_power_domain(encoder);
79 if (!intel_display_power_enabled(dev_priv, power_domain)) 79 if (!intel_display_power_is_enabled(dev_priv, power_domain))
80 return false; 80 return false;
81 81
82 tmp = I915_READ(lvds_encoder->reg); 82 tmp = I915_READ(lvds_encoder->reg);
@@ -1116,7 +1116,7 @@ out:
1116 drm_connector_register(connector); 1116 drm_connector_register(connector);
1117 1117
1118 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode); 1118 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
1119 intel_panel_setup_backlight(connector); 1119 intel_panel_setup_backlight(connector, INVALID_PIPE);
1120 1120
1121 return; 1121 return;
1122 1122
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 41b3be217493..4d63839bd9b4 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -521,6 +521,9 @@ static u32 _vlv_get_backlight(struct drm_device *dev, enum pipe pipe)
521{ 521{
522 struct drm_i915_private *dev_priv = dev->dev_private; 522 struct drm_i915_private *dev_priv = dev->dev_private;
523 523
524 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
525 return 0;
526
524 return I915_READ(VLV_BLC_PWM_CTL(pipe)) & BACKLIGHT_DUTY_CYCLE_MASK; 527 return I915_READ(VLV_BLC_PWM_CTL(pipe)) & BACKLIGHT_DUTY_CYCLE_MASK;
525} 528}
526 529
@@ -536,15 +539,17 @@ static u32 intel_panel_get_backlight(struct intel_connector *connector)
536{ 539{
537 struct drm_device *dev = connector->base.dev; 540 struct drm_device *dev = connector->base.dev;
538 struct drm_i915_private *dev_priv = dev->dev_private; 541 struct drm_i915_private *dev_priv = dev->dev_private;
539 u32 val; 542 struct intel_panel *panel = &connector->panel;
540 unsigned long flags; 543 u32 val = 0;
541 544
542 spin_lock_irqsave(&dev_priv->backlight_lock, flags); 545 mutex_lock(&dev_priv->backlight_lock);
543 546
544 val = dev_priv->display.get_backlight(connector); 547 if (panel->backlight.enabled) {
545 val = intel_panel_compute_brightness(connector, val); 548 val = dev_priv->display.get_backlight(connector);
549 val = intel_panel_compute_brightness(connector, val);
550 }
546 551
547 spin_unlock_irqrestore(&dev_priv->backlight_lock, flags); 552 mutex_unlock(&dev_priv->backlight_lock);
548 553
549 DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val); 554 DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
550 return val; 555 return val;
@@ -603,6 +608,9 @@ static void vlv_set_backlight(struct intel_connector *connector, u32 level)
603 enum pipe pipe = intel_get_pipe_from_connector(connector); 608 enum pipe pipe = intel_get_pipe_from_connector(connector);
604 u32 tmp; 609 u32 tmp;
605 610
611 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
612 return;
613
606 tmp = I915_READ(VLV_BLC_PWM_CTL(pipe)) & ~BACKLIGHT_DUTY_CYCLE_MASK; 614 tmp = I915_READ(VLV_BLC_PWM_CTL(pipe)) & ~BACKLIGHT_DUTY_CYCLE_MASK;
607 I915_WRITE(VLV_BLC_PWM_CTL(pipe), tmp | level); 615 I915_WRITE(VLV_BLC_PWM_CTL(pipe), tmp | level);
608} 616}
@@ -626,14 +634,12 @@ static void intel_panel_set_backlight(struct intel_connector *connector,
626 struct drm_device *dev = connector->base.dev; 634 struct drm_device *dev = connector->base.dev;
627 struct drm_i915_private *dev_priv = dev->dev_private; 635 struct drm_i915_private *dev_priv = dev->dev_private;
628 struct intel_panel *panel = &connector->panel; 636 struct intel_panel *panel = &connector->panel;
629 enum pipe pipe = intel_get_pipe_from_connector(connector);
630 u32 hw_level; 637 u32 hw_level;
631 unsigned long flags;
632 638
633 if (!panel->backlight.present || pipe == INVALID_PIPE) 639 if (!panel->backlight.present)
634 return; 640 return;
635 641
636 spin_lock_irqsave(&dev_priv->backlight_lock, flags); 642 mutex_lock(&dev_priv->backlight_lock);
637 643
638 WARN_ON(panel->backlight.max == 0); 644 WARN_ON(panel->backlight.max == 0);
639 645
@@ -643,7 +649,7 @@ static void intel_panel_set_backlight(struct intel_connector *connector,
643 if (panel->backlight.enabled) 649 if (panel->backlight.enabled)
644 intel_panel_actually_set_backlight(connector, hw_level); 650 intel_panel_actually_set_backlight(connector, hw_level);
645 651
646 spin_unlock_irqrestore(&dev_priv->backlight_lock, flags); 652 mutex_unlock(&dev_priv->backlight_lock);
647} 653}
648 654
649/* set backlight brightness to level in range [0..max], assuming hw min is 655/* set backlight brightness to level in range [0..max], assuming hw min is
@@ -657,12 +663,17 @@ void intel_panel_set_backlight_acpi(struct intel_connector *connector,
657 struct intel_panel *panel = &connector->panel; 663 struct intel_panel *panel = &connector->panel;
658 enum pipe pipe = intel_get_pipe_from_connector(connector); 664 enum pipe pipe = intel_get_pipe_from_connector(connector);
659 u32 hw_level; 665 u32 hw_level;
660 unsigned long flags;
661 666
667 /*
668 * INVALID_PIPE may occur during driver init because
669 * connection_mutex isn't held across the entire backlight
670 * setup + modeset readout, and the BIOS can issue the
671 * requests at any time.
672 */
662 if (!panel->backlight.present || pipe == INVALID_PIPE) 673 if (!panel->backlight.present || pipe == INVALID_PIPE)
663 return; 674 return;
664 675
665 spin_lock_irqsave(&dev_priv->backlight_lock, flags); 676 mutex_lock(&dev_priv->backlight_lock);
666 677
667 WARN_ON(panel->backlight.max == 0); 678 WARN_ON(panel->backlight.max == 0);
668 679
@@ -678,7 +689,7 @@ void intel_panel_set_backlight_acpi(struct intel_connector *connector,
678 if (panel->backlight.enabled) 689 if (panel->backlight.enabled)
679 intel_panel_actually_set_backlight(connector, hw_level); 690 intel_panel_actually_set_backlight(connector, hw_level);
680 691
681 spin_unlock_irqrestore(&dev_priv->backlight_lock, flags); 692 mutex_unlock(&dev_priv->backlight_lock);
682} 693}
683 694
684static void pch_disable_backlight(struct intel_connector *connector) 695static void pch_disable_backlight(struct intel_connector *connector)
@@ -720,6 +731,9 @@ static void vlv_disable_backlight(struct intel_connector *connector)
720 enum pipe pipe = intel_get_pipe_from_connector(connector); 731 enum pipe pipe = intel_get_pipe_from_connector(connector);
721 u32 tmp; 732 u32 tmp;
722 733
734 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
735 return;
736
723 intel_panel_actually_set_backlight(connector, 0); 737 intel_panel_actually_set_backlight(connector, 0);
724 738
725 tmp = I915_READ(VLV_BLC_PWM_CTL2(pipe)); 739 tmp = I915_READ(VLV_BLC_PWM_CTL2(pipe));
@@ -731,10 +745,8 @@ void intel_panel_disable_backlight(struct intel_connector *connector)
731 struct drm_device *dev = connector->base.dev; 745 struct drm_device *dev = connector->base.dev;
732 struct drm_i915_private *dev_priv = dev->dev_private; 746 struct drm_i915_private *dev_priv = dev->dev_private;
733 struct intel_panel *panel = &connector->panel; 747 struct intel_panel *panel = &connector->panel;
734 enum pipe pipe = intel_get_pipe_from_connector(connector);
735 unsigned long flags;
736 748
737 if (!panel->backlight.present || pipe == INVALID_PIPE) 749 if (!panel->backlight.present)
738 return; 750 return;
739 751
740 /* 752 /*
@@ -748,14 +760,14 @@ void intel_panel_disable_backlight(struct intel_connector *connector)
748 return; 760 return;
749 } 761 }
750 762
751 spin_lock_irqsave(&dev_priv->backlight_lock, flags); 763 mutex_lock(&dev_priv->backlight_lock);
752 764
753 if (panel->backlight.device) 765 if (panel->backlight.device)
754 panel->backlight.device->props.power = FB_BLANK_POWERDOWN; 766 panel->backlight.device->props.power = FB_BLANK_POWERDOWN;
755 panel->backlight.enabled = false; 767 panel->backlight.enabled = false;
756 dev_priv->display.disable_backlight(connector); 768 dev_priv->display.disable_backlight(connector);
757 769
758 spin_unlock_irqrestore(&dev_priv->backlight_lock, flags); 770 mutex_unlock(&dev_priv->backlight_lock);
759} 771}
760 772
761static void bdw_enable_backlight(struct intel_connector *connector) 773static void bdw_enable_backlight(struct intel_connector *connector)
@@ -779,8 +791,9 @@ static void bdw_enable_backlight(struct intel_connector *connector)
779 if (panel->backlight.active_low_pwm) 791 if (panel->backlight.active_low_pwm)
780 pch_ctl1 |= BLM_PCH_POLARITY; 792 pch_ctl1 |= BLM_PCH_POLARITY;
781 793
782 /* BDW always uses the pch pwm controls. */ 794 /* After LPT, override is the default. */
783 pch_ctl1 |= BLM_PCH_OVERRIDE_ENABLE; 795 if (HAS_PCH_LPT(dev_priv))
796 pch_ctl1 |= BLM_PCH_OVERRIDE_ENABLE;
784 797
785 I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1); 798 I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
786 POSTING_READ(BLC_PWM_PCH_CTL1); 799 POSTING_READ(BLC_PWM_PCH_CTL1);
@@ -909,6 +922,9 @@ static void vlv_enable_backlight(struct intel_connector *connector)
909 enum pipe pipe = intel_get_pipe_from_connector(connector); 922 enum pipe pipe = intel_get_pipe_from_connector(connector);
910 u32 ctl, ctl2; 923 u32 ctl, ctl2;
911 924
925 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
926 return;
927
912 ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe)); 928 ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe));
913 if (ctl2 & BLM_PWM_ENABLE) { 929 if (ctl2 & BLM_PWM_ENABLE) {
914 DRM_DEBUG_KMS("backlight already enabled\n"); 930 DRM_DEBUG_KMS("backlight already enabled\n");
@@ -936,14 +952,13 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
936 struct drm_i915_private *dev_priv = dev->dev_private; 952 struct drm_i915_private *dev_priv = dev->dev_private;
937 struct intel_panel *panel = &connector->panel; 953 struct intel_panel *panel = &connector->panel;
938 enum pipe pipe = intel_get_pipe_from_connector(connector); 954 enum pipe pipe = intel_get_pipe_from_connector(connector);
939 unsigned long flags;
940 955
941 if (!panel->backlight.present || pipe == INVALID_PIPE) 956 if (!panel->backlight.present)
942 return; 957 return;
943 958
944 DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe)); 959 DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
945 960
946 spin_lock_irqsave(&dev_priv->backlight_lock, flags); 961 mutex_lock(&dev_priv->backlight_lock);
947 962
948 WARN_ON(panel->backlight.max == 0); 963 WARN_ON(panel->backlight.max == 0);
949 964
@@ -961,7 +976,7 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
961 if (panel->backlight.device) 976 if (panel->backlight.device)
962 panel->backlight.device->props.power = FB_BLANK_UNBLANK; 977 panel->backlight.device->props.power = FB_BLANK_UNBLANK;
963 978
964 spin_unlock_irqrestore(&dev_priv->backlight_lock, flags); 979 mutex_unlock(&dev_priv->backlight_lock);
965} 980}
966 981
967#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE) 982#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
@@ -1030,6 +1045,9 @@ static int intel_backlight_device_register(struct intel_connector *connector)
1030 if (WARN_ON(panel->backlight.device)) 1045 if (WARN_ON(panel->backlight.device))
1031 return -ENODEV; 1046 return -ENODEV;
1032 1047
1048 if (!panel->backlight.present)
1049 return 0;
1050
1033 WARN_ON(panel->backlight.max == 0); 1051 WARN_ON(panel->backlight.max == 0);
1034 1052
1035 memset(&props, 0, sizeof(props)); 1053 memset(&props, 0, sizeof(props));
@@ -1065,6 +1083,10 @@ static int intel_backlight_device_register(struct intel_connector *connector)
1065 panel->backlight.device = NULL; 1083 panel->backlight.device = NULL;
1066 return -ENODEV; 1084 return -ENODEV;
1067 } 1085 }
1086
1087 DRM_DEBUG_KMS("Connector %s backlight sysfs interface registered\n",
1088 connector->base.name);
1089
1068 return 0; 1090 return 0;
1069} 1091}
1070 1092
@@ -1119,7 +1141,7 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector)
1119 return scale(min, 0, 255, 0, panel->backlight.max); 1141 return scale(min, 0, 255, 0, panel->backlight.max);
1120} 1142}
1121 1143
1122static int bdw_setup_backlight(struct intel_connector *connector) 1144static int bdw_setup_backlight(struct intel_connector *connector, enum pipe unused)
1123{ 1145{
1124 struct drm_device *dev = connector->base.dev; 1146 struct drm_device *dev = connector->base.dev;
1125 struct drm_i915_private *dev_priv = dev->dev_private; 1147 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1145,7 +1167,7 @@ static int bdw_setup_backlight(struct intel_connector *connector)
1145 return 0; 1167 return 0;
1146} 1168}
1147 1169
1148static int pch_setup_backlight(struct intel_connector *connector) 1170static int pch_setup_backlight(struct intel_connector *connector, enum pipe unused)
1149{ 1171{
1150 struct drm_device *dev = connector->base.dev; 1172 struct drm_device *dev = connector->base.dev;
1151 struct drm_i915_private *dev_priv = dev->dev_private; 1173 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1172,7 +1194,7 @@ static int pch_setup_backlight(struct intel_connector *connector)
1172 return 0; 1194 return 0;
1173} 1195}
1174 1196
1175static int i9xx_setup_backlight(struct intel_connector *connector) 1197static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unused)
1176{ 1198{
1177 struct drm_device *dev = connector->base.dev; 1199 struct drm_device *dev = connector->base.dev;
1178 struct drm_i915_private *dev_priv = dev->dev_private; 1200 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1204,7 +1226,7 @@ static int i9xx_setup_backlight(struct intel_connector *connector)
1204 return 0; 1226 return 0;
1205} 1227}
1206 1228
1207static int i965_setup_backlight(struct intel_connector *connector) 1229static int i965_setup_backlight(struct intel_connector *connector, enum pipe unused)
1208{ 1230{
1209 struct drm_device *dev = connector->base.dev; 1231 struct drm_device *dev = connector->base.dev;
1210 struct drm_i915_private *dev_priv = dev->dev_private; 1232 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1234,37 +1256,40 @@ static int i965_setup_backlight(struct intel_connector *connector)
1234 return 0; 1256 return 0;
1235} 1257}
1236 1258
1237static int vlv_setup_backlight(struct intel_connector *connector) 1259static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe)
1238{ 1260{
1239 struct drm_device *dev = connector->base.dev; 1261 struct drm_device *dev = connector->base.dev;
1240 struct drm_i915_private *dev_priv = dev->dev_private; 1262 struct drm_i915_private *dev_priv = dev->dev_private;
1241 struct intel_panel *panel = &connector->panel; 1263 struct intel_panel *panel = &connector->panel;
1242 enum pipe pipe; 1264 enum pipe p;
1243 u32 ctl, ctl2, val; 1265 u32 ctl, ctl2, val;
1244 1266
1245 for_each_pipe(dev_priv, pipe) { 1267 for_each_pipe(dev_priv, p) {
1246 u32 cur_val = I915_READ(VLV_BLC_PWM_CTL(pipe)); 1268 u32 cur_val = I915_READ(VLV_BLC_PWM_CTL(p));
1247 1269
1248 /* Skip if the modulation freq is already set */ 1270 /* Skip if the modulation freq is already set */
1249 if (cur_val & ~BACKLIGHT_DUTY_CYCLE_MASK) 1271 if (cur_val & ~BACKLIGHT_DUTY_CYCLE_MASK)
1250 continue; 1272 continue;
1251 1273
1252 cur_val &= BACKLIGHT_DUTY_CYCLE_MASK; 1274 cur_val &= BACKLIGHT_DUTY_CYCLE_MASK;
1253 I915_WRITE(VLV_BLC_PWM_CTL(pipe), (0xf42 << 16) | 1275 I915_WRITE(VLV_BLC_PWM_CTL(p), (0xf42 << 16) |
1254 cur_val); 1276 cur_val);
1255 } 1277 }
1256 1278
1257 ctl2 = I915_READ(VLV_BLC_PWM_CTL2(PIPE_A)); 1279 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
1280 return -ENODEV;
1281
1282 ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe));
1258 panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965; 1283 panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
1259 1284
1260 ctl = I915_READ(VLV_BLC_PWM_CTL(PIPE_A)); 1285 ctl = I915_READ(VLV_BLC_PWM_CTL(pipe));
1261 panel->backlight.max = ctl >> 16; 1286 panel->backlight.max = ctl >> 16;
1262 if (!panel->backlight.max) 1287 if (!panel->backlight.max)
1263 return -ENODEV; 1288 return -ENODEV;
1264 1289
1265 panel->backlight.min = get_backlight_min_vbt(connector); 1290 panel->backlight.min = get_backlight_min_vbt(connector);
1266 1291
1267 val = _vlv_get_backlight(dev, PIPE_A); 1292 val = _vlv_get_backlight(dev, pipe);
1268 panel->backlight.level = intel_panel_compute_brightness(connector, val); 1293 panel->backlight.level = intel_panel_compute_brightness(connector, val);
1269 1294
1270 panel->backlight.enabled = (ctl2 & BLM_PWM_ENABLE) && 1295 panel->backlight.enabled = (ctl2 & BLM_PWM_ENABLE) &&
@@ -1273,13 +1298,12 @@ static int vlv_setup_backlight(struct intel_connector *connector)
1273 return 0; 1298 return 0;
1274} 1299}
1275 1300
1276int intel_panel_setup_backlight(struct drm_connector *connector) 1301int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
1277{ 1302{
1278 struct drm_device *dev = connector->dev; 1303 struct drm_device *dev = connector->dev;
1279 struct drm_i915_private *dev_priv = dev->dev_private; 1304 struct drm_i915_private *dev_priv = dev->dev_private;
1280 struct intel_connector *intel_connector = to_intel_connector(connector); 1305 struct intel_connector *intel_connector = to_intel_connector(connector);
1281 struct intel_panel *panel = &intel_connector->panel; 1306 struct intel_panel *panel = &intel_connector->panel;
1282 unsigned long flags;
1283 int ret; 1307 int ret;
1284 1308
1285 if (!dev_priv->vbt.backlight.present) { 1309 if (!dev_priv->vbt.backlight.present) {
@@ -1292,9 +1316,9 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
1292 } 1316 }
1293 1317
1294 /* set level and max in panel struct */ 1318 /* set level and max in panel struct */
1295 spin_lock_irqsave(&dev_priv->backlight_lock, flags); 1319 mutex_lock(&dev_priv->backlight_lock);
1296 ret = dev_priv->display.setup_backlight(intel_connector); 1320 ret = dev_priv->display.setup_backlight(intel_connector, pipe);
1297 spin_unlock_irqrestore(&dev_priv->backlight_lock, flags); 1321 mutex_unlock(&dev_priv->backlight_lock);
1298 1322
1299 if (ret) { 1323 if (ret) {
1300 DRM_DEBUG_KMS("failed to setup backlight for connector %s\n", 1324 DRM_DEBUG_KMS("failed to setup backlight for connector %s\n",
@@ -1302,15 +1326,12 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
1302 return ret; 1326 return ret;
1303 } 1327 }
1304 1328
1305 intel_backlight_device_register(intel_connector);
1306
1307 panel->backlight.present = true; 1329 panel->backlight.present = true;
1308 1330
1309 DRM_DEBUG_KMS("backlight initialized, %s, brightness %u/%u, " 1331 DRM_DEBUG_KMS("Connector %s backlight initialized, %s, brightness %u/%u\n",
1310 "sysfs interface %sregistered\n", 1332 connector->name,
1311 panel->backlight.enabled ? "enabled" : "disabled", 1333 panel->backlight.enabled ? "enabled" : "disabled",
1312 panel->backlight.level, panel->backlight.max, 1334 panel->backlight.level, panel->backlight.max);
1313 panel->backlight.device ? "" : "not ");
1314 1335
1315 return 0; 1336 return 0;
1316} 1337}
@@ -1321,7 +1342,6 @@ void intel_panel_destroy_backlight(struct drm_connector *connector)
1321 struct intel_panel *panel = &intel_connector->panel; 1342 struct intel_panel *panel = &intel_connector->panel;
1322 1343
1323 panel->backlight.present = false; 1344 panel->backlight.present = false;
1324 intel_backlight_device_unregister(intel_connector);
1325} 1345}
1326 1346
1327/* Set up chip specific backlight functions */ 1347/* Set up chip specific backlight functions */
@@ -1329,7 +1349,7 @@ void intel_panel_init_backlight_funcs(struct drm_device *dev)
1329{ 1349{
1330 struct drm_i915_private *dev_priv = dev->dev_private; 1350 struct drm_i915_private *dev_priv = dev->dev_private;
1331 1351
1332 if (IS_BROADWELL(dev)) { 1352 if (IS_BROADWELL(dev) || (INTEL_INFO(dev)->gen >= 9)) {
1333 dev_priv->display.setup_backlight = bdw_setup_backlight; 1353 dev_priv->display.setup_backlight = bdw_setup_backlight;
1334 dev_priv->display.enable_backlight = bdw_enable_backlight; 1354 dev_priv->display.enable_backlight = bdw_enable_backlight;
1335 dev_priv->display.disable_backlight = pch_disable_backlight; 1355 dev_priv->display.disable_backlight = pch_disable_backlight;
@@ -1384,3 +1404,19 @@ void intel_panel_fini(struct intel_panel *panel)
1384 drm_mode_destroy(intel_connector->base.dev, 1404 drm_mode_destroy(intel_connector->base.dev,
1385 panel->downclock_mode); 1405 panel->downclock_mode);
1386} 1406}
1407
1408void intel_backlight_register(struct drm_device *dev)
1409{
1410 struct intel_connector *connector;
1411
1412 list_for_each_entry(connector, &dev->mode_config.connector_list, base.head)
1413 intel_backlight_device_register(connector);
1414}
1415
1416void intel_backlight_unregister(struct drm_device *dev)
1417{
1418 struct intel_connector *connector;
1419
1420 list_for_each_entry(connector, &dev->mode_config.connector_list, base.head)
1421 intel_backlight_device_unregister(connector);
1422}
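With sysfs registration pulled out of intel_panel_setup_backlight(), the new intel_backlight_register()/intel_backlight_unregister() pair above just walks the connector list at driver load/unload, and the present check added to intel_backlight_device_register() makes the walk skip connectors without a backlight. A small model of that pattern (hypothetical connector type; the real code iterates dev->mode_config.connector_list):

#include <stdio.h>

/* Hypothetical connector model standing in for struct intel_connector. */
struct connector {
    const char *name;
    int backlight_present;
};

static void backlight_device_register(const struct connector *c)
{
    if (!c->backlight_present) /* mirrors the new present check */
        return;
    printf("registered backlight for %s\n", c->name);
}

int main(void)
{
    struct connector list[] = { { "eDP-1", 1 }, { "HDMI-A-1", 0 } };
    unsigned int i;

    for (i = 0; i < sizeof(list) / sizeof(list[0]); i++)
        backlight_device_register(&list[i]); /* only eDP-1 registers */
    return 0;
}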
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index ad2fd605f76b..1f4b56e273c8 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -30,9 +30,6 @@
30#include "intel_drv.h" 30#include "intel_drv.h"
31#include "../../../platform/x86/intel_ips.h" 31#include "../../../platform/x86/intel_ips.h"
32#include <linux/module.h> 32#include <linux/module.h>
33#include <linux/vgaarb.h>
34#include <drm/i915_powerwell.h>
35#include <linux/pm_runtime.h>
36 33
37/** 34/**
38 * RC6 is a special power stage which allows the GPU to enter a very 35 * RC6 is a special power stage which allows the GPU to enter a very
@@ -66,11 +63,37 @@
66 * i915.i915_enable_fbc parameter 63 * i915.i915_enable_fbc parameter
67 */ 64 */
68 65
66static void gen9_init_clock_gating(struct drm_device *dev)
67{
68 struct drm_i915_private *dev_priv = dev->dev_private;
69
70 /*
71 * WaDisableSDEUnitClockGating:skl
72 * This seems to be a pre-production w/a.
73 */
74 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
75 GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
76
77 /*
78 * WaDisableDgMirrorFixInHalfSliceChicken5:skl
79 * This is a pre-production w/a.
80 */
81 I915_WRITE(GEN9_HALF_SLICE_CHICKEN5,
82 I915_READ(GEN9_HALF_SLICE_CHICKEN5) &
83 ~GEN9_DG_MIRROR_FIX_ENABLE);
84
85 /* Wa4x4STCOptimizationDisable:skl */
86 I915_WRITE(CACHE_MODE_1,
87 _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE));
88}
89
69static void i8xx_disable_fbc(struct drm_device *dev) 90static void i8xx_disable_fbc(struct drm_device *dev)
70{ 91{
71 struct drm_i915_private *dev_priv = dev->dev_private; 92 struct drm_i915_private *dev_priv = dev->dev_private;
72 u32 fbc_ctl; 93 u32 fbc_ctl;
73 94
95 dev_priv->fbc.enabled = false;
96
74 /* Disable compression */ 97 /* Disable compression */
75 fbc_ctl = I915_READ(FBC_CONTROL); 98 fbc_ctl = I915_READ(FBC_CONTROL);
76 if ((fbc_ctl & FBC_CTL_EN) == 0) 99 if ((fbc_ctl & FBC_CTL_EN) == 0)
@@ -99,6 +122,8 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc)
99 int i; 122 int i;
100 u32 fbc_ctl; 123 u32 fbc_ctl;
101 124
125 dev_priv->fbc.enabled = true;
126
102 cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE; 127 cfb_pitch = dev_priv->fbc.size / FBC_LL_SIZE;
103 if (fb->pitches[0] < cfb_pitch) 128 if (fb->pitches[0] < cfb_pitch)
104 cfb_pitch = fb->pitches[0]; 129 cfb_pitch = fb->pitches[0];
@@ -153,6 +178,8 @@ static void g4x_enable_fbc(struct drm_crtc *crtc)
153 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 178 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
154 u32 dpfc_ctl; 179 u32 dpfc_ctl;
155 180
181 dev_priv->fbc.enabled = true;
182
156 dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN; 183 dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane) | DPFC_SR_EN;
157 if (drm_format_plane_cpp(fb->pixel_format, 0) == 2) 184 if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
158 dpfc_ctl |= DPFC_CTL_LIMIT_2X; 185 dpfc_ctl |= DPFC_CTL_LIMIT_2X;
@@ -173,6 +200,8 @@ static void g4x_disable_fbc(struct drm_device *dev)
173 struct drm_i915_private *dev_priv = dev->dev_private; 200 struct drm_i915_private *dev_priv = dev->dev_private;
174 u32 dpfc_ctl; 201 u32 dpfc_ctl;
175 202
203 dev_priv->fbc.enabled = false;
204
176 /* Disable compression */ 205 /* Disable compression */
177 dpfc_ctl = I915_READ(DPFC_CONTROL); 206 dpfc_ctl = I915_READ(DPFC_CONTROL);
178 if (dpfc_ctl & DPFC_CTL_EN) { 207 if (dpfc_ctl & DPFC_CTL_EN) {
@@ -224,6 +253,8 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc)
224 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 253 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
225 u32 dpfc_ctl; 254 u32 dpfc_ctl;
226 255
256 dev_priv->fbc.enabled = true;
257
227 dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane); 258 dpfc_ctl = DPFC_CTL_PLANE(intel_crtc->plane);
228 if (drm_format_plane_cpp(fb->pixel_format, 0) == 2) 259 if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
229 dev_priv->fbc.threshold++; 260 dev_priv->fbc.threshold++;
@@ -264,6 +295,8 @@ static void ironlake_disable_fbc(struct drm_device *dev)
264 struct drm_i915_private *dev_priv = dev->dev_private; 295 struct drm_i915_private *dev_priv = dev->dev_private;
265 u32 dpfc_ctl; 296 u32 dpfc_ctl;
266 297
298 dev_priv->fbc.enabled = false;
299
267 /* Disable compression */ 300 /* Disable compression */
268 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); 301 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
269 if (dpfc_ctl & DPFC_CTL_EN) { 302 if (dpfc_ctl & DPFC_CTL_EN) {
@@ -290,6 +323,8 @@ static void gen7_enable_fbc(struct drm_crtc *crtc)
290 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 323 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
291 u32 dpfc_ctl; 324 u32 dpfc_ctl;
292 325
326 dev_priv->fbc.enabled = true;
327
293 dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane); 328 dpfc_ctl = IVB_DPFC_CTL_PLANE(intel_crtc->plane);
294 if (drm_format_plane_cpp(fb->pixel_format, 0) == 2) 329 if (drm_format_plane_cpp(fb->pixel_format, 0) == 2)
295 dev_priv->fbc.threshold++; 330 dev_priv->fbc.threshold++;
@@ -339,19 +374,19 @@ bool intel_fbc_enabled(struct drm_device *dev)
339{ 374{
340 struct drm_i915_private *dev_priv = dev->dev_private; 375 struct drm_i915_private *dev_priv = dev->dev_private;
341 376
342 if (!dev_priv->display.fbc_enabled) 377 return dev_priv->fbc.enabled;
343 return false;
344
345 return dev_priv->display.fbc_enabled(dev);
346} 378}
347 379
348void gen8_fbc_sw_flush(struct drm_device *dev, u32 value) 380void bdw_fbc_sw_flush(struct drm_device *dev, u32 value)
349{ 381{
350 struct drm_i915_private *dev_priv = dev->dev_private; 382 struct drm_i915_private *dev_priv = dev->dev_private;
351 383
352 if (!IS_GEN8(dev)) 384 if (!IS_GEN8(dev))
353 return; 385 return;
354 386
387 if (!intel_fbc_enabled(dev))
388 return;
389
355 I915_WRITE(MSG_FBC_REND_STATE, value); 390 I915_WRITE(MSG_FBC_REND_STATE, value);
356} 391}
357 392
@@ -1310,6 +1345,7 @@ static bool vlv_compute_drain_latency(struct drm_crtc *crtc,
1310 int *prec_mult, 1345 int *prec_mult,
1311 int *drain_latency) 1346 int *drain_latency)
1312{ 1347{
1348 struct drm_device *dev = crtc->dev;
1313 int entries; 1349 int entries;
1314 int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock; 1350 int clock = to_intel_crtc(crtc)->config.adjusted_mode.crtc_clock;
1315 1351
@@ -1320,8 +1356,12 @@ static bool vlv_compute_drain_latency(struct drm_crtc *crtc,
1320 return false; 1356 return false;
1321 1357
1322 entries = DIV_ROUND_UP(clock, 1000) * pixel_size; 1358 entries = DIV_ROUND_UP(clock, 1000) * pixel_size;
1323 *prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_64 : 1359 if (IS_CHERRYVIEW(dev))
1324 DRAIN_LATENCY_PRECISION_32; 1360 *prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_32 :
1361 DRAIN_LATENCY_PRECISION_16;
1362 else
1363 *prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_64 :
1364 DRAIN_LATENCY_PRECISION_32;
1325 *drain_latency = (64 * (*prec_mult) * 4) / entries; 1365 *drain_latency = (64 * (*prec_mult) * 4) / entries;
1326 1366
1327 if (*drain_latency > DRAIN_LATENCY_MASK) 1367 if (*drain_latency > DRAIN_LATENCY_MASK)
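Cherryview halves the precision multipliers (32/16 in place of Valleyview's 64/32) while the drain-latency formula itself is unchanged. A standalone model of the computation; the 0x3f value used for DRAIN_LATENCY_MASK is an assumption made for this sketch:

#include <stdbool.h>
#include <stdio.h>

/* Model of vlv_compute_drain_latency(): entries scale with the pixel
 * rate, and the precision multiplier steps up once entries exceed 128. */
static bool compute_dl(int clock_khz, int pixel_size, bool is_chv,
                       int *prec_mult, int *drain_latency)
{
    int entries = (clock_khz + 999) / 1000 * pixel_size;

    if (is_chv)
        *prec_mult = (entries > 128) ? 32 : 16;
    else
        *prec_mult = (entries > 128) ? 64 : 32;

    *drain_latency = 64 * (*prec_mult) * 4 / entries;
    return *drain_latency <= 0x3f; /* assumed DRAIN_LATENCY_MASK */
}

int main(void)
{
    int mult, dl;

    /* 1080p@60 (148.5 MHz pixel clock), 32bpp, on VLV */
    compute_dl(148500, 4, false, &mult, &dl);
    printf("prec_mult=%d drain_latency=%d\n", mult, dl); /* 64, 27 */
    return 0;
}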
@@ -1340,15 +1380,18 @@ static bool vlv_compute_drain_latency(struct drm_crtc *crtc,
1340 1380
1341static void vlv_update_drain_latency(struct drm_crtc *crtc) 1381static void vlv_update_drain_latency(struct drm_crtc *crtc)
1342{ 1382{
1343 struct drm_i915_private *dev_priv = crtc->dev->dev_private; 1383 struct drm_device *dev = crtc->dev;
1384 struct drm_i915_private *dev_priv = dev->dev_private;
1344 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1385 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1345 int pixel_size; 1386 int pixel_size;
1346 int drain_latency; 1387 int drain_latency;
1347 enum pipe pipe = intel_crtc->pipe; 1388 enum pipe pipe = intel_crtc->pipe;
1348 int plane_prec, prec_mult, plane_dl; 1389 int plane_prec, prec_mult, plane_dl;
1390 const int high_precision = IS_CHERRYVIEW(dev) ?
1391 DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_64;
1349 1392
1350 plane_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_PLANE_PRECISION_64 | 1393 plane_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_PLANE_PRECISION_HIGH |
1351 DRAIN_LATENCY_MASK | DDL_CURSOR_PRECISION_64 | 1394 DRAIN_LATENCY_MASK | DDL_CURSOR_PRECISION_HIGH |
1352 (DRAIN_LATENCY_MASK << DDL_CURSOR_SHIFT)); 1395 (DRAIN_LATENCY_MASK << DDL_CURSOR_SHIFT));
1353 1396
1354 if (!intel_crtc_active(crtc)) { 1397 if (!intel_crtc_active(crtc)) {
@@ -1359,9 +1402,9 @@ static void vlv_update_drain_latency(struct drm_crtc *crtc)
1359 /* Primary plane Drain Latency */ 1402 /* Primary plane Drain Latency */
1360 pixel_size = crtc->primary->fb->bits_per_pixel / 8; /* BPP */ 1403 pixel_size = crtc->primary->fb->bits_per_pixel / 8; /* BPP */
1361 if (vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) { 1404 if (vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) {
1362 plane_prec = (prec_mult == DRAIN_LATENCY_PRECISION_64) ? 1405 plane_prec = (prec_mult == high_precision) ?
1363 DDL_PLANE_PRECISION_64 : 1406 DDL_PLANE_PRECISION_HIGH :
1364 DDL_PLANE_PRECISION_32; 1407 DDL_PLANE_PRECISION_LOW;
1365 plane_dl |= plane_prec | drain_latency; 1408 plane_dl |= plane_prec | drain_latency;
1366 } 1409 }
1367 1410
@@ -1373,9 +1416,9 @@ static void vlv_update_drain_latency(struct drm_crtc *crtc)
1373 /* Program cursor DL only if it is enabled */ 1416 /* Program cursor DL only if it is enabled */
1374 if (intel_crtc->cursor_base && 1417 if (intel_crtc->cursor_base &&
1375 vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) { 1418 vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) {
1376 plane_prec = (prec_mult == DRAIN_LATENCY_PRECISION_64) ? 1419 plane_prec = (prec_mult == high_precision) ?
1377 DDL_CURSOR_PRECISION_64 : 1420 DDL_CURSOR_PRECISION_HIGH :
1378 DDL_CURSOR_PRECISION_32; 1421 DDL_CURSOR_PRECISION_LOW;
1379 plane_dl |= plane_prec | (drain_latency << DDL_CURSOR_SHIFT); 1422 plane_dl |= plane_prec | (drain_latency << DDL_CURSOR_SHIFT);
1380 } 1423 }
1381 1424
@@ -1543,15 +1586,17 @@ static void valleyview_update_sprite_wm(struct drm_plane *plane,
1543 int plane_prec; 1586 int plane_prec;
1544 int sprite_dl; 1587 int sprite_dl;
1545 int prec_mult; 1588 int prec_mult;
1589 const int high_precision = IS_CHERRYVIEW(dev) ?
1590 DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_64;
1546 1591
1547 sprite_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_SPRITE_PRECISION_64(sprite) | 1592 sprite_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_SPRITE_PRECISION_HIGH(sprite) |
1548 (DRAIN_LATENCY_MASK << DDL_SPRITE_SHIFT(sprite))); 1593 (DRAIN_LATENCY_MASK << DDL_SPRITE_SHIFT(sprite)));
1549 1594
1550 if (enabled && vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, 1595 if (enabled && vlv_compute_drain_latency(crtc, pixel_size, &prec_mult,
1551 &drain_latency)) { 1596 &drain_latency)) {
1552 plane_prec = (prec_mult == DRAIN_LATENCY_PRECISION_64) ? 1597 plane_prec = (prec_mult == high_precision) ?
1553 DDL_SPRITE_PRECISION_64(sprite) : 1598 DDL_SPRITE_PRECISION_HIGH(sprite) :
1554 DDL_SPRITE_PRECISION_32(sprite); 1599 DDL_SPRITE_PRECISION_LOW(sprite);
1555 sprite_dl |= plane_prec | 1600 sprite_dl |= plane_prec |
1556 (drain_latency << DDL_SPRITE_SHIFT(sprite)); 1601 (drain_latency << DDL_SPRITE_SHIFT(sprite));
1557 } 1602 }
@@ -1915,6 +1960,14 @@ static uint32_t ilk_wm_fbc(uint32_t pri_val, uint32_t horiz_pixels,
1915 return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2; 1960 return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2;
1916} 1961}
1917 1962
1963struct skl_pipe_wm_parameters {
1964 bool active;
1965 uint32_t pipe_htotal;
1966 uint32_t pixel_rate; /* in KHz */
1967 struct intel_plane_wm_parameters plane[I915_MAX_PLANES];
1968 struct intel_plane_wm_parameters cursor;
1969};
1970
1918struct ilk_pipe_wm_parameters { 1971struct ilk_pipe_wm_parameters {
1919 bool active; 1972 bool active;
1920 uint32_t pipe_htotal; 1973 uint32_t pipe_htotal;
@@ -2226,11 +2279,82 @@ hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc)
2226 PIPE_WM_LINETIME_TIME(linetime); 2279 PIPE_WM_LINETIME_TIME(linetime);
2227} 2280}
2228 2281
2229static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[5]) 2282static void intel_read_wm_latency(struct drm_device *dev, uint16_t wm[8])
2230{ 2283{
2231 struct drm_i915_private *dev_priv = dev->dev_private; 2284 struct drm_i915_private *dev_priv = dev->dev_private;
2232 2285
2233 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { 2286 if (IS_GEN9(dev)) {
2287 uint32_t val;
2288 int ret, i;
2289 int level, max_level = ilk_wm_max_level(dev);
2290
2291 /* read the first set of memory latencies[0:3] */
2292 val = 0; /* data0 to be programmed to 0 for first set */
2293 mutex_lock(&dev_priv->rps.hw_lock);
2294 ret = sandybridge_pcode_read(dev_priv,
2295 GEN9_PCODE_READ_MEM_LATENCY,
2296 &val);
2297 mutex_unlock(&dev_priv->rps.hw_lock);
2298
2299 if (ret) {
2300 DRM_ERROR("SKL Mailbox read error = %d\n", ret);
2301 return;
2302 }
2303
2304 wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
2305 wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
2306 GEN9_MEM_LATENCY_LEVEL_MASK;
2307 wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
2308 GEN9_MEM_LATENCY_LEVEL_MASK;
2309 wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
2310 GEN9_MEM_LATENCY_LEVEL_MASK;
2311
2312 /* read the second set of memory latencies[4:7] */
2313 val = 1; /* data0 to be programmed to 1 for second set */
2314 mutex_lock(&dev_priv->rps.hw_lock);
2315 ret = sandybridge_pcode_read(dev_priv,
2316 GEN9_PCODE_READ_MEM_LATENCY,
2317 &val);
2318 mutex_unlock(&dev_priv->rps.hw_lock);
2319 if (ret) {
2320 DRM_ERROR("SKL Mailbox read error = %d\n", ret);
2321 return;
2322 }
2323
2324 wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
2325 wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
2326 GEN9_MEM_LATENCY_LEVEL_MASK;
2327 wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
2328 GEN9_MEM_LATENCY_LEVEL_MASK;
2329 wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
2330 GEN9_MEM_LATENCY_LEVEL_MASK;
2331
2332 /*
2333 * punit doesn't take into account the read latency so we need
2334 * to add 2us to the various latency levels we retrieve from
2335 * the punit.
2336 * - W0 is a bit special in that it's the only level that
2337 * can't be disabled if we want to have display working, so
2338 * we always add 2us there.
2339 * - For levels >=1, punit returns 0us latency when they are
2340 * disabled, so we respect that and don't add 2us then
2341 *
2342 * Additionally, if a level n (n > 1) has a 0us latency, all
2343 * levels m (m >= n) need to be disabled. We make sure to
2344 * sanitize the values out of the punit to satisfy this
2345 * requirement.
2346 */
2347 wm[0] += 2;
2348 for (level = 1; level <= max_level; level++)
2349 if (wm[level] != 0)
2350 wm[level] += 2;
2351 else {
2352 for (i = level + 1; i <= max_level; i++)
2353 wm[i] = 0;
2354
2355 break;
2356 }
2357 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2234 uint64_t sskpd = I915_READ64(MCH_SSKPD); 2358 uint64_t sskpd = I915_READ64(MCH_SSKPD);
2235 2359
2236 wm[0] = (sskpd >> 56) & 0xFF; 2360 wm[0] = (sskpd >> 56) & 0xFF;
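The sanitize rules in the comment above boil down to: always add the 2us read latency to WM0, add it to every enabled higher level, and treat a 0us level as disabling that level and everything above it. A runnable model of just that pass:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the gen9 sanitize loop: +2us everywhere except disabled
 * levels, and a disabled level disables all levels above it. */
static void sanitize_latencies(uint16_t wm[8], int max_level)
{
    int level, i;

    wm[0] += 2; /* WM0 can never be disabled */
    for (level = 1; level <= max_level; level++) {
        if (wm[level] != 0) {
            wm[level] += 2;
        } else {
            for (i = level + 1; i <= max_level; i++)
                wm[i] = 0;
            break;
        }
    }
}

int main(void)
{
    uint16_t wm[8] = { 2, 4, 8, 0, 12, 14, 16, 18 };
    int i;

    sanitize_latencies(wm, 7);
    for (i = 0; i < 8; i++) /* levels 3..7 end up disabled */
        printf("WM%d=%u\n", i, (unsigned)wm[i]);
    return 0;
}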
@@ -2278,7 +2402,9 @@ static void intel_fixup_cur_wm_latency(struct drm_device *dev, uint16_t wm[5])
2278int ilk_wm_max_level(const struct drm_device *dev) 2402int ilk_wm_max_level(const struct drm_device *dev)
2279{ 2403{
2280 /* how many WM levels are we expecting */ 2404 /* how many WM levels are we expecting */
2281 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 2405 if (IS_GEN9(dev))
2406 return 7;
2407 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2282 return 4; 2408 return 4;
2283 else if (INTEL_INFO(dev)->gen >= 6) 2409 else if (INTEL_INFO(dev)->gen >= 6)
2284 return 3; 2410 return 3;
@@ -2288,7 +2414,7 @@ int ilk_wm_max_level(const struct drm_device *dev)
2288 2414
2289static void intel_print_wm_latency(struct drm_device *dev, 2415static void intel_print_wm_latency(struct drm_device *dev,
2290 const char *name, 2416 const char *name,
2291 const uint16_t wm[5]) 2417 const uint16_t wm[8])
2292{ 2418{
2293 int level, max_level = ilk_wm_max_level(dev); 2419 int level, max_level = ilk_wm_max_level(dev);
2294 2420
@@ -2301,8 +2427,13 @@ static void intel_print_wm_latency(struct drm_device *dev,
2301 continue; 2427 continue;
2302 } 2428 }
2303 2429
2304 /* WM1+ latency values in 0.5us units */ 2430 /*
2305 if (level > 0) 2431 * - latencies are in us on gen9.
2432 * - before then, WM1+ latency values are in 0.5us units
2433 */
2434 if (IS_GEN9(dev))
2435 latency *= 10;
2436 else if (level > 0)
2306 latency *= 5; 2437 latency *= 5;
2307 2438
2308 DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n", 2439 DRM_DEBUG_KMS("%s WM%d latency %u (%u.%u usec)\n",
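To make the unit handling concrete: gen9 stores latencies in 1us steps while earlier generations store WM1+ in 0.5us steps, so the multiplications above convert both to tenths of a microsecond for the %u.%u format. A raw value of 4 therefore prints as 4.0 usec on gen9 (4 * 10 = 40 tenths) but as 2.0 usec on, say, Haswell (4 * 5 = 20 tenths).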
@@ -2370,6 +2501,14 @@ static void ilk_setup_wm_latency(struct drm_device *dev)
2370 snb_wm_latency_quirk(dev); 2501 snb_wm_latency_quirk(dev);
2371} 2502}
2372 2503
2504static void skl_setup_wm_latency(struct drm_device *dev)
2505{
2506 struct drm_i915_private *dev_priv = dev->dev_private;
2507
2508 intel_read_wm_latency(dev, dev_priv->wm.skl_latency);
2509 intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency);
2510}
2511
2373static void ilk_compute_wm_parameters(struct drm_crtc *crtc, 2512static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
2374 struct ilk_pipe_wm_parameters *p) 2513 struct ilk_pipe_wm_parameters *p)
2375{ 2514{
@@ -2860,6 +2999,769 @@ static bool ilk_disable_lp_wm(struct drm_device *dev)
2860 return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL); 2999 return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
2861} 3000}
2862 3001
3002/*
3003 * On gen9, we need to allocate Display Data Buffer (DDB) portions to the
3004 * different active planes.
3005 */
3006
3007#define SKL_DDB_SIZE 896 /* in blocks */
3008
3009static void
3010skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
3011 struct drm_crtc *for_crtc,
3012 const struct intel_wm_config *config,
3013 const struct skl_pipe_wm_parameters *params,
3014 struct skl_ddb_entry *alloc /* out */)
3015{
3016 struct drm_crtc *crtc;
3017 unsigned int pipe_size, ddb_size;
3018 int nth_active_pipe;
3019
3020 if (!params->active) {
3021 alloc->start = 0;
3022 alloc->end = 0;
3023 return;
3024 }
3025
3026 ddb_size = SKL_DDB_SIZE;
3027
3028 ddb_size -= 4; /* 4 blocks for bypass path allocation */
3029
3030 nth_active_pipe = 0;
3031 for_each_crtc(dev, crtc) {
3032 if (!intel_crtc_active(crtc))
3033 continue;
3034
3035 if (crtc == for_crtc)
3036 break;
3037
3038 nth_active_pipe++;
3039 }
3040
3041 pipe_size = ddb_size / config->num_pipes_active;
3042 alloc->start = nth_active_pipe * ddb_size / config->num_pipes_active;
3043 alloc->end = alloc->start + pipe_size;
3044}
3045
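skl_ddb_get_pipe_allocation_limits() is an even split: the 896-block DDB minus the 4 bypass blocks, divided across the active pipes, with each slice positioned by the pipe's index among the active pipes. A compact userspace model of the arithmetic, using the same constants:

#include <stdio.h>

struct ddb_entry {
    unsigned short start, end;
};

/* Even split of the gen9 DDB: 896 blocks less 4 reserved for the
 * bypass path, divided by the number of active pipes. */
static void pipe_alloc(int nth_active_pipe, int num_pipes_active,
                       struct ddb_entry *alloc)
{
    unsigned int ddb_size = 896 - 4;
    unsigned int pipe_size = ddb_size / num_pipes_active;

    alloc->start = nth_active_pipe * ddb_size / num_pipes_active;
    alloc->end = alloc->start + pipe_size;
}

int main(void)
{
    struct ddb_entry e;
    int i;

    for (i = 0; i < 3; i++) {
        pipe_alloc(i, 3, &e); /* 3 active pipes: 297 blocks each */
        printf("pipe %d: [%u, %u)\n", i, (unsigned)e.start,
               (unsigned)e.end);
    }
    return 0;
}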
3046static unsigned int skl_cursor_allocation(const struct intel_wm_config *config)
3047{
3048 if (config->num_pipes_active == 1)
3049 return 32;
3050
3051 return 8;
3052}
3053
3054static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg)
3055{
3056 entry->start = reg & 0x3ff;
3057 entry->end = (reg >> 16) & 0x3ff;
3058 if (entry->end)
3059 entry->end += 1;
3060}
3061
3062void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
3063 struct skl_ddb_allocation *ddb /* out */)
3064{
3065 struct drm_device *dev = dev_priv->dev;
3066 enum pipe pipe;
3067 int plane;
3068 u32 val;
3069
3070 for_each_pipe(dev_priv, pipe) {
3071 for_each_plane(pipe, plane) {
3072 val = I915_READ(PLANE_BUF_CFG(pipe, plane));
3073 skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane],
3074 val);
3075 }
3076
3077 val = I915_READ(CUR_BUF_CFG(pipe));
3078 skl_ddb_entry_init_from_hw(&ddb->cursor[pipe], val);
3079 }
3080}
3081
3082static unsigned int
3083skl_plane_relative_data_rate(const struct intel_plane_wm_parameters *p)
3084{
3085 return p->horiz_pixels * p->vert_pixels * p->bytes_per_pixel;
3086}
3087
3088/*
3089 * We don't overflow 32 bits. Worst case is 3 planes enabled, each fetching
3090 * an 8192x4096@32bpp framebuffer:
3091 * 3 * 4096 * 8192 * 4 < 2^32
3092 */
3093static unsigned int
3094skl_get_total_relative_data_rate(struct intel_crtc *intel_crtc,
3095 const struct skl_pipe_wm_parameters *params)
3096{
3097 unsigned int total_data_rate = 0;
3098 int plane;
3099
3100 for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) {
3101 const struct intel_plane_wm_parameters *p;
3102
3103 p = &params->plane[plane];
3104 if (!p->enabled)
3105 continue;
3106
3107 total_data_rate += skl_plane_relative_data_rate(p);
3108 }
3109
3110 return total_data_rate;
3111}
3112
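The bound quoted in the comment works out to 3 * 4096 * 8192 * 4 = 402,653,184, comfortably below 2^32 = 4,294,967,296, so the running sum in skl_get_total_relative_data_rate() cannot overflow an unsigned int.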
3113static void
3114skl_allocate_pipe_ddb(struct drm_crtc *crtc,
3115 const struct intel_wm_config *config,
3116 const struct skl_pipe_wm_parameters *params,
3117 struct skl_ddb_allocation *ddb /* out */)
3118{
3119 struct drm_device *dev = crtc->dev;
3120 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3121 enum pipe pipe = intel_crtc->pipe;
3122 struct skl_ddb_entry *alloc = &ddb->pipe[pipe];
3123 uint16_t alloc_size, start, cursor_blocks;
3124 unsigned int total_data_rate;
3125 int plane;
3126
3127 skl_ddb_get_pipe_allocation_limits(dev, crtc, config, params, alloc);
3128 alloc_size = skl_ddb_entry_size(alloc);
3129 if (alloc_size == 0) {
3130 memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe]));
3131 memset(&ddb->cursor[pipe], 0, sizeof(ddb->cursor[pipe]));
3132 return;
3133 }
3134
3135 cursor_blocks = skl_cursor_allocation(config);
3136 ddb->cursor[pipe].start = alloc->end - cursor_blocks;
3137 ddb->cursor[pipe].end = alloc->end;
3138
3139 alloc_size -= cursor_blocks;
3140 alloc->end -= cursor_blocks;
3141
3142 /*
3143 * Each active plane gets a portion of the remaining space, in
3144 * proportion to the amount of data they need to fetch from memory.
3145 *
3146 * FIXME: we may not allocate every single block here.
3147 */
3148 total_data_rate = skl_get_total_relative_data_rate(intel_crtc, params);
3149
3150 start = alloc->start;
3151 for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) {
3152 const struct intel_plane_wm_parameters *p;
3153 unsigned int data_rate;
3154 uint16_t plane_blocks;
3155
3156 p = &params->plane[plane];
3157 if (!p->enabled)
3158 continue;
3159
3160 data_rate = skl_plane_relative_data_rate(p);
3161
3162 /*
3163 * promote the expression to 64 bits to avoid overflowing; the
3164 * result is < alloc_size since data_rate / total_data_rate < 1
3165 */
3166 plane_blocks = div_u64((uint64_t)alloc_size * data_rate,
3167 total_data_rate);
3168
3169 ddb->plane[pipe][plane].start = start;
3170 ddb->plane[pipe][plane].end = start + plane_blocks;
3171
3172 start += plane_blocks;
3173 }
3174
3175}
3176
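The per-plane split promotes alloc_size * data_rate to 64 bits before dividing, and since data_rate / total_data_rate < 1 the quotient always fits back into 16 bits. A userspace model with illustrative data rates; the leftover block at the end matches the FIXME above about not allocating every block:

#include <stdint.h>
#include <stdio.h>

/* Proportional split as in skl_allocate_pipe_ddb(): each enabled plane
 * gets floor(alloc_size * rate / total) contiguous blocks. */
static uint16_t plane_blocks(uint16_t alloc_size, unsigned int data_rate,
                             unsigned int total_data_rate)
{
    return (uint16_t)((uint64_t)alloc_size * data_rate / total_data_rate);
}

int main(void)
{
    unsigned int rates[] = { 134217728, 67108864 }; /* 2:1 fetch ratio */
    unsigned int total = rates[0] + rates[1];
    uint16_t start = 0, alloc_size = 265;
    int i;

    for (i = 0; i < 2; i++) {
        uint16_t n = plane_blocks(alloc_size, rates[i], total);

        printf("plane %d: [%u, %u)\n", i, (unsigned)start,
               (unsigned)(start + n)); /* [0, 176) then [176, 264) */
        start += n;
    }
    return 0;
}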
3177static uint32_t skl_pipe_pixel_rate(const struct intel_crtc_config *config)
3178{
3179 /* TODO: Take into account the scalers once we support them */
3180 return config->adjusted_mode.crtc_clock;
3181}
3182
3183/*
3184 * The max latency should be 257 (max the punit can code is 255 and we add 2us
3185 * for the read latency) and bytes_per_pixel should always be <= 8, so that
3186 * should allow pixel_rate up to ~2 GHz which seems sufficient since max
3187 * 2xcdclk is 1350 MHz and the pixel rate should never exceed that.
3188 */
3189static uint32_t skl_wm_method1(uint32_t pixel_rate, uint8_t bytes_per_pixel,
3190 uint32_t latency)
3191{
3192 uint32_t wm_intermediate_val, ret;
3193
3194 if (latency == 0)
3195 return UINT_MAX;
3196
3197 wm_intermediate_val = latency * pixel_rate * bytes_per_pixel;
3198 ret = DIV_ROUND_UP(wm_intermediate_val, 1000);
3199
3200 return ret;
3201}
3202
3203static uint32_t skl_wm_method2(uint32_t pixel_rate, uint32_t pipe_htotal,
3204 uint32_t horiz_pixels, uint8_t bytes_per_pixel,
3205 uint32_t latency)
3206{
3207 uint32_t ret, plane_bytes_per_line, wm_intermediate_val;
3208
3209 if (latency == 0)
3210 return UINT_MAX;
3211
3212 plane_bytes_per_line = horiz_pixels * bytes_per_pixel;
3213 wm_intermediate_val = latency * pixel_rate;
3214 ret = DIV_ROUND_UP(wm_intermediate_val, pipe_htotal * 1000) *
3215 plane_bytes_per_line;
3216
3217 return ret;
3218}
3219
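Method 1 is the number of bytes fetched during the latency window at the raw pixel rate; method 2 rounds the same window up to whole lines. A runnable model using the driver's units (pixel rate in KHz, latency in us); the 1080p timing numbers are purely illustrative:

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Bytes fetched during the latency window at the pixel rate. */
static uint32_t method1(uint32_t pixel_rate, uint8_t bpp, uint32_t latency)
{
    return DIV_ROUND_UP(latency * pixel_rate * bpp, 1000);
}

/* The same window, rounded up to whole lines. */
static uint32_t method2(uint32_t pixel_rate, uint32_t htotal,
                        uint32_t width, uint8_t bpp, uint32_t latency)
{
    uint32_t bytes_per_line = width * bpp;

    return DIV_ROUND_UP(latency * pixel_rate, htotal * 1000) *
           bytes_per_line;
}

int main(void)
{
    /* ~1080p@60: 148500 KHz pixel clock, htotal 2200, 32bpp, 4 us */
    printf("m1=%u bytes\n", (unsigned)method1(148500, 4, 4)); /* 2376 */
    printf("m2=%u bytes\n",
           (unsigned)method2(148500, 2200, 1920, 4, 4)); /* one line, 7680 */
    return 0;
}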
3220static bool skl_ddb_allocation_changed(const struct skl_ddb_allocation *new_ddb,
3221 const struct intel_crtc *intel_crtc)
3222{
3223 struct drm_device *dev = intel_crtc->base.dev;
3224 struct drm_i915_private *dev_priv = dev->dev_private;
3225 const struct skl_ddb_allocation *cur_ddb = &dev_priv->wm.skl_hw.ddb;
3226 enum pipe pipe = intel_crtc->pipe;
3227
3228 if (memcmp(new_ddb->plane[pipe], cur_ddb->plane[pipe],
3229 sizeof(new_ddb->plane[pipe])))
3230 return true;
3231
3232 if (memcmp(&new_ddb->cursor[pipe], &cur_ddb->cursor[pipe],
3233 sizeof(new_ddb->cursor[pipe])))
3234 return true;
3235
3236 return false;
3237}
3238
3239static void skl_compute_wm_global_parameters(struct drm_device *dev,
3240 struct intel_wm_config *config)
3241{
3242 struct drm_crtc *crtc;
3243 struct drm_plane *plane;
3244
3245 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
3246 config->num_pipes_active += intel_crtc_active(crtc);
3247
3248 /* FIXME: I don't think we need those two global parameters on SKL */
3249 list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
3250 struct intel_plane *intel_plane = to_intel_plane(plane);
3251
3252 config->sprites_enabled |= intel_plane->wm.enabled;
3253 config->sprites_scaled |= intel_plane->wm.scaled;
3254 }
3255}
3256
3257static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc,
3258 struct skl_pipe_wm_parameters *p)
3259{
3260 struct drm_device *dev = crtc->dev;
3261 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3262 enum pipe pipe = intel_crtc->pipe;
3263 struct drm_plane *plane;
3264 int i = 1; /* Index for sprite planes start */
3265
3266 p->active = intel_crtc_active(crtc);
3267 if (p->active) {
3268 p->pipe_htotal = intel_crtc->config.adjusted_mode.crtc_htotal;
3269 p->pixel_rate = skl_pipe_pixel_rate(&intel_crtc->config);
3270
3271 /*
3272 * For now, assume primary and cursor planes are always enabled.
3273 */
3274 p->plane[0].enabled = true;
3275 p->plane[0].bytes_per_pixel =
3276 crtc->primary->fb->bits_per_pixel / 8;
3277 p->plane[0].horiz_pixels = intel_crtc->config.pipe_src_w;
3278 p->plane[0].vert_pixels = intel_crtc->config.pipe_src_h;
3279
3280 p->cursor.enabled = true;
3281 p->cursor.bytes_per_pixel = 4;
3282 p->cursor.horiz_pixels = intel_crtc->cursor_width ?
3283 intel_crtc->cursor_width : 64;
3284 }
3285
3286 list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
3287 struct intel_plane *intel_plane = to_intel_plane(plane);
3288
3289 if (intel_plane->pipe == pipe)
3290 p->plane[i++] = intel_plane->wm;
3291 }
3292}
3293
3294static bool skl_compute_plane_wm(struct skl_pipe_wm_parameters *p,
3295 struct intel_plane_wm_parameters *p_params,
3296 uint16_t ddb_allocation,
3297 uint32_t mem_value,
3298 uint16_t *out_blocks, /* out */
3299 uint8_t *out_lines /* out */)
3300{
3301 uint32_t method1, method2, plane_bytes_per_line, res_blocks, res_lines;
3302 uint32_t result_bytes;
3303
3304 if (mem_value == 0 || !p->active || !p_params->enabled)
3305 return false;
3306
3307 method1 = skl_wm_method1(p->pixel_rate,
3308 p_params->bytes_per_pixel,
3309 mem_value);
3310 method2 = skl_wm_method2(p->pixel_rate,
3311 p->pipe_htotal,
3312 p_params->horiz_pixels,
3313 p_params->bytes_per_pixel,
3314 mem_value);
3315
3316 plane_bytes_per_line = p_params->horiz_pixels *
3317 p_params->bytes_per_pixel;
3318
3319 /* For now xtile and linear */
3320 if (((ddb_allocation * 512) / plane_bytes_per_line) >= 1)
3321 result_bytes = min(method1, method2);
3322 else
3323 result_bytes = method1;
3324
3325 res_blocks = DIV_ROUND_UP(result_bytes, 512) + 1;
3326 res_lines = DIV_ROUND_UP(result_bytes, plane_bytes_per_line);
3327
3328 if (res_blocks > ddb_allocation || res_lines > 31)
3329 return false;
3330
3331 *out_blocks = res_blocks;
3332 *out_lines = res_lines;
3333
3334 return true;
3335}
3336
3337static void skl_compute_wm_level(const struct drm_i915_private *dev_priv,
3338 struct skl_ddb_allocation *ddb,
3339 struct skl_pipe_wm_parameters *p,
3340 enum pipe pipe,
3341 int level,
3342 int num_planes,
3343 struct skl_wm_level *result)
3344{
3345 uint16_t latency = dev_priv->wm.skl_latency[level];
3346 uint16_t ddb_blocks;
3347 int i;
3348
3349 for (i = 0; i < num_planes; i++) {
3350 ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]);
3351
3352 result->plane_en[i] = skl_compute_plane_wm(p, &p->plane[i],
3353 ddb_blocks,
3354 latency,
3355 &result->plane_res_b[i],
3356 &result->plane_res_l[i]);
3357 }
3358
3359 ddb_blocks = skl_ddb_entry_size(&ddb->cursor[pipe]);
3360 result->cursor_en = skl_compute_plane_wm(p, &p->cursor, ddb_blocks,
3361 latency, &result->cursor_res_b,
3362 &result->cursor_res_l);
3363}
3364
3365static uint32_t
3366skl_compute_linetime_wm(struct drm_crtc *crtc, struct skl_pipe_wm_parameters *p)
3367{
3368 if (!intel_crtc_active(crtc))
3369 return 0;
3370
3371 return DIV_ROUND_UP(8 * p->pipe_htotal * 1000, p->pixel_rate);
3372
3373}
3374
3375static void skl_compute_transition_wm(struct drm_crtc *crtc,
3376 struct skl_pipe_wm_parameters *params,
3377 struct skl_wm_level *trans_wm /* out */)
3378{
3379 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3380 int i;
3381
3382 if (!params->active)
3383 return;
3384
3385 /* Until we know more, just disable transition WMs */
3386 for (i = 0; i < intel_num_planes(intel_crtc); i++)
3387 trans_wm->plane_en[i] = false;
3388 trans_wm->cursor_en = false;
3389}
3390
3391static void skl_compute_pipe_wm(struct drm_crtc *crtc,
3392 struct skl_ddb_allocation *ddb,
3393 struct skl_pipe_wm_parameters *params,
3394 struct skl_pipe_wm *pipe_wm)
3395{
3396 struct drm_device *dev = crtc->dev;
3397 const struct drm_i915_private *dev_priv = dev->dev_private;
3398 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3399 int level, max_level = ilk_wm_max_level(dev);
3400
3401 for (level = 0; level <= max_level; level++) {
3402 skl_compute_wm_level(dev_priv, ddb, params, intel_crtc->pipe,
3403 level, intel_num_planes(intel_crtc),
3404 &pipe_wm->wm[level]);
3405 }
3406 pipe_wm->linetime = skl_compute_linetime_wm(crtc, params);
3407
3408 skl_compute_transition_wm(crtc, params, &pipe_wm->trans_wm);
3409}
3410
3411static void skl_compute_wm_results(struct drm_device *dev,
3412 struct skl_pipe_wm_parameters *p,
3413 struct skl_pipe_wm *p_wm,
3414 struct skl_wm_values *r,
3415 struct intel_crtc *intel_crtc)
3416{
3417 int level, max_level = ilk_wm_max_level(dev);
3418 enum pipe pipe = intel_crtc->pipe;
3419 uint32_t temp;
3420 int i;
3421
3422 for (level = 0; level <= max_level; level++) {
3423 for (i = 0; i < intel_num_planes(intel_crtc); i++) {
3424 temp = 0;
3425
3426 temp |= p_wm->wm[level].plane_res_l[i] <<
3427 PLANE_WM_LINES_SHIFT;
3428 temp |= p_wm->wm[level].plane_res_b[i];
3429 if (p_wm->wm[level].plane_en[i])
3430 temp |= PLANE_WM_EN;
3431
3432 r->plane[pipe][i][level] = temp;
3433 }
3434
3435 temp = 0;
3436
3437 temp |= p_wm->wm[level].cursor_res_l << PLANE_WM_LINES_SHIFT;
3438 temp |= p_wm->wm[level].cursor_res_b;
3439
3440 if (p_wm->wm[level].cursor_en)
3441 temp |= PLANE_WM_EN;
3442
3443 r->cursor[pipe][level] = temp;
3444
3445 }
3446
3447 /* transition WMs */
3448 for (i = 0; i < intel_num_planes(intel_crtc); i++) {
3449 temp = 0;
3450 temp |= p_wm->trans_wm.plane_res_l[i] << PLANE_WM_LINES_SHIFT;
3451 temp |= p_wm->trans_wm.plane_res_b[i];
3452 if (p_wm->trans_wm.plane_en[i])
3453 temp |= PLANE_WM_EN;
3454
3455 r->plane_trans[pipe][i] = temp;
3456 }
3457
3458 temp = 0;
3459 temp |= p_wm->trans_wm.cursor_res_l << PLANE_WM_LINES_SHIFT;
3460 temp |= p_wm->trans_wm.cursor_res_b;
3461 if (p_wm->trans_wm.cursor_en)
3462 temp |= PLANE_WM_EN;
3463
3464 r->cursor_trans[pipe] = temp;
3465
3466 r->wm_linetime[pipe] = p_wm->linetime;
3467}
3468
3469static void skl_ddb_entry_write(struct drm_i915_private *dev_priv, uint32_t reg,
3470 const struct skl_ddb_entry *entry)
3471{
3472 if (entry->end)
3473 I915_WRITE(reg, (entry->end - 1) << 16 | entry->start);
3474 else
3475 I915_WRITE(reg, 0);
3476}
3477
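skl_ddb_entry_write() is the inverse of skl_ddb_entry_init_from_hw() earlier in this file: the register holds an inclusive end in bits 25:16 while the software entry keeps an exclusive end, hence the -1 on write and +1 on read. A round-trip sketch under that assumption about the bit layout:

#include <stdint.h>
#include <stdio.h>

struct ddb_entry {
    uint16_t start, end; /* end is exclusive in software */
};

static uint32_t entry_to_reg(const struct ddb_entry *e)
{
    /* 0 means "no allocation", matching the write helper above */
    return e->end ? ((uint32_t)(e->end - 1) << 16) | e->start : 0;
}

static void entry_from_reg(struct ddb_entry *e, uint32_t reg)
{
    e->start = reg & 0x3ff;
    e->end = (reg >> 16) & 0x3ff;
    if (e->end)
        e->end += 1; /* back to exclusive */
}

int main(void)
{
    struct ddb_entry in = { 297, 594 }, out;

    entry_from_reg(&out, entry_to_reg(&in)); /* round-trips exactly */
    printf("[%u, %u)\n", (unsigned)out.start, (unsigned)out.end);
    return 0;
}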
3478static void skl_write_wm_values(struct drm_i915_private *dev_priv,
3479 const struct skl_wm_values *new)
3480{
3481 struct drm_device *dev = dev_priv->dev;
3482 struct intel_crtc *crtc;
3483
3484 list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
3485 int i, level, max_level = ilk_wm_max_level(dev);
3486 enum pipe pipe = crtc->pipe;
3487
3488 if (!new->dirty[pipe])
3489 continue;
3490
3491 I915_WRITE(PIPE_WM_LINETIME(pipe), new->wm_linetime[pipe]);
3492
3493 for (level = 0; level <= max_level; level++) {
3494 for (i = 0; i < intel_num_planes(crtc); i++)
3495 I915_WRITE(PLANE_WM(pipe, i, level),
3496 new->plane[pipe][i][level]);
3497 I915_WRITE(CUR_WM(pipe, level),
3498 new->cursor[pipe][level]);
3499 }
3500 for (i = 0; i < intel_num_planes(crtc); i++)
3501 I915_WRITE(PLANE_WM_TRANS(pipe, i),
3502 new->plane_trans[pipe][i]);
3503 I915_WRITE(CUR_WM_TRANS(pipe), new->cursor_trans[pipe]);
3504
3505 for (i = 0; i < intel_num_planes(crtc); i++)
3506 skl_ddb_entry_write(dev_priv,
3507 PLANE_BUF_CFG(pipe, i),
3508 &new->ddb.plane[pipe][i]);
3509
3510 skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe),
3511 &new->ddb.cursor[pipe]);
3512 }
3513}
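/*
 * [Editor's note -- descriptive comment, not part of the patch.] Only pipes
 * flagged in new->dirty[] are reprogrammed above, so a single-pipe update
 * leaves the watermark and DDB registers of the other pipes untouched.
 */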
+
+/*
+ * When setting up a new DDB allocation arrangement, we need to correctly
+ * sequence the times at which the new allocations for the pipes are taken into
+ * account or we'll have pipes fetching from space previously allocated to
+ * another pipe.
+ *
+ * Roughly the sequence looks like:
+ *  1. re-allocate the pipe(s) with the allocation being reduced and not
+ *     overlapping with a previous light-up pipe (another way to put it is:
+ *     pipes with their new allocation strictly included into their old ones).
+ *  2. re-allocate the other pipes that get their allocation reduced
+ *  3. allocate the pipes having their allocation increased
+ *
+ * Steps 1. and 2. are here to take care of the following case:
+ * - Initially DDB looks like this:
+ *     |   B    |   C    |
+ * - enable pipe A.
+ * - pipe B has a reduced DDB allocation that overlaps with the old pipe C
+ *   allocation
+ *     |  A  |  B  |  C  |
+ *
+ * We need to sequence the re-allocation: C, B, A (and not B, C, A).
+ */
+
+static void
+skl_wm_flush_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, int pass)
+{
+	struct drm_device *dev = dev_priv->dev;
+	int plane;
+
+	DRM_DEBUG_KMS("flush pipe %c (pass %d)\n", pipe_name(pipe), pass);
+
+	for_each_plane(pipe, plane) {
+		I915_WRITE(PLANE_SURF(pipe, plane),
+			   I915_READ(PLANE_SURF(pipe, plane)));
+	}
+	I915_WRITE(CURBASE(pipe), I915_READ(CURBASE(pipe)));
+}
+
+static bool
+skl_ddb_allocation_included(const struct skl_ddb_allocation *old,
+			    const struct skl_ddb_allocation *new,
+			    enum pipe pipe)
+{
+	uint16_t old_size, new_size;
+
+	old_size = skl_ddb_entry_size(&old->pipe[pipe]);
+	new_size = skl_ddb_entry_size(&new->pipe[pipe]);
+
+	return old_size != new_size &&
+	       new->pipe[pipe].start >= old->pipe[pipe].start &&
+	       new->pipe[pipe].end <= old->pipe[pipe].end;
+}
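/*
 * [Editor's note -- illustrative, not part of the patch.] "Included" above
 * means strictly contained in the old range, for example:
 *   old = [0, 512), new = [128, 384) -> true  (safe to flush in pass 1)
 *   old = [0, 512), new = [0, 512)   -> false (same size, nothing moved)
 *   old = [0, 512), new = [256, 640) -> false (extends past the old range)
 */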
+
+static void skl_flush_wm_values(struct drm_i915_private *dev_priv,
+				struct skl_wm_values *new_values)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct skl_ddb_allocation *cur_ddb, *new_ddb;
+	bool reallocated[I915_MAX_PIPES] = {false, false, false};
+	struct intel_crtc *crtc;
+	enum pipe pipe;
+
+	new_ddb = &new_values->ddb;
+	cur_ddb = &dev_priv->wm.skl_hw.ddb;
+
+	/*
+	 * First pass: flush the pipes with the new allocation contained into
+	 * the old space.
+	 *
+	 * We'll wait for the vblank on those pipes to ensure we can safely
+	 * re-allocate the freed space without this pipe fetching from it.
+	 */
+	for_each_intel_crtc(dev, crtc) {
+		if (!crtc->active)
+			continue;
+
+		pipe = crtc->pipe;
+
+		if (!skl_ddb_allocation_included(cur_ddb, new_ddb, pipe))
+			continue;
+
+		skl_wm_flush_pipe(dev_priv, pipe, 1);
+		intel_wait_for_vblank(dev, pipe);
+
+		reallocated[pipe] = true;
+	}
+
+
+	/*
+	 * Second pass: flush the pipes that are having their allocation
+	 * reduced, but overlapping with a previous allocation.
+	 *
+	 * Here as well we need to wait for the vblank to make sure the freed
+	 * space is not used anymore.
+	 */
+	for_each_intel_crtc(dev, crtc) {
+		if (!crtc->active)
+			continue;
+
+		pipe = crtc->pipe;
+
+		if (reallocated[pipe])
+			continue;
+
+		if (skl_ddb_entry_size(&new_ddb->pipe[pipe]) <
+		    skl_ddb_entry_size(&cur_ddb->pipe[pipe])) {
+			skl_wm_flush_pipe(dev_priv, pipe, 2);
+			intel_wait_for_vblank(dev, pipe);
+		}
+
+		reallocated[pipe] = true;
+	}
+
+	/*
+	 * Third pass: flush the pipes that got more space allocated.
+	 *
+	 * We don't need to actively wait for the update here, next vblank
+	 * will just get more DDB space with the correct WM values.
+	 */
+	for_each_intel_crtc(dev, crtc) {
+		if (!crtc->active)
+			continue;
+
+		pipe = crtc->pipe;
+
+		/*
+		 * At this point, only the pipes that get more space than
+		 * before are left to re-allocate.
+		 */
+		if (reallocated[pipe])
+			continue;
+
+		skl_wm_flush_pipe(dev_priv, pipe, 3);
+	}
+}
+
+static bool skl_update_pipe_wm(struct drm_crtc *crtc,
+			       struct skl_pipe_wm_parameters *params,
+			       struct intel_wm_config *config,
+			       struct skl_ddb_allocation *ddb, /* out */
+			       struct skl_pipe_wm *pipe_wm /* out */)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+
+	skl_compute_wm_pipe_parameters(crtc, params);
+	skl_allocate_pipe_ddb(crtc, config, params, ddb);
+	skl_compute_pipe_wm(crtc, ddb, params, pipe_wm);
+
+	if (!memcmp(&intel_crtc->wm.skl_active, pipe_wm, sizeof(*pipe_wm)))
+		return false;
+
+	intel_crtc->wm.skl_active = *pipe_wm;
+	return true;
+}
+
+static void skl_update_other_pipe_wm(struct drm_device *dev,
+				     struct drm_crtc *crtc,
+				     struct intel_wm_config *config,
+				     struct skl_wm_values *r)
+{
+	struct intel_crtc *intel_crtc;
+	struct intel_crtc *this_crtc = to_intel_crtc(crtc);
+
+	/*
+	 * If the WM update hasn't changed the allocation for this_crtc (the
+	 * crtc we are currently computing the new WM values for), other
+	 * enabled crtcs will keep the same allocation and we don't need to
+	 * recompute anything for them.
+	 */
+	if (!skl_ddb_allocation_changed(&r->ddb, this_crtc))
+		return;
+
+	/*
+	 * Otherwise, because of this_crtc being freshly enabled/disabled, the
+	 * other active pipes need new DDB allocation and WM values.
+	 */
+	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list,
+			    base.head) {
+		struct skl_pipe_wm_parameters params = {};
+		struct skl_pipe_wm pipe_wm = {};
+		bool wm_changed;
+
+		if (this_crtc->pipe == intel_crtc->pipe)
+			continue;
+
+		if (!intel_crtc->active)
+			continue;
+
+		wm_changed = skl_update_pipe_wm(&intel_crtc->base,
+						&params, config,
+						&r->ddb, &pipe_wm);
+
+		/*
+		 * If we end up re-computing the other pipe WM values, it's
+		 * because it was really needed, so we expect the WM values to
+		 * be different.
+		 */
+		WARN_ON(!wm_changed);
+
+		skl_compute_wm_results(dev, &params, &pipe_wm, r, intel_crtc);
+		r->dirty[intel_crtc->pipe] = true;
+	}
+}
+
+static void skl_update_wm(struct drm_crtc *crtc)
+{
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct skl_pipe_wm_parameters params = {};
+	struct skl_wm_values *results = &dev_priv->wm.skl_results;
+	struct skl_pipe_wm pipe_wm = {};
+	struct intel_wm_config config = {};
+
+	memset(results, 0, sizeof(*results));
+
+	skl_compute_wm_global_parameters(dev, &config);
+
+	if (!skl_update_pipe_wm(crtc, &params, &config,
+				&results->ddb, &pipe_wm))
+		return;
+
+	skl_compute_wm_results(dev, &params, &pipe_wm, results, intel_crtc);
+	results->dirty[intel_crtc->pipe] = true;
+
+	skl_update_other_pipe_wm(dev, crtc, &config, results);
+	skl_write_wm_values(dev_priv, results);
+	skl_flush_wm_values(dev_priv, results);
+
+	/* store the new configuration */
+	dev_priv->wm.skl_hw = *results;
+}
+
+static void
+skl_update_sprite_wm(struct drm_plane *plane, struct drm_crtc *crtc,
+		     uint32_t sprite_width, uint32_t sprite_height,
+		     int pixel_size, bool enabled, bool scaled)
+{
+	struct intel_plane *intel_plane = to_intel_plane(plane);
+
+	intel_plane->wm.enabled = enabled;
+	intel_plane->wm.scaled = scaled;
+	intel_plane->wm.horiz_pixels = sprite_width;
+	intel_plane->wm.vert_pixels = sprite_height;
+	intel_plane->wm.bytes_per_pixel = pixel_size;
+
+	skl_update_wm(crtc);
+}
+
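/*
 * [Editor's note -- descriptive summary, not part of the patch.] The SKL
 * watermark update added above chains together as:
 *
 *   skl_update_wm(crtc)
 *     -> skl_update_pipe_wm()        compute params, DDB and per-pipe WM
 *     -> skl_compute_wm_results()    pack the WM levels into register words
 *     -> skl_update_other_pipe_wm()  recompute pipes whose DDB share moved
 *     -> skl_write_wm_values()       program PLANE_WM/CUR_WM/*_BUF_CFG
 *     -> skl_flush_wm_values()       apply the three-pass flush ordering
 */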
 static void ilk_update_wm(struct drm_crtc *crtc)
 {
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -2934,6 +3836,113 @@ ilk_update_sprite_wm(struct drm_plane *plane,
 	ilk_update_wm(crtc);
 }
 
+static void skl_pipe_wm_active_state(uint32_t val,
+				     struct skl_pipe_wm *active,
+				     bool is_transwm,
+				     bool is_cursor,
+				     int i,
+				     int level)
+{
+	bool is_enabled = (val & PLANE_WM_EN) != 0;
+
+	if (!is_transwm) {
+		if (!is_cursor) {
+			active->wm[level].plane_en[i] = is_enabled;
+			active->wm[level].plane_res_b[i] =
+					val & PLANE_WM_BLOCKS_MASK;
+			active->wm[level].plane_res_l[i] =
+					(val >> PLANE_WM_LINES_SHIFT) &
+						PLANE_WM_LINES_MASK;
+		} else {
+			active->wm[level].cursor_en = is_enabled;
+			active->wm[level].cursor_res_b =
+					val & PLANE_WM_BLOCKS_MASK;
+			active->wm[level].cursor_res_l =
+					(val >> PLANE_WM_LINES_SHIFT) &
+						PLANE_WM_LINES_MASK;
+		}
+	} else {
+		if (!is_cursor) {
+			active->trans_wm.plane_en[i] = is_enabled;
+			active->trans_wm.plane_res_b[i] =
+					val & PLANE_WM_BLOCKS_MASK;
+			active->trans_wm.plane_res_l[i] =
+					(val >> PLANE_WM_LINES_SHIFT) &
+						PLANE_WM_LINES_MASK;
+		} else {
+			active->trans_wm.cursor_en = is_enabled;
+			active->trans_wm.cursor_res_b =
+					val & PLANE_WM_BLOCKS_MASK;
+			active->trans_wm.cursor_res_l =
+					(val >> PLANE_WM_LINES_SHIFT) &
+						PLANE_WM_LINES_MASK;
+		}
+	}
+}
+
+static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct skl_wm_values *hw = &dev_priv->wm.skl_hw;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct skl_pipe_wm *active = &intel_crtc->wm.skl_active;
+	enum pipe pipe = intel_crtc->pipe;
+	int level, i, max_level;
+	uint32_t temp;
+
+	max_level = ilk_wm_max_level(dev);
+
+	hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
+
+	for (level = 0; level <= max_level; level++) {
+		for (i = 0; i < intel_num_planes(intel_crtc); i++)
+			hw->plane[pipe][i][level] =
+					I915_READ(PLANE_WM(pipe, i, level));
+		hw->cursor[pipe][level] = I915_READ(CUR_WM(pipe, level));
+	}
+
+	for (i = 0; i < intel_num_planes(intel_crtc); i++)
+		hw->plane_trans[pipe][i] = I915_READ(PLANE_WM_TRANS(pipe, i));
+	hw->cursor_trans[pipe] = I915_READ(CUR_WM_TRANS(pipe));
+
+	if (!intel_crtc_active(crtc))
+		return;
+
+	hw->dirty[pipe] = true;
+
+	active->linetime = hw->wm_linetime[pipe];
+
+	for (level = 0; level <= max_level; level++) {
+		for (i = 0; i < intel_num_planes(intel_crtc); i++) {
+			temp = hw->plane[pipe][i][level];
+			skl_pipe_wm_active_state(temp, active, false,
+						 false, i, level);
+		}
+		temp = hw->cursor[pipe][level];
+		skl_pipe_wm_active_state(temp, active, false, true, i, level);
+	}
+
+	for (i = 0; i < intel_num_planes(intel_crtc); i++) {
+		temp = hw->plane_trans[pipe][i];
+		skl_pipe_wm_active_state(temp, active, true, false, i, 0);
+	}
+
+	temp = hw->cursor_trans[pipe];
+	skl_pipe_wm_active_state(temp, active, true, true, i, 0);
+}
+
+void skl_wm_get_hw_state(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
+	struct drm_crtc *crtc;
+
+	skl_ddb_get_hw_state(dev_priv, ddb);
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+		skl_pipe_wm_get_hw_state(crtc);
+}
+
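/*
 * [Editor's note -- descriptive comment, not part of the patch.]
 * skl_wm_get_hw_state() is the readout path: it seeds dev_priv->wm.skl_hw
 * from whatever the BIOS or a previous driver left programmed, so the first
 * skl_update_wm() call diffs against real hardware state rather than a
 * zeroed software copy.
 */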
 static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc)
 {
 	struct drm_device *dev = crtc->dev;
@@ -3442,7 +4451,7 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
 			   dev_priv->rps.min_freq_softlimit);
 
 	if (wait_for(((vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS))
-				& GENFREQSTATUS) == 0, 5))
+				& GENFREQSTATUS) == 0, 100))
 		DRM_ERROR("timed out waiting for Punit\n");
 
 	vlv_force_gfx_clock(dev_priv, false);
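/*
 * [Editor's note -- not part of the patch.] The last argument to wait_for()
 * is the timeout in milliseconds, so the hunk above raises the Punit poll
 * timeout from 5 ms to 100 ms before the error path fires.
 */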
@@ -3495,14 +4504,8 @@ void valleyview_set_rps(struct drm_device *dev, u8 val)
3495 "Odd GPU freq value\n")) 4504 "Odd GPU freq value\n"))
3496 val &= ~1; 4505 val &= ~1;
3497 4506
3498 if (val != dev_priv->rps.cur_freq) { 4507 if (val != dev_priv->rps.cur_freq)
3499 DRM_DEBUG_DRIVER("GPU freq request from %d MHz (%u) to %d MHz (%u)\n",
3500 vlv_gpu_freq(dev_priv, dev_priv->rps.cur_freq),
3501 dev_priv->rps.cur_freq,
3502 vlv_gpu_freq(dev_priv, val), val);
3503
3504 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val); 4508 vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val);
3505 }
3506 4509
3507 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val)); 4510 I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val));
3508 4511
@@ -3510,43 +4513,11 @@ void valleyview_set_rps(struct drm_device *dev, u8 val)
 	trace_intel_gpu_freq_change(vlv_gpu_freq(dev_priv, val));
 }
 
-static void gen8_disable_rps_interrupts(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	I915_WRITE(GEN6_PMINTRMSK, ~GEN8_PMINTR_REDIRECT_TO_NON_DISP);
-	I915_WRITE(GEN8_GT_IER(2), I915_READ(GEN8_GT_IER(2)) &
-				   ~dev_priv->pm_rps_events);
-	/* Complete PM interrupt masking here doesn't race with the rps work
-	 * item again unmasking PM interrupts because that is using a different
-	 * register (GEN8_GT_IMR(2)) to mask PM interrupts. The only risk is in
-	 * leaving stale bits in GEN8_GT_IIR(2) and GEN8_GT_IMR(2) which
-	 * gen8_enable_rps will clean up. */
-
-	spin_lock_irq(&dev_priv->irq_lock);
-	dev_priv->rps.pm_iir = 0;
-	spin_unlock_irq(&dev_priv->irq_lock);
-
-	I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
-}
-
-static void gen6_disable_rps_interrupts(struct drm_device *dev)
+static void gen9_disable_rps(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
-	I915_WRITE(GEN6_PMIER, I915_READ(GEN6_PMIER) &
-				~dev_priv->pm_rps_events);
-	/* Complete PM interrupt masking here doesn't race with the rps work
-	 * item again unmasking PM interrupts because that is using a different
-	 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
-	 * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */
-
-	spin_lock_irq(&dev_priv->irq_lock);
-	dev_priv->rps.pm_iir = 0;
-	spin_unlock_irq(&dev_priv->irq_lock);
-
-	I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
+	I915_WRITE(GEN6_RC_CONTROL, 0);
 }
 
 static void gen6_disable_rps(struct drm_device *dev)
@@ -3555,11 +4526,6 @@ static void gen6_disable_rps(struct drm_device *dev)
 
 	I915_WRITE(GEN6_RC_CONTROL, 0);
 	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
-
-	if (IS_BROADWELL(dev))
-		gen8_disable_rps_interrupts(dev);
-	else
-		gen6_disable_rps_interrupts(dev);
 }
 
 static void cherryview_disable_rps(struct drm_device *dev)
@@ -3567,8 +4533,6 @@ static void cherryview_disable_rps(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	I915_WRITE(GEN6_RC_CONTROL, 0);
-
-	gen8_disable_rps_interrupts(dev);
 }
 
 static void valleyview_disable_rps(struct drm_device *dev)
@@ -3582,8 +4546,6 @@ static void valleyview_disable_rps(struct drm_device *dev)
 	I915_WRITE(GEN6_RC_CONTROL, 0);
 
 	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
-
-	gen6_disable_rps_interrupts(dev);
 }
 
 static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
@@ -3594,10 +4556,15 @@ static void intel_print_rc6_info(struct drm_device *dev, u32 mode)
 		else
 			mode = 0;
 	}
-	DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
-		      (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
-		      (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
-		      (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
+	if (HAS_RC6p(dev))
+		DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s RC6p %s RC6pp %s\n",
+			      (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off",
+			      (mode & GEN6_RC_CTL_RC6p_ENABLE) ? "on" : "off",
+			      (mode & GEN6_RC_CTL_RC6pp_ENABLE) ? "on" : "off");
+	else
+		DRM_DEBUG_KMS("Enabling RC6 states: RC6 %s\n",
+			      (mode & GEN6_RC_CTL_RC6_ENABLE) ? "on" : "off");
 }
 
 static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
@@ -3614,7 +4581,7 @@ static int sanitize_rc6_option(const struct drm_device *dev, int enable_rc6)
 	if (enable_rc6 >= 0) {
 		int mask;
 
-		if (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
+		if (HAS_RC6p(dev))
 			mask = INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE |
 			       INTEL_RC6pp_ENABLE;
 		else
@@ -3642,54 +4609,92 @@ int intel_enable_rc6(const struct drm_device *dev)
 	return i915.enable_rc6;
 }
 
-static void gen8_enable_rps_interrupts(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	spin_lock_irq(&dev_priv->irq_lock);
-	WARN_ON(dev_priv->rps.pm_iir);
-	gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
-	I915_WRITE(GEN8_GT_IIR(2), dev_priv->pm_rps_events);
-	spin_unlock_irq(&dev_priv->irq_lock);
-}
-
-static void gen6_enable_rps_interrupts(struct drm_device *dev)
+static void gen6_init_rps_frequencies(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t rp_state_cap;
+	u32 ddcc_status = 0;
+	int ret;
 
-	spin_lock_irq(&dev_priv->irq_lock);
-	WARN_ON(dev_priv->rps.pm_iir);
-	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
-	I915_WRITE(GEN6_PMIIR, dev_priv->pm_rps_events);
-	spin_unlock_irq(&dev_priv->irq_lock);
-}
-
-static void parse_rp_state_cap(struct drm_i915_private *dev_priv, u32 rp_state_cap)
-{
+	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
 	/* All of these values are in units of 50MHz */
 	dev_priv->rps.cur_freq = 0;
-	/* static values from HW: RP0 < RPe < RP1 < RPn (min_freq) */
-	dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
+	/* static values from HW: RP0 > RP1 > RPn (min_freq) */
 	dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff;
+	dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff;
 	dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff;
-	/* XXX: only BYT has a special efficient freq */
-	dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
 	/* hw_max = RP0 until we check for overclocking */
 	dev_priv->rps.max_freq = dev_priv->rps.rp0_freq;
 
+	dev_priv->rps.efficient_freq = dev_priv->rps.rp1_freq;
+	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
+		ret = sandybridge_pcode_read(dev_priv,
+					HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL,
+					&ddcc_status);
+		if (0 == ret)
+			dev_priv->rps.efficient_freq =
+				(ddcc_status >> 8) & 0xff;
+	}
+
 	/* Preserve min/max settings in case of re-init */
 	if (dev_priv->rps.max_freq_softlimit == 0)
 		dev_priv->rps.max_freq_softlimit = dev_priv->rps.max_freq;
 
-	if (dev_priv->rps.min_freq_softlimit == 0)
-		dev_priv->rps.min_freq_softlimit = dev_priv->rps.min_freq;
+	if (dev_priv->rps.min_freq_softlimit == 0) {
+		if (IS_HASWELL(dev) || IS_BROADWELL(dev))
+			dev_priv->rps.min_freq_softlimit =
+				/* max(RPe, 450 MHz) */
+				max(dev_priv->rps.efficient_freq, (u8) 9);
+		else
+			dev_priv->rps.min_freq_softlimit =
+				dev_priv->rps.min_freq;
+	}
+}
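/*
 * [Editor's note -- illustrative, not part of the patch.] The RP0/RP1/RPn
 * values read from GEN6_RP_STATE_CAP above are in 50 MHz units, which is why
 * "max(RPe, 450 MHz)" is written as max(..., (u8) 9): 9 * 50 MHz = 450 MHz.
 * A conversion sketch (helper name hypothetical):
 */
static inline unsigned int gen6_freq_units_to_mhz_sketch(u8 units)
{
	return (unsigned int)units * 50;	/* 50 MHz per RP unit */
}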
+
+static void gen9_enable_rps(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring;
+	uint32_t rc6_mask = 0;
+	int unused;
+
+	/* 1a: Software RC state - RC0 */
+	I915_WRITE(GEN6_RC_STATE, 0);
+
+	/* 1b: Get forcewake during program sequence. Although the driver
+	 * hasn't enabled a state yet where we need forcewake, BIOS may have.*/
+	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+
+	/* 2a: Disable RC states. */
+	I915_WRITE(GEN6_RC_CONTROL, 0);
+
+	/* 2b: Program RC6 thresholds.*/
+	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);
+	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */
+	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */
+	for_each_ring(ring, dev_priv, unused)
+		I915_WRITE(RING_MAX_IDLE(ring->mmio_base), 10);
+	I915_WRITE(GEN6_RC_SLEEP, 0);
+	I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
+
+	/* 3a: Enable RC6 */
+	if (intel_enable_rc6(dev) & INTEL_RC6_ENABLE)
+		rc6_mask = GEN6_RC_CTL_RC6_ENABLE;
+	DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?
+			"on" : "off");
+	I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE |
+				   GEN6_RC_CTL_EI_MODE(1) |
+				   rc6_mask);
+
+	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
+
 }
 
 static void gen8_enable_rps(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *ring;
-	uint32_t rc6_mask = 0, rp_state_cap;
+	uint32_t rc6_mask = 0;
 	int unused;
 
 	/* 1a: Software RC state - RC0 */
@@ -3702,8 +4707,8 @@ static void gen8_enable_rps(struct drm_device *dev)
 	/* 2a: Disable RC states. */
 	I915_WRITE(GEN6_RC_CONTROL, 0);
 
-	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-	parse_rp_state_cap(dev_priv, rp_state_cap);
+	/* Initialize rps frequencies */
+	gen6_init_rps_frequencies(dev);
 
 	/* 2b: Program RC6 thresholds.*/
 	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16);
@@ -3761,9 +4766,8 @@ static void gen8_enable_rps(struct drm_device *dev)
 
 	/* 6: Ring frequency + overclocking (our driver does this later */
 
-	gen6_set_rps(dev, (I915_READ(GEN6_GT_PERF_STATUS) & 0xff00) >> 8);
-
-	gen8_enable_rps_interrupts(dev);
+	dev_priv->rps.power = HIGH_POWER; /* force a reset */
+	gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
 
 	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 }
@@ -3772,7 +4776,6 @@ static void gen6_enable_rps(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *ring;
-	u32 rp_state_cap;
 	u32 rc6vids, pcu_mbox = 0, rc6_mask = 0;
 	u32 gtfifodbg;
 	int rc6_mode;
@@ -3796,9 +4799,8 @@ static void gen6_enable_rps(struct drm_device *dev)
 
 	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
-	rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
-
-	parse_rp_state_cap(dev_priv, rp_state_cap);
+	/* Initialize rps frequencies */
+	gen6_init_rps_frequencies(dev);
 
 	/* disable the counters and set deterministic thresholds */
 	I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -3861,8 +4863,6 @@ static void gen6_enable_rps(struct drm_device *dev)
 	dev_priv->rps.power = HIGH_POWER; /* force a reset */
 	gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
 
-	gen6_enable_rps_interrupts(dev);
-
 	rc6vids = 0;
 	ret = sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids);
 	if (IS_GEN6(dev) && ret) {
@@ -3915,9 +4915,9 @@ static void __gen6_update_ring_freq(struct drm_device *dev)
 	 * to use for memory access. We do this by specifying the IA frequency
 	 * the PCU should use as a reference to determine the ring frequency.
 	 */
-	for (gpu_freq = dev_priv->rps.max_freq_softlimit; gpu_freq >= dev_priv->rps.min_freq_softlimit;
+	for (gpu_freq = dev_priv->rps.max_freq; gpu_freq >= dev_priv->rps.min_freq;
 	     gpu_freq--) {
-		int diff = dev_priv->rps.max_freq_softlimit - gpu_freq;
+		int diff = dev_priv->rps.max_freq - gpu_freq;
 		unsigned int ia_freq = 0, ring_freq = 0;
 
 		if (INTEL_INFO(dev)->gen >= 8) {
@@ -4072,12 +5072,15 @@ static void cherryview_setup_pctx(struct drm_device *dev)
 
 	pcbr = I915_READ(VLV_PCBR);
 	if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
+		DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
 		paddr = (dev_priv->mm.stolen_base +
 			 (gtt->stolen_size - pctx_size));
 
 		pctx_paddr = (paddr & (~4095));
 		I915_WRITE(VLV_PCBR, pctx_paddr);
 	}
+
+	DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
 }
 
 static void valleyview_setup_pctx(struct drm_device *dev)
@@ -4103,6 +5106,8 @@ static void valleyview_setup_pctx(struct drm_device *dev)
 		goto out;
 	}
 
+	DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
+
 	/*
 	 * From the Gunit register HAS:
 	 * The Gfx driver is expected to program this register and ensure
@@ -4121,6 +5126,7 @@ static void valleyview_setup_pctx(struct drm_device *dev)
 	I915_WRITE(VLV_PCBR, pctx_paddr);
 
 out:
+	DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR));
 	dev_priv->vlv_pctx = pctx;
 }
 
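/*
 * [Editor's note -- illustrative, not part of the patch.] VLV_PCBR takes a
 * 4 KiB aligned physical address, so the "(paddr & (~4095))" in the fixup
 * paths above is a plain align-down, equivalent to this sketch:
 */
static inline unsigned long align_down_4k_sketch(unsigned long addr)
{
	return addr & ~4095UL;	/* clear the low 12 bits */
}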
@@ -4157,7 +5163,7 @@ static void valleyview_init_gt_powersave(struct drm_device *dev)
 		dev_priv->mem_freq = 1333;
 		break;
 	}
-	DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
+	DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
 
 	dev_priv->rps.max_freq = valleyview_rps_max_freq(dev_priv);
 	dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
@@ -4199,7 +5205,10 @@ static void cherryview_init_gt_powersave(struct drm_device *dev)
 
 	mutex_lock(&dev_priv->rps.hw_lock);
 
-	val = vlv_punit_read(dev_priv, CCK_FUSE_REG);
+	mutex_lock(&dev_priv->dpio_lock);
+	val = vlv_cck_read(dev_priv, CCK_FUSE_REG);
+	mutex_unlock(&dev_priv->dpio_lock);
+
 	switch ((val >> 2) & 0x7) {
 	case 0:
 	case 1:
@@ -4223,7 +5232,7 @@ static void cherryview_init_gt_powersave(struct drm_device *dev)
 		dev_priv->mem_freq = 1600;
 		break;
 	}
-	DRM_DEBUG_DRIVER("DDR speed: %d MHz", dev_priv->mem_freq);
+	DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq);
 
 	dev_priv->rps.max_freq = cherryview_rps_max_freq(dev_priv);
 	dev_priv->rps.rp0_freq = dev_priv->rps.max_freq;
@@ -4309,8 +5318,6 @@ static void cherryview_enable_rps(struct drm_device *dev)
 	/* For now we assume BIOS is allocating and populating the PCBR */
 	pcbr = I915_READ(VLV_PCBR);
 
-	DRM_DEBUG_DRIVER("PCBR offset : 0x%x\n", pcbr);
-
 	/* 3: Enable RC6 */
 	if ((intel_enable_rc6(dev) & INTEL_RC6_ENABLE) &&
 						(pcbr >> VLV_PCBR_ADDR_SHIFT))
@@ -4340,7 +5347,10 @@ static void cherryview_enable_rps(struct drm_device *dev)
 
 	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
 
-	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
+	/* RPS code assumes GPLL is used */
+	WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
+
+	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & GPLLENABLE ? "yes" : "no");
 	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
 
 	dev_priv->rps.cur_freq = (val >> 8) & 0xff;
@@ -4354,8 +5364,6 @@ static void cherryview_enable_rps(struct drm_device *dev)
 
 	valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
 
-	gen8_enable_rps_interrupts(dev);
-
 	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 }
 
@@ -4420,7 +5428,10 @@ static void valleyview_enable_rps(struct drm_device *dev)
 
 	val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
 
-	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & 0x10 ? "yes" : "no");
+	/* RPS code assumes GPLL is used */
+	WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
+
+	DRM_DEBUG_DRIVER("GPLL enabled? %s\n", val & GPLLENABLE ? "yes" : "no");
 	DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
 
 	dev_priv->rps.cur_freq = (val >> 8) & 0xff;
@@ -4434,8 +5445,6 @@ static void valleyview_enable_rps(struct drm_device *dev)
 
 	valleyview_set_rps(dev_priv->dev, dev_priv->rps.efficient_freq);
 
-	gen6_enable_rps_interrupts(dev);
-
 	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);
 }
 
@@ -5194,12 +6203,17 @@ void intel_suspend_gt_powersave(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	/* Interrupts should be disabled already to avoid re-arming. */
-	WARN_ON(intel_irqs_enabled(dev_priv));
+	if (INTEL_INFO(dev)->gen < 6)
+		return;
 
 	flush_delayed_work(&dev_priv->rps.delayed_resume_work);
 
-	cancel_work_sync(&dev_priv->rps.work);
+	/*
+	 * TODO: disable RPS interrupts on GEN9+ too once RPS support
+	 * is added for it.
+	 */
+	if (INTEL_INFO(dev)->gen < 9)
+		gen6_disable_rps_interrupts(dev);
 
 	/* Force GPU to min freq during suspend */
 	gen6_rps_idle(dev_priv);
@@ -5209,9 +6223,6 @@ void intel_disable_gt_powersave(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	/* Interrupts should be disabled already to avoid re-arming. */
-	WARN_ON(intel_irqs_enabled(dev_priv));
-
 	if (IS_IRONLAKE_M(dev)) {
 		ironlake_disable_drps(dev);
 		ironlake_disable_rc6(dev);
@@ -5219,12 +6230,15 @@ void intel_disable_gt_powersave(struct drm_device *dev)
 		intel_suspend_gt_powersave(dev);
 
 		mutex_lock(&dev_priv->rps.hw_lock);
-		if (IS_CHERRYVIEW(dev))
+		if (INTEL_INFO(dev)->gen >= 9)
+			gen9_disable_rps(dev);
+		else if (IS_CHERRYVIEW(dev))
 			cherryview_disable_rps(dev);
 		else if (IS_VALLEYVIEW(dev))
 			valleyview_disable_rps(dev);
 		else
 			gen6_disable_rps(dev);
+
 		dev_priv->rps.enabled = false;
 		mutex_unlock(&dev_priv->rps.hw_lock);
 	}
@@ -5239,10 +6253,19 @@ static void intel_gen6_powersave_work(struct work_struct *work)
 
 	mutex_lock(&dev_priv->rps.hw_lock);
 
+	/*
+	 * TODO: reset/enable RPS interrupts on GEN9+ too, once RPS support is
+	 * added for it.
+	 */
+	if (INTEL_INFO(dev)->gen < 9)
+		gen6_reset_rps_interrupts(dev);
+
 	if (IS_CHERRYVIEW(dev)) {
 		cherryview_enable_rps(dev);
 	} else if (IS_VALLEYVIEW(dev)) {
 		valleyview_enable_rps(dev);
+	} else if (INTEL_INFO(dev)->gen >= 9) {
+		gen9_enable_rps(dev);
 	} else if (IS_BROADWELL(dev)) {
 		gen8_enable_rps(dev);
 		__gen6_update_ring_freq(dev);
@@ -5251,6 +6274,10 @@ static void intel_gen6_powersave_work(struct work_struct *work)
 		__gen6_update_ring_freq(dev);
 	}
 	dev_priv->rps.enabled = true;
+
+	if (INTEL_INFO(dev)->gen < 9)
+		gen6_enable_rps_interrupts(dev);
+
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
 	intel_runtime_pm_put(dev_priv);
@@ -5481,7 +6508,7 @@ static void gen6_init_clock_gating(struct drm_device *dev)
 	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
 	 */
 	I915_WRITE(GEN6_GT_MODE,
-		   GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
+		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
 
 	ilk_init_lp_watermarks(dev);
 
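/*
 * [Editor's note -- illustrative, not part of the patch.] GEN6_GT_MODE and
 * GEN7_GT_MODE are masked registers: the high 16 bits of a write select
 * which low bits take effect. The old code OR'ed the mask in without
 * shifting it into the high half, so the WIZ hashing field was never
 * actually updated. _MASKED_FIELD(mask, value) is assumed to expand to
 * roughly the following (the real macro adds compile-time checks):
 */
#define MASKED_FIELD_SKETCH(mask, value) (((mask) << 16) | (value))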
@@ -5609,16 +6636,6 @@ static void broadwell_init_clock_gating(struct drm_device *dev)
 	I915_WRITE(WM2_LP_ILK, 0);
 	I915_WRITE(WM1_LP_ILK, 0);
 
-	/* FIXME(BDW): Check all the w/a, some might only apply to
-	 * pre-production hw. */
-
-
-	I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_BWGTLB_DISABLE));
-
-	I915_WRITE(_3D_CHICKEN3,
-		   _MASKED_BIT_ENABLE(_3D_CHICKEN_SDE_LIMIT_FIFO_POLY_DEPTH(2)));
-
-
 	/* WaSwitchSolVfFArbitrationPriority:bdw */
 	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
 
@@ -5689,7 +6706,7 @@ static void haswell_init_clock_gating(struct drm_device *dev)
 	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
 	 */
 	I915_WRITE(GEN7_GT_MODE,
-		   GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
+		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
 
 	/* WaSwitchSolVfFArbitrationPriority:hsw */
 	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | HSW_ECOCHK_ARB_PRIO_SOL);
@@ -5786,7 +6803,7 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
 	 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
 	 */
 	I915_WRITE(GEN7_GT_MODE,
-		   GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4);
+		   _MASKED_FIELD(GEN6_WIZ_HASHING_MASK, GEN6_WIZ_HASHING_16x4));
 
 	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
 	snpcr &= ~GEN6_MBC_SNPCR_MASK;
@@ -5899,18 +6916,6 @@ static void cherryview_init_clock_gating(struct drm_device *dev)
 	/* WaDisableSDEUnitClockGating:chv */
 	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
 		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
-
-	/* WaDisableGunitClockGating:chv (pre-production hw) */
-	I915_WRITE(VLV_GUNIT_CLOCK_GATE, I915_READ(VLV_GUNIT_CLOCK_GATE) |
-		   GINT_DIS);
-
-	/* WaDisableFfDopClockGating:chv (pre-production hw) */
-	I915_WRITE(GEN6_RC_SLEEP_PSMI_CONTROL,
-		   _MASKED_BIT_ENABLE(GEN8_FF_DOP_CLOCK_GATE_DISABLE));
-
-	/* WaDisableDopClockGating:chv (pre-production hw) */
-	I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
-		   GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE);
 }
 
 static void g4x_init_clock_gating(struct drm_device *dev)
@@ -6036,1161 +7041,35 @@ void intel_suspend_hw(struct drm_device *dev)
6036 lpt_suspend_hw(dev); 7041 lpt_suspend_hw(dev);
6037} 7042}
6038 7043
6039#define for_each_power_well(i, power_well, domain_mask, power_domains) \ 7044static void intel_init_fbc(struct drm_i915_private *dev_priv)
6040 for (i = 0; \
6041 i < (power_domains)->power_well_count && \
6042 ((power_well) = &(power_domains)->power_wells[i]); \
6043 i++) \
6044 if ((power_well)->domains & (domain_mask))
6045
6046#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
6047 for (i = (power_domains)->power_well_count - 1; \
6048 i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
6049 i--) \
6050 if ((power_well)->domains & (domain_mask))
6051
6052/**
6053 * We should only use the power well if we explicitly asked the hardware to
6054 * enable it, so check if it's enabled and also check if we've requested it to
6055 * be enabled.
6056 */
6057static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
6058 struct i915_power_well *power_well)
6059{
6060 return I915_READ(HSW_PWR_WELL_DRIVER) ==
6061 (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
6062}
6063
6064bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv,
6065 enum intel_display_power_domain domain)
6066{
6067 struct i915_power_domains *power_domains;
6068 struct i915_power_well *power_well;
6069 bool is_enabled;
6070 int i;
6071
6072 if (dev_priv->pm.suspended)
6073 return false;
6074
6075 power_domains = &dev_priv->power_domains;
6076
6077 is_enabled = true;
6078
6079 for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
6080 if (power_well->always_on)
6081 continue;
6082
6083 if (!power_well->hw_enabled) {
6084 is_enabled = false;
6085 break;
6086 }
6087 }
6088
6089 return is_enabled;
6090}
6091
6092bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
6093 enum intel_display_power_domain domain)
6094{
6095 struct i915_power_domains *power_domains;
6096 bool ret;
6097
6098 power_domains = &dev_priv->power_domains;
6099
6100 mutex_lock(&power_domains->lock);
6101 ret = intel_display_power_enabled_unlocked(dev_priv, domain);
6102 mutex_unlock(&power_domains->lock);
6103
6104 return ret;
6105}
6106
6107/*
6108 * Starting with Haswell, we have a "Power Down Well" that can be turned off
6109 * when not needed anymore. We have 4 registers that can request the power well
6110 * to be enabled, and it will only be disabled if none of the registers is
6111 * requesting it to be enabled.
6112 */
6113static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
6114{
6115 struct drm_device *dev = dev_priv->dev;
6116
6117 /*
6118 * After we re-enable the power well, if we touch VGA register 0x3d5
6119 * we'll get unclaimed register interrupts. This stops after we write
6120 * anything to the VGA MSR register. The vgacon module uses this
6121 * register all the time, so if we unbind our driver and, as a
6122 * consequence, bind vgacon, we'll get stuck in an infinite loop at
6123 * console_unlock(). So make here we touch the VGA MSR register, making
6124 * sure vgacon can keep working normally without triggering interrupts
6125 * and error messages.
6126 */
6127 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
6128 outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
6129 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
6130
6131 if (IS_BROADWELL(dev))
6132 gen8_irq_power_well_post_enable(dev_priv);
6133}
6134
6135static void hsw_set_power_well(struct drm_i915_private *dev_priv,
6136 struct i915_power_well *power_well, bool enable)
6137{
6138 bool is_enabled, enable_requested;
6139 uint32_t tmp;
6140
6141 tmp = I915_READ(HSW_PWR_WELL_DRIVER);
6142 is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
6143 enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
6144
6145 if (enable) {
6146 if (!enable_requested)
6147 I915_WRITE(HSW_PWR_WELL_DRIVER,
6148 HSW_PWR_WELL_ENABLE_REQUEST);
6149
6150 if (!is_enabled) {
6151 DRM_DEBUG_KMS("Enabling power well\n");
6152 if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
6153 HSW_PWR_WELL_STATE_ENABLED), 20))
6154 DRM_ERROR("Timeout enabling power well\n");
6155 }
6156
6157 hsw_power_well_post_enable(dev_priv);
6158 } else {
6159 if (enable_requested) {
6160 I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
6161 POSTING_READ(HSW_PWR_WELL_DRIVER);
6162 DRM_DEBUG_KMS("Requesting to disable the power well\n");
6163 }
6164 }
6165}
6166
6167static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
6168 struct i915_power_well *power_well)
6169{
6170 hsw_set_power_well(dev_priv, power_well, power_well->count > 0);
6171
6172 /*
6173 * We're taking over the BIOS, so clear any requests made by it since
6174 * the driver is in charge now.
6175 */
6176 if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
6177 I915_WRITE(HSW_PWR_WELL_BIOS, 0);
6178}
6179
6180static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
6181 struct i915_power_well *power_well)
6182{
6183 hsw_set_power_well(dev_priv, power_well, true);
6184}
6185
6186static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
6187 struct i915_power_well *power_well)
6188{
6189 hsw_set_power_well(dev_priv, power_well, false);
6190}
6191
6192static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
6193 struct i915_power_well *power_well)
6194{ 7045{
6195} 7046 if (!HAS_FBC(dev_priv)) {
6196 7047 dev_priv->fbc.enabled = false;
6197static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
6198 struct i915_power_well *power_well)
6199{
6200 return true;
6201}
6202
6203static void vlv_set_power_well(struct drm_i915_private *dev_priv,
6204 struct i915_power_well *power_well, bool enable)
6205{
6206 enum punit_power_well power_well_id = power_well->data;
6207 u32 mask;
6208 u32 state;
6209 u32 ctrl;
6210
6211 mask = PUNIT_PWRGT_MASK(power_well_id);
6212 state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
6213 PUNIT_PWRGT_PWR_GATE(power_well_id);
6214
6215 mutex_lock(&dev_priv->rps.hw_lock);
6216
6217#define COND \
6218 ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
6219
6220 if (COND)
6221 goto out;
6222
6223 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
6224 ctrl &= ~mask;
6225 ctrl |= state;
6226 vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
6227
6228 if (wait_for(COND, 100))
6229 DRM_ERROR("timout setting power well state %08x (%08x)\n",
6230 state,
6231 vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
6232
6233#undef COND
6234
6235out:
6236 mutex_unlock(&dev_priv->rps.hw_lock);
6237}
6238
6239static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
6240 struct i915_power_well *power_well)
6241{
6242 vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
6243}
6244
6245static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
6246 struct i915_power_well *power_well)
6247{
6248 vlv_set_power_well(dev_priv, power_well, true);
6249}
6250
6251static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
6252 struct i915_power_well *power_well)
6253{
6254 vlv_set_power_well(dev_priv, power_well, false);
6255}
6256
6257static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
6258 struct i915_power_well *power_well)
6259{
6260 int power_well_id = power_well->data;
6261 bool enabled = false;
6262 u32 mask;
6263 u32 state;
6264 u32 ctrl;
6265
6266 mask = PUNIT_PWRGT_MASK(power_well_id);
6267 ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);
6268
6269 mutex_lock(&dev_priv->rps.hw_lock);
6270
6271 state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
6272 /*
6273 * We only ever set the power-on and power-gate states, anything
6274 * else is unexpected.
6275 */
6276 WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
6277 state != PUNIT_PWRGT_PWR_GATE(power_well_id));
6278 if (state == ctrl)
6279 enabled = true;
6280
6281 /*
6282 * A transient state at this point would mean some unexpected party
6283 * is poking at the power controls too.
6284 */
6285 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
6286 WARN_ON(ctrl != state);
6287
6288 mutex_unlock(&dev_priv->rps.hw_lock);
6289
6290 return enabled;
6291}
6292
6293static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
6294 struct i915_power_well *power_well)
6295{
6296 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
6297
6298 vlv_set_power_well(dev_priv, power_well, true);
6299
6300 spin_lock_irq(&dev_priv->irq_lock);
6301 valleyview_enable_display_irqs(dev_priv);
6302 spin_unlock_irq(&dev_priv->irq_lock);
6303
6304 /*
6305 * During driver initialization/resume we can avoid restoring the
6306 * part of the HW/SW state that will be inited anyway explicitly.
6307 */
6308 if (dev_priv->power_domains.initializing)
6309 return; 7048 return;
6310
6311 intel_hpd_init(dev_priv->dev);
6312
6313 i915_redisable_vga_power_on(dev_priv->dev);
6314}
6315
6316static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
6317 struct i915_power_well *power_well)
6318{
6319 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
6320
6321 spin_lock_irq(&dev_priv->irq_lock);
6322 valleyview_disable_display_irqs(dev_priv);
6323 spin_unlock_irq(&dev_priv->irq_lock);
6324
6325 vlv_set_power_well(dev_priv, power_well, false);
6326
6327 vlv_power_sequencer_reset(dev_priv);
6328}
6329
6330static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
6331 struct i915_power_well *power_well)
6332{
6333 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
6334
6335 /*
6336 * Enable the CRI clock source so we can get at the
6337 * display and the reference clock for VGA
6338 * hotplug / manual detection.
6339 */
6340 I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
6341 DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
6342 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
6343
6344 vlv_set_power_well(dev_priv, power_well, true);
6345
6346 /*
6347 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
6348 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
6349 * a. GUnit 0x2110 bit[0] set to 1 (def 0)
6350 * b. The other bits such as sfr settings / modesel may all
6351 * be set to 0.
6352 *
6353 * This should only be done on init and resume from S3 with
6354 * both PLLs disabled, or we risk losing DPIO and PLL
6355 * synchronization.
6356 */
6357 I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
6358}
6359
6360static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
6361 struct i915_power_well *power_well)
6362{
6363 enum pipe pipe;
6364
6365 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
6366
6367 for_each_pipe(dev_priv, pipe)
6368 assert_pll_disabled(dev_priv, pipe);
6369
6370 /* Assert common reset */
6371 I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
6372
6373 vlv_set_power_well(dev_priv, power_well, false);
6374}
6375
6376static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
6377 struct i915_power_well *power_well)
6378{
6379 enum dpio_phy phy;
6380
6381 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
6382 power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
6383
6384 /*
6385 * Enable the CRI clock source so we can get at the
6386 * display and the reference clock for VGA
6387 * hotplug / manual detection.
6388 */
6389 if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
6390 phy = DPIO_PHY0;
6391 I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
6392 DPLL_REFA_CLK_ENABLE_VLV);
6393 I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
6394 DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
6395 } else {
6396 phy = DPIO_PHY1;
6397 I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) |
6398 DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
6399 } 7049 }
6400 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
6401 vlv_set_power_well(dev_priv, power_well, true);
6402 7050
6403 /* Poll for phypwrgood signal */ 7051 if (INTEL_INFO(dev_priv)->gen >= 7) {
6404 if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1)) 7052 dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
6405 DRM_ERROR("Display PHY %d is not power up\n", phy); 7053 dev_priv->display.enable_fbc = gen7_enable_fbc;
6406 7054 dev_priv->display.disable_fbc = ironlake_disable_fbc;
6407 I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) | 7055 } else if (INTEL_INFO(dev_priv)->gen >= 5) {
6408 PHY_COM_LANE_RESET_DEASSERT(phy)); 7056 dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
6409} 7057 dev_priv->display.enable_fbc = ironlake_enable_fbc;
6410 7058 dev_priv->display.disable_fbc = ironlake_disable_fbc;
6411static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv, 7059 } else if (IS_GM45(dev_priv)) {
6412 struct i915_power_well *power_well) 7060 dev_priv->display.fbc_enabled = g4x_fbc_enabled;
6413{ 7061 dev_priv->display.enable_fbc = g4x_enable_fbc;
6414 enum dpio_phy phy; 7062 dev_priv->display.disable_fbc = g4x_disable_fbc;
6415
6416 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
6417 power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
6418
6419 if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
6420 phy = DPIO_PHY0;
6421 assert_pll_disabled(dev_priv, PIPE_A);
6422 assert_pll_disabled(dev_priv, PIPE_B);
6423 } else { 7063 } else {
6424 phy = DPIO_PHY1; 7064 dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
6425 assert_pll_disabled(dev_priv, PIPE_C); 7065 dev_priv->display.enable_fbc = i8xx_enable_fbc;
6426 } 7066 dev_priv->display.disable_fbc = i8xx_disable_fbc;
6427 7067
6428 I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) & 7068 /* This value was pulled out of someone's hat */
6429 ~PHY_COM_LANE_RESET_DEASSERT(phy)); 7069 I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
6430
6431 vlv_set_power_well(dev_priv, power_well, false);
6432}
6433
6434static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
6435 struct i915_power_well *power_well)
6436{
6437 enum pipe pipe = power_well->data;
6438 bool enabled;
6439 u32 state, ctrl;
6440
6441 mutex_lock(&dev_priv->rps.hw_lock);
6442
6443 state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
6444 /*
6445 * We only ever set the power-on and power-gate states, anything
6446 * else is unexpected.
6447 */
6448 WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
6449 enabled = state == DP_SSS_PWR_ON(pipe);
6450
6451 /*
6452 * A transient state at this point would mean some unexpected party
6453 * is poking at the power controls too.
6454 */
6455 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
6456 WARN_ON(ctrl << 16 != state);
6457
6458 mutex_unlock(&dev_priv->rps.hw_lock);
6459
6460 return enabled;
6461}
6462
6463static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
6464 struct i915_power_well *power_well,
6465 bool enable)
6466{
6467 enum pipe pipe = power_well->data;
6468 u32 state;
6469 u32 ctrl;
6470
6471 state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
6472
6473 mutex_lock(&dev_priv->rps.hw_lock);
6474
6475#define COND \
6476 ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)
6477
6478 if (COND)
6479 goto out;
6480
6481 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
6482 ctrl &= ~DP_SSC_MASK(pipe);
6483 ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
6484 vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);
6485
6486 if (wait_for(COND, 100))
6487 DRM_ERROR("timout setting power well state %08x (%08x)\n",
6488 state,
6489 vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));
6490
6491#undef COND
6492
6493out:
6494 mutex_unlock(&dev_priv->rps.hw_lock);
6495}
6496
6497static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
6498 struct i915_power_well *power_well)
6499{
6500 chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
6501}
6502
6503static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
6504 struct i915_power_well *power_well)
6505{
6506 WARN_ON_ONCE(power_well->data != PIPE_A &&
6507 power_well->data != PIPE_B &&
6508 power_well->data != PIPE_C);
6509
6510 chv_set_pipe_power_well(dev_priv, power_well, true);
6511}
6512
6513static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
6514 struct i915_power_well *power_well)
6515{
6516 WARN_ON_ONCE(power_well->data != PIPE_A &&
6517 power_well->data != PIPE_B &&
6518 power_well->data != PIPE_C);
6519
6520 chv_set_pipe_power_well(dev_priv, power_well, false);
6521}
6522
6523static void check_power_well_state(struct drm_i915_private *dev_priv,
6524 struct i915_power_well *power_well)
6525{
6526 bool enabled = power_well->ops->is_enabled(dev_priv, power_well);
6527
6528 if (power_well->always_on || !i915.disable_power_well) {
6529 if (!enabled)
6530 goto mismatch;
6531
6532 return;
6533 }
6534
6535 if (enabled != (power_well->count > 0))
6536 goto mismatch;
6537
6538 return;
6539
6540mismatch:
6541 WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d)\n",
6542 power_well->name, power_well->always_on, enabled,
6543 power_well->count, i915.disable_power_well);
6544}
6545
6546void intel_display_power_get(struct drm_i915_private *dev_priv,
6547 enum intel_display_power_domain domain)
6548{
6549 struct i915_power_domains *power_domains;
6550 struct i915_power_well *power_well;
6551 int i;
6552
6553 intel_runtime_pm_get(dev_priv);
6554
6555 power_domains = &dev_priv->power_domains;
6556
6557 mutex_lock(&power_domains->lock);
6558
6559 for_each_power_well(i, power_well, BIT(domain), power_domains) {
6560 if (!power_well->count++) {
6561 DRM_DEBUG_KMS("enabling %s\n", power_well->name);
6562 power_well->ops->enable(dev_priv, power_well);
6563 power_well->hw_enabled = true;
6564 }
6565
6566 check_power_well_state(dev_priv, power_well);
6567 } 7070 }
6568 7071
6569 power_domains->domain_use_count[domain]++; 7072 dev_priv->fbc.enabled = dev_priv->display.fbc_enabled(dev_priv->dev);
6570
6571 mutex_unlock(&power_domains->lock);
6572}
6573
6574void intel_display_power_put(struct drm_i915_private *dev_priv,
6575 enum intel_display_power_domain domain)
6576{
6577 struct i915_power_domains *power_domains;
6578 struct i915_power_well *power_well;
6579 int i;
6580
6581 power_domains = &dev_priv->power_domains;
6582
6583 mutex_lock(&power_domains->lock);
6584
6585 WARN_ON(!power_domains->domain_use_count[domain]);
6586 power_domains->domain_use_count[domain]--;
6587
6588 for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
6589 WARN_ON(!power_well->count);
6590
6591 if (!--power_well->count && i915.disable_power_well) {
6592 DRM_DEBUG_KMS("disabling %s\n", power_well->name);
6593 power_well->hw_enabled = false;
6594 power_well->ops->disable(dev_priv, power_well);
6595 }
6596
6597 check_power_well_state(dev_priv, power_well);
6598 }
6599
6600 mutex_unlock(&power_domains->lock);
6601
6602 intel_runtime_pm_put(dev_priv);
6603}
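The pair above is a plain per-domain refcount: the first intel_display_power_get() on a domain powers up every well that serves it, and the final intel_display_power_put() (when i915.disable_power_well allows it) powers the wells back down. A minimal usage sketch -- the helper name and register are illustrative, only the get/put calls and POWER_DOMAIN_VGA come from the driver:

static u32 example_read_vga_reg(struct drm_i915_private *dev_priv, u32 reg)
{
	u32 val;

	/* 0 -> 1 transition powers up the wells backing POWER_DOMAIN_VGA */
	intel_display_power_get(dev_priv, POWER_DOMAIN_VGA);

	val = I915_READ(reg);

	/* dropping the last reference may power the wells down again */
	intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);

	return val;
}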
6604
6605static struct i915_power_domains *hsw_pwr;
6606
6607/* Display audio driver power well request */
6608int i915_request_power_well(void)
6609{
6610 struct drm_i915_private *dev_priv;
6611
6612 if (!hsw_pwr)
6613 return -ENODEV;
6614
6615 dev_priv = container_of(hsw_pwr, struct drm_i915_private,
6616 power_domains);
6617 intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
6618 return 0;
6619}
6620EXPORT_SYMBOL_GPL(i915_request_power_well);
6621
6622/* Display audio driver power well release */
6623int i915_release_power_well(void)
6624{
6625 struct drm_i915_private *dev_priv;
6626
6627 if (!hsw_pwr)
6628 return -ENODEV;
6629
6630 dev_priv = container_of(hsw_pwr, struct drm_i915_private,
6631 power_domains);
6632 intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
6633 return 0;
6634}
6635EXPORT_SYMBOL_GPL(i915_release_power_well);
6636
6637/*
6638 * Private interface for the audio driver to get CDCLK in kHz.
6639 *
6640 * Caller must request power well using i915_request_power_well() prior to
6641 * making the call.
6642 */
6643int i915_get_cdclk_freq(void)
6644{
6645 struct drm_i915_private *dev_priv;
6646
6647 if (!hsw_pwr)
6648 return -ENODEV;
6649
6650 dev_priv = container_of(hsw_pwr, struct drm_i915_private,
6651 power_domains);
6652
6653 return intel_ddi_get_cdclk_freq(dev_priv);
6654}
6655EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);
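The three exports above form the whole private contract with the HDA audio driver: request the audio power well, query CDCLK while holding it, then release. A hedged sketch of the expected calling sequence on the audio side (the function name is illustrative; error handling trimmed):

static int example_hda_query_cdclk(void)
{
	int ret, freq;

	ret = i915_request_power_well();	/* takes POWER_DOMAIN_AUDIO */
	if (ret)
		return ret;			/* -ENODEV when i915 isn't bound */

	freq = i915_get_cdclk_freq();		/* valid only while the well is held */

	i915_release_power_well();
	return freq;
}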
6656
6657
6658#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
6659
6660#define HSW_ALWAYS_ON_POWER_DOMAINS ( \
6661 BIT(POWER_DOMAIN_PIPE_A) | \
6662 BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
6663 BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \
6664 BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
6665 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
6666 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
6667 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
6668 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
6669 BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
6670 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
6671 BIT(POWER_DOMAIN_PORT_CRT) | \
6672 BIT(POWER_DOMAIN_PLLS) | \
6673 BIT(POWER_DOMAIN_INIT))
6674#define HSW_DISPLAY_POWER_DOMAINS ( \
6675 (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \
6676 BIT(POWER_DOMAIN_INIT))
6677
6678#define BDW_ALWAYS_ON_POWER_DOMAINS ( \
6679 HSW_ALWAYS_ON_POWER_DOMAINS | \
6680 BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
6681#define BDW_DISPLAY_POWER_DOMAINS ( \
6682 (POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) | \
6683 BIT(POWER_DOMAIN_INIT))
6684
6685#define VLV_ALWAYS_ON_POWER_DOMAINS BIT(POWER_DOMAIN_INIT)
6686#define VLV_DISPLAY_POWER_DOMAINS POWER_DOMAIN_MASK
6687
6688#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
6689 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
6690 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
6691 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
6692 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
6693 BIT(POWER_DOMAIN_PORT_CRT) | \
6694 BIT(POWER_DOMAIN_INIT))
6695
6696#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
6697 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
6698 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
6699 BIT(POWER_DOMAIN_INIT))
6700
6701#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
6702 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
6703 BIT(POWER_DOMAIN_INIT))
6704
6705#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
6706 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
6707 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
6708 BIT(POWER_DOMAIN_INIT))
6709
6710#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
6711 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
6712 BIT(POWER_DOMAIN_INIT))
6713
6714#define CHV_PIPE_A_POWER_DOMAINS ( \
6715 BIT(POWER_DOMAIN_PIPE_A) | \
6716 BIT(POWER_DOMAIN_INIT))
6717
6718#define CHV_PIPE_B_POWER_DOMAINS ( \
6719 BIT(POWER_DOMAIN_PIPE_B) | \
6720 BIT(POWER_DOMAIN_INIT))
6721
6722#define CHV_PIPE_C_POWER_DOMAINS ( \
6723 BIT(POWER_DOMAIN_PIPE_C) | \
6724 BIT(POWER_DOMAIN_INIT))
6725
6726#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \
6727 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
6728 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
6729 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
6730 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
6731 BIT(POWER_DOMAIN_INIT))
6732
6733#define CHV_DPIO_CMN_D_POWER_DOMAINS ( \
6734 BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
6735 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
6736 BIT(POWER_DOMAIN_INIT))
6737
6738#define CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS ( \
6739 BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
6740 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
6741 BIT(POWER_DOMAIN_INIT))
6742
6743#define CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS ( \
6744 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
6745 BIT(POWER_DOMAIN_INIT))
6746
6747static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
6748 .sync_hw = i9xx_always_on_power_well_noop,
6749 .enable = i9xx_always_on_power_well_noop,
6750 .disable = i9xx_always_on_power_well_noop,
6751 .is_enabled = i9xx_always_on_power_well_enabled,
6752};
6753
6754static const struct i915_power_well_ops chv_pipe_power_well_ops = {
6755 .sync_hw = chv_pipe_power_well_sync_hw,
6756 .enable = chv_pipe_power_well_enable,
6757 .disable = chv_pipe_power_well_disable,
6758 .is_enabled = chv_pipe_power_well_enabled,
6759};
6760
6761static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
6762 .sync_hw = vlv_power_well_sync_hw,
6763 .enable = chv_dpio_cmn_power_well_enable,
6764 .disable = chv_dpio_cmn_power_well_disable,
6765 .is_enabled = vlv_power_well_enabled,
6766};
6767
6768static struct i915_power_well i9xx_always_on_power_well[] = {
6769 {
6770 .name = "always-on",
6771 .always_on = 1,
6772 .domains = POWER_DOMAIN_MASK,
6773 .ops = &i9xx_always_on_power_well_ops,
6774 },
6775};
6776
6777static const struct i915_power_well_ops hsw_power_well_ops = {
6778 .sync_hw = hsw_power_well_sync_hw,
6779 .enable = hsw_power_well_enable,
6780 .disable = hsw_power_well_disable,
6781 .is_enabled = hsw_power_well_enabled,
6782};
6783
6784static struct i915_power_well hsw_power_wells[] = {
6785 {
6786 .name = "always-on",
6787 .always_on = 1,
6788 .domains = HSW_ALWAYS_ON_POWER_DOMAINS,
6789 .ops = &i9xx_always_on_power_well_ops,
6790 },
6791 {
6792 .name = "display",
6793 .domains = HSW_DISPLAY_POWER_DOMAINS,
6794 .ops = &hsw_power_well_ops,
6795 },
6796};
6797
6798static struct i915_power_well bdw_power_wells[] = {
6799 {
6800 .name = "always-on",
6801 .always_on = 1,
6802 .domains = BDW_ALWAYS_ON_POWER_DOMAINS,
6803 .ops = &i9xx_always_on_power_well_ops,
6804 },
6805 {
6806 .name = "display",
6807 .domains = BDW_DISPLAY_POWER_DOMAINS,
6808 .ops = &hsw_power_well_ops,
6809 },
6810};
6811
6812static const struct i915_power_well_ops vlv_display_power_well_ops = {
6813 .sync_hw = vlv_power_well_sync_hw,
6814 .enable = vlv_display_power_well_enable,
6815 .disable = vlv_display_power_well_disable,
6816 .is_enabled = vlv_power_well_enabled,
6817};
6818
6819static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
6820 .sync_hw = vlv_power_well_sync_hw,
6821 .enable = vlv_dpio_cmn_power_well_enable,
6822 .disable = vlv_dpio_cmn_power_well_disable,
6823 .is_enabled = vlv_power_well_enabled,
6824};
6825
6826static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
6827 .sync_hw = vlv_power_well_sync_hw,
6828 .enable = vlv_power_well_enable,
6829 .disable = vlv_power_well_disable,
6830 .is_enabled = vlv_power_well_enabled,
6831};
6832
6833static struct i915_power_well vlv_power_wells[] = {
6834 {
6835 .name = "always-on",
6836 .always_on = 1,
6837 .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
6838 .ops = &i9xx_always_on_power_well_ops,
6839 },
6840 {
6841 .name = "display",
6842 .domains = VLV_DISPLAY_POWER_DOMAINS,
6843 .data = PUNIT_POWER_WELL_DISP2D,
6844 .ops = &vlv_display_power_well_ops,
6845 },
6846 {
6847 .name = "dpio-tx-b-01",
6848 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
6849 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
6850 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
6851 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
6852 .ops = &vlv_dpio_power_well_ops,
6853 .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
6854 },
6855 {
6856 .name = "dpio-tx-b-23",
6857 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
6858 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
6859 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
6860 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
6861 .ops = &vlv_dpio_power_well_ops,
6862 .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
6863 },
6864 {
6865 .name = "dpio-tx-c-01",
6866 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
6867 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
6868 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
6869 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
6870 .ops = &vlv_dpio_power_well_ops,
6871 .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
6872 },
6873 {
6874 .name = "dpio-tx-c-23",
6875 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
6876 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
6877 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
6878 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
6879 .ops = &vlv_dpio_power_well_ops,
6880 .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
6881 },
6882 {
6883 .name = "dpio-common",
6884 .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
6885 .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
6886 .ops = &vlv_dpio_cmn_power_well_ops,
6887 },
6888};
6889
6890static struct i915_power_well chv_power_wells[] = {
6891 {
6892 .name = "always-on",
6893 .always_on = 1,
6894 .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
6895 .ops = &i9xx_always_on_power_well_ops,
6896 },
6897#if 0
6898 {
6899 .name = "display",
6900 .domains = VLV_DISPLAY_POWER_DOMAINS,
6901 .data = PUNIT_POWER_WELL_DISP2D,
6902 .ops = &vlv_display_power_well_ops,
6903 },
6904 {
6905 .name = "pipe-a",
6906 .domains = CHV_PIPE_A_POWER_DOMAINS,
6907 .data = PIPE_A,
6908 .ops = &chv_pipe_power_well_ops,
6909 },
6910 {
6911 .name = "pipe-b",
6912 .domains = CHV_PIPE_B_POWER_DOMAINS,
6913 .data = PIPE_B,
6914 .ops = &chv_pipe_power_well_ops,
6915 },
6916 {
6917 .name = "pipe-c",
6918 .domains = CHV_PIPE_C_POWER_DOMAINS,
6919 .data = PIPE_C,
6920 .ops = &chv_pipe_power_well_ops,
6921 },
6922#endif
6923 {
6924 .name = "dpio-common-bc",
6925 /*
6926 * XXX: cmnreset for one PHY seems to disturb the other.
6927 * As a workaround keep both powered on at the same
6928 * time for now.
6929 */
6930 .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
6931 .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
6932 .ops = &chv_dpio_cmn_power_well_ops,
6933 },
6934 {
6935 .name = "dpio-common-d",
6936 /*
6937 * XXX: cmnreset for one PHY seems to disturb the other.
6938 * As a workaround keep both powered on at the same
6939 * time for now.
6940 */
6941 .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
6942 .data = PUNIT_POWER_WELL_DPIO_CMN_D,
6943 .ops = &chv_dpio_cmn_power_well_ops,
6944 },
6945#if 0
6946 {
6947 .name = "dpio-tx-b-01",
6948 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
6949 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
6950 .ops = &vlv_dpio_power_well_ops,
6951 .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
6952 },
6953 {
6954 .name = "dpio-tx-b-23",
6955 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
6956 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
6957 .ops = &vlv_dpio_power_well_ops,
6958 .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
6959 },
6960 {
6961 .name = "dpio-tx-c-01",
6962 .domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
6963 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
6964 .ops = &vlv_dpio_power_well_ops,
6965 .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
6966 },
6967 {
6968 .name = "dpio-tx-c-23",
6969 .domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
6970 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
6971 .ops = &vlv_dpio_power_well_ops,
6972 .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
6973 },
6974 {
6975 .name = "dpio-tx-d-01",
6976 .domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
6977 CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
6978 .ops = &vlv_dpio_power_well_ops,
6979 .data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_01,
6980 },
6981 {
6982 .name = "dpio-tx-d-23",
6983 .domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
6984 CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
6985 .ops = &vlv_dpio_power_well_ops,
6986 .data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_23,
6987 },
6988#endif
6989};
6990
6991static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
6992 enum punit_power_well power_well_id)
6993{
6994 struct i915_power_domains *power_domains = &dev_priv->power_domains;
6995 struct i915_power_well *power_well;
6996 int i;
6997
6998 for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
6999 if (power_well->data == power_well_id)
7000 return power_well;
7001 }
7002
7003 return NULL;
7004}
7005
7006#define set_power_wells(power_domains, __power_wells) ({ \
7007 (power_domains)->power_wells = (__power_wells); \
7008 (power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \
7009})
7010
7011int intel_power_domains_init(struct drm_i915_private *dev_priv)
7012{
7013 struct i915_power_domains *power_domains = &dev_priv->power_domains;
7014
7015 mutex_init(&power_domains->lock);
7016
7017 /*
7018 * The enabling order will be from lower to higher indexed wells,
7019 * the disabling order is reversed.
7020 */
7021 if (IS_HASWELL(dev_priv->dev)) {
7022 set_power_wells(power_domains, hsw_power_wells);
7023 hsw_pwr = power_domains;
7024 } else if (IS_BROADWELL(dev_priv->dev)) {
7025 set_power_wells(power_domains, bdw_power_wells);
7026 hsw_pwr = power_domains;
7027 } else if (IS_CHERRYVIEW(dev_priv->dev)) {
7028 set_power_wells(power_domains, chv_power_wells);
7029 } else if (IS_VALLEYVIEW(dev_priv->dev)) {
7030 set_power_wells(power_domains, vlv_power_wells);
7031 } else {
7032 set_power_wells(power_domains, i9xx_always_on_power_well);
7033 }
7034
7035 return 0;
7036}
7037
7038void intel_power_domains_remove(struct drm_i915_private *dev_priv)
7039{
7040 hsw_pwr = NULL;
7041}
7042
7043static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
7044{
7045 struct i915_power_domains *power_domains = &dev_priv->power_domains;
7046 struct i915_power_well *power_well;
7047 int i;
7048
7049 mutex_lock(&power_domains->lock);
7050 for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
7051 power_well->ops->sync_hw(dev_priv, power_well);
7052 power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
7053 power_well);
7054 }
7055 mutex_unlock(&power_domains->lock);
7056}
7057
7058static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
7059{
7060 struct i915_power_well *cmn =
7061 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
7062 struct i915_power_well *disp2d =
7063 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);
7064
7065 /* nothing to do if common lane is already off */
7066 if (!cmn->ops->is_enabled(dev_priv, cmn))
7067 return;
7068
7069 /* If the display might be already active skip this */
7070 if (disp2d->ops->is_enabled(dev_priv, disp2d) &&
7071 I915_READ(DPIO_CTL) & DPIO_CMNRST)
7072 return;
7073
7074 DRM_DEBUG_KMS("toggling display PHY side reset\n");
7075
7076 /* cmnlane needs DPLL registers */
7077 disp2d->ops->enable(dev_priv, disp2d);
7078
7079 /*
7080 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
7081 * Need to assert and de-assert PHY SB reset by gating the
7082 * common lane power, then un-gating it.
7083 * Simply ungating isn't enough to reset the PHY enough to get
7084 * ports and lanes running.
7085 */
7086 cmn->ops->disable(dev_priv, cmn);
7087}
7088
7089void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
7090{
7091 struct drm_device *dev = dev_priv->dev;
7092 struct i915_power_domains *power_domains = &dev_priv->power_domains;
7093
7094 power_domains->initializing = true;
7095
7096 if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
7097 mutex_lock(&power_domains->lock);
7098 vlv_cmnlane_wa(dev_priv);
7099 mutex_unlock(&power_domains->lock);
7100 }
7101
7102 /* For now, we need the power well to be always enabled. */
7103 intel_display_set_init_power(dev_priv, true);
7104 intel_power_domains_resume(dev_priv);
7105 power_domains->initializing = false;
7106}
7107
7108void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
7109{
7110 intel_runtime_pm_get(dev_priv);
7111}
7112
7113void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
7114{
7115 intel_runtime_pm_put(dev_priv);
7116}
7117
7118void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
7119{
7120 struct drm_device *dev = dev_priv->dev;
7121 struct device *device = &dev->pdev->dev;
7122
7123 if (!HAS_RUNTIME_PM(dev))
7124 return;
7125
7126 pm_runtime_get_sync(device);
7127 WARN(dev_priv->pm.suspended, "Device still suspended.\n");
7128}
7129
7130void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
7131{
7132 struct drm_device *dev = dev_priv->dev;
7133 struct device *device = &dev->pdev->dev;
7134
7135 if (!HAS_RUNTIME_PM(dev))
7136 return;
7137
7138 WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
7139 pm_runtime_get_noresume(device);
7140}
7141
7142void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
7143{
7144 struct drm_device *dev = dev_priv->dev;
7145 struct device *device = &dev->pdev->dev;
7146
7147 if (!HAS_RUNTIME_PM(dev))
7148 return;
7149
7150 pm_runtime_mark_last_busy(device);
7151 pm_runtime_put_autosuspend(device);
7152}
7153
7154void intel_init_runtime_pm(struct drm_i915_private *dev_priv)
7155{
7156 struct drm_device *dev = dev_priv->dev;
7157 struct device *device = &dev->pdev->dev;
7158
7159 if (!HAS_RUNTIME_PM(dev))
7160 return;
7161
7162 pm_runtime_set_active(device);
7163
7164 /*
7165 * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
7166 * requirement.
7167 */
7168 if (!intel_enable_rc6(dev)) {
7169 DRM_INFO("RC6 disabled, disabling runtime PM support\n");
7170 return;
7171 }
7172
7173 pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
7174 pm_runtime_mark_last_busy(device);
7175 pm_runtime_use_autosuspend(device);
7176
7177 pm_runtime_put_autosuspend(device);
7178}
7179
7180void intel_fini_runtime_pm(struct drm_i915_private *dev_priv)
7181{
7182 struct drm_device *dev = dev_priv->dev;
7183 struct device *device = &dev->pdev->dev;
7184
7185 if (!HAS_RUNTIME_PM(dev))
7186 return;
7187
7188 if (!intel_enable_rc6(dev))
7189 return;
7190
7191 /* Make sure we're not suspended first. */
7192 pm_runtime_get_sync(device);
7193 pm_runtime_disable(device);
7194} 7073}
7195 7074
7196/* Set up chip specific power management-related functions */ 7075/* Set up chip specific power management-related functions */
@@ -7198,28 +7077,7 @@ void intel_init_pm(struct drm_device *dev)
7198{ 7077{
7199 struct drm_i915_private *dev_priv = dev->dev_private; 7078 struct drm_i915_private *dev_priv = dev->dev_private;
7200 7079
7201 if (HAS_FBC(dev)) { 7080 intel_init_fbc(dev_priv);
7202 if (INTEL_INFO(dev)->gen >= 7) {
7203 dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
7204 dev_priv->display.enable_fbc = gen7_enable_fbc;
7205 dev_priv->display.disable_fbc = ironlake_disable_fbc;
7206 } else if (INTEL_INFO(dev)->gen >= 5) {
7207 dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
7208 dev_priv->display.enable_fbc = ironlake_enable_fbc;
7209 dev_priv->display.disable_fbc = ironlake_disable_fbc;
7210 } else if (IS_GM45(dev)) {
7211 dev_priv->display.fbc_enabled = g4x_fbc_enabled;
7212 dev_priv->display.enable_fbc = g4x_enable_fbc;
7213 dev_priv->display.disable_fbc = g4x_disable_fbc;
7214 } else {
7215 dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
7216 dev_priv->display.enable_fbc = i8xx_enable_fbc;
7217 dev_priv->display.disable_fbc = i8xx_disable_fbc;
7218
7219 /* This value was pulled out of someone's hat */
7220 I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
7221 }
7222 }
7223 7081
7224 /* For cxsr */ 7082 /* For cxsr */
7225 if (IS_PINEVIEW(dev)) 7083 if (IS_PINEVIEW(dev))
@@ -7228,7 +7086,13 @@ void intel_init_pm(struct drm_device *dev)
7228 i915_ironlake_get_mem_freq(dev); 7086 i915_ironlake_get_mem_freq(dev);
7229 7087
7230 /* For FIFO watermark updates */ 7088 /* For FIFO watermark updates */
7231 if (HAS_PCH_SPLIT(dev)) { 7089 if (INTEL_INFO(dev)->gen >= 9) {
7090 skl_setup_wm_latency(dev);
7091
7092 dev_priv->display.init_clock_gating = gen9_init_clock_gating;
7093 dev_priv->display.update_wm = skl_update_wm;
7094 dev_priv->display.update_sprite_wm = skl_update_sprite_wm;
7095 } else if (HAS_PCH_SPLIT(dev)) {
7232 ilk_setup_wm_latency(dev); 7096 ilk_setup_wm_latency(dev);
7233 7097
7234 if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] && 7098 if ((IS_GEN5(dev) && dev_priv->wm.pri_latency[1] &&
@@ -7309,7 +7173,7 @@ void intel_init_pm(struct drm_device *dev)
7309 } 7173 }
7310} 7174}
7311 7175
7312int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val) 7176int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
7313{ 7177{
7314 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 7178 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
7315 7179
@@ -7319,6 +7183,7 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
7319 } 7183 }
7320 7184
7321 I915_WRITE(GEN6_PCODE_DATA, *val); 7185 I915_WRITE(GEN6_PCODE_DATA, *val);
7186 I915_WRITE(GEN6_PCODE_DATA1, 0);
7322 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox); 7187 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
7323 7188
7324 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 7189 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
@@ -7333,7 +7198,7 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val)
7333 return 0; 7198 return 0;
7334} 7199}
7335 7200
7336int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val) 7201int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val)
7337{ 7202{
7338 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 7203 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
7339 7204
@@ -7356,99 +7221,66 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
7356 return 0; 7221 return 0;
7357} 7222}
7358 7223
7359static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val) 7224static int vlv_gpu_freq_div(unsigned int czclk_freq)
7360{ 7225{
7361 int div; 7226 switch (czclk_freq) {
7362 7227 case 200:
7363 /* 4 x czclk */ 7228 return 10;
7364 switch (dev_priv->mem_freq) { 7229 case 267:
7365 case 800: 7230 return 12;
7366 div = 10; 7231 case 320:
7367 break; 7232 case 333:
7368 case 1066: 7233 return 16;
7369 div = 12; 7234 case 400:
7370 break; 7235 return 20;
7371 case 1333:
7372 div = 16;
7373 break;
7374 default: 7236 default:
7375 return -1; 7237 return -1;
7376 } 7238 }
7239}
7240
7241static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
7242{
7243 int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->mem_freq, 4);
7244
7245 div = vlv_gpu_freq_div(czclk_freq);
7246 if (div < 0)
7247 return div;
7377 7248
7378 return DIV_ROUND_CLOSEST(dev_priv->mem_freq * (val + 6 - 0xbd), 4 * div); 7249 return DIV_ROUND_CLOSEST(czclk_freq * (val + 6 - 0xbd), div);
7379} 7250}
7380 7251
7381static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val) 7252static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val)
7382{ 7253{
7383 int mul; 7254 int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->mem_freq, 4);
7384 7255
7385 /* 4 x czclk */ 7256 mul = vlv_gpu_freq_div(czclk_freq);
7386 switch (dev_priv->mem_freq) { 7257 if (mul < 0)
7387 case 800: 7258 return mul;
7388 mul = 10;
7389 break;
7390 case 1066:
7391 mul = 12;
7392 break;
7393 case 1333:
7394 mul = 16;
7395 break;
7396 default:
7397 return -1;
7398 }
7399 7259
7400 return DIV_ROUND_CLOSEST(4 * mul * val, dev_priv->mem_freq) + 0xbd - 6; 7260 return DIV_ROUND_CLOSEST(mul * val, czclk_freq) + 0xbd - 6;
7401} 7261}
7402 7262
7403static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val) 7263static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val)
7404{ 7264{
7405 int div, freq; 7265 int div, czclk_freq = dev_priv->rps.cz_freq;
7406 7266
7407 switch (dev_priv->rps.cz_freq) { 7267 div = vlv_gpu_freq_div(czclk_freq) / 2;
7408 case 200: 7268 if (div < 0)
7409 div = 5; 7269 return div;
7410 break;
7411 case 267:
7412 div = 6;
7413 break;
7414 case 320:
7415 case 333:
7416 case 400:
7417 div = 8;
7418 break;
7419 default:
7420 return -1;
7421 }
7422
7423 freq = (DIV_ROUND_CLOSEST((dev_priv->rps.cz_freq * val), 2 * div) / 2);
7424 7270
7425 return freq; 7271 return DIV_ROUND_CLOSEST(czclk_freq * val, 2 * div) / 2;
7426} 7272}
7427 7273
7428static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val) 7274static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val)
7429{ 7275{
7430 int mul, opcode; 7276 int mul, czclk_freq = dev_priv->rps.cz_freq;
7431 7277
7432 switch (dev_priv->rps.cz_freq) { 7278 mul = vlv_gpu_freq_div(czclk_freq) / 2;
7433 case 200: 7279 if (mul < 0)
7434 mul = 5; 7280 return mul;
7435 break;
7436 case 267:
7437 mul = 6;
7438 break;
7439 case 320:
7440 case 333:
7441 case 400:
7442 mul = 8;
7443 break;
7444 default:
7445 return -1;
7446 }
7447 7281
7448 /* CHV needs even values */ 7282 /* CHV needs even values */
7449 opcode = (DIV_ROUND_CLOSEST((val * 2 * mul), dev_priv->rps.cz_freq) * 2); 7283 return DIV_ROUND_CLOSEST(val * 2 * mul, czclk_freq) * 2;
7450
7451 return opcode;
7452} 7284}
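A quick numeric check of the refactored helpers above, with an assumed VLV configuration (values chosen for illustration only):

/*
 * Assume mem_freq = 1066: czclk_freq = DIV_ROUND_CLOSEST(1066, 4) = 267,
 * so vlv_gpu_freq_div(267) = 12. Then
 *
 *   byt_gpu_freq(dev_priv, 0xd7)
 *     = DIV_ROUND_CLOSEST(267 * (0xd7 + 6 - 0xbd), 12)
 *     = DIV_ROUND_CLOSEST(267 * 32, 12) = 712 (MHz)
 *
 * and the round trip recovers the opcode:
 *
 *   byt_freq_opcode(dev_priv, 712)
 *     = DIV_ROUND_CLOSEST(12 * 712, 267) + 0xbd - 6 = 32 + 183 = 0xd7.
 */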
7453 7285
7454int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val) 7286int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val)
@@ -7485,5 +7317,4 @@ void intel_pm_setup(struct drm_device *dev)
7485 intel_gen6_powersave_work); 7317 intel_gen6_powersave_work);
7486 7318
7487 dev_priv->pm.suspended = false; 7319 dev_priv->pm.suspended = false;
7488 dev_priv->pm._irqs_disabled = false;
7489} 7320}
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
new file mode 100644
index 000000000000..716b8a961eea
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -0,0 +1,481 @@
1/*
2 * Copyright © 2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24/**
25 * DOC: Panel Self Refresh (PSR/SRD)
26 *
27 * Since Haswell the display controller supports Panel Self-Refresh on
28 * display panels which have a remote frame buffer (RFB) implemented
29 * according to the PSR spec in eDP 1.3. PSR allows the display to go to
30 * lower standby states when the system is idle but the display is on, as
31 * it eliminates display refresh requests to DDR memory completely, as
32 * long as the frame buffer for that display is unchanged.
33 *
34 * Panel Self Refresh must be supported by both Hardware (source) and
35 * Panel (sink).
36 *
37 * PSR saves power by caching the framebuffer in the panel RFB, which allows us
38 * to power down the link and memory controller. For DSI panels the same idea
39 * is called "manual mode".
40 *
41 * The implementation uses the hardware-based PSR support which automatically
42 * enters/exits self-refresh mode. The hardware takes care of sending the
43 * required DP aux message and could even retrain the link (that part isn't
44 * enabled yet though). The hardware also keeps track of any frontbuffer
45 * changes to know when to exit self-refresh mode again. Unfortunately that
46 * part doesn't work too well, which is why the i915 PSR support uses
47 * software frontbuffer tracking to make sure it doesn't miss a screen
48 * update. For this integration intel_psr_invalidate() and intel_psr_flush()
49 * get called by the frontbuffer tracking code. Note that because of locking
50 * issues the self-refresh re-enable code is done from a work queue, which
51 * must be correctly synchronized/cancelled when shutting down the pipe.
52 */
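A minimal sketch of the integration described above, seen from the caller's side -- the wrapper is hypothetical, but intel_psr_invalidate() and intel_psr_flush() are the real entry points added below:

static void example_frontbuffer_cpu_write(struct drm_device *dev,
					  unsigned frontbuffer_bits)
{
	/* rendering starts: PSR must exit so the panel picks up the update */
	intel_psr_invalidate(dev, frontbuffer_bits);

	/* ... CPU writes to the frontbuffer land here ... */

	/* writes flushed to memory: PSR may be re-enabled via delayed work */
	intel_psr_flush(dev, frontbuffer_bits);
}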
53
54#include <drm/drmP.h>
55
56#include "intel_drv.h"
57#include "i915_drv.h"
58
59static bool is_edp_psr(struct intel_dp *intel_dp)
60{
61 return intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
62}
63
64bool intel_psr_is_enabled(struct drm_device *dev)
65{
66 struct drm_i915_private *dev_priv = dev->dev_private;
67
68 if (!HAS_PSR(dev))
69 return false;
70
71 return I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE;
72}
73
74static void intel_psr_write_vsc(struct intel_dp *intel_dp,
75 struct edp_vsc_psr *vsc_psr)
76{
77 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
78 struct drm_device *dev = dig_port->base.base.dev;
79 struct drm_i915_private *dev_priv = dev->dev_private;
80 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
81 u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder);
82 u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder);
83 uint32_t *data = (uint32_t *) vsc_psr;
84 unsigned int i;
85
86 /* As per BSpec (Pipe Video Data Island Packet), the video DIP being
87 updated must be disabled before the video DIP data buffer registers
88 for that DIP are programmed. */
89 I915_WRITE(ctl_reg, 0);
90 POSTING_READ(ctl_reg);
91
92 for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) {
93 if (i < sizeof(struct edp_vsc_psr))
94 I915_WRITE(data_reg + i, *data++);
95 else
96 I915_WRITE(data_reg + i, 0);
97 }
98
99 I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
100 POSTING_READ(ctl_reg);
101}
102
103static void intel_psr_setup_vsc(struct intel_dp *intel_dp)
104{
105 struct edp_vsc_psr psr_vsc;
106
107 /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
108 memset(&psr_vsc, 0, sizeof(psr_vsc));
109 psr_vsc.sdp_header.HB0 = 0;
110 psr_vsc.sdp_header.HB1 = 0x7;
111 psr_vsc.sdp_header.HB2 = 0x2;
112 psr_vsc.sdp_header.HB3 = 0x8;
113 intel_psr_write_vsc(intel_dp, &psr_vsc);
114}
115
116static void intel_psr_enable_sink(struct intel_dp *intel_dp)
117{
118 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
119 struct drm_device *dev = dig_port->base.base.dev;
120 struct drm_i915_private *dev_priv = dev->dev_private;
121 uint32_t aux_clock_divider;
122 int precharge = 0x3;
123 bool only_standby = false;
124 static const uint8_t aux_msg[] = {
125 [0] = DP_AUX_NATIVE_WRITE << 4,
126 [1] = DP_SET_POWER >> 8,
127 [2] = DP_SET_POWER & 0xff,
128 [3] = 1 - 1,
129 [4] = DP_SET_POWER_D0,
130 };
131 int i;
132
133 BUILD_BUG_ON(sizeof(aux_msg) > 20);
134
135 aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
136
137 if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
138 only_standby = true;
139
140 /* Enable PSR in sink */
141 if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby)
142 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
143 DP_PSR_ENABLE & ~DP_PSR_MAIN_LINK_ACTIVE);
144 else
145 drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG,
146 DP_PSR_ENABLE | DP_PSR_MAIN_LINK_ACTIVE);
147
148 /* Setup AUX registers */
149 for (i = 0; i < sizeof(aux_msg); i += 4)
150 I915_WRITE(EDP_PSR_AUX_DATA1(dev) + i,
151 intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
152
153 I915_WRITE(EDP_PSR_AUX_CTL(dev),
154 DP_AUX_CH_CTL_TIME_OUT_400us |
155 (sizeof(aux_msg) << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
156 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
157 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
158}
159
160static void intel_psr_enable_source(struct intel_dp *intel_dp)
161{
162 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
163 struct drm_device *dev = dig_port->base.base.dev;
164 struct drm_i915_private *dev_priv = dev->dev_private;
165 uint32_t max_sleep_time = 0x1f;
166 uint32_t idle_frames = 1;
167 uint32_t val = 0x0;
168 const uint32_t link_entry_time = EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES;
169 bool only_standby = false;
170
171 if (IS_BROADWELL(dev) && dig_port->port != PORT_A)
172 only_standby = true;
173
174 if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT || only_standby) {
175 val |= EDP_PSR_LINK_STANDBY;
176 val |= EDP_PSR_TP2_TP3_TIME_0us;
177 val |= EDP_PSR_TP1_TIME_0us;
178 val |= EDP_PSR_SKIP_AUX_EXIT;
179 val |= IS_BROADWELL(dev) ? BDW_PSR_SINGLE_FRAME : 0;
180 } else
181 val |= EDP_PSR_LINK_DISABLE;
182
183 I915_WRITE(EDP_PSR_CTL(dev), val |
184 (IS_BROADWELL(dev) ? 0 : link_entry_time) |
185 max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
186 idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
187 EDP_PSR_ENABLE);
188}
189
190static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
191{
192 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
193 struct drm_device *dev = dig_port->base.base.dev;
194 struct drm_i915_private *dev_priv = dev->dev_private;
195 struct drm_crtc *crtc = dig_port->base.base.crtc;
196 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
197
198 lockdep_assert_held(&dev_priv->psr.lock);
199 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
200 WARN_ON(!drm_modeset_is_locked(&crtc->mutex));
201
202 dev_priv->psr.source_ok = false;
203
204 if (IS_HASWELL(dev) && dig_port->port != PORT_A) {
205 DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
206 return false;
207 }
208
209 if (!i915.enable_psr) {
210 DRM_DEBUG_KMS("PSR disabled by flag\n");
211 return false;
212 }
213
214 /* The limitations below don't apply to Broadwell */
215 if (IS_BROADWELL(dev))
216 goto out;
217
218 if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
219 S3D_ENABLE) {
220 DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
221 return false;
222 }
223
224 if (intel_crtc->config.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
225 DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
226 return false;
227 }
228
229 out:
230 dev_priv->psr.source_ok = true;
231 return true;
232}
233
234static void intel_psr_do_enable(struct intel_dp *intel_dp)
235{
236 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
237 struct drm_device *dev = intel_dig_port->base.base.dev;
238 struct drm_i915_private *dev_priv = dev->dev_private;
239
240 WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
241 WARN_ON(dev_priv->psr.active);
242 lockdep_assert_held(&dev_priv->psr.lock);
243
244 /* Enable/Re-enable PSR on the host */
245 intel_psr_enable_source(intel_dp);
246
247 dev_priv->psr.active = true;
248}
249
250/**
251 * intel_psr_enable - Enable PSR
252 * @intel_dp: Intel DP
253 *
254 * This function can only be called after the pipe is fully trained and enabled.
255 */
256void intel_psr_enable(struct intel_dp *intel_dp)
257{
258 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
259 struct drm_device *dev = intel_dig_port->base.base.dev;
260 struct drm_i915_private *dev_priv = dev->dev_private;
261
262 if (!HAS_PSR(dev)) {
263 DRM_DEBUG_KMS("PSR not supported on this platform\n");
264 return;
265 }
266
267 if (!is_edp_psr(intel_dp)) {
268 DRM_DEBUG_KMS("PSR not supported by this panel\n");
269 return;
270 }
271
272 mutex_lock(&dev_priv->psr.lock);
273 if (dev_priv->psr.enabled) {
274 DRM_DEBUG_KMS("PSR already in use\n");
275 goto unlock;
276 }
277
278 if (!intel_psr_match_conditions(intel_dp))
279 goto unlock;
280
281 dev_priv->psr.busy_frontbuffer_bits = 0;
282
283 intel_psr_setup_vsc(intel_dp);
284
285 /* Avoid continuous PSR exit by masking memup and hpd */
286 I915_WRITE(EDP_PSR_DEBUG_CTL(dev), EDP_PSR_DEBUG_MASK_MEMUP |
287 EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
288
289 /* Enable PSR on the panel */
290 intel_psr_enable_sink(intel_dp);
291
292 dev_priv->psr.enabled = intel_dp;
293unlock:
294 mutex_unlock(&dev_priv->psr.lock);
295}
296
297/**
298 * intel_psr_disable - Disable PSR
299 * @intel_dp: Intel DP
300 *
301 * This function needs to be called before disabling pipe.
302 */
303void intel_psr_disable(struct intel_dp *intel_dp)
304{
305 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
306 struct drm_device *dev = intel_dig_port->base.base.dev;
307 struct drm_i915_private *dev_priv = dev->dev_private;
308
309 mutex_lock(&dev_priv->psr.lock);
310 if (!dev_priv->psr.enabled) {
311 mutex_unlock(&dev_priv->psr.lock);
312 return;
313 }
314
315 if (dev_priv->psr.active) {
316 I915_WRITE(EDP_PSR_CTL(dev),
317 I915_READ(EDP_PSR_CTL(dev)) & ~EDP_PSR_ENABLE);
318
319 /* Wait till PSR is idle */
320 if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev)) &
321 EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
322 DRM_ERROR("Timed out waiting for PSR Idle State\n");
323
324 dev_priv->psr.active = false;
325 } else {
326 WARN_ON(I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE);
327 }
328
329 dev_priv->psr.enabled = NULL;
330 mutex_unlock(&dev_priv->psr.lock);
331
332 cancel_delayed_work_sync(&dev_priv->psr.work);
333}
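Since intel_psr_enable() requires a fully trained pipe and intel_psr_disable() must precede pipe teardown, the modeset ordering looks roughly like this (illustrative sketch, not part of this file):

static void example_modeset_ordering(struct intel_dp *intel_dp)
{
	/* enable path: pipe and link fully trained first, PSR last */
	intel_psr_enable(intel_dp);

	/* ... display active, frontbuffer tracking drives PSR ... */

	/* disable path: PSR must be torn down before the pipe */
	intel_psr_disable(intel_dp);
}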
334
335static void intel_psr_work(struct work_struct *work)
336{
337 struct drm_i915_private *dev_priv =
338 container_of(work, typeof(*dev_priv), psr.work.work);
339 struct intel_dp *intel_dp = dev_priv->psr.enabled;
340
341 /* We have to make sure PSR is ready for re-enable,
342 * otherwise it stays disabled until the next full enable/disable cycle.
343 * PSR might take some time to get fully disabled
344 * and be ready for re-enable.
345 */
346 if (wait_for((I915_READ(EDP_PSR_STATUS_CTL(dev_priv->dev)) &
347 EDP_PSR_STATUS_STATE_MASK) == 0, 50)) {
348 DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
349 return;
350 }
351
352 mutex_lock(&dev_priv->psr.lock);
353 intel_dp = dev_priv->psr.enabled;
354
355 if (!intel_dp)
356 goto unlock;
357
358 /*
359 * The delayed work can race with an invalidate hence we need to
360 * recheck. Since psr_flush first clears this and then reschedules we
361 * won't ever miss a flush when bailing out here.
362 */
363 if (dev_priv->psr.busy_frontbuffer_bits)
364 goto unlock;
365
366 intel_psr_do_enable(intel_dp);
367unlock:
368 mutex_unlock(&dev_priv->psr.lock);
369}
370
371static void intel_psr_exit(struct drm_device *dev)
372{
373 struct drm_i915_private *dev_priv = dev->dev_private;
374
375 if (dev_priv->psr.active) {
376 u32 val = I915_READ(EDP_PSR_CTL(dev));
377
378 WARN_ON(!(val & EDP_PSR_ENABLE));
379
380 I915_WRITE(EDP_PSR_CTL(dev), val & ~EDP_PSR_ENABLE);
381
382 dev_priv->psr.active = false;
383 }
384
385}
386
387/**
388 * intel_psr_invalidate - Invalidate PSR
389 * @dev: DRM device
390 * @frontbuffer_bits: frontbuffer plane tracking bits
391 *
392 * Since the hardware frontbuffer tracking has gaps we need to integrate
393 * with the software frontbuffer tracking. This function gets called every
394 * time frontbuffer rendering starts and a buffer gets dirtied. PSR must be
395 * disabled if the frontbuffer mask contains a buffer relevant to PSR.
396 *
397 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
398 */
399void intel_psr_invalidate(struct drm_device *dev,
400 unsigned frontbuffer_bits)
401{
402 struct drm_i915_private *dev_priv = dev->dev_private;
403 struct drm_crtc *crtc;
404 enum pipe pipe;
405
406 mutex_lock(&dev_priv->psr.lock);
407 if (!dev_priv->psr.enabled) {
408 mutex_unlock(&dev_priv->psr.lock);
409 return;
410 }
411
412 crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
413 pipe = to_intel_crtc(crtc)->pipe;
414
415 intel_psr_exit(dev);
416
417 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
418
419 dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
420 mutex_unlock(&dev_priv->psr.lock);
421}
422
423/**
424 * intel_psr_flush - Flush PSR
425 * @dev: DRM device
426 * @frontbuffer_bits: frontbuffer plane tracking bits
427 *
428 * Since the hardware frontbuffer tracking has gaps we need to integrate
429 * with the software frontbuffer tracking. This function gets called every
430 * time frontbuffer rendering has completed and flushed out to memory. PSR
431 * can be enabled again if no other frontbuffer relevant to PSR is dirty.
432 *
433 * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
434 */
435void intel_psr_flush(struct drm_device *dev,
436 unsigned frontbuffer_bits)
437{
438 struct drm_i915_private *dev_priv = dev->dev_private;
439 struct drm_crtc *crtc;
440 enum pipe pipe;
441
442 mutex_lock(&dev_priv->psr.lock);
443 if (!dev_priv->psr.enabled) {
444 mutex_unlock(&dev_priv->psr.lock);
445 return;
446 }
447
448 crtc = dp_to_dig_port(dev_priv->psr.enabled)->base.base.crtc;
449 pipe = to_intel_crtc(crtc)->pipe;
450 dev_priv->psr.busy_frontbuffer_bits &= ~frontbuffer_bits;
451
452 /*
453 * On Haswell, sprite plane updates don't result in a PSR invalidating
454 * signal in the hardware, which means we need to manually fake this in
455 * software for all flushes, not just when we've seen a preceding
456 * invalidation through frontbuffer rendering.
457 */
458 if (IS_HASWELL(dev) &&
459 (frontbuffer_bits & INTEL_FRONTBUFFER_SPRITE(pipe)))
460 intel_psr_exit(dev);
461
462 if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
463 schedule_delayed_work(&dev_priv->psr.work,
464 msecs_to_jiffies(100));
465 mutex_unlock(&dev_priv->psr.lock);
466}
467
468/**
469 * intel_psr_init - Init basic PSR work and mutex.
470 * @dev: DRM device
471 *
472 * This function is called only once at driver load to initialize basic
473 * PSR stuff.
474 */
475void intel_psr_init(struct drm_device *dev)
476{
477 struct drm_i915_private *dev_priv = dev->dev_private;
478
479 INIT_DELAYED_WORK(&dev_priv->psr.work, intel_psr_work);
480 mutex_init(&dev_priv->psr.lock);
481}
diff --git a/drivers/gpu/drm/i915/intel_renderstate.h b/drivers/gpu/drm/i915/intel_renderstate.h
index 6c792d3a9c9c..5bd69852752c 100644
--- a/drivers/gpu/drm/i915/intel_renderstate.h
+++ b/drivers/gpu/drm/i915/intel_renderstate.h
@@ -29,6 +29,7 @@
29extern const struct intel_renderstate_rodata gen6_null_state; 29extern const struct intel_renderstate_rodata gen6_null_state;
30extern const struct intel_renderstate_rodata gen7_null_state; 30extern const struct intel_renderstate_rodata gen7_null_state;
31extern const struct intel_renderstate_rodata gen8_null_state; 31extern const struct intel_renderstate_rodata gen8_null_state;
32extern const struct intel_renderstate_rodata gen9_null_state;
32 33
33#define RO_RENDERSTATE(_g) \ 34#define RO_RENDERSTATE(_g) \
34 const struct intel_renderstate_rodata gen ## _g ## _null_state = { \ 35 const struct intel_renderstate_rodata gen ## _g ## _null_state = { \
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen8.c b/drivers/gpu/drm/i915/intel_renderstate_gen8.c
index 75ef1b5de45c..78011d73fa9f 100644
--- a/drivers/gpu/drm/i915/intel_renderstate_gen8.c
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen8.c
@@ -1,16 +1,134 @@
1#include "intel_renderstate.h" 1#include "intel_renderstate.h"
2 2
3static const u32 gen8_null_state_relocs[] = { 3static const u32 gen8_null_state_relocs[] = {
4 0x00000048, 4 0x00000798,
5 0x00000050, 5 0x000007a4,
6 0x00000060, 6 0x000007ac,
7 0x000003ec, 7 0x000007bc,
8 -1, 8 -1,
9}; 9};
10 10
11static const u32 gen8_null_state_batch[] = { 11static const u32 gen8_null_state_batch[] = {
12 0x7a000004,
13 0x01000000,
14 0x00000000,
15 0x00000000,
16 0x00000000,
17 0x00000000,
12 0x69040000, 18 0x69040000,
13 0x61020001, 19 0x78140000,
20 0x04000000,
21 0x7820000a,
22 0x00000000,
23 0x00000000,
24 0x80000000,
25 0x00000000,
26 0x00000000,
27 0x00000000,
28 0x00000000,
29 0x00000000,
30 0x00000000,
31 0x00000000,
32 0x00000000,
33 0x78130002,
34 0x00000000,
35 0x00000000,
36 0x02001808,
37 0x781f0002,
38 0x00000000,
39 0x00000000,
40 0x00000000,
41 0x78510009,
42 0x00000000,
43 0x00000000,
44 0x00000000,
45 0x00000000,
46 0x00000000,
47 0x00000000,
48 0x00000000,
49 0x00000000,
50 0x00000000,
51 0x00000000,
52 0x78100007,
53 0x00000000,
54 0x00000000,
55 0x00010000,
56 0x00000000,
57 0x00000000,
58 0x00000000,
59 0x00000000,
60 0x00000000,
61 0x781b0007,
62 0x00000000,
63 0x00000000,
64 0x00000000,
65 0x00000000,
66 0x00000000,
67 0x00000000,
68 0x00000800,
69 0x00000000,
70 0x78110008,
71 0x00000000,
72 0x00000000,
73 0x00000000,
74 0x00000000,
75 0x00000000,
76 0x00000000,
77 0x00000000,
78 0x00000000,
79 0x00000000,
80 0x781e0003,
81 0x00000000,
82 0x00000000,
83 0x00000000,
84 0x00000000,
85 0x781d0007,
86 0x00000000,
87 0x00000000,
88 0x00000000,
89 0x00000000,
90 0x00000000,
91 0x00000000,
92 0x00000000,
93 0x00000000,
94 0x78120002,
95 0x00000000,
96 0x00000000,
97 0x00000000,
98 0x78500003,
99 0x00000000,
100 0x00000000,
101 0x00000000,
102 0x00000000,
103 0x781c0002,
104 0x00000000,
105 0x00000000,
106 0x00000000,
107 0x780c0000,
108 0x00000000,
109 0x78520003,
110 0x00000000,
111 0x00000000,
112 0x00000000,
113 0x00000000,
114 0x78300000,
115 0x08010040,
116 0x78310000,
117 0x1e000000,
118 0x78320000,
119 0x1e000000,
120 0x78330000,
121 0x1e000000,
122 0x79190002,
123 0x00000000,
124 0x00000000,
125 0x00000000,
126 0x791a0002,
127 0x00000000,
128 0x00000000,
129 0x00000000,
130 0x791b0002,
131 0x00000000,
14 0x00000000, 132 0x00000000,
15 0x00000000, 133 0x00000000,
16 0x79120000, 134 0x79120000,
@@ -23,48 +141,435 @@ static const u32 gen8_null_state_batch[] = {
23 0x00000000, 141 0x00000000,
24 0x79160000, 142 0x79160000,
25 0x00000000, 143 0x00000000,
26 0x6101000e, 144 0x78150009,
27 0x00000001,
28 0x00000000, 145 0x00000000,
29 0x00000001, 146 0x00000000,
147 0x00000000,
148 0x00000000,
149 0x00000000,
150 0x00000000,
151 0x00000000,
152 0x00000000,
153 0x00000000,
154 0x00000000,
155 0x78190009,
156 0x00000000,
157 0x00000000,
158 0x00000000,
159 0x00000000,
160 0x00000000,
161 0x00000000,
162 0x00000000,
163 0x00000000,
164 0x00000000,
165 0x00000000,
166 0x781a0009,
167 0x00000000,
168 0x00000000,
169 0x00000000,
170 0x00000000,
171 0x00000000,
172 0x00000000,
173 0x00000000,
174 0x00000000,
175 0x00000000,
176 0x00000000,
177 0x78160009,
178 0x00000000,
179 0x00000000,
180 0x00000000,
181 0x00000000,
182 0x00000000,
183 0x00000000,
184 0x00000000,
185 0x00000000,
186 0x00000000,
187 0x00000000,
188 0x78170009,
189 0x00000000,
190 0x00000000,
191 0x00000000,
192 0x00000000,
193 0x00000000,
194 0x00000000,
195 0x00000000,
196 0x00000000,
197 0x00000000,
198 0x00000000,
199 0x78490001,
200 0x00000000,
201 0x00000000,
202 0x784a0000,
203 0x00000000,
204 0x784b0000,
205 0x00000004,
206 0x79170101,
207 0x00000000,
208 0x00000080,
209 0x00000000,
210 0x00000000,
211 0x00000000,
212 0x00000000,
213 0x00000000,
214 0x00000000,
215 0x00000000,
216 0x00000000,
217 0x00000000,
218 0x00000000,
219 0x00000000,
220 0x00000000,
221 0x00000000,
222 0x00000000,
223 0x00000000,
224 0x00000000,
225 0x00000000,
226 0x00000000,
227 0x00000000,
228 0x00000000,
229 0x00000000,
230 0x00000000,
231 0x00000000,
232 0x00000000,
233 0x00000000,
234 0x00000000,
235 0x00000000,
236 0x00000000,
237 0x00000000,
238 0x00000000,
239 0x00000000,
240 0x00000000,
241 0x00000000,
242 0x00000000,
243 0x00000000,
244 0x00000000,
245 0x00000000,
246 0x00000000,
247 0x00000000,
248 0x00000000,
249 0x00000000,
250 0x00000000,
251 0x00000000,
252 0x00000000,
253 0x00000000,
254 0x00000000,
255 0x00000000,
256 0x00000000,
257 0x00000000,
258 0x00000000,
259 0x00000000,
260 0x00000000,
261 0x00000000,
262 0x00000000,
263 0x00000000,
264 0x00000000,
265 0x00000000,
266 0x00000000,
267 0x00000000,
268 0x00000000,
269 0x00000000,
270 0x00000000,
271 0x00000000,
272 0x00000000,
273 0x00000000,
274 0x00000000,
275 0x00000000,
276 0x00000000,
277 0x00000000,
278 0x00000000,
279 0x00000000,
280 0x00000000,
281 0x00000000,
282 0x00000000,
283 0x00000000,
284 0x00000000,
285 0x00000000,
286 0x00000000,
287 0x00000000,
288 0x00000000,
289 0x00000000,
290 0x00000000,
291 0x00000000,
292 0x00000000,
293 0x00000000,
294 0x00000000,
295 0x00000000,
296 0x00000000,
297 0x00000000,
298 0x00000000,
299 0x00000000,
300 0x00000000,
301 0x00000000,
302 0x00000000,
303 0x00000000,
304 0x00000000,
305 0x00000000,
306 0x00000000,
307 0x00000000,
308 0x00000000,
309 0x00000000,
310 0x00000000,
311 0x00000000,
312 0x00000000,
313 0x00000000,
314 0x00000000,
315 0x00000000,
316 0x00000000,
317 0x00000000,
318 0x00000000,
319 0x00000000,
320 0x00000000,
321 0x00000000,
322 0x00000000,
323 0x00000000,
324 0x00000000,
325 0x00000000,
326 0x00000000,
327 0x00000000,
328 0x00000000,
329 0x00000000,
330 0x00000000,
331 0x00000000,
332 0x00000000,
333 0x00000000,
334 0x00000000,
335 0x00000000,
336 0x00000000,
337 0x00000000,
338 0x00000000,
339 0x00000000,
340 0x00000000,
341 0x00000000,
342 0x00000000,
343 0x00000000,
344 0x00000000,
345 0x00000000,
346 0x00000000,
347 0x00000000,
348 0x00000000,
349 0x00000000,
350 0x00000000,
351 0x00000000,
352 0x00000000,
353 0x00000000,
354 0x00000000,
355 0x00000000,
356 0x00000000,
357 0x00000000,
358 0x00000000,
359 0x00000000,
360 0x00000000,
361 0x00000000,
362 0x00000000,
363 0x00000000,
364 0x00000000,
365 0x00000000,
366 0x00000000,
367 0x00000000,
368 0x00000000,
369 0x00000000,
370 0x00000000,
371 0x00000000,
372 0x00000000,
373 0x00000000,
374 0x00000000,
375 0x00000000,
376 0x00000000,
377 0x00000000,
378 0x00000000,
379 0x00000000,
380 0x00000000,
381 0x00000000,
382 0x00000000,
383 0x00000000,
384 0x00000000,
385 0x00000000,
386 0x00000000,
387 0x00000000,
388 0x00000000,
389 0x00000000,
390 0x00000000,
391 0x00000000,
392 0x00000000,
393 0x00000000,
394 0x00000000,
395 0x00000000,
396 0x00000000,
397 0x00000000,
398 0x00000000,
399 0x00000000,
400 0x00000000,
401 0x00000000,
402 0x00000000,
403 0x00000000,
404 0x00000000,
405 0x00000000,
406 0x00000000,
407 0x00000000,
408 0x00000000,
409 0x00000000,
410 0x00000000,
411 0x00000000,
412 0x00000000,
413 0x00000000,
414 0x00000000,
415 0x00000000,
416 0x00000000,
417 0x00000000,
418 0x00000000,
419 0x00000000,
420 0x00000000,
421 0x00000000,
422 0x00000000,
423 0x00000000,
424 0x00000000,
425 0x00000000,
426 0x00000000,
427 0x00000000,
428 0x00000000,
429 0x00000000,
430 0x00000000,
431 0x00000000,
432 0x00000000,
433 0x00000000,
434 0x00000000,
435 0x00000000,
436 0x00000000,
437 0x00000000,
438 0x00000000,
439 0x00000000,
440 0x00000000,
441 0x00000000,
442 0x00000000,
443 0x00000000,
444 0x00000000,
445 0x00000000,
446 0x00000000,
447 0x00000000,
448 0x00000000,
449 0x00000000,
450 0x00000000,
451 0x00000000,
452 0x00000000,
453 0x00000000,
454 0x00000000,
455 0x00000000,
456 0x00000000,
457 0x00000000,
458 0x00000000,
459 0x00000000,
460 0x00000000,
461 0x00000000,
462 0x00000000,
463 0x00000000,
464 0x00000000,
465 0x79180006,
466 0x00000000,
467 0x00000000,
468 0x00000000,
469 0x00000000,
470 0x00000000,
471 0x00000000,
472 0x00000000,
473 0x79180006,
474 0x20000000,
475 0x00000000,
476 0x00000000,
477 0x00000000,
478 0x00000000,
479 0x00000000,
480 0x00000000,
481 0x79180006,
482 0x40000000,
483 0x00000000,
484 0x00000000,
485 0x00000000,
486 0x00000000,
487 0x00000000,
488 0x00000000,
489 0x79180006,
490 0x60000000,
491 0x00000000,
492 0x00000000,
493 0x00000000,
494 0x00000000,
495 0x00000000,
496 0x00000000,
497 0x6101000e,
30 0x00000001, /* reloc */ 498 0x00000001, /* reloc */
31 0x00000000, 499 0x00000000,
500 0x00000000,
32 0x00000001, /* reloc */ 501 0x00000001, /* reloc */
33 0x00000000, 502 0x00000000,
503 0x00000001, /* reloc */
34 0x00000000, 504 0x00000000,
505 0x00000001,
35 0x00000000, 506 0x00000000,
36 0x00000001, /* reloc */ 507 0x00000001, /* reloc */
37 0x00000000, 508 0x00000000,
38 0xfffff001,
39 0x00001001, 509 0x00001001,
40 0xfffff001,
41 0x00001001, 510 0x00001001,
42 0x78230000, 511 0x00000001,
43 0x000006e0, 512 0x00001001,
44 0x78210000, 513 0x61020001,
45 0x00000700, 514 0x00000000,
46 0x78300000, 515 0x00000000,
47 0x08010040, 516 0x79000002,
48 0x78330000, 517 0x00000000,
49 0x08000000, 518 0x00000000,
50 0x78310000, 519 0x00000000,
51 0x08000000, 520 0x78050006,
52 0x78320000, 521 0x00000000,
53 0x08000000, 522 0x00000000,
54 0x78240000, 523 0x00000000,
55 0x00000641, 524 0x00000000,
56 0x780e0000, 525 0x00000000,
57 0x00000601, 526 0x00000000,
527 0x00000000,
528 0x79040002,
529 0x00000000,
530 0x00000000,
531 0x00000000,
532 0x79040002,
533 0x40000000,
534 0x00000000,
535 0x00000000,
536 0x79040002,
537 0x80000000,
538 0x00000000,
539 0x00000000,
540 0x79040002,
541 0xc0000000,
542 0x00000000,
543 0x00000000,
544 0x79080001,
545 0x00000000,
546 0x00000000,
547 0x790a0001,
548 0x00000000,
549 0x00000000,
550 0x78060003,
551 0x00000000,
552 0x00000000,
553 0x00000000,
554 0x00000000,
555 0x78070003,
556 0x00000000,
557 0x00000000,
558 0x00000000,
559 0x00000000,
560 0x78040001,
561 0x00000000,
562 0x00000000,
563 0x79110000,
564 0x00000000,
58 0x780d0000, 565 0x780d0000,
59 0x00000000, 566 0x00000000,
60 0x78180000, 567 0x79060000,
61 0x00000001,
62 0x78520003,
63 0x00000000, 568 0x00000000,
569 0x7907001f,
64 0x00000000, 570 0x00000000,
65 0x00000000, 571 0x00000000,
66 0x00000000, 572 0x00000000,
67 0x78190009,
68 0x00000000, 573 0x00000000,
69 0x00000000, 574 0x00000000,
70 0x00000000, 575 0x00000000,
@@ -75,7 +580,6 @@ static const u32 gen8_null_state_batch[] = {
75 0x00000000, 580 0x00000000,
76 0x00000000, 581 0x00000000,
77 0x00000000, 582 0x00000000,
78 0x781b0007,
79 0x00000000, 583 0x00000000,
80 0x00000000, 584 0x00000000,
81 0x00000000, 585 0x00000000,
@@ -84,26 +588,22 @@ static const u32 gen8_null_state_batch[] = {
84 0x00000000, 588 0x00000000,
85 0x00000000, 589 0x00000000,
86 0x00000000, 590 0x00000000,
87 0x78270000,
88 0x00000000, 591 0x00000000,
89 0x782c0000,
90 0x00000000, 592 0x00000000,
91 0x781c0002,
92 0x00000000, 593 0x00000000,
93 0x00000000, 594 0x00000000,
94 0x00000000, 595 0x00000000,
95 0x78160009,
96 0x00000000, 596 0x00000000,
97 0x00000000, 597 0x00000000,
98 0x00000000, 598 0x00000000,
99 0x00000000, 599 0x00000000,
100 0x00000000, 600 0x00000000,
101 0x00000000, 601 0x00000000,
602 0x7902000f,
102 0x00000000, 603 0x00000000,
103 0x00000000, 604 0x00000000,
104 0x00000000, 605 0x00000000,
105 0x00000000, 606 0x00000000,
106 0x78110008,
107 0x00000000, 607 0x00000000,
108 0x00000000, 608 0x00000000,
109 0x00000000, 609 0x00000000,
@@ -113,12 +613,10 @@ static const u32 gen8_null_state_batch[] = {
113 0x00000000, 613 0x00000000,
114 0x00000000, 614 0x00000000,
115 0x00000000, 615 0x00000000,
116 0x78290000,
117 0x00000000, 616 0x00000000,
118 0x782e0000,
119 0x00000000, 617 0x00000000,
120 0x781a0009,
121 0x00000000, 618 0x00000000,
619 0x790c000f,
122 0x00000000, 620 0x00000000,
123 0x00000000, 621 0x00000000,
124 0x00000000, 622 0x00000000,
@@ -128,7 +626,6 @@ static const u32 gen8_null_state_batch[] = {
128 0x00000000, 626 0x00000000,
129 0x00000000, 627 0x00000000,
130 0x00000000, 628 0x00000000,
131 0x781d0007,
132 0x00000000, 629 0x00000000,
133 0x00000000, 630 0x00000000,
134 0x00000000, 631 0x00000000,
@@ -136,153 +633,153 @@ static const u32 gen8_null_state_batch[] = {
136 0x00000000, 633 0x00000000,
137 0x00000000, 634 0x00000000,
138 0x00000000, 635 0x00000000,
636 0x780a0003,
139 0x00000000, 637 0x00000000,
140 0x78280000,
141 0x00000000, 638 0x00000000,
142 0x782d0000,
143 0x00000000, 639 0x00000000,
144 0x78260000,
145 0x00000000, 640 0x00000000,
146 0x782b0000, 641 0x78080083,
642 0x00004000,
147 0x00000000, 643 0x00000000,
148 0x78150009,
149 0x00000000, 644 0x00000000,
150 0x00000000, 645 0x00000000,
646 0x04004000,
151 0x00000000, 647 0x00000000,
152 0x00000000, 648 0x00000000,
153 0x00000000, 649 0x00000000,
650 0x08004000,
154 0x00000000, 651 0x00000000,
155 0x00000000, 652 0x00000000,
156 0x00000000, 653 0x00000000,
654 0x0c004000,
157 0x00000000, 655 0x00000000,
158 0x00000000, 656 0x00000000,
159 0x78100007,
160 0x00000000, 657 0x00000000,
658 0x10004000,
161 0x00000000, 659 0x00000000,
162 0x00000000, 660 0x00000000,
163 0x00000000, 661 0x00000000,
662 0x14004000,
164 0x00000000, 663 0x00000000,
165 0x00000000, 664 0x00000000,
166 0x00000000, 665 0x00000000,
666 0x18004000,
167 0x00000000, 667 0x00000000,
168 0x781e0003,
169 0x00000000, 668 0x00000000,
170 0x00000000, 669 0x00000000,
670 0x1c004000,
171 0x00000000, 671 0x00000000,
172 0x00000000, 672 0x00000000,
173 0x78120002,
174 0x00000000, 673 0x00000000,
674 0x20004000,
175 0x00000000, 675 0x00000000,
176 0x00000000, 676 0x00000000,
177 0x781f0002,
178 0x30400820,
179 0x00000000, 677 0x00000000,
678 0x24004000,
180 0x00000000, 679 0x00000000,
181 0x78510009,
182 0x00000000, 680 0x00000000,
183 0x00000000, 681 0x00000000,
682 0x28004000,
184 0x00000000, 683 0x00000000,
185 0x00000000, 684 0x00000000,
186 0x00000000, 685 0x00000000,
686 0x2c004000,
187 0x00000000, 687 0x00000000,
188 0x00000000, 688 0x00000000,
189 0x00000000, 689 0x00000000,
690 0x30004000,
190 0x00000000, 691 0x00000000,
191 0x00000000, 692 0x00000000,
192 0x78500003,
193 0x00210000,
194 0x00000000, 693 0x00000000,
694 0x34004000,
195 0x00000000, 695 0x00000000,
196 0x00000000, 696 0x00000000,
197 0x78130002,
198 0x00000000, 697 0x00000000,
698 0x38004000,
199 0x00000000, 699 0x00000000,
200 0x00000000, 700 0x00000000,
201 0x782a0000,
202 0x00000480,
203 0x782f0000,
204 0x00000540,
205 0x78140000,
206 0x00000800,
207 0x78170009,
208 0x00000000, 701 0x00000000,
702 0x3c004000,
209 0x00000000, 703 0x00000000,
210 0x00000000, 704 0x00000000,
211 0x00000000, 705 0x00000000,
706 0x40004000,
212 0x00000000, 707 0x00000000,
213 0x00000000, 708 0x00000000,
214 0x00000000, 709 0x00000000,
710 0x44004000,
215 0x00000000, 711 0x00000000,
216 0x00000000, 712 0x00000000,
217 0x00000000, 713 0x00000000,
218 0x7820000a, 714 0x48004000,
219 0x00000580,
220 0x00000000, 715 0x00000000,
221 0x08080000,
222 0x00000000, 716 0x00000000,
223 0x00000000, 717 0x00000000,
224 0x1f000002, 718 0x4c004000,
225 0x00060000,
226 0x00000000, 719 0x00000000,
227 0x00000000, 720 0x00000000,
228 0x00000000, 721 0x00000000,
722 0x50004000,
229 0x00000000, 723 0x00000000,
230 0x784d0000,
231 0x40000000,
232 0x784f0000,
233 0x80000100,
234 0x780f0000,
235 0x00000740,
236 0x78050006,
237 0x00000000, 724 0x00000000,
238 0x00000000, 725 0x00000000,
726 0x54004000,
239 0x00000000, 727 0x00000000,
240 0x00000000, 728 0x00000000,
241 0x00000000, 729 0x00000000,
730 0x58004000,
242 0x00000000, 731 0x00000000,
243 0x00000000, 732 0x00000000,
244 0x78070003,
245 0x00000000, 733 0x00000000,
734 0x5c004000,
246 0x00000000, 735 0x00000000,
247 0x00000000, 736 0x00000000,
248 0x00000000, 737 0x00000000,
249 0x78060003, 738 0x60004000,
250 0x00000000, 739 0x00000000,
251 0x00000000, 740 0x00000000,
252 0x00000000, 741 0x00000000,
742 0x64004000,
253 0x00000000, 743 0x00000000,
254 0x78040001,
255 0x00000000, 744 0x00000000,
256 0x00000001, 745 0x00000000,
257 0x79000002, 746 0x68004000,
258 0xffffffff, 747 0x00000000,
748 0x00000000,
749 0x00000000,
750 0x6c004000,
751 0x00000000,
259 0x00000000, 752 0x00000000,
260 0x00000000, 753 0x00000000,
261 0x78080003, 754 0x70004000,
262 0x00006000,
263 0x000005e0, /* reloc */
264 0x00000000, 755 0x00000000,
265 0x00000000, 756 0x00000000,
266 0x78090005, 757 0x00000000,
758 0x74004000,
759 0x00000000,
760 0x00000000,
761 0x00000000,
762 0x78004000,
763 0x00000000,
764 0x00000000,
765 0x00000000,
766 0x7c004000,
767 0x00000000,
768 0x00000000,
769 0x00000000,
770 0x80004000,
771 0x00000000,
772 0x00000000,
773 0x00000000,
774 0x78090043,
267 0x02000000, 775 0x02000000,
268 0x22220000, 776 0x22220000,
269 0x02f60000,
270 0x11230000,
271 0x02850004,
272 0x11230000,
273 0x784b0000,
274 0x0000000f,
275 0x78490001,
276 0x00000000, 777 0x00000000,
277 0x00000000, 778 0x00000000,
278 0x7b000005,
279 0x00000000, 779 0x00000000,
280 0x00000003,
281 0x00000000, 780 0x00000000,
282 0x00000001,
283 0x00000000, 781 0x00000000,
284 0x00000000, 782 0x00000000,
285 0x05000000, /* cmds end */
286 0x00000000, 783 0x00000000,
287 0x00000000, 784 0x00000000,
288 0x00000000, 785 0x00000000,
@@ -297,8 +794,6 @@ static const u32 gen8_null_state_batch[] = {
297 0x00000000, 794 0x00000000,
298 0x00000000, 795 0x00000000,
299 0x00000000, 796 0x00000000,
300 0x000004c0, /* state start */
301 0x00000500,
302 0x00000000, 797 0x00000000,
303 0x00000000, 798 0x00000000,
304 0x00000000, 799 0x00000000,
@@ -345,46 +840,65 @@ static const u32 gen8_null_state_batch[] = {
345 0x00000000, 840 0x00000000,
346 0x00000000, 841 0x00000000,
347 0x00000000, 842 0x00000000,
843 0x680b0001,
844 0x78260000,
845 0x00000000,
846 0x78270000,
847 0x00000000,
848 0x78280000,
849 0x00000000,
850 0x78290000,
851 0x00000000,
852 0x782a0000,
853 0x00000000,
854 0x780e0000,
855 0x00000dc1,
856 0x78240000,
857 0x00000e01,
858 0x784f0000,
859 0x80000100,
860 0x784d0000,
861 0x40000000,
862 0x782b0000,
863 0x00000000,
864 0x782c0000,
865 0x00000000,
866 0x782d0000,
348 0x00000000, 867 0x00000000,
868 0x782e0000,
349 0x00000000, 869 0x00000000,
870 0x782f0000,
350 0x00000000, 871 0x00000000,
351 0x00000092, 872 0x780f0000,
352 0x00000000, 873 0x00000000,
874 0x78230000,
875 0x00000e60,
876 0x78210000,
877 0x00000e80,
878 0x7b000005,
879 0x00000004,
880 0x00000001,
353 0x00000000, 881 0x00000000,
882 0x00000001,
354 0x00000000, 883 0x00000000,
355 0x00000000, 884 0x00000000,
885 0x05000000, /* cmds end */
356 0x00000000, 886 0x00000000,
357 0x00000000, 887 0x00000000,
358 0x00000000, 888 0x00000000,
359 0x00000000, 889 0x00000000,
360 0x00000000, 890 0x00000000,
361 0x00000000, 891 0x00000000,
892 0x00000000, /* state start */
893 0x00000000,
894 0x3f800000,
895 0x3f800000,
896 0x3f800000,
897 0x3f800000,
898 0x00000000,
899 0x00000000,
362 0x00000000, 900 0x00000000,
363 0x00000000, 901 0x00000000,
364 0x0060005a,
365 0x21403ae8,
366 0x3a0000c0,
367 0x008d0040,
368 0x0060005a,
369 0x21603ae8,
370 0x3a0000c0,
371 0x008d0080,
372 0x0060005a,
373 0x21803ae8,
374 0x3a0000d0,
375 0x008d0040,
376 0x0060005a,
377 0x21a03ae8,
378 0x3a0000d0,
379 0x008d0080,
380 0x02800031,
381 0x2e0022e8,
382 0x0e000140,
383 0x08840001,
384 0x05800031,
385 0x200022e0,
386 0x0e000e00,
387 0x90031000,
388 0x00000000, 902 0x00000000,
389 0x00000000, 903 0x00000000,
390 0x00000000, 904 0x00000000,
@@ -410,38 +924,6 @@ static const u32 gen8_null_state_batch[] = {
410 0x00000000, 924 0x00000000,
411 0x00000000, 925 0x00000000,
412 0x00000000, 926 0x00000000,
413 0x06200000,
414 0x00000002,
415 0x06200000,
416 0x00000002,
417 0x06200000,
418 0x00000002,
419 0x06200000,
420 0x00000002,
421 0x06200000,
422 0x00000002,
423 0x06200000,
424 0x00000002,
425 0x06200000,
426 0x00000002,
427 0x06200000,
428 0x00000002,
429 0x06200000,
430 0x00000002,
431 0x06200000,
432 0x00000002,
433 0x06200000,
434 0x00000002,
435 0x06200000,
436 0x00000002,
437 0x06200000,
438 0x00000002,
439 0x06200000,
440 0x00000002,
441 0x06200000,
442 0x00000002,
443 0x06200000,
444 0x00000002,
445 0x00000000, 927 0x00000000,
446 0x00000000, 928 0x00000000,
447 0x00000000, 929 0x00000000,
@@ -449,8 +931,6 @@ static const u32 gen8_null_state_batch[] = {
449 0x00000000, 931 0x00000000,
450 0x00000000, 932 0x00000000,
451 0x00000000, 933 0x00000000,
452 0xf99a130c,
453 0x799a130c,
454 0x00000000, 934 0x00000000,
455 0x00000000, 935 0x00000000,
456 0x00000000, 936 0x00000000,
@@ -466,9 +946,7 @@ static const u32 gen8_null_state_batch[] = {
466 0x00000000, 946 0x00000000,
467 0x00000000, 947 0x00000000,
468 0x00000000, 948 0x00000000,
469 0x3f800000,
470 0x00000000, 949 0x00000000,
471 0x3f800000,
472 0x00000000, 950 0x00000000,
473 0x00000000, 951 0x00000000,
474 0x00000000, 952 0x00000000,
diff --git a/drivers/gpu/drm/i915/intel_renderstate_gen9.c b/drivers/gpu/drm/i915/intel_renderstate_gen9.c
new file mode 100644
index 000000000000..875075373807
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_renderstate_gen9.c
@@ -0,0 +1,974 @@
1#include "intel_renderstate.h"
2
3static const u32 gen9_null_state_relocs[] = {
4 0x000007a8,
5 0x000007b4,
6 0x000007bc,
7 0x000007cc,
8 -1,
9};
10
11static const u32 gen9_null_state_batch[] = {
12 0x7a000004,
13 0x01000000,
14 0x00000000,
15 0x00000000,
16 0x00000000,
17 0x00000000,
18 0x69040300,
19 0x78140000,
20 0x04000000,
21 0x7820000a,
22 0x00000000,
23 0x00000000,
24 0x80000000,
25 0x00000000,
26 0x00000000,
27 0x00000000,
28 0x00000000,
29 0x00000000,
30 0x00000000,
31 0x00000000,
32 0x00000000,
33 0x78130002,
34 0x00000000,
35 0x00000000,
36 0x02001808,
37 0x781f0004,
38 0x00000000,
39 0x00000000,
40 0x00000000,
41 0x00000000,
42 0x00000000,
43 0x78510009,
44 0x00000000,
45 0x00000000,
46 0x00000000,
47 0x00000000,
48 0x00000000,
49 0x00000000,
50 0x00000000,
51 0x00000000,
52 0x00000000,
53 0x00000000,
54 0x78100007,
55 0x00000000,
56 0x00000000,
57 0x00010000,
58 0x00000000,
59 0x00000000,
60 0x00000000,
61 0x00000000,
62 0x00000000,
63 0x781b0007,
64 0x00000000,
65 0x00000000,
66 0x00000000,
67 0x00000000,
68 0x00000000,
69 0x00000000,
70 0x00000800,
71 0x00000000,
72 0x78110008,
73 0x00000000,
74 0x00000000,
75 0x00000000,
76 0x00000000,
77 0x00000000,
78 0x00000000,
79 0x00000000,
80 0x00000000,
81 0x00000000,
82 0x781e0003,
83 0x00000000,
84 0x00000000,
85 0x00000000,
86 0x00000000,
87 0x781d0009,
88 0x00000000,
89 0x00000000,
90 0x00000000,
91 0x00000000,
92 0x00000000,
93 0x00000000,
94 0x00000000,
95 0x00000000,
96 0x00000000,
97 0x00000000,
98 0x78120002,
99 0x00000000,
100 0x00000000,
101 0x00000000,
102 0x78500003,
103 0x00000000,
104 0x00000000,
105 0x00000000,
106 0x00000000,
107 0x781c0002,
108 0x00000000,
109 0x00000000,
110 0x00000000,
111 0x780c0000,
112 0x00000000,
113 0x78520003,
114 0x00000000,
115 0x00000000,
116 0x00000000,
117 0x00000000,
118 0x78300000,
119 0x08010040,
120 0x78310000,
121 0x1e000000,
122 0x78320000,
123 0x1e000000,
124 0x78330000,
125 0x1e000000,
126 0x79190002,
127 0x00000000,
128 0x00000000,
129 0x00000000,
130 0x791a0002,
131 0x00000000,
132 0x00000000,
133 0x00000000,
134 0x791b0002,
135 0x00000000,
136 0x00000000,
137 0x00000000,
138 0x79120000,
139 0x00000000,
140 0x79130000,
141 0x00000000,
142 0x79140000,
143 0x00000000,
144 0x79150000,
145 0x00000000,
146 0x79160000,
147 0x00000000,
148 0x78150009,
149 0x00000000,
150 0x00000000,
151 0x00000000,
152 0x00000000,
153 0x00000000,
154 0x00000000,
155 0x00000000,
156 0x00000000,
157 0x00000000,
158 0x00000000,
159 0x78190009,
160 0x00000000,
161 0x00000000,
162 0x00000000,
163 0x00000000,
164 0x00000000,
165 0x00000000,
166 0x00000000,
167 0x00000000,
168 0x00000000,
169 0x00000000,
170 0x781a0009,
171 0x00000000,
172 0x00000000,
173 0x00000000,
174 0x00000000,
175 0x00000000,
176 0x00000000,
177 0x00000000,
178 0x00000000,
179 0x00000000,
180 0x00000000,
181 0x78160009,
182 0x00000000,
183 0x00000000,
184 0x00000000,
185 0x00000000,
186 0x00000000,
187 0x00000000,
188 0x00000000,
189 0x00000000,
190 0x00000000,
191 0x00000000,
192 0x78170009,
193 0x00000000,
194 0x00000000,
195 0x00000000,
196 0x00000000,
197 0x00000000,
198 0x00000000,
199 0x00000000,
200 0x00000000,
201 0x00000000,
202 0x00000000,
203 0x78490001,
204 0x00000000,
205 0x00000000,
206 0x784a0000,
207 0x00000000,
208 0x784b0000,
209 0x00000004,
210 0x79170101,
211 0x00000000,
212 0x00000080,
213 0x00000000,
214 0x00000000,
215 0x00000000,
216 0x00000000,
217 0x00000000,
218 0x00000000,
219 0x00000000,
220 0x00000000,
221 0x00000000,
222 0x00000000,
223 0x00000000,
224 0x00000000,
225 0x00000000,
226 0x00000000,
227 0x00000000,
228 0x00000000,
229 0x00000000,
230 0x00000000,
231 0x00000000,
232 0x00000000,
233 0x00000000,
234 0x00000000,
235 0x00000000,
236 0x00000000,
237 0x00000000,
238 0x00000000,
239 0x00000000,
240 0x00000000,
241 0x00000000,
242 0x00000000,
243 0x00000000,
244 0x00000000,
245 0x00000000,
246 0x00000000,
247 0x00000000,
248 0x00000000,
249 0x00000000,
250 0x00000000,
251 0x00000000,
252 0x00000000,
253 0x00000000,
254 0x00000000,
255 0x00000000,
256 0x00000000,
257 0x00000000,
258 0x00000000,
259 0x00000000,
260 0x00000000,
261 0x00000000,
262 0x00000000,
263 0x00000000,
264 0x00000000,
265 0x00000000,
266 0x00000000,
267 0x00000000,
268 0x00000000,
269 0x00000000,
270 0x00000000,
271 0x00000000,
272 0x00000000,
273 0x00000000,
274 0x00000000,
275 0x00000000,
276 0x00000000,
277 0x00000000,
278 0x00000000,
279 0x00000000,
280 0x00000000,
281 0x00000000,
282 0x00000000,
283 0x00000000,
284 0x00000000,
285 0x00000000,
286 0x00000000,
287 0x00000000,
288 0x00000000,
289 0x00000000,
290 0x00000000,
291 0x00000000,
292 0x00000000,
293 0x00000000,
294 0x00000000,
295 0x00000000,
296 0x00000000,
297 0x00000000,
298 0x00000000,
299 0x00000000,
300 0x00000000,
301 0x00000000,
302 0x00000000,
303 0x00000000,
304 0x00000000,
305 0x00000000,
306 0x00000000,
307 0x00000000,
308 0x00000000,
309 0x00000000,
310 0x00000000,
311 0x00000000,
312 0x00000000,
313 0x00000000,
314 0x00000000,
315 0x00000000,
316 0x00000000,
317 0x00000000,
318 0x00000000,
319 0x00000000,
320 0x00000000,
321 0x00000000,
322 0x00000000,
323 0x00000000,
324 0x00000000,
325 0x00000000,
326 0x00000000,
327 0x00000000,
328 0x00000000,
329 0x00000000,
330 0x00000000,
331 0x00000000,
332 0x00000000,
333 0x00000000,
334 0x00000000,
335 0x00000000,
336 0x00000000,
337 0x00000000,
338 0x00000000,
339 0x00000000,
340 0x00000000,
341 0x00000000,
342 0x00000000,
343 0x00000000,
344 0x00000000,
345 0x00000000,
346 0x00000000,
347 0x00000000,
348 0x00000000,
349 0x00000000,
350 0x00000000,
351 0x00000000,
352 0x00000000,
353 0x00000000,
354 0x00000000,
355 0x00000000,
356 0x00000000,
357 0x00000000,
358 0x00000000,
359 0x00000000,
360 0x00000000,
361 0x00000000,
362 0x00000000,
363 0x00000000,
364 0x00000000,
365 0x00000000,
366 0x00000000,
367 0x00000000,
368 0x00000000,
369 0x00000000,
370 0x00000000,
371 0x00000000,
372 0x00000000,
373 0x00000000,
374 0x00000000,
375 0x00000000,
376 0x00000000,
377 0x00000000,
378 0x00000000,
379 0x00000000,
380 0x00000000,
381 0x00000000,
382 0x00000000,
383 0x00000000,
384 0x00000000,
385 0x00000000,
386 0x00000000,
387 0x00000000,
388 0x00000000,
389 0x00000000,
390 0x00000000,
391 0x00000000,
392 0x00000000,
393 0x00000000,
394 0x00000000,
395 0x00000000,
396 0x00000000,
397 0x00000000,
398 0x00000000,
399 0x00000000,
400 0x00000000,
401 0x00000000,
402 0x00000000,
403 0x00000000,
404 0x00000000,
405 0x00000000,
406 0x00000000,
407 0x00000000,
408 0x00000000,
409 0x00000000,
410 0x00000000,
411 0x00000000,
412 0x00000000,
413 0x00000000,
414 0x00000000,
415 0x00000000,
416 0x00000000,
417 0x00000000,
418 0x00000000,
419 0x00000000,
420 0x00000000,
421 0x00000000,
422 0x00000000,
423 0x00000000,
424 0x00000000,
425 0x00000000,
426 0x00000000,
427 0x00000000,
428 0x00000000,
429 0x00000000,
430 0x00000000,
431 0x00000000,
432 0x00000000,
433 0x00000000,
434 0x00000000,
435 0x00000000,
436 0x00000000,
437 0x00000000,
438 0x00000000,
439 0x00000000,
440 0x00000000,
441 0x00000000,
442 0x00000000,
443 0x00000000,
444 0x00000000,
445 0x00000000,
446 0x00000000,
447 0x00000000,
448 0x00000000,
449 0x00000000,
450 0x00000000,
451 0x00000000,
452 0x00000000,
453 0x00000000,
454 0x00000000,
455 0x00000000,
456 0x00000000,
457 0x00000000,
458 0x00000000,
459 0x00000000,
460 0x00000000,
461 0x00000000,
462 0x00000000,
463 0x00000000,
464 0x00000000,
465 0x00000000,
466 0x00000000,
467 0x00000000,
468 0x00000000,
469 0x79180006,
470 0x00000000,
471 0x00000000,
472 0x00000000,
473 0x00000000,
474 0x00000000,
475 0x00000000,
476 0x00000000,
477 0x79180006,
478 0x20000000,
479 0x00000000,
480 0x00000000,
481 0x00000000,
482 0x00000000,
483 0x00000000,
484 0x00000000,
485 0x79180006,
486 0x40000000,
487 0x00000000,
488 0x00000000,
489 0x00000000,
490 0x00000000,
491 0x00000000,
492 0x00000000,
493 0x79180006,
494 0x60000000,
495 0x00000000,
496 0x00000000,
497 0x00000000,
498 0x00000000,
499 0x00000000,
500 0x00000000,
501 0x61010011,
502 0x00000001, /* reloc */
503 0x00000000,
504 0x00000000,
505 0x00000001, /* reloc */
506 0x00000000,
507 0x00000001, /* reloc */
508 0x00000000,
509 0x00000001,
510 0x00000000,
511 0x00000001, /* reloc */
512 0x00000000,
513 0x00001001,
514 0x00001001,
515 0x00000001,
516 0x00001001,
517 0x00000000,
518 0x00000000,
519 0x00000000,
520 0x61020001,
521 0x00000000,
522 0x00000000,
523 0x79000002,
524 0x00000000,
525 0x00000000,
526 0x00000000,
527 0x78050006,
528 0x00000000,
529 0x00000000,
530 0x00000000,
531 0x00000000,
532 0x00000000,
533 0x00000000,
534 0x00000000,
535 0x79040002,
536 0x00000000,
537 0x00000000,
538 0x00000000,
539 0x79040002,
540 0x40000000,
541 0x00000000,
542 0x00000000,
543 0x79040002,
544 0x80000000,
545 0x00000000,
546 0x00000000,
547 0x79040002,
548 0xc0000000,
549 0x00000000,
550 0x00000000,
551 0x79080001,
552 0x00000000,
553 0x00000000,
554 0x790a0001,
555 0x00000000,
556 0x00000000,
557 0x78060003,
558 0x00000000,
559 0x00000000,
560 0x00000000,
561 0x00000000,
562 0x78070003,
563 0x00000000,
564 0x00000000,
565 0x00000000,
566 0x00000000,
567 0x78040001,
568 0x00000000,
569 0x00000000,
570 0x79110000,
571 0x00000000,
572 0x780d0000,
573 0x00000000,
574 0x79060000,
575 0x00000000,
576 0x7907001f,
577 0x00000000,
578 0x00000000,
579 0x00000000,
580 0x00000000,
581 0x00000000,
582 0x00000000,
583 0x00000000,
584 0x00000000,
585 0x00000000,
586 0x00000000,
587 0x00000000,
588 0x00000000,
589 0x00000000,
590 0x00000000,
591 0x00000000,
592 0x00000000,
593 0x00000000,
594 0x00000000,
595 0x00000000,
596 0x00000000,
597 0x00000000,
598 0x00000000,
599 0x00000000,
600 0x00000000,
601 0x00000000,
602 0x00000000,
603 0x00000000,
604 0x00000000,
605 0x00000000,
606 0x00000000,
607 0x00000000,
608 0x00000000,
609 0x7902000f,
610 0x00000000,
611 0x00000000,
612 0x00000000,
613 0x00000000,
614 0x00000000,
615 0x00000000,
616 0x00000000,
617 0x00000000,
618 0x00000000,
619 0x00000000,
620 0x00000000,
621 0x00000000,
622 0x00000000,
623 0x00000000,
624 0x00000000,
625 0x00000000,
626 0x790c000f,
627 0x00000000,
628 0x00000000,
629 0x00000000,
630 0x00000000,
631 0x00000000,
632 0x00000000,
633 0x00000000,
634 0x00000000,
635 0x00000000,
636 0x00000000,
637 0x00000000,
638 0x00000000,
639 0x00000000,
640 0x00000000,
641 0x00000000,
642 0x00000000,
643 0x780a0003,
644 0x00000000,
645 0x00000000,
646 0x00000000,
647 0x00000000,
648 0x78080083,
649 0x00004000,
650 0x00000000,
651 0x00000000,
652 0x00000000,
653 0x04004000,
654 0x00000000,
655 0x00000000,
656 0x00000000,
657 0x08004000,
658 0x00000000,
659 0x00000000,
660 0x00000000,
661 0x0c004000,
662 0x00000000,
663 0x00000000,
664 0x00000000,
665 0x10004000,
666 0x00000000,
667 0x00000000,
668 0x00000000,
669 0x14004000,
670 0x00000000,
671 0x00000000,
672 0x00000000,
673 0x18004000,
674 0x00000000,
675 0x00000000,
676 0x00000000,
677 0x1c004000,
678 0x00000000,
679 0x00000000,
680 0x00000000,
681 0x20004000,
682 0x00000000,
683 0x00000000,
684 0x00000000,
685 0x24004000,
686 0x00000000,
687 0x00000000,
688 0x00000000,
689 0x28004000,
690 0x00000000,
691 0x00000000,
692 0x00000000,
693 0x2c004000,
694 0x00000000,
695 0x00000000,
696 0x00000000,
697 0x30004000,
698 0x00000000,
699 0x00000000,
700 0x00000000,
701 0x34004000,
702 0x00000000,
703 0x00000000,
704 0x00000000,
705 0x38004000,
706 0x00000000,
707 0x00000000,
708 0x00000000,
709 0x3c004000,
710 0x00000000,
711 0x00000000,
712 0x00000000,
713 0x40004000,
714 0x00000000,
715 0x00000000,
716 0x00000000,
717 0x44004000,
718 0x00000000,
719 0x00000000,
720 0x00000000,
721 0x48004000,
722 0x00000000,
723 0x00000000,
724 0x00000000,
725 0x4c004000,
726 0x00000000,
727 0x00000000,
728 0x00000000,
729 0x50004000,
730 0x00000000,
731 0x00000000,
732 0x00000000,
733 0x54004000,
734 0x00000000,
735 0x00000000,
736 0x00000000,
737 0x58004000,
738 0x00000000,
739 0x00000000,
740 0x00000000,
741 0x5c004000,
742 0x00000000,
743 0x00000000,
744 0x00000000,
745 0x60004000,
746 0x00000000,
747 0x00000000,
748 0x00000000,
749 0x64004000,
750 0x00000000,
751 0x00000000,
752 0x00000000,
753 0x68004000,
754 0x00000000,
755 0x00000000,
756 0x00000000,
757 0x6c004000,
758 0x00000000,
759 0x00000000,
760 0x00000000,
761 0x70004000,
762 0x00000000,
763 0x00000000,
764 0x00000000,
765 0x74004000,
766 0x00000000,
767 0x00000000,
768 0x00000000,
769 0x78004000,
770 0x00000000,
771 0x00000000,
772 0x00000000,
773 0x7c004000,
774 0x00000000,
775 0x00000000,
776 0x00000000,
777 0x80004000,
778 0x00000000,
779 0x00000000,
780 0x00000000,
781 0x78090043,
782 0x02000000,
783 0x22220000,
784 0x00000000,
785 0x00000000,
786 0x00000000,
787 0x00000000,
788 0x00000000,
789 0x00000000,
790 0x00000000,
791 0x00000000,
792 0x00000000,
793 0x00000000,
794 0x00000000,
795 0x00000000,
796 0x00000000,
797 0x00000000,
798 0x00000000,
799 0x00000000,
800 0x00000000,
801 0x00000000,
802 0x00000000,
803 0x00000000,
804 0x00000000,
805 0x00000000,
806 0x00000000,
807 0x00000000,
808 0x00000000,
809 0x00000000,
810 0x00000000,
811 0x00000000,
812 0x00000000,
813 0x00000000,
814 0x00000000,
815 0x00000000,
816 0x00000000,
817 0x00000000,
818 0x00000000,
819 0x00000000,
820 0x00000000,
821 0x00000000,
822 0x00000000,
823 0x00000000,
824 0x00000000,
825 0x00000000,
826 0x00000000,
827 0x00000000,
828 0x00000000,
829 0x00000000,
830 0x00000000,
831 0x00000000,
832 0x00000000,
833 0x00000000,
834 0x00000000,
835 0x00000000,
836 0x00000000,
837 0x00000000,
838 0x00000000,
839 0x00000000,
840 0x00000000,
841 0x00000000,
842 0x00000000,
843 0x00000000,
844 0x00000000,
845 0x00000000,
846 0x00000000,
847 0x00000000,
848 0x00000000,
849 0x00000000,
850 0x78550003,
851 0x00000000,
852 0x00000000,
853 0x00000000,
854 0x00000000,
855 0x680b0001,
856 0x780e0000,
857 0x00000e01,
858 0x78240000,
859 0x00000e41,
860 0x784f0000,
861 0x80000100,
862 0x784d0000,
863 0x40000000,
864 0x782b0000,
865 0x00000000,
866 0x782c0000,
867 0x00000000,
868 0x782d0000,
869 0x00000000,
870 0x782e0000,
871 0x00000000,
872 0x782f0000,
873 0x00000000,
874 0x780f0000,
875 0x00000000,
876 0x78230000,
877 0x00000ea0,
878 0x78210000,
879 0x00000ec0,
880 0x78260000,
881 0x00000000,
882 0x78270000,
883 0x00000000,
884 0x78280000,
885 0x00000000,
886 0x78290000,
887 0x00000000,
888 0x782a0000,
889 0x00000000,
890 0x7b000005,
891 0x00000004,
892 0x00000001,
893 0x00000000,
894 0x00000001,
895 0x00000000,
896 0x00000000,
897 0x05000000, /* cmds end */
898 0x00000000,
899 0x00000000,
900 0x00000000,
901 0x00000000,
902 0x00000000,
903 0x00000000,
904 0x00000000,
905 0x00000000,
906 0x00000000,
907 0x00000000,
908 0x00000000, /* state start */
909 0x00000000,
910 0x3f800000,
911 0x3f800000,
912 0x3f800000,
913 0x3f800000,
914 0x00000000,
915 0x00000000,
916 0x00000000,
917 0x00000000,
918 0x00000000,
919 0x00000000,
920 0x00000000,
921 0x00000000,
922 0x00000000,
923 0x00000000,
924 0x00000000,
925 0x00000000,
926 0x00000000,
927 0x00000000,
928 0x00000000,
929 0x00000000,
930 0x00000000,
931 0x00000000,
932 0x00000000,
933 0x00000000,
934 0x00000000,
935 0x00000000,
936 0x00000000,
937 0x00000000,
938 0x00000000,
939 0x00000000,
940 0x00000000,
941 0x00000000,
942 0x00000000,
943 0x00000000,
944 0x00000000,
945 0x00000000,
946 0x00000000,
947 0x00000000,
948 0x00000000,
949 0x00000000,
950 0x00000000,
951 0x00000000,
952 0x00000000,
953 0x00000000,
954 0x00000000,
955 0x00000000,
956 0x00000000,
957 0x00000000,
958 0x00000000,
959 0x00000000,
960 0x00000000,
961 0x00000000,
962 0x00000000,
963 0x00000000,
964 0x00000000,
965 0x00000000,
966 0x00000000,
967 0x00000000,
968 0x00000000,
969 0x00000000,
970 0x00000000,
971 0x00000000, /* state end */
972};
973
974RO_RENDERSTATE(9);
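
The *_null_state_relocs tables in these renderstate files list byte offsets into the batch whose dwords (marked /* reloc */ above) must be rebased onto the GPU address at which the batch is eventually bound; the list is terminated by -1. A hedged, standalone sketch of that fix-up loop follows — the names are illustrative, not the driver's actual relocation routine:

#include <stdint.h>

/* Editor's sketch: rebase the reloc dwords of a null-state batch onto
 * the GPU address where the batch is bound. Illustrative names only. */
static void patch_null_state(uint32_t *batch, const uint32_t *relocs,
			     uint32_t batch_gpu_addr)
{
	/* Each entry is a byte offset into the batch; -1 ends the list. */
	for (unsigned i = 0; relocs[i] != (uint32_t)-1; i++) {
		uint32_t dw = relocs[i] / 4;	/* byte offset -> dword index */
		batch[dw] += batch_gpu_addr;	/* rebase onto bound address */
	}
}
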
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 0a80e419b589..9f445e9a75d1 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -589,14 +589,10 @@ static int init_ring_common(struct intel_engine_cs *ring)
589 goto out; 589 goto out;
590 } 590 }
591 591
592 if (!drm_core_check_feature(ring->dev, DRIVER_MODESET)) 592 ringbuf->head = I915_READ_HEAD(ring);
593 i915_kernel_lost_context(ring->dev); 593 ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
594 else { 594 ringbuf->space = intel_ring_space(ringbuf);
595 ringbuf->head = I915_READ_HEAD(ring); 595 ringbuf->last_retired_head = -1;
596 ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
597 ringbuf->space = intel_ring_space(ringbuf);
598 ringbuf->last_retired_head = -1;
599 }
600 596
601 memset(&ring->hangcheck, 0, sizeof(ring->hangcheck)); 597 memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));
602 598
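
With the UMS branch gone, init_ring_common() unconditionally reads head and tail back from the hardware registers and recomputes the free space. The computation intel_ring_space() performs is ordinary circular-buffer arithmetic; a standalone sketch of it — the reserve constant and names here are illustrative, not the driver's exact definitions:

/* Editor's sketch of circular-buffer free space, in the style of
 * intel_ring_space(); names and the reserve value are illustrative. */
#define RING_FREE_RESERVE 64	/* headroom so tail never catches head */

static int ring_free_space(int head, int tail, int size)
{
	int space = head - (tail + RING_FREE_RESERVE);
	if (space < 0)
		space += size;	/* wrap around the ring */
	return space;
}
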
@@ -665,76 +661,112 @@ err:
665 return ret; 661 return ret;
666} 662}
667 663
668static inline void intel_ring_emit_wa(struct intel_engine_cs *ring, 664static int intel_ring_workarounds_emit(struct intel_engine_cs *ring,
669 u32 addr, u32 value) 665 struct intel_context *ctx)
670{ 666{
667 int ret, i;
671 struct drm_device *dev = ring->dev; 668 struct drm_device *dev = ring->dev;
672 struct drm_i915_private *dev_priv = dev->dev_private; 669 struct drm_i915_private *dev_priv = dev->dev_private;
670 struct i915_workarounds *w = &dev_priv->workarounds;
673 671
674 if (WARN_ON(dev_priv->num_wa_regs >= I915_MAX_WA_REGS)) 672 if (WARN_ON(w->count == 0))
675 return; 673 return 0;
676 674
677 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); 675 ring->gpu_caches_dirty = true;
678 intel_ring_emit(ring, addr); 676 ret = intel_ring_flush_all_caches(ring);
679 intel_ring_emit(ring, value); 677 if (ret)
678 return ret;
680 679
681 dev_priv->intel_wa_regs[dev_priv->num_wa_regs].addr = addr; 680 ret = intel_ring_begin(ring, (w->count * 2 + 2));
682 dev_priv->intel_wa_regs[dev_priv->num_wa_regs].mask = value & 0xFFFF; 681 if (ret)
683 /* value is updated with the status of remaining bits of this 682 return ret;
684 * register when it is read from debugfs file 683
685 */ 684 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
686 dev_priv->intel_wa_regs[dev_priv->num_wa_regs].value = value; 685 for (i = 0; i < w->count; i++) {
687 dev_priv->num_wa_regs++; 686 intel_ring_emit(ring, w->reg[i].addr);
687 intel_ring_emit(ring, w->reg[i].value);
688 }
689 intel_ring_emit(ring, MI_NOOP);
690
691 intel_ring_advance(ring);
688 692
689 return; 693 ring->gpu_caches_dirty = true;
694 ret = intel_ring_flush_all_caches(ring);
695 if (ret)
696 return ret;
697
698 DRM_DEBUG_DRIVER("Number of Workarounds emitted: %d\n", w->count);
699
700 return 0;
690} 701}
691 702
703static int wa_add(struct drm_i915_private *dev_priv,
704 const u32 addr, const u32 mask, const u32 val)
705{
706 const u32 idx = dev_priv->workarounds.count;
707
708 if (WARN_ON(idx >= I915_MAX_WA_REGS))
709 return -ENOSPC;
710
711 dev_priv->workarounds.reg[idx].addr = addr;
712 dev_priv->workarounds.reg[idx].value = val;
713 dev_priv->workarounds.reg[idx].mask = mask;
714
715 dev_priv->workarounds.count++;
716
717 return 0;
718}
719
720#define WA_REG(addr, mask, val) { \
721 const int r = wa_add(dev_priv, (addr), (mask), (val)); \
722 if (r) \
723 return r; \
724 }
725
726#define WA_SET_BIT_MASKED(addr, mask) \
727 WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))
728
729#define WA_CLR_BIT_MASKED(addr, mask) \
730 WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))
731
732#define WA_SET_FIELD_MASKED(addr, mask, value) \
733 WA_REG(addr, mask, _MASKED_FIELD(mask, value))
734
735#define WA_SET_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) | (mask))
736#define WA_CLR_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) & ~(mask))
737
738#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)
739
692static int bdw_init_workarounds(struct intel_engine_cs *ring) 740static int bdw_init_workarounds(struct intel_engine_cs *ring)
693{ 741{
694 int ret;
695 struct drm_device *dev = ring->dev; 742 struct drm_device *dev = ring->dev;
696 struct drm_i915_private *dev_priv = dev->dev_private; 743 struct drm_i915_private *dev_priv = dev->dev_private;
697 744
698 /*
699 * workarounds applied in this fn are part of register state context,
700 * they need to be re-initialized followed by gpu reset, suspend/resume,
701 * module reload.
702 */
703 dev_priv->num_wa_regs = 0;
704 memset(dev_priv->intel_wa_regs, 0, sizeof(dev_priv->intel_wa_regs));
705
706 /*
707 * update the number of dwords required based on the
708 * actual number of workarounds applied
709 */
710 ret = intel_ring_begin(ring, 18);
711 if (ret)
712 return ret;
713
714 /* WaDisablePartialInstShootdown:bdw */ 745 /* WaDisablePartialInstShootdown:bdw */
715 /* WaDisableThreadStallDopClockGating:bdw */ 746 /* WaDisableThreadStallDopClockGating:bdw (pre-production) */
716 /* FIXME: Unclear whether we really need this on production bdw. */ 747 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
717 intel_ring_emit_wa(ring, GEN8_ROW_CHICKEN, 748 PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE |
718 _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE 749 STALL_DOP_GATING_DISABLE);
719 | STALL_DOP_GATING_DISABLE));
720 750
721 /* WaDisableDopClockGating:bdw May not be needed for production */ 751 /* WaDisableDopClockGating:bdw */
722 intel_ring_emit_wa(ring, GEN7_ROW_CHICKEN2, 752 WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
723 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE)); 753 DOP_CLOCK_GATING_DISABLE);
724 754
725 intel_ring_emit_wa(ring, HALF_SLICE_CHICKEN3, 755 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
726 _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS)); 756 GEN8_SAMPLER_POWER_BYPASS_DIS);
727 757
728 /* Use Force Non-Coherent whenever executing a 3D context. This is a 758 /* Use Force Non-Coherent whenever executing a 3D context. This is a
729 * workaround for a possible hang in the unlikely event a TLB 759 * workaround for a possible hang in the unlikely event a TLB
730 * invalidation occurs during a PSD flush. 760 * invalidation occurs during a PSD flush.
731 */ 761 */
732 intel_ring_emit_wa(ring, HDC_CHICKEN0, 762 /* WaDisableFenceDestinationToSLM:bdw (GT3 pre-production) */
733 _MASKED_BIT_ENABLE(HDC_FORCE_NON_COHERENT)); 763 WA_SET_BIT_MASKED(HDC_CHICKEN0,
764 HDC_FORCE_NON_COHERENT |
765 (IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
734 766
735 /* Wa4x4STCOptimizationDisable:bdw */ 767 /* Wa4x4STCOptimizationDisable:bdw */
736 intel_ring_emit_wa(ring, CACHE_MODE_1, 768 WA_SET_BIT_MASKED(CACHE_MODE_1,
737 _MASKED_BIT_ENABLE(GEN8_4x4_STC_OPTIMIZATION_DISABLE)); 769 GEN8_4x4_STC_OPTIMIZATION_DISABLE);
738 770
739 /* 771 /*
740 * BSpec recommends 8x4 when MSAA is used, 772 * BSpec recommends 8x4 when MSAA is used,
@@ -744,52 +776,51 @@ static int bdw_init_workarounds(struct intel_engine_cs *ring)
744 * disable bit, which we don't touch here, but it's good 776 * disable bit, which we don't touch here, but it's good
745 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). 777 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
746 */ 778 */
747 intel_ring_emit_wa(ring, GEN7_GT_MODE, 779 WA_SET_FIELD_MASKED(GEN7_GT_MODE,
748 GEN6_WIZ_HASHING_MASK | GEN6_WIZ_HASHING_16x4); 780 GEN6_WIZ_HASHING_MASK,
749 781 GEN6_WIZ_HASHING_16x4);
750 intel_ring_advance(ring);
751
752 DRM_DEBUG_DRIVER("Number of Workarounds applied: %d\n",
753 dev_priv->num_wa_regs);
754 782
755 return 0; 783 return 0;
756} 784}
757 785
758static int chv_init_workarounds(struct intel_engine_cs *ring) 786static int chv_init_workarounds(struct intel_engine_cs *ring)
759{ 787{
760 int ret;
761 struct drm_device *dev = ring->dev; 788 struct drm_device *dev = ring->dev;
762 struct drm_i915_private *dev_priv = dev->dev_private; 789 struct drm_i915_private *dev_priv = dev->dev_private;
763 790
764 /* 791 /* WaDisablePartialInstShootdown:chv */
765 * workarounds applied in this fn are part of register state context, 792 /* WaDisableThreadStallDopClockGating:chv */
766 * they need to be re-initialized followed by gpu reset, suspend/resume, 793 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
767 * module reload. 794 PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE |
795 STALL_DOP_GATING_DISABLE);
796
797 /* Use Force Non-Coherent whenever executing a 3D context. This is a
798 * workaround for a possible hang in the unlikely event a TLB
799 * invalidation occurs during a PSD flush.
768 */ 800 */
769 dev_priv->num_wa_regs = 0; 801 /* WaForceEnableNonCoherent:chv */
770 memset(dev_priv->intel_wa_regs, 0, sizeof(dev_priv->intel_wa_regs)); 802 /* WaHdcDisableFetchWhenMasked:chv */
803 WA_SET_BIT_MASKED(HDC_CHICKEN0,
804 HDC_FORCE_NON_COHERENT |
805 HDC_DONOT_FETCH_MEM_WHEN_MASKED);
771 806
772 ret = intel_ring_begin(ring, 12); 807 return 0;
773 if (ret) 808}
774 return ret;
775 809
776 /* WaDisablePartialInstShootdown:chv */ 810int init_workarounds_ring(struct intel_engine_cs *ring)
777 intel_ring_emit_wa(ring, GEN8_ROW_CHICKEN, 811{
778 _MASKED_BIT_ENABLE(PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE)); 812 struct drm_device *dev = ring->dev;
813 struct drm_i915_private *dev_priv = dev->dev_private;
779 814
780 /* WaDisableThreadStallDopClockGating:chv */ 815 WARN_ON(ring->id != RCS);
781 intel_ring_emit_wa(ring, GEN8_ROW_CHICKEN,
782 _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
783 816
784 /* WaDisableDopClockGating:chv (pre-production hw) */ 817 dev_priv->workarounds.count = 0;
785 intel_ring_emit_wa(ring, GEN7_ROW_CHICKEN2,
786 _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
787 818
788 /* WaDisableSamplerPowerBypass:chv (pre-production hw) */ 819 if (IS_BROADWELL(dev))
789 intel_ring_emit_wa(ring, HALF_SLICE_CHICKEN3, 820 return bdw_init_workarounds(ring);
790 _MASKED_BIT_ENABLE(GEN8_SAMPLER_POWER_BYPASS_DIS));
791 821
792 intel_ring_advance(ring); 822 if (IS_CHERRYVIEW(dev))
823 return chv_init_workarounds(ring);
793 824
794 return 0; 825 return 0;
795} 826}
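
The refactor above changes the workaround machinery from emit-at-init to record-then-replay: wa_add() and the WA_* macros fill a table in dev_priv->workarounds, and intel_ring_workarounds_emit() replays the whole table as one MI_LOAD_REGISTER_IMM(count) batch on every context initialisation. A minimal standalone sketch of that pattern, using simplified stand-in types rather than the driver's structures:

#include <stdint.h>
#include <stdio.h>

#define MAX_WA_REGS 16			/* stand-in for I915_MAX_WA_REGS */

struct wa_reg { uint32_t addr, mask, value; };

static struct wa_reg wa_table[MAX_WA_REGS];
static int wa_count;

/* Record a register write instead of emitting it immediately. */
static int wa_add(uint32_t addr, uint32_t mask, uint32_t value)
{
	if (wa_count >= MAX_WA_REGS)
		return -1;	/* table full; the driver returns -ENOSPC */
	wa_table[wa_count].addr = addr;
	wa_table[wa_count].mask = mask;
	wa_table[wa_count].value = value;
	wa_count++;
	return 0;
}

/* Replay the table; in the driver this is one MI_LOAD_REGISTER_IMM(count)
 * followed by (addr, value) pairs, emitted on each context init. */
static void wa_emit(void)
{
	for (int i = 0; i < wa_count; i++)
		printf("LRI 0x%08x <- 0x%08x (mask 0x%08x)\n",
		       wa_table[i].addr, wa_table[i].value, wa_table[i].mask);
}
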
@@ -812,7 +843,7 @@ static int init_render_ring(struct intel_engine_cs *ring)
812 * 843 *
813 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv 844 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
814 */ 845 */
815 if (INTEL_INFO(dev)->gen >= 6) 846 if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 9)
816 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE)); 847 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
817 848
818 /* Required for the hardware to program scanline values for waiting */ 849 /* Required for the hardware to program scanline values for waiting */
@@ -849,7 +880,7 @@ static int init_render_ring(struct intel_engine_cs *ring)
849 if (HAS_L3_DPF(dev)) 880 if (HAS_L3_DPF(dev))
850 I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev)); 881 I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));
851 882
852 return ret; 883 return init_workarounds_ring(ring);
853} 884}
854 885
855static void render_ring_cleanup(struct intel_engine_cs *ring) 886static void render_ring_cleanup(struct intel_engine_cs *ring)
@@ -1186,7 +1217,7 @@ gen5_ring_get_irq(struct intel_engine_cs *ring)
1186 struct drm_i915_private *dev_priv = dev->dev_private; 1217 struct drm_i915_private *dev_priv = dev->dev_private;
1187 unsigned long flags; 1218 unsigned long flags;
1188 1219
1189 if (!dev->irq_enabled) 1220 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
1190 return false; 1221 return false;
1191 1222
1192 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1223 spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1217,7 +1248,7 @@ i9xx_ring_get_irq(struct intel_engine_cs *ring)
1217 struct drm_i915_private *dev_priv = dev->dev_private; 1248 struct drm_i915_private *dev_priv = dev->dev_private;
1218 unsigned long flags; 1249 unsigned long flags;
1219 1250
1220 if (!dev->irq_enabled) 1251 if (!intel_irqs_enabled(dev_priv))
1221 return false; 1252 return false;
1222 1253
1223 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1254 spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1254,7 +1285,7 @@ i8xx_ring_get_irq(struct intel_engine_cs *ring)
1254 struct drm_i915_private *dev_priv = dev->dev_private; 1285 struct drm_i915_private *dev_priv = dev->dev_private;
1255 unsigned long flags; 1286 unsigned long flags;
1256 1287
1257 if (!dev->irq_enabled) 1288 if (!intel_irqs_enabled(dev_priv))
1258 return false; 1289 return false;
1259 1290
1260 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1291 spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1388,8 +1419,8 @@ gen6_ring_get_irq(struct intel_engine_cs *ring)
1388 struct drm_i915_private *dev_priv = dev->dev_private; 1419 struct drm_i915_private *dev_priv = dev->dev_private;
1389 unsigned long flags; 1420 unsigned long flags;
1390 1421
1391 if (!dev->irq_enabled) 1422 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
1392 return false; 1423 return false;
1393 1424
1394 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1425 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1395 if (ring->irq_refcount++ == 0) { 1426 if (ring->irq_refcount++ == 0) {
@@ -1431,7 +1462,7 @@ hsw_vebox_get_irq(struct intel_engine_cs *ring)
1431 struct drm_i915_private *dev_priv = dev->dev_private; 1462 struct drm_i915_private *dev_priv = dev->dev_private;
1432 unsigned long flags; 1463 unsigned long flags;
1433 1464
1434 if (!dev->irq_enabled) 1465 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
1435 return false; 1466 return false;
1436 1467
1437 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1468 spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1451,9 +1482,6 @@ hsw_vebox_put_irq(struct intel_engine_cs *ring)
1451 struct drm_i915_private *dev_priv = dev->dev_private; 1482 struct drm_i915_private *dev_priv = dev->dev_private;
1452 unsigned long flags; 1483 unsigned long flags;
1453 1484
1454 if (!dev->irq_enabled)
1455 return;
1456
1457 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1485 spin_lock_irqsave(&dev_priv->irq_lock, flags);
1458 if (--ring->irq_refcount == 0) { 1486 if (--ring->irq_refcount == 0) {
1459 I915_WRITE_IMR(ring, ~0); 1487 I915_WRITE_IMR(ring, ~0);
@@ -1469,7 +1497,7 @@ gen8_ring_get_irq(struct intel_engine_cs *ring)
1469 struct drm_i915_private *dev_priv = dev->dev_private; 1497 struct drm_i915_private *dev_priv = dev->dev_private;
1470 unsigned long flags; 1498 unsigned long flags;
1471 1499
1472 if (!dev->irq_enabled) 1500 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
1473 return false; 1501 return false;
1474 1502
1475 spin_lock_irqsave(&dev_priv->irq_lock, flags); 1503 spin_lock_irqsave(&dev_priv->irq_lock, flags);
@@ -1694,13 +1722,42 @@ static int init_phys_status_page(struct intel_engine_cs *ring)
1694 return 0; 1722 return 0;
1695} 1723}
1696 1724
1697void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf) 1725void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
1698{ 1726{
1699 if (!ringbuf->obj)
1700 return;
1701
1702 iounmap(ringbuf->virtual_start); 1727 iounmap(ringbuf->virtual_start);
1728 ringbuf->virtual_start = NULL;
1703 i915_gem_object_ggtt_unpin(ringbuf->obj); 1729 i915_gem_object_ggtt_unpin(ringbuf->obj);
1730}
1731
1732int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
1733 struct intel_ringbuffer *ringbuf)
1734{
1735 struct drm_i915_private *dev_priv = to_i915(dev);
1736 struct drm_i915_gem_object *obj = ringbuf->obj;
1737 int ret;
1738
1739 ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
1740 if (ret)
1741 return ret;
1742
1743 ret = i915_gem_object_set_to_gtt_domain(obj, true);
1744 if (ret) {
1745 i915_gem_object_ggtt_unpin(obj);
1746 return ret;
1747 }
1748
1749 ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base +
1750 i915_gem_obj_ggtt_offset(obj), ringbuf->size);
1751 if (ringbuf->virtual_start == NULL) {
1752 i915_gem_object_ggtt_unpin(obj);
1753 return -EINVAL;
1754 }
1755
1756 return 0;
1757}
1758
1759void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
1760{
1704 drm_gem_object_unreference(&ringbuf->obj->base); 1761 drm_gem_object_unreference(&ringbuf->obj->base);
1705 ringbuf->obj = NULL; 1762 ringbuf->obj = NULL;
1706} 1763}
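
The hunks above split the old all-in-one allocation into separate steps: intel_alloc_ringbuffer_obj() creates the GEM object once, intel_pin_and_map_ringbuffer_obj() pins it into the GGTT and maps it (a repeatable step), and intel_unpin_ringbuffer_obj()/intel_destroy_ringbuffer_obj() undo them in reverse. A sketch of the resulting caller-side lifecycle, mirroring what intel_init_ring_buffer() and intel_cleanup_ring_buffer() do later in this diff (not standalone; kernel context elided):

static int example_ring_setup(struct drm_device *dev,
			      struct intel_ringbuffer *ringbuf)
{
	int ret;

	if (ringbuf->obj == NULL) {	/* allocate only once */
		ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
		if (ret)
			return ret;

		ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
		if (ret) {
			intel_destroy_ringbuffer_obj(ringbuf);
			return ret;
		}
	}
	return 0;
}

static void example_ring_teardown(struct intel_ringbuffer *ringbuf)
{
	intel_unpin_ringbuffer_obj(ringbuf);	/* unmap and unpin first */
	intel_destroy_ringbuffer_obj(ringbuf);	/* then drop the object */
}
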
@@ -1708,12 +1765,7 @@ void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
1708int intel_alloc_ringbuffer_obj(struct drm_device *dev, 1765int intel_alloc_ringbuffer_obj(struct drm_device *dev,
1709 struct intel_ringbuffer *ringbuf) 1766 struct intel_ringbuffer *ringbuf)
1710{ 1767{
1711 struct drm_i915_private *dev_priv = to_i915(dev);
1712 struct drm_i915_gem_object *obj; 1768 struct drm_i915_gem_object *obj;
1713 int ret;
1714
1715 if (ringbuf->obj)
1716 return 0;
1717 1769
1718 obj = NULL; 1770 obj = NULL;
1719 if (!HAS_LLC(dev)) 1771 if (!HAS_LLC(dev))
@@ -1726,30 +1778,9 @@ int intel_alloc_ringbuffer_obj(struct drm_device *dev,
1726 /* mark ring buffers as read-only from GPU side by default */ 1778 /* mark ring buffers as read-only from GPU side by default */
1727 obj->gt_ro = 1; 1779 obj->gt_ro = 1;
1728 1780
1729 ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
1730 if (ret)
1731 goto err_unref;
1732
1733 ret = i915_gem_object_set_to_gtt_domain(obj, true);
1734 if (ret)
1735 goto err_unpin;
1736
1737 ringbuf->virtual_start =
1738 ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
1739 ringbuf->size);
1740 if (ringbuf->virtual_start == NULL) {
1741 ret = -EINVAL;
1742 goto err_unpin;
1743 }
1744
1745 ringbuf->obj = obj; 1781 ringbuf->obj = obj;
1746 return 0;
1747 1782
1748err_unpin: 1783 return 0;
1749 i915_gem_object_ggtt_unpin(obj);
1750err_unref:
1751 drm_gem_object_unreference(&obj->base);
1752 return ret;
1753} 1784}
1754 1785
1755static int intel_init_ring_buffer(struct drm_device *dev, 1786static int intel_init_ring_buffer(struct drm_device *dev,
@@ -1786,10 +1817,21 @@ static int intel_init_ring_buffer(struct drm_device *dev,
1786 goto error; 1817 goto error;
1787 } 1818 }
1788 1819
1789 ret = intel_alloc_ringbuffer_obj(dev, ringbuf); 1820 if (ringbuf->obj == NULL) {
1790 if (ret) { 1821 ret = intel_alloc_ringbuffer_obj(dev, ringbuf);
1791 DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", ring->name, ret); 1822 if (ret) {
1792 goto error; 1823 DRM_ERROR("Failed to allocate ringbuffer %s: %d\n",
1824 ring->name, ret);
1825 goto error;
1826 }
1827
1828 ret = intel_pin_and_map_ringbuffer_obj(dev, ringbuf);
1829 if (ret) {
1830 DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
1831 ring->name, ret);
1832 intel_destroy_ringbuffer_obj(ringbuf);
1833 goto error;
1834 }
1793 } 1835 }
1794 1836
1795 /* Workaround an erratum on the i830 which causes a hang if 1837 /* Workaround an erratum on the i830 which causes a hang if
@@ -1818,15 +1860,19 @@ error:
1818 1860
1819void intel_cleanup_ring_buffer(struct intel_engine_cs *ring) 1861void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)
1820{ 1862{
1821 struct drm_i915_private *dev_priv = to_i915(ring->dev); 1863 struct drm_i915_private *dev_priv;
1822 struct intel_ringbuffer *ringbuf = ring->buffer; 1864 struct intel_ringbuffer *ringbuf;
1823 1865
1824 if (!intel_ring_initialized(ring)) 1866 if (!intel_ring_initialized(ring))
1825 return; 1867 return;
1826 1868
1869 dev_priv = to_i915(ring->dev);
1870 ringbuf = ring->buffer;
1871
1827 intel_stop_ring_buffer(ring); 1872 intel_stop_ring_buffer(ring);
1828 WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0); 1873 WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0);
1829 1874
1875 intel_unpin_ringbuffer_obj(ringbuf);
1830 intel_destroy_ringbuffer_obj(ringbuf); 1876 intel_destroy_ringbuffer_obj(ringbuf);
1831 ring->preallocated_lazy_request = NULL; 1877 ring->preallocated_lazy_request = NULL;
1832 ring->outstanding_lazy_seqno = 0; 1878 ring->outstanding_lazy_seqno = 0;
@@ -1912,13 +1958,6 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
1912 break; 1958 break;
1913 } 1959 }
1914 1960
1915 if (!drm_core_check_feature(dev, DRIVER_MODESET) &&
1916 dev->primary->master) {
1917 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
1918 if (master_priv->sarea_priv)
1919 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
1920 }
1921
1922 msleep(1); 1961 msleep(1);
1923 1962
1924 if (dev_priv->mm.interruptible && signal_pending(current)) { 1963 if (dev_priv->mm.interruptible && signal_pending(current)) {
@@ -2229,6 +2268,7 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
2229 u32 invalidate, u32 flush) 2268 u32 invalidate, u32 flush)
2230{ 2269{
2231 struct drm_device *dev = ring->dev; 2270 struct drm_device *dev = ring->dev;
2271 struct drm_i915_private *dev_priv = dev->dev_private;
2232 uint32_t cmd; 2272 uint32_t cmd;
2233 int ret; 2273 int ret;
2234 2274
@@ -2259,8 +2299,12 @@ static int gen6_ring_flush(struct intel_engine_cs *ring,
2259 } 2299 }
2260 intel_ring_advance(ring); 2300 intel_ring_advance(ring);
2261 2301
2262 if (IS_GEN7(dev) && !invalidate && flush) 2302 if (!invalidate && flush) {
2263 return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN); 2303 if (IS_GEN7(dev))
2304 return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);
2305 else if (IS_BROADWELL(dev))
2306 dev_priv->fbc.need_sw_cache_clean = true;
2307 }
2264 2308
2265 return 0; 2309 return 0;
2266} 2310}
@@ -2293,10 +2337,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
2293 dev_priv->semaphore_obj = obj; 2337 dev_priv->semaphore_obj = obj;
2294 } 2338 }
2295 } 2339 }
2296 if (IS_CHERRYVIEW(dev)) 2340
2297 ring->init_context = chv_init_workarounds; 2341 ring->init_context = intel_ring_workarounds_emit;
2298 else
2299 ring->init_context = bdw_init_workarounds;
2300 ring->add_request = gen6_add_request; 2342 ring->add_request = gen6_add_request;
2301 ring->flush = gen8_render_ring_flush; 2343 ring->flush = gen8_render_ring_flush;
2302 ring->irq_get = gen8_ring_get_irq; 2344 ring->irq_get = gen8_ring_get_irq;
@@ -2406,91 +2448,6 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
2406 return intel_init_ring_buffer(dev, ring); 2448 return intel_init_ring_buffer(dev, ring);
2407} 2449}
2408 2450
2409int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
2410{
2411 struct drm_i915_private *dev_priv = dev->dev_private;
2412 struct intel_engine_cs *ring = &dev_priv->ring[RCS];
2413 struct intel_ringbuffer *ringbuf = ring->buffer;
2414 int ret;
2415
2416 if (ringbuf == NULL) {
2417 ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL);
2418 if (!ringbuf)
2419 return -ENOMEM;
2420 ring->buffer = ringbuf;
2421 }
2422
2423 ring->name = "render ring";
2424 ring->id = RCS;
2425 ring->mmio_base = RENDER_RING_BASE;
2426
2427 if (INTEL_INFO(dev)->gen >= 6) {
2428 /* non-kms not supported on gen6+ */
2429 ret = -ENODEV;
2430 goto err_ringbuf;
2431 }
2432
2433 /* Note: gem is not supported on gen5/ilk without kms (the corresponding
2434 * gem_init ioctl returns with -ENODEV). Hence we do not need to set up
2435 * the special gen5 functions. */
2436 ring->add_request = i9xx_add_request;
2437 if (INTEL_INFO(dev)->gen < 4)
2438 ring->flush = gen2_render_ring_flush;
2439 else
2440 ring->flush = gen4_render_ring_flush;
2441 ring->get_seqno = ring_get_seqno;
2442 ring->set_seqno = ring_set_seqno;
2443 if (IS_GEN2(dev)) {
2444 ring->irq_get = i8xx_ring_get_irq;
2445 ring->irq_put = i8xx_ring_put_irq;
2446 } else {
2447 ring->irq_get = i9xx_ring_get_irq;
2448 ring->irq_put = i9xx_ring_put_irq;
2449 }
2450 ring->irq_enable_mask = I915_USER_INTERRUPT;
2451 ring->write_tail = ring_write_tail;
2452 if (INTEL_INFO(dev)->gen >= 4)
2453 ring->dispatch_execbuffer = i965_dispatch_execbuffer;
2454 else if (IS_I830(dev) || IS_845G(dev))
2455 ring->dispatch_execbuffer = i830_dispatch_execbuffer;
2456 else
2457 ring->dispatch_execbuffer = i915_dispatch_execbuffer;
2458 ring->init = init_render_ring;
2459 ring->cleanup = render_ring_cleanup;
2460
2461 ring->dev = dev;
2462 INIT_LIST_HEAD(&ring->active_list);
2463 INIT_LIST_HEAD(&ring->request_list);
2464
2465 ringbuf->size = size;
2466 ringbuf->effective_size = ringbuf->size;
2467 if (IS_I830(ring->dev) || IS_845G(ring->dev))
2468 ringbuf->effective_size -= 2 * CACHELINE_BYTES;
2469
2470 ringbuf->virtual_start = ioremap_wc(start, size);
2471 if (ringbuf->virtual_start == NULL) {
2472 DRM_ERROR("can not ioremap virtual address for"
2473 " ring buffer\n");
2474 ret = -ENOMEM;
2475 goto err_ringbuf;
2476 }
2477
2478 if (!I915_NEED_GFX_HWS(dev)) {
2479 ret = init_phys_status_page(ring);
2480 if (ret)
2481 goto err_vstart;
2482 }
2483
2484 return 0;
2485
2486err_vstart:
2487 iounmap(ringbuf->virtual_start);
2488err_ringbuf:
2489 kfree(ringbuf);
2490 ring->buffer = NULL;
2491 return ret;
2492}
2493
2494int intel_init_bsd_ring_buffer(struct drm_device *dev) 2451int intel_init_bsd_ring_buffer(struct drm_device *dev)
2495{ 2452{
2496 struct drm_i915_private *dev_priv = dev->dev_private; 2453 struct drm_i915_private *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 96479c89f4bd..fe426cff598b 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -148,7 +148,8 @@ struct intel_engine_cs {
148 148
149 int (*init)(struct intel_engine_cs *ring); 149 int (*init)(struct intel_engine_cs *ring);
150 150
151 int (*init_context)(struct intel_engine_cs *ring); 151 int (*init_context)(struct intel_engine_cs *ring,
152 struct intel_context *ctx);
152 153
153 void (*write_tail)(struct intel_engine_cs *ring, 154 void (*write_tail)(struct intel_engine_cs *ring,
154 u32 value); 155 u32 value);
@@ -235,6 +236,7 @@ struct intel_engine_cs {
235 /* Execlists */ 236 /* Execlists */
236 spinlock_t execlist_lock; 237 spinlock_t execlist_lock;
237 struct list_head execlist_queue; 238 struct list_head execlist_queue;
239 struct list_head execlist_retired_req_list;
238 u8 next_context_status_buffer; 240 u8 next_context_status_buffer;
239 u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */ 241 u32 irq_keep_mask; /* bitmask for interrupts that should not be masked */
240 int (*emit_request)(struct intel_ringbuffer *ringbuf); 242 int (*emit_request)(struct intel_ringbuffer *ringbuf);
@@ -381,6 +383,9 @@ intel_write_status_page(struct intel_engine_cs *ring,
381#define I915_GEM_HWS_SCRATCH_INDEX 0x30 383#define I915_GEM_HWS_SCRATCH_INDEX 0x30
382#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT) 384#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
383 385
386void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
387int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
388 struct intel_ringbuffer *ringbuf);
384void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf); 389void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
385int intel_alloc_ringbuffer_obj(struct drm_device *dev, 390int intel_alloc_ringbuffer_obj(struct drm_device *dev,
386 struct intel_ringbuffer *ringbuf); 391 struct intel_ringbuffer *ringbuf);
@@ -424,6 +429,8 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev);
424u64 intel_ring_get_active_head(struct intel_engine_cs *ring); 429u64 intel_ring_get_active_head(struct intel_engine_cs *ring);
425void intel_ring_setup_status_page(struct intel_engine_cs *ring); 430void intel_ring_setup_status_page(struct intel_engine_cs *ring);
426 431
432int init_workarounds_ring(struct intel_engine_cs *ring);
433
427static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf) 434static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
428{ 435{
429 return ringbuf->tail; 436 return ringbuf->tail;
@@ -441,7 +448,4 @@ static inline void i915_trace_irq_get(struct intel_engine_cs *ring, u32 seqno)
441 ring->trace_irq_seqno = seqno; 448 ring->trace_irq_seqno = seqno;
442} 449}
443 450
444/* DRI warts */
445int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);
446
447#endif /* _INTEL_RINGBUFFER_H_ */ 451#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
new file mode 100644
index 000000000000..f5a78d53e297
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -0,0 +1,1406 @@
1/*
2 * Copyright © 2012-2014 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eugeni Dodonov <eugeni.dodonov@intel.com>
25 * Daniel Vetter <daniel.vetter@ffwll.ch>
26 *
27 */
28
29#include <linux/pm_runtime.h>
30#include <linux/vgaarb.h>
31
32#include "i915_drv.h"
33#include "intel_drv.h"
34#include <drm/i915_powerwell.h>
35
36/**
37 * DOC: runtime pm
38 *
39 * The i915 driver supports dynamic enabling and disabling of entire hardware
40 * blocks at runtime. This is especially important on the display side where
41 * software is supposed to control many power gates manually on recent hardware,
42 * since on the GT side a lot of the power management is done by the hardware.
43 * But even there some manual control at the device level is required.
44 *
45 * Since i915 supports a diverse set of platforms with a unified codebase and
46 * hardware engineers just love to shuffle functionality around between power
 47 * domains, there's a sizeable amount of indirection required. This file provides
48 * generic functions to the driver for grabbing and releasing references for
49 * abstract power domains. It then maps those to the actual power wells
50 * present for a given platform.
51 */
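/*
 * Editor's note: a hedged usage sketch of the reference-counting API this
 * file provides; the domain picked is illustrative, not prescriptive.
 *
 *	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
 *	... access registers behind the PIPE_A power well ...
 *	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
 *
 * Every get must be balanced by a put; a well is only powered down once
 * the last reference across all domains mapping to it has been dropped.
 */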
52
53static struct i915_power_domains *hsw_pwr;
54
55#define for_each_power_well(i, power_well, domain_mask, power_domains) \
56 for (i = 0; \
57 i < (power_domains)->power_well_count && \
58 ((power_well) = &(power_domains)->power_wells[i]); \
59 i++) \
60 if ((power_well)->domains & (domain_mask))
61
62#define for_each_power_well_rev(i, power_well, domain_mask, power_domains) \
63 for (i = (power_domains)->power_well_count - 1; \
64 i >= 0 && ((power_well) = &(power_domains)->power_wells[i]);\
65 i--) \
66 if ((power_well)->domains & (domain_mask))
67
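As a rough illustration (not part of this patch), the iterators are used like so, assuming dev_priv is in scope:

	int i;
	struct i915_power_well *power_well;

	/* Visit every well whose domain mask covers pipe A, in order. */
	for_each_power_well(i, power_well, BIT(POWER_DOMAIN_PIPE_A),
			    &dev_priv->power_domains)
		DRM_DEBUG_KMS("%s powers pipe A\n", power_well->name);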
68/*
69 * We should only use the power well if we explicitly asked the hardware to
70 * enable it, so check if it's enabled and also check if we've requested it to
71 * be enabled.
72 */
73static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
74 struct i915_power_well *power_well)
75{
76 return I915_READ(HSW_PWR_WELL_DRIVER) ==
77 (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
78}
79
80/**
81 * __intel_display_power_is_enabled - unlocked check for a power domain
82 * @dev_priv: i915 device instance
83 * @domain: power domain to check
84 *
85 * This is the unlocked version of intel_display_power_is_enabled() and should
86 * only be used from error capture and recovery code where deadlocks are
87 * possible.
88 *
89 * Returns:
90 * True when the power domain is enabled, false otherwise.
91 */
92bool __intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
93 enum intel_display_power_domain domain)
94{
95 struct i915_power_domains *power_domains;
96 struct i915_power_well *power_well;
97 bool is_enabled;
98 int i;
99
100 if (dev_priv->pm.suspended)
101 return false;
102
103 power_domains = &dev_priv->power_domains;
104
105 is_enabled = true;
106
107 for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
108 if (power_well->always_on)
109 continue;
110
111 if (!power_well->hw_enabled) {
112 is_enabled = false;
113 break;
114 }
115 }
116
117 return is_enabled;
118}
119
120/**
 121 * intel_display_power_is_enabled - locked check for a power domain
122 * @dev_priv: i915 device instance
123 * @domain: power domain to check
124 *
125 * This function can be used to check the hw power domain state. It is mostly
126 * used in hardware state readout functions. Everywhere else code should rely
127 * upon explicit power domain reference counting to ensure that the hardware
128 * block is powered up before accessing it.
129 *
130 * Callers must hold the relevant modesetting locks to ensure that concurrent
131 * threads can't disable the power well while the caller tries to read a few
132 * registers.
133 *
134 * Returns:
135 * True when the power domain is enabled, false otherwise.
136 */
137bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv,
138 enum intel_display_power_domain domain)
139{
140 struct i915_power_domains *power_domains;
141 bool ret;
142
143 power_domains = &dev_priv->power_domains;
144
145 mutex_lock(&power_domains->lock);
146 ret = __intel_display_power_is_enabled(dev_priv, domain);
147 mutex_unlock(&power_domains->lock);
148
149 return ret;
150}
151
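For example, hardware state readout might use the locked check like this (sketch only; the PIPECONF read is an assumed example, and the caller is presumed to hold the relevant modeset locks):

	u32 val = 0;

	if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE_A))
		val = I915_READ(PIPECONF(PIPE_A));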
152/**
153 * intel_display_set_init_power - set the initial power domain state
154 * @dev_priv: i915 device instance
155 * @enable: whether to enable or disable the initial power domain state
156 *
157 * For simplicity our driver load/unload and system suspend/resume code assumes
 158 * that all power domains are always enabled. This function controls the state
 159 * of this little hack. While the initial power domain state is enabled, runtime
160 * pm is effectively disabled.
161 */
162void intel_display_set_init_power(struct drm_i915_private *dev_priv,
163 bool enable)
164{
165 if (dev_priv->power_domains.init_power_on == enable)
166 return;
167
168 if (enable)
169 intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
170 else
171 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
172
173 dev_priv->power_domains.init_power_on = enable;
174}
175
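A sketch of the intended call pattern (illustrative; mirrors what the load/unload code later in this file does):

	/* Driver load/resume: keep all domains powered. */
	intel_display_set_init_power(dev_priv, true);

	/* Later, once explicit references are held, hand over to runtime pm. */
	intel_display_set_init_power(dev_priv, false);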
176/*
177 * Starting with Haswell, we have a "Power Down Well" that can be turned off
178 * when not needed anymore. We have 4 registers that can request the power well
179 * to be enabled, and it will only be disabled if none of the registers is
180 * requesting it to be enabled.
181 */
182static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv)
183{
184 struct drm_device *dev = dev_priv->dev;
185
186 /*
187 * After we re-enable the power well, if we touch VGA register 0x3d5
188 * we'll get unclaimed register interrupts. This stops after we write
189 * anything to the VGA MSR register. The vgacon module uses this
190 * register all the time, so if we unbind our driver and, as a
191 * consequence, bind vgacon, we'll get stuck in an infinite loop at
 192 * console_unlock(). So here we touch the VGA MSR register, making
193 * sure vgacon can keep working normally without triggering interrupts
194 * and error messages.
195 */
196 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
197 outb(inb(VGA_MSR_READ), VGA_MSR_WRITE);
198 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
199
200 if (IS_BROADWELL(dev) || (INTEL_INFO(dev)->gen >= 9))
201 gen8_irq_power_well_post_enable(dev_priv);
202}
203
204static void hsw_set_power_well(struct drm_i915_private *dev_priv,
205 struct i915_power_well *power_well, bool enable)
206{
207 bool is_enabled, enable_requested;
208 uint32_t tmp;
209
210 tmp = I915_READ(HSW_PWR_WELL_DRIVER);
211 is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
212 enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
213
214 if (enable) {
215 if (!enable_requested)
216 I915_WRITE(HSW_PWR_WELL_DRIVER,
217 HSW_PWR_WELL_ENABLE_REQUEST);
218
219 if (!is_enabled) {
220 DRM_DEBUG_KMS("Enabling power well\n");
221 if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
222 HSW_PWR_WELL_STATE_ENABLED), 20))
223 DRM_ERROR("Timeout enabling power well\n");
224 hsw_power_well_post_enable(dev_priv);
225 }
226
227 } else {
228 if (enable_requested) {
229 I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
230 POSTING_READ(HSW_PWR_WELL_DRIVER);
231 DRM_DEBUG_KMS("Requesting to disable the power well\n");
232 }
233 }
234}
235
236static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
237 struct i915_power_well *power_well)
238{
239 hsw_set_power_well(dev_priv, power_well, power_well->count > 0);
240
241 /*
242 * We're taking over the BIOS, so clear any requests made by it since
243 * the driver is in charge now.
244 */
245 if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE_REQUEST)
246 I915_WRITE(HSW_PWR_WELL_BIOS, 0);
247}
248
249static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
250 struct i915_power_well *power_well)
251{
252 hsw_set_power_well(dev_priv, power_well, true);
253}
254
255static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
256 struct i915_power_well *power_well)
257{
258 hsw_set_power_well(dev_priv, power_well, false);
259}
260
261static void i9xx_always_on_power_well_noop(struct drm_i915_private *dev_priv,
262 struct i915_power_well *power_well)
263{
264}
265
266static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
267 struct i915_power_well *power_well)
268{
269 return true;
270}
271
272static void vlv_set_power_well(struct drm_i915_private *dev_priv,
273 struct i915_power_well *power_well, bool enable)
274{
275 enum punit_power_well power_well_id = power_well->data;
276 u32 mask;
277 u32 state;
278 u32 ctrl;
279
280 mask = PUNIT_PWRGT_MASK(power_well_id);
281 state = enable ? PUNIT_PWRGT_PWR_ON(power_well_id) :
282 PUNIT_PWRGT_PWR_GATE(power_well_id);
283
284 mutex_lock(&dev_priv->rps.hw_lock);
285
286#define COND \
287 ((vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask) == state)
288
289 if (COND)
290 goto out;
291
292 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL);
293 ctrl &= ~mask;
294 ctrl |= state;
295 vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
296
297 if (wait_for(COND, 100))
 298 DRM_ERROR("timeout setting power well state %08x (%08x)\n",
299 state,
300 vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
301
302#undef COND
303
304out:
305 mutex_unlock(&dev_priv->rps.hw_lock);
306}
307
308static void vlv_power_well_sync_hw(struct drm_i915_private *dev_priv,
309 struct i915_power_well *power_well)
310{
311 vlv_set_power_well(dev_priv, power_well, power_well->count > 0);
312}
313
314static void vlv_power_well_enable(struct drm_i915_private *dev_priv,
315 struct i915_power_well *power_well)
316{
317 vlv_set_power_well(dev_priv, power_well, true);
318}
319
320static void vlv_power_well_disable(struct drm_i915_private *dev_priv,
321 struct i915_power_well *power_well)
322{
323 vlv_set_power_well(dev_priv, power_well, false);
324}
325
326static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
327 struct i915_power_well *power_well)
328{
329 int power_well_id = power_well->data;
330 bool enabled = false;
331 u32 mask;
332 u32 state;
333 u32 ctrl;
334
335 mask = PUNIT_PWRGT_MASK(power_well_id);
336 ctrl = PUNIT_PWRGT_PWR_ON(power_well_id);
337
338 mutex_lock(&dev_priv->rps.hw_lock);
339
340 state = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS) & mask;
341 /*
342 * We only ever set the power-on and power-gate states, anything
343 * else is unexpected.
344 */
345 WARN_ON(state != PUNIT_PWRGT_PWR_ON(power_well_id) &&
346 state != PUNIT_PWRGT_PWR_GATE(power_well_id));
347 if (state == ctrl)
348 enabled = true;
349
350 /*
351 * A transient state at this point would mean some unexpected party
352 * is poking at the power controls too.
353 */
354 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
355 WARN_ON(ctrl != state);
356
357 mutex_unlock(&dev_priv->rps.hw_lock);
358
359 return enabled;
360}
361
362static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv,
363 struct i915_power_well *power_well)
364{
365 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
366
367 vlv_set_power_well(dev_priv, power_well, true);
368
369 spin_lock_irq(&dev_priv->irq_lock);
370 valleyview_enable_display_irqs(dev_priv);
371 spin_unlock_irq(&dev_priv->irq_lock);
372
373 /*
374 * During driver initialization/resume we can avoid restoring the
 375 * part of the HW/SW state that will be initialized explicitly anyway.
376 */
377 if (dev_priv->power_domains.initializing)
378 return;
379
380 intel_hpd_init(dev_priv);
381
382 i915_redisable_vga_power_on(dev_priv->dev);
383}
384
385static void vlv_display_power_well_disable(struct drm_i915_private *dev_priv,
386 struct i915_power_well *power_well)
387{
388 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DISP2D);
389
390 spin_lock_irq(&dev_priv->irq_lock);
391 valleyview_disable_display_irqs(dev_priv);
392 spin_unlock_irq(&dev_priv->irq_lock);
393
394 vlv_set_power_well(dev_priv, power_well, false);
395
396 vlv_power_sequencer_reset(dev_priv);
397}
398
399static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
400 struct i915_power_well *power_well)
401{
402 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
403
404 /*
405 * Enable the CRI clock source so we can get at the
406 * display and the reference clock for VGA
407 * hotplug / manual detection.
408 */
409 I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
410 DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
411 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
412
413 vlv_set_power_well(dev_priv, power_well, true);
414
415 /*
416 * From VLV2A0_DP_eDP_DPIO_driver_vbios_notes_10.docx -
417 * 6. De-assert cmn_reset/side_reset. Same as VLV X0.
418 * a. GUnit 0x2110 bit[0] set to 1 (def 0)
419 * b. The other bits such as sfr settings / modesel may all
420 * be set to 0.
421 *
422 * This should only be done on init and resume from S3 with
423 * both PLLs disabled, or we risk losing DPIO and PLL
424 * synchronization.
425 */
426 I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
427}
428
429static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
430 struct i915_power_well *power_well)
431{
432 enum pipe pipe;
433
434 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC);
435
436 for_each_pipe(dev_priv, pipe)
437 assert_pll_disabled(dev_priv, pipe);
438
439 /* Assert common reset */
440 I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
441
442 vlv_set_power_well(dev_priv, power_well, false);
443}
444
445static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
446 struct i915_power_well *power_well)
447{
448 enum dpio_phy phy;
449
450 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
451 power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
452
453 /*
454 * Enable the CRI clock source so we can get at the
455 * display and the reference clock for VGA
456 * hotplug / manual detection.
457 */
458 if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
459 phy = DPIO_PHY0;
460 I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
461 DPLL_REFA_CLK_ENABLE_VLV);
462 I915_WRITE(DPLL(PIPE_B), I915_READ(DPLL(PIPE_B)) |
463 DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
464 } else {
465 phy = DPIO_PHY1;
466 I915_WRITE(DPLL(PIPE_C), I915_READ(DPLL(PIPE_C)) |
467 DPLL_REFA_CLK_ENABLE_VLV | DPLL_INTEGRATED_CRI_CLK_VLV);
468 }
469 udelay(1); /* >10ns for cmnreset, >0ns for sidereset */
470 vlv_set_power_well(dev_priv, power_well, true);
471
472 /* Poll for phypwrgood signal */
473 if (wait_for(I915_READ(DISPLAY_PHY_STATUS) & PHY_POWERGOOD(phy), 1))
 474 DRM_ERROR("Display PHY %d is not powered up\n", phy);
475
476 I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) |
477 PHY_COM_LANE_RESET_DEASSERT(phy));
478}
479
480static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
481 struct i915_power_well *power_well)
482{
483 enum dpio_phy phy;
484
485 WARN_ON_ONCE(power_well->data != PUNIT_POWER_WELL_DPIO_CMN_BC &&
486 power_well->data != PUNIT_POWER_WELL_DPIO_CMN_D);
487
488 if (power_well->data == PUNIT_POWER_WELL_DPIO_CMN_BC) {
489 phy = DPIO_PHY0;
490 assert_pll_disabled(dev_priv, PIPE_A);
491 assert_pll_disabled(dev_priv, PIPE_B);
492 } else {
493 phy = DPIO_PHY1;
494 assert_pll_disabled(dev_priv, PIPE_C);
495 }
496
497 I915_WRITE(DISPLAY_PHY_CONTROL, I915_READ(DISPLAY_PHY_CONTROL) &
498 ~PHY_COM_LANE_RESET_DEASSERT(phy));
499
500 vlv_set_power_well(dev_priv, power_well, false);
501}
502
503static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
504 struct i915_power_well *power_well)
505{
506 enum pipe pipe = power_well->data;
507 bool enabled;
508 u32 state, ctrl;
509
510 mutex_lock(&dev_priv->rps.hw_lock);
511
512 state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
513 /*
514 * We only ever set the power-on and power-gate states, anything
515 * else is unexpected.
516 */
517 WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
518 enabled = state == DP_SSS_PWR_ON(pipe);
519
520 /*
521 * A transient state at this point would mean some unexpected party
522 * is poking at the power controls too.
523 */
524 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
525 WARN_ON(ctrl << 16 != state);
526
527 mutex_unlock(&dev_priv->rps.hw_lock);
528
529 return enabled;
530}
531
532static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
533 struct i915_power_well *power_well,
534 bool enable)
535{
536 enum pipe pipe = power_well->data;
537 u32 state;
538 u32 ctrl;
539
540 state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe);
541
542 mutex_lock(&dev_priv->rps.hw_lock);
543
544#define COND \
545 ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)
546
547 if (COND)
548 goto out;
549
550 ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
551 ctrl &= ~DP_SSC_MASK(pipe);
552 ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
553 vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);
554
555 if (wait_for(COND, 100))
 556 DRM_ERROR("timeout setting power well state %08x (%08x)\n",
557 state,
558 vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));
559
560#undef COND
561
562out:
563 mutex_unlock(&dev_priv->rps.hw_lock);
564}
565
566static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
567 struct i915_power_well *power_well)
568{
569 chv_set_pipe_power_well(dev_priv, power_well, power_well->count > 0);
570}
571
572static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
573 struct i915_power_well *power_well)
574{
575 WARN_ON_ONCE(power_well->data != PIPE_A &&
576 power_well->data != PIPE_B &&
577 power_well->data != PIPE_C);
578
579 chv_set_pipe_power_well(dev_priv, power_well, true);
580
581 if (power_well->data == PIPE_A) {
582 spin_lock_irq(&dev_priv->irq_lock);
583 valleyview_enable_display_irqs(dev_priv);
584 spin_unlock_irq(&dev_priv->irq_lock);
585
586 /*
587 * During driver initialization/resume we can avoid restoring the
 588 * part of the HW/SW state that will be initialized explicitly anyway.
589 */
590 if (dev_priv->power_domains.initializing)
591 return;
592
593 intel_hpd_init(dev_priv);
594
595 i915_redisable_vga_power_on(dev_priv->dev);
596 }
597}
598
599static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
600 struct i915_power_well *power_well)
601{
602 WARN_ON_ONCE(power_well->data != PIPE_A &&
603 power_well->data != PIPE_B &&
604 power_well->data != PIPE_C);
605
606 if (power_well->data == PIPE_A) {
607 spin_lock_irq(&dev_priv->irq_lock);
608 valleyview_disable_display_irqs(dev_priv);
609 spin_unlock_irq(&dev_priv->irq_lock);
610 }
611
612 chv_set_pipe_power_well(dev_priv, power_well, false);
613
614 if (power_well->data == PIPE_A)
615 vlv_power_sequencer_reset(dev_priv);
616}
617
618static void check_power_well_state(struct drm_i915_private *dev_priv,
619 struct i915_power_well *power_well)
620{
621 bool enabled = power_well->ops->is_enabled(dev_priv, power_well);
622
623 if (power_well->always_on || !i915.disable_power_well) {
624 if (!enabled)
625 goto mismatch;
626
627 return;
628 }
629
630 if (enabled != (power_well->count > 0))
631 goto mismatch;
632
633 return;
634
635mismatch:
 636 WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d)\n",
637 power_well->name, power_well->always_on, enabled,
638 power_well->count, i915.disable_power_well);
639}
640
641/**
642 * intel_display_power_get - grab a power domain reference
643 * @dev_priv: i915 device instance
644 * @domain: power domain to reference
645 *
646 * This function grabs a power domain reference for @domain and ensures that the
647 * power domain and all its parents are powered up. Therefore users should only
648 * grab a reference to the innermost power domain they need.
649 *
650 * Any power domain reference obtained by this function must have a symmetric
651 * call to intel_display_power_put() to release the reference again.
652 */
653void intel_display_power_get(struct drm_i915_private *dev_priv,
654 enum intel_display_power_domain domain)
655{
656 struct i915_power_domains *power_domains;
657 struct i915_power_well *power_well;
658 int i;
659
660 intel_runtime_pm_get(dev_priv);
661
662 power_domains = &dev_priv->power_domains;
663
664 mutex_lock(&power_domains->lock);
665
666 for_each_power_well(i, power_well, BIT(domain), power_domains) {
667 if (!power_well->count++) {
668 DRM_DEBUG_KMS("enabling %s\n", power_well->name);
669 power_well->ops->enable(dev_priv, power_well);
670 power_well->hw_enabled = true;
671 }
672
673 check_power_well_state(dev_priv, power_well);
674 }
675
676 power_domains->domain_use_count[domain]++;
677
678 mutex_unlock(&power_domains->lock);
679}
680
681/**
682 * intel_display_power_put - release a power domain reference
683 * @dev_priv: i915 device instance
684 * @domain: power domain to reference
685 *
686 * This function drops the power domain reference obtained by
687 * intel_display_power_get() and might power down the corresponding hardware
688 * block right away if this is the last reference.
689 */
690void intel_display_power_put(struct drm_i915_private *dev_priv,
691 enum intel_display_power_domain domain)
692{
693 struct i915_power_domains *power_domains;
694 struct i915_power_well *power_well;
695 int i;
696
697 power_domains = &dev_priv->power_domains;
698
699 mutex_lock(&power_domains->lock);
700
701 WARN_ON(!power_domains->domain_use_count[domain]);
702 power_domains->domain_use_count[domain]--;
703
704 for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
705 WARN_ON(!power_well->count);
706
707 if (!--power_well->count && i915.disable_power_well) {
708 DRM_DEBUG_KMS("disabling %s\n", power_well->name);
709 power_well->hw_enabled = false;
710 power_well->ops->disable(dev_priv, power_well);
711 }
712
713 check_power_well_state(dev_priv, power_well);
714 }
715
716 mutex_unlock(&power_domains->lock);
717
718 intel_runtime_pm_put(dev_priv);
719}
720
721#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
722
723#define HSW_ALWAYS_ON_POWER_DOMAINS ( \
724 BIT(POWER_DOMAIN_PIPE_A) | \
725 BIT(POWER_DOMAIN_TRANSCODER_EDP) | \
726 BIT(POWER_DOMAIN_PORT_DDI_A_2_LANES) | \
727 BIT(POWER_DOMAIN_PORT_DDI_A_4_LANES) | \
728 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
729 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
730 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
731 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
732 BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
733 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
734 BIT(POWER_DOMAIN_PORT_CRT) | \
735 BIT(POWER_DOMAIN_PLLS) | \
736 BIT(POWER_DOMAIN_INIT))
737#define HSW_DISPLAY_POWER_DOMAINS ( \
738 (POWER_DOMAIN_MASK & ~HSW_ALWAYS_ON_POWER_DOMAINS) | \
739 BIT(POWER_DOMAIN_INIT))
740
741#define BDW_ALWAYS_ON_POWER_DOMAINS ( \
742 HSW_ALWAYS_ON_POWER_DOMAINS | \
743 BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
744#define BDW_DISPLAY_POWER_DOMAINS ( \
745 (POWER_DOMAIN_MASK & ~BDW_ALWAYS_ON_POWER_DOMAINS) | \
746 BIT(POWER_DOMAIN_INIT))
747
748#define VLV_ALWAYS_ON_POWER_DOMAINS BIT(POWER_DOMAIN_INIT)
749#define VLV_DISPLAY_POWER_DOMAINS POWER_DOMAIN_MASK
750
751#define VLV_DPIO_CMN_BC_POWER_DOMAINS ( \
752 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
753 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
754 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
755 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
756 BIT(POWER_DOMAIN_PORT_CRT) | \
757 BIT(POWER_DOMAIN_INIT))
758
759#define VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS ( \
760 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
761 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
762 BIT(POWER_DOMAIN_INIT))
763
764#define VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS ( \
765 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
766 BIT(POWER_DOMAIN_INIT))
767
768#define VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS ( \
769 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
770 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
771 BIT(POWER_DOMAIN_INIT))
772
773#define VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS ( \
774 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
775 BIT(POWER_DOMAIN_INIT))
776
777#define CHV_PIPE_A_POWER_DOMAINS ( \
778 BIT(POWER_DOMAIN_PIPE_A) | \
779 BIT(POWER_DOMAIN_INIT))
780
781#define CHV_PIPE_B_POWER_DOMAINS ( \
782 BIT(POWER_DOMAIN_PIPE_B) | \
783 BIT(POWER_DOMAIN_INIT))
784
785#define CHV_PIPE_C_POWER_DOMAINS ( \
786 BIT(POWER_DOMAIN_PIPE_C) | \
787 BIT(POWER_DOMAIN_INIT))
788
789#define CHV_DPIO_CMN_BC_POWER_DOMAINS ( \
790 BIT(POWER_DOMAIN_PORT_DDI_B_2_LANES) | \
791 BIT(POWER_DOMAIN_PORT_DDI_B_4_LANES) | \
792 BIT(POWER_DOMAIN_PORT_DDI_C_2_LANES) | \
793 BIT(POWER_DOMAIN_PORT_DDI_C_4_LANES) | \
794 BIT(POWER_DOMAIN_INIT))
795
796#define CHV_DPIO_CMN_D_POWER_DOMAINS ( \
797 BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
798 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
799 BIT(POWER_DOMAIN_INIT))
800
801#define CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS ( \
802 BIT(POWER_DOMAIN_PORT_DDI_D_2_LANES) | \
803 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
804 BIT(POWER_DOMAIN_INIT))
805
806#define CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS ( \
807 BIT(POWER_DOMAIN_PORT_DDI_D_4_LANES) | \
808 BIT(POWER_DOMAIN_INIT))
809
810static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
811 .sync_hw = i9xx_always_on_power_well_noop,
812 .enable = i9xx_always_on_power_well_noop,
813 .disable = i9xx_always_on_power_well_noop,
814 .is_enabled = i9xx_always_on_power_well_enabled,
815};
816
817static const struct i915_power_well_ops chv_pipe_power_well_ops = {
818 .sync_hw = chv_pipe_power_well_sync_hw,
819 .enable = chv_pipe_power_well_enable,
820 .disable = chv_pipe_power_well_disable,
821 .is_enabled = chv_pipe_power_well_enabled,
822};
823
824static const struct i915_power_well_ops chv_dpio_cmn_power_well_ops = {
825 .sync_hw = vlv_power_well_sync_hw,
826 .enable = chv_dpio_cmn_power_well_enable,
827 .disable = chv_dpio_cmn_power_well_disable,
828 .is_enabled = vlv_power_well_enabled,
829};
830
831static struct i915_power_well i9xx_always_on_power_well[] = {
832 {
833 .name = "always-on",
834 .always_on = 1,
835 .domains = POWER_DOMAIN_MASK,
836 .ops = &i9xx_always_on_power_well_ops,
837 },
838};
839
840static const struct i915_power_well_ops hsw_power_well_ops = {
841 .sync_hw = hsw_power_well_sync_hw,
842 .enable = hsw_power_well_enable,
843 .disable = hsw_power_well_disable,
844 .is_enabled = hsw_power_well_enabled,
845};
846
847static struct i915_power_well hsw_power_wells[] = {
848 {
849 .name = "always-on",
850 .always_on = 1,
851 .domains = HSW_ALWAYS_ON_POWER_DOMAINS,
852 .ops = &i9xx_always_on_power_well_ops,
853 },
854 {
855 .name = "display",
856 .domains = HSW_DISPLAY_POWER_DOMAINS,
857 .ops = &hsw_power_well_ops,
858 },
859};
860
861static struct i915_power_well bdw_power_wells[] = {
862 {
863 .name = "always-on",
864 .always_on = 1,
865 .domains = BDW_ALWAYS_ON_POWER_DOMAINS,
866 .ops = &i9xx_always_on_power_well_ops,
867 },
868 {
869 .name = "display",
870 .domains = BDW_DISPLAY_POWER_DOMAINS,
871 .ops = &hsw_power_well_ops,
872 },
873};
874
875static const struct i915_power_well_ops vlv_display_power_well_ops = {
876 .sync_hw = vlv_power_well_sync_hw,
877 .enable = vlv_display_power_well_enable,
878 .disable = vlv_display_power_well_disable,
879 .is_enabled = vlv_power_well_enabled,
880};
881
882static const struct i915_power_well_ops vlv_dpio_cmn_power_well_ops = {
883 .sync_hw = vlv_power_well_sync_hw,
884 .enable = vlv_dpio_cmn_power_well_enable,
885 .disable = vlv_dpio_cmn_power_well_disable,
886 .is_enabled = vlv_power_well_enabled,
887};
888
889static const struct i915_power_well_ops vlv_dpio_power_well_ops = {
890 .sync_hw = vlv_power_well_sync_hw,
891 .enable = vlv_power_well_enable,
892 .disable = vlv_power_well_disable,
893 .is_enabled = vlv_power_well_enabled,
894};
895
896static struct i915_power_well vlv_power_wells[] = {
897 {
898 .name = "always-on",
899 .always_on = 1,
900 .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
901 .ops = &i9xx_always_on_power_well_ops,
902 },
903 {
904 .name = "display",
905 .domains = VLV_DISPLAY_POWER_DOMAINS,
906 .data = PUNIT_POWER_WELL_DISP2D,
907 .ops = &vlv_display_power_well_ops,
908 },
909 {
910 .name = "dpio-tx-b-01",
911 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
912 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
913 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
914 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
915 .ops = &vlv_dpio_power_well_ops,
916 .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
917 },
918 {
919 .name = "dpio-tx-b-23",
920 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
921 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
922 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
923 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
924 .ops = &vlv_dpio_power_well_ops,
925 .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
926 },
927 {
928 .name = "dpio-tx-c-01",
929 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
930 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
931 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
932 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
933 .ops = &vlv_dpio_power_well_ops,
934 .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
935 },
936 {
937 .name = "dpio-tx-c-23",
938 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
939 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS |
940 VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
941 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
942 .ops = &vlv_dpio_power_well_ops,
943 .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
944 },
945 {
946 .name = "dpio-common",
947 .domains = VLV_DPIO_CMN_BC_POWER_DOMAINS,
948 .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
949 .ops = &vlv_dpio_cmn_power_well_ops,
950 },
951};
952
953static struct i915_power_well chv_power_wells[] = {
954 {
955 .name = "always-on",
956 .always_on = 1,
957 .domains = VLV_ALWAYS_ON_POWER_DOMAINS,
958 .ops = &i9xx_always_on_power_well_ops,
959 },
960#if 0
961 {
962 .name = "display",
963 .domains = VLV_DISPLAY_POWER_DOMAINS,
964 .data = PUNIT_POWER_WELL_DISP2D,
965 .ops = &vlv_display_power_well_ops,
966 },
967#endif
968 {
969 .name = "pipe-a",
970 /*
971 * FIXME: pipe A power well seems to be the new disp2d well.
972 * At least all registers seem to be housed there. Figure
 973 * out if this is a temporary situation in pre-production
974 * hardware or a permanent state of affairs.
975 */
976 .domains = CHV_PIPE_A_POWER_DOMAINS | VLV_DISPLAY_POWER_DOMAINS,
977 .data = PIPE_A,
978 .ops = &chv_pipe_power_well_ops,
979 },
980#if 0
981 {
982 .name = "pipe-b",
983 .domains = CHV_PIPE_B_POWER_DOMAINS,
984 .data = PIPE_B,
985 .ops = &chv_pipe_power_well_ops,
986 },
987 {
988 .name = "pipe-c",
989 .domains = CHV_PIPE_C_POWER_DOMAINS,
990 .data = PIPE_C,
991 .ops = &chv_pipe_power_well_ops,
992 },
993#endif
994 {
995 .name = "dpio-common-bc",
996 /*
997 * XXX: cmnreset for one PHY seems to disturb the other.
998 * As a workaround keep both powered on at the same
999 * time for now.
1000 */
1001 .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
1002 .data = PUNIT_POWER_WELL_DPIO_CMN_BC,
1003 .ops = &chv_dpio_cmn_power_well_ops,
1004 },
1005 {
1006 .name = "dpio-common-d",
1007 /*
1008 * XXX: cmnreset for one PHY seems to disturb the other.
1009 * As a workaround keep both powered on at the same
1010 * time for now.
1011 */
1012 .domains = CHV_DPIO_CMN_BC_POWER_DOMAINS | CHV_DPIO_CMN_D_POWER_DOMAINS,
1013 .data = PUNIT_POWER_WELL_DPIO_CMN_D,
1014 .ops = &chv_dpio_cmn_power_well_ops,
1015 },
1016#if 0
1017 {
1018 .name = "dpio-tx-b-01",
1019 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
1020 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
1021 .ops = &vlv_dpio_power_well_ops,
1022 .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_01,
1023 },
1024 {
1025 .name = "dpio-tx-b-23",
1026 .domains = VLV_DPIO_TX_B_LANES_01_POWER_DOMAINS |
1027 VLV_DPIO_TX_B_LANES_23_POWER_DOMAINS,
1028 .ops = &vlv_dpio_power_well_ops,
1029 .data = PUNIT_POWER_WELL_DPIO_TX_B_LANES_23,
1030 },
1031 {
1032 .name = "dpio-tx-c-01",
1033 .domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
1034 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
1035 .ops = &vlv_dpio_power_well_ops,
1036 .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_01,
1037 },
1038 {
1039 .name = "dpio-tx-c-23",
1040 .domains = VLV_DPIO_TX_C_LANES_01_POWER_DOMAINS |
1041 VLV_DPIO_TX_C_LANES_23_POWER_DOMAINS,
1042 .ops = &vlv_dpio_power_well_ops,
1043 .data = PUNIT_POWER_WELL_DPIO_TX_C_LANES_23,
1044 },
1045 {
1046 .name = "dpio-tx-d-01",
1047 .domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
1048 CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
1049 .ops = &vlv_dpio_power_well_ops,
1050 .data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_01,
1051 },
1052 {
1053 .name = "dpio-tx-d-23",
1054 .domains = CHV_DPIO_TX_D_LANES_01_POWER_DOMAINS |
1055 CHV_DPIO_TX_D_LANES_23_POWER_DOMAINS,
1056 .ops = &vlv_dpio_power_well_ops,
1057 .data = PUNIT_POWER_WELL_DPIO_TX_D_LANES_23,
1058 },
1059#endif
1060};
1061
1062static struct i915_power_well *lookup_power_well(struct drm_i915_private *dev_priv,
1063 enum punit_power_well power_well_id)
1064{
1065 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1066 struct i915_power_well *power_well;
1067 int i;
1068
1069 for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
1070 if (power_well->data == power_well_id)
1071 return power_well;
1072 }
1073
1074 return NULL;
1075}
1076
1077#define set_power_wells(power_domains, __power_wells) ({ \
1078 (power_domains)->power_wells = (__power_wells); \
1079 (power_domains)->power_well_count = ARRAY_SIZE(__power_wells); \
1080})
1081
1082/**
1083 * intel_power_domains_init - initializes the power domain structures
1084 * @dev_priv: i915 device instance
1085 *
1086 * Initializes the power domain structures for @dev_priv depending upon the
1087 * supported platform.
1088 */
1089int intel_power_domains_init(struct drm_i915_private *dev_priv)
1090{
1091 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1092
1093 mutex_init(&power_domains->lock);
1094
1095 /*
1096 * The enabling order will be from lower to higher indexed wells,
1097 * the disabling order is reversed.
1098 */
1099 if (IS_HASWELL(dev_priv->dev)) {
1100 set_power_wells(power_domains, hsw_power_wells);
1101 hsw_pwr = power_domains;
1102 } else if (IS_BROADWELL(dev_priv->dev)) {
1103 set_power_wells(power_domains, bdw_power_wells);
1104 hsw_pwr = power_domains;
1105 } else if (IS_CHERRYVIEW(dev_priv->dev)) {
1106 set_power_wells(power_domains, chv_power_wells);
1107 } else if (IS_VALLEYVIEW(dev_priv->dev)) {
1108 set_power_wells(power_domains, vlv_power_wells);
1109 } else {
1110 set_power_wells(power_domains, i9xx_always_on_power_well);
1111 }
1112
1113 return 0;
1114}
1115
1116static void intel_runtime_pm_disable(struct drm_i915_private *dev_priv)
1117{
1118 struct drm_device *dev = dev_priv->dev;
1119 struct device *device = &dev->pdev->dev;
1120
1121 if (!HAS_RUNTIME_PM(dev))
1122 return;
1123
1124 if (!intel_enable_rc6(dev))
1125 return;
1126
1127 /* Make sure we're not suspended first. */
1128 pm_runtime_get_sync(device);
1129 pm_runtime_disable(device);
1130}
1131
1132/**
1133 * intel_power_domains_fini - finalizes the power domain structures
1134 * @dev_priv: i915 device instance
1135 *
1136 * Finalizes the power domain structures for @dev_priv depending upon the
1137 * supported platform. This function also disables runtime pm and ensures that
1138 * the device stays powered up so that the driver can be reloaded.
1139 */
1140void intel_power_domains_fini(struct drm_i915_private *dev_priv)
1141{
1142 intel_runtime_pm_disable(dev_priv);
1143
1144 /* The i915.ko module is still not prepared to be loaded when
1145 * the power well is not enabled, so just enable it in case
1146 * we're going to unload/reload. */
1147 intel_display_set_init_power(dev_priv, true);
1148
1149 hsw_pwr = NULL;
1150}
1151
1152static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
1153{
1154 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1155 struct i915_power_well *power_well;
1156 int i;
1157
1158 mutex_lock(&power_domains->lock);
1159 for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
1160 power_well->ops->sync_hw(dev_priv, power_well);
1161 power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
1162 power_well);
1163 }
1164 mutex_unlock(&power_domains->lock);
1165}
1166
1167static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
1168{
1169 struct i915_power_well *cmn =
1170 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_BC);
1171 struct i915_power_well *disp2d =
1172 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DISP2D);
1173
 1174 /* If the display might already be active, skip this */
1175 if (cmn->ops->is_enabled(dev_priv, cmn) &&
1176 disp2d->ops->is_enabled(dev_priv, disp2d) &&
1177 I915_READ(DPIO_CTL) & DPIO_CMNRST)
1178 return;
1179
1180 DRM_DEBUG_KMS("toggling display PHY side reset\n");
1181
1182 /* cmnlane needs DPLL registers */
1183 disp2d->ops->enable(dev_priv, disp2d);
1184
1185 /*
1186 * From VLV2A0_DP_eDP_HDMI_DPIO_driver_vbios_notes_11.docx:
1187 * Need to assert and de-assert PHY SB reset by gating the
1188 * common lane power, then un-gating it.
1189 * Simply ungating isn't enough to reset the PHY enough to get
1190 * ports and lanes running.
1191 */
1192 cmn->ops->disable(dev_priv, cmn);
1193}
1194
1195/**
1196 * intel_power_domains_init_hw - initialize hardware power domain state
1197 * @dev_priv: i915 device instance
1198 *
1199 * This function initializes the hardware power domain state and enables all
1200 * power domains using intel_display_set_init_power().
1201 */
1202void intel_power_domains_init_hw(struct drm_i915_private *dev_priv)
1203{
1204 struct drm_device *dev = dev_priv->dev;
1205 struct i915_power_domains *power_domains = &dev_priv->power_domains;
1206
1207 power_domains->initializing = true;
1208
1209 if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
1210 mutex_lock(&power_domains->lock);
1211 vlv_cmnlane_wa(dev_priv);
1212 mutex_unlock(&power_domains->lock);
1213 }
1214
1215 /* For now, we need the power well to be always enabled. */
1216 intel_display_set_init_power(dev_priv, true);
1217 intel_power_domains_resume(dev_priv);
1218 power_domains->initializing = false;
1219}
1220
1221/**
 1222 * intel_aux_display_runtime_get - grab an auxiliary power domain reference
1223 * @dev_priv: i915 device instance
1224 *
1225 * This function grabs a power domain reference for the auxiliary power domain
1226 * (for access to the GMBUS and DP AUX blocks) and ensures that it and all its
1227 * parents are powered up. Therefore users should only grab a reference to the
1228 * innermost power domain they need.
1229 *
1230 * Any power domain reference obtained by this function must have a symmetric
1231 * call to intel_aux_display_runtime_put() to release the reference again.
1232 */
1233void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv)
1234{
1235 intel_runtime_pm_get(dev_priv);
1236}
1237
1238/**
 1239 * intel_aux_display_runtime_put - release an auxiliary power domain reference
 1240 * @dev_priv: i915 device instance
 1241 *
 1242 * This function drops the auxiliary power domain reference obtained by
1243 * intel_aux_display_runtime_get() and might power down the corresponding
1244 * hardware block right away if this is the last reference.
1245 */
1246void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv)
1247{
1248 intel_runtime_pm_put(dev_priv);
1249}
1250
1251/**
1252 * intel_runtime_pm_get - grab a runtime pm reference
1253 * @dev_priv: i915 device instance
1254 *
1255 * This function grabs a device-level runtime pm reference (mostly used for GEM
1256 * code to ensure the GTT or GT is on) and ensures that it is powered up.
1257 *
1258 * Any runtime pm reference obtained by this function must have a symmetric
1259 * call to intel_runtime_pm_put() to release the reference again.
1260 */
1261void intel_runtime_pm_get(struct drm_i915_private *dev_priv)
1262{
1263 struct drm_device *dev = dev_priv->dev;
1264 struct device *device = &dev->pdev->dev;
1265
1266 if (!HAS_RUNTIME_PM(dev))
1267 return;
1268
1269 pm_runtime_get_sync(device);
1270 WARN(dev_priv->pm.suspended, "Device still suspended.\n");
1271}
1272
1273/**
1274 * intel_runtime_pm_get_noresume - grab a runtime pm reference
1275 * @dev_priv: i915 device instance
1276 *
1277 * This function grabs a device-level runtime pm reference (mostly used for GEM
1278 * code to ensure the GTT or GT is on).
1279 *
1280 * It will _not_ power up the device but instead only check that it's powered
 1281 * on. Therefore it is only valid to call this function from contexts where
1282 * the device is known to be powered up and where trying to power it up would
1283 * result in hilarity and deadlocks. That pretty much means only the system
1284 * suspend/resume code where this is used to grab runtime pm references for
1285 * delayed setup down in work items.
1286 *
1287 * Any runtime pm reference obtained by this function must have a symmetric
1288 * call to intel_runtime_pm_put() to release the reference again.
1289 */
1290void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv)
1291{
1292 struct drm_device *dev = dev_priv->dev;
1293 struct device *device = &dev->pdev->dev;
1294
1295 if (!HAS_RUNTIME_PM(dev))
1296 return;
1297
1298 WARN(dev_priv->pm.suspended, "Getting nosync-ref while suspended.\n");
1299 pm_runtime_get_noresume(device);
1300}
1301
1302/**
1303 * intel_runtime_pm_put - release a runtime pm reference
1304 * @dev_priv: i915 device instance
1305 *
1306 * This function drops the device-level runtime pm reference obtained by
1307 * intel_runtime_pm_get() and might power down the corresponding
1308 * hardware block right away if this is the last reference.
1309 */
1310void intel_runtime_pm_put(struct drm_i915_private *dev_priv)
1311{
1312 struct drm_device *dev = dev_priv->dev;
1313 struct device *device = &dev->pdev->dev;
1314
1315 if (!HAS_RUNTIME_PM(dev))
1316 return;
1317
1318 pm_runtime_mark_last_busy(device);
1319 pm_runtime_put_autosuspend(device);
1320}
1321
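A minimal sketch of the device-level reference pattern (illustrative only):

	/* Wake the device (or keep it awake) across a GT access. */
	intel_runtime_pm_get(dev_priv);
	/* ... GT/GTT registers may be touched here ... */
	intel_runtime_pm_put(dev_priv);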
1322/**
1323 * intel_runtime_pm_enable - enable runtime pm
1324 * @dev_priv: i915 device instance
1325 *
1326 * This function enables runtime pm at the end of the driver load sequence.
1327 *
 1328 * Note that this function does not currently enable runtime pm for the
1329 * subordinate display power domains. That is only done on the first modeset
1330 * using intel_display_set_init_power().
1331 */
1332void intel_runtime_pm_enable(struct drm_i915_private *dev_priv)
1333{
1334 struct drm_device *dev = dev_priv->dev;
1335 struct device *device = &dev->pdev->dev;
1336
1337 if (!HAS_RUNTIME_PM(dev))
1338 return;
1339
1340 pm_runtime_set_active(device);
1341
1342 /*
 1343 * RPM depends on RC6 to save/restore the GT HW context, so make RC6 a
1344 * requirement.
1345 */
1346 if (!intel_enable_rc6(dev)) {
1347 DRM_INFO("RC6 disabled, disabling runtime PM support\n");
1348 return;
1349 }
1350
1351 pm_runtime_set_autosuspend_delay(device, 10000); /* 10s */
1352 pm_runtime_mark_last_busy(device);
1353 pm_runtime_use_autosuspend(device);
1354
1355 pm_runtime_put_autosuspend(device);
1356}
1357
1358/* Display audio driver power well request */
1359int i915_request_power_well(void)
1360{
1361 struct drm_i915_private *dev_priv;
1362
1363 if (!hsw_pwr)
1364 return -ENODEV;
1365
1366 dev_priv = container_of(hsw_pwr, struct drm_i915_private,
1367 power_domains);
1368 intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
1369 return 0;
1370}
1371EXPORT_SYMBOL_GPL(i915_request_power_well);
1372
1373/* Display audio driver power well release */
1374int i915_release_power_well(void)
1375{
1376 struct drm_i915_private *dev_priv;
1377
1378 if (!hsw_pwr)
1379 return -ENODEV;
1380
1381 dev_priv = container_of(hsw_pwr, struct drm_i915_private,
1382 power_domains);
1383 intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
1384 return 0;
1385}
1386EXPORT_SYMBOL_GPL(i915_release_power_well);
1387
1388/*
1389 * Private interface for the audio driver to get CDCLK in kHz.
1390 *
1391 * Caller must request power well using i915_request_power_well() prior to
1392 * making the call.
1393 */
1394int i915_get_cdclk_freq(void)
1395{
1396 struct drm_i915_private *dev_priv;
1397
1398 if (!hsw_pwr)
1399 return -ENODEV;
1400
1401 dev_priv = container_of(hsw_pwr, struct drm_i915_private,
1402 power_domains);
1403
1404 return intel_ddi_get_cdclk_freq(dev_priv);
1405}
1406EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);
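A hedged sketch of how the display audio driver might consume this private interface (illustrative only; everything outside the three exported functions is an assumption):

	int cdclk;

	if (i915_request_power_well() == 0) {
		cdclk = i915_get_cdclk_freq();	/* in kHz */
		/* ... derive audio clock parameters from cdclk ... */
		i915_release_power_well();
	}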
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 9350edd6728d..6d7a277458b5 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1991,57 +1991,10 @@ static int intel_sdvo_get_modes(struct drm_connector *connector)
1991 return !list_empty(&connector->probed_modes); 1991 return !list_empty(&connector->probed_modes);
1992} 1992}
1993 1993
1994static void
1995intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
1996{
1997 struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
1998 struct drm_device *dev = connector->dev;
1999
2000 if (intel_sdvo_connector->left)
2001 drm_property_destroy(dev, intel_sdvo_connector->left);
2002 if (intel_sdvo_connector->right)
2003 drm_property_destroy(dev, intel_sdvo_connector->right);
2004 if (intel_sdvo_connector->top)
2005 drm_property_destroy(dev, intel_sdvo_connector->top);
2006 if (intel_sdvo_connector->bottom)
2007 drm_property_destroy(dev, intel_sdvo_connector->bottom);
2008 if (intel_sdvo_connector->hpos)
2009 drm_property_destroy(dev, intel_sdvo_connector->hpos);
2010 if (intel_sdvo_connector->vpos)
2011 drm_property_destroy(dev, intel_sdvo_connector->vpos);
2012 if (intel_sdvo_connector->saturation)
2013 drm_property_destroy(dev, intel_sdvo_connector->saturation);
2014 if (intel_sdvo_connector->contrast)
2015 drm_property_destroy(dev, intel_sdvo_connector->contrast);
2016 if (intel_sdvo_connector->hue)
2017 drm_property_destroy(dev, intel_sdvo_connector->hue);
2018 if (intel_sdvo_connector->sharpness)
2019 drm_property_destroy(dev, intel_sdvo_connector->sharpness);
2020 if (intel_sdvo_connector->flicker_filter)
2021 drm_property_destroy(dev, intel_sdvo_connector->flicker_filter);
2022 if (intel_sdvo_connector->flicker_filter_2d)
2023 drm_property_destroy(dev, intel_sdvo_connector->flicker_filter_2d);
2024 if (intel_sdvo_connector->flicker_filter_adaptive)
2025 drm_property_destroy(dev, intel_sdvo_connector->flicker_filter_adaptive);
2026 if (intel_sdvo_connector->tv_luma_filter)
2027 drm_property_destroy(dev, intel_sdvo_connector->tv_luma_filter);
2028 if (intel_sdvo_connector->tv_chroma_filter)
2029 drm_property_destroy(dev, intel_sdvo_connector->tv_chroma_filter);
2030 if (intel_sdvo_connector->dot_crawl)
2031 drm_property_destroy(dev, intel_sdvo_connector->dot_crawl);
2032 if (intel_sdvo_connector->brightness)
2033 drm_property_destroy(dev, intel_sdvo_connector->brightness);
2034}
2035
2036static void intel_sdvo_destroy(struct drm_connector *connector) 1994static void intel_sdvo_destroy(struct drm_connector *connector)
2037{ 1995{
2038 struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); 1996 struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
2039 1997
2040 if (intel_sdvo_connector->tv_format)
2041 drm_property_destroy(connector->dev,
2042 intel_sdvo_connector->tv_format);
2043
2044 intel_sdvo_destroy_enhance_property(connector);
2045 drm_connector_cleanup(connector); 1998 drm_connector_cleanup(connector);
2046 kfree(intel_sdvo_connector); 1999 kfree(intel_sdvo_connector);
2047} 2000}
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 07a74ef589bd..7d9c340f7693 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -37,6 +37,20 @@
37#include <drm/i915_drm.h> 37#include <drm/i915_drm.h>
38#include "i915_drv.h" 38#include "i915_drv.h"
39 39
40static bool
41format_is_yuv(uint32_t format)
42{
43 switch (format) {
44 case DRM_FORMAT_YUYV:
45 case DRM_FORMAT_UYVY:
46 case DRM_FORMAT_VYUY:
47 case DRM_FORMAT_YVYU:
48 return true;
49 default:
50 return false;
51 }
52}
53
40static int usecs_to_scanlines(const struct drm_display_mode *mode, int usecs) 54static int usecs_to_scanlines(const struct drm_display_mode *mode, int usecs)
41{ 55{
42 /* paranoia */ 56 /* paranoia */
@@ -46,7 +60,23 @@ static int usecs_to_scanlines(const struct drm_display_mode *mode, int usecs)
46 return DIV_ROUND_UP(usecs * mode->crtc_clock, 1000 * mode->crtc_htotal); 60 return DIV_ROUND_UP(usecs * mode->crtc_clock, 1000 * mode->crtc_htotal);
47} 61}
48 62
49static bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count) 63/**
64 * intel_pipe_update_start() - start update of a set of display registers
 65 * @crtc: the crtc whose registers are going to be updated
66 * @start_vbl_count: vblank counter return pointer used for error checking
67 *
68 * Mark the start of an update to pipe registers that should be updated
 69 * atomically with regard to vblank. If the next vblank happens within
70 * the next 100 us, this function waits until the vblank passes.
71 *
72 * After a successful call to this function, interrupts will be disabled
73 * until a subsequent call to intel_pipe_update_end(). That is done to
74 * avoid random delays. The value written to @start_vbl_count should be
75 * supplied to intel_pipe_update_end() for error checking.
76 *
77 * Return: true if the call was successful
78 */
79bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl_count)
50{ 80{
51 struct drm_device *dev = crtc->base.dev; 81 struct drm_device *dev = crtc->base.dev;
52 const struct drm_display_mode *mode = &crtc->config.adjusted_mode; 82 const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
@@ -56,8 +86,6 @@ static bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl
56 wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base); 86 wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base);
57 DEFINE_WAIT(wait); 87 DEFINE_WAIT(wait);
58 88
59 WARN_ON(!drm_modeset_is_locked(&crtc->base.mutex));
60
61 vblank_start = mode->crtc_vblank_start; 89 vblank_start = mode->crtc_vblank_start;
62 if (mode->flags & DRM_MODE_FLAG_INTERLACE) 90 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
63 vblank_start = DIV_ROUND_UP(vblank_start, 2); 91 vblank_start = DIV_ROUND_UP(vblank_start, 2);
@@ -112,7 +140,16 @@ static bool intel_pipe_update_start(struct intel_crtc *crtc, uint32_t *start_vbl
112 return true; 140 return true;
113} 141}
114 142
115static void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count) 143/**
144 * intel_pipe_update_end() - end update of a set of display registers
 145 * @crtc: the crtc whose registers were updated
146 * @start_vbl_count: start vblank counter (used for error checking)
147 *
148 * Mark the end of an update started with intel_pipe_update_start(). This
149 * re-enables interrupts and verifies the update was actually completed
150 * before a vblank using the value of @start_vbl_count.
151 */
152void intel_pipe_update_end(struct intel_crtc *crtc, u32 start_vbl_count)
116{ 153{
117 struct drm_device *dev = crtc->base.dev; 154 struct drm_device *dev = crtc->base.dev;
118 enum pipe pipe = crtc->pipe; 155 enum pipe pipe = crtc->pipe;
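Taken together, callers are expected to bracket their register writes like this (sketch; assumes a valid crtc):

	uint32_t start_vbl_count;

	if (intel_pipe_update_start(crtc, &start_vbl_count)) {
		/* ... write double-buffered pipe/plane registers ... */
		intel_pipe_update_end(crtc, start_vbl_count);
	}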
@@ -139,6 +176,226 @@ static void intel_update_primary_plane(struct intel_crtc *crtc)
139} 176}
140 177
141static void 178static void
179skl_update_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc,
180 struct drm_framebuffer *fb,
181 struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
182 unsigned int crtc_w, unsigned int crtc_h,
183 uint32_t x, uint32_t y,
184 uint32_t src_w, uint32_t src_h)
185{
186 struct drm_device *dev = drm_plane->dev;
187 struct drm_i915_private *dev_priv = dev->dev_private;
188 struct intel_plane *intel_plane = to_intel_plane(drm_plane);
189 const int pipe = intel_plane->pipe;
190 const int plane = intel_plane->plane + 1;
191 u32 plane_ctl, stride;
192 int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
193
194 plane_ctl = I915_READ(PLANE_CTL(pipe, plane));
195
196 /* Mask out pixel format bits in case we change it */
197 plane_ctl &= ~PLANE_CTL_FORMAT_MASK;
198 plane_ctl &= ~PLANE_CTL_ORDER_RGBX;
199 plane_ctl &= ~PLANE_CTL_YUV422_ORDER_MASK;
200 plane_ctl &= ~PLANE_CTL_TILED_MASK;
201 plane_ctl &= ~PLANE_CTL_ALPHA_MASK;
202 plane_ctl &= ~PLANE_CTL_ROTATE_MASK;
203
204 /* Trickle feed has to be enabled */
205 plane_ctl &= ~PLANE_CTL_TRICKLE_FEED_DISABLE;
206
207 switch (fb->pixel_format) {
208 case DRM_FORMAT_RGB565:
209 plane_ctl |= PLANE_CTL_FORMAT_RGB_565;
210 break;
211 case DRM_FORMAT_XBGR8888:
212 plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
213 break;
214 case DRM_FORMAT_XRGB8888:
215 plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888;
216 break;
217 /*
218 * XXX: For ARBG/ABGR formats we default to expecting scanout buffers
219 * to be already pre-multiplied. We need to add a knob (or a different
220 * DRM_FORMAT) for user-space to configure that.
221 */
222 case DRM_FORMAT_ABGR8888:
223 plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888 |
224 PLANE_CTL_ORDER_RGBX |
225 PLANE_CTL_ALPHA_SW_PREMULTIPLY;
226 break;
227 case DRM_FORMAT_ARGB8888:
228 plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888 |
229 PLANE_CTL_ALPHA_SW_PREMULTIPLY;
230 break;
231 case DRM_FORMAT_YUYV:
232 plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
233 break;
234 case DRM_FORMAT_YVYU:
235 plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
236 break;
237 case DRM_FORMAT_UYVY:
238 plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
239 break;
240 case DRM_FORMAT_VYUY:
241 plane_ctl |= PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
242 break;
243 default:
244 BUG();
245 }
246
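	/*
	 * Note: the shifts below suggest PLANE_STRIDE is programmed in units
	 * of 64 bytes for linear (>> 6) and 512 bytes for X-tiled (>> 9)
	 * surfaces.
	 */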
247 switch (obj->tiling_mode) {
248 case I915_TILING_NONE:
249 stride = fb->pitches[0] >> 6;
250 break;
251 case I915_TILING_X:
252 plane_ctl |= PLANE_CTL_TILED_X;
253 stride = fb->pitches[0] >> 9;
254 break;
255 default:
256 BUG();
257 }
258 if (intel_plane->rotation == BIT(DRM_ROTATE_180))
259 plane_ctl |= PLANE_CTL_ROTATE_180;
260
261 plane_ctl |= PLANE_CTL_ENABLE;
262 plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
263
264 intel_update_sprite_watermarks(drm_plane, crtc, src_w, src_h,
265 pixel_size, true,
266 src_w != crtc_w || src_h != crtc_h);
267
268 /* Sizes are 0 based */
269 src_w--;
270 src_h--;
271 crtc_w--;
272 crtc_h--;
273
274 I915_WRITE(PLANE_OFFSET(pipe, plane), (y << 16) | x);
275 I915_WRITE(PLANE_STRIDE(pipe, plane), stride);
276 I915_WRITE(PLANE_POS(pipe, plane), (crtc_y << 16) | crtc_x);
277 I915_WRITE(PLANE_SIZE(pipe, plane), (crtc_h << 16) | crtc_w);
278 I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
279 I915_WRITE(PLANE_SURF(pipe, plane), i915_gem_obj_ggtt_offset(obj));
280 POSTING_READ(PLANE_SURF(pipe, plane));
281}
282
283static void
284skl_disable_plane(struct drm_plane *drm_plane, struct drm_crtc *crtc)
285{
286 struct drm_device *dev = drm_plane->dev;
287 struct drm_i915_private *dev_priv = dev->dev_private;
288 struct intel_plane *intel_plane = to_intel_plane(drm_plane);
289 const int pipe = intel_plane->pipe;
290 const int plane = intel_plane->plane + 1;
291
292 I915_WRITE(PLANE_CTL(pipe, plane),
293 I915_READ(PLANE_CTL(pipe, plane)) & ~PLANE_CTL_ENABLE);
294
295 /* Activate double buffered register update */
296 I915_WRITE(PLANE_CTL(pipe, plane), 0);
297 POSTING_READ(PLANE_CTL(pipe, plane));
298
299 intel_update_sprite_watermarks(drm_plane, crtc, 0, 0, 0, false, false);
300}
301
302static int
303skl_update_colorkey(struct drm_plane *drm_plane,
304 struct drm_intel_sprite_colorkey *key)
305{
306 struct drm_device *dev = drm_plane->dev;
307 struct drm_i915_private *dev_priv = dev->dev_private;
308 struct intel_plane *intel_plane = to_intel_plane(drm_plane);
309 const int pipe = intel_plane->pipe;
310 const int plane = intel_plane->plane;
311 u32 plane_ctl;
312
313 I915_WRITE(PLANE_KEYVAL(pipe, plane), key->min_value);
314 I915_WRITE(PLANE_KEYMAX(pipe, plane), key->max_value);
315 I915_WRITE(PLANE_KEYMSK(pipe, plane), key->channel_mask);
316
317 plane_ctl = I915_READ(PLANE_CTL(pipe, plane));
318 plane_ctl &= ~PLANE_CTL_KEY_ENABLE_MASK;
319 if (key->flags & I915_SET_COLORKEY_DESTINATION)
320 plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
321 else if (key->flags & I915_SET_COLORKEY_SOURCE)
322 plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
323 I915_WRITE(PLANE_CTL(pipe, plane), plane_ctl);
324
325 POSTING_READ(PLANE_CTL(pipe, plane));
326
327 return 0;
328}
329
330static void
331skl_get_colorkey(struct drm_plane *drm_plane,
332 struct drm_intel_sprite_colorkey *key)
333{
334 struct drm_device *dev = drm_plane->dev;
335 struct drm_i915_private *dev_priv = dev->dev_private;
336 struct intel_plane *intel_plane = to_intel_plane(drm_plane);
337 const int pipe = intel_plane->pipe;
338 const int plane = intel_plane->plane;
339 u32 plane_ctl;
340
341 key->min_value = I915_READ(PLANE_KEYVAL(pipe, plane));
342 key->max_value = I915_READ(PLANE_KEYMAX(pipe, plane));
343 key->channel_mask = I915_READ(PLANE_KEYMSK(pipe, plane));
344
345 plane_ctl = I915_READ(PLANE_CTL(pipe, plane));
346
347 switch (plane_ctl & PLANE_CTL_KEY_ENABLE_MASK) {
348 case PLANE_CTL_KEY_ENABLE_DESTINATION:
349 key->flags = I915_SET_COLORKEY_DESTINATION;
350 break;
351 case PLANE_CTL_KEY_ENABLE_SOURCE:
352 key->flags = I915_SET_COLORKEY_SOURCE;
353 break;
354 default:
355 key->flags = I915_SET_COLORKEY_NONE;
356 }
357}
358
359static void
360chv_update_csc(struct intel_plane *intel_plane, uint32_t format)
361{
362 struct drm_i915_private *dev_priv = intel_plane->base.dev->dev_private;
363 int plane = intel_plane->plane;
364
 365 /* RGB data seems to always bypass the CSC */
366 if (!format_is_yuv(format))
367 return;
368
369 /*
370 * BT.601 limited range YCbCr -> full range RGB
371 *
372 * |r| | 6537 4769 0| |cr |
373 * |g| = |-3330 4769 -1605| x |y-64|
374 * |b| | 0 4769 8263| |cb |
375 *
376 * Cb and Cr apparently come in as signed already, so no
377 * need for any offset. For Y we need to remove the offset.
378 */
379 I915_WRITE(SPCSCYGOFF(plane), SPCSC_OOFF(0) | SPCSC_IOFF(-64));
380 I915_WRITE(SPCSCCBOFF(plane), SPCSC_OOFF(0) | SPCSC_IOFF(0));
381 I915_WRITE(SPCSCCROFF(plane), SPCSC_OOFF(0) | SPCSC_IOFF(0));
382
383 I915_WRITE(SPCSCC01(plane), SPCSC_C1(4769) | SPCSC_C0(6537));
384 I915_WRITE(SPCSCC23(plane), SPCSC_C1(-3330) | SPCSC_C0(0));
385 I915_WRITE(SPCSCC45(plane), SPCSC_C1(-1605) | SPCSC_C0(4769));
386 I915_WRITE(SPCSCC67(plane), SPCSC_C1(4769) | SPCSC_C0(0));
387 I915_WRITE(SPCSCC8(plane), SPCSC_C0(8263));
388
389 I915_WRITE(SPCSCYGICLAMP(plane), SPCSC_IMAX(940) | SPCSC_IMIN(64));
390 I915_WRITE(SPCSCCBICLAMP(plane), SPCSC_IMAX(448) | SPCSC_IMIN(-448));
391 I915_WRITE(SPCSCCRICLAMP(plane), SPCSC_IMAX(448) | SPCSC_IMIN(-448));
392
393 I915_WRITE(SPCSCYGOCLAMP(plane), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
394 I915_WRITE(SPCSCCBOCLAMP(plane), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
395 I915_WRITE(SPCSCCROCLAMP(plane), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
396}
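
For reference, these coefficients are consistent with the textbook BT.601 limited-range-to-full-range matrix in signed fixed point with a 4096 (2^12) denominator (an inference from the values, not something the patch states): 4769/4096 ≈ 1.164, 6537/4096 ≈ 1.596, 3330/4096 ≈ 0.813, 1605/4096 ≈ 0.391 and 8263/4096 ≈ 2.017. In the (Cr, Y, Cb) column order of the comment above:

	\begin{pmatrix} R \\ G \\ B \end{pmatrix} \approx \frac{1}{4096}
	\begin{pmatrix} 6537 & 4769 & 0 \\ -3330 & 4769 & -1605 \\ 0 & 4769 & 8263 \end{pmatrix}
	\begin{pmatrix} C_r \\ Y - 64 \\ C_b \end{pmatrix}

The offsets and clamps match 10-bit limited-range video: luma spans 64-940 (hence SPCSC_IOFF(-64)) and chroma is already signed and clamped to ±448, expanded to the full 0-1023 output range.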
397
398static void
142vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc, 399vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
143 struct drm_framebuffer *fb, 400 struct drm_framebuffer *fb,
144 struct drm_i915_gem_object *obj, int crtc_x, int crtc_y, 401 struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
@@ -249,6 +506,9 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
249 506
250 intel_update_primary_plane(intel_crtc); 507 intel_update_primary_plane(intel_crtc);
251 508
509 if (IS_CHERRYVIEW(dev) && pipe == PIPE_B)
510 chv_update_csc(intel_plane, fb->pixel_format);
511
252 I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]); 512 I915_WRITE(SPSTRIDE(pipe, plane), fb->pitches[0]);
253 I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x); 513 I915_WRITE(SPPOS(pipe, plane), (crtc_y << 16) | crtc_x);
254 514
@@ -257,6 +517,8 @@ vlv_update_plane(struct drm_plane *dplane, struct drm_crtc *crtc,
257 else 517 else
258 I915_WRITE(SPLINOFF(pipe, plane), linear_offset); 518 I915_WRITE(SPLINOFF(pipe, plane), linear_offset);
259 519
520 I915_WRITE(SPCONSTALPHA(pipe, plane), 0);
521
260 I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w); 522 I915_WRITE(SPSIZE(pipe, plane), (crtc_h << 16) | crtc_w);
261 I915_WRITE(SPCNTR(pipe, plane), sprctl); 523 I915_WRITE(SPCNTR(pipe, plane), sprctl);
262 I915_WRITE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) + 524 I915_WRITE(SPSURF(pipe, plane), i915_gem_obj_ggtt_offset(obj) +
@@ -821,20 +1083,6 @@ ilk_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
821 key->flags = I915_SET_COLORKEY_NONE; 1083 key->flags = I915_SET_COLORKEY_NONE;
822} 1084}
823 1085
824static bool
825format_is_yuv(uint32_t format)
826{
827 switch (format) {
828 case DRM_FORMAT_YUYV:
829 case DRM_FORMAT_UYVY:
830 case DRM_FORMAT_VYUY:
831 case DRM_FORMAT_YVYU:
832 return true;
833 default:
834 return false;
835 }
836}
837
838static bool colorkey_enabled(struct intel_plane *intel_plane) 1086static bool colorkey_enabled(struct intel_plane *intel_plane)
839{ 1087{
840 struct drm_intel_sprite_colorkey key; 1088 struct drm_intel_sprite_colorkey key;
@@ -845,57 +1093,23 @@ static bool colorkey_enabled(struct intel_plane *intel_plane)
845} 1093}
846 1094
847static int 1095static int
848intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc, 1096intel_check_sprite_plane(struct drm_plane *plane,
849 struct drm_framebuffer *fb, int crtc_x, int crtc_y, 1097 struct intel_plane_state *state)
850 unsigned int crtc_w, unsigned int crtc_h,
851 uint32_t src_x, uint32_t src_y,
852 uint32_t src_w, uint32_t src_h)
853{ 1098{
854 struct drm_device *dev = plane->dev; 1099 struct intel_crtc *intel_crtc = to_intel_crtc(state->crtc);
855 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
856 struct intel_plane *intel_plane = to_intel_plane(plane); 1100 struct intel_plane *intel_plane = to_intel_plane(plane);
857 enum pipe pipe = intel_crtc->pipe; 1101 struct drm_framebuffer *fb = state->fb;
858 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 1102 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
859 struct drm_i915_gem_object *obj = intel_fb->obj; 1103 int crtc_x, crtc_y;
860 struct drm_i915_gem_object *old_obj = intel_plane->obj; 1104 unsigned int crtc_w, crtc_h;
861 int ret; 1105 uint32_t src_x, src_y, src_w, src_h;
862 bool primary_enabled; 1106 struct drm_rect *src = &state->src;
863 bool visible; 1107 struct drm_rect *dst = &state->dst;
1108 struct drm_rect *orig_src = &state->orig_src;
1109 const struct drm_rect *clip = &state->clip;
864 int hscale, vscale; 1110 int hscale, vscale;
865 int max_scale, min_scale; 1111 int max_scale, min_scale;
866 int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); 1112 int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0);
867 struct drm_rect src = {
868 /* sample coordinates in 16.16 fixed point */
869 .x1 = src_x,
870 .x2 = src_x + src_w,
871 .y1 = src_y,
872 .y2 = src_y + src_h,
873 };
874 struct drm_rect dst = {
875 /* integer pixels */
876 .x1 = crtc_x,
877 .x2 = crtc_x + crtc_w,
878 .y1 = crtc_y,
879 .y2 = crtc_y + crtc_h,
880 };
881 const struct drm_rect clip = {
882 .x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0,
883 .y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0,
884 };
885 const struct {
886 int crtc_x, crtc_y;
887 unsigned int crtc_w, crtc_h;
888 uint32_t src_x, src_y, src_w, src_h;
889 } orig = {
890 .crtc_x = crtc_x,
891 .crtc_y = crtc_y,
892 .crtc_w = crtc_w,
893 .crtc_h = crtc_h,
894 .src_x = src_x,
895 .src_y = src_y,
896 .src_w = src_w,
897 .src_h = src_h,
898 };
899 1113
900 /* Don't modify another pipe's plane */ 1114 /* Don't modify another pipe's plane */
901 if (intel_plane->pipe != intel_crtc->pipe) { 1115 if (intel_plane->pipe != intel_crtc->pipe) {
@@ -927,55 +1141,55 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
927 max_scale = intel_plane->max_downscale << 16; 1141 max_scale = intel_plane->max_downscale << 16;
928 min_scale = intel_plane->can_scale ? 1 : (1 << 16); 1142 min_scale = intel_plane->can_scale ? 1 : (1 << 16);
929 1143
930 drm_rect_rotate(&src, fb->width << 16, fb->height << 16, 1144 drm_rect_rotate(src, fb->width << 16, fb->height << 16,
931 intel_plane->rotation); 1145 intel_plane->rotation);
932 1146
933 hscale = drm_rect_calc_hscale_relaxed(&src, &dst, min_scale, max_scale); 1147 hscale = drm_rect_calc_hscale_relaxed(src, dst, min_scale, max_scale);
934 BUG_ON(hscale < 0); 1148 BUG_ON(hscale < 0);
935 1149
936 vscale = drm_rect_calc_vscale_relaxed(&src, &dst, min_scale, max_scale); 1150 vscale = drm_rect_calc_vscale_relaxed(src, dst, min_scale, max_scale);
937 BUG_ON(vscale < 0); 1151 BUG_ON(vscale < 0);
938 1152
939 visible = drm_rect_clip_scaled(&src, &dst, &clip, hscale, vscale); 1153 state->visible = drm_rect_clip_scaled(src, dst, clip, hscale, vscale);
940 1154
941 crtc_x = dst.x1; 1155 crtc_x = dst->x1;
942 crtc_y = dst.y1; 1156 crtc_y = dst->y1;
943 crtc_w = drm_rect_width(&dst); 1157 crtc_w = drm_rect_width(dst);
944 crtc_h = drm_rect_height(&dst); 1158 crtc_h = drm_rect_height(dst);
945 1159
946 if (visible) { 1160 if (state->visible) {
947 /* check again in case clipping clamped the results */ 1161 /* check again in case clipping clamped the results */
948 hscale = drm_rect_calc_hscale(&src, &dst, min_scale, max_scale); 1162 hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale);
949 if (hscale < 0) { 1163 if (hscale < 0) {
950 DRM_DEBUG_KMS("Horizontal scaling factor out of limits\n"); 1164 DRM_DEBUG_KMS("Horizontal scaling factor out of limits\n");
951 drm_rect_debug_print(&src, true); 1165 drm_rect_debug_print(src, true);
952 drm_rect_debug_print(&dst, false); 1166 drm_rect_debug_print(dst, false);
953 1167
954 return hscale; 1168 return hscale;
955 } 1169 }
956 1170
957 vscale = drm_rect_calc_vscale(&src, &dst, min_scale, max_scale); 1171 vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale);
958 if (vscale < 0) { 1172 if (vscale < 0) {
959 DRM_DEBUG_KMS("Vertical scaling factor out of limits\n"); 1173 DRM_DEBUG_KMS("Vertical scaling factor out of limits\n");
960 drm_rect_debug_print(&src, true); 1174 drm_rect_debug_print(src, true);
961 drm_rect_debug_print(&dst, false); 1175 drm_rect_debug_print(dst, false);
962 1176
963 return vscale; 1177 return vscale;
964 } 1178 }
965 1179
966 /* Make the source viewport size an exact multiple of the scaling factors. */ 1180 /* Make the source viewport size an exact multiple of the scaling factors. */
967 drm_rect_adjust_size(&src, 1181 drm_rect_adjust_size(src,
968 drm_rect_width(&dst) * hscale - drm_rect_width(&src), 1182 drm_rect_width(dst) * hscale - drm_rect_width(src),
969 drm_rect_height(&dst) * vscale - drm_rect_height(&src)); 1183 drm_rect_height(dst) * vscale - drm_rect_height(src));
970 1184
971 drm_rect_rotate_inv(&src, fb->width << 16, fb->height << 16, 1185 drm_rect_rotate_inv(src, fb->width << 16, fb->height << 16,
972 intel_plane->rotation); 1186 intel_plane->rotation);
973 1187
974 /* sanity check to make sure the src viewport wasn't enlarged */ 1188 /* sanity check to make sure the src viewport wasn't enlarged */
975 WARN_ON(src.x1 < (int) src_x || 1189 WARN_ON(src->x1 < (int) orig_src->x1 ||
976 src.y1 < (int) src_y || 1190 src->y1 < (int) orig_src->y1 ||
977 src.x2 > (int) (src_x + src_w) || 1191 src->x2 > (int) orig_src->x2 ||
978 src.y2 > (int) (src_y + src_h)); 1192 src->y2 > (int) orig_src->y2);
979 1193
980 /* 1194 /*
981 * Hardware doesn't handle subpixel coordinates. 1195 * Hardware doesn't handle subpixel coordinates.
@@ -983,10 +1197,10 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
983 * increase the source viewport size, because that could 1197 * increase the source viewport size, because that could
984 * push the downscaling factor out of bounds. 1198 * push the downscaling factor out of bounds.
985 */ 1199 */
986 src_x = src.x1 >> 16; 1200 src_x = src->x1 >> 16;
987 src_w = drm_rect_width(&src) >> 16; 1201 src_w = drm_rect_width(src) >> 16;
988 src_y = src.y1 >> 16; 1202 src_y = src->y1 >> 16;
989 src_h = drm_rect_height(&src) >> 16; 1203 src_h = drm_rect_height(src) >> 16;
990 1204
991 if (format_is_yuv(fb->pixel_format)) { 1205 if (format_is_yuv(fb->pixel_format)) {
992 src_x &= ~1; 1206 src_x &= ~1;
@@ -1000,12 +1214,12 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
1000 crtc_w &= ~1; 1214 crtc_w &= ~1;
1001 1215
1002 if (crtc_w == 0) 1216 if (crtc_w == 0)
1003 visible = false; 1217 state->visible = false;
1004 } 1218 }
1005 } 1219 }
1006 1220
1007 /* Check size restrictions when scaling */ 1221 /* Check size restrictions when scaling */
1008 if (visible && (src_w != crtc_w || src_h != crtc_h)) { 1222 if (state->visible && (src_w != crtc_w || src_h != crtc_h)) {
1009 unsigned int width_bytes; 1223 unsigned int width_bytes;
1010 1224
1011 WARN_ON(!intel_plane->can_scale); 1225 WARN_ON(!intel_plane->can_scale);
@@ -1013,12 +1227,13 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
1013 /* FIXME interlacing min height is 6 */ 1227 /* FIXME interlacing min height is 6 */
1014 1228
1015 if (crtc_w < 3 || crtc_h < 3) 1229 if (crtc_w < 3 || crtc_h < 3)
1016 visible = false; 1230 state->visible = false;
1017 1231
1018 if (src_w < 3 || src_h < 3) 1232 if (src_w < 3 || src_h < 3)
1019 visible = false; 1233 state->visible = false;
1020 1234
1021 width_bytes = ((src_x * pixel_size) & 63) + src_w * pixel_size; 1235 width_bytes = ((src_x * pixel_size) & 63) +
1236 src_w * pixel_size;
1022 1237
1023 if (src_w > 2048 || src_h > 2048 || 1238 if (src_w > 2048 || src_h > 2048 ||
1024 width_bytes > 4096 || fb->pitches[0] > 4096) { 1239 width_bytes > 4096 || fb->pitches[0] > 4096) {
@@ -1027,42 +1242,90 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
1027 } 1242 }
1028 } 1243 }
1029 1244
1030 dst.x1 = crtc_x; 1245 if (state->visible) {
1031 dst.x2 = crtc_x + crtc_w; 1246 src->x1 = src_x;
1032 dst.y1 = crtc_y; 1247 src->x2 = src_x + src_w;
1033 dst.y2 = crtc_y + crtc_h; 1248 src->y1 = src_y;
1249 src->y2 = src_y + src_h;
1250 }
1034 1251
1035 /* 1252 dst->x1 = crtc_x;
1036 * If the sprite is completely covering the primary plane, 1253 dst->x2 = crtc_x + crtc_w;
1037 * we can disable the primary and save power. 1254 dst->y1 = crtc_y;
1038 */ 1255 dst->y2 = crtc_y + crtc_h;
1039 primary_enabled = !drm_rect_equals(&dst, &clip) || colorkey_enabled(intel_plane);
1040 WARN_ON(!primary_enabled && !visible && intel_crtc->active);
1041 1256
1042 mutex_lock(&dev->struct_mutex); 1257 return 0;
1258}
1043 1259
1044 /* Note that this will apply the VT-d workaround for scanouts, 1260static int
1045 * which is more restrictive than required for sprites. (The 1261intel_prepare_sprite_plane(struct drm_plane *plane,
1046 * primary plane requires 256KiB alignment with 64 PTE padding, 1262 struct intel_plane_state *state)
1047 * the sprite planes only require 128KiB alignment and 32 PTE padding. 1263{
1048 */ 1264 struct drm_device *dev = plane->dev;
1049 ret = intel_pin_and_fence_fb_obj(dev, obj, NULL); 1265 struct drm_crtc *crtc = state->crtc;
1266 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1267 struct intel_plane *intel_plane = to_intel_plane(plane);
1268 enum pipe pipe = intel_crtc->pipe;
1269 struct drm_framebuffer *fb = state->fb;
1270 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
1271 struct drm_i915_gem_object *old_obj = intel_plane->obj;
1272 int ret;
1050 1273
1051 i915_gem_track_fb(old_obj, obj, 1274 if (old_obj != obj) {
1052 INTEL_FRONTBUFFER_SPRITE(pipe)); 1275 mutex_lock(&dev->struct_mutex);
1053 mutex_unlock(&dev->struct_mutex);
1054 1276
1055 if (ret) 1277 /* Note that this will apply the VT-d workaround for scanouts,
1056 return ret; 1278 * which is more restrictive than required for sprites. (The
1279 * primary plane requires 256KiB alignment with 64 PTE padding,
1280 * the sprite planes only require 128KiB alignment and 32 PTE
1281 * padding.
1282 */
1283 ret = intel_pin_and_fence_fb_obj(plane, fb, NULL);
1284 if (ret == 0)
1285 i915_gem_track_fb(old_obj, obj,
1286 INTEL_FRONTBUFFER_SPRITE(pipe));
1287 mutex_unlock(&dev->struct_mutex);
1288 if (ret)
1289 return ret;
1290 }
1291
1292 return 0;
1293}
1057 1294
1058 intel_plane->crtc_x = orig.crtc_x; 1295static void
1059 intel_plane->crtc_y = orig.crtc_y; 1296intel_commit_sprite_plane(struct drm_plane *plane,
1060 intel_plane->crtc_w = orig.crtc_w; 1297 struct intel_plane_state *state)
1061 intel_plane->crtc_h = orig.crtc_h; 1298{
1062 intel_plane->src_x = orig.src_x; 1299 struct drm_device *dev = plane->dev;
1063 intel_plane->src_y = orig.src_y; 1300 struct drm_crtc *crtc = state->crtc;
1064 intel_plane->src_w = orig.src_w; 1301 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1065 intel_plane->src_h = orig.src_h; 1302 struct intel_plane *intel_plane = to_intel_plane(plane);
1303 enum pipe pipe = intel_crtc->pipe;
1304 struct drm_framebuffer *fb = state->fb;
1305 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
1306 struct drm_i915_gem_object *old_obj = intel_plane->obj;
1307 int crtc_x, crtc_y;
1308 unsigned int crtc_w, crtc_h;
1309 uint32_t src_x, src_y, src_w, src_h;
1310 struct drm_rect *dst = &state->dst;
1311 const struct drm_rect *clip = &state->clip;
1312 bool primary_enabled;
1313
1314 /*
1315 * If the sprite is completely covering the primary plane,
1316 * we can disable the primary and save power.
1317 */
1318 primary_enabled = !drm_rect_equals(dst, clip) || colorkey_enabled(intel_plane);
1319 WARN_ON(!primary_enabled && !state->visible && intel_crtc->active);
1320
1321 intel_plane->crtc_x = state->orig_dst.x1;
1322 intel_plane->crtc_y = state->orig_dst.y1;
1323 intel_plane->crtc_w = drm_rect_width(&state->orig_dst);
1324 intel_plane->crtc_h = drm_rect_height(&state->orig_dst);
1325 intel_plane->src_x = state->orig_src.x1;
1326 intel_plane->src_y = state->orig_src.y1;
1327 intel_plane->src_w = drm_rect_width(&state->orig_src);
1328 intel_plane->src_h = drm_rect_height(&state->orig_src);
1066 intel_plane->obj = obj; 1329 intel_plane->obj = obj;
1067 1330
1068 if (intel_crtc->active) { 1331 if (intel_crtc->active) {
@@ -1076,12 +1339,22 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
1076 if (primary_was_enabled && !primary_enabled) 1339 if (primary_was_enabled && !primary_enabled)
1077 intel_pre_disable_primary(crtc); 1340 intel_pre_disable_primary(crtc);
1078 1341
1079 if (visible) 1342 if (state->visible) {
1343 crtc_x = state->dst.x1;
1344 crtc_y = state->dst.y1;
1345 crtc_w = drm_rect_width(&state->dst);
1346 crtc_h = drm_rect_height(&state->dst);
1347 src_x = state->src.x1;
1348 src_y = state->src.y1;
1349 src_w = drm_rect_width(&state->src);
1350 src_h = drm_rect_height(&state->src);
1080 intel_plane->update_plane(plane, crtc, fb, obj, 1351 intel_plane->update_plane(plane, crtc, fb, obj,
1081 crtc_x, crtc_y, crtc_w, crtc_h, 1352 crtc_x, crtc_y, crtc_w, crtc_h,
1082 src_x, src_y, src_w, src_h); 1353 src_x, src_y, src_w, src_h);
1083 else 1354 } else {
1084 intel_plane->disable_plane(plane, crtc); 1355 intel_plane->disable_plane(plane, crtc);
1356 }
1357
1085 1358
1086 intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_SPRITE(pipe)); 1359 intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_SPRITE(pipe));
1087 1360
@@ -1090,21 +1363,65 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
1090 } 1363 }
1091 1364
1092 /* Unpin old obj after new one is active to avoid ugliness */ 1365 /* Unpin old obj after new one is active to avoid ugliness */
1093 if (old_obj) { 1366 if (old_obj && old_obj != obj) {
1367
1094 /* 1368 /*
1095 * It's fairly common to simply update the position of 1369 * It's fairly common to simply update the position of
1096 * an existing object. In that case, we don't need to 1370 * an existing object. In that case, we don't need to
1097 * wait for vblank to avoid ugliness, we only need to 1371 * wait for vblank to avoid ugliness, we only need to
1098 * do the pin & ref bookkeeping. 1372 * do the pin & ref bookkeeping.
1099 */ 1373 */
1100 if (old_obj != obj && intel_crtc->active) 1374 if (intel_crtc->active)
1101 intel_wait_for_vblank(dev, intel_crtc->pipe); 1375 intel_wait_for_vblank(dev, intel_crtc->pipe);
1102 1376
1103 mutex_lock(&dev->struct_mutex); 1377 mutex_lock(&dev->struct_mutex);
1104 intel_unpin_fb_obj(old_obj); 1378 intel_unpin_fb_obj(old_obj);
1105 mutex_unlock(&dev->struct_mutex); 1379 mutex_unlock(&dev->struct_mutex);
1106 } 1380 }
1381}
1382
1383static int
1384intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
1385 struct drm_framebuffer *fb, int crtc_x, int crtc_y,
1386 unsigned int crtc_w, unsigned int crtc_h,
1387 uint32_t src_x, uint32_t src_y,
1388 uint32_t src_w, uint32_t src_h)
1389{
1390 struct intel_plane_state state;
1391 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1392 int ret;
1107 1393
1394 state.crtc = crtc;
1395 state.fb = fb;
1396
1397 /* sample coordinates in 16.16 fixed point */
1398 state.src.x1 = src_x;
1399 state.src.x2 = src_x + src_w;
1400 state.src.y1 = src_y;
1401 state.src.y2 = src_y + src_h;
1402
1403 /* integer pixels */
1404 state.dst.x1 = crtc_x;
1405 state.dst.x2 = crtc_x + crtc_w;
1406 state.dst.y1 = crtc_y;
1407 state.dst.y2 = crtc_y + crtc_h;
1408
1409 state.clip.x1 = 0;
1410 state.clip.y1 = 0;
1411 state.clip.x2 = intel_crtc->active ? intel_crtc->config.pipe_src_w : 0;
1412 state.clip.y2 = intel_crtc->active ? intel_crtc->config.pipe_src_h : 0;
1413 state.orig_src = state.src;
1414 state.orig_dst = state.dst;
1415
1416 ret = intel_check_sprite_plane(plane, &state);
1417 if (ret)
1418 return ret;
1419
1420 ret = intel_prepare_sprite_plane(plane, &state);
1421 if (ret)
1422 return ret;
1423
1424 intel_commit_sprite_plane(plane, &state);
1108 return 0; 1425 return 0;
1109} 1426}
1110 1427
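
For orientation, the intel_plane_state this refactor introduces would look roughly as below, a sketch reconstructed from the fields the three functions above actually touch (the authoritative definition lives in intel_drv.h elsewhere in the patch). The src rectangle stays in 16.16 fixed point throughout, so shifting right by 16 recovers whole pixels:

	struct intel_plane_state {
		struct drm_crtc *crtc;
		struct drm_framebuffer *fb;
		struct drm_rect src;       /* clipped source, 16.16 fixed point */
		struct drm_rect dst;       /* clipped destination, integer pixels */
		struct drm_rect clip;      /* pipe source size, 0x0 when inactive */
		struct drm_rect orig_src;  /* coordinates as requested by the caller */
		struct drm_rect orig_dst;
		bool visible;              /* false once clipping eliminates the plane */
	};

Splitting the old monolithic intel_update_plane() into check/prepare/commit phases mirrors the shape DRM's atomic plane helpers expect, while the legacy entry point above simply drives the three phases in sequence.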
@@ -1305,6 +1622,18 @@ static uint32_t vlv_plane_formats[] = {
1305 DRM_FORMAT_VYUY, 1622 DRM_FORMAT_VYUY,
1306}; 1623};
1307 1624
1625static uint32_t skl_plane_formats[] = {
1626 DRM_FORMAT_RGB565,
1627 DRM_FORMAT_ABGR8888,
1628 DRM_FORMAT_ARGB8888,
1629 DRM_FORMAT_XBGR8888,
1630 DRM_FORMAT_XRGB8888,
1631 DRM_FORMAT_YUYV,
1632 DRM_FORMAT_YVYU,
1633 DRM_FORMAT_UYVY,
1634 DRM_FORMAT_VYUY,
1635};
1636
1308int 1637int
1309intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane) 1638intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
1310{ 1639{
@@ -1368,7 +1697,21 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe, int plane)
1368 num_plane_formats = ARRAY_SIZE(snb_plane_formats); 1697 num_plane_formats = ARRAY_SIZE(snb_plane_formats);
1369 } 1698 }
1370 break; 1699 break;
1371 1700 case 9:
1701 /*
1702 * FIXME: Skylake planes can be scaled (with some restrictions),
1703 * but this is for another time.
1704 */
1705 intel_plane->can_scale = false;
1706 intel_plane->max_downscale = 1;
1707 intel_plane->update_plane = skl_update_plane;
1708 intel_plane->disable_plane = skl_disable_plane;
1709 intel_plane->update_colorkey = skl_update_colorkey;
1710 intel_plane->get_colorkey = skl_get_colorkey;
1711
1712 plane_formats = skl_plane_formats;
1713 num_plane_formats = ARRAY_SIZE(skl_plane_formats);
1714 break;
1372 default: 1715 default:
1373 kfree(intel_plane); 1716 kfree(intel_plane);
1374 return -ENODEV; 1717 return -ENODEV;
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index c14341ca3ef9..6f5f59b880f5 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1182,18 +1182,17 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
1182 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1182 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1183 struct drm_device *dev = encoder->dev; 1183 struct drm_device *dev = encoder->dev;
1184 struct drm_i915_private *dev_priv = dev->dev_private; 1184 struct drm_i915_private *dev_priv = dev->dev_private;
1185 unsigned long irqflags;
1186 u32 tv_ctl, save_tv_ctl; 1185 u32 tv_ctl, save_tv_ctl;
1187 u32 tv_dac, save_tv_dac; 1186 u32 tv_dac, save_tv_dac;
1188 int type; 1187 int type;
1189 1188
1190 /* Disable TV interrupts around load detect or we'll recurse */ 1189 /* Disable TV interrupts around load detect or we'll recurse */
1191 if (connector->polled & DRM_CONNECTOR_POLL_HPD) { 1190 if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
1192 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1191 spin_lock_irq(&dev_priv->irq_lock);
1193 i915_disable_pipestat(dev_priv, 0, 1192 i915_disable_pipestat(dev_priv, 0,
1194 PIPE_HOTPLUG_INTERRUPT_STATUS | 1193 PIPE_HOTPLUG_INTERRUPT_STATUS |
1195 PIPE_HOTPLUG_TV_INTERRUPT_STATUS); 1194 PIPE_HOTPLUG_TV_INTERRUPT_STATUS);
1196 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1195 spin_unlock_irq(&dev_priv->irq_lock);
1197 } 1196 }
1198 1197
1199 save_tv_dac = tv_dac = I915_READ(TV_DAC); 1198 save_tv_dac = tv_dac = I915_READ(TV_DAC);
@@ -1266,11 +1265,11 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
1266 1265
1267 /* Restore interrupt config */ 1266 /* Restore interrupt config */
1268 if (connector->polled & DRM_CONNECTOR_POLL_HPD) { 1267 if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
1269 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1268 spin_lock_irq(&dev_priv->irq_lock);
1270 i915_enable_pipestat(dev_priv, 0, 1269 i915_enable_pipestat(dev_priv, 0,
1271 PIPE_HOTPLUG_INTERRUPT_STATUS | 1270 PIPE_HOTPLUG_INTERRUPT_STATUS |
1272 PIPE_HOTPLUG_TV_INTERRUPT_STATUS); 1271 PIPE_HOTPLUG_TV_INTERRUPT_STATUS);
1273 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1272 spin_unlock_irq(&dev_priv->irq_lock);
1274 } 1273 }
1275 1274
1276 return type; 1275 return type;
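
The irqsave-to-irq conversion is safe because connector load detection runs in process context with interrupts enabled, so there is no prior IRQ state worth preserving. The two idioms side by side (illustrative fragment; lock is a placeholder spinlock_t):

	unsigned long flags;

	spin_lock_irqsave(&lock, flags);      /* usable in any context: saves IRQ state */
	/* ... critical section ... */
	spin_unlock_irqrestore(&lock, flags); /* restores exactly what was saved */

	spin_lock_irq(&lock);                 /* caller guarantees IRQs are enabled */
	/* ... critical section ... */
	spin_unlock_irq(&lock);               /* unconditionally re-enables IRQs */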
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 918b76163965..46de8d75b4bf 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -43,23 +43,17 @@
43static void 43static void
44assert_device_not_suspended(struct drm_i915_private *dev_priv) 44assert_device_not_suspended(struct drm_i915_private *dev_priv)
45{ 45{
46 WARN(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended, 46 WARN_ONCE(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
47 "Device suspended\n"); 47 "Device suspended\n");
48} 48}
49 49
50static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv) 50static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
51{ 51{
52 u32 gt_thread_status_mask;
53
54 if (IS_HASWELL(dev_priv->dev))
55 gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
56 else
57 gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;
58
59 /* w/a for a sporadic read returning 0 by waiting for the GT 52 /* w/a for a sporadic read returning 0 by waiting for the GT
60 * thread to wake up. 53 * thread to wake up.
61 */ 54 */
62 if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500)) 55 if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) &
56 GEN6_GT_THREAD_STATUS_CORE_MASK) == 0, 500))
63 DRM_ERROR("GT thread status wait timed out\n"); 57 DRM_ERROR("GT thread status wait timed out\n");
64} 58}
65 59
@@ -120,8 +114,7 @@ static void __gen7_gt_force_wake_mt_get(struct drm_i915_private *dev_priv,
120 DRM_ERROR("Timed out waiting for forcewake to ack request.\n"); 114 DRM_ERROR("Timed out waiting for forcewake to ack request.\n");
121 115
122 /* WaRsForcewakeWaitTC0:ivb,hsw */ 116 /* WaRsForcewakeWaitTC0:ivb,hsw */
123 if (INTEL_INFO(dev_priv->dev)->gen < 8) 117 __gen6_gt_wait_for_thread_c0(dev_priv);
124 __gen6_gt_wait_for_thread_c0(dev_priv);
125} 118}
126 119
127static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv) 120static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
@@ -229,10 +222,6 @@ static void __vlv_force_wake_get(struct drm_i915_private *dev_priv,
229 FORCEWAKE_ACK_TIMEOUT_MS)) 222 FORCEWAKE_ACK_TIMEOUT_MS))
230 DRM_ERROR("Timed out: waiting for media to ack.\n"); 223 DRM_ERROR("Timed out: waiting for media to ack.\n");
231 } 224 }
232
233 /* WaRsForcewakeWaitTC0:vlv */
234 if (!IS_CHERRYVIEW(dev_priv->dev))
235 __gen6_gt_wait_for_thread_c0(dev_priv);
236} 225}
237 226
238static void __vlv_force_wake_put(struct drm_i915_private *dev_priv, 227static void __vlv_force_wake_put(struct drm_i915_private *dev_priv,
@@ -299,6 +288,154 @@ static void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
299 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 288 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
300} 289}
301 290
291static void __gen9_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
292{
293 __raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
294 _MASKED_BIT_DISABLE(0xffff));
295
296 __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
297 _MASKED_BIT_DISABLE(0xffff));
298
299 __raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
300 _MASKED_BIT_DISABLE(0xffff));
301}
302
303static void
304__gen9_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
305{
306 /* Check for Render Engine */
307 if (FORCEWAKE_RENDER & fw_engine) {
308 if (wait_for_atomic((__raw_i915_read32(dev_priv,
309 FORCEWAKE_ACK_RENDER_GEN9) &
310 FORCEWAKE_KERNEL) == 0,
311 FORCEWAKE_ACK_TIMEOUT_MS))
312	DRM_ERROR("Timed out: waiting for Render forcewake old ack to clear.\n");
313
314 __raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
315 _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
316
317 if (wait_for_atomic((__raw_i915_read32(dev_priv,
318 FORCEWAKE_ACK_RENDER_GEN9) &
319 FORCEWAKE_KERNEL),
320 FORCEWAKE_ACK_TIMEOUT_MS))
321 DRM_ERROR("Timed out: waiting for Render to ack.\n");
322 }
323
324 /* Check for Media Engine */
325 if (FORCEWAKE_MEDIA & fw_engine) {
326 if (wait_for_atomic((__raw_i915_read32(dev_priv,
327 FORCEWAKE_ACK_MEDIA_GEN9) &
328 FORCEWAKE_KERNEL) == 0,
329 FORCEWAKE_ACK_TIMEOUT_MS))
330	DRM_ERROR("Timed out: waiting for Media forcewake old ack to clear.\n");
331
332 __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
333 _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
334
335 if (wait_for_atomic((__raw_i915_read32(dev_priv,
336 FORCEWAKE_ACK_MEDIA_GEN9) &
337 FORCEWAKE_KERNEL),
338 FORCEWAKE_ACK_TIMEOUT_MS))
339 DRM_ERROR("Timed out: waiting for Media to ack.\n");
340 }
341
342 /* Check for Blitter Engine */
343 if (FORCEWAKE_BLITTER & fw_engine) {
344 if (wait_for_atomic((__raw_i915_read32(dev_priv,
345 FORCEWAKE_ACK_BLITTER_GEN9) &
346 FORCEWAKE_KERNEL) == 0,
347 FORCEWAKE_ACK_TIMEOUT_MS))
348	DRM_ERROR("Timed out: waiting for Blitter forcewake old ack to clear.\n");
349
350 __raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
351 _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
352
353 if (wait_for_atomic((__raw_i915_read32(dev_priv,
354 FORCEWAKE_ACK_BLITTER_GEN9) &
355 FORCEWAKE_KERNEL),
356 FORCEWAKE_ACK_TIMEOUT_MS))
357 DRM_ERROR("Timed out: waiting for Blitter to ack.\n");
358 }
359}
360
361static void
362__gen9_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
363{
364 /* Check for Render Engine */
365 if (FORCEWAKE_RENDER & fw_engine)
366 __raw_i915_write32(dev_priv, FORCEWAKE_RENDER_GEN9,
367 _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
368
369 /* Check for Media Engine */
370 if (FORCEWAKE_MEDIA & fw_engine)
371 __raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_GEN9,
372 _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
373
374 /* Check for Blitter Engine */
375 if (FORCEWAKE_BLITTER & fw_engine)
376 __raw_i915_write32(dev_priv, FORCEWAKE_BLITTER_GEN9,
377 _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
378}
379
380static void
381gen9_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
382{
383 unsigned long irqflags;
384
385 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
386
387 if (FORCEWAKE_RENDER & fw_engine) {
388 if (dev_priv->uncore.fw_rendercount++ == 0)
389 dev_priv->uncore.funcs.force_wake_get(dev_priv,
390 FORCEWAKE_RENDER);
391 }
392
393 if (FORCEWAKE_MEDIA & fw_engine) {
394 if (dev_priv->uncore.fw_mediacount++ == 0)
395 dev_priv->uncore.funcs.force_wake_get(dev_priv,
396 FORCEWAKE_MEDIA);
397 }
398
399 if (FORCEWAKE_BLITTER & fw_engine) {
400 if (dev_priv->uncore.fw_blittercount++ == 0)
401 dev_priv->uncore.funcs.force_wake_get(dev_priv,
402 FORCEWAKE_BLITTER);
403 }
404
405 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
406}
407
408static void
409gen9_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
410{
411 unsigned long irqflags;
412
413 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
414
415 if (FORCEWAKE_RENDER & fw_engine) {
416 WARN_ON(dev_priv->uncore.fw_rendercount == 0);
417 if (--dev_priv->uncore.fw_rendercount == 0)
418 dev_priv->uncore.funcs.force_wake_put(dev_priv,
419 FORCEWAKE_RENDER);
420 }
421
422 if (FORCEWAKE_MEDIA & fw_engine) {
423 WARN_ON(dev_priv->uncore.fw_mediacount == 0);
424 if (--dev_priv->uncore.fw_mediacount == 0)
425 dev_priv->uncore.funcs.force_wake_put(dev_priv,
426 FORCEWAKE_MEDIA);
427 }
428
429 if (FORCEWAKE_BLITTER & fw_engine) {
430 WARN_ON(dev_priv->uncore.fw_blittercount == 0);
431 if (--dev_priv->uncore.fw_blittercount == 0)
432 dev_priv->uncore.funcs.force_wake_put(dev_priv,
433 FORCEWAKE_BLITTER);
434 }
435
436 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
437}
438
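
These two entry points keep a per-engine reference count under the uncore spinlock: only the 0 to 1 transition issues the hardware wake and only the 1 to 0 transition releases it, so nested users stay balanced. A sketch of the calling pattern through the public wrappers (SOME_MEDIA_REG is a placeholder; the redirect from gen6_gt_force_wake_get() to these routines is added further down in this diff):

	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_MEDIA);  /* 0 -> 1: wakes the engine */
	val = __raw_i915_read32(dev_priv, SOME_MEDIA_REG);  /* safe while the ref is held */
	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_MEDIA);  /* 1 -> 0: allows sleep again */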
302static void gen6_force_wake_timer(unsigned long arg) 439static void gen6_force_wake_timer(unsigned long arg)
303{ 440{
304 struct drm_i915_private *dev_priv = (void *)arg; 441 struct drm_i915_private *dev_priv = (void *)arg;
@@ -337,6 +474,9 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
337 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev)) 474 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev) || IS_BROADWELL(dev))
338 __gen7_gt_force_wake_mt_reset(dev_priv); 475 __gen7_gt_force_wake_mt_reset(dev_priv);
339 476
477 if (IS_GEN9(dev))
478 __gen9_gt_force_wake_mt_reset(dev_priv);
479
340 if (restore) { /* If reset with a user forcewake, try to restore */ 480 if (restore) { /* If reset with a user forcewake, try to restore */
341 unsigned fw = 0; 481 unsigned fw = 0;
342 482
@@ -346,6 +486,15 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
346 486
347 if (dev_priv->uncore.fw_mediacount) 487 if (dev_priv->uncore.fw_mediacount)
348 fw |= FORCEWAKE_MEDIA; 488 fw |= FORCEWAKE_MEDIA;
489 } else if (IS_GEN9(dev)) {
490 if (dev_priv->uncore.fw_rendercount)
491 fw |= FORCEWAKE_RENDER;
492
493 if (dev_priv->uncore.fw_mediacount)
494 fw |= FORCEWAKE_MEDIA;
495
496 if (dev_priv->uncore.fw_blittercount)
497 fw |= FORCEWAKE_BLITTER;
349 } else { 498 } else {
350 if (dev_priv->uncore.forcewake_count) 499 if (dev_priv->uncore.forcewake_count)
351 fw = FORCEWAKE_ALL; 500 fw = FORCEWAKE_ALL;
@@ -363,7 +512,8 @@ void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
363 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); 512 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
364} 513}
365 514
366void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake) 515static void __intel_uncore_early_sanitize(struct drm_device *dev,
516 bool restore_forcewake)
367{ 517{
368 struct drm_i915_private *dev_priv = dev->dev_private; 518 struct drm_i915_private *dev_priv = dev->dev_private;
369 519
@@ -389,6 +539,12 @@ void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
389 intel_uncore_forcewake_reset(dev, restore_forcewake); 539 intel_uncore_forcewake_reset(dev, restore_forcewake);
390} 540}
391 541
542void intel_uncore_early_sanitize(struct drm_device *dev, bool restore_forcewake)
543{
544 __intel_uncore_early_sanitize(dev, restore_forcewake);
545 i915_check_and_clear_faults(dev);
546}
547
392void intel_uncore_sanitize(struct drm_device *dev) 548void intel_uncore_sanitize(struct drm_device *dev)
393{ 549{
394 /* BIOS often leaves RC6 enabled, but disable it for hw init */ 550 /* BIOS often leaves RC6 enabled, but disable it for hw init */
@@ -410,6 +566,10 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine)
410 566
411 intel_runtime_pm_get(dev_priv); 567 intel_runtime_pm_get(dev_priv);
412 568
569 /* Redirect to Gen9 specific routine */
570 if (IS_GEN9(dev_priv->dev))
571 return gen9_force_wake_get(dev_priv, fw_engine);
572
413 /* Redirect to VLV specific routine */ 573 /* Redirect to VLV specific routine */
414 if (IS_VALLEYVIEW(dev_priv->dev)) 574 if (IS_VALLEYVIEW(dev_priv->dev))
415 return vlv_force_wake_get(dev_priv, fw_engine); 575 return vlv_force_wake_get(dev_priv, fw_engine);
@@ -431,6 +591,12 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine)
431 if (!dev_priv->uncore.funcs.force_wake_put) 591 if (!dev_priv->uncore.funcs.force_wake_put)
432 return; 592 return;
433 593
594 /* Redirect to Gen9 specific routine */
595 if (IS_GEN9(dev_priv->dev)) {
596 gen9_force_wake_put(dev_priv, fw_engine);
597 goto out;
598 }
599
434 /* Redirect to VLV specific routine */ 600 /* Redirect to VLV specific routine */
435 if (IS_VALLEYVIEW(dev_priv->dev)) { 601 if (IS_VALLEYVIEW(dev_priv->dev)) {
436 vlv_force_wake_put(dev_priv, fw_engine); 602 vlv_force_wake_put(dev_priv, fw_engine);
@@ -504,6 +670,38 @@ void assert_force_wake_inactive(struct drm_i915_private *dev_priv)
504 REG_RANGE((reg), 0x14000, 0x14400) || \ 670 REG_RANGE((reg), 0x14000, 0x14400) || \
505 REG_RANGE((reg), 0x22000, 0x24000)) 671 REG_RANGE((reg), 0x22000, 0x24000))
506 672
673#define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
674 REG_RANGE((reg), 0xB00, 0x2000)
675
676#define FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) \
677 (REG_RANGE((reg), 0x2000, 0x2700) || \
678 REG_RANGE((reg), 0x3000, 0x4000) || \
679 REG_RANGE((reg), 0x5200, 0x8000) || \
680 REG_RANGE((reg), 0x8140, 0x8160) || \
681 REG_RANGE((reg), 0x8300, 0x8500) || \
682 REG_RANGE((reg), 0x8C00, 0x8D00) || \
683 REG_RANGE((reg), 0xB000, 0xB480) || \
684 REG_RANGE((reg), 0xE000, 0xE900) || \
685 REG_RANGE((reg), 0x24400, 0x24800))
686
687#define FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) \
688 (REG_RANGE((reg), 0x8130, 0x8140) || \
689 REG_RANGE((reg), 0x8800, 0x8A00) || \
690 REG_RANGE((reg), 0xD000, 0xD800) || \
691 REG_RANGE((reg), 0x12000, 0x14000) || \
692 REG_RANGE((reg), 0x1A000, 0x1EA00) || \
693 REG_RANGE((reg), 0x30000, 0x40000))
694
695#define FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg) \
696 REG_RANGE((reg), 0x9400, 0x9800)
697
698#define FORCEWAKE_GEN9_BLITTER_RANGE_OFFSET(reg) \
699	((reg) < 0x40000 && \
700 !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) && \
701 !FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) && \
702 !FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) && \
703 !FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))
704
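
Taken together these ranges partition the gen9 MMIO space by forcewake domain. A hypothetical helper (not in the patch) restating the decode that the gen9 read/write macros later in this diff perform inline:

	static unsigned gen9_reg_to_fw_domains(u32 reg)
	{
		if (reg >= 0x40000 || FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))
			return 0;                    /* no forcewake needed */
		if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg))
			return FORCEWAKE_RENDER;     /* e.g. 0x8140-0x815f */
		if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg))
			return FORCEWAKE_MEDIA;      /* e.g. 0x8130-0x813f */
		if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg))
			return FORCEWAKE_RENDER | FORCEWAKE_MEDIA; /* 0x9400-0x97ff */
		return FORCEWAKE_BLITTER;            /* all other offsets below 0x40000 */
	}

REG_RANGE() is start-inclusive and end-exclusive, which is why, for example, 0x8140 is a render register while 0x813f belongs to media.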
507static void 705static void
508ilk_dummy_write(struct drm_i915_private *dev_priv) 706ilk_dummy_write(struct drm_i915_private *dev_priv)
509{ 707{
@@ -634,6 +832,45 @@ chv_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
634 REG_READ_FOOTER; \ 832 REG_READ_FOOTER; \
635} 833}
636 834
835#define SKL_NEEDS_FORCE_WAKE(dev_priv, reg) \
836 ((reg) < 0x40000 && !FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg))
837
838#define __gen9_read(x) \
839static u##x \
840gen9_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
841 REG_READ_HEADER(x); \
842 if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
843 val = __raw_i915_read##x(dev_priv, reg); \
844 } else { \
845 unsigned fwengine = 0; \
846 if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) { \
847 if (dev_priv->uncore.fw_rendercount == 0) \
848 fwengine = FORCEWAKE_RENDER; \
849 } else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) { \
850 if (dev_priv->uncore.fw_mediacount == 0) \
851 fwengine = FORCEWAKE_MEDIA; \
852 } else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) { \
853 if (dev_priv->uncore.fw_rendercount == 0) \
854 fwengine |= FORCEWAKE_RENDER; \
855 if (dev_priv->uncore.fw_mediacount == 0) \
856 fwengine |= FORCEWAKE_MEDIA; \
857 } else { \
858 if (dev_priv->uncore.fw_blittercount == 0) \
859 fwengine = FORCEWAKE_BLITTER; \
860 } \
861 if (fwengine) \
862 dev_priv->uncore.funcs.force_wake_get(dev_priv, fwengine); \
863 val = __raw_i915_read##x(dev_priv, reg); \
864 if (fwengine) \
865 dev_priv->uncore.funcs.force_wake_put(dev_priv, fwengine); \
866 } \
867 REG_READ_FOOTER; \
868}
869
870__gen9_read(8)
871__gen9_read(16)
872__gen9_read(32)
873__gen9_read(64)
637__chv_read(8) 874__chv_read(8)
638__chv_read(16) 875__chv_read(16)
639__chv_read(32) 876__chv_read(32)
@@ -655,6 +892,7 @@ __gen4_read(16)
655__gen4_read(32) 892__gen4_read(32)
656__gen4_read(64) 893__gen4_read(64)
657 894
895#undef __gen9_read
658#undef __chv_read 896#undef __chv_read
659#undef __vlv_read 897#undef __vlv_read
660#undef __gen6_read 898#undef __gen6_read
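
For readers new to this x-macro style: each __gen9_read(x) instantiation token-pastes the access width into both the function name and the raw accessor, stamping out gen9_read8/16/32/64 from one template before the #undef retires the generator. A minimal standalone illustration of the technique (nothing below is driver code):

	#include <stdio.h>

	/* The ## operator glues tokens: DEFINE_READ(8) defines read8(). */
	#define DEFINE_READ(x) \
	static unsigned read##x(void) { return x; }

	DEFINE_READ(8)
	DEFINE_READ(16)
	DEFINE_READ(32)

	#undef DEFINE_READ

	int main(void)
	{
		printf("%u %u %u\n", read8(), read16(), read32());
		return 0;
	}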
@@ -792,6 +1030,69 @@ chv_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace)
792 REG_WRITE_FOOTER; \ 1030 REG_WRITE_FOOTER; \
793} 1031}
794 1032
1033static const u32 gen9_shadowed_regs[] = {
1034 RING_TAIL(RENDER_RING_BASE),
1035 RING_TAIL(GEN6_BSD_RING_BASE),
1036 RING_TAIL(VEBOX_RING_BASE),
1037 RING_TAIL(BLT_RING_BASE),
1038 FORCEWAKE_BLITTER_GEN9,
1039 FORCEWAKE_RENDER_GEN9,
1040 FORCEWAKE_MEDIA_GEN9,
1041 GEN6_RPNSWREQ,
1042 GEN6_RC_VIDEO_FREQ,
1043 /* TODO: Other registers are not yet used */
1044};
1045
1046static bool is_gen9_shadowed(struct drm_i915_private *dev_priv, u32 reg)
1047{
1048 int i;
1049 for (i = 0; i < ARRAY_SIZE(gen9_shadowed_regs); i++)
1050 if (reg == gen9_shadowed_regs[i])
1051 return true;
1052
1053 return false;
1054}
1055
1056#define __gen9_write(x) \
1057static void \
1058gen9_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, \
1059 bool trace) { \
1060 REG_WRITE_HEADER; \
1061 if (!SKL_NEEDS_FORCE_WAKE((dev_priv), (reg)) || \
1062 is_gen9_shadowed(dev_priv, reg)) { \
1063 __raw_i915_write##x(dev_priv, reg, val); \
1064 } else { \
1065 unsigned fwengine = 0; \
1066 if (FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg)) { \
1067 if (dev_priv->uncore.fw_rendercount == 0) \
1068 fwengine = FORCEWAKE_RENDER; \
1069 } else if (FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg)) { \
1070 if (dev_priv->uncore.fw_mediacount == 0) \
1071 fwengine = FORCEWAKE_MEDIA; \
1072 } else if (FORCEWAKE_GEN9_COMMON_RANGE_OFFSET(reg)) { \
1073 if (dev_priv->uncore.fw_rendercount == 0) \
1074 fwengine |= FORCEWAKE_RENDER; \
1075 if (dev_priv->uncore.fw_mediacount == 0) \
1076 fwengine |= FORCEWAKE_MEDIA; \
1077 } else { \
1078 if (dev_priv->uncore.fw_blittercount == 0) \
1079 fwengine = FORCEWAKE_BLITTER; \
1080 } \
1081 if (fwengine) \
1082 dev_priv->uncore.funcs.force_wake_get(dev_priv, \
1083 fwengine); \
1084 __raw_i915_write##x(dev_priv, reg, val); \
1085 if (fwengine) \
1086 dev_priv->uncore.funcs.force_wake_put(dev_priv, \
1087 fwengine); \
1088 } \
1089 REG_WRITE_FOOTER; \
1090}
1091
1092__gen9_write(8)
1093__gen9_write(16)
1094__gen9_write(32)
1095__gen9_write(64)
795__chv_write(8) 1096__chv_write(8)
796__chv_write(16) 1097__chv_write(16)
797__chv_write(32) 1098__chv_write(32)
@@ -817,6 +1118,7 @@ __gen4_write(16)
817__gen4_write(32) 1118__gen4_write(32)
818__gen4_write(64) 1119__gen4_write(64)
819 1120
1121#undef __gen9_write
820#undef __chv_write 1122#undef __chv_write
821#undef __gen8_write 1123#undef __gen8_write
822#undef __hsw_write 1124#undef __hsw_write
@@ -826,6 +1128,22 @@ __gen4_write(64)
826#undef REG_WRITE_FOOTER 1128#undef REG_WRITE_FOOTER
827#undef REG_WRITE_HEADER 1129#undef REG_WRITE_HEADER
828 1130
1131#define ASSIGN_WRITE_MMIO_VFUNCS(x) \
1132do { \
1133 dev_priv->uncore.funcs.mmio_writeb = x##_write8; \
1134 dev_priv->uncore.funcs.mmio_writew = x##_write16; \
1135 dev_priv->uncore.funcs.mmio_writel = x##_write32; \
1136 dev_priv->uncore.funcs.mmio_writeq = x##_write64; \
1137} while (0)
1138
1139#define ASSIGN_READ_MMIO_VFUNCS(x) \
1140do { \
1141 dev_priv->uncore.funcs.mmio_readb = x##_read8; \
1142 dev_priv->uncore.funcs.mmio_readw = x##_read16; \
1143 dev_priv->uncore.funcs.mmio_readl = x##_read32; \
1144 dev_priv->uncore.funcs.mmio_readq = x##_read64; \
1145} while (0)
1146
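
The do { ... } while (0) wrapper is what lets these multi-statement macros behave as a single statement at the call site. A small runnable illustration of the hazard it avoids (stmt_a, stmt_b and cond are placeholders):

	#include <stdio.h>

	static int cond = 0;
	static void stmt_a(int x) { printf("a:%d\n", x); }
	static void stmt_b(int x) { printf("b:%d\n", x); }

	#define ASSIGN_BAD(x)  stmt_a(x); stmt_b(x)
	#define ASSIGN_GOOD(x) do { stmt_a(x); stmt_b(x); } while (0)

	int main(void)
	{
		if (cond)
			ASSIGN_BAD(1);  /* bug: stmt_b(1) runs even though cond is false */

		if (cond)
			ASSIGN_GOOD(1); /* expands to exactly one statement... */
		else
			ASSIGN_GOOD(2); /* ...so a following else still parses */
		return 0;
	}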
829void intel_uncore_init(struct drm_device *dev) 1147void intel_uncore_init(struct drm_device *dev)
830{ 1148{
831 struct drm_i915_private *dev_priv = dev->dev_private; 1149 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -833,9 +1151,12 @@ void intel_uncore_init(struct drm_device *dev)
833 setup_timer(&dev_priv->uncore.force_wake_timer, 1151 setup_timer(&dev_priv->uncore.force_wake_timer,
834 gen6_force_wake_timer, (unsigned long)dev_priv); 1152 gen6_force_wake_timer, (unsigned long)dev_priv);
835 1153
836 intel_uncore_early_sanitize(dev, false); 1154 __intel_uncore_early_sanitize(dev, false);
837 1155
838 if (IS_VALLEYVIEW(dev)) { 1156 if (IS_GEN9(dev)) {
1157 dev_priv->uncore.funcs.force_wake_get = __gen9_force_wake_get;
1158 dev_priv->uncore.funcs.force_wake_put = __gen9_force_wake_put;
1159 } else if (IS_VALLEYVIEW(dev)) {
839 dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get; 1160 dev_priv->uncore.funcs.force_wake_get = __vlv_force_wake_get;
840 dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put; 1161 dev_priv->uncore.funcs.force_wake_put = __vlv_force_wake_put;
841 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { 1162 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
@@ -881,77 +1202,52 @@ void intel_uncore_init(struct drm_device *dev)
881 1202
882 switch (INTEL_INFO(dev)->gen) { 1203 switch (INTEL_INFO(dev)->gen) {
883 default: 1204 default:
1205 WARN_ON(1);
1206 return;
1207 case 9:
1208 ASSIGN_WRITE_MMIO_VFUNCS(gen9);
1209 ASSIGN_READ_MMIO_VFUNCS(gen9);
1210 break;
1211 case 8:
884 if (IS_CHERRYVIEW(dev)) { 1212 if (IS_CHERRYVIEW(dev)) {
885 dev_priv->uncore.funcs.mmio_writeb = chv_write8; 1213 ASSIGN_WRITE_MMIO_VFUNCS(chv);
886 dev_priv->uncore.funcs.mmio_writew = chv_write16; 1214 ASSIGN_READ_MMIO_VFUNCS(chv);
887 dev_priv->uncore.funcs.mmio_writel = chv_write32;
888 dev_priv->uncore.funcs.mmio_writeq = chv_write64;
889 dev_priv->uncore.funcs.mmio_readb = chv_read8;
890 dev_priv->uncore.funcs.mmio_readw = chv_read16;
891 dev_priv->uncore.funcs.mmio_readl = chv_read32;
892 dev_priv->uncore.funcs.mmio_readq = chv_read64;
893 1215
894 } else { 1216 } else {
895 dev_priv->uncore.funcs.mmio_writeb = gen8_write8; 1217 ASSIGN_WRITE_MMIO_VFUNCS(gen8);
896 dev_priv->uncore.funcs.mmio_writew = gen8_write16; 1218 ASSIGN_READ_MMIO_VFUNCS(gen6);
897 dev_priv->uncore.funcs.mmio_writel = gen8_write32;
898 dev_priv->uncore.funcs.mmio_writeq = gen8_write64;
899 dev_priv->uncore.funcs.mmio_readb = gen6_read8;
900 dev_priv->uncore.funcs.mmio_readw = gen6_read16;
901 dev_priv->uncore.funcs.mmio_readl = gen6_read32;
902 dev_priv->uncore.funcs.mmio_readq = gen6_read64;
903 } 1219 }
904 break; 1220 break;
905 case 7: 1221 case 7:
906 case 6: 1222 case 6:
907 if (IS_HASWELL(dev)) { 1223 if (IS_HASWELL(dev)) {
908 dev_priv->uncore.funcs.mmio_writeb = hsw_write8; 1224 ASSIGN_WRITE_MMIO_VFUNCS(hsw);
909 dev_priv->uncore.funcs.mmio_writew = hsw_write16;
910 dev_priv->uncore.funcs.mmio_writel = hsw_write32;
911 dev_priv->uncore.funcs.mmio_writeq = hsw_write64;
912 } else { 1225 } else {
913 dev_priv->uncore.funcs.mmio_writeb = gen6_write8; 1226 ASSIGN_WRITE_MMIO_VFUNCS(gen6);
914 dev_priv->uncore.funcs.mmio_writew = gen6_write16;
915 dev_priv->uncore.funcs.mmio_writel = gen6_write32;
916 dev_priv->uncore.funcs.mmio_writeq = gen6_write64;
917 } 1227 }
918 1228
919 if (IS_VALLEYVIEW(dev)) { 1229 if (IS_VALLEYVIEW(dev)) {
920 dev_priv->uncore.funcs.mmio_readb = vlv_read8; 1230 ASSIGN_READ_MMIO_VFUNCS(vlv);
921 dev_priv->uncore.funcs.mmio_readw = vlv_read16;
922 dev_priv->uncore.funcs.mmio_readl = vlv_read32;
923 dev_priv->uncore.funcs.mmio_readq = vlv_read64;
924 } else { 1231 } else {
925 dev_priv->uncore.funcs.mmio_readb = gen6_read8; 1232 ASSIGN_READ_MMIO_VFUNCS(gen6);
926 dev_priv->uncore.funcs.mmio_readw = gen6_read16;
927 dev_priv->uncore.funcs.mmio_readl = gen6_read32;
928 dev_priv->uncore.funcs.mmio_readq = gen6_read64;
929 } 1233 }
930 break; 1234 break;
931 case 5: 1235 case 5:
932 dev_priv->uncore.funcs.mmio_writeb = gen5_write8; 1236 ASSIGN_WRITE_MMIO_VFUNCS(gen5);
933 dev_priv->uncore.funcs.mmio_writew = gen5_write16; 1237 ASSIGN_READ_MMIO_VFUNCS(gen5);
934 dev_priv->uncore.funcs.mmio_writel = gen5_write32;
935 dev_priv->uncore.funcs.mmio_writeq = gen5_write64;
936 dev_priv->uncore.funcs.mmio_readb = gen5_read8;
937 dev_priv->uncore.funcs.mmio_readw = gen5_read16;
938 dev_priv->uncore.funcs.mmio_readl = gen5_read32;
939 dev_priv->uncore.funcs.mmio_readq = gen5_read64;
940 break; 1238 break;
941 case 4: 1239 case 4:
942 case 3: 1240 case 3:
943 case 2: 1241 case 2:
944 dev_priv->uncore.funcs.mmio_writeb = gen4_write8; 1242 ASSIGN_WRITE_MMIO_VFUNCS(gen4);
945 dev_priv->uncore.funcs.mmio_writew = gen4_write16; 1243 ASSIGN_READ_MMIO_VFUNCS(gen4);
946 dev_priv->uncore.funcs.mmio_writel = gen4_write32;
947 dev_priv->uncore.funcs.mmio_writeq = gen4_write64;
948 dev_priv->uncore.funcs.mmio_readb = gen4_read8;
949 dev_priv->uncore.funcs.mmio_readw = gen4_read16;
950 dev_priv->uncore.funcs.mmio_readl = gen4_read32;
951 dev_priv->uncore.funcs.mmio_readq = gen4_read64;
952 break; 1244 break;
953 } 1245 }
1246
1247 i915_check_and_clear_faults(dev);
954} 1248}
1249#undef ASSIGN_WRITE_MMIO_VFUNCS
1250#undef ASSIGN_READ_MMIO_VFUNCS
955 1251
956void intel_uncore_fini(struct drm_device *dev) 1252void intel_uncore_fini(struct drm_device *dev)
957{ 1253{
@@ -968,7 +1264,7 @@ static const struct register_whitelist {
968 /* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */ 1264 /* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
969 uint32_t gen_bitmask; 1265 uint32_t gen_bitmask;
970} whitelist[] = { 1266} whitelist[] = {
971 { RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 8) }, 1267 { RING_TIMESTAMP(RENDER_RING_BASE), 8, GEN_RANGE(4, 9) },
972}; 1268};
973 1269
974int i915_reg_read_ioctl(struct drm_device *dev, 1270int i915_reg_read_ioctl(struct drm_device *dev,
@@ -1053,41 +1349,34 @@ int i915_get_reset_stats_ioctl(struct drm_device *dev,
1053 return 0; 1349 return 0;
1054} 1350}
1055 1351
1056static int i965_reset_complete(struct drm_device *dev) 1352static int i915_reset_complete(struct drm_device *dev)
1057{ 1353{
1058 u8 gdrst; 1354 u8 gdrst;
1059 pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst); 1355 pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
1060 return (gdrst & GRDOM_RESET_ENABLE) == 0; 1356 return (gdrst & GRDOM_RESET_STATUS) == 0;
1061} 1357}
1062 1358
1063static int i965_do_reset(struct drm_device *dev) 1359static int i915_do_reset(struct drm_device *dev)
1064{ 1360{
1065 int ret; 1361 /* assert reset for at least 20 usec */
1066 1362 pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
1067 /* FIXME: i965g/gm need a display save/restore for gpu reset. */ 1363 udelay(20);
1068 return -ENODEV; 1364 pci_write_config_byte(dev->pdev, I915_GDRST, 0);
1069 1365
1070 /* 1366 return wait_for(i915_reset_complete(dev), 500);
1071 * Set the domains we want to reset (GRDOM/bits 2 and 3) as 1367}
1072 * well as the reset bit (GR/bit 0). Setting the GR bit
1073 * triggers the reset; when done, the hardware will clear it.
1074 */
1075 pci_write_config_byte(dev->pdev, I965_GDRST,
1076 GRDOM_RENDER | GRDOM_RESET_ENABLE);
1077 ret = wait_for(i965_reset_complete(dev), 500);
1078 if (ret)
1079 return ret;
1080
1081 pci_write_config_byte(dev->pdev, I965_GDRST,
1082 GRDOM_MEDIA | GRDOM_RESET_ENABLE);
1083
1084 ret = wait_for(i965_reset_complete(dev), 500);
1085 if (ret)
1086 return ret;
1087 1368
1088 pci_write_config_byte(dev->pdev, I965_GDRST, 0); 1369static int g4x_reset_complete(struct drm_device *dev)
1370{
1371 u8 gdrst;
1372 pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
1373 return (gdrst & GRDOM_RESET_ENABLE) == 0;
1374}
1089 1375
1090 return 0; 1376static int g33_do_reset(struct drm_device *dev)
1377{
1378 pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
1379 return wait_for(g4x_reset_complete(dev), 500);
1091} 1380}
1092 1381
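
Both new helpers drive the same GDRST handshake in PCI config space, differing only in the completion bit they poll: i915_reset_complete() waits for GRDOM_RESET_STATUS to clear, while g4x_reset_complete() (used by g33 above and g4x below) watches GRDOM_RESET_ENABLE itself. Condensed, the i915 variant does the following (sketch reusing the names above; wait_for() is the driver's millisecond-bounded poll):

	u8 gdrst;

	pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE); /* assert */
	udelay(20);                                      /* hold reset >= 20 us */
	pci_write_config_byte(dev->pdev, I915_GDRST, 0); /* deassert */

	pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
	/* complete once (gdrst & GRDOM_RESET_STATUS) == 0, within 500 ms */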
1093static int g4x_do_reset(struct drm_device *dev) 1382static int g4x_do_reset(struct drm_device *dev)
@@ -1095,9 +1384,9 @@ static int g4x_do_reset(struct drm_device *dev)
1095 struct drm_i915_private *dev_priv = dev->dev_private; 1384 struct drm_i915_private *dev_priv = dev->dev_private;
1096 int ret; 1385 int ret;
1097 1386
1098 pci_write_config_byte(dev->pdev, I965_GDRST, 1387 pci_write_config_byte(dev->pdev, I915_GDRST,
1099 GRDOM_RENDER | GRDOM_RESET_ENABLE); 1388 GRDOM_RENDER | GRDOM_RESET_ENABLE);
1100 ret = wait_for(i965_reset_complete(dev), 500); 1389 ret = wait_for(g4x_reset_complete(dev), 500);
1101 if (ret) 1390 if (ret)
1102 return ret; 1391 return ret;
1103 1392
@@ -1105,9 +1394,9 @@ static int g4x_do_reset(struct drm_device *dev)
1105 I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE); 1394 I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
1106 POSTING_READ(VDECCLK_GATE_D); 1395 POSTING_READ(VDECCLK_GATE_D);
1107 1396
1108 pci_write_config_byte(dev->pdev, I965_GDRST, 1397 pci_write_config_byte(dev->pdev, I915_GDRST,
1109 GRDOM_MEDIA | GRDOM_RESET_ENABLE); 1398 GRDOM_MEDIA | GRDOM_RESET_ENABLE);
1110 ret = wait_for(i965_reset_complete(dev), 500); 1399 ret = wait_for(g4x_reset_complete(dev), 500);
1111 if (ret) 1400 if (ret)
1112 return ret; 1401 return ret;
1113 1402
@@ -1115,7 +1404,7 @@ static int g4x_do_reset(struct drm_device *dev)
1115 I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE); 1404 I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
1116 POSTING_READ(VDECCLK_GATE_D); 1405 POSTING_READ(VDECCLK_GATE_D);
1117 1406
1118 pci_write_config_byte(dev->pdev, I965_GDRST, 0); 1407 pci_write_config_byte(dev->pdev, I915_GDRST, 0);
1119 1408
1120 return 0; 1409 return 0;
1121} 1410}
@@ -1173,8 +1462,10 @@ int intel_gpu_reset(struct drm_device *dev)
1173 return ironlake_do_reset(dev); 1462 return ironlake_do_reset(dev);
1174 else if (IS_G4X(dev)) 1463 else if (IS_G4X(dev))
1175 return g4x_do_reset(dev); 1464 return g4x_do_reset(dev);
1176 else if (IS_GEN4(dev)) 1465 else if (IS_G33(dev))
1177 return i965_do_reset(dev); 1466 return g33_do_reset(dev);
1467 else if (INTEL_INFO(dev)->gen >= 3)
1468 return i915_do_reset(dev);
1178 else 1469 else
1179 return -ENODEV; 1470 return -ENODEV;
1180} 1471}
diff --git a/drivers/staging/imx-drm/Kconfig b/drivers/gpu/drm/imx/Kconfig
index 82fb758a29bc..82fb758a29bc 100644
--- a/drivers/staging/imx-drm/Kconfig
+++ b/drivers/gpu/drm/imx/Kconfig
diff --git a/drivers/staging/imx-drm/Makefile b/drivers/gpu/drm/imx/Makefile
index 582c438d8cbd..582c438d8cbd 100644
--- a/drivers/staging/imx-drm/Makefile
+++ b/drivers/gpu/drm/imx/Makefile
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index ad6173500bfc..e48b2211d2d6 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -24,6 +24,7 @@
24#include <drm/drm_crtc_helper.h> 24#include <drm/drm_crtc_helper.h>
25#include <drm/drm_gem_cma_helper.h> 25#include <drm/drm_gem_cma_helper.h>
26#include <drm/drm_fb_cma_helper.h> 26#include <drm/drm_fb_cma_helper.h>
27#include <drm/drm_plane_helper.h>
27 28
28#include "imx-drm.h" 29#include "imx-drm.h"
29 30
diff --git a/drivers/staging/imx-drm/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h
index 7453ae00c412..7453ae00c412 100644
--- a/drivers/staging/imx-drm/imx-drm.h
+++ b/drivers/gpu/drm/imx/imx-drm.h
diff --git a/drivers/staging/imx-drm/imx-hdmi.c b/drivers/gpu/drm/imx/imx-hdmi.c
index ddc53e039530..ddc53e039530 100644
--- a/drivers/staging/imx-drm/imx-hdmi.c
+++ b/drivers/gpu/drm/imx/imx-hdmi.c
diff --git a/drivers/staging/imx-drm/imx-hdmi.h b/drivers/gpu/drm/imx/imx-hdmi.h
index 39b677689db6..39b677689db6 100644
--- a/drivers/staging/imx-drm/imx-hdmi.h
+++ b/drivers/gpu/drm/imx/imx-hdmi.h
diff --git a/drivers/staging/imx-drm/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 2638dc1671d0..2638dc1671d0 100644
--- a/drivers/staging/imx-drm/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
diff --git a/drivers/staging/imx-drm/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c
index 64b54d7f996c..64b54d7f996c 100644
--- a/drivers/staging/imx-drm/imx-tve.c
+++ b/drivers/gpu/drm/imx/imx-tve.c
diff --git a/drivers/staging/imx-drm/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 11e84a251773..11e84a251773 100644
--- a/drivers/staging/imx-drm/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
diff --git a/drivers/staging/imx-drm/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 944962b692bb..944962b692bb 100644
--- a/drivers/staging/imx-drm/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
diff --git a/drivers/staging/imx-drm/ipuv3-plane.h b/drivers/gpu/drm/imx/ipuv3-plane.h
index c0aae5bcb5d4..c0aae5bcb5d4 100644
--- a/drivers/staging/imx-drm/ipuv3-plane.h
+++ b/drivers/gpu/drm/imx/ipuv3-plane.h
diff --git a/drivers/staging/imx-drm/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 8a76a5c1c34b..8a76a5c1c34b 100644
--- a/drivers/staging/imx-drm/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 83485ab81ce8..9872ba9abf1a 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -15,6 +15,7 @@
15 15
16#include <drm/drmP.h> 16#include <drm/drmP.h>
17#include <drm/drm_crtc_helper.h> 17#include <drm/drm_crtc_helper.h>
18#include <drm/drm_plane_helper.h>
18 19
19#include "mgag200_drv.h" 20#include "mgag200_drv.h"
20 21
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 9d907c526c94..5b2a1ff95d3d 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -3,6 +3,7 @@ config DRM_MSM
3 tristate "MSM DRM" 3 tristate "MSM DRM"
4 depends on DRM 4 depends on DRM
5 depends on ARCH_QCOM || (ARM && COMPILE_TEST) 5 depends on ARCH_QCOM || (ARM && COMPILE_TEST)
6 select REGULATOR
6 select DRM_KMS_HELPER 7 select DRM_KMS_HELPER
7 select DRM_PANEL 8 select DRM_PANEL
8 select SHMEM 9 select SHMEM
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 6283dcb96af5..143d988f8add 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -7,6 +7,7 @@ msm-y := \
7 adreno/adreno_device.o \ 7 adreno/adreno_device.o \
8 adreno/adreno_gpu.o \ 8 adreno/adreno_gpu.o \
9 adreno/a3xx_gpu.o \ 9 adreno/a3xx_gpu.o \
10 adreno/a4xx_gpu.o \
10 hdmi/hdmi.o \ 11 hdmi/hdmi.o \
11 hdmi/hdmi_audio.o \ 12 hdmi/hdmi_audio.o \
12 hdmi/hdmi_bridge.o \ 13 hdmi/hdmi_bridge.o \
@@ -24,12 +25,15 @@ msm-y := \
24 mdp/mdp4/mdp4_irq.o \ 25 mdp/mdp4/mdp4_irq.o \
25 mdp/mdp4/mdp4_kms.o \ 26 mdp/mdp4/mdp4_kms.o \
26 mdp/mdp4/mdp4_plane.o \ 27 mdp/mdp4/mdp4_plane.o \
28 mdp/mdp5/mdp5_cfg.o \
29 mdp/mdp5/mdp5_ctl.o \
27 mdp/mdp5/mdp5_crtc.o \ 30 mdp/mdp5/mdp5_crtc.o \
28 mdp/mdp5/mdp5_encoder.o \ 31 mdp/mdp5/mdp5_encoder.o \
29 mdp/mdp5/mdp5_irq.o \ 32 mdp/mdp5/mdp5_irq.o \
30 mdp/mdp5/mdp5_kms.o \ 33 mdp/mdp5/mdp5_kms.o \
31 mdp/mdp5/mdp5_plane.o \ 34 mdp/mdp5/mdp5_plane.o \
32 mdp/mdp5/mdp5_smp.o \ 35 mdp/mdp5/mdp5_smp.o \
36 msm_atomic.o \
33 msm_drv.o \ 37 msm_drv.o \
34 msm_fb.o \ 38 msm_fb.o \
35 msm_gem.o \ 39 msm_gem.o \
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
index a3104598c27f..22882cc0a573 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -11,10 +11,10 @@ The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
 - /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 9859 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14960 bytes, from 2014-07-27 17:22:13)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 58020 bytes, from 2014-08-01 12:22:48)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 41068 bytes, from 2014-08-01 12:22:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15053 bytes, from 2014-11-09 15:45:47)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 63169 bytes, from 2014-11-13 22:44:18)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 49097 bytes, from 2014-11-14 15:38:00)
 
 Copyright (C) 2013-2014 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -926,11 +926,11 @@ static inline uint32_t A2XX_VGT_DRAW_INITIATOR_INDEX_SIZE(enum pc_di_index_size
 #define A2XX_VGT_DRAW_INITIATOR_NOT_EOP 0x00001000
 #define A2XX_VGT_DRAW_INITIATOR_SMALL_INDEX 0x00002000
 #define A2XX_VGT_DRAW_INITIATOR_PRE_DRAW_INITIATOR_ENABLE 0x00004000
-#define A2XX_VGT_DRAW_INITIATOR_NUM_INDICES__MASK 0xffff0000
-#define A2XX_VGT_DRAW_INITIATOR_NUM_INDICES__SHIFT 16
-static inline uint32_t A2XX_VGT_DRAW_INITIATOR_NUM_INDICES(uint32_t val)
+#define A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__MASK 0xff000000
+#define A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__SHIFT 24
+static inline uint32_t A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES(uint32_t val)
 {
-	return ((val) << A2XX_VGT_DRAW_INITIATOR_NUM_INDICES__SHIFT) & A2XX_VGT_DRAW_INITIATOR_NUM_INDICES__MASK;
+	return ((val) << A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__SHIFT) & A2XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__MASK;
 }
 
 #define REG_A2XX_VGT_IMMED_DATA 0x000021fd
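
The generated headers above all follow one pattern: a __MASK/__SHIFT define pair plus an inline packer that shifts the value into its field and clamps it with the mask. A minimal stand-alone sketch of how such a builder is used, with the NUM_INSTANCES values from this hunk (illustrative only, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* Same shape as the generated helpers: shift into place, mask to the field. */
#define NUM_INSTANCES__MASK  0xff000000u
#define NUM_INSTANCES__SHIFT 24

static inline uint32_t NUM_INSTANCES(uint32_t val)
{
	return (val << NUM_INSTANCES__SHIFT) & NUM_INSTANCES__MASK;
}

int main(void)
{
	/* Pack an instance count of 5 into bits [31:24] of a draw-initiator word. */
	uint32_t dword = NUM_INSTANCES(5);
	printf("0x%08x\n", (unsigned)dword); /* prints 0x05000000 */
	return 0;
}
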
@@ -1243,13 +1243,13 @@ static inline uint32_t A2XX_CLEAR_COLOR_ALPHA(uint32_t val)
 #define A2XX_PA_SU_POINT_SIZE_HEIGHT__SHIFT 0
 static inline uint32_t A2XX_PA_SU_POINT_SIZE_HEIGHT(float val)
 {
-	return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_SIZE_HEIGHT__SHIFT) & A2XX_PA_SU_POINT_SIZE_HEIGHT__MASK;
+	return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_POINT_SIZE_HEIGHT__SHIFT) & A2XX_PA_SU_POINT_SIZE_HEIGHT__MASK;
 }
 #define A2XX_PA_SU_POINT_SIZE_WIDTH__MASK 0xffff0000
 #define A2XX_PA_SU_POINT_SIZE_WIDTH__SHIFT 16
 static inline uint32_t A2XX_PA_SU_POINT_SIZE_WIDTH(float val)
 {
-	return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_SIZE_WIDTH__SHIFT) & A2XX_PA_SU_POINT_SIZE_WIDTH__MASK;
+	return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_POINT_SIZE_WIDTH__SHIFT) & A2XX_PA_SU_POINT_SIZE_WIDTH__MASK;
 }
 
 #define REG_A2XX_PA_SU_POINT_MINMAX 0x00002281
@@ -1257,13 +1257,13 @@ static inline uint32_t A2XX_PA_SU_POINT_SIZE_WIDTH(float val)
 #define A2XX_PA_SU_POINT_MINMAX_MIN__SHIFT 0
 static inline uint32_t A2XX_PA_SU_POINT_MINMAX_MIN(float val)
 {
-	return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_MINMAX_MIN__SHIFT) & A2XX_PA_SU_POINT_MINMAX_MIN__MASK;
+	return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_POINT_MINMAX_MIN__SHIFT) & A2XX_PA_SU_POINT_MINMAX_MIN__MASK;
 }
 #define A2XX_PA_SU_POINT_MINMAX_MAX__MASK 0xffff0000
 #define A2XX_PA_SU_POINT_MINMAX_MAX__SHIFT 16
 static inline uint32_t A2XX_PA_SU_POINT_MINMAX_MAX(float val)
 {
-	return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_POINT_MINMAX_MAX__SHIFT) & A2XX_PA_SU_POINT_MINMAX_MAX__MASK;
+	return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_POINT_MINMAX_MAX__SHIFT) & A2XX_PA_SU_POINT_MINMAX_MAX__MASK;
 }
 
 #define REG_A2XX_PA_SU_LINE_CNTL 0x00002282
@@ -1271,7 +1271,7 @@ static inline uint32_t A2XX_PA_SU_POINT_MINMAX_MAX(float val)
 #define A2XX_PA_SU_LINE_CNTL_WIDTH__SHIFT 0
 static inline uint32_t A2XX_PA_SU_LINE_CNTL_WIDTH(float val)
 {
-	return ((((uint32_t)(val * 8.0))) << A2XX_PA_SU_LINE_CNTL_WIDTH__SHIFT) & A2XX_PA_SU_LINE_CNTL_WIDTH__MASK;
+	return ((((uint32_t)(val * 16.0))) << A2XX_PA_SU_LINE_CNTL_WIDTH__SHIFT) & A2XX_PA_SU_LINE_CNTL_WIDTH__MASK;
 }
 
 #define REG_A2XX_PA_SC_LINE_STIPPLE 0x00002283
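
The a2xx point- and line-size helpers above change their scale factor from 8.0 to 16.0, i.e. the regenerated headers treat these registers as fixed-point fields with four fractional bits instead of three. A small sketch of the conversion, assuming the 12.4 interpretation implied by the new scale (the naming is an inference, not from the patch):

#include <stdint.h>
#include <stdio.h>

/* Multiply by 16 and truncate: a fixed-point value with 4 fractional bits. */
static uint32_t to_fixed_12_4(float val)
{
	return (uint32_t)(val * 16.0);
}

int main(void)
{
	printf("%u\n", (unsigned)to_fixed_12_4(1.5f));  /* 24  == 1.5  * 16 */
	printf("%u\n", (unsigned)to_fixed_12_4(2.25f)); /* 36  == 2.25 * 16 */
	return 0;
}
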
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
index 82d015279b47..109e9a263daf 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -11,10 +11,10 @@ The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
 - /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 9859 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14960 bytes, from 2014-07-27 17:22:13)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 58020 bytes, from 2014-08-01 12:22:48)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 41068 bytes, from 2014-08-01 12:22:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15053 bytes, from 2014-11-09 15:45:47)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 63169 bytes, from 2014-11-13 22:44:18)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 49097 bytes, from 2014-11-14 15:38:00)
 
 Copyright (C) 2013-2014 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -86,6 +86,14 @@ enum a3xx_vtx_fmt {
 	VFMT_NORM_USHORT_16_16 = 29,
 	VFMT_NORM_USHORT_16_16_16 = 30,
 	VFMT_NORM_USHORT_16_16_16_16 = 31,
+	VFMT_UINT_32 = 32,
+	VFMT_UINT_32_32 = 33,
+	VFMT_UINT_32_32_32 = 34,
+	VFMT_UINT_32_32_32_32 = 35,
+	VFMT_INT_32 = 36,
+	VFMT_INT_32_32 = 37,
+	VFMT_INT_32_32_32 = 38,
+	VFMT_INT_32_32_32_32 = 39,
 	VFMT_UBYTE_8 = 40,
 	VFMT_UBYTE_8_8 = 41,
 	VFMT_UBYTE_8_8_8 = 42,
@@ -112,7 +120,9 @@ enum a3xx_tex_fmt {
 	TFMT_NORM_USHORT_565 = 4,
 	TFMT_NORM_USHORT_5551 = 6,
 	TFMT_NORM_USHORT_4444 = 7,
+	TFMT_NORM_USHORT_Z16 = 9,
 	TFMT_NORM_UINT_X8Z24 = 10,
+	TFMT_FLOAT_Z32 = 11,
 	TFMT_NORM_UINT_NV12_UV_TILED = 17,
 	TFMT_NORM_UINT_NV12_Y_TILED = 19,
 	TFMT_NORM_UINT_NV12_UV = 21,
@@ -121,18 +131,38 @@ enum a3xx_tex_fmt {
 	TFMT_NORM_UINT_I420_U = 26,
 	TFMT_NORM_UINT_I420_V = 27,
 	TFMT_NORM_UINT_2_10_10_10 = 41,
+	TFMT_FLOAT_9_9_9_E5 = 42,
+	TFMT_FLOAT_10_11_11 = 43,
 	TFMT_NORM_UINT_A8 = 44,
 	TFMT_NORM_UINT_L8_A8 = 47,
 	TFMT_NORM_UINT_8 = 48,
 	TFMT_NORM_UINT_8_8 = 49,
 	TFMT_NORM_UINT_8_8_8 = 50,
 	TFMT_NORM_UINT_8_8_8_8 = 51,
+	TFMT_NORM_SINT_8_8 = 53,
+	TFMT_NORM_SINT_8_8_8_8 = 55,
+	TFMT_UINT_8_8 = 57,
+	TFMT_UINT_8_8_8_8 = 59,
+	TFMT_SINT_8_8 = 61,
+	TFMT_SINT_8_8_8_8 = 63,
 	TFMT_FLOAT_16 = 64,
 	TFMT_FLOAT_16_16 = 65,
 	TFMT_FLOAT_16_16_16_16 = 67,
+	TFMT_UINT_16 = 68,
+	TFMT_UINT_16_16 = 69,
+	TFMT_UINT_16_16_16_16 = 71,
+	TFMT_SINT_16 = 72,
+	TFMT_SINT_16_16 = 73,
+	TFMT_SINT_16_16_16_16 = 75,
 	TFMT_FLOAT_32 = 84,
 	TFMT_FLOAT_32_32 = 85,
 	TFMT_FLOAT_32_32_32_32 = 87,
+	TFMT_UINT_32 = 88,
+	TFMT_UINT_32_32 = 89,
+	TFMT_UINT_32_32_32_32 = 91,
+	TFMT_SINT_32 = 92,
+	TFMT_SINT_32_32 = 93,
+	TFMT_SINT_32_32_32_32 = 95,
 };
 
 enum a3xx_tex_fetchsize {
@@ -145,19 +175,34 @@ enum a3xx_tex_fetchsize {
 };
 
 enum a3xx_color_fmt {
+	RB_R5G6B5_UNORM = 0,
+	RB_R5G5B5A1_UNORM = 1,
+	RB_R4G4B4A4_UNORM = 3,
 	RB_R8G8B8_UNORM = 4,
 	RB_R8G8B8A8_UNORM = 8,
-	RB_Z16_UNORM = 12,
+	RB_R8G8B8A8_UINT = 10,
+	RB_R8G8B8A8_SINT = 11,
+	RB_R8G8_UNORM = 12,
+	RB_R8_UINT = 14,
+	RB_R8_SINT = 15,
+	RB_R10G10B10A2_UNORM = 16,
 	RB_A8_UNORM = 20,
+	RB_R8_UNORM = 21,
 	RB_R16G16B16A16_FLOAT = 27,
+	RB_R11G11B10_FLOAT = 28,
+	RB_R16_SINT = 40,
+	RB_R16G16_SINT = 41,
+	RB_R16G16B16A16_SINT = 43,
+	RB_R16_UINT = 44,
+	RB_R16G16_UINT = 45,
+	RB_R16G16B16A16_UINT = 47,
 	RB_R32G32B32A32_FLOAT = 51,
-};
-
-enum a3xx_color_swap {
-	WZYX = 0,
-	WXYZ = 1,
-	ZYXW = 2,
-	XYZW = 3,
+	RB_R32_SINT = 52,
+	RB_R32G32_SINT = 53,
+	RB_R32G32B32A32_SINT = 55,
+	RB_R32_UINT = 56,
+	RB_R32G32_UINT = 57,
+	RB_R32G32B32A32_UINT = 59,
 };
 
 enum a3xx_sp_perfcounter_select {
@@ -194,6 +239,11 @@ enum a3xx_rb_blend_opcode {
 	BLEND_MAX_DST_SRC = 4,
 };
 
+enum a3xx_intp_mode {
+	SMOOTH = 0,
+	FLAT = 1,
+};
+
 enum a3xx_tex_filter {
 	A3XX_TEX_NEAREST = 0,
 	A3XX_TEX_LINEAR = 1,
@@ -536,6 +586,10 @@ enum a3xx_tex_type {
 
 #define REG_A3XX_CP_MEQ_DATA 0x000001db
 
+#define REG_A3XX_CP_WFI_PEND_CTR 0x000001f5
+
+#define REG_A3XX_RBBM_PM_OVERRIDE2 0x0000039d
+
 #define REG_A3XX_CP_PERFCOUNTER_SELECT 0x00000445
 
 #define REG_A3XX_CP_HW_FAULT 0x0000045c
@@ -550,6 +604,12 @@ static inline uint32_t REG_A3XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000460
 
 #define REG_A3XX_CP_AHB_FAULT 0x0000054d
 
+#define REG_A3XX_SQ_GPR_MANAGEMENT 0x00000d00
+
+#define REG_A3XX_SQ_INST_STORE_MANAGMENT 0x00000d02
+
+#define REG_A3XX_TP0_CHICKEN 0x00000e1e
+
 #define REG_A3XX_SP_GLOBAL_MEM_SIZE 0x00000e22
 
 #define REG_A3XX_SP_GLOBAL_MEM_ADDR 0x00000e23
@@ -632,13 +692,13 @@ static inline uint32_t A3XX_GRAS_CL_VPORT_ZSCALE(float val)
 #define A3XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT 0
 static inline uint32_t A3XX_GRAS_SU_POINT_MINMAX_MIN(float val)
 {
-	return ((((uint32_t)(val * 8.0))) << A3XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A3XX_GRAS_SU_POINT_MINMAX_MIN__MASK;
+	return ((((uint32_t)(val * 16.0))) << A3XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A3XX_GRAS_SU_POINT_MINMAX_MIN__MASK;
 }
 #define A3XX_GRAS_SU_POINT_MINMAX_MAX__MASK 0xffff0000
 #define A3XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT 16
 static inline uint32_t A3XX_GRAS_SU_POINT_MINMAX_MAX(float val)
 {
-	return ((((uint32_t)(val * 8.0))) << A3XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A3XX_GRAS_SU_POINT_MINMAX_MAX__MASK;
+	return ((((uint32_t)(val * 16.0))) << A3XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A3XX_GRAS_SU_POINT_MINMAX_MAX__MASK;
 }
 
 #define REG_A3XX_GRAS_SU_POINT_SIZE 0x00002069
@@ -646,7 +706,7 @@ static inline uint32_t A3XX_GRAS_SU_POINT_MINMAX_MAX(float val)
 #define A3XX_GRAS_SU_POINT_SIZE__SHIFT 0
 static inline uint32_t A3XX_GRAS_SU_POINT_SIZE(float val)
 {
-	return ((((uint32_t)(val * 8.0))) << A3XX_GRAS_SU_POINT_SIZE__SHIFT) & A3XX_GRAS_SU_POINT_SIZE__MASK;
+	return ((((int32_t)(val * 16.0))) << A3XX_GRAS_SU_POINT_SIZE__SHIFT) & A3XX_GRAS_SU_POINT_SIZE__MASK;
 }
 
 #define REG_A3XX_GRAS_SU_POLY_OFFSET_SCALE 0x0000206c
@@ -654,7 +714,7 @@ static inline uint32_t A3XX_GRAS_SU_POINT_SIZE(float val)
 #define A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__SHIFT 0
 static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL(float val)
 {
-	return ((((uint32_t)(val * 28.0))) << A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__MASK;
+	return ((((int32_t)(val * 16384.0))) << A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL__MASK;
 }
 
 #define REG_A3XX_GRAS_SU_POLY_OFFSET_OFFSET 0x0000206d
@@ -662,7 +722,7 @@ static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_SCALE_VAL(float val)
 #define A3XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT 0
 static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
 {
-	return ((((uint32_t)(val * 28.0))) << A3XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
+	return ((((int32_t)(val * 16384.0))) << A3XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A3XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
 }
 
 #define REG_A3XX_GRAS_SU_MODE_CONTROL 0x00002070
@@ -673,7 +733,7 @@ static inline uint32_t A3XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
 #define A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT 3
 static inline uint32_t A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(float val)
 {
-	return ((((uint32_t)(val * 4.0))) << A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK;
+	return ((((int32_t)(val * 4.0))) << A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & A3XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK;
 }
 #define A3XX_GRAS_SU_MODE_CONTROL_POLY_OFFSET 0x00000800
 
@@ -863,6 +923,7 @@ static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
 {
 	return ((val) << A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A3XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK;
 }
+#define A3XX_RB_MRT_BUF_INFO_COLOR_SRGB 0x00004000
 #define A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK 0xfffe0000
 #define A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT 17
 static inline uint32_t A3XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH(uint32_t val)
@@ -1001,6 +1062,7 @@ static inline uint32_t A3XX_RB_COPY_CONTROL_FASTCLEAR(uint32_t val)
 {
 	return ((val) << A3XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT) & A3XX_RB_COPY_CONTROL_FASTCLEAR__MASK;
 }
+#define A3XX_RB_COPY_CONTROL_UNK12 0x00001000
 #define A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK 0xffffc000
 #define A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT 14
 static inline uint32_t A3XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val)
@@ -1079,7 +1141,7 @@ static inline uint32_t A3XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val)
 #define REG_A3XX_RB_DEPTH_CLEAR 0x00002101
 
 #define REG_A3XX_RB_DEPTH_INFO 0x00002102
-#define A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK 0x00000001
+#define A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK 0x00000003
 #define A3XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT 0
 static inline uint32_t A3XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum adreno_rb_depth_format val)
 {
@@ -1265,6 +1327,7 @@ static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE(enum adreno_pa_
 {
 	return ((val) << A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__SHIFT) & A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE__MASK;
 }
+#define A3XX_PC_PRIM_VTX_CNTL_PRIMITIVE_RESTART 0x00100000
 #define A3XX_PC_PRIM_VTX_CNTL_PROVOKING_VTX_LAST 0x02000000
 #define A3XX_PC_PRIM_VTX_CNTL_PSIZE 0x04000000
 
@@ -1281,7 +1344,12 @@ static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE(enum a3xx_threadsize
 #define A3XX_HLSQ_CONTROL_0_REG_SPSHADERRESTART 0x00000200
 #define A3XX_HLSQ_CONTROL_0_REG_RESERVED2 0x00000400
 #define A3XX_HLSQ_CONTROL_0_REG_CHUNKDISABLE 0x04000000
-#define A3XX_HLSQ_CONTROL_0_REG_CONSTSWITCHMODE 0x08000000
+#define A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__MASK 0x08000000
+#define A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__SHIFT 27
+static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_CONSTMODE(uint32_t val)
+{
+	return ((val) << A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__SHIFT) & A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__MASK;
+}
 #define A3XX_HLSQ_CONTROL_0_REG_LAZYUPDATEDISABLE 0x10000000
 #define A3XX_HLSQ_CONTROL_0_REG_SPCONSTFULLUPDATE 0x20000000
 #define A3XX_HLSQ_CONTROL_0_REG_TPFULLUPDATE 0x40000000
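
The boolean CONSTSWITCHMODE define is regenerated above as a one-bit CONSTMODE field with mask/shift helpers; packing 1 produces exactly the bit the old define set. A sketch of both directions, including the read-back (unpack) step that the generated headers leave to callers (illustrative, not part of the patch):

#include <assert.h>
#include <stdint.h>

#define CONSTMODE__MASK  0x08000000u
#define CONSTMODE__SHIFT 27

/* Pack, as in the generated helper above. */
static inline uint32_t CONSTMODE(uint32_t val)
{
	return (val << CONSTMODE__SHIFT) & CONSTMODE__MASK;
}

/* The matching unpack: mask first, then shift back down. */
static inline uint32_t CONSTMODE_GET(uint32_t reg)
{
	return (reg & CONSTMODE__MASK) >> CONSTMODE__SHIFT;
}

int main(void)
{
	uint32_t reg = CONSTMODE(1); /* same bit the old boolean define set */
	assert(reg == 0x08000000u);
	assert(CONSTMODE_GET(reg) == 1);
	return 0;
}
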
@@ -1484,6 +1552,8 @@ static inline uint32_t A3XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
 
 #define REG_A3XX_VFD_INDEX_OFFSET 0x00002245
 
+#define REG_A3XX_VFD_INDEX_OFFSET 0x00002245
+
 static inline uint32_t REG_A3XX_VFD_FETCH(uint32_t i0) { return 0x00002246 + 0x2*i0; }
 
 static inline uint32_t REG_A3XX_VFD_FETCH_INSTR_0(uint32_t i0) { return 0x00002246 + 0x2*i0; }
@@ -1537,6 +1607,7 @@ static inline uint32_t A3XX_VFD_DECODE_INSTR_REGID(uint32_t val)
 {
 	return ((val) << A3XX_VFD_DECODE_INSTR_REGID__SHIFT) & A3XX_VFD_DECODE_INSTR_REGID__MASK;
 }
+#define A3XX_VFD_DECODE_INSTR_INT 0x00100000
 #define A3XX_VFD_DECODE_INSTR_SWAP__MASK 0x00c00000
 #define A3XX_VFD_DECODE_INSTR_SWAP__SHIFT 22
 static inline uint32_t A3XX_VFD_DECODE_INSTR_SWAP(enum a3xx_color_swap val)
@@ -1604,6 +1675,102 @@ static inline uint32_t A3XX_VPC_PACK_NUMNONPOSVSVAR(uint32_t val)
 static inline uint32_t REG_A3XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x00002282 + 0x1*i0; }
 
 static inline uint32_t REG_A3XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x00002282 + 0x1*i0; }
+#define A3XX_VPC_VARYING_INTERP_MODE_C0__MASK 0x00000003
+#define A3XX_VPC_VARYING_INTERP_MODE_C0__SHIFT 0
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C0(enum a3xx_intp_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C0__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C0__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C1__MASK 0x0000000c
+#define A3XX_VPC_VARYING_INTERP_MODE_C1__SHIFT 2
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C1(enum a3xx_intp_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C1__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C1__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C2__MASK 0x00000030
+#define A3XX_VPC_VARYING_INTERP_MODE_C2__SHIFT 4
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C2(enum a3xx_intp_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C2__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C2__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C3__MASK 0x000000c0
+#define A3XX_VPC_VARYING_INTERP_MODE_C3__SHIFT 6
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C3(enum a3xx_intp_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C3__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C3__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C4__MASK 0x00000300
+#define A3XX_VPC_VARYING_INTERP_MODE_C4__SHIFT 8
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C4(enum a3xx_intp_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C4__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C4__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C5__MASK 0x00000c00
+#define A3XX_VPC_VARYING_INTERP_MODE_C5__SHIFT 10
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C5(enum a3xx_intp_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C5__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C5__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C6__MASK 0x00003000
+#define A3XX_VPC_VARYING_INTERP_MODE_C6__SHIFT 12
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C6(enum a3xx_intp_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C6__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C6__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C7__MASK 0x0000c000
+#define A3XX_VPC_VARYING_INTERP_MODE_C7__SHIFT 14
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C7(enum a3xx_intp_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C7__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C7__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C8__MASK 0x00030000
+#define A3XX_VPC_VARYING_INTERP_MODE_C8__SHIFT 16
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C8(enum a3xx_intp_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C8__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C8__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_C9__MASK 0x000c0000
+#define A3XX_VPC_VARYING_INTERP_MODE_C9__SHIFT 18
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_C9(enum a3xx_intp_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_INTERP_MODE_C9__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_C9__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_CA__MASK 0x00300000
+#define A3XX_VPC_VARYING_INTERP_MODE_CA__SHIFT 20
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CA(enum a3xx_intp_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CA__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CA__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_CB__MASK 0x00c00000
+#define A3XX_VPC_VARYING_INTERP_MODE_CB__SHIFT 22
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CB(enum a3xx_intp_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CB__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CB__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_CC__MASK 0x03000000
+#define A3XX_VPC_VARYING_INTERP_MODE_CC__SHIFT 24
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CC(enum a3xx_intp_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CC__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CC__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_CD__MASK 0x0c000000
+#define A3XX_VPC_VARYING_INTERP_MODE_CD__SHIFT 26
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CD(enum a3xx_intp_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CD__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CD__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_CE__MASK 0x30000000
+#define A3XX_VPC_VARYING_INTERP_MODE_CE__SHIFT 28
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CE(enum a3xx_intp_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CE__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CE__MASK;
+}
+#define A3XX_VPC_VARYING_INTERP_MODE_CF__MASK 0xc0000000
+#define A3XX_VPC_VARYING_INTERP_MODE_CF__SHIFT 30
+static inline uint32_t A3XX_VPC_VARYING_INTERP_MODE_CF(enum a3xx_intp_mode val)
+{
+	return ((val) << A3XX_VPC_VARYING_INTERP_MODE_CF__SHIFT) & A3XX_VPC_VARYING_INTERP_MODE_CF__MASK;
+}
 
 static inline uint32_t REG_A3XX_VPC_VARYING_PS_REPL(uint32_t i0) { return 0x00002286 + 0x1*i0; }
 
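
Each VPC_VARYING_INTERP_MODE word above packs sixteen two-bit a3xx_intp_mode fields, C0 at bits [1:0] up through CF at bits [31:30]. A compact sketch of the same packing with one indexed helper instead of sixteen named ones (illustrative, not the generated API):

#include <stdint.h>
#include <stdio.h>

enum intp_mode { SMOOTH = 0, FLAT = 1 };

/* Field Cn occupies bits [2n+1:2n], matching the C0..CF helpers above. */
static uint32_t interp_field(enum intp_mode mode, unsigned n)
{
	return ((uint32_t)mode & 0x3u) << (2 * n);
}

int main(void)
{
	/* Mark varying components 0 and 3 as flat-shaded. */
	uint32_t reg = interp_field(FLAT, 0) | interp_field(FLAT, 3);
	printf("0x%08x\n", (unsigned)reg); /* prints 0x00000041 */
	return 0;
}
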
@@ -1928,6 +2095,8 @@ static inline uint32_t A3XX_SP_FS_MRT_REG_REGID(uint32_t val)
 	return ((val) << A3XX_SP_FS_MRT_REG_REGID__SHIFT) & A3XX_SP_FS_MRT_REG_REGID__MASK;
 }
 #define A3XX_SP_FS_MRT_REG_HALF_PRECISION 0x00000100
+#define A3XX_SP_FS_MRT_REG_SINT 0x00000400
+#define A3XX_SP_FS_MRT_REG_UINT 0x00000800
 
 static inline uint32_t REG_A3XX_SP_FS_IMAGE_OUTPUT(uint32_t i0) { return 0x000022f4 + 0x1*i0; }
 
@@ -1947,6 +2116,8 @@ static inline uint32_t A3XX_SP_FS_LENGTH_REG_SHADERLENGTH(uint32_t val)
 	return ((val) << A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__SHIFT) & A3XX_SP_FS_LENGTH_REG_SHADERLENGTH__MASK;
 }
 
+#define REG_A3XX_PA_SC_AA_CONFIG 0x00002301
+
 #define REG_A3XX_TPL1_TP_VS_TEX_OFFSET 0x00002340
 #define A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__MASK 0x000000ff
 #define A3XX_TPL1_TP_VS_TEX_OFFSET_SAMPLEROFFSET__SHIFT 0
@@ -2297,11 +2468,11 @@ static inline uint32_t A3XX_VGT_DRAW_INITIATOR_INDEX_SIZE(enum pc_di_index_size
 #define A3XX_VGT_DRAW_INITIATOR_NOT_EOP 0x00001000
 #define A3XX_VGT_DRAW_INITIATOR_SMALL_INDEX 0x00002000
 #define A3XX_VGT_DRAW_INITIATOR_PRE_DRAW_INITIATOR_ENABLE 0x00004000
-#define A3XX_VGT_DRAW_INITIATOR_NUM_INDICES__MASK 0xffff0000
-#define A3XX_VGT_DRAW_INITIATOR_NUM_INDICES__SHIFT 16
-static inline uint32_t A3XX_VGT_DRAW_INITIATOR_NUM_INDICES(uint32_t val)
+#define A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__MASK 0xff000000
+#define A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__SHIFT 24
+static inline uint32_t A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES(uint32_t val)
 {
-	return ((val) << A3XX_VGT_DRAW_INITIATOR_NUM_INDICES__SHIFT) & A3XX_VGT_DRAW_INITIATOR_NUM_INDICES__MASK;
+	return ((val) << A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__SHIFT) & A3XX_VGT_DRAW_INITIATOR_NUM_INSTANCES__MASK;
 }
 
 #define REG_A3XX_VGT_IMMED_DATA 0x000021fd
@@ -2347,17 +2518,23 @@ static inline uint32_t A3XX_TEX_SAMP_0_COMPARE_FUNC(enum adreno_compare_func val
 #define A3XX_TEX_SAMP_0_UNNORM_COORDS 0x80000000
 
 #define REG_A3XX_TEX_SAMP_1 0x00000001
+#define A3XX_TEX_SAMP_1_LOD_BIAS__MASK 0x000007ff
+#define A3XX_TEX_SAMP_1_LOD_BIAS__SHIFT 0
+static inline uint32_t A3XX_TEX_SAMP_1_LOD_BIAS(float val)
+{
+	return ((((int32_t)(val * 64.0))) << A3XX_TEX_SAMP_1_LOD_BIAS__SHIFT) & A3XX_TEX_SAMP_1_LOD_BIAS__MASK;
+}
 #define A3XX_TEX_SAMP_1_MAX_LOD__MASK 0x003ff000
 #define A3XX_TEX_SAMP_1_MAX_LOD__SHIFT 12
 static inline uint32_t A3XX_TEX_SAMP_1_MAX_LOD(float val)
 {
-	return ((((uint32_t)(val * 12.0))) << A3XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A3XX_TEX_SAMP_1_MAX_LOD__MASK;
+	return ((((uint32_t)(val * 64.0))) << A3XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A3XX_TEX_SAMP_1_MAX_LOD__MASK;
 }
 #define A3XX_TEX_SAMP_1_MIN_LOD__MASK 0xffc00000
 #define A3XX_TEX_SAMP_1_MIN_LOD__SHIFT 22
 static inline uint32_t A3XX_TEX_SAMP_1_MIN_LOD(float val)
 {
-	return ((((uint32_t)(val * 12.0))) << A3XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A3XX_TEX_SAMP_1_MIN_LOD__MASK;
+	return ((((uint32_t)(val * 64.0))) << A3XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A3XX_TEX_SAMP_1_MIN_LOD__MASK;
 }
 
 #define REG_A3XX_TEX_CONST_0 0x00000000
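
The LOD helpers above now scale by 64.0 (six fractional bits) instead of 12.0, and the new LOD_BIAS packer casts through int32_t so negative biases wrap into the 11-bit field as two's complement. A sketch recomputing that encoding; the signed fixed-point interpretation is inferred from the generated code, not documented here:

#include <stdint.h>
#include <stdio.h>

#define LOD_BIAS__MASK 0x000007ffu

/* Scale by 64 (6 fractional bits), then let two's complement wrap
 * negative values into the 11-bit field via the mask. */
static uint32_t lod_bias(float val)
{
	return ((uint32_t)(int32_t)(val * 64.0)) & LOD_BIAS__MASK;
}

int main(void)
{
	printf("0x%03x\n", (unsigned)lod_bias(1.0f));  /* 0x040:  64 */
	printf("0x%03x\n", (unsigned)lod_bias(-0.5f)); /* 0x7e0: -32 wrapped */
	return 0;
}
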
@@ -2448,6 +2625,24 @@ static inline uint32_t A3XX_TEX_CONST_2_SWAP(enum a3xx_color_swap val)
 }
 
 #define REG_A3XX_TEX_CONST_3 0x00000003
+#define A3XX_TEX_CONST_3_LAYERSZ1__MASK 0x0000000f
+#define A3XX_TEX_CONST_3_LAYERSZ1__SHIFT 0
+static inline uint32_t A3XX_TEX_CONST_3_LAYERSZ1(uint32_t val)
+{
+	return ((val >> 12) << A3XX_TEX_CONST_3_LAYERSZ1__SHIFT) & A3XX_TEX_CONST_3_LAYERSZ1__MASK;
+}
+#define A3XX_TEX_CONST_3_DEPTH__MASK 0x0ffe0000
+#define A3XX_TEX_CONST_3_DEPTH__SHIFT 17
+static inline uint32_t A3XX_TEX_CONST_3_DEPTH(uint32_t val)
+{
+	return ((val) << A3XX_TEX_CONST_3_DEPTH__SHIFT) & A3XX_TEX_CONST_3_DEPTH__MASK;
+}
+#define A3XX_TEX_CONST_3_LAYERSZ2__MASK 0xf0000000
+#define A3XX_TEX_CONST_3_LAYERSZ2__SHIFT 28
+static inline uint32_t A3XX_TEX_CONST_3_LAYERSZ2(uint32_t val)
+{
+	return ((val >> 12) << A3XX_TEX_CONST_3_LAYERSZ2__SHIFT) & A3XX_TEX_CONST_3_LAYERSZ2__MASK;
+}
 
 
 #endif /* A3XX_XML */
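
Both LAYERSZ packers above shift the incoming value right by 12 before placing it in a 4-bit field, which reads as a layer size programmed in 4 KiB units; that interpretation is an inference from the generated code, not documentation. A sketch of the LAYERSZ1 encoding:

#include <stdint.h>
#include <stdio.h>

/* Drop 12 bits (4096-byte granularity, inferred), then mask to 4 bits. */
static uint32_t layersz1(uint32_t bytes)
{
	return (bytes >> 12) & 0x0000000fu;
}

int main(void)
{
	printf("%u\n", (unsigned)layersz1(32 * 1024)); /* 8 units of 4 KiB */
	return 0;
}
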
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 218c5b060398..b66c53bdc039 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -2,6 +2,8 @@
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published by
  * the Free Software Foundation.
@@ -406,6 +408,94 @@ static void a3xx_dump(struct msm_gpu *gpu)
 		gpu_read(gpu, REG_A3XX_RBBM_STATUS));
 	adreno_dump(gpu);
 }
+/* Register offset defines for A3XX */
+static const unsigned int a3xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_DEBUG, REG_AXXX_CP_DEBUG),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_WADDR, REG_AXXX_CP_ME_RAM_WADDR),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_DATA, REG_AXXX_CP_ME_RAM_DATA),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_DATA,
+			REG_A3XX_CP_PFP_UCODE_DATA),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_ADDR,
+			REG_A3XX_CP_PFP_UCODE_ADDR),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_WFI_PEND_CTR, REG_A3XX_CP_WFI_PEND_CTR),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_AXXX_CP_RB_RPTR_ADDR),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_AXXX_CP_RB_RPTR),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_AXXX_CP_RB_WPTR),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_CTRL, REG_A3XX_CP_PROTECT_CTRL),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_CNTL, REG_AXXX_CP_ME_CNTL),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_AXXX_CP_RB_CNTL),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BASE, REG_AXXX_CP_IB1_BASE),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BUFSZ, REG_AXXX_CP_IB1_BUFSZ),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BASE, REG_AXXX_CP_IB2_BASE),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BUFSZ, REG_AXXX_CP_IB2_BUFSZ),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_TIMESTAMP, REG_AXXX_CP_SCRATCH_REG0),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_RADDR, REG_AXXX_CP_ME_RAM_RADDR),
+	REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_ADDR, REG_AXXX_SCRATCH_ADDR),
+	REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_UMSK, REG_AXXX_SCRATCH_UMSK),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_ADDR, REG_A3XX_CP_ROQ_ADDR),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_DATA, REG_A3XX_CP_ROQ_DATA),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_ADDR, REG_A3XX_CP_MERCIU_ADDR),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA, REG_A3XX_CP_MERCIU_DATA),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA2, REG_A3XX_CP_MERCIU_DATA2),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_ADDR, REG_A3XX_CP_MEQ_ADDR),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_DATA, REG_A3XX_CP_MEQ_DATA),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_HW_FAULT, REG_A3XX_CP_HW_FAULT),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_STATUS,
+			REG_A3XX_CP_PROTECT_STATUS),
+	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_STATUS, REG_A3XX_RBBM_STATUS),
+	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_CTL,
+			REG_A3XX_RBBM_PERFCTR_CTL),
+	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD0,
+			REG_A3XX_RBBM_PERFCTR_LOAD_CMD0),
+	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD1,
+			REG_A3XX_RBBM_PERFCTR_LOAD_CMD1),
+	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_PWR_1_LO,
+			REG_A3XX_RBBM_PERFCTR_PWR_1_LO),
+	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_MASK, REG_A3XX_RBBM_INT_0_MASK),
+	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_STATUS,
+			REG_A3XX_RBBM_INT_0_STATUS),
+	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ERROR_STATUS,
+			REG_A3XX_RBBM_AHB_ERROR_STATUS),
+	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_CMD, REG_A3XX_RBBM_AHB_CMD),
+	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_CLEAR_CMD,
+			REG_A3XX_RBBM_INT_CLEAR_CMD),
+	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_CLOCK_CTL, REG_A3XX_RBBM_CLOCK_CTL),
+	REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_SEL,
+			REG_A3XX_VPC_VPC_DEBUG_RAM_SEL),
+	REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_READ,
+			REG_A3XX_VPC_VPC_DEBUG_RAM_READ),
+	REG_ADRENO_DEFINE(REG_ADRENO_VSC_SIZE_ADDRESS,
+			REG_A3XX_VSC_SIZE_ADDRESS),
+	REG_ADRENO_DEFINE(REG_ADRENO_VFD_CONTROL_0, REG_A3XX_VFD_CONTROL_0),
+	REG_ADRENO_DEFINE(REG_ADRENO_VFD_INDEX_MAX, REG_A3XX_VFD_INDEX_MAX),
+	REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_PVT_MEM_ADDR_REG,
+			REG_A3XX_SP_VS_PVT_MEM_ADDR_REG),
+	REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_PVT_MEM_ADDR_REG,
+			REG_A3XX_SP_FS_PVT_MEM_ADDR_REG),
+	REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_OBJ_START_REG,
+			REG_A3XX_SP_VS_OBJ_START_REG),
+	REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_OBJ_START_REG,
+			REG_A3XX_SP_FS_OBJ_START_REG),
+	REG_ADRENO_DEFINE(REG_ADRENO_PA_SC_AA_CONFIG, REG_A3XX_PA_SC_AA_CONFIG),
+	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PM_OVERRIDE2,
+			REG_A3XX_RBBM_PM_OVERRIDE2),
+	REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_REG2, REG_AXXX_CP_SCRATCH_REG2),
+	REG_ADRENO_DEFINE(REG_ADRENO_SQ_GPR_MANAGEMENT,
+			REG_A3XX_SQ_GPR_MANAGEMENT),
+	REG_ADRENO_DEFINE(REG_ADRENO_SQ_INST_STORE_MANAGMENT,
+			REG_A3XX_SQ_INST_STORE_MANAGMENT),
+	REG_ADRENO_DEFINE(REG_ADRENO_TP0_CHICKEN, REG_A3XX_TP0_CHICKEN),
+	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_RBBM_CTL, REG_A3XX_RBBM_RBBM_CTL),
+	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_SW_RESET_CMD,
+			REG_A3XX_RBBM_SW_RESET_CMD),
+	REG_ADRENO_DEFINE(REG_ADRENO_UCHE_INVALIDATE0,
+			REG_A3XX_UCHE_CACHE_INVALIDATE0_REG),
+	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_LO,
+			REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_LO),
+	REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_HI,
+			REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_HI),
+};
 
 static const struct adreno_gpu_funcs funcs = {
 	.base = {
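
a3xx_register_offsets maps the generation-independent REG_ADRENO_* indices onto the real A3XX/AXXX register addresses, so common adreno code can touch a register without knowing which GPU generation it is driving. A stand-alone sketch of that indirection with hypothetical, simplified types; the kernel's actual accessors live in the shared adreno code and may differ:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical, trimmed-down stand-ins for the kernel types. */
enum adreno_regs {
	REG_ADRENO_CP_RB_RPTR,
	REG_ADRENO_CP_RB_WPTR,
	REG_ADRENO_REGISTER_MAX,
};

struct adreno_gpu {
	const unsigned int *reg_offsets; /* e.g. a3xx_register_offsets */
	const uint32_t *mmio;            /* stand-in for mapped registers */
};

/* One generic accessor serves every generation: resolve the abstract
 * index through the per-target table, then read the register. */
static uint32_t adreno_reg_read(struct adreno_gpu *gpu, enum adreno_regs reg)
{
	return gpu->mmio[gpu->reg_offsets[reg]];
}

int main(void)
{
	static const unsigned int offsets[REG_ADRENO_REGISTER_MAX] = {
		[REG_ADRENO_CP_RB_RPTR] = 0x0143, /* illustrative addresses */
		[REG_ADRENO_CP_RB_WPTR] = 0x0145,
	};
	static uint32_t regs[0x200];

	struct adreno_gpu gpu = { .reg_offsets = offsets, .mmio = regs };
	regs[0x0143] = 42;
	printf("%u\n", (unsigned)adreno_reg_read(&gpu, REG_ADRENO_CP_RB_RPTR)); /* 42 */
	return 0;
}
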
@@ -463,6 +553,7 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
 	gpu->num_perfcntrs = ARRAY_SIZE(perfcntrs);
 
 	adreno_gpu->registers = a3xx_registers;
+	adreno_gpu->reg_offsets = a3xx_register_offsets;
 
 	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs);
 	if (ret)
diff --git a/drivers/gpu/drm/msm/adreno/a4xx.xml.h b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
new file mode 100644
index 000000000000..5a24c416d2dd
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
@@ -0,0 +1,2144 @@
1#ifndef A4XX_XML
2#define A4XX_XML
3
4/* Autogenerated file, DO NOT EDIT manually!
5
6This file was generated by the rules-ng-ng headergen tool in this git repository:
7http://github.com/freedreno/envytools/
8git clone https://github.com/freedreno/envytools.git
9
10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
14- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30)
15- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15053 bytes, from 2014-11-09 15:45:47)
16- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 63169 bytes, from 2014-11-13 22:44:18)
17- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 49097 bytes, from 2014-11-14 15:38:00)
18
19Copyright (C) 2013-2014 by the following authors:
20- Rob Clark <robdclark@gmail.com> (robclark)
21
22Permission is hereby granted, free of charge, to any person obtaining
23a copy of this software and associated documentation files (the
24"Software"), to deal in the Software without restriction, including
25without limitation the rights to use, copy, modify, merge, publish,
26distribute, sublicense, and/or sell copies of the Software, and to
27permit persons to whom the Software is furnished to do so, subject to
28the following conditions:
29
30The above copyright notice and this permission notice (including the
31next paragraph) shall be included in all copies or substantial
32portions of the Software.
33
34THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
35EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
36MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
37IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
38LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
39OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
40WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
41*/
42
43
44enum a4xx_color_fmt {
45 RB4_A8_UNORM = 1,
46 RB4_R5G6R5_UNORM = 14,
47 RB4_Z16_UNORM = 15,
48 RB4_R8G8B8_UNORM = 25,
49 RB4_R8G8B8A8_UNORM = 26,
50};
51
52enum a4xx_tile_mode {
53 TILE4_LINEAR = 0,
54 TILE4_3 = 3,
55};
56
57enum a4xx_rb_blend_opcode {
58 BLEND_DST_PLUS_SRC = 0,
59 BLEND_SRC_MINUS_DST = 1,
60 BLEND_DST_MINUS_SRC = 2,
61 BLEND_MIN_DST_SRC = 3,
62 BLEND_MAX_DST_SRC = 4,
63};
64
65enum a4xx_vtx_fmt {
66 VFMT4_FLOAT_32 = 1,
67 VFMT4_FLOAT_32_32 = 2,
68 VFMT4_FLOAT_32_32_32 = 3,
69 VFMT4_FLOAT_32_32_32_32 = 4,
70 VFMT4_FLOAT_16 = 5,
71 VFMT4_FLOAT_16_16 = 6,
72 VFMT4_FLOAT_16_16_16 = 7,
73 VFMT4_FLOAT_16_16_16_16 = 8,
74 VFMT4_FIXED_32 = 9,
75 VFMT4_FIXED_32_32 = 10,
76 VFMT4_FIXED_32_32_32 = 11,
77 VFMT4_FIXED_32_32_32_32 = 12,
78 VFMT4_SHORT_16 = 16,
79 VFMT4_SHORT_16_16 = 17,
80 VFMT4_SHORT_16_16_16 = 18,
81 VFMT4_SHORT_16_16_16_16 = 19,
82 VFMT4_USHORT_16 = 20,
83 VFMT4_USHORT_16_16 = 21,
84 VFMT4_USHORT_16_16_16 = 22,
85 VFMT4_USHORT_16_16_16_16 = 23,
86 VFMT4_NORM_SHORT_16 = 24,
87 VFMT4_NORM_SHORT_16_16 = 25,
88 VFMT4_NORM_SHORT_16_16_16 = 26,
89 VFMT4_NORM_SHORT_16_16_16_16 = 27,
90 VFMT4_NORM_USHORT_16 = 28,
91 VFMT4_NORM_USHORT_16_16 = 29,
92 VFMT4_NORM_USHORT_16_16_16 = 30,
93 VFMT4_NORM_USHORT_16_16_16_16 = 31,
94 VFMT4_UBYTE_8 = 40,
95 VFMT4_UBYTE_8_8 = 41,
96 VFMT4_UBYTE_8_8_8 = 42,
97 VFMT4_UBYTE_8_8_8_8 = 43,
98 VFMT4_NORM_UBYTE_8 = 44,
99 VFMT4_NORM_UBYTE_8_8 = 45,
100 VFMT4_NORM_UBYTE_8_8_8 = 46,
101 VFMT4_NORM_UBYTE_8_8_8_8 = 47,
102 VFMT4_BYTE_8 = 48,
103 VFMT4_BYTE_8_8 = 49,
104 VFMT4_BYTE_8_8_8 = 50,
105 VFMT4_BYTE_8_8_8_8 = 51,
106 VFMT4_NORM_BYTE_8 = 52,
107 VFMT4_NORM_BYTE_8_8 = 53,
108 VFMT4_NORM_BYTE_8_8_8 = 54,
109 VFMT4_NORM_BYTE_8_8_8_8 = 55,
110 VFMT4_UINT_10_10_10_2 = 60,
111 VFMT4_NORM_UINT_10_10_10_2 = 61,
112 VFMT4_INT_10_10_10_2 = 62,
113 VFMT4_NORM_INT_10_10_10_2 = 63,
114};
115
116enum a4xx_tex_fmt {
117 TFMT4_NORM_USHORT_565 = 11,
118 TFMT4_NORM_USHORT_5551 = 10,
119 TFMT4_NORM_USHORT_4444 = 8,
120 TFMT4_NORM_UINT_X8Z24 = 71,
121 TFMT4_NORM_UINT_2_10_10_10 = 33,
122 TFMT4_NORM_UINT_A8 = 3,
123 TFMT4_NORM_UINT_L8_A8 = 13,
124 TFMT4_NORM_UINT_8 = 4,
125 TFMT4_NORM_UINT_8_8_8_8 = 28,
126 TFMT4_FLOAT_16 = 20,
127 TFMT4_FLOAT_16_16 = 40,
128 TFMT4_FLOAT_16_16_16_16 = 53,
129 TFMT4_FLOAT_32 = 43,
130 TFMT4_FLOAT_32_32 = 56,
131 TFMT4_FLOAT_32_32_32_32 = 63,
132};
133
134enum a4xx_depth_format {
135 DEPTH4_NONE = 0,
136 DEPTH4_16 = 1,
137 DEPTH4_24_8 = 2,
138};
139
140enum a4xx_tex_filter {
141 A4XX_TEX_NEAREST = 0,
142 A4XX_TEX_LINEAR = 1,
143};
144
145enum a4xx_tex_clamp {
146 A4XX_TEX_REPEAT = 0,
147 A4XX_TEX_CLAMP_TO_EDGE = 1,
148 A4XX_TEX_MIRROR_REPEAT = 2,
149 A4XX_TEX_CLAMP_NONE = 3,
150};
151
152enum a4xx_tex_swiz {
153 A4XX_TEX_X = 0,
154 A4XX_TEX_Y = 1,
155 A4XX_TEX_Z = 2,
156 A4XX_TEX_W = 3,
157 A4XX_TEX_ZERO = 4,
158 A4XX_TEX_ONE = 5,
159};
160
161enum a4xx_tex_type {
162 A4XX_TEX_1D = 0,
163 A4XX_TEX_2D = 1,
164 A4XX_TEX_CUBE = 2,
165 A4XX_TEX_3D = 3,
166};
167
168#define A4XX_CGC_HLSQ_EARLY_CYC__MASK 0x00700000
169#define A4XX_CGC_HLSQ_EARLY_CYC__SHIFT 20
170static inline uint32_t A4XX_CGC_HLSQ_EARLY_CYC(uint32_t val)
171{
172 return ((val) << A4XX_CGC_HLSQ_EARLY_CYC__SHIFT) & A4XX_CGC_HLSQ_EARLY_CYC__MASK;
173}
174#define A4XX_INT0_RBBM_GPU_IDLE 0x00000001
175#define A4XX_INT0_RBBM_AHB_ERROR 0x00000002
176#define A4XX_INT0_RBBM_REG_TIMEOUT 0x00000004
177#define A4XX_INT0_RBBM_ME_MS_TIMEOUT 0x00000008
178#define A4XX_INT0_RBBM_PFP_MS_TIMEOUT 0x00000010
179#define A4XX_INT0_RBBM_ATB_BUS_OVERFLOW 0x00000020
180#define A4XX_INT0_VFD_ERROR 0x00000040
181#define A4XX_INT0_CP_SW_INT 0x00000080
182#define A4XX_INT0_CP_T0_PACKET_IN_IB 0x00000100
183#define A4XX_INT0_CP_OPCODE_ERROR 0x00000200
184#define A4XX_INT0_CP_RESERVED_BIT_ERROR 0x00000400
185#define A4XX_INT0_CP_HW_FAULT 0x00000800
186#define A4XX_INT0_CP_DMA 0x00001000
187#define A4XX_INT0_CP_IB2_INT 0x00002000
188#define A4XX_INT0_CP_IB1_INT 0x00004000
189#define A4XX_INT0_CP_RB_INT 0x00008000
190#define A4XX_INT0_CP_REG_PROTECT_FAULT 0x00010000
191#define A4XX_INT0_CP_RB_DONE_TS 0x00020000
192#define A4XX_INT0_CP_VS_DONE_TS 0x00040000
193#define A4XX_INT0_CP_PS_DONE_TS 0x00080000
194#define A4XX_INT0_CACHE_FLUSH_TS 0x00100000
195#define A4XX_INT0_CP_AHB_ERROR_HALT 0x00200000
196#define A4XX_INT0_MISC_HANG_DETECT 0x01000000
197#define A4XX_INT0_UCHE_OOB_ACCESS 0x02000000
198#define REG_A4XX_RB_GMEM_BASE_ADDR 0x00000cc0
199
200#define REG_A4XX_RB_PERFCTR_RB_SEL_0 0x00000cc7
201
202#define REG_A4XX_RB_PERFCTR_RB_SEL_1 0x00000cc8
203
204#define REG_A4XX_RB_PERFCTR_RB_SEL_2 0x00000cc9
205
206#define REG_A4XX_RB_PERFCTR_RB_SEL_3 0x00000cca
207
208#define REG_A4XX_RB_PERFCTR_RB_SEL_4 0x00000ccb
209
210#define REG_A4XX_RB_PERFCTR_RB_SEL_5 0x00000ccc
211
212#define REG_A4XX_RB_PERFCTR_RB_SEL_6 0x00000ccd
213
214#define REG_A4XX_RB_PERFCTR_RB_SEL_7 0x00000cce
215
216#define REG_A4XX_RB_PERFCTR_CCU_SEL_3 0x00000cd2
217
218#define REG_A4XX_RB_FRAME_BUFFER_DIMENSION 0x00000ce0
219#define A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__MASK 0x00003fff
220#define A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__SHIFT 0
221static inline uint32_t A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH(uint32_t val)
222{
223 return ((val) << A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__SHIFT) & A4XX_RB_FRAME_BUFFER_DIMENSION_WIDTH__MASK;
224}
225#define A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__MASK 0x3fff0000
226#define A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__SHIFT 16
227static inline uint32_t A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT(uint32_t val)
228{
229 return ((val) << A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__SHIFT) & A4XX_RB_FRAME_BUFFER_DIMENSION_HEIGHT__MASK;
230}
231
232#define REG_A4XX_RB_CLEAR_COLOR_DW0 0x000020cc
233
234#define REG_A4XX_RB_CLEAR_COLOR_DW1 0x000020cd
235
236#define REG_A4XX_RB_CLEAR_COLOR_DW2 0x000020ce
237
238#define REG_A4XX_RB_CLEAR_COLOR_DW3 0x000020cf
239
240#define REG_A4XX_RB_MODE_CONTROL 0x000020a0
241#define A4XX_RB_MODE_CONTROL_WIDTH__MASK 0x0000003f
242#define A4XX_RB_MODE_CONTROL_WIDTH__SHIFT 0
243static inline uint32_t A4XX_RB_MODE_CONTROL_WIDTH(uint32_t val)
244{
245 return ((val >> 5) << A4XX_RB_MODE_CONTROL_WIDTH__SHIFT) & A4XX_RB_MODE_CONTROL_WIDTH__MASK;
246}
247#define A4XX_RB_MODE_CONTROL_HEIGHT__MASK 0x00003f00
248#define A4XX_RB_MODE_CONTROL_HEIGHT__SHIFT 8
249static inline uint32_t A4XX_RB_MODE_CONTROL_HEIGHT(uint32_t val)
250{
251 return ((val >> 5) << A4XX_RB_MODE_CONTROL_HEIGHT__SHIFT) & A4XX_RB_MODE_CONTROL_HEIGHT__MASK;
252}
253
254#define REG_A4XX_RB_RENDER_CONTROL 0x000020a1
255#define A4XX_RB_RENDER_CONTROL_BINNING_PASS 0x00000001
256#define A4XX_RB_RENDER_CONTROL_DISABLE_COLOR_PIPE 0x00000020
257
258#define REG_A4XX_RB_MSAA_CONTROL 0x000020a2
259#define A4XX_RB_MSAA_CONTROL_DISABLE 0x00001000
260#define A4XX_RB_MSAA_CONTROL_SAMPLES__MASK 0x0000e000
261#define A4XX_RB_MSAA_CONTROL_SAMPLES__SHIFT 13
262static inline uint32_t A4XX_RB_MSAA_CONTROL_SAMPLES(uint32_t val)
263{
264 return ((val) << A4XX_RB_MSAA_CONTROL_SAMPLES__SHIFT) & A4XX_RB_MSAA_CONTROL_SAMPLES__MASK;
265}
266
267#define REG_A4XX_RB_MSAA_CONTROL2 0x000020a3
268#define A4XX_RB_MSAA_CONTROL2_MSAA_SAMPLES__MASK 0x00000380
269#define A4XX_RB_MSAA_CONTROL2_MSAA_SAMPLES__SHIFT 7
270static inline uint32_t A4XX_RB_MSAA_CONTROL2_MSAA_SAMPLES(uint32_t val)
271{
272 return ((val) << A4XX_RB_MSAA_CONTROL2_MSAA_SAMPLES__SHIFT) & A4XX_RB_MSAA_CONTROL2_MSAA_SAMPLES__MASK;
273}
274#define A4XX_RB_MSAA_CONTROL2_VARYING 0x00001000
275
276static inline uint32_t REG_A4XX_RB_MRT(uint32_t i0) { return 0x000020a4 + 0x5*i0; }
277
278static inline uint32_t REG_A4XX_RB_MRT_CONTROL(uint32_t i0) { return 0x000020a4 + 0x5*i0; }
279#define A4XX_RB_MRT_CONTROL_READ_DEST_ENABLE 0x00000008
280#define A4XX_RB_MRT_CONTROL_BLEND 0x00000010
281#define A4XX_RB_MRT_CONTROL_BLEND2 0x00000020
282#define A4XX_RB_MRT_CONTROL_FASTCLEAR 0x00000400
283#define A4XX_RB_MRT_CONTROL_B11 0x00000800
284#define A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK 0x0f000000
285#define A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT 24
286static inline uint32_t A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val)
287{
288 return ((val) << A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT) & A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK;
289}
290
291static inline uint32_t REG_A4XX_RB_MRT_BUF_INFO(uint32_t i0) { return 0x000020a5 + 0x5*i0; }
292#define A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK 0x0000003f
293#define A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT 0
294static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a4xx_color_fmt val)
295{
296 return ((val) << A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A4XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK;
297}
298#define A4XX_RB_MRT_BUF_INFO_DITHER_MODE__MASK 0x00000600
299#define A4XX_RB_MRT_BUF_INFO_DITHER_MODE__SHIFT 9
300static inline uint32_t A4XX_RB_MRT_BUF_INFO_DITHER_MODE(enum adreno_rb_dither_mode val)
301{
302 return ((val) << A4XX_RB_MRT_BUF_INFO_DITHER_MODE__SHIFT) & A4XX_RB_MRT_BUF_INFO_DITHER_MODE__MASK;
303}
304#define A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK 0x00001800
305#define A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT 11
306static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
307{
308 return ((val) << A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A4XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK;
309}
310#define A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK 0x007fc000
311#define A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT 14
312static inline uint32_t A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH(uint32_t val)
313{
314 return ((val >> 4) << A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__SHIFT) & A4XX_RB_MRT_BUF_INFO_COLOR_BUF_PITCH__MASK;
315}
316
317static inline uint32_t REG_A4XX_RB_MRT_BASE(uint32_t i0) { return 0x000020a6 + 0x5*i0; }
318
319static inline uint32_t REG_A4XX_RB_MRT_CONTROL3(uint32_t i0) { return 0x000020a7 + 0x5*i0; }
320#define A4XX_RB_MRT_CONTROL3_STRIDE__MASK 0x0001fff8
321#define A4XX_RB_MRT_CONTROL3_STRIDE__SHIFT 3
322static inline uint32_t A4XX_RB_MRT_CONTROL3_STRIDE(uint32_t val)
323{
324 return ((val) << A4XX_RB_MRT_CONTROL3_STRIDE__SHIFT) & A4XX_RB_MRT_CONTROL3_STRIDE__MASK;
325}
326
327static inline uint32_t REG_A4XX_RB_MRT_BLEND_CONTROL(uint32_t i0) { return 0x000020a8 + 0x5*i0; }
328#define A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK 0x0000001f
329#define A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT 0
330static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_blend_factor val)
331{
332 return ((val) << A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK;
333}
334#define A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0
335#define A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5
336static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a4xx_rb_blend_opcode val)
337{
338 return ((val) << A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK;
339}
340#define A4XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK 0x00001f00
341#define A4XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT 8
342static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(enum adreno_rb_blend_factor val)
343{
344 return ((val) << A4XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK;
345}
346#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK 0x001f0000
347#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT 16
348static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb_blend_factor val)
349{
350 return ((val) << A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK;
351}
352#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000
353#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21
354static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a4xx_rb_blend_opcode val)
355{
356 return ((val) << A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK;
357}
358#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK 0x1f000000
359#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT 24
360static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_rb_blend_factor val)
361{
362 return ((val) << A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK;
363}
364
365#define REG_A4XX_RB_ALPHA_CONTROL 0x000020f8
366#define A4XX_RB_ALPHA_CONTROL_ALPHA_TEST 0x00000100
367#define A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK 0x00000e00
368#define A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT 9
369static inline uint32_t A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare_func val)
370{
371 return ((val) << A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT) & A4XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK;
372}
373
374#define REG_A4XX_RB_FS_OUTPUT 0x000020f9
375#define A4XX_RB_FS_OUTPUT_ENABLE_COLOR_PIPE 0x00000001
376#define A4XX_RB_FS_OUTPUT_FAST_CLEAR 0x00000100
377#define A4XX_RB_FS_OUTPUT_SAMPLE_MASK__MASK 0xffff0000
378#define A4XX_RB_FS_OUTPUT_SAMPLE_MASK__SHIFT 16
379static inline uint32_t A4XX_RB_FS_OUTPUT_SAMPLE_MASK(uint32_t val)
380{
381 return ((val) << A4XX_RB_FS_OUTPUT_SAMPLE_MASK__SHIFT) & A4XX_RB_FS_OUTPUT_SAMPLE_MASK__MASK;
382}
383
384#define REG_A4XX_RB_RENDER_CONTROL3 0x000020fb
385#define A4XX_RB_RENDER_CONTROL3_COMPONENT_ENABLE__MASK 0x0000001f
386#define A4XX_RB_RENDER_CONTROL3_COMPONENT_ENABLE__SHIFT 0
387static inline uint32_t A4XX_RB_RENDER_CONTROL3_COMPONENT_ENABLE(uint32_t val)
388{
389 return ((val) << A4XX_RB_RENDER_CONTROL3_COMPONENT_ENABLE__SHIFT) & A4XX_RB_RENDER_CONTROL3_COMPONENT_ENABLE__MASK;
390}
391
392#define REG_A4XX_RB_COPY_CONTROL 0x000020fc
393#define A4XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK 0x00000003
394#define A4XX_RB_COPY_CONTROL_MSAA_RESOLVE__SHIFT 0
395static inline uint32_t A4XX_RB_COPY_CONTROL_MSAA_RESOLVE(enum a3xx_msaa_samples val)
396{
397 return ((val) << A4XX_RB_COPY_CONTROL_MSAA_RESOLVE__SHIFT) & A4XX_RB_COPY_CONTROL_MSAA_RESOLVE__MASK;
398}
399#define A4XX_RB_COPY_CONTROL_MODE__MASK 0x00000070
400#define A4XX_RB_COPY_CONTROL_MODE__SHIFT 4
401static inline uint32_t A4XX_RB_COPY_CONTROL_MODE(enum adreno_rb_copy_control_mode val)
402{
403 return ((val) << A4XX_RB_COPY_CONTROL_MODE__SHIFT) & A4XX_RB_COPY_CONTROL_MODE__MASK;
404}
405#define A4XX_RB_COPY_CONTROL_FASTCLEAR__MASK 0x00000f00
406#define A4XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT 8
407static inline uint32_t A4XX_RB_COPY_CONTROL_FASTCLEAR(uint32_t val)
408{
409 return ((val) << A4XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT) & A4XX_RB_COPY_CONTROL_FASTCLEAR__MASK;
410}
411#define A4XX_RB_COPY_CONTROL_GMEM_BASE__MASK 0xffffc000
412#define A4XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT 14
413static inline uint32_t A4XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val)
414{
415 return ((val >> 14) << A4XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT) & A4XX_RB_COPY_CONTROL_GMEM_BASE__MASK;
416}
417
418#define REG_A4XX_RB_COPY_DEST_BASE 0x000020fd
419#define A4XX_RB_COPY_DEST_BASE_BASE__MASK 0xfffffff0
420#define A4XX_RB_COPY_DEST_BASE_BASE__SHIFT 4
421static inline uint32_t A4XX_RB_COPY_DEST_BASE_BASE(uint32_t val)
422{
423 return ((val >> 4) << A4XX_RB_COPY_DEST_BASE_BASE__SHIFT) & A4XX_RB_COPY_DEST_BASE_BASE__MASK;
424}
425
426#define REG_A4XX_RB_COPY_DEST_PITCH 0x000020fe
427#define A4XX_RB_COPY_DEST_PITCH_PITCH__MASK 0xffffffff
428#define A4XX_RB_COPY_DEST_PITCH_PITCH__SHIFT 0
429static inline uint32_t A4XX_RB_COPY_DEST_PITCH_PITCH(uint32_t val)
430{
431 return ((val >> 5) << A4XX_RB_COPY_DEST_PITCH_PITCH__SHIFT) & A4XX_RB_COPY_DEST_PITCH_PITCH__MASK;
432}
433
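/* Editor's note (not generated code): unlike the plain shift-and-mask
 * helpers, GMEM_BASE, COPY_DEST_BASE and COPY_DEST_PITCH pre-shift their
 * argument right before placing it.  These fields store an aligned byte
 * address or pitch with the implicit low bits dropped, so the caller passes
 * the real byte value and must keep it suitably aligned -- 16 KiB, 16 bytes
 * and 32 bytes respectively, going by the shift counts above.  A sketch,
 * where 'dst_iova' is a hypothetical 16-byte-aligned GPU address:
 *
 *	uint32_t base  = A4XX_RB_COPY_DEST_BASE_BASE(dst_iova);
 *	uint32_t pitch = A4XX_RB_COPY_DEST_PITCH_PITCH(stride_bytes);
 */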
434#define REG_A4XX_RB_COPY_DEST_INFO 0x000020ff
435#define A4XX_RB_COPY_DEST_INFO_FORMAT__MASK 0x000000fc
436#define A4XX_RB_COPY_DEST_INFO_FORMAT__SHIFT 2
437static inline uint32_t A4XX_RB_COPY_DEST_INFO_FORMAT(enum a4xx_color_fmt val)
438{
439 return ((val) << A4XX_RB_COPY_DEST_INFO_FORMAT__SHIFT) & A4XX_RB_COPY_DEST_INFO_FORMAT__MASK;
440}
441#define A4XX_RB_COPY_DEST_INFO_SWAP__MASK 0x00000300
442#define A4XX_RB_COPY_DEST_INFO_SWAP__SHIFT 8
443static inline uint32_t A4XX_RB_COPY_DEST_INFO_SWAP(enum a3xx_color_swap val)
444{
445 return ((val) << A4XX_RB_COPY_DEST_INFO_SWAP__SHIFT) & A4XX_RB_COPY_DEST_INFO_SWAP__MASK;
446}
447#define A4XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK 0x00000c00
448#define A4XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT 10
449static inline uint32_t A4XX_RB_COPY_DEST_INFO_DITHER_MODE(enum adreno_rb_dither_mode val)
450{
451 return ((val) << A4XX_RB_COPY_DEST_INFO_DITHER_MODE__SHIFT) & A4XX_RB_COPY_DEST_INFO_DITHER_MODE__MASK;
452}
453#define A4XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__MASK 0x0003c000
454#define A4XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__SHIFT 14
455static inline uint32_t A4XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE(uint32_t val)
456{
457 return ((val) << A4XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__SHIFT) & A4XX_RB_COPY_DEST_INFO_COMPONENT_ENABLE__MASK;
458}
459#define A4XX_RB_COPY_DEST_INFO_ENDIAN__MASK 0x001c0000
460#define A4XX_RB_COPY_DEST_INFO_ENDIAN__SHIFT 18
461static inline uint32_t A4XX_RB_COPY_DEST_INFO_ENDIAN(enum adreno_rb_surface_endian val)
462{
463 return ((val) << A4XX_RB_COPY_DEST_INFO_ENDIAN__SHIFT) & A4XX_RB_COPY_DEST_INFO_ENDIAN__MASK;
464}
465#define A4XX_RB_COPY_DEST_INFO_TILE__MASK 0x03000000
466#define A4XX_RB_COPY_DEST_INFO_TILE__SHIFT 24
467static inline uint32_t A4XX_RB_COPY_DEST_INFO_TILE(enum a4xx_tile_mode val)
468{
469 return ((val) << A4XX_RB_COPY_DEST_INFO_TILE__SHIFT) & A4XX_RB_COPY_DEST_INFO_TILE__MASK;
470}
471
472#define REG_A4XX_RB_FS_OUTPUT_REG 0x00002100
473#define A4XX_RB_FS_OUTPUT_REG_COLOR_PIPE_ENABLE 0x00000001
474#define A4XX_RB_FS_OUTPUT_REG_FRAG_WRITES_Z 0x00000020
475
476#define REG_A4XX_RB_DEPTH_CONTROL 0x00002101
477#define A4XX_RB_DEPTH_CONTROL_FRAG_WRITES_Z 0x00000001
478#define A4XX_RB_DEPTH_CONTROL_Z_ENABLE 0x00000002
479#define A4XX_RB_DEPTH_CONTROL_Z_WRITE_ENABLE 0x00000004
480#define A4XX_RB_DEPTH_CONTROL_ZFUNC__MASK 0x00000070
481#define A4XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT 4
482static inline uint32_t A4XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val)
483{
484 return ((val) << A4XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT) & A4XX_RB_DEPTH_CONTROL_ZFUNC__MASK;
485}
486#define A4XX_RB_DEPTH_CONTROL_BF_ENABLE 0x00000080
487#define A4XX_RB_DEPTH_CONTROL_EARLY_Z_DISABLE 0x00010000
488#define A4XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE 0x80000000
489
490#define REG_A4XX_RB_DEPTH_CLEAR 0x00002102
491
492#define REG_A4XX_RB_DEPTH_INFO 0x00002103
493#define A4XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK 0x00000003
494#define A4XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT 0
495static inline uint32_t A4XX_RB_DEPTH_INFO_DEPTH_FORMAT(enum a4xx_depth_format val)
496{
497 return ((val) << A4XX_RB_DEPTH_INFO_DEPTH_FORMAT__SHIFT) & A4XX_RB_DEPTH_INFO_DEPTH_FORMAT__MASK;
498}
499#define A4XX_RB_DEPTH_INFO_DEPTH_BASE__MASK 0xfffff000
500#define A4XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT 12
501static inline uint32_t A4XX_RB_DEPTH_INFO_DEPTH_BASE(uint32_t val)
502{
503 return ((val >> 12) << A4XX_RB_DEPTH_INFO_DEPTH_BASE__SHIFT) & A4XX_RB_DEPTH_INFO_DEPTH_BASE__MASK;
504}
505
506#define REG_A4XX_RB_DEPTH_PITCH 0x00002104
507#define A4XX_RB_DEPTH_PITCH__MASK 0xffffffff
508#define A4XX_RB_DEPTH_PITCH__SHIFT 0
509static inline uint32_t A4XX_RB_DEPTH_PITCH(uint32_t val)
510{
511 return ((val >> 4) << A4XX_RB_DEPTH_PITCH__SHIFT) & A4XX_RB_DEPTH_PITCH__MASK;
512}
513
514#define REG_A4XX_RB_DEPTH_PITCH2 0x00002105
515#define A4XX_RB_DEPTH_PITCH2__MASK 0xffffffff
516#define A4XX_RB_DEPTH_PITCH2__SHIFT 0
517static inline uint32_t A4XX_RB_DEPTH_PITCH2(uint32_t val)
518{
519 return ((val >> 4) << A4XX_RB_DEPTH_PITCH2__SHIFT) & A4XX_RB_DEPTH_PITCH2__MASK;
520}
521
522#define REG_A4XX_RB_STENCIL_CONTROL 0x00002106
523#define A4XX_RB_STENCIL_CONTROL_STENCIL_ENABLE 0x00000001
524#define A4XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF 0x00000002
525#define A4XX_RB_STENCIL_CONTROL_STENCIL_READ 0x00000004
526#define A4XX_RB_STENCIL_CONTROL_FUNC__MASK 0x00000700
527#define A4XX_RB_STENCIL_CONTROL_FUNC__SHIFT 8
528static inline uint32_t A4XX_RB_STENCIL_CONTROL_FUNC(enum adreno_compare_func val)
529{
530 return ((val) << A4XX_RB_STENCIL_CONTROL_FUNC__SHIFT) & A4XX_RB_STENCIL_CONTROL_FUNC__MASK;
531}
532#define A4XX_RB_STENCIL_CONTROL_FAIL__MASK 0x00003800
533#define A4XX_RB_STENCIL_CONTROL_FAIL__SHIFT 11
534static inline uint32_t A4XX_RB_STENCIL_CONTROL_FAIL(enum adreno_stencil_op val)
535{
536 return ((val) << A4XX_RB_STENCIL_CONTROL_FAIL__SHIFT) & A4XX_RB_STENCIL_CONTROL_FAIL__MASK;
537}
538#define A4XX_RB_STENCIL_CONTROL_ZPASS__MASK 0x0001c000
539#define A4XX_RB_STENCIL_CONTROL_ZPASS__SHIFT 14
540static inline uint32_t A4XX_RB_STENCIL_CONTROL_ZPASS(enum adreno_stencil_op val)
541{
542 return ((val) << A4XX_RB_STENCIL_CONTROL_ZPASS__SHIFT) & A4XX_RB_STENCIL_CONTROL_ZPASS__MASK;
543}
544#define A4XX_RB_STENCIL_CONTROL_ZFAIL__MASK 0x000e0000
545#define A4XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT 17
546static inline uint32_t A4XX_RB_STENCIL_CONTROL_ZFAIL(enum adreno_stencil_op val)
547{
548 return ((val) << A4XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT) & A4XX_RB_STENCIL_CONTROL_ZFAIL__MASK;
549}
550#define A4XX_RB_STENCIL_CONTROL_FUNC_BF__MASK 0x00700000
551#define A4XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT 20
552static inline uint32_t A4XX_RB_STENCIL_CONTROL_FUNC_BF(enum adreno_compare_func val)
553{
554 return ((val) << A4XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT) & A4XX_RB_STENCIL_CONTROL_FUNC_BF__MASK;
555}
556#define A4XX_RB_STENCIL_CONTROL_FAIL_BF__MASK 0x03800000
557#define A4XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT 23
558static inline uint32_t A4XX_RB_STENCIL_CONTROL_FAIL_BF(enum adreno_stencil_op val)
559{
560 return ((val) << A4XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT) & A4XX_RB_STENCIL_CONTROL_FAIL_BF__MASK;
561}
562#define A4XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK 0x1c000000
563#define A4XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT 26
564static inline uint32_t A4XX_RB_STENCIL_CONTROL_ZPASS_BF(enum adreno_stencil_op val)
565{
566 return ((val) << A4XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT) & A4XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK;
567}
568#define A4XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK 0xe0000000
569#define A4XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT 29
570static inline uint32_t A4XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op val)
571{
572 return ((val) << A4XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A4XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK;
573}
574
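/* Editor's note -- illustrative sketch only.  RB_STENCIL_CONTROL packs the
 * complete front-face and back-face (_BF suffix) stencil state into a
 * single register.  FUNC_ALWAYS and STENCIL_KEEP are assumed names from the
 * shared adreno compare-func and stencil-op enums.
 */
static inline uint32_t a4xx_stencil_control_example(void)
{
	/* pass-through stencil state for both faces */
	return A4XX_RB_STENCIL_CONTROL_FUNC(FUNC_ALWAYS) |
		A4XX_RB_STENCIL_CONTROL_FAIL(STENCIL_KEEP) |
		A4XX_RB_STENCIL_CONTROL_ZPASS(STENCIL_KEEP) |
		A4XX_RB_STENCIL_CONTROL_ZFAIL(STENCIL_KEEP) |
		A4XX_RB_STENCIL_CONTROL_FUNC_BF(FUNC_ALWAYS) |
		A4XX_RB_STENCIL_CONTROL_FAIL_BF(STENCIL_KEEP) |
		A4XX_RB_STENCIL_CONTROL_ZPASS_BF(STENCIL_KEEP) |
		A4XX_RB_STENCIL_CONTROL_ZFAIL_BF(STENCIL_KEEP);
}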
575#define REG_A4XX_RB_STENCIL_CONTROL2 0x00002107
576#define A4XX_RB_STENCIL_CONTROL2_STENCIL_BUFFER 0x00000001
577
578#define REG_A4XX_RB_STENCILREFMASK 0x0000210b
579#define A4XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff
580#define A4XX_RB_STENCILREFMASK_STENCILREF__SHIFT 0
581static inline uint32_t A4XX_RB_STENCILREFMASK_STENCILREF(uint32_t val)
582{
583 return ((val) << A4XX_RB_STENCILREFMASK_STENCILREF__SHIFT) & A4XX_RB_STENCILREFMASK_STENCILREF__MASK;
584}
585#define A4XX_RB_STENCILREFMASK_STENCILMASK__MASK 0x0000ff00
586#define A4XX_RB_STENCILREFMASK_STENCILMASK__SHIFT 8
587static inline uint32_t A4XX_RB_STENCILREFMASK_STENCILMASK(uint32_t val)
588{
589 return ((val) << A4XX_RB_STENCILREFMASK_STENCILMASK__SHIFT) & A4XX_RB_STENCILREFMASK_STENCILMASK__MASK;
590}
591#define A4XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK 0x00ff0000
592#define A4XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT 16
593static inline uint32_t A4XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t val)
594{
595 return ((val) << A4XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT) & A4XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK;
596}
597
598#define REG_A4XX_RB_STENCILREFMASK_BF 0x0000210c
599#define A4XX_RB_STENCILREFMASK_BF_STENCILREF__MASK 0x000000ff
600#define A4XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT 0
601static inline uint32_t A4XX_RB_STENCILREFMASK_BF_STENCILREF(uint32_t val)
602{
603 return ((val) << A4XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT) & A4XX_RB_STENCILREFMASK_BF_STENCILREF__MASK;
604}
605#define A4XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK 0x0000ff00
606#define A4XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT 8
607static inline uint32_t A4XX_RB_STENCILREFMASK_BF_STENCILMASK(uint32_t val)
608{
609 return ((val) << A4XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT) & A4XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK;
610}
611#define A4XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK 0x00ff0000
612#define A4XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT 16
613static inline uint32_t A4XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(uint32_t val)
614{
615 return ((val) << A4XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT) & A4XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK;
616}
617
618#define REG_A4XX_RB_BIN_OFFSET 0x0000210d
619#define A4XX_RB_BIN_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
620#define A4XX_RB_BIN_OFFSET_X__MASK 0x00007fff
621#define A4XX_RB_BIN_OFFSET_X__SHIFT 0
622static inline uint32_t A4XX_RB_BIN_OFFSET_X(uint32_t val)
623{
624 return ((val) << A4XX_RB_BIN_OFFSET_X__SHIFT) & A4XX_RB_BIN_OFFSET_X__MASK;
625}
626#define A4XX_RB_BIN_OFFSET_Y__MASK 0x7fff0000
627#define A4XX_RB_BIN_OFFSET_Y__SHIFT 16
628static inline uint32_t A4XX_RB_BIN_OFFSET_Y(uint32_t val)
629{
630 return ((val) << A4XX_RB_BIN_OFFSET_Y__SHIFT) & A4XX_RB_BIN_OFFSET_Y__MASK;
631}
632
633#define REG_A4XX_RB_VPORT_Z_CLAMP_MAX_15 0x0000213f
634
635#define REG_A4XX_RBBM_HW_VERSION 0x00000000
636
637#define REG_A4XX_RBBM_HW_CONFIGURATION 0x00000002
638
639static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_TP(uint32_t i0) { return 0x00000004 + 0x1*i0; }
640
641static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_TP_REG(uint32_t i0) { return 0x00000004 + 0x1*i0; }
642
643static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_TP(uint32_t i0) { return 0x00000008 + 0x1*i0; }
644
645static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_TP_REG(uint32_t i0) { return 0x00000008 + 0x1*i0; }
646
647static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_TP(uint32_t i0) { return 0x0000000c + 0x1*i0; }
648
649static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_TP_REG(uint32_t i0) { return 0x0000000c + 0x1*i0; }
650
651static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_TP(uint32_t i0) { return 0x00000010 + 0x1*i0; }
652
653static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_TP_REG(uint32_t i0) { return 0x00000010 + 0x1*i0; }
654
655#define REG_A4XX_RBBM_CLOCK_CTL_UCHE 0x00000014
656
657#define REG_A4XX_RBBM_CLOCK_CTL2_UCHE 0x00000015
658
659#define REG_A4XX_RBBM_CLOCK_CTL3_UCHE 0x00000016
660
661#define REG_A4XX_RBBM_CLOCK_CTL4_UCHE 0x00000017
662
663#define REG_A4XX_RBBM_CLOCK_HYST_UCHE 0x00000018
664
665#define REG_A4XX_RBBM_CLOCK_DELAY_UCHE 0x00000019
666
667#define REG_A4XX_RBBM_CLOCK_MODE_GPC 0x0000001a
668
669#define REG_A4XX_RBBM_CLOCK_DELAY_GPC 0x0000001b
670
671#define REG_A4XX_RBBM_CLOCK_HYST_GPC 0x0000001c
672
673#define REG_A4XX_RBBM_CLOCK_CTL_TSE_RAS_RBBM 0x0000001d
674
675#define REG_A4XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM 0x0000001e
676
677#define REG_A4XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM 0x0000001f
678
679#define REG_A4XX_RBBM_CLOCK_CTL 0x00000020
680
681#define REG_A4XX_RBBM_SP_HYST_CNT 0x00000021
682
683#define REG_A4XX_RBBM_SW_RESET_CMD 0x00000022
684
685#define REG_A4XX_RBBM_AHB_CTL0 0x00000023
686
687#define REG_A4XX_RBBM_AHB_CTL1 0x00000024
688
689#define REG_A4XX_RBBM_AHB_CMD 0x00000025
690
691#define REG_A4XX_RBBM_RB_SUB_BLOCK_SEL_CTL 0x00000026
692
693#define REG_A4XX_RBBM_RAM_ACC_63_32 0x00000028
694
695#define REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL 0x0000002b
696
697#define REG_A4XX_RBBM_INTERFACE_HANG_INT_CTL 0x0000002f
698
699#define REG_A4XX_RBBM_INTERFACE_HANG_MASK_CTL4 0x00000034
700
701#define REG_A4XX_RBBM_INT_CLEAR_CMD 0x00000036
702
703#define REG_A4XX_RBBM_INT_0_MASK 0x00000037
704
705#define REG_A4XX_RBBM_RBBM_CTL 0x0000003e
706
707#define REG_A4XX_RBBM_AHB_DEBUG_CTL 0x0000003f
708
709#define REG_A4XX_RBBM_VBIF_DEBUG_CTL 0x00000041
710
711#define REG_A4XX_RBBM_CLOCK_CTL2 0x00000042
712
713#define REG_A4XX_RBBM_BLOCK_SW_RESET_CMD 0x00000045
714
715#define REG_A4XX_RBBM_RESET_CYCLES 0x00000047
716
717#define REG_A4XX_RBBM_EXT_TRACE_BUS_CTL 0x00000049
718
719#define REG_A4XX_RBBM_CFG_DEBBUS_SEL_A 0x0000004a
720
721#define REG_A4XX_RBBM_CFG_DEBBUS_SEL_B 0x0000004b
722
723#define REG_A4XX_RBBM_CFG_DEBBUS_SEL_C 0x0000004c
724
725#define REG_A4XX_RBBM_CFG_DEBBUS_SEL_D 0x0000004d
726
727#define REG_A4XX_RBBM_PERFCTR_CP_0_LO 0x0000009c
728
729static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_SP(uint32_t i0) { return 0x00000068 + 0x1*i0; }
730
731static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_SP_REG(uint32_t i0) { return 0x00000068 + 0x1*i0; }
732
733static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_SP(uint32_t i0) { return 0x0000006c + 0x1*i0; }
734
735static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_SP_REG(uint32_t i0) { return 0x0000006c + 0x1*i0; }
736
737static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_SP(uint32_t i0) { return 0x00000070 + 0x1*i0; }
738
739static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_SP_REG(uint32_t i0) { return 0x00000070 + 0x1*i0; }
740
741static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_SP(uint32_t i0) { return 0x00000074 + 0x1*i0; }
742
743static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_SP_REG(uint32_t i0) { return 0x00000074 + 0x1*i0; }
744
745static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_RB(uint32_t i0) { return 0x00000078 + 0x1*i0; }
746
747static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_RB_REG(uint32_t i0) { return 0x00000078 + 0x1*i0; }
748
749static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_RB(uint32_t i0) { return 0x0000007c + 0x1*i0; }
750
751static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL2_RB_REG(uint32_t i0) { return 0x0000007c + 0x1*i0; }
752
753static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_MARB_CCU(uint32_t i0) { return 0x00000082 + 0x1*i0; }
754
755static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_MARB_CCU_REG(uint32_t i0) { return 0x00000082 + 0x1*i0; }
756
757static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU(uint32_t i0) { return 0x00000086 + 0x1*i0; }
758
759static inline uint32_t REG_A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU_REG(uint32_t i0) { return 0x00000086 + 0x1*i0; }
760
761#define REG_A4XX_RBBM_CLOCK_HYST_COM_DCOM 0x00000080
762
763#define REG_A4XX_RBBM_CLOCK_CTL_COM_DCOM 0x00000081
764
765#define REG_A4XX_RBBM_CLOCK_CTL_HLSQ 0x0000008a
766
767#define REG_A4XX_RBBM_CLOCK_HYST_HLSQ 0x0000008b
768
769#define REG_A4XX_RBBM_CLOCK_DELAY_HLSQ 0x0000008c
770
771#define REG_A4XX_RBBM_CLOCK_DELAY_COM_DCOM 0x0000008d
772
773static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1(uint32_t i0) { return 0x0000008e + 0x1*i0; }
774
775static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_REG(uint32_t i0) { return 0x0000008e + 0x1*i0; }
776
777#define REG_A4XX_RBBM_PERFCTR_PWR_1_LO 0x00000168
778
779#define REG_A4XX_RBBM_PERFCTR_CTL 0x00000170
780
781#define REG_A4XX_RBBM_PERFCTR_LOAD_CMD0 0x00000171
782
783#define REG_A4XX_RBBM_PERFCTR_LOAD_CMD1 0x00000172
784
785#define REG_A4XX_RBBM_PERFCTR_LOAD_CMD2 0x00000173
786
787#define REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_LO 0x00000174
788
789#define REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_HI 0x00000175
790
791#define REG_A4XX_RBBM_GPU_BUSY_MASKED 0x0000017a
792
793#define REG_A4XX_RBBM_INT_0_STATUS 0x0000017d
794
795#define REG_A4XX_RBBM_CLOCK_STATUS 0x00000182
796
797#define REG_A4XX_RBBM_AHB_STATUS 0x00000189
798
799#define REG_A4XX_RBBM_AHB_ME_SPLIT_STATUS 0x0000018c
800
801#define REG_A4XX_RBBM_AHB_PFP_SPLIT_STATUS 0x0000018d
802
803#define REG_A4XX_RBBM_AHB_ERROR_STATUS 0x0000018f
804
805#define REG_A4XX_RBBM_STATUS 0x00000191
806#define A4XX_RBBM_STATUS_HI_BUSY 0x00000001
807#define A4XX_RBBM_STATUS_CP_ME_BUSY 0x00000002
808#define A4XX_RBBM_STATUS_CP_PFP_BUSY 0x00000004
809#define A4XX_RBBM_STATUS_CP_NRT_BUSY 0x00004000
810#define A4XX_RBBM_STATUS_VBIF_BUSY 0x00008000
811#define A4XX_RBBM_STATUS_TSE_BUSY 0x00010000
812#define A4XX_RBBM_STATUS_RAS_BUSY 0x00020000
813#define A4XX_RBBM_STATUS_RB_BUSY 0x00040000
814#define A4XX_RBBM_STATUS_PC_DCALL_BUSY 0x00080000
815#define A4XX_RBBM_STATUS_PC_VSD_BUSY 0x00100000
816#define A4XX_RBBM_STATUS_VFD_BUSY 0x00200000
817#define A4XX_RBBM_STATUS_VPC_BUSY 0x00400000
818#define A4XX_RBBM_STATUS_UCHE_BUSY 0x00800000
819#define A4XX_RBBM_STATUS_SP_BUSY 0x01000000
820#define A4XX_RBBM_STATUS_TPL1_BUSY 0x02000000
821#define A4XX_RBBM_STATUS_MARB_BUSY 0x04000000
822#define A4XX_RBBM_STATUS_VSC_BUSY 0x08000000
823#define A4XX_RBBM_STATUS_ARB_BUSY 0x10000000
824#define A4XX_RBBM_STATUS_HLSQ_BUSY 0x20000000
825#define A4XX_RBBM_STATUS_GPU_BUSY_NOHC 0x40000000
826#define A4XX_RBBM_STATUS_GPU_BUSY 0x80000000
827
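/* Editor's note, not generated code: the RBBM_STATUS bits are read-only
 * busy flags, typically polled to decide whether the GPU has gone idle.
 * The use of the msm driver's gpu_read() register helper here is an
 * assumption about the surrounding driver code, not something this header
 * defines:
 *
 *	uint32_t status = gpu_read(gpu, REG_A4XX_RBBM_STATUS);
 *	bool idle = !(status & A4XX_RBBM_STATUS_GPU_BUSY);
 */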
828#define REG_A4XX_RBBM_INTERFACE_RRDY_STATUS5 0x0000019f
829
830#define REG_A4XX_CP_SCRATCH_UMASK 0x00000228
831
832#define REG_A4XX_CP_SCRATCH_ADDR 0x00000229
833
834#define REG_A4XX_CP_RB_BASE 0x00000200
835
836#define REG_A4XX_CP_RB_CNTL 0x00000201
837
838#define REG_A4XX_CP_RB_WPTR 0x00000205
839
840#define REG_A4XX_CP_RB_RPTR_ADDR 0x00000203
841
842#define REG_A4XX_CP_RB_RPTR 0x00000204
843
844#define REG_A4XX_CP_IB1_BASE 0x00000206
845
846#define REG_A4XX_CP_IB1_BUFSZ 0x00000207
847
848#define REG_A4XX_CP_IB2_BASE 0x00000208
849
850#define REG_A4XX_CP_IB2_BUFSZ 0x00000209
851
852#define REG_A4XX_CP_ME_RB_DONE_DATA 0x00000217
853
854#define REG_A4XX_CP_QUEUE_THRESH2 0x00000219
855
856#define REG_A4XX_CP_MERCIU_SIZE 0x0000021b
857
858#define REG_A4XX_CP_ROQ_ADDR 0x0000021c
859
860#define REG_A4XX_CP_ROQ_DATA 0x0000021d
861
862#define REG_A4XX_CP_MEQ_ADDR 0x0000021e
863
864#define REG_A4XX_CP_MEQ_DATA 0x0000021f
865
866#define REG_A4XX_CP_MERCIU_ADDR 0x00000220
867
868#define REG_A4XX_CP_MERCIU_DATA 0x00000221
869
870#define REG_A4XX_CP_MERCIU_DATA2 0x00000222
871
872#define REG_A4XX_CP_PFP_UCODE_ADDR 0x00000223
873
874#define REG_A4XX_CP_PFP_UCODE_DATA 0x00000224
875
876#define REG_A4XX_CP_ME_RAM_WADDR 0x00000225
877
878#define REG_A4XX_CP_ME_RAM_RADDR 0x00000226
879
880#define REG_A4XX_CP_ME_RAM_DATA 0x00000227
881
882#define REG_A4XX_CP_PREEMPT 0x0000022a
883
884#define REG_A4XX_CP_CNTL 0x0000022c
885
886#define REG_A4XX_CP_ME_CNTL 0x0000022d
887
888#define REG_A4XX_CP_DEBUG 0x0000022e
889
890#define REG_A4XX_CP_DEBUG_ECO_CONTROL 0x00000231
891
892#define REG_A4XX_CP_DRAW_STATE_ADDR 0x00000232
893
894#define REG_A4XX_CP_PROTECT_REG_0 0x00000240
895
896static inline uint32_t REG_A4XX_CP_PROTECT(uint32_t i0) { return 0x00000240 + 0x1*i0; }
897
898static inline uint32_t REG_A4XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000240 + 0x1*i0; }
899
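/* Editor's note: for register arrays the generator emits a small inline
 * function instead of a #define -- REG_A4XX_CP_PROTECT_REG(i) computes
 * base + stride * i, here 0x240 + 0x1 * i.  A hedged sketch of programming
 * all protect entries, assuming the msm gpu_write() helper and a
 * caller-provided 'protect_vals' table:
 *
 *	for (i = 0; i < ARRAY_SIZE(protect_vals); i++)
 *		gpu_write(gpu, REG_A4XX_CP_PROTECT_REG(i), protect_vals[i]);
 */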
900#define REG_A4XX_CP_PROTECT_CTRL 0x00000250
901
902#define REG_A4XX_CP_ST_BASE 0x000004c0
903
904#define REG_A4XX_CP_STQ_AVAIL 0x000004ce
905
906#define REG_A4XX_CP_MERCIU_STAT 0x000004d0
907
908#define REG_A4XX_CP_WFI_PEND_CTR 0x000004d2
909
910#define REG_A4XX_CP_HW_FAULT 0x000004d8
911
912#define REG_A4XX_CP_PROTECT_STATUS 0x000004da
913
914#define REG_A4XX_CP_EVENTS_IN_FLIGHT 0x000004dd
915
916#define REG_A4XX_CP_PERFCTR_CP_SEL_0 0x00000500
917
918#define REG_A4XX_CP_PERFCOMBINER_SELECT 0x0000050b
919
920static inline uint32_t REG_A4XX_CP_SCRATCH(uint32_t i0) { return 0x00000578 + 0x1*i0; }
921
922static inline uint32_t REG_A4XX_CP_SCRATCH_REG(uint32_t i0) { return 0x00000578 + 0x1*i0; }
923
924#define REG_A4XX_SP_VS_STATUS 0x00000ec0
925
926#define REG_A4XX_SP_PERFCTR_SP_SEL_11 0x00000ecf
927
928#define REG_A4XX_SP_SP_CTRL_REG 0x000022c0
929#define A4XX_SP_SP_CTRL_REG_BINNING_PASS 0x00080000
930
931#define REG_A4XX_SP_INSTR_CACHE_CTRL 0x000022c1
932
933#define REG_A4XX_SP_VS_CTRL_REG0 0x000022c4
934#define A4XX_SP_VS_CTRL_REG0_THREADMODE__MASK 0x00000001
935#define A4XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT 0
936static inline uint32_t A4XX_SP_VS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
937{
938 return ((val) << A4XX_SP_VS_CTRL_REG0_THREADMODE__SHIFT) & A4XX_SP_VS_CTRL_REG0_THREADMODE__MASK;
939}
940#define A4XX_SP_VS_CTRL_REG0_VARYING 0x00000002
941#define A4XX_SP_VS_CTRL_REG0_CACHEINVALID 0x00000004
942#define A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
943#define A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
944static inline uint32_t A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
945{
946 return ((val) << A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
947}
948#define A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0003fc00
949#define A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
950static inline uint32_t A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
951{
952 return ((val) << A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
953}
954#define A4XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__MASK 0x000c0000
955#define A4XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__SHIFT 18
956static inline uint32_t A4XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val)
957{
958 return ((val) << A4XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A4XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__MASK;
959}
960#define A4XX_SP_VS_CTRL_REG0_THREADSIZE__MASK 0x00100000
961#define A4XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT 20
962static inline uint32_t A4XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
963{
964 return ((val) << A4XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT) & A4XX_SP_VS_CTRL_REG0_THREADSIZE__MASK;
965}
966#define A4XX_SP_VS_CTRL_REG0_SUPERTHREADMODE 0x00200000
967#define A4XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x00400000
968
969#define REG_A4XX_SP_VS_CTRL_REG1 0x000022c5
970#define A4XX_SP_VS_CTRL_REG1_CONSTLENGTH__MASK 0x000000ff
971#define A4XX_SP_VS_CTRL_REG1_CONSTLENGTH__SHIFT 0
972static inline uint32_t A4XX_SP_VS_CTRL_REG1_CONSTLENGTH(uint32_t val)
973{
974 return ((val) << A4XX_SP_VS_CTRL_REG1_CONSTLENGTH__SHIFT) & A4XX_SP_VS_CTRL_REG1_CONSTLENGTH__MASK;
975}
976#define A4XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__MASK 0x7f000000
977#define A4XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__SHIFT 24
978static inline uint32_t A4XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING(uint32_t val)
979{
980 return ((val) << A4XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__SHIFT) & A4XX_SP_VS_CTRL_REG1_INITIALOUTSTANDING__MASK;
981}
982
983#define REG_A4XX_SP_VS_PARAM_REG 0x000022c6
984#define A4XX_SP_VS_PARAM_REG_POSREGID__MASK 0x000000ff
985#define A4XX_SP_VS_PARAM_REG_POSREGID__SHIFT 0
986static inline uint32_t A4XX_SP_VS_PARAM_REG_POSREGID(uint32_t val)
987{
988 return ((val) << A4XX_SP_VS_PARAM_REG_POSREGID__SHIFT) & A4XX_SP_VS_PARAM_REG_POSREGID__MASK;
989}
990#define A4XX_SP_VS_PARAM_REG_PSIZEREGID__MASK 0x0000ff00
991#define A4XX_SP_VS_PARAM_REG_PSIZEREGID__SHIFT 8
992static inline uint32_t A4XX_SP_VS_PARAM_REG_PSIZEREGID(uint32_t val)
993{
994 return ((val) << A4XX_SP_VS_PARAM_REG_PSIZEREGID__SHIFT) & A4XX_SP_VS_PARAM_REG_PSIZEREGID__MASK;
995}
996#define A4XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK 0xfff00000
997#define A4XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT 20
998static inline uint32_t A4XX_SP_VS_PARAM_REG_TOTALVSOUTVAR(uint32_t val)
999{
1000 return ((val) << A4XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT) & A4XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK;
1001}
1002
1003static inline uint32_t REG_A4XX_SP_VS_OUT(uint32_t i0) { return 0x000022c7 + 0x1*i0; }
1004
1005static inline uint32_t REG_A4XX_SP_VS_OUT_REG(uint32_t i0) { return 0x000022c7 + 0x1*i0; }
1006#define A4XX_SP_VS_OUT_REG_A_REGID__MASK 0x000001ff
1007#define A4XX_SP_VS_OUT_REG_A_REGID__SHIFT 0
1008static inline uint32_t A4XX_SP_VS_OUT_REG_A_REGID(uint32_t val)
1009{
1010 return ((val) << A4XX_SP_VS_OUT_REG_A_REGID__SHIFT) & A4XX_SP_VS_OUT_REG_A_REGID__MASK;
1011}
1012#define A4XX_SP_VS_OUT_REG_A_COMPMASK__MASK 0x00001e00
1013#define A4XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT 9
1014static inline uint32_t A4XX_SP_VS_OUT_REG_A_COMPMASK(uint32_t val)
1015{
1016 return ((val) << A4XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT) & A4XX_SP_VS_OUT_REG_A_COMPMASK__MASK;
1017}
1018#define A4XX_SP_VS_OUT_REG_B_REGID__MASK 0x01ff0000
1019#define A4XX_SP_VS_OUT_REG_B_REGID__SHIFT 16
1020static inline uint32_t A4XX_SP_VS_OUT_REG_B_REGID(uint32_t val)
1021{
1022 return ((val) << A4XX_SP_VS_OUT_REG_B_REGID__SHIFT) & A4XX_SP_VS_OUT_REG_B_REGID__MASK;
1023}
1024#define A4XX_SP_VS_OUT_REG_B_COMPMASK__MASK 0x1e000000
1025#define A4XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT 25
1026static inline uint32_t A4XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
1027{
1028 return ((val) << A4XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT) & A4XX_SP_VS_OUT_REG_B_COMPMASK__MASK;
1029}
1030
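/* Editor's note (illustrative): each SP_VS_OUT_REG packs two vertex-shader
 * outputs, an A half and a B half, so output j lands in register j/2 with
 * j & 1 selecting the half.  A sketch of filling them, where the 'regid'
 * and 'compmask' arrays are hypothetical and, in a real driver, the values
 * would more likely be emitted through the command stream than written
 * directly:
 *
 *	for (j = 0; j < nr_outputs; j += 2) {
 *		uint32_t reg = A4XX_SP_VS_OUT_REG_A_REGID(regid[j]) |
 *			A4XX_SP_VS_OUT_REG_A_COMPMASK(compmask[j]);
 *		if (j + 1 < nr_outputs)
 *			reg |= A4XX_SP_VS_OUT_REG_B_REGID(regid[j + 1]) |
 *				A4XX_SP_VS_OUT_REG_B_COMPMASK(compmask[j + 1]);
 *		gpu_write(gpu, REG_A4XX_SP_VS_OUT_REG(j / 2), reg);
 *	}
 */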
1031static inline uint32_t REG_A4XX_SP_VS_VPC_DST(uint32_t i0) { return 0x000022d8 + 0x1*i0; }
1032
1033static inline uint32_t REG_A4XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x000022d8 + 0x1*i0; }
1034#define A4XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
1035#define A4XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT 0
1036static inline uint32_t A4XX_SP_VS_VPC_DST_REG_OUTLOC0(uint32_t val)
1037{
1038 return ((val) << A4XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT) & A4XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK;
1039}
1040#define A4XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
1041#define A4XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT 8
1042static inline uint32_t A4XX_SP_VS_VPC_DST_REG_OUTLOC1(uint32_t val)
1043{
1044 return ((val) << A4XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT) & A4XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK;
1045}
1046#define A4XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
1047#define A4XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT 16
1048static inline uint32_t A4XX_SP_VS_VPC_DST_REG_OUTLOC2(uint32_t val)
1049{
1050 return ((val) << A4XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT) & A4XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK;
1051}
1052#define A4XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
1053#define A4XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT 24
1054static inline uint32_t A4XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
1055{
1056 return ((val) << A4XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT) & A4XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK;
1057}
1058
1059#define REG_A4XX_SP_VS_OBJ_OFFSET_REG 0x000022e0
1060#define A4XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
1061#define A4XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
1062static inline uint32_t A4XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
1063{
1064 return ((val) << A4XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
1065}
1066#define A4XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
1067#define A4XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
1068static inline uint32_t A4XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
1069{
1070 return ((val) << A4XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
1071}
1072
1073#define REG_A4XX_SP_VS_OBJ_START 0x000022e1
1074
1075#define REG_A4XX_SP_VS_PVT_MEM_PARAM 0x000022e2
1076
1077#define REG_A4XX_SP_VS_PVT_MEM_ADDR 0x000022e3
1078
1079#define REG_A4XX_SP_VS_LENGTH_REG 0x000022e5
1080
1081#define REG_A4XX_SP_FS_CTRL_REG0 0x000022e8
1082#define A4XX_SP_FS_CTRL_REG0_THREADMODE__MASK 0x00000001
1083#define A4XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT 0
1084static inline uint32_t A4XX_SP_FS_CTRL_REG0_THREADMODE(enum a3xx_threadmode val)
1085{
1086 return ((val) << A4XX_SP_FS_CTRL_REG0_THREADMODE__SHIFT) & A4XX_SP_FS_CTRL_REG0_THREADMODE__MASK;
1087}
1088#define A4XX_SP_FS_CTRL_REG0_VARYING 0x00000002
1089#define A4XX_SP_FS_CTRL_REG0_CACHEINVALID 0x00000004
1090#define A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
1091#define A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
1092static inline uint32_t A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
1093{
1094 return ((val) << A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
1095}
1096#define A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0003fc00
1097#define A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
1098static inline uint32_t A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
1099{
1100 return ((val) << A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
1101}
1102#define A4XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__MASK 0x000c0000
1103#define A4XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__SHIFT 18
1104static inline uint32_t A4XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val)
1105{
1106 return ((val) << A4XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A4XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__MASK;
1107}
1108#define A4XX_SP_FS_CTRL_REG0_THREADSIZE__MASK 0x00100000
1109#define A4XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT 20
1110static inline uint32_t A4XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
1111{
1112 return ((val) << A4XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT) & A4XX_SP_FS_CTRL_REG0_THREADSIZE__MASK;
1113}
1114#define A4XX_SP_FS_CTRL_REG0_SUPERTHREADMODE 0x00200000
1115#define A4XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x00400000
1116
1117#define REG_A4XX_SP_FS_CTRL_REG1 0x000022e9
1118#define A4XX_SP_FS_CTRL_REG1_CONSTLENGTH__MASK 0x000000ff
1119#define A4XX_SP_FS_CTRL_REG1_CONSTLENGTH__SHIFT 0
1120static inline uint32_t A4XX_SP_FS_CTRL_REG1_CONSTLENGTH(uint32_t val)
1121{
1122 return ((val) << A4XX_SP_FS_CTRL_REG1_CONSTLENGTH__SHIFT) & A4XX_SP_FS_CTRL_REG1_CONSTLENGTH__MASK;
1123}
1124#define A4XX_SP_FS_CTRL_REG1_VARYING 0x00100000
1125
1126#define REG_A4XX_SP_FS_OBJ_OFFSET_REG 0x000022ea
1127#define A4XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
1128#define A4XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
1129static inline uint32_t A4XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
1130{
1131 return ((val) << A4XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
1132}
1133#define A4XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
1134#define A4XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
1135static inline uint32_t A4XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
1136{
1137 return ((val) << A4XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
1138}
1139
1140#define REG_A4XX_SP_FS_OBJ_START 0x000022eb
1141
1142#define REG_A4XX_SP_FS_PVT_MEM_PARAM 0x000022ec
1143
1144#define REG_A4XX_SP_FS_PVT_MEM_ADDR 0x000022ed
1145
1146#define REG_A4XX_SP_FS_LENGTH_REG 0x000022ef
1147
1148#define REG_A4XX_SP_FS_OUTPUT_REG 0x000022f0
1149#define A4XX_SP_FS_OUTPUT_REG_DEPTH_ENABLE 0x00000080
1150#define A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID__MASK 0x0000ff00
1151#define A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID__SHIFT 8
1152static inline uint32_t A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID(uint32_t val)
1153{
1154 return ((val) << A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID__SHIFT) & A4XX_SP_FS_OUTPUT_REG_DEPTH_REGID__MASK;
1155}
1156
1157static inline uint32_t REG_A4XX_SP_FS_MRT(uint32_t i0) { return 0x000022f1 + 0x1*i0; }
1158
1159static inline uint32_t REG_A4XX_SP_FS_MRT_REG(uint32_t i0) { return 0x000022f1 + 0x1*i0; }
1160#define A4XX_SP_FS_MRT_REG_REGID__MASK 0x000000ff
1161#define A4XX_SP_FS_MRT_REG_REGID__SHIFT 0
1162static inline uint32_t A4XX_SP_FS_MRT_REG_REGID(uint32_t val)
1163{
1164 return ((val) << A4XX_SP_FS_MRT_REG_REGID__SHIFT) & A4XX_SP_FS_MRT_REG_REGID__MASK;
1165}
1166#define A4XX_SP_FS_MRT_REG_HALF_PRECISION 0x00000100
1167#define A4XX_SP_FS_MRT_REG_MRTFORMAT__MASK 0x0003f000
1168#define A4XX_SP_FS_MRT_REG_MRTFORMAT__SHIFT 12
1169static inline uint32_t A4XX_SP_FS_MRT_REG_MRTFORMAT(enum a4xx_color_fmt val)
1170{
1171 return ((val) << A4XX_SP_FS_MRT_REG_MRTFORMAT__SHIFT) & A4XX_SP_FS_MRT_REG_MRTFORMAT__MASK;
1172}
1173
1174#define REG_A4XX_SP_HS_OBJ_OFFSET_REG 0x0000230d
1175#define A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
1176#define A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
1177static inline uint32_t A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
1178{
1179 return ((val) << A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_SP_HS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
1180}
1181#define A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
1182#define A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
1183static inline uint32_t A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
1184{
1185 return ((val) << A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_HS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
1186}
1187
1188#define REG_A4XX_SP_DS_OBJ_OFFSET_REG 0x00002334
1189#define A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
1190#define A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
1191static inline uint32_t A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
1192{
1193 return ((val) << A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_SP_DS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
1194}
1195#define A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
1196#define A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
1197static inline uint32_t A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
1198{
1199 return ((val) << A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_DS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
1200}
1201
1202#define REG_A4XX_SP_GS_OBJ_OFFSET_REG 0x0000235b
1203#define A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
1204#define A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
1205static inline uint32_t A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
1206{
1207 return ((val) << A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_SP_GS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK;
1208}
1209#define A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK 0xfe000000
1210#define A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT 25
1211static inline uint32_t A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
1212{
1213 return ((val) << A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET__SHIFT) & A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET__MASK;
1214}
1215
1216#define REG_A4XX_SP_GS_LENGTH_REG 0x00002360
1217
1218#define REG_A4XX_VPC_DEBUG_RAM_SEL 0x00000e60
1219
1220#define REG_A4XX_VPC_DEBUG_RAM_READ 0x00000e61
1221
1222#define REG_A4XX_VPC_DEBUG_ECO_CONTROL 0x00000e64
1223
1224#define REG_A4XX_VPC_PERFCTR_VPC_SEL_3 0x00000e68
1225
1226#define REG_A4XX_VPC_ATTR 0x00002140
1227#define A4XX_VPC_ATTR_TOTALATTR__MASK 0x000001ff
1228#define A4XX_VPC_ATTR_TOTALATTR__SHIFT 0
1229static inline uint32_t A4XX_VPC_ATTR_TOTALATTR(uint32_t val)
1230{
1231 return ((val) << A4XX_VPC_ATTR_TOTALATTR__SHIFT) & A4XX_VPC_ATTR_TOTALATTR__MASK;
1232}
1233#define A4XX_VPC_ATTR_PSIZE 0x00000200
1234#define A4XX_VPC_ATTR_THRDASSIGN__MASK 0x00003000
1235#define A4XX_VPC_ATTR_THRDASSIGN__SHIFT 12
1236static inline uint32_t A4XX_VPC_ATTR_THRDASSIGN(uint32_t val)
1237{
1238 return ((val) << A4XX_VPC_ATTR_THRDASSIGN__SHIFT) & A4XX_VPC_ATTR_THRDASSIGN__MASK;
1239}
1240#define A4XX_VPC_ATTR_ENABLE 0x02000000
1241
1242#define REG_A4XX_VPC_PACK 0x00002141
1243#define A4XX_VPC_PACK_NUMBYPASSVAR__MASK 0x000000ff
1244#define A4XX_VPC_PACK_NUMBYPASSVAR__SHIFT 0
1245static inline uint32_t A4XX_VPC_PACK_NUMBYPASSVAR(uint32_t val)
1246{
1247 return ((val) << A4XX_VPC_PACK_NUMBYPASSVAR__SHIFT) & A4XX_VPC_PACK_NUMBYPASSVAR__MASK;
1248}
1249#define A4XX_VPC_PACK_NUMFPNONPOSVAR__MASK 0x0000ff00
1250#define A4XX_VPC_PACK_NUMFPNONPOSVAR__SHIFT 8
1251static inline uint32_t A4XX_VPC_PACK_NUMFPNONPOSVAR(uint32_t val)
1252{
1253 return ((val) << A4XX_VPC_PACK_NUMFPNONPOSVAR__SHIFT) & A4XX_VPC_PACK_NUMFPNONPOSVAR__MASK;
1254}
1255#define A4XX_VPC_PACK_NUMNONPOSVSVAR__MASK 0x00ff0000
1256#define A4XX_VPC_PACK_NUMNONPOSVSVAR__SHIFT 16
1257static inline uint32_t A4XX_VPC_PACK_NUMNONPOSVSVAR(uint32_t val)
1258{
1259 return ((val) << A4XX_VPC_PACK_NUMNONPOSVSVAR__SHIFT) & A4XX_VPC_PACK_NUMNONPOSVSVAR__MASK;
1260}
1261
1262static inline uint32_t REG_A4XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x00002142 + 0x1*i0; }
1263
1264static inline uint32_t REG_A4XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x00002142 + 0x1*i0; }
1265
1266static inline uint32_t REG_A4XX_VPC_VARYING_PS_REPL(uint32_t i0) { return 0x0000214a + 0x1*i0; }
1267
1268static inline uint32_t REG_A4XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0x0000214a + 0x1*i0; }
1269
1270#define REG_A4XX_VPC_SO_FLUSH_WADDR_3 0x0000216e
1271
1272#define REG_A4XX_VSC_BIN_SIZE 0x00000c00
1273#define A4XX_VSC_BIN_SIZE_WIDTH__MASK 0x0000001f
1274#define A4XX_VSC_BIN_SIZE_WIDTH__SHIFT 0
1275static inline uint32_t A4XX_VSC_BIN_SIZE_WIDTH(uint32_t val)
1276{
1277 return ((val >> 5) << A4XX_VSC_BIN_SIZE_WIDTH__SHIFT) & A4XX_VSC_BIN_SIZE_WIDTH__MASK;
1278}
1279#define A4XX_VSC_BIN_SIZE_HEIGHT__MASK 0x000003e0
1280#define A4XX_VSC_BIN_SIZE_HEIGHT__SHIFT 5
1281static inline uint32_t A4XX_VSC_BIN_SIZE_HEIGHT(uint32_t val)
1282{
1283 return ((val >> 5) << A4XX_VSC_BIN_SIZE_HEIGHT__SHIFT) & A4XX_VSC_BIN_SIZE_HEIGHT__MASK;
1284}
1285
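/* Editor's note: both fields pre-shift by 5, i.e. the bin width and height
 * are stored in 32-pixel units.  Worked example (not generated code): for
 * 64x32 visibility-stream bins,
 *
 *	A4XX_VSC_BIN_SIZE_WIDTH(64)  = (64 >> 5) << 0 = 0x002
 *	A4XX_VSC_BIN_SIZE_HEIGHT(32) = (32 >> 5) << 5 = 0x020
 *
 * giving a combined REG_A4XX_VSC_BIN_SIZE value of 0x022.
 */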
1286#define REG_A4XX_VSC_SIZE_ADDRESS 0x00000c01
1287
1288#define REG_A4XX_VSC_SIZE_ADDRESS2 0x00000c02
1289
1290#define REG_A4XX_VSC_DEBUG_ECO_CONTROL 0x00000c03
1291
1292static inline uint32_t REG_A4XX_VSC_PIPE_CONFIG(uint32_t i0) { return 0x00000c08 + 0x1*i0; }
1293
1294static inline uint32_t REG_A4XX_VSC_PIPE_CONFIG_REG(uint32_t i0) { return 0x00000c08 + 0x1*i0; }
1295#define A4XX_VSC_PIPE_CONFIG_REG_X__MASK 0x000003ff
1296#define A4XX_VSC_PIPE_CONFIG_REG_X__SHIFT 0
1297static inline uint32_t A4XX_VSC_PIPE_CONFIG_REG_X(uint32_t val)
1298{
1299 return ((val) << A4XX_VSC_PIPE_CONFIG_REG_X__SHIFT) & A4XX_VSC_PIPE_CONFIG_REG_X__MASK;
1300}
1301#define A4XX_VSC_PIPE_CONFIG_REG_Y__MASK 0x000ffc00
1302#define A4XX_VSC_PIPE_CONFIG_REG_Y__SHIFT 10
1303static inline uint32_t A4XX_VSC_PIPE_CONFIG_REG_Y(uint32_t val)
1304{
1305 return ((val) << A4XX_VSC_PIPE_CONFIG_REG_Y__SHIFT) & A4XX_VSC_PIPE_CONFIG_REG_Y__MASK;
1306}
1307#define A4XX_VSC_PIPE_CONFIG_REG_W__MASK 0x00f00000
1308#define A4XX_VSC_PIPE_CONFIG_REG_W__SHIFT 20
1309static inline uint32_t A4XX_VSC_PIPE_CONFIG_REG_W(uint32_t val)
1310{
1311 return ((val) << A4XX_VSC_PIPE_CONFIG_REG_W__SHIFT) & A4XX_VSC_PIPE_CONFIG_REG_W__MASK;
1312}
1313#define A4XX_VSC_PIPE_CONFIG_REG_H__MASK 0x0f000000
1314#define A4XX_VSC_PIPE_CONFIG_REG_H__SHIFT 24
1315static inline uint32_t A4XX_VSC_PIPE_CONFIG_REG_H(uint32_t val)
1316{
1317 return ((val) << A4XX_VSC_PIPE_CONFIG_REG_H__SHIFT) & A4XX_VSC_PIPE_CONFIG_REG_H__MASK;
1318}
1319
1320static inline uint32_t REG_A4XX_VSC_PIPE_DATA_ADDRESS(uint32_t i0) { return 0x00000c10 + 0x1*i0; }
1321
1322static inline uint32_t REG_A4XX_VSC_PIPE_DATA_ADDRESS_REG(uint32_t i0) { return 0x00000c10 + 0x1*i0; }
1323
1324static inline uint32_t REG_A4XX_VSC_PIPE_DATA_LENGTH(uint32_t i0) { return 0x00000c18 + 0x1*i0; }
1325
1326static inline uint32_t REG_A4XX_VSC_PIPE_DATA_LENGTH_REG(uint32_t i0) { return 0x00000c18 + 0x1*i0; }
1327
1328#define REG_A4XX_VSC_PIPE_PARTIAL_POSN_1 0x00000c41
1329
1330#define REG_A4XX_VSC_PERFCTR_VSC_SEL_0 0x00000c50
1331
1332#define REG_A4XX_VSC_PERFCTR_VSC_SEL_1 0x00000c51
1333
1334#define REG_A4XX_VFD_DEBUG_CONTROL 0x00000e40
1335
1336#define REG_A4XX_VFD_PERFCTR_VFD_SEL_7 0x00000e4a
1337
1338#define REG_A4XX_VFD_CONTROL_0 0x00002200
1339#define A4XX_VFD_CONTROL_0_TOTALATTRTOVS__MASK 0x000000ff
1340#define A4XX_VFD_CONTROL_0_TOTALATTRTOVS__SHIFT 0
1341static inline uint32_t A4XX_VFD_CONTROL_0_TOTALATTRTOVS(uint32_t val)
1342{
1343 return ((val) << A4XX_VFD_CONTROL_0_TOTALATTRTOVS__SHIFT) & A4XX_VFD_CONTROL_0_TOTALATTRTOVS__MASK;
1344}
1345#define A4XX_VFD_CONTROL_0_BYPASSATTROVS__MASK 0x0001fe00
1346#define A4XX_VFD_CONTROL_0_BYPASSATTROVS__SHIFT 9
1347static inline uint32_t A4XX_VFD_CONTROL_0_BYPASSATTROVS(uint32_t val)
1348{
1349 return ((val) << A4XX_VFD_CONTROL_0_BYPASSATTROVS__SHIFT) & A4XX_VFD_CONTROL_0_BYPASSATTROVS__MASK;
1350}
1351#define A4XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK 0x03f00000
1352#define A4XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT 20
1353static inline uint32_t A4XX_VFD_CONTROL_0_STRMDECINSTRCNT(uint32_t val)
1354{
1355 return ((val) << A4XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT) & A4XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK;
1356}
1357#define A4XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__MASK 0xfc000000
1358#define A4XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__SHIFT 26
1359static inline uint32_t A4XX_VFD_CONTROL_0_STRMFETCHINSTRCNT(uint32_t val)
1360{
1361 return ((val) << A4XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__SHIFT) & A4XX_VFD_CONTROL_0_STRMFETCHINSTRCNT__MASK;
1362}
1363
1364#define REG_A4XX_VFD_CONTROL_1 0x00002201
1365#define A4XX_VFD_CONTROL_1_MAXSTORAGE__MASK 0x0000ffff
1366#define A4XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT 0
1367static inline uint32_t A4XX_VFD_CONTROL_1_MAXSTORAGE(uint32_t val)
1368{
1369 return ((val) << A4XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT) & A4XX_VFD_CONTROL_1_MAXSTORAGE__MASK;
1370}
1371#define A4XX_VFD_CONTROL_1_REGID4VTX__MASK 0x00ff0000
1372#define A4XX_VFD_CONTROL_1_REGID4VTX__SHIFT 16
1373static inline uint32_t A4XX_VFD_CONTROL_1_REGID4VTX(uint32_t val)
1374{
1375 return ((val) << A4XX_VFD_CONTROL_1_REGID4VTX__SHIFT) & A4XX_VFD_CONTROL_1_REGID4VTX__MASK;
1376}
1377#define A4XX_VFD_CONTROL_1_REGID4INST__MASK 0xff000000
1378#define A4XX_VFD_CONTROL_1_REGID4INST__SHIFT 24
1379static inline uint32_t A4XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
1380{
1381 return ((val) << A4XX_VFD_CONTROL_1_REGID4INST__SHIFT) & A4XX_VFD_CONTROL_1_REGID4INST__MASK;
1382}
1383
1384#define REG_A4XX_VFD_CONTROL_2 0x00002202
1385
1386#define REG_A4XX_VFD_CONTROL_3 0x00002203
1387
1388#define REG_A4XX_VFD_CONTROL_4 0x00002204
1389
1390#define REG_A4XX_VFD_INDEX_OFFSET 0x00002208
1391
1392static inline uint32_t REG_A4XX_VFD_FETCH(uint32_t i0) { return 0x0000220a + 0x4*i0; }
1393
1394static inline uint32_t REG_A4XX_VFD_FETCH_INSTR_0(uint32_t i0) { return 0x0000220a + 0x4*i0; }
1395#define A4XX_VFD_FETCH_INSTR_0_FETCHSIZE__MASK 0x0000007f
1396#define A4XX_VFD_FETCH_INSTR_0_FETCHSIZE__SHIFT 0
1397static inline uint32_t A4XX_VFD_FETCH_INSTR_0_FETCHSIZE(uint32_t val)
1398{
1399 return ((val) << A4XX_VFD_FETCH_INSTR_0_FETCHSIZE__SHIFT) & A4XX_VFD_FETCH_INSTR_0_FETCHSIZE__MASK;
1400}
1401#define A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK 0x0001ff80
1402#define A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT 7
1403static inline uint32_t A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE(uint32_t val)
1404{
1405 return ((val) << A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE__SHIFT) & A4XX_VFD_FETCH_INSTR_0_BUFSTRIDE__MASK;
1406}
1407#define A4XX_VFD_FETCH_INSTR_0_SWITCHNEXT 0x00080000
1408#define A4XX_VFD_FETCH_INSTR_0_STEPRATE__MASK 0xff000000
1409#define A4XX_VFD_FETCH_INSTR_0_STEPRATE__SHIFT 24
1410static inline uint32_t A4XX_VFD_FETCH_INSTR_0_STEPRATE(uint32_t val)
1411{
1412 return ((val) << A4XX_VFD_FETCH_INSTR_0_STEPRATE__SHIFT) & A4XX_VFD_FETCH_INSTR_0_STEPRATE__MASK;
1413}
1414
1415static inline uint32_t REG_A4XX_VFD_FETCH_INSTR_1(uint32_t i0) { return 0x0000220b + 0x4*i0; }
1416
1417static inline uint32_t REG_A4XX_VFD_FETCH_INSTR_2(uint32_t i0) { return 0x0000220c + 0x4*i0; }
1418#define A4XX_VFD_FETCH_INSTR_2_SIZE__MASK 0xfffffff0
1419#define A4XX_VFD_FETCH_INSTR_2_SIZE__SHIFT 4
1420static inline uint32_t A4XX_VFD_FETCH_INSTR_2_SIZE(uint32_t val)
1421{
1422 return ((val >> 4) << A4XX_VFD_FETCH_INSTR_2_SIZE__SHIFT) & A4XX_VFD_FETCH_INSTR_2_SIZE__MASK;
1423}
1424
1425static inline uint32_t REG_A4XX_VFD_FETCH_INSTR_3(uint32_t i0) { return 0x0000220d + 0x4*i0; }
1426
1427static inline uint32_t REG_A4XX_VFD_DECODE(uint32_t i0) { return 0x0000228a + 0x1*i0; }
1428
1429static inline uint32_t REG_A4XX_VFD_DECODE_INSTR(uint32_t i0) { return 0x0000228a + 0x1*i0; }
1430#define A4XX_VFD_DECODE_INSTR_WRITEMASK__MASK 0x0000000f
1431#define A4XX_VFD_DECODE_INSTR_WRITEMASK__SHIFT 0
1432static inline uint32_t A4XX_VFD_DECODE_INSTR_WRITEMASK(uint32_t val)
1433{
1434 return ((val) << A4XX_VFD_DECODE_INSTR_WRITEMASK__SHIFT) & A4XX_VFD_DECODE_INSTR_WRITEMASK__MASK;
1435}
1436#define A4XX_VFD_DECODE_INSTR_CONSTFILL 0x00000010
1437#define A4XX_VFD_DECODE_INSTR_FORMAT__MASK 0x00000fc0
1438#define A4XX_VFD_DECODE_INSTR_FORMAT__SHIFT 6
1439static inline uint32_t A4XX_VFD_DECODE_INSTR_FORMAT(enum a4xx_vtx_fmt val)
1440{
1441 return ((val) << A4XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A4XX_VFD_DECODE_INSTR_FORMAT__MASK;
1442}
1443#define A4XX_VFD_DECODE_INSTR_REGID__MASK 0x000ff000
1444#define A4XX_VFD_DECODE_INSTR_REGID__SHIFT 12
1445static inline uint32_t A4XX_VFD_DECODE_INSTR_REGID(uint32_t val)
1446{
1447 return ((val) << A4XX_VFD_DECODE_INSTR_REGID__SHIFT) & A4XX_VFD_DECODE_INSTR_REGID__MASK;
1448}
1449#define A4XX_VFD_DECODE_INSTR_SWAP__MASK 0x00c00000
1450#define A4XX_VFD_DECODE_INSTR_SWAP__SHIFT 22
1451static inline uint32_t A4XX_VFD_DECODE_INSTR_SWAP(enum a3xx_color_swap val)
1452{
1453 return ((val) << A4XX_VFD_DECODE_INSTR_SWAP__SHIFT) & A4XX_VFD_DECODE_INSTR_SWAP__MASK;
1454}
1455#define A4XX_VFD_DECODE_INSTR_SHIFTCNT__MASK 0x1f000000
1456#define A4XX_VFD_DECODE_INSTR_SHIFTCNT__SHIFT 24
1457static inline uint32_t A4XX_VFD_DECODE_INSTR_SHIFTCNT(uint32_t val)
1458{
1459 return ((val) << A4XX_VFD_DECODE_INSTR_SHIFTCNT__SHIFT) & A4XX_VFD_DECODE_INSTR_SHIFTCNT__MASK;
1460}
1461#define A4XX_VFD_DECODE_INSTR_LASTCOMPVALID 0x20000000
1462#define A4XX_VFD_DECODE_INSTR_SWITCHNEXT 0x40000000
1463
1464#define REG_A4XX_TPL1_DEBUG_ECO_CONTROL 0x00000f00
1465
1466#define REG_A4XX_TPL1_PERFCTR_TP_SEL_7 0x00000f0b
1467
1468#define REG_A4XX_TPL1_TP_TEX_OFFSET 0x00002380
1469
1470#define REG_A4XX_TPL1_TP_CS_TEXMEMOBJ_BASE_ADDR 0x000023a6
1471
1472#define REG_A4XX_GRAS_TSE_STATUS 0x00000c80
1473
1474#define REG_A4XX_GRAS_DEBUG_ECO_CONTROL 0x00000c81
1475
1476#define REG_A4XX_GRAS_PERFCTR_TSE_SEL_0 0x00000c88
1477
1478#define REG_A4XX_GRAS_PERFCTR_TSE_SEL_3 0x00000c8b
1479
1480#define REG_A4XX_GRAS_CL_CLIP_CNTL 0x00002000
1481
1482#define REG_A4XX_GRAS_CLEAR_CNTL 0x00002003
1483#define A4XX_GRAS_CLEAR_CNTL_NOT_FASTCLEAR 0x00000001
1484
1485#define REG_A4XX_GRAS_CL_GB_CLIP_ADJ 0x00002004
1486#define A4XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK 0x000003ff
1487#define A4XX_GRAS_CL_GB_CLIP_ADJ_HORZ__SHIFT 0
1488static inline uint32_t A4XX_GRAS_CL_GB_CLIP_ADJ_HORZ(uint32_t val)
1489{
1490 return ((val) << A4XX_GRAS_CL_GB_CLIP_ADJ_HORZ__SHIFT) & A4XX_GRAS_CL_GB_CLIP_ADJ_HORZ__MASK;
1491}
1492#define A4XX_GRAS_CL_GB_CLIP_ADJ_VERT__MASK 0x000ffc00
1493#define A4XX_GRAS_CL_GB_CLIP_ADJ_VERT__SHIFT 10
1494static inline uint32_t A4XX_GRAS_CL_GB_CLIP_ADJ_VERT(uint32_t val)
1495{
1496 return ((val) << A4XX_GRAS_CL_GB_CLIP_ADJ_VERT__SHIFT) & A4XX_GRAS_CL_GB_CLIP_ADJ_VERT__MASK;
1497}
1498
1499#define REG_A4XX_GRAS_CL_VPORT_XOFFSET_0 0x00002008
1500#define A4XX_GRAS_CL_VPORT_XOFFSET_0__MASK 0xffffffff
1501#define A4XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT 0
1502static inline uint32_t A4XX_GRAS_CL_VPORT_XOFFSET_0(float val)
1503{
1504 return ((fui(val)) << A4XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT) & A4XX_GRAS_CL_VPORT_XOFFSET_0__MASK;
1505}
1506
1507#define REG_A4XX_GRAS_CL_VPORT_XSCALE_0 0x00002009
1508#define A4XX_GRAS_CL_VPORT_XSCALE_0__MASK 0xffffffff
1509#define A4XX_GRAS_CL_VPORT_XSCALE_0__SHIFT 0
1510static inline uint32_t A4XX_GRAS_CL_VPORT_XSCALE_0(float val)
1511{
1512 return ((fui(val)) << A4XX_GRAS_CL_VPORT_XSCALE_0__SHIFT) & A4XX_GRAS_CL_VPORT_XSCALE_0__MASK;
1513}
1514
1515#define REG_A4XX_GRAS_CL_VPORT_YOFFSET_0 0x0000200a
1516#define A4XX_GRAS_CL_VPORT_YOFFSET_0__MASK 0xffffffff
1517#define A4XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT 0
1518static inline uint32_t A4XX_GRAS_CL_VPORT_YOFFSET_0(float val)
1519{
1520 return ((fui(val)) << A4XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT) & A4XX_GRAS_CL_VPORT_YOFFSET_0__MASK;
1521}
1522
1523#define REG_A4XX_GRAS_CL_VPORT_YSCALE_0 0x0000200b
1524#define A4XX_GRAS_CL_VPORT_YSCALE_0__MASK 0xffffffff
1525#define A4XX_GRAS_CL_VPORT_YSCALE_0__SHIFT 0
1526static inline uint32_t A4XX_GRAS_CL_VPORT_YSCALE_0(float val)
1527{
1528 return ((fui(val)) << A4XX_GRAS_CL_VPORT_YSCALE_0__SHIFT) & A4XX_GRAS_CL_VPORT_YSCALE_0__MASK;
1529}
1530
1531#define REG_A4XX_GRAS_CL_VPORT_ZOFFSET_0 0x0000200c
1532#define A4XX_GRAS_CL_VPORT_ZOFFSET_0__MASK 0xffffffff
1533#define A4XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT 0
1534static inline uint32_t A4XX_GRAS_CL_VPORT_ZOFFSET_0(float val)
1535{
1536 return ((fui(val)) << A4XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT) & A4XX_GRAS_CL_VPORT_ZOFFSET_0__MASK;
1537}
1538
1539#define REG_A4XX_GRAS_CL_VPORT_ZSCALE_0 0x0000200d
1540#define A4XX_GRAS_CL_VPORT_ZSCALE_0__MASK 0xffffffff
1541#define A4XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT 0
1542static inline uint32_t A4XX_GRAS_CL_VPORT_ZSCALE_0(float val)
1543{
1544 return ((fui(val)) << A4XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT) & A4XX_GRAS_CL_VPORT_ZSCALE_0__MASK;
1545}
1546
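/* Editor's note: the viewport registers hold raw IEEE-754 floats, so the
 * helpers above run the value through fui() -- a float-to-uint bit cast
 * defined elsewhere in the msm driver -- before the usual shift/mask.  A
 * minimal stand-alone equivalent, for illustration only:
 */
static inline uint32_t example_fui(float f)
{
	union { float f; uint32_t ui; } u = { .f = f };
	return u.ui;	/* e.g. example_fui(1.0f) == 0x3f800000 */
}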
1547#define REG_A4XX_GRAS_SU_POINT_MINMAX 0x00002070
1548#define A4XX_GRAS_SU_POINT_MINMAX_MIN__MASK 0x0000ffff
1549#define A4XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT 0
1550static inline uint32_t A4XX_GRAS_SU_POINT_MINMAX_MIN(float val)
1551{
1552 return ((((uint32_t)(val * 16.0))) << A4XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A4XX_GRAS_SU_POINT_MINMAX_MIN__MASK;
1553}
1554#define A4XX_GRAS_SU_POINT_MINMAX_MAX__MASK 0xffff0000
1555#define A4XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT 16
1556static inline uint32_t A4XX_GRAS_SU_POINT_MINMAX_MAX(float val)
1557{
1558 return ((((uint32_t)(val * 16.0))) << A4XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A4XX_GRAS_SU_POINT_MINMAX_MAX__MASK;
1559}
1560
1561#define REG_A4XX_GRAS_SU_POINT_SIZE 0x00002071
1562#define A4XX_GRAS_SU_POINT_SIZE__MASK 0xffffffff
1563#define A4XX_GRAS_SU_POINT_SIZE__SHIFT 0
1564static inline uint32_t A4XX_GRAS_SU_POINT_SIZE(float val)
1565{
1566 return ((((int32_t)(val * 16.0))) << A4XX_GRAS_SU_POINT_SIZE__SHIFT) & A4XX_GRAS_SU_POINT_SIZE__MASK;
1567}
1568
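/* Editor's note: unlike the viewport registers, the point min/max and size
 * fields are fixed point with four fractional bits (the val * 16.0 above),
 * not IEEE floats.  Worked example: A4XX_GRAS_SU_POINT_SIZE(2.5f) stores
 * (int32_t)(2.5 * 16) = 40 = 0x28, i.e. 2.5 in 12.4 fixed point.  The
 * LINEHALFWIDTH field further down uses the same scheme with two
 * fractional bits (val * 4.0).
 */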
1569#define REG_A4XX_GRAS_ALPHA_CONTROL 0x00002073
1570#define A4XX_GRAS_ALPHA_CONTROL_ALPHA_TEST_ENABLE 0x00000004
1571
1572#define REG_A4XX_GRAS_SU_POLY_OFFSET_SCALE 0x00002074
1573#define A4XX_GRAS_SU_POLY_OFFSET_SCALE__MASK 0xffffffff
1574#define A4XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT 0
1575static inline uint32_t A4XX_GRAS_SU_POLY_OFFSET_SCALE(float val)
1576{
1577 return ((fui(val)) << A4XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT) & A4XX_GRAS_SU_POLY_OFFSET_SCALE__MASK;
1578}
1579
1580#define REG_A4XX_GRAS_SU_POLY_OFFSET_OFFSET 0x00002075
1581#define A4XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK 0xffffffff
1582#define A4XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT 0
1583static inline uint32_t A4XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
1584{
1585 return ((fui(val)) << A4XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A4XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
1586}
1587
1588#define REG_A4XX_GRAS_SC_EXTENT_WINDOW_TL 0x0000209f
1589
1590#define REG_A4XX_GRAS_SC_SCREEN_SCISSOR_TL 0x0000207c
1591#define A4XX_GRAS_SC_SCREEN_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
1592#define A4XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK 0x00007fff
1593#define A4XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT 0
1594static inline uint32_t A4XX_GRAS_SC_SCREEN_SCISSOR_TL_X(uint32_t val)
1595{
1596 return ((val) << A4XX_GRAS_SC_SCREEN_SCISSOR_TL_X__SHIFT) & A4XX_GRAS_SC_SCREEN_SCISSOR_TL_X__MASK;
1597}
1598#define A4XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK 0x7fff0000
1599#define A4XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT 16
1600static inline uint32_t A4XX_GRAS_SC_SCREEN_SCISSOR_TL_Y(uint32_t val)
1601{
1602 return ((val) << A4XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__SHIFT) & A4XX_GRAS_SC_SCREEN_SCISSOR_TL_Y__MASK;
1603}
1604
1605#define REG_A4XX_GRAS_SC_SCREEN_SCISSOR_BR 0x0000207d
1606#define A4XX_GRAS_SC_SCREEN_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
1607#define A4XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK 0x00007fff
1608#define A4XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT 0
1609static inline uint32_t A4XX_GRAS_SC_SCREEN_SCISSOR_BR_X(uint32_t val)
1610{
1611 return ((val) << A4XX_GRAS_SC_SCREEN_SCISSOR_BR_X__SHIFT) & A4XX_GRAS_SC_SCREEN_SCISSOR_BR_X__MASK;
1612}
1613#define A4XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK 0x7fff0000
1614#define A4XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT 16
1615static inline uint32_t A4XX_GRAS_SC_SCREEN_SCISSOR_BR_Y(uint32_t val)
1616{
1617 return ((val) << A4XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__SHIFT) & A4XX_GRAS_SC_SCREEN_SCISSOR_BR_Y__MASK;
1618}
1619
1620#define REG_A4XX_GRAS_SC_WINDOW_SCISSOR_BR 0x0000209c
1621#define A4XX_GRAS_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
1622#define A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK 0x00007fff
1623#define A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT 0
1624static inline uint32_t A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X(uint32_t val)
1625{
1626 return ((val) << A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK;
1627}
1628#define A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK 0x7fff0000
1629#define A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16
1630static inline uint32_t A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
1631{
1632 return ((val) << A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK;
1633}
1634
1635#define REG_A4XX_GRAS_SC_WINDOW_SCISSOR_TL 0x0000209d
1636#define A4XX_GRAS_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
1637#define A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK 0x00007fff
1638#define A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT 0
1639static inline uint32_t A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X(uint32_t val)
1640{
1641 return ((val) << A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A4XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK;
1642}
1643#define A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK 0x7fff0000
1644#define A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16
1645static inline uint32_t A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
1646{
1647 return ((val) << A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A4XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK;
1648}
1649
1650#define REG_A4XX_GRAS_DEPTH_CONTROL 0x00002077
1651#define A4XX_GRAS_DEPTH_CONTROL_FORMAT__MASK 0x00000003
1652#define A4XX_GRAS_DEPTH_CONTROL_FORMAT__SHIFT 0
1653static inline uint32_t A4XX_GRAS_DEPTH_CONTROL_FORMAT(enum a4xx_depth_format val)
1654{
1655 return ((val) << A4XX_GRAS_DEPTH_CONTROL_FORMAT__SHIFT) & A4XX_GRAS_DEPTH_CONTROL_FORMAT__MASK;
1656}
1657
1658#define REG_A4XX_GRAS_SU_MODE_CONTROL 0x00002078
1659#define A4XX_GRAS_SU_MODE_CONTROL_CULL_FRONT 0x00000001
1660#define A4XX_GRAS_SU_MODE_CONTROL_CULL_BACK 0x00000002
1661#define A4XX_GRAS_SU_MODE_CONTROL_FRONT_CW 0x00000004
1662#define A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK 0x000007f8
1663#define A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT 3
1664static inline uint32_t A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH(float val)
1665{
1666 return ((((int32_t)(val * 4.0))) << A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__SHIFT) & A4XX_GRAS_SU_MODE_CONTROL_LINEHALFWIDTH__MASK;
1667}
1668#define A4XX_GRAS_SU_MODE_CONTROL_POLY_OFFSET 0x00000800
1669#define A4XX_GRAS_SU_MODE_CONTROL_RENDERING_PASS 0x00100000
1670
1671#define REG_A4XX_GRAS_SC_CONTROL 0x0000207b
1672#define A4XX_GRAS_SC_CONTROL_RENDER_MODE__MASK 0x0000000c
1673#define A4XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT 2
1674static inline uint32_t A4XX_GRAS_SC_CONTROL_RENDER_MODE(enum a3xx_render_mode val)
1675{
1676 return ((val) << A4XX_GRAS_SC_CONTROL_RENDER_MODE__SHIFT) & A4XX_GRAS_SC_CONTROL_RENDER_MODE__MASK;
1677}
1678#define A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK 0x00000380
1679#define A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT 7
1680static inline uint32_t A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES(uint32_t val)
1681{
1682 return ((val) << A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__SHIFT) & A4XX_GRAS_SC_CONTROL_MSAA_SAMPLES__MASK;
1683}
1684#define A4XX_GRAS_SC_CONTROL_MSAA_DISABLE 0x00000800
1685#define A4XX_GRAS_SC_CONTROL_RASTER_MODE__MASK 0x0000f000
1686#define A4XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT 12
1687static inline uint32_t A4XX_GRAS_SC_CONTROL_RASTER_MODE(uint32_t val)
1688{
1689 return ((val) << A4XX_GRAS_SC_CONTROL_RASTER_MODE__SHIFT) & A4XX_GRAS_SC_CONTROL_RASTER_MODE__MASK;
1690}
1691
1692#define REG_A4XX_UCHE_CACHE_MODE_CONTROL 0x00000e80
1693
1694#define REG_A4XX_UCHE_TRAP_BASE_LO 0x00000e83
1695
1696#define REG_A4XX_UCHE_TRAP_BASE_HI 0x00000e84
1697
1698#define REG_A4XX_UCHE_CACHE_STATUS 0x00000e88
1699
1700#define REG_A4XX_UCHE_INVALIDATE0 0x00000e8a
1701
1702#define REG_A4XX_UCHE_INVALIDATE1 0x00000e8b
1703
1704#define REG_A4XX_UCHE_CACHE_WAYS_VFD 0x00000e8c
1705
1706#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_7 0x00000e95
1707
1708#define REG_A4XX_HLSQ_TIMEOUT_THRESHOLD 0x00000e00
1709
1710#define REG_A4XX_HLSQ_DEBUG_ECO_CONTROL 0x00000e04
1711
1712#define REG_A4XX_HLSQ_PERF_PIPE_MASK 0x00000e0e
1713
1714#define REG_A4XX_HLSQ_CONTROL_0_REG 0x000023c0
1715#define A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK 0x00000010
1716#define A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT 4
1717static inline uint32_t A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE(enum a3xx_threadsize val)
1718{
1719 return ((val) << A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT) & A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK;
1720}
1721#define A4XX_HLSQ_CONTROL_0_REG_FSSUPERTHREADENABLE 0x00000040
1722#define A4XX_HLSQ_CONTROL_0_REG_SPSHADERRESTART 0x00000200
1723#define A4XX_HLSQ_CONTROL_0_REG_RESERVED2 0x00000400
1724#define A4XX_HLSQ_CONTROL_0_REG_CHUNKDISABLE 0x04000000
1725#define A4XX_HLSQ_CONTROL_0_REG_CONSTMODE__MASK 0x08000000
1726#define A4XX_HLSQ_CONTROL_0_REG_CONSTMODE__SHIFT 27
1727static inline uint32_t A4XX_HLSQ_CONTROL_0_REG_CONSTMODE(uint32_t val)
1728{
1729 return ((val) << A4XX_HLSQ_CONTROL_0_REG_CONSTMODE__SHIFT) & A4XX_HLSQ_CONTROL_0_REG_CONSTMODE__MASK;
1730}
1731#define A4XX_HLSQ_CONTROL_0_REG_LAZYUPDATEDISABLE 0x10000000
1732#define A4XX_HLSQ_CONTROL_0_REG_SPCONSTFULLUPDATE 0x20000000
1733#define A4XX_HLSQ_CONTROL_0_REG_TPFULLUPDATE 0x40000000
1734#define A4XX_HLSQ_CONTROL_0_REG_SINGLECONTEXT 0x80000000
1735
1736#define REG_A4XX_HLSQ_CONTROL_1_REG 0x000023c1
1737#define A4XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK 0x00000040
1738#define A4XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT 6
1739static inline uint32_t A4XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE(enum a3xx_threadsize val)
1740{
1741 return ((val) << A4XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT) & A4XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK;
1742}
1743#define A4XX_HLSQ_CONTROL_1_REG_VSSUPERTHREADENABLE 0x00000100
1744#define A4XX_HLSQ_CONTROL_1_REG_RESERVED1 0x00000200
1745#define A4XX_HLSQ_CONTROL_1_REG_ZWCOORD 0x02000000
1746
1747#define REG_A4XX_HLSQ_CONTROL_2_REG 0x000023c2
1748#define A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK 0xfc000000
1749#define A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT 26
1750static inline uint32_t A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD(uint32_t val)
1751{
1752 return ((val) << A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT) & A4XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK;
1753}
1754
1755#define REG_A4XX_HLSQ_CONTROL_3_REG 0x000023c3
1756#define A4XX_HLSQ_CONTROL_3_REG_REGID__MASK 0x000000ff
1757#define A4XX_HLSQ_CONTROL_3_REG_REGID__SHIFT 0
1758static inline uint32_t A4XX_HLSQ_CONTROL_3_REG_REGID(uint32_t val)
1759{
1760 return ((val) << A4XX_HLSQ_CONTROL_3_REG_REGID__SHIFT) & A4XX_HLSQ_CONTROL_3_REG_REGID__MASK;
1761}
1762
1763#define REG_A4XX_HLSQ_VS_CONTROL_REG 0x000023c5
1764#define A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff
1765#define A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT 0
1766static inline uint32_t A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH(uint32_t val)
1767{
1768 return ((val) << A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK;
1769}
1770#define A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x0000ff00
1771#define A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
1772static inline uint32_t A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
1773{
1774 return ((val) << A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
1775}
1776#define A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
1777#define A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
1778static inline uint32_t A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
1779{
1780 return ((val) << A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__MASK;
1781}
1782#define A4XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
1783#define A4XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__SHIFT 24
1784static inline uint32_t A4XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH(uint32_t val)
1785{
1786 return ((val) << A4XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH__MASK;
1787}
1788
1789#define REG_A4XX_HLSQ_FS_CONTROL_REG 0x000023c6
1790#define A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff
1791#define A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT 0
1792static inline uint32_t A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH(uint32_t val)
1793{
1794 return ((val) << A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK;
1795}
1796#define A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x0000ff00
1797#define A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
1798static inline uint32_t A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
1799{
1800 return ((val) << A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
1801}
1802#define A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
1803#define A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
1804static inline uint32_t A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
1805{
1806 return ((val) << A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__MASK;
1807}
1808#define A4XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
1809#define A4XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__SHIFT 24
1810static inline uint32_t A4XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH(uint32_t val)
1811{
1812 return ((val) << A4XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH__MASK;
1813}
1814
1815#define REG_A4XX_HLSQ_HS_CONTROL_REG 0x000023c7
1816#define A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff
1817#define A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH__SHIFT 0
1818static inline uint32_t A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH(uint32_t val)
1819{
1820 return ((val) << A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH__MASK;
1821}
1822#define A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x0000ff00
1823#define A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
1824static inline uint32_t A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
1825{
1826 return ((val) << A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
1827}
1828#define A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
1829#define A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
1830static inline uint32_t A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
1831{
1832 return ((val) << A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__MASK;
1833}
1834#define A4XX_HLSQ_HS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
1835#define A4XX_HLSQ_HS_CONTROL_REG_INSTRLENGTH__SHIFT 24
1836static inline uint32_t A4XX_HLSQ_HS_CONTROL_REG_INSTRLENGTH(uint32_t val)
1837{
1838 return ((val) << A4XX_HLSQ_HS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_HS_CONTROL_REG_INSTRLENGTH__MASK;
1839}
1840
1841#define REG_A4XX_HLSQ_DS_CONTROL_REG 0x000023c8
1842#define A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff
1843#define A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH__SHIFT 0
1844static inline uint32_t A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH(uint32_t val)
1845{
1846 return ((val) << A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH__MASK;
1847}
1848#define A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x0000ff00
1849#define A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
1850static inline uint32_t A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
1851{
1852 return ((val) << A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
1853}
1854#define A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
1855#define A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
1856static inline uint32_t A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
1857{
1858 return ((val) << A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__MASK;
1859}
1860#define A4XX_HLSQ_DS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
1861#define A4XX_HLSQ_DS_CONTROL_REG_INSTRLENGTH__SHIFT 24
1862static inline uint32_t A4XX_HLSQ_DS_CONTROL_REG_INSTRLENGTH(uint32_t val)
1863{
1864 return ((val) << A4XX_HLSQ_DS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_DS_CONTROL_REG_INSTRLENGTH__MASK;
1865}
1866
1867#define REG_A4XX_HLSQ_GS_CONTROL_REG 0x000023c9
1868#define A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff
1869#define A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH__SHIFT 0
1870static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH(uint32_t val)
1871{
1872 return ((val) << A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH__MASK;
1873}
1874#define A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x0000ff00
1875#define A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
1876static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
1877{
1878 return ((val) << A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
1879}
1880#define A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
1881#define A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
1882static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
1883{
1884 return ((val) << A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__MASK;
1885}
1886#define A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
1887#define A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH__SHIFT 24
1888static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH(uint32_t val)
1889{
1890 return ((val) << A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH__MASK;
1891}
1892
1893#define REG_A4XX_HLSQ_UPDATE_CONTROL 0x000023db
1894
1895#define REG_A4XX_PC_BINNING_COMMAND 0x00000d00
1896#define A4XX_PC_BINNING_COMMAND_BINNING_ENABLE 0x00000001
1897
1898#define REG_A4XX_PC_DRAWCALL_SETUP_OVERRIDE 0x00000d0c
1899
1900#define REG_A4XX_PC_PERFCTR_PC_SEL_0 0x00000d10
1901
1902#define REG_A4XX_PC_PERFCTR_PC_SEL_7 0x00000d17
1903
1904#define REG_A4XX_PC_BIN_BASE 0x000021c0
1905
1906#define REG_A4XX_PC_PRIM_VTX_CNTL 0x000021c4
1907#define A4XX_PC_PRIM_VTX_CNTL_VAROUT 0x00000001
1908#define A4XX_PC_PRIM_VTX_CNTL_PROVOKING_VTX_LAST 0x02000000
1909#define A4XX_PC_PRIM_VTX_CNTL_PSIZE 0x04000000
1910
1911#define REG_A4XX_UNKNOWN_21C5 0x000021c5
1912
1913#define REG_A4XX_PC_RESTART_INDEX 0x000021c6
1914
1915#define REG_A4XX_PC_GS_PARAM 0x000021e5
1916
1917#define REG_A4XX_PC_HS_PARAM 0x000021e7
1918
1919#define REG_A4XX_VBIF_VERSION 0x00003000
1920
1921#define REG_A4XX_VBIF_CLKON 0x00003001
1922#define A4XX_VBIF_CLKON_FORCE_ON_TESTBUS 0x00000001
1923
1924#define REG_A4XX_VBIF_ABIT_SORT 0x0000301c
1925
1926#define REG_A4XX_VBIF_ABIT_SORT_CONF 0x0000301d
1927
1928#define REG_A4XX_VBIF_GATE_OFF_WRREQ_EN 0x0000302a
1929
1930#define REG_A4XX_VBIF_IN_RD_LIM_CONF0 0x0000302c
1931
1932#define REG_A4XX_VBIF_IN_RD_LIM_CONF1 0x0000302d
1933
1934#define REG_A4XX_VBIF_IN_WR_LIM_CONF0 0x00003030
1935
1936#define REG_A4XX_VBIF_IN_WR_LIM_CONF1 0x00003031
1937
1938#define REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB 0x00003049
1939
1940#define REG_A4XX_UNKNOWN_0CC5 0x00000cc5
1941
1942#define REG_A4XX_UNKNOWN_0CC6 0x00000cc6
1943
1944#define REG_A4XX_UNKNOWN_0D01 0x00000d01
1945
1946#define REG_A4XX_UNKNOWN_0E05 0x00000e05
1947
1948#define REG_A4XX_UNKNOWN_0E42 0x00000e42
1949
1950#define REG_A4XX_UNKNOWN_0EC2 0x00000ec2
1951
1952#define REG_A4XX_UNKNOWN_0EC3 0x00000ec3
1953
1954#define REG_A4XX_UNKNOWN_0F03 0x00000f03
1955
1956#define REG_A4XX_UNKNOWN_2001 0x00002001
1957
1958#define REG_A4XX_UNKNOWN_209B 0x0000209b
1959
1960#define REG_A4XX_UNKNOWN_20EF 0x000020ef
1961
1962#define REG_A4XX_UNKNOWN_20F0 0x000020f0
1963
1964#define REG_A4XX_UNKNOWN_20F1 0x000020f1
1965
1966#define REG_A4XX_UNKNOWN_20F2 0x000020f2
1967
1968#define REG_A4XX_UNKNOWN_20F3 0x000020f3
1969
1970#define REG_A4XX_UNKNOWN_20F4 0x000020f4
1971
1972#define REG_A4XX_UNKNOWN_20F5 0x000020f5
1973
1974#define REG_A4XX_UNKNOWN_20F6 0x000020f6
1975
1976#define REG_A4XX_UNKNOWN_20F7 0x000020f7
1977
1978#define REG_A4XX_UNKNOWN_2152 0x00002152
1979
1980#define REG_A4XX_UNKNOWN_2153 0x00002153
1981
1982#define REG_A4XX_UNKNOWN_2154 0x00002154
1983
1984#define REG_A4XX_UNKNOWN_2155 0x00002155
1985
1986#define REG_A4XX_UNKNOWN_2156 0x00002156
1987
1988#define REG_A4XX_UNKNOWN_2157 0x00002157
1989
1990#define REG_A4XX_UNKNOWN_21C3 0x000021c3
1991
1992#define REG_A4XX_UNKNOWN_21E6 0x000021e6
1993
1994#define REG_A4XX_UNKNOWN_2209 0x00002209
1995
1996#define REG_A4XX_UNKNOWN_22D7 0x000022d7
1997
1998#define REG_A4XX_UNKNOWN_2381 0x00002381
1999
2000#define REG_A4XX_UNKNOWN_23A0 0x000023a0
2001
2002#define REG_A4XX_TEX_SAMP_0 0x00000000
2003#define A4XX_TEX_SAMP_0_XY_MAG__MASK 0x00000006
2004#define A4XX_TEX_SAMP_0_XY_MAG__SHIFT 1
2005static inline uint32_t A4XX_TEX_SAMP_0_XY_MAG(enum a4xx_tex_filter val)
2006{
2007 return ((val) << A4XX_TEX_SAMP_0_XY_MAG__SHIFT) & A4XX_TEX_SAMP_0_XY_MAG__MASK;
2008}
2009#define A4XX_TEX_SAMP_0_XY_MIN__MASK 0x00000018
2010#define A4XX_TEX_SAMP_0_XY_MIN__SHIFT 3
2011static inline uint32_t A4XX_TEX_SAMP_0_XY_MIN(enum a4xx_tex_filter val)
2012{
2013 return ((val) << A4XX_TEX_SAMP_0_XY_MIN__SHIFT) & A4XX_TEX_SAMP_0_XY_MIN__MASK;
2014}
2015#define A4XX_TEX_SAMP_0_WRAP_S__MASK 0x000000e0
2016#define A4XX_TEX_SAMP_0_WRAP_S__SHIFT 5
2017static inline uint32_t A4XX_TEX_SAMP_0_WRAP_S(enum a4xx_tex_clamp val)
2018{
2019 return ((val) << A4XX_TEX_SAMP_0_WRAP_S__SHIFT) & A4XX_TEX_SAMP_0_WRAP_S__MASK;
2020}
2021#define A4XX_TEX_SAMP_0_WRAP_T__MASK 0x00000700
2022#define A4XX_TEX_SAMP_0_WRAP_T__SHIFT 8
2023static inline uint32_t A4XX_TEX_SAMP_0_WRAP_T(enum a4xx_tex_clamp val)
2024{
2025 return ((val) << A4XX_TEX_SAMP_0_WRAP_T__SHIFT) & A4XX_TEX_SAMP_0_WRAP_T__MASK;
2026}
2027#define A4XX_TEX_SAMP_0_WRAP_R__MASK 0x00003800
2028#define A4XX_TEX_SAMP_0_WRAP_R__SHIFT 11
2029static inline uint32_t A4XX_TEX_SAMP_0_WRAP_R(enum a4xx_tex_clamp val)
2030{
2031 return ((val) << A4XX_TEX_SAMP_0_WRAP_R__SHIFT) & A4XX_TEX_SAMP_0_WRAP_R__MASK;
2032}
2033
2034#define REG_A4XX_TEX_SAMP_1 0x00000001
2035#define A4XX_TEX_SAMP_1_COMPARE_FUNC__MASK 0x0000000e
2036#define A4XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT 1
2037static inline uint32_t A4XX_TEX_SAMP_1_COMPARE_FUNC(enum adreno_compare_func val)
2038{
2039 return ((val) << A4XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT) & A4XX_TEX_SAMP_1_COMPARE_FUNC__MASK;
2040}
2041#define A4XX_TEX_SAMP_1_MAX_LOD__MASK 0x000fff00
2042#define A4XX_TEX_SAMP_1_MAX_LOD__SHIFT 8
2043static inline uint32_t A4XX_TEX_SAMP_1_MAX_LOD(float val)
2044{
2045 return ((((uint32_t)(val * 64.0))) << A4XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A4XX_TEX_SAMP_1_MAX_LOD__MASK;
2046}
2047#define A4XX_TEX_SAMP_1_MIN_LOD__MASK 0xfff00000
2048#define A4XX_TEX_SAMP_1_MIN_LOD__SHIFT 20
2049static inline uint32_t A4XX_TEX_SAMP_1_MIN_LOD(float val)
2050{
2051 return ((((uint32_t)(val * 64.0))) << A4XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A4XX_TEX_SAMP_1_MIN_LOD__MASK;
2052}
2053
2054#define REG_A4XX_TEX_CONST_0 0x00000000
2055#define A4XX_TEX_CONST_0_TILED 0x00000001
2056#define A4XX_TEX_CONST_0_SWIZ_X__MASK 0x00000070
2057#define A4XX_TEX_CONST_0_SWIZ_X__SHIFT 4
2058static inline uint32_t A4XX_TEX_CONST_0_SWIZ_X(enum a4xx_tex_swiz val)
2059{
2060 return ((val) << A4XX_TEX_CONST_0_SWIZ_X__SHIFT) & A4XX_TEX_CONST_0_SWIZ_X__MASK;
2061}
2062#define A4XX_TEX_CONST_0_SWIZ_Y__MASK 0x00000380
2063#define A4XX_TEX_CONST_0_SWIZ_Y__SHIFT 7
2064static inline uint32_t A4XX_TEX_CONST_0_SWIZ_Y(enum a4xx_tex_swiz val)
2065{
2066 return ((val) << A4XX_TEX_CONST_0_SWIZ_Y__SHIFT) & A4XX_TEX_CONST_0_SWIZ_Y__MASK;
2067}
2068#define A4XX_TEX_CONST_0_SWIZ_Z__MASK 0x00001c00
2069#define A4XX_TEX_CONST_0_SWIZ_Z__SHIFT 10
2070static inline uint32_t A4XX_TEX_CONST_0_SWIZ_Z(enum a4xx_tex_swiz val)
2071{
2072 return ((val) << A4XX_TEX_CONST_0_SWIZ_Z__SHIFT) & A4XX_TEX_CONST_0_SWIZ_Z__MASK;
2073}
2074#define A4XX_TEX_CONST_0_SWIZ_W__MASK 0x0000e000
2075#define A4XX_TEX_CONST_0_SWIZ_W__SHIFT 13
2076static inline uint32_t A4XX_TEX_CONST_0_SWIZ_W(enum a4xx_tex_swiz val)
2077{
2078 return ((val) << A4XX_TEX_CONST_0_SWIZ_W__SHIFT) & A4XX_TEX_CONST_0_SWIZ_W__MASK;
2079}
2080#define A4XX_TEX_CONST_0_FMT__MASK 0x1fc00000
2081#define A4XX_TEX_CONST_0_FMT__SHIFT 22
2082static inline uint32_t A4XX_TEX_CONST_0_FMT(enum a4xx_tex_fmt val)
2083{
2084 return ((val) << A4XX_TEX_CONST_0_FMT__SHIFT) & A4XX_TEX_CONST_0_FMT__MASK;
2085}
2086#define A4XX_TEX_CONST_0_TYPE__MASK 0x60000000
2087#define A4XX_TEX_CONST_0_TYPE__SHIFT 29
2088static inline uint32_t A4XX_TEX_CONST_0_TYPE(enum a4xx_tex_type val)
2089{
2090 return ((val) << A4XX_TEX_CONST_0_TYPE__SHIFT) & A4XX_TEX_CONST_0_TYPE__MASK;
2091}
2092
2093#define REG_A4XX_TEX_CONST_1 0x00000001
2094#define A4XX_TEX_CONST_1_HEIGHT__MASK 0x00007fff
2095#define A4XX_TEX_CONST_1_HEIGHT__SHIFT 0
2096static inline uint32_t A4XX_TEX_CONST_1_HEIGHT(uint32_t val)
2097{
2098 return ((val) << A4XX_TEX_CONST_1_HEIGHT__SHIFT) & A4XX_TEX_CONST_1_HEIGHT__MASK;
2099}
2100#define A4XX_TEX_CONST_1_WIDTH__MASK 0x1fff8000
2101#define A4XX_TEX_CONST_1_WIDTH__SHIFT 15
2102static inline uint32_t A4XX_TEX_CONST_1_WIDTH(uint32_t val)
2103{
2104 return ((val) << A4XX_TEX_CONST_1_WIDTH__SHIFT) & A4XX_TEX_CONST_1_WIDTH__MASK;
2105}
2106
2107#define REG_A4XX_TEX_CONST_2 0x00000002
2108#define A4XX_TEX_CONST_2_PITCH__MASK 0x3ffffe00
2109#define A4XX_TEX_CONST_2_PITCH__SHIFT 9
2110static inline uint32_t A4XX_TEX_CONST_2_PITCH(uint32_t val)
2111{
2112 return ((val) << A4XX_TEX_CONST_2_PITCH__SHIFT) & A4XX_TEX_CONST_2_PITCH__MASK;
2113}
2114#define A4XX_TEX_CONST_2_SWAP__MASK 0xc0000000
2115#define A4XX_TEX_CONST_2_SWAP__SHIFT 30
2116static inline uint32_t A4XX_TEX_CONST_2_SWAP(enum a3xx_color_swap val)
2117{
2118 return ((val) << A4XX_TEX_CONST_2_SWAP__SHIFT) & A4XX_TEX_CONST_2_SWAP__MASK;
2119}
2120
2121#define REG_A4XX_TEX_CONST_3 0x00000003
2122#define A4XX_TEX_CONST_3_LAYERSZ__MASK 0x0000000f
2123#define A4XX_TEX_CONST_3_LAYERSZ__SHIFT 0
2124static inline uint32_t A4XX_TEX_CONST_3_LAYERSZ(uint32_t val)
2125{
2126 return ((val >> 12) << A4XX_TEX_CONST_3_LAYERSZ__SHIFT) & A4XX_TEX_CONST_3_LAYERSZ__MASK;
2127}
2128
2129#define REG_A4XX_TEX_CONST_4 0x00000004
2130#define A4XX_TEX_CONST_4_BASE__MASK 0xffffffff
2131#define A4XX_TEX_CONST_4_BASE__SHIFT 0
2132static inline uint32_t A4XX_TEX_CONST_4_BASE(uint32_t val)
2133{
2134 return ((val) << A4XX_TEX_CONST_4_BASE__SHIFT) & A4XX_TEX_CONST_4_BASE__MASK;
2135}
2136
2137#define REG_A4XX_TEX_CONST_5 0x00000005
2138
2139#define REG_A4XX_TEX_CONST_6 0x00000006
2140
2141#define REG_A4XX_TEX_CONST_7 0x00000007
2142
2143
2144#endif /* A4XX_XML */
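
Everything above is generated from the rules-ng-ng a4xx.xml database: each multi-bit register field gets a __MASK/__SHIFT pair plus an inline helper that shifts the value into place and masks off anything out of range, and float-valued fields (LINEHALFWIDTH, the LOD clamps) are scaled into fixed point on the way in. A minimal sketch of how driver code composes a register value from these helpers; the register and field names are from the header above, the width/height variables are illustrative:

	/* pack a bottom-right window scissor; each helper shifts its value
	 * into the field and the & __MASK drops any out-of-range bits */
	uint32_t br = A4XX_GRAS_SC_WINDOW_SCISSOR_BR_X(width - 1) |
			A4XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(height - 1);
	gpu_write(gpu, REG_A4XX_GRAS_SC_WINDOW_SCISSOR_BR, br);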
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
new file mode 100644
index 000000000000..91221836c5ad
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -0,0 +1,604 @@
1/* Copyright (c) 2014 The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13#include "a4xx_gpu.h"
14#ifdef CONFIG_MSM_OCMEM
15# include <soc/qcom/ocmem.h>
16#endif
17
18#define A4XX_INT0_MASK \
19 (A4XX_INT0_RBBM_AHB_ERROR | \
20 A4XX_INT0_RBBM_ATB_BUS_OVERFLOW | \
21 A4XX_INT0_CP_T0_PACKET_IN_IB | \
22 A4XX_INT0_CP_OPCODE_ERROR | \
23 A4XX_INT0_CP_RESERVED_BIT_ERROR | \
24 A4XX_INT0_CP_HW_FAULT | \
25 A4XX_INT0_CP_IB1_INT | \
26 A4XX_INT0_CP_IB2_INT | \
27 A4XX_INT0_CP_RB_INT | \
28 A4XX_INT0_CP_REG_PROTECT_FAULT | \
29 A4XX_INT0_CP_AHB_ERROR_HALT | \
30 A4XX_INT0_UCHE_OOB_ACCESS)
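
A4XX_INT0_MASK gathers the error interrupts (AHB/ATB overflow, CP faults, UCHE out-of-bounds) together with the CP ring and IB completion sources; a4xx_hw_init() below writes it verbatim to RBBM_INT_0_MASK, and a4xx_irq() acks whatever arrives by writing the status word straight back to RBBM_INT_CLEAR_CMD.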
31
32extern bool hang_debug;
33static void a4xx_dump(struct msm_gpu *gpu);
34
35/**
36 * a4xx_enable_hwcg() - Program the clock control registers
37 * @gpu: The GPU whose clock control registers will be programmed
38 */
39static void a4xx_enable_hwcg(struct msm_gpu *gpu)
40{
41 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
42 unsigned int i;
43 for (i = 0; i < 4; i++)
44 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_TP(i), 0x02222202);
45 for (i = 0; i < 4; i++)
46 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_TP(i), 0x00002222);
47 for (i = 0; i < 4; i++)
48 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_TP(i), 0x0E739CE7);
49 for (i = 0; i < 4; i++)
50 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_TP(i), 0x00111111);
51 for (i = 0; i < 4; i++)
52 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_SP(i), 0x22222222);
53 for (i = 0; i < 4; i++)
54 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_SP(i), 0x00222222);
55 for (i = 0; i < 4; i++)
56 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_SP(i), 0x00000104);
57 for (i = 0; i < 4; i++)
58 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_SP(i), 0x00000081);
59 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_UCHE, 0x22222222);
60 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_UCHE, 0x02222222);
61 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL3_UCHE, 0x00000000);
62 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL4_UCHE, 0x00000000);
63 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_UCHE, 0x00004444);
64 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_UCHE, 0x00001112);
65 for (i = 0; i < 4; i++)
66 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_RB(i), 0x22222222);
67
68 /* Disable L1 clocking in A420 due to CCU issues with it */
69 for (i = 0; i < 4; i++) {
70 if (adreno_is_a420(adreno_gpu)) {
71 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_RB(i),
72 0x00002020);
73 } else {
74 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_RB(i),
75 0x00022020);
76 }
77 }
78
79 for (i = 0; i < 4; i++) {
80 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_MARB_CCU(i),
81 0x00000922);
82 }
83
84 for (i = 0; i < 4; i++) {
85 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_RB_MARB_CCU(i),
86 0x00000000);
87 }
88
89 for (i = 0; i < 4; i++) {
90 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1(i),
91 0x00000001);
92 }
93
94 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_MODE_GPC, 0x02222222);
95 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_GPC, 0x04100104);
96 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_GPC, 0x00022222);
97 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_COM_DCOM, 0x00000022);
98 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_COM_DCOM, 0x0000010F);
99 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_COM_DCOM, 0x00000022);
100 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_TSE_RAS_RBBM, 0x00222222);
101 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00004104);
102 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00000222);
103 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_HLSQ, 0x00000000);
104 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000);
105 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ, 0x00020000);
106 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL, 0xAAAAAAAA);
107 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2, 0);
108}
109
110static void a4xx_me_init(struct msm_gpu *gpu)
111{
112 struct msm_ringbuffer *ring = gpu->rb;
113
114 OUT_PKT3(ring, CP_ME_INIT, 17);
115 OUT_RING(ring, 0x000003f7);
116 OUT_RING(ring, 0x00000000);
117 OUT_RING(ring, 0x00000000);
118 OUT_RING(ring, 0x00000000);
119 OUT_RING(ring, 0x00000080);
120 OUT_RING(ring, 0x00000100);
121 OUT_RING(ring, 0x00000180);
122 OUT_RING(ring, 0x00006600);
123 OUT_RING(ring, 0x00000150);
124 OUT_RING(ring, 0x0000014e);
125 OUT_RING(ring, 0x00000154);
126 OUT_RING(ring, 0x00000001);
127 OUT_RING(ring, 0x00000000);
128 OUT_RING(ring, 0x00000000);
129 OUT_RING(ring, 0x00000000);
130 OUT_RING(ring, 0x00000000);
131 OUT_RING(ring, 0x00000000);
132
133 gpu->funcs->flush(gpu);
134 gpu->funcs->idle(gpu);
135}
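
OUT_PKT3() (see the adreno_gpu.h hunk near the end of this patch) emits a single type-3 header dword ahead of the payload: the opcode lands in bits 8..15 and the dword count minus one in bits 16 and up. So the CP_ME_INIT packet above is one header followed by the 17 OUT_RING() payload dwords. Illustratively:

	/* what OUT_PKT3(ring, CP_ME_INIT, 17) puts on the ring first: */
	uint32_t hdr = CP_TYPE3_PKT | ((17 - 1) << 16) | ((CP_ME_INIT & 0xFF) << 8);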
136
137static int a4xx_hw_init(struct msm_gpu *gpu)
138{
139 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
140 struct a4xx_gpu *a4xx_gpu = to_a4xx_gpu(adreno_gpu);
141 uint32_t *ptr, len;
142 int i, ret;
143
144 if (adreno_is_a4xx(adreno_gpu)) {
145 gpu_write(gpu, REG_A4XX_VBIF_ABIT_SORT, 0x0001001F);
146 gpu_write(gpu, REG_A4XX_VBIF_ABIT_SORT_CONF, 0x000000A4);
147 gpu_write(gpu, REG_A4XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000001);
148 gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF0, 0x18181818);
149 gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF1, 0x00000018);
150 gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF0, 0x18181818);
151 gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF1, 0x00000018);
152 gpu_write(gpu, REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
153 } else {
154 BUG();
155 }
156
157 /* Make all blocks contribute to the GPU BUSY perf counter */
158 gpu_write(gpu, REG_A4XX_RBBM_GPU_BUSY_MASKED, 0xffffffff);
159
160 /* Tune the hysteresis counters for SP and CP idle detection */
161 gpu_write(gpu, REG_A4XX_RBBM_SP_HYST_CNT, 0x10);
162 gpu_write(gpu, REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL, 0x10);
163
164 /* Enable the RBBM error reporting bits */
165 gpu_write(gpu, REG_A4XX_RBBM_AHB_CTL0, 0x00000001);
166
167 /* Enable AHB error reporting */
168 gpu_write(gpu, REG_A4XX_RBBM_AHB_CTL1, 0xa6ffffff);
169
170 /* Enable power counters */
171 gpu_write(gpu, REG_A4XX_RBBM_RBBM_CTL, 0x00000030);
172
173 /*
174 * Turn on hang detection - this spews a lot of useful information
175 * into the RBBM registers on a hang:
176 */
177 gpu_write(gpu, REG_A4XX_RBBM_INTERFACE_HANG_INT_CTL,
178 (1 << 30) | 0xFFFF);
179
180 gpu_write(gpu, REG_A4XX_RB_GMEM_BASE_ADDR,
181 (unsigned int)(a4xx_gpu->ocmem_base >> 14));
182
183 /* Turn on performance counters: */
184 gpu_write(gpu, REG_A4XX_RBBM_PERFCTR_CTL, 0x01);
185
186 /* Disable L2 bypass to avoid UCHE out of bounds errors */
187 gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_LO, 0xffff0000);
188 gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_HI, 0xffff0000);
189
190 gpu_write(gpu, REG_A4XX_CP_DEBUG, (1 << 25) |
191 (adreno_is_a420(adreno_gpu) ? (1 << 29) : 0));
192
193 a4xx_enable_hwcg(gpu);
194
195 /*
196 * For A420 set RBBM_CLOCK_DELAY_HLSQ.CGC_HLSQ_TP_EARLY_CYC >= 2
197 * due to timing issue with HLSQ_TP_CLK_EN
198 */
199 if (adreno_is_a420(adreno_gpu)) {
200 unsigned int val;
201 val = gpu_read(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ);
202 val &= ~A4XX_CGC_HLSQ_EARLY_CYC__MASK;
203 val |= 2 << A4XX_CGC_HLSQ_EARLY_CYC__SHIFT;
204 gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ, val);
205 }
206
207 ret = adreno_hw_init(gpu);
208 if (ret)
209 return ret;
210
211 /* setup access protection: */
212 gpu_write(gpu, REG_A4XX_CP_PROTECT_CTRL, 0x00000007);
213
214 /* RBBM registers */
215 gpu_write(gpu, REG_A4XX_CP_PROTECT(0), 0x62000010);
216 gpu_write(gpu, REG_A4XX_CP_PROTECT(1), 0x63000020);
217 gpu_write(gpu, REG_A4XX_CP_PROTECT(2), 0x64000040);
218 gpu_write(gpu, REG_A4XX_CP_PROTECT(3), 0x65000080);
219 gpu_write(gpu, REG_A4XX_CP_PROTECT(4), 0x66000100);
220 gpu_write(gpu, REG_A4XX_CP_PROTECT(5), 0x64000200);
221
222 /* CP registers */
223 gpu_write(gpu, REG_A4XX_CP_PROTECT(6), 0x67000800);
224 gpu_write(gpu, REG_A4XX_CP_PROTECT(7), 0x64001600);
225
226
227 /* RB registers */
228 gpu_write(gpu, REG_A4XX_CP_PROTECT(8), 0x60003300);
229
230 /* HLSQ registers */
231 gpu_write(gpu, REG_A4XX_CP_PROTECT(9), 0x60003800);
232
233 /* VPC registers */
234 gpu_write(gpu, REG_A4XX_CP_PROTECT(10), 0x61003980);
235
236 /* SMMU registers */
237 gpu_write(gpu, REG_A4XX_CP_PROTECT(11), 0x6e010000);
238
239 gpu_write(gpu, REG_A4XX_RBBM_INT_0_MASK, A4XX_INT0_MASK);
240
241 ret = adreno_hw_init(gpu);
242 if (ret)
243 return ret;
244
245 /* Load PM4: */
246 ptr = (uint32_t *)(adreno_gpu->pm4->data);
247 len = adreno_gpu->pm4->size / 4;
248 DBG("loading PM4 ucode version: %u", ptr[0]);
249 gpu_write(gpu, REG_A4XX_CP_ME_RAM_WADDR, 0);
250 for (i = 1; i < len; i++)
251 gpu_write(gpu, REG_A4XX_CP_ME_RAM_DATA, ptr[i]);
252
253 /* Load PFP: */
254 ptr = (uint32_t *)(adreno_gpu->pfp->data);
255 len = adreno_gpu->pfp->size / 4;
256 DBG("loading PFP ucode version: %u", ptr[0]);
257
258 gpu_write(gpu, REG_A4XX_CP_PFP_UCODE_ADDR, 0);
259 for (i = 1; i < len; i++)
260 gpu_write(gpu, REG_A4XX_CP_PFP_UCODE_DATA, ptr[i]);
261
262 /* clear ME_HALT to start micro engine */
263 gpu_write(gpu, REG_A4XX_CP_ME_CNTL, 0);
264
265 a4xx_me_init(gpu);
266 return 0;
267}
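
Both microcode loads above follow the same shape: word 0 of the firmware blob is a version dword (only logged via DBG), and the rest is streamed through an address/data register pair after rewinding the write address to 0. A hypothetical helper capturing the pattern; stream_ucode() and its parameter names are illustrative, not part of this driver:

	static void stream_ucode(struct msm_gpu *gpu, uint32_t addr_reg,
			uint32_t data_reg, const uint32_t *words, uint32_t count)
	{
		uint32_t i;

		gpu_write(gpu, addr_reg, 0);	/* rewind the ucode write pointer */
		for (i = 1; i < count; i++)	/* word 0 is the version, skip it */
			gpu_write(gpu, data_reg, words[i]);
	}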
268
269static void a4xx_recover(struct msm_gpu *gpu)
270{
271 /* dump registers before resetting gpu, if enabled: */
272 if (hang_debug)
273 a4xx_dump(gpu);
274
275 gpu_write(gpu, REG_A4XX_RBBM_SW_RESET_CMD, 1);
276 gpu_read(gpu, REG_A4XX_RBBM_SW_RESET_CMD);
277 gpu_write(gpu, REG_A4XX_RBBM_SW_RESET_CMD, 0);
278 adreno_recover(gpu);
279}
280
281static void a4xx_destroy(struct msm_gpu *gpu)
282{
283 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
284 struct a4xx_gpu *a4xx_gpu = to_a4xx_gpu(adreno_gpu);
285
286 DBG("%s", gpu->name);
287
288 adreno_gpu_cleanup(adreno_gpu);
289
290#ifdef CONFIG_MSM_OCMEM
291 if (a4xx_gpu->ocmem_base)
292 ocmem_free(OCMEM_GRAPHICS, a4xx_gpu->ocmem_hdl);
293#endif
294
295 kfree(a4xx_gpu);
296}
297
298static void a4xx_idle(struct msm_gpu *gpu)
299{
300 /* wait for ringbuffer to drain: */
301 adreno_idle(gpu);
302
303 /* then wait for GPU to finish: */
304 if (spin_until(!(gpu_read(gpu, REG_A4XX_RBBM_STATUS) &
305 A4XX_RBBM_STATUS_GPU_BUSY)))
306 DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name);
307
308 /* TODO maybe we need to reset GPU here to recover from hang? */
309}
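
spin_until() is the msm driver's poll-with-timeout helper from msm_drv.h; the call above re-reads RBBM_STATUS until the GPU_BUSY bit drops or the timeout expires. Roughly what the wait amounts to; the function name, timeout handling, and 5 us poll interval here are assumptions for illustration, not the real macro body:

	static int wait_for_gpu_idle(struct msm_gpu *gpu, unsigned long timeout_ms)
	{
		unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

		do {
			if (!(gpu_read(gpu, REG_A4XX_RBBM_STATUS) &
					A4XX_RBBM_STATUS_GPU_BUSY))
				return 0;	/* GPU went idle */
			udelay(5);
		} while (time_before(jiffies, deadline));

		return -ETIMEDOUT;
	}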
310
311static irqreturn_t a4xx_irq(struct msm_gpu *gpu)
312{
313 uint32_t status;
314
315 status = gpu_read(gpu, REG_A4XX_RBBM_INT_0_STATUS);
316 DBG("%s: Int status %08x", gpu->name, status);
317
318 gpu_write(gpu, REG_A4XX_RBBM_INT_CLEAR_CMD, status);
319
320 msm_gpu_retire(gpu);
321
322 return IRQ_HANDLED;
323}
324
325static const unsigned int a4xx_registers[] = {
326 /* RBBM */
327 0x0000, 0x0002, 0x0004, 0x0021, 0x0023, 0x0024, 0x0026, 0x0026,
328 0x0028, 0x002B, 0x002E, 0x0034, 0x0037, 0x0044, 0x0047, 0x0066,
329 0x0068, 0x0095, 0x009C, 0x0170, 0x0174, 0x01AF,
330 /* CP */
331 0x0200, 0x0233, 0x0240, 0x0250, 0x04C0, 0x04DD, 0x0500, 0x050B,
332 0x0578, 0x058F,
333 /* VSC */
334 0x0C00, 0x0C03, 0x0C08, 0x0C41, 0x0C50, 0x0C51,
335 /* GRAS */
336 0x0C80, 0x0C81, 0x0C88, 0x0C8F,
337 /* RB */
338 0x0CC0, 0x0CC0, 0x0CC4, 0x0CD2,
339 /* PC */
340 0x0D00, 0x0D0C, 0x0D10, 0x0D17, 0x0D20, 0x0D23,
341 /* VFD */
342 0x0E40, 0x0E4A,
343 /* VPC */
344 0x0E60, 0x0E61, 0x0E63, 0x0E68,
345 /* UCHE */
346 0x0E80, 0x0E84, 0x0E88, 0x0E95,
347 /* VMIDMT */
348 0x1000, 0x1000, 0x1002, 0x1002, 0x1004, 0x1004, 0x1008, 0x100A,
349 0x100C, 0x100D, 0x100F, 0x1010, 0x1012, 0x1016, 0x1024, 0x1024,
350 0x1027, 0x1027, 0x1100, 0x1100, 0x1102, 0x1102, 0x1104, 0x1104,
351 0x1110, 0x1110, 0x1112, 0x1116, 0x1124, 0x1124, 0x1300, 0x1300,
352 0x1380, 0x1380,
353 /* GRAS CTX 0 */
354 0x2000, 0x2004, 0x2008, 0x2067, 0x2070, 0x2078, 0x207B, 0x216E,
355 /* PC CTX 0 */
356 0x21C0, 0x21C6, 0x21D0, 0x21D0, 0x21D9, 0x21D9, 0x21E5, 0x21E7,
357 /* VFD CTX 0 */
358 0x2200, 0x2204, 0x2208, 0x22A9,
359 /* GRAS CTX 1 */
360 0x2400, 0x2404, 0x2408, 0x2467, 0x2470, 0x2478, 0x247B, 0x256E,
361 /* PC CTX 1 */
362 0x25C0, 0x25C6, 0x25D0, 0x25D0, 0x25D9, 0x25D9, 0x25E5, 0x25E7,
363 /* VFD CTX 1 */
364 0x2600, 0x2604, 0x2608, 0x26A9,
365 /* XPU */
366 0x2C00, 0x2C01, 0x2C10, 0x2C10, 0x2C12, 0x2C16, 0x2C1D, 0x2C20,
367 0x2C28, 0x2C28, 0x2C30, 0x2C30, 0x2C32, 0x2C36, 0x2C40, 0x2C40,
368 0x2C50, 0x2C50, 0x2C52, 0x2C56, 0x2C80, 0x2C80, 0x2C94, 0x2C95,
369 /* VBIF */
370 0x3000, 0x3007, 0x300C, 0x3014, 0x3018, 0x301D, 0x3020, 0x3022,
371 0x3024, 0x3026, 0x3028, 0x302A, 0x302C, 0x302D, 0x3030, 0x3031,
372 0x3034, 0x3036, 0x3038, 0x3038, 0x303C, 0x303D, 0x3040, 0x3040,
373 0x3049, 0x3049, 0x3058, 0x3058, 0x305B, 0x3061, 0x3064, 0x3068,
374 0x306C, 0x306D, 0x3080, 0x3088, 0x308B, 0x308C, 0x3090, 0x3094,
375 0x3098, 0x3098, 0x309C, 0x309C, 0x30C0, 0x30C0, 0x30C8, 0x30C8,
376 0x30D0, 0x30D0, 0x30D8, 0x30D8, 0x30E0, 0x30E0, 0x3100, 0x3100,
377 0x3108, 0x3108, 0x3110, 0x3110, 0x3118, 0x3118, 0x3120, 0x3120,
378 0x3124, 0x3125, 0x3129, 0x3129, 0x3131, 0x3131, 0x330C, 0x330C,
379 0x3310, 0x3310, 0x3400, 0x3401, 0x3410, 0x3410, 0x3412, 0x3416,
380 0x341D, 0x3420, 0x3428, 0x3428, 0x3430, 0x3430, 0x3432, 0x3436,
381 0x3440, 0x3440, 0x3450, 0x3450, 0x3452, 0x3456, 0x3480, 0x3480,
382 0x3494, 0x3495, 0x4000, 0x4000, 0x4002, 0x4002, 0x4004, 0x4004,
383 0x4008, 0x400A, 0x400C, 0x400D, 0x400F, 0x4012, 0x4014, 0x4016,
384 0x401D, 0x401D, 0x4020, 0x4027, 0x4060, 0x4062, 0x4200, 0x4200,
385 0x4300, 0x4300, 0x4400, 0x4400, 0x4500, 0x4500, 0x4800, 0x4802,
386 0x480F, 0x480F, 0x4811, 0x4811, 0x4813, 0x4813, 0x4815, 0x4816,
387 0x482B, 0x482B, 0x4857, 0x4857, 0x4883, 0x4883, 0x48AF, 0x48AF,
388 0x48C5, 0x48C5, 0x48E5, 0x48E5, 0x4905, 0x4905, 0x4925, 0x4925,
389 0x4945, 0x4945, 0x4950, 0x4950, 0x495B, 0x495B, 0x4980, 0x498E,
390 0x4B00, 0x4B00, 0x4C00, 0x4C00, 0x4D00, 0x4D00, 0x4E00, 0x4E00,
391 0x4E80, 0x4E80, 0x4F00, 0x4F00, 0x4F08, 0x4F08, 0x4F10, 0x4F10,
392 0x4F18, 0x4F18, 0x4F20, 0x4F20, 0x4F30, 0x4F30, 0x4F60, 0x4F60,
393 0x4F80, 0x4F81, 0x4F88, 0x4F89, 0x4FEE, 0x4FEE, 0x4FF3, 0x4FF3,
394 0x6000, 0x6001, 0x6008, 0x600F, 0x6014, 0x6016, 0x6018, 0x601B,
395 0x61FD, 0x61FD, 0x623C, 0x623C, 0x6380, 0x6380, 0x63A0, 0x63A0,
396 0x63C0, 0x63C1, 0x63C8, 0x63C9, 0x63D0, 0x63D4, 0x63D6, 0x63D6,
397 0x63EE, 0x63EE, 0x6400, 0x6401, 0x6408, 0x640F, 0x6414, 0x6416,
398 0x6418, 0x641B, 0x65FD, 0x65FD, 0x663C, 0x663C, 0x6780, 0x6780,
399 0x67A0, 0x67A0, 0x67C0, 0x67C1, 0x67C8, 0x67C9, 0x67D0, 0x67D4,
400 0x67D6, 0x67D6, 0x67EE, 0x67EE, 0x6800, 0x6801, 0x6808, 0x680F,
401 0x6814, 0x6816, 0x6818, 0x681B, 0x69FD, 0x69FD, 0x6A3C, 0x6A3C,
402 0x6B80, 0x6B80, 0x6BA0, 0x6BA0, 0x6BC0, 0x6BC1, 0x6BC8, 0x6BC9,
403 0x6BD0, 0x6BD4, 0x6BD6, 0x6BD6, 0x6BEE, 0x6BEE,
404 ~0 /* sentinel */
405};
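
The table is a flat list of inclusive { start, end } register-address pairs terminated by the ~0 sentinel; the shared dump/show paths walk it along these lines (a sketch, with the loop variables assumed):

	const unsigned int *regs = a4xx_registers;
	unsigned int i, addr;

	for (i = 0; regs[i] != ~0; i += 2)
		for (addr = regs[i]; addr <= regs[i + 1]; addr++)
			printk("0x%04x: 0x%08x\n", addr, gpu_read(gpu, addr));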
406
407#ifdef CONFIG_DEBUG_FS
408static void a4xx_show(struct msm_gpu *gpu, struct seq_file *m)
409{
410 gpu->funcs->pm_resume(gpu);
411
412 seq_printf(m, "status: %08x\n",
413 gpu_read(gpu, REG_A4XX_RBBM_STATUS));
414 gpu->funcs->pm_suspend(gpu);
415
416 adreno_show(gpu, m);
418}
419#endif
420
421/* Register offset defines for A4XX, in order of enum adreno_regs */
422static const unsigned int a4xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
423 REG_ADRENO_DEFINE(REG_ADRENO_CP_DEBUG, REG_A4XX_CP_DEBUG),
424 REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_WADDR, REG_A4XX_CP_ME_RAM_WADDR),
425 REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_DATA, REG_A4XX_CP_ME_RAM_DATA),
426 REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_DATA,
427 REG_A4XX_CP_PFP_UCODE_DATA),
428 REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_ADDR,
429 REG_A4XX_CP_PFP_UCODE_ADDR),
430 REG_ADRENO_DEFINE(REG_ADRENO_CP_WFI_PEND_CTR, REG_A4XX_CP_WFI_PEND_CTR),
431 REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A4XX_CP_RB_BASE),
432 REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_A4XX_CP_RB_RPTR_ADDR),
433 REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A4XX_CP_RB_RPTR),
434 REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A4XX_CP_RB_WPTR),
435 REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_CTRL, REG_A4XX_CP_PROTECT_CTRL),
436 REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_CNTL, REG_A4XX_CP_ME_CNTL),
437 REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A4XX_CP_RB_CNTL),
438 REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BASE, REG_A4XX_CP_IB1_BASE),
439 REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BUFSZ, REG_A4XX_CP_IB1_BUFSZ),
440 REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BASE, REG_A4XX_CP_IB2_BASE),
441 REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BUFSZ, REG_A4XX_CP_IB2_BUFSZ),
442 REG_ADRENO_DEFINE(REG_ADRENO_CP_TIMESTAMP, REG_AXXX_CP_SCRATCH_REG0),
443 REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_RADDR, REG_A4XX_CP_ME_RAM_RADDR),
444 REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_ADDR, REG_A4XX_CP_ROQ_ADDR),
445 REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_DATA, REG_A4XX_CP_ROQ_DATA),
446 REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_ADDR, REG_A4XX_CP_MERCIU_ADDR),
447 REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA, REG_A4XX_CP_MERCIU_DATA),
448 REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA2, REG_A4XX_CP_MERCIU_DATA2),
449 REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_ADDR, REG_A4XX_CP_MEQ_ADDR),
450 REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_DATA, REG_A4XX_CP_MEQ_DATA),
451 REG_ADRENO_DEFINE(REG_ADRENO_CP_HW_FAULT, REG_A4XX_CP_HW_FAULT),
452 REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_STATUS,
453 REG_A4XX_CP_PROTECT_STATUS),
454 REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_ADDR, REG_A4XX_CP_SCRATCH_ADDR),
455 REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_UMSK, REG_A4XX_CP_SCRATCH_UMASK),
456 REG_ADRENO_DEFINE(REG_ADRENO_RBBM_STATUS, REG_A4XX_RBBM_STATUS),
457 REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_CTL,
458 REG_A4XX_RBBM_PERFCTR_CTL),
459 REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD0,
460 REG_A4XX_RBBM_PERFCTR_LOAD_CMD0),
461 REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD1,
462 REG_A4XX_RBBM_PERFCTR_LOAD_CMD1),
463 REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD2,
464 REG_A4XX_RBBM_PERFCTR_LOAD_CMD2),
465 REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_PWR_1_LO,
466 REG_A4XX_RBBM_PERFCTR_PWR_1_LO),
467 REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_MASK, REG_A4XX_RBBM_INT_0_MASK),
468 REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_STATUS,
469 REG_A4XX_RBBM_INT_0_STATUS),
470 REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ERROR_STATUS,
471 REG_A4XX_RBBM_AHB_ERROR_STATUS),
472 REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_CMD, REG_A4XX_RBBM_AHB_CMD),
473 REG_ADRENO_DEFINE(REG_ADRENO_RBBM_CLOCK_CTL, REG_A4XX_RBBM_CLOCK_CTL),
474 REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ME_SPLIT_STATUS,
475 REG_A4XX_RBBM_AHB_ME_SPLIT_STATUS),
476 REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_PFP_SPLIT_STATUS,
477 REG_A4XX_RBBM_AHB_PFP_SPLIT_STATUS),
478 REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_SEL,
479 REG_A4XX_VPC_DEBUG_RAM_SEL),
480 REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_READ,
481 REG_A4XX_VPC_DEBUG_RAM_READ),
482 REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_CLEAR_CMD,
483 REG_A4XX_RBBM_INT_CLEAR_CMD),
484 REG_ADRENO_DEFINE(REG_ADRENO_VSC_SIZE_ADDRESS,
485 REG_A4XX_VSC_SIZE_ADDRESS),
486 REG_ADRENO_DEFINE(REG_ADRENO_VFD_CONTROL_0, REG_A4XX_VFD_CONTROL_0),
487 REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_PVT_MEM_ADDR_REG,
488 REG_A4XX_SP_VS_PVT_MEM_ADDR),
489 REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_PVT_MEM_ADDR_REG,
490 REG_A4XX_SP_FS_PVT_MEM_ADDR),
491 REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_OBJ_START_REG,
492 REG_A4XX_SP_VS_OBJ_START),
493 REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_OBJ_START_REG,
494 REG_A4XX_SP_FS_OBJ_START),
495 REG_ADRENO_DEFINE(REG_ADRENO_RBBM_RBBM_CTL, REG_A4XX_RBBM_RBBM_CTL),
496 REG_ADRENO_DEFINE(REG_ADRENO_RBBM_SW_RESET_CMD,
497 REG_A4XX_RBBM_SW_RESET_CMD),
498 REG_ADRENO_DEFINE(REG_ADRENO_UCHE_INVALIDATE0,
499 REG_A4XX_UCHE_INVALIDATE0),
500 REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_LO,
501 REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_LO),
502 REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_HI,
503 REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_HI),
504};
505
506static void a4xx_dump(struct msm_gpu *gpu)
507{
509 printk("status: %08x\n",
510 gpu_read(gpu, REG_A4XX_RBBM_STATUS));
511 adreno_dump(gpu);
512}
513
514static const struct adreno_gpu_funcs funcs = {
515 .base = {
516 .get_param = adreno_get_param,
517 .hw_init = a4xx_hw_init,
518 .pm_suspend = msm_gpu_pm_suspend,
519 .pm_resume = msm_gpu_pm_resume,
520 .recover = a4xx_recover,
521 .last_fence = adreno_last_fence,
522 .submit = adreno_submit,
523 .flush = adreno_flush,
524 .idle = a4xx_idle,
525 .irq = a4xx_irq,
526 .destroy = a4xx_destroy,
527#ifdef CONFIG_DEBUG_FS
528 .show = a4xx_show,
529#endif
530 },
531};
532
533struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
534{
535 struct a4xx_gpu *a4xx_gpu = NULL;
536 struct adreno_gpu *adreno_gpu;
537 struct msm_gpu *gpu;
538 struct msm_drm_private *priv = dev->dev_private;
539 struct platform_device *pdev = priv->gpu_pdev;
540 int ret;
541
542 if (!pdev) {
543 dev_err(dev->dev, "no a4xx device\n");
544 ret = -ENXIO;
545 goto fail;
546 }
547
548 a4xx_gpu = kzalloc(sizeof(*a4xx_gpu), GFP_KERNEL);
549 if (!a4xx_gpu) {
550 ret = -ENOMEM;
551 goto fail;
552 }
553
554 adreno_gpu = &a4xx_gpu->base;
555 gpu = &adreno_gpu->base;
556
557 a4xx_gpu->pdev = pdev;
558
559 gpu->perfcntrs = NULL;
560 gpu->num_perfcntrs = 0;
561
562 adreno_gpu->registers = a4xx_registers;
563 adreno_gpu->reg_offsets = a4xx_register_offsets;
564
565 ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs);
566 if (ret)
567 goto fail;
568
569 /* if needed, allocate gmem: */
570 if (adreno_is_a4xx(adreno_gpu)) {
571#ifdef CONFIG_MSM_OCMEM
572 /* TODO this is different/missing upstream: */
573 struct ocmem_buf *ocmem_hdl =
574 ocmem_allocate(OCMEM_GRAPHICS, adreno_gpu->gmem);
575
576 a4xx_gpu->ocmem_hdl = ocmem_hdl;
577 a4xx_gpu->ocmem_base = ocmem_hdl->addr;
578 adreno_gpu->gmem = ocmem_hdl->len;
579 DBG("using %dK of OCMEM at 0x%08x", adreno_gpu->gmem / 1024,
580 a4xx_gpu->ocmem_base);
581#endif
582 }
583
584 if (!gpu->mmu) {
585 /* TODO we think it is possible to configure the GPU to
586 * restrict access to VRAM carveout. But the required
587 * registers are unknown. For now just bail out and
588 * limp along with just modesetting. If it turns out
589 * to not be possible to restrict access, then we must
590 * implement a cmdstream validator.
591 */
592 dev_err(dev->dev, "No memory protection without IOMMU\n");
593 ret = -ENXIO;
594 goto fail;
595 }
596
597 return gpu;
598
599fail:
600 if (a4xx_gpu)
601 a4xx_destroy(&a4xx_gpu->base.base);
602
603 return ERR_PTR(ret);
604}
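
Note the error convention: a4xx_gpu_init() never returns NULL, it returns ERR_PTR(ret) on failure, so the caller (the GPU load path in msm_drv.c) is expected to unpack it roughly like this. A sketch of the calling side, not code from this patch:

	struct msm_gpu *gpu = a4xx_gpu_init(dev);

	if (IS_ERR(gpu)) {
		dev_warn(dev->dev, "failed to load a4xx gpu\n");
		gpu = NULL;	/* fall back to modesetting only */
	}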
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.h b/drivers/gpu/drm/msm/adreno/a4xx_gpu.h
new file mode 100644
index 000000000000..01247204ac92
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.h
@@ -0,0 +1,34 @@
1/* Copyright (c) 2014 The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13#ifndef __A4XX_GPU_H__
14#define __A4XX_GPU_H__
15
16#include "adreno_gpu.h"
17
18/* arrg, somehow fb.h is getting pulled in: */
19#undef ROP_COPY
20#undef ROP_XOR
21
22#include "a4xx.xml.h"
23
24struct a4xx_gpu {
25 struct adreno_gpu base;
26 struct platform_device *pdev;
27
28 /* if OCMEM is used for GMEM: */
29 uint32_t ocmem_base;
30 void *ocmem_hdl;
31};
32#define to_a4xx_gpu(x) container_of(x, struct a4xx_gpu, base)
33
34#endif /* __A4XX_GPU_H__ */
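
struct a4xx_gpu embeds struct adreno_gpu, which itself embeds struct msm_gpu, so the chip-specific state is recovered from the generic callback pointer by walking container_of() back up: to_a4xx_gpu() above pairs with to_adreno_gpu() from adreno_gpu.h. For illustration:

	/* inside an msm_gpu_funcs callback handed a struct msm_gpu *gpu: */
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);	/* msm -> adreno */
	struct a4xx_gpu *a4xx_gpu = to_a4xx_gpu(adreno_gpu);	/* adreno -> a4xx */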
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
index cc341bc62b51..a4b33af9338d 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -11,10 +11,10 @@ The rules-ng-ng source files this header was generated from are:
 - /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
 - /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
 - /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 9859 bytes, from 2014-06-02 15:21:30)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14960 bytes, from 2014-07-27 17:22:13)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 58020 bytes, from 2014-08-01 12:22:48)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 41068 bytes, from 2014-08-01 12:22:48)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15053 bytes, from 2014-11-09 15:45:47)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 63169 bytes, from 2014-11-13 22:44:18)
+- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 49097 bytes, from 2014-11-14 15:38:00)
 
 Copyright (C) 2013-2014 by the following authors:
 - Rob Clark <robdclark@gmail.com> (robclark)
@@ -105,6 +105,7 @@ enum adreno_rb_dither_mode {
 enum adreno_rb_depth_format {
 	DEPTHX_16 = 0,
 	DEPTHX_24_8 = 1,
+	DEPTHX_32 = 2,
 };
 
 enum adreno_rb_copy_control_mode {
@@ -132,6 +133,7 @@ enum a3xx_threadmode {
 };
 
 enum a3xx_instrbuffermode {
+	CACHE = 0,
 	BUFFER = 1,
 };
 
@@ -140,6 +142,13 @@ enum a3xx_threadsize {
 	FOUR_QUADS = 1,
 };
 
+enum a3xx_color_swap {
+	WZYX = 0,
+	WXYZ = 1,
+	ZYXW = 2,
+	XYZW = 3,
+};
+
 #define REG_AXXX_CP_RB_BASE 0x000001c0
 
 #define REG_AXXX_CP_RB_CNTL 0x000001c1
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 7ab85af3a7db..be83dee83d08 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -2,6 +2,8 @@
  * Copyright (C) 2013-2014 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published by
  * the Free Software Foundation.
@@ -28,6 +30,7 @@ MODULE_PARM_DESC(hang_debug, "Dump registers when hang is detected (can be slow!
 module_param_named(hang_debug, hang_debug, bool, 0600);
 
 struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
+struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
 
 static const struct adreno_info gpulist[] = {
 	{
@@ -54,6 +57,14 @@ static const struct adreno_info gpulist[] = {
 		.pfpfw = "a330_pfp.fw",
 		.gmem = SZ_1M,
 		.init = a3xx_gpu_init,
+	}, {
+		.rev = ADRENO_REV(4, 2, 0, ANY_ID),
+		.revn = 420,
+		.name = "A420",
+		.pm4fw = "a420_pm4.fw",
+		.pfpfw = "a420_pfp.fw",
+		.gmem = (SZ_1M + SZ_512K),
+		.init = a4xx_gpu_init,
 	},
 };
 
@@ -61,6 +72,8 @@ MODULE_FIRMWARE("a300_pm4.fw");
 MODULE_FIRMWARE("a300_pfp.fw");
 MODULE_FIRMWARE("a330_pm4.fw");
 MODULE_FIRMWARE("a330_pfp.fw");
+MODULE_FIRMWARE("a420_pm4.fw");
+MODULE_FIRMWARE("a420_pfp.fw");
 
 static inline bool _rev_match(uint8_t entry, uint8_t id)
 {
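
The new gpulist entry matches any A420 patch level: ADRENO_REV(4, 2, 0, ANY_ID) pins core/major/minor while ANY_ID wildcards the patchid, and .revn = 420 is what adreno_is_a420() keys on. A sketch of how a probed revision would be checked against an entry field by field, using the _rev_match() helper declared above (function and variable names illustrative):

	static bool rev_matches(struct adreno_rev entry, struct adreno_rev probed)
	{
		return _rev_match(entry.core, probed.core) &&
		       _rev_match(entry.major, probed.major) &&
		       _rev_match(entry.minor, probed.minor) &&
		       _rev_match(entry.patchid, probed.patchid);
	}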
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 6afa29167fee..aa873048308b 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -2,6 +2,8 @@
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published by
  * the Free Software Foundation.
@@ -63,19 +65,21 @@ int adreno_hw_init(struct msm_gpu *gpu)
 	}
 
 	/* Setup REG_CP_RB_CNTL: */
-	gpu_write(gpu, REG_AXXX_CP_RB_CNTL,
+	adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL,
 			/* size is log2(quad-words): */
 			AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) |
 			AXXX_CP_RB_CNTL_BLKSZ(ilog2(RB_BLKSIZE / 8)));
 
 	/* Setup ringbuffer address: */
-	gpu_write(gpu, REG_AXXX_CP_RB_BASE, gpu->rb_iova);
-	gpu_write(gpu, REG_AXXX_CP_RB_RPTR_ADDR, rbmemptr(adreno_gpu, rptr));
+	adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_BASE, gpu->rb_iova);
+	adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
+			rbmemptr(adreno_gpu, rptr));
 
 	/* Setup scratch/timestamp: */
-	gpu_write(gpu, REG_AXXX_SCRATCH_ADDR, rbmemptr(adreno_gpu, fence));
+	adreno_gpu_write(adreno_gpu, REG_ADRENO_SCRATCH_ADDR,
+			rbmemptr(adreno_gpu, fence));
 
-	gpu_write(gpu, REG_AXXX_SCRATCH_UMSK, 0x1);
+	adreno_gpu_write(adreno_gpu, REG_ADRENO_SCRATCH_UMSK, 0x1);
 
 	return 0;
 }
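
The BUFSZ comment above is terse: the field holds log2 of the ring size in quad-words (8-byte units). Worked through for a hypothetical 32 KiB ringbuffer:

	/* 32768 bytes / 8 = 4096 quad-words; ilog2(4096) == 12 */
	uint32_t bufsz = AXXX_CP_RB_CNTL_BUFSZ(ilog2((32 * 1024) / 8));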
@@ -151,7 +155,7 @@ int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 	OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
 	OUT_RING(ring, submit->fence);
 
-	if (adreno_is_a3xx(adreno_gpu)) {
+	if (adreno_is_a3xx(adreno_gpu) || adreno_is_a4xx(adreno_gpu)) {
 		/* Flush HLSQ lazy updates to make sure there is nothing
 		 * pending for indirect loads after the timestamp has
 		 * passed:
@@ -188,12 +192,13 @@ int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 
 void adreno_flush(struct msm_gpu *gpu)
 {
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 	uint32_t wptr = get_wptr(gpu->rb);
 
 	/* ensure writes to ringbuffer have hit system memory: */
 	mb();
 
-	gpu_write(gpu, REG_AXXX_CP_RB_WPTR, wptr);
+	adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_WPTR, wptr);
 }
 
 void adreno_idle(struct msm_gpu *gpu)
@@ -319,6 +324,12 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 	DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u",
 			gpu->fast_rate, gpu->slow_rate, gpu->bus_freq);
 
+	ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
+			adreno_gpu->info->name, "kgsl_3d0_reg_memory", "kgsl_3d0_irq",
+			RB_SIZE);
+	if (ret)
+		return ret;
+
 	ret = request_firmware(&adreno_gpu->pm4, adreno_gpu->info->pm4fw, drm->dev);
 	if (ret) {
 		dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n",
@@ -333,12 +344,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 		return ret;
 	}
 
-	ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
-			adreno_gpu->info->name, "kgsl_3d0_reg_memory", "kgsl_3d0_irq",
-			RB_SIZE);
-	if (ret)
-		return ret;
-
 	mmu = gpu->mmu;
 	if (mmu) {
 		ret = mmu->funcs->attach(mmu, iommu_ports,
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 52f051579753..a0cc30977e67 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -2,6 +2,8 @@
  * Copyright (C) 2013 Red Hat
  * Author: Rob Clark <robdclark@gmail.com>
  *
+ * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published by
  * the Free Software Foundation.
@@ -25,6 +27,81 @@
 #include "adreno_common.xml.h"
 #include "adreno_pm4.xml.h"
 
+#define REG_ADRENO_DEFINE(_offset, _reg) [_offset] = (_reg) + 1
+/**
+ * adreno_regs: List of registers that are used across all
+ * 3D devices. Each device type has a different offset value for the same
+ * register, so an array of register offsets is declared for every device
+ * and is indexed by the enumeration values defined in this enum.
+ */
+enum adreno_regs {
+	REG_ADRENO_CP_DEBUG,
+	REG_ADRENO_CP_ME_RAM_WADDR,
+	REG_ADRENO_CP_ME_RAM_DATA,
+	REG_ADRENO_CP_PFP_UCODE_DATA,
+	REG_ADRENO_CP_PFP_UCODE_ADDR,
+	REG_ADRENO_CP_WFI_PEND_CTR,
+	REG_ADRENO_CP_RB_BASE,
+	REG_ADRENO_CP_RB_RPTR_ADDR,
+	REG_ADRENO_CP_RB_RPTR,
+	REG_ADRENO_CP_RB_WPTR,
+	REG_ADRENO_CP_PROTECT_CTRL,
+	REG_ADRENO_CP_ME_CNTL,
+	REG_ADRENO_CP_RB_CNTL,
+	REG_ADRENO_CP_IB1_BASE,
+	REG_ADRENO_CP_IB1_BUFSZ,
+	REG_ADRENO_CP_IB2_BASE,
+	REG_ADRENO_CP_IB2_BUFSZ,
+	REG_ADRENO_CP_TIMESTAMP,
+	REG_ADRENO_CP_ME_RAM_RADDR,
+	REG_ADRENO_CP_ROQ_ADDR,
+	REG_ADRENO_CP_ROQ_DATA,
+	REG_ADRENO_CP_MERCIU_ADDR,
+	REG_ADRENO_CP_MERCIU_DATA,
+	REG_ADRENO_CP_MERCIU_DATA2,
+	REG_ADRENO_CP_MEQ_ADDR,
+	REG_ADRENO_CP_MEQ_DATA,
+	REG_ADRENO_CP_HW_FAULT,
+	REG_ADRENO_CP_PROTECT_STATUS,
+	REG_ADRENO_SCRATCH_ADDR,
+	REG_ADRENO_SCRATCH_UMSK,
+	REG_ADRENO_SCRATCH_REG2,
+	REG_ADRENO_RBBM_STATUS,
+	REG_ADRENO_RBBM_PERFCTR_CTL,
+	REG_ADRENO_RBBM_PERFCTR_LOAD_CMD0,
+	REG_ADRENO_RBBM_PERFCTR_LOAD_CMD1,
+	REG_ADRENO_RBBM_PERFCTR_LOAD_CMD2,
+	REG_ADRENO_RBBM_PERFCTR_PWR_1_LO,
+	REG_ADRENO_RBBM_INT_0_MASK,
+	REG_ADRENO_RBBM_INT_0_STATUS,
+	REG_ADRENO_RBBM_AHB_ERROR_STATUS,
+	REG_ADRENO_RBBM_PM_OVERRIDE2,
+	REG_ADRENO_RBBM_AHB_CMD,
+	REG_ADRENO_RBBM_INT_CLEAR_CMD,
+	REG_ADRENO_RBBM_SW_RESET_CMD,
+	REG_ADRENO_RBBM_CLOCK_CTL,
+	REG_ADRENO_RBBM_AHB_ME_SPLIT_STATUS,
+	REG_ADRENO_RBBM_AHB_PFP_SPLIT_STATUS,
+	REG_ADRENO_VPC_DEBUG_RAM_SEL,
+	REG_ADRENO_VPC_DEBUG_RAM_READ,
+	REG_ADRENO_VSC_SIZE_ADDRESS,
+	REG_ADRENO_VFD_CONTROL_0,
+	REG_ADRENO_VFD_INDEX_MAX,
+	REG_ADRENO_SP_VS_PVT_MEM_ADDR_REG,
+	REG_ADRENO_SP_FS_PVT_MEM_ADDR_REG,
+	REG_ADRENO_SP_VS_OBJ_START_REG,
+	REG_ADRENO_SP_FS_OBJ_START_REG,
+	REG_ADRENO_PA_SC_AA_CONFIG,
+	REG_ADRENO_SQ_GPR_MANAGEMENT,
+	REG_ADRENO_SQ_INST_STORE_MANAGMENT,
+	REG_ADRENO_TP0_CHICKEN,
+	REG_ADRENO_RBBM_RBBM_CTL,
+	REG_ADRENO_UCHE_INVALIDATE0,
+	REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_LO,
+	REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_HI,
+	REG_ADRENO_REGISTER_MAX,
+};
+
 struct adreno_rev {
 	uint8_t core;
 	uint8_t major;
@@ -76,6 +153,13 @@ struct adreno_gpu {
76 struct adreno_rbmemptrs *memptrs; 153 struct adreno_rbmemptrs *memptrs;
77 struct drm_gem_object *memptrs_bo; 154 struct drm_gem_object *memptrs_bo;
78 uint32_t memptrs_iova; 155 uint32_t memptrs_iova;
156
157 /*
158 * Register offsets are different between some GPUs.
 159 * GPU-specific offsets are exported by GPU-specific
 160 * code (a3xx_gpu.c) and stored in this common location.
161 */
162 const unsigned int *reg_offsets;
79}; 163};
80#define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base) 164#define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)
81 165
@@ -128,6 +212,16 @@ static inline bool adreno_is_a330v2(struct adreno_gpu *gpu)
128 return adreno_is_a330(gpu) && (gpu->rev.patchid > 0); 212 return adreno_is_a330(gpu) && (gpu->rev.patchid > 0);
129} 213}
130 214
215static inline bool adreno_is_a4xx(struct adreno_gpu *gpu)
216{
217 return (gpu->revn >= 400) && (gpu->revn < 500);
218}
219
220static inline int adreno_is_a420(struct adreno_gpu *gpu)
221{
222 return gpu->revn == 420;
223}
224
131int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value); 225int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
132int adreno_hw_init(struct msm_gpu *gpu); 226int adreno_hw_init(struct msm_gpu *gpu);
133uint32_t adreno_last_fence(struct msm_gpu *gpu); 227uint32_t adreno_last_fence(struct msm_gpu *gpu);
@@ -171,5 +265,37 @@ OUT_PKT3(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
171 OUT_RING(ring, CP_TYPE3_PKT | ((cnt-1) << 16) | ((opcode & 0xFF) << 8)); 265 OUT_RING(ring, CP_TYPE3_PKT | ((cnt-1) << 16) | ((opcode & 0xFF) << 8));
172} 266}
173 267
268/*
 269 * adreno_reg_check() - Checks the validity of a register enum
270 * @gpu: Pointer to struct adreno_gpu
271 * @offset_name: The register enum that is checked
272 */
273static inline bool adreno_reg_check(struct adreno_gpu *gpu,
274 enum adreno_regs offset_name)
275{
276 if (offset_name >= REG_ADRENO_REGISTER_MAX ||
277 !gpu->reg_offsets[offset_name]) {
278 BUG();
279 }
280 return true;
281}
282
283static inline u32 adreno_gpu_read(struct adreno_gpu *gpu,
284 enum adreno_regs offset_name)
285{
286 u32 reg = gpu->reg_offsets[offset_name];
287 u32 val = 0;
 288 if (adreno_reg_check(gpu, offset_name))
289 val = gpu_read(&gpu->base, reg - 1);
290 return val;
291}
292
293static inline void adreno_gpu_write(struct adreno_gpu *gpu,
294 enum adreno_regs offset_name, u32 data)
295{
296 u32 reg = gpu->reg_offsets[offset_name];
 297 if (adreno_reg_check(gpu, offset_name))
298 gpu_write(&gpu->base, reg - 1, data);
299}
174 300
175#endif /* __ADRENO_GPU_H__ */ 301#endif /* __ADRENO_GPU_H__ */
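The block above introduces an indirection table so common adreno code can name GPU-generation-specific register offsets through one stable enum: REG_ADRENO_DEFINE() stores offset + 1, so a zero slot means "not defined for this GPU" (trapped by adreno_reg_check()), and adreno_gpu_read()/adreno_gpu_write() subtract the 1 again before touching hardware. A minimal standalone sketch of the same scheme follows; the enum entries and numeric offsets are invented for illustration, not the real a3xx values.

/*
 * Minimal sketch of the offset-table scheme; enum entries and
 * offsets below are placeholders, not real adreno values.
 */
#include <stdio.h>

enum demo_regs { REG_CP_RB_BASE, REG_CP_RB_WPTR, REG_MAX };

/* like REG_ADRENO_DEFINE: store offset + 1 so an unset slot reads as 0 */
#define REG_DEFINE(_idx, _off) [_idx] = (_off) + 1

static const unsigned int demo_offsets[REG_MAX] = {
	REG_DEFINE(REG_CP_RB_BASE, 0x01c0),
	/* REG_CP_RB_WPTR deliberately left out: slot stays 0 -> invalid */
};

static int reg_check(const unsigned int *offsets, enum demo_regs r)
{
	return (r < REG_MAX) && offsets[r];
}

int main(void)
{
	if (reg_check(demo_offsets, REG_CP_RB_BASE))
		/* subtract the +1 again before using the offset */
		printf("RB_BASE at 0x%04x\n", demo_offsets[REG_CP_RB_BASE] - 1);
	if (!reg_check(demo_offsets, REG_CP_RB_WPTR))
		printf("RB_WPTR not defined for this GPU\n");
	return 0;
}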
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
index 6ef43f66c30a..6a75cee94d81 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -11,10 +11,10 @@ The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15) 11- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 364 bytes, from 2013-11-30 14:47:15)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30) 13- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2014-06-02 15:21:30)
14- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 9859 bytes, from 2014-06-02 15:21:30) 14- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10551 bytes, from 2014-11-13 22:44:30)
15- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14960 bytes, from 2014-07-27 17:22:13) 15- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 15053 bytes, from 2014-11-09 15:45:47)
16- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 58020 bytes, from 2014-08-01 12:22:48) 16- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 63169 bytes, from 2014-11-13 22:44:18)
17- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 41068 bytes, from 2014-08-01 12:22:48) 17- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 49097 bytes, from 2014-11-14 15:38:00)
18 18
19Copyright (C) 2013-2014 by the following authors: 19Copyright (C) 2013-2014 by the following authors:
20- Rob Clark <robdclark@gmail.com> (robclark) 20- Rob Clark <robdclark@gmail.com> (robclark)
@@ -157,6 +157,7 @@ enum adreno_pm4_type3_packets {
157 CP_IM_STORE = 44, 157 CP_IM_STORE = 44,
158 CP_SET_DRAW_INIT_FLAGS = 75, 158 CP_SET_DRAW_INIT_FLAGS = 75,
159 CP_SET_PROTECTED_MODE = 95, 159 CP_SET_PROTECTED_MODE = 95,
160 CP_BOOTSTRAP_UCODE = 111,
160 CP_LOAD_STATE = 48, 161 CP_LOAD_STATE = 48,
161 CP_COND_INDIRECT_BUFFER_PFE = 58, 162 CP_COND_INDIRECT_BUFFER_PFE = 58,
162 CP_COND_INDIRECT_BUFFER_PFD = 50, 163 CP_COND_INDIRECT_BUFFER_PFD = 50,
@@ -278,11 +279,11 @@ static inline uint32_t CP_DRAW_INDX_1_INDEX_SIZE(enum pc_di_index_size val)
278#define CP_DRAW_INDX_1_NOT_EOP 0x00001000 279#define CP_DRAW_INDX_1_NOT_EOP 0x00001000
279#define CP_DRAW_INDX_1_SMALL_INDEX 0x00002000 280#define CP_DRAW_INDX_1_SMALL_INDEX 0x00002000
280#define CP_DRAW_INDX_1_PRE_DRAW_INITIATOR_ENABLE 0x00004000 281#define CP_DRAW_INDX_1_PRE_DRAW_INITIATOR_ENABLE 0x00004000
281#define CP_DRAW_INDX_1_NUM_INDICES__MASK 0xffff0000 282#define CP_DRAW_INDX_1_NUM_INSTANCES__MASK 0xff000000
282#define CP_DRAW_INDX_1_NUM_INDICES__SHIFT 16 283#define CP_DRAW_INDX_1_NUM_INSTANCES__SHIFT 24
283static inline uint32_t CP_DRAW_INDX_1_NUM_INDICES(uint32_t val) 284static inline uint32_t CP_DRAW_INDX_1_NUM_INSTANCES(uint32_t val)
284{ 285{
285 return ((val) << CP_DRAW_INDX_1_NUM_INDICES__SHIFT) & CP_DRAW_INDX_1_NUM_INDICES__MASK; 286 return ((val) << CP_DRAW_INDX_1_NUM_INSTANCES__SHIFT) & CP_DRAW_INDX_1_NUM_INSTANCES__MASK;
286} 287}
287 288
288#define REG_CP_DRAW_INDX_2 0x00000002 289#define REG_CP_DRAW_INDX_2 0x00000002
@@ -293,20 +294,20 @@ static inline uint32_t CP_DRAW_INDX_2_NUM_INDICES(uint32_t val)
293 return ((val) << CP_DRAW_INDX_2_NUM_INDICES__SHIFT) & CP_DRAW_INDX_2_NUM_INDICES__MASK; 294 return ((val) << CP_DRAW_INDX_2_NUM_INDICES__SHIFT) & CP_DRAW_INDX_2_NUM_INDICES__MASK;
294} 295}
295 296
296#define REG_CP_DRAW_INDX_2 0x00000002 297#define REG_CP_DRAW_INDX_3 0x00000003
297#define CP_DRAW_INDX_2_INDX_BASE__MASK 0xffffffff 298#define CP_DRAW_INDX_3_INDX_BASE__MASK 0xffffffff
298#define CP_DRAW_INDX_2_INDX_BASE__SHIFT 0 299#define CP_DRAW_INDX_3_INDX_BASE__SHIFT 0
299static inline uint32_t CP_DRAW_INDX_2_INDX_BASE(uint32_t val) 300static inline uint32_t CP_DRAW_INDX_3_INDX_BASE(uint32_t val)
300{ 301{
301 return ((val) << CP_DRAW_INDX_2_INDX_BASE__SHIFT) & CP_DRAW_INDX_2_INDX_BASE__MASK; 302 return ((val) << CP_DRAW_INDX_3_INDX_BASE__SHIFT) & CP_DRAW_INDX_3_INDX_BASE__MASK;
302} 303}
303 304
304#define REG_CP_DRAW_INDX_2 0x00000002 305#define REG_CP_DRAW_INDX_4 0x00000004
305#define CP_DRAW_INDX_2_INDX_SIZE__MASK 0xffffffff 306#define CP_DRAW_INDX_4_INDX_SIZE__MASK 0xffffffff
306#define CP_DRAW_INDX_2_INDX_SIZE__SHIFT 0 307#define CP_DRAW_INDX_4_INDX_SIZE__SHIFT 0
307static inline uint32_t CP_DRAW_INDX_2_INDX_SIZE(uint32_t val) 308static inline uint32_t CP_DRAW_INDX_4_INDX_SIZE(uint32_t val)
308{ 309{
309 return ((val) << CP_DRAW_INDX_2_INDX_SIZE__SHIFT) & CP_DRAW_INDX_2_INDX_SIZE__MASK; 310 return ((val) << CP_DRAW_INDX_4_INDX_SIZE__SHIFT) & CP_DRAW_INDX_4_INDX_SIZE__MASK;
310} 311}
311 312
312#define REG_CP_DRAW_INDX_2_0 0x00000000 313#define REG_CP_DRAW_INDX_2_0 0x00000000
@@ -345,11 +346,11 @@ static inline uint32_t CP_DRAW_INDX_2_1_INDEX_SIZE(enum pc_di_index_size val)
345#define CP_DRAW_INDX_2_1_NOT_EOP 0x00001000 346#define CP_DRAW_INDX_2_1_NOT_EOP 0x00001000
346#define CP_DRAW_INDX_2_1_SMALL_INDEX 0x00002000 347#define CP_DRAW_INDX_2_1_SMALL_INDEX 0x00002000
347#define CP_DRAW_INDX_2_1_PRE_DRAW_INITIATOR_ENABLE 0x00004000 348#define CP_DRAW_INDX_2_1_PRE_DRAW_INITIATOR_ENABLE 0x00004000
348#define CP_DRAW_INDX_2_1_NUM_INDICES__MASK 0xffff0000 349#define CP_DRAW_INDX_2_1_NUM_INSTANCES__MASK 0xff000000
349#define CP_DRAW_INDX_2_1_NUM_INDICES__SHIFT 16 350#define CP_DRAW_INDX_2_1_NUM_INSTANCES__SHIFT 24
350static inline uint32_t CP_DRAW_INDX_2_1_NUM_INDICES(uint32_t val) 351static inline uint32_t CP_DRAW_INDX_2_1_NUM_INSTANCES(uint32_t val)
351{ 352{
352 return ((val) << CP_DRAW_INDX_2_1_NUM_INDICES__SHIFT) & CP_DRAW_INDX_2_1_NUM_INDICES__MASK; 353 return ((val) << CP_DRAW_INDX_2_1_NUM_INSTANCES__SHIFT) & CP_DRAW_INDX_2_1_NUM_INSTANCES__MASK;
353} 354}
354 355
355#define REG_CP_DRAW_INDX_2_2 0x00000002 356#define REG_CP_DRAW_INDX_2_2 0x00000002
@@ -388,11 +389,11 @@ static inline uint32_t CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(enum pc_di_index_size va
388#define CP_DRAW_INDX_OFFSET_0_NOT_EOP 0x00001000 389#define CP_DRAW_INDX_OFFSET_0_NOT_EOP 0x00001000
389#define CP_DRAW_INDX_OFFSET_0_SMALL_INDEX 0x00002000 390#define CP_DRAW_INDX_OFFSET_0_SMALL_INDEX 0x00002000
390#define CP_DRAW_INDX_OFFSET_0_PRE_DRAW_INITIATOR_ENABLE 0x00004000 391#define CP_DRAW_INDX_OFFSET_0_PRE_DRAW_INITIATOR_ENABLE 0x00004000
391#define CP_DRAW_INDX_OFFSET_0_NUM_INDICES__MASK 0xffff0000 392#define CP_DRAW_INDX_OFFSET_0_NUM_INSTANCES__MASK 0xffff0000
392#define CP_DRAW_INDX_OFFSET_0_NUM_INDICES__SHIFT 16 393#define CP_DRAW_INDX_OFFSET_0_NUM_INSTANCES__SHIFT 16
393static inline uint32_t CP_DRAW_INDX_OFFSET_0_NUM_INDICES(uint32_t val) 394static inline uint32_t CP_DRAW_INDX_OFFSET_0_NUM_INSTANCES(uint32_t val)
394{ 395{
395 return ((val) << CP_DRAW_INDX_OFFSET_0_NUM_INDICES__SHIFT) & CP_DRAW_INDX_OFFSET_0_NUM_INDICES__MASK; 396 return ((val) << CP_DRAW_INDX_OFFSET_0_NUM_INSTANCES__SHIFT) & CP_DRAW_INDX_OFFSET_0_NUM_INSTANCES__MASK;
396} 397}
397 398
398#define REG_CP_DRAW_INDX_OFFSET_1 0x00000001 399#define REG_CP_DRAW_INDX_OFFSET_1 0x00000001
@@ -405,20 +406,22 @@ static inline uint32_t CP_DRAW_INDX_OFFSET_2_NUM_INDICES(uint32_t val)
405 return ((val) << CP_DRAW_INDX_OFFSET_2_NUM_INDICES__SHIFT) & CP_DRAW_INDX_OFFSET_2_NUM_INDICES__MASK; 406 return ((val) << CP_DRAW_INDX_OFFSET_2_NUM_INDICES__SHIFT) & CP_DRAW_INDX_OFFSET_2_NUM_INDICES__MASK;
406} 407}
407 408
408#define REG_CP_DRAW_INDX_OFFSET_2 0x00000002 409#define REG_CP_DRAW_INDX_OFFSET_3 0x00000003
409#define CP_DRAW_INDX_OFFSET_2_INDX_BASE__MASK 0xffffffff 410
410#define CP_DRAW_INDX_OFFSET_2_INDX_BASE__SHIFT 0 411#define REG_CP_DRAW_INDX_OFFSET_4 0x00000004
411static inline uint32_t CP_DRAW_INDX_OFFSET_2_INDX_BASE(uint32_t val) 412#define CP_DRAW_INDX_OFFSET_4_INDX_BASE__MASK 0xffffffff
413#define CP_DRAW_INDX_OFFSET_4_INDX_BASE__SHIFT 0
414static inline uint32_t CP_DRAW_INDX_OFFSET_4_INDX_BASE(uint32_t val)
412{ 415{
413 return ((val) << CP_DRAW_INDX_OFFSET_2_INDX_BASE__SHIFT) & CP_DRAW_INDX_OFFSET_2_INDX_BASE__MASK; 416 return ((val) << CP_DRAW_INDX_OFFSET_4_INDX_BASE__SHIFT) & CP_DRAW_INDX_OFFSET_4_INDX_BASE__MASK;
414} 417}
415 418
416#define REG_CP_DRAW_INDX_OFFSET_2 0x00000002 419#define REG_CP_DRAW_INDX_OFFSET_5 0x00000005
417#define CP_DRAW_INDX_OFFSET_2_INDX_SIZE__MASK 0xffffffff 420#define CP_DRAW_INDX_OFFSET_5_INDX_SIZE__MASK 0xffffffff
418#define CP_DRAW_INDX_OFFSET_2_INDX_SIZE__SHIFT 0 421#define CP_DRAW_INDX_OFFSET_5_INDX_SIZE__SHIFT 0
419static inline uint32_t CP_DRAW_INDX_OFFSET_2_INDX_SIZE(uint32_t val) 422static inline uint32_t CP_DRAW_INDX_OFFSET_5_INDX_SIZE(uint32_t val)
420{ 423{
421 return ((val) << CP_DRAW_INDX_OFFSET_2_INDX_SIZE__SHIFT) & CP_DRAW_INDX_OFFSET_2_INDX_SIZE__MASK; 424 return ((val) << CP_DRAW_INDX_OFFSET_5_INDX_SIZE__SHIFT) & CP_DRAW_INDX_OFFSET_5_INDX_SIZE__MASK;
422} 425}
423 426
424#define REG_CP_SET_DRAW_STATE_0 0x00000000 427#define REG_CP_SET_DRAW_STATE_0 0x00000000
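Everything in this generated header follows one convention: each field gets a __MASK and __SHIFT define plus an inline packer that shifts a value into place and masks off overflow, so several fields can be OR'd into a single command dword. A small standalone sketch of composing such a dword; the mask/shift values are copied from the NUM_INSTANCES field above, while the extra flag bit is illustrative.

/*
 * Sketch of how the generated MASK/SHIFT packers compose into a
 * command dword; the flag bit is illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

#define NUM_INSTANCES__MASK	0xff000000
#define NUM_INSTANCES__SHIFT	24

static inline uint32_t NUM_INSTANCES(uint32_t val)
{
	/* shift into place, then mask off anything past the field */
	return (val << NUM_INSTANCES__SHIFT) & NUM_INSTANCES__MASK;
}

int main(void)
{
	uint32_t dword = 0;

	dword |= NUM_INSTANCES(4);	/* 4 instances */
	dword |= 0x00002000;		/* e.g. a SMALL_INDEX-style flag bit */
	printf("packed: 0x%08x\n", (unsigned)dword);	/* 0x04002000 */
	return 0;
}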
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
index e965898dfda6..448438b759b4 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.xml.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -10,12 +10,12 @@ git clone https://github.com/freedreno/envytools.git
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20457 bytes, from 2014-08-01 12:22:48) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39)
14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2014-07-17 15:34:33) 14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39)
15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-07-17 15:34:33) 15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) 17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-08-01 12:23:53) 18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) 19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30) 20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30)
21 21
diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
index f2bdda957205..c102a7f074ac 100644
--- a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
+++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
@@ -10,12 +10,12 @@ git clone https://github.com/freedreno/envytools.git
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20457 bytes, from 2014-08-01 12:22:48) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39)
14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2014-07-17 15:34:33) 14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39)
15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-07-17 15:34:33) 15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) 17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-08-01 12:23:53) 18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) 19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30) 20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30)
21 21
diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
index e5b071ffd865..a900134bdf33 100644
--- a/drivers/gpu/drm/msm/dsi/sfpb.xml.h
+++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
@@ -10,12 +10,12 @@ git clone https://github.com/freedreno/envytools.git
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20457 bytes, from 2014-08-01 12:22:48) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39)
14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2014-07-17 15:34:33) 14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39)
15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-07-17 15:34:33) 15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) 17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-08-01 12:23:53) 18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) 19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30) 20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30)
21 21
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 9d00dcba6959..062c68725376 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -15,6 +15,7 @@
15 * this program. If not, see <http://www.gnu.org/licenses/>. 15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */ 16 */
17 17
18#include <linux/of_irq.h>
18#include "hdmi.h" 19#include "hdmi.h"
19 20
20void hdmi_set_mode(struct hdmi *hdmi, bool power_on) 21void hdmi_set_mode(struct hdmi *hdmi, bool power_on)
@@ -39,7 +40,7 @@ void hdmi_set_mode(struct hdmi *hdmi, bool power_on)
39 power_on ? "Enable" : "Disable", ctrl); 40 power_on ? "Enable" : "Disable", ctrl);
40} 41}
41 42
42irqreturn_t hdmi_irq(int irq, void *dev_id) 43static irqreturn_t hdmi_irq(int irq, void *dev_id)
43{ 44{
44 struct hdmi *hdmi = dev_id; 45 struct hdmi *hdmi = dev_id;
45 46
@@ -54,9 +55,8 @@ irqreturn_t hdmi_irq(int irq, void *dev_id)
54 return IRQ_HANDLED; 55 return IRQ_HANDLED;
55} 56}
56 57
57void hdmi_destroy(struct kref *kref) 58static void hdmi_destroy(struct hdmi *hdmi)
58{ 59{
59 struct hdmi *hdmi = container_of(kref, struct hdmi, refcount);
60 struct hdmi_phy *phy = hdmi->phy; 60 struct hdmi_phy *phy = hdmi->phy;
61 61
62 if (phy) 62 if (phy)
@@ -68,37 +68,24 @@ void hdmi_destroy(struct kref *kref)
68 platform_set_drvdata(hdmi->pdev, NULL); 68 platform_set_drvdata(hdmi->pdev, NULL);
69} 69}
70 70
 71/* initialize connector */ 71/* construct hdmi at bind/probe time and grab all the resources; if
 72struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder) 72 * we are going to -EPROBE_DEFER, we want to do it here rather than
 73 * later, at modeset_init() time
74 */
75static struct hdmi *hdmi_init(struct platform_device *pdev)
73{ 76{
77 struct hdmi_platform_config *config = pdev->dev.platform_data;
74 struct hdmi *hdmi = NULL; 78 struct hdmi *hdmi = NULL;
75 struct msm_drm_private *priv = dev->dev_private;
76 struct platform_device *pdev = priv->hdmi_pdev;
77 struct hdmi_platform_config *config;
78 int i, ret; 79 int i, ret;
79 80
80 if (!pdev) { 81 hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
81 dev_err(dev->dev, "no hdmi device\n");
82 ret = -ENXIO;
83 goto fail;
84 }
85
86 config = pdev->dev.platform_data;
87
88 hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL);
89 if (!hdmi) { 82 if (!hdmi) {
90 ret = -ENOMEM; 83 ret = -ENOMEM;
91 goto fail; 84 goto fail;
92 } 85 }
93 86
94 kref_init(&hdmi->refcount);
95
96 hdmi->dev = dev;
97 hdmi->pdev = pdev; 87 hdmi->pdev = pdev;
98 hdmi->config = config; 88 hdmi->config = config;
99 hdmi->encoder = encoder;
100
101 hdmi_audio_infoframe_init(&hdmi->audio.infoframe);
102 89
 103 /* not sure which phy maps to which msm.. probably I'm missing some */ 90 /* not sure which phy maps to which msm.. probably I'm missing some */
104 if (config->phy_init) 91 if (config->phy_init)
@@ -108,7 +95,7 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
108 95
109 if (IS_ERR(hdmi->phy)) { 96 if (IS_ERR(hdmi->phy)) {
110 ret = PTR_ERR(hdmi->phy); 97 ret = PTR_ERR(hdmi->phy);
111 dev_err(dev->dev, "failed to load phy: %d\n", ret); 98 dev_err(&pdev->dev, "failed to load phy: %d\n", ret);
112 hdmi->phy = NULL; 99 hdmi->phy = NULL;
113 goto fail; 100 goto fail;
114 } 101 }
@@ -127,7 +114,7 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
127 config->hpd_reg_names[i]); 114 config->hpd_reg_names[i]);
128 if (IS_ERR(reg)) { 115 if (IS_ERR(reg)) {
129 ret = PTR_ERR(reg); 116 ret = PTR_ERR(reg);
130 dev_err(dev->dev, "failed to get hpd regulator: %s (%d)\n", 117 dev_err(&pdev->dev, "failed to get hpd regulator: %s (%d)\n",
131 config->hpd_reg_names[i], ret); 118 config->hpd_reg_names[i], ret);
132 goto fail; 119 goto fail;
133 } 120 }
@@ -143,7 +130,7 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
143 config->pwr_reg_names[i]); 130 config->pwr_reg_names[i]);
144 if (IS_ERR(reg)) { 131 if (IS_ERR(reg)) {
145 ret = PTR_ERR(reg); 132 ret = PTR_ERR(reg);
146 dev_err(dev->dev, "failed to get pwr regulator: %s (%d)\n", 133 dev_err(&pdev->dev, "failed to get pwr regulator: %s (%d)\n",
147 config->pwr_reg_names[i], ret); 134 config->pwr_reg_names[i], ret);
148 goto fail; 135 goto fail;
149 } 136 }
@@ -158,7 +145,7 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
158 clk = devm_clk_get(&pdev->dev, config->hpd_clk_names[i]); 145 clk = devm_clk_get(&pdev->dev, config->hpd_clk_names[i]);
159 if (IS_ERR(clk)) { 146 if (IS_ERR(clk)) {
160 ret = PTR_ERR(clk); 147 ret = PTR_ERR(clk);
161 dev_err(dev->dev, "failed to get hpd clk: %s (%d)\n", 148 dev_err(&pdev->dev, "failed to get hpd clk: %s (%d)\n",
162 config->hpd_clk_names[i], ret); 149 config->hpd_clk_names[i], ret);
163 goto fail; 150 goto fail;
164 } 151 }
@@ -173,7 +160,7 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
173 clk = devm_clk_get(&pdev->dev, config->pwr_clk_names[i]); 160 clk = devm_clk_get(&pdev->dev, config->pwr_clk_names[i]);
174 if (IS_ERR(clk)) { 161 if (IS_ERR(clk)) {
175 ret = PTR_ERR(clk); 162 ret = PTR_ERR(clk);
176 dev_err(dev->dev, "failed to get pwr clk: %s (%d)\n", 163 dev_err(&pdev->dev, "failed to get pwr clk: %s (%d)\n",
177 config->pwr_clk_names[i], ret); 164 config->pwr_clk_names[i], ret);
178 goto fail; 165 goto fail;
179 } 166 }
@@ -184,11 +171,40 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
184 hdmi->i2c = hdmi_i2c_init(hdmi); 171 hdmi->i2c = hdmi_i2c_init(hdmi);
185 if (IS_ERR(hdmi->i2c)) { 172 if (IS_ERR(hdmi->i2c)) {
186 ret = PTR_ERR(hdmi->i2c); 173 ret = PTR_ERR(hdmi->i2c);
187 dev_err(dev->dev, "failed to get i2c: %d\n", ret); 174 dev_err(&pdev->dev, "failed to get i2c: %d\n", ret);
188 hdmi->i2c = NULL; 175 hdmi->i2c = NULL;
189 goto fail; 176 goto fail;
190 } 177 }
191 178
179 return hdmi;
180
181fail:
182 if (hdmi)
183 hdmi_destroy(hdmi);
184
185 return ERR_PTR(ret);
186}
187
 188/* Second part of initialization: the drm/kms-level modeset_init(),
 189 * which constructs/initializes the mode objects etc., is called from
 190 * the master driver (not from the hdmi sub-device's probe/bind!).
 191 *
 192 * Any resource (regulator/clk/etc) which could be missing at boot
 193 * should be handled in hdmi_init() so that the failure happens in the
 194 * hdmi sub-device's probe.
195 */
196int hdmi_modeset_init(struct hdmi *hdmi,
197 struct drm_device *dev, struct drm_encoder *encoder)
198{
199 struct msm_drm_private *priv = dev->dev_private;
200 struct platform_device *pdev = hdmi->pdev;
201 int ret;
202
203 hdmi->dev = dev;
204 hdmi->encoder = encoder;
205
206 hdmi_audio_infoframe_init(&hdmi->audio.infoframe);
207
192 hdmi->bridge = hdmi_bridge_init(hdmi); 208 hdmi->bridge = hdmi_bridge_init(hdmi);
193 if (IS_ERR(hdmi->bridge)) { 209 if (IS_ERR(hdmi->bridge)) {
194 ret = PTR_ERR(hdmi->bridge); 210 ret = PTR_ERR(hdmi->bridge);
@@ -205,22 +221,20 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
205 goto fail; 221 goto fail;
206 } 222 }
207 223
208 if (!config->shared_irq) { 224 hdmi->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
209 hdmi->irq = platform_get_irq(pdev, 0); 225 if (hdmi->irq < 0) {
210 if (hdmi->irq < 0) { 226 ret = hdmi->irq;
211 ret = hdmi->irq; 227 dev_err(dev->dev, "failed to get irq: %d\n", ret);
212 dev_err(dev->dev, "failed to get irq: %d\n", ret); 228 goto fail;
213 goto fail; 229 }
214 }
215 230
216 ret = devm_request_threaded_irq(&pdev->dev, hdmi->irq, 231 ret = devm_request_irq(&pdev->dev, hdmi->irq,
217 NULL, hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, 232 hdmi_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
218 "hdmi_isr", hdmi); 233 "hdmi_isr", hdmi);
219 if (ret < 0) { 234 if (ret < 0) {
220 dev_err(dev->dev, "failed to request IRQ%u: %d\n", 235 dev_err(dev->dev, "failed to request IRQ%u: %d\n",
221 hdmi->irq, ret); 236 hdmi->irq, ret);
222 goto fail; 237 goto fail;
223 }
224 } 238 }
225 239
226 encoder->bridge = hdmi->bridge; 240 encoder->bridge = hdmi->bridge;
@@ -230,19 +244,20 @@ struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder)
230 244
231 platform_set_drvdata(pdev, hdmi); 245 platform_set_drvdata(pdev, hdmi);
232 246
233 return hdmi; 247 return 0;
234 248
235fail: 249fail:
236 if (hdmi) { 250 /* bridge/connector are normally destroyed by drm: */
237 /* bridge/connector are normally destroyed by drm: */ 251 if (hdmi->bridge) {
238 if (hdmi->bridge) 252 hdmi->bridge->funcs->destroy(hdmi->bridge);
239 hdmi->bridge->funcs->destroy(hdmi->bridge); 253 hdmi->bridge = NULL;
240 if (hdmi->connector) 254 }
241 hdmi->connector->funcs->destroy(hdmi->connector); 255 if (hdmi->connector) {
242 hdmi_destroy(&hdmi->refcount); 256 hdmi->connector->funcs->destroy(hdmi->connector);
257 hdmi->connector = NULL;
243 } 258 }
244 259
245 return ERR_PTR(ret); 260 return ret;
246} 261}
247 262
248/* 263/*
@@ -251,13 +266,6 @@ fail:
251 266
252#include <linux/of_gpio.h> 267#include <linux/of_gpio.h>
253 268
254static void set_hdmi_pdev(struct drm_device *dev,
255 struct platform_device *pdev)
256{
257 struct msm_drm_private *priv = dev->dev_private;
258 priv->hdmi_pdev = pdev;
259}
260
261#ifdef CONFIG_OF 269#ifdef CONFIG_OF
262static int get_gpio(struct device *dev, struct device_node *of_node, const char *name) 270static int get_gpio(struct device *dev, struct device_node *of_node, const char *name)
263{ 271{
@@ -278,7 +286,10 @@ static int get_gpio(struct device *dev, struct device_node *of_node, const char
278 286
279static int hdmi_bind(struct device *dev, struct device *master, void *data) 287static int hdmi_bind(struct device *dev, struct device *master, void *data)
280{ 288{
289 struct drm_device *drm = dev_get_drvdata(master);
290 struct msm_drm_private *priv = drm->dev_private;
281 static struct hdmi_platform_config config = {}; 291 static struct hdmi_platform_config config = {};
292 struct hdmi *hdmi;
282#ifdef CONFIG_OF 293#ifdef CONFIG_OF
283 struct device_node *of_node = dev->of_node; 294 struct device_node *of_node = dev->of_node;
284 295
@@ -298,7 +309,6 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
298 config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names); 309 config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
299 config.pwr_clk_names = pwr_clk_names; 310 config.pwr_clk_names = pwr_clk_names;
300 config.pwr_clk_cnt = ARRAY_SIZE(pwr_clk_names); 311 config.pwr_clk_cnt = ARRAY_SIZE(pwr_clk_names);
301 config.shared_irq = true;
302 } else if (of_device_is_compatible(of_node, "qcom,hdmi-tx-8960")) { 312 } else if (of_device_is_compatible(of_node, "qcom,hdmi-tx-8960")) {
303 static const char *hpd_clk_names[] = {"core_clk", "master_iface_clk", "slave_iface_clk"}; 313 static const char *hpd_clk_names[] = {"core_clk", "master_iface_clk", "slave_iface_clk"};
304 static const char *hpd_reg_names[] = {"core-vdda", "hdmi-mux"}; 314 static const char *hpd_reg_names[] = {"core-vdda", "hdmi-mux"};
@@ -369,14 +379,22 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
369 } 379 }
370#endif 380#endif
371 dev->platform_data = &config; 381 dev->platform_data = &config;
372 set_hdmi_pdev(dev_get_drvdata(master), to_platform_device(dev)); 382 hdmi = hdmi_init(to_platform_device(dev));
383 if (IS_ERR(hdmi))
384 return PTR_ERR(hdmi);
385 priv->hdmi = hdmi;
373 return 0; 386 return 0;
374} 387}
375 388
376static void hdmi_unbind(struct device *dev, struct device *master, 389static void hdmi_unbind(struct device *dev, struct device *master,
377 void *data) 390 void *data)
378{ 391{
379 set_hdmi_pdev(dev_get_drvdata(master), NULL); 392 struct drm_device *drm = dev_get_drvdata(master);
393 struct msm_drm_private *priv = drm->dev_private;
394 if (priv->hdmi) {
395 hdmi_destroy(priv->hdmi);
396 priv->hdmi = NULL;
397 }
380} 398}
381 399
382static const struct component_ops hdmi_ops = { 400static const struct component_ops hdmi_ops = {
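The rework above splits hdmi bring-up into two phases: hdmi_init() runs at component bind/probe time and grabs every clock/regulator/i2c resource, so a missing dependency can propagate -EPROBE_DEFER out of the sub-device's probe, while hdmi_modeset_init() is called later by the master driver and only constructs the drm-level bridge/connector and requests the irq (now via irq_of_parse_and_map() instead of the shared_irq flag). A standalone model of the two-phase pattern follows; the types are stubs and -EPROBE_DEFER's value is copied from the kernel's internal errno list.

/*
 * Standalone model of the probe-time vs modeset-time split; kernel
 * types are stubbed so the control flow can run anywhere.
 */
#include <stdio.h>
#include <errno.h>

/* stand-in for the kernel's internal -EPROBE_DEFER */
#define EPROBE_DEFER	517

struct hdmi { int have_clk; int modeset_done; };

/* phase 1: bind/probe time -- grab resources so deferral happens early */
static int hdmi_init(struct hdmi *hdmi, int clk_available)
{
	if (!clk_available)
		return -EPROBE_DEFER;	/* retry bind once the clk provider probes */
	hdmi->have_clk = 1;
	return 0;
}

/* phase 2: master driver's modeset_init -- only drm object setup here */
static int hdmi_modeset_init(struct hdmi *hdmi)
{
	if (!hdmi->have_clk)
		return -ENXIO;	/* should never happen: phase 1 ran first */
	hdmi->modeset_done = 1;
	return 0;
}

int main(void)
{
	struct hdmi hdmi = { 0 };

	if (hdmi_init(&hdmi, 0) == -EPROBE_DEFER)
		printf("bind deferred until the clock provider probes\n");
	if (!hdmi_init(&hdmi, 1) && !hdmi_modeset_init(&hdmi))
		printf("hdmi fully initialized\n");
	return 0;
}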
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index b981995410b5..43e654f751b7 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -38,8 +38,6 @@ struct hdmi_audio {
38}; 38};
39 39
40struct hdmi { 40struct hdmi {
41 struct kref refcount;
42
43 struct drm_device *dev; 41 struct drm_device *dev;
44 struct platform_device *pdev; 42 struct platform_device *pdev;
45 43
@@ -97,13 +95,9 @@ struct hdmi_platform_config {
97 /* gpio's: */ 95 /* gpio's: */
98 int ddc_clk_gpio, ddc_data_gpio, hpd_gpio, mux_en_gpio, mux_sel_gpio; 96 int ddc_clk_gpio, ddc_data_gpio, hpd_gpio, mux_en_gpio, mux_sel_gpio;
99 int mux_lpm_gpio; 97 int mux_lpm_gpio;
100
101 /* older devices had their own irq, mdp5+ it is shared w/ mdp: */
102 bool shared_irq;
103}; 98};
104 99
105void hdmi_set_mode(struct hdmi *hdmi, bool power_on); 100void hdmi_set_mode(struct hdmi *hdmi, bool power_on);
106void hdmi_destroy(struct kref *kref);
107 101
108static inline void hdmi_write(struct hdmi *hdmi, u32 reg, u32 data) 102static inline void hdmi_write(struct hdmi *hdmi, u32 reg, u32 data)
109{ 103{
@@ -115,17 +109,6 @@ static inline u32 hdmi_read(struct hdmi *hdmi, u32 reg)
115 return msm_readl(hdmi->mmio + reg); 109 return msm_readl(hdmi->mmio + reg);
116} 110}
117 111
118static inline struct hdmi * hdmi_reference(struct hdmi *hdmi)
119{
120 kref_get(&hdmi->refcount);
121 return hdmi;
122}
123
124static inline void hdmi_unreference(struct hdmi *hdmi)
125{
126 kref_put(&hdmi->refcount, hdmi_destroy);
127}
128
129/* 112/*
130 * The phy appears to be different, for example between 8960 and 8x60, 113 * The phy appears to be different, for example between 8960 and 8x60,
131 * so split the phy related functions out and load the correct one at 114 * so split the phy related functions out and load the correct one at
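With the hdmi object now devm-allocated and torn down in hdmi_unbind(), the kref plumbing (hdmi_reference()/hdmi_unreference()) disappears from this header: bridge and connector keep plain borrowed pointers that stay valid for the lifetime of the bound component. A standalone model of that ownership change follows; the types are stubs, with calloc()/free() standing in for devm_kzalloc() and devm teardown.

/*
 * Ownership model after the change: one owner (bind/unbind),
 * borrowers keep plain pointers -- no refcounting needed.
 */
#include <stdio.h>
#include <stdlib.h>

struct hdmi { const char *name; };
struct hdmi_bridge { struct hdmi *hdmi; /* borrowed, not refcounted */ };

static struct hdmi *hdmi_bind(void)
{
	struct hdmi *h = calloc(1, sizeof(*h));	/* devm_kzalloc stand-in */
	if (h)
		h->name = "hdmi0";
	return h;
}

static void hdmi_unbind(struct hdmi *h)
{
	free(h);	/* devm would do this automatically on unbind */
}

int main(void)
{
	struct hdmi *h = hdmi_bind();
	struct hdmi_bridge bridge = { .hdmi = h };	/* was hdmi_reference(h) */

	if (bridge.hdmi)
		printf("bridge borrows %s\n", bridge.hdmi->name);
	/* bridge is destroyed by drm before the component unbinds */
	hdmi_unbind(h);
	return 0;
}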
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
index 76fd0cfc6558..5b0844befbab 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
@@ -10,12 +10,12 @@ git clone https://github.com/freedreno/envytools.git
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20457 bytes, from 2014-08-01 12:22:48) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39)
14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2014-07-17 15:34:33) 14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39)
15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-07-17 15:34:33) 15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) 17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-08-01 12:23:53) 18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) 19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30) 20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30)
21 21
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index f6cf745c249e..6902ad6da710 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -26,7 +26,6 @@ struct hdmi_bridge {
26static void hdmi_bridge_destroy(struct drm_bridge *bridge) 26static void hdmi_bridge_destroy(struct drm_bridge *bridge)
27{ 27{
28 struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge); 28 struct hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
29 hdmi_unreference(hdmi_bridge->hdmi);
30 drm_bridge_cleanup(bridge); 29 drm_bridge_cleanup(bridge);
31 kfree(hdmi_bridge); 30 kfree(hdmi_bridge);
32} 31}
@@ -218,7 +217,7 @@ struct drm_bridge *hdmi_bridge_init(struct hdmi *hdmi)
218 goto fail; 217 goto fail;
219 } 218 }
220 219
221 hdmi_bridge->hdmi = hdmi_reference(hdmi); 220 hdmi_bridge->hdmi = hdmi;
222 221
223 bridge = &hdmi_bridge->base; 222 bridge = &hdmi_bridge->base;
224 223
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index 4aca2a3c667c..fbebb0405d76 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -330,8 +330,6 @@ static void hdmi_connector_destroy(struct drm_connector *connector)
330 drm_connector_unregister(connector); 330 drm_connector_unregister(connector);
331 drm_connector_cleanup(connector); 331 drm_connector_cleanup(connector);
332 332
333 hdmi_unreference(hdmi_connector->hdmi);
334
335 kfree(hdmi_connector); 333 kfree(hdmi_connector);
336} 334}
337 335
@@ -401,6 +399,9 @@ static const struct drm_connector_funcs hdmi_connector_funcs = {
401 .detect = hdmi_connector_detect, 399 .detect = hdmi_connector_detect,
402 .fill_modes = drm_helper_probe_single_connector_modes, 400 .fill_modes = drm_helper_probe_single_connector_modes,
403 .destroy = hdmi_connector_destroy, 401 .destroy = hdmi_connector_destroy,
402 .reset = drm_atomic_helper_connector_reset,
403 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
404 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
404}; 405};
405 406
406static const struct drm_connector_helper_funcs hdmi_connector_helper_funcs = { 407static const struct drm_connector_helper_funcs hdmi_connector_helper_funcs = {
@@ -422,7 +423,7 @@ struct drm_connector *hdmi_connector_init(struct hdmi *hdmi)
422 goto fail; 423 goto fail;
423 } 424 }
424 425
425 hdmi_connector->hdmi = hdmi_reference(hdmi); 426 hdmi_connector->hdmi = hdmi;
426 INIT_WORK(&hdmi_connector->hpd_work, hotplug_work); 427 INIT_WORK(&hdmi_connector->hpd_work, hotplug_work);
427 428
428 connector = &hdmi_connector->base; 429 connector = &hdmi_connector->base;
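Hooking drm_atomic_helper_connector_reset / _duplicate_state / _destroy_state into drm_connector_funcs, as the hunk above does, gives the connector the standard atomic state life cycle: check and commit operate on a duplicated copy of the state, and the old state is destroyed after the swap. A standalone model of that duplicate/swap/destroy cycle follows; the types are stubs, not the real drm structs.

/*
 * Standalone model of the duplicate/swap/destroy state cycle the
 * atomic connector helpers implement.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct conn_state { int dpms; };
struct connector { struct conn_state *state; };

/* analogue of drm_atomic_helper_connector_duplicate_state() */
static struct conn_state *duplicate_state(struct connector *c)
{
	struct conn_state *s = malloc(sizeof(*s));
	if (s)
		memcpy(s, c->state, sizeof(*s));
	return s;
}

static void swap_and_free(struct connector *c, struct conn_state *dup)
{
	struct conn_state *old = c->state;
	c->state = dup;	/* commit swaps in the checked new state... */
	free(old);	/* ...then destroys the old one */
}

int main(void)
{
	struct connector c = { .state = calloc(1, sizeof(struct conn_state)) };
	struct conn_state *dup = duplicate_state(&c);

	dup->dpms = 3;		/* mutate only the copy */
	swap_and_free(&c, dup);
	printf("dpms now %d\n", c.state->dpms);
	free(c.state);
	return 0;
}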
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
index f408b69486a8..eeed006eed13 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8960.c
@@ -510,7 +510,7 @@ struct hdmi_phy *hdmi_phy_8960_init(struct hdmi *hdmi)
510 510
511#ifdef CONFIG_COMMON_CLK 511#ifdef CONFIG_COMMON_CLK
512 phy_8960->pll_hw.init = &pll_init; 512 phy_8960->pll_hw.init = &pll_init;
513 phy_8960->pll = devm_clk_register(hdmi->dev->dev, &phy_8960->pll_hw); 513 phy_8960->pll = devm_clk_register(&hdmi->pdev->dev, &phy_8960->pll_hw);
514 if (IS_ERR(phy_8960->pll)) { 514 if (IS_ERR(phy_8960->pll)) {
515 ret = PTR_ERR(phy_8960->pll); 515 ret = PTR_ERR(phy_8960->pll);
516 phy_8960->pll = NULL; 516 phy_8960->pll = NULL;
diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
index d53c29327df9..29bd796797de 100644
--- a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
@@ -10,12 +10,12 @@ git clone https://github.com/freedreno/envytools.git
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20457 bytes, from 2014-08-01 12:22:48) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39)
14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2014-07-17 15:34:33) 14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39)
15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-07-17 15:34:33) 15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) 17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-08-01 12:23:53) 18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) 19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30) 20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30)
21 21
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
index 03c0bd9cd5b9..a4a7f8c7122a 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
@@ -10,12 +10,12 @@ git clone https://github.com/freedreno/envytools.git
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20457 bytes, from 2014-08-01 12:22:48) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39)
14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2014-07-17 15:34:33) 14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39)
15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-07-17 15:34:33) 15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) 17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-08-01 12:23:53) 18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) 19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30) 20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30)
21 21
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index 7d00f7fb5773..a7672e100d8b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -25,8 +25,6 @@
25struct mdp4_crtc { 25struct mdp4_crtc {
26 struct drm_crtc base; 26 struct drm_crtc base;
27 char name[8]; 27 char name[8];
28 struct drm_plane *plane;
29 struct drm_plane *planes[8];
30 int id; 28 int id;
31 int ovlp; 29 int ovlp;
32 enum mdp4_dma dma; 30 enum mdp4_dma dma;
@@ -52,25 +50,11 @@ struct mdp4_crtc {
52 50
53 /* if there is a pending flip, these will be non-null: */ 51 /* if there is a pending flip, these will be non-null: */
54 struct drm_pending_vblank_event *event; 52 struct drm_pending_vblank_event *event;
55 struct msm_fence_cb pageflip_cb;
56 53
57#define PENDING_CURSOR 0x1 54#define PENDING_CURSOR 0x1
58#define PENDING_FLIP 0x2 55#define PENDING_FLIP 0x2
59 atomic_t pending; 56 atomic_t pending;
60 57
61 /* the fb that we logically (from PoV of KMS API) hold a ref
 62 * to, which we may not yet be scanning out (we may still
 63 * be scanning out the previous one in case of page_flip, while
 64 * waiting for gpu rendering to complete):
65 */
66 struct drm_framebuffer *fb;
67
68 /* the fb that we currently hold a scanout ref to: */
69 struct drm_framebuffer *scanout_fb;
70
71 /* for unref'ing framebuffers after scanout completes: */
72 struct drm_flip_work unref_fb_work;
73
74 /* for unref'ing cursor bo's after scanout completes: */ 58 /* for unref'ing cursor bo's after scanout completes: */
75 struct drm_flip_work unref_cursor_work; 59 struct drm_flip_work unref_cursor_work;
76 60
@@ -97,15 +81,14 @@ static void crtc_flush(struct drm_crtc *crtc)
97{ 81{
98 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); 82 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
99 struct mdp4_kms *mdp4_kms = get_kms(crtc); 83 struct mdp4_kms *mdp4_kms = get_kms(crtc);
100 uint32_t i, flush = 0; 84 struct drm_plane *plane;
85 uint32_t flush = 0;
101 86
102 for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) { 87 drm_atomic_crtc_for_each_plane(plane, crtc) {
103 struct drm_plane *plane = mdp4_crtc->planes[i]; 88 enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
104 if (plane) { 89 flush |= pipe2flush(pipe_id);
105 enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
106 flush |= pipe2flush(pipe_id);
107 }
108 } 90 }
91
109 flush |= ovlp2flush(mdp4_crtc->ovlp); 92 flush |= ovlp2flush(mdp4_crtc->ovlp);
110 93
111 DBG("%s: flush=%08x", mdp4_crtc->name, flush); 94 DBG("%s: flush=%08x", mdp4_crtc->name, flush);
@@ -113,47 +96,6 @@ static void crtc_flush(struct drm_crtc *crtc)
113 mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush); 96 mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
114} 97}
115 98
116static void update_fb(struct drm_crtc *crtc, struct drm_framebuffer *new_fb)
117{
118 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
119 struct drm_framebuffer *old_fb = mdp4_crtc->fb;
120
121 /* grab reference to incoming scanout fb: */
122 drm_framebuffer_reference(new_fb);
123 mdp4_crtc->base.primary->fb = new_fb;
124 mdp4_crtc->fb = new_fb;
125
126 if (old_fb)
127 drm_flip_work_queue(&mdp4_crtc->unref_fb_work, old_fb);
128}
129
130/* unlike update_fb(), take a ref to the new scanout fb *before* updating
131 * plane, then call this. Needed to ensure we don't unref the buffer that
132 * is actually still being scanned out.
133 *
134 * Note that this whole thing goes away with atomic.. since we can defer
135 * calling into driver until rendering is done.
136 */
137static void update_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
138{
139 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
140
141 /* flush updates, to make sure hw is updated to new scanout fb,
142 * so that we can safely queue unref to current fb (ie. next
143 * vblank we know hw is done w/ previous scanout_fb).
144 */
145 crtc_flush(crtc);
146
147 if (mdp4_crtc->scanout_fb)
148 drm_flip_work_queue(&mdp4_crtc->unref_fb_work,
149 mdp4_crtc->scanout_fb);
150
151 mdp4_crtc->scanout_fb = fb;
152
153 /* enable vblank to complete flip: */
154 request_pending(crtc, PENDING_FLIP);
155}
156
157/* if file!=NULL, this is preclose potential cancel-flip path */ 99/* if file!=NULL, this is preclose potential cancel-flip path */
158static void complete_flip(struct drm_crtc *crtc, struct drm_file *file) 100static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
159{ 101{
@@ -171,38 +113,13 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
171 */ 113 */
172 if (!file || (event->base.file_priv == file)) { 114 if (!file || (event->base.file_priv == file)) {
173 mdp4_crtc->event = NULL; 115 mdp4_crtc->event = NULL;
116 DBG("%s: send event: %p", mdp4_crtc->name, event);
174 drm_send_vblank_event(dev, mdp4_crtc->id, event); 117 drm_send_vblank_event(dev, mdp4_crtc->id, event);
175 } 118 }
176 } 119 }
177 spin_unlock_irqrestore(&dev->event_lock, flags); 120 spin_unlock_irqrestore(&dev->event_lock, flags);
178} 121}
179 122
180static void pageflip_cb(struct msm_fence_cb *cb)
181{
182 struct mdp4_crtc *mdp4_crtc =
183 container_of(cb, struct mdp4_crtc, pageflip_cb);
184 struct drm_crtc *crtc = &mdp4_crtc->base;
185 struct drm_framebuffer *fb = crtc->primary->fb;
186
187 if (!fb)
188 return;
189
190 drm_framebuffer_reference(fb);
191 mdp4_plane_set_scanout(mdp4_crtc->plane, fb);
192 update_scanout(crtc, fb);
193}
194
195static void unref_fb_worker(struct drm_flip_work *work, void *val)
196{
197 struct mdp4_crtc *mdp4_crtc =
198 container_of(work, struct mdp4_crtc, unref_fb_work);
199 struct drm_device *dev = mdp4_crtc->base.dev;
200
201 mutex_lock(&dev->mode_config.mutex);
202 drm_framebuffer_unreference(val);
203 mutex_unlock(&dev->mode_config.mutex);
204}
205
206static void unref_cursor_worker(struct drm_flip_work *work, void *val) 123static void unref_cursor_worker(struct drm_flip_work *work, void *val)
207{ 124{
208 struct mdp4_crtc *mdp4_crtc = 125 struct mdp4_crtc *mdp4_crtc =
@@ -218,7 +135,6 @@ static void mdp4_crtc_destroy(struct drm_crtc *crtc)
218 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); 135 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
219 136
220 drm_crtc_cleanup(crtc); 137 drm_crtc_cleanup(crtc);
221 drm_flip_work_cleanup(&mdp4_crtc->unref_fb_work);
222 drm_flip_work_cleanup(&mdp4_crtc->unref_cursor_work); 138 drm_flip_work_cleanup(&mdp4_crtc->unref_cursor_work);
223 139
224 kfree(mdp4_crtc); 140 kfree(mdp4_crtc);
@@ -251,57 +167,70 @@ static bool mdp4_crtc_mode_fixup(struct drm_crtc *crtc,
251 return true; 167 return true;
252} 168}
253 169
254static void blend_setup(struct drm_crtc *crtc) 170/* statically (for now) map planes to mixer stage (z-order): */
171static const int idxs[] = {
172 [VG1] = 1,
173 [VG2] = 2,
174 [RGB1] = 0,
175 [RGB2] = 0,
176 [RGB3] = 0,
177 [VG3] = 3,
178 [VG4] = 4,
179
180};
181
 182/* setup mixer config, for which we need to consider all crtcs and
 183 * the planes attached to them
 184 *
 185 * TODO we may need some extra locking here
186 */
187static void setup_mixer(struct mdp4_kms *mdp4_kms)
255{ 188{
256 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); 189 struct drm_mode_config *config = &mdp4_kms->dev->mode_config;
257 struct mdp4_kms *mdp4_kms = get_kms(crtc); 190 struct drm_crtc *crtc;
258 int i, ovlp = mdp4_crtc->ovlp;
259 uint32_t mixer_cfg = 0; 191 uint32_t mixer_cfg = 0;
260 static const enum mdp_mixer_stage_id stages[] = { 192 static const enum mdp_mixer_stage_id stages[] = {
261 STAGE_BASE, STAGE0, STAGE1, STAGE2, STAGE3, 193 STAGE_BASE, STAGE0, STAGE1, STAGE2, STAGE3,
262 }; 194 };
263 /* statically (for now) map planes to mixer stage (z-order): */
264 static const int idxs[] = {
265 [VG1] = 1,
266 [VG2] = 2,
267 [RGB1] = 0,
268 [RGB2] = 0,
269 [RGB3] = 0,
270 [VG3] = 3,
271 [VG4] = 4,
272 195
273 }; 196 list_for_each_entry(crtc, &config->crtc_list, head) {
274 bool alpha[4]= { false, false, false, false }; 197 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
198 struct drm_plane *plane;
275 199
276 /* Don't rely on value read back from hw, but instead use our 200 drm_atomic_crtc_for_each_plane(plane, crtc) {
 277 * own shadowed value. Possibly disable/reenable loses the 201 enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
278 * previous value and goes back to power-on default? 202 int idx = idxs[pipe_id];
279 */ 203 mixer_cfg = mixercfg(mixer_cfg, mdp4_crtc->mixer,
280 mixer_cfg = mdp4_kms->mixer_cfg; 204 pipe_id, stages[idx]);
205 }
206 }
207
208 mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg);
209}
210
211static void blend_setup(struct drm_crtc *crtc)
212{
213 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
214 struct mdp4_kms *mdp4_kms = get_kms(crtc);
215 struct drm_plane *plane;
216 int i, ovlp = mdp4_crtc->ovlp;
 217 bool alpha[4] = { false, false, false, false };
281 218
282 mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW0(ovlp), 0); 219 mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW0(ovlp), 0);
283 mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW1(ovlp), 0); 220 mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW1(ovlp), 0);
284 mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH0(ovlp), 0); 221 mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH0(ovlp), 0);
285 mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH1(ovlp), 0); 222 mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH1(ovlp), 0);
286 223
287 for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) { 224 drm_atomic_crtc_for_each_plane(plane, crtc) {
288 struct drm_plane *plane = mdp4_crtc->planes[i]; 225 enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
289 if (plane) { 226 int idx = idxs[pipe_id];
290 enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane); 227 if (idx > 0) {
291 int idx = idxs[pipe_id]; 228 const struct mdp_format *format =
292 if (idx > 0) {
293 const struct mdp_format *format =
294 to_mdp_format(msm_framebuffer_format(plane->fb)); 229 to_mdp_format(msm_framebuffer_format(plane->fb));
295 alpha[idx-1] = format->alpha_enable; 230 alpha[idx-1] = format->alpha_enable;
296 }
297 mixer_cfg = mixercfg(mixer_cfg, mdp4_crtc->mixer,
298 pipe_id, stages[idx]);
299 } 231 }
300 } 232 }
301 233
302 /* this shouldn't happen.. and seems to cause underflow: */
303 WARN_ON(!mixer_cfg);
304
305 for (i = 0; i < 4; i++) { 234 for (i = 0; i < 4; i++) {
306 uint32_t op; 235 uint32_t op;
307 236
@@ -324,22 +253,21 @@ static void blend_setup(struct drm_crtc *crtc)
324 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(ovlp, i), 0); 253 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(ovlp, i), 0);
325 } 254 }
326 255
327 mdp4_kms->mixer_cfg = mixer_cfg; 256 setup_mixer(mdp4_kms);
328 mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg);
329} 257}
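crtc_flush() and the new setup_mixer()/blend_setup() above iterate the planes actually attached to a CRTC with drm_atomic_crtc_for_each_plane() instead of scanning a fixed planes[8] array, accumulating per-pipe flush bits and mixer stages into a single config word. A standalone model of that accumulate-over-attached-planes pattern follows; the pipe names and flush-bit mapping are invented for illustration.

/*
 * Standalone model of accumulating per-plane bits into one config
 * word; pipes and the flush-bit mapping are invented.
 */
#include <stdio.h>
#include <stdint.h>

enum pipe { VG1, RGB1, NPIPES };

struct plane { enum pipe pipe; struct plane *next; };
struct crtc { struct plane *planes; };	/* attached planes, linked */

static uint32_t pipe2flush(enum pipe p)
{
	return 1u << p;	/* one flush bit per pipe */
}

/* analogue of drm_atomic_crtc_for_each_plane(): attached planes only */
#define for_each_plane(pl, crtc) \
	for ((pl) = (crtc)->planes; (pl); (pl) = (pl)->next)

int main(void)
{
	struct plane rgb = { .pipe = RGB1, .next = NULL };
	struct plane vg = { .pipe = VG1, .next = &rgb };
	struct crtc crtc = { .planes = &vg };
	struct plane *pl;
	uint32_t flush = 0;

	for_each_plane(pl, &crtc)
		flush |= pipe2flush(pl->pipe);
	printf("flush=%08x\n", (unsigned)flush);	/* bits for VG1 and RGB1 */
	return 0;
}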
330 258
331static int mdp4_crtc_mode_set(struct drm_crtc *crtc, 259static void mdp4_crtc_mode_set_nofb(struct drm_crtc *crtc)
332 struct drm_display_mode *mode,
333 struct drm_display_mode *adjusted_mode,
334 int x, int y,
335 struct drm_framebuffer *old_fb)
336{ 260{
337 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); 261 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
338 struct mdp4_kms *mdp4_kms = get_kms(crtc); 262 struct mdp4_kms *mdp4_kms = get_kms(crtc);
339 enum mdp4_dma dma = mdp4_crtc->dma; 263 enum mdp4_dma dma = mdp4_crtc->dma;
340 int ret, ovlp = mdp4_crtc->ovlp; 264 int ovlp = mdp4_crtc->ovlp;
265 struct drm_display_mode *mode;
266
267 if (WARN_ON(!crtc->state))
268 return;
341 269
342 mode = adjusted_mode; 270 mode = &crtc->state->adjusted_mode;
343 271
344 DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", 272 DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
345 mdp4_crtc->name, mode->base.id, mode->name, 273 mdp4_crtc->name, mode->base.id, mode->name,
@@ -350,28 +278,13 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
350 mode->vsync_end, mode->vtotal, 278 mode->vsync_end, mode->vtotal,
351 mode->type, mode->flags); 279 mode->type, mode->flags);
352 280
353 /* grab extra ref for update_scanout() */
354 drm_framebuffer_reference(crtc->primary->fb);
355
356 ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->primary->fb,
357 0, 0, mode->hdisplay, mode->vdisplay,
358 x << 16, y << 16,
359 mode->hdisplay << 16, mode->vdisplay << 16);
360 if (ret) {
361 drm_framebuffer_unreference(crtc->primary->fb);
362 dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
363 mdp4_crtc->name, ret);
364 return ret;
365 }
366
367 mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma), 281 mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma),
368 MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) | 282 MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) |
369 MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay)); 283 MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay));
370 284
371 /* take data from pipe: */ 285 /* take data from pipe: */
372 mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_BASE(dma), 0); 286 mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_BASE(dma), 0);
373 mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_STRIDE(dma), 287 mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_STRIDE(dma), 0);
374 crtc->primary->fb->pitches[0]);
375 mdp4_write(mdp4_kms, REG_MDP4_DMA_DST_SIZE(dma), 288 mdp4_write(mdp4_kms, REG_MDP4_DMA_DST_SIZE(dma),
376 MDP4_DMA_DST_SIZE_WIDTH(0) | 289 MDP4_DMA_DST_SIZE_WIDTH(0) |
377 MDP4_DMA_DST_SIZE_HEIGHT(0)); 290 MDP4_DMA_DST_SIZE_HEIGHT(0));
@@ -380,8 +293,7 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
380 mdp4_write(mdp4_kms, REG_MDP4_OVLP_SIZE(ovlp), 293 mdp4_write(mdp4_kms, REG_MDP4_OVLP_SIZE(ovlp),
381 MDP4_OVLP_SIZE_WIDTH(mode->hdisplay) | 294 MDP4_OVLP_SIZE_WIDTH(mode->hdisplay) |
382 MDP4_OVLP_SIZE_HEIGHT(mode->vdisplay)); 295 MDP4_OVLP_SIZE_HEIGHT(mode->vdisplay));
383 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STRIDE(ovlp), 296 mdp4_write(mdp4_kms, REG_MDP4_OVLP_STRIDE(ovlp), 0);
384 crtc->primary->fb->pitches[0]);
385 297
386 mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1); 298 mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1);
387 299
@@ -390,11 +302,6 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
390 mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000); 302 mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000);
391 mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000); 303 mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000);
392 } 304 }
393
394 update_fb(crtc, crtc->primary->fb);
395 update_scanout(crtc, crtc->primary->fb);
396
397 return 0;
398} 305}
399 306
400static void mdp4_crtc_prepare(struct drm_crtc *crtc) 307static void mdp4_crtc_prepare(struct drm_crtc *crtc)
@@ -416,60 +323,51 @@ static void mdp4_crtc_commit(struct drm_crtc *crtc)
416 drm_crtc_vblank_put(crtc); 323 drm_crtc_vblank_put(crtc);
417} 324}
418 325
419static int mdp4_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, 326static void mdp4_crtc_load_lut(struct drm_crtc *crtc)
420 struct drm_framebuffer *old_fb) 327{
328}
329
330static int mdp4_crtc_atomic_check(struct drm_crtc *crtc,
331 struct drm_crtc_state *state)
421{ 332{
422 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); 333 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
423 struct drm_plane *plane = mdp4_crtc->plane; 334 struct drm_device *dev = crtc->dev;
424 struct drm_display_mode *mode = &crtc->mode;
425 int ret;
426 335
427 /* grab extra ref for update_scanout() */ 336 DBG("%s: check", mdp4_crtc->name);
428 drm_framebuffer_reference(crtc->primary->fb);
429 337
430 ret = mdp4_plane_mode_set(plane, crtc, crtc->primary->fb, 338 if (mdp4_crtc->event) {
431 0, 0, mode->hdisplay, mode->vdisplay, 339 dev_err(dev->dev, "already pending flip!\n");
432 x << 16, y << 16, 340 return -EBUSY;
433 mode->hdisplay << 16, mode->vdisplay << 16);
434 if (ret) {
435 drm_framebuffer_unreference(crtc->primary->fb);
436 return ret;
437 } 341 }
438 342
439 update_fb(crtc, crtc->primary->fb); 343 // TODO anything else to check?
440 update_scanout(crtc, crtc->primary->fb);
441 344
442 return 0; 345 return 0;
443} 346}
444 347
445static void mdp4_crtc_load_lut(struct drm_crtc *crtc) 348static void mdp4_crtc_atomic_begin(struct drm_crtc *crtc)
446{ 349{
350 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
351 DBG("%s: begin", mdp4_crtc->name);
447} 352}
448 353
449static int mdp4_crtc_page_flip(struct drm_crtc *crtc, 354static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc)
450 struct drm_framebuffer *new_fb,
451 struct drm_pending_vblank_event *event,
452 uint32_t page_flip_flags)
453{ 355{
454 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc); 356 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
455 struct drm_device *dev = crtc->dev; 357 struct drm_device *dev = crtc->dev;
456 struct drm_gem_object *obj;
457 unsigned long flags; 358 unsigned long flags;
458 359
459 if (mdp4_crtc->event) { 360 DBG("%s: flush", mdp4_crtc->name);
460 dev_err(dev->dev, "already pending flip!\n");
461 return -EBUSY;
462 }
463 361
464 obj = msm_framebuffer_bo(new_fb, 0); 362 WARN_ON(mdp4_crtc->event);
465 363
466 spin_lock_irqsave(&dev->event_lock, flags); 364 spin_lock_irqsave(&dev->event_lock, flags);
467 mdp4_crtc->event = event; 365 mdp4_crtc->event = crtc->state->event;
468 spin_unlock_irqrestore(&dev->event_lock, flags); 366 spin_unlock_irqrestore(&dev->event_lock, flags);
469 367
470 update_fb(crtc, new_fb); 368 blend_setup(crtc);
471 369 crtc_flush(crtc);
472 return msm_gem_queue_inactive_cb(obj, &mdp4_crtc->pageflip_cb); 370 request_pending(crtc, PENDING_FLIP);
473} 371}
474 372
475static int mdp4_crtc_set_property(struct drm_crtc *crtc, 373static int mdp4_crtc_set_property(struct drm_crtc *crtc,
@@ -607,22 +505,29 @@ static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
607} 505}
608 506
609static const struct drm_crtc_funcs mdp4_crtc_funcs = { 507static const struct drm_crtc_funcs mdp4_crtc_funcs = {
610 .set_config = drm_crtc_helper_set_config, 508 .set_config = drm_atomic_helper_set_config,
611 .destroy = mdp4_crtc_destroy, 509 .destroy = mdp4_crtc_destroy,
612 .page_flip = mdp4_crtc_page_flip, 510 .page_flip = drm_atomic_helper_page_flip,
613 .set_property = mdp4_crtc_set_property, 511 .set_property = mdp4_crtc_set_property,
614 .cursor_set = mdp4_crtc_cursor_set, 512 .cursor_set = mdp4_crtc_cursor_set,
615 .cursor_move = mdp4_crtc_cursor_move, 513 .cursor_move = mdp4_crtc_cursor_move,
514 .reset = drm_atomic_helper_crtc_reset,
515 .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
516 .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
616}; 517};
617 518
618static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = { 519static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = {
619 .dpms = mdp4_crtc_dpms, 520 .dpms = mdp4_crtc_dpms,
620 .mode_fixup = mdp4_crtc_mode_fixup, 521 .mode_fixup = mdp4_crtc_mode_fixup,
621 .mode_set = mdp4_crtc_mode_set, 522 .mode_set_nofb = mdp4_crtc_mode_set_nofb,
523 .mode_set = drm_helper_crtc_mode_set,
524 .mode_set_base = drm_helper_crtc_mode_set_base,
622 .prepare = mdp4_crtc_prepare, 525 .prepare = mdp4_crtc_prepare,
623 .commit = mdp4_crtc_commit, 526 .commit = mdp4_crtc_commit,
624 .mode_set_base = mdp4_crtc_mode_set_base,
625 .load_lut = mdp4_crtc_load_lut, 527 .load_lut = mdp4_crtc_load_lut,
528 .atomic_check = mdp4_crtc_atomic_check,
529 .atomic_begin = mdp4_crtc_atomic_begin,
530 .atomic_flush = mdp4_crtc_atomic_flush,
626}; 531};
627 532
628static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus) 533static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
@@ -638,7 +543,6 @@ static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
638 543
639 if (pending & PENDING_FLIP) { 544 if (pending & PENDING_FLIP) {
640 complete_flip(crtc, NULL); 545 complete_flip(crtc, NULL);
641 drm_flip_work_commit(&mdp4_crtc->unref_fb_work, priv->wq);
642 } 546 }
643 547
644 if (pending & PENDING_CURSOR) { 548 if (pending & PENDING_CURSOR) {
@@ -663,7 +567,8 @@ uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc)
663 567
664void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file) 568void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
665{ 569{
666 DBG("cancel: %p", file); 570 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
571 DBG("%s: cancel: %p", mdp4_crtc->name, file);
667 complete_flip(crtc, file); 572 complete_flip(crtc, file);
668} 573}
669 574
@@ -717,35 +622,6 @@ void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf, int mixer)
717 mdp4_write(mdp4_kms, REG_MDP4_DISP_INTF_SEL, intf_sel); 622 mdp4_write(mdp4_kms, REG_MDP4_DISP_INTF_SEL, intf_sel);
718} 623}
719 624
720static void set_attach(struct drm_crtc *crtc, enum mdp4_pipe pipe_id,
721 struct drm_plane *plane)
722{
723 struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
724
725 BUG_ON(pipe_id >= ARRAY_SIZE(mdp4_crtc->planes));
726
727 if (mdp4_crtc->planes[pipe_id] == plane)
728 return;
729
730 mdp4_crtc->planes[pipe_id] = plane;
731 blend_setup(crtc);
732 if (mdp4_crtc->enabled && (plane != mdp4_crtc->plane))
733 crtc_flush(crtc);
734}
735
736void mdp4_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane)
737{
738 set_attach(crtc, mdp4_plane_pipe(plane), plane);
739}
740
741void mdp4_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane)
742{
743	/* don't actually detach our primary plane: */
744 if (to_mdp4_crtc(crtc)->plane == plane)
745 return;
746 set_attach(crtc, mdp4_plane_pipe(plane), NULL);
747}
748
749static const char *dma_names[] = { 625static const char *dma_names[] = {
750 "DMA_P", "DMA_S", "DMA_E", 626 "DMA_P", "DMA_S", "DMA_E",
751}; 627};
@@ -757,17 +633,13 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
757{ 633{
758 struct drm_crtc *crtc = NULL; 634 struct drm_crtc *crtc = NULL;
759 struct mdp4_crtc *mdp4_crtc; 635 struct mdp4_crtc *mdp4_crtc;
760 int ret;
761 636
762 mdp4_crtc = kzalloc(sizeof(*mdp4_crtc), GFP_KERNEL); 637 mdp4_crtc = kzalloc(sizeof(*mdp4_crtc), GFP_KERNEL);
763 if (!mdp4_crtc) { 638 if (!mdp4_crtc)
764 ret = -ENOMEM; 639 return ERR_PTR(-ENOMEM);
765 goto fail;
766 }
767 640
768 crtc = &mdp4_crtc->base; 641 crtc = &mdp4_crtc->base;
769 642
770 mdp4_crtc->plane = plane;
771 mdp4_crtc->id = id; 643 mdp4_crtc->id = id;
772 644
773 mdp4_crtc->ovlp = ovlp_id; 645 mdp4_crtc->ovlp = ovlp_id;
@@ -784,26 +656,14 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
784 656
785 spin_lock_init(&mdp4_crtc->cursor.lock); 657 spin_lock_init(&mdp4_crtc->cursor.lock);
786 658
787 ret = drm_flip_work_init(&mdp4_crtc->unref_fb_work, 16, 659 drm_flip_work_init(&mdp4_crtc->unref_cursor_work,
788 "unref fb", unref_fb_worker);
789 if (ret)
790 goto fail;
791
792 ret = drm_flip_work_init(&mdp4_crtc->unref_cursor_work, 64,
793 "unref cursor", unref_cursor_worker); 660 "unref cursor", unref_cursor_worker);
794 661
795 INIT_FENCE_CB(&mdp4_crtc->pageflip_cb, pageflip_cb);
796
797 drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp4_crtc_funcs); 662 drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp4_crtc_funcs);
798 drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs); 663 drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);
664 plane->crtc = crtc;
799 665
800 mdp4_plane_install_properties(mdp4_crtc->plane, &crtc->base); 666 mdp4_plane_install_properties(plane, &crtc->base);
801 667
802 return crtc; 668 return crtc;
803
804fail:
805 if (crtc)
806 mdp4_crtc_destroy(crtc);
807
808 return ERR_PTR(ret);
809} 669}
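
A note on the conversion above: mdp4_crtc no longer tracks attached planes in its own planes[] array; plane membership comes from core drm state and is walked with drm_atomic_crtc_for_each_plane(), as in blend_setup(). A minimal sketch of that iteration, assuming the per-pipe flush helper pipe2flush() used elsewhere in this driver (example_crtc_flush_mask() itself is illustrative, not part of the patch):

static uint32_t example_crtc_flush_mask(struct drm_crtc *crtc)
{
	struct drm_plane *plane;
	uint32_t flush = 0;

	/* visit only the planes currently bound to this crtc: */
	drm_atomic_crtc_for_each_plane(plane, crtc)
		flush |= pipe2flush(mdp4_plane_pipe(plane));

	return flush;
}
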
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 79d804e61cc4..a62109e4ae0d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -228,7 +228,6 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
228 struct drm_encoder *encoder; 228 struct drm_encoder *encoder;
229 struct drm_connector *connector; 229 struct drm_connector *connector;
230 struct drm_panel *panel; 230 struct drm_panel *panel;
231 struct hdmi *hdmi;
232 int ret; 231 int ret;
233 232
234 /* construct non-private planes: */ 233 /* construct non-private planes: */
@@ -326,11 +325,13 @@ static int modeset_init(struct mdp4_kms *mdp4_kms)
326 priv->crtcs[priv->num_crtcs++] = crtc; 325 priv->crtcs[priv->num_crtcs++] = crtc;
327 priv->encoders[priv->num_encoders++] = encoder; 326 priv->encoders[priv->num_encoders++] = encoder;
328 327
329 hdmi = hdmi_init(dev, encoder); 328 if (priv->hdmi) {
330 if (IS_ERR(hdmi)) { 329 /* Construct bridge/connector for HDMI: */
331 ret = PTR_ERR(hdmi); 330 ret = hdmi_modeset_init(priv->hdmi, dev, encoder);
332 dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret); 331 if (ret) {
333 goto fail; 332 dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
333 goto fail;
334 }
334 } 335 }
335 336
336 return 0; 337 return 0;
@@ -381,6 +382,10 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
381 if (IS_ERR(mdp4_kms->dsi_pll_vddio)) 382 if (IS_ERR(mdp4_kms->dsi_pll_vddio))
382 mdp4_kms->dsi_pll_vddio = NULL; 383 mdp4_kms->dsi_pll_vddio = NULL;
383 384
385 /* NOTE: driver for this regulator still missing upstream.. use
386 * _get_exclusive() and ignore the error if it does not exist
387 * (and hope that the bootloader left it on for us)
388 */
384 mdp4_kms->vdd = devm_regulator_get_exclusive(&pdev->dev, "vdd"); 389 mdp4_kms->vdd = devm_regulator_get_exclusive(&pdev->dev, "vdd");
385 if (IS_ERR(mdp4_kms->vdd)) 390 if (IS_ERR(mdp4_kms->vdd))
386 mdp4_kms->vdd = NULL; 391 mdp4_kms->vdd = NULL;
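
The vdd hunk above is the usual "optional regulator" idiom: request the supply exclusively and treat failure as "not wired up yet" instead of aborting the probe. Sketched on its own (the enable call and the warning are assumptions about the consumer side, not part of this patch):

struct regulator *vdd;
int ret;

vdd = devm_regulator_get_exclusive(&pdev->dev, "vdd");
if (IS_ERR(vdd))
	vdd = NULL;	/* no upstream driver yet: rely on the bootloader */

if (vdd) {
	ret = regulator_enable(vdd);
	if (ret)
		dev_warn(&pdev->dev, "failed to enable vdd: %d\n", ret);
}
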
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index 9ff6e7ccfe90..cbd77bc626d5 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -32,13 +32,6 @@ struct mdp4_kms {
32 32
33 int rev; 33 int rev;
34 34
35 /* Shadow value for MDP4_LAYERMIXER_IN_CFG.. since setup for all
36 * crtcs/encoders is in one shared register, we need to update it
37 * via read/modify/write. But to avoid getting confused by power-
38 * on-default values after resume, use this shadow value instead:
39 */
40 uint32_t mixer_cfg;
41
42 /* mapper-id used to request GEM buffer mapped for scanout: */ 35 /* mapper-id used to request GEM buffer mapped for scanout: */
43 int id; 36 int id;
44 37
@@ -194,14 +187,6 @@ uint32_t mdp4_get_formats(enum mdp4_pipe pipe_id, uint32_t *pixel_formats,
194 187
195void mdp4_plane_install_properties(struct drm_plane *plane, 188void mdp4_plane_install_properties(struct drm_plane *plane,
196 struct drm_mode_object *obj); 189 struct drm_mode_object *obj);
197void mdp4_plane_set_scanout(struct drm_plane *plane,
198 struct drm_framebuffer *fb);
199int mdp4_plane_mode_set(struct drm_plane *plane,
200 struct drm_crtc *crtc, struct drm_framebuffer *fb,
201 int crtc_x, int crtc_y,
202 unsigned int crtc_w, unsigned int crtc_h,
203 uint32_t src_x, uint32_t src_y,
204 uint32_t src_w, uint32_t src_h);
205enum mdp4_pipe mdp4_plane_pipe(struct drm_plane *plane); 190enum mdp4_pipe mdp4_plane_pipe(struct drm_plane *plane);
206struct drm_plane *mdp4_plane_init(struct drm_device *dev, 191struct drm_plane *mdp4_plane_init(struct drm_device *dev,
207 enum mdp4_pipe pipe_id, bool private_plane); 192 enum mdp4_pipe pipe_id, bool private_plane);
@@ -210,8 +195,6 @@ uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc);
210void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file); 195void mdp4_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
211void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config); 196void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config);
212void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf, int mixer); 197void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf, int mixer);
213void mdp4_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane);
214void mdp4_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane);
215struct drm_crtc *mdp4_crtc_init(struct drm_device *dev, 198struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
216 struct drm_plane *plane, int id, int ovlp_id, 199 struct drm_plane *plane, int id, int ovlp_id,
217 enum mdp4_dma dma_id); 200 enum mdp4_dma dma_id);
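
The mixer_cfg field deleted here implemented a shadow-register idiom: MDP4_LAYERMIXER_IN_CFG is shared by all crtcs/encoders, so it must be updated read-modify-write, and a soft copy avoids trusting power-on defaults read back after resume. With atomic modesetting the value is recomputed from state on every flush (setup_mixer() in mdp4_crtc.c above), so the shadow goes away. For reference, the old idiom in isolation; kms->lock is a hypothetical lock, and mixercfg() follows the removed call in blend_setup():

spin_lock_irqsave(&kms->lock, flags);
kms->mixer_cfg = mixercfg(kms->mixer_cfg, mixer, pipe_id, stage);
mdp4_write(kms, REG_MDP4_LAYERMIXER_IN_CFG, kms->mixer_cfg);
spin_unlock_irqrestore(&kms->lock, flags);
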
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
index 310034688c15..4ddc28e1275b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_lvds_connector.c
@@ -98,6 +98,9 @@ static const struct drm_connector_funcs mdp4_lvds_connector_funcs = {
98 .detect = mdp4_lvds_connector_detect, 98 .detect = mdp4_lvds_connector_detect,
99 .fill_modes = drm_helper_probe_single_connector_modes, 99 .fill_modes = drm_helper_probe_single_connector_modes,
100 .destroy = mdp4_lvds_connector_destroy, 100 .destroy = mdp4_lvds_connector_destroy,
101 .reset = drm_atomic_helper_connector_reset,
102 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
103 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
101}; 104};
102 105
103static const struct drm_connector_helper_funcs mdp4_lvds_connector_helper_funcs = { 106static const struct drm_connector_helper_funcs mdp4_lvds_connector_helper_funcs = {
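
The three hooks added above are the minimum state plumbing a KMS object needs once the driver goes atomic: .reset builds the initial state at load time, and the duplicate/destroy pair snapshots and frees state around every commit. For objects with no driver-private state the stock helpers suffice, as here; the reset helper of this era amounts to roughly the following (a sketch, not the exact upstream implementation):

static void example_connector_reset(struct drm_connector *connector)
{
	/* drop whatever was there and restart from zeroed state */
	kfree(connector->state);
	connector->state = kzalloc(sizeof(*connector->state), GFP_KERNEL);
}
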
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index 66f33dba1ebb..1e5ebe83647d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -31,47 +31,26 @@ struct mdp4_plane {
31}; 31};
32#define to_mdp4_plane(x) container_of(x, struct mdp4_plane, base) 32#define to_mdp4_plane(x) container_of(x, struct mdp4_plane, base)
33 33
34static struct mdp4_kms *get_kms(struct drm_plane *plane) 34static void mdp4_plane_set_scanout(struct drm_plane *plane,
35{ 35 struct drm_framebuffer *fb);
36 struct msm_drm_private *priv = plane->dev->dev_private; 36static int mdp4_plane_mode_set(struct drm_plane *plane,
37 return to_mdp4_kms(to_mdp_kms(priv->kms));
38}
39
40static int mdp4_plane_update(struct drm_plane *plane,
41 struct drm_crtc *crtc, struct drm_framebuffer *fb, 37 struct drm_crtc *crtc, struct drm_framebuffer *fb,
42 int crtc_x, int crtc_y, 38 int crtc_x, int crtc_y,
43 unsigned int crtc_w, unsigned int crtc_h, 39 unsigned int crtc_w, unsigned int crtc_h,
44 uint32_t src_x, uint32_t src_y, 40 uint32_t src_x, uint32_t src_y,
45 uint32_t src_w, uint32_t src_h) 41 uint32_t src_w, uint32_t src_h);
46{
47 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
48
49 mdp4_plane->enabled = true;
50
51 if (plane->fb)
52 drm_framebuffer_unreference(plane->fb);
53
54 drm_framebuffer_reference(fb);
55
56 return mdp4_plane_mode_set(plane, crtc, fb,
57 crtc_x, crtc_y, crtc_w, crtc_h,
58 src_x, src_y, src_w, src_h);
59}
60 42
61static int mdp4_plane_disable(struct drm_plane *plane) 43static struct mdp4_kms *get_kms(struct drm_plane *plane)
62{ 44{
63 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); 45 struct msm_drm_private *priv = plane->dev->dev_private;
64 DBG("%s: disable", mdp4_plane->name); 46 return to_mdp4_kms(to_mdp_kms(priv->kms));
65 if (plane->crtc)
66 mdp4_crtc_detach(plane->crtc, plane);
67 return 0;
68} 47}
69 48
70static void mdp4_plane_destroy(struct drm_plane *plane) 49static void mdp4_plane_destroy(struct drm_plane *plane)
71{ 50{
72 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); 51 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
73 52
74 mdp4_plane_disable(plane); 53 drm_plane_helper_disable(plane);
75 drm_plane_cleanup(plane); 54 drm_plane_cleanup(plane);
76 55
77 kfree(mdp4_plane); 56 kfree(mdp4_plane);
@@ -92,19 +71,75 @@ int mdp4_plane_set_property(struct drm_plane *plane,
92} 71}
93 72
94static const struct drm_plane_funcs mdp4_plane_funcs = { 73static const struct drm_plane_funcs mdp4_plane_funcs = {
95 .update_plane = mdp4_plane_update, 74 .update_plane = drm_atomic_helper_update_plane,
96 .disable_plane = mdp4_plane_disable, 75 .disable_plane = drm_atomic_helper_disable_plane,
97 .destroy = mdp4_plane_destroy, 76 .destroy = mdp4_plane_destroy,
98 .set_property = mdp4_plane_set_property, 77 .set_property = mdp4_plane_set_property,
78 .reset = drm_atomic_helper_plane_reset,
79 .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
80 .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
99}; 81};
100 82
101void mdp4_plane_set_scanout(struct drm_plane *plane, 83static int mdp4_plane_prepare_fb(struct drm_plane *plane,
84 struct drm_framebuffer *fb)
85{
86 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
87 struct mdp4_kms *mdp4_kms = get_kms(plane);
88
89 DBG("%s: prepare: FB[%u]", mdp4_plane->name, fb->base.id);
90 return msm_framebuffer_prepare(fb, mdp4_kms->id);
91}
92
93static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
94 struct drm_framebuffer *fb)
95{
96 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
97 struct mdp4_kms *mdp4_kms = get_kms(plane);
98
99 DBG("%s: cleanup: FB[%u]", mdp4_plane->name, fb->base.id);
100 msm_framebuffer_cleanup(fb, mdp4_kms->id);
101}
102
103
104static int mdp4_plane_atomic_check(struct drm_plane *plane,
105 struct drm_plane_state *state)
106{
107 return 0;
108}
109
110static void mdp4_plane_atomic_update(struct drm_plane *plane,
111 struct drm_plane_state *old_state)
112{
113 struct drm_plane_state *state = plane->state;
114 int ret;
115
116 ret = mdp4_plane_mode_set(plane,
117 state->crtc, state->fb,
118 state->crtc_x, state->crtc_y,
119 state->crtc_w, state->crtc_h,
120 state->src_x, state->src_y,
121 state->src_w, state->src_h);
122 /* atomic_check should have ensured that this doesn't fail */
123 WARN_ON(ret < 0);
124}
125
126static const struct drm_plane_helper_funcs mdp4_plane_helper_funcs = {
127 .prepare_fb = mdp4_plane_prepare_fb,
128 .cleanup_fb = mdp4_plane_cleanup_fb,
129 .atomic_check = mdp4_plane_atomic_check,
130 .atomic_update = mdp4_plane_atomic_update,
131};
132
133static void mdp4_plane_set_scanout(struct drm_plane *plane,
102 struct drm_framebuffer *fb) 134 struct drm_framebuffer *fb)
103{ 135{
104 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane); 136 struct mdp4_plane *mdp4_plane = to_mdp4_plane(plane);
105 struct mdp4_kms *mdp4_kms = get_kms(plane); 137 struct mdp4_kms *mdp4_kms = get_kms(plane);
106 enum mdp4_pipe pipe = mdp4_plane->pipe; 138 enum mdp4_pipe pipe = mdp4_plane->pipe;
107 uint32_t iova; 139 uint32_t iova = msm_framebuffer_iova(fb, mdp4_kms->id, 0);
140
141 DBG("%s: set_scanout: %08x (%u)", mdp4_plane->name,
142 iova, fb->pitches[0]);
108 143
109 mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_A(pipe), 144 mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRC_STRIDE_A(pipe),
110 MDP4_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) | 145 MDP4_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
@@ -114,7 +149,6 @@ void mdp4_plane_set_scanout(struct drm_plane *plane,
114 MDP4_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) | 149 MDP4_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
115 MDP4_PIPE_SRC_STRIDE_B_P3(fb->pitches[3])); 150 MDP4_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
116 151
117 msm_gem_get_iova(msm_framebuffer_bo(fb, 0), mdp4_kms->id, &iova);
118 mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP0_BASE(pipe), iova); 152 mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP0_BASE(pipe), iova);
119 153
120 plane->fb = fb; 154 plane->fb = fb;
@@ -122,7 +156,7 @@ void mdp4_plane_set_scanout(struct drm_plane *plane,
122 156
123#define MDP4_VG_PHASE_STEP_DEFAULT 0x20000000 157#define MDP4_VG_PHASE_STEP_DEFAULT 0x20000000
124 158
125int mdp4_plane_mode_set(struct drm_plane *plane, 159static int mdp4_plane_mode_set(struct drm_plane *plane,
126 struct drm_crtc *crtc, struct drm_framebuffer *fb, 160 struct drm_crtc *crtc, struct drm_framebuffer *fb,
127 int crtc_x, int crtc_y, 161 int crtc_x, int crtc_y,
128 unsigned int crtc_w, unsigned int crtc_h, 162 unsigned int crtc_w, unsigned int crtc_h,
@@ -137,6 +171,11 @@ int mdp4_plane_mode_set(struct drm_plane *plane,
137 uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT; 171 uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT;
138 uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT; 172 uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT;
139 173
174 if (!(crtc && fb)) {
175 DBG("%s: disabled!", mdp4_plane->name);
176 return 0;
177 }
178
140 /* src values are in Q16 fixed point, convert to integer: */ 179 /* src values are in Q16 fixed point, convert to integer: */
141 src_x = src_x >> 16; 180 src_x = src_x >> 16;
142 src_y = src_y >> 16; 181 src_y = src_y >> 16;
@@ -197,9 +236,6 @@ int mdp4_plane_mode_set(struct drm_plane *plane,
197 mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEX_STEP(pipe), phasex_step); 236 mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEX_STEP(pipe), phasex_step);
198 mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEY_STEP(pipe), phasey_step); 237 mdp4_write(mdp4_kms, REG_MDP4_PIPE_PHASEY_STEP(pipe), phasey_step);
199 238
200 /* TODO detach from old crtc (if we had more than one) */
201 mdp4_crtc_attach(crtc, plane);
202
203 return 0; 239 return 0;
204} 240}
205 241
@@ -239,9 +275,12 @@ struct drm_plane *mdp4_plane_init(struct drm_device *dev,
239 ARRAY_SIZE(mdp4_plane->formats)); 275 ARRAY_SIZE(mdp4_plane->formats));
240 276
241 type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY; 277 type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
242 drm_universal_plane_init(dev, plane, 0xff, &mdp4_plane_funcs, 278 ret = drm_universal_plane_init(dev, plane, 0xff, &mdp4_plane_funcs,
243 mdp4_plane->formats, mdp4_plane->nformats, 279 mdp4_plane->formats, mdp4_plane->nformats, type);
244 type); 280 if (ret)
281 goto fail;
282
283 drm_plane_helper_add(plane, &mdp4_plane_helper_funcs);
245 284
246 mdp4_plane_install_properties(plane, &plane->base); 285 mdp4_plane_install_properties(plane, &plane->base);
247 286
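
A note on units in mdp4_plane_mode_set(): source coordinates arrive in Q16 fixed point (the drm plane API convention), with the integer part in the upper 16 bits, which is why the function shifts them down before programming the pipe. A worked example:

/* a 1920x1080 source rectangle as carried in plane state: */
uint32_t src_w = 1920 << 16;	/* 0x07800000 */
uint32_t src_h = 1080 << 16;	/* 0x04380000 */

/* back to whole pixels, as done at the top of mode_set: */
src_w >>= 16;	/* 1920 */
src_h >>= 16;	/* 1080 */
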
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
index 67f4f896ba8c..e87ef5512cb0 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
@@ -10,14 +10,14 @@ git clone https://github.com/freedreno/envytools.git
10The rules-ng-ng source files this header was generated from are: 10The rules-ng-ng source files this header was generated from are:
11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35) 11- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 647 bytes, from 2013-11-30 14:45:35)
12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27) 12- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2013-03-31 16:51:27)
13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 17996 bytes, from 2013-12-01 19:10:31) 13- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20136 bytes, from 2014-10-31 16:51:39)
14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1615 bytes, from 2013-11-30 15:00:52) 14- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 1940 bytes, from 2014-10-31 16:51:39)
15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 22517 bytes, from 2014-06-25 12:55:02) 15- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 23963 bytes, from 2014-10-31 16:51:46)
16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43) 16- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 11712 bytes, from 2013-08-17 17:13:43)
17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32) 17- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 344 bytes, from 2013-08-11 19:26:32)
18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1544 bytes, from 2013-08-16 19:17:05) 18- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2014-10-31 16:48:57)
19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12) 19- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2013-07-05 19:21:12)
20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-06-25 12:53:44) 20- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 23613 bytes, from 2014-07-17 15:33:30)
21 21
22Copyright (C) 2013-2014 by the following authors: 22Copyright (C) 2013-2014 by the following authors:
23- Rob Clark <robdclark@gmail.com> (robclark) 23- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
new file mode 100644
index 000000000000..b0a44310cf2a
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
@@ -0,0 +1,207 @@
1/*
2 * Copyright (c) 2014 The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include "mdp5_kms.h"
15#include "mdp5_cfg.h"
16
17struct mdp5_cfg_handler {
18 int revision;
19 struct mdp5_cfg config;
20};
21
22/* mdp5_cfg must be exposed (used in mdp5.xml.h) */
23const struct mdp5_cfg_hw *mdp5_cfg = NULL;
24
25const struct mdp5_cfg_hw msm8x74_config = {
26 .name = "msm8x74",
27 .smp = {
28 .mmb_count = 22,
29 .mmb_size = 4096,
30 },
31 .ctl = {
32 .count = 5,
33 .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
34 },
35 .pipe_vig = {
36 .count = 3,
37 .base = { 0x01200, 0x01600, 0x01a00 },
38 },
39 .pipe_rgb = {
40 .count = 3,
41 .base = { 0x01e00, 0x02200, 0x02600 },
42 },
43 .pipe_dma = {
44 .count = 2,
45 .base = { 0x02a00, 0x02e00 },
46 },
47 .lm = {
48 .count = 5,
49 .base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 },
50 .nb_stages = 5,
51 },
52 .dspp = {
53 .count = 3,
54 .base = { 0x04600, 0x04a00, 0x04e00 },
55 },
56 .ad = {
57 .count = 2,
58 .base = { 0x13100, 0x13300 }, /* NOTE: no ad in v1.0 */
59 },
60 .intf = {
61 .count = 4,
62 .base = { 0x12500, 0x12700, 0x12900, 0x12b00 },
63 },
64 .max_clk = 200000000,
65};
66
67const struct mdp5_cfg_hw apq8084_config = {
68 .name = "apq8084",
69 .smp = {
70 .mmb_count = 44,
71 .mmb_size = 8192,
72 .reserved_state[0] = GENMASK(7, 0), /* first 8 MMBs */
73 .reserved[CID_RGB0] = 2,
74 .reserved[CID_RGB1] = 2,
75 .reserved[CID_RGB2] = 2,
76 .reserved[CID_RGB3] = 2,
77 },
78 .ctl = {
79 .count = 5,
80 .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
81 },
82 .pipe_vig = {
83 .count = 4,
84 .base = { 0x01200, 0x01600, 0x01a00, 0x01e00 },
85 },
86 .pipe_rgb = {
87 .count = 4,
88 .base = { 0x02200, 0x02600, 0x02a00, 0x02e00 },
89 },
90 .pipe_dma = {
91 .count = 2,
92 .base = { 0x03200, 0x03600 },
93 },
94 .lm = {
95 .count = 6,
96 .base = { 0x03a00, 0x03e00, 0x04200, 0x04600, 0x04a00, 0x04e00 },
97 .nb_stages = 5,
98 },
99 .dspp = {
100 .count = 4,
101 .base = { 0x05200, 0x05600, 0x05a00, 0x05e00 },
102
103 },
104 .ad = {
105 .count = 3,
106 .base = { 0x13500, 0x13700, 0x13900 },
107 },
108 .intf = {
109 .count = 5,
110 .base = { 0x12500, 0x12700, 0x12900, 0x12b00, 0x12d00 },
111 },
112 .max_clk = 320000000,
113};
114
115static const struct mdp5_cfg_handler cfg_handlers[] = {
116 { .revision = 0, .config = { .hw = &msm8x74_config } },
117 { .revision = 2, .config = { .hw = &msm8x74_config } },
118 { .revision = 3, .config = { .hw = &apq8084_config } },
119};
120
121
122static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev);
123
124const struct mdp5_cfg_hw *mdp5_cfg_get_hw_config(struct mdp5_cfg_handler *cfg_handler)
125{
126 return cfg_handler->config.hw;
127}
128
129struct mdp5_cfg *mdp5_cfg_get_config(struct mdp5_cfg_handler *cfg_handler)
130{
131 return &cfg_handler->config;
132}
133
134int mdp5_cfg_get_hw_rev(struct mdp5_cfg_handler *cfg_handler)
135{
136 return cfg_handler->revision;
137}
138
139void mdp5_cfg_destroy(struct mdp5_cfg_handler *cfg_handler)
140{
141 kfree(cfg_handler);
142}
143
144struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
145 uint32_t major, uint32_t minor)
146{
147 struct drm_device *dev = mdp5_kms->dev;
148 struct platform_device *pdev = dev->platformdev;
149 struct mdp5_cfg_handler *cfg_handler;
150 struct mdp5_cfg_platform *pconfig;
151 int i, ret = 0;
152
153 cfg_handler = kzalloc(sizeof(*cfg_handler), GFP_KERNEL);
154 if (unlikely(!cfg_handler)) {
155 ret = -ENOMEM;
156 goto fail;
157 }
158
159 if (major != 1) {
160 dev_err(dev->dev, "unexpected MDP major version: v%d.%d\n",
161 major, minor);
162 ret = -ENXIO;
163 goto fail;
164 }
165
166	/* the hw (via mdp5.xml.h's dynamic offsets) can only be accessed once the global mdp5_cfg pointer is set: */
167 for (i = 0; i < ARRAY_SIZE(cfg_handlers); i++) {
168 if (cfg_handlers[i].revision != minor)
169 continue;
170 mdp5_cfg = cfg_handlers[i].config.hw;
171
172 break;
173 }
174 if (unlikely(!mdp5_cfg)) {
175 dev_err(dev->dev, "unexpected MDP minor revision: v%d.%d\n",
176 major, minor);
177 ret = -ENXIO;
178 goto fail;
179 }
180
181 cfg_handler->revision = minor;
182 cfg_handler->config.hw = mdp5_cfg;
183
184 pconfig = mdp5_get_config(pdev);
185 memcpy(&cfg_handler->config.platform, pconfig, sizeof(*pconfig));
186
187 DBG("MDP5: %s hw config selected", mdp5_cfg->name);
188
189 return cfg_handler;
190
191fail:
192 if (cfg_handler)
193 mdp5_cfg_destroy(cfg_handler);
194
195 return NULL;
196}
197
198static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev)
199{
200 static struct mdp5_cfg_platform config = {};
201#ifdef CONFIG_OF
202 /* TODO */
203#endif
204 config.iommu = iommu_domain_alloc(&platform_bus_type);
205
206 return &config;
207}
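
The handler above is created once at kms init time, keyed off the MDP version register, and everything else pulls the per-SoC layout from it. A sketch of the intended call site; where major/minor come from and where the handler is stored are assumptions based on this API, not shown in the patch:

struct mdp5_cfg_handler *handler;
const struct mdp5_cfg_hw *hw;

handler = mdp5_cfg_init(mdp5_kms, major, minor);
if (!handler)
	return -ENXIO;	/* unsupported MDP revision */

hw = mdp5_cfg_get_hw_config(handler);
DBG("%s: %d CTLs, %d layer mixers", hw->name, hw->ctl.count, hw->lm.count);
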
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
new file mode 100644
index 000000000000..dba4d52cceeb
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
@@ -0,0 +1,91 @@
1/*
2 * Copyright (c) 2014 The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#ifndef __MDP5_CFG_H__
15#define __MDP5_CFG_H__
16
17#include "msm_drv.h"
18
19/*
20 * mdp5_cfg
21 *
22 * This module configures the dynamic offsets used by mdp5.xml.h
23 * (initialized in mdp5_cfg.c)
24 */
25extern const struct mdp5_cfg_hw *mdp5_cfg;
26
27#define MAX_CTL 8
28#define MAX_BASES 8
29#define MAX_SMP_BLOCKS 44
30#define MAX_CLIENTS 32
31
32typedef DECLARE_BITMAP(mdp5_smp_state_t, MAX_SMP_BLOCKS);
33
34#define MDP5_SUB_BLOCK_DEFINITION \
35 int count; \
36 uint32_t base[MAX_BASES]
37
38struct mdp5_sub_block {
39 MDP5_SUB_BLOCK_DEFINITION;
40};
41
42struct mdp5_lm_block {
43 MDP5_SUB_BLOCK_DEFINITION;
44 uint32_t nb_stages; /* number of stages per blender */
45};
46
47struct mdp5_smp_block {
48 int mmb_count; /* number of SMP MMBs */
49 int mmb_size; /* MMB: size in bytes */
50 mdp5_smp_state_t reserved_state;/* SMP MMBs statically allocated */
51 int reserved[MAX_CLIENTS]; /* # of MMBs allocated per client */
52};
53
54struct mdp5_cfg_hw {
55 char *name;
56
57 struct mdp5_smp_block smp;
58 struct mdp5_sub_block ctl;
59 struct mdp5_sub_block pipe_vig;
60 struct mdp5_sub_block pipe_rgb;
61 struct mdp5_sub_block pipe_dma;
62 struct mdp5_lm_block lm;
63 struct mdp5_sub_block dspp;
64 struct mdp5_sub_block ad;
65 struct mdp5_sub_block intf;
66
67 uint32_t max_clk;
68};
69
70/* platform config data (ie. from DT, or pdata) */
71struct mdp5_cfg_platform {
72 struct iommu_domain *iommu;
73};
74
75struct mdp5_cfg {
76 const struct mdp5_cfg_hw *hw;
77 struct mdp5_cfg_platform platform;
78};
79
80struct mdp5_kms;
81struct mdp5_cfg_handler;
82
83const struct mdp5_cfg_hw *mdp5_cfg_get_hw_config(struct mdp5_cfg_handler *cfg_hnd);
84struct mdp5_cfg *mdp5_cfg_get_config(struct mdp5_cfg_handler *cfg_hnd);
85int mdp5_cfg_get_hw_rev(struct mdp5_cfg_handler *cfg_hnd);
86
87struct mdp5_cfg_handler *mdp5_cfg_init(struct mdp5_kms *mdp5_kms,
88 uint32_t major, uint32_t minor);
89void mdp5_cfg_destroy(struct mdp5_cfg_handler *cfg_hnd);
90
91#endif /* __MDP5_CFG_H__ */
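
mdp5_smp_state_t is an ordinary kernel bitmap sized for MAX_SMP_BLOCKS, so the usual <linux/bitmap.h> operations apply; the apq8084 table statically reserves the first eight MMBs with GENMASK(7, 0). A small illustrative sketch of querying such a state:

#include <linux/bitmap.h>

mdp5_smp_state_t reserved = { GENMASK(7, 0) };	/* MMBs 0..7 taken */

if (test_bit(3, reserved))
	pr_debug("MMB 3 is statically allocated\n");

/* how many MMBs the static reservation consumes: */
int used = bitmap_weight(reserved, MAX_SMP_BLOCKS);
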
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index ebe2e60f3ab1..0e9a2e3a82d7 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -1,4 +1,5 @@
1/* 1/*
2 * Copyright (c) 2014 The Linux Foundation. All rights reserved.
2 * Copyright (C) 2013 Red Hat 3 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com> 4 * Author: Rob Clark <robdclark@gmail.com>
4 * 5 *
@@ -17,43 +18,35 @@
17 18
18#include "mdp5_kms.h" 19#include "mdp5_kms.h"
19 20
21#include <linux/sort.h>
20#include <drm/drm_mode.h> 22#include <drm/drm_mode.h>
21#include "drm_crtc.h" 23#include "drm_crtc.h"
22#include "drm_crtc_helper.h" 24#include "drm_crtc_helper.h"
23#include "drm_flip_work.h" 25#include "drm_flip_work.h"
24 26
27#define SSPP_MAX (SSPP_RGB3 + 1) /* TODO: Add SSPP_MAX in mdp5.xml.h */
28
25struct mdp5_crtc { 29struct mdp5_crtc {
26 struct drm_crtc base; 30 struct drm_crtc base;
27 char name[8]; 31 char name[8];
28 struct drm_plane *plane;
29 struct drm_plane *planes[8];
30 int id; 32 int id;
31 bool enabled; 33 bool enabled;
32 34
33 /* which mixer/encoder we route output to: */ 35 /* layer mixer used for this CRTC (+ its lock): */
34 int mixer; 36#define GET_LM_ID(crtc_id) ((crtc_id == 3) ? 5 : crtc_id)
37 int lm;
38 spinlock_t lm_lock; /* protect REG_MDP5_LM_* registers */
39
40 /* CTL used for this CRTC: */
41 struct mdp5_ctl *ctl;
35 42
36 /* if there is a pending flip, these will be non-null: */ 43 /* if there is a pending flip, these will be non-null: */
37 struct drm_pending_vblank_event *event; 44 struct drm_pending_vblank_event *event;
38 struct msm_fence_cb pageflip_cb;
39 45
40#define PENDING_CURSOR 0x1 46#define PENDING_CURSOR 0x1
41#define PENDING_FLIP 0x2 47#define PENDING_FLIP 0x2
42 atomic_t pending; 48 atomic_t pending;
43 49
44 /* the fb that we logically (from PoV of KMS API) hold a ref
45 * to. Which we may not yet be scanning out (we may still
46 * be scanning out previous in case of page_flip while waiting
47 * for gpu rendering to complete:
48 */
49 struct drm_framebuffer *fb;
50
51 /* the fb that we currently hold a scanout ref to: */
52 struct drm_framebuffer *scanout_fb;
53
54 /* for unref'ing framebuffers after scanout completes: */
55 struct drm_flip_work unref_fb_work;
56
57 struct mdp_irq vblank; 50 struct mdp_irq vblank;
58 struct mdp_irq err; 51 struct mdp_irq err;
59}; 52};
@@ -73,67 +66,38 @@ static void request_pending(struct drm_crtc *crtc, uint32_t pending)
73 mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank); 66 mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
74} 67}
75 68
76static void crtc_flush(struct drm_crtc *crtc) 69#define mdp5_lm_get_flush(lm) mdp_ctl_flush_mask_lm(lm)
77{
78 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
79 struct mdp5_kms *mdp5_kms = get_kms(crtc);
80 int id = mdp5_crtc->id;
81 uint32_t i, flush = 0;
82
83 for (i = 0; i < ARRAY_SIZE(mdp5_crtc->planes); i++) {
84 struct drm_plane *plane = mdp5_crtc->planes[i];
85 if (plane) {
86 enum mdp5_pipe pipe = mdp5_plane_pipe(plane);
87 flush |= pipe2flush(pipe);
88 }
89 }
90 flush |= mixer2flush(mdp5_crtc->id);
91 flush |= MDP5_CTL_FLUSH_CTL;
92
93 DBG("%s: flush=%08x", mdp5_crtc->name, flush);
94
95 mdp5_write(mdp5_kms, REG_MDP5_CTL_FLUSH(id), flush);
96}
97 70
98static void update_fb(struct drm_crtc *crtc, struct drm_framebuffer *new_fb) 71static void crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
99{ 72{
100 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 73 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
101 struct drm_framebuffer *old_fb = mdp5_crtc->fb;
102
103 /* grab reference to incoming scanout fb: */
104 drm_framebuffer_reference(new_fb);
105 mdp5_crtc->base.primary->fb = new_fb;
106 mdp5_crtc->fb = new_fb;
107 74
108 if (old_fb) 75 DBG("%s: flush=%08x", mdp5_crtc->name, flush_mask);
109 drm_flip_work_queue(&mdp5_crtc->unref_fb_work, old_fb); 76 mdp5_ctl_commit(mdp5_crtc->ctl, flush_mask);
110} 77}
111 78
112/* unlike update_fb(), take a ref to the new scanout fb *before* updating 79/*
113 * plane, then call this. Needed to ensure we don't unref the buffer that 80 * flush updates, to make sure hw is updated to new scanout fb,
114 * is actually still being scanned out. 81 * so that we can safely queue unref to current fb (ie. next
115 * 82 * vblank we know hw is done w/ previous scanout_fb).
116 * Note that this whole thing goes away with atomic.. since we can defer
117 * calling into driver until rendering is done.
118 */ 83 */
119static void update_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb) 84static void crtc_flush_all(struct drm_crtc *crtc)
120{ 85{
121 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 86 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
87 struct drm_plane *plane;
88 uint32_t flush_mask = 0;
122 89
123 /* flush updates, to make sure hw is updated to new scanout fb, 90 /* we could have already released CTL in the disable path: */
124 * so that we can safely queue unref to current fb (ie. next 91 if (!mdp5_crtc->ctl)
125 * vblank we know hw is done w/ previous scanout_fb). 92 return;
126 */
127 crtc_flush(crtc);
128
129 if (mdp5_crtc->scanout_fb)
130 drm_flip_work_queue(&mdp5_crtc->unref_fb_work,
131 mdp5_crtc->scanout_fb);
132 93
133 mdp5_crtc->scanout_fb = fb; 94 drm_atomic_crtc_for_each_plane(plane, crtc) {
95 flush_mask |= mdp5_plane_get_flush(plane);
96 }
97 flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl);
98 flush_mask |= mdp5_lm_get_flush(mdp5_crtc->lm);
134 99
135 /* enable vblank to complete flip: */ 100 crtc_flush(crtc, flush_mask);
136 request_pending(crtc, PENDING_FLIP);
137} 101}
138 102
139/* if file!=NULL, this is preclose potential cancel-flip path */ 103/* if file!=NULL, this is preclose potential cancel-flip path */
@@ -142,7 +106,8 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
142 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 106 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
143 struct drm_device *dev = crtc->dev; 107 struct drm_device *dev = crtc->dev;
144 struct drm_pending_vblank_event *event; 108 struct drm_pending_vblank_event *event;
145 unsigned long flags, i; 109 struct drm_plane *plane;
110 unsigned long flags;
146 111
147 spin_lock_irqsave(&dev->event_lock, flags); 112 spin_lock_irqsave(&dev->event_lock, flags);
148 event = mdp5_crtc->event; 113 event = mdp5_crtc->event;
@@ -153,50 +118,22 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
153 */ 118 */
154 if (!file || (event->base.file_priv == file)) { 119 if (!file || (event->base.file_priv == file)) {
155 mdp5_crtc->event = NULL; 120 mdp5_crtc->event = NULL;
121 DBG("%s: send event: %p", mdp5_crtc->name, event);
156 drm_send_vblank_event(dev, mdp5_crtc->id, event); 122 drm_send_vblank_event(dev, mdp5_crtc->id, event);
157 } 123 }
158 } 124 }
159 spin_unlock_irqrestore(&dev->event_lock, flags); 125 spin_unlock_irqrestore(&dev->event_lock, flags);
160 126
161 for (i = 0; i < ARRAY_SIZE(mdp5_crtc->planes); i++) { 127 drm_atomic_crtc_for_each_plane(plane, crtc) {
162 struct drm_plane *plane = mdp5_crtc->planes[i]; 128 mdp5_plane_complete_flip(plane);
163 if (plane)
164 mdp5_plane_complete_flip(plane);
165 } 129 }
166} 130}
167 131
168static void pageflip_cb(struct msm_fence_cb *cb)
169{
170 struct mdp5_crtc *mdp5_crtc =
171 container_of(cb, struct mdp5_crtc, pageflip_cb);
172 struct drm_crtc *crtc = &mdp5_crtc->base;
173 struct drm_framebuffer *fb = mdp5_crtc->fb;
174
175 if (!fb)
176 return;
177
178 drm_framebuffer_reference(fb);
179 mdp5_plane_set_scanout(mdp5_crtc->plane, fb);
180 update_scanout(crtc, fb);
181}
182
183static void unref_fb_worker(struct drm_flip_work *work, void *val)
184{
185 struct mdp5_crtc *mdp5_crtc =
186 container_of(work, struct mdp5_crtc, unref_fb_work);
187 struct drm_device *dev = mdp5_crtc->base.dev;
188
189 mutex_lock(&dev->mode_config.mutex);
190 drm_framebuffer_unreference(val);
191 mutex_unlock(&dev->mode_config.mutex);
192}
193
194static void mdp5_crtc_destroy(struct drm_crtc *crtc) 132static void mdp5_crtc_destroy(struct drm_crtc *crtc)
195{ 133{
196 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 134 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
197 135
198 drm_crtc_cleanup(crtc); 136 drm_crtc_cleanup(crtc);
199 drm_flip_work_cleanup(&mdp5_crtc->unref_fb_work);
200 137
201 kfree(mdp5_crtc); 138 kfree(mdp5_crtc);
202} 139}
@@ -214,6 +151,8 @@ static void mdp5_crtc_dpms(struct drm_crtc *crtc, int mode)
214 mdp5_enable(mdp5_kms); 151 mdp5_enable(mdp5_kms);
215 mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err); 152 mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);
216 } else { 153 } else {
154 /* set STAGE_UNUSED for all layers */
155 mdp5_ctl_blend(mdp5_crtc->ctl, mdp5_crtc->lm, 0x00000000);
217 mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err); 156 mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
218 mdp5_disable(mdp5_kms); 157 mdp5_disable(mdp5_kms);
219 } 158 }
@@ -228,54 +167,78 @@ static bool mdp5_crtc_mode_fixup(struct drm_crtc *crtc,
228 return true; 167 return true;
229} 168}
230 169
170/*
171 * blend_setup() - blend all the planes of a CRTC
172 *
173 * When border is enabled, the border color will ALWAYS be the base layer.
174 * Therefore, the first plane (private RGB pipe) will start at STAGE0.
175 * If disabled, the first plane starts at STAGE_BASE.
176 *
177 * Note:
178 * Border is not enabled here because the private plane is exactly
179 * the CRTC resolution.
180 */
231static void blend_setup(struct drm_crtc *crtc) 181static void blend_setup(struct drm_crtc *crtc)
232{ 182{
233 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 183 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
234 struct mdp5_kms *mdp5_kms = get_kms(crtc); 184 struct mdp5_kms *mdp5_kms = get_kms(crtc);
235 int id = mdp5_crtc->id; 185 struct drm_plane *plane;
186 const struct mdp5_cfg_hw *hw_cfg;
187 uint32_t lm = mdp5_crtc->lm, blend_cfg = 0;
188 unsigned long flags;
189#define blender(stage) ((stage) - STAGE_BASE)
236 190
237 /* 191 hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
238 * Hard-coded setup for now until I figure out how the
239 * layer-mixer works
240 */
241 192
242 /* LM[id]: */ 193 spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
243 mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_COLOR_OUT(id), 194
244 MDP5_LM_BLEND_COLOR_OUT_STAGE0_FG_ALPHA); 195 /* ctl could be released already when we are shutting down: */
245 mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_OP_MODE(id, 0), 196 if (!mdp5_crtc->ctl)
246 MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) | 197 goto out;
247 MDP5_LM_BLEND_OP_MODE_BG_ALPHA(FG_PIXEL) |
248 MDP5_LM_BLEND_OP_MODE_BG_INV_ALPHA);
249 mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(id, 0), 0xff);
250 mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(id, 0), 0x00);
251
252 /* NOTE: seems that LM[n] and CTL[m], we do not need n==m.. but
253 * we want to be setting CTL[m].LAYER[n]. Not sure what the
254 * point of having CTL[m].LAYER[o] (for o!=n).. maybe that is
255 * used when chaining up mixers for high resolution displays?
256 */
257 198
258 /* CTL[id]: */ 199 drm_atomic_crtc_for_each_plane(plane, crtc) {
259 mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 0), 200 enum mdp_mixer_stage_id stage =
260 MDP5_CTL_LAYER_REG_RGB0(STAGE0) | 201 to_mdp5_plane_state(plane->state)->stage;
261 MDP5_CTL_LAYER_REG_BORDER_COLOR); 202
262 mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 1), 0); 203 /*
263 mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 2), 0); 204 * Note: This cannot happen with current implementation but
264 mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 3), 0); 205 * we need to check this condition once z property is added
265 mdp5_write(mdp5_kms, REG_MDP5_CTL_LAYER_REG(id, 4), 0); 206 */
207 BUG_ON(stage > hw_cfg->lm.nb_stages);
208
209 /* LM */
210 mdp5_write(mdp5_kms,
211 REG_MDP5_LM_BLEND_OP_MODE(lm, blender(stage)),
212 MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
213 MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST));
214 mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm,
215 blender(stage)), 0xff);
216 mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
217 blender(stage)), 0x00);
218 /* CTL */
219 blend_cfg |= mdp_ctl_blend_mask(mdp5_plane_pipe(plane), stage);
220 DBG("%s: blending pipe %s on stage=%d", mdp5_crtc->name,
221 pipe2name(mdp5_plane_pipe(plane)), stage);
222 }
223
224 DBG("%s: lm%d: blend config = 0x%08x", mdp5_crtc->name, lm, blend_cfg);
225 mdp5_ctl_blend(mdp5_crtc->ctl, lm, blend_cfg);
226
227out:
228 spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
266} 229}
267 230
268static int mdp5_crtc_mode_set(struct drm_crtc *crtc, 231static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
269 struct drm_display_mode *mode,
270 struct drm_display_mode *adjusted_mode,
271 int x, int y,
272 struct drm_framebuffer *old_fb)
273{ 232{
274 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 233 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
275 struct mdp5_kms *mdp5_kms = get_kms(crtc); 234 struct mdp5_kms *mdp5_kms = get_kms(crtc);
276 int ret; 235 unsigned long flags;
236 struct drm_display_mode *mode;
277 237
278 mode = adjusted_mode; 238 if (WARN_ON(!crtc->state))
239 return;
240
241 mode = &crtc->state->adjusted_mode;
279 242
280 DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x", 243 DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
281 mdp5_crtc->name, mode->base.id, mode->name, 244 mdp5_crtc->name, mode->base.id, mode->name,
@@ -286,28 +249,11 @@ static int mdp5_crtc_mode_set(struct drm_crtc *crtc,
286 mode->vsync_end, mode->vtotal, 249 mode->vsync_end, mode->vtotal,
287 mode->type, mode->flags); 250 mode->type, mode->flags);
288 251
289 /* grab extra ref for update_scanout() */ 252 spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
290 drm_framebuffer_reference(crtc->primary->fb); 253 mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->lm),
291
292 ret = mdp5_plane_mode_set(mdp5_crtc->plane, crtc, crtc->primary->fb,
293 0, 0, mode->hdisplay, mode->vdisplay,
294 x << 16, y << 16,
295 mode->hdisplay << 16, mode->vdisplay << 16);
296 if (ret) {
297 drm_framebuffer_unreference(crtc->primary->fb);
298 dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
299 mdp5_crtc->name, ret);
300 return ret;
301 }
302
303 mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->id),
304 MDP5_LM_OUT_SIZE_WIDTH(mode->hdisplay) | 254 MDP5_LM_OUT_SIZE_WIDTH(mode->hdisplay) |
305 MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay)); 255 MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));
306 256 spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
307 update_fb(crtc, crtc->primary->fb);
308 update_scanout(crtc, crtc->primary->fb);
309
310 return 0;
311} 257}
312 258
313static void mdp5_crtc_prepare(struct drm_crtc *crtc) 259static void mdp5_crtc_prepare(struct drm_crtc *crtc)
@@ -321,66 +267,119 @@ static void mdp5_crtc_prepare(struct drm_crtc *crtc)
321 267
322static void mdp5_crtc_commit(struct drm_crtc *crtc) 268static void mdp5_crtc_commit(struct drm_crtc *crtc)
323{ 269{
270 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
271 DBG("%s", mdp5_crtc->name);
324 mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_ON); 272 mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
325 crtc_flush(crtc); 273 crtc_flush_all(crtc);
326 /* drop the ref to mdp clk's that we got in prepare: */ 274 /* drop the ref to mdp clk's that we got in prepare: */
327 mdp5_disable(get_kms(crtc)); 275 mdp5_disable(get_kms(crtc));
328} 276}
329 277
330static int mdp5_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, 278static void mdp5_crtc_load_lut(struct drm_crtc *crtc)
331 struct drm_framebuffer *old_fb) 279{
280}
281
282struct plane_state {
283 struct drm_plane *plane;
284 struct mdp5_plane_state *state;
285};
286
287static int pstate_cmp(const void *a, const void *b)
288{
289 struct plane_state *pa = (struct plane_state *)a;
290 struct plane_state *pb = (struct plane_state *)b;
291 return pa->state->zpos - pb->state->zpos;
292}
293
294static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
295 struct drm_crtc_state *state)
332{ 296{
333 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 297 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
334 struct drm_plane *plane = mdp5_crtc->plane; 298 struct mdp5_kms *mdp5_kms = get_kms(crtc);
335 struct drm_display_mode *mode = &crtc->mode; 299 struct drm_plane *plane;
336 int ret; 300 struct drm_device *dev = crtc->dev;
337 301 struct plane_state pstates[STAGE3 + 1];
338 /* grab extra ref for update_scanout() */ 302 int cnt = 0, i;
339 drm_framebuffer_reference(crtc->primary->fb); 303
340 304 DBG("%s: check", mdp5_crtc->name);
341 ret = mdp5_plane_mode_set(plane, crtc, crtc->primary->fb, 305
342 0, 0, mode->hdisplay, mode->vdisplay, 306 if (mdp5_crtc->event) {
343 x << 16, y << 16, 307 dev_err(dev->dev, "already pending flip!\n");
344 mode->hdisplay << 16, mode->vdisplay << 16); 308 return -EBUSY;
345 if (ret) {
346 drm_framebuffer_unreference(crtc->primary->fb);
347 return ret;
348 } 309 }
349 310
350 update_fb(crtc, crtc->primary->fb); 311 /* request a free CTL, if none is already allocated for this CRTC */
351 update_scanout(crtc, crtc->primary->fb); 312 if (state->enable && !mdp5_crtc->ctl) {
313 mdp5_crtc->ctl = mdp5_ctlm_request(mdp5_kms->ctlm, crtc);
314 if (WARN_ON(!mdp5_crtc->ctl))
315 return -EINVAL;
316 }
317
318 /* verify that there are not too many planes attached to crtc
319 * and that we don't have conflicting mixer stages:
320 */
321 drm_atomic_crtc_state_for_each_plane(plane, state) {
322 struct drm_plane_state *pstate;
323
324 if (cnt >= ARRAY_SIZE(pstates)) {
325 dev_err(dev->dev, "too many planes!\n");
326 return -EINVAL;
327 }
328
329 pstate = state->state->plane_states[drm_plane_index(plane)];
330
331 /* plane might not have changed, in which case take
332 * current state:
333 */
334 if (!pstate)
335 pstate = plane->state;
336
337 pstates[cnt].plane = plane;
338 pstates[cnt].state = to_mdp5_plane_state(pstate);
339
340 cnt++;
341 }
342
343 sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
344
345 for (i = 0; i < cnt; i++) {
346 pstates[i].state->stage = STAGE_BASE + i;
347 DBG("%s: assign pipe %s on stage=%d", mdp5_crtc->name,
348 pipe2name(mdp5_plane_pipe(pstates[i].plane)),
349 pstates[i].state->stage);
350 }
352 351
353 return 0; 352 return 0;
354} 353}
355 354
356static void mdp5_crtc_load_lut(struct drm_crtc *crtc) 355static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc)
357{ 356{
357 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
358 DBG("%s: begin", mdp5_crtc->name);
358} 359}
359 360
360static int mdp5_crtc_page_flip(struct drm_crtc *crtc, 361static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc)
361 struct drm_framebuffer *new_fb,
362 struct drm_pending_vblank_event *event,
363 uint32_t page_flip_flags)
364{ 362{
365 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 363 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
366 struct drm_device *dev = crtc->dev; 364 struct drm_device *dev = crtc->dev;
367 struct drm_gem_object *obj;
368 unsigned long flags; 365 unsigned long flags;
369 366
370 if (mdp5_crtc->event) { 367 DBG("%s: flush", mdp5_crtc->name);
371 dev_err(dev->dev, "already pending flip!\n");
372 return -EBUSY;
373 }
374 368
375 obj = msm_framebuffer_bo(new_fb, 0); 369 WARN_ON(mdp5_crtc->event);
376 370
377 spin_lock_irqsave(&dev->event_lock, flags); 371 spin_lock_irqsave(&dev->event_lock, flags);
378 mdp5_crtc->event = event; 372 mdp5_crtc->event = crtc->state->event;
379 spin_unlock_irqrestore(&dev->event_lock, flags); 373 spin_unlock_irqrestore(&dev->event_lock, flags);
380 374
381 update_fb(crtc, new_fb); 375 blend_setup(crtc);
376 crtc_flush_all(crtc);
377 request_pending(crtc, PENDING_FLIP);
382 378
383 return msm_gem_queue_inactive_cb(obj, &mdp5_crtc->pageflip_cb); 379 if (mdp5_crtc->ctl && !crtc->state->enable) {
380 mdp5_ctl_release(mdp5_crtc->ctl);
381 mdp5_crtc->ctl = NULL;
382 }
384} 383}
385 384
386static int mdp5_crtc_set_property(struct drm_crtc *crtc, 385static int mdp5_crtc_set_property(struct drm_crtc *crtc,
@@ -391,27 +390,33 @@ static int mdp5_crtc_set_property(struct drm_crtc *crtc,
391} 390}
392 391
393static const struct drm_crtc_funcs mdp5_crtc_funcs = { 392static const struct drm_crtc_funcs mdp5_crtc_funcs = {
394 .set_config = drm_crtc_helper_set_config, 393 .set_config = drm_atomic_helper_set_config,
395 .destroy = mdp5_crtc_destroy, 394 .destroy = mdp5_crtc_destroy,
396 .page_flip = mdp5_crtc_page_flip, 395 .page_flip = drm_atomic_helper_page_flip,
397 .set_property = mdp5_crtc_set_property, 396 .set_property = mdp5_crtc_set_property,
397 .reset = drm_atomic_helper_crtc_reset,
398 .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
399 .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
398}; 400};
399 401
400static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = { 402static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
401 .dpms = mdp5_crtc_dpms, 403 .dpms = mdp5_crtc_dpms,
402 .mode_fixup = mdp5_crtc_mode_fixup, 404 .mode_fixup = mdp5_crtc_mode_fixup,
403 .mode_set = mdp5_crtc_mode_set, 405 .mode_set_nofb = mdp5_crtc_mode_set_nofb,
406 .mode_set = drm_helper_crtc_mode_set,
407 .mode_set_base = drm_helper_crtc_mode_set_base,
404 .prepare = mdp5_crtc_prepare, 408 .prepare = mdp5_crtc_prepare,
405 .commit = mdp5_crtc_commit, 409 .commit = mdp5_crtc_commit,
406 .mode_set_base = mdp5_crtc_mode_set_base,
407 .load_lut = mdp5_crtc_load_lut, 410 .load_lut = mdp5_crtc_load_lut,
411 .atomic_check = mdp5_crtc_atomic_check,
412 .atomic_begin = mdp5_crtc_atomic_begin,
413 .atomic_flush = mdp5_crtc_atomic_flush,
408}; 414};
409 415
410static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus) 416static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
411{ 417{
412 struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank); 418 struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
413 struct drm_crtc *crtc = &mdp5_crtc->base; 419 struct drm_crtc *crtc = &mdp5_crtc->base;
414 struct msm_drm_private *priv = crtc->dev->dev_private;
415 unsigned pending; 420 unsigned pending;
416 421
417 mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank); 422 mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);
@@ -420,16 +425,14 @@ static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
420 425
421 if (pending & PENDING_FLIP) { 426 if (pending & PENDING_FLIP) {
422 complete_flip(crtc, NULL); 427 complete_flip(crtc, NULL);
423 drm_flip_work_commit(&mdp5_crtc->unref_fb_work, priv->wq);
424 } 428 }
425} 429}
426 430
427static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus) 431static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
428{ 432{
429 struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err); 433 struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);
430 struct drm_crtc *crtc = &mdp5_crtc->base; 434
431 DBG("%s: error: %08x", mdp5_crtc->name, irqstatus); 435 DBG("%s: error: %08x", mdp5_crtc->name, irqstatus);
432 crtc_flush(crtc);
433} 436}
434 437
435uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc) 438uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
@@ -450,10 +453,9 @@ void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
450{ 453{
451 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 454 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
452 struct mdp5_kms *mdp5_kms = get_kms(crtc); 455 struct mdp5_kms *mdp5_kms = get_kms(crtc);
453 static const enum mdp5_intfnum intfnum[] = { 456 uint32_t flush_mask = 0;
454 INTF0, INTF1, INTF2, INTF3,
455 };
456 uint32_t intf_sel; 457 uint32_t intf_sel;
458 unsigned long flags;
457 459
458 /* now that we know what irq's we want: */ 460 /* now that we know what irq's we want: */
459 mdp5_crtc->err.irqmask = intf2err(intf); 461 mdp5_crtc->err.irqmask = intf2err(intf);
@@ -463,6 +465,7 @@ void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
463 if (!mdp5_kms) 465 if (!mdp5_kms)
464 return; 466 return;
465 467
468 spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
466 intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL); 469 intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);
467 470
468 switch (intf) { 471 switch (intf) {
@@ -487,45 +490,25 @@ void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
487 break; 490 break;
488 } 491 }
489 492
490 blend_setup(crtc); 493 mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
494 spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
491 495
492 DBG("%s: intf_sel=%08x", mdp5_crtc->name, intf_sel); 496 DBG("%s: intf_sel=%08x", mdp5_crtc->name, intf_sel);
497 mdp5_ctl_set_intf(mdp5_crtc->ctl, intf);
498 flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl);
499 flush_mask |= mdp5_lm_get_flush(mdp5_crtc->lm);
493 500
494 mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel); 501 crtc_flush(crtc, flush_mask);
495 mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(mdp5_crtc->id),
496 MDP5_CTL_OP_MODE(MODE_NONE) |
497 MDP5_CTL_OP_INTF_NUM(intfnum[intf]));
498
499 crtc_flush(crtc);
500} 502}
501 503
502static void set_attach(struct drm_crtc *crtc, enum mdp5_pipe pipe_id, 504int mdp5_crtc_get_lm(struct drm_crtc *crtc)
503 struct drm_plane *plane)
504{ 505{
505 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc); 506 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
506 507
507 BUG_ON(pipe_id >= ARRAY_SIZE(mdp5_crtc->planes)); 508 if (WARN_ON(!crtc))
509 return -EINVAL;
508 510
509 if (mdp5_crtc->planes[pipe_id] == plane) 511 return mdp5_crtc->lm;
510 return;
511
512 mdp5_crtc->planes[pipe_id] = plane;
513 blend_setup(crtc);
514 if (mdp5_crtc->enabled && (plane != mdp5_crtc->plane))
515 crtc_flush(crtc);
516}
517
518void mdp5_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane)
519{
520 set_attach(crtc, mdp5_plane_pipe(plane), plane);
521}
522
523void mdp5_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane)
524{
525 /* don't actually detatch our primary plane: */
526 if (to_mdp5_crtc(crtc)->plane == plane)
527 return;
528 set_attach(crtc, mdp5_plane_pipe(plane), NULL);
529} 512}
530 513
531/* initialize crtc */ 514/* initialize crtc */
@@ -534,18 +517,17 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
534{ 517{
535 struct drm_crtc *crtc = NULL; 518 struct drm_crtc *crtc = NULL;
536 struct mdp5_crtc *mdp5_crtc; 519 struct mdp5_crtc *mdp5_crtc;
537 int ret;
538 520
539 mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL); 521 mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
540 if (!mdp5_crtc) { 522 if (!mdp5_crtc)
541 ret = -ENOMEM; 523 return ERR_PTR(-ENOMEM);
542 goto fail;
543 }
544 524
545 crtc = &mdp5_crtc->base; 525 crtc = &mdp5_crtc->base;
546 526
547 mdp5_crtc->plane = plane;
548 mdp5_crtc->id = id; 527 mdp5_crtc->id = id;
528 mdp5_crtc->lm = GET_LM_ID(id);
529
530 spin_lock_init(&mdp5_crtc->lm_lock);
549 531
550 mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq; 532 mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
551 mdp5_crtc->err.irq = mdp5_crtc_err_irq; 533 mdp5_crtc->err.irq = mdp5_crtc_err_irq;
@@ -553,23 +535,11 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
553 snprintf(mdp5_crtc->name, sizeof(mdp5_crtc->name), "%s:%d", 535 snprintf(mdp5_crtc->name, sizeof(mdp5_crtc->name), "%s:%d",
554 pipe2name(mdp5_plane_pipe(plane)), id); 536 pipe2name(mdp5_plane_pipe(plane)), id);
555 537
556 ret = drm_flip_work_init(&mdp5_crtc->unref_fb_work, 16,
557 "unref fb", unref_fb_worker);
558 if (ret)
559 goto fail;
560
561 INIT_FENCE_CB(&mdp5_crtc->pageflip_cb, pageflip_cb);
562
563 drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs); 538 drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs);
564 drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs); 539 drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
540 plane->crtc = crtc;
565 541
566 mdp5_plane_install_properties(mdp5_crtc->plane, &crtc->base); 542 mdp5_plane_install_properties(plane, &crtc->base);
567 543
568 return crtc; 544 return crtc;
569
570fail:
571 if (crtc)
572 mdp5_crtc_destroy(crtc);
573
574 return ERR_PTR(ret);
575} 545}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
new file mode 100644
index 000000000000..dea4505ac963
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.c
@@ -0,0 +1,322 @@
1/*
2 * Copyright (c) 2014 The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#include "mdp5_kms.h"
15#include "mdp5_ctl.h"
16
17/*
18 * CTL - MDP Control Pool Manager
19 *
20 * Controls are shared between all CRTCs.
21 *
22 * They are intended to be used for data path configuration.
23 * The top level register programming describes the complete data path for
24 * a specific data path ID - REG_MDP5_CTL_*(<id>, ...)
25 *
 26 * Hardware capabilities determine the number of concurrent data paths.
 27 *
 28 * In certain use cases (high-resolution dual pipe), a single CTL can be
 29 * shared across multiple CRTCs.
 30 *
 31 * Because the number of CTLs can be less than the number of CRTCs,
 32 * CTLs are dynamically allocated from a pool; a CTL is assigned to a
 33 * CRTC only once the client requests one (in mdp5_crtc_mode_set()).
34 */
35
36struct mdp5_ctl {
37 struct mdp5_ctl_manager *ctlm;
38
39 u32 id;
40
41 /* whether this CTL has been allocated or not: */
42 bool busy;
43
44 /* memory output connection (@see mdp5_ctl_mode): */
45 u32 mode;
46
47 /* REG_MDP5_CTL_*(<id>) registers access info + lock: */
48 spinlock_t hw_lock;
49 u32 reg_offset;
50
51 /* flush mask used to commit CTL registers */
52 u32 flush_mask;
53
54 bool cursor_on;
55
56 struct drm_crtc *crtc;
57};
58
59struct mdp5_ctl_manager {
60 struct drm_device *dev;
61
62 /* number of CTL / Layer Mixers in this hw config: */
63 u32 nlm;
64 u32 nctl;
65
66 /* pool of CTLs + lock to protect resource allocation (ctls[i].busy) */
67 spinlock_t pool_lock;
68 struct mdp5_ctl ctls[MAX_CTL];
69};
70
71static inline
72struct mdp5_kms *get_kms(struct mdp5_ctl_manager *ctl_mgr)
73{
74 struct msm_drm_private *priv = ctl_mgr->dev->dev_private;
75
76 return to_mdp5_kms(to_mdp_kms(priv->kms));
77}
78
79static inline
80void ctl_write(struct mdp5_ctl *ctl, u32 reg, u32 data)
81{
82 struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);
83
84 (void)ctl->reg_offset; /* TODO use this instead of mdp5_write */
85 mdp5_write(mdp5_kms, reg, data);
86}
87
88static inline
89u32 ctl_read(struct mdp5_ctl *ctl, u32 reg)
90{
91 struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);
92
 93	(void)ctl->reg_offset; /* TODO use this instead of mdp5_read */
94 return mdp5_read(mdp5_kms, reg);
95}
96
97
98int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, enum mdp5_intf intf)
99{
100 unsigned long flags;
101 static const enum mdp5_intfnum intfnum[] = {
102 INTF0, INTF1, INTF2, INTF3,
103 };
104
105 spin_lock_irqsave(&ctl->hw_lock, flags);
106 ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id),
107 MDP5_CTL_OP_MODE(ctl->mode) |
108 MDP5_CTL_OP_INTF_NUM(intfnum[intf]));
109 spin_unlock_irqrestore(&ctl->hw_lock, flags);
110
111 return 0;
112}
113
114int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, bool enable)
115{
116 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
117 unsigned long flags;
118 u32 blend_cfg;
119 int lm;
120
121 lm = mdp5_crtc_get_lm(ctl->crtc);
122 if (unlikely(WARN_ON(lm < 0))) {
 123		dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d\n",
124 ctl->id, lm);
125 return -EINVAL;
126 }
127
128 spin_lock_irqsave(&ctl->hw_lock, flags);
129
130 blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm));
131
132 if (enable)
133 blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
134 else
135 blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;
136
137 ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
138
139 spin_unlock_irqrestore(&ctl->hw_lock, flags);
140
141 ctl->cursor_on = enable;
142
143 return 0;
144}
145
146
147int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg)
148{
149 unsigned long flags;
150
151 if (ctl->cursor_on)
152 blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
153 else
154 blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;
155
156 spin_lock_irqsave(&ctl->hw_lock, flags);
157 ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, lm), blend_cfg);
158 spin_unlock_irqrestore(&ctl->hw_lock, flags);
159
160 return 0;
161}
162
163int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask)
164{
165 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
166 unsigned long flags;
167
168 if (flush_mask & MDP5_CTL_FLUSH_CURSOR_DUMMY) {
169 int lm = mdp5_crtc_get_lm(ctl->crtc);
170
171 if (unlikely(WARN_ON(lm < 0))) {
 172			dev_err(ctl_mgr->dev->dev, "CTL %d cannot find LM: %d\n",
173 ctl->id, lm);
174 return -EINVAL;
175 }
176
177 /* for current targets, cursor bit is the same as LM bit */
178 flush_mask |= mdp_ctl_flush_mask_lm(lm);
179 }
180
181 spin_lock_irqsave(&ctl->hw_lock, flags);
182 ctl_write(ctl, REG_MDP5_CTL_FLUSH(ctl->id), flush_mask);
183 spin_unlock_irqrestore(&ctl->hw_lock, flags);
184
185 return 0;
186}
187
188u32 mdp5_ctl_get_flush(struct mdp5_ctl *ctl)
189{
190 return ctl->flush_mask;
191}
192
193void mdp5_ctl_release(struct mdp5_ctl *ctl)
194{
195 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
196 unsigned long flags;
197
198 if (unlikely(WARN_ON(ctl->id >= MAX_CTL) || !ctl->busy)) {
 199		dev_err(ctl_mgr->dev->dev, "CTL %d in bad state (%d)\n",
200 ctl->id, ctl->busy);
201 return;
202 }
203
204 spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
205 ctl->busy = false;
206 spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
207
208 DBG("CTL %d released", ctl->id);
209}
210
211/*
 212 * mdp5_ctlm_request() - CTL dynamic allocation
 213 *
 214 * Note: the current implementation assumes only one CRTC per CTL
 215 *
 216 * @return the first free CTL, or NULL if the pool is exhausted
217 */
218struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
219 struct drm_crtc *crtc)
220{
221 struct mdp5_ctl *ctl = NULL;
222 unsigned long flags;
223 int c;
224
225 spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
226
227 for (c = 0; c < ctl_mgr->nctl; c++)
228 if (!ctl_mgr->ctls[c].busy)
229 break;
230
231 if (unlikely(c >= ctl_mgr->nctl)) {
 232		dev_err(ctl_mgr->dev->dev, "No more CTL available!\n");
233 goto unlock;
234 }
235
236 ctl = &ctl_mgr->ctls[c];
237
238 ctl->crtc = crtc;
239 ctl->busy = true;
240 DBG("CTL %d allocated", ctl->id);
241
242unlock:
243 spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
244 return ctl;
245}
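[Editor's note: a hedged usage sketch of the pool: how a CRTC might claim a CTL at modeset time and hand it back when disabled, mirroring what mdp5_crtc.c does elsewhere in this series (error handling simplified):

	static int example_claim_ctl(struct mdp5_ctl_manager *ctlm,
			struct drm_crtc *crtc, struct mdp5_ctl **pctl)
	{
		struct mdp5_ctl *ctl = mdp5_ctlm_request(ctlm, crtc);

		if (!ctl)
			return -EBUSY;	/* pool exhausted */

		/* the CRTC keeps the CTL until it is disabled, then calls
		 * mdp5_ctl_release(ctl), see mdp5_crtc_atomic_flush() above */
		*pctl = ctl;
		return 0;
	}
]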
246
247void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctl_mgr)
248{
249 unsigned long flags;
250 int c;
251
252 for (c = 0; c < ctl_mgr->nctl; c++) {
253 struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];
254
255 spin_lock_irqsave(&ctl->hw_lock, flags);
256 ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), 0);
257 spin_unlock_irqrestore(&ctl->hw_lock, flags);
258 }
259}
260
261void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctl_mgr)
262{
263 kfree(ctl_mgr);
264}
265
266struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
267 void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg)
268{
269 struct mdp5_ctl_manager *ctl_mgr;
270 const struct mdp5_sub_block *ctl_cfg = &hw_cfg->ctl;
271 unsigned long flags;
272 int c, ret;
273
274 ctl_mgr = kzalloc(sizeof(*ctl_mgr), GFP_KERNEL);
275 if (!ctl_mgr) {
276 dev_err(dev->dev, "failed to allocate CTL manager\n");
277 ret = -ENOMEM;
278 goto fail;
279 }
280
281 if (unlikely(WARN_ON(ctl_cfg->count > MAX_CTL))) {
282 dev_err(dev->dev, "Increase static pool size to at least %d\n",
283 ctl_cfg->count);
284 ret = -ENOSPC;
285 goto fail;
286 }
287
288 /* initialize the CTL manager: */
289 ctl_mgr->dev = dev;
290 ctl_mgr->nlm = hw_cfg->lm.count;
291 ctl_mgr->nctl = ctl_cfg->count;
292 spin_lock_init(&ctl_mgr->pool_lock);
293
294 /* initialize each CTL of the pool: */
295 spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
296 for (c = 0; c < ctl_mgr->nctl; c++) {
297 struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];
298
299 if (WARN_ON(!ctl_cfg->base[c])) {
300 dev_err(dev->dev, "CTL_%d: base is null!\n", c);
301 ret = -EINVAL;
302 goto fail;
303 }
304 ctl->ctlm = ctl_mgr;
305 ctl->id = c;
306 ctl->mode = MODE_NONE;
307 ctl->reg_offset = ctl_cfg->base[c];
308 ctl->flush_mask = MDP5_CTL_FLUSH_CTL;
309 ctl->busy = false;
310 spin_lock_init(&ctl->hw_lock);
311 }
312 spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
313 DBG("Pool of %d CTLs created.", ctl_mgr->nctl);
314
315 return ctl_mgr;
316
317fail:
318 if (ctl_mgr)
319 mdp5_ctlm_destroy(ctl_mgr);
320
321 return ERR_PTR(ret);
322}
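[Editor's note: and the matching init-time call, as the mdp5_kms.c hunk further down performs it; a self-contained sketch of the IS_ERR() handling:

	static int example_init_ctl_manager(struct drm_device *dev,
			void __iomem *mmio, const struct mdp5_cfg_hw *hw_cfg,
			struct mdp5_ctl_manager **out)
	{
		struct mdp5_ctl_manager *ctlm = mdp5_ctlm_init(dev, mmio, hw_cfg);

		if (IS_ERR(ctlm))
			return PTR_ERR(ctlm);	/* e.g. -ENOMEM or -ENOSPC */

		*out = ctlm;
		return 0;
	}
]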
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
new file mode 100644
index 000000000000..1018519b6af2
--- /dev/null
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_ctl.h
@@ -0,0 +1,122 @@
1/*
2 * Copyright (c) 2014 The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14#ifndef __MDP5_CTL_H__
15#define __MDP5_CTL_H__
16
17#include "msm_drv.h"
18
19/*
20 * CTL Manager prototypes:
 21 * mdp5_ctlm_init() returns a ctlm (CTL Manager) handle,
22 * which is then used to call the other mdp5_ctlm_*(ctlm, ...) functions.
23 */
24struct mdp5_ctl_manager;
25struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
26 void __iomem *mmio_base, const struct mdp5_cfg_hw *hw_cfg);
27void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctlm);
28void mdp5_ctlm_destroy(struct mdp5_ctl_manager *ctlm);
29
30/*
31 * CTL prototypes:
 32 * mdp5_ctlm_request(ctlm, ...) returns a ctl (CTL resource) handle,
33 * which is then used to call the other mdp5_ctl_*(ctl, ...) functions.
34 */
35struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctlm, struct drm_crtc *crtc);
36
37int mdp5_ctl_set_intf(struct mdp5_ctl *ctl, enum mdp5_intf intf);
38
39int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, bool enable);
40
41/* @blend_cfg: see LM blender config definition below */
42int mdp5_ctl_blend(struct mdp5_ctl *ctl, u32 lm, u32 blend_cfg);
43
44/* @flush_mask: see CTL flush masks definitions below */
45int mdp5_ctl_commit(struct mdp5_ctl *ctl, u32 flush_mask);
46u32 mdp5_ctl_get_flush(struct mdp5_ctl *ctl);
47
48void mdp5_ctl_release(struct mdp5_ctl *ctl);
49
50/*
51 * blend_cfg (LM blender config):
52 *
 53 * The helper below lets the caller of mdp5_ctl_blend() specify how pipes
 54 * are blended according to their stage (z-order), via the @blend_cfg argument.
55 */
56static inline u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe,
57 enum mdp_mixer_stage_id stage)
58{
59 switch (pipe) {
60 case SSPP_VIG0: return MDP5_CTL_LAYER_REG_VIG0(stage);
61 case SSPP_VIG1: return MDP5_CTL_LAYER_REG_VIG1(stage);
62 case SSPP_VIG2: return MDP5_CTL_LAYER_REG_VIG2(stage);
63 case SSPP_RGB0: return MDP5_CTL_LAYER_REG_RGB0(stage);
64 case SSPP_RGB1: return MDP5_CTL_LAYER_REG_RGB1(stage);
65 case SSPP_RGB2: return MDP5_CTL_LAYER_REG_RGB2(stage);
66 case SSPP_DMA0: return MDP5_CTL_LAYER_REG_DMA0(stage);
67 case SSPP_DMA1: return MDP5_CTL_LAYER_REG_DMA1(stage);
68 case SSPP_VIG3: return MDP5_CTL_LAYER_REG_VIG3(stage);
69 case SSPP_RGB3: return MDP5_CTL_LAYER_REG_RGB3(stage);
70 default: return 0;
71 }
72}
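[Editor's note: for example, a two-layer setup, an RGB pipe at the base stage with a VIG pipe stacked above it, would compose its blend_cfg like this (a sketch; stage ids per the mdp_mixer_stage_id enum):

	static u32 example_blend_cfg(void)
	{
		/* RGB0 at the base stage, VIG0 on top: */
		return mdp_ctl_blend_mask(SSPP_RGB0, STAGE_BASE) |
		       mdp_ctl_blend_mask(SSPP_VIG0, STAGE0);
	}

The result is then handed to mdp5_ctl_blend(ctl, lm, blend_cfg).]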
73
74/*
75 * flush_mask (CTL flush masks):
76 *
 77 * The following helpers allow each DRM entity to get and store
 78 * its own flush mask.
 79 * Once stored, these masks are accessed through each entity's
 80 * interface and used by the caller of mdp5_ctl_commit() to specify
 81 * which block(s) need to be flushed via the @flush_mask parameter.
82 */
83
84#define MDP5_CTL_FLUSH_CURSOR_DUMMY 0x80000000
85
86static inline u32 mdp_ctl_flush_mask_cursor(int cursor_id)
87{
88 /* TODO: use id once multiple cursor support is present */
89 (void)cursor_id;
90
91 return MDP5_CTL_FLUSH_CURSOR_DUMMY;
92}
93
94static inline u32 mdp_ctl_flush_mask_lm(int lm)
95{
96 switch (lm) {
97 case 0: return MDP5_CTL_FLUSH_LM0;
98 case 1: return MDP5_CTL_FLUSH_LM1;
99 case 2: return MDP5_CTL_FLUSH_LM2;
100 case 5: return MDP5_CTL_FLUSH_LM5;
101 default: return 0;
102 }
103}
104
105static inline u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe)
106{
107 switch (pipe) {
108 case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
109 case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
110 case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
111 case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
112 case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
113 case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
114 case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
115 case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
116 case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
117 case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
118 default: return 0;
119 }
120}
121
122#endif /* __MDP5_CTL_H__ */
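[Editor's note: putting this header's pieces together, a hedged sketch of a pageflip-style flush: gather the bits for the plane's pipe, the CRTC's mixer, and the CTL itself, then commit:

	static void example_flush_pipe(struct mdp5_ctl *ctl,
			struct drm_crtc *crtc, enum mdp5_pipe pipe)
	{
		u32 flush_mask = 0;

		flush_mask |= mdp_ctl_flush_mask_pipe(pipe);
		flush_mask |= mdp_ctl_flush_mask_lm(mdp5_crtc_get_lm(crtc));
		flush_mask |= mdp5_ctl_get_flush(ctl);	/* the CTL's own bit */

		mdp5_ctl_commit(ctl, flush_mask);
	}
]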
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
index edec7bfaa952..0254bfdeb92f 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_encoder.c
@@ -24,6 +24,7 @@ struct mdp5_encoder {
24 struct drm_encoder base; 24 struct drm_encoder base;
25 int intf; 25 int intf;
26 enum mdp5_intf intf_id; 26 enum mdp5_intf intf_id;
27 spinlock_t intf_lock; /* protect REG_MDP5_INTF_* registers */
27 bool enabled; 28 bool enabled;
28 uint32_t bsc; 29 uint32_t bsc;
29}; 30};
@@ -115,6 +116,7 @@ static void mdp5_encoder_dpms(struct drm_encoder *encoder, int mode)
115 struct mdp5_kms *mdp5_kms = get_kms(encoder); 116 struct mdp5_kms *mdp5_kms = get_kms(encoder);
116 int intf = mdp5_encoder->intf; 117 int intf = mdp5_encoder->intf;
117 bool enabled = (mode == DRM_MODE_DPMS_ON); 118 bool enabled = (mode == DRM_MODE_DPMS_ON);
119 unsigned long flags;
118 120
119 DBG("mode=%d", mode); 121 DBG("mode=%d", mode);
120 122
@@ -123,9 +125,24 @@ static void mdp5_encoder_dpms(struct drm_encoder *encoder, int mode)
123 125
124 if (enabled) { 126 if (enabled) {
125 bs_set(mdp5_encoder, 1); 127 bs_set(mdp5_encoder, 1);
128 spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
126 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 1); 129 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 1);
130 spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
127 } else { 131 } else {
132 spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
128 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 0); 133 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(intf), 0);
134 spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
135
 136		/*
 137		 * Wait for a vsync so we know the ENABLE=0 has latched before
 138		 * the (connector) source of the vsyncs gets disabled;
 139		 * otherwise we end up in a funny state if we re-enable
 140		 * before the disable latches, with the result that some of
 141		 * the settings for the new modeset (like the new
 142		 * scanout buffer) don't latch properly.
 143		 */
144 mdp_irq_wait(&mdp5_kms->base, intf2vblank(intf));
145
129 bs_set(mdp5_encoder, 0); 146 bs_set(mdp5_encoder, 0);
130 } 147 }
131 148
@@ -150,6 +167,7 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
150 uint32_t display_v_start, display_v_end; 167 uint32_t display_v_start, display_v_end;
151 uint32_t hsync_start_x, hsync_end_x; 168 uint32_t hsync_start_x, hsync_end_x;
152 uint32_t format; 169 uint32_t format;
170 unsigned long flags;
153 171
154 mode = adjusted_mode; 172 mode = adjusted_mode;
155 173
@@ -180,6 +198,8 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
180 display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dtv_hsync_skew; 198 display_v_start = (mode->vtotal - mode->vsync_start) * mode->htotal + dtv_hsync_skew;
181 display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dtv_hsync_skew - 1; 199 display_v_end = vsync_period - ((mode->vsync_start - mode->vdisplay) * mode->htotal) + dtv_hsync_skew - 1;
182 200
201 spin_lock_irqsave(&mdp5_encoder->intf_lock, flags);
202
183 mdp5_write(mdp5_kms, REG_MDP5_INTF_HSYNC_CTL(intf), 203 mdp5_write(mdp5_kms, REG_MDP5_INTF_HSYNC_CTL(intf),
184 MDP5_INTF_HSYNC_CTL_PULSEW(mode->hsync_end - mode->hsync_start) | 204 MDP5_INTF_HSYNC_CTL_PULSEW(mode->hsync_end - mode->hsync_start) |
185 MDP5_INTF_HSYNC_CTL_PERIOD(mode->htotal)); 205 MDP5_INTF_HSYNC_CTL_PERIOD(mode->htotal));
@@ -201,6 +221,8 @@ static void mdp5_encoder_mode_set(struct drm_encoder *encoder,
201 mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_VEND_F0(intf), 0); 221 mdp5_write(mdp5_kms, REG_MDP5_INTF_ACTIVE_VEND_F0(intf), 0);
202 mdp5_write(mdp5_kms, REG_MDP5_INTF_PANEL_FORMAT(intf), format); 222 mdp5_write(mdp5_kms, REG_MDP5_INTF_PANEL_FORMAT(intf), format);
203 mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(intf), 0x3); /* frame+line? */ 223 mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(intf), 0x3); /* frame+line? */
224
225 spin_unlock_irqrestore(&mdp5_encoder->intf_lock, flags);
204} 226}
205 227
206static void mdp5_encoder_prepare(struct drm_encoder *encoder) 228static void mdp5_encoder_prepare(struct drm_encoder *encoder)
@@ -242,6 +264,8 @@ struct drm_encoder *mdp5_encoder_init(struct drm_device *dev, int intf,
242 mdp5_encoder->intf_id = intf_id; 264 mdp5_encoder->intf_id = intf_id;
243 encoder = &mdp5_encoder->base; 265 encoder = &mdp5_encoder->base;
244 266
267 spin_lock_init(&mdp5_encoder->intf_lock);
268
245 drm_encoder_init(dev, encoder, &mdp5_encoder_funcs, 269 drm_encoder_init(dev, encoder, &mdp5_encoder_funcs,
246 DRM_MODE_ENCODER_TMDS); 270 DRM_MODE_ENCODER_TMDS);
247 drm_encoder_helper_add(encoder, &mdp5_encoder_helper_funcs); 271 drm_encoder_helper_add(encoder, &mdp5_encoder_helper_funcs);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
index f2b985bc2adf..70ac81edd40f 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
@@ -15,6 +15,8 @@
15 * this program. If not, see <http://www.gnu.org/licenses/>. 15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */ 16 */
17 17
18#include <linux/irqdomain.h>
19#include <linux/irq.h>
18 20
19#include "msm_drv.h" 21#include "msm_drv.h"
20#include "mdp5_kms.h" 22#include "mdp5_kms.h"
@@ -88,11 +90,17 @@ irqreturn_t mdp5_irq(struct msm_kms *kms)
88 90
89 VERB("intr=%08x", intr); 91 VERB("intr=%08x", intr);
90 92
91 if (intr & MDP5_HW_INTR_STATUS_INTR_MDP) 93 if (intr & MDP5_HW_INTR_STATUS_INTR_MDP) {
92 mdp5_irq_mdp(mdp_kms); 94 mdp5_irq_mdp(mdp_kms);
95 intr &= ~MDP5_HW_INTR_STATUS_INTR_MDP;
96 }
93 97
94 if (intr & MDP5_HW_INTR_STATUS_INTR_HDMI) 98 while (intr) {
95 hdmi_irq(0, mdp5_kms->hdmi); 99 irq_hw_number_t hwirq = fls(intr) - 1;
100 generic_handle_irq(irq_find_mapping(
101 mdp5_kms->irqcontroller.domain, hwirq));
102 intr &= ~(1 << hwirq);
103 }
96 104
97 return IRQ_HANDLED; 105 return IRQ_HANDLED;
98} 106}
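[Editor's note: the new dispatch loop peels interrupt sources off the status word one at a time, highest set bit first (fls() returns a 1-based bit index, hence the -1). A standalone sketch of the same idiom:

	static void example_dispatch(u32 intr)
	{
		while (intr) {
			unsigned int bit = fls(intr) - 1;	/* highest set bit */

			/* ...handle interrupt source 'bit' here... */

			intr &= ~(1U << bit);
		}
	}
]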
@@ -109,3 +117,82 @@ void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
109 mdp_update_vblank_mask(to_mdp_kms(kms), 117 mdp_update_vblank_mask(to_mdp_kms(kms),
110 mdp5_crtc_vblank(crtc), false); 118 mdp5_crtc_vblank(crtc), false);
111} 119}
120
121/*
122 * interrupt-controller implementation, so sub-blocks (hdmi/eDP/dsi/etc)
 123 * can register to get their irqs delivered
124 */
125
126#define VALID_IRQS (MDP5_HW_INTR_STATUS_INTR_DSI0 | \
127 MDP5_HW_INTR_STATUS_INTR_DSI1 | \
128 MDP5_HW_INTR_STATUS_INTR_HDMI | \
129 MDP5_HW_INTR_STATUS_INTR_EDP)
130
131static void mdp5_hw_mask_irq(struct irq_data *irqd)
132{
133 struct mdp5_kms *mdp5_kms = irq_data_get_irq_chip_data(irqd);
134 smp_mb__before_atomic();
135 clear_bit(irqd->hwirq, &mdp5_kms->irqcontroller.enabled_mask);
136 smp_mb__after_atomic();
137}
138
139static void mdp5_hw_unmask_irq(struct irq_data *irqd)
140{
141 struct mdp5_kms *mdp5_kms = irq_data_get_irq_chip_data(irqd);
142 smp_mb__before_atomic();
143 set_bit(irqd->hwirq, &mdp5_kms->irqcontroller.enabled_mask);
144 smp_mb__after_atomic();
145}
146
147static struct irq_chip mdp5_hw_irq_chip = {
148 .name = "mdp5",
149 .irq_mask = mdp5_hw_mask_irq,
150 .irq_unmask = mdp5_hw_unmask_irq,
151};
152
153static int mdp5_hw_irqdomain_map(struct irq_domain *d,
154 unsigned int irq, irq_hw_number_t hwirq)
155{
156 struct mdp5_kms *mdp5_kms = d->host_data;
157
158 if (!(VALID_IRQS & (1 << hwirq)))
159 return -EPERM;
160
161 irq_set_chip_and_handler(irq, &mdp5_hw_irq_chip, handle_level_irq);
162 irq_set_chip_data(irq, mdp5_kms);
163 set_irq_flags(irq, IRQF_VALID);
164
165 return 0;
166}
167
168static struct irq_domain_ops mdp5_hw_irqdomain_ops = {
169 .map = mdp5_hw_irqdomain_map,
170 .xlate = irq_domain_xlate_onecell,
171};
172
173
174int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms)
175{
176 struct device *dev = mdp5_kms->dev->dev;
177 struct irq_domain *d;
178
179 d = irq_domain_add_linear(dev->of_node, 32,
180 &mdp5_hw_irqdomain_ops, mdp5_kms);
181 if (!d) {
182 dev_err(dev, "mdp5 irq domain add failed\n");
183 return -ENXIO;
184 }
185
186 mdp5_kms->irqcontroller.enabled_mask = 0;
187 mdp5_kms->irqcontroller.domain = d;
188
189 return 0;
190}
191
192void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms)
193{
194 if (mdp5_kms->irqcontroller.domain) {
195 irq_domain_remove(mdp5_kms->irqcontroller.domain);
196 mdp5_kms->irqcontroller.domain = NULL;
197 }
198}
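[Editor's note: with the domain registered, a sub-block consumer would map its hw irq to a virq and request it as usual. A hypothetical sketch: the hwirq number, handler, and name are assumptions, while irq_create_mapping() and request_irq() are standard kernel API:

	static int example_subblock_hookup(struct mdp5_kms *mdp5_kms,
			irq_hw_number_t hwirq, irq_handler_t handler, void *priv)
	{
		unsigned int virq;

		virq = irq_create_mapping(mdp5_kms->irqcontroller.domain, hwirq);
		if (!virq)
			return -EINVAL;

		return request_irq(virq, handler, 0, "mdp5-subblock", priv);
	}
]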
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 31a2c6331a1d..a11f1b80c488 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -1,4 +1,5 @@
1/* 1/*
2 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
2 * Copyright (C) 2013 Red Hat 3 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com> 4 * Author: Rob Clark <robdclark@gmail.com>
4 * 5 *
@@ -24,145 +25,11 @@ static const char *iommu_ports[] = {
24 "mdp_0", 25 "mdp_0",
25}; 26};
26 27
27static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev);
28
29const struct mdp5_config *mdp5_cfg;
30
31static const struct mdp5_config msm8x74_config = {
32 .name = "msm8x74",
33 .ctl = {
34 .count = 5,
35 .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
36 },
37 .pipe_vig = {
38 .count = 3,
39 .base = { 0x01200, 0x01600, 0x01a00 },
40 },
41 .pipe_rgb = {
42 .count = 3,
43 .base = { 0x01e00, 0x02200, 0x02600 },
44 },
45 .pipe_dma = {
46 .count = 2,
47 .base = { 0x02a00, 0x02e00 },
48 },
49 .lm = {
50 .count = 5,
51 .base = { 0x03200, 0x03600, 0x03a00, 0x03e00, 0x04200 },
52 },
53 .dspp = {
54 .count = 3,
55 .base = { 0x04600, 0x04a00, 0x04e00 },
56 },
57 .ad = {
58 .count = 2,
59 .base = { 0x13100, 0x13300 }, /* NOTE: no ad in v1.0 */
60 },
61 .intf = {
62 .count = 4,
63 .base = { 0x12500, 0x12700, 0x12900, 0x12b00 },
64 },
65};
66
67static const struct mdp5_config apq8084_config = {
68 .name = "apq8084",
69 .ctl = {
70 .count = 5,
71 .base = { 0x00600, 0x00700, 0x00800, 0x00900, 0x00a00 },
72 },
73 .pipe_vig = {
74 .count = 4,
75 .base = { 0x01200, 0x01600, 0x01a00, 0x01e00 },
76 },
77 .pipe_rgb = {
78 .count = 4,
79 .base = { 0x02200, 0x02600, 0x02a00, 0x02e00 },
80 },
81 .pipe_dma = {
82 .count = 2,
83 .base = { 0x03200, 0x03600 },
84 },
85 .lm = {
86 .count = 6,
87 .base = { 0x03a00, 0x03e00, 0x04200, 0x04600, 0x04a00, 0x04e00 },
88 },
89 .dspp = {
90 .count = 4,
91 .base = { 0x05200, 0x05600, 0x05a00, 0x05e00 },
92
93 },
94 .ad = {
95 .count = 3,
96 .base = { 0x13500, 0x13700, 0x13900 },
97 },
98 .intf = {
99 .count = 5,
100 .base = { 0x12500, 0x12700, 0x12900, 0x12b00, 0x12d00 },
101 },
102};
103
104struct mdp5_config_entry {
105 int revision;
106 const struct mdp5_config *config;
107};
108
109static const struct mdp5_config_entry mdp5_configs[] = {
110 { .revision = 0, .config = &msm8x74_config },
111 { .revision = 2, .config = &msm8x74_config },
112 { .revision = 3, .config = &apq8084_config },
113};
114
115static int mdp5_select_hw_cfg(struct msm_kms *kms)
116{
117 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
118 struct drm_device *dev = mdp5_kms->dev;
119 uint32_t version, major, minor;
120 int i, ret = 0;
121
122 mdp5_enable(mdp5_kms);
123 version = mdp5_read(mdp5_kms, REG_MDP5_MDP_VERSION);
124 mdp5_disable(mdp5_kms);
125
126 major = FIELD(version, MDP5_MDP_VERSION_MAJOR);
127 minor = FIELD(version, MDP5_MDP_VERSION_MINOR);
128
129 DBG("found MDP5 version v%d.%d", major, minor);
130
131 if (major != 1) {
132 dev_err(dev->dev, "unexpected MDP major version: v%d.%d\n",
133 major, minor);
134 ret = -ENXIO;
135 goto out;
136 }
137
138 mdp5_kms->rev = minor;
139
140 /* only after mdp5_cfg global pointer's init can we access the hw */
141 for (i = 0; i < ARRAY_SIZE(mdp5_configs); i++) {
142 if (mdp5_configs[i].revision != minor)
143 continue;
144 mdp5_kms->hw_cfg = mdp5_cfg = mdp5_configs[i].config;
145 break;
146 }
147 if (unlikely(!mdp5_kms->hw_cfg)) {
148 dev_err(dev->dev, "unexpected MDP minor revision: v%d.%d\n",
149 major, minor);
150 ret = -ENXIO;
151 goto out;
152 }
153
154 DBG("MDP5: %s config selected", mdp5_kms->hw_cfg->name);
155
156 return 0;
157out:
158 return ret;
159}
160
161static int mdp5_hw_init(struct msm_kms *kms) 28static int mdp5_hw_init(struct msm_kms *kms)
162{ 29{
163 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); 30 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
164 struct drm_device *dev = mdp5_kms->dev; 31 struct drm_device *dev = mdp5_kms->dev;
165 int i; 32 unsigned long flags;
166 33
167 pm_runtime_get_sync(dev->dev); 34 pm_runtime_get_sync(dev->dev);
168 35
@@ -190,10 +57,11 @@ static int mdp5_hw_init(struct msm_kms *kms)
190 * care. 57 * care.
191 */ 58 */
192 59
60 spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
193 mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0); 61 mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, 0);
62 spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
194 63
195 for (i = 0; i < mdp5_kms->hw_cfg->ctl.count; i++) 64 mdp5_ctlm_hw_reset(mdp5_kms->ctlm);
196 mdp5_write(mdp5_kms, REG_MDP5_CTL_OP(i), 0);
197 65
198 pm_runtime_put_sync(dev->dev); 66 pm_runtime_put_sync(dev->dev);
199 67
@@ -221,10 +89,20 @@ static void mdp5_destroy(struct msm_kms *kms)
221 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); 89 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
222 struct msm_mmu *mmu = mdp5_kms->mmu; 90 struct msm_mmu *mmu = mdp5_kms->mmu;
223 91
92 mdp5_irq_domain_fini(mdp5_kms);
93
224 if (mmu) { 94 if (mmu) {
225 mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports)); 95 mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
226 mmu->funcs->destroy(mmu); 96 mmu->funcs->destroy(mmu);
227 } 97 }
98
99 if (mdp5_kms->ctlm)
100 mdp5_ctlm_destroy(mdp5_kms->ctlm);
101 if (mdp5_kms->smp)
102 mdp5_smp_destroy(mdp5_kms->smp);
103 if (mdp5_kms->cfg)
104 mdp5_cfg_destroy(mdp5_kms->cfg);
105
228 kfree(mdp5_kms); 106 kfree(mdp5_kms);
229} 107}
230 108
@@ -274,17 +152,31 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
274 static const enum mdp5_pipe crtcs[] = { 152 static const enum mdp5_pipe crtcs[] = {
275 SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3, 153 SSPP_RGB0, SSPP_RGB1, SSPP_RGB2, SSPP_RGB3,
276 }; 154 };
155 static const enum mdp5_pipe pub_planes[] = {
156 SSPP_VIG0, SSPP_VIG1, SSPP_VIG2, SSPP_VIG3,
157 };
277 struct drm_device *dev = mdp5_kms->dev; 158 struct drm_device *dev = mdp5_kms->dev;
278 struct msm_drm_private *priv = dev->dev_private; 159 struct msm_drm_private *priv = dev->dev_private;
279 struct drm_encoder *encoder; 160 struct drm_encoder *encoder;
161 const struct mdp5_cfg_hw *hw_cfg;
280 int i, ret; 162 int i, ret;
281 163
282 /* construct CRTCs: */ 164 hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
283 for (i = 0; i < mdp5_kms->hw_cfg->pipe_rgb.count; i++) { 165
166 /* register our interrupt-controller for hdmi/eDP/dsi/etc
167 * to use for irqs routed through mdp:
168 */
169 ret = mdp5_irq_domain_init(mdp5_kms);
170 if (ret)
171 goto fail;
172
173 /* construct CRTCs and their private planes: */
174 for (i = 0; i < hw_cfg->pipe_rgb.count; i++) {
284 struct drm_plane *plane; 175 struct drm_plane *plane;
285 struct drm_crtc *crtc; 176 struct drm_crtc *crtc;
286 177
287 plane = mdp5_plane_init(dev, crtcs[i], true); 178 plane = mdp5_plane_init(dev, crtcs[i], true,
179 hw_cfg->pipe_rgb.base[i]);
288 if (IS_ERR(plane)) { 180 if (IS_ERR(plane)) {
289 ret = PTR_ERR(plane); 181 ret = PTR_ERR(plane);
290 dev_err(dev->dev, "failed to construct plane for %s (%d)\n", 182 dev_err(dev->dev, "failed to construct plane for %s (%d)\n",
@@ -302,6 +194,20 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
302 priv->crtcs[priv->num_crtcs++] = crtc; 194 priv->crtcs[priv->num_crtcs++] = crtc;
303 } 195 }
304 196
197 /* Construct public planes: */
198 for (i = 0; i < hw_cfg->pipe_vig.count; i++) {
199 struct drm_plane *plane;
200
201 plane = mdp5_plane_init(dev, pub_planes[i], false,
202 hw_cfg->pipe_vig.base[i]);
203 if (IS_ERR(plane)) {
204 ret = PTR_ERR(plane);
205 dev_err(dev->dev, "failed to construct %s plane: %d\n",
206 pipe2name(pub_planes[i]), ret);
207 goto fail;
208 }
209 }
210
305 /* Construct encoder for HDMI: */ 211 /* Construct encoder for HDMI: */
306 encoder = mdp5_encoder_init(dev, 3, INTF_HDMI); 212 encoder = mdp5_encoder_init(dev, 3, INTF_HDMI);
307 if (IS_ERR(encoder)) { 213 if (IS_ERR(encoder)) {
@@ -324,11 +230,12 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
324 priv->encoders[priv->num_encoders++] = encoder; 230 priv->encoders[priv->num_encoders++] = encoder;
325 231
326 /* Construct bridge/connector for HDMI: */ 232 /* Construct bridge/connector for HDMI: */
327 mdp5_kms->hdmi = hdmi_init(dev, encoder); 233 if (priv->hdmi) {
328 if (IS_ERR(mdp5_kms->hdmi)) { 234 ret = hdmi_modeset_init(priv->hdmi, dev, encoder);
329 ret = PTR_ERR(mdp5_kms->hdmi); 235 if (ret) {
330 dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret); 236 dev_err(dev->dev, "failed to initialize HDMI: %d\n", ret);
331 goto fail; 237 goto fail;
238 }
332 } 239 }
333 240
334 return 0; 241 return 0;
@@ -337,6 +244,21 @@ fail:
337 return ret; 244 return ret;
338} 245}
339 246
247static void read_hw_revision(struct mdp5_kms *mdp5_kms,
248 uint32_t *major, uint32_t *minor)
249{
250 uint32_t version;
251
252 mdp5_enable(mdp5_kms);
253 version = mdp5_read(mdp5_kms, REG_MDP5_MDP_VERSION);
254 mdp5_disable(mdp5_kms);
255
256 *major = FIELD(version, MDP5_MDP_VERSION_MAJOR);
257 *minor = FIELD(version, MDP5_MDP_VERSION_MINOR);
258
259 DBG("MDP5 version v%d.%d", *major, *minor);
260}
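[Editor's note: FIELD() here is the msm driver's mask-and-shift helper over the generated register definitions. As an illustration only, with hypothetical mask/shift values since the real ones live in mdp5.xml.h, the decode amounts to:

	/* hypothetical layout, for illustration: */
	#define EX_MDP5_MDP_VERSION_MINOR__MASK		0x00ff0000
	#define EX_MDP5_MDP_VERSION_MINOR__SHIFT	16
	#define EX_MDP5_MDP_VERSION_MAJOR__MASK		0xf0000000
	#define EX_MDP5_MDP_VERSION_MAJOR__SHIFT	28

	static void example_decode_version(uint32_t version,
			uint32_t *major, uint32_t *minor)
	{
		*major = (version & EX_MDP5_MDP_VERSION_MAJOR__MASK)
				>> EX_MDP5_MDP_VERSION_MAJOR__SHIFT;
		*minor = (version & EX_MDP5_MDP_VERSION_MINOR__MASK)
				>> EX_MDP5_MDP_VERSION_MINOR__SHIFT;
	}
]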
261
340static int get_clk(struct platform_device *pdev, struct clk **clkp, 262static int get_clk(struct platform_device *pdev, struct clk **clkp,
341 const char *name) 263 const char *name)
342{ 264{
@@ -353,10 +275,11 @@ static int get_clk(struct platform_device *pdev, struct clk **clkp,
353struct msm_kms *mdp5_kms_init(struct drm_device *dev) 275struct msm_kms *mdp5_kms_init(struct drm_device *dev)
354{ 276{
355 struct platform_device *pdev = dev->platformdev; 277 struct platform_device *pdev = dev->platformdev;
356 struct mdp5_platform_config *config = mdp5_get_config(pdev); 278 struct mdp5_cfg *config;
357 struct mdp5_kms *mdp5_kms; 279 struct mdp5_kms *mdp5_kms;
358 struct msm_kms *kms = NULL; 280 struct msm_kms *kms = NULL;
359 struct msm_mmu *mmu; 281 struct msm_mmu *mmu;
282 uint32_t major, minor;
360 int i, ret; 283 int i, ret;
361 284
362 mdp5_kms = kzalloc(sizeof(*mdp5_kms), GFP_KERNEL); 285 mdp5_kms = kzalloc(sizeof(*mdp5_kms), GFP_KERNEL);
@@ -366,12 +289,13 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
366 goto fail; 289 goto fail;
367 } 290 }
368 291
292 spin_lock_init(&mdp5_kms->resource_lock);
293
369 mdp_kms_init(&mdp5_kms->base, &kms_funcs); 294 mdp_kms_init(&mdp5_kms->base, &kms_funcs);
370 295
371 kms = &mdp5_kms->base.base; 296 kms = &mdp5_kms->base.base;
372 297
373 mdp5_kms->dev = dev; 298 mdp5_kms->dev = dev;
374 mdp5_kms->smp_blk_cnt = config->smp_blk_cnt;
375 299
376 mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5"); 300 mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5");
377 if (IS_ERR(mdp5_kms->mmio)) { 301 if (IS_ERR(mdp5_kms->mmio)) {
@@ -416,24 +340,52 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
416 if (ret) 340 if (ret)
417 goto fail; 341 goto fail;
418 342
419 ret = clk_set_rate(mdp5_kms->src_clk, config->max_clk); 343 /* we need to set a default rate before enabling. Set a safe
344 * rate first, then figure out hw revision, and then set a
345 * more optimal rate:
346 */
347 clk_set_rate(mdp5_kms->src_clk, 200000000);
348
349 read_hw_revision(mdp5_kms, &major, &minor);
420 350
421 ret = mdp5_select_hw_cfg(kms); 351 mdp5_kms->cfg = mdp5_cfg_init(mdp5_kms, major, minor);
422 if (ret) 352 if (IS_ERR(mdp5_kms->cfg)) {
353 ret = PTR_ERR(mdp5_kms->cfg);
354 mdp5_kms->cfg = NULL;
423 goto fail; 355 goto fail;
356 }
357
358 config = mdp5_cfg_get_config(mdp5_kms->cfg);
359
360 /* TODO: compute core clock rate at runtime */
361 clk_set_rate(mdp5_kms->src_clk, config->hw->max_clk);
362
363 mdp5_kms->smp = mdp5_smp_init(mdp5_kms->dev, &config->hw->smp);
364 if (IS_ERR(mdp5_kms->smp)) {
365 ret = PTR_ERR(mdp5_kms->smp);
366 mdp5_kms->smp = NULL;
367 goto fail;
368 }
369
370 mdp5_kms->ctlm = mdp5_ctlm_init(dev, mdp5_kms->mmio, config->hw);
371 if (IS_ERR(mdp5_kms->ctlm)) {
372 ret = PTR_ERR(mdp5_kms->ctlm);
373 mdp5_kms->ctlm = NULL;
374 goto fail;
375 }
424 376
425 /* make sure things are off before attaching iommu (bootloader could 377 /* make sure things are off before attaching iommu (bootloader could
426 * have left things on, in which case we'll start getting faults if 378 * have left things on, in which case we'll start getting faults if
427 * we don't disable): 379 * we don't disable):
428 */ 380 */
429 mdp5_enable(mdp5_kms); 381 mdp5_enable(mdp5_kms);
430 for (i = 0; i < mdp5_kms->hw_cfg->intf.count; i++) 382 for (i = 0; i < config->hw->intf.count; i++)
431 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0); 383 mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);
432 mdp5_disable(mdp5_kms); 384 mdp5_disable(mdp5_kms);
433 mdelay(16); 385 mdelay(16);
434 386
435 if (config->iommu) { 387 if (config->platform.iommu) {
436 mmu = msm_iommu_new(&pdev->dev, config->iommu); 388 mmu = msm_iommu_new(&pdev->dev, config->platform.iommu);
437 if (IS_ERR(mmu)) { 389 if (IS_ERR(mmu)) {
438 ret = PTR_ERR(mmu); 390 ret = PTR_ERR(mmu);
439 dev_err(dev->dev, "failed to init iommu: %d\n", ret); 391 dev_err(dev->dev, "failed to init iommu: %d\n", ret);
@@ -474,18 +426,3 @@ fail:
474 mdp5_destroy(kms); 426 mdp5_destroy(kms);
475 return ERR_PTR(ret); 427 return ERR_PTR(ret);
476} 428}
477
478static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev)
479{
480 static struct mdp5_platform_config config = {};
481#ifdef CONFIG_OF
482 /* TODO */
483#endif
484 config.iommu = iommu_domain_alloc(&platform_bus_type);
485 /* TODO hard-coded in downstream mdss, but should it be? */
486 config.max_clk = 200000000;
487 /* TODO get from DT: */
488 config.smp_blk_cnt = 22;
489
490 return &config;
491}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 5bf340dd0f00..dd69c77c0d64 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -21,25 +21,9 @@
21#include "msm_drv.h" 21#include "msm_drv.h"
22#include "msm_kms.h" 22#include "msm_kms.h"
23#include "mdp/mdp_kms.h" 23#include "mdp/mdp_kms.h"
24/* dynamic offsets used by mdp5.xml.h (initialized in mdp5_kms.c) */ 24#include "mdp5_cfg.h" /* must be included before mdp5.xml.h */
25#define MDP5_MAX_BASES 8
26struct mdp5_sub_block {
27 int count;
28 uint32_t base[MDP5_MAX_BASES];
29};
30struct mdp5_config {
31 char *name;
32 struct mdp5_sub_block ctl;
33 struct mdp5_sub_block pipe_vig;
34 struct mdp5_sub_block pipe_rgb;
35 struct mdp5_sub_block pipe_dma;
36 struct mdp5_sub_block lm;
37 struct mdp5_sub_block dspp;
38 struct mdp5_sub_block ad;
39 struct mdp5_sub_block intf;
40};
41extern const struct mdp5_config *mdp5_cfg;
42#include "mdp5.xml.h" 25#include "mdp5.xml.h"
26#include "mdp5_ctl.h"
43#include "mdp5_smp.h" 27#include "mdp5_smp.h"
44 28
45struct mdp5_kms { 29struct mdp5_kms {
@@ -47,17 +31,14 @@ struct mdp5_kms {
47 31
48 struct drm_device *dev; 32 struct drm_device *dev;
49 33
50 int rev; 34 struct mdp5_cfg_handler *cfg;
51 const struct mdp5_config *hw_cfg;
52 35
53 /* mapper-id used to request GEM buffer mapped for scanout: */ 36 /* mapper-id used to request GEM buffer mapped for scanout: */
54 int id; 37 int id;
55 struct msm_mmu *mmu; 38 struct msm_mmu *mmu;
56 39
57 /* for tracking smp allocation amongst pipes: */ 40 struct mdp5_smp *smp;
58 mdp5_smp_state_t smp_state; 41 struct mdp5_ctl_manager *ctlm;
59 struct mdp5_client_smp_state smp_client_state[CID_MAX];
60 int smp_blk_cnt;
61 42
62 /* io/register spaces: */ 43 /* io/register spaces: */
63 void __iomem *mmio, *vbif; 44 void __iomem *mmio, *vbif;
@@ -71,18 +52,47 @@ struct mdp5_kms {
71 struct clk *lut_clk; 52 struct clk *lut_clk;
72 struct clk *vsync_clk; 53 struct clk *vsync_clk;
73 54
74 struct hdmi *hdmi; 55 /*
 56	 * lock to protect access to global resources, i.e., the following register:
57 * - REG_MDP5_DISP_INTF_SEL
58 */
59 spinlock_t resource_lock;
75 60
76 struct mdp_irq error_handler; 61 struct mdp_irq error_handler;
62
63 struct {
64 volatile unsigned long enabled_mask;
65 struct irq_domain *domain;
66 } irqcontroller;
77}; 67};
78#define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base) 68#define to_mdp5_kms(x) container_of(x, struct mdp5_kms, base)
79 69
80/* platform config data (ie. from DT, or pdata) */ 70struct mdp5_plane_state {
81struct mdp5_platform_config { 71 struct drm_plane_state base;
82 struct iommu_domain *iommu; 72
 83	uint32_t max_clk;			 73	/* "virtual" zpos: we calculate the actual mixer stage at runtime
 84	int smp_blk_cnt;			 74	 * by sorting the attached planes by zpos and then assigning
 75	 * mixer stage lowest to highest. Private planes get a default
 76	 * zpos of zero, and public planes a unique value that is
77 * greater than zero. This way, things work out if a naive
78 * userspace assigns planes to a crtc without setting zpos.
79 */
80 int zpos;
81
82 /* the actual mixer stage, calculated in crtc->atomic_check()
83 * NOTE: this should move to mdp5_crtc_state, when that exists
84 */
85 enum mdp_mixer_stage_id stage;
86
87 /* some additional transactional status to help us know in the
88 * apply path whether we need to update SMP allocation, and
 89	 * whether the current update is still pending:
90 */
91 bool mode_changed : 1;
92 bool pending : 1;
85}; 93};
94#define to_mdp5_plane_state(x) \
95 container_of(x, struct mdp5_plane_state, base)
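[Editor's note: the driver-private state embeds the base drm_plane_state, so any &state->base pointer handed to the DRM core can be downcast back with container_of. A tiny sketch of the round trip (the helper name is hypothetical):

	static enum mdp_mixer_stage_id example_stage_of(struct drm_plane_state *state)
	{
		struct mdp5_plane_state *pstate = to_mdp5_plane_state(state);

		return pstate->stage;	/* assigned in crtc->atomic_check() */
	}
]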
86 96
87static inline void mdp5_write(struct mdp5_kms *mdp5_kms, u32 reg, u32 data) 97static inline void mdp5_write(struct mdp5_kms *mdp5_kms, u32 reg, u32 data)
88{ 98{
@@ -107,23 +117,6 @@ static inline const char *pipe2name(enum mdp5_pipe pipe)
107 return names[pipe]; 117 return names[pipe];
108} 118}
109 119
110static inline uint32_t pipe2flush(enum mdp5_pipe pipe)
111{
112 switch (pipe) {
113 case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
114 case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
115 case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
116 case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
117 case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
118 case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
119 case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
120 case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
121 case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
122 case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
123 default: return 0;
124 }
125}
126
127static inline int pipe2nclients(enum mdp5_pipe pipe) 120static inline int pipe2nclients(enum mdp5_pipe pipe)
128{ 121{
129 switch (pipe) { 122 switch (pipe) {
@@ -137,34 +130,6 @@ static inline int pipe2nclients(enum mdp5_pipe pipe)
137 } 130 }
138} 131}
139 132
140static inline enum mdp5_client_id pipe2client(enum mdp5_pipe pipe, int plane)
141{
142 WARN_ON(plane >= pipe2nclients(pipe));
143 switch (pipe) {
144 case SSPP_VIG0: return CID_VIG0_Y + plane;
145 case SSPP_VIG1: return CID_VIG1_Y + plane;
146 case SSPP_VIG2: return CID_VIG2_Y + plane;
147 case SSPP_RGB0: return CID_RGB0;
148 case SSPP_RGB1: return CID_RGB1;
149 case SSPP_RGB2: return CID_RGB2;
150 case SSPP_DMA0: return CID_DMA0_Y + plane;
151 case SSPP_DMA1: return CID_DMA1_Y + plane;
152 case SSPP_VIG3: return CID_VIG3_Y + plane;
153 case SSPP_RGB3: return CID_RGB3;
154 default: return CID_UNUSED;
155 }
156}
157
158static inline uint32_t mixer2flush(int lm)
159{
160 switch (lm) {
161 case 0: return MDP5_CTL_FLUSH_LM0;
162 case 1: return MDP5_CTL_FLUSH_LM1;
163 case 2: return MDP5_CTL_FLUSH_LM2;
164 default: return 0;
165 }
166}
167
168static inline uint32_t intf2err(int intf) 133static inline uint32_t intf2err(int intf)
169{ 134{
170 switch (intf) { 135 switch (intf) {
@@ -197,6 +162,8 @@ void mdp5_irq_uninstall(struct msm_kms *kms);
197irqreturn_t mdp5_irq(struct msm_kms *kms); 162irqreturn_t mdp5_irq(struct msm_kms *kms);
198int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); 163int mdp5_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
199void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc); 164void mdp5_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
165int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms);
166void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms);
200 167
201static inline 168static inline
202uint32_t mdp5_get_formats(enum mdp5_pipe pipe, uint32_t *pixel_formats, 169uint32_t mdp5_get_formats(enum mdp5_pipe pipe, uint32_t *pixel_formats,
@@ -210,26 +177,18 @@ uint32_t mdp5_get_formats(enum mdp5_pipe pipe, uint32_t *pixel_formats,
210 177
211void mdp5_plane_install_properties(struct drm_plane *plane, 178void mdp5_plane_install_properties(struct drm_plane *plane,
212 struct drm_mode_object *obj); 179 struct drm_mode_object *obj);
213void mdp5_plane_set_scanout(struct drm_plane *plane, 180uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
214 struct drm_framebuffer *fb);
215int mdp5_plane_mode_set(struct drm_plane *plane,
216 struct drm_crtc *crtc, struct drm_framebuffer *fb,
217 int crtc_x, int crtc_y,
218 unsigned int crtc_w, unsigned int crtc_h,
219 uint32_t src_x, uint32_t src_y,
220 uint32_t src_w, uint32_t src_h);
221void mdp5_plane_complete_flip(struct drm_plane *plane); 181void mdp5_plane_complete_flip(struct drm_plane *plane);
222enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane); 182enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
223struct drm_plane *mdp5_plane_init(struct drm_device *dev, 183struct drm_plane *mdp5_plane_init(struct drm_device *dev,
224 enum mdp5_pipe pipe, bool private_plane); 184 enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset);
225 185
226uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc); 186uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc);
227 187
188int mdp5_crtc_get_lm(struct drm_crtc *crtc);
228void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file); 189void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
229void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf, 190void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
230 enum mdp5_intf intf_id); 191 enum mdp5_intf intf_id);
231void mdp5_crtc_attach(struct drm_crtc *crtc, struct drm_plane *plane);
232void mdp5_crtc_detach(struct drm_crtc *crtc, struct drm_plane *plane);
233struct drm_crtc *mdp5_crtc_init(struct drm_device *dev, 192struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
234 struct drm_plane *plane, int id); 193 struct drm_plane *plane, int id);
235 194
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index f3daec4412ad..26e5fdea6594 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -1,4 +1,5 @@
1/* 1/*
2 * Copyright (c) 2014 The Linux Foundation. All rights reserved.
2 * Copyright (C) 2013 Red Hat 3 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com> 4 * Author: Rob Clark <robdclark@gmail.com>
4 * 5 *
@@ -17,6 +18,7 @@
17 18
18#include "mdp5_kms.h" 19#include "mdp5_kms.h"
19 20
21#define MAX_PLANE 4
20 22
21struct mdp5_plane { 23struct mdp5_plane {
22 struct drm_plane base; 24 struct drm_plane base;
@@ -24,6 +26,11 @@ struct mdp5_plane {
24 26
25 enum mdp5_pipe pipe; 27 enum mdp5_pipe pipe;
26 28
29 spinlock_t pipe_lock; /* protect REG_MDP5_PIPE_* registers */
30 uint32_t reg_offset;
31
32 uint32_t flush_mask; /* used to commit pipe registers */
33
27 uint32_t nformats; 34 uint32_t nformats;
28 uint32_t formats[32]; 35 uint32_t formats[32];
29 36
@@ -31,31 +38,24 @@ struct mdp5_plane {
31}; 38};
32#define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base) 39#define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base)
33 40
41static int mdp5_plane_mode_set(struct drm_plane *plane,
42 struct drm_crtc *crtc, struct drm_framebuffer *fb,
43 int crtc_x, int crtc_y,
44 unsigned int crtc_w, unsigned int crtc_h,
45 uint32_t src_x, uint32_t src_y,
46 uint32_t src_w, uint32_t src_h);
47static void set_scanout_locked(struct drm_plane *plane,
48 struct drm_framebuffer *fb);
49
34static struct mdp5_kms *get_kms(struct drm_plane *plane) 50static struct mdp5_kms *get_kms(struct drm_plane *plane)
35{ 51{
36 struct msm_drm_private *priv = plane->dev->dev_private; 52 struct msm_drm_private *priv = plane->dev->dev_private;
37 return to_mdp5_kms(to_mdp_kms(priv->kms)); 53 return to_mdp5_kms(to_mdp_kms(priv->kms));
38} 54}
39 55
40static int mdp5_plane_update(struct drm_plane *plane, 56static bool plane_enabled(struct drm_plane_state *state)
41 struct drm_crtc *crtc, struct drm_framebuffer *fb,
42 int crtc_x, int crtc_y,
43 unsigned int crtc_w, unsigned int crtc_h,
44 uint32_t src_x, uint32_t src_y,
45 uint32_t src_w, uint32_t src_h)
46{ 57{
47 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); 58 return state->fb && state->crtc;
48
49 mdp5_plane->enabled = true;
50
51 if (plane->fb)
52 drm_framebuffer_unreference(plane->fb);
53
54 drm_framebuffer_reference(fb);
55
56 return mdp5_plane_mode_set(plane, crtc, fb,
57 crtc_x, crtc_y, crtc_w, crtc_h,
58 src_x, src_y, src_w, src_h);
59} 59}
60 60
61static int mdp5_plane_disable(struct drm_plane *plane) 61static int mdp5_plane_disable(struct drm_plane *plane)
@@ -63,21 +63,13 @@ static int mdp5_plane_disable(struct drm_plane *plane)
63 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); 63 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
64 struct mdp5_kms *mdp5_kms = get_kms(plane); 64 struct mdp5_kms *mdp5_kms = get_kms(plane);
65 enum mdp5_pipe pipe = mdp5_plane->pipe; 65 enum mdp5_pipe pipe = mdp5_plane->pipe;
66 int i;
67 66
68 DBG("%s: disable", mdp5_plane->name); 67 DBG("%s: disable", mdp5_plane->name);
69 68
70 /* update our SMP request to zero (release all our blks): */ 69 if (mdp5_kms) {
71 for (i = 0; i < pipe2nclients(pipe); i++) 70 /* Release the memory we requested earlier from the SMP: */
72 mdp5_smp_request(mdp5_kms, pipe2client(pipe, i), 0); 71 mdp5_smp_release(mdp5_kms->smp, pipe);
73 72 }
74 /* TODO detaching now will cause us not to get the last
75 * vblank and mdp5_smp_commit().. so other planes will
76 * still see smp blocks previously allocated to us as
77 * in-use..
78 */
79 if (plane->crtc)
80 mdp5_crtc_detach(plane->crtc, plane);
81 73
82 return 0; 74 return 0;
83} 75}
@@ -85,11 +77,8 @@ static int mdp5_plane_disable(struct drm_plane *plane)
85static void mdp5_plane_destroy(struct drm_plane *plane) 77static void mdp5_plane_destroy(struct drm_plane *plane)
86{ 78{
87 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); 79 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
88 struct msm_drm_private *priv = plane->dev->dev_private;
89
90 if (priv->kms)
91 mdp5_plane_disable(plane);
92 80
81 drm_plane_helper_disable(plane);
93 drm_plane_cleanup(plane); 82 drm_plane_cleanup(plane);
94 83
95 kfree(mdp5_plane); 84 kfree(mdp5_plane);
@@ -109,109 +98,186 @@ int mdp5_plane_set_property(struct drm_plane *plane,
109 return -EINVAL; 98 return -EINVAL;
110} 99}
111 100
101static void mdp5_plane_reset(struct drm_plane *plane)
102{
103 struct mdp5_plane_state *mdp5_state;
104
105 if (plane->state && plane->state->fb)
106 drm_framebuffer_unreference(plane->state->fb);
107
108 kfree(to_mdp5_plane_state(plane->state));
109 mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL);
 110	if (!mdp5_state) {
 111		plane->state = NULL;
 112		return;
 113	}
 114
 115	mdp5_state->zpos = (plane->type == DRM_PLANE_TYPE_PRIMARY)
 116			? 0 : 1 + drm_plane_index(plane);
117 plane->state = &mdp5_state->base;
118}
119
120static struct drm_plane_state *
121mdp5_plane_duplicate_state(struct drm_plane *plane)
122{
123 struct mdp5_plane_state *mdp5_state;
124
125 if (WARN_ON(!plane->state))
126 return NULL;
127
128 mdp5_state = kmemdup(to_mdp5_plane_state(plane->state),
129 sizeof(*mdp5_state), GFP_KERNEL);
130
131 if (mdp5_state && mdp5_state->base.fb)
132 drm_framebuffer_reference(mdp5_state->base.fb);
133
134 mdp5_state->mode_changed = false;
135 mdp5_state->pending = false;
136
137 return &mdp5_state->base;
138}
139
140static void mdp5_plane_destroy_state(struct drm_plane *plane,
141 struct drm_plane_state *state)
142{
143 if (state->fb)
144 drm_framebuffer_unreference(state->fb);
145
146 kfree(to_mdp5_plane_state(state));
147}
148
112static const struct drm_plane_funcs mdp5_plane_funcs = { 149static const struct drm_plane_funcs mdp5_plane_funcs = {
113 .update_plane = mdp5_plane_update, 150 .update_plane = drm_atomic_helper_update_plane,
114 .disable_plane = mdp5_plane_disable, 151 .disable_plane = drm_atomic_helper_disable_plane,
115 .destroy = mdp5_plane_destroy, 152 .destroy = mdp5_plane_destroy,
116 .set_property = mdp5_plane_set_property, 153 .set_property = mdp5_plane_set_property,
154 .reset = mdp5_plane_reset,
155 .atomic_duplicate_state = mdp5_plane_duplicate_state,
156 .atomic_destroy_state = mdp5_plane_destroy_state,
117}; 157};
118 158
119void mdp5_plane_set_scanout(struct drm_plane *plane, 159static int mdp5_plane_prepare_fb(struct drm_plane *plane,
120 struct drm_framebuffer *fb) 160 struct drm_framebuffer *fb)
121{ 161{
122 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); 162 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
123 struct mdp5_kms *mdp5_kms = get_kms(plane); 163 struct mdp5_kms *mdp5_kms = get_kms(plane);
124 enum mdp5_pipe pipe = mdp5_plane->pipe;
125 uint32_t nplanes = drm_format_num_planes(fb->pixel_format);
126 uint32_t iova[4];
127 int i;
128
129 for (i = 0; i < nplanes; i++) {
130 struct drm_gem_object *bo = msm_framebuffer_bo(fb, i);
131 msm_gem_get_iova(bo, mdp5_kms->id, &iova[i]);
132 }
133 for (; i < 4; i++)
134 iova[i] = 0;
135 164
136 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe), 165 DBG("%s: prepare: FB[%u]", mdp5_plane->name, fb->base.id);
137 MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) | 166 return msm_framebuffer_prepare(fb, mdp5_kms->id);
138 MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));
139
140 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_B(pipe),
141 MDP5_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
142 MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
143
144 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe), iova[0]);
145 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe), iova[1]);
146 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe), iova[2]);
147 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe), iova[3]);
148
149 plane->fb = fb;
150} 167}
151 168
152/* NOTE: looks like if horizontal decimation is used (if we supported that) 169static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
153 * then the width used to calculate SMP block requirements is the post- 170 struct drm_framebuffer *fb)
154 * decimated width. Ie. SMP buffering sits downstream of decimation (which
155 * presumably happens during the dma from scanout buffer).
156 */
157static int request_smp_blocks(struct drm_plane *plane, uint32_t format,
158 uint32_t nplanes, uint32_t width)
159{ 171{
160 struct drm_device *dev = plane->dev;
161 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); 172 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
162 struct mdp5_kms *mdp5_kms = get_kms(plane); 173 struct mdp5_kms *mdp5_kms = get_kms(plane);
163 enum mdp5_pipe pipe = mdp5_plane->pipe;
164 int i, hsub, nlines, nblks, ret;
165 174
166 hsub = drm_format_horz_chroma_subsampling(format); 175 DBG("%s: cleanup: FB[%u]", mdp5_plane->name, fb->base.id);
176 msm_framebuffer_cleanup(fb, mdp5_kms->id);
177}
167 178
168 /* different if BWC (compressed framebuffer?) enabled: */ 179static int mdp5_plane_atomic_check(struct drm_plane *plane,
169 nlines = 2; 180 struct drm_plane_state *state)
181{
182 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
183 struct drm_plane_state *old_state = plane->state;
170 184
171 for (i = 0, nblks = 0; i < nplanes; i++) { 185 DBG("%s: check (%d -> %d)", mdp5_plane->name,
172 int n, fetch_stride, cpp; 186 plane_enabled(old_state), plane_enabled(state));
173 187
174 cpp = drm_format_plane_cpp(format, i); 188 if (plane_enabled(state) && plane_enabled(old_state)) {
175 fetch_stride = width * cpp / (i ? hsub : 1); 189 /* we cannot change SMP block configuration during scanout: */
190 bool full_modeset = false;
191 if (state->fb->pixel_format != old_state->fb->pixel_format) {
192 DBG("%s: pixel_format change!", mdp5_plane->name);
193 full_modeset = true;
194 }
195 if (state->src_w != old_state->src_w) {
196 DBG("%s: src_w change!", mdp5_plane->name);
197 full_modeset = true;
198 }
199 if (to_mdp5_plane_state(old_state)->pending) {
200 DBG("%s: still pending!", mdp5_plane->name);
201 full_modeset = true;
202 }
203 if (full_modeset) {
204 struct drm_crtc_state *crtc_state =
205 drm_atomic_get_crtc_state(state->state, state->crtc);
206 crtc_state->mode_changed = true;
207 to_mdp5_plane_state(state)->mode_changed = true;
208 }
209 } else {
210 to_mdp5_plane_state(state)->mode_changed = true;
211 }
176 212
177 n = DIV_ROUND_UP(fetch_stride * nlines, SMP_BLK_SIZE); 213 return 0;
214}
178 215
179 /* for hw rev v1.00 */ 216static void mdp5_plane_atomic_update(struct drm_plane *plane,
180 if (mdp5_kms->rev == 0) 217 struct drm_plane_state *old_state)
181 n = roundup_pow_of_two(n); 218{
219 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
220 struct drm_plane_state *state = plane->state;
182 221
183 DBG("%s[%d]: request %d SMP blocks", mdp5_plane->name, i, n); 222 DBG("%s: update", mdp5_plane->name);
184 ret = mdp5_smp_request(mdp5_kms, pipe2client(pipe, i), n);
185 if (ret) {
186 dev_err(dev->dev, "Could not allocate %d SMP blocks: %d\n",
187 n, ret);
188 return ret;
189 }
190 223
191 nblks += n; 224 if (!plane_enabled(state)) {
225 to_mdp5_plane_state(state)->pending = true;
226 mdp5_plane_disable(plane);
227 } else if (to_mdp5_plane_state(state)->mode_changed) {
228 int ret;
229 to_mdp5_plane_state(state)->pending = true;
230 ret = mdp5_plane_mode_set(plane,
231 state->crtc, state->fb,
232 state->crtc_x, state->crtc_y,
233 state->crtc_w, state->crtc_h,
234 state->src_x, state->src_y,
235 state->src_w, state->src_h);
236 /* atomic_check should have ensured that this doesn't fail */
237 WARN_ON(ret < 0);
238 } else {
239 unsigned long flags;
240 spin_lock_irqsave(&mdp5_plane->pipe_lock, flags);
241 set_scanout_locked(plane, state->fb);
242 spin_unlock_irqrestore(&mdp5_plane->pipe_lock, flags);
192 } 243 }
193
194 /* in success case, return total # of blocks allocated: */
195 return nblks;
196} 244}
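
The update hook above dispatches three ways, gated by the pending flag that mdp5_plane_complete_flip() clears at vblank. A small sketch of that decision, with hypothetical helper names:

    #include <stdbool.h>

    enum update_kind { UPDATE_DISABLE, UPDATE_FULL_MODESET, UPDATE_SCANOUT_ONLY };

    /* 'pending' means a previous SMP reconfiguration has not yet been
     * committed at vblank, so only a full modeset may touch the pipe. */
    static enum update_kind classify_update(bool enabled, bool format_changed,
                                            bool src_w_changed, bool pending)
    {
        if (!enabled)
            return UPDATE_DISABLE;           /* releases the SMP blocks */
        if (format_changed || src_w_changed || pending)
            return UPDATE_FULL_MODESET;      /* re-runs mode_set, sets pending */
        return UPDATE_SCANOUT_ONLY;          /* just swaps scanout addresses */
    }
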
197 245
198static void set_fifo_thresholds(struct drm_plane *plane, int nblks) 246static const struct drm_plane_helper_funcs mdp5_plane_helper_funcs = {
247 .prepare_fb = mdp5_plane_prepare_fb,
248 .cleanup_fb = mdp5_plane_cleanup_fb,
249 .atomic_check = mdp5_plane_atomic_check,
250 .atomic_update = mdp5_plane_atomic_update,
251};
252
253static void set_scanout_locked(struct drm_plane *plane,
254 struct drm_framebuffer *fb)
199{ 255{
200 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); 256 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
201 struct mdp5_kms *mdp5_kms = get_kms(plane); 257 struct mdp5_kms *mdp5_kms = get_kms(plane);
202 enum mdp5_pipe pipe = mdp5_plane->pipe; 258 enum mdp5_pipe pipe = mdp5_plane->pipe;
203 uint32_t val;
204 259
205 /* 1/4 of SMP pool that is being fetched */ 260 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_A(pipe),
206 val = (nblks * SMP_ENTRIES_PER_BLK) / 4; 261 MDP5_PIPE_SRC_STRIDE_A_P0(fb->pitches[0]) |
262 MDP5_PIPE_SRC_STRIDE_A_P1(fb->pitches[1]));
207 263
208 mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe), val * 1); 264 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_STRIDE_B(pipe),
209 mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe), val * 2); 265 MDP5_PIPE_SRC_STRIDE_B_P2(fb->pitches[2]) |
210 mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe), val * 3); 266 MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
267
268 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe),
269 msm_framebuffer_iova(fb, mdp5_kms->id, 0));
270 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe),
271 msm_framebuffer_iova(fb, mdp5_kms->id, 1));
272 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe),
273 msm_framebuffer_iova(fb, mdp5_kms->id, 2));
274 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe),
275 msm_framebuffer_iova(fb, mdp5_kms->id, 3));
211 276
277 plane->fb = fb;
212} 278}
213 279
214int mdp5_plane_mode_set(struct drm_plane *plane, 280static int mdp5_plane_mode_set(struct drm_plane *plane,
215 struct drm_crtc *crtc, struct drm_framebuffer *fb, 281 struct drm_crtc *crtc, struct drm_framebuffer *fb,
216 int crtc_x, int crtc_y, 282 int crtc_x, int crtc_y,
217 unsigned int crtc_w, unsigned int crtc_h, 283 unsigned int crtc_w, unsigned int crtc_h,
@@ -225,7 +291,8 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
225 uint32_t nplanes, config = 0; 291 uint32_t nplanes, config = 0;
226 uint32_t phasex_step = 0, phasey_step = 0; 292 uint32_t phasex_step = 0, phasey_step = 0;
227 uint32_t hdecm = 0, vdecm = 0; 293 uint32_t hdecm = 0, vdecm = 0;
228 int i, nblks; 294 unsigned long flags;
295 int ret;
229 296
230 nplanes = drm_format_num_planes(fb->pixel_format); 297 nplanes = drm_format_num_planes(fb->pixel_format);
231 298
@@ -243,12 +310,11 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
243 fb->base.id, src_x, src_y, src_w, src_h, 310 fb->base.id, src_x, src_y, src_w, src_h,
244 crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h); 311 crtc->base.id, crtc_x, crtc_y, crtc_w, crtc_h);
245 312
246 /* 313 /* Request some memory from the SMP: */
247 * Calculate and request required # of smp blocks: 314 ret = mdp5_smp_request(mdp5_kms->smp,
248 */ 315 mdp5_plane->pipe, fb->pixel_format, src_w);
249 nblks = request_smp_blocks(plane, fb->pixel_format, nplanes, src_w); 316 if (ret)
250 if (nblks < 0) 317 return ret;
251 return nblks;
252 318
253 /* 319 /*
254 * Currently we update the hw for allocations/requests immediately, 320 * Currently we update the hw for allocations/requests immediately,
@@ -256,8 +322,7 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
256 * would move into atomic->check_plane_state(), while updating the 322 * would move into atomic->check_plane_state(), while updating the
257 * hw would remain here: 323 * hw would remain here:
258 */ 324 */
259 for (i = 0; i < pipe2nclients(pipe); i++) 325 mdp5_smp_configure(mdp5_kms->smp, pipe);
260 mdp5_smp_configure(mdp5_kms, pipe2client(pipe, i));
261 326
262 if (src_w != crtc_w) { 327 if (src_w != crtc_w) {
263 config |= MDP5_PIPE_SCALE_CONFIG_SCALEX_EN; 328 config |= MDP5_PIPE_SCALE_CONFIG_SCALEX_EN;
@@ -269,6 +334,8 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
269 /* TODO calc phasey_step, vdecm */ 334 /* TODO calc phasey_step, vdecm */
270 } 335 }
271 336
337 spin_lock_irqsave(&mdp5_plane->pipe_lock, flags);
338
272 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe), 339 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_IMG_SIZE(pipe),
273 MDP5_PIPE_SRC_IMG_SIZE_WIDTH(src_w) | 340 MDP5_PIPE_SRC_IMG_SIZE_WIDTH(src_w) |
274 MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(src_h)); 341 MDP5_PIPE_SRC_IMG_SIZE_HEIGHT(src_h));
@@ -289,8 +356,6 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
289 MDP5_PIPE_OUT_XY_X(crtc_x) | 356 MDP5_PIPE_OUT_XY_X(crtc_x) |
290 MDP5_PIPE_OUT_XY_Y(crtc_y)); 357 MDP5_PIPE_OUT_XY_Y(crtc_y));
291 358
292 mdp5_plane_set_scanout(plane, fb);
293
294 format = to_mdp_format(msm_framebuffer_format(fb)); 359 format = to_mdp_format(msm_framebuffer_format(fb));
295 360
296 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_FORMAT(pipe), 361 mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC_FORMAT(pipe),
@@ -330,22 +395,24 @@ int mdp5_plane_mode_set(struct drm_plane *plane,
330 MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER(SCALE_FILTER_NEAREST) | 395 MDP5_PIPE_SCALE_CONFIG_SCALEX_MAX_FILTER(SCALE_FILTER_NEAREST) |
331 MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(SCALE_FILTER_NEAREST)); 396 MDP5_PIPE_SCALE_CONFIG_SCALEY_MAX_FILTER(SCALE_FILTER_NEAREST));
332 397
333 set_fifo_thresholds(plane, nblks); 398 set_scanout_locked(plane, fb);
334 399
335 /* TODO detach from old crtc (if we had more than one) */ 400 spin_unlock_irqrestore(&mdp5_plane->pipe_lock, flags);
336 mdp5_crtc_attach(crtc, plane);
337 401
338 return 0; 402 return ret;
339} 403}
340 404
341void mdp5_plane_complete_flip(struct drm_plane *plane) 405void mdp5_plane_complete_flip(struct drm_plane *plane)
342{ 406{
343 struct mdp5_kms *mdp5_kms = get_kms(plane); 407 struct mdp5_kms *mdp5_kms = get_kms(plane);
344 enum mdp5_pipe pipe = to_mdp5_plane(plane)->pipe; 408 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
345 int i; 409 enum mdp5_pipe pipe = mdp5_plane->pipe;
410
411 DBG("%s: complete flip", mdp5_plane->name);
346 412
347 for (i = 0; i < pipe2nclients(pipe); i++) 413 mdp5_smp_commit(mdp5_kms->smp, pipe);
348 mdp5_smp_commit(mdp5_kms, pipe2client(pipe, i)); 414
415 to_mdp5_plane_state(plane->state)->pending = false;
349} 416}
350 417
351enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane) 418enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
@@ -354,9 +421,16 @@ enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane)
354 return mdp5_plane->pipe; 421 return mdp5_plane->pipe;
355} 422}
356 423
424uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
425{
426 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
427
428 return mdp5_plane->flush_mask;
429}
430
357/* initialize plane */ 431/* initialize plane */
358struct drm_plane *mdp5_plane_init(struct drm_device *dev, 432struct drm_plane *mdp5_plane_init(struct drm_device *dev,
359 enum mdp5_pipe pipe, bool private_plane) 433 enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset)
360{ 434{
361 struct drm_plane *plane = NULL; 435 struct drm_plane *plane = NULL;
362 struct mdp5_plane *mdp5_plane; 436 struct mdp5_plane *mdp5_plane;
@@ -377,10 +451,18 @@ struct drm_plane *mdp5_plane_init(struct drm_device *dev,
377 mdp5_plane->nformats = mdp5_get_formats(pipe, mdp5_plane->formats, 451 mdp5_plane->nformats = mdp5_get_formats(pipe, mdp5_plane->formats,
378 ARRAY_SIZE(mdp5_plane->formats)); 452 ARRAY_SIZE(mdp5_plane->formats));
379 453
454 mdp5_plane->flush_mask = mdp_ctl_flush_mask_pipe(pipe);
455 mdp5_plane->reg_offset = reg_offset;
456 spin_lock_init(&mdp5_plane->pipe_lock);
457
380 type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY; 458 type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
381 drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs, 459 ret = drm_universal_plane_init(dev, plane, 0xff, &mdp5_plane_funcs,
382 mdp5_plane->formats, mdp5_plane->nformats, 460 mdp5_plane->formats, mdp5_plane->nformats,
383 type); 461 type);
462 if (ret)
463 goto fail;
464
465 drm_plane_helper_add(plane, &mdp5_plane_helper_funcs);
384 466
385 mdp5_plane_install_properties(plane, &plane->base); 467 mdp5_plane_install_properties(plane, &plane->base);
386 468
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
index 2d0236b963a6..bf551885e019 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
@@ -1,4 +1,5 @@
1/* 1/*
2 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
2 * Copyright (C) 2013 Red Hat 3 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com> 4 * Author: Rob Clark <robdclark@gmail.com>
4 * 5 *
@@ -29,8 +30,11 @@
29 * Based on the size of the attached scanout buffer, a certain # of 30 * Based on the size of the attached scanout buffer, a certain # of
30 * blocks must be allocated to that client out of the shared pool. 31 * blocks must be allocated to that client out of the shared pool.
31 * 32 *
32 * For each block, it can be either free, or pending/in-use by a 33 * In some hw, some blocks are statically allocated for certain pipes
33 * client. The updates happen in three steps: 34 * and CANNOT be re-allocated (eg: MMB0 and MMB1 both tied to RGB0).
35 *
36 * For each block that can be dynamically allocated, it can be either
37 * free, or pending/in-use by a client. The updates happen in three steps:
34 * 38 *
35 * 1) mdp5_smp_request(): 39 * 1) mdp5_smp_request():
36 * When plane scanout is setup, calculate required number of 40 * When plane scanout is setup, calculate required number of
@@ -61,21 +65,68 @@
61 * inuse and pending state of all clients.. 65 * inuse and pending state of all clients..
62 */ 66 */
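
A toy model of the per-client double buffering described above, using one 32-bit word as the block bitmap where the driver uses DECLARE_BITMAP(): request() only ever edits pending, the hardware is programmed from the union, and commit() promotes pending to inuse after vblank.

    #include <stdint.h>

    struct client_state {
        uint32_t inuse;    /* blocks the hw is currently fetching from */
        uint32_t pending;  /* blocks reserved for the next frame */
    };

    /* step #2: configure hw for union(pending, inuse) */
    static uint32_t blocks_to_program(const struct client_state *c)
    {
        return c->inuse | c->pending;
    }

    /* step #3: after vblank, promote pending and report freed blocks */
    static uint32_t commit_blocks(struct client_state *c)
    {
        uint32_t released = c->inuse & ~c->pending;

        c->inuse = c->pending;
        return released;   /* caller clears these bits in the global pool */
    }
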
63 67
64static DEFINE_SPINLOCK(smp_lock); 68struct mdp5_smp {
69 struct drm_device *dev;
70
71 int blk_cnt;
72 int blk_size;
73
74 spinlock_t state_lock;
75 mdp5_smp_state_t state; /* to track smp allocation amongst pipes: */
76
77 struct mdp5_client_smp_state client_state[CID_MAX];
78};
65 79
80static inline
81struct mdp5_kms *get_kms(struct mdp5_smp *smp)
82{
83 struct msm_drm_private *priv = smp->dev->dev_private;
84
85 return to_mdp5_kms(to_mdp_kms(priv->kms));
86}
87
88static inline enum mdp5_client_id pipe2client(enum mdp5_pipe pipe, int plane)
89{
90 WARN_ON(plane >= pipe2nclients(pipe));
91 switch (pipe) {
92 case SSPP_VIG0: return CID_VIG0_Y + plane;
93 case SSPP_VIG1: return CID_VIG1_Y + plane;
94 case SSPP_VIG2: return CID_VIG2_Y + plane;
95 case SSPP_RGB0: return CID_RGB0;
96 case SSPP_RGB1: return CID_RGB1;
97 case SSPP_RGB2: return CID_RGB2;
98 case SSPP_DMA0: return CID_DMA0_Y + plane;
99 case SSPP_DMA1: return CID_DMA1_Y + plane;
100 case SSPP_VIG3: return CID_VIG3_Y + plane;
101 case SSPP_RGB3: return CID_RGB3;
102 default: return CID_UNUSED;
103 }
104}
66 105
67/* step #1: update # of blocks pending for the client: */ 106/* step #1: update # of blocks pending for the client: */
68int mdp5_smp_request(struct mdp5_kms *mdp5_kms, 107static int smp_request_block(struct mdp5_smp *smp,
69 enum mdp5_client_id cid, int nblks) 108 enum mdp5_client_id cid, int nblks)
70{ 109{
71 struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid]; 110 struct mdp5_kms *mdp5_kms = get_kms(smp);
72 int i, ret, avail, cur_nblks, cnt = mdp5_kms->smp_blk_cnt; 111 const struct mdp5_cfg_hw *hw_cfg;
112 struct mdp5_client_smp_state *ps = &smp->client_state[cid];
72 int i, ret, avail, cur_nblks, cnt = mdp5_kms->smp_blk_cnt; 113 int i, ret = 0, avail, cur_nblks, cnt = smp->blk_cnt;
114 int reserved;
73 unsigned long flags; 115 unsigned long flags;
74 116
75 spin_lock_irqsave(&smp_lock, flags); 117 hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
118 reserved = hw_cfg->smp.reserved[cid];
119
120 spin_lock_irqsave(&smp->state_lock, flags);
76 121
77 avail = cnt - bitmap_weight(mdp5_kms->smp_state, cnt); 122 nblks -= reserved;
123 if (reserved)
124 DBG("%d MMBs allocated (%d reserved)", nblks, reserved);
125
126 avail = cnt - bitmap_weight(smp->state, cnt);
78 if (nblks > avail) { 127 if (nblks > avail) {
128 dev_err(mdp5_kms->dev->dev, "out of blks (req=%d > avail=%d)\n",
129 nblks, avail);
79 ret = -ENOSPC; 130 ret = -ENOSPC;
80 goto fail; 131 goto fail;
81 } 132 }
@@ -84,9 +135,9 @@ int mdp5_smp_request(struct mdp5_kms *mdp5_kms,
84 if (nblks > cur_nblks) { 135 if (nblks > cur_nblks) {
85 /* grow the existing pending reservation: */ 136 /* grow the existing pending reservation: */
86 for (i = cur_nblks; i < nblks; i++) { 137 for (i = cur_nblks; i < nblks; i++) {
87 int blk = find_first_zero_bit(mdp5_kms->smp_state, cnt); 138 int blk = find_first_zero_bit(smp->state, cnt);
88 set_bit(blk, ps->pending); 139 set_bit(blk, ps->pending);
89 set_bit(blk, mdp5_kms->smp_state); 140 set_bit(blk, smp->state);
90 } 141 }
91 } else { 142 } else {
92 /* shrink the existing pending reservation: */ 143 /* shrink the existing pending reservation: */
@@ -98,15 +149,88 @@ int mdp5_smp_request(struct mdp5_kms *mdp5_kms,
98 } 149 }
99 150
100fail: 151fail:
101 spin_unlock_irqrestore(&smp_lock, flags); 152 spin_unlock_irqrestore(&smp->state_lock, flags);
153 return ret;
154}
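
Isolating the grow/shrink logic of smp_request_block(): growing pulls free blocks out of the global pool and marks them in both bitmaps, while shrinking clears only the pending bit, since the hardware may still be scanning from the block until commit. Same toy 32-bit bitmaps as above; this sketch assumes the -ENOSPC check already guaranteed enough free blocks:

    #include <stdint.h>

    static void resize_reservation(uint32_t *pool, uint32_t *pending, int want)
    {
        int have = __builtin_popcount(*pending);

        while (have < want) {                    /* grow */
            int blk = __builtin_ctz(~*pool);     /* first free block */

            *pending |= 1u << blk;
            *pool |= 1u << blk;
            have++;
        }
        while (have > want) {                    /* shrink */
            int blk = __builtin_ctz(*pending);

            *pending &= ~(1u << blk);  /* pool bit cleared only at commit */
            have--;
        }
    }
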
155
156static void set_fifo_thresholds(struct mdp5_smp *smp,
157 enum mdp5_pipe pipe, int nblks)
158{
159 struct mdp5_kms *mdp5_kms = get_kms(smp);
160 u32 smp_entries_per_blk = smp->blk_size / (128 / BITS_PER_BYTE);
161 u32 val;
162
163 /* 1/4 of SMP pool that is being fetched */
164 val = (nblks * smp_entries_per_blk) / 4;
165
166 mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_0(pipe), val * 1);
167 mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_1(pipe), val * 2);
168 mdp5_write(mdp5_kms, REG_MDP5_PIPE_REQPRIO_FIFO_WM_2(pipe), val * 3);
169}
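
The watermark arithmetic above, spelled out: an SMP block of blk_size bytes holds blk_size/16 entries of 128 bits, and the three request-priority watermarks sit at 1/4, 2/4 and 3/4 of the entries currently fetched. For example, 3 blocks of 4096 bytes give 768 entries, so WM0/WM1/WM2 become 192/384/576. A standalone sketch:

    static void fifo_watermarks(int nblks, int blk_size, unsigned int wm[3])
    {
        unsigned int entries_per_blk = blk_size / (128 / 8);  /* 128-bit entries */
        unsigned int quarter = (nblks * entries_per_blk) / 4;

        wm[0] = quarter * 1;
        wm[1] = quarter * 2;
        wm[2] = quarter * 3;
    }
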
170
171/*
172 * NOTE: looks like if horizontal decimation is used (if we supported that)
173 * then the width used to calculate SMP block requirements is the post-
174 * decimated width. Ie. SMP buffering sits downstream of decimation (which
175 * presumably happens during the dma from scanout buffer).
176 */
177int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 width)
178{
179 struct mdp5_kms *mdp5_kms = get_kms(smp);
180 struct drm_device *dev = mdp5_kms->dev;
181 int rev = mdp5_cfg_get_hw_rev(mdp5_kms->cfg);
182 int i, hsub, nplanes, nlines, nblks, ret;
183
184 nplanes = drm_format_num_planes(fmt);
185 hsub = drm_format_horz_chroma_subsampling(fmt);
186
187 /* different if BWC (compressed framebuffer?) enabled: */
188 nlines = 2;
189
190 for (i = 0, nblks = 0; i < nplanes; i++) {
191 int n, fetch_stride, cpp;
192
193 cpp = drm_format_plane_cpp(fmt, i);
194 fetch_stride = width * cpp / (i ? hsub : 1);
195
196 n = DIV_ROUND_UP(fetch_stride * nlines, smp->blk_size);
197
198 /* for hw rev v1.00 */
199 if (rev == 0)
200 n = roundup_pow_of_two(n);
201
202 DBG("%s[%d]: request %d SMP blocks", pipe2name(pipe), i, n);
203 ret = smp_request_block(smp, pipe2client(pipe, i), n);
204 if (ret) {
205 dev_err(dev->dev, "Cannot allocate %d SMP blocks: %d\n",
206 n, ret);
207 return ret;
208 }
209
210 nblks += n;
211 }
212
213 set_fifo_thresholds(smp, pipe, nblks);
214
102 return 0; 215 return 0;
103} 216}
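
A standalone version of the sizing rule in mdp5_smp_request(): per fb plane, the fetch stride is width times bytes-per-pixel (divided by the horizontal subsampling for chroma planes), two lines are buffered, and the result is rounded up to whole blocks, then up to a power of two on hw rev v1.00. E.g. a 1920-wide XRGB8888 plane needs ceil(1920*4*2 / 4096) = 4 blocks:

    static int blocks_for_plane(int width, int cpp, int hsub_div,
                                int blk_size, int is_rev_v100)
    {
        int fetch_stride = width * cpp / hsub_div;  /* hsub_div is 1 for plane 0 */
        int nlines = 2;                             /* larger once BWC is supported */
        int n = (fetch_stride * nlines + blk_size - 1) / blk_size;

        if (is_rev_v100)             /* v1.00 allocates power-of-two chunks */
            while (n & (n - 1))
                n++;
        return n;
    }
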
104 217
105static void update_smp_state(struct mdp5_kms *mdp5_kms, 218/* Release SMP blocks for all clients of the pipe */
219void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe)
220{
221 int i, nblks;
222
223 for (i = 0, nblks = 0; i < pipe2nclients(pipe); i++)
224 smp_request_block(smp, pipe2client(pipe, i), 0);
225 set_fifo_thresholds(smp, pipe, 0);
226}
227
228static void update_smp_state(struct mdp5_smp *smp,
106 enum mdp5_client_id cid, mdp5_smp_state_t *assigned) 229 enum mdp5_client_id cid, mdp5_smp_state_t *assigned)
107{ 230{
108 int cnt = mdp5_kms->smp_blk_cnt; 231 struct mdp5_kms *mdp5_kms = get_kms(smp);
109 uint32_t blk, val; 232 int cnt = smp->blk_cnt;
233 u32 blk, val;
110 234
111 for_each_set_bit(blk, *assigned, cnt) { 235 for_each_set_bit(blk, *assigned, cnt) {
112 int idx = blk / 3; 236 int idx = blk / 3;
@@ -135,39 +259,80 @@ static void update_smp_state(struct mdp5_kms *mdp5_kms,
135} 259}
136 260
137/* step #2: configure hw for union(pending, inuse): */ 261/* step #2: configure hw for union(pending, inuse): */
138void mdp5_smp_configure(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid) 262void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe)
139{ 263{
140 struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid]; 264 int cnt = smp->blk_cnt;
141 int cnt = mdp5_kms->smp_blk_cnt;
142 mdp5_smp_state_t assigned; 265 mdp5_smp_state_t assigned;
266 int i;
143 267
144 bitmap_or(assigned, ps->inuse, ps->pending, cnt); 268 for (i = 0; i < pipe2nclients(pipe); i++) {
145 update_smp_state(mdp5_kms, cid, &assigned); 269 enum mdp5_client_id cid = pipe2client(pipe, i);
270 struct mdp5_client_smp_state *ps = &smp->client_state[cid];
271
272 bitmap_or(assigned, ps->inuse, ps->pending, cnt);
273 update_smp_state(smp, cid, &assigned);
274 }
146} 275}
147 276
148/* step #3: after vblank, copy pending -> inuse: */ 277/* step #3: after vblank, copy pending -> inuse: */
149void mdp5_smp_commit(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid) 278void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
150{ 279{
151 struct mdp5_client_smp_state *ps = &mdp5_kms->smp_client_state[cid]; 280 int cnt = smp->blk_cnt;
152 int cnt = mdp5_kms->smp_blk_cnt;
153 mdp5_smp_state_t released; 281 mdp5_smp_state_t released;
282 int i;
283
284 for (i = 0; i < pipe2nclients(pipe); i++) {
285 enum mdp5_client_id cid = pipe2client(pipe, i);
286 struct mdp5_client_smp_state *ps = &smp->client_state[cid];
287
288 /*
289 * Figure out if there are any blocks we were previously
290 * using, which can be released and made available to other
291 * clients:
292 */
293 if (bitmap_andnot(released, ps->inuse, ps->pending, cnt)) {
294 unsigned long flags;
295
296 spin_lock_irqsave(&smp->state_lock, flags);
297 /* clear released blocks: */
298 bitmap_andnot(smp->state, smp->state, released, cnt);
299 spin_unlock_irqrestore(&smp->state_lock, flags);
154 300
155 /* 301 update_smp_state(smp, CID_UNUSED, &released);
156 * Figure out if there are any blocks we were previously 302 }
157 * using, which can be released and made available to other 303
158 * clients: 304 bitmap_copy(ps->inuse, ps->pending, cnt);
159 */
160 if (bitmap_andnot(released, ps->inuse, ps->pending, cnt)) {
161 unsigned long flags;
162
163 spin_lock_irqsave(&smp_lock, flags);
164 /* clear released blocks: */
165 bitmap_andnot(mdp5_kms->smp_state, mdp5_kms->smp_state,
166 released, cnt);
167 spin_unlock_irqrestore(&smp_lock, flags);
168
169 update_smp_state(mdp5_kms, CID_UNUSED, &released);
170 } 305 }
306}
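
Reusing the toy client_state from the earlier sketch, a worked example of the release path with made-up values, where the client scanned out of blocks {0,1,2} and the next frame only needs block {0}:

    static void commit_example(void)
    {
        struct client_state c = { .inuse = 0x7, .pending = 0x1 };
        uint32_t released = commit_blocks(&c);

        /* released == 0x6: blocks {1,2} go back to the pool; the real code
         * clears them from the global map with bitmap_andnot(), as above. */
        (void)released;
    }
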
307
308void mdp5_smp_destroy(struct mdp5_smp *smp)
309{
310 kfree(smp);
311}
312
313struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg)
314{
315 struct mdp5_smp *smp = NULL;
316 int ret;
317
318 smp = kzalloc(sizeof(*smp), GFP_KERNEL);
319 if (unlikely(!smp)) {
320 ret = -ENOMEM;
321 goto fail;
322 }
323
324 smp->dev = dev;
325 smp->blk_cnt = cfg->mmb_count;
326 smp->blk_size = cfg->mmb_size;
327
328 /* statically tied MMBs cannot be re-allocated: */
329 bitmap_copy(smp->state, cfg->reserved_state, smp->blk_cnt);
330 spin_lock_init(&smp->state_lock);
331
332 return smp;
333fail:
334 if (smp)
335 mdp5_smp_destroy(smp);
171 336
172 bitmap_copy(ps->inuse, ps->pending, cnt); 337 return ERR_PTR(ret);
173} 338}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
index 0ab739e1a1dd..e47179f63585 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
@@ -1,4 +1,5 @@
1/* 1/*
2 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
2 * Copyright (C) 2013 Red Hat 3 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com> 4 * Author: Rob Clark <robdclark@gmail.com>
4 * 5 *
@@ -20,22 +21,26 @@
20 21
21#include "msm_drv.h" 22#include "msm_drv.h"
22 23
23#define MAX_SMP_BLOCKS 22
24#define SMP_BLK_SIZE 4096
25#define SMP_ENTRIES_PER_BLK (SMP_BLK_SIZE / 16)
26
27typedef DECLARE_BITMAP(mdp5_smp_state_t, MAX_SMP_BLOCKS);
28
29struct mdp5_client_smp_state { 24struct mdp5_client_smp_state {
30 mdp5_smp_state_t inuse; 25 mdp5_smp_state_t inuse;
31 mdp5_smp_state_t pending; 26 mdp5_smp_state_t pending;
32}; 27};
33 28
34struct mdp5_kms; 29struct mdp5_kms;
30struct mdp5_smp;
31
32/*
33 * SMP module prototypes:
34 * mdp5_smp_init() returns a SMP @handler,
35 * which is then used to call the other mdp5_smp_*(handler, ...) functions.
36 */
35 37
36int mdp5_smp_request(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid, int nblks); 38struct mdp5_smp *mdp5_smp_init(struct drm_device *dev, const struct mdp5_smp_block *cfg);
37void mdp5_smp_configure(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid); 39void mdp5_smp_destroy(struct mdp5_smp *smp);
38void mdp5_smp_commit(struct mdp5_kms *mdp5_kms, enum mdp5_client_id cid);
39 40
41int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 width);
42void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe);
43void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe);
44void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe);
40 45
41#endif /* __MDP5_SMP_H__ */ 46#endif /* __MDP5_SMP_H__ */
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
new file mode 100644
index 000000000000..f0de412e13dc
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -0,0 +1,163 @@
1/*
2 * Copyright (C) 2014 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include "msm_drv.h"
19#include "msm_kms.h"
20#include "msm_gem.h"
21
22struct msm_commit {
23 struct drm_atomic_state *state;
24 uint32_t fence;
25 struct msm_fence_cb fence_cb;
26};
27
28static void fence_cb(struct msm_fence_cb *cb);
29
30static struct msm_commit *new_commit(struct drm_atomic_state *state)
31{
32 struct msm_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);
33
34 if (!c)
35 return NULL;
36
37 c->state = state;
38 /* TODO we might need a way to indicate to run the cb on a
39 * different wq so wait_for_vblanks() doesn't block retiring
40 * bo's..
41 */
42 INIT_FENCE_CB(&c->fence_cb, fence_cb);
43
44 return c;
45}
46
47/* The (potentially) asynchronous part of the commit. At this point
48 * nothing can fail short of armageddon.
49 */
50static void complete_commit(struct msm_commit *c)
51{
52 struct drm_atomic_state *state = c->state;
53 struct drm_device *dev = state->dev;
54
55 drm_atomic_helper_commit_pre_planes(dev, state);
56
57 drm_atomic_helper_commit_planes(dev, state);
58
59 drm_atomic_helper_commit_post_planes(dev, state);
60
61 drm_atomic_helper_wait_for_vblanks(dev, state);
62
63 drm_atomic_helper_cleanup_planes(dev, state);
64
65 drm_atomic_state_free(state);
66
67 kfree(c);
68}
69
70static void fence_cb(struct msm_fence_cb *cb)
71{
72 struct msm_commit *c =
73 container_of(cb, struct msm_commit, fence_cb);
74 complete_commit(c);
75}
76
77static void add_fb(struct msm_commit *c, struct drm_framebuffer *fb)
78{
79 struct drm_gem_object *obj = msm_framebuffer_bo(fb, 0);
80 c->fence = max(c->fence, msm_gem_fence(to_msm_bo(obj), MSM_PREP_READ));
81}
82
83
84/**
85 * msm_atomic_commit - commit validated state object
86 * @dev: DRM device
87 * @state: the driver state object
88 * @async: asynchronous commit
89 *
90 * This function commits a state object that has been pre-validated with
91 * drm_atomic_helper_check(). This can still fail when e.g. the framebuffer
92 * reservation fails. When @async is set, completion runs from a fence callback.
93 *
94 * RETURNS
95 * Zero for success or -errno.
96 */
97int msm_atomic_commit(struct drm_device *dev,
98 struct drm_atomic_state *state, bool async)
99{
100 struct msm_commit *c;
101 int nplanes = dev->mode_config.num_total_plane;
102 int i, ret;
103
104 ret = drm_atomic_helper_prepare_planes(dev, state);
105 if (ret)
106 return ret;
107
108 c = new_commit(state);
109
110 /*
111 * Figure out what fence to wait for:
112 */
113 for (i = 0; i < nplanes; i++) {
114 struct drm_plane *plane = state->planes[i];
115 struct drm_plane_state *new_state = state->plane_states[i];
116
117 if (!plane)
118 continue;
119
120 if ((plane->state->fb != new_state->fb) && new_state->fb)
121 add_fb(c, new_state->fb);
122 }
123
124 /*
125 * This is the point of no return - everything below never fails except
126 * when the hw goes bonghits. Which means we can commit the new state on
127 * the software side now.
128 */
129
130 drm_atomic_helper_swap_state(dev, state);
131
132 /*
133 * Everything below can be run asynchronously without the need to grab
134 * any modeset locks at all under one condition: it must be guaranteed
135 * that the asynchronous work has either been cancelled (if the driver
136 * supports it, which at least requires that the framebuffers get
137 * cleaned up with drm_atomic_helper_cleanup_planes()) or completed
138 * before the new state gets committed on the software side with
139 * drm_atomic_helper_swap_state().
140 *
141 * This scheme allows new atomic state updates to be prepared and
142 * checked in parallel to the asynchronous completion of the previous
143 * update. Which is important since compositors need to figure out the
144 * composition of the next frame right after having submitted the
145 * current layout.
146 */
147
148 if (async) {
149 msm_queue_fence_cb(dev, &c->fence_cb, c->fence);
150 return 0;
151 }
152
153 ret = msm_wait_fence_interruptable(dev, c->fence, NULL);
154 if (ret) {
155 WARN_ON(ret); // TODO unswap state back? or??
156 kfree(c);
157 return ret;
158 }
159
160 complete_commit(c);
161
162 return 0;
163}
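
A compact model of the fence bookkeeping above: every framebuffer that differs from the one currently on screen contributes the fence scanout must wait for, and since fences in this driver are monotonically increasing seqnos, the commit simply waits on the maximum. Illustrative types, not the msm API:

    #include <stdint.h>

    struct plane_flip { uint32_t fb_fence; int fb_changed; };

    static uint32_t commit_wait_fence(const struct plane_flip *flips, int n)
    {
        uint32_t fence = 0;
        int i;

        for (i = 0; i < n; i++)
            if (flips[i].fb_changed && flips[i].fb_fence > fence)
                fence = flips[i].fb_fence;
        return fence;   /* 0: nothing to wait for, commit immediately */
    }
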
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 42e1c48eef28..c795217e1bfc 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -29,6 +29,8 @@ static void msm_fb_output_poll_changed(struct drm_device *dev)
29static const struct drm_mode_config_funcs mode_config_funcs = { 29static const struct drm_mode_config_funcs mode_config_funcs = {
30 .fb_create = msm_framebuffer_create, 30 .fb_create = msm_framebuffer_create,
31 .output_poll_changed = msm_fb_output_poll_changed, 31 .output_poll_changed = msm_fb_output_poll_changed,
32 .atomic_check = drm_atomic_helper_check,
33 .atomic_commit = msm_atomic_commit,
32}; 34};
33 35
34int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu) 36int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu)
@@ -294,6 +296,8 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
294 goto fail; 296 goto fail;
295 } 297 }
296 298
299 drm_mode_config_reset(dev);
300
297#ifdef CONFIG_DRM_MSM_FBDEV 301#ifdef CONFIG_DRM_MSM_FBDEV
298 priv->fbdev = msm_fbdev_init(dev); 302 priv->fbdev = msm_fbdev_init(dev);
299#endif 303#endif
@@ -619,6 +623,26 @@ int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
619 return ret; 623 return ret;
620} 624}
621 625
626int msm_queue_fence_cb(struct drm_device *dev,
627 struct msm_fence_cb *cb, uint32_t fence)
628{
629 struct msm_drm_private *priv = dev->dev_private;
630 int ret = 0;
631
632 mutex_lock(&dev->struct_mutex);
633 if (!list_empty(&cb->work.entry)) {
634 ret = -EINVAL;
635 } else if (fence > priv->completed_fence) {
636 cb->fence = fence;
637 list_add_tail(&cb->work.entry, &priv->fence_cbs);
638 } else {
639 queue_work(priv->wq, &cb->work);
640 }
641 mutex_unlock(&dev->struct_mutex);
642
643 return ret;
644}
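
The dispatch rule in msm_queue_fence_cb(), in isolation: a callback may not be queued twice, is parked on the list only while its fence is still in the future, and runs immediately if the fence has already signalled. A toy model with plain flags standing in for the list_head/workqueue state:

    #include <stdbool.h>
    #include <stdint.h>

    struct toy_cb {
        bool queued;
        uint32_t fence;
        void (*func)(struct toy_cb *cb);
    };

    static int toy_queue_fence_cb(struct toy_cb *cb, uint32_t fence,
                                  uint32_t completed_fence)
    {
        if (cb->queued)
            return -1;                /* -EINVAL: already armed */
        if (fence > completed_fence) {
            cb->fence = fence;        /* fired later, from msm_update_fence() */
            cb->queued = true;
        } else {
            cb->func(cb);             /* already signalled: run right away */
        }
        return 0;
    }
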
645
622/* called from workqueue */ 646/* called from workqueue */
623void msm_update_fence(struct drm_device *dev, uint32_t fence) 647void msm_update_fence(struct drm_device *dev, uint32_t fence)
624{ 648{
@@ -832,6 +856,7 @@ static struct drm_driver msm_driver = {
832 .gem_prime_import_sg_table = msm_gem_prime_import_sg_table, 856 .gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
833 .gem_prime_vmap = msm_gem_prime_vmap, 857 .gem_prime_vmap = msm_gem_prime_vmap,
834 .gem_prime_vunmap = msm_gem_prime_vunmap, 858 .gem_prime_vunmap = msm_gem_prime_vunmap,
859 .gem_prime_mmap = msm_gem_prime_mmap,
835#ifdef CONFIG_DEBUG_FS 860#ifdef CONFIG_DEBUG_FS
836 .debugfs_init = msm_debugfs_init, 861 .debugfs_init = msm_debugfs_init,
837 .debugfs_cleanup = msm_debugfs_cleanup, 862 .debugfs_cleanup = msm_debugfs_cleanup,
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 67f9d0a2332c..136303818436 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -32,15 +32,6 @@
32#include <linux/types.h> 32#include <linux/types.h>
33#include <asm/sizes.h> 33#include <asm/sizes.h>
34 34
35
36#if defined(CONFIG_COMPILE_TEST) && !defined(CONFIG_ARCH_QCOM)
37/* stubs we need for compile-test: */
38static inline struct device *msm_iommu_get_ctx(const char *ctx_name)
39{
40 return NULL;
41}
42#endif
43
44#ifndef CONFIG_OF 35#ifndef CONFIG_OF
45#include <mach/board.h> 36#include <mach/board.h>
46#include <mach/socinfo.h> 37#include <mach/socinfo.h>
@@ -48,7 +39,10 @@ static inline struct device *msm_iommu_get_ctx(const char *ctx_name)
48#endif 39#endif
49 40
50#include <drm/drmP.h> 41#include <drm/drmP.h>
42#include <drm/drm_atomic.h>
43#include <drm/drm_atomic_helper.h>
51#include <drm/drm_crtc_helper.h> 44#include <drm/drm_crtc_helper.h>
45#include <drm/drm_plane_helper.h>
52#include <drm/drm_fb_helper.h> 46#include <drm/drm_fb_helper.h>
53#include <drm/msm_drm.h> 47#include <drm/msm_drm.h>
54#include <drm/drm_gem.h> 48#include <drm/drm_gem.h>
@@ -75,7 +69,12 @@ struct msm_drm_private {
75 struct msm_kms *kms; 69 struct msm_kms *kms;
76 70
77 /* subordinate devices, if present: */ 71 /* subordinate devices, if present: */
78 struct platform_device *hdmi_pdev, *gpu_pdev; 72 struct platform_device *gpu_pdev;
73
74 /* possibly this should be in the kms component, but it is
75 * shared by both mdp4 and mdp5..
76 */
77 struct hdmi *hdmi;
79 78
80 /* when we have more than one 'msm_gpu' these need to be an array: */ 79 /* when we have more than one 'msm_gpu' these need to be an array: */
81 struct msm_gpu *gpu; 80 struct msm_gpu *gpu;
@@ -145,21 +144,29 @@ void __msm_fence_worker(struct work_struct *work);
145 (_cb)->func = _func; \ 144 (_cb)->func = _func; \
146 } while (0) 145 } while (0)
147 146
147int msm_atomic_commit(struct drm_device *dev,
148 struct drm_atomic_state *state, bool async);
149
148int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu); 150int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
149 151
150int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence, 152int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
151 struct timespec *timeout); 153 struct timespec *timeout);
154int msm_queue_fence_cb(struct drm_device *dev,
155 struct msm_fence_cb *cb, uint32_t fence);
152void msm_update_fence(struct drm_device *dev, uint32_t fence); 156void msm_update_fence(struct drm_device *dev, uint32_t fence);
153 157
154int msm_ioctl_gem_submit(struct drm_device *dev, void *data, 158int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
155 struct drm_file *file); 159 struct drm_file *file);
156 160
161int msm_gem_mmap_obj(struct drm_gem_object *obj,
162 struct vm_area_struct *vma);
157int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma); 163int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
158int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); 164int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
159uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj); 165uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
160int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id, 166int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
161 uint32_t *iova); 167 uint32_t *iova);
162int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova); 168int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova);
169uint32_t msm_gem_iova(struct drm_gem_object *obj, int id);
163struct page **msm_gem_get_pages(struct drm_gem_object *obj); 170struct page **msm_gem_get_pages(struct drm_gem_object *obj);
164void msm_gem_put_pages(struct drm_gem_object *obj); 171void msm_gem_put_pages(struct drm_gem_object *obj);
165void msm_gem_put_iova(struct drm_gem_object *obj, int id); 172void msm_gem_put_iova(struct drm_gem_object *obj, int id);
@@ -170,6 +177,7 @@ int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
170struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj); 177struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
171void *msm_gem_prime_vmap(struct drm_gem_object *obj); 178void *msm_gem_prime_vmap(struct drm_gem_object *obj);
172void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); 179void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
180int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
173struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev, 181struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
174 struct dma_buf_attachment *attach, struct sg_table *sg); 182 struct dma_buf_attachment *attach, struct sg_table *sg);
175int msm_gem_prime_pin(struct drm_gem_object *obj); 183int msm_gem_prime_pin(struct drm_gem_object *obj);
@@ -192,6 +200,9 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
192struct drm_gem_object *msm_gem_import(struct drm_device *dev, 200struct drm_gem_object *msm_gem_import(struct drm_device *dev,
193 uint32_t size, struct sg_table *sgt); 201 uint32_t size, struct sg_table *sgt);
194 202
203int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id);
204void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id);
205uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane);
195struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane); 206struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
196const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb); 207const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
197struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev, 208struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
@@ -202,8 +213,8 @@ struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
202struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev); 213struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev);
203 214
204struct hdmi; 215struct hdmi;
205struct hdmi *hdmi_init(struct drm_device *dev, struct drm_encoder *encoder); 216int hdmi_modeset_init(struct hdmi *hdmi, struct drm_device *dev,
206irqreturn_t hdmi_irq(int irq, void *dev_id); 217 struct drm_encoder *encoder);
207void __init hdmi_register(void); 218void __init hdmi_register(void);
208void __exit hdmi_unregister(void); 219void __exit hdmi_unregister(void);
209 220
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 81bafdf19ab3..84dec161d836 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -24,7 +24,7 @@
24struct msm_framebuffer { 24struct msm_framebuffer {
25 struct drm_framebuffer base; 25 struct drm_framebuffer base;
26 const struct msm_format *format; 26 const struct msm_format *format;
27 struct drm_gem_object *planes[2]; 27 struct drm_gem_object *planes[3];
28}; 28};
29#define to_msm_framebuffer(x) container_of(x, struct msm_framebuffer, base) 29#define to_msm_framebuffer(x) container_of(x, struct msm_framebuffer, base)
30 30
@@ -87,6 +87,44 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
87} 87}
88#endif 88#endif
89 89
90/* prepare/pin all the fb's bo's for scanout. Note that it is not valid
91 * to prepare an fb for multiple different initiator 'id's. But that
92 * should be fine, since only the scanout (mdpN) side of things needs
93 * this, the gpu doesn't care about fb's.
94 */
95int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id)
96{
97 struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
98 int ret, i, n = drm_format_num_planes(fb->pixel_format);
99 uint32_t iova;
100
101 for (i = 0; i < n; i++) {
102 ret = msm_gem_get_iova(msm_fb->planes[i], id, &iova);
103 DBG("FB[%u]: iova[%d]: %08x (%d)", fb->base.id, i, iova, ret);
104 if (ret)
105 return ret;
106 }
107
108 return 0;
109}
110
111void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id)
112{
113 struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
114 int i, n = drm_format_num_planes(fb->pixel_format);
115
116 for (i = 0; i < n; i++)
117 msm_gem_put_iova(msm_fb->planes[i], id);
118}
119
120uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane)
121{
122 struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
123 if (!msm_fb->planes[plane])
124 return 0;
125 return msm_gem_iova(msm_fb->planes[plane], id);
126}
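
How the three helpers above pair up in the scanout path, as the mdp5 plane code uses them: pin every bo once in .prepare_fb, read the cached address back per plane while programming the pipe, then unpin in .cleanup_fb. A sketch with error handling trimmed:

    static void scanout_cycle(struct drm_framebuffer *fb, int kms_id)
    {
        if (msm_framebuffer_prepare(fb, kms_id))   /* pins + maps every plane */
            return;

        /* ... program REG_MDP5_PIPE_SRCn_ADDR from
         *     msm_framebuffer_iova(fb, kms_id, n) for each plane n ... */

        msm_framebuffer_cleanup(fb, kms_id);       /* drops the pins */
    }
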
127
90struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane) 128struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane)
91{ 129{
92 struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb); 130 struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
@@ -166,6 +204,11 @@ struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
166 204
167 msm_fb->format = format; 205 msm_fb->format = format;
168 206
207 if (n > ARRAY_SIZE(msm_fb->planes)) {
208 ret = -EINVAL;
209 goto fail;
210 }
211
169 for (i = 0; i < n; i++) { 212 for (i = 0; i < n; i++) {
170 unsigned int width = mode_cmd->width / (i ? hsub : 1); 213 unsigned int width = mode_cmd->width / (i ? hsub : 1);
171 unsigned int height = mode_cmd->height / (i ? vsub : 1); 214 unsigned int height = mode_cmd->height / (i ? vsub : 1);
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index ab5bfd2d0ebf..94d55e526b4e 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -93,9 +93,6 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
93 uint32_t paddr; 93 uint32_t paddr;
94 int ret, size; 94 int ret, size;
95 95
96 sizes->surface_bpp = 32;
97 sizes->surface_depth = 24;
98
99 DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width, 96 DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
100 sizes->surface_height, sizes->surface_bpp, 97 sizes->surface_height, sizes->surface_bpp,
101 sizes->fb_width, sizes->fb_height); 98 sizes->fb_width, sizes->fb_height);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 4b1b82adabde..4a6f0e49d5b5 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -309,6 +309,7 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
309 return ret; 309 return ret;
310} 310}
311 311
312/* get iova, taking a reference. Should have a matching put */
312int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova) 313int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
313{ 314{
314 struct msm_gem_object *msm_obj = to_msm_bo(obj); 315 struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -328,6 +329,16 @@ int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
328 return ret; 329 return ret;
329} 330}
330 331
332/* get iova without taking a reference, used in places where you have
333 * already done a 'msm_gem_get_iova()'.
334 */
335uint32_t msm_gem_iova(struct drm_gem_object *obj, int id)
336{
337 struct msm_gem_object *msm_obj = to_msm_bo(obj);
338 WARN_ON(!msm_obj->domain[id].iova);
339 return msm_obj->domain[id].iova;
340}
341
331void msm_gem_put_iova(struct drm_gem_object *obj, int id) 342void msm_gem_put_iova(struct drm_gem_object *obj, int id)
332{ 343{
333 // XXX TODO .. 344 // XXX TODO ..
@@ -397,23 +408,10 @@ void *msm_gem_vaddr(struct drm_gem_object *obj)
397int msm_gem_queue_inactive_cb(struct drm_gem_object *obj, 408int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
398 struct msm_fence_cb *cb) 409 struct msm_fence_cb *cb)
399{ 410{
400 struct drm_device *dev = obj->dev;
401 struct msm_drm_private *priv = dev->dev_private;
402 struct msm_gem_object *msm_obj = to_msm_bo(obj); 411 struct msm_gem_object *msm_obj = to_msm_bo(obj);
403 int ret = 0; 412 uint32_t fence = msm_gem_fence(msm_obj,
404 413 MSM_PREP_READ | MSM_PREP_WRITE);
405 mutex_lock(&dev->struct_mutex); 414 return msm_queue_fence_cb(obj->dev, cb, fence);
406 if (!list_empty(&cb->work.entry)) {
407 ret = -EINVAL;
408 } else if (is_active(msm_obj)) {
409 cb->fence = max(msm_obj->read_fence, msm_obj->write_fence);
410 list_add_tail(&cb->work.entry, &priv->fence_cbs);
411 } else {
412 queue_work(priv->wq, &cb->work);
413 }
414 mutex_unlock(&dev->struct_mutex);
415
416 return ret;
417} 415}
418 416
419void msm_gem_move_to_active(struct drm_gem_object *obj, 417void msm_gem_move_to_active(struct drm_gem_object *obj,
@@ -452,12 +450,8 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op,
452 int ret = 0; 450 int ret = 0;
453 451
454 if (is_active(msm_obj)) { 452 if (is_active(msm_obj)) {
455 uint32_t fence = 0; 453 uint32_t fence = msm_gem_fence(msm_obj, op);
456 454
457 if (op & MSM_PREP_READ)
458 fence = msm_obj->write_fence;
459 if (op & MSM_PREP_WRITE)
460 fence = max(fence, msm_obj->read_fence);
461 if (op & MSM_PREP_NOSYNC) 455 if (op & MSM_PREP_NOSYNC)
462 timeout = NULL; 456 timeout = NULL;
463 457
@@ -525,13 +519,11 @@ void msm_gem_free_object(struct drm_gem_object *obj)
525 for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) { 519 for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
526 struct msm_mmu *mmu = priv->mmus[id]; 520 struct msm_mmu *mmu = priv->mmus[id];
527 if (mmu && msm_obj->domain[id].iova) { 521 if (mmu && msm_obj->domain[id].iova) {
528 uint32_t offset = (uint32_t)mmap_offset(obj); 522 uint32_t offset = msm_obj->domain[id].iova;
529 mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size); 523 mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
530 } 524 }
531 } 525 }
532 526
533 drm_gem_free_mmap_offset(obj);
534
535 if (obj->import_attach) { 527 if (obj->import_attach) {
536 if (msm_obj->vaddr) 528 if (msm_obj->vaddr)
537 dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr); 529 dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index bfb052688f8e..8fbbd0594c46 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -70,6 +70,19 @@ static inline bool is_active(struct msm_gem_object *msm_obj)
70 return msm_obj->gpu != NULL; 70 return msm_obj->gpu != NULL;
71} 71}
72 72
73static inline uint32_t msm_gem_fence(struct msm_gem_object *msm_obj,
74 uint32_t op)
75{
76 uint32_t fence = 0;
77
78 if (op & MSM_PREP_READ)
79 fence = msm_obj->write_fence;
80 if (op & MSM_PREP_WRITE)
81 fence = max(fence, msm_obj->read_fence);
82
83 return fence;
84}
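
msm_gem_fence() encodes the usual read/write ordering rule: a reader only needs to wait for the last writer, while a writer must also wait for the last reader. Stated as a standalone function, with made-up seqnos in the comment:

    #include <stdbool.h>
    #include <stdint.h>

    /* e.g. read_fence = 7, write_fence = 5:
     * READ waits for 5, WRITE (or READ|WRITE) waits for 7. */
    static uint32_t fence_to_wait_for(uint32_t read_fence, uint32_t write_fence,
                                      bool will_read, bool will_write)
    {
        uint32_t fence = 0;

        if (will_read)
            fence = write_fence;
        if (will_write && read_fence > fence)
            fence = read_fence;
        return fence;
    }
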
85
73#define MAX_CMDS 4 86#define MAX_CMDS 4
74 87
75/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc, 88/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index ad772fe36115..dd7a7ab603e2 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -37,6 +37,19 @@ void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
37 /* TODO msm_gem_vunmap() */ 37 /* TODO msm_gem_vunmap() */
38} 38}
39 39
40int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
41{
42 int ret;
43
44 mutex_lock(&obj->dev->struct_mutex);
45 ret = drm_gem_mmap_obj(obj, obj->size, vma);
46 mutex_unlock(&obj->dev->struct_mutex);
47 if (ret < 0)
48 return ret;
49
50 return msm_gem_mmap_obj(vma->vm_private_data, vma);
51}
52
40struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev, 53struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
41 struct dma_buf_attachment *attach, struct sg_table *sg) 54 struct dma_buf_attachment *attach, struct sg_table *sg)
42{ 55{
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 12c24c8abf7f..6461e3565afe 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -41,17 +41,28 @@ nouveau-y += core/subdev/bios/extdev.o
41nouveau-y += core/subdev/bios/fan.o 41nouveau-y += core/subdev/bios/fan.o
42nouveau-y += core/subdev/bios/gpio.o 42nouveau-y += core/subdev/bios/gpio.o
43nouveau-y += core/subdev/bios/i2c.o 43nouveau-y += core/subdev/bios/i2c.o
44nouveau-y += core/subdev/bios/image.o
44nouveau-y += core/subdev/bios/init.o 45nouveau-y += core/subdev/bios/init.o
45nouveau-y += core/subdev/bios/mxm.o 46nouveau-y += core/subdev/bios/mxm.o
47nouveau-y += core/subdev/bios/npde.o
48nouveau-y += core/subdev/bios/pcir.o
46nouveau-y += core/subdev/bios/perf.o 49nouveau-y += core/subdev/bios/perf.o
47nouveau-y += core/subdev/bios/pll.o 50nouveau-y += core/subdev/bios/pll.o
51nouveau-y += core/subdev/bios/pmu.o
48nouveau-y += core/subdev/bios/ramcfg.o 52nouveau-y += core/subdev/bios/ramcfg.o
49nouveau-y += core/subdev/bios/rammap.o 53nouveau-y += core/subdev/bios/rammap.o
54nouveau-y += core/subdev/bios/shadow.o
55nouveau-y += core/subdev/bios/shadowacpi.o
56nouveau-y += core/subdev/bios/shadowof.o
57nouveau-y += core/subdev/bios/shadowpci.o
58nouveau-y += core/subdev/bios/shadowramin.o
59nouveau-y += core/subdev/bios/shadowrom.o
50nouveau-y += core/subdev/bios/timing.o 60nouveau-y += core/subdev/bios/timing.o
51nouveau-y += core/subdev/bios/therm.o 61nouveau-y += core/subdev/bios/therm.o
52nouveau-y += core/subdev/bios/vmap.o 62nouveau-y += core/subdev/bios/vmap.o
53nouveau-y += core/subdev/bios/volt.o 63nouveau-y += core/subdev/bios/volt.o
54nouveau-y += core/subdev/bios/xpio.o 64nouveau-y += core/subdev/bios/xpio.o
65nouveau-y += core/subdev/bios/M0203.o
55nouveau-y += core/subdev/bios/M0205.o 66nouveau-y += core/subdev/bios/M0205.o
56nouveau-y += core/subdev/bios/M0209.o 67nouveau-y += core/subdev/bios/M0209.o
57nouveau-y += core/subdev/bios/P0260.o 68nouveau-y += core/subdev/bios/P0260.o
@@ -86,6 +97,7 @@ nouveau-y += core/subdev/devinit/nva3.o
86nouveau-y += core/subdev/devinit/nvaf.o 97nouveau-y += core/subdev/devinit/nvaf.o
87nouveau-y += core/subdev/devinit/nvc0.o 98nouveau-y += core/subdev/devinit/nvc0.o
88nouveau-y += core/subdev/devinit/gm107.o 99nouveau-y += core/subdev/devinit/gm107.o
100nouveau-y += core/subdev/devinit/gm204.o
89nouveau-y += core/subdev/fb/base.o 101nouveau-y += core/subdev/fb/base.o
90nouveau-y += core/subdev/fb/nv04.o 102nouveau-y += core/subdev/fb/nv04.o
91nouveau-y += core/subdev/fb/nv10.o 103nouveau-y += core/subdev/fb/nv10.o
@@ -129,6 +141,7 @@ nouveau-y += core/subdev/fb/ramgk20a.o
129nouveau-y += core/subdev/fb/ramgm107.o 141nouveau-y += core/subdev/fb/ramgm107.o
130nouveau-y += core/subdev/fb/sddr2.o 142nouveau-y += core/subdev/fb/sddr2.o
131nouveau-y += core/subdev/fb/sddr3.o 143nouveau-y += core/subdev/fb/sddr3.o
144nouveau-y += core/subdev/fb/gddr3.o
132nouveau-y += core/subdev/fb/gddr5.o 145nouveau-y += core/subdev/fb/gddr5.o
133nouveau-y += core/subdev/fuse/base.o 146nouveau-y += core/subdev/fuse/base.o
134nouveau-y += core/subdev/fuse/g80.o 147nouveau-y += core/subdev/fuse/g80.o
@@ -147,6 +160,7 @@ nouveau-y += core/subdev/i2c/bit.o
147nouveau-y += core/subdev/i2c/pad.o 160nouveau-y += core/subdev/i2c/pad.o
148nouveau-y += core/subdev/i2c/padnv04.o 161nouveau-y += core/subdev/i2c/padnv04.o
149nouveau-y += core/subdev/i2c/padnv94.o 162nouveau-y += core/subdev/i2c/padnv94.o
163nouveau-y += core/subdev/i2c/padgm204.o
150nouveau-y += core/subdev/i2c/nv04.o 164nouveau-y += core/subdev/i2c/nv04.o
151nouveau-y += core/subdev/i2c/nv4e.o 165nouveau-y += core/subdev/i2c/nv4e.o
152nouveau-y += core/subdev/i2c/nv50.o 166nouveau-y += core/subdev/i2c/nv50.o
@@ -154,6 +168,7 @@ nouveau-y += core/subdev/i2c/nv94.o
154nouveau-y += core/subdev/i2c/nvd0.o 168nouveau-y += core/subdev/i2c/nvd0.o
155nouveau-y += core/subdev/i2c/gf117.o 169nouveau-y += core/subdev/i2c/gf117.o
156nouveau-y += core/subdev/i2c/nve0.o 170nouveau-y += core/subdev/i2c/nve0.o
171nouveau-y += core/subdev/i2c/gm204.o
157nouveau-y += core/subdev/ibus/nvc0.o 172nouveau-y += core/subdev/ibus/nvc0.o
158nouveau-y += core/subdev/ibus/nve0.o 173nouveau-y += core/subdev/ibus/nve0.o
159nouveau-y += core/subdev/ibus/gk20a.o 174nouveau-y += core/subdev/ibus/gk20a.o
@@ -211,6 +226,7 @@ nouveau-y += core/subdev/vm/nvc0.o
211nouveau-y += core/subdev/volt/base.o 226nouveau-y += core/subdev/volt/base.o
212nouveau-y += core/subdev/volt/gpio.o 227nouveau-y += core/subdev/volt/gpio.o
213nouveau-y += core/subdev/volt/nv40.o 228nouveau-y += core/subdev/volt/nv40.o
229nouveau-y += core/subdev/volt/gk20a.o
214 230
215nouveau-y += core/engine/falcon.o 231nouveau-y += core/engine/falcon.o
216nouveau-y += core/engine/xtensa.o 232nouveau-y += core/engine/xtensa.o
@@ -254,6 +270,7 @@ nouveau-y += core/engine/disp/nvd0.o
254nouveau-y += core/engine/disp/nve0.o 270nouveau-y += core/engine/disp/nve0.o
255nouveau-y += core/engine/disp/nvf0.o 271nouveau-y += core/engine/disp/nvf0.o
256nouveau-y += core/engine/disp/gm107.o 272nouveau-y += core/engine/disp/gm107.o
273nouveau-y += core/engine/disp/gm204.o
257nouveau-y += core/engine/disp/dacnv50.o 274nouveau-y += core/engine/disp/dacnv50.o
258nouveau-y += core/engine/disp/dport.o 275nouveau-y += core/engine/disp/dport.o
259nouveau-y += core/engine/disp/hdanva3.o 276nouveau-y += core/engine/disp/hdanva3.o
@@ -266,6 +283,7 @@ nouveau-y += core/engine/disp/piornv50.o
266nouveau-y += core/engine/disp/sornv50.o 283nouveau-y += core/engine/disp/sornv50.o
267nouveau-y += core/engine/disp/sornv94.o 284nouveau-y += core/engine/disp/sornv94.o
268nouveau-y += core/engine/disp/sornvd0.o 285nouveau-y += core/engine/disp/sornvd0.o
286nouveau-y += core/engine/disp/sorgm204.o
269nouveau-y += core/engine/disp/vga.o 287nouveau-y += core/engine/disp/vga.o
270nouveau-y += core/engine/fifo/base.o 288nouveau-y += core/engine/fifo/base.o
271nouveau-y += core/engine/fifo/nv04.o 289nouveau-y += core/engine/fifo/nv04.o
diff --git a/drivers/gpu/drm/nouveau/core/core/handle.c b/drivers/gpu/drm/nouveau/core/core/handle.c
index a490b805d7e3..13f816cb08bd 100644
--- a/drivers/gpu/drm/nouveau/core/core/handle.c
+++ b/drivers/gpu/drm/nouveau/core/core/handle.c
@@ -222,116 +222,3 @@ nouveau_handle_put(struct nouveau_handle *handle)
 	if (handle)
 		nouveau_namedb_put(handle);
 }
-
-int
-nouveau_handle_new(struct nouveau_object *client, u32 _parent, u32 _handle,
-		   u16 _oclass, void *data, u32 size,
-		   struct nouveau_object **pobject)
-{
-	struct nouveau_object *parent = NULL;
-	struct nouveau_object *engctx = NULL;
-	struct nouveau_object *object = NULL;
-	struct nouveau_object *engine;
-	struct nouveau_oclass *oclass;
-	struct nouveau_handle *handle;
-	int ret;
-
-	/* lookup parent object and ensure it *is* a parent */
-	parent = nouveau_handle_ref(client, _parent);
-	if (!parent) {
-		nv_error(client, "parent 0x%08x not found\n", _parent);
-		return -ENOENT;
-	}
-
-	if (!nv_iclass(parent, NV_PARENT_CLASS)) {
-		nv_error(parent, "cannot have children\n");
-		ret = -EINVAL;
-		goto fail_class;
-	}
-
-	/* check that parent supports the requested subclass */
-	ret = nouveau_parent_sclass(parent, _oclass, &engine, &oclass);
-	if (ret) {
-		nv_debug(parent, "illegal class 0x%04x\n", _oclass);
-		goto fail_class;
-	}
-
-	/* make sure engine init has been completed *before* any objects
-	 * it controls are created - the constructors may depend on
-	 * state calculated at init (ie. default context construction)
-	 */
-	if (engine) {
-		ret = nouveau_object_inc(engine);
-		if (ret)
-			goto fail_class;
-	}
-
-	/* if engine requires it, create a context object to insert
-	 * between the parent and its children (eg. PGRAPH context)
-	 */
-	if (engine && nv_engine(engine)->cclass) {
-		ret = nouveau_object_ctor(parent, engine,
-					  nv_engine(engine)->cclass,
-					  data, size, &engctx);
-		if (ret)
-			goto fail_engctx;
-	} else {
-		nouveau_object_ref(parent, &engctx);
-	}
-
-	/* finally, create new object and bind it to its handle */
-	ret = nouveau_object_ctor(engctx, engine, oclass, data, size, &object);
-	*pobject = object;
-	if (ret)
-		goto fail_ctor;
-
-	ret = nouveau_object_inc(object);
-	if (ret)
-		goto fail_init;
-
-	ret = nouveau_handle_create(parent, _parent, _handle, object, &handle);
-	if (ret)
-		goto fail_handle;
-
-	ret = nouveau_handle_init(handle);
-	if (ret)
-		nouveau_handle_destroy(handle);
-
-fail_handle:
-	nouveau_object_dec(object, false);
-fail_init:
-	nouveau_object_ref(NULL, &object);
-fail_ctor:
-	nouveau_object_ref(NULL, &engctx);
-fail_engctx:
-	if (engine)
-		nouveau_object_dec(engine, false);
-fail_class:
-	nouveau_object_ref(NULL, &parent);
-	return ret;
-}
-
-int
-nouveau_handle_del(struct nouveau_object *client, u32 _parent, u32 _handle)
-{
-	struct nouveau_object *parent = NULL;
-	struct nouveau_object *namedb = NULL;
-	struct nouveau_handle *handle = NULL;
-
-	parent = nouveau_handle_ref(client, _parent);
-	if (!parent)
-		return -ENOENT;
-
-	namedb = nv_pclass(parent, NV_NAMEDB_CLASS);
-	if (namedb) {
-		handle = nouveau_namedb_get(nv_namedb(namedb), _handle);
-		if (handle) {
-			nouveau_namedb_put(handle);
-			nouveau_handle_fini(handle, false);
-			nouveau_handle_destroy(handle);
-		}
-	}
-
-	nouveau_object_ref(NULL, &parent);
-	return handle ? 0 : -EINVAL;
-}
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/base.c b/drivers/gpu/drm/nouveau/core/engine/device/base.c
index 0ef5a5713182..137e0b0faeae 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/base.c
@@ -29,6 +29,7 @@
 #include <nvif/unpack.h>
 #include <nvif/class.h>
 
+#include <subdev/bios.h>
 #include <subdev/fb.h>
 #include <subdev/instmem.h>
 
@@ -138,7 +139,7 @@ nouveau_devobj_info(struct nouveau_object *object, void *data, u32 size)
 	}
 
 	args->v0.chipset  = device->chipset;
-	args->v0.revision = device->chipset >= 0x10 ? nv_rd32(device, 0) : 0x00;
+	args->v0.revision = device->chiprev;
 	if (pfb)  args->v0.ram_size = args->v0.ram_user = pfb->ram->size;
 	else      args->v0.ram_size = args->v0.ram_user = 0;
 	if (imem) args->v0.ram_user = args->v0.ram_user - imem->reserved;
@@ -222,6 +223,7 @@ static const u64 disable_map[] = {
 	[NVDEV_SUBDEV_VOLT] = NV_DEVICE_V0_DISABLE_CORE,
 	[NVDEV_SUBDEV_THERM] = NV_DEVICE_V0_DISABLE_CORE,
 	[NVDEV_SUBDEV_PWR] = NV_DEVICE_V0_DISABLE_CORE,
+	[NVDEV_SUBDEV_FUSE] = NV_DEVICE_V0_DISABLE_CORE,
 	[NVDEV_ENGINE_DMAOBJ] = NV_DEVICE_V0_DISABLE_CORE,
 	[NVDEV_ENGINE_PERFMON] = NV_DEVICE_V0_DISABLE_CORE,
 	[NVDEV_ENGINE_FIFO] = NV_DEVICE_V0_DISABLE_FIFO,
@@ -235,6 +237,7 @@ static const u64 disable_map[] = {
 	[NVDEV_ENGINE_PPP] = NV_DEVICE_V0_DISABLE_PPP,
 	[NVDEV_ENGINE_COPY0] = NV_DEVICE_V0_DISABLE_COPY0,
 	[NVDEV_ENGINE_COPY1] = NV_DEVICE_V0_DISABLE_COPY1,
+	[NVDEV_ENGINE_COPY2] = NV_DEVICE_V0_DISABLE_COPY1,
 	[NVDEV_ENGINE_VIC] = NV_DEVICE_V0_DISABLE_VIC,
 	[NVDEV_ENGINE_VENC] = NV_DEVICE_V0_DISABLE_VENC,
 	[NVDEV_ENGINE_DISP] = NV_DEVICE_V0_DISABLE_DISP,
@@ -352,12 +355,14 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
 	/* determine chipset and derive architecture from it */
 	if ((boot0 & 0x1f000000) > 0) {
 		device->chipset = (boot0 & 0x1ff00000) >> 20;
+		device->chiprev = (boot0 & 0x000000ff);
 		switch (device->chipset & 0x1f0) {
 		case 0x010: {
 			if (0x461 & (1 << (device->chipset & 0xf)))
 				device->card_type = NV_10;
 			else
 				device->card_type = NV_11;
+			device->chiprev = 0x00;
 			break;
 		}
 		case 0x020: device->card_type = NV_20; break;
@@ -373,7 +378,8 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
 		case 0x0e0:
 		case 0x0f0:
 		case 0x100: device->card_type = NV_E0; break;
-		case 0x110: device->card_type = GM100; break;
+		case 0x110:
+		case 0x120: device->card_type = GM100; break;
 		default:
 			break;
 		}
@@ -427,6 +433,10 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
 		}
 
 		nv_debug(device, "crystal freq: %dKHz\n", device->crystal);
+	} else
+	if ( (args->v0.disable & NV_DEVICE_V0_DISABLE_IDENTIFY)) {
+		device->cname = "NULL";
+		device->oclass[NVDEV_SUBDEV_VBIOS] = &nouveau_bios_oclass;
 	}
 
 	if (!(args->v0.disable & NV_DEVICE_V0_DISABLE_MMIO) &&
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/gm100.c b/drivers/gpu/drm/nouveau/core/engine/device/gm100.c
index 6295668e29a5..4e74a3376de8 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/gm100.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/gm100.c
@@ -98,6 +98,49 @@ gm100_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
 #endif
 		break;
+	case 0x124:
+		device->cname = "GM204";
+		device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+		device->oclass[NVDEV_SUBDEV_GPIO ] = nve0_gpio_oclass;
+		device->oclass[NVDEV_SUBDEV_I2C ] = gm204_i2c_oclass;
+		device->oclass[NVDEV_SUBDEV_FUSE ] = &gm107_fuse_oclass;
+#if 0
+		/* looks to be some non-trivial changes */
+		device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
+		/* priv ring says no to 0x10eb14 writes */
+		device->oclass[NVDEV_SUBDEV_THERM ] = &gm107_therm_oclass;
+#endif
+		device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+		device->oclass[NVDEV_SUBDEV_DEVINIT] = gm204_devinit_oclass;
+		device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass;
+		device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
+		device->oclass[NVDEV_SUBDEV_TIMER ] = &gk20a_timer_oclass;
+		device->oclass[NVDEV_SUBDEV_FB ] = gm107_fb_oclass;
+		device->oclass[NVDEV_SUBDEV_LTC ] = gm107_ltc_oclass;
+		device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
+		device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+		device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
+		device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+		device->oclass[NVDEV_SUBDEV_PWR ] = nv108_pwr_oclass;
+#if 0
+		device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+#endif
+		device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
+#if 0
+		device->oclass[NVDEV_ENGINE_FIFO ] = nv108_fifo_oclass;
+		device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
+		device->oclass[NVDEV_ENGINE_GR ] = gm107_graph_oclass;
+#endif
+		device->oclass[NVDEV_ENGINE_DISP ] = gm204_disp_oclass;
+#if 0
+		device->oclass[NVDEV_ENGINE_COPY0 ] = &gm204_copy0_oclass;
+		device->oclass[NVDEV_ENGINE_COPY1 ] = &gm204_copy1_oclass;
+		device->oclass[NVDEV_ENGINE_COPY2 ] = &gm204_copy2_oclass;
+		device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
+		device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
+		device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
+#endif
+		break;
 	default:
 		nv_fatal(device, "unknown Maxwell chipset\n");
 		return -EINVAL;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
index b1b2e484ecfa..674da1f095b2 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
@@ -179,6 +179,7 @@ nve0_identify(struct nouveau_device *device)
 		device->oclass[NVDEV_ENGINE_GR ] = gk20a_graph_oclass;
 		device->oclass[NVDEV_ENGINE_COPY2 ] = &nve0_copy2_oclass;
 		device->oclass[NVDEV_ENGINE_PERFMON] = &nve0_perfmon_oclass;
+		device->oclass[NVDEV_SUBDEV_VOLT ] = &gk20a_volt_oclass;
 		break;
 	case 0xf0:
 		device->cname = "GK110";
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
index 39890221b91c..16db08dfba6e 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/dport.c
@@ -28,7 +28,7 @@
 #include <subdev/bios/init.h>
 #include <subdev/i2c.h>
 
-#include <engine/disp.h>
+#include "nv50.h"
 
 #include <nvif/class.h>
 
@@ -326,7 +326,7 @@ void
 nouveau_dp_train(struct work_struct *w)
 {
 	struct nvkm_output_dp *outp = container_of(w, typeof(*outp), lt.work);
-	struct nouveau_disp *disp = nouveau_disp(outp);
+	struct nv50_disp_priv *priv = (void *)nouveau_disp(outp);
 	const struct dp_rates *cfg = nouveau_dp_rates;
 	struct dp_state _dp = {
 		.outp = outp,
@@ -334,8 +334,11 @@ nouveau_dp_train(struct work_struct *w)
 	u32 datarate = 0;
 	int ret;
 
+	if (!outp->base.info.location && priv->sor.magic)
+		priv->sor.magic(&outp->base);
+
 	/* bring capabilities within encoder limits */
-	if (nv_mclass(disp) < GF110_DISP)
+	if (nv_mclass(priv) < GF110_DISP)
 		outp->dpcd[2] &= ~DPCD_RC02_TPS3_SUPPORTED;
 	if ((outp->dpcd[2] & 0x1f) > outp->base.info.dpconf.link_nr) {
 		outp->dpcd[2] &= ~DPCD_RC02_MAX_LANE_COUNT;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c b/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c
index b3df3fe2dc09..e2ad0543fb31 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/gm107.c
@@ -35,8 +35,8 @@
 
 static struct nouveau_oclass
 gm107_disp_sclass[] = {
-	{ GM107_DISP_CORE_CHANNEL_DMA, &nvd0_disp_mast_ofuncs.base },
-	{ GK110_DISP_BASE_CHANNEL_DMA, &nvd0_disp_sync_ofuncs.base },
+	{ GM107_DISP_CORE_CHANNEL_DMA, &nvd0_disp_core_ofuncs.base },
+	{ GK110_DISP_BASE_CHANNEL_DMA, &nvd0_disp_base_ofuncs.base },
 	{ GK104_DISP_OVERLAY_CONTROL_DMA, &nvd0_disp_ovly_ofuncs.base },
 	{ GK104_DISP_OVERLAY, &nvd0_disp_oimm_ofuncs.base },
 	{ GK104_DISP_CURSOR, &nvd0_disp_curs_ofuncs.base },
@@ -44,8 +44,8 @@ gm107_disp_sclass[] = {
 };
 
 static struct nouveau_oclass
-gm107_disp_base_oclass[] = {
-	{ GM107_DISP, &nvd0_disp_base_ofuncs },
+gm107_disp_main_oclass[] = {
+	{ GM107_DISP, &nvd0_disp_main_ofuncs },
 	{}
 };
 
@@ -72,7 +72,7 @@ gm107_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;
 
-	nv_engine(priv)->sclass = gm107_disp_base_oclass;
+	nv_engine(priv)->sclass = gm107_disp_main_oclass;
 	nv_engine(priv)->cclass = &nv50_disp_cclass;
 	nv_subdev(priv)->intr = nvd0_disp_intr;
 	INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor);
@@ -99,9 +99,9 @@ gm107_disp_oclass = &(struct nv50_disp_impl) {
 	},
 	.base.vblank = &nvd0_disp_vblank_func,
 	.base.outp = nvd0_disp_outp_sclass,
-	.mthd.core = &nve0_disp_mast_mthd_chan,
-	.mthd.base = &nvd0_disp_sync_mthd_chan,
+	.mthd.core = &nve0_disp_core_mthd_chan,
+	.mthd.base = &nvd0_disp_base_mthd_chan,
 	.mthd.ovly = &nve0_disp_ovly_mthd_chan,
 	.mthd.prev = -0x020000,
-	.head.scanoutpos = nvd0_disp_base_scanoutpos,
+	.head.scanoutpos = nvd0_disp_main_scanoutpos,
 }.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/gm204.c b/drivers/gpu/drm/nouveau/core/engine/disp/gm204.c
new file mode 100644
index 000000000000..672ded79b2a9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/gm204.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <engine/software.h>
+#include <engine/disp.h>
+
+#include <nvif/class.h>
+
+#include "nv50.h"
+
+/*******************************************************************************
+ * Base display object
+ ******************************************************************************/
+
+static struct nouveau_oclass
+gm204_disp_sclass[] = {
+	{ GM204_DISP_CORE_CHANNEL_DMA, &nvd0_disp_core_ofuncs.base },
+	{ GK110_DISP_BASE_CHANNEL_DMA, &nvd0_disp_base_ofuncs.base },
+	{ GK104_DISP_OVERLAY_CONTROL_DMA, &nvd0_disp_ovly_ofuncs.base },
+	{ GK104_DISP_OVERLAY, &nvd0_disp_oimm_ofuncs.base },
+	{ GK104_DISP_CURSOR, &nvd0_disp_curs_ofuncs.base },
+	{}
+};
+
+static struct nouveau_oclass
+gm204_disp_main_oclass[] = {
+	{ GM204_DISP, &nvd0_disp_main_ofuncs },
+	{}
+};
+
+/*******************************************************************************
+ * Display engine implementation
+ ******************************************************************************/
+
+static int
+gm204_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
+		struct nouveau_oclass *oclass, void *data, u32 size,
+		struct nouveau_object **pobject)
+{
+	struct nv50_disp_priv *priv;
+	int heads = nv_rd32(parent, 0x022448);
+	int ret;
+
+	ret = nouveau_disp_create(parent, engine, oclass, heads,
+				  "PDISP", "display", &priv);
+	*pobject = nv_object(priv);
+	if (ret)
+		return ret;
+
+	ret = nvkm_event_init(&nvd0_disp_chan_uevent, 1, 17, &priv->uevent);
+	if (ret)
+		return ret;
+
+	nv_engine(priv)->sclass = gm204_disp_main_oclass;
+	nv_engine(priv)->cclass = &nv50_disp_cclass;
+	nv_subdev(priv)->intr = nvd0_disp_intr;
+	INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor);
+	priv->sclass = gm204_disp_sclass;
+	priv->head.nr = heads;
+	priv->dac.nr = 3;
+	priv->sor.nr = 4;
+	priv->dac.power = nv50_dac_power;
+	priv->dac.sense = nv50_dac_sense;
+	priv->sor.power = nv50_sor_power;
+	priv->sor.hda_eld = nvd0_hda_eld;
+	priv->sor.hdmi = nvd0_hdmi_ctrl;
+	priv->sor.magic = gm204_sor_magic;
+	return 0;
+}
+
+struct nouveau_oclass *
+gm204_disp_outp_sclass[] = {
+	&gm204_sor_dp_impl.base.base,
+	NULL
+};
+
+struct nouveau_oclass *
+gm204_disp_oclass = &(struct nv50_disp_impl) {
+	.base.base.handle = NV_ENGINE(DISP, 0x07),
+	.base.base.ofuncs = &(struct nouveau_ofuncs) {
+		.ctor = gm204_disp_ctor,
+		.dtor = _nouveau_disp_dtor,
+		.init = _nouveau_disp_init,
+		.fini = _nouveau_disp_fini,
+	},
+	.base.vblank = &nvd0_disp_vblank_func,
+	.base.outp = gm204_disp_outp_sclass,
+	.mthd.core = &nve0_disp_core_mthd_chan,
+	.mthd.base = &nvd0_disp_base_mthd_chan,
+	.mthd.ovly = &nve0_disp_ovly_mthd_chan,
+	.mthd.prev = -0x020000,
+	.head.scanoutpos = nvd0_disp_main_scanoutpos,
+}.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index 2df3a937037d..44a8290aaea5 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -88,12 +88,14 @@ nv50_disp_chan_uevent_fini(struct nvkm_event *event, int type, int index)
 {
 	struct nv50_disp_priv *priv = container_of(event, typeof(*priv), uevent);
 	nv_mask(priv, 0x610028, 0x00000001 << index, 0x00000000 << index);
+	nv_wr32(priv, 0x610020, 0x00000001 << index);
 }
 
 static void
 nv50_disp_chan_uevent_init(struct nvkm_event *event, int types, int index)
 {
 	struct nv50_disp_priv *priv = container_of(event, typeof(*priv), uevent);
+	nv_wr32(priv, 0x610020, 0x00000001 << index);
 	nv_mask(priv, 0x610028, 0x00000001 << index, 0x00000001 << index);
 }
 
@@ -374,7 +376,7 @@ nv50_disp_mthd_chan(struct nv50_disp_priv *priv, int debug, int head,
 }
 
 const struct nv50_disp_mthd_list
-nv50_disp_mast_mthd_base = {
+nv50_disp_core_mthd_base = {
 	.mthd = 0x0000,
 	.addr = 0x000000,
 	.data = {
@@ -387,7 +389,7 @@ nv50_disp_mast_mthd_base = {
 };
 
 static const struct nv50_disp_mthd_list
-nv50_disp_mast_mthd_dac = {
+nv50_disp_core_mthd_dac = {
 	.mthd = 0x0080,
 	.addr = 0x000008,
 	.data = {
@@ -399,7 +401,7 @@ nv50_disp_mast_mthd_dac = {
 };
 
 const struct nv50_disp_mthd_list
-nv50_disp_mast_mthd_sor = {
+nv50_disp_core_mthd_sor = {
 	.mthd = 0x0040,
 	.addr = 0x000008,
 	.data = {
@@ -409,7 +411,7 @@ nv50_disp_mast_mthd_sor = {
 };
 
 const struct nv50_disp_mthd_list
-nv50_disp_mast_mthd_pior = {
+nv50_disp_core_mthd_pior = {
 	.mthd = 0x0040,
 	.addr = 0x000008,
 	.data = {
@@ -419,7 +421,7 @@ nv50_disp_mast_mthd_pior = {
 };
 
 static const struct nv50_disp_mthd_list
-nv50_disp_mast_mthd_head = {
+nv50_disp_core_mthd_head = {
 	.mthd = 0x0400,
 	.addr = 0x000540,
 	.data = {
@@ -466,21 +468,21 @@ nv50_disp_mast_mthd_head = {
 };
 
 static const struct nv50_disp_mthd_chan
-nv50_disp_mast_mthd_chan = {
+nv50_disp_core_mthd_chan = {
 	.name = "Core",
 	.addr = 0x000000,
 	.data = {
-		{ "Global", 1, &nv50_disp_mast_mthd_base },
-		{ "DAC", 3, &nv50_disp_mast_mthd_dac },
-		{ "SOR", 2, &nv50_disp_mast_mthd_sor },
-		{ "PIOR", 3, &nv50_disp_mast_mthd_pior },
-		{ "HEAD", 2, &nv50_disp_mast_mthd_head },
+		{ "Global", 1, &nv50_disp_core_mthd_base },
+		{ "DAC", 3, &nv50_disp_core_mthd_dac },
+		{ "SOR", 2, &nv50_disp_core_mthd_sor },
+		{ "PIOR", 3, &nv50_disp_core_mthd_pior },
+		{ "HEAD", 2, &nv50_disp_core_mthd_head },
 		{}
 	}
 };
 
 int
-nv50_disp_mast_ctor(struct nouveau_object *parent,
+nv50_disp_core_ctor(struct nouveau_object *parent,
 		    struct nouveau_object *engine,
 		    struct nouveau_oclass *oclass, void *data, u32 size,
 		    struct nouveau_object **pobject)
@@ -509,7 +511,7 @@ nv50_disp_mast_ctor(struct nouveau_object *parent,
 }
 
 static int
-nv50_disp_mast_init(struct nouveau_object *object)
+nv50_disp_core_init(struct nouveau_object *object)
 {
 	struct nv50_disp_priv *priv = (void *)object->engine;
 	struct nv50_disp_dmac *mast = (void *)object;
@@ -546,7 +548,7 @@ nv50_disp_mast_init(struct nouveau_object *object)
 }
 
 static int
-nv50_disp_mast_fini(struct nouveau_object *object, bool suspend)
+nv50_disp_core_fini(struct nouveau_object *object, bool suspend)
 {
 	struct nv50_disp_priv *priv = (void *)object->engine;
 	struct nv50_disp_dmac *mast = (void *)object;
@@ -567,11 +569,11 @@ nv50_disp_mast_fini(struct nouveau_object *object, bool suspend)
 }
 
 struct nv50_disp_chan_impl
-nv50_disp_mast_ofuncs = {
-	.base.ctor = nv50_disp_mast_ctor,
+nv50_disp_core_ofuncs = {
+	.base.ctor = nv50_disp_core_ctor,
 	.base.dtor = nv50_disp_dmac_dtor,
-	.base.init = nv50_disp_mast_init,
-	.base.fini = nv50_disp_mast_fini,
+	.base.init = nv50_disp_core_init,
+	.base.fini = nv50_disp_core_fini,
 	.base.map = nv50_disp_chan_map,
 	.base.ntfy = nv50_disp_chan_ntfy,
 	.base.rd32 = nv50_disp_chan_rd32,
@@ -586,7 +588,7 @@ nv50_disp_mast_ofuncs = {
  ******************************************************************************/
 
 static const struct nv50_disp_mthd_list
-nv50_disp_sync_mthd_base = {
+nv50_disp_base_mthd_base = {
 	.mthd = 0x0000,
 	.addr = 0x000000,
 	.data = {
@@ -611,7 +613,7 @@ nv50_disp_sync_mthd_base = {
 };
 
 const struct nv50_disp_mthd_list
-nv50_disp_sync_mthd_image = {
+nv50_disp_base_mthd_image = {
 	.mthd = 0x0400,
 	.addr = 0x000000,
 	.data = {
@@ -625,18 +627,18 @@ nv50_disp_sync_mthd_image = {
 };
 
 static const struct nv50_disp_mthd_chan
-nv50_disp_sync_mthd_chan = {
+nv50_disp_base_mthd_chan = {
 	.name = "Base",
 	.addr = 0x000540,
 	.data = {
-		{ "Global", 1, &nv50_disp_sync_mthd_base },
-		{ "Image", 2, &nv50_disp_sync_mthd_image },
+		{ "Global", 1, &nv50_disp_base_mthd_base },
+		{ "Image", 2, &nv50_disp_base_mthd_image },
 		{}
 	}
 };
 
 int
-nv50_disp_sync_ctor(struct nouveau_object *parent,
+nv50_disp_base_ctor(struct nouveau_object *parent,
 		    struct nouveau_object *engine,
 		    struct nouveau_oclass *oclass, void *data, u32 size,
 		    struct nouveau_object **pobject)
@@ -669,8 +671,8 @@ nv50_disp_sync_ctor(struct nouveau_object *parent,
 }
 
 struct nv50_disp_chan_impl
-nv50_disp_sync_ofuncs = {
-	.base.ctor = nv50_disp_sync_ctor,
+nv50_disp_base_ofuncs = {
+	.base.ctor = nv50_disp_base_ctor,
 	.base.dtor = nv50_disp_dmac_dtor,
 	.base.init = nv50_disp_dmac_init,
 	.base.fini = nv50_disp_dmac_fini,
@@ -942,7 +944,7 @@ nv50_disp_curs_ofuncs = {
  ******************************************************************************/
 
 int
-nv50_disp_base_scanoutpos(NV50_DISP_MTHD_V0)
+nv50_disp_main_scanoutpos(NV50_DISP_MTHD_V0)
 {
 	const u32 blanke = nv_rd32(priv, 0x610aec + (head * 0x540));
 	const u32 blanks = nv_rd32(priv, 0x610af4 + (head * 0x540));
@@ -974,7 +976,7 @@ nv50_disp_base_scanoutpos(NV50_DISP_MTHD_V0)
 }
 
 int
-nv50_disp_base_mthd(struct nouveau_object *object, u32 mthd,
+nv50_disp_main_mthd(struct nouveau_object *object, u32 mthd,
 		    void *data, u32 size)
 {
 	const struct nv50_disp_impl *impl = (void *)nv_oclass(object->engine);
@@ -1098,7 +1100,7 @@ nv50_disp_base_mthd(struct nouveau_object *object, u32 mthd,
 }
 
 int
-nv50_disp_base_ctor(struct nouveau_object *parent,
+nv50_disp_main_ctor(struct nouveau_object *parent,
 		    struct nouveau_object *engine,
 		    struct nouveau_oclass *oclass, void *data, u32 size,
 		    struct nouveau_object **pobject)
@@ -1118,7 +1120,7 @@ nv50_disp_base_ctor(struct nouveau_object *parent,
 }
 
 void
-nv50_disp_base_dtor(struct nouveau_object *object)
+nv50_disp_main_dtor(struct nouveau_object *object)
 {
 	struct nv50_disp_base *base = (void *)object;
 	nouveau_ramht_ref(NULL, &base->ramht);
@@ -1126,7 +1128,7 @@ nv50_disp_base_dtor(struct nouveau_object *object)
 }
 
 static int
-nv50_disp_base_init(struct nouveau_object *object)
+nv50_disp_main_init(struct nouveau_object *object)
 {
 	struct nv50_disp_priv *priv = (void *)object->engine;
 	struct nv50_disp_base *base = (void *)object;
@@ -1194,7 +1196,7 @@ nv50_disp_base_init(struct nouveau_object *object)
 }
 
 static int
-nv50_disp_base_fini(struct nouveau_object *object, bool suspend)
+nv50_disp_main_fini(struct nouveau_object *object, bool suspend)
 {
 	struct nv50_disp_priv *priv = (void *)object->engine;
 	struct nv50_disp_base *base = (void *)object;
@@ -1207,25 +1209,25 @@ nv50_disp_base_fini(struct nouveau_object *object, bool suspend)
 }
 
 struct nouveau_ofuncs
-nv50_disp_base_ofuncs = {
-	.ctor = nv50_disp_base_ctor,
-	.dtor = nv50_disp_base_dtor,
-	.init = nv50_disp_base_init,
-	.fini = nv50_disp_base_fini,
-	.mthd = nv50_disp_base_mthd,
+nv50_disp_main_ofuncs = {
+	.ctor = nv50_disp_main_ctor,
+	.dtor = nv50_disp_main_dtor,
+	.init = nv50_disp_main_init,
+	.fini = nv50_disp_main_fini,
+	.mthd = nv50_disp_main_mthd,
 	.ntfy = nouveau_disp_ntfy,
 };
 
 static struct nouveau_oclass
-nv50_disp_base_oclass[] = {
-	{ NV50_DISP, &nv50_disp_base_ofuncs },
+nv50_disp_main_oclass[] = {
+	{ NV50_DISP, &nv50_disp_main_ofuncs },
 	{}
 };
 
 static struct nouveau_oclass
 nv50_disp_sclass[] = {
-	{ NV50_DISP_CORE_CHANNEL_DMA, &nv50_disp_mast_ofuncs.base },
-	{ NV50_DISP_BASE_CHANNEL_DMA, &nv50_disp_sync_ofuncs.base },
+	{ NV50_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base },
+	{ NV50_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base },
 	{ NV50_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
 	{ NV50_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
 	{ NV50_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
@@ -1974,7 +1976,7 @@ nv50_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;
 
-	nv_engine(priv)->sclass = nv50_disp_base_oclass;
+	nv_engine(priv)->sclass = nv50_disp_main_oclass;
 	nv_engine(priv)->cclass = &nv50_disp_cclass;
 	nv_subdev(priv)->intr = nv50_disp_intr;
 	INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
@@ -2007,9 +2009,9 @@ nv50_disp_oclass = &(struct nv50_disp_impl) {
 	},
 	.base.vblank = &nv50_disp_vblank_func,
 	.base.outp = nv50_disp_outp_sclass,
-	.mthd.core = &nv50_disp_mast_mthd_chan,
-	.mthd.base = &nv50_disp_sync_mthd_chan,
+	.mthd.core = &nv50_disp_core_mthd_chan,
+	.mthd.base = &nv50_disp_base_mthd_chan,
 	.mthd.ovly = &nv50_disp_ovly_mthd_chan,
 	.mthd.prev = 0x000004,
-	.head.scanoutpos = nv50_disp_base_scanoutpos,
+	.head.scanoutpos = nv50_disp_main_scanoutpos,
 }.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
index 5279feefec06..7f08078ee925 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.h
@@ -42,6 +42,7 @@ struct nv50_disp_priv {
 		int (*hda_eld)(NV50_DISP_MTHD_V1);
 		int (*hdmi)(NV50_DISP_MTHD_V1);
 		u32 lvdsconf;
+		void (*magic)(struct nvkm_output *);
 	} sor;
 	struct {
 		int nr;
@@ -63,10 +64,10 @@ struct nv50_disp_impl {
 	} head;
 };
 
-int nv50_disp_base_scanoutpos(NV50_DISP_MTHD_V0);
-int nv50_disp_base_mthd(struct nouveau_object *, u32, void *, u32);
+int nv50_disp_main_scanoutpos(NV50_DISP_MTHD_V0);
+int nv50_disp_main_mthd(struct nouveau_object *, u32, void *, u32);
 
-int nvd0_disp_base_scanoutpos(NV50_DISP_MTHD_V0);
+int nvd0_disp_main_scanoutpos(NV50_DISP_MTHD_V0);
 
 int nv50_dac_power(NV50_DISP_MTHD_V1);
 int nv50_dac_sense(NV50_DISP_MTHD_V1);
@@ -169,18 +170,18 @@ struct nv50_disp_mthd_chan {
 	} data[];
 };
 
-extern struct nv50_disp_chan_impl nv50_disp_mast_ofuncs;
-int nv50_disp_mast_ctor(struct nouveau_object *, struct nouveau_object *,
+extern struct nv50_disp_chan_impl nv50_disp_core_ofuncs;
+int nv50_disp_core_ctor(struct nouveau_object *, struct nouveau_object *,
 			struct nouveau_oclass *, void *, u32,
 			struct nouveau_object **);
-extern const struct nv50_disp_mthd_list nv50_disp_mast_mthd_base;
-extern const struct nv50_disp_mthd_list nv50_disp_mast_mthd_sor;
-extern const struct nv50_disp_mthd_list nv50_disp_mast_mthd_pior;
-extern struct nv50_disp_chan_impl nv50_disp_sync_ofuncs;
-int nv50_disp_sync_ctor(struct nouveau_object *, struct nouveau_object *,
+extern const struct nv50_disp_mthd_list nv50_disp_core_mthd_base;
+extern const struct nv50_disp_mthd_list nv50_disp_core_mthd_sor;
+extern const struct nv50_disp_mthd_list nv50_disp_core_mthd_pior;
+extern struct nv50_disp_chan_impl nv50_disp_base_ofuncs;
+int nv50_disp_base_ctor(struct nouveau_object *, struct nouveau_object *,
 			struct nouveau_oclass *, void *, u32,
 			struct nouveau_object **);
-extern const struct nv50_disp_mthd_list nv50_disp_sync_mthd_image;
+extern const struct nv50_disp_mthd_list nv50_disp_base_mthd_image;
 extern struct nv50_disp_chan_impl nv50_disp_ovly_ofuncs;
 int nv50_disp_ovly_ctor(struct nouveau_object *, struct nouveau_object *,
 			struct nouveau_oclass *, void *, u32,
@@ -194,12 +195,12 @@ extern struct nv50_disp_chan_impl nv50_disp_curs_ofuncs;
 int nv50_disp_curs_ctor(struct nouveau_object *, struct nouveau_object *,
 			struct nouveau_oclass *, void *, u32,
 			struct nouveau_object **);
-extern struct nouveau_ofuncs nv50_disp_base_ofuncs;
-int nv50_disp_base_ctor(struct nouveau_object *, struct nouveau_object *,
+extern struct nouveau_ofuncs nv50_disp_main_ofuncs;
+int nv50_disp_main_ctor(struct nouveau_object *, struct nouveau_object *,
 			struct nouveau_oclass *, void *, u32,
 			struct nouveau_object **);
-void nv50_disp_base_dtor(struct nouveau_object *);
-extern struct nouveau_omthds nv50_disp_base_omthds[];
+void nv50_disp_main_dtor(struct nouveau_object *);
+extern struct nouveau_omthds nv50_disp_main_omthds[];
 extern struct nouveau_oclass nv50_disp_cclass;
 void nv50_disp_mthd_chan(struct nv50_disp_priv *, int debug, int head,
 			 const struct nv50_disp_mthd_chan *);
@@ -207,31 +208,31 @@ void nv50_disp_intr_supervisor(struct work_struct *);
 void nv50_disp_intr(struct nouveau_subdev *);
 extern const struct nvkm_event_func nv50_disp_vblank_func;
 
-extern const struct nv50_disp_mthd_chan nv84_disp_mast_mthd_chan;
-extern const struct nv50_disp_mthd_list nv84_disp_mast_mthd_dac;
-extern const struct nv50_disp_mthd_list nv84_disp_mast_mthd_head;
-extern const struct nv50_disp_mthd_chan nv84_disp_sync_mthd_chan;
+extern const struct nv50_disp_mthd_chan nv84_disp_core_mthd_chan;
+extern const struct nv50_disp_mthd_list nv84_disp_core_mthd_dac;
+extern const struct nv50_disp_mthd_list nv84_disp_core_mthd_head;
+extern const struct nv50_disp_mthd_chan nv84_disp_base_mthd_chan;
 extern const struct nv50_disp_mthd_chan nv84_disp_ovly_mthd_chan;
 
-extern const struct nv50_disp_mthd_chan nv94_disp_mast_mthd_chan;
+extern const struct nv50_disp_mthd_chan nv94_disp_core_mthd_chan;
 
-extern struct nv50_disp_chan_impl nvd0_disp_mast_ofuncs;
-extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_base;
-extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_dac;
-extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_sor;
-extern const struct nv50_disp_mthd_list nvd0_disp_mast_mthd_pior;
-extern struct nv50_disp_chan_impl nvd0_disp_sync_ofuncs;
+extern struct nv50_disp_chan_impl nvd0_disp_core_ofuncs;
+extern const struct nv50_disp_mthd_list nvd0_disp_core_mthd_base;
+extern const struct nv50_disp_mthd_list nvd0_disp_core_mthd_dac;
+extern const struct nv50_disp_mthd_list nvd0_disp_core_mthd_sor;
+extern const struct nv50_disp_mthd_list nvd0_disp_core_mthd_pior;
+extern struct nv50_disp_chan_impl nvd0_disp_base_ofuncs;
 extern struct nv50_disp_chan_impl nvd0_disp_ovly_ofuncs;
-extern const struct nv50_disp_mthd_chan nvd0_disp_sync_mthd_chan;
+extern const struct nv50_disp_mthd_chan nvd0_disp_base_mthd_chan;
 extern struct nv50_disp_chan_impl nvd0_disp_oimm_ofuncs;
 extern struct nv50_disp_chan_impl nvd0_disp_curs_ofuncs;
-extern struct nouveau_ofuncs nvd0_disp_base_ofuncs;
+extern struct nouveau_ofuncs nvd0_disp_main_ofuncs;
 extern struct nouveau_oclass nvd0_disp_cclass;
 void nvd0_disp_intr_supervisor(struct work_struct *);
 void nvd0_disp_intr(struct nouveau_subdev *);
 extern const struct nvkm_event_func nvd0_disp_vblank_func;
 
-extern const struct nv50_disp_mthd_chan nve0_disp_mast_mthd_chan;
+extern const struct nv50_disp_mthd_chan nve0_disp_core_mthd_chan;
 extern const struct nv50_disp_mthd_chan nve0_disp_ovly_mthd_chan;
 
 extern struct nvkm_output_dp_impl nv50_pior_dp_impl;
@@ -242,6 +243,10 @@ int nv94_sor_dp_lnk_pwr(struct nvkm_output_dp *, int);
 extern struct nouveau_oclass *nv94_disp_outp_sclass[];
 
 extern struct nvkm_output_dp_impl nvd0_sor_dp_impl;
+int nvd0_sor_dp_lnk_ctl(struct nvkm_output_dp *, int, int, bool);
 extern struct nouveau_oclass *nvd0_disp_outp_sclass[];
 
+void gm204_sor_magic(struct nvkm_output *outp);
+extern struct nvkm_output_dp_impl gm204_sor_dp_impl;
+
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
index d36284715b2a..13eff5e4ee51 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv84.c
@@ -34,7 +34,7 @@
  ******************************************************************************/
 
 const struct nv50_disp_mthd_list
-nv84_disp_mast_mthd_dac = {
+nv84_disp_core_mthd_dac = {
 	.mthd = 0x0080,
 	.addr = 0x000008,
 	.data = {
@@ -46,7 +46,7 @@ nv84_disp_mast_mthd_dac = {
 };
 
 const struct nv50_disp_mthd_list
-nv84_disp_mast_mthd_head = {
+nv84_disp_core_mthd_head = {
 	.mthd = 0x0400,
 	.addr = 0x000540,
 	.data = {
@@ -98,15 +98,15 @@ nv84_disp_mast_mthd_head = {
 };
 
 const struct nv50_disp_mthd_chan
-nv84_disp_mast_mthd_chan = {
+nv84_disp_core_mthd_chan = {
 	.name = "Core",
 	.addr = 0x000000,
 	.data = {
-		{ "Global", 1, &nv50_disp_mast_mthd_base },
-		{ "DAC", 3, &nv84_disp_mast_mthd_dac },
-		{ "SOR", 2, &nv50_disp_mast_mthd_sor },
-		{ "PIOR", 3, &nv50_disp_mast_mthd_pior },
-		{ "HEAD", 2, &nv84_disp_mast_mthd_head },
+		{ "Global", 1, &nv50_disp_core_mthd_base },
+		{ "DAC", 3, &nv84_disp_core_mthd_dac },
+		{ "SOR", 2, &nv50_disp_core_mthd_sor },
+		{ "PIOR", 3, &nv50_disp_core_mthd_pior },
+		{ "HEAD", 2, &nv84_disp_core_mthd_head },
 		{}
 	}
 };
@@ -116,7 +116,7 @@ nv84_disp_mast_mthd_chan = {
  ******************************************************************************/
 
 static const struct nv50_disp_mthd_list
-nv84_disp_sync_mthd_base = {
+nv84_disp_base_mthd_base = {
 	.mthd = 0x0000,
 	.addr = 0x000000,
 	.data = {
@@ -146,12 +146,12 @@ nv84_disp_sync_mthd_base = {
 };
 
 const struct nv50_disp_mthd_chan
-nv84_disp_sync_mthd_chan = {
+nv84_disp_base_mthd_chan = {
 	.name = "Base",
 	.addr = 0x000540,
 	.data = {
-		{ "Global", 1, &nv84_disp_sync_mthd_base },
-		{ "Image", 2, &nv50_disp_sync_mthd_image },
+		{ "Global", 1, &nv84_disp_base_mthd_base },
+		{ "Image", 2, &nv50_disp_base_mthd_image },
 		{}
 	}
 };
@@ -204,8 +204,8 @@ nv84_disp_ovly_mthd_chan = {
 
 static struct nouveau_oclass
 nv84_disp_sclass[] = {
-	{ G82_DISP_CORE_CHANNEL_DMA, &nv50_disp_mast_ofuncs.base },
-	{ G82_DISP_BASE_CHANNEL_DMA, &nv50_disp_sync_ofuncs.base },
+	{ G82_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base },
+	{ G82_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base },
 	{ G82_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
 	{ G82_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
 	{ G82_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
@@ -213,8 +213,8 @@ nv84_disp_sclass[] = {
 };
 
 static struct nouveau_oclass
-nv84_disp_base_oclass[] = {
-	{ G82_DISP, &nv50_disp_base_ofuncs },
+nv84_disp_main_oclass[] = {
+	{ G82_DISP, &nv50_disp_main_ofuncs },
 	{}
 };
 
@@ -240,7 +240,7 @@ nv84_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;
 
-	nv_engine(priv)->sclass = nv84_disp_base_oclass;
+	nv_engine(priv)->sclass = nv84_disp_main_oclass;
 	nv_engine(priv)->cclass = &nv50_disp_cclass;
 	nv_subdev(priv)->intr = nv50_disp_intr;
 	INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
@@ -268,9 +268,9 @@ nv84_disp_oclass = &(struct nv50_disp_impl) {
 	},
 	.base.vblank = &nv50_disp_vblank_func,
 	.base.outp = nv50_disp_outp_sclass,
-	.mthd.core = &nv84_disp_mast_mthd_chan,
-	.mthd.base = &nv84_disp_sync_mthd_chan,
+	.mthd.core = &nv84_disp_core_mthd_chan,
+	.mthd.base = &nv84_disp_base_mthd_chan,
 	.mthd.ovly = &nv84_disp_ovly_mthd_chan,
 	.mthd.prev = 0x000004,
-	.head.scanoutpos = nv50_disp_base_scanoutpos,
+	.head.scanoutpos = nv50_disp_main_scanoutpos,
 }.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
index a117064002b1..2bb7ac5cd0e6 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv94.c
@@ -34,7 +34,7 @@
  ******************************************************************************/
 
 const struct nv50_disp_mthd_list
-nv94_disp_mast_mthd_sor = {
+nv94_disp_core_mthd_sor = {
 	.mthd = 0x0040,
 	.addr = 0x000008,
 	.data = {
@@ -44,15 +44,15 @@ nv94_disp_mast_mthd_sor = {
 };
 
 const struct nv50_disp_mthd_chan
-nv94_disp_mast_mthd_chan = {
+nv94_disp_core_mthd_chan = {
 	.name = "Core",
 	.addr = 0x000000,
 	.data = {
-		{ "Global", 1, &nv50_disp_mast_mthd_base },
-		{ "DAC", 3, &nv84_disp_mast_mthd_dac },
-		{ "SOR", 4, &nv94_disp_mast_mthd_sor },
-		{ "PIOR", 3, &nv50_disp_mast_mthd_pior },
-		{ "HEAD", 2, &nv84_disp_mast_mthd_head },
+		{ "Global", 1, &nv50_disp_core_mthd_base },
+		{ "DAC", 3, &nv84_disp_core_mthd_dac },
+		{ "SOR", 4, &nv94_disp_core_mthd_sor },
+		{ "PIOR", 3, &nv50_disp_core_mthd_pior },
+		{ "HEAD", 2, &nv84_disp_core_mthd_head },
 		{}
 	}
 };
@@ -63,8 +63,8 @@ nv94_disp_mast_mthd_chan = {
 
 static struct nouveau_oclass
 nv94_disp_sclass[] = {
-	{ GT206_DISP_CORE_CHANNEL_DMA, &nv50_disp_mast_ofuncs.base },
-	{ GT200_DISP_BASE_CHANNEL_DMA, &nv50_disp_sync_ofuncs.base },
+	{ GT206_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base },
+	{ GT200_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base },
 	{ GT200_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
 	{ G82_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
 	{ G82_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
@@ -72,8 +72,8 @@ nv94_disp_sclass[] = {
 };
 
 static struct nouveau_oclass
-nv94_disp_base_oclass[] = {
-	{ GT206_DISP, &nv50_disp_base_ofuncs },
+nv94_disp_main_oclass[] = {
+	{ GT206_DISP, &nv50_disp_main_ofuncs },
 	{}
 };
 
@@ -99,7 +99,7 @@ nv94_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;
 
-	nv_engine(priv)->sclass = nv94_disp_base_oclass;
+	nv_engine(priv)->sclass = nv94_disp_main_oclass;
 	nv_engine(priv)->cclass = &nv50_disp_cclass;
 	nv_subdev(priv)->intr = nv50_disp_intr;
 	INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
@@ -134,9 +134,9 @@ nv94_disp_oclass = &(struct nv50_disp_impl) {
 	},
 	.base.vblank = &nv50_disp_vblank_func,
 	.base.outp = nv94_disp_outp_sclass,
-	.mthd.core = &nv94_disp_mast_mthd_chan,
-	.mthd.base = &nv84_disp_sync_mthd_chan,
+	.mthd.core = &nv94_disp_core_mthd_chan,
+	.mthd.base = &nv84_disp_base_mthd_chan,
 	.mthd.ovly = &nv84_disp_ovly_mthd_chan,
 	.mthd.prev = 0x000004,
-	.head.scanoutpos = nv50_disp_base_scanoutpos,
+	.head.scanoutpos = nv50_disp_main_scanoutpos,
 }.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
index c67e68aadd45..b32456c9494f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva0.c
@@ -80,8 +80,8 @@ nva0_disp_ovly_mthd_chan = {
 
 static struct nouveau_oclass
 nva0_disp_sclass[] = {
-	{ GT200_DISP_CORE_CHANNEL_DMA, &nv50_disp_mast_ofuncs.base },
-	{ GT200_DISP_BASE_CHANNEL_DMA, &nv50_disp_sync_ofuncs.base },
+	{ GT200_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base },
+	{ GT200_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base },
 	{ GT200_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
 	{ G82_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
 	{ G82_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
@@ -89,8 +89,8 @@ nva0_disp_sclass[] = {
 };
 
 static struct nouveau_oclass
-nva0_disp_base_oclass[] = {
-	{ GT200_DISP, &nv50_disp_base_ofuncs },
+nva0_disp_main_oclass[] = {
+	{ GT200_DISP, &nv50_disp_main_ofuncs },
 	{}
 };
 
@@ -116,7 +116,7 @@ nva0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;
 
-	nv_engine(priv)->sclass = nva0_disp_base_oclass;
+	nv_engine(priv)->sclass = nva0_disp_main_oclass;
 	nv_engine(priv)->cclass = &nv50_disp_cclass;
 	nv_subdev(priv)->intr = nv50_disp_intr;
 	INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
@@ -144,9 +144,9 @@ nva0_disp_oclass = &(struct nv50_disp_impl) {
 	},
 	.base.vblank = &nv50_disp_vblank_func,
 	.base.outp = nv50_disp_outp_sclass,
-	.mthd.core = &nv84_disp_mast_mthd_chan,
-	.mthd.base = &nv84_disp_sync_mthd_chan,
+	.mthd.core = &nv84_disp_core_mthd_chan,
+	.mthd.base = &nv84_disp_base_mthd_chan,
 	.mthd.ovly = &nva0_disp_ovly_mthd_chan,
 	.mthd.prev = 0x000004,
-	.head.scanoutpos = nv50_disp_base_scanoutpos,
+	.head.scanoutpos = nv50_disp_main_scanoutpos,
 }.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
index 22969f355aae..951d79f9b781 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nva3.c
@@ -35,8 +35,8 @@
 
 static struct nouveau_oclass
 nva3_disp_sclass[] = {
-	{ GT214_DISP_CORE_CHANNEL_DMA, &nv50_disp_mast_ofuncs.base },
-	{ GT214_DISP_BASE_CHANNEL_DMA, &nv50_disp_sync_ofuncs.base },
+	{ GT214_DISP_CORE_CHANNEL_DMA, &nv50_disp_core_ofuncs.base },
+	{ GT214_DISP_BASE_CHANNEL_DMA, &nv50_disp_base_ofuncs.base },
 	{ GT214_DISP_OVERLAY_CHANNEL_DMA, &nv50_disp_ovly_ofuncs.base },
 	{ GT214_DISP_OVERLAY, &nv50_disp_oimm_ofuncs.base },
 	{ GT214_DISP_CURSOR, &nv50_disp_curs_ofuncs.base },
@@ -44,8 +44,8 @@ nva3_disp_sclass[] = {
 };
 
 static struct nouveau_oclass
-nva3_disp_base_oclass[] = {
-	{ GT214_DISP, &nv50_disp_base_ofuncs },
+nva3_disp_main_oclass[] = {
+	{ GT214_DISP, &nv50_disp_main_ofuncs },
 	{}
 };
 
@@ -71,7 +71,7 @@ nva3_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;
 
-	nv_engine(priv)->sclass = nva3_disp_base_oclass;
+	nv_engine(priv)->sclass = nva3_disp_main_oclass;
 	nv_engine(priv)->cclass = &nv50_disp_cclass;
 	nv_subdev(priv)->intr = nv50_disp_intr;
 	INIT_WORK(&priv->supervisor, nv50_disp_intr_supervisor);
@@ -100,9 +100,9 @@ nva3_disp_oclass = &(struct nv50_disp_impl) {
 	},
 	.base.vblank = &nv50_disp_vblank_func,
 	.base.outp = nv94_disp_outp_sclass,
-	.mthd.core = &nv94_disp_mast_mthd_chan,
-	.mthd.base = &nv84_disp_sync_mthd_chan,
+	.mthd.core = &nv94_disp_core_mthd_chan,
+	.mthd.base = &nv84_disp_base_mthd_chan,
 	.mthd.ovly = &nv84_disp_ovly_mthd_chan,
 	.mthd.prev = 0x000004,
-	.head.scanoutpos = nv50_disp_base_scanoutpos,
+	.head.scanoutpos = nv50_disp_main_scanoutpos,
 }.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
index 747e64bb9c06..181a2d57e356 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -51,12 +51,14 @@ nvd0_disp_chan_uevent_fini(struct nvkm_event *event, int type, int index)
 {
 	struct nv50_disp_priv *priv = container_of(event, typeof(*priv), uevent);
 	nv_mask(priv, 0x610090, 0x00000001 << index, 0x00000000 << index);
+	nv_wr32(priv, 0x61008c, 0x00000001 << index);
 }

 static void
 nvd0_disp_chan_uevent_init(struct nvkm_event *event, int types, int index)
 {
 	struct nv50_disp_priv *priv = container_of(event, typeof(*priv), uevent);
+	nv_wr32(priv, 0x61008c, 0x00000001 << index);
 	nv_mask(priv, 0x610090, 0x00000001 << index, 0x00000001 << index);
 }

@@ -151,7 +153,7 @@ nvd0_disp_dmac_fini(struct nouveau_object *object, bool suspend)
 ******************************************************************************/

 const struct nv50_disp_mthd_list
-nvd0_disp_mast_mthd_base = {
+nvd0_disp_core_mthd_base = {
 	.mthd = 0x0000,
 	.addr = 0x000000,
 	.data = {
@@ -164,7 +166,7 @@ nvd0_disp_mast_mthd_base = {
 };

 const struct nv50_disp_mthd_list
-nvd0_disp_mast_mthd_dac = {
+nvd0_disp_core_mthd_dac = {
 	.mthd = 0x0020,
 	.addr = 0x000020,
 	.data = {
@@ -177,7 +179,7 @@ nvd0_disp_mast_mthd_dac = {
 };

 const struct nv50_disp_mthd_list
-nvd0_disp_mast_mthd_sor = {
+nvd0_disp_core_mthd_sor = {
 	.mthd = 0x0020,
 	.addr = 0x000020,
 	.data = {
@@ -190,7 +192,7 @@ nvd0_disp_mast_mthd_sor = {
 };

 const struct nv50_disp_mthd_list
-nvd0_disp_mast_mthd_pior = {
+nvd0_disp_core_mthd_pior = {
 	.mthd = 0x0020,
 	.addr = 0x000020,
 	.data = {
@@ -203,7 +205,7 @@ nvd0_disp_mast_mthd_pior = {
 };

 static const struct nv50_disp_mthd_list
-nvd0_disp_mast_mthd_head = {
+nvd0_disp_core_mthd_head = {
 	.mthd = 0x0300,
 	.addr = 0x000300,
 	.data = {
@@ -277,21 +279,21 @@ nvd0_disp_mast_mthd_head = {
 };

 static const struct nv50_disp_mthd_chan
-nvd0_disp_mast_mthd_chan = {
+nvd0_disp_core_mthd_chan = {
 	.name = "Core",
 	.addr = 0x000000,
 	.data = {
-		{ "Global", 1, &nvd0_disp_mast_mthd_base },
-		{ "DAC", 3, &nvd0_disp_mast_mthd_dac },
-		{ "SOR", 8, &nvd0_disp_mast_mthd_sor },
-		{ "PIOR", 4, &nvd0_disp_mast_mthd_pior },
-		{ "HEAD", 4, &nvd0_disp_mast_mthd_head },
+		{ "Global", 1, &nvd0_disp_core_mthd_base },
+		{ "DAC", 3, &nvd0_disp_core_mthd_dac },
+		{ "SOR", 8, &nvd0_disp_core_mthd_sor },
+		{ "PIOR", 4, &nvd0_disp_core_mthd_pior },
+		{ "HEAD", 4, &nvd0_disp_core_mthd_head },
 		{}
 	}
 };

 static int
-nvd0_disp_mast_init(struct nouveau_object *object)
+nvd0_disp_core_init(struct nouveau_object *object)
 {
 	struct nv50_disp_priv *priv = (void *)object->engine;
 	struct nv50_disp_dmac *mast = (void *)object;
@@ -322,7 +324,7 @@ nvd0_disp_mast_init(struct nouveau_object *object)
 }

 static int
-nvd0_disp_mast_fini(struct nouveau_object *object, bool suspend)
+nvd0_disp_core_fini(struct nouveau_object *object, bool suspend)
 {
 	struct nv50_disp_priv *priv = (void *)object->engine;
 	struct nv50_disp_dmac *mast = (void *)object;
@@ -344,11 +346,11 @@ nvd0_disp_mast_fini(struct nouveau_object *object, bool suspend)
 }

 struct nv50_disp_chan_impl
-nvd0_disp_mast_ofuncs = {
-	.base.ctor = nv50_disp_mast_ctor,
+nvd0_disp_core_ofuncs = {
+	.base.ctor = nv50_disp_core_ctor,
 	.base.dtor = nv50_disp_dmac_dtor,
-	.base.init = nvd0_disp_mast_init,
-	.base.fini = nvd0_disp_mast_fini,
+	.base.init = nvd0_disp_core_init,
+	.base.fini = nvd0_disp_core_fini,
 	.base.ntfy = nv50_disp_chan_ntfy,
 	.base.map = nv50_disp_chan_map,
 	.base.rd32 = nv50_disp_chan_rd32,
@@ -363,7 +365,7 @@ nvd0_disp_mast_ofuncs = {
 ******************************************************************************/

 static const struct nv50_disp_mthd_list
-nvd0_disp_sync_mthd_base = {
+nvd0_disp_base_mthd_base = {
 	.mthd = 0x0000,
 	.addr = 0x000000,
 	.data = {
@@ -413,7 +415,7 @@ nvd0_disp_sync_mthd_base = {
 };

 static const struct nv50_disp_mthd_list
-nvd0_disp_sync_mthd_image = {
+nvd0_disp_base_mthd_image = {
 	.mthd = 0x0400,
 	.addr = 0x000400,
 	.data = {
@@ -427,19 +429,19 @@ nvd0_disp_sync_mthd_image = {
 };

 const struct nv50_disp_mthd_chan
-nvd0_disp_sync_mthd_chan = {
+nvd0_disp_base_mthd_chan = {
 	.name = "Base",
 	.addr = 0x001000,
 	.data = {
-		{ "Global", 1, &nvd0_disp_sync_mthd_base },
-		{ "Image", 2, &nvd0_disp_sync_mthd_image },
+		{ "Global", 1, &nvd0_disp_base_mthd_base },
+		{ "Image", 2, &nvd0_disp_base_mthd_image },
 		{}
 	}
 };

 struct nv50_disp_chan_impl
-nvd0_disp_sync_ofuncs = {
-	.base.ctor = nv50_disp_sync_ctor,
+nvd0_disp_base_ofuncs = {
+	.base.ctor = nv50_disp_base_ctor,
 	.base.dtor = nv50_disp_dmac_dtor,
 	.base.init = nvd0_disp_dmac_init,
 	.base.fini = nvd0_disp_dmac_fini,
@@ -624,7 +626,7 @@ nvd0_disp_curs_ofuncs = {
 ******************************************************************************/

 int
-nvd0_disp_base_scanoutpos(NV50_DISP_MTHD_V0)
+nvd0_disp_main_scanoutpos(NV50_DISP_MTHD_V0)
 {
 	const u32 total = nv_rd32(priv, 0x640414 + (head * 0x300));
 	const u32 blanke = nv_rd32(priv, 0x64041c + (head * 0x300));
@@ -656,7 +658,7 @@ nvd0_disp_base_scanoutpos(NV50_DISP_MTHD_V0)
 }

 static int
-nvd0_disp_base_init(struct nouveau_object *object)
+nvd0_disp_main_init(struct nouveau_object *object)
 {
 	struct nv50_disp_priv *priv = (void *)object->engine;
 	struct nv50_disp_base *base = (void *)object;
@@ -725,7 +727,7 @@ nvd0_disp_base_init(struct nouveau_object *object)
 }

 static int
-nvd0_disp_base_fini(struct nouveau_object *object, bool suspend)
+nvd0_disp_main_fini(struct nouveau_object *object, bool suspend)
 {
 	struct nv50_disp_priv *priv = (void *)object->engine;
 	struct nv50_disp_base *base = (void *)object;
@@ -737,25 +739,25 @@ nvd0_disp_base_fini(struct nouveau_object *object, bool suspend)
 }

 struct nouveau_ofuncs
-nvd0_disp_base_ofuncs = {
-	.ctor = nv50_disp_base_ctor,
-	.dtor = nv50_disp_base_dtor,
-	.init = nvd0_disp_base_init,
-	.fini = nvd0_disp_base_fini,
-	.mthd = nv50_disp_base_mthd,
+nvd0_disp_main_ofuncs = {
+	.ctor = nv50_disp_main_ctor,
+	.dtor = nv50_disp_main_dtor,
+	.init = nvd0_disp_main_init,
+	.fini = nvd0_disp_main_fini,
+	.mthd = nv50_disp_main_mthd,
 	.ntfy = nouveau_disp_ntfy,
 };

 static struct nouveau_oclass
-nvd0_disp_base_oclass[] = {
-	{ GF110_DISP, &nvd0_disp_base_ofuncs },
+nvd0_disp_main_oclass[] = {
+	{ GF110_DISP, &nvd0_disp_main_ofuncs },
 	{}
 };

 static struct nouveau_oclass
 nvd0_disp_sclass[] = {
-	{ GF110_DISP_CORE_CHANNEL_DMA, &nvd0_disp_mast_ofuncs.base },
-	{ GF110_DISP_BASE_CHANNEL_DMA, &nvd0_disp_sync_ofuncs.base },
+	{ GF110_DISP_CORE_CHANNEL_DMA, &nvd0_disp_core_ofuncs.base },
+	{ GF110_DISP_BASE_CHANNEL_DMA, &nvd0_disp_base_ofuncs.base },
 	{ GF110_DISP_OVERLAY_CONTROL_DMA, &nvd0_disp_ovly_ofuncs.base },
 	{ GF110_DISP_OVERLAY, &nvd0_disp_oimm_ofuncs.base },
 	{ GF110_DISP_CURSOR, &nvd0_disp_curs_ofuncs.base },
@@ -1055,6 +1057,9 @@ nvd0_disp_intr_unk2_2(struct nv50_disp_priv *priv, int head)

 		if (nvkm_output_dp_train(outp, pclk, true))
 			ERR("link not trained before attach\n");
+	} else {
+		if (priv->sor.magic)
+			priv->sor.magic(outp);
 	}

 	exec_clkcmp(priv, head, 0, pclk, &conf);
@@ -1063,10 +1068,18 @@
 		addr = 0x612280 + (ffs(outp->info.or) - 1) * 0x800;
 		data = 0x00000000;
 	} else {
-		if (outp->info.type == DCB_OUTPUT_DP)
-			nvd0_disp_intr_unk2_2_tu(priv, head, &outp->info);
 		addr = 0x612300 + (ffs(outp->info.or) - 1) * 0x800;
 		data = (conf & 0x0100) ? 0x00000101 : 0x00000000;
+		switch (outp->info.type) {
+		case DCB_OUTPUT_TMDS:
+			nv_mask(priv, addr, 0x007c0000, 0x00280000);
+			break;
+		case DCB_OUTPUT_DP:
+			nvd0_disp_intr_unk2_2_tu(priv, head, &outp->info);
+			break;
+		default:
+			break;
+		}
 	}

 	nv_mask(priv, addr, 0x00000707, data);
@@ -1259,7 +1272,7 @@ nvd0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;

-	nv_engine(priv)->sclass = nvd0_disp_base_oclass;
+	nv_engine(priv)->sclass = nvd0_disp_main_oclass;
 	nv_engine(priv)->cclass = &nv50_disp_cclass;
 	nv_subdev(priv)->intr = nvd0_disp_intr;
 	INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor);
@@ -1292,9 +1305,9 @@ nvd0_disp_oclass = &(struct nv50_disp_impl) {
 	},
 	.base.vblank = &nvd0_disp_vblank_func,
 	.base.outp = nvd0_disp_outp_sclass,
-	.mthd.core = &nvd0_disp_mast_mthd_chan,
-	.mthd.base = &nvd0_disp_sync_mthd_chan,
+	.mthd.core = &nvd0_disp_core_mthd_chan,
+	.mthd.base = &nvd0_disp_base_mthd_chan,
 	.mthd.ovly = &nvd0_disp_ovly_mthd_chan,
 	.mthd.prev = -0x020000,
-	.head.scanoutpos = nvd0_disp_base_scanoutpos,
+	.head.scanoutpos = nvd0_disp_main_scanoutpos,
 }.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
index db144b2cf06b..55debec7e68f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nve0.c
@@ -34,7 +34,7 @@
 ******************************************************************************/

 static const struct nv50_disp_mthd_list
-nve0_disp_mast_mthd_head = {
+nve0_disp_core_mthd_head = {
 	.mthd = 0x0300,
 	.addr = 0x000300,
 	.data = {
@@ -113,15 +113,15 @@ nve0_disp_mast_mthd_head = {
 };

 const struct nv50_disp_mthd_chan
-nve0_disp_mast_mthd_chan = {
+nve0_disp_core_mthd_chan = {
 	.name = "Core",
 	.addr = 0x000000,
 	.data = {
-		{ "Global", 1, &nvd0_disp_mast_mthd_base },
-		{ "DAC", 3, &nvd0_disp_mast_mthd_dac },
-		{ "SOR", 8, &nvd0_disp_mast_mthd_sor },
-		{ "PIOR", 4, &nvd0_disp_mast_mthd_pior },
-		{ "HEAD", 4, &nve0_disp_mast_mthd_head },
+		{ "Global", 1, &nvd0_disp_core_mthd_base },
+		{ "DAC", 3, &nvd0_disp_core_mthd_dac },
+		{ "SOR", 8, &nvd0_disp_core_mthd_sor },
+		{ "PIOR", 4, &nvd0_disp_core_mthd_pior },
+		{ "HEAD", 4, &nve0_disp_core_mthd_head },
 		{}
 	}
 };
@@ -200,8 +200,8 @@ nve0_disp_ovly_mthd_chan = {

 static struct nouveau_oclass
 nve0_disp_sclass[] = {
-	{ GK104_DISP_CORE_CHANNEL_DMA, &nvd0_disp_mast_ofuncs.base },
-	{ GK104_DISP_BASE_CHANNEL_DMA, &nvd0_disp_sync_ofuncs.base },
+	{ GK104_DISP_CORE_CHANNEL_DMA, &nvd0_disp_core_ofuncs.base },
+	{ GK104_DISP_BASE_CHANNEL_DMA, &nvd0_disp_base_ofuncs.base },
 	{ GK104_DISP_OVERLAY_CONTROL_DMA, &nvd0_disp_ovly_ofuncs.base },
 	{ GK104_DISP_OVERLAY, &nvd0_disp_oimm_ofuncs.base },
 	{ GK104_DISP_CURSOR, &nvd0_disp_curs_ofuncs.base },
@@ -209,8 +209,8 @@ nve0_disp_sclass[] = {
 };

 static struct nouveau_oclass
-nve0_disp_base_oclass[] = {
-	{ GK104_DISP, &nvd0_disp_base_ofuncs },
+nve0_disp_main_oclass[] = {
+	{ GK104_DISP, &nvd0_disp_main_ofuncs },
 	{}
 };

@@ -237,7 +237,7 @@ nve0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;

-	nv_engine(priv)->sclass = nve0_disp_base_oclass;
+	nv_engine(priv)->sclass = nve0_disp_main_oclass;
 	nv_engine(priv)->cclass = &nv50_disp_cclass;
 	nv_subdev(priv)->intr = nvd0_disp_intr;
 	INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor);
@@ -264,9 +264,9 @@ nve0_disp_oclass = &(struct nv50_disp_impl) {
 	},
 	.base.vblank = &nvd0_disp_vblank_func,
 	.base.outp = nvd0_disp_outp_sclass,
-	.mthd.core = &nve0_disp_mast_mthd_chan,
-	.mthd.base = &nvd0_disp_sync_mthd_chan,
+	.mthd.core = &nve0_disp_core_mthd_chan,
+	.mthd.base = &nvd0_disp_base_mthd_chan,
 	.mthd.ovly = &nve0_disp_ovly_mthd_chan,
 	.mthd.prev = -0x020000,
-	.head.scanoutpos = nvd0_disp_base_scanoutpos,
+	.head.scanoutpos = nvd0_disp_main_scanoutpos,
 }.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
index 402d7d67d806..3e7e2d28744c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvf0.c
@@ -35,8 +35,8 @@

 static struct nouveau_oclass
 nvf0_disp_sclass[] = {
-	{ GK110_DISP_CORE_CHANNEL_DMA, &nvd0_disp_mast_ofuncs.base },
-	{ GK110_DISP_BASE_CHANNEL_DMA, &nvd0_disp_sync_ofuncs.base },
+	{ GK110_DISP_CORE_CHANNEL_DMA, &nvd0_disp_core_ofuncs.base },
+	{ GK110_DISP_BASE_CHANNEL_DMA, &nvd0_disp_base_ofuncs.base },
 	{ GK104_DISP_OVERLAY_CONTROL_DMA, &nvd0_disp_ovly_ofuncs.base },
 	{ GK104_DISP_OVERLAY, &nvd0_disp_oimm_ofuncs.base },
 	{ GK104_DISP_CURSOR, &nvd0_disp_curs_ofuncs.base },
@@ -44,8 +44,8 @@ nvf0_disp_sclass[] = {
 };

 static struct nouveau_oclass
-nvf0_disp_base_oclass[] = {
-	{ GK110_DISP, &nvd0_disp_base_ofuncs },
+nvf0_disp_main_oclass[] = {
+	{ GK110_DISP, &nvd0_disp_main_ofuncs },
 	{}
 };

@@ -72,7 +72,7 @@ nvf0_disp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	if (ret)
 		return ret;

-	nv_engine(priv)->sclass = nvf0_disp_base_oclass;
+	nv_engine(priv)->sclass = nvf0_disp_main_oclass;
 	nv_engine(priv)->cclass = &nv50_disp_cclass;
 	nv_subdev(priv)->intr = nvd0_disp_intr;
 	INIT_WORK(&priv->supervisor, nvd0_disp_intr_supervisor);
@@ -99,9 +99,9 @@ nvf0_disp_oclass = &(struct nv50_disp_impl) {
 	},
 	.base.vblank = &nvd0_disp_vblank_func,
 	.base.outp = nvd0_disp_outp_sclass,
-	.mthd.core = &nve0_disp_mast_mthd_chan,
-	.mthd.base = &nvd0_disp_sync_mthd_chan,
+	.mthd.core = &nve0_disp_core_mthd_chan,
+	.mthd.base = &nvd0_disp_base_mthd_chan,
 	.mthd.ovly = &nve0_disp_ovly_mthd_chan,
 	.mthd.prev = -0x020000,
-	.head.scanoutpos = nvd0_disp_base_scanoutpos,
+	.head.scanoutpos = nvd0_disp_main_scanoutpos,
 }.base.base;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/outp.c b/drivers/gpu/drm/nouveau/core/engine/disp/outp.c
index a5ff00a9cedc..bbd9b6fdc90f 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/outp.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/outp.c
@@ -85,7 +85,10 @@ nvkm_output_create_(struct nouveau_object *parent,
 		 dcbE->sorconf.link : 0, dcbE->connector, dcbE->i2c_index,
 		 dcbE->bus, dcbE->heads);

-	outp->port = i2c->find(i2c, outp->info.i2c_index);
+	if (outp->info.type != DCB_OUTPUT_DP)
+		outp->port = i2c->find(i2c, NV_I2C_PORT(outp->info.i2c_index));
+	else
+		outp->port = i2c->find(i2c, NV_I2C_AUX(outp->info.i2c_index));
 	outp->edid = outp->port;

 	data = nvbios_connEp(bios, outp->info.connector, &ver, &hdr, &connE);
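A note for readers tracing the output-path lookup above: port handles are typed, so a single i2c->find() call can address both plain DDC ports and DP AUX channels. The NV_I2C_PORT()/NV_I2C_AUX() encodings appear later in this series (subdev/i2c.h); the table and find_port() below are hypothetical stand-ins, a minimal sketch rather than the driver's implementation.

#include <stdio.h>

/* Same encoding as the NV_I2C_PORT()/NV_I2C_AUX() macros added in
 * subdev/i2c.h later in this merge: plain I2C ports live at 0x00+n,
 * AUX channels at 0x10+n, so one lookup covers both namespaces. */
#define NV_I2C_PORT(n) (0x00 + (n))
#define NV_I2C_AUX(n)  (0x10 + (n))

/* Hypothetical stand-in for i2c->find(): match on the encoded handle. */
static int
find_port(const int *handles, int nr, int handle)
{
	int i;
	for (i = 0; i < nr; i++) {
		if (handles[i] == handle)
			return i;
	}
	return -1;
}

int
main(void)
{
	/* a made-up port list: two plain I2C ports, one AUX channel */
	int handles[] = { NV_I2C_PORT(0), NV_I2C_PORT(1), NV_I2C_AUX(1) };
	int i2c_index = 1;

	/* non-DP outputs resolve a plain port, DP outputs an AUX channel */
	printf("i2c -> slot %d\n", find_port(handles, 3, NV_I2C_PORT(i2c_index)));
	printf("aux -> slot %d\n", find_port(handles, 3, NV_I2C_AUX(i2c_index)));
	return 0;
}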
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sorgm204.c b/drivers/gpu/drm/nouveau/core/engine/disp/sorgm204.c
new file mode 100644
index 000000000000..0b4fad39e9a6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sorgm204.c
@@ -0,0 +1,144 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <core/os.h>
26
27#include <subdev/bios.h>
28#include <subdev/bios/dcb.h>
29#include <subdev/bios/dp.h>
30#include <subdev/bios/init.h>
31#include <subdev/timer.h>
32
33#include "nv50.h"
34
35static inline u32
36gm204_sor_soff(struct nvkm_output_dp *outp)
37{
38 return (ffs(outp->base.info.or) - 1) * 0x800;
39}
40
41static inline u32
42gm204_sor_loff(struct nvkm_output_dp *outp)
43{
44 return gm204_sor_soff(outp) + !(outp->base.info.sorconf.link & 1) * 0x80;
45}
46
47void
48gm204_sor_magic(struct nvkm_output *outp)
49{
50 struct nv50_disp_priv *priv = (void *)nouveau_disp(outp);
51 const u32 soff = outp->or * 0x100;
52 const u32 data = outp->or + 1;
53 if (outp->info.sorconf.link & 1)
54 nv_mask(priv, 0x612308 + soff, 0x0000001f, 0x00000000 | data);
55 if (outp->info.sorconf.link & 2)
56 nv_mask(priv, 0x612388 + soff, 0x0000001f, 0x00000010 | data);
57}
58
59static inline u32
60gm204_sor_dp_lane_map(struct nv50_disp_priv *priv, u8 lane)
61{
62 return lane * 0x08;
63}
64
65static int
66gm204_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
67{
68 struct nv50_disp_priv *priv = (void *)nouveau_disp(outp);
69 const u32 soff = gm204_sor_soff(outp);
70 const u32 data = 0x01010101 * pattern;
71 if (outp->base.info.sorconf.link & 1)
72 nv_mask(priv, 0x61c110 + soff, 0x0f0f0f0f, data);
73 else
74 nv_mask(priv, 0x61c12c + soff, 0x0f0f0f0f, data);
75 return 0;
76}
77
78static int
79gm204_sor_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr)
80{
81 struct nv50_disp_priv *priv = (void *)nouveau_disp(outp);
82 const u32 soff = gm204_sor_soff(outp);
83 const u32 loff = gm204_sor_loff(outp);
84 u32 mask = 0, i;
85
86 for (i = 0; i < nr; i++)
87 mask |= 1 << (gm204_sor_dp_lane_map(priv, i) >> 3);
88
89 nv_mask(priv, 0x61c130 + loff, 0x0000000f, mask);
90 nv_mask(priv, 0x61c034 + soff, 0x80000000, 0x80000000);
91 nv_wait(priv, 0x61c034 + soff, 0x80000000, 0x00000000);
92 return 0;
93}
94
95static int
96gm204_sor_dp_drv_ctl(struct nvkm_output_dp *outp, int ln, int vs, int pe, int pc)
97{
98 struct nv50_disp_priv *priv = (void *)nouveau_disp(outp);
99 struct nouveau_bios *bios = nouveau_bios(priv);
100 const u32 shift = gm204_sor_dp_lane_map(priv, ln);
101 const u32 loff = gm204_sor_loff(outp);
102 u32 addr, data[4];
103 u8 ver, hdr, cnt, len;
104 struct nvbios_dpout info;
105 struct nvbios_dpcfg ocfg;
106
107 addr = nvbios_dpout_match(bios, outp->base.info.hasht,
108 outp->base.info.hashm,
109 &ver, &hdr, &cnt, &len, &info);
110 if (!addr)
111 return -ENODEV;
112
113 addr = nvbios_dpcfg_match(bios, addr, pc, vs, pe,
114 &ver, &hdr, &cnt, &len, &ocfg);
115 if (!addr)
116 return -EINVAL;
117
118 data[0] = nv_rd32(priv, 0x61c118 + loff) & ~(0x000000ff << shift);
119 data[1] = nv_rd32(priv, 0x61c120 + loff) & ~(0x000000ff << shift);
120 data[2] = nv_rd32(priv, 0x61c130 + loff);
121 if ((data[2] & 0x0000ff00) < (ocfg.tx_pu << 8) || ln == 0)
122 data[2] = (data[2] & ~0x0000ff00) | (ocfg.tx_pu << 8);
123 nv_wr32(priv, 0x61c118 + loff, data[0] | (ocfg.dc << shift));
124 nv_wr32(priv, 0x61c120 + loff, data[1] | (ocfg.pe << shift));
125 nv_wr32(priv, 0x61c130 + loff, data[2] | (ocfg.tx_pu << 8));
126 data[3] = nv_rd32(priv, 0x61c13c + loff) & ~(0x000000ff << shift);
127 nv_wr32(priv, 0x61c13c + loff, data[3] | (ocfg.pc << shift));
128 return 0;
129}
130
131struct nvkm_output_dp_impl
132gm204_sor_dp_impl = {
133 .base.base.handle = DCB_OUTPUT_DP,
134 .base.base.ofuncs = &(struct nouveau_ofuncs) {
135 .ctor = _nvkm_output_dp_ctor,
136 .dtor = _nvkm_output_dp_dtor,
137 .init = _nvkm_output_dp_init,
138 .fini = _nvkm_output_dp_fini,
139 },
140 .pattern = gm204_sor_dp_pattern,
141 .lnk_pwr = gm204_sor_dp_lnk_pwr,
142 .lnk_ctl = nvd0_sor_dp_lnk_ctl,
143 .drv_ctl = gm204_sor_dp_drv_ctl,
144};
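One detail worth noting in gm204_sor_dp_pattern() above: multiplying the 4-bit training-pattern value by 0x01010101 broadcasts it into all four byte lanes of the register, which is why a single write under the 0x0f0f0f0f mask programs every DP lane at once. A standalone check of that identity:

#include <assert.h>
#include <stdint.h>

int
main(void)
{
	uint32_t pattern;

	/* 0x01010101 * p replicates p into each of the four bytes, so the
	 * 0x0f0f0f0f mask in gm204_sor_dp_pattern() updates all lanes */
	for (pattern = 0; pattern <= 0x0f; pattern++) {
		uint32_t data = 0x01010101 * pattern;
		assert((data & 0x0f0f0f0f) == data);
		assert(((data >> 24) & 0xff) == pattern);
	}
	return 0;
}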
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
index 7b7bbc3e459e..fdab2939070c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornvd0.c
@@ -60,7 +60,7 @@ nvd0_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern)
 	return 0;
 }

-static int
+int
 nvd0_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef)
 {
 	struct nv50_disp_priv *priv = (void *)nouveau_disp(outp);
diff --git a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c
index 3fc4f0b0eaca..19f5f6522962 100644
--- a/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/dmaobj/nvd0.c
@@ -51,6 +51,7 @@ nvd0_dmaobj_bind(struct nouveau_dmaobj *dmaobj,
 	case GK104_DISP_CORE_CHANNEL_DMA:
 	case GK110_DISP_CORE_CHANNEL_DMA:
 	case GM107_DISP_CORE_CHANNEL_DMA:
+	case GM204_DISP_CORE_CHANNEL_DMA:
 	case GF110_DISP_BASE_CHANNEL_DMA:
 	case GK104_DISP_BASE_CHANNEL_DMA:
 	case GK110_DISP_BASE_CHANNEL_DMA:
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
index f8734eb74eaa..6a8db7c80bd1 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -792,7 +792,7 @@ nve0_fifo_intr_fault(struct nve0_fifo_priv *priv, int unit)
 	nouveau_engctx_put(engctx);
 }

-static const struct nouveau_bitfield nve0_fifo_pbdma_intr[] = {
+static const struct nouveau_bitfield nve0_fifo_pbdma_intr_0[] = {
 	{ 0x00000001, "MEMREQ" },
 	{ 0x00000002, "MEMACK_TIMEOUT" },
 	{ 0x00000004, "MEMACK_EXTRA" },
@@ -827,9 +827,10 @@ static const struct nouveau_bitfield nve0_fifo_pbdma_intr[] = {
 };

 static void
-nve0_fifo_intr_pbdma(struct nve0_fifo_priv *priv, int unit)
+nve0_fifo_intr_pbdma_0(struct nve0_fifo_priv *priv, int unit)
 {
-	u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000));
+	u32 mask = nv_rd32(priv, 0x04010c + (unit * 0x2000));
+	u32 stat = nv_rd32(priv, 0x040108 + (unit * 0x2000)) & mask;
 	u32 addr = nv_rd32(priv, 0x0400c0 + (unit * 0x2000));
 	u32 data = nv_rd32(priv, 0x0400c4 + (unit * 0x2000));
 	u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0xfff;
@@ -840,11 +841,12 @@ nve0_fifo_intr_pbdma(struct nve0_fifo_priv *priv, int unit)
 	if (stat & 0x00800000) {
 		if (!nve0_fifo_swmthd(priv, chid, mthd, data))
 			show &= ~0x00800000;
+		nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008);
 	}

 	if (show) {
 		nv_error(priv, "PBDMA%d:", unit);
-		nouveau_bitfield_print(nve0_fifo_pbdma_intr, show);
+		nouveau_bitfield_print(nve0_fifo_pbdma_intr_0, show);
 		pr_cont("\n");
 		nv_error(priv,
 			 "PBDMA%d: ch %d [%s] subc %d mthd 0x%04x data 0x%08x\n",
@@ -853,10 +855,37 @@ nve0_fifo_intr_pbdma(struct nve0_fifo_priv *priv, int unit)
 			 subc, mthd, data);
 	}

-	nv_wr32(priv, 0x0400c0 + (unit * 0x2000), 0x80600008);
 	nv_wr32(priv, 0x040108 + (unit * 0x2000), stat);
 }

+static const struct nouveau_bitfield nve0_fifo_pbdma_intr_1[] = {
+	{ 0x00000001, "HCE_RE_ILLEGAL_OP" },
+	{ 0x00000002, "HCE_RE_ALIGNB" },
+	{ 0x00000004, "HCE_PRIV" },
+	{ 0x00000008, "HCE_ILLEGAL_MTHD" },
+	{ 0x00000010, "HCE_ILLEGAL_CLASS" },
+	{}
+};
+
+static void
+nve0_fifo_intr_pbdma_1(struct nve0_fifo_priv *priv, int unit)
+{
+	u32 mask = nv_rd32(priv, 0x04014c + (unit * 0x2000));
+	u32 stat = nv_rd32(priv, 0x040148 + (unit * 0x2000)) & mask;
+	u32 chid = nv_rd32(priv, 0x040120 + (unit * 0x2000)) & 0xfff;
+
+	if (stat) {
+		nv_error(priv, "PBDMA%d:", unit);
+		nouveau_bitfield_print(nve0_fifo_pbdma_intr_1, stat);
+		pr_cont("\n");
+		nv_error(priv, "PBDMA%d: ch %d %08x %08x\n", unit, chid,
+			 nv_rd32(priv, 0x040150 + (unit * 0x2000)),
+			 nv_rd32(priv, 0x040154 + (unit * 0x2000)));
+	}
+
+	nv_wr32(priv, 0x040148 + (unit * 0x2000), stat);
+}
+
 static void
 nve0_fifo_intr_runlist(struct nve0_fifo_priv *priv)
 {
@@ -939,7 +968,8 @@ nve0_fifo_intr(struct nouveau_subdev *subdev)
 		u32 mask = nv_rd32(priv, 0x0025a0);
 		while (mask) {
 			u32 unit = __ffs(mask);
-			nve0_fifo_intr_pbdma(priv, unit);
+			nve0_fifo_intr_pbdma_0(priv, unit);
+			nve0_fifo_intr_pbdma_1(priv, unit);
 			nv_wr32(priv, 0x0025a0, (1 << unit));
 			mask &= ~(1 << unit);
 		}
@@ -1022,6 +1052,12 @@ nve0_fifo_init(struct nouveau_object *object)
 		nv_wr32(priv, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTREN */
 	}

+	/* PBDMA[n].HCE */
+	for (i = 0; i < priv->spoon_nr; i++) {
+		nv_wr32(priv, 0x040148 + (i * 0x2000), 0xffffffff); /* INTR */
+		nv_wr32(priv, 0x04014c + (i * 0x2000), 0xffffffff); /* INTREN */
+	}
+
 	nv_wr32(priv, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);

 	nv_wr32(priv, 0x002100, 0xffffffff);
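Both PBDMA handlers above share one shape: read the INTREN mask, AND it into the raw status so only enabled interrupts are reported, log whatever remains, then write the handled bits back to acknowledge them while leaving masked bits pending. A minimal userspace sketch of that mask-then-ack pattern; the register file and accessors below are stand-ins, not nouveau API:

#include <stdint.h>
#include <stdio.h>

#define INTR   0x48	/* pending bits; hardware-style write-1-to-clear */
#define INTREN 0x4c	/* interrupt enable mask */

static uint32_t regs[0x100];

static uint32_t rd32(uint32_t addr) { return regs[addr]; }

static void
wr32(uint32_t addr, uint32_t data)
{
	if (addr == INTR)
		regs[addr] &= ~data;	/* ack: clear only the written bits */
	else
		regs[addr] = data;
}

static void
intr_handler(void)
{
	/* service only bits that are both pending and enabled */
	uint32_t mask = rd32(INTREN);
	uint32_t stat = rd32(INTR) & mask;

	if (stat)
		printf("intr: %08x\n", stat);

	wr32(INTR, stat);	/* masked-off bits stay pending */
}

int
main(void)
{
	wr32(INTREN, 0x00000013);
	regs[INTR] = 0x0000001f;	/* raise five bits, two of them masked */
	intr_handler();			/* prints intr: 00000013 */
	printf("still pending: %08x\n", rd32(INTR));	/* 0000000c */
	return 0;
}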
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
index 30fd1dc64f93..17251e4b9e86 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
@@ -1557,7 +1557,7 @@ nvc0_graph_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 		    nvc0_graph_ctor_fw(priv, "fuc409d", &priv->fuc409d) ||
 		    nvc0_graph_ctor_fw(priv, "fuc41ac", &priv->fuc41ac) ||
 		    nvc0_graph_ctor_fw(priv, "fuc41ad", &priv->fuc41ad))
-			return -EINVAL;
+			return -ENODEV;
 		priv->firmware = true;
 	}

diff --git a/drivers/gpu/drm/nouveau/core/include/core/device.h b/drivers/gpu/drm/nouveau/core/include/core/device.h
index 1d9d893929bb..2ec2e50d3676 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/device.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/device.h
@@ -16,6 +16,7 @@ enum nv_subdev_type {
 	 *  to during POST.
 	 */
 	NVDEV_SUBDEV_DEVINIT,
+	NVDEV_SUBDEV_IBUS,
 	NVDEV_SUBDEV_GPIO,
 	NVDEV_SUBDEV_I2C,
 	NVDEV_SUBDEV_DEVINIT_LAST = NVDEV_SUBDEV_I2C,
@@ -31,7 +32,6 @@ enum nv_subdev_type {
 	NVDEV_SUBDEV_TIMER,
 	NVDEV_SUBDEV_FB,
 	NVDEV_SUBDEV_LTC,
-	NVDEV_SUBDEV_IBUS,
 	NVDEV_SUBDEV_INSTMEM,
 	NVDEV_SUBDEV_VM,
 	NVDEV_SUBDEV_BAR,
@@ -92,6 +92,7 @@ struct nouveau_device {
 		GM100 = 0x110,
 	} card_type;
 	u32 chipset;
+	u8  chiprev;
 	u32 crystal;

 	struct nouveau_oclass *oclass[NVDEV_SUBDEV_NR];
@@ -158,6 +159,12 @@ nv_device_is_pci(struct nouveau_device *device)
 	return device->pdev != NULL;
 }

+static inline bool
+nv_device_is_cpu_coherent(struct nouveau_device *device)
+{
+	return (!IS_ENABLED(CONFIG_ARM) && nv_device_is_pci(device));
+}
+
 static inline struct device *
 nv_device_base(struct nouveau_device *device)
 {
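The new nv_device_is_cpu_coherent() helper encodes a single policy: a PCI-attached GPU on a non-ARM host is assumed to snoop CPU caches. A hedged sketch of the kind of decision a buffer-setup path can hang off it; the enum and consumer function are illustrative only, not part of this diff:

/* Illustrative consumer: the helper is real (added above), the rest is a
 * made-up caller showing the intended use. */
enum buf_cache_mode { BUF_CACHED, BUF_UNCACHED };

static enum buf_cache_mode
pick_cache_mode(struct nouveau_device *device)
{
	/* coherent: GPU DMA snoops CPU caches, so cached CPU mappings are
	 * safe; otherwise use uncached mappings to avoid manual flushes */
	if (nv_device_is_cpu_coherent(device))
		return BUF_CACHED;
	return BUF_UNCACHED;
}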
diff --git a/drivers/gpu/drm/nouveau/core/include/core/handle.h b/drivers/gpu/drm/nouveau/core/include/core/handle.h
index ceb67d770875..d22a59138a9b 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/handle.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/handle.h
@@ -23,11 +23,6 @@ void nouveau_handle_destroy(struct nouveau_handle *);
 int nouveau_handle_init(struct nouveau_handle *);
 int nouveau_handle_fini(struct nouveau_handle *, bool suspend);

-int nouveau_handle_new(struct nouveau_object *, u32 parent, u32 handle,
-		       u16 oclass, void *data, u32 size,
-		       struct nouveau_object **);
-int nouveau_handle_del(struct nouveau_object *, u32 parent, u32 handle);
-
 struct nouveau_object *
 nouveau_handle_ref(struct nouveau_object *, u32 name);

diff --git a/drivers/gpu/drm/nouveau/core/include/core/object.h b/drivers/gpu/drm/nouveau/core/include/core/object.h
index d7039482d6fd..2e2afa502c99 100644
--- a/drivers/gpu/drm/nouveau/core/include/core/object.h
+++ b/drivers/gpu/drm/nouveau/core/include/core/object.h
@@ -203,21 +203,4 @@ nv_memcmp(void *obj, u32 addr, const char *str, u32 len)
 	return 0;
 }

-#include <core/handle.h>
-
-static inline int
-nouveau_object_new(struct nouveau_object *client, u32 parent, u32 handle,
-		   u16 oclass, void *data, u32 size,
-		   struct nouveau_object **pobject)
-{
-	return nouveau_handle_new(client, parent, handle, oclass,
-				  data, size, pobject);
-}
-
-static inline int
-nouveau_object_del(struct nouveau_object *client, u32 parent, u32 handle)
-{
-	return nouveau_handle_del(client, parent, handle);
-}
-
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/include/engine/disp.h b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
index 7a64f347b385..fc307f1317ff 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/disp.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/disp.h
@@ -31,5 +31,6 @@ extern struct nouveau_oclass *nvd0_disp_oclass;
 extern struct nouveau_oclass *nve0_disp_oclass;
 extern struct nouveau_oclass *nvf0_disp_oclass;
 extern struct nouveau_oclass *gm107_disp_oclass;
+extern struct nouveau_oclass *gm204_disp_oclass;

 #endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/M0203.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/M0203.h
new file mode 100644
index 000000000000..1f84d3612dd8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/M0203.h
@@ -0,0 +1,31 @@
1#ifndef __NVBIOS_M0203_H__
2#define __NVBIOS_M0203_H__
3
4struct nvbios_M0203T {
5#define M0203T_TYPE_RAMCFG 0x00
6 u8 type;
7 u16 pointer;
8};
9
10u32 nvbios_M0203Te(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
11u32 nvbios_M0203Tp(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
12 struct nvbios_M0203T *);
13
14struct nvbios_M0203E {
15#define M0203E_TYPE_DDR2 0x0
16#define M0203E_TYPE_DDR3 0x1
17#define M0203E_TYPE_GDDR3 0x2
18#define M0203E_TYPE_GDDR5 0x3
19#define M0203E_TYPE_SKIP 0xf
20 u8 type;
21 u8 strap;
22 u8 group;
23};
24
25u32 nvbios_M0203Ee(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr);
26u32 nvbios_M0203Ep(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr,
27 struct nvbios_M0203E *);
28u32 nvbios_M0203Em(struct nouveau_bios *, u8 ramcfg, u8 *ver, u8 *hdr,
29 struct nvbios_M0203E *);
30
31#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h
index 10b57a19a7de..c9bb112895af 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/i2c.h
@@ -4,11 +4,14 @@
 struct nouveau_bios;

 enum dcb_i2c_type {
-	DCB_I2C_NV04_BIT = 0,
-	DCB_I2C_NV4E_BIT = 4,
-	DCB_I2C_NVIO_BIT = 5,
-	DCB_I2C_NVIO_AUX = 6,
-	DCB_I2C_UNUSED = 0xff
+	/* matches bios type field prior to ccb 4.1 */
+	DCB_I2C_NV04_BIT = 0x00,
+	DCB_I2C_NV4E_BIT = 0x04,
+	DCB_I2C_NVIO_BIT = 0x05,
+	DCB_I2C_NVIO_AUX = 0x06,
+	/* made up - mostly */
+	DCB_I2C_PMGR = 0x80,
+	DCB_I2C_UNUSED = 0xff
 };

 struct dcb_i2c_entry {
@@ -16,6 +19,7 @@ struct dcb_i2c_entry {
 	u8 drive;
 	u8 sense;
 	u8 share;
+	u8 auxch;
 };

 u16 dcb_i2c_table(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/image.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/image.h
new file mode 100644
index 000000000000..3348b4580843
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/image.h
@@ -0,0 +1,13 @@
1#ifndef __NVBIOS_IMAGE_H__
2#define __NVBIOS_IMAGE_H__
3
4struct nvbios_image {
5 u32 base;
6 u32 size;
7 u8 type;
8 bool last;
9};
10
11bool nvbios_image(struct nouveau_bios *, int, struct nvbios_image *);
12
13#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/npde.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/npde.h
new file mode 100644
index 000000000000..b18413d951e5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/npde.h
@@ -0,0 +1,12 @@
1#ifndef __NVBIOS_NPDE_H__
2#define __NVBIOS_NPDE_H__
3
4struct nvbios_npdeT {
5 u32 image_size;
6 bool last;
7};
8
9u32 nvbios_npdeTe(struct nouveau_bios *, u32);
10u32 nvbios_npdeTp(struct nouveau_bios *, u32, struct nvbios_npdeT *);
11
12#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/pcir.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/pcir.h
new file mode 100644
index 000000000000..3d634a06dca1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/pcir.h
@@ -0,0 +1,18 @@
1#ifndef __NVBIOS_PCIR_H__
2#define __NVBIOS_PCIR_H__
3
4struct nvbios_pcirT {
5 u16 vendor_id;
6 u16 device_id;
7 u8 class_code[3];
8 u32 image_size;
9 u16 image_rev;
10 u8 image_type;
11 bool last;
12};
13
14u32 nvbios_pcirTe(struct nouveau_bios *, u32, u8 *ver, u16 *hdr);
15u32 nvbios_pcirTp(struct nouveau_bios *, u32, u8 *ver, u16 *hdr,
16 struct nvbios_pcirT *);
17
18#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/pmu.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/pmu.h
new file mode 100644
index 000000000000..9de593deaea8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/pmu.h
@@ -0,0 +1,37 @@
1#ifndef __NVBIOS_PMU_H__
2#define __NVBIOS_PMU_H__
3
4struct nvbios_pmuT {
5};
6
7u32 nvbios_pmuTe(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len);
8u32 nvbios_pmuTp(struct nouveau_bios *, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
9 struct nvbios_pmuT *);
10
11struct nvbios_pmuE {
12 u8 type;
13 u32 data;
14};
15
16u32 nvbios_pmuEe(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr);
17u32 nvbios_pmuEp(struct nouveau_bios *, int idx, u8 *ver, u8 *hdr,
18 struct nvbios_pmuE *);
19
20struct nvbios_pmuR {
21 u32 boot_addr_pmu;
22 u32 boot_addr;
23 u32 boot_size;
24 u32 code_addr_pmu;
25 u32 code_addr;
26 u32 code_size;
27 u32 init_addr_pmu;
28
29 u32 data_addr_pmu;
30 u32 data_addr;
31 u32 data_size;
32 u32 args_addr_pmu;
33};
34
35bool nvbios_pmuRm(struct nouveau_bios *, u8 type, struct nvbios_pmuR *);
36
37#endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/bios/ramcfg.h b/drivers/gpu/drm/nouveau/core/include/subdev/bios/ramcfg.h
index a685bbd04568..4a0e0ceb41ba 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/bios/ramcfg.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/bios/ramcfg.h
@@ -43,8 +43,9 @@ struct nvbios_ramcfg {
 		unsigned ramcfg_10_02_08:1;
 		unsigned ramcfg_10_02_10:1;
 		unsigned ramcfg_10_02_20:1;
-		unsigned ramcfg_10_02_40:1;
+		unsigned ramcfg_10_DLLoff:1;
 		unsigned ramcfg_10_03_0f:4;
+		unsigned ramcfg_10_04_01:1;
 		unsigned ramcfg_10_05:8;
 		unsigned ramcfg_10_06:8;
 		unsigned ramcfg_10_07:8;
@@ -95,9 +96,29 @@ struct nvbios_ramcfg {
 	union {
 		struct {
 			unsigned timing_10_WR:8;
+			unsigned timing_10_WTR:8;
 			unsigned timing_10_CL:8;
+			unsigned timing_10_RC:8;
+			/*empty: 4 */
+			unsigned timing_10_RFC:8;	/* Byte 5 */
+			/*empty: 6 */
+			unsigned timing_10_RAS:8;	/* Byte 7 */
+			/*empty: 8 */
+			unsigned timing_10_RP:8;	/* Byte 9 */
+			unsigned timing_10_RCDRD:8;
+			unsigned timing_10_RCDWR:8;
+			unsigned timing_10_RRD:8;
+			unsigned timing_10_13:8;
 			unsigned timing_10_ODT:3;
+			/* empty: 15 */
+			unsigned timing_10_16:8;
+			/* empty: 17 */
+			unsigned timing_10_18:8;
 			unsigned timing_10_CWL:8;
+			unsigned timing_10_20:8;
+			unsigned timing_10_21:8;
+			/* empty: 22, 23 */
+			unsigned timing_10_24:8;
 		};
 		struct {
 			unsigned timing_20_2e_03:2;
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h b/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h
index e292271a84e4..e007a9d44683 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/devinit.h
@@ -30,5 +30,6 @@ extern struct nouveau_oclass *nva3_devinit_oclass;
 extern struct nouveau_oclass *nvaf_devinit_oclass;
 extern struct nouveau_oclass *nvc0_devinit_oclass;
 extern struct nouveau_oclass *gm107_devinit_oclass;
+extern struct nouveau_oclass *gm204_devinit_oclass;

 #endif
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
index 1b937c2c25ae..d94ccacb40bf 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
@@ -8,6 +8,8 @@
 #include <subdev/bios/i2c.h>

 #define NV_I2C_PORT(n)    (0x00 + (n))
+#define NV_I2C_AUX(n)     (0x10 + (n))
+#define NV_I2C_EXT(n)     (0x20 + (n))
 #define NV_I2C_DEFAULT(n) (0x80 + (n))

 #define NV_I2C_TYPE_DCBI2C(n) (0x0000 | (n))
@@ -89,6 +91,7 @@ extern struct nouveau_oclass *nv94_i2c_oclass;
 extern struct nouveau_oclass *nvd0_i2c_oclass;
 extern struct nouveau_oclass *gf117_i2c_oclass;
 extern struct nouveau_oclass *nve0_i2c_oclass;
+extern struct nouveau_oclass *gm204_i2c_oclass;

 static inline int
 nv_rdi2cr(struct nouveau_i2c_port *port, u8 addr, u8 reg)
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/pwr.h b/drivers/gpu/drm/nouveau/core/include/subdev/pwr.h
index bf3d1f611333..f2427bf5aeed 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/pwr.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/pwr.h
@@ -48,6 +48,8 @@ void nouveau_memx_wait(struct nouveau_memx *,
 		       u32 addr, u32 mask, u32 data, u32 nsec);
 void nouveau_memx_nsec(struct nouveau_memx *, u32 nsec);
 void nouveau_memx_wait_vblank(struct nouveau_memx *);
+void nouveau_memx_train(struct nouveau_memx *);
+int  nouveau_memx_train_result(struct nouveau_pwr *, u32 *, int);
 void nouveau_memx_block(struct nouveau_memx *);
 void nouveau_memx_unblock(struct nouveau_memx *);

diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/volt.h b/drivers/gpu/drm/nouveau/core/include/subdev/volt.h
index 820b62ffd75b..67db5e58880d 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/volt.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/volt.h
@@ -52,6 +52,7 @@ int _nouveau_volt_init(struct nouveau_object *);
 #define _nouveau_volt_fini _nouveau_subdev_fini

 extern struct nouveau_oclass nv40_volt_oclass;
+extern struct nouveau_oclass gk20a_volt_oclass;

 int nouveau_voltgpio_init(struct nouveau_volt *);
 int nouveau_voltgpio_get(struct nouveau_volt *);
diff --git a/drivers/gpu/drm/nouveau/core/os.h b/drivers/gpu/drm/nouveau/core/os.h
index ccfa21d72ddc..bdd05ee7ec72 100644
--- a/drivers/gpu/drm/nouveau/core/os.h
+++ b/drivers/gpu/drm/nouveau/core/os.h
@@ -23,6 +23,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/power_supply.h>
 #include <linux/clk.h>
+#include <linux/regulator/consumer.h>

 #include <asm/unaligned.h>

diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/M0203.c b/drivers/gpu/drm/nouveau/core/subdev/bios/M0203.c
new file mode 100644
index 000000000000..28906b16d4e5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/M0203.c
@@ -0,0 +1,129 @@
1/*
2 * Copyright 2014 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/bios.h>
26#include <subdev/bios/bit.h>
27#include <subdev/bios/M0203.h>
28
29u32
30nvbios_M0203Te(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
31{
32 struct bit_entry bit_M;
33 u32 data = 0x00000000;
34
35 if (!bit_entry(bios, 'M', &bit_M)) {
36 if (bit_M.version == 2 && bit_M.length > 0x04)
37 data = nv_ro16(bios, bit_M.offset + 0x03);
38 if (data) {
39 *ver = nv_ro08(bios, data + 0x00);
40 switch (*ver) {
41 case 0x10:
42 *hdr = nv_ro08(bios, data + 0x01);
43 *len = nv_ro08(bios, data + 0x02);
44 *cnt = nv_ro08(bios, data + 0x03);
45 return data;
46 default:
47 break;
48 }
49 }
50 }
51
52 return 0x00000000;
53}
54
55u32
56nvbios_M0203Tp(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
57 struct nvbios_M0203T *info)
58{
59 u32 data = nvbios_M0203Te(bios, ver, hdr, cnt, len);
60 memset(info, 0x00, sizeof(*info));
61 switch (!!data * *ver) {
62 case 0x10:
63 info->type = nv_ro08(bios, data + 0x04);
64 info->pointer = nv_ro16(bios, data + 0x05);
65 break;
66 default:
67 break;
68 }
69 return data;
70}
71
72u32
73nvbios_M0203Ee(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr)
74{
75 u8 cnt, len;
76 u32 data = nvbios_M0203Te(bios, ver, hdr, &cnt, &len);
77 if (data && idx < cnt) {
78 data = data + *hdr + idx * len;
79 *hdr = len;
80 return data;
81 }
82 return 0x00000000;
83}
84
85u32
86nvbios_M0203Ep(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr,
87 struct nvbios_M0203E *info)
88{
89 u32 data = nvbios_M0203Ee(bios, idx, ver, hdr);
90 memset(info, 0x00, sizeof(*info));
91 switch (!!data * *ver) {
92 case 0x10:
93 info->type = (nv_ro08(bios, data + 0x00) & 0x0f) >> 0;
94 info->strap = (nv_ro08(bios, data + 0x00) & 0xf0) >> 4;
95 info->group = (nv_ro08(bios, data + 0x01) & 0x0f) >> 0;
96 return data;
97 default:
98 break;
99 }
100 return 0x00000000;
101}
102
103u32
104nvbios_M0203Em(struct nouveau_bios *bios, u8 ramcfg, u8 *ver, u8 *hdr,
105 struct nvbios_M0203E *info)
106{
107 struct nvbios_M0203T M0203T;
108 u8 cnt, len, idx = 0xff;
109 u32 data;
110
111 if (!nvbios_M0203Tp(bios, ver, hdr, &cnt, &len, &M0203T)) {
112 nv_warn(bios, "M0203T not found\n");
113 return 0x00000000;
114 }
115
116 while ((data = nvbios_M0203Ep(bios, ++idx, ver, hdr, info))) {
117 switch (M0203T.type) {
118 case M0203T_TYPE_RAMCFG:
119 if (info->strap != ramcfg)
120 continue;
121 return data;
122 default:
123 nv_warn(bios, "M0203T type %02x\n", M0203T.type);
124 return 0x00000000;
125 }
126 }
127
128 return data;
129}
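The M0203 parser above follows the usual nvbios table idiom: the Te/Tp pair locates and parses the table header, Ee/Ep walk individual entries, and the Em variant matches an entry against a key. A short sketch of how a memory-type lookup by ramcfg strap might call it; the caller and its error handling are assumed, not taken from this diff:

/* Sketch of a caller; `bios` and the ramcfg strap are assumed to have
 * been resolved already by the memory subdev. */
static u8
ram_type_from_strap(struct nouveau_bios *bios, u8 ramcfg)
{
	struct nvbios_M0203E M0203E;
	u8 ver, hdr;

	/* nvbios_M0203Em() walks M0203T_TYPE_RAMCFG entries until one
	 * whose strap field matches, filling in M0203E on success */
	if (!nvbios_M0203Em(bios, ramcfg, &ver, &hdr, &M0203E))
		return M0203E_TYPE_SKIP;
	return M0203E.type;	/* e.g. M0203E_TYPE_GDDR5 */
}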
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
index d45704a2c2df..7df3a273553d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
@@ -31,6 +31,8 @@
 #include <subdev/bios/bmp.h>
 #include <subdev/bios/bit.h>

+#include "priv.h"
+
 u8
 nvbios_checksum(const u8 *data, int size)
 {
@@ -56,362 +58,21 @@ nvbios_findstr(const u8 *data, int size, const char *str, int len)
 	return 0;
 }

+int
+nvbios_extend(struct nouveau_bios *bios, u32 length)
+{
+	if (bios->size < length) {
+		u8 *prev = bios->data;
+		if (!(bios->data = kmalloc(length, GFP_KERNEL))) {
+			bios->data = prev;
+			return -ENOMEM;
+		}
+		memcpy(bios->data, prev, bios->size);
+		bios->size = length;
+		kfree(prev);
+		return 1;
+	}
+	return 0;
+}
+
-#if defined(__powerpc__)
-static void
-nouveau_bios_shadow_of(struct nouveau_bios *bios)
-{
-	struct pci_dev *pdev = nv_device(bios)->pdev;
-	struct device_node *dn;
-	const u32 *data;
-	int size;
-
-	dn = pci_device_to_OF_node(pdev);
-	if (!dn) {
-		nv_info(bios, "Unable to get the OF node\n");
-		return;
-	}
-
-	data = of_get_property(dn, "NVDA,BMP", &size);
-	if (data && size) {
-		bios->size = size;
-		bios->data = kmalloc(bios->size, GFP_KERNEL);
-		if (bios->data)
-			memcpy(bios->data, data, size);
-	}
-}
-#endif
-
-static void
-nouveau_bios_shadow_pramin(struct nouveau_bios *bios)
-{
-	struct nouveau_device *device = nv_device(bios);
-	u64 addr = 0;
-	u32 bar0 = 0;
-	int i;
-
-	if (device->card_type >= NV_50) {
-		if (device->card_type >= NV_C0 && device->card_type < GM100) {
-			if (nv_rd32(bios, 0x022500) & 0x00000001)
-				return;
-		} else
-		if (device->card_type >= GM100) {
-			if (nv_rd32(bios, 0x021c04) & 0x00000001)
-				return;
-		}
-
-		addr = nv_rd32(bios, 0x619f04);
-		if (!(addr & 0x00000008)) {
-			nv_debug(bios, "... not enabled\n");
-			return;
-		}
-		if ( (addr & 0x00000003) != 1) {
-			nv_debug(bios, "... not in vram\n");
-			return;
-		}
-
-		addr = (addr & 0xffffff00) << 8;
-		if (!addr) {
-			addr = (u64)nv_rd32(bios, 0x001700) << 16;
-			addr += 0xf0000;
-		}
-
-		bar0 = nv_mask(bios, 0x001700, 0xffffffff, addr >> 16);
-	}
-
-	/* bail if no rom signature */
-	if (nv_rd08(bios, 0x700000) != 0x55 ||
-	    nv_rd08(bios, 0x700001) != 0xaa)
-		goto out;
-
-	bios->size = nv_rd08(bios, 0x700002) * 512;
-	if (!bios->size)
-		goto out;
-
-	bios->data = kmalloc(bios->size, GFP_KERNEL);
-	if (bios->data) {
-		for (i = 0; i < bios->size; i++)
-			nv_wo08(bios, i, nv_rd08(bios, 0x700000 + i));
-	}
-
-out:
-	if (device->card_type >= NV_50)
-		nv_wr32(bios, 0x001700, bar0);
-}
-
-static void
-nouveau_bios_shadow_prom(struct nouveau_bios *bios)
-{
-	struct nouveau_device *device = nv_device(bios);
-	u32 pcireg, access;
-	u16 pcir;
-	int i;
-
-	/* there is no prom on nv4x IGP's */
-	if (device->card_type == NV_40 && device->chipset >= 0x4c)
-		return;
-
-	/* enable access to rom */
-	if (device->card_type >= NV_50)
-		pcireg = 0x088050;
-	else
-		pcireg = 0x001850;
-	access = nv_mask(bios, pcireg, 0x00000001, 0x00000000);
-
-	/* WARNING: PROM accesses should always be 32-bits aligned. Other
-	 * accesses work on most chipset but do not on Kepler chipsets
-	 */
-
-	/* bail if no rom signature, with a workaround for a PROM reading
-	 * issue on some chipsets.  the first read after a period of
-	 * inactivity returns the wrong result, so retry the first header
-	 * byte a few times before giving up as a workaround
-	 */
-	i = 16;
-	do {
-		u32 data = le32_to_cpu(nv_rd32(bios, 0x300000)) & 0xffff;
-		if (data == 0xaa55)
-			break;
-	} while (i--);
-
-	if (!i)
-		goto out;
-
-	/* read entire bios image to system memory */
-	bios->size = (le32_to_cpu(nv_rd32(bios, 0x300000)) >> 16) & 0xff;
-	bios->size = bios->size * 512;
-	if (!bios->size)
-		goto out;
-
-	bios->data = kmalloc(bios->size, GFP_KERNEL);
-	if (!bios->data)
-		goto out;
-
-	for (i = 0; i < bios->size; i += 4)
-		((u32 *)bios->data)[i/4] = nv_rd32(bios, 0x300000 + i);
-
-	/* check the PCI record header */
-	pcir = nv_ro16(bios, 0x0018);
-	if (bios->data[pcir + 0] != 'P' ||
-	    bios->data[pcir + 1] != 'C' ||
-	    bios->data[pcir + 2] != 'I' ||
-	    bios->data[pcir + 3] != 'R') {
-		bios->size = 0;
-		kfree(bios->data);
-	}
-
-out:
-	/* disable access to rom */
-	nv_wr32(bios, pcireg, access);
-}
-
-#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
-int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
-bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
-#else
-static inline bool
-nouveau_acpi_rom_supported(struct pci_dev *pdev) {
-	return false;
-}
-
-static inline int
-nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) {
-	return -EINVAL;
-}
-#endif
-
-static void
-nouveau_bios_shadow_acpi(struct nouveau_bios *bios)
-{
-	struct pci_dev *pdev = nv_device(bios)->pdev;
-	int ret, cnt, i;
-
-	if (!nouveau_acpi_rom_supported(pdev)) {
-		bios->data = NULL;
-		return;
-	}
-
-	bios->size = 0;
-	bios->data = kmalloc(4096, GFP_KERNEL);
-	if (bios->data) {
-		if (nouveau_acpi_get_bios_chunk(bios->data, 0, 4096) == 4096)
-			bios->size = bios->data[2] * 512;
-		kfree(bios->data);
-	}
-
-	if (!bios->size)
-		return;
-
-	bios->data = kmalloc(bios->size, GFP_KERNEL);
-	if (bios->data) {
-		/* disobey the acpi spec - much faster on at least w530 ... */
-		ret = nouveau_acpi_get_bios_chunk(bios->data, 0, bios->size);
-		if (ret != bios->size ||
-		    nvbios_checksum(bios->data, bios->size)) {
-			/* ... that didn't work, ok, i'll be good now */
-			for (i = 0; i < bios->size; i += cnt) {
-				cnt = min((bios->size - i), (u32)4096);
-				ret = nouveau_acpi_get_bios_chunk(bios->data, i, cnt);
-				if (ret != cnt)
-					break;
-			}
-		}
-	}
-}
-
-static void
-nouveau_bios_shadow_pci(struct nouveau_bios *bios)
-{
-	struct pci_dev *pdev = nv_device(bios)->pdev;
-	size_t size;
-
-	if (!pci_enable_rom(pdev)) {
-		void __iomem *rom = pci_map_rom(pdev, &size);
-		if (rom && size) {
-			bios->data = kmalloc(size, GFP_KERNEL);
-			if (bios->data) {
-				memcpy_fromio(bios->data, rom, size);
-				bios->size = size;
-			}
-		}
-		if (rom)
-			pci_unmap_rom(pdev, rom);
-
-		pci_disable_rom(pdev);
-	}
-}
-
-static void
-nouveau_bios_shadow_platform(struct nouveau_bios *bios)
-{
-	struct pci_dev *pdev = nv_device(bios)->pdev;
-	size_t size;
-
-	void __iomem *rom = pci_platform_rom(pdev, &size);
-	if (rom && size) {
-		bios->data = kmalloc(size, GFP_KERNEL);
-		if (bios->data) {
-			memcpy_fromio(bios->data, rom, size);
-			bios->size = size;
-		}
-	}
-}
-
-static int
-nouveau_bios_score(struct nouveau_bios *bios, const bool writeable)
-{
-	if (bios->size < 3 || !bios->data || bios->data[0] != 0x55 ||
-	    bios->data[1] != 0xAA) {
-		nv_info(bios, "... signature not found\n");
-		return 0;
-	}
-
-	if (nvbios_checksum(bios->data,
-			    min_t(u32, bios->data[2] * 512, bios->size))) {
-		nv_info(bios, "... checksum invalid\n");
-		/* if a ro image is somewhat bad, it's probably all rubbish */
-		return writeable ? 2 : 1;
-	}
-
-	nv_info(bios, "... appears to be valid\n");
-	return 3;
-}
-
-struct methods {
-	const char desc[16];
-	void (*shadow)(struct nouveau_bios *);
-	const bool rw;
-	int score;
-	u32 size;
-	u8 *data;
-};
-
-static int
-nouveau_bios_shadow(struct nouveau_bios *bios)
-{
-	struct methods shadow_methods[] = {
-#if defined(__powerpc__)
-		{ "OpenFirmware", nouveau_bios_shadow_of, true, 0, 0, NULL },
-#endif
-		{ "PRAMIN", nouveau_bios_shadow_pramin, true, 0, 0, NULL },
-		{ "PROM", nouveau_bios_shadow_prom, false, 0, 0, NULL },
-		{ "ACPI", nouveau_bios_shadow_acpi, true, 0, 0, NULL },
-		{ "PCIROM", nouveau_bios_shadow_pci, true, 0, 0, NULL },
-		{ "PLATFORM", nouveau_bios_shadow_platform, true, 0, 0, NULL },
340 {}
341 };
342 struct methods *mthd, *best;
343 const struct firmware *fw;
344 const char *optarg;
345 int optlen, ret;
346 char *source;
347
348 optarg = nouveau_stropt(nv_device(bios)->cfgopt, "NvBios", &optlen);
349 source = optarg ? kstrndup(optarg, optlen, GFP_KERNEL) : NULL;
350 if (source) {
351 /* try to match one of the built-in methods */
352 mthd = shadow_methods;
353 do {
354 if (strcasecmp(source, mthd->desc))
355 continue;
356 nv_info(bios, "source: %s\n", mthd->desc);
357
358 mthd->shadow(bios);
359 mthd->score = nouveau_bios_score(bios, mthd->rw);
360 if (mthd->score) {
361 kfree(source);
362 return 0;
363 }
364 } while ((++mthd)->shadow);
365
366 /* attempt to load firmware image */
367 ret = request_firmware(&fw, source, &nv_device(bios)->pdev->dev);
368 if (ret == 0) {
369 bios->size = fw->size;
370 bios->data = kmemdup(fw->data, fw->size, GFP_KERNEL);
371 release_firmware(fw);
372
373 nv_info(bios, "image: %s\n", source);
374 if (nouveau_bios_score(bios, 1)) {
375 kfree(source);
376 return 0;
377 }
378
379 kfree(bios->data);
380 bios->data = NULL;
381 }
382
383 nv_error(bios, "source \'%s\' invalid\n", source);
384 kfree(source);
385 }
386
387 mthd = shadow_methods;
388 do {
389 nv_info(bios, "checking %s for image...\n", mthd->desc);
390 mthd->shadow(bios);
391 mthd->score = nouveau_bios_score(bios, mthd->rw);
392 mthd->size = bios->size;
393 mthd->data = bios->data;
394 bios->data = NULL;
395 } while (mthd->score != 3 && (++mthd)->shadow);
396
397 mthd = shadow_methods;
398 best = mthd;
399 do {
400 if (mthd->score > best->score) {
401 kfree(best->data);
402 best = mthd;
403 }
404 } while ((++mthd)->shadow);
405
406 if (best->score) {
407 nv_info(bios, "using image from %s\n", best->desc);
408 bios->size = best->size;
409 bios->data = best->data;
410 return 0;
411 }
412
413 nv_error(bios, "unable to locate usable image\n");
414 return -EINVAL;
415} 76}
416 77
417static u8 78static u8
@@ -472,7 +133,7 @@ nouveau_bios_ctor(struct nouveau_object *parent,
472 if (ret) 133 if (ret)
473 return ret; 134 return ret;
474 135
475 ret = nouveau_bios_shadow(bios); 136 ret = nvbios_shadow(bios);
476 if (ret) 137 if (ret)
477 return ret; 138 return ret;
478 139
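Note: the removed nouveau_bios_score() above graded a shadowed image on a fixed 0-3 scale, and the replacement code in shadow.c keeps the same grading per image while summing it across the ROM's image chain. A restated sketch of that scale; the enum and its names are illustrative only, not part of the driver:

	enum nvbios_image_score {
		SCORE_NO_SIGNATURE = 0,	/* 0x55aa header missing */
		SCORE_BAD_RO       = 1,	/* checksum failed, read-only source */
		SCORE_BAD_RW       = 2,	/* checksum failed, writeable source */
		SCORE_VALID        = 3,	/* signature and checksum both good */
	};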
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
index bd8d348385b3..96099aff8b41 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
@@ -42,7 +42,7 @@ dcb_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
42 42
43 *ver = nv_ro08(bios, dcb); 43 *ver = nv_ro08(bios, dcb);
44 44
45 if (*ver >= 0x41) { 45 if (*ver >= 0x42) {
46 nv_warn(bios, "DCB version 0x%02x unknown\n", *ver); 46 nv_warn(bios, "DCB version 0x%02x unknown\n", *ver);
47 return 0x0000; 47 return 0x0000;
48 } else 48 } else
@@ -157,17 +157,20 @@ dcb_outp_parse(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len,
157 break; 157 break;
158 } 158 }
159 159
160 switch (conf & 0x0f000000) { 160 outp->dpconf.link_nr = (conf & 0x0f000000) >> 24;
161 case 0x0f000000: 161 if (*ver < 0x41) {
162 outp->dpconf.link_nr = 4; 162 switch (outp->dpconf.link_nr) {
163 break; 163 case 0x0f:
164 case 0x03000000: 164 outp->dpconf.link_nr = 4;
165 outp->dpconf.link_nr = 2; 165 break;
166 break; 166 case 0x03:
167 case 0x01000000: 167 outp->dpconf.link_nr = 2;
168 default: 168 break;
169 outp->dpconf.link_nr = 1; 169 case 0x01:
170 break; 170 default:
171 outp->dpconf.link_nr = 1;
172 break;
173 }
171 } 174 }
172 175
173 /* fall-through... */ 176 /* fall-through... */
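Note: before DCB 4.1 the dpconf link field is a lane mask; from 4.1 on it is a plain lane count, which is why the decode is now gated on *ver. The same logic, pulled out into a hypothetical helper:

	static int
	dp_link_nr(u8 ver, u32 conf)
	{
		int nr = (conf & 0x0f000000) >> 24;
		if (ver >= 0x41)
			return nr;	/* 4.1+: field is already a count */
		switch (nr) {		/* pre-4.1: field is a lane mask */
		case 0x0f: return 4;
		case 0x03: return 2;
		default:   return 1;
		}
	}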
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/disp.c b/drivers/gpu/drm/nouveau/core/subdev/bios/disp.c
index 7f16e52d9bea..51f355599694 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/disp.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/disp.c
@@ -40,6 +40,7 @@ nvbios_disp_table(struct nouveau_bios *bios,
40 switch (*ver) { 40 switch (*ver) {
41 case 0x20: 41 case 0x20:
42 case 0x21: 42 case 0x21:
43 case 0x22:
43 *hdr = nv_ro08(bios, data + 0x01); 44 *hdr = nv_ro08(bios, data + 0x01);
44 *len = nv_ro08(bios, data + 0x02); 45 *len = nv_ro08(bios, data + 0x02);
45 *cnt = nv_ro08(bios, data + 0x03); 46 *cnt = nv_ro08(bios, data + 0x03);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c b/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
index f309dd657250..cef53f81f12b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/dp.c
@@ -41,6 +41,7 @@ nvbios_dp_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
41 case 0x21: 41 case 0x21:
42 case 0x30: 42 case 0x30:
43 case 0x40: 43 case 0x40:
44 case 0x41:
44 *hdr = nv_ro08(bios, data + 0x01); 45 *hdr = nv_ro08(bios, data + 0x01);
45 *len = nv_ro08(bios, data + 0x02); 46 *len = nv_ro08(bios, data + 0x02);
46 *cnt = nv_ro08(bios, data + 0x03); 47 *cnt = nv_ro08(bios, data + 0x03);
@@ -70,6 +71,7 @@ nvbios_dpout_entry(struct nouveau_bios *bios, u8 idx,
70 *cnt = nv_ro08(bios, outp + 0x04); 71 *cnt = nv_ro08(bios, outp + 0x04);
71 break; 72 break;
72 case 0x40: 73 case 0x40:
74 case 0x41:
73 *hdr = nv_ro08(bios, data + 0x04); 75 *hdr = nv_ro08(bios, data + 0x04);
74 *cnt = 0; 76 *cnt = 0;
75 *len = 0; 77 *len = 0;
@@ -108,6 +110,7 @@ nvbios_dpout_parse(struct nouveau_bios *bios, u8 idx,
108 info->script[4] = nv_ro16(bios, data + 0x10); 110 info->script[4] = nv_ro16(bios, data + 0x10);
109 break; 111 break;
110 case 0x40: 112 case 0x40:
113 case 0x41:
111 info->flags = nv_ro08(bios, data + 0x04); 114 info->flags = nv_ro08(bios, data + 0x04);
112 info->script[0] = nv_ro16(bios, data + 0x05); 115 info->script[0] = nv_ro16(bios, data + 0x05);
113 info->script[1] = nv_ro16(bios, data + 0x07); 116 info->script[1] = nv_ro16(bios, data + 0x07);
@@ -172,10 +175,11 @@ nvbios_dpcfg_parse(struct nouveau_bios *bios, u16 outp, u8 idx,
172 break; 175 break;
173 case 0x30: 176 case 0x30:
174 case 0x40: 177 case 0x40:
178 case 0x41:
175 info->pc = nv_ro08(bios, data + 0x00); 179 info->pc = nv_ro08(bios, data + 0x00);
176 info->dc = nv_ro08(bios, data + 0x01); 180 info->dc = nv_ro08(bios, data + 0x01);
177 info->pe = nv_ro08(bios, data + 0x02); 181 info->pe = nv_ro08(bios, data + 0x02);
178 info->tx_pu = nv_ro08(bios, data + 0x03); 182 info->tx_pu = nv_ro08(bios, data + 0x03) & 0x0f;
179 break; 183 break;
180 default: 184 default:
181 data = 0x0000; 185 data = 0x0000;
@@ -194,6 +198,10 @@ nvbios_dpcfg_match(struct nouveau_bios *bios, u16 outp, u8 pc, u8 vs, u8 pe,
194 u16 data; 198 u16 data;
195 199
196 if (*ver >= 0x30) { 200 if (*ver >= 0x30) {
 201	/*XXX: there's a second set of these on at least 4.1, which
 202	 * i've witnessed nvidia using instead of the first set
 203	 * on gm204. figure out what/why
 204	 */
197 const u8 vsoff[] = { 0, 4, 7, 9 }; 205 const u8 vsoff[] = { 0, 4, 7, 9 };
198 idx = (pc * 10) + vsoff[vs] + pe; 206 idx = (pc * 10) + vsoff[vs] + pe;
199 } else { 207 } else {
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c b/drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c
index b2a676e53580..49285d4f7ca5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/extdev.c
@@ -90,7 +90,7 @@ nvbios_extdev_find(struct nouveau_bios *bios, enum nvbios_extdev_type type,
90 u16 entry; 90 u16 entry;
91 91
92 i = 0; 92 i = 0;
93 while (!(entry = nvbios_extdev_entry(bios, i++, &ver, &len))) { 93 while ((entry = nvbios_extdev_entry(bios, i++, &ver, &len))) {
94 extdev_parse_entry(bios, entry, func); 94 extdev_parse_entry(bios, entry, func);
95 if (func->type == type) 95 if (func->type == type)
96 return 0; 96 return 0;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c b/drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c
index cfb9288c6d28..282320ba9264 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/i2c.c
@@ -39,6 +39,11 @@ dcb_i2c_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
39 i2c = nv_ro16(bios, dcb + 4); 39 i2c = nv_ro16(bios, dcb + 4);
40 } 40 }
41 41
42 if (i2c && *ver >= 0x42) {
43 nv_warn(bios, "ccb %02x not supported\n", *ver);
44 return 0x0000;
45 }
46
42 if (i2c && *ver >= 0x30) { 47 if (i2c && *ver >= 0x30) {
43 *ver = nv_ro08(bios, i2c + 0); 48 *ver = nv_ro08(bios, i2c + 0);
44 *hdr = nv_ro08(bios, i2c + 1); 49 *hdr = nv_ro08(bios, i2c + 1);
@@ -70,14 +75,25 @@ dcb_i2c_parse(struct nouveau_bios *bios, u8 idx, struct dcb_i2c_entry *info)
70 u8 ver, len; 75 u8 ver, len;
71 u16 ent = dcb_i2c_entry(bios, idx, &ver, &len); 76 u16 ent = dcb_i2c_entry(bios, idx, &ver, &len);
72 if (ent) { 77 if (ent) {
73 info->type = nv_ro08(bios, ent + 3); 78 if (ver >= 0x41) {
74 info->share = DCB_I2C_UNUSED; 79 if (!(nv_ro32(bios, ent) & 0x80000000))
75 if (ver < 0x30) { 80 info->type = DCB_I2C_UNUSED;
76 info->type &= 0x07; 81 else
82 info->type = DCB_I2C_PMGR;
83 } else
84 if (ver >= 0x30) {
85 info->type = nv_ro08(bios, ent + 0x03);
86 } else {
87 info->type = nv_ro08(bios, ent + 0x03) & 0x07;
77 if (info->type == 0x07) 88 if (info->type == 0x07)
78 info->type = DCB_I2C_UNUSED; 89 info->type = DCB_I2C_UNUSED;
79 } 90 }
80 91
92 info->drive = DCB_I2C_UNUSED;
93 info->sense = DCB_I2C_UNUSED;
94 info->share = DCB_I2C_UNUSED;
95 info->auxch = DCB_I2C_UNUSED;
96
81 switch (info->type) { 97 switch (info->type) {
82 case DCB_I2C_NV04_BIT: 98 case DCB_I2C_NV04_BIT:
83 info->drive = nv_ro08(bios, ent + 0); 99 info->drive = nv_ro08(bios, ent + 0);
@@ -87,12 +103,23 @@ dcb_i2c_parse(struct nouveau_bios *bios, u8 idx, struct dcb_i2c_entry *info)
87 info->drive = nv_ro08(bios, ent + 1); 103 info->drive = nv_ro08(bios, ent + 1);
88 return 0; 104 return 0;
89 case DCB_I2C_NVIO_BIT: 105 case DCB_I2C_NVIO_BIT:
90 case DCB_I2C_NVIO_AUX:
91 info->drive = nv_ro08(bios, ent + 0) & 0x0f; 106 info->drive = nv_ro08(bios, ent + 0) & 0x0f;
92 if (nv_ro08(bios, ent + 1) & 0x01) { 107 if (nv_ro08(bios, ent + 1) & 0x01)
93 info->share = nv_ro08(bios, ent + 1) >> 1; 108 info->share = nv_ro08(bios, ent + 1) >> 1;
94 info->share &= 0x0f; 109 return 0;
95 } 110 case DCB_I2C_NVIO_AUX:
111 info->auxch = nv_ro08(bios, ent + 0) & 0x0f;
112 if (nv_ro08(bios, ent + 1) & 0x01)
113 info->share = info->auxch;
114 return 0;
115 case DCB_I2C_PMGR:
116 info->drive = (nv_ro16(bios, ent + 0) & 0x01f) >> 0;
117 if (info->drive == 0x1f)
118 info->drive = DCB_I2C_UNUSED;
119 info->auxch = (nv_ro16(bios, ent + 0) & 0x3e0) >> 5;
120 if (info->auxch == 0x1f)
121 info->auxch = DCB_I2C_UNUSED;
122 info->share = info->auxch;
96 return 0; 123 return 0;
97 case DCB_I2C_UNUSED: 124 case DCB_I2C_UNUSED:
98 return 0; 125 return 0;
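Note: the new DCB_I2C_PMGR case packs both pad indices into the entry's first 16 bits, with 0x1f as the "unused" sentinel. The bitfield split, restated as a hypothetical helper:

	static void
	pmgr_decode(u16 ent0, u8 *drive, u8 *auxch)
	{
		*drive = (ent0 & 0x01f) >> 0;	/* bits 4:0, 0x1f == unused */
		*auxch = (ent0 & 0x3e0) >> 5;	/* bits 9:5, 0x1f == unused */
	}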
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/image.c b/drivers/gpu/drm/nouveau/core/subdev/bios/image.c
new file mode 100644
index 000000000000..373f9a564ac9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/image.c
@@ -0,0 +1,78 @@
1/*
2 * Copyright 2014 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24
25#include <subdev/bios.h>
26#include <subdev/bios/image.h>
27#include <subdev/bios/pcir.h>
28#include <subdev/bios/npde.h>
29
30static bool
31nvbios_imagen(struct nouveau_bios *bios, struct nvbios_image *image)
32{
33 struct nvbios_pcirT pcir;
34 struct nvbios_npdeT npde;
35 u8 ver;
36 u16 hdr;
37 u32 data;
38
39 switch ((data = nv_ro16(bios, image->base + 0x00))) {
40 case 0xaa55:
41 case 0xbb77:
42 case 0x4e56: /* NV */
43 break;
44 default:
45 nv_debug(bios, "%08x: ROM signature (%04x) unknown\n",
46 image->base, data);
47 return false;
48 }
49
50 if (!(data = nvbios_pcirTp(bios, image->base, &ver, &hdr, &pcir)))
51 return false;
52 image->size = pcir.image_size;
53 image->type = pcir.image_type;
54 image->last = pcir.last;
55
56 if (image->type != 0x70) {
57 if (!(data = nvbios_npdeTp(bios, image->base, &npde)))
58 return true;
59 image->size = npde.image_size;
60 image->last = npde.last;
61 } else {
62 image->last = true;
63 }
64
65 return true;
66}
67
68bool
69nvbios_image(struct nouveau_bios *bios, int idx, struct nvbios_image *image)
70{
71 memset(image, 0x00, sizeof(*image));
72 do {
73 image->base += image->size;
74 if (image->last || !nvbios_imagen(bios, image))
75 return false;
76 } while(idx--);
77 return true;
78}
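Note: nvbios_image() walks the expansion ROM chain by accumulating each image's size into the next base, so callers can enumerate images by index. A minimal caller-side sketch, assuming only the interface added in this file:

	struct nvbios_image image;
	int idx;
	for (idx = 0; nvbios_image(bios, idx, &image); idx++) {
		nv_debug(bios, "image %d: base %08x size %08x type %02x%s\n",
			 idx, image.base, image.size, image.type,
			 image.last ? " (last)" : "");
		if (image.last)
			break;
	}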
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index 626380f9e4c0..c6579ef32cd1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -255,6 +255,8 @@ init_i2c(struct nvbios_init *init, int index)
255 } 255 }
256 256
257 index = init->outp->i2c_index; 257 index = init->outp->i2c_index;
258 if (init->outp->type == DCB_OUTPUT_DP)
259 index += NV_I2C_AUX(0);
258 } 260 }
259 261
260 return i2c->find(i2c, index); 262 return i2c->find(i2c, index);
@@ -278,7 +280,7 @@ init_wri2cr(struct nvbios_init *init, u8 index, u8 addr, u8 reg, u8 val)
278 return -ENODEV; 280 return -ENODEV;
279} 281}
280 282
281static int 283static u8
282init_rdauxr(struct nvbios_init *init, u32 addr) 284init_rdauxr(struct nvbios_init *init, u32 addr)
283{ 285{
284 struct nouveau_i2c_port *port = init_i2c(init, -2); 286 struct nouveau_i2c_port *port = init_i2c(init, -2);
@@ -286,20 +288,24 @@ init_rdauxr(struct nvbios_init *init, u32 addr)
286 288
287 if (port && init_exec(init)) { 289 if (port && init_exec(init)) {
288 int ret = nv_rdaux(port, addr, &data, 1); 290 int ret = nv_rdaux(port, addr, &data, 1);
289 if (ret) 291 if (ret == 0)
290 return ret; 292 return data;
291 return data; 293 trace("auxch read failed with %d\n", ret);
292 } 294 }
293 295
294 return -ENODEV; 296 return 0x00;
295} 297}
296 298
297static int 299static int
298init_wrauxr(struct nvbios_init *init, u32 addr, u8 data) 300init_wrauxr(struct nvbios_init *init, u32 addr, u8 data)
299{ 301{
300 struct nouveau_i2c_port *port = init_i2c(init, -2); 302 struct nouveau_i2c_port *port = init_i2c(init, -2);
301 if (port && init_exec(init)) 303 if (port && init_exec(init)) {
302 return nv_wraux(port, addr, &data, 1); 304 int ret = nv_wraux(port, addr, &data, 1);
305 if (ret)
306 trace("auxch write failed with %d\n", ret);
307 return ret;
308 }
303 return -ENODEV; 309 return -ENODEV;
304} 310}
305 311
@@ -838,6 +844,40 @@ init_io_or(struct nvbios_init *init)
838} 844}
839 845
840/** 846/**
847 * INIT_ANDN_REG - opcode 0x47
848 *
849 */
850static void
851init_andn_reg(struct nvbios_init *init)
852{
853 struct nouveau_bios *bios = init->bios;
854 u32 reg = nv_ro32(bios, init->offset + 1);
855 u32 mask = nv_ro32(bios, init->offset + 5);
856
857 trace("ANDN_REG\tR[0x%06x] &= ~0x%08x\n", reg, mask);
858 init->offset += 9;
859
860 init_mask(init, reg, mask, 0);
861}
862
863/**
864 * INIT_OR_REG - opcode 0x48
865 *
866 */
867static void
868init_or_reg(struct nvbios_init *init)
869{
870 struct nouveau_bios *bios = init->bios;
871 u32 reg = nv_ro32(bios, init->offset + 1);
872 u32 mask = nv_ro32(bios, init->offset + 5);
873
874 trace("OR_REG\tR[0x%06x] |= 0x%08x\n", reg, mask);
875 init->offset += 9;
876
877 init_mask(init, reg, 0, mask);
878}
879
880/**
841 * INIT_INDEX_ADDRESS_LATCHED - opcode 0x49 881 * INIT_INDEX_ADDRESS_LATCHED - opcode 0x49
842 * 882 *
843 */ 883 */
@@ -2068,6 +2108,8 @@ static struct nvbios_init_opcode {
2068 [0x3a] = { init_dp_condition }, 2108 [0x3a] = { init_dp_condition },
2069 [0x3b] = { init_io_mask_or }, 2109 [0x3b] = { init_io_mask_or },
2070 [0x3c] = { init_io_or }, 2110 [0x3c] = { init_io_or },
2111 [0x47] = { init_andn_reg },
2112 [0x48] = { init_or_reg },
2071 [0x49] = { init_idx_addr_latched }, 2113 [0x49] = { init_idx_addr_latched },
2072 [0x4a] = { init_io_restrict_pll2 }, 2114 [0x4a] = { init_io_restrict_pll2 },
2073 [0x4b] = { init_pll2 }, 2115 [0x4b] = { init_pll2 },
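Note: both new opcodes reduce to the driver's existing read-modify-write helper, init_mask(init, reg, clear, set), which computes (R[reg] & ~clear) | set; INIT_ANDN_REG passes (mask, 0) and INIT_OR_REG passes (0, mask). The equivalent operation written plainly, with init_rd32/init_wr32 as assumed accessor names:

	static void
	rmw(struct nvbios_init *init, u32 reg, u32 clear, u32 set)
	{
		u32 tmp = init_rd32(init, reg);		/* read  */
		init_wr32(init, reg, (tmp & ~clear) | set); /* modify, write */
	}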
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/npde.c b/drivers/gpu/drm/nouveau/core/subdev/bios/npde.c
new file mode 100644
index 000000000000..d694716a166c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/npde.c
@@ -0,0 +1,59 @@
1/*
2 * Copyright 2014 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24
25#include <subdev/bios.h>
26#include <subdev/bios/npde.h>
27#include <subdev/bios/pcir.h>
28
29u32
30nvbios_npdeTe(struct nouveau_bios *bios, u32 base)
31{
32 struct nvbios_pcirT pcir;
33 u8 ver; u16 hdr;
34 u32 data = nvbios_pcirTp(bios, base, &ver, &hdr, &pcir);
35 if (data = (data + hdr + 0x0f) & ~0x0f, data) {
36 switch (nv_ro32(bios, data + 0x00)) {
37 case 0x4544504e: /* NPDE */
38 break;
39 default:
40 nv_debug(bios, "%08x: NPDE signature (%08x) unknown\n",
41 data, nv_ro32(bios, data + 0x00));
42 data = 0;
43 break;
44 }
45 }
46 return data;
47}
48
49u32
50nvbios_npdeTp(struct nouveau_bios *bios, u32 base, struct nvbios_npdeT *info)
51{
52 u32 data = nvbios_npdeTe(bios, base);
53 memset(info, 0x00, sizeof(*info));
54 if (data) {
55 info->image_size = nv_ro16(bios, data + 0x08) * 512;
56 info->last = nv_ro08(bios, data + 0x0a) & 0x80;
57 }
58 return data;
59}
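Note: the comma expression in nvbios_npdeTe() hides a simple alignment rule, namely that the NPDE block is expected at the first 16-byte boundary past the PCIR structure. The same computation, unrolled:

	u8 ver; u16 hdr;
	u32 npde = nvbios_pcirTe(bios, base, &ver, &hdr);
	if (npde) {
		npde = (npde + hdr + 0x0f) & ~0x0f; /* round up to 16 bytes */
		/* ... the 'NPDE' signature is then expected at this offset */
	}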
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/pcir.c b/drivers/gpu/drm/nouveau/core/subdev/bios/pcir.c
new file mode 100644
index 000000000000..91dae26bc50f
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/pcir.c
@@ -0,0 +1,69 @@
1/*
2 * Copyright 2014 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24
25#include <subdev/bios.h>
26#include <subdev/bios/pcir.h>
27
28u32
29nvbios_pcirTe(struct nouveau_bios *bios, u32 base, u8 *ver, u16 *hdr)
30{
31 u32 data = nv_ro16(bios, base + 0x18);
32 if (data) {
33 data += base;
34 switch (nv_ro32(bios, data + 0x00)) {
35 case 0x52494350: /* PCIR */
36 case 0x53494752: /* RGIS */
37 case 0x5344504e: /* NPDS */
38 *hdr = nv_ro16(bios, data + 0x0a);
39 *ver = nv_ro08(bios, data + 0x0c);
40 break;
41 default:
42 nv_debug(bios, "%08x: PCIR signature (%08x) unknown\n",
43 data, nv_ro32(bios, data + 0x00));
44 data = 0;
45 break;
46 }
47 }
48 return data;
49}
50
51u32
52nvbios_pcirTp(struct nouveau_bios *bios, u32 base, u8 *ver, u16 *hdr,
53 struct nvbios_pcirT *info)
54{
55 u32 data = nvbios_pcirTe(bios, base, ver, hdr);
56 memset(info, 0x00, sizeof(*info));
57 if (data) {
58 info->vendor_id = nv_ro16(bios, data + 0x04);
59 info->device_id = nv_ro16(bios, data + 0x06);
60 info->class_code[0] = nv_ro08(bios, data + 0x0d);
61 info->class_code[1] = nv_ro08(bios, data + 0x0e);
62 info->class_code[2] = nv_ro08(bios, data + 0x0f);
63 info->image_size = nv_ro16(bios, data + 0x10) * 512;
64 info->image_rev = nv_ro16(bios, data + 0x12);
65 info->image_type = nv_ro08(bios, data + 0x14);
66 info->last = nv_ro08(bios, data + 0x15) & 0x80;
67 }
68 return data;
69}
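For reference, the PCI Data Structure layout assumed by the parser above, with only the fields the driver reads shown. Offsets follow the PCI firmware spec; the 0x70/0xe0 code types are NVIDIA-specific images that image.c and pmu.c treat specially:

	/*  +0x00  u32   signature: 'PCIR', or the 'RGIS'/'NPDS' variants
	 *  +0x04  u16   vendor id
	 *  +0x06  u16   device id
	 *  +0x0a  u16   structure length
	 *  +0x0c  u8    structure revision
	 *  +0x0d  u8[3] class code
	 *  +0x10  u16   image length, in 512-byte units
	 *  +0x12  u16   image (code) revision
	 *  +0x14  u8    code type (0x00 x86 BIOS, 0x03 EFI, 0x70/0xe0 NVIDIA)
	 *  +0x15  u8    indicator, bit 7 set on the last image in the ROM
	 */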
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/pmu.c b/drivers/gpu/drm/nouveau/core/subdev/bios/pmu.c
new file mode 100644
index 000000000000..66c56ba07d1b
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/pmu.c
@@ -0,0 +1,135 @@
1/*
2 * Copyright 2014 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24
25#include <subdev/bios.h>
26#include <subdev/bios/bit.h>
27#include <subdev/bios/image.h>
28#include <subdev/bios/pmu.h>
29
30static u32
31weirdo_pointer(struct nouveau_bios *bios, u32 data)
32{
33 struct nvbios_image image;
34 int idx = 0;
35 if (nvbios_image(bios, idx++, &image)) {
36 data -= image.size;
37 while (nvbios_image(bios, idx++, &image)) {
38 if (image.type == 0xe0)
39 return image.base + data;
40 }
41 }
42 return 0;
43}
44
45u32
46nvbios_pmuTe(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
47{
48 struct bit_entry bit_p;
49 u32 data = 0;
50
51 if (!bit_entry(bios, 'p', &bit_p)) {
52 if (bit_p.version == 2 && bit_p.length >= 4)
53 data = nv_ro32(bios, bit_p.offset + 0x00);
54 if ((data = weirdo_pointer(bios, data))) {
55 *ver = nv_ro08(bios, data + 0x00); /* maybe? */
56 *hdr = nv_ro08(bios, data + 0x01);
57 *len = nv_ro08(bios, data + 0x02);
58 *cnt = nv_ro08(bios, data + 0x03);
59 }
60 }
61
62 return data;
63}
64
65u32
66nvbios_pmuTp(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len,
67 struct nvbios_pmuT *info)
68{
69 u32 data = nvbios_pmuTe(bios, ver, hdr, cnt, len);
70 memset(info, 0x00, sizeof(*info));
71 switch (!!data * *ver) {
72 default:
73 break;
74 }
75 return data;
76}
77
78u32
79nvbios_pmuEe(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr)
80{
81 u8 cnt, len;
82 u32 data = nvbios_pmuTe(bios, ver, hdr, &cnt, &len);
83 if (data && idx < cnt) {
84 data = data + *hdr + (idx * len);
85 *hdr = len;
86 return data;
87 }
88 return 0;
89}
90
91u32
92nvbios_pmuEp(struct nouveau_bios *bios, int idx, u8 *ver, u8 *hdr,
93 struct nvbios_pmuE *info)
94{
95 u32 data = nvbios_pmuEe(bios, idx, ver, hdr);
96 memset(info, 0x00, sizeof(*info));
97 switch (!!data * *ver) {
98 default:
99 info->type = nv_ro08(bios, data + 0x00);
100 info->data = nv_ro32(bios, data + 0x02);
101 break;
102 }
103 return data;
104}
105
106bool
107nvbios_pmuRm(struct nouveau_bios *bios, u8 type, struct nvbios_pmuR *info)
108{
109 struct nvbios_pmuE pmuE;
110 u8 ver, hdr, idx = 0;
111 u32 data;
112 memset(info, 0x00, sizeof(*info));
113 while ((data = nvbios_pmuEp(bios, idx++, &ver, &hdr, &pmuE))) {
114 if ( pmuE.type == type &&
115 (data = weirdo_pointer(bios, pmuE.data))) {
116 info->init_addr_pmu = nv_ro32(bios, data + 0x08);
117 info->args_addr_pmu = nv_ro32(bios, data + 0x0c);
118 info->boot_addr = data + 0x30;
119 info->boot_addr_pmu = nv_ro32(bios, data + 0x10) +
120 nv_ro32(bios, data + 0x18);
121 info->boot_size = nv_ro32(bios, data + 0x1c) -
122 nv_ro32(bios, data + 0x18);
123 info->code_addr = info->boot_addr + info->boot_size;
124 info->code_addr_pmu = info->boot_addr_pmu +
125 info->boot_size;
126 info->code_size = nv_ro32(bios, data + 0x20);
127 info->data_addr = data + 0x30 +
128 nv_ro32(bios, data + 0x24);
129 info->data_addr_pmu = nv_ro32(bios, data + 0x28);
130 info->data_size = nv_ro32(bios, data + 0x2c);
131 return true;
132 }
133 }
134 return false;
135}
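Note: weirdo_pointer() appears to handle BIT 'p' pointers that are stored relative to the end of the first ROM image: it subtracts the first image's size and rebases the remainder onto the type 0xe0 extension image. On a contiguous ROM, where the 0xe0 image directly follows the first, the value comes out unchanged. Restated as a sketch:

	struct nvbios_image image;
	u32 abs = 0;
	int idx;
	if (nvbios_image(bios, 0, &image)) {
		u32 rel = data - image.size; /* offset past the first image */
		for (idx = 1; nvbios_image(bios, idx, &image); idx++) {
			if (image.type == 0xe0) {
				abs = image.base + rel;
				break;
			}
		}
	}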
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/priv.h b/drivers/gpu/drm/nouveau/core/subdev/bios/priv.h
new file mode 100644
index 000000000000..187d225bd1e9
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/priv.h
@@ -0,0 +1,25 @@
1#ifndef __NVKM_BIOS_PRIV_H__
2#define __NVKM_BIOS_PRIV_H__
3
4#include <subdev/bios.h>
5
6struct nvbios_source {
7 const char *name;
8 void *(*init)(struct nouveau_bios *, const char *);
9 void (*fini)(void *);
10 u32 (*read)(void *, u32 offset, u32 length, struct nouveau_bios *);
11 bool rw;
12};
13
14int nvbios_extend(struct nouveau_bios *, u32 length);
15int nvbios_shadow(struct nouveau_bios *);
16
17extern const struct nvbios_source nvbios_rom;
18extern const struct nvbios_source nvbios_ramin;
19extern const struct nvbios_source nvbios_acpi_fast;
20extern const struct nvbios_source nvbios_acpi_slow;
21extern const struct nvbios_source nvbios_pcirom;
22extern const struct nvbios_source nvbios_platform;
23extern const struct nvbios_source nvbios_of;
24
25#endif
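Note: nvbios_source is the seam all the shadow*.c files plug into: init() returns an opaque cookie (or an ERR_PTR to skip the source), read() copies into bios->data and reports how many bytes it actually fetched, and fini() releases the cookie; init and fini are both optional. A minimal, hypothetical source against that contract:

	static u32
	example_read(void *data, u32 offset, u32 length,
		     struct nouveau_bios *bios)
	{
		/* caller has grown bios->data via nvbios_extend() already */
		memset(bios->data + offset, 0x00, length); /* stand-in payload */
		return length;
	}

	static const struct nvbios_source
	nvbios_example = {
		.name = "EXAMPLE",
		.read = example_read,
		.rw   = false,
	};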
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/ramcfg.c b/drivers/gpu/drm/nouveau/core/subdev/bios/ramcfg.c
index 6c401f70ab99..1623c8dfe797 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/ramcfg.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/ramcfg.c
@@ -25,6 +25,7 @@
25#include <subdev/bios.h> 25#include <subdev/bios.h>
26#include <subdev/bios/bit.h> 26#include <subdev/bios/bit.h>
27#include <subdev/bios/ramcfg.h> 27#include <subdev/bios/ramcfg.h>
28#include <subdev/bios/M0203.h>
28 29
29static u8 30static u8
30nvbios_ramcfg_strap(struct nouveau_subdev *subdev) 31nvbios_ramcfg_strap(struct nouveau_subdev *subdev)
@@ -54,12 +55,22 @@ nvbios_ramcfg_index(struct nouveau_subdev *subdev)
54 u8 strap = nvbios_ramcfg_strap(subdev); 55 u8 strap = nvbios_ramcfg_strap(subdev);
55 u32 xlat = 0x00000000; 56 u32 xlat = 0x00000000;
56 struct bit_entry bit_M; 57 struct bit_entry bit_M;
58 struct nvbios_M0203E M0203E;
59 u8 ver, hdr;
57 60
58 if (!bit_entry(bios, 'M', &bit_M)) { 61 if (!bit_entry(bios, 'M', &bit_M)) {
59 if (bit_M.version == 1 && bit_M.length >= 5) 62 if (bit_M.version == 1 && bit_M.length >= 5)
60 xlat = nv_ro16(bios, bit_M.offset + 3); 63 xlat = nv_ro16(bios, bit_M.offset + 3);
61 if (bit_M.version == 2 && bit_M.length >= 3) 64 if (bit_M.version == 2 && bit_M.length >= 3) {
65 /*XXX: is M ever shorter than this?
66 * if not - what is xlat used for now?
67 * also - sigh..
68 */
69 if (bit_M.length >= 7 &&
70 nvbios_M0203Em(bios, strap, &ver, &hdr, &M0203E))
71 return M0203E.group;
62 xlat = nv_ro16(bios, bit_M.offset + 1); 72 xlat = nv_ro16(bios, bit_M.offset + 1);
73 }
63 } 74 }
64 75
65 if (xlat) 76 if (xlat)
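Note: with this hunk, a long enough BIT 'M' version 2 table resolves the ramcfg strap through the new M0203 tables instead of the legacy xlat byte array:

	/* resolution order, for BIT 'M' version 2:
	 *   length >= 7: strap -> matching M0203E entry -> entry's group
	 *   otherwise:   strap -> xlat byte table at offset+1 -> index
	 */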
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c b/drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c
index 585e69331ccc..c5685228c322 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/rammap.c
@@ -162,8 +162,9 @@ nvbios_rammapSp(struct nouveau_bios *bios, u32 data,
162 p->ramcfg_10_02_08 = (nv_ro08(bios, data + 0x02) & 0x08) >> 3; 162 p->ramcfg_10_02_08 = (nv_ro08(bios, data + 0x02) & 0x08) >> 3;
163 p->ramcfg_10_02_10 = (nv_ro08(bios, data + 0x02) & 0x10) >> 4; 163 p->ramcfg_10_02_10 = (nv_ro08(bios, data + 0x02) & 0x10) >> 4;
164 p->ramcfg_10_02_20 = (nv_ro08(bios, data + 0x02) & 0x20) >> 5; 164 p->ramcfg_10_02_20 = (nv_ro08(bios, data + 0x02) & 0x20) >> 5;
165 p->ramcfg_10_02_40 = (nv_ro08(bios, data + 0x02) & 0x40) >> 6; 165 p->ramcfg_10_DLLoff = (nv_ro08(bios, data + 0x02) & 0x40) >> 6;
166 p->ramcfg_10_03_0f = (nv_ro08(bios, data + 0x03) & 0x0f) >> 0; 166 p->ramcfg_10_03_0f = (nv_ro08(bios, data + 0x03) & 0x0f) >> 0;
167 p->ramcfg_10_04_01 = (nv_ro08(bios, data + 0x04) & 0x01) >> 0;
167 p->ramcfg_10_05 = (nv_ro08(bios, data + 0x05) & 0xff) >> 0; 168 p->ramcfg_10_05 = (nv_ro08(bios, data + 0x05) & 0xff) >> 0;
168 p->ramcfg_10_06 = (nv_ro08(bios, data + 0x06) & 0xff) >> 0; 169 p->ramcfg_10_06 = (nv_ro08(bios, data + 0x06) & 0xff) >> 0;
169 p->ramcfg_10_07 = (nv_ro08(bios, data + 0x07) & 0xff) >> 0; 170 p->ramcfg_10_07 = (nv_ro08(bios, data + 0x07) & 0xff) >> 0;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/shadow.c b/drivers/gpu/drm/nouveau/core/subdev/bios/shadow.c
new file mode 100644
index 000000000000..bb9e0018d936
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/shadow.c
@@ -0,0 +1,270 @@
1/*
2 * Copyright 2014 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 */
24
25#include "priv.h"
26#include <core/option.h>
27#include <subdev/bios/image.h>
28
29struct shadow {
30 struct nouveau_oclass base;
31 u32 skip;
32 const struct nvbios_source *func;
33 void *data;
34 u32 size;
35 int score;
36};
37
38static bool
39shadow_fetch(struct nouveau_bios *bios, u32 upto)
40{
41 struct shadow *mthd = (void *)nv_object(bios)->oclass;
42 const u32 limit = (upto + 3) & ~3;
43 const u32 start = bios->size;
44 void *data = mthd->data;
45 if (nvbios_extend(bios, limit) > 0) {
46 u32 read = mthd->func->read(data, start, limit - start, bios);
47 bios->size = start + read;
48 }
49 return bios->size >= limit;
50}
51
52static u8
53shadow_rd08(struct nouveau_object *object, u64 addr)
54{
55 struct nouveau_bios *bios = (void *)object;
56 if (shadow_fetch(bios, addr + 1))
57 return bios->data[addr];
58 return 0x00;
59}
60
61static u16
62shadow_rd16(struct nouveau_object *object, u64 addr)
63{
64 struct nouveau_bios *bios = (void *)object;
65 if (shadow_fetch(bios, addr + 2))
66 return get_unaligned_le16(&bios->data[addr]);
67 return 0x0000;
68}
69
70static u32
71shadow_rd32(struct nouveau_object *object, u64 addr)
72{
73 struct nouveau_bios *bios = (void *)object;
74 if (shadow_fetch(bios, addr + 4))
75 return get_unaligned_le32(&bios->data[addr]);
76 return 0x00000000;
77}
78
79static struct nouveau_oclass
80shadow_class = {
81 .handle = NV_SUBDEV(VBIOS, 0x00),
82 .ofuncs = &(struct nouveau_ofuncs) {
83 .rd08 = shadow_rd08,
84 .rd16 = shadow_rd16,
85 .rd32 = shadow_rd32,
86 },
87};
88
89static int
90shadow_image(struct nouveau_bios *bios, int idx, struct shadow *mthd)
91{
92 struct nvbios_image image;
93 int score = 1;
94
95 if (!nvbios_image(bios, idx, &image)) {
96 nv_debug(bios, "image %d invalid\n", idx);
97 return 0;
98 }
99 nv_debug(bios, "%08x: type %02x, %d bytes\n",
100 image.base, image.type, image.size);
101
102 if (!shadow_fetch(bios, image.size)) {
103 nv_debug(bios, "%08x: fetch failed\n", image.base);
104 return 0;
105 }
106
107 switch (image.type) {
108 case 0x00:
109 if (nvbios_checksum(&bios->data[image.base], image.size)) {
110 nv_debug(bios, "%08x: checksum failed\n", image.base);
111 if (mthd->func->rw)
112 score += 1;
113 score += 1;
114 } else {
115 score += 3;
116 }
117 break;
118 default:
119 score += 3;
120 break;
121 }
122
123 if (!image.last)
124 score += shadow_image(bios, idx + 1, mthd);
125 return score;
126}
127
128static int
129shadow_score(struct nouveau_bios *bios, struct shadow *mthd)
130{
131 struct nouveau_oclass *oclass = nv_object(bios)->oclass;
132 int score;
133 nv_object(bios)->oclass = &mthd->base;
134 score = shadow_image(bios, 0, mthd);
135 nv_object(bios)->oclass = oclass;
136 return score;
137
138}
139
140static int
141shadow_method(struct nouveau_bios *bios, struct shadow *mthd, const char *name)
142{
143 const struct nvbios_source *func = mthd->func;
144 if (func->name) {
145 nv_debug(bios, "trying %s...\n", name ? name : func->name);
146 if (func->init) {
147 mthd->data = func->init(bios, name);
148 if (IS_ERR(mthd->data)) {
149 mthd->data = NULL;
150 return 0;
151 }
152 }
153 mthd->score = shadow_score(bios, mthd);
154 if (func->fini)
155 func->fini(mthd->data);
156 nv_debug(bios, "scored %d\n", mthd->score);
157 mthd->data = bios->data;
158 mthd->size = bios->size;
159 bios->data = NULL;
160 bios->size = 0;
161 }
162 return mthd->score;
163}
164
165static u32
166shadow_fw_read(void *data, u32 offset, u32 length, struct nouveau_bios *bios)
167{
168 const struct firmware *fw = data;
169 if (offset + length <= fw->size) {
170 memcpy(bios->data + offset, fw->data + offset, length);
171 return length;
172 }
173 return 0;
174}
175
176static void *
177shadow_fw_init(struct nouveau_bios *bios, const char *name)
178{
179 struct device *dev = &nv_device(bios)->pdev->dev;
180 const struct firmware *fw;
181 int ret = request_firmware(&fw, name, dev);
182 if (ret)
183 return ERR_PTR(-ENOENT);
184 return (void *)fw;
185}
186
187static const struct nvbios_source
188shadow_fw = {
189 .name = "firmware",
190 .init = shadow_fw_init,
191 .fini = (void(*)(void *))release_firmware,
192 .read = shadow_fw_read,
193 .rw = false,
194};
195
196int
197nvbios_shadow(struct nouveau_bios *bios)
198{
199 struct shadow mthds[] = {
200 { shadow_class, 0, &nvbios_of },
201 { shadow_class, 0, &nvbios_ramin },
202 { shadow_class, 0, &nvbios_rom },
203 { shadow_class, 0, &nvbios_acpi_fast },
204 { shadow_class, 4, &nvbios_acpi_slow },
205 { shadow_class, 1, &nvbios_pcirom },
206 { shadow_class, 1, &nvbios_platform },
207 { shadow_class }
208 }, *mthd = mthds, *best = NULL;
209 const char *optarg;
210 char *source;
211 int optlen;
212
213 /* handle user-specified bios source */
214 optarg = nouveau_stropt(nv_device(bios)->cfgopt, "NvBios", &optlen);
215 source = optarg ? kstrndup(optarg, optlen, GFP_KERNEL) : NULL;
216 if (source) {
217 /* try to match one of the built-in methods */
218 for (mthd = mthds; mthd->func; mthd++) {
219 if (mthd->func->name &&
220 !strcasecmp(source, mthd->func->name)) {
221 best = mthd;
222 if (shadow_method(bios, mthd, NULL))
223 break;
224 }
225 }
226
227 /* otherwise, attempt to load as firmware */
228 if (!best && (best = mthd)) {
229 mthd->func = &shadow_fw;
230 shadow_method(bios, mthd, source);
231 mthd->func = NULL;
232 }
233
234 if (!best->score) {
235 nv_error(bios, "%s invalid\n", source);
236 kfree(source);
237 source = NULL;
238 }
239 }
240
241 /* scan all potential bios sources, looking for best image */
242 if (!best || !best->score) {
243 for (mthd = mthds, best = mthd; mthd->func; mthd++) {
244 if (!mthd->skip || best->score < mthd->skip) {
245 if (shadow_method(bios, mthd, NULL)) {
246 if (mthd->score > best->score)
247 best = mthd;
248 }
249 }
250 }
251 }
252
253 /* cleanup the ones we didn't use */
254 for (mthd = mthds; mthd->func; mthd++) {
255 if (mthd != best)
256 kfree(mthd->data);
257 }
258
259 if (!best->score) {
260 nv_fatal(bios, "unable to locate usable image\n");
261 return -EINVAL;
262 }
263
264 nv_info(bios, "using image from %s\n", best->func ?
265 best->func->name : source);
266 bios->data = best->data;
267 bios->size = best->size;
268 kfree(source);
269 return 0;
270}
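Note: the skip field gates the more intrusive fallbacks in nvbios_shadow(): a method with non-zero skip only runs while the best score so far is below that threshold. Spelled out against the table above (a valid first image scores 1 + 3 = 4 in shadow_image()):

	/* acpi_slow (skip 4): tried only while no source has yet produced
	 *   a fully valid image;
	 * pcirom / platform (skip 1): tried only while every earlier
	 *   source scored 0, i.e. found nothing with a ROM signature.
	 */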
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/shadowacpi.c b/drivers/gpu/drm/nouveau/core/subdev/bios/shadowacpi.c
new file mode 100644
index 000000000000..bc130c12ec06
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/shadowacpi.c
@@ -0,0 +1,111 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "priv.h"
25
26#if defined(CONFIG_ACPI) && defined(CONFIG_X86)
27int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
28bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
29#else
30static inline bool
31nouveau_acpi_rom_supported(struct pci_dev *pdev)
32{
33 return false;
34}
35
36static inline int
37nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len)
38{
39 return -EINVAL;
40}
41#endif
42
43/* This version of the shadow function disobeys the ACPI spec and tries
44 * to fetch in units of more than 4KiB at a time. This is a LOT faster
 45 * on some systems, such as the Lenovo W530.
46 */
47static u32
48acpi_read_fast(void *data, u32 offset, u32 length, struct nouveau_bios *bios)
49{
50 u32 limit = (offset + length + 0xfff) & ~0xfff;
51 u32 start = offset & ~0x00000fff;
52 u32 fetch = limit - start;
53
54 if (nvbios_extend(bios, limit) > 0) {
55 int ret = nouveau_acpi_get_bios_chunk(bios->data, start, fetch);
56 if (ret == fetch)
57 return fetch;
58 }
59
60 return 0;
61}
62
 63/* Other systems, such as the one in fdo#55948, will report success
64 * but only return 4KiB of data. The common bios fetching logic will
65 * detect an invalid image, and fall back to this version of the read
66 * function.
67 */
68static u32
69acpi_read_slow(void *data, u32 offset, u32 length, struct nouveau_bios *bios)
70{
71 u32 limit = (offset + length + 0xfff) & ~0xfff;
72 u32 start = offset & ~0xfff;
73 u32 fetch = 0;
74
75 if (nvbios_extend(bios, limit) > 0) {
76 while (start + fetch < limit) {
77 int ret = nouveau_acpi_get_bios_chunk(bios->data,
78 start + fetch,
79 0x1000);
80 if (ret != 0x1000)
81 break;
82 fetch += 0x1000;
83 }
84 }
85
86 return fetch;
87}
88
89static void *
90acpi_init(struct nouveau_bios *bios, const char *name)
91{
92 if (!nouveau_acpi_rom_supported(nv_device(bios)->pdev))
93 return ERR_PTR(-ENODEV);
94 return NULL;
95}
96
97const struct nvbios_source
98nvbios_acpi_fast = {
99 .name = "ACPI",
100 .init = acpi_init,
101 .read = acpi_read_fast,
102 .rw = false,
103};
104
105const struct nvbios_source
106nvbios_acpi_slow = {
107 .name = "ACPI",
108 .init = acpi_init,
109 .read = acpi_read_slow,
110 .rw = false,
111};
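Note: both readers widen a request to whole 4KiB pages before calling into ACPI; only the chunking differs. The shared arithmetic, pulled out into a sketch:

	static u32
	acpi_span(u32 offset, u32 length, u32 *start)
	{
		u32 limit = (offset + length + 0xfff) & ~0xfff; /* round up */
		*start = offset & ~0x00000fff;			/* round down */
		return limit - *start;		/* whole pages to request */
	}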
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/shadowof.c b/drivers/gpu/drm/nouveau/core/subdev/bios/shadowof.c
new file mode 100644
index 000000000000..3abe487a6025
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/shadowof.c
@@ -0,0 +1,71 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "priv.h"
25
26#if defined(__powerpc__)
27struct priv {
28 const void __iomem *data;
29 int size;
30};
31
32static u32
33of_read(void *data, u32 offset, u32 length, struct nouveau_bios *bios)
34{
35 struct priv *priv = data;
36 if (offset + length <= priv->size) {
37 memcpy_fromio(bios->data + offset, priv->data + offset, length);
38 return length;
39 }
40 return 0;
41}
42
43static void *
44of_init(struct nouveau_bios *bios, const char *name)
45{
46 struct pci_dev *pdev = nv_device(bios)->pdev;
47 struct device_node *dn;
48 struct priv *priv;
49 if (!(dn = pci_device_to_OF_node(pdev)))
50 return ERR_PTR(-ENODEV);
51 if (!(priv = kzalloc(sizeof(*priv), GFP_KERNEL)))
52 return ERR_PTR(-ENOMEM);
53 if ((priv->data = of_get_property(dn, "NVDA,BMP", &priv->size)))
54 return priv;
55 kfree(priv);
56 return ERR_PTR(-EINVAL);
57}
58
59const struct nvbios_source
60nvbios_of = {
61 .name = "OpenFirmware",
62 .init = of_init,
63 .fini = (void(*)(void *))kfree,
64 .read = of_read,
65 .rw = false,
66};
67#else
68const struct nvbios_source
69nvbios_of = {
70};
71#endif
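Note: on non-powerpc builds nvbios_of is deliberately left all-zeroes. Its .name stays NULL, and shadow_method() in shadow.c skips any nameless source, so nvbios_shadow() needs no #ifdef at the call site. The guard in question, in outline:

	static int
	shadow_method(struct nouveau_bios *bios, struct shadow *mthd,
		      const char *name)
	{
		const struct nvbios_source *func = mthd->func;
		if (func->name) {
			/* ... probe and score the source ... */
		}
		return mthd->score;	/* stays 0 for a stub source */
	}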
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/shadowpci.c b/drivers/gpu/drm/nouveau/core/subdev/bios/shadowpci.c
new file mode 100644
index 000000000000..1d0389c0abef
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/shadowpci.c
@@ -0,0 +1,108 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "priv.h"
25
26struct priv {
27 struct pci_dev *pdev;
28 void __iomem *rom;
29 size_t size;
30};
31
32static u32
33pcirom_read(void *data, u32 offset, u32 length, struct nouveau_bios *bios)
34{
35 struct priv *priv = data;
36 if (offset + length <= priv->size) {
37 memcpy_fromio(bios->data + offset, priv->rom + offset, length);
38 return length;
39 }
40 return 0;
41}
42
43static void
44pcirom_fini(void *data)
45{
46 struct priv *priv = data;
47 pci_unmap_rom(priv->pdev, priv->rom);
48 pci_disable_rom(priv->pdev);
49 kfree(priv);
50}
51
52static void *
53pcirom_init(struct nouveau_bios *bios, const char *name)
54{
55 struct pci_dev *pdev = nv_device(bios)->pdev;
56 struct priv *priv = NULL;
57 int ret;
58
59 if (!(ret = pci_enable_rom(pdev))) {
60 if (ret = -ENOMEM,
61 (priv = kmalloc(sizeof(*priv), GFP_KERNEL))) {
62 if (ret = -EFAULT,
63 (priv->rom = pci_map_rom(pdev, &priv->size))) {
64 priv->pdev = pdev;
65 return priv;
66 }
67 kfree(priv);
68 }
69 pci_disable_rom(pdev);
70 }
71
72 return ERR_PTR(ret);
73}
74
75const struct nvbios_source
76nvbios_pcirom = {
77 .name = "PCIROM",
78 .init = pcirom_init,
79 .fini = pcirom_fini,
80 .read = pcirom_read,
81 .rw = true,
82};
83
84static void *
85platform_init(struct nouveau_bios *bios, const char *name)
86{
87 struct pci_dev *pdev = nv_device(bios)->pdev;
88 struct priv *priv;
89 int ret = -ENOMEM;
90
91 if ((priv = kmalloc(sizeof(*priv), GFP_KERNEL))) {
92 if (ret = -ENODEV,
93 (priv->rom = pci_platform_rom(pdev, &priv->size)))
94 return priv;
95 kfree(priv);
96 }
97
98 return ERR_PTR(ret);
99}
100
101const struct nvbios_source
102nvbios_platform = {
103 .name = "PLATFORM",
104 .init = platform_init,
105 .fini = (void(*)(void *))kfree,
106 .read = pcirom_read,
107 .rw = true,
108};
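Note: pcirom_init() uses comma expressions to pre-load the error code before each test. Unrolled, the same control flow (function body only) reads:

	ret = pci_enable_rom(pdev);
	if (ret == 0) {
		ret = -ENOMEM;
		priv = kmalloc(sizeof(*priv), GFP_KERNEL);
		if (priv) {
			ret = -EFAULT;
			priv->rom = pci_map_rom(pdev, &priv->size);
			if (priv->rom) {
				priv->pdev = pdev;
				return priv;
			}
			kfree(priv);
		}
		pci_disable_rom(pdev);
	}
	return ERR_PTR(ret);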
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c b/drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c
new file mode 100644
index 000000000000..5e58bba0dd5c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c
@@ -0,0 +1,112 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "priv.h"
25
26struct priv {
27 struct nouveau_bios *bios;
28 u32 bar0;
29};
30
31static u32
32pramin_read(void *data, u32 offset, u32 length, struct nouveau_bios *bios)
33{
34 u32 i;
35 if (offset + length <= 0x00100000) {
36 for (i = offset; i < offset + length; i += 4)
37 *(u32 *)&bios->data[i] = nv_rd32(bios, 0x700000 + i);
38 return length;
39 }
40 return 0;
41}
42
43static void
44pramin_fini(void *data)
45{
46 struct priv *priv = data;
47 nv_wr32(priv->bios, 0x001700, priv->bar0);
48 kfree(priv);
49}
50
51static void *
52pramin_init(struct nouveau_bios *bios, const char *name)
53{
54 struct priv *priv = NULL;
55 u64 addr = 0;
56
 57	/* PRAMIN is always potentially available prior to nv50 */
58 if (nv_device(bios)->card_type < NV_50)
59 return NULL;
60
61 /* we can't get the bios image pointer without PDISP */
62 if (nv_device(bios)->card_type >= GM100)
63 addr = nv_rd32(bios, 0x021c04);
64 else
65 if (nv_device(bios)->card_type >= NV_C0)
66 addr = nv_rd32(bios, 0x022500);
67 if (addr & 0x00000001) {
68 nv_debug(bios, "... display disabled\n");
69 return ERR_PTR(-ENODEV);
70 }
71
72 /* check that the window is enabled and in vram, particularly
73 * important as we don't want to be touching vram on an
74 * uninitialised board
75 */
76 addr = nv_rd32(bios, 0x619f04);
77 if (!(addr & 0x00000008)) {
78 nv_debug(bios, "... not enabled\n");
79 return ERR_PTR(-ENODEV);
80 }
81 if ( (addr & 0x00000003) != 1) {
82 nv_debug(bios, "... not in vram\n");
83 return ERR_PTR(-ENODEV);
84 }
85
86 /* some alternate method inherited from xf86-video-nv... */
87 addr = (addr & 0xffffff00) << 8;
88 if (!addr) {
89 addr = (u64)nv_rd32(bios, 0x001700) << 16;
90 addr += 0xf0000;
91 }
92
93 /* modify bar0 PRAMIN window to cover the bios image */
94 if (!(priv = kmalloc(sizeof(*priv), GFP_KERNEL))) {
95 nv_error(bios, "... out of memory\n");
96 return ERR_PTR(-ENOMEM);
97 }
98
99 priv->bios = bios;
100 priv->bar0 = nv_rd32(bios, 0x001700);
101 nv_wr32(bios, 0x001700, addr >> 16);
102 return priv;
103}
104
105const struct nvbios_source
106nvbios_ramin = {
107 .name = "PRAMIN",
108 .init = pramin_init,
109 .fini = pramin_fini,
110 .read = pramin_read,
111 .rw = true,
112};
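Note: the window arithmetic above relies on register 0x001700 holding bits 31:16 of the VRAM address that appears at BAR0 + 0x700000, a 1MiB aperture, which is why pramin_read() refuses offsets past 0x00100000. A sketch of a windowed read under that assumption, valid only while the target lies inside the current window:

	static u32
	pramin_vram_rd32(struct nouveau_bios *bios, u64 vram_addr)
	{
		u64 win = (u64)nv_rd32(bios, 0x001700) << 16; /* window base */
		return nv_rd32(bios, 0x700000 + (vram_addr - win));
	}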
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/shadowrom.c b/drivers/gpu/drm/nouveau/core/subdev/bios/shadowrom.c
new file mode 100644
index 000000000000..b7992bc3ffa5
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/shadowrom.c
@@ -0,0 +1,69 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24#include "priv.h"
25
26static u32
27prom_read(void *data, u32 offset, u32 length, struct nouveau_bios *bios)
28{
29 u32 i;
30 if (offset + length <= 0x00100000) {
31 for (i = offset; i < offset + length; i += 4)
32 *(u32 *)&bios->data[i] = nv_rd32(bios, 0x300000 + i);
33 return length;
34 }
35 return 0;
36}
37
38static void
39prom_fini(void *data)
40{
41 struct nouveau_bios *bios = data;
42 if (nv_device(bios)->card_type < NV_50)
43 nv_mask(bios, 0x001850, 0x00000001, 0x00000001);
44 else
45 nv_mask(bios, 0x088050, 0x00000001, 0x00000001);
46}
47
48static void *
49prom_init(struct nouveau_bios *bios, const char *name)
50{
51 if (nv_device(bios)->card_type < NV_50) {
52 if (nv_device(bios)->card_type == NV_40 &&
53 nv_device(bios)->chipset >= 0x4c)
54 return ERR_PTR(-ENODEV);
55 nv_mask(bios, 0x001850, 0x00000001, 0x00000000);
56 } else {
57 nv_mask(bios, 0x088050, 0x00000001, 0x00000000);
58 }
59 return bios;
60}
61
62const struct nvbios_source
63nvbios_rom = {
64 .name = "PROM",
65 .init = prom_init,
66 .fini = prom_fini,
67 .read = prom_read,
68 .rw = false,
69};
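[Editor's note] prom_read() bounds the copy to the 1MiB PROM aperture at 0x300000 before mirroring it into bios->data. One caveat for anyone reusing the pattern: the test `offset + length <= 0x00100000` assumes the 32-bit sum cannot wrap; with untrusted inputs the overflow-safe form of the same check would be:

	if (offset <= 0x00100000 && length <= 0x00100000 - offset) {
		/* ... copy as above ... */
	}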
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/timing.c b/drivers/gpu/drm/nouveau/core/subdev/bios/timing.c
index 46d955eb51eb..8521eca1ed9c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/timing.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/timing.c
@@ -93,10 +93,44 @@ nvbios_timingEp(struct nouveau_bios *bios, int idx,
 	p->timing_hdr = *hdr;
 	switch (!!data * *ver) {
 	case 0x10:
 		p->timing_10_WR = nv_ro08(bios, data + 0x00);
-		p->timing_10_CL = nv_ro08(bios, data + 0x02);
-		p->timing_10_ODT = nv_ro08(bios, data + 0x0e) & 0x07;
-		p->timing_10_CWL = nv_ro08(bios, data + 0x13);
+		p->timing_10_WTR = nv_ro08(bios, data + 0x01);
+		p->timing_10_CL = nv_ro08(bios, data + 0x02);
+		p->timing_10_RC = nv_ro08(bios, data + 0x03);
+		p->timing_10_RFC = nv_ro08(bios, data + 0x05);
+		p->timing_10_RAS = nv_ro08(bios, data + 0x07);
+		p->timing_10_RP = nv_ro08(bios, data + 0x09);
+		p->timing_10_RCDRD = nv_ro08(bios, data + 0x0a);
+		p->timing_10_RCDWR = nv_ro08(bios, data + 0x0b);
+		p->timing_10_RRD = nv_ro08(bios, data + 0x0c);
+		p->timing_10_13 = nv_ro08(bios, data + 0x0d);
+		p->timing_10_ODT = nv_ro08(bios, data + 0x0e) & 0x07;
+
+		p->timing_10_24 = 0xff;
+		p->timing_10_21 = 0;
+		p->timing_10_20 = 0;
+		p->timing_10_CWL = 0;
+		p->timing_10_18 = 0;
+		p->timing_10_16 = 0;
+
+		switch (min_t(u8, *hdr, 25)) {
+		case 25:
+			p->timing_10_24 = nv_ro08(bios, data + 0x18);
+		case 24:
+		case 23:
+		case 22:
+			p->timing_10_21 = nv_ro08(bios, data + 0x15);
+		case 21:
+			p->timing_10_20 = nv_ro08(bios, data + 0x14);
+		case 20:
+			p->timing_10_CWL = nv_ro08(bios, data + 0x13);
+		case 19:
+			p->timing_10_18 = nv_ro08(bios, data + 0x12);
+		case 18:
+		case 17:
+			p->timing_10_16 = nv_ro08(bios, data + 0x10);
+		}
+
 		break;
 	case 0x20:
 		p->timing[0] = nv_ro32(bios, data + 0x00);
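[Editor's note] The new `switch (min_t(u8, *hdr, 25))` parses a variable-length 0x10 timing entry by deliberate fall-through: execution enters at the case matching the (clamped) header size and runs every lower case, so each optional trailing field is read only when the header says it is present, and the defaults set just above it survive otherwise. The idiom in isolation (editor's sketch; struct and field names are invented):

struct entry {
	u8 base;
	u8 opt_a;	/* present when size >= 2 */
	u8 opt_b;	/* present when size >= 3 */
};

static void
parse_entry(struct entry *e, const u8 *data, u8 size)
{
	e->opt_a = 0;	/* defaults for absent fields */
	e->opt_b = 0;

	switch (size > 3 ? 3 : size) {
	case 3:
		e->opt_b = data[2];
		/* fall through */
	case 2:
		e->opt_a = data[1];
		/* fall through */
	case 1:
		e->base = data[0];
	}
}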
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/gk20a.c b/drivers/gpu/drm/nouveau/core/subdev/clock/gk20a.c
index 425a8d5e9129..fb4fad374bdd 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/gk20a.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/gk20a.c
@@ -109,7 +109,7 @@ struct gk20a_clk_pllg_params {
 };
 
 static const struct gk20a_clk_pllg_params gk20a_pllg_params = {
-	.min_vco = 1000, .max_vco = 1700,
+	.min_vco = 1000, .max_vco = 2064,
 	.min_u = 12, .max_u = 38,
 	.min_m = 1, .max_m = 255,
 	.min_n = 8, .max_n = 255,
@@ -470,76 +470,91 @@ gk20a_pstates[] = {
 	{
 		.base = {
 			.domain[nv_clk_src_gpc] = 72000,
+			.voltage = 0,
 		},
 	},
 	{
 		.base = {
 			.domain[nv_clk_src_gpc] = 108000,
+			.voltage = 1,
 		},
 	},
 	{
 		.base = {
 			.domain[nv_clk_src_gpc] = 180000,
+			.voltage = 2,
 		},
 	},
 	{
 		.base = {
 			.domain[nv_clk_src_gpc] = 252000,
+			.voltage = 3,
 		},
 	},
 	{
 		.base = {
 			.domain[nv_clk_src_gpc] = 324000,
+			.voltage = 4,
 		},
 	},
 	{
 		.base = {
 			.domain[nv_clk_src_gpc] = 396000,
+			.voltage = 5,
 		},
 	},
 	{
 		.base = {
 			.domain[nv_clk_src_gpc] = 468000,
+			.voltage = 6,
 		},
 	},
 	{
 		.base = {
 			.domain[nv_clk_src_gpc] = 540000,
+			.voltage = 7,
 		},
 	},
 	{
 		.base = {
 			.domain[nv_clk_src_gpc] = 612000,
+			.voltage = 8,
 		},
 	},
 	{
 		.base = {
 			.domain[nv_clk_src_gpc] = 648000,
+			.voltage = 9,
 		},
 	},
 	{
 		.base = {
 			.domain[nv_clk_src_gpc] = 684000,
+			.voltage = 10,
 		},
 	},
 	{
 		.base = {
 			.domain[nv_clk_src_gpc] = 708000,
+			.voltage = 11,
 		},
 	},
 	{
 		.base = {
 			.domain[nv_clk_src_gpc] = 756000,
+			.voltage = 12,
 		},
 	},
 	{
 		.base = {
 			.domain[nv_clk_src_gpc] = 804000,
+			.voltage = 13,
 		},
 	},
 	{
 		.base = {
 			.domain[nv_clk_src_gpc] = 852000,
+			.voltage = 14,
 		},
 	},
 };
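[Editor's note] Each gk20a pstate now pairs a GPC clock with an index into the Tegra voltage table, so the clock code can raise the rail together with the frequency. A caller selecting the lowest adequate pstate might look like this (editor's sketch; gk20a_pstate_for_freq() is not part of the patch, and the element type is assumed to be struct nouveau_pstate):

static const struct nouveau_pstate *
gk20a_pstate_for_freq(u32 khz)
{
	int i;

	/* table is sorted ascending by GPC clock */
	for (i = 0; i < ARRAY_SIZE(gk20a_pstates); i++) {
		if (gk20a_pstates[i].base.domain[nv_clk_src_gpc] >= khz)
			return &gk20a_pstates[i];
	}
	return &gk20a_pstates[ARRAY_SIZE(gk20a_pstates) - 1];
}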
diff --git a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
index 094551d8ad9b..07ad01247675 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
@@ -510,7 +510,7 @@ nva3_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 	int ret;
 
 	ret = nouveau_clock_create(parent, engine, oclass, nva3_domain, NULL, 0,
-				   false, &priv);
+				   true, &priv);
 	*pobject = nv_object(priv);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/base.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/base.c
index 239acfe876c3..0e45cee82463 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/base.c
@@ -24,8 +24,6 @@
 
 #include <core/option.h>
 
-#include <subdev/bios.h>
-#include <subdev/bios/init.h>
 #include <subdev/vga.h>
 
 #include "priv.h"
@@ -56,7 +54,7 @@ _nouveau_devinit_init(struct nouveau_object *object)
 	if (ret)
 		return ret;
 
-	ret = nvbios_init(&devinit->base, devinit->post);
+	ret = impl->post(&devinit->base, devinit->post);
 	if (ret)
 		return ret;
 
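[Editor's note] The shape of this refactor: devinit no longer hardcodes nvbios_init(); each implementation exports a `.post` hook in its nouveau_devinit_impl and the common init path dispatches through it. The script-driven boards below all set `.post = nvbios_init`, while the new GM204 code supplies a PMU-driven gm204_devinit_post. The dispatch reduces to an indirect call through the impl, roughly (sketch; how `impl` is derived from the object class is an assumption here):

	struct nouveau_devinit_impl *impl = (void *)nv_object(devinit)->oclass;

	ret = impl->post(&devinit->base, devinit->post);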
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/gm107.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/gm107.c
index c69bc7f54e37..4ba43d6a1ec8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/gm107.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/gm107.c
@@ -24,7 +24,7 @@
 
 #include "nv50.h"
 
-static u64
+u64
 gm107_devinit_disable(struct nouveau_devinit *devinit)
 {
 	struct nv50_devinit_priv *priv = (void *)devinit;
@@ -53,4 +53,5 @@ gm107_devinit_oclass = &(struct nouveau_devinit_impl) {
 	},
 	.pll_set = nvc0_devinit_pll_set,
 	.disable = gm107_devinit_disable,
+	.post = nvbios_init,
 }.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/gm204.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/gm204.c
new file mode 100644
index 000000000000..e44a86662a2a
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/gm204.c
@@ -0,0 +1,173 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include <subdev/bios.h>
26#include <subdev/bios/bit.h>
27#include <subdev/bios/pmu.h>
28
29#include "nv50.h"
30
31static void
32pmu_code(struct nv50_devinit_priv *priv, u32 pmu, u32 img, u32 len, bool sec)
33{
34 struct nouveau_bios *bios = nouveau_bios(priv);
35 int i;
36
37 nv_wr32(priv, 0x10a180, 0x01000000 | (sec ? 0x10000000 : 0) | pmu);
38 for (i = 0; i < len; i += 4) {
39 if ((i & 0xff) == 0)
40 nv_wr32(priv, 0x10a188, (pmu + i) >> 8);
41 nv_wr32(priv, 0x10a184, nv_ro32(bios, img + i));
42 }
43
44 while (i & 0xff) {
45 nv_wr32(priv, 0x10a184, 0x00000000);
46 i += 4;
47 }
48}
49
50static void
51pmu_data(struct nv50_devinit_priv *priv, u32 pmu, u32 img, u32 len)
52{
53 struct nouveau_bios *bios = nouveau_bios(priv);
54 int i;
55
56 nv_wr32(priv, 0x10a1c0, 0x01000000 | pmu);
57 for (i = 0; i < len; i += 4)
58 nv_wr32(priv, 0x10a1c4, nv_ro32(bios, img + i));
59}
60
61static u32
62pmu_args(struct nv50_devinit_priv *priv, u32 argp, u32 argi)
63{
64 nv_wr32(priv, 0x10a1c0, argp);
65 nv_wr32(priv, 0x10a1c0, nv_rd32(priv, 0x10a1c4) + argi);
66 return nv_rd32(priv, 0x10a1c4);
67}
68
69static void
70pmu_exec(struct nv50_devinit_priv *priv, u32 init_addr)
71{
72 nv_wr32(priv, 0x10a104, init_addr);
73 nv_wr32(priv, 0x10a10c, 0x00000000);
74 nv_wr32(priv, 0x10a100, 0x00000002);
75}
76
77static int
78pmu_load(struct nv50_devinit_priv *priv, u8 type, bool post,
79 u32 *init_addr_pmu, u32 *args_addr_pmu)
80{
81 struct nouveau_bios *bios = nouveau_bios(priv);
82 struct nvbios_pmuR pmu;
83
84 if (!nvbios_pmuRm(bios, type, &pmu)) {
85 nv_error(priv, "VBIOS PMU fuc %02x not found\n", type);
86 return -EINVAL;
87 }
88
89 if (!post)
90 return 0;
91
92 pmu_code(priv, pmu.boot_addr_pmu, pmu.boot_addr, pmu.boot_size, false);
93 pmu_code(priv, pmu.code_addr_pmu, pmu.code_addr, pmu.code_size, true);
94 pmu_data(priv, pmu.data_addr_pmu, pmu.data_addr, pmu.data_size);
95
96 if (init_addr_pmu) {
97 *init_addr_pmu = pmu.init_addr_pmu;
98 *args_addr_pmu = pmu.args_addr_pmu;
99 return 0;
100 }
101
102 return pmu_exec(priv, pmu.init_addr_pmu), 0;
103}
104
105static int
106gm204_devinit_post(struct nouveau_subdev *subdev, bool post)
107{
108 struct nv50_devinit_priv *priv = (void *)nouveau_devinit(subdev);
109 struct nouveau_bios *bios = nouveau_bios(priv);
110 struct bit_entry bit_I;
111 u32 init, args;
112 int ret;
113
114 if (bit_entry(bios, 'I', &bit_I) || bit_I.version != 1 ||
115 bit_I.length < 0x1c) {
116 nv_error(priv, "VBIOS PMU init data not found\n");
117 return -EINVAL;
118 }
119
120 /* reset PMU and load init table parser ucode */
121 if (post) {
122 nv_mask(priv, 0x000200, 0x00002000, 0x00000000);
123 nv_mask(priv, 0x000200, 0x00002000, 0x00002000);
124 nv_rd32(priv, 0x000200);
125 while (nv_rd32(priv, 0x10a10c) & 0x00000006) {
126 }
127 }
128
129 ret = pmu_load(priv, 0x04, post, &init, &args);
130 if (ret)
131 return ret;
132
133 /* upload first chunk of init data */
134 if (post) {
135 u32 pmu = pmu_args(priv, args + 0x08, 0x08);
136 u32 img = nv_ro16(bios, bit_I.offset + 0x14);
137 u32 len = nv_ro16(bios, bit_I.offset + 0x16);
138 pmu_data(priv, pmu, img, len);
139 }
140
141 /* upload second chunk of init data */
142 if (post) {
143 u32 pmu = pmu_args(priv, args + 0x08, 0x10);
144 u32 img = nv_ro16(bios, bit_I.offset + 0x18);
145 u32 len = nv_ro16(bios, bit_I.offset + 0x1a);
146 pmu_data(priv, pmu, img, len);
147 }
148
149 /* execute init tables */
150 if (post) {
151 nv_wr32(priv, 0x10a040, 0x00005000);
152 pmu_exec(priv, init);
153 while (!(nv_rd32(priv, 0x10a040) & 0x00002000)) {
154 }
155 }
156
157 /* load and execute some other ucode image (bios therm?) */
158 return pmu_load(priv, 0x01, post, NULL, NULL);
159}
160
161struct nouveau_oclass *
162gm204_devinit_oclass = &(struct nouveau_devinit_impl) {
163 .base.handle = NV_SUBDEV(DEVINIT, 0x07),
164 .base.ofuncs = &(struct nouveau_ofuncs) {
165 .ctor = nv50_devinit_ctor,
166 .dtor = _nouveau_devinit_dtor,
167 .init = nv50_devinit_init,
168 .fini = _nouveau_devinit_fini,
169 },
170 .pll_set = nvc0_devinit_pll_set,
171 .disable = gm107_devinit_disable,
172 .post = gm204_devinit_post,
173}.base;
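[Editor's note] pmu_code() and pmu_data() above use the classic auto-increment index/data pattern: one register (0x10a180 for code, 0x10a1c0 for data) is seeded with the destination PMU offset plus control bits, then successive writes to the companion port (0x10a184 / 0x10a1c4) land at consecutive addresses, with the code path re-seeding the tag register every 0x100 bytes and zero-padding out the final block. The access pattern in isolation (editor's sketch; register names and the stream_words() helper are invented):

static void
stream_words(void __iomem *mmio, u32 index_reg, u32 data_reg,
	     u32 dst, const u32 *src, u32 len)
{
	u32 i;

	/* bit 24 enables address auto-increment on the data port */
	writel(0x01000000 | dst, mmio + index_reg);
	for (i = 0; i < len; i += 4)
		writel(src[i / 4], mmio + data_reg);
}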
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c
index 052ad690b468..65651c50f6ea 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv04.c
@@ -464,4 +464,5 @@ nv04_devinit_oclass = &(struct nouveau_devinit_impl) {
 	},
 	.meminit = nv04_devinit_meminit,
 	.pll_set = nv04_devinit_pll_set,
+	.post = nvbios_init,
 }.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c
index 4a19c10e5178..a2007a3efc4d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv05.c
@@ -136,4 +136,5 @@ nv05_devinit_oclass = &(struct nouveau_devinit_impl) {
 	},
 	.meminit = nv05_devinit_meminit,
 	.pll_set = nv04_devinit_pll_set,
+	.post = nvbios_init,
 }.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c
index 3b8d657da279..178b46f79b50 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv10.c
@@ -107,4 +107,5 @@ nv10_devinit_oclass = &(struct nouveau_devinit_impl) {
 	},
 	.meminit = nv10_devinit_meminit,
 	.pll_set = nv04_devinit_pll_set,
+	.post = nvbios_init,
 }.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c
index 526d0c6faacd..995dd97af3e9 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv1a.c
@@ -34,4 +34,5 @@ nv1a_devinit_oclass = &(struct nouveau_devinit_impl) {
 		.fini = nv04_devinit_fini,
 	},
 	.pll_set = nv04_devinit_pll_set,
+	.post = nvbios_init,
 }.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c
index 04bc9732644c..915089fb46f7 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv20.c
@@ -71,4 +71,5 @@ nv20_devinit_oclass = &(struct nouveau_devinit_impl) {
 	},
 	.meminit = nv20_devinit_meminit,
 	.pll_set = nv04_devinit_pll_set,
+	.post = nvbios_init,
 }.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
index b46c62a1d5d8..968334d1dca4 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.c
@@ -26,6 +26,7 @@
 #include <subdev/bios/dcb.h>
 #include <subdev/bios/disp.h>
 #include <subdev/bios/init.h>
+#include <subdev/ibus.h>
 #include <subdev/vga.h>
 
 #include "nv50.h"
@@ -91,6 +92,7 @@ int
 nv50_devinit_init(struct nouveau_object *object)
 {
 	struct nouveau_bios *bios = nouveau_bios(object);
+	struct nouveau_ibus *ibus = nouveau_ibus(object);
 	struct nv50_devinit_priv *priv = (void *)object;
 	struct nvbios_outp info;
 	struct dcb_output outp;
@@ -105,6 +107,13 @@ nv50_devinit_init(struct nouveau_object *object)
 		}
 	}
 
+	/* some boards appear to require certain priv register timeouts
+	 * to be bumped before running devinit scripts. not a clue why
+	 * the vbios engineers didn't make the scripts just work...
+	 */
+	if (priv->base.post && ibus)
+		nv_ofuncs(ibus)->init(nv_object(ibus));
+
 	ret = nouveau_devinit_init(&priv->base);
 	if (ret)
 		return ret;
@@ -160,4 +169,5 @@ nv50_devinit_oclass = &(struct nouveau_devinit_impl) {
 	},
 	.pll_set = nv50_devinit_pll_set,
 	.disable = nv50_devinit_disable,
+	.post = nvbios_init,
 }.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.h b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.h
index 51d5076333ec..f412bb7f780e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv50.h
@@ -18,4 +18,6 @@ int nva3_devinit_pll_set(struct nouveau_devinit *, u32, u32);
 
 int nvc0_devinit_pll_set(struct nouveau_devinit *, u32, u32);
 
+u64 gm107_devinit_disable(struct nouveau_devinit *);
+
 #endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv84.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv84.c
index 787422505d87..a7c80ded77cd 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv84.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv84.c
@@ -60,4 +60,5 @@ nv84_devinit_oclass = &(struct nouveau_devinit_impl) {
 	},
 	.pll_set = nv50_devinit_pll_set,
 	.disable = nv84_devinit_disable,
+	.post = nvbios_init,
 }.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv98.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv98.c
index 2b0e963fc6f0..a773253a17f6 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nv98.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nv98.c
@@ -59,4 +59,5 @@ nv98_devinit_oclass = &(struct nouveau_devinit_impl) {
 	},
 	.pll_set = nv50_devinit_pll_set,
 	.disable = nv98_devinit_disable,
+	.post = nvbios_init,
 }.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nva3.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nva3.c
index 006cf348bda7..b9cd9e53f760 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nva3.c
@@ -142,4 +142,5 @@ nva3_devinit_oclass = &(struct nouveau_devinit_impl) {
 	.pll_set = nva3_devinit_pll_set,
 	.disable = nva3_devinit_disable,
 	.mmio = nva3_devinit_mmio,
+	.post = nvbios_init,
 }.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nvaf.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nvaf.c
index 4fc68d27eff3..3729846a8e5c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nvaf.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nvaf.c
@@ -60,4 +60,5 @@ nvaf_devinit_oclass = &(struct nouveau_devinit_impl) {
 	},
 	.pll_set = nva3_devinit_pll_set,
 	.disable = nvaf_devinit_disable,
+	.post = nvbios_init,
 }.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/devinit/nvc0.c
index 30c765747eea..80bd7f5eda3d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/nvc0.c
@@ -115,4 +115,5 @@ nvc0_devinit_oclass = &(struct nouveau_devinit_impl) {
 	},
 	.pll_set = nvc0_devinit_pll_set,
 	.disable = nvc0_devinit_disable,
+	.post = nvbios_init,
 }.base;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/devinit/priv.h b/drivers/gpu/drm/nouveau/core/subdev/devinit/priv.h
index f0e8683ad840..cbcd51852472 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/devinit/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/devinit/priv.h
@@ -3,6 +3,7 @@
 
 #include <subdev/bios.h>
 #include <subdev/bios/pll.h>
+#include <subdev/bios/init.h>
 #include <subdev/clock/pll.h>
 #include <subdev/devinit.h>
 
@@ -12,6 +13,7 @@ struct nouveau_devinit_impl {
 	int (*pll_set)(struct nouveau_devinit *, u32 type, u32 freq);
 	u64 (*disable)(struct nouveau_devinit *);
 	u32 (*mmio)(struct nouveau_devinit *, u32);
+	int (*post)(struct nouveau_subdev *, bool);
 };
 
 #define nouveau_devinit_create(p,e,o,d) \
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/base.c b/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
index f009d8a39d9d..c866148c440f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/base.c
@@ -23,37 +23,30 @@
  */
 
 #include <subdev/bios.h>
-#include <subdev/bios/bit.h>
+#include <subdev/bios/M0203.h>
 
 #include "priv.h"
 
 int
 nouveau_fb_bios_memtype(struct nouveau_bios *bios)
 {
-	struct bit_entry M;
-	u8 ramcfg;
-
-	ramcfg = (nv_rd32(bios, 0x101000) & 0x0000003c) >> 2;
-	if (!bit_entry(bios, 'M', &M) && M.version == 2 && M.length >= 5) {
-		u16 table = nv_ro16(bios, M.offset + 3);
-		u8 version = nv_ro08(bios, table + 0);
-		u8 header = nv_ro08(bios, table + 1);
-		u8 record = nv_ro08(bios, table + 2);
-		u8 entries = nv_ro08(bios, table + 3);
-		if (table && version == 0x10 && ramcfg < entries) {
-			u16 entry = table + header + (ramcfg * record);
-			switch (nv_ro08(bios, entry) & 0x0f) {
-			case 0: return NV_MEM_TYPE_DDR2;
-			case 1: return NV_MEM_TYPE_DDR3;
-			case 2: return NV_MEM_TYPE_GDDR3;
-			case 3: return NV_MEM_TYPE_GDDR5;
-			default:
-				break;
-			}
-
-		}
-	}
-
+	const u8 ramcfg = (nv_rd32(bios, 0x101000) & 0x0000003c) >> 2;
+	struct nvbios_M0203E M0203E;
+	u8 ver, hdr;
+
+	if (nvbios_M0203Em(bios, ramcfg, &ver, &hdr, &M0203E)) {
+		switch (M0203E.type) {
+		case M0203E_TYPE_DDR2 : return NV_MEM_TYPE_DDR2;
+		case M0203E_TYPE_DDR3 : return NV_MEM_TYPE_DDR3;
+		case M0203E_TYPE_GDDR3: return NV_MEM_TYPE_GDDR3;
+		case M0203E_TYPE_GDDR5: return NV_MEM_TYPE_GDDR5;
+		default:
+			nv_warn(bios, "M0203E type %02x\n", M0203E.type);
+			return NV_MEM_TYPE_UNKNOWN;
+		}
+	}
+
+	nv_warn(bios, "M0203E not matched!\n");
 	return NV_MEM_TYPE_UNKNOWN;
 }
 
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/gddr3.c b/drivers/gpu/drm/nouveau/core/subdev/fb/gddr3.c
new file mode 100644
index 000000000000..d85a25d027ee
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/gddr3.c
@@ -0,0 +1,117 @@
1/*
2 * Copyright 2013 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs <bskeggs@redhat.com>
23 * Roy Spliet <rspliet@eclipso.eu>
24 */
25
26#include <subdev/bios.h>
27#include "priv.h"
28
29struct ramxlat {
30 int id;
31 u8 enc;
32};
33
34static inline int
35ramxlat(const struct ramxlat *xlat, int id)
36{
37 while (xlat->id >= 0) {
38 if (xlat->id == id)
39 return xlat->enc;
40 xlat++;
41 }
42 return -EINVAL;
43}
44
45static const struct ramxlat
46ramgddr3_cl_lo[] = {
47 { 7, 7 }, { 8, 0 }, { 9, 1 }, { 10, 2 }, { 11, 3 },
48 /* the below are mentioned in some, but not all, gddr3 docs */
49 { 12, 4 }, { 13, 5 }, { 14, 6 },
50 /* XXX: Per Samsung docs, are these used? They overlap with Qimonda */
51 /* { 4, 4 }, { 5, 5 }, { 6, 6 }, { 12, 8 }, { 13, 9 }, { 14, 10 },
52 * { 15, 11 }, */
53 { -1 }
54};
55
56static const struct ramxlat
57ramgddr3_cl_hi[] = {
58 { 10, 2 }, { 11, 3 }, { 12, 4 }, { 13, 5 }, { 14, 6 }, { 15, 7 },
59 { 16, 0 }, { 17, 1 },
60 { -1 }
61};
62
63static const struct ramxlat
64ramgddr3_wr_lo[] = {
65 { 5, 2 }, { 7, 4 }, { 8, 5 }, { 9, 6 }, { 10, 7 },
66 { 11, 0 },
67 /* the below are mentioned in some, but not all, gddr3 docs */
68 { 4, 1 }, { 6, 3 }, { 12, 1 }, { 13 , 2 },
69 { -1 }
70};
71
72int
73nouveau_gddr3_calc(struct nouveau_ram *ram)
74{
75 int CL, WR, CWL, DLL = 0, ODT = 0, hi;
76
77 switch (ram->next->bios.timing_ver) {
78 case 0x10:
79 CWL = ram->next->bios.timing_10_CWL;
80 CL = ram->next->bios.timing_10_CL;
81 WR = ram->next->bios.timing_10_WR;
82 DLL = !ram->next->bios.ramcfg_10_DLLoff;
83 ODT = ram->next->bios.timing_10_ODT;
84 break;
85 case 0x20:
86 CWL = (ram->next->bios.timing[1] & 0x00000f80) >> 7;
87 CL = (ram->next->bios.timing[1] & 0x0000001f) >> 0;
88 WR = (ram->next->bios.timing[2] & 0x007f0000) >> 16;
89 /* XXX: Get these values from the VBIOS instead */
90 DLL = !(ram->mr[1] & 0x1);
91 ODT = (ram->mr[1] & 0x004) >> 2 |
92 (ram->mr[1] & 0x040) >> 5 |
93 (ram->mr[1] & 0x200) >> 7;
94 break;
95 default:
96 return -ENOSYS;
97 }
98
99 hi = ram->mr[2] & 0x1;
100 CL = ramxlat(hi ? ramgddr3_cl_hi : ramgddr3_cl_lo, CL);
101 WR = ramxlat(ramgddr3_wr_lo, WR);
102 if (CL < 0 || CWL < 1 || CWL > 7 || WR < 0)
103 return -EINVAL;
104
105 ram->mr[0] &= ~0xf74;
106 ram->mr[0] |= (CWL & 0x07) << 9;
107 ram->mr[0] |= (CL & 0x07) << 4;
108 ram->mr[0] |= (CL & 0x08) >> 1;
109
110 ram->mr[1] &= ~0x3fc;
111 ram->mr[1] |= (ODT & 0x03) << 2;
112 ram->mr[1] |= (ODT & 0x03) << 8;
113 ram->mr[1] |= (WR & 0x03) << 4;
114 ram->mr[1] |= (WR & 0x04) << 5;
115 ram->mr[1] |= !DLL << 6;
116 return 0;
117}
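[Editor's note] ramxlat() is a sentinel-terminated lookup: it scans the table until it hits the `{ -1 }` terminator and returns either the hardware encoding for the given timing value or -EINVAL. Against the tables above, for instance:

	ramxlat(ramgddr3_cl_lo, 11);	/* -> 3 */
	ramxlat(ramgddr3_cl_hi, 16);	/* -> 0 */
	ramxlat(ramgddr3_cl_lo, 6);	/* -> -EINVAL (that entry is commented out) */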
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
index 60322e906dd4..283863f7aa9b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
@@ -37,6 +37,7 @@ extern struct nouveau_oclass gm107_ram_oclass;
 
 int nouveau_sddr2_calc(struct nouveau_ram *ram);
 int nouveau_sddr3_calc(struct nouveau_ram *ram);
+int nouveau_gddr3_calc(struct nouveau_ram *ram);
 int nouveau_gddr5_calc(struct nouveau_ram *ram, bool nuts);
 
 #define nouveau_fb_create(p,e,c,d) \
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h b/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h
index d1fbbe4b00a2..0ac7256443bb 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h
@@ -141,6 +141,20 @@ ramfuc_wait_vblank(struct ramfuc *ram)
 }
 
 static inline void
+ramfuc_train(struct ramfuc *ram)
+{
+	nouveau_memx_train(ram->memx);
+}
+
+static inline int
+ramfuc_train_result(struct nouveau_fb *pfb, u32 *result, u32 rsize)
+{
+	struct nouveau_pwr *ppwr = nouveau_pwr(pfb);
+
+	return nouveau_memx_train_result(ppwr, result, rsize);
+}
+
+static inline void
 ramfuc_block(struct ramfuc *ram)
 {
 	nouveau_memx_block(ram->memx);
@@ -162,6 +176,8 @@ ramfuc_unblock(struct ramfuc *ram)
 #define ram_wait(s,r,m,d,n) ramfuc_wait(&(s)->base, (r), (m), (d), (n))
 #define ram_nsec(s,n) ramfuc_nsec(&(s)->base, (n))
 #define ram_wait_vblank(s) ramfuc_wait_vblank(&(s)->base)
+#define ram_train(s) ramfuc_train(&(s)->base)
+#define ram_train_result(s,r,l) ramfuc_train_result((s), (r), (l))
 #define ram_block(s) ramfuc_block(&(s)->base)
 #define ram_unblock(s) ramfuc_unblock(&(s)->base)
 
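[Editor's note] The two new wrappers are consumed by the nva3 link-training code later in this patch, in the expected pattern: ram_train() queues the training opcode into the memx script being built, the script is run with ram_exec(), and ram_train_result() then pulls the per-tap result words back out through PPWR:

	ram_train(fuc);
	/* ... remainder of the script, then ram_exec(fuc, true) ... */
	ram_train_result(pfb, result, 64);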
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c
index 3601deca0bd5..3b38a538845d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnva3.c
@@ -20,86 +20,512 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  *
  * Authors: Ben Skeggs
+ *          Roy Spliet <rspliet@eclipso.eu>
  */
 
 #include <subdev/bios.h>
 #include <subdev/bios/bit.h>
 #include <subdev/bios/pll.h>
 #include <subdev/bios/rammap.h>
+#include <subdev/bios/M0205.h>
 #include <subdev/bios/timing.h>
 
 #include <subdev/clock/nva3.h>
 #include <subdev/clock/pll.h>
 
+#include <subdev/gpio.h>
+
+#include <subdev/timer.h>
+
+#include <engine/fifo.h>
+
 #include <core/option.h>
 
 #include "ramfuc.h"
 
 #include "nv50.h"
 
+/* XXX: Remove when memx gains GPIO support */
+extern int nv50_gpio_location(int line, u32 *reg, u32 *shift);
+
 struct nva3_ramfuc {
 	struct ramfuc base;
+	struct ramfuc_reg r_0x001610;
+	struct ramfuc_reg r_0x001700;
+	struct ramfuc_reg r_0x002504;
 	struct ramfuc_reg r_0x004000;
 	struct ramfuc_reg r_0x004004;
 	struct ramfuc_reg r_0x004018;
 	struct ramfuc_reg r_0x004128;
 	struct ramfuc_reg r_0x004168;
+	struct ramfuc_reg r_0x100080;
 	struct ramfuc_reg r_0x100200;
 	struct ramfuc_reg r_0x100210;
 	struct ramfuc_reg r_0x100220[9];
+	struct ramfuc_reg r_0x100264;
 	struct ramfuc_reg r_0x1002d0;
 	struct ramfuc_reg r_0x1002d4;
 	struct ramfuc_reg r_0x1002dc;
 	struct ramfuc_reg r_0x10053c;
 	struct ramfuc_reg r_0x1005a0;
 	struct ramfuc_reg r_0x1005a4;
+	struct ramfuc_reg r_0x100700;
 	struct ramfuc_reg r_0x100714;
 	struct ramfuc_reg r_0x100718;
 	struct ramfuc_reg r_0x10071c;
+	struct ramfuc_reg r_0x100720;
 	struct ramfuc_reg r_0x100760;
 	struct ramfuc_reg r_0x1007a0;
 	struct ramfuc_reg r_0x1007e0;
+	struct ramfuc_reg r_0x100da0;
 	struct ramfuc_reg r_0x10f804;
 	struct ramfuc_reg r_0x1110e0;
 	struct ramfuc_reg r_0x111100;
 	struct ramfuc_reg r_0x111104;
+	struct ramfuc_reg r_0x1111e0;
+	struct ramfuc_reg r_0x111400;
 	struct ramfuc_reg r_0x611200;
 	struct ramfuc_reg r_mr[4];
+	struct ramfuc_reg r_gpioFBVREF;
+};
+
+struct nva3_ltrain {
+	enum {
+		NVA3_TRAIN_UNKNOWN,
+		NVA3_TRAIN_UNSUPPORTED,
+		NVA3_TRAIN_ONCE,
+		NVA3_TRAIN_EXEC,
+		NVA3_TRAIN_DONE
+	} state;
+	u32 r_100720;
+	u32 r_1111e0;
+	u32 r_111400;
+	struct nouveau_mem *mem;
 };
 
 struct nva3_ram {
 	struct nouveau_ram base;
 	struct nva3_ramfuc fuc;
+	struct nva3_ltrain ltrain;
 };
 
112void
113nva3_link_train_calc(u32 *vals, struct nva3_ltrain *train)
114{
115 int i, lo, hi;
116 u8 median[8], bins[4] = {0, 0, 0, 0}, bin = 0, qty = 0;
117
118 for (i = 0; i < 8; i++) {
119 for (lo = 0; lo < 0x40; lo++) {
120 if (!(vals[lo] & 0x80000000))
121 continue;
122 if (vals[lo] & (0x101 << i))
123 break;
124 }
125
126 if (lo == 0x40)
127 return;
128
129 for (hi = lo + 1; hi < 0x40; hi++) {
130 if (!(vals[lo] & 0x80000000))
131 continue;
132 if (!(vals[hi] & (0x101 << i))) {
133 hi--;
134 break;
135 }
136 }
137
138 median[i] = ((hi - lo) >> 1) + lo;
139 bins[(median[i] & 0xf0) >> 4]++;
140 median[i] += 0x30;
141 }
142
143 /* Find the best value for 0x1111e0 */
144 for (i = 0; i < 4; i++) {
145 if (bins[i] > qty) {
146 bin = i + 3;
147 qty = bins[i];
148 }
149 }
150
151 train->r_100720 = 0;
152 for (i = 0; i < 8; i++) {
153 median[i] = max(median[i], (u8) (bin << 4));
154 median[i] = min(median[i], (u8) ((bin << 4) | 0xf));
155
156 train->r_100720 |= ((median[i] & 0x0f) << (i << 2));
157 }
158
159 train->r_1111e0 = 0x02000000 | (bin * 0x101);
160 train->r_111400 = 0x0;
161}
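/* [Editor's note, not part of the patch] What nva3_link_train_calc()
 * does, roughly: for each of the eight data bits it scans the 0x40
 * delay taps for the first and last tap marked passing, takes the
 * midpoint as that bit's median, and buckets the medians by high
 * nibble.  The densest bucket becomes "bin"; every median is clamped
 * into that bucket's range, the low nibbles are packed four bits
 * apiece into r_100720, and r_1111e0/r_111400 encode the chosen bin.
 * Note that the second scan tests vals[lo] rather than vals[hi];
 * that looks as if it was intended to be vals[hi], but it is left
 * here as committed.
 */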
162
163/*
164 * Link training for (at least) DDR3
165 */
166int
167nva3_link_train(struct nouveau_fb *pfb)
168{
169 struct nouveau_bios *bios = nouveau_bios(pfb);
170 struct nva3_ram *ram = (void *)pfb->ram;
171 struct nouveau_clock *clk = nouveau_clock(pfb);
172 struct nva3_ltrain *train = &ram->ltrain;
173 struct nouveau_device *device = nv_device(pfb);
174 struct nva3_ramfuc *fuc = &ram->fuc;
175 u32 *result, r1700;
176 int ret, i;
177 struct nvbios_M0205T M0205T = { 0 };
178 u8 ver, hdr, cnt, len, snr, ssz;
179 unsigned int clk_current;
180 unsigned long flags;
181 unsigned long *f = &flags;
182
183 if (nouveau_boolopt(device->cfgopt, "NvMemExec", true) != true)
184 return -ENOSYS;
185
186 /* XXX: Multiple partitions? */
187 result = kmalloc(64 * sizeof(u32), GFP_KERNEL);
188 if (!result)
189 return -ENOMEM;
190
191 train->state = NVA3_TRAIN_EXEC;
192
193 /* Clock speeds for training and back */
194 nvbios_M0205Tp(bios, &ver, &hdr, &cnt, &len, &snr, &ssz, &M0205T);
195 if (M0205T.freq == 0)
196 return -ENOENT;
197
198 clk_current = clk->read(clk, nv_clk_src_mem);
199
200 ret = nva3_clock_pre(clk, f);
201 if (ret)
202 goto out;
203
204 /* First: clock up/down */
205 ret = ram->base.calc(pfb, (u32) M0205T.freq * 1000);
206 if (ret)
207 goto out;
208
209 /* Do this *after* calc, eliminates write in script */
210 nv_wr32(pfb, 0x111400, 0x00000000);
211 /* XXX: Magic writes that improve train reliability? */
212 nv_mask(pfb, 0x100674, 0x0000ffff, 0x00000000);
213 nv_mask(pfb, 0x1005e4, 0x0000ffff, 0x00000000);
214 nv_mask(pfb, 0x100b0c, 0x000000ff, 0x00000000);
215 nv_wr32(pfb, 0x100c04, 0x00000400);
216
217 /* Now the training script */
218 r1700 = ram_rd32(fuc, 0x001700);
219
220 ram_mask(fuc, 0x100200, 0x00000800, 0x00000000);
221 ram_wr32(fuc, 0x611200, 0x3300);
222 ram_wait_vblank(fuc);
223 ram_wait(fuc, 0x611200, 0x00000003, 0x00000000, 500000);
224 ram_mask(fuc, 0x001610, 0x00000083, 0x00000003);
225 ram_mask(fuc, 0x100080, 0x00000020, 0x00000000);
226 ram_mask(fuc, 0x10f804, 0x80000000, 0x00000000);
227 ram_wr32(fuc, 0x001700, 0x00000000);
228
229 ram_train(fuc);
230
231 /* Reset */
232 ram_mask(fuc, 0x10f804, 0x80000000, 0x80000000);
233 ram_wr32(fuc, 0x10053c, 0x0);
234 ram_wr32(fuc, 0x100720, train->r_100720);
235 ram_wr32(fuc, 0x1111e0, train->r_1111e0);
236 ram_wr32(fuc, 0x111400, train->r_111400);
237 ram_nuke(fuc, 0x100080);
238 ram_mask(fuc, 0x100080, 0x00000020, 0x00000020);
239 ram_nsec(fuc, 1000);
240
241 ram_wr32(fuc, 0x001700, r1700);
242 ram_mask(fuc, 0x001610, 0x00000083, 0x00000080);
243 ram_wr32(fuc, 0x611200, 0x3330);
244 ram_mask(fuc, 0x100200, 0x00000800, 0x00000800);
245
246 ram_exec(fuc, true);
247
248 ram->base.calc(pfb, clk_current);
249 ram_exec(fuc, true);
250
251 /* Post-processing, avoids flicker */
252 nv_mask(pfb, 0x616308, 0x10, 0x10);
253 nv_mask(pfb, 0x616b08, 0x10, 0x10);
254
255 nva3_clock_post(clk, f);
256
257 ram_train_result(pfb, result, 64);
258 for (i = 0; i < 64; i++)
259 nv_debug(pfb, "Train: %08x", result[i]);
260 nva3_link_train_calc(result, train);
261
262 nv_debug(pfb, "Train: %08x %08x %08x", train->r_100720,
263 train->r_1111e0, train->r_111400);
264
265 kfree(result);
266
267 train->state = NVA3_TRAIN_DONE;
268
269 return ret;
270
271out:
272	if (ret == -EBUSY)
273 f = NULL;
274
275 train->state = NVA3_TRAIN_UNSUPPORTED;
276
277 nva3_clock_post(clk, f);
278 return ret;
279}
280
281int
282nva3_link_train_init(struct nouveau_fb *pfb)
283{
284 static const u32 pattern[16] = {
285 0xaaaaaaaa, 0xcccccccc, 0xdddddddd, 0xeeeeeeee,
286 0x00000000, 0x11111111, 0x44444444, 0xdddddddd,
287 0x33333333, 0x55555555, 0x77777777, 0x66666666,
288 0x99999999, 0x88888888, 0xeeeeeeee, 0xbbbbbbbb,
289 };
290 struct nouveau_bios *bios = nouveau_bios(pfb);
291 struct nva3_ram *ram = (void *)pfb->ram;
292 struct nva3_ltrain *train = &ram->ltrain;
293 struct nouveau_mem *mem;
294 struct nvbios_M0205E M0205E;
295 u8 ver, hdr, cnt, len;
296 u32 r001700;
297 int ret, i = 0;
298
299 train->state = NVA3_TRAIN_UNSUPPORTED;
300
301 /* We support type "5"
302 * XXX: training pattern table appears to be unused for this routine */
303 if (!nvbios_M0205Ep(bios, i, &ver, &hdr, &cnt, &len, &M0205E))
304 return -ENOENT;
305
306 if (M0205E.type != 5)
307 return 0;
308
309 train->state = NVA3_TRAIN_ONCE;
310
311 ret = pfb->ram->get(pfb, 0x8000, 0x10000, 0, 0x800, &ram->ltrain.mem);
312 if (ret)
313 return ret;
314
315 mem = ram->ltrain.mem;
316
317 nv_wr32(pfb, 0x100538, 0x10000000 | (mem->offset >> 16));
318 nv_wr32(pfb, 0x1005a8, 0x0000ffff);
319 nv_mask(pfb, 0x10f800, 0x00000001, 0x00000001);
320
321 for (i = 0; i < 0x30; i++) {
322 nv_wr32(pfb, 0x10f8c0, (i << 8) | i);
323 nv_wr32(pfb, 0x10f900, pattern[i % 16]);
324 }
325
326 for (i = 0; i < 0x30; i++) {
327 nv_wr32(pfb, 0x10f8e0, (i << 8) | i);
328 nv_wr32(pfb, 0x10f920, pattern[i % 16]);
329 }
330
331 /* And upload the pattern */
332 r001700 = nv_rd32(pfb, 0x1700);
333 nv_wr32(pfb, 0x1700, mem->offset >> 16);
334 for (i = 0; i < 16; i++)
335 nv_wr32(pfb, 0x700000 + (i << 2), pattern[i]);
336 for (i = 0; i < 16; i++)
337 nv_wr32(pfb, 0x700100 + (i << 2), pattern[i]);
338 nv_wr32(pfb, 0x1700, r001700);
339
340 train->r_100720 = nv_rd32(pfb, 0x100720);
341 train->r_1111e0 = nv_rd32(pfb, 0x1111e0);
342 train->r_111400 = nv_rd32(pfb, 0x111400);
343
344 return 0;
345}
346
347void
348nva3_link_train_fini(struct nouveau_fb *pfb)
349{
350 struct nva3_ram *ram = (void *)pfb->ram;
351
352 if (ram->ltrain.mem)
353 pfb->ram->put(pfb, &ram->ltrain.mem);
354}
355
356/*
357 * RAM reclocking
358 */
359#define T(t) cfg->timing_10_##t
360static int
361nva3_ram_timing_calc(struct nouveau_fb *pfb, u32 *timing)
362{
363 struct nva3_ram *ram = (void *)pfb->ram;
364 struct nvbios_ramcfg *cfg = &ram->base.target.bios;
365 int tUNK_base, tUNK_40_0, prevCL;
366 u32 cur2, cur3, cur7, cur8;
367
368 cur2 = nv_rd32(pfb, 0x100228);
369 cur3 = nv_rd32(pfb, 0x10022c);
370 cur7 = nv_rd32(pfb, 0x10023c);
371 cur8 = nv_rd32(pfb, 0x100240);
372
373
374 switch ((!T(CWL)) * ram->base.type) {
375 case NV_MEM_TYPE_DDR2:
376 T(CWL) = T(CL) - 1;
377 break;
378 case NV_MEM_TYPE_GDDR3:
379 T(CWL) = ((cur2 & 0xff000000) >> 24) + 1;
380 break;
381 }
382
383 prevCL = (cur3 & 0x000000ff) + 1;
384 tUNK_base = ((cur7 & 0x00ff0000) >> 16) - prevCL;
385
386 timing[0] = (T(RP) << 24 | T(RAS) << 16 | T(RFC) << 8 | T(RC));
387 timing[1] = (T(WR) + 1 + T(CWL)) << 24 |
388 max_t(u8,T(18), 1) << 16 |
389 (T(WTR) + 1 + T(CWL)) << 8 |
390 (5 + T(CL) - T(CWL));
391 timing[2] = (T(CWL) - 1) << 24 |
392 (T(RRD) << 16) |
393 (T(RCDWR) << 8) |
394 T(RCDRD);
395 timing[3] = (cur3 & 0x00ff0000) |
396 (0x30 + T(CL)) << 24 |
397 (0xb + T(CL)) << 8 |
398 (T(CL) - 1);
399 timing[4] = T(20) << 24 |
400 T(21) << 16 |
401 T(13) << 8 |
402 T(13);
403 timing[5] = T(RFC) << 24 |
404 max_t(u8,T(RCDRD), T(RCDWR)) << 16 |
405 max_t(u8, (T(CWL) + 6), (T(CL) + 2)) << 8 |
406 T(RP);
407 timing[6] = (0x5a + T(CL)) << 16 |
408 max_t(u8, 1, (6 - T(CL) + T(CWL))) << 8 |
409 (0x50 + T(CL) - T(CWL));
410 timing[7] = (cur7 & 0xff000000) |
411 ((tUNK_base + T(CL)) << 16) |
412 0x202;
413 timing[8] = cur8 & 0xffffff00;
414
415 switch (ram->base.type) {
416 case NV_MEM_TYPE_DDR2:
417 case NV_MEM_TYPE_GDDR3:
418 tUNK_40_0 = prevCL - (cur8 & 0xff);
419 if (tUNK_40_0 > 0)
420 timing[8] |= T(CL);
421 break;
422 default:
423 break;
424 }
425
426 nv_debug(pfb, "Entry: 220: %08x %08x %08x %08x\n",
427 timing[0], timing[1], timing[2], timing[3]);
428 nv_debug(pfb, " 230: %08x %08x %08x %08x\n",
429 timing[4], timing[5], timing[6], timing[7]);
430 nv_debug(pfb, " 240: %08x\n", timing[8]);
431 return 0;
432}
433#undef T
434
435static void
436nouveau_sddr2_dll_reset(struct nva3_ramfuc *fuc)
437{
438 ram_mask(fuc, mr[0], 0x100, 0x100);
439 ram_nsec(fuc, 1000);
440 ram_mask(fuc, mr[0], 0x100, 0x000);
441 ram_nsec(fuc, 1000);
442}
443
444static void
445nouveau_sddr3_dll_disable(struct nva3_ramfuc *fuc, u32 *mr)
446{
447 u32 mr1_old = ram_rd32(fuc, mr[1]);
448
449 if (!(mr1_old & 0x1)) {
450 ram_wr32(fuc, 0x1002d4, 0x00000001);
451 ram_wr32(fuc, mr[1], mr[1]);
452 ram_nsec(fuc, 1000);
453 }
454}
455
456static void
457nouveau_gddr3_dll_disable(struct nva3_ramfuc *fuc, u32 *mr)
458{
459 u32 mr1_old = ram_rd32(fuc, mr[1]);
460
461 if (!(mr1_old & 0x40)) {
462 ram_wr32(fuc, mr[1], mr[1]);
463 ram_nsec(fuc, 1000);
464 }
465}
466
467static void
468nva3_ram_lock_pll(struct nva3_ramfuc *fuc, struct nva3_clock_info *mclk)
469{
470 ram_wr32(fuc, 0x004004, mclk->pll);
471 ram_mask(fuc, 0x004000, 0x00000001, 0x00000001);
472 ram_mask(fuc, 0x004000, 0x00000010, 0x00000000);
473 ram_wait(fuc, 0x004000, 0x00020000, 0x00020000, 64000);
474 ram_mask(fuc, 0x004000, 0x00000010, 0x00000010);
475}
476
477static void
478nva3_ram_fbvref(struct nva3_ramfuc *fuc, u32 val)
479{
480 struct nouveau_gpio *gpio = nouveau_gpio(fuc->base.pfb);
481 struct dcb_gpio_func func;
482 u32 reg, sh, gpio_val;
483 int ret;
484
485 if (gpio->get(gpio, 0, 0x2e, DCB_GPIO_UNUSED) != val) {
486 ret = gpio->find(gpio, 0, 0x2e, DCB_GPIO_UNUSED, &func);
487 if (ret)
488 return;
489
490 nv50_gpio_location(func.line, &reg, &sh);
491 gpio_val = ram_rd32(fuc, gpioFBVREF);
492 if (gpio_val & (8 << sh))
493 val = !val;
494
495 ram_mask(fuc, gpioFBVREF, (0x3 << sh), ((val | 0x2) << sh));
496 ram_nsec(fuc, 20000);
497 }
498}
499
 static int
 nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
 {
 	struct nouveau_bios *bios = nouveau_bios(pfb);
 	struct nva3_ram *ram = (void *)pfb->ram;
 	struct nva3_ramfuc *fuc = &ram->fuc;
+	struct nva3_ltrain *train = &ram->ltrain;
 	struct nva3_clock_info mclk;
 	struct nouveau_ram_data *next;
 	u8 ver, hdr, cnt, len, strap;
 	u32 data;
-	u32 r004018, r100760, ctrl;
+	u32 r004018, r100760, r100da0, r111100, ctrl;
 	u32 unk714, unk718, unk71c;
 	int ret, i;
+	u32 timing[9];
+	bool pll2pll;
 
 	next = &ram->base.target;
 	next->freq = freq;
 	ram->base.next = next;
 
+	if (ram->ltrain.state == NVA3_TRAIN_ONCE)
+		nva3_link_train(pfb);
+
 	/* lookup memory config data relevant to the target frequency */
 	i = 0;
-	while ((data = nvbios_rammapEp(bios, i++, &ver, &hdr, &cnt, &len,
-			&next->bios))) {
-		if (freq / 1000 >= next->bios.rammap_min &&
-		    freq / 1000 <= next->bios.rammap_max)
-			break;
-	}
-
-	if (!data || ver != 0x10 || hdr < 0x0e) {
+	data = nvbios_rammapEm(bios, freq / 1000, &ver, &hdr, &cnt, &len,
+			&next->bios);
+	if (!data || ver != 0x10 || hdr < 0x05) {
 		nv_error(pfb, "invalid/missing rammap entry\n");
 		return -EINVAL;
 	}
@@ -113,7 +539,7 @@ nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
 
 	data = nvbios_rammapSp(bios, data, ver, hdr, cnt, len, strap,
 			       &ver, &hdr, &next->bios);
-	if (!data || ver != 0x10 || hdr < 0x0e) {
+	if (!data || ver != 0x10 || hdr < 0x09) {
 		nv_error(pfb, "invalid/missing ramcfg entry\n");
 		return -EINVAL;
 	}
@@ -123,7 +549,7 @@ nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
 	data = nvbios_timingEp(bios, next->bios.ramcfg_timing,
 			       &ver, &hdr, &cnt, &len,
 			       &next->bios);
-	if (!data || ver != 0x10 || hdr < 0x19) {
+	if (!data || ver != 0x10 || hdr < 0x17) {
 		nv_error(pfb, "invalid/missing timing entry\n");
 		return -EINVAL;
 	}
@@ -135,53 +561,99 @@ nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
 		return ret;
 	}
 
+	nva3_ram_timing_calc(pfb, timing);
+
 	ret = ram_init(fuc, pfb);
 	if (ret)
 		return ret;
 
+	/* Determine ram-specific MR values */
+	ram->base.mr[0] = ram_rd32(fuc, mr[0]);
+	ram->base.mr[1] = ram_rd32(fuc, mr[1]);
+	ram->base.mr[2] = ram_rd32(fuc, mr[2]);
+
+	switch (ram->base.type) {
+	case NV_MEM_TYPE_DDR2:
+		ret = nouveau_sddr2_calc(&ram->base);
+		break;
+	case NV_MEM_TYPE_DDR3:
+		ret = nouveau_sddr3_calc(&ram->base);
+		break;
+	case NV_MEM_TYPE_GDDR3:
+		ret = nouveau_gddr3_calc(&ram->base);
+		break;
+	default:
+		ret = -ENOSYS;
+		break;
+	}
+
+	if (ret)
+		return ret;
+
 	/* XXX: where the fuck does 750MHz come from? */
 	if (freq <= 750000) {
 		r004018 = 0x10000000;
 		r100760 = 0x22222222;
+		r100da0 = 0x00000010;
 	} else {
 		r004018 = 0x00000000;
 		r100760 = 0x00000000;
+		r100da0 = 0x00000000;
 	}
 
+	if (!next->bios.ramcfg_10_DLLoff)
+		r004018 |= 0x00004000;
+
+	/* pll2pll requires to switch to a safe clock first */
 	ctrl = ram_rd32(fuc, 0x004000);
-	if (ctrl & 0x00000008) {
-		if (mclk.pll) {
-			ram_mask(fuc, 0x004128, 0x00000101, 0x00000101);
-			ram_wr32(fuc, 0x004004, mclk.pll);
-			ram_wr32(fuc, 0x004000, (ctrl |= 0x00000001));
-			ram_wr32(fuc, 0x004000, (ctrl &= 0xffffffef));
-			ram_wait(fuc, 0x004000, 0x00020000, 0x00020000, 64000);
-			ram_wr32(fuc, 0x004000, (ctrl |= 0x00000010));
-			ram_wr32(fuc, 0x004018, 0x00005000 | r004018);
-			ram_wr32(fuc, 0x004000, (ctrl |= 0x00000004));
-		}
-	} else {
-		u32 ssel = 0x00000101;
-		if (mclk.clk)
-			ssel |= mclk.clk;
-		else
-			ssel |= 0x00080000; /* 324MHz, shouldn't matter... */
-		ram_mask(fuc, 0x004168, 0x003f3141, ctrl);
-	}
+	pll2pll = (!(ctrl & 0x00000008)) && mclk.pll;
 
+	/* Pre, NVIDIA does this outside the script */
 	if (next->bios.ramcfg_10_02_10) {
 		ram_mask(fuc, 0x111104, 0x00000600, 0x00000000);
 	} else {
 		ram_mask(fuc, 0x111100, 0x40000000, 0x40000000);
 		ram_mask(fuc, 0x111104, 0x00000180, 0x00000000);
 	}
+	/* Always disable this bit during reclock */
+	ram_mask(fuc, 0x100200, 0x00000800, 0x00000000);
+
+	/* If switching from non-pll to pll, lock before disabling FB */
+	if (mclk.pll && !pll2pll) {
+		ram_mask(fuc, 0x004128, 0x003f3141, mclk.clk | 0x00000101);
+		nva3_ram_lock_pll(fuc, &mclk);
+	}
+
+	/* Start with disabling some CRTCs and PFIFO? */
+	ram_wait_vblank(fuc);
+	ram_wr32(fuc, 0x611200, 0x3300);
+	ram_mask(fuc, 0x002504, 0x1, 0x1);
+	ram_nsec(fuc, 10000);
+	ram_wait(fuc, 0x002504, 0x10, 0x10, 20000); /* XXX: or longer? */
+	ram_block(fuc);
+	ram_nsec(fuc, 2000);
+
+	if (!next->bios.ramcfg_10_02_10) {
+		if (ram->base.type == NV_MEM_TYPE_GDDR3)
+			ram_mask(fuc, 0x111100, 0x04020000, 0x00020000);
+		else
+			ram_mask(fuc, 0x111100, 0x04020000, 0x04020000);
+	}
+
+	/* If we're disabling the DLL, do it now */
+	switch (next->bios.ramcfg_10_DLLoff * ram->base.type) {
+	case NV_MEM_TYPE_DDR3:
+		nouveau_sddr3_dll_disable(fuc, ram->base.mr);
+		break;
+	case NV_MEM_TYPE_GDDR3:
+		nouveau_gddr3_dll_disable(fuc, ram->base.mr);
+		break;
+	}
 
-	if (!next->bios.rammap_10_04_02)
-		ram_mask(fuc, 0x100200, 0x00000800, 0x00000000);
-	ram_wr32(fuc, 0x611200, 0x00003300);
-	if (!next->bios.ramcfg_10_02_10)
-		ram_wr32(fuc, 0x111100, 0x4c020000); /*XXX*/
+	if (fuc->r_gpioFBVREF.addr && next->bios.timing_10_ODT)
+		nva3_ram_fbvref(fuc, 0);
 
+	/* Brace RAM for impact */
 	ram_wr32(fuc, 0x1002d4, 0x00000001);
 	ram_wr32(fuc, 0x1002d0, 0x00000001);
 	ram_wr32(fuc, 0x1002d0, 0x00000001);
@@ -189,24 +661,38 @@ nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
 	ram_wr32(fuc, 0x1002dc, 0x00000001);
 	ram_nsec(fuc, 2000);
 
-	ctrl = ram_rd32(fuc, 0x004000);
-	if (!(ctrl & 0x00000008) && mclk.pll) {
-		ram_wr32(fuc, 0x004000, (ctrl |= 0x00000008));
+	if (nv_device(pfb)->chipset == 0xa3 && freq <= 500000)
+		ram_mask(fuc, 0x100700, 0x00000006, 0x00000006);
+
+	/* Fiddle with clocks */
+	/* There are 4 scenarios:
+	 * pll->pll: first switch to a 324MHz clock, set up new PLL, switch
+	 * clk->pll: Set up new PLL, switch
+	 * pll->clk: Set up clock, switch
+	 * clk->clk: Overwrite ctrl and other bits, switch */
+
+	/* Switch to regular clock - 324MHz */
+	if (pll2pll) {
+		ram_mask(fuc, 0x004000, 0x00000004, 0x00000004);
+		ram_mask(fuc, 0x004168, 0x003f3141, 0x00083101);
+		ram_mask(fuc, 0x004000, 0x00000008, 0x00000008);
 		ram_mask(fuc, 0x1110e0, 0x00088000, 0x00088000);
 		ram_wr32(fuc, 0x004018, 0x00001000);
-		ram_wr32(fuc, 0x004000, (ctrl &= ~0x00000001));
-		ram_wr32(fuc, 0x004004, mclk.pll);
-		ram_wr32(fuc, 0x004000, (ctrl |= 0x00000001));
-		udelay(64);
-		ram_wr32(fuc, 0x004018, 0x00005000 | r004018);
-		udelay(20);
-	} else
-	if (!mclk.pll) {
-		ram_mask(fuc, 0x004168, 0x003f3040, mclk.clk);
-		ram_wr32(fuc, 0x004000, (ctrl |= 0x00000008));
+		nva3_ram_lock_pll(fuc, &mclk);
+	}
+
+	if (mclk.pll) {
+		ram_mask(fuc, 0x004000, 0x00000105, 0x00000105);
+		ram_wr32(fuc, 0x004018, 0x00001000 | r004018);
+		ram_wr32(fuc, 0x100da0, r100da0);
+	} else {
+		ram_mask(fuc, 0x004168, 0x003f3141, mclk.clk | 0x00000101);
+		ram_mask(fuc, 0x004000, 0x00000108, 0x00000008);
 		ram_mask(fuc, 0x1110e0, 0x00088000, 0x00088000);
-		ram_wr32(fuc, 0x004018, 0x0000d000 | r004018);
+		ram_wr32(fuc, 0x004018, 0x00009000 | r004018);
+		ram_wr32(fuc, 0x100da0, r100da0);
 	}
+	ram_nsec(fuc, 20000);
 
 	if (next->bios.rammap_10_04_08) {
 		ram_wr32(fuc, 0x1005a0, next->bios.ramcfg_10_06 << 16 |
@@ -220,6 +706,12 @@ nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
 				    0x80000000);
 		ram_mask(fuc, 0x10053c, 0x00001000, 0x00000000);
 	} else {
+		if (train->state == NVA3_TRAIN_DONE) {
+			ram_wr32(fuc, 0x100080, 0x1020);
+			ram_mask(fuc, 0x111400, 0xffffffff, train->r_111400);
+			ram_mask(fuc, 0x1111e0, 0xffffffff, train->r_1111e0);
+			ram_mask(fuc, 0x100720, 0xffffffff, train->r_100720);
+		}
 		ram_mask(fuc, 0x10053c, 0x00001000, 0x00001000);
 		ram_mask(fuc, 0x10f804, 0x80000000, 0x00000000);
 		ram_mask(fuc, 0x100760, 0x22222222, r100760);
@@ -227,65 +719,131 @@ nva3_ram_calc(struct nouveau_fb *pfb, u32 freq)
 		ram_mask(fuc, 0x1007e0, 0x22222222, r100760);
 	}
 
+	if (nv_device(pfb)->chipset == 0xa3 && freq > 500000) {
+		ram_mask(fuc, 0x100700, 0x00000006, 0x00000000);
+	}
+
+	/* Final switch */
 	if (mclk.pll) {
 		ram_mask(fuc, 0x1110e0, 0x00088000, 0x00011000);
-		ram_wr32(fuc, 0x004000, (ctrl &= ~0x00000008));
+		ram_mask(fuc, 0x004000, 0x00000008, 0x00000000);
 	}
 
-	/*XXX: LEAVE */
 	ram_wr32(fuc, 0x1002dc, 0x00000000);
 	ram_wr32(fuc, 0x1002d4, 0x00000001);
 	ram_wr32(fuc, 0x100210, 0x80000000);
-	ram_nsec(fuc, 1000);
-	ram_nsec(fuc, 1000);
+	ram_nsec(fuc, 2000);
 
-	ram_mask(fuc, mr[2], 0x00000000, 0x00000000);
-	ram_nsec(fuc, 1000);
-	ram_nuke(fuc, mr[0]);
-	ram_mask(fuc, mr[0], 0x00000000, 0x00000000);
-	ram_nsec(fuc, 1000);
+	/* Set RAM MR parameters and timings */
+	for (i = 2; i >= 0; i--) {
+		if (ram_rd32(fuc, mr[i]) != ram->base.mr[i]) {
+			ram_wr32(fuc, mr[i], ram->base.mr[i]);
+			ram_nsec(fuc, 1000);
+		}
+	}
 
-	ram_mask(fuc, 0x100220[3], 0x00000000, 0x00000000);
-	ram_mask(fuc, 0x100220[1], 0x00000000, 0x00000000);
-	ram_mask(fuc, 0x100220[6], 0x00000000, 0x00000000);
-	ram_mask(fuc, 0x100220[7], 0x00000000, 0x00000000);
-	ram_mask(fuc, 0x100220[2], 0x00000000, 0x00000000);
-	ram_mask(fuc, 0x100220[4], 0x00000000, 0x00000000);
-	ram_mask(fuc, 0x100220[5], 0x00000000, 0x00000000);
-	ram_mask(fuc, 0x100220[0], 0x00000000, 0x00000000);
-	ram_mask(fuc, 0x100220[8], 0x00000000, 0x00000000);
+	ram_wr32(fuc, 0x100220[3], timing[3]);
+	ram_wr32(fuc, 0x100220[1], timing[1]);
+	ram_wr32(fuc, 0x100220[6], timing[6]);
+	ram_wr32(fuc, 0x100220[7], timing[7]);
+	ram_wr32(fuc, 0x100220[2], timing[2]);
+	ram_wr32(fuc, 0x100220[4], timing[4]);
+	ram_wr32(fuc, 0x100220[5], timing[5]);
+	ram_wr32(fuc, 0x100220[0], timing[0]);
+	ram_wr32(fuc, 0x100220[8], timing[8]);
 
+	/* Misc */
 	ram_mask(fuc, 0x100200, 0x00001000, !next->bios.ramcfg_10_02_08 << 12);
 
-	unk714 = ram_rd32(fuc, 0x100714) & ~0xf0000010;
-	unk718 = ram_rd32(fuc, 0x100718) & ~0x00000100;
-	unk71c = ram_rd32(fuc, 0x10071c) & ~0x00000100;
+	/* XXX: A lot of "chipset"/"ram type" specific stuff...? */
+	unk714 = ram_rd32(fuc, 0x100714) & ~0xf0000130;
+	unk718 = ram_rd32(fuc, 0x100718) & ~0x00000100;
+	unk71c = ram_rd32(fuc, 0x10071c) & ~0x00000100;
+	r111100 = ram_rd32(fuc, 0x111100) & ~0x3a800000;
+
+	if (next->bios.ramcfg_10_02_04) {
+		switch (ram->base.type) {
+		case NV_MEM_TYPE_DDR3:
+			if (nv_device(pfb)->chipset != 0xa8)
+				r111100 |= 0x00000004;
+			/* no break */
+		case NV_MEM_TYPE_DDR2:
+			r111100 |= 0x08000000;
+			break;
+		default:
+			break;
+		}
+	} else {
+		switch (ram->base.type) {
+		case NV_MEM_TYPE_DDR2:
+			r111100 |= 0x1a800000;
+			unk714 |= 0x00000010;
+			break;
+		case NV_MEM_TYPE_DDR3:
+			if (nv_device(pfb)->chipset == 0xa8) {
+				r111100 |= 0x08000000;
+			} else {
+				r111100 &= ~0x00000004;
+				r111100 |= 0x12800000;
+			}
+			unk714 |= 0x00000010;
+			break;
+		case NV_MEM_TYPE_GDDR3:
+			r111100 |= 0x30000000;
+			unk714 |= 0x00000020;
+			break;
+		default:
+			break;
+		}
+	}
+
+	unk714 |= (next->bios.ramcfg_10_04_01) << 8;
+
 	if (next->bios.ramcfg_10_02_20)
 		unk714 |= 0xf0000000;
-	if (!next->bios.ramcfg_10_02_04)
-		unk714 |= 0x00000010;
-	ram_wr32(fuc, 0x100714, unk714);
-
+	if (next->bios.ramcfg_10_02_02)
+		unk718 |= 0x00000100;
 	if (next->bios.ramcfg_10_02_01)
 		unk71c |= 0x00000100;
-	ram_wr32(fuc, 0x10071c, unk71c);
+	if (next->bios.timing_10_24 != 0xff) {
+		unk718 &= ~0xf0000000;
+		unk718 |= next->bios.timing_10_24 << 28;
+	}
+	if (next->bios.ramcfg_10_02_10)
+		r111100 &= ~0x04020000;
 
-	if (next->bios.ramcfg_10_02_02)
-		unk718 |= 0x00000100;
-	ram_wr32(fuc, 0x100718, unk718);
+	ram_mask(fuc, 0x100714, 0xffffffff, unk714);
+	ram_mask(fuc, 0x10071c, 0xffffffff, unk71c);
+	ram_mask(fuc, 0x100718, 0xffffffff, unk718);
+	ram_mask(fuc, 0x111100, 0xffffffff, r111100);
 
-	if (next->bios.ramcfg_10_02_10)
-		ram_wr32(fuc, 0x111100, 0x48000000); /*XXX*/
+	if (fuc->r_gpioFBVREF.addr && !next->bios.timing_10_ODT)
+		nva3_ram_fbvref(fuc, 1);
 
-	ram_mask(fuc, mr[0], 0x100, 0x100);
-	ram_nsec(fuc, 1000);
+	/* Reset DLL */
+	if (!next->bios.ramcfg_10_DLLoff)
282 ram_mask(fuc, mr[0], 0x100, 0x000); 825 nouveau_sddr2_dll_reset(fuc);
283 ram_nsec(fuc, 1000);
284 826
285 ram_nsec(fuc, 2000); 827 if (ram->base.type == NV_MEM_TYPE_GDDR3) {
286 ram_nsec(fuc, 12000); 828 ram_nsec(fuc, 31000);
829 } else {
830 ram_nsec(fuc, 14000);
831 }
832
833 if (ram->base.type == NV_MEM_TYPE_DDR3) {
834 ram_wr32(fuc, 0x100264, 0x1);
835 ram_nsec(fuc, 2000);
836 }
287 837
288 ram_wr32(fuc, 0x611200, 0x00003330); 838 ram_nuke(fuc, 0x100700);
839 ram_mask(fuc, 0x100700, 0x01000000, 0x01000000);
840 ram_mask(fuc, 0x100700, 0x01000000, 0x00000000);
841
842 /* Re-enable FB */
843 ram_unblock(fuc);
844 ram_wr32(fuc, 0x611200, 0x3330);
845
846 /* Post fiddlings */
289 if (next->bios.rammap_10_04_02) 847 if (next->bios.rammap_10_04_02)
290 ram_mask(fuc, 0x100200, 0x00000800, 0x00000800); 848 ram_mask(fuc, 0x100200, 0x00000800, 0x00000800);
291 if (next->bios.ramcfg_10_02_10) { 849 if (next->bios.ramcfg_10_02_10) {
@@ -313,7 +871,22 @@ nva3_ram_prog(struct nouveau_fb *pfb)
313 struct nouveau_device *device = nv_device(pfb); 871 struct nouveau_device *device = nv_device(pfb);
314 struct nva3_ram *ram = (void *)pfb->ram; 872 struct nva3_ram *ram = (void *)pfb->ram;
315 struct nva3_ramfuc *fuc = &ram->fuc; 873 struct nva3_ramfuc *fuc = &ram->fuc;
316 ram_exec(fuc, nouveau_boolopt(device->cfgopt, "NvMemExec", true)); 874 bool exec = nouveau_boolopt(device->cfgopt, "NvMemExec", true);
875
876 if (exec) {
877 nv_mask(pfb, 0x001534, 0x2, 0x2);
878
879 ram_exec(fuc, true);
880
881 /* Post-processing, avoids flicker */
882 nv_mask(pfb, 0x002504, 0x1, 0x0);
883 nv_mask(pfb, 0x001534, 0x2, 0x0);
884
885 nv_mask(pfb, 0x616308, 0x10, 0x10);
886 nv_mask(pfb, 0x616b08, 0x10, 0x10);
887 } else {
888 ram_exec(fuc, false);
889 }
317 return 0; 890 return 0;
318} 891}
319 892
@@ -330,38 +903,24 @@ nva3_ram_init(struct nouveau_object *object)
330{ 903{
331 struct nouveau_fb *pfb = (void *)object->parent; 904 struct nouveau_fb *pfb = (void *)object->parent;
332 struct nva3_ram *ram = (void *)object; 905 struct nva3_ram *ram = (void *)object;
333 int ret, i; 906 int ret;
334 907
335 ret = nouveau_ram_init(&ram->base); 908 ret = nouveau_ram_init(&ram->base);
336 if (ret) 909 if (ret)
337 return ret; 910 return ret;
338 911
339 /* prepare for ddr link training, and load training patterns */ 912 nva3_link_train_init(pfb);
340 switch (ram->base.type) { 913
341 case NV_MEM_TYPE_DDR3: { 914 return 0;
342 if (nv_device(pfb)->chipset == 0xa8) { 915}
343 static const u32 pattern[16] = { 916
344 0xaaaaaaaa, 0xcccccccc, 0xdddddddd, 0xeeeeeeee, 917static int
345 0x00000000, 0x11111111, 0x44444444, 0xdddddddd, 918nva3_ram_fini(struct nouveau_object *object, bool suspend)
346 0x33333333, 0x55555555, 0x77777777, 0x66666666, 919{
347 0x99999999, 0x88888888, 0xeeeeeeee, 0xbbbbbbbb, 920 struct nouveau_fb *pfb = (void *)object->parent;
348 }; 921
349 922 if (!suspend)
350 nv_wr32(pfb, 0x100538, 0x10001ff6); /*XXX*/ 923 nva3_link_train_fini(pfb);
351 nv_wr32(pfb, 0x1005a8, 0x0000ffff);
352 nv_mask(pfb, 0x10f800, 0x00000001, 0x00000001);
353 for (i = 0; i < 0x30; i++) {
354 nv_wr32(pfb, 0x10f8c0, (i << 8) | i);
355 nv_wr32(pfb, 0x10f8e0, (i << 8) | i);
356 nv_wr32(pfb, 0x10f900, pattern[i % 16]);
357 nv_wr32(pfb, 0x10f920, pattern[i % 16]);
358 }
359 }
360 }
361 break;
362 default:
363 break;
364 }
365 924
366 return 0; 925 return 0;
367} 926}
@@ -371,8 +930,12 @@ nva3_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
371 struct nouveau_oclass *oclass, void *data, u32 datasize, 930 struct nouveau_oclass *oclass, void *data, u32 datasize,
372 struct nouveau_object **pobject) 931 struct nouveau_object **pobject)
373{ 932{
933 struct nouveau_fb *pfb = nouveau_fb(parent);
934 struct nouveau_gpio *gpio = nouveau_gpio(pfb);
935 struct dcb_gpio_func func;
374 struct nva3_ram *ram; 936 struct nva3_ram *ram;
375 int ret, i; 937 int ret, i;
938 u32 reg, shift;
376 939
377 ret = nv50_ram_create(parent, engine, oclass, &ram); 940 ret = nv50_ram_create(parent, engine, oclass, &ram);
378 *pobject = nv_object(ram); 941 *pobject = nv_object(ram);
@@ -380,7 +943,9 @@ nva3_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
380 return ret; 943 return ret;
381 944
382 switch (ram->base.type) { 945 switch (ram->base.type) {
946 case NV_MEM_TYPE_DDR2:
383 case NV_MEM_TYPE_DDR3: 947 case NV_MEM_TYPE_DDR3:
948 case NV_MEM_TYPE_GDDR3:
384 ram->base.calc = nva3_ram_calc; 949 ram->base.calc = nva3_ram_calc;
385 ram->base.prog = nva3_ram_prog; 950 ram->base.prog = nva3_ram_prog;
386 ram->base.tidy = nva3_ram_tidy; 951 ram->base.tidy = nva3_ram_tidy;
@@ -390,31 +955,41 @@ nva3_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
390 return 0; 955 return 0;
391 } 956 }
392 957
958 ram->fuc.r_0x001610 = ramfuc_reg(0x001610);
959 ram->fuc.r_0x001700 = ramfuc_reg(0x001700);
960 ram->fuc.r_0x002504 = ramfuc_reg(0x002504);
393 ram->fuc.r_0x004000 = ramfuc_reg(0x004000); 961 ram->fuc.r_0x004000 = ramfuc_reg(0x004000);
394 ram->fuc.r_0x004004 = ramfuc_reg(0x004004); 962 ram->fuc.r_0x004004 = ramfuc_reg(0x004004);
395 ram->fuc.r_0x004018 = ramfuc_reg(0x004018); 963 ram->fuc.r_0x004018 = ramfuc_reg(0x004018);
396 ram->fuc.r_0x004128 = ramfuc_reg(0x004128); 964 ram->fuc.r_0x004128 = ramfuc_reg(0x004128);
397 ram->fuc.r_0x004168 = ramfuc_reg(0x004168); 965 ram->fuc.r_0x004168 = ramfuc_reg(0x004168);
966 ram->fuc.r_0x100080 = ramfuc_reg(0x100080);
398 ram->fuc.r_0x100200 = ramfuc_reg(0x100200); 967 ram->fuc.r_0x100200 = ramfuc_reg(0x100200);
399 ram->fuc.r_0x100210 = ramfuc_reg(0x100210); 968 ram->fuc.r_0x100210 = ramfuc_reg(0x100210);
400 for (i = 0; i < 9; i++) 969 for (i = 0; i < 9; i++)
401 ram->fuc.r_0x100220[i] = ramfuc_reg(0x100220 + (i * 4)); 970 ram->fuc.r_0x100220[i] = ramfuc_reg(0x100220 + (i * 4));
971 ram->fuc.r_0x100264 = ramfuc_reg(0x100264);
402 ram->fuc.r_0x1002d0 = ramfuc_reg(0x1002d0); 972 ram->fuc.r_0x1002d0 = ramfuc_reg(0x1002d0);
403 ram->fuc.r_0x1002d4 = ramfuc_reg(0x1002d4); 973 ram->fuc.r_0x1002d4 = ramfuc_reg(0x1002d4);
404 ram->fuc.r_0x1002dc = ramfuc_reg(0x1002dc); 974 ram->fuc.r_0x1002dc = ramfuc_reg(0x1002dc);
405 ram->fuc.r_0x10053c = ramfuc_reg(0x10053c); 975 ram->fuc.r_0x10053c = ramfuc_reg(0x10053c);
406 ram->fuc.r_0x1005a0 = ramfuc_reg(0x1005a0); 976 ram->fuc.r_0x1005a0 = ramfuc_reg(0x1005a0);
407 ram->fuc.r_0x1005a4 = ramfuc_reg(0x1005a4); 977 ram->fuc.r_0x1005a4 = ramfuc_reg(0x1005a4);
978 ram->fuc.r_0x100700 = ramfuc_reg(0x100700);
408 ram->fuc.r_0x100714 = ramfuc_reg(0x100714); 979 ram->fuc.r_0x100714 = ramfuc_reg(0x100714);
409 ram->fuc.r_0x100718 = ramfuc_reg(0x100718); 980 ram->fuc.r_0x100718 = ramfuc_reg(0x100718);
410 ram->fuc.r_0x10071c = ramfuc_reg(0x10071c); 981 ram->fuc.r_0x10071c = ramfuc_reg(0x10071c);
982 ram->fuc.r_0x100720 = ramfuc_reg(0x100720);
411 ram->fuc.r_0x100760 = ramfuc_stride(0x100760, 4, ram->base.part_mask); 983 ram->fuc.r_0x100760 = ramfuc_stride(0x100760, 4, ram->base.part_mask);
412 ram->fuc.r_0x1007a0 = ramfuc_stride(0x1007a0, 4, ram->base.part_mask); 984 ram->fuc.r_0x1007a0 = ramfuc_stride(0x1007a0, 4, ram->base.part_mask);
413 ram->fuc.r_0x1007e0 = ramfuc_stride(0x1007e0, 4, ram->base.part_mask); 985 ram->fuc.r_0x1007e0 = ramfuc_stride(0x1007e0, 4, ram->base.part_mask);
986 ram->fuc.r_0x100da0 = ramfuc_stride(0x100da0, 4, ram->base.part_mask);
414 ram->fuc.r_0x10f804 = ramfuc_reg(0x10f804); 987 ram->fuc.r_0x10f804 = ramfuc_reg(0x10f804);
415 ram->fuc.r_0x1110e0 = ramfuc_stride(0x1110e0, 4, ram->base.part_mask); 988 ram->fuc.r_0x1110e0 = ramfuc_stride(0x1110e0, 4, ram->base.part_mask);
416 ram->fuc.r_0x111100 = ramfuc_reg(0x111100); 989 ram->fuc.r_0x111100 = ramfuc_reg(0x111100);
417 ram->fuc.r_0x111104 = ramfuc_reg(0x111104); 990 ram->fuc.r_0x111104 = ramfuc_reg(0x111104);
991 ram->fuc.r_0x1111e0 = ramfuc_reg(0x1111e0);
992 ram->fuc.r_0x111400 = ramfuc_reg(0x111400);
418 ram->fuc.r_0x611200 = ramfuc_reg(0x611200); 993 ram->fuc.r_0x611200 = ramfuc_reg(0x611200);
419 994
420 if (ram->base.ranks > 1) { 995 if (ram->base.ranks > 1) {
@@ -429,6 +1004,12 @@ nva3_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
429 ram->fuc.r_mr[3] = ramfuc_reg(0x1002e4); 1004 ram->fuc.r_mr[3] = ramfuc_reg(0x1002e4);
430 } 1005 }
431 1006
1007 ret = gpio->find(gpio, 0, 0x2e, DCB_GPIO_UNUSED, &func);
1008 if (ret == 0) {
1009 nv50_gpio_location(func.line, &reg, &shift);
1010 ram->fuc.r_gpioFBVREF = ramfuc_reg(reg);
1011 }
1012
432 return 0; 1013 return 0;
433} 1014}
434 1015
@@ -438,6 +1019,6 @@ nva3_ram_oclass = {
438 .ctor = nva3_ram_ctor, 1019 .ctor = nva3_ram_ctor,
439 .dtor = _nouveau_ram_dtor, 1020 .dtor = _nouveau_ram_dtor,
440 .init = nva3_ram_init, 1021 .init = nva3_ram_init,
441 .fini = _nouveau_ram_fini, 1022 .fini = nva3_ram_fini,
442 }, 1023 },
443}; 1024};
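Two details of the reworked nva3 reclock script above are worth calling out: mode registers are now written only when the cached value differs, with a ~1us settle after each write, so redundant MRS cycles are skipped during the switch; and the constructor looks up GPIO function 0x2e so the script can drive FBVREF when ODT is off. A minimal sketch of the conditional-MR pattern follows; rd()/wr()/nsec() are hypothetical stand-ins for the ramfuc helpers in the patch, not driver APIs.

/* Sketch only: write a mode register iff it changed, then let it settle.
 * rd(), wr(), nsec() are hypothetical stand-ins for ram_rd32(),
 * ram_wr32(), ram_nsec(); 'mr_reg'/'want' mirror ram->base.mr[]. */
#include <stdint.h>

typedef uint32_t u32;

extern u32  rd(u32 reg);          /* hypothetical: ram_rd32() */
extern void wr(u32 reg, u32 val); /* hypothetical: ram_wr32() */
extern void nsec(u32 ns);         /* hypothetical: ram_nsec() */

static void
program_mr(const u32 *mr_reg, const u32 *want, int count)
{
	/* highest index first, matching the i = 2..0 order in the patch */
	for (int i = count - 1; i >= 0; i--) {
		if (rd(mr_reg[i]) != want[i]) {
			wr(mr_reg[i], want[i]);
			nsec(1000); /* settle before the next MRS */
		}
	}
}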
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/sddr2.c b/drivers/gpu/drm/nouveau/core/subdev/fb/sddr2.c
index bb1eb8f3e639..252575f3aa29 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/sddr2.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/sddr2.c
@@ -66,7 +66,7 @@ nouveau_sddr2_calc(struct nouveau_ram *ram)
66 case 0x10: 66 case 0x10:
67 CL = ram->next->bios.timing_10_CL; 67 CL = ram->next->bios.timing_10_CL;
68 WR = ram->next->bios.timing_10_WR; 68 WR = ram->next->bios.timing_10_WR;
69 DLL = !ram->next->bios.ramcfg_10_02_40; 69 DLL = !ram->next->bios.ramcfg_10_DLLoff;
70 ODT = ram->next->bios.timing_10_ODT & 3; 70 ODT = ram->next->bios.timing_10_ODT & 3;
71 break; 71 break;
72 case 0x20: 72 case 0x20:
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/sddr3.c b/drivers/gpu/drm/nouveau/core/subdev/fb/sddr3.c
index 83949b11833a..a2dca4869e52 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/sddr3.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/sddr3.c
@@ -80,7 +80,7 @@ nouveau_sddr3_calc(struct nouveau_ram *ram)
80 CWL = ram->next->bios.timing_10_CWL; 80 CWL = ram->next->bios.timing_10_CWL;
81 CL = ram->next->bios.timing_10_CL; 81 CL = ram->next->bios.timing_10_CL;
82 WR = ram->next->bios.timing_10_WR; 82 WR = ram->next->bios.timing_10_WR;
83 DLL = !ram->next->bios.ramcfg_10_02_40; 83 DLL = !ram->next->bios.ramcfg_10_DLLoff;
84 ODT = ram->next->bios.timing_10_ODT; 84 ODT = ram->next->bios.timing_10_ODT;
85 break; 85 break;
86 case 0x20: 86 case 0x20:
diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
index 1864fa98e6b1..2e30d5a62d6e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c
@@ -54,7 +54,7 @@ nv50_gpio_reset(struct nouveau_gpio *gpio, u8 match)
54 } 54 }
55} 55}
56 56
57static int 57int
58nv50_gpio_location(int line, u32 *reg, u32 *shift) 58nv50_gpio_location(int line, u32 *reg, u32 *shift)
59{ 59{
60 const u32 nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 }; 60 const u32 nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
index 2b1bf545e488..0dc605db7ec8 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/base.c
@@ -473,18 +473,56 @@ nouveau_i2c_extdev_sclass[] = {
473 nouveau_anx9805_sclass, 473 nouveau_anx9805_sclass,
474}; 474};
475 475
476static void
477nouveau_i2c_create_port(struct nouveau_i2c *i2c, int index, u8 type,
478 struct dcb_i2c_entry *info)
479{
480 const struct nouveau_i2c_impl *impl = (void *)nv_oclass(i2c);
481 struct nouveau_oclass *oclass;
482 struct nouveau_object *parent;
483 struct nouveau_object *object;
484 int ret, pad;
485
486 if (info->share != DCB_I2C_UNUSED) {
487 pad = info->share;
488 oclass = impl->pad_s;
489 } else {
490 if (type != DCB_I2C_NVIO_AUX)
491 pad = 0x100 + info->drive;
492 else
493 pad = 0x100 + info->auxch;
494 oclass = impl->pad_x;
495 }
496
497 ret = nouveau_object_ctor(NULL, nv_object(i2c), oclass, NULL, pad,
498 &parent);
499 if (ret < 0)
500 return;
501
502 oclass = impl->sclass;
503 do {
504 ret = -EINVAL;
505 if (oclass->handle == type) {
506 ret = nouveau_object_ctor(parent, nv_object(i2c),
507 oclass, info, index,
508 &object);
509 }
510 } while (ret && (++oclass)->handle);
511
512 nouveau_object_ref(NULL, &parent);
513}
514
476int 515int
477nouveau_i2c_create_(struct nouveau_object *parent, 516nouveau_i2c_create_(struct nouveau_object *parent,
478 struct nouveau_object *engine, 517 struct nouveau_object *engine,
479 struct nouveau_oclass *oclass, 518 struct nouveau_oclass *oclass,
480 int length, void **pobject) 519 int length, void **pobject)
481{ 520{
482 const struct nouveau_i2c_impl *impl = (void *)oclass;
483 struct nouveau_bios *bios = nouveau_bios(parent); 521 struct nouveau_bios *bios = nouveau_bios(parent);
484 struct nouveau_i2c *i2c; 522 struct nouveau_i2c *i2c;
485 struct nouveau_object *object; 523 struct nouveau_object *object;
486 struct dcb_i2c_entry info; 524 struct dcb_i2c_entry info;
487 int ret, i, j, index = -1, pad; 525 int ret, i, j, index = -1;
488 struct dcb_output outp; 526 struct dcb_output outp;
489 u8 ver, hdr; 527 u8 ver, hdr;
490 u32 data; 528 u32 data;
@@ -507,43 +545,40 @@ nouveau_i2c_create_(struct nouveau_object *parent,
507 INIT_LIST_HEAD(&i2c->ports); 545 INIT_LIST_HEAD(&i2c->ports);
508 546
509 while (!dcb_i2c_parse(bios, ++index, &info)) { 547 while (!dcb_i2c_parse(bios, ++index, &info)) {
510 if (info.type == DCB_I2C_UNUSED) 548 switch (info.type) {
549 case DCB_I2C_NV04_BIT:
550 case DCB_I2C_NV4E_BIT:
551 case DCB_I2C_NVIO_BIT:
552 nouveau_i2c_create_port(i2c, NV_I2C_PORT(index),
553 info.type, &info);
554 break;
555 case DCB_I2C_NVIO_AUX:
556 nouveau_i2c_create_port(i2c, NV_I2C_AUX(index),
557 info.type, &info);
558 break;
559 case DCB_I2C_PMGR:
560 if (info.drive != DCB_I2C_UNUSED) {
561 nouveau_i2c_create_port(i2c, NV_I2C_PORT(index),
562 DCB_I2C_NVIO_BIT,
563 &info);
564 }
565 if (info.auxch != DCB_I2C_UNUSED) {
566 nouveau_i2c_create_port(i2c, NV_I2C_AUX(index),
567 DCB_I2C_NVIO_AUX,
568 &info);
569 }
570 break;
571 case DCB_I2C_UNUSED:
572 default:
511 continue; 573 continue;
512
513 if (info.share != DCB_I2C_UNUSED) {
514 if (info.type == DCB_I2C_NVIO_AUX)
515 pad = info.drive;
516 else
517 pad = info.share;
518 oclass = impl->pad_s;
519 } else {
520 pad = 0x100 + info.drive;
521 oclass = impl->pad_x;
522 } 574 }
523
524 ret = nouveau_object_ctor(NULL, *pobject, oclass,
525 NULL, pad, &parent);
526 if (ret < 0)
527 continue;
528
529 oclass = impl->sclass;
530 do {
531 ret = -EINVAL;
532 if (oclass->handle == info.type) {
533 ret = nouveau_object_ctor(parent, *pobject,
534 oclass, &info,
535 index, &object);
536 }
537 } while (ret && (++oclass)->handle);
538
539 nouveau_object_ref(NULL, &parent);
540 } 575 }
541 576
542 /* in addition to the busses specified in the i2c table, there 577 /* in addition to the busses specified in the i2c table, there
543 * may be ddc/aux channels hiding behind external tmds/dp/etc 578 * may be ddc/aux channels hiding behind external tmds/dp/etc
544 * transmitters. 579 * transmitters.
545 */ 580 */
546 index = ((index + 0x0f) / 0x10) * 0x10; 581 index = NV_I2C_EXT(0);
547 i = -1; 582 i = -1;
548 while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &outp))) { 583 while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &outp))) {
549 if (!outp.location || !outp.extdev) 584 if (!outp.location || !outp.extdev)
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/gm204.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/gm204.c
new file mode 100644
index 000000000000..06a2b87ccbf1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/gm204.c
@@ -0,0 +1,221 @@
1/*
2 * Copyright 2012 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "nv50.h"
26
27#define AUX_DBG(fmt, args...) nv_debug(aux, "AUXCH(%d): " fmt, ch, ##args)
28#define AUX_ERR(fmt, args...) nv_error(aux, "AUXCH(%d): " fmt, ch, ##args)
29
30static void
31auxch_fini(struct nouveau_i2c *aux, int ch)
32{
33 nv_mask(aux, 0x00d954 + (ch * 0x50), 0x00310000, 0x00000000);
34}
35
36static int
37auxch_init(struct nouveau_i2c *aux, int ch)
38{
39 const u32 unksel = 1; /* nfi which to use, or if it matters.. */
40 const u32 ureq = unksel ? 0x00100000 : 0x00200000;
41 const u32 urep = unksel ? 0x01000000 : 0x02000000;
42 u32 ctrl, timeout;
43
44 /* wait up to 1ms for any previous transaction to be done... */
45 timeout = 1000;
46 do {
47 ctrl = nv_rd32(aux, 0x00d954 + (ch * 0x50));
48 udelay(1);
49 if (!timeout--) {
50 AUX_ERR("begin idle timeout 0x%08x\n", ctrl);
51 return -EBUSY;
52 }
53 } while (ctrl & 0x03010000);
54
55 /* set some magic, and wait up to 1ms for it to appear */
56 nv_mask(aux, 0x00d954 + (ch * 0x50), 0x00300000, ureq);
57 timeout = 1000;
58 do {
59 ctrl = nv_rd32(aux, 0x00d954 + (ch * 0x50));
60 udelay(1);
61 if (!timeout--) {
62 AUX_ERR("magic wait 0x%08x\n", ctrl);
63 auxch_fini(aux, ch);
64 return -EBUSY;
65 }
66 } while ((ctrl & 0x03000000) != urep);
67
68 return 0;
69}
70
71int
72gm204_aux(struct nouveau_i2c_port *base, bool retry,
73 u8 type, u32 addr, u8 *data, u8 size)
74{
75 struct nouveau_i2c *aux = nouveau_i2c(base);
76 struct nv50_i2c_port *port = (void *)base;
77 u32 ctrl, stat, timeout, retries;
78 u32 xbuf[4] = {};
79 int ch = port->addr;
80 int ret, i;
81
82 AUX_DBG("%d: 0x%08x %d\n", type, addr, size);
83
84 ret = auxch_init(aux, ch);
85 if (ret)
86 goto out;
87
88 stat = nv_rd32(aux, 0x00d958 + (ch * 0x50));
89 if (!(stat & 0x10000000)) {
90 AUX_DBG("sink not detected\n");
91 ret = -ENXIO;
92 goto out;
93 }
94
95 if (!(type & 1)) {
96 memcpy(xbuf, data, size);
97 for (i = 0; i < 16; i += 4) {
98 AUX_DBG("wr 0x%08x\n", xbuf[i / 4]);
99 nv_wr32(aux, 0x00d930 + (ch * 0x50) + i, xbuf[i / 4]);
100 }
101 }
102
103 ctrl = nv_rd32(aux, 0x00d954 + (ch * 0x50));
104 ctrl &= ~0x0001f0ff;
105 ctrl |= type << 12;
106 ctrl |= size - 1;
107 nv_wr32(aux, 0x00d950 + (ch * 0x50), addr);
108
109 /* (maybe) retry transaction a number of times on failure... */
110 for (retries = 0; !ret && retries < 32; retries++) {
111 /* reset, and delay a while if this is a retry */
112 nv_wr32(aux, 0x00d954 + (ch * 0x50), 0x80000000 | ctrl);
113 nv_wr32(aux, 0x00d954 + (ch * 0x50), 0x00000000 | ctrl);
114 if (retries)
115 udelay(400);
116
117 /* transaction request, wait up to 1ms for it to complete */
118 nv_wr32(aux, 0x00d954 + (ch * 0x50), 0x00010000 | ctrl);
119
120 timeout = 1000;
121 do {
122 ctrl = nv_rd32(aux, 0x00d954 + (ch * 0x50));
123 udelay(1);
124 if (!timeout--) {
125 AUX_ERR("tx req timeout 0x%08x\n", ctrl);
126 ret = -EIO;
127 goto out;
128 }
129 } while (ctrl & 0x00010000);
130 ret = 1;
131
132 /* read status, and check if transaction completed ok */
133 stat = nv_mask(aux, 0x00d958 + (ch * 0x50), 0, 0);
134 if ((stat & 0x000f0000) == 0x00080000 ||
135 (stat & 0x000f0000) == 0x00020000)
136 ret = retry ? 0 : 1;
137 if ((stat & 0x00000100))
138 ret = -ETIMEDOUT;
139 if ((stat & 0x00000e00))
140 ret = -EIO;
141
142 AUX_DBG("%02d 0x%08x 0x%08x\n", retries, ctrl, stat);
143 }
144
145 if (type & 1) {
146 for (i = 0; i < 16; i += 4) {
147 xbuf[i / 4] = nv_rd32(aux, 0x00d940 + (ch * 0x50) + i);
148 AUX_DBG("rd 0x%08x\n", xbuf[i / 4]);
149 }
150 memcpy(data, xbuf, size);
151 }
152
153out:
154 auxch_fini(aux, ch);
155 return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
156}
157
158static const struct nouveau_i2c_func
159gm204_aux_func = {
160 .aux = gm204_aux,
161};
162
163int
164gm204_aux_port_ctor(struct nouveau_object *parent,
165 struct nouveau_object *engine,
166 struct nouveau_oclass *oclass, void *data, u32 index,
167 struct nouveau_object **pobject)
168{
169 struct dcb_i2c_entry *info = data;
170 struct nv50_i2c_port *port;
171 int ret;
172
173 ret = nouveau_i2c_port_create(parent, engine, oclass, index,
174 &nouveau_i2c_aux_algo, &gm204_aux_func,
175 &port);
176 *pobject = nv_object(port);
177 if (ret)
178 return ret;
179
180 port->base.aux = info->auxch;
181 port->addr = info->auxch;
182 return 0;
183}
184
185struct nouveau_oclass
186gm204_i2c_sclass[] = {
187 { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT),
188 .ofuncs = &(struct nouveau_ofuncs) {
189 .ctor = nvd0_i2c_port_ctor,
190 .dtor = _nouveau_i2c_port_dtor,
191 .init = nv50_i2c_port_init,
192 .fini = _nouveau_i2c_port_fini,
193 },
194 },
195 { .handle = NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_AUX),
196 .ofuncs = &(struct nouveau_ofuncs) {
197 .ctor = gm204_aux_port_ctor,
198 .dtor = _nouveau_i2c_port_dtor,
199 .init = _nouveau_i2c_port_init,
200 .fini = _nouveau_i2c_port_fini,
201 },
202 },
203 {}
204};
205
206struct nouveau_oclass *
207gm204_i2c_oclass = &(struct nouveau_i2c_impl) {
208 .base.handle = NV_SUBDEV(I2C, 0x24),
209 .base.ofuncs = &(struct nouveau_ofuncs) {
210 .ctor = _nouveau_i2c_ctor,
211 .dtor = _nouveau_i2c_dtor,
212 .init = _nouveau_i2c_init,
213 .fini = _nouveau_i2c_fini,
214 },
215 .sclass = gm204_i2c_sclass,
216 .pad_x = &nv04_i2c_pad_oclass,
217 .pad_s = &gm204_i2c_pad_oclass,
218 .aux = 8,
219 .aux_stat = nve0_aux_stat,
220 .aux_mask = nve0_aux_mask,
221}.base;
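gm204_aux() above keeps the familiar AUX shape: initialize the channel, preload the write FIFO for writes, kick the request, poll the busy bit with a ~1ms budget, then decode the status word and retry up to 32 times with a 400us backoff. A condensed sketch of that poll/retry skeleton follows; aux_kick()/aux_ctrl()/aux_stat()/wait_us() are hypothetical stand-ins for the 0x00d950..0x00d958 register accesses in the patch, not driver APIs.

/* Sketch only: the request/poll/retry skeleton of gm204_aux(). */
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

struct aux_ch; /* opaque channel handle */
extern void     aux_kick(struct aux_ch *, uint32_t ctrl); /* hypothetical */
extern uint32_t aux_ctrl(struct aux_ch *);                /* hypothetical */
extern uint32_t aux_stat(struct aux_ch *);                /* hypothetical */
extern void     wait_us(unsigned us);                     /* hypothetical */

static int
aux_try_once(struct aux_ch *ch, uint32_t ctrl)
{
	unsigned timeout = 1000;

	aux_kick(ch, 0x00010000 | ctrl);  /* set the request bit */
	do {                              /* ~1ms completion budget */
		if (!timeout--)
			return -EIO;
		wait_us(1);
	} while (aux_ctrl(ch) & 0x00010000);

	uint32_t stat = aux_stat(ch);
	if (stat & 0x00000100)            /* sink timed out */
		return -ETIMEDOUT;
	if (stat & 0x00000e00)            /* receive error */
		return -EIO;
	if ((stat & 0x000f0000) == 0x00080000 ||
	    (stat & 0x000f0000) == 0x00020000)
		return 1;                 /* deferred: retryable */
	return 0;                         /* completed */
}

static int
aux_xfer(struct aux_ch *ch, uint32_t ctrl, bool retry)
{
	int ret = -EIO;

	for (unsigned tries = 0; tries < 32; tries++) {
		if (tries)
			wait_us(400);     /* backoff between retries */
		ret = aux_try_once(ch, ctrl);
		if (ret <= 0 || !retry)
			break;
	}
	return ret;
}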
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.h b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.h
index 5d2a77421c74..9ef965692fb1 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv50.h
@@ -10,8 +10,6 @@ struct nv50_i2c_priv {
10struct nv50_i2c_port { 10struct nv50_i2c_port {
11 struct nouveau_i2c_port base; 11 struct nouveau_i2c_port base;
12 u32 addr; 12 u32 addr;
13 u32 ctrl;
14 u32 data;
15 u32 state; 13 u32 state;
16}; 14};
17 15
@@ -29,4 +27,8 @@ int nv94_aux_port_ctor(struct nouveau_object *, struct nouveau_object *,
29void nv94_i2c_acquire(struct nouveau_i2c_port *); 27void nv94_i2c_acquire(struct nouveau_i2c_port *);
30void nv94_i2c_release(struct nouveau_i2c_port *); 28void nv94_i2c_release(struct nouveau_i2c_port *);
31 29
30int nvd0_i2c_port_ctor(struct nouveau_object *, struct nouveau_object *,
31 struct nouveau_oclass *, void *, u32,
32 struct nouveau_object **);
33
32#endif 34#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c
index f59c3a255462..e383ee81f4d2 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nv94.c
@@ -214,10 +214,6 @@ nv94_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
214 214
215 port->state = 7; 215 port->state = 7;
216 port->addr = nv50_i2c_addr[info->drive]; 216 port->addr = nv50_i2c_addr[info->drive];
217 if (info->share != DCB_I2C_UNUSED) {
218 port->ctrl = 0x00e500 + (info->share * 0x50);
219 port->data = 0x0000e001;
220 }
221 return 0; 217 return 0;
222} 218}
223 219
@@ -242,13 +238,8 @@ nv94_aux_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
242 if (ret) 238 if (ret)
243 return ret; 239 return ret;
244 240
245 port->base.aux = info->drive; 241 port->base.aux = info->auxch;
246 port->addr = info->drive; 242 port->addr = info->auxch;
247 if (info->share != DCB_I2C_UNUSED) {
248 port->ctrl = 0x00e500 + (info->drive * 0x50);
249 port->data = 0x00002002;
250 }
251
252 return 0; 243 return 0;
253} 244}
254 245
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c
index 364ddb1c5f03..fd99380502ec 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nvd0.c
@@ -48,7 +48,7 @@ nvd0_i2c_func = {
48 .sense_sda = nvd0_i2c_sense_sda, 48 .sense_sda = nvd0_i2c_sense_sda,
49}; 49};
50 50
51static int 51int
52nvd0_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 52nvd0_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
53 struct nouveau_oclass *oclass, void *data, u32 index, 53 struct nouveau_oclass *oclass, void *data, u32 index,
54 struct nouveau_object **pobject) 54 struct nouveau_object **pobject)
@@ -66,10 +66,6 @@ nvd0_i2c_port_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
66 66
67 port->state = 0x00000007; 67 port->state = 0x00000007;
68 port->addr = 0x00d014 + (info->drive * 0x20); 68 port->addr = 0x00d014 + (info->drive * 0x20);
69 if (info->share != DCB_I2C_UNUSED) {
70 port->ctrl = 0x00e500 + (info->share * 0x50);
71 port->data = 0x0000e001;
72 }
73 return 0; 69 return 0;
74} 70}
75 71
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/nve0.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/nve0.c
index cae77e1ad8dc..25fe5c2d110e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/nve0.c
@@ -24,7 +24,7 @@
24 24
25#include "nv50.h" 25#include "nv50.h"
26 26
27static void 27void
28nve0_aux_stat(struct nouveau_i2c *i2c, u32 *hi, u32 *lo, u32 *rq, u32 *tx) 28nve0_aux_stat(struct nouveau_i2c *i2c, u32 *hi, u32 *lo, u32 *rq, u32 *tx)
29{ 29{
30 u32 intr = nv_rd32(i2c, 0x00dc60); 30 u32 intr = nv_rd32(i2c, 0x00dc60);
@@ -38,7 +38,7 @@ nve0_aux_stat(struct nouveau_i2c *i2c, u32 *hi, u32 *lo, u32 *rq, u32 *tx)
38 nv_wr32(i2c, 0x00dc60, intr); 38 nv_wr32(i2c, 0x00dc60, intr);
39} 39}
40 40
41static void 41void
42nve0_aux_mask(struct nouveau_i2c *i2c, u32 type, u32 mask, u32 data) 42nve0_aux_mask(struct nouveau_i2c *i2c, u32 type, u32 mask, u32 data)
43{ 43{
44 u32 temp = nv_rd32(i2c, 0x00dc68), i; 44 u32 temp = nv_rd32(i2c, 0x00dc68), i;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/padgm204.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/padgm204.c
new file mode 100644
index 000000000000..f0e6fbbaa8cd
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/padgm204.c
@@ -0,0 +1,86 @@
1/*
2 * Copyright 2014 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 * Authors: Ben Skeggs
23 */
24
25#include "pad.h"
26
27struct gm204_i2c_pad {
28 struct nvkm_i2c_pad base;
29 int addr;
30};
31
32static int
33gm204_i2c_pad_fini(struct nouveau_object *object, bool suspend)
34{
35 struct nouveau_i2c *i2c = (void *)object->engine;
36 struct gm204_i2c_pad *pad = (void *)object;
37 nv_mask(i2c, 0x00d97c + pad->addr, 0x00000001, 0x00000001);
38 return nvkm_i2c_pad_fini(&pad->base, suspend);
39}
40
41static int
42gm204_i2c_pad_init(struct nouveau_object *object)
43{
44 struct nouveau_i2c *i2c = (void *)object->engine;
45 struct gm204_i2c_pad *pad = (void *)object;
46
47 switch (nv_oclass(pad->base.next)->handle) {
48 case NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_AUX):
49 nv_mask(i2c, 0x00d970 + pad->addr, 0x0000c003, 0x00000002);
50 break;
51 case NV_I2C_TYPE_DCBI2C(DCB_I2C_NVIO_BIT):
52 default:
53 nv_mask(i2c, 0x00d970 + pad->addr, 0x0000c003, 0x0000c001);
54 break;
55 }
56
57 nv_mask(i2c, 0x00d97c + pad->addr, 0x00000001, 0x00000000);
58 return nvkm_i2c_pad_init(&pad->base);
59}
60
61static int
62gm204_i2c_pad_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
63 struct nouveau_oclass *oclass, void *data, u32 index,
64 struct nouveau_object **pobject)
65{
66 struct gm204_i2c_pad *pad;
67 int ret;
68
69 ret = nvkm_i2c_pad_create(parent, engine, oclass, index, &pad);
70 *pobject = nv_object(pad);
71 if (ret)
72 return ret;
73
 74	pad->addr = index * 0x50;
75 return 0;
76}
77
78struct nouveau_oclass
79gm204_i2c_pad_oclass = {
80 .ofuncs = &(struct nouveau_ofuncs) {
81 .ctor = gm204_i2c_pad_ctor,
82 .dtor = _nvkm_i2c_pad_dtor,
83 .init = gm204_i2c_pad_init,
84 .fini = gm204_i2c_pad_fini,
85 },
86};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/priv.h b/drivers/gpu/drm/nouveau/core/subdev/i2c/priv.h
index 780090b6425a..4fe7ae3fde4e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/i2c/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/i2c/priv.h
@@ -5,6 +5,7 @@
5 5
6extern struct nouveau_oclass nv04_i2c_pad_oclass; 6extern struct nouveau_oclass nv04_i2c_pad_oclass;
7extern struct nouveau_oclass nv94_i2c_pad_oclass; 7extern struct nouveau_oclass nv94_i2c_pad_oclass;
8extern struct nouveau_oclass gm204_i2c_pad_oclass;
8 9
9#define nouveau_i2c_port_create(p,e,o,i,a,f,d) \ 10#define nouveau_i2c_port_create(p,e,o,i,a,f,d) \
10 nouveau_i2c_port_create_((p), (e), (o), (i), (a), (f), \ 11 nouveau_i2c_port_create_((p), (e), (o), (i), (a), (f), \
@@ -82,4 +83,7 @@ struct nouveau_i2c_impl {
82void nv94_aux_stat(struct nouveau_i2c *, u32 *, u32 *, u32 *, u32 *); 83void nv94_aux_stat(struct nouveau_i2c *, u32 *, u32 *, u32 *, u32 *);
83void nv94_aux_mask(struct nouveau_i2c *, u32, u32, u32); 84void nv94_aux_mask(struct nouveau_i2c *, u32, u32, u32);
84 85
86void nve0_aux_stat(struct nouveau_i2c *, u32 *, u32 *, u32 *, u32 *);
87void nve0_aux_mask(struct nouveau_i2c *, u32, u32, u32);
88
85#endif 89#endif
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/memx.fuc b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/memx.fuc
index e89789a53b80..ec03f9a4290b 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/memx.fuc
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/memx.fuc
@@ -50,6 +50,7 @@ handler(WR32 , 0x0000, 0x0002, #memx_func_wr32)
50handler(WAIT , 0x0004, 0x0000, #memx_func_wait) 50handler(WAIT , 0x0004, 0x0000, #memx_func_wait)
51handler(DELAY , 0x0001, 0x0000, #memx_func_delay) 51handler(DELAY , 0x0001, 0x0000, #memx_func_delay)
52handler(VBLANK, 0x0001, 0x0000, #memx_func_wait_vblank) 52handler(VBLANK, 0x0001, 0x0000, #memx_func_wait_vblank)
53handler(TRAIN , 0x0000, 0x0000, #memx_func_train)
53memx_func_tail: 54memx_func_tail:
54 55
55.equ #memx_func_size #memx_func_next - #memx_func_head 56.equ #memx_func_size #memx_func_next - #memx_func_head
@@ -63,6 +64,10 @@ memx_ts_end:
63memx_data_head: 64memx_data_head:
64.skip 0x0800 65.skip 0x0800
65memx_data_tail: 66memx_data_tail:
67
68memx_train_head:
69.skip 0x0100
70memx_train_tail:
66#endif 71#endif
67 72
68/****************************************************************************** 73/******************************************************************************
@@ -260,6 +265,101 @@ memx_func_delay:
260// description 265// description
261// 266//
262// $r15 - current (memx) 267// $r15 - current (memx)
268// $r4 - packet length
 269 // $r3 - opcode description
270// $r0 - zero
271memx_func_train:
272#if NVKM_PPWR_CHIPSET == GT215
273// $r5 - outer loop counter
274// $r6 - inner loop counter
275// $r7 - entry counter (#memx_train_head + $r7)
276 movw $r5 0x3
277 movw $r7 0x0
278
279// Read random memory to wake up... things
280 imm32($r9, 0x700000)
281 nv_rd32($r8,$r9)
282 movw $r14 0x2710
283 call(nsec)
284
285 memx_func_train_loop_outer:
286 mulu $r8 $r5 0x101
287 sethi $r8 0x02000000
288 imm32($r9, 0x1111e0)
289 nv_wr32($r9, $r8)
290 push $r5
291
292 movw $r6 0x0
293 memx_func_train_loop_inner:
294 movw $r8 0x1111
295 mulu $r9 $r6 $r8
296 shl b32 $r8 $r9 0x10
297 or $r8 $r9
298 imm32($r9, 0x100720)
299 nv_wr32($r9, $r8)
300
301 imm32($r9, 0x100080)
302 nv_rd32($r8, $r9)
303 or $r8 $r8 0x20
304 nv_wr32($r9, $r8)
305
306 imm32($r9, 0x10053c)
307 imm32($r8, 0x80003002)
308 nv_wr32($r9, $r8)
309
310 imm32($r14, 0x100560)
311 imm32($r13, 0x80000000)
312 add b32 $r12 $r13 0
313 imm32($r11, 0x001e8480)
314 call(wait)
315
316 // $r5 - inner inner loop counter
317 // $r9 - result
318 movw $r5 0
319 imm32($r9, 0x8300ffff)
320 memx_func_train_loop_4x:
321 imm32($r10, 0x100080)
322 nv_rd32($r8, $r10)
323 imm32($r11, 0xffffffdf)
324 and $r8 $r11
325 nv_wr32($r10, $r8)
326
327 imm32($r10, 0x10053c)
328 imm32($r8, 0x80003002)
329 nv_wr32($r10, $r8)
330
331 imm32($r14, 0x100560)
332 imm32($r13, 0x80000000)
333 mov b32 $r12 $r13
334 imm32($r11, 0x00002710)
335 call(wait)
336
337 nv_rd32($r13, $r14)
338 and $r9 $r9 $r13
339
340 add b32 $r5 1
341 cmp b16 $r5 0x4
342 bra l #memx_func_train_loop_4x
343
344 add b32 $r10 $r7 #memx_train_head
345 st b32 D[$r10 + 0] $r9
346 add b32 $r6 1
347 add b32 $r7 4
348
349 cmp b16 $r6 0x10
350 bra l #memx_func_train_loop_inner
351
352 pop $r5
353 add b32 $r5 1
354 cmp b16 $r5 7
355 bra l #memx_func_train_loop_outer
356
357#endif
358 ret
359
360// description
361//
362// $r15 - current (memx)
263// $r14 - sender process name 363// $r14 - sender process name
264// $r13 - message (exec) 364// $r13 - message (exec)
265// $r12 - head of script 365// $r12 - head of script
@@ -307,8 +407,19 @@ memx_exec:
307// $r11 - data1 407// $r11 - data1
308// $r0 - zero 408// $r0 - zero
309memx_info: 409memx_info:
410 cmp b16 $r12 0x1
411 bra e #memx_info_train
412
413 memx_info_data:
310 mov $r12 #memx_data_head 414 mov $r12 #memx_data_head
311 mov $r11 #memx_data_tail - #memx_data_head 415 mov $r11 #memx_data_tail - #memx_data_head
416 bra #memx_info_send
417
418 memx_info_train:
419 mov $r12 #memx_train_head
420 mov $r11 #memx_train_tail - #memx_train_head
421
422 memx_info_send:
312 call(send) 423 call(send)
313 ret 424 ret
314 425
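The new TRAIN handler above records GT215 link-training results into memx_train_head: the outer loop runs four passes ($r5 = 3..6), the inner loop sixteen steps, and each step ANDs four samples of 0x100560 into one u32 before storing it, so the table is 4 x 16 x 4 = 0x100 bytes, matching the .skip 0x0100 reservation. memx_info likewise gains a selector in $r12 (0 = data segment, 1 = training table). A hedged sketch of how a host-side reader might index that buffer; the layout is inferred from the microcode, and memx_train_result() is a made-up helper, not a driver API.

/* Sketch only: indexing the 0x100-byte training table returned via the
 * memx_info TRAIN selector. */
#include <stdint.h>
#include <stdio.h>

#define TRAIN_PASSES 4   /* outer loop: $r5 = 3..6 */
#define TRAIN_STEPS  16  /* inner loop: $r6 = 0..15 */

static uint32_t
memx_train_result(const uint32_t *buf, int pass, int step)
{
	/* $r7 advances 4 bytes per step and never resets, so entries
	 * are pass-major and contiguous */
	return buf[pass * TRAIN_STEPS + step];
}

static void
dump_train_table(const uint32_t *buf) /* 0x100 bytes from the PMU */
{
	for (int p = 0; p < TRAIN_PASSES; p++)
		for (int s = 0; s < TRAIN_STEPS; s++)
			printf("pass %d step %2d: 0x%08x\n",
			       p, s, memx_train_result(buf, p, s));
}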
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h
index 4d278a96b2bb..713e11e2953d 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h
@@ -46,8 +46,8 @@ uint32_t nv108_pwr_data[] = {
46 0x00000000, 46 0x00000000,
47 0x00000000, 47 0x00000000,
48 0x584d454d, 48 0x584d454d,
49 0x0000061c, 49 0x0000062d,
50 0x0000060e, 50 0x0000061f,
51 0x00000000, 51 0x00000000,
52 0x00000000, 52 0x00000000,
53 0x00000000, 53 0x00000000,
@@ -68,8 +68,8 @@ uint32_t nv108_pwr_data[] = {
68 0x00000000, 68 0x00000000,
69 0x00000000, 69 0x00000000,
70 0x46524550, 70 0x46524550,
71 0x00000620, 71 0x00000631,
72 0x0000061e, 72 0x0000062f,
73 0x00000000, 73 0x00000000,
74 0x00000000, 74 0x00000000,
75 0x00000000, 75 0x00000000,
@@ -90,8 +90,8 @@ uint32_t nv108_pwr_data[] = {
90 0x00000000, 90 0x00000000,
91 0x00000000, 91 0x00000000,
92 0x5f433249, 92 0x5f433249,
93 0x00000a24, 93 0x00000a35,
94 0x000008cb, 94 0x000008dc,
95 0x00000000, 95 0x00000000,
96 0x00000000, 96 0x00000000,
97 0x00000000, 97 0x00000000,
@@ -112,8 +112,8 @@ uint32_t nv108_pwr_data[] = {
112 0x00000000, 112 0x00000000,
113 0x00000000, 113 0x00000000,
114 0x54534554, 114 0x54534554,
115 0x00000a45, 115 0x00000a56,
116 0x00000a26, 116 0x00000a37,
117 0x00000000, 117 0x00000000,
118 0x00000000, 118 0x00000000,
119 0x00000000, 119 0x00000000,
@@ -134,8 +134,8 @@ uint32_t nv108_pwr_data[] = {
134 0x00000000, 134 0x00000000,
135 0x00000000, 135 0x00000000,
136 0x454c4449, 136 0x454c4449,
137 0x00000a50, 137 0x00000a61,
138 0x00000a4e, 138 0x00000a5f,
139 0x00000000, 139 0x00000000,
140 0x00000000, 140 0x00000000,
141 0x00000000, 141 0x00000000,
@@ -246,13 +246,15 @@ uint32_t nv108_pwr_data[] = {
246 0x00010006, 246 0x00010006,
247 0x00000000, 247 0x00000000,
248 0x0000057b, 248 0x0000057b,
249/* 0x03b8: memx_func_tail */ 249 0x00000007,
250/* 0x03b8: memx_ts_start */
251 0x00000000, 250 0x00000000,
252/* 0x03bc: memx_ts_end */ 251 0x000005c3,
252/* 0x03c4: memx_func_tail */
253/* 0x03c4: memx_ts_start */
253 0x00000000, 254 0x00000000,
254/* 0x03c0: memx_data_head */ 255/* 0x03c8: memx_ts_end */
255 0x00000000, 256 0x00000000,
257/* 0x03cc: memx_data_head */
256 0x00000000, 258 0x00000000,
257 0x00000000, 259 0x00000000,
258 0x00000000, 260 0x00000000,
@@ -764,8 +766,75 @@ uint32_t nv108_pwr_data[] = {
764 0x00000000, 766 0x00000000,
765 0x00000000, 767 0x00000000,
766 0x00000000, 768 0x00000000,
767/* 0x0bc0: memx_data_tail */ 769 0x00000000,
768/* 0x0bc0: i2c_scl_map */ 770/* 0x0bcc: memx_data_tail */
771/* 0x0bcc: memx_train_head */
772 0x00000000,
773 0x00000000,
774 0x00000000,
775 0x00000000,
776 0x00000000,
777 0x00000000,
778 0x00000000,
779 0x00000000,
780 0x00000000,
781 0x00000000,
782 0x00000000,
783 0x00000000,
784 0x00000000,
785 0x00000000,
786 0x00000000,
787 0x00000000,
788 0x00000000,
789 0x00000000,
790 0x00000000,
791 0x00000000,
792 0x00000000,
793 0x00000000,
794 0x00000000,
795 0x00000000,
796 0x00000000,
797 0x00000000,
798 0x00000000,
799 0x00000000,
800 0x00000000,
801 0x00000000,
802 0x00000000,
803 0x00000000,
804 0x00000000,
805 0x00000000,
806 0x00000000,
807 0x00000000,
808 0x00000000,
809 0x00000000,
810 0x00000000,
811 0x00000000,
812 0x00000000,
813 0x00000000,
814 0x00000000,
815 0x00000000,
816 0x00000000,
817 0x00000000,
818 0x00000000,
819 0x00000000,
820 0x00000000,
821 0x00000000,
822 0x00000000,
823 0x00000000,
824 0x00000000,
825 0x00000000,
826 0x00000000,
827 0x00000000,
828 0x00000000,
829 0x00000000,
830 0x00000000,
831 0x00000000,
832 0x00000000,
833 0x00000000,
834 0x00000000,
835 0x00000000,
836/* 0x0ccc: memx_train_tail */
837/* 0x0ccc: i2c_scl_map */
769 0x00000400, 838 0x00000400,
770 0x00000800, 839 0x00000800,
771 0x00001000, 840 0x00001000,
@@ -776,7 +845,7 @@ uint32_t nv108_pwr_data[] = {
776 0x00020000, 845 0x00020000,
777 0x00040000, 846 0x00040000,
778 0x00080000, 847 0x00080000,
779/* 0x0be8: i2c_sda_map */ 848/* 0x0cf4: i2c_sda_map */
780 0x00100000, 849 0x00100000,
781 0x00200000, 850 0x00200000,
782 0x00400000, 851 0x00400000,
@@ -844,9 +913,6 @@ uint32_t nv108_pwr_data[] = {
844 0x00000000, 913 0x00000000,
845 0x00000000, 914 0x00000000,
846 0x00000000, 915 0x00000000,
847 0x00000000,
848 0x00000000,
849 0x00000000,
850}; 916};
851 917
852uint32_t nv108_pwr_code[] = { 918uint32_t nv108_pwr_code[] = {
@@ -1215,10 +1281,10 @@ uint32_t nv108_pwr_code[] = {
1215 0xf40464f0, 1281 0xf40464f0,
1216 0x2c06f70b, 1282 0x2c06f70b,
1217 0xb50066cf, 1283 0xb50066cf,
1218 0x00f8ee06, 1284 0x00f8f106,
1219/* 0x0500: memx_func_leave */ 1285/* 0x0500: memx_func_leave */
1220 0x66cf2c06, 1286 0x66cf2c06,
1221 0xef06b500, 1287 0xf206b500,
1222 0xe4400406, 1288 0xe4400406,
1223 0x0006f607, 1289 0x0006f607,
1224/* 0x0512: memx_func_leave_wait */ 1290/* 0x0512: memx_func_leave_wait */
@@ -1270,370 +1336,374 @@ uint32_t nv108_pwr_code[] = {
1270 0x9800f800, 1336 0x9800f800,
1271 0x10b6001e, 1337 0x10b6001e,
1272 0x005d7e04, 1338 0x005d7e04,
1273/* 0x05c3: memx_exec */ 1339/* 0x05c3: memx_func_train */
1274 0xf900f800, 1340 0xf800f800,
1275 0xb2d0f9e0, 1341/* 0x05c5: memx_exec */
1276/* 0x05cb: memx_exec_next */ 1342 0xf9e0f900,
1277 0x98b2b2c1, 1343 0xb2c1b2d0,
1278 0x10b60013, 1344/* 0x05cd: memx_exec_next */
1279 0xf034e704, 1345 0x001398b2,
1280 0xe033e701, 1346 0xe70410b6,
1281 0x0132b601, 1347 0xe701f034,
1282 0x980c30f0, 1348 0xb601e033,
1283 0x55f9de35, 1349 0x30f00132,
1284 0x1ef412a6, 1350 0xde35980c,
1285 0xee0b98e5, 1351 0x12a655f9,
1286 0xbbef0c98, 1352 0x98e51ef4,
1287 0xc44b02cb, 1353 0x0c98f10b,
1288 0x00bbcf07, 1354 0x02cbbbf2,
1289 0xe0fcd0fc, 1355 0xcf07c44b,
1290 0x0002c27e, 1356 0xd0fc00bb,
1291/* 0x0602: memx_info */ 1357 0xc27ee0fc,
1292 0xc04c00f8, 1358 0x00f80002,
1359/* 0x0604: memx_info */
1360 0xf401c670,
1361/* 0x060a: memx_info_data */
1362 0xcc4c0c0b,
1293 0x08004b03, 1363 0x08004b03,
1294 0x0002c27e, 1364/* 0x0613: memx_info_train */
1295/* 0x060e: memx_recv */ 1365 0x4c090ef4,
1296 0xd6b000f8, 1366 0x004b0bcc,
1297 0xb20bf401, 1367/* 0x0619: memx_info_send */
1298 0xf400d6b0, 1368 0x02c27e01,
1299 0x00f8eb0b, 1369/* 0x061f: memx_recv */
1300/* 0x061c: memx_init */ 1370 0xb000f800,
1301/* 0x061e: perf_recv */ 1371 0x0bf401d6,
1302 0x00f800f8, 1372 0x00d6b0a3,
1303/* 0x0620: perf_init */ 1373 0xf8dc0bf4,
1304/* 0x0622: i2c_drive_scl */ 1374/* 0x062d: memx_init */
1305 0x36b000f8, 1375/* 0x062f: perf_recv */
1306 0x0d0bf400, 1376 0xf800f800,
1307 0xf607e040, 1377/* 0x0631: perf_init */
1308 0x04bd0001, 1378/* 0x0633: i2c_drive_scl */
1309/* 0x0632: i2c_drive_scl_lo */ 1379 0xb000f800,
1310 0xe44000f8, 1380 0x0bf40036,
1311 0x0001f607, 1381 0x07e0400d,
1312 0x00f804bd, 1382 0xbd0001f6,
1313/* 0x063c: i2c_drive_sda */ 1383/* 0x0643: i2c_drive_scl_lo */
1314 0xf40036b0, 1384 0x4000f804,
1315 0xe0400d0b, 1385 0x01f607e4,
1316 0x0002f607, 1386 0xf804bd00,
1317 0x00f804bd, 1387/* 0x064d: i2c_drive_sda */
1318/* 0x064c: i2c_drive_sda_lo */ 1388 0x0036b000,
1319 0xf607e440, 1389 0x400d0bf4,
1320 0x04bd0002, 1390 0x02f607e0,
1321/* 0x0656: i2c_sense_scl */ 1391 0xf804bd00,
1322 0x32f400f8, 1392/* 0x065d: i2c_drive_sda_lo */
1323 0x07c44301, 1393 0x07e44000,
1324 0xfd0033cf, 1394 0xbd0002f6,
1325 0x0bf40431, 1395/* 0x0667: i2c_sense_scl */
1326 0x0131f406, 1396 0xf400f804,
1327/* 0x0668: i2c_sense_scl_done */ 1397 0xc4430132,
1328/* 0x066a: i2c_sense_sda */ 1398 0x0033cf07,
1329 0x32f400f8, 1399 0xf40431fd,
1330 0x07c44301, 1400 0x31f4060b,
1331 0xfd0033cf, 1401/* 0x0679: i2c_sense_scl_done */
1332 0x0bf40432, 1402/* 0x067b: i2c_sense_sda */
1333 0x0131f406, 1403 0xf400f801,
1334/* 0x067c: i2c_sense_sda_done */ 1404 0xc4430132,
1335/* 0x067e: i2c_raise_scl */ 1405 0x0033cf07,
1336 0x40f900f8, 1406 0xf40432fd,
1337 0x03089844, 1407 0x31f4060b,
1338 0x06227e01, 1408/* 0x068d: i2c_sense_sda_done */
1339/* 0x0689: i2c_raise_scl_wait */ 1409/* 0x068f: i2c_raise_scl */
1340 0x03e84e00, 1410 0xf900f801,
1341 0x00005d7e, 1411 0x08984440,
1342 0x0006567e, 1412 0x337e0103,
1343 0xb60901f4, 1413/* 0x069a: i2c_raise_scl_wait */
1344 0x1bf40142, 1414 0xe84e0006,
1345/* 0x069d: i2c_raise_scl_done */ 1415 0x005d7e03,
1346 0xf840fcef, 1416 0x06677e00,
1347/* 0x06a1: i2c_start */ 1417 0x0901f400,
1348 0x06567e00, 1418 0xf40142b6,
1349 0x0d11f400, 1419/* 0x06ae: i2c_raise_scl_done */
1350 0x00066a7e, 1420 0x40fcef1b,
1351 0xf40611f4, 1421/* 0x06b2: i2c_start */
1352/* 0x06b2: i2c_start_rep */ 1422 0x677e00f8,
1353 0x00032e0e, 1423 0x11f40006,
1354 0x0006227e, 1424 0x067b7e0d,
1355 0x3c7e0103, 1425 0x0611f400,
1356 0x76bb0006, 1426/* 0x06c3: i2c_start_rep */
1357 0x0465b600, 1427 0x032e0ef4,
1358 0x659450f9, 1428 0x06337e00,
1359 0x0256bb04,
1360 0x75fd50bd,
1361 0x7e50fc04,
1362 0xb600067e,
1363 0x11f40464,
1364/* 0x06dd: i2c_start_send */
1365 0x7e00031d,
1366 0x4e00063c,
1367 0x5d7e1388,
1368 0x00030000,
1369 0x0006227e,
1370 0x7e13884e,
1371/* 0x06f7: i2c_start_out */
1372 0xf800005d,
1373/* 0x06f9: i2c_stop */
1374 0x7e000300,
1375 0x03000622,
1376 0x063c7e00,
1377 0x03e84e00,
1378 0x00005d7e,
1379 0x227e0103,
1380 0x884e0006,
1381 0x005d7e13,
1382 0x7e010300, 1429 0x7e010300,
1383 0x4e00063c, 1430 0xbb00064d,
1384 0x5d7e1388,
1385 0x00f80000,
1386/* 0x0728: i2c_bitw */
1387 0x00063c7e,
1388 0x7e03e84e,
1389 0xbb00005d,
1390 0x65b60076, 1431 0x65b60076,
1391 0x9450f904, 1432 0x9450f904,
1392 0x56bb0465, 1433 0x56bb0465,
1393 0xfd50bd02, 1434 0xfd50bd02,
1394 0x50fc0475, 1435 0x50fc0475,
1395 0x00067e7e, 1436 0x00068f7e,
1396 0xf40464b6, 1437 0xf40464b6,
1397 0x884e1711, 1438/* 0x06ee: i2c_start_send */
1398 0x005d7e13, 1439 0x00031d11,
1399 0x7e000300, 1440 0x00064d7e,
1400 0x4e000622, 1441 0x7e13884e,
1401 0x5d7e1388, 1442 0x0300005d,
1402/* 0x0766: i2c_bitw_out */ 1443 0x06337e00,
1403 0x00f80000, 1444 0x13884e00,
1404/* 0x0768: i2c_bitr */ 1445 0x00005d7e,
1405 0x3c7e0103, 1446/* 0x0708: i2c_start_out */
1447/* 0x070a: i2c_stop */
1448 0x000300f8,
1449 0x0006337e,
1450 0x4d7e0003,
1406 0xe84e0006, 1451 0xe84e0006,
1407 0x005d7e03, 1452 0x005d7e03,
1408 0x0076bb00, 1453 0x7e010300,
1409 0xf90465b6, 1454 0x4e000633,
1410 0x04659450, 1455 0x5d7e1388,
1411 0xbd0256bb, 1456 0x01030000,
1412 0x0475fd50, 1457 0x00064d7e,
1413 0x7e7e50fc, 1458 0x7e13884e,
1414 0x64b60006, 1459 0xf800005d,
1415 0x1a11f404, 1460/* 0x0739: i2c_bitw */
1416 0x00066a7e, 1461 0x064d7e00,
1417 0x227e0003, 1462 0x03e84e00,
1418 0x884e0006, 1463 0x00005d7e,
1419 0x005d7e13,
1420 0x013cf000,
1421/* 0x07ab: i2c_bitr_done */
1422 0xf80131f4,
1423/* 0x07ad: i2c_get_byte */
1424 0x04000500,
1425/* 0x07b1: i2c_get_byte_next */
1426 0x0154b608,
1427 0xb60076bb, 1464 0xb60076bb,
1428 0x50f90465, 1465 0x50f90465,
1429 0xbb046594, 1466 0xbb046594,
1430 0x50bd0256, 1467 0x50bd0256,
1431 0xfc0475fd, 1468 0xfc0475fd,
1432 0x07687e50, 1469 0x068f7e50,
1433 0x0464b600, 1470 0x0464b600,
1434 0xfd2a11f4, 1471 0x4e1711f4,
1435 0x42b60553, 1472 0x5d7e1388,
1436 0xd81bf401, 1473 0x00030000,
1437 0x76bb0103, 1474 0x0006337e,
1475 0x7e13884e,
1476/* 0x0777: i2c_bitw_out */
1477 0xf800005d,
1478/* 0x0779: i2c_bitr */
1479 0x7e010300,
1480 0x4e00064d,
1481 0x5d7e03e8,
1482 0x76bb0000,
1438 0x0465b600, 1483 0x0465b600,
1439 0x659450f9, 1484 0x659450f9,
1440 0x0256bb04, 1485 0x0256bb04,
1441 0x75fd50bd, 1486 0x75fd50bd,
1442 0x7e50fc04, 1487 0x7e50fc04,
1443 0xb6000728, 1488 0xb600068f,
1444/* 0x07fa: i2c_get_byte_done */ 1489 0x11f40464,
1445 0x00f80464, 1490 0x067b7e1a,
1446/* 0x07fc: i2c_put_byte */ 1491 0x7e000300,
1447/* 0x07fe: i2c_put_byte_next */ 1492 0x4e000633,
1448 0x42b60804, 1493 0x5d7e1388,
1449 0x3854ff01, 1494 0x3cf00000,
1450 0xb60076bb, 1495 0x0131f401,
1451 0x50f90465, 1496/* 0x07bc: i2c_bitr_done */
1452 0xbb046594, 1497/* 0x07be: i2c_get_byte */
1453 0x50bd0256, 1498 0x000500f8,
1454 0xfc0475fd, 1499/* 0x07c2: i2c_get_byte_next */
1455 0x07287e50, 1500 0x54b60804,
1456 0x0464b600, 1501 0x0076bb01,
1457 0xb03411f4, 1502 0xf90465b6,
1458 0x1bf40046, 1503 0x04659450,
1459 0x0076bbd8, 1504 0xbd0256bb,
1505 0x0475fd50,
1506 0x797e50fc,
1507 0x64b60007,
1508 0x2a11f404,
1509 0xb60553fd,
1510 0x1bf40142,
1511 0xbb0103d8,
1512 0x65b60076,
1513 0x9450f904,
1514 0x56bb0465,
1515 0xfd50bd02,
1516 0x50fc0475,
1517 0x0007397e,
1518/* 0x080b: i2c_get_byte_done */
1519 0xf80464b6,
1520/* 0x080d: i2c_put_byte */
1521/* 0x080f: i2c_put_byte_next */
1522 0xb6080400,
1523 0x54ff0142,
1524 0x0076bb38,
1460 0xf90465b6, 1525 0xf90465b6,
1461 0x04659450, 1526 0x04659450,
1462 0xbd0256bb, 1527 0xbd0256bb,
1463 0x0475fd50, 1528 0x0475fd50,
1464 0x687e50fc, 1529 0x397e50fc,
1465 0x64b60007, 1530 0x64b60007,
1466 0x0f11f404, 1531 0x3411f404,
1467 0xb00076bb, 1532 0xf40046b0,
1468 0x1bf40136, 1533 0x76bbd81b,
1469 0x0132f406,
1470/* 0x0854: i2c_put_byte_done */
1471/* 0x0856: i2c_addr */
1472 0x76bb00f8,
1473 0x0465b600, 1534 0x0465b600,
1474 0x659450f9, 1535 0x659450f9,
1475 0x0256bb04, 1536 0x0256bb04,
1476 0x75fd50bd, 1537 0x75fd50bd,
1477 0x7e50fc04, 1538 0x7e50fc04,
1478 0xb60006a1, 1539 0xb6000779,
1479 0x11f40464, 1540 0x11f40464,
1480 0x2ec3e729, 1541 0x0076bb0f,
1481 0x0134b601, 1542 0xf40136b0,
1482 0xbb0553fd, 1543 0x32f4061b,
1544/* 0x0865: i2c_put_byte_done */
1545/* 0x0867: i2c_addr */
1546 0xbb00f801,
1483 0x65b60076, 1547 0x65b60076,
1484 0x9450f904, 1548 0x9450f904,
1485 0x56bb0465, 1549 0x56bb0465,
1486 0xfd50bd02, 1550 0xfd50bd02,
1487 0x50fc0475, 1551 0x50fc0475,
1488 0x0007fc7e, 1552 0x0006b27e,
1489/* 0x089b: i2c_addr_done */ 1553 0xf40464b6,
1490 0xf80464b6, 1554 0xc3e72911,
1491/* 0x089d: i2c_acquire_addr */ 1555 0x34b6012e,
1492 0xf8cec700, 1556 0x0553fd01,
1493 0xb705e4b6, 1557 0xb60076bb,
1494 0xf8d014e0, 1558 0x50f90465,
1495/* 0x08a9: i2c_acquire */ 1559 0xbb046594,
1496 0x089d7e00, 1560 0x50bd0256,
1497 0x00047e00, 1561 0xfc0475fd,
1498 0x03d9f000, 1562 0x080d7e50,
1499 0x00002e7e, 1563 0x0464b600,
1500/* 0x08ba: i2c_release */ 1564/* 0x08ac: i2c_addr_done */
1501 0x9d7e00f8, 1565/* 0x08ae: i2c_acquire_addr */
1566 0xcec700f8,
1567 0x05e4b6f8,
1568 0xd014e0b7,
1569/* 0x08ba: i2c_acquire */
1570 0xae7e00f8,
1502 0x047e0008, 1571 0x047e0008,
1503 0xdaf00000, 1572 0xd9f00000,
1504 0x002e7e03, 1573 0x002e7e03,
1505/* 0x08cb: i2c_recv */ 1574/* 0x08cb: i2c_release */
1506 0xf400f800, 1575 0x7e00f800,
1507 0xc1c70132, 1576 0x7e0008ae,
1508 0x0214b6f8, 1577 0xf0000004,
1509 0xf52816b0, 1578 0x2e7e03da,
1510 0xb801371f, 1579 0x00f80000,
1511 0x000be813, 1580/* 0x08dc: i2c_recv */
1512 0xb8003298, 1581 0xc70132f4,
1513 0x000bc013, 1582 0x14b6f8c1,
1514 0xf4003198, 1583 0x2816b002,
1515 0xd0f90231, 1584 0x01371ff5,
1516 0xd0f9e0f9, 1585 0x0cf413b8,
1517 0x000067f1, 1586 0x00329800,
1518 0x100063f1, 1587 0x0ccc13b8,
1519 0xbb016792, 1588 0x00319800,
1589 0xf90231f4,
1590 0xf9e0f9d0,
1591 0x0067f1d0,
1592 0x0063f100,
1593 0x01679210,
1594 0xb60076bb,
1595 0x50f90465,
1596 0xbb046594,
1597 0x50bd0256,
1598 0xfc0475fd,
1599 0x08ba7e50,
1600 0x0464b600,
1601 0xd6b0d0fc,
1602 0xb01bf500,
1603 0xbb000500,
1520 0x65b60076, 1604 0x65b60076,
1521 0x9450f904, 1605 0x9450f904,
1522 0x56bb0465, 1606 0x56bb0465,
1523 0xfd50bd02, 1607 0xfd50bd02,
1524 0x50fc0475, 1608 0x50fc0475,
1525 0x0008a97e, 1609 0x0008677e,
1526 0xfc0464b6, 1610 0xf50464b6,
1527 0x00d6b0d0, 1611 0xc700cc11,
1528 0x00b01bf5, 1612 0x76bbe0c5,
1529 0x76bb0005,
1530 0x0465b600, 1613 0x0465b600,
1531 0x659450f9, 1614 0x659450f9,
1532 0x0256bb04, 1615 0x0256bb04,
1533 0x75fd50bd, 1616 0x75fd50bd,
1534 0x7e50fc04, 1617 0x7e50fc04,
1535 0xb6000856, 1618 0xb600080d,
1536 0x11f50464, 1619 0x11f50464,
1537 0xc5c700cc, 1620 0x010500a9,
1538 0x0076bbe0, 1621 0xb60076bb,
1539 0xf90465b6, 1622 0x50f90465,
1540 0x04659450, 1623 0xbb046594,
1541 0xbd0256bb, 1624 0x50bd0256,
1542 0x0475fd50, 1625 0xfc0475fd,
1543 0xfc7e50fc, 1626 0x08677e50,
1544 0x64b60007, 1627 0x0464b600,
1545 0xa911f504, 1628 0x008711f5,
1546 0xbb010500, 1629 0xb60076bb,
1547 0x65b60076, 1630 0x50f90465,
1548 0x9450f904, 1631 0xbb046594,
1549 0x56bb0465, 1632 0x50bd0256,
1550 0xfd50bd02, 1633 0xfc0475fd,
1551 0x50fc0475, 1634 0x07be7e50,
1552 0x0008567e, 1635 0x0464b600,
1553 0xf50464b6, 1636 0xcb6711f4,
1554 0xbb008711, 1637 0x76bbe05b,
1555 0x65b60076, 1638 0x0465b600,
1556 0x9450f904, 1639 0x659450f9,
1557 0x56bb0465, 1640 0x0256bb04,
1558 0xfd50bd02, 1641 0x75fd50bd,
1559 0x50fc0475, 1642 0x7e50fc04,
1560 0x0007ad7e, 1643 0xb600070a,
1561 0xf40464b6, 1644 0x5bb20464,
1562 0x5bcb6711, 1645 0x0ef474bd,
1563 0x0076bbe0, 1646/* 0x09e1: i2c_recv_not_rd08 */
1564 0xf90465b6, 1647 0x01d6b041,
1565 0x04659450, 1648 0x053b1bf4,
1566 0xbd0256bb, 1649 0x08677e00,
1567 0x0475fd50, 1650 0x3211f400,
1568 0xf97e50fc, 1651 0x7ee0c5c7,
1569 0x64b60006, 1652 0xf400080d,
1570 0xbd5bb204, 1653 0x00052811,
1571 0x410ef474, 1654 0x0008677e,
1572/* 0x09d0: i2c_recv_not_rd08 */ 1655 0xc71f11f4,
1573 0xf401d6b0, 1656 0x0d7ee0b5,
1574 0x00053b1b, 1657 0x11f40008,
1575 0x0008567e, 1658 0x070a7e15,
1576 0xc73211f4, 1659 0xc774bd00,
1577 0xfc7ee0c5, 1660 0x1bf408c5,
1578 0x11f40007, 1661 0x0232f409,
1579 0x7e000528, 1662/* 0x0a1f: i2c_recv_not_wr08 */
1580 0xf4000856, 1663/* 0x0a1f: i2c_recv_done */
1581 0xb5c71f11, 1664 0xc7030ef4,
1582 0x07fc7ee0, 1665 0xcb7ef8ce,
1583 0x1511f400, 1666 0xe0fc0008,
1584 0x0006f97e, 1667 0x12f4d0fc,
1585 0xc5c774bd, 1668 0x7e7cb209,
1586 0x091bf408, 1669/* 0x0a33: i2c_recv_exit */
1587 0xf40232f4, 1670 0xf80002c2,
1588/* 0x0a0e: i2c_recv_not_wr08 */ 1671/* 0x0a35: i2c_init */
1589/* 0x0a0e: i2c_recv_done */ 1672/* 0x0a37: test_recv */
1590 0xcec7030e, 1673 0x4100f800,
1591 0x08ba7ef8, 1674 0x11cf0458,
1592 0xfce0fc00,
1593 0x0912f4d0,
1594 0xc27e7cb2,
1595/* 0x0a22: i2c_recv_exit */
1596 0x00f80002,
1597/* 0x0a24: i2c_init */
1598/* 0x0a26: test_recv */
1599 0x584100f8,
1600 0x0011cf04,
1601 0x400110b6,
1602 0x01f60458,
1603 0xf104bd00,
1604 0xf1d900e7,
1605 0x7e134fe3,
1606 0xf8000201,
1607/* 0x0a45: test_init */
1608 0x08004e00,
1609 0x0002017e,
1610/* 0x0a4e: idle_recv */
1611 0x00f800f8,
1612/* 0x0a50: idle */
1613 0x410031f4,
1614 0x11cf0454,
1615 0x0110b600, 1675 0x0110b600,
1616 0xf6045440, 1676 0xf6045840,
1617 0x04bd0001, 1677 0x04bd0001,
1618/* 0x0a64: idle_loop */ 1678 0xd900e7f1,
1619 0x32f45801, 1679 0x134fe3f1,
1620/* 0x0a69: idle_proc */ 1680 0x0002017e,
1621/* 0x0a69: idle_proc_exec */ 1681/* 0x0a56: test_init */
1622 0xb210f902, 1682 0x004e00f8,
1623 0x02cb7e1e, 1683 0x02017e08,
1624 0xf410fc00, 1684/* 0x0a5f: idle_recv */
1625 0x31f40911, 1685 0xf800f800,
1626 0xf00ef402, 1686/* 0x0a61: idle */
1627/* 0x0a7c: idle_proc_next */ 1687 0x0031f400,
1628 0xa65810b6, 1688 0xcf045441,
1629 0xe81bf41f, 1689 0x10b60011,
1630 0xf4e002f4, 1690 0x04544001,
1631 0x0ef40028, 1691 0xbd0001f6,
1632 0x000000c6, 1692/* 0x0a75: idle_loop */
1633 0x00000000, 1693 0xf4580104,
1634 0x00000000, 1694/* 0x0a7a: idle_proc */
1635 0x00000000, 1695/* 0x0a7a: idle_proc_exec */
1636 0x00000000, 1696 0x10f90232,
1697 0xcb7e1eb2,
1698 0x10fc0002,
1699 0xf40911f4,
1700 0x0ef40231,
1701/* 0x0a8d: idle_proc_next */
1702 0x5810b6f0,
1703 0x1bf41fa6,
1704 0xe002f4e8,
1705 0xf40028f4,
1706 0x0000c60e,
1637 0x00000000, 1707 0x00000000,
1638 0x00000000, 1708 0x00000000,
1639 0x00000000, 1709 0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h
index 64e97baabc3c..d1f9b6cb66d7 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h
@@ -46,8 +46,8 @@ uint32_t nva3_pwr_data[] = {
46 0x00000000, 46 0x00000000,
47 0x00000000, 47 0x00000000,
48 0x584d454d, 48 0x584d454d,
49 0x000006e0, 49 0x00000842,
50 0x000006d2, 50 0x00000834,
51 0x00000000, 51 0x00000000,
52 0x00000000, 52 0x00000000,
53 0x00000000, 53 0x00000000,
@@ -68,8 +68,8 @@ uint32_t nva3_pwr_data[] = {
68 0x00000000, 68 0x00000000,
69 0x00000000, 69 0x00000000,
70 0x46524550, 70 0x46524550,
71 0x000006e4, 71 0x00000846,
72 0x000006e2, 72 0x00000844,
73 0x00000000, 73 0x00000000,
74 0x00000000, 74 0x00000000,
75 0x00000000, 75 0x00000000,
@@ -90,8 +90,8 @@ uint32_t nva3_pwr_data[] = {
90 0x00000000, 90 0x00000000,
91 0x00000000, 91 0x00000000,
92 0x5f433249, 92 0x5f433249,
93 0x00000b14, 93 0x00000c76,
94 0x000009b7, 94 0x00000b19,
95 0x00000000, 95 0x00000000,
96 0x00000000, 96 0x00000000,
97 0x00000000, 97 0x00000000,
@@ -112,8 +112,8 @@ uint32_t nva3_pwr_data[] = {
112 0x00000000, 112 0x00000000,
113 0x00000000, 113 0x00000000,
114 0x54534554, 114 0x54534554,
115 0x00000b3d, 115 0x00000c9f,
116 0x00000b16, 116 0x00000c78,
117 0x00000000, 117 0x00000000,
118 0x00000000, 118 0x00000000,
119 0x00000000, 119 0x00000000,
@@ -134,8 +134,8 @@ uint32_t nva3_pwr_data[] = {
134 0x00000000, 134 0x00000000,
135 0x00000000, 135 0x00000000,
136 0x454c4449, 136 0x454c4449,
137 0x00000b49, 137 0x00000cab,
138 0x00000b47, 138 0x00000ca9,
139 0x00000000, 139 0x00000000,
140 0x00000000, 140 0x00000000,
141 0x00000000, 141 0x00000000,
@@ -246,13 +246,15 @@ uint32_t nva3_pwr_data[] = {
246 0x00010006, 246 0x00010006,
247 0x00000000, 247 0x00000000,
248 0x000005f8, 248 0x000005f8,
249/* 0x03b8: memx_func_tail */ 249 0x00000007,
250/* 0x03b8: memx_ts_start */
251 0x00000000, 250 0x00000000,
252/* 0x03bc: memx_ts_end */ 251 0x0000067e,
252/* 0x03c4: memx_func_tail */
253/* 0x03c4: memx_ts_start */
253 0x00000000, 254 0x00000000,
254/* 0x03c0: memx_data_head */ 255/* 0x03c8: memx_ts_end */
255 0x00000000, 256 0x00000000,
257/* 0x03cc: memx_data_head */
256 0x00000000, 258 0x00000000,
257 0x00000000, 259 0x00000000,
258 0x00000000, 260 0x00000000,
@@ -764,8 +766,75 @@ uint32_t nva3_pwr_data[] = {
764 0x00000000, 766 0x00000000,
765 0x00000000, 767 0x00000000,
766 0x00000000, 768 0x00000000,
767/* 0x0bc0: memx_data_tail */ 769 0x00000000,
768/* 0x0bc0: i2c_scl_map */ 770/* 0x0bcc: memx_data_tail */
771/* 0x0bcc: memx_train_head */
772 0x00000000,
773 0x00000000,
774 0x00000000,
775 0x00000000,
776 0x00000000,
777 0x00000000,
778 0x00000000,
779 0x00000000,
780 0x00000000,
781 0x00000000,
782 0x00000000,
783 0x00000000,
784 0x00000000,
785 0x00000000,
786 0x00000000,
787 0x00000000,
788 0x00000000,
789 0x00000000,
790 0x00000000,
791 0x00000000,
792 0x00000000,
793 0x00000000,
794 0x00000000,
795 0x00000000,
796 0x00000000,
797 0x00000000,
798 0x00000000,
799 0x00000000,
800 0x00000000,
801 0x00000000,
802 0x00000000,
803 0x00000000,
804 0x00000000,
805 0x00000000,
806 0x00000000,
807 0x00000000,
808 0x00000000,
809 0x00000000,
810 0x00000000,
811 0x00000000,
812 0x00000000,
813 0x00000000,
814 0x00000000,
815 0x00000000,
816 0x00000000,
817 0x00000000,
818 0x00000000,
819 0x00000000,
820 0x00000000,
821 0x00000000,
822 0x00000000,
823 0x00000000,
824 0x00000000,
825 0x00000000,
826 0x00000000,
827 0x00000000,
828 0x00000000,
829 0x00000000,
830 0x00000000,
831 0x00000000,
832 0x00000000,
833 0x00000000,
834 0x00000000,
835 0x00000000,
836/* 0x0ccc: memx_train_tail */
837/* 0x0ccc: i2c_scl_map */
769 0x00001000, 838 0x00001000,
770 0x00004000, 839 0x00004000,
771 0x00010000, 840 0x00010000,
@@ -776,7 +845,7 @@ uint32_t nva3_pwr_data[] = {
776 0x01000000, 845 0x01000000,
777 0x04000000, 846 0x04000000,
778 0x10000000, 847 0x10000000,
779/* 0x0be8: i2c_sda_map */ 848/* 0x0cf4: i2c_sda_map */
780 0x00002000, 849 0x00002000,
781 0x00008000, 850 0x00008000,
782 0x00020000, 851 0x00020000,
@@ -787,7 +856,7 @@ uint32_t nva3_pwr_data[] = {
787 0x02000000, 856 0x02000000,
788 0x08000000, 857 0x08000000,
789 0x20000000, 858 0x20000000,
790/* 0x0c10: i2c_ctrl */ 859/* 0x0d1c: i2c_ctrl */
791 0x0000e138, 860 0x0000e138,
792 0x0000e150, 861 0x0000e150,
793 0x0000e168, 862 0x0000e168,
@@ -845,9 +914,6 @@ uint32_t nva3_pwr_data[] = {
845 0x00000000, 914 0x00000000,
846 0x00000000, 915 0x00000000,
847 0x00000000, 916 0x00000000,
848 0x00000000,
849 0x00000000,
850 0x00000000,
851}; 917};
852 918
853uint32_t nva3_pwr_code[] = { 919uint32_t nva3_pwr_code[] = {
@@ -1258,11 +1324,11 @@ uint32_t nva3_pwr_code[] = {
1258 0x67f0f30b, 1324 0x67f0f30b,
1259 0x0664b62c, 1325 0x0664b62c,
1260 0x800066cf, 1326 0x800066cf,
1261 0x00f8ee06, 1327 0x00f8f106,
1262/* 0x05a8: memx_func_leave */ 1328/* 0x05a8: memx_func_leave */
1263 0xb62c67f0, 1329 0xb62c67f0,
1264 0x66cf0664, 1330 0x66cf0664,
1265 0xef068000, 1331 0xf2068000,
1266 0xf10467f0, 1332 0xf10467f0,
1267 0xb607e407, 1333 0xb607e407,
1268 0x06d00604, 1334 0x06d00604,
@@ -1323,408 +1389,479 @@ uint32_t nva3_pwr_code[] = {
1323 0x9800f8a4, 1389 0x9800f8a4,
1324 0x10b6001e, 1390 0x10b6001e,
1325 0x7f21f404, 1391 0x7f21f404,
1326/* 0x067e: memx_exec */ 1392/* 0x067e: memx_func_train */
1327 0xe0f900f8, 1393 0x57f100f8,
1328 0xc1b9d0f9, 1394 0x77f10003,
1329 0x02b2b902, 1395 0x97f10000,
1330/* 0x0688: memx_exec_next */ 1396 0x93f00000,
1331 0xb6001398, 1397 0x029eb970,
1332 0x34e70410, 1398 0xb90421f4,
1333 0x33e701f0, 1399 0xe7f102d8,
1334 0x32b601e0, 1400 0x21f42710,
1335 0x0c30f001, 1401/* 0x069d: memx_func_train_loop_outer */
1336 0xf9de3598, 1402 0x0158e07f,
1337 0x0612b855, 1403 0x0083f101,
1338 0x98e41ef4, 1404 0xe097f102,
1339 0x0c98ee0b, 1405 0x1193f011,
1340 0x02cbbbef, 1406 0x80f990f9,
1341 0x07c4b7f1, 1407 0xe0fcd0fc,
1342 0xcf06b4b6, 1408 0xf93f21f4,
1343 0xd0fc00bb, 1409 0x0067f150,
1344 0x21f5e0fc, 1410/* 0x06bd: memx_func_train_loop_inner */
1411 0x1187f100,
1412 0x9068ff11,
1413 0xfd109894,
1414 0x97f10589,
1415 0x93f00720,
1416 0xf990f910,
1417 0xfcd0fc80,
1418 0x3f21f4e0,
1419 0x008097f1,
1420 0xb91093f0,
1421 0x21f4029e,
1422 0x02d8b904,
1423 0xf92088c5,
1424 0xfc80f990,
1425 0xf4e0fcd0,
1426 0x97f13f21,
1427 0x93f0053c,
1428 0x0287f110,
1429 0x0083f130,
1430 0xf990f980,
1431 0xfcd0fc80,
1432 0x3f21f4e0,
1433 0x0560e7f1,
1434 0xf110e3f0,
1435 0xf10000d7,
1436 0x908000d3,
1437 0xb7f100dc,
1438 0xb3f08480,
1439 0xa421f41e,
1440 0x000057f1,
1441 0xffff97f1,
1442 0x830093f1,
1443/* 0x073c: memx_func_train_loop_4x */
1444 0x0080a7f1,
1445 0xb910a3f0,
1446 0x21f402ae,
1447 0x02d8b904,
1448 0xffdfb7f1,
1449 0xffffb3f1,
1450 0xf9048bfd,
1451 0xfc80f9a0,
1452 0xf4e0fcd0,
1453 0xa7f13f21,
1454 0xa3f0053c,
1455 0x0287f110,
1456 0x0083f130,
1457 0xf9a0f980,
1458 0xfcd0fc80,
1459 0x3f21f4e0,
1460 0x0560e7f1,
1461 0xf110e3f0,
1462 0xf10000d7,
1463 0xb98000d3,
1464 0xb7f102dc,
1465 0xb3f02710,
1466 0xa421f400,
1467 0xf402eeb9,
1468 0xddb90421,
1469 0x949dff02,
1470 0x700150b6,
1471 0x1ef40456,
1472 0xcc7aa092,
1473 0x00a9800b,
1474 0xb60160b6,
1475 0x66700470,
1476 0x001ef510,
1477 0xb650fcff,
1478 0x56700150,
1479 0xd41ef507,
1480/* 0x07cf: memx_exec */
1481 0xf900f8fe,
1482 0xb9d0f9e0,
1483 0xb2b902c1,
1484/* 0x07d9: memx_exec_next */
1485 0x00139802,
1486 0xe70410b6,
1487 0xe701f034,
1488 0xb601e033,
1489 0x30f00132,
1490 0xde35980c,
1491 0x12b855f9,
1492 0xe41ef406,
1493 0x98f10b98,
1494 0xcbbbf20c,
1495 0xc4b7f102,
1496 0x06b4b607,
1497 0xfc00bbcf,
1498 0xf5e0fcd0,
1499 0xf8034221,
1500/* 0x0815: memx_info */
1501 0x01c67000,
1502/* 0x081b: memx_info_data */
1503 0xf10e0bf4,
1504 0xf103ccc7,
1505 0xf40800b7,
1506/* 0x0826: memx_info_train */
1507 0xc7f10b0e,
1508 0xb7f10bcc,
1509/* 0x082e: memx_info_send */
1510 0x21f50100,
1345 0x00f80342, 1511 0x00f80342,
1346/* 0x06c4: memx_info */ 1512/* 0x0834: memx_recv */
1347 0x03c0c7f1, 1513 0xf401d6b0,
1348 0x0800b7f1, 1514 0xd6b0980b,
1349 0x034221f5, 1515 0xd80bf400,
1350/* 0x06d2: memx_recv */ 1516/* 0x0842: memx_init */
1351 0xd6b000f8, 1517 0x00f800f8,
1352 0xa90bf401, 1518/* 0x0844: perf_recv */
1353 0xf400d6b0, 1519/* 0x0846: perf_init */
1354 0x00f8e90b,
1355/* 0x06e0: memx_init */
1356/* 0x06e2: perf_recv */
1357 0x00f800f8, 1520 0x00f800f8,
1358/* 0x06e4: perf_init */ 1521/* 0x0848: i2c_drive_scl */
1359/* 0x06e6: i2c_drive_scl */ 1522 0xf40036b0,
1523 0x07f1110b,
1524 0x04b607e0,
1525 0x0001d006,
1526 0x00f804bd,
1527/* 0x085c: i2c_drive_scl_lo */
1528 0x07e407f1,
1529 0xd00604b6,
1530 0x04bd0001,
1531/* 0x086a: i2c_drive_sda */
1360 0x36b000f8, 1532 0x36b000f8,
1361 0x110bf400, 1533 0x110bf400,
1362 0x07e007f1, 1534 0x07e007f1,
1363 0xd00604b6, 1535 0xd00604b6,
1364 0x04bd0001, 1536 0x04bd0002,
1365/* 0x06fa: i2c_drive_scl_lo */ 1537/* 0x087e: i2c_drive_sda_lo */
1366 0x07f100f8, 1538 0x07f100f8,
1367 0x04b607e4, 1539 0x04b607e4,
1368 0x0001d006,
1369 0x00f804bd,
1370/* 0x0708: i2c_drive_sda */
1371 0xf40036b0,
1372 0x07f1110b,
1373 0x04b607e0,
1374 0x0002d006, 1540 0x0002d006,
1375 0x00f804bd, 1541 0x00f804bd,
1376/* 0x071c: i2c_drive_sda_lo */ 1542/* 0x088c: i2c_sense_scl */
1377 0x07e407f1, 1543 0xf10132f4,
1378 0xd00604b6, 1544 0xb607c437,
1379 0x04bd0002, 1545 0x33cf0634,
1380/* 0x072a: i2c_sense_scl */ 1546 0x0431fd00,
1381 0x32f400f8, 1547 0xf4060bf4,
1382 0xc437f101, 1548/* 0x08a2: i2c_sense_scl_done */
1383 0x0634b607, 1549 0x00f80131,
1384 0xfd0033cf, 1550/* 0x08a4: i2c_sense_sda */
1385 0x0bf40431, 1551 0xf10132f4,
1386 0x0131f406, 1552 0xb607c437,
1387/* 0x0740: i2c_sense_scl_done */ 1553 0x33cf0634,
1388/* 0x0742: i2c_sense_sda */ 1554 0x0432fd00,
1389 0x32f400f8, 1555 0xf4060bf4,
1390 0xc437f101, 1556/* 0x08ba: i2c_sense_sda_done */
1391 0x0634b607, 1557 0x00f80131,
1392 0xfd0033cf, 1558/* 0x08bc: i2c_raise_scl */
1393 0x0bf40432, 1559 0x47f140f9,
1394 0x0131f406, 1560 0x37f00898,
1395/* 0x0758: i2c_sense_sda_done */ 1561 0x4821f501,
1396/* 0x075a: i2c_raise_scl */ 1562/* 0x08c9: i2c_raise_scl_wait */
1397 0x40f900f8, 1563 0xe8e7f108,
1398 0x089847f1,
1399 0xf50137f0,
1400/* 0x0767: i2c_raise_scl_wait */
1401 0xf106e621,
1402 0xf403e8e7,
1403 0x21f57f21,
1404 0x01f4072a,
1405 0x0142b609,
1406/* 0x077b: i2c_raise_scl_done */
1407 0xfcef1bf4,
1408/* 0x077f: i2c_start */
1409 0xf500f840,
1410 0xf4072a21,
1411 0x21f50d11,
1412 0x11f40742,
1413 0x300ef406,
1414/* 0x0790: i2c_start_rep */
1415 0xf50037f0,
1416 0xf006e621,
1417 0x21f50137,
1418 0x76bb0708,
1419 0x0465b600,
1420 0x659450f9,
1421 0x0256bb04,
1422 0x75fd50bd,
1423 0xf550fc04,
1424 0xb6075a21,
1425 0x11f40464,
1426/* 0x07bd: i2c_start_send */
1427 0x0037f01f,
1428 0x070821f5,
1429 0x1388e7f1,
1430 0xf07f21f4,
1431 0x21f50037,
1432 0xe7f106e6,
1433 0x21f41388,
1434/* 0x07d9: i2c_start_out */
1435/* 0x07db: i2c_stop */
1436 0xf000f87f,
1437 0x21f50037,
1438 0x37f006e6,
1439 0x0821f500,
1440 0xe8e7f107,
1441 0x7f21f403, 1564 0x7f21f403,
1442 0xf50137f0, 1565 0x088c21f5,
1443 0xf106e621, 1566 0xb60901f4,
1444 0xf41388e7, 1567 0x1bf40142,
1445 0x37f07f21, 1568/* 0x08dd: i2c_raise_scl_done */
1446 0x0821f501, 1569 0xf840fcef,
1447 0x88e7f107, 1570/* 0x08e1: i2c_start */
1448 0x7f21f413, 1571 0x8c21f500,
1449/* 0x080e: i2c_bitw */ 1572 0x0d11f408,
1450 0x21f500f8, 1573 0x08a421f5,
1451 0xe7f10708, 1574 0xf40611f4,
1452 0x21f403e8, 1575/* 0x08f2: i2c_start_rep */
1453 0x0076bb7f, 1576 0x37f0300e,
1454 0xf90465b6, 1577 0x4821f500,
1455 0x04659450, 1578 0x0137f008,
1456 0xbd0256bb, 1579 0x086a21f5,
1457 0x0475fd50, 1580 0xb60076bb,
1458 0x21f550fc, 1581 0x50f90465,
1459 0x64b6075a, 1582 0xbb046594,
1460 0x1811f404, 1583 0x50bd0256,
1461 0x1388e7f1, 1584 0xfc0475fd,
1462 0xf07f21f4, 1585 0xbc21f550,
1586 0x0464b608,
1587/* 0x091f: i2c_start_send */
1588 0xf01f11f4,
1463 0x21f50037, 1589 0x21f50037,
1464 0xe7f106e6, 1590 0xe7f1086a,
1465 0x21f41388, 1591 0x21f41388,
1466/* 0x084d: i2c_bitw_out */ 1592 0x0037f07f,
1467/* 0x084f: i2c_bitr */ 1593 0x084821f5,
1468 0xf000f87f, 1594 0x1388e7f1,
1469 0x21f50137, 1595/* 0x093b: i2c_start_out */
1470 0xe7f10708, 1596 0xf87f21f4,
1471 0x21f403e8, 1597/* 0x093d: i2c_stop */
1472 0x0076bb7f, 1598 0x0037f000,
1473 0xf90465b6, 1599 0x084821f5,
1474 0x04659450,
1475 0xbd0256bb,
1476 0x0475fd50,
1477 0x21f550fc,
1478 0x64b6075a,
1479 0x1b11f404,
1480 0x074221f5,
1481 0xf50037f0, 1600 0xf50037f0,
1482 0xf106e621, 1601 0xf1086a21,
1602 0xf403e8e7,
1603 0x37f07f21,
1604 0x4821f501,
1605 0x88e7f108,
1606 0x7f21f413,
1607 0xf50137f0,
1608 0xf1086a21,
1483 0xf41388e7, 1609 0xf41388e7,
1484 0x3cf07f21, 1610 0x00f87f21,
1485 0x0131f401, 1611/* 0x0970: i2c_bitw */
1486/* 0x0894: i2c_bitr_done */ 1612 0x086a21f5,
1487/* 0x0896: i2c_get_byte */ 1613 0x03e8e7f1,
1488 0x57f000f8, 1614 0xbb7f21f4,
1489 0x0847f000,
1490/* 0x089c: i2c_get_byte_next */
1491 0xbb0154b6,
1492 0x65b60076, 1615 0x65b60076,
1493 0x9450f904, 1616 0x9450f904,
1494 0x56bb0465, 1617 0x56bb0465,
1495 0xfd50bd02, 1618 0xfd50bd02,
1496 0x50fc0475, 1619 0x50fc0475,
1497 0x084f21f5, 1620 0x08bc21f5,
1498 0xf40464b6, 1621 0xf40464b6,
1499 0x53fd2b11, 1622 0xe7f11811,
1500 0x0142b605, 1623 0x21f41388,
1501 0xf0d81bf4, 1624 0x0037f07f,
1502 0x76bb0137, 1625 0x084821f5,
1503 0x0465b600, 1626 0x1388e7f1,
1504 0x659450f9, 1627/* 0x09af: i2c_bitw_out */
1505 0x0256bb04, 1628 0xf87f21f4,
1506 0x75fd50bd, 1629/* 0x09b1: i2c_bitr */
1507 0xf550fc04, 1630 0x0137f000,
1508 0xb6080e21, 1631 0x086a21f5,
1509/* 0x08e6: i2c_get_byte_done */ 1632 0x03e8e7f1,
1510 0x00f80464, 1633 0xbb7f21f4,
1511/* 0x08e8: i2c_put_byte */ 1634 0x65b60076,
1512/* 0x08eb: i2c_put_byte_next */ 1635 0x9450f904,
1513 0xb60847f0, 1636 0x56bb0465,
1514 0x54ff0142, 1637 0xfd50bd02,
1515 0x0076bb38, 1638 0x50fc0475,
1639 0x08bc21f5,
1640 0xf40464b6,
1641 0x21f51b11,
1642 0x37f008a4,
1643 0x4821f500,
1644 0x88e7f108,
1645 0x7f21f413,
1646 0xf4013cf0,
1647/* 0x09f6: i2c_bitr_done */
1648 0x00f80131,
1649/* 0x09f8: i2c_get_byte */
1650 0xf00057f0,
1651/* 0x09fe: i2c_get_byte_next */
1652 0x54b60847,
1653 0x0076bb01,
1516 0xf90465b6, 1654 0xf90465b6,
1517 0x04659450, 1655 0x04659450,
1518 0xbd0256bb, 1656 0xbd0256bb,
1519 0x0475fd50, 1657 0x0475fd50,
1520 0x21f550fc, 1658 0x21f550fc,
1521 0x64b6080e, 1659 0x64b609b1,
1522 0x3411f404, 1660 0x2b11f404,
1523 0xf40046b0, 1661 0xb60553fd,
1524 0x76bbd81b, 1662 0x1bf40142,
1525 0x0465b600, 1663 0x0137f0d8,
1526 0x659450f9, 1664 0xb60076bb,
1527 0x0256bb04, 1665 0x50f90465,
1528 0x75fd50bd, 1666 0xbb046594,
1529 0xf550fc04, 1667 0x50bd0256,
1530 0xb6084f21, 1668 0xfc0475fd,
1531 0x11f40464, 1669 0x7021f550,
1532 0x0076bb0f, 1670 0x0464b609,
1533 0xf40136b0, 1671/* 0x0a48: i2c_get_byte_done */
1534 0x32f4061b, 1672/* 0x0a4a: i2c_put_byte */
1535/* 0x0941: i2c_put_byte_done */ 1673 0x47f000f8,
1536/* 0x0943: i2c_addr */ 1674/* 0x0a4d: i2c_put_byte_next */
1537 0xbb00f801, 1675 0x0142b608,
1676 0xbb3854ff,
1538 0x65b60076, 1677 0x65b60076,
1539 0x9450f904, 1678 0x9450f904,
1540 0x56bb0465, 1679 0x56bb0465,
1541 0xfd50bd02, 1680 0xfd50bd02,
1542 0x50fc0475, 1681 0x50fc0475,
1543 0x077f21f5, 1682 0x097021f5,
1544 0xf40464b6, 1683 0xf40464b6,
1545 0xc3e72911, 1684 0x46b03411,
1546 0x34b6012e, 1685 0xd81bf400,
1547 0x0553fd01,
1548 0xb60076bb, 1686 0xb60076bb,
1549 0x50f90465, 1687 0x50f90465,
1550 0xbb046594, 1688 0xbb046594,
1551 0x50bd0256, 1689 0x50bd0256,
1552 0xfc0475fd, 1690 0xfc0475fd,
1553 0xe821f550, 1691 0xb121f550,
1554 0x0464b608, 1692 0x0464b609,
1555/* 0x0988: i2c_addr_done */ 1693 0xbb0f11f4,
1556/* 0x098a: i2c_acquire_addr */ 1694 0x36b00076,
1557 0xcec700f8, 1695 0x061bf401,
1558 0x02e4b6f8, 1696/* 0x0aa3: i2c_put_byte_done */
1559 0x0c10e0b7, 1697 0xf80132f4,
1560 0xf800ee98, 1698/* 0x0aa5: i2c_addr */
1561/* 0x0999: i2c_acquire */
1562 0x8a21f500,
1563 0x0421f409,
1564 0xf403d9f0,
1565 0x00f83f21,
1566/* 0x09a8: i2c_release */
1567 0x098a21f5,
1568 0xf00421f4,
1569 0x21f403da,
1570/* 0x09b7: i2c_recv */
1571 0xf400f83f,
1572 0xc1c70132,
1573 0x0214b6f8,
1574 0xf52816b0,
1575 0xa0013a1f,
1576 0x980be813,
1577 0x13a00032,
1578 0x31980bc0,
1579 0x0231f400,
1580 0xe0f9d0f9,
1581 0x67f1d0f9,
1582 0x63f10000,
1583 0x67921000,
1584 0x0076bb01,
1585 0xf90465b6,
1586 0x04659450,
1587 0xbd0256bb,
1588 0x0475fd50,
1589 0x21f550fc,
1590 0x64b60999,
1591 0xb0d0fc04,
1592 0x1bf500d6,
1593 0x57f000b3,
1594 0x0076bb00, 1699 0x0076bb00,
1595 0xf90465b6, 1700 0xf90465b6,
1596 0x04659450, 1701 0x04659450,
1597 0xbd0256bb, 1702 0xbd0256bb,
1598 0x0475fd50, 1703 0x0475fd50,
1599 0x21f550fc, 1704 0x21f550fc,
1600 0x64b60943, 1705 0x64b608e1,
1601 0xd011f504, 1706 0x2911f404,
1602 0xe0c5c700, 1707 0x012ec3e7,
1603 0xb60076bb, 1708 0xfd0134b6,
1604 0x50f90465, 1709 0x76bb0553,
1605 0xbb046594, 1710 0x0465b600,
1606 0x50bd0256, 1711 0x659450f9,
1607 0xfc0475fd, 1712 0x0256bb04,
1608 0xe821f550, 1713 0x75fd50bd,
1609 0x0464b608, 1714 0xf550fc04,
1610 0x00ad11f5, 1715 0xb60a4a21,
1611 0xbb0157f0, 1716/* 0x0aea: i2c_addr_done */
1717 0x00f80464,
1718/* 0x0aec: i2c_acquire_addr */
1719 0xb6f8cec7,
1720 0xe0b702e4,
1721 0xee980d1c,
1722/* 0x0afb: i2c_acquire */
1723 0xf500f800,
1724 0xf40aec21,
1725 0xd9f00421,
1726 0x3f21f403,
1727/* 0x0b0a: i2c_release */
1728 0x21f500f8,
1729 0x21f40aec,
1730 0x03daf004,
1731 0xf83f21f4,
1732/* 0x0b19: i2c_recv */
1733 0x0132f400,
1734 0xb6f8c1c7,
1735 0x16b00214,
1736 0x3a1ff528,
1737 0xf413a001,
1738 0x0032980c,
1739 0x0ccc13a0,
1740 0xf4003198,
1741 0xd0f90231,
1742 0xd0f9e0f9,
1743 0x000067f1,
1744 0x100063f1,
1745 0xbb016792,
1612 0x65b60076, 1746 0x65b60076,
1613 0x9450f904, 1747 0x9450f904,
1614 0x56bb0465, 1748 0x56bb0465,
1615 0xfd50bd02, 1749 0xfd50bd02,
1616 0x50fc0475, 1750 0x50fc0475,
1617 0x094321f5, 1751 0x0afb21f5,
1618 0xf50464b6, 1752 0xfc0464b6,
1619 0xbb008a11, 1753 0x00d6b0d0,
1754 0x00b31bf5,
1755 0xbb0057f0,
1620 0x65b60076, 1756 0x65b60076,
1621 0x9450f904, 1757 0x9450f904,
1622 0x56bb0465, 1758 0x56bb0465,
1623 0xfd50bd02, 1759 0xfd50bd02,
1624 0x50fc0475, 1760 0x50fc0475,
1625 0x089621f5, 1761 0x0aa521f5,
1626 0xf40464b6, 1762 0xf50464b6,
1627 0x5bcb6a11, 1763 0xc700d011,
1628 0x0076bbe0, 1764 0x76bbe0c5,
1765 0x0465b600,
1766 0x659450f9,
1767 0x0256bb04,
1768 0x75fd50bd,
1769 0xf550fc04,
1770 0xb60a4a21,
1771 0x11f50464,
1772 0x57f000ad,
1773 0x0076bb01,
1629 0xf90465b6, 1774 0xf90465b6,
1630 0x04659450, 1775 0x04659450,
1631 0xbd0256bb, 1776 0xbd0256bb,
1632 0x0475fd50, 1777 0x0475fd50,
1633 0x21f550fc, 1778 0x21f550fc,
1634 0x64b607db, 1779 0x64b60aa5,
1635 0x025bb904, 1780 0x8a11f504,
1636 0x0ef474bd, 1781 0x0076bb00,
1637/* 0x0abd: i2c_recv_not_rd08 */ 1782 0xf90465b6,
1638 0x01d6b043, 1783 0x04659450,
1639 0xf03d1bf4, 1784 0xbd0256bb,
1640 0x21f50057, 1785 0x0475fd50,
1641 0x11f40943, 1786 0x21f550fc,
1642 0xe0c5c733, 1787 0x64b609f8,
1643 0x08e821f5, 1788 0x6a11f404,
1644 0xf02911f4, 1789 0xbbe05bcb,
1645 0x21f50057, 1790 0x65b60076,
1646 0x11f40943, 1791 0x9450f904,
1647 0xe0b5c71f, 1792 0x56bb0465,
1648 0x08e821f5, 1793 0xfd50bd02,
1649 0xf51511f4, 1794 0x50fc0475,
1650 0xbd07db21, 1795 0x093d21f5,
1651 0x08c5c774, 1796 0xb90464b6,
1652 0xf4091bf4, 1797 0x74bd025b,
1653 0x0ef40232, 1798/* 0x0c1f: i2c_recv_not_rd08 */
1654/* 0x0afd: i2c_recv_not_wr08 */ 1799 0xb0430ef4,
1655/* 0x0afd: i2c_recv_done */ 1800 0x1bf401d6,
1656 0xf8cec703, 1801 0x0057f03d,
1657 0x09a821f5, 1802 0x0aa521f5,
1658 0xd0fce0fc, 1803 0xc73311f4,
1659 0xb90a12f4, 1804 0x21f5e0c5,
1660 0x21f5027c, 1805 0x11f40a4a,
1661/* 0x0b12: i2c_recv_exit */ 1806 0x0057f029,
1662 0x00f80342, 1807 0x0aa521f5,
1663/* 0x0b14: i2c_init */ 1808 0xc71f11f4,
1664/* 0x0b16: test_recv */ 1809 0x21f5e0b5,
1665 0x17f100f8, 1810 0x11f40a4a,
1666 0x14b605d8, 1811 0x3d21f515,
1667 0x0011cf06, 1812 0xc774bd09,
1668 0xf10110b6, 1813 0x1bf408c5,
1669 0xb605d807, 1814 0x0232f409,
1670 0x01d00604, 1815/* 0x0c5f: i2c_recv_not_wr08 */
1671 0xf104bd00, 1816/* 0x0c5f: i2c_recv_done */
1672 0xf1d900e7, 1817 0xc7030ef4,
1673 0xf5134fe3, 1818 0x21f5f8ce,
1674 0xf8026221, 1819 0xe0fc0b0a,
1675/* 0x0b3d: test_init */ 1820 0x12f4d0fc,
1676 0x00e7f100, 1821 0x027cb90a,
1677 0x6221f508, 1822 0x034221f5,
1678/* 0x0b47: idle_recv */ 1823/* 0x0c74: i2c_recv_exit */
1679 0xf800f802, 1824/* 0x0c76: i2c_init */
1680/* 0x0b49: idle */ 1825 0x00f800f8,
1681 0x0031f400, 1826/* 0x0c78: test_recv */
1682 0x05d417f1, 1827 0x05d817f1,
1683 0xcf0614b6, 1828 0xcf0614b6,
1684 0x10b60011, 1829 0x10b60011,
1685 0xd407f101, 1830 0xd807f101,
1686 0x0604b605, 1831 0x0604b605,
1687 0xbd0001d0, 1832 0xbd0001d0,
1688/* 0x0b65: idle_loop */ 1833 0x00e7f104,
1689 0x5817f004, 1834 0x4fe3f1d9,
1690/* 0x0b6b: idle_proc */ 1835 0x6221f513,
1691/* 0x0b6b: idle_proc_exec */ 1836/* 0x0c9f: test_init */
1692 0xf90232f4, 1837 0xf100f802,
1693 0x021eb910, 1838 0xf50800e7,
1694 0x034b21f5, 1839 0xf8026221,
1695 0x11f410fc, 1840/* 0x0ca9: idle_recv */
1696 0x0231f409, 1841/* 0x0cab: idle */
1697/* 0x0b7f: idle_proc_next */ 1842 0xf400f800,
1698 0xb6ef0ef4, 1843 0x17f10031,
1699 0x1fb85810, 1844 0x14b605d4,
1700 0xe61bf406, 1845 0x0011cf06,
1701 0xf4dd02f4, 1846 0xf10110b6,
1702 0x0ef40028, 1847 0xb605d407,
1703 0x000000bb, 1848 0x01d00604,
1704 0x00000000, 1849/* 0x0cc7: idle_loop */
1705 0x00000000, 1850 0xf004bd00,
1706 0x00000000, 1851 0x32f45817,
1707 0x00000000, 1852/* 0x0ccd: idle_proc */
1708 0x00000000, 1853/* 0x0ccd: idle_proc_exec */
1709 0x00000000, 1854 0xb910f902,
1710 0x00000000, 1855 0x21f5021e,
1711 0x00000000, 1856 0x10fc034b,
1712 0x00000000, 1857 0xf40911f4,
1713 0x00000000, 1858 0x0ef40231,
1714 0x00000000, 1859/* 0x0ce1: idle_proc_next */
1715 0x00000000, 1860 0x5810b6ef,
1716 0x00000000, 1861 0xf4061fb8,
1717 0x00000000, 1862 0x02f4e61b,
1718 0x00000000, 1863 0x0028f4dd,
1719 0x00000000, 1864 0x00bb0ef4,
1720 0x00000000,
1721 0x00000000,
1722 0x00000000,
1723 0x00000000,
1724 0x00000000,
1725 0x00000000,
1726 0x00000000,
1727 0x00000000,
1728 0x00000000, 1865 0x00000000,
1729 0x00000000, 1866 0x00000000,
1730 0x00000000, 1867 0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h
index ca30fa4011b5..90221d973f84 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h
@@ -46,8 +46,8 @@ uint32_t nvc0_pwr_data[] = {
46 0x00000000, 46 0x00000000,
47 0x00000000, 47 0x00000000,
48 0x584d454d, 48 0x584d454d,
49 0x0000074b, 49 0x0000075e,
50 0x0000073d, 50 0x00000750,
51 0x00000000, 51 0x00000000,
52 0x00000000, 52 0x00000000,
53 0x00000000, 53 0x00000000,
@@ -68,8 +68,8 @@ uint32_t nvc0_pwr_data[] = {
68 0x00000000, 68 0x00000000,
69 0x00000000, 69 0x00000000,
70 0x46524550, 70 0x46524550,
71 0x0000074f, 71 0x00000762,
72 0x0000074d, 72 0x00000760,
73 0x00000000, 73 0x00000000,
74 0x00000000, 74 0x00000000,
75 0x00000000, 75 0x00000000,
@@ -90,8 +90,8 @@ uint32_t nvc0_pwr_data[] = {
90 0x00000000, 90 0x00000000,
91 0x00000000, 91 0x00000000,
92 0x5f433249, 92 0x5f433249,
93 0x00000b7f, 93 0x00000b92,
94 0x00000a22, 94 0x00000a35,
95 0x00000000, 95 0x00000000,
96 0x00000000, 96 0x00000000,
97 0x00000000, 97 0x00000000,
@@ -112,8 +112,8 @@ uint32_t nvc0_pwr_data[] = {
112 0x00000000, 112 0x00000000,
113 0x00000000, 113 0x00000000,
114 0x54534554, 114 0x54534554,
115 0x00000ba8, 115 0x00000bbb,
116 0x00000b81, 116 0x00000b94,
117 0x00000000, 117 0x00000000,
118 0x00000000, 118 0x00000000,
119 0x00000000, 119 0x00000000,
@@ -134,8 +134,8 @@ uint32_t nvc0_pwr_data[] = {
134 0x00000000, 134 0x00000000,
135 0x00000000, 135 0x00000000,
136 0x454c4449, 136 0x454c4449,
137 0x00000bb4, 137 0x00000bc7,
138 0x00000bb2, 138 0x00000bc5,
139 0x00000000, 139 0x00000000,
140 0x00000000, 140 0x00000000,
141 0x00000000, 141 0x00000000,
@@ -246,13 +246,15 @@ uint32_t nvc0_pwr_data[] = {
246 0x00010006, 246 0x00010006,
247 0x00000000, 247 0x00000000,
248 0x00000663, 248 0x00000663,
249/* 0x03b8: memx_func_tail */ 249 0x00000007,
250/* 0x03b8: memx_ts_start */
251 0x00000000, 250 0x00000000,
252/* 0x03bc: memx_ts_end */ 251 0x000006e9,
252/* 0x03c4: memx_func_tail */
253/* 0x03c4: memx_ts_start */
253 0x00000000, 254 0x00000000,
254/* 0x03c0: memx_data_head */ 255/* 0x03c8: memx_ts_end */
255 0x00000000, 256 0x00000000,
257/* 0x03cc: memx_data_head */
256 0x00000000, 258 0x00000000,
257 0x00000000, 259 0x00000000,
258 0x00000000, 260 0x00000000,
@@ -764,8 +766,75 @@ uint32_t nvc0_pwr_data[] = {
764 0x00000000, 766 0x00000000,
765 0x00000000, 767 0x00000000,
766 0x00000000, 768 0x00000000,
767/* 0x0bc0: memx_data_tail */ 769 0x00000000,
768/* 0x0bc0: i2c_scl_map */ 770/* 0x0bcc: memx_data_tail */
771/* 0x0bcc: memx_train_head */
772 0x00000000,
773 0x00000000,
774 0x00000000,
775 0x00000000,
776 0x00000000,
777 0x00000000,
778 0x00000000,
779 0x00000000,
780 0x00000000,
781 0x00000000,
782 0x00000000,
783 0x00000000,
784 0x00000000,
785 0x00000000,
786 0x00000000,
787 0x00000000,
788 0x00000000,
789 0x00000000,
790 0x00000000,
791 0x00000000,
792 0x00000000,
793 0x00000000,
794 0x00000000,
795 0x00000000,
796 0x00000000,
797 0x00000000,
798 0x00000000,
799 0x00000000,
800 0x00000000,
801 0x00000000,
802 0x00000000,
803 0x00000000,
804 0x00000000,
805 0x00000000,
806 0x00000000,
807 0x00000000,
808 0x00000000,
809 0x00000000,
810 0x00000000,
811 0x00000000,
812 0x00000000,
813 0x00000000,
814 0x00000000,
815 0x00000000,
816 0x00000000,
817 0x00000000,
818 0x00000000,
819 0x00000000,
820 0x00000000,
821 0x00000000,
822 0x00000000,
823 0x00000000,
824 0x00000000,
825 0x00000000,
826 0x00000000,
827 0x00000000,
828 0x00000000,
829 0x00000000,
830 0x00000000,
831 0x00000000,
832 0x00000000,
833 0x00000000,
834 0x00000000,
835 0x00000000,
836/* 0x0ccc: memx_train_tail */
837/* 0x0ccc: i2c_scl_map */
769 0x00001000, 838 0x00001000,
770 0x00004000, 839 0x00004000,
771 0x00010000, 840 0x00010000,
@@ -776,7 +845,7 @@ uint32_t nvc0_pwr_data[] = {
776 0x01000000, 845 0x01000000,
777 0x04000000, 846 0x04000000,
778 0x10000000, 847 0x10000000,
779/* 0x0be8: i2c_sda_map */ 848/* 0x0cf4: i2c_sda_map */
780 0x00002000, 849 0x00002000,
781 0x00008000, 850 0x00008000,
782 0x00020000, 851 0x00020000,
@@ -787,7 +856,7 @@ uint32_t nvc0_pwr_data[] = {
787 0x02000000, 856 0x02000000,
788 0x08000000, 857 0x08000000,
789 0x20000000, 858 0x20000000,
790/* 0x0c10: i2c_ctrl */ 859/* 0x0d1c: i2c_ctrl */
791 0x0000e138, 860 0x0000e138,
792 0x0000e150, 861 0x0000e150,
793 0x0000e168, 862 0x0000e168,
@@ -845,9 +914,6 @@ uint32_t nvc0_pwr_data[] = {
845 0x00000000, 914 0x00000000,
846 0x00000000, 915 0x00000000,
847 0x00000000, 916 0x00000000,
848 0x00000000,
849 0x00000000,
850 0x00000000,
851}; 917};
852 918
853uint32_t nvc0_pwr_code[] = { 919uint32_t nvc0_pwr_code[] = {
@@ -1272,10 +1338,10 @@ uint32_t nvc0_pwr_code[] = {
1272 0xcf0664b6, 1338 0xcf0664b6,
1273 0x06800066, 1339 0x06800066,
1274/* 0x05db: memx_func_leave */ 1340/* 0x05db: memx_func_leave */
1275 0xf000f8ee, 1341 0xf000f8f1,
1276 0x64b62c67, 1342 0x64b62c67,
1277 0x0066cf06, 1343 0x0066cf06,
1278 0xf0ef0680, 1344 0xf0f20680,
1279 0x07f10467, 1345 0x07f10467,
1280 0x04b607e4, 1346 0x04b607e4,
1281 0x0006d006, 1347 0x0006d006,
@@ -1350,382 +1416,450 @@ uint32_t nvc0_pwr_code[] = {
1350 0x1e9800f8, 1416 0x1e9800f8,
1351 0x0410b600, 1417 0x0410b600,
1352 0xf87f21f4, 1418 0xf87f21f4,
1353/* 0x06e9: memx_exec */ 1419/* 0x06e9: memx_func_train */
1354 0xf9e0f900, 1420/* 0x06eb: memx_exec */
1355 0x02c1b9d0, 1421 0xf900f800,
1356/* 0x06f3: memx_exec_next */ 1422 0xb9d0f9e0,
1357 0x9802b2b9, 1423 0xb2b902c1,
1358 0x10b60013, 1424/* 0x06f5: memx_exec_next */
1359 0xf034e704, 1425 0x00139802,
1360 0xe033e701, 1426 0xe70410b6,
1361 0x0132b601, 1427 0xe701f034,
1362 0x980c30f0, 1428 0xb601e033,
1363 0x55f9de35, 1429 0x30f00132,
1364 0xf40612b8, 1430 0xde35980c,
1365 0x0b98e41e, 1431 0x12b855f9,
1366 0xef0c98ee, 1432 0xe41ef406,
1367 0xf102cbbb, 1433 0x98f10b98,
1368 0xb607c4b7, 1434 0xcbbbf20c,
1369 0xbbcf06b4, 1435 0xc4b7f102,
1370 0xfcd0fc00, 1436 0x06b4b607,
1371 0x4221f5e0, 1437 0xfc00bbcf,
1372/* 0x072f: memx_info */ 1438 0xf5e0fcd0,
1373 0xf100f803,
1374 0xf103c0c7,
1375 0xf50800b7,
1376 0xf8034221, 1439 0xf8034221,
1377/* 0x073d: memx_recv */ 1440/* 0x0731: memx_info */
1378 0x01d6b000, 1441 0x01c67000,
1379 0xb0a90bf4, 1442/* 0x0737: memx_info_data */
1380 0x0bf400d6, 1443 0xf10e0bf4,
1381/* 0x074b: memx_init */ 1444 0xf103ccc7,
1382 0xf800f8e9, 1445 0xf40800b7,
1383/* 0x074d: perf_recv */ 1446/* 0x0742: memx_info_train */
1384/* 0x074f: perf_init */ 1447 0xc7f10b0e,
1385 0xf800f800, 1448 0xb7f10bcc,
1386/* 0x0751: i2c_drive_scl */ 1449/* 0x074a: memx_info_send */
1387 0x0036b000, 1450 0x21f50100,
1388 0xf1110bf4, 1451 0x00f80342,
1389 0xb607e007, 1452/* 0x0750: memx_recv */
1390 0x01d00604, 1453 0xf401d6b0,
1391 0xf804bd00, 1454 0xd6b0980b,
1392/* 0x0765: i2c_drive_scl_lo */ 1455 0xd80bf400,
1393 0xe407f100, 1456/* 0x075e: memx_init */
1394 0x0604b607, 1457 0x00f800f8,
1395 0xbd0001d0, 1458/* 0x0760: perf_recv */
1396/* 0x0773: i2c_drive_sda */ 1459/* 0x0762: perf_init */
1397 0xb000f804, 1460 0x00f800f8,
1398 0x0bf40036, 1461/* 0x0764: i2c_drive_scl */
1399 0xe007f111, 1462 0xf40036b0,
1400 0x0604b607, 1463 0x07f1110b,
1401 0xbd0002d0, 1464 0x04b607e0,
1402/* 0x0787: i2c_drive_sda_lo */ 1465 0x0001d006,
1403 0xf100f804, 1466 0x00f804bd,
1404 0xb607e407, 1467/* 0x0778: i2c_drive_scl_lo */
1405 0x02d00604, 1468 0x07e407f1,
1406 0xf804bd00, 1469 0xd00604b6,
1407/* 0x0795: i2c_sense_scl */ 1470 0x04bd0001,
1408 0x0132f400, 1471/* 0x0786: i2c_drive_sda */
1409 0x07c437f1, 1472 0x36b000f8,
1410 0xcf0634b6, 1473 0x110bf400,
1411 0x31fd0033, 1474 0x07e007f1,
1412 0x060bf404, 1475 0xd00604b6,
1413/* 0x07ab: i2c_sense_scl_done */ 1476 0x04bd0002,
1414 0xf80131f4, 1477/* 0x079a: i2c_drive_sda_lo */
1415/* 0x07ad: i2c_sense_sda */ 1478 0x07f100f8,
1416 0x0132f400, 1479 0x04b607e4,
1417 0x07c437f1, 1480 0x0002d006,
1418 0xcf0634b6, 1481 0x00f804bd,
1419 0x32fd0033, 1482/* 0x07a8: i2c_sense_scl */
1420 0x060bf404, 1483 0xf10132f4,
1421/* 0x07c3: i2c_sense_sda_done */ 1484 0xb607c437,
1422 0xf80131f4, 1485 0x33cf0634,
1423/* 0x07c5: i2c_raise_scl */ 1486 0x0431fd00,
1424 0xf140f900, 1487 0xf4060bf4,
1425 0xf0089847, 1488/* 0x07be: i2c_sense_scl_done */
1426 0x21f50137, 1489 0x00f80131,
1427/* 0x07d2: i2c_raise_scl_wait */ 1490/* 0x07c0: i2c_sense_sda */
1428 0xe7f10751, 1491 0xf10132f4,
1429 0x21f403e8, 1492 0xb607c437,
1430 0x9521f57f, 1493 0x33cf0634,
1431 0x0901f407, 1494 0x0432fd00,
1432 0xf40142b6, 1495 0xf4060bf4,
1433/* 0x07e6: i2c_raise_scl_done */ 1496/* 0x07d6: i2c_sense_sda_done */
1434 0x40fcef1b, 1497 0x00f80131,
1435/* 0x07ea: i2c_start */ 1498/* 0x07d8: i2c_raise_scl */
1436 0x21f500f8, 1499 0x47f140f9,
1437 0x11f40795, 1500 0x37f00898,
1438 0xad21f50d, 1501 0x6421f501,
1439 0x0611f407, 1502/* 0x07e5: i2c_raise_scl_wait */
1440/* 0x07fb: i2c_start_rep */
1441 0xf0300ef4,
1442 0x21f50037,
1443 0x37f00751,
1444 0x7321f501,
1445 0x0076bb07,
1446 0xf90465b6,
1447 0x04659450,
1448 0xbd0256bb,
1449 0x0475fd50,
1450 0x21f550fc,
1451 0x64b607c5,
1452 0x1f11f404,
1453/* 0x0828: i2c_start_send */
1454 0xf50037f0,
1455 0xf1077321,
1456 0xf41388e7,
1457 0x37f07f21,
1458 0x5121f500,
1459 0x88e7f107,
1460 0x7f21f413,
1461/* 0x0844: i2c_start_out */
1462/* 0x0846: i2c_stop */
1463 0x37f000f8,
1464 0x5121f500,
1465 0x0037f007,
1466 0x077321f5,
1467 0x03e8e7f1,
1468 0xf07f21f4,
1469 0x21f50137,
1470 0xe7f10751,
1471 0x21f41388,
1472 0x0137f07f,
1473 0x077321f5,
1474 0x1388e7f1,
1475 0xf87f21f4,
1476/* 0x0879: i2c_bitw */
1477 0x7321f500,
1478 0xe8e7f107, 1503 0xe8e7f107,
1479 0x7f21f403, 1504 0x7f21f403,
1505 0x07a821f5,
1506 0xb60901f4,
1507 0x1bf40142,
1508/* 0x07f9: i2c_raise_scl_done */
1509 0xf840fcef,
1510/* 0x07fd: i2c_start */
1511 0xa821f500,
1512 0x0d11f407,
1513 0x07c021f5,
1514 0xf40611f4,
1515/* 0x080e: i2c_start_rep */
1516 0x37f0300e,
1517 0x6421f500,
1518 0x0137f007,
1519 0x078621f5,
1480 0xb60076bb, 1520 0xb60076bb,
1481 0x50f90465, 1521 0x50f90465,
1482 0xbb046594, 1522 0xbb046594,
1483 0x50bd0256, 1523 0x50bd0256,
1484 0xfc0475fd, 1524 0xfc0475fd,
1485 0xc521f550, 1525 0xd821f550,
1486 0x0464b607, 1526 0x0464b607,
1487 0xf11811f4, 1527/* 0x083b: i2c_start_send */
1488 0xf41388e7, 1528 0xf01f11f4,
1529 0x21f50037,
1530 0xe7f10786,
1531 0x21f41388,
1532 0x0037f07f,
1533 0x076421f5,
1534 0x1388e7f1,
1535/* 0x0857: i2c_start_out */
1536 0xf87f21f4,
1537/* 0x0859: i2c_stop */
1538 0x0037f000,
1539 0x076421f5,
1540 0xf50037f0,
1541 0xf1078621,
1542 0xf403e8e7,
1489 0x37f07f21, 1543 0x37f07f21,
1490 0x5121f500, 1544 0x6421f501,
1491 0x88e7f107, 1545 0x88e7f107,
1492 0x7f21f413, 1546 0x7f21f413,
1493/* 0x08b8: i2c_bitw_out */ 1547 0xf50137f0,
1494/* 0x08ba: i2c_bitr */ 1548 0xf1078621,
1495 0x37f000f8, 1549 0xf41388e7,
1496 0x7321f501, 1550 0x00f87f21,
1497 0xe8e7f107, 1551/* 0x088c: i2c_bitw */
1498 0x7f21f403, 1552 0x078621f5,
1499 0xb60076bb, 1553 0x03e8e7f1,
1500 0x50f90465, 1554 0xbb7f21f4,
1501 0xbb046594, 1555 0x65b60076,
1502 0x50bd0256, 1556 0x9450f904,
1503 0xfc0475fd, 1557 0x56bb0465,
1504 0xc521f550, 1558 0xfd50bd02,
1505 0x0464b607, 1559 0x50fc0475,
1506 0xf51b11f4, 1560 0x07d821f5,
1507 0xf007ad21, 1561 0xf40464b6,
1508 0x21f50037, 1562 0xe7f11811,
1509 0xe7f10751,
1510 0x21f41388, 1563 0x21f41388,
1511 0x013cf07f, 1564 0x0037f07f,
1512/* 0x08ff: i2c_bitr_done */ 1565 0x076421f5,
1513 0xf80131f4, 1566 0x1388e7f1,
1514/* 0x0901: i2c_get_byte */ 1567/* 0x08cb: i2c_bitw_out */
1515 0x0057f000, 1568 0xf87f21f4,
1516/* 0x0907: i2c_get_byte_next */ 1569/* 0x08cd: i2c_bitr */
1517 0xb60847f0, 1570 0x0137f000,
1518 0x76bb0154, 1571 0x078621f5,
1519 0x0465b600, 1572 0x03e8e7f1,
1520 0x659450f9, 1573 0xbb7f21f4,
1521 0x0256bb04, 1574 0x65b60076,
1522 0x75fd50bd, 1575 0x9450f904,
1523 0xf550fc04, 1576 0x56bb0465,
1524 0xb608ba21, 1577 0xfd50bd02,
1525 0x11f40464, 1578 0x50fc0475,
1526 0x0553fd2b, 1579 0x07d821f5,
1527 0xf40142b6, 1580 0xf40464b6,
1528 0x37f0d81b, 1581 0x21f51b11,
1582 0x37f007c0,
1583 0x6421f500,
1584 0x88e7f107,
1585 0x7f21f413,
1586 0xf4013cf0,
1587/* 0x0912: i2c_bitr_done */
1588 0x00f80131,
1589/* 0x0914: i2c_get_byte */
1590 0xf00057f0,
1591/* 0x091a: i2c_get_byte_next */
1592 0x54b60847,
1529 0x0076bb01, 1593 0x0076bb01,
1530 0xf90465b6, 1594 0xf90465b6,
1531 0x04659450, 1595 0x04659450,
1532 0xbd0256bb, 1596 0xbd0256bb,
1533 0x0475fd50, 1597 0x0475fd50,
1534 0x21f550fc, 1598 0x21f550fc,
1535 0x64b60879, 1599 0x64b608cd,
1536/* 0x0951: i2c_get_byte_done */ 1600 0x2b11f404,
1537/* 0x0953: i2c_put_byte */ 1601 0xb60553fd,
1538 0xf000f804, 1602 0x1bf40142,
1539/* 0x0956: i2c_put_byte_next */ 1603 0x0137f0d8,
1540 0x42b60847,
1541 0x3854ff01,
1542 0xb60076bb, 1604 0xb60076bb,
1543 0x50f90465, 1605 0x50f90465,
1544 0xbb046594, 1606 0xbb046594,
1545 0x50bd0256, 1607 0x50bd0256,
1546 0xfc0475fd, 1608 0xfc0475fd,
1547 0x7921f550, 1609 0x8c21f550,
1548 0x0464b608, 1610 0x0464b608,
1549 0xb03411f4, 1611/* 0x0964: i2c_get_byte_done */
1550 0x1bf40046, 1612/* 0x0966: i2c_put_byte */
1551 0x0076bbd8, 1613 0x47f000f8,
1614/* 0x0969: i2c_put_byte_next */
1615 0x0142b608,
1616 0xbb3854ff,
1617 0x65b60076,
1618 0x9450f904,
1619 0x56bb0465,
1620 0xfd50bd02,
1621 0x50fc0475,
1622 0x088c21f5,
1623 0xf40464b6,
1624 0x46b03411,
1625 0xd81bf400,
1626 0xb60076bb,
1627 0x50f90465,
1628 0xbb046594,
1629 0x50bd0256,
1630 0xfc0475fd,
1631 0xcd21f550,
1632 0x0464b608,
1633 0xbb0f11f4,
1634 0x36b00076,
1635 0x061bf401,
1636/* 0x09bf: i2c_put_byte_done */
1637 0xf80132f4,
1638/* 0x09c1: i2c_addr */
1639 0x0076bb00,
1552 0xf90465b6, 1640 0xf90465b6,
1553 0x04659450, 1641 0x04659450,
1554 0xbd0256bb, 1642 0xbd0256bb,
1555 0x0475fd50, 1643 0x0475fd50,
1556 0x21f550fc, 1644 0x21f550fc,
1557 0x64b608ba, 1645 0x64b607fd,
1558 0x0f11f404, 1646 0x2911f404,
1559 0xb00076bb, 1647 0x012ec3e7,
1560 0x1bf40136, 1648 0xfd0134b6,
1561 0x0132f406, 1649 0x76bb0553,
1562/* 0x09ac: i2c_put_byte_done */
1563/* 0x09ae: i2c_addr */
1564 0x76bb00f8,
1565 0x0465b600, 1650 0x0465b600,
1566 0x659450f9, 1651 0x659450f9,
1567 0x0256bb04, 1652 0x0256bb04,
1568 0x75fd50bd, 1653 0x75fd50bd,
1569 0xf550fc04, 1654 0xf550fc04,
1570 0xb607ea21, 1655 0xb6096621,
1571 0x11f40464, 1656/* 0x0a06: i2c_addr_done */
1572 0x2ec3e729, 1657 0x00f80464,
1573 0x0134b601, 1658/* 0x0a08: i2c_acquire_addr */
1574 0xbb0553fd, 1659 0xb6f8cec7,
1660 0xe0b702e4,
1661 0xee980d1c,
1662/* 0x0a17: i2c_acquire */
1663 0xf500f800,
1664 0xf40a0821,
1665 0xd9f00421,
1666 0x3f21f403,
1667/* 0x0a26: i2c_release */
1668 0x21f500f8,
1669 0x21f40a08,
1670 0x03daf004,
1671 0xf83f21f4,
1672/* 0x0a35: i2c_recv */
1673 0x0132f400,
1674 0xb6f8c1c7,
1675 0x16b00214,
1676 0x3a1ff528,
1677 0xf413a001,
1678 0x0032980c,
1679 0x0ccc13a0,
1680 0xf4003198,
1681 0xd0f90231,
1682 0xd0f9e0f9,
1683 0x000067f1,
1684 0x100063f1,
1685 0xbb016792,
1575 0x65b60076, 1686 0x65b60076,
1576 0x9450f904, 1687 0x9450f904,
1577 0x56bb0465, 1688 0x56bb0465,
1578 0xfd50bd02, 1689 0xfd50bd02,
1579 0x50fc0475, 1690 0x50fc0475,
1580 0x095321f5, 1691 0x0a1721f5,
1581/* 0x09f3: i2c_addr_done */ 1692 0xfc0464b6,
1582 0xf80464b6, 1693 0x00d6b0d0,
1583/* 0x09f5: i2c_acquire_addr */ 1694 0x00b31bf5,
1584 0xf8cec700, 1695 0xbb0057f0,
1585 0xb702e4b6,
1586 0x980c10e0,
1587 0x00f800ee,
1588/* 0x0a04: i2c_acquire */
1589 0x09f521f5,
1590 0xf00421f4,
1591 0x21f403d9,
1592/* 0x0a13: i2c_release */
1593 0xf500f83f,
1594 0xf409f521,
1595 0xdaf00421,
1596 0x3f21f403,
1597/* 0x0a22: i2c_recv */
1598 0x32f400f8,
1599 0xf8c1c701,
1600 0xb00214b6,
1601 0x1ff52816,
1602 0x13a0013a,
1603 0x32980be8,
1604 0xc013a000,
1605 0x0031980b,
1606 0xf90231f4,
1607 0xf9e0f9d0,
1608 0x0067f1d0,
1609 0x0063f100,
1610 0x01679210,
1611 0xb60076bb,
1612 0x50f90465,
1613 0xbb046594,
1614 0x50bd0256,
1615 0xfc0475fd,
1616 0x0421f550,
1617 0x0464b60a,
1618 0xd6b0d0fc,
1619 0xb31bf500,
1620 0x0057f000,
1621 0xb60076bb,
1622 0x50f90465,
1623 0xbb046594,
1624 0x50bd0256,
1625 0xfc0475fd,
1626 0xae21f550,
1627 0x0464b609,
1628 0x00d011f5,
1629 0xbbe0c5c7,
1630 0x65b60076, 1696 0x65b60076,
1631 0x9450f904, 1697 0x9450f904,
1632 0x56bb0465, 1698 0x56bb0465,
1633 0xfd50bd02, 1699 0xfd50bd02,
1634 0x50fc0475, 1700 0x50fc0475,
1635 0x095321f5, 1701 0x09c121f5,
1636 0xf50464b6, 1702 0xf50464b6,
1637 0xf000ad11, 1703 0xc700d011,
1638 0x76bb0157, 1704 0x76bbe0c5,
1639 0x0465b600, 1705 0x0465b600,
1640 0x659450f9, 1706 0x659450f9,
1641 0x0256bb04, 1707 0x0256bb04,
1642 0x75fd50bd, 1708 0x75fd50bd,
1643 0xf550fc04, 1709 0xf550fc04,
1644 0xb609ae21, 1710 0xb6096621,
1645 0x11f50464, 1711 0x11f50464,
1646 0x76bb008a, 1712 0x57f000ad,
1647 0x0465b600, 1713 0x0076bb01,
1648 0x659450f9, 1714 0xf90465b6,
1649 0x0256bb04, 1715 0x04659450,
1650 0x75fd50bd, 1716 0xbd0256bb,
1651 0xf550fc04, 1717 0x0475fd50,
1652 0xb6090121, 1718 0x21f550fc,
1653 0x11f40464, 1719 0x64b609c1,
1654 0xe05bcb6a, 1720 0x8a11f504,
1655 0xb60076bb, 1721 0x0076bb00,
1656 0x50f90465, 1722 0xf90465b6,
1657 0xbb046594, 1723 0x04659450,
1658 0x50bd0256, 1724 0xbd0256bb,
1659 0xfc0475fd, 1725 0x0475fd50,
1660 0x4621f550, 1726 0x21f550fc,
1661 0x0464b608, 1727 0x64b60914,
1662 0xbd025bb9, 1728 0x6a11f404,
1663 0x430ef474, 1729 0xbbe05bcb,
1664/* 0x0b28: i2c_recv_not_rd08 */ 1730 0x65b60076,
1665 0xf401d6b0, 1731 0x9450f904,
1666 0x57f03d1b, 1732 0x56bb0465,
1667 0xae21f500, 1733 0xfd50bd02,
1668 0x3311f409, 1734 0x50fc0475,
1669 0xf5e0c5c7, 1735 0x085921f5,
1670 0xf4095321, 1736 0xb90464b6,
1671 0x57f02911, 1737 0x74bd025b,
1672 0xae21f500, 1738/* 0x0b3b: i2c_recv_not_rd08 */
1673 0x1f11f409, 1739 0xb0430ef4,
1674 0xf5e0b5c7, 1740 0x1bf401d6,
1675 0xf4095321, 1741 0x0057f03d,
1676 0x21f51511, 1742 0x09c121f5,
1677 0x74bd0846, 1743 0xc73311f4,
1678 0xf408c5c7, 1744 0x21f5e0c5,
1679 0x32f4091b, 1745 0x11f40966,
1680 0x030ef402, 1746 0x0057f029,
1681/* 0x0b68: i2c_recv_not_wr08 */ 1747 0x09c121f5,
1682/* 0x0b68: i2c_recv_done */ 1748 0xc71f11f4,
1683 0xf5f8cec7, 1749 0x21f5e0b5,
1684 0xfc0a1321, 1750 0x11f40966,
1685 0xf4d0fce0, 1751 0x5921f515,
1686 0x7cb90a12, 1752 0xc774bd08,
1687 0x4221f502, 1753 0x1bf408c5,
1688/* 0x0b7d: i2c_recv_exit */ 1754 0x0232f409,
1689/* 0x0b7f: i2c_init */ 1755/* 0x0b7b: i2c_recv_not_wr08 */
1690 0xf800f803, 1756/* 0x0b7b: i2c_recv_done */
1691/* 0x0b81: test_recv */ 1757 0xc7030ef4,
1692 0xd817f100, 1758 0x21f5f8ce,
1693 0x0614b605, 1759 0xe0fc0a26,
1694 0xb60011cf, 1760 0x12f4d0fc,
1695 0x07f10110, 1761 0x027cb90a,
1696 0x04b605d8, 1762 0x034221f5,
1697 0x0001d006, 1763/* 0x0b90: i2c_recv_exit */
1698 0xe7f104bd, 1764/* 0x0b92: i2c_init */
1699 0xe3f1d900,
1700 0x21f5134f,
1701 0x00f80262,
1702/* 0x0ba8: test_init */
1703 0x0800e7f1,
1704 0x026221f5,
1705/* 0x0bb2: idle_recv */
1706 0x00f800f8, 1765 0x00f800f8,
1707/* 0x0bb4: idle */ 1766/* 0x0b94: test_recv */
1708 0xf10031f4, 1767 0x05d817f1,
1709 0xb605d417, 1768 0xcf0614b6,
1710 0x11cf0614, 1769 0x10b60011,
1711 0x0110b600, 1770 0xd807f101,
1712 0x05d407f1, 1771 0x0604b605,
1713 0xd00604b6, 1772 0xbd0001d0,
1714 0x04bd0001, 1773 0x00e7f104,
1715/* 0x0bd0: idle_loop */ 1774 0x4fe3f1d9,
1716 0xf45817f0, 1775 0x6221f513,
1717/* 0x0bd6: idle_proc */ 1776/* 0x0bbb: test_init */
1718/* 0x0bd6: idle_proc_exec */ 1777 0xf100f802,
1719 0x10f90232, 1778 0xf50800e7,
1720 0xf5021eb9, 1779 0xf8026221,
1721 0xfc034b21, 1780/* 0x0bc5: idle_recv */
1722 0x0911f410, 1781/* 0x0bc7: idle */
1723 0xf40231f4, 1782 0xf400f800,
1724/* 0x0bea: idle_proc_next */ 1783 0x17f10031,
1725 0x10b6ef0e, 1784 0x14b605d4,
1726 0x061fb858, 1785 0x0011cf06,
1727 0xf4e61bf4, 1786 0xf10110b6,
1728 0x28f4dd02, 1787 0xb605d407,
1729 0xbb0ef400, 1788 0x01d00604,
1789/* 0x0be3: idle_loop */
1790 0xf004bd00,
1791 0x32f45817,
1792/* 0x0be9: idle_proc */
1793/* 0x0be9: idle_proc_exec */
1794 0xb910f902,
1795 0x21f5021e,
1796 0x10fc034b,
1797 0xf40911f4,
1798 0x0ef40231,
1799/* 0x0bfd: idle_proc_next */
1800 0x5810b6ef,
1801 0xf4061fb8,
1802 0x02f4e61b,
1803 0x0028f4dd,
1804 0x00bb0ef4,
1805 0x00000000,
1806 0x00000000,
1807 0x00000000,
1808 0x00000000,
1809 0x00000000,
1810 0x00000000,
1811 0x00000000,
1812 0x00000000,
1813 0x00000000,
1814 0x00000000,
1815 0x00000000,
1816 0x00000000,
1817 0x00000000,
1818 0x00000000,
1819 0x00000000,
1820 0x00000000,
1821 0x00000000,
1822 0x00000000,
1823 0x00000000,
1824 0x00000000,
1825 0x00000000,
1826 0x00000000,
1827 0x00000000,
1828 0x00000000,
1829 0x00000000,
1830 0x00000000,
1831 0x00000000,
1832 0x00000000,
1833 0x00000000,
1834 0x00000000,
1835 0x00000000,
1836 0x00000000,
1837 0x00000000,
1838 0x00000000,
1839 0x00000000,
1840 0x00000000,
1841 0x00000000,
1842 0x00000000,
1843 0x00000000,
1844 0x00000000,
1845 0x00000000,
1846 0x00000000,
1847 0x00000000,
1848 0x00000000,
1849 0x00000000,
1850 0x00000000,
1851 0x00000000,
1852 0x00000000,
1853 0x00000000,
1854 0x00000000,
1855 0x00000000,
1856 0x00000000,
1857 0x00000000,
1858 0x00000000,
1859 0x00000000,
1860 0x00000000,
1861 0x00000000,
1862 0x00000000,
1863 0x00000000,
1730 0x00000000, 1864 0x00000000,
1731}; 1865};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h
index 12d86f72ad10..7e16aab44d85 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h
@@ -46,8 +46,8 @@ uint32_t nvd0_pwr_data[] = {
46 0x00000000, 46 0x00000000,
47 0x00000000, 47 0x00000000,
48 0x584d454d, 48 0x584d454d,
49 0x00000678, 49 0x0000068b,
50 0x0000066a, 50 0x0000067d,
51 0x00000000, 51 0x00000000,
52 0x00000000, 52 0x00000000,
53 0x00000000, 53 0x00000000,
@@ -68,8 +68,8 @@ uint32_t nvd0_pwr_data[] = {
68 0x00000000, 68 0x00000000,
69 0x00000000, 69 0x00000000,
70 0x46524550, 70 0x46524550,
71 0x0000067c, 71 0x0000068f,
72 0x0000067a, 72 0x0000068d,
73 0x00000000, 73 0x00000000,
74 0x00000000, 74 0x00000000,
75 0x00000000, 75 0x00000000,
@@ -90,8 +90,8 @@ uint32_t nvd0_pwr_data[] = {
90 0x00000000, 90 0x00000000,
91 0x00000000, 91 0x00000000,
92 0x5f433249, 92 0x5f433249,
93 0x00000a97, 93 0x00000aaa,
94 0x0000093a, 94 0x0000094d,
95 0x00000000, 95 0x00000000,
96 0x00000000, 96 0x00000000,
97 0x00000000, 97 0x00000000,
@@ -112,8 +112,8 @@ uint32_t nvd0_pwr_data[] = {
112 0x00000000, 112 0x00000000,
113 0x00000000, 113 0x00000000,
114 0x54534554, 114 0x54534554,
115 0x00000aba, 115 0x00000acd,
116 0x00000a99, 116 0x00000aac,
117 0x00000000, 117 0x00000000,
118 0x00000000, 118 0x00000000,
119 0x00000000, 119 0x00000000,
@@ -134,8 +134,8 @@ uint32_t nvd0_pwr_data[] = {
134 0x00000000, 134 0x00000000,
135 0x00000000, 135 0x00000000,
136 0x454c4449, 136 0x454c4449,
137 0x00000ac6, 137 0x00000ad9,
138 0x00000ac4, 138 0x00000ad7,
139 0x00000000, 139 0x00000000,
140 0x00000000, 140 0x00000000,
141 0x00000000, 141 0x00000000,
@@ -246,13 +246,15 @@ uint32_t nvd0_pwr_data[] = {
246 0x00010006, 246 0x00010006,
247 0x00000000, 247 0x00000000,
248 0x000005d3, 248 0x000005d3,
249/* 0x03b8: memx_func_tail */ 249 0x00000007,
250/* 0x03b8: memx_ts_start */
251 0x00000000, 250 0x00000000,
252/* 0x03bc: memx_ts_end */ 251 0x00000619,
252/* 0x03c4: memx_func_tail */
253/* 0x03c4: memx_ts_start */
253 0x00000000, 254 0x00000000,
254/* 0x03c0: memx_data_head */ 255/* 0x03c8: memx_ts_end */
255 0x00000000, 256 0x00000000,
257/* 0x03cc: memx_data_head */
256 0x00000000, 258 0x00000000,
257 0x00000000, 259 0x00000000,
258 0x00000000, 260 0x00000000,
@@ -764,8 +766,75 @@ uint32_t nvd0_pwr_data[] = {
764 0x00000000, 766 0x00000000,
765 0x00000000, 767 0x00000000,
766 0x00000000, 768 0x00000000,
767/* 0x0bc0: memx_data_tail */ 769 0x00000000,
768/* 0x0bc0: i2c_scl_map */ 770/* 0x0bcc: memx_data_tail */
771/* 0x0bcc: memx_train_head */
772 0x00000000,
773 0x00000000,
774 0x00000000,
775 0x00000000,
776 0x00000000,
777 0x00000000,
778 0x00000000,
779 0x00000000,
780 0x00000000,
781 0x00000000,
782 0x00000000,
783 0x00000000,
784 0x00000000,
785 0x00000000,
786 0x00000000,
787 0x00000000,
788 0x00000000,
789 0x00000000,
790 0x00000000,
791 0x00000000,
792 0x00000000,
793 0x00000000,
794 0x00000000,
795 0x00000000,
796 0x00000000,
797 0x00000000,
798 0x00000000,
799 0x00000000,
800 0x00000000,
801 0x00000000,
802 0x00000000,
803 0x00000000,
804 0x00000000,
805 0x00000000,
806 0x00000000,
807 0x00000000,
808 0x00000000,
809 0x00000000,
810 0x00000000,
811 0x00000000,
812 0x00000000,
813 0x00000000,
814 0x00000000,
815 0x00000000,
816 0x00000000,
817 0x00000000,
818 0x00000000,
819 0x00000000,
820 0x00000000,
821 0x00000000,
822 0x00000000,
823 0x00000000,
824 0x00000000,
825 0x00000000,
826 0x00000000,
827 0x00000000,
828 0x00000000,
829 0x00000000,
830 0x00000000,
831 0x00000000,
832 0x00000000,
833 0x00000000,
834 0x00000000,
835 0x00000000,
836/* 0x0ccc: memx_train_tail */
837/* 0x0ccc: i2c_scl_map */
769 0x00000400, 838 0x00000400,
770 0x00000800, 839 0x00000800,
771 0x00001000, 840 0x00001000,
@@ -776,7 +845,7 @@ uint32_t nvd0_pwr_data[] = {
776 0x00020000, 845 0x00020000,
777 0x00040000, 846 0x00040000,
778 0x00080000, 847 0x00080000,
779/* 0x0be8: i2c_sda_map */ 848/* 0x0cf4: i2c_sda_map */
780 0x00100000, 849 0x00100000,
781 0x00200000, 850 0x00200000,
782 0x00400000, 851 0x00400000,
@@ -844,9 +913,6 @@ uint32_t nvd0_pwr_data[] = {
844 0x00000000, 913 0x00000000,
845 0x00000000, 914 0x00000000,
846 0x00000000, 915 0x00000000,
847 0x00000000,
848 0x00000000,
849 0x00000000,
850}; 916};
851 917
852uint32_t nvd0_pwr_code[] = { 918uint32_t nvd0_pwr_code[] = {
@@ -1236,11 +1302,11 @@ uint32_t nvd0_pwr_code[] = {
1236 0x0bf40464, 1302 0x0bf40464,
1237 0x2c67f0f6, 1303 0x2c67f0f6,
1238 0x800066cf, 1304 0x800066cf,
1239 0x00f8ee06, 1305 0x00f8f106,
1240/* 0x0554: memx_func_leave */ 1306/* 0x0554: memx_func_leave */
1241 0xcf2c67f0, 1307 0xcf2c67f0,
1242 0x06800066, 1308 0x06800066,
1243 0x0467f0ef, 1309 0x0467f0f2,
1244 0x07e407f1, 1310 0x07e407f1,
1245 0xbd0006d0, 1311 0xbd0006d0,
1246/* 0x0569: memx_func_leave_wait */ 1312/* 0x0569: memx_func_leave_wait */
@@ -1292,379 +1358,383 @@ uint32_t nvd0_pwr_code[] = {
1292 0x1e9800f8, 1358 0x1e9800f8,
1293 0x0410b600, 1359 0x0410b600,
1294 0xf86721f4, 1360 0xf86721f4,
1295/* 0x0619: memx_exec */ 1361/* 0x0619: memx_func_train */
1296 0xf9e0f900, 1362/* 0x061b: memx_exec */
1297 0x02c1b9d0, 1363 0xf900f800,
1298/* 0x0623: memx_exec_next */ 1364 0xb9d0f9e0,
1299 0x9802b2b9, 1365 0xb2b902c1,
1300 0x10b60013, 1366/* 0x0625: memx_exec_next */
1301 0xf034e704, 1367 0x00139802,
1302 0xe033e701, 1368 0xe70410b6,
1303 0x0132b601, 1369 0xe701f034,
1304 0x980c30f0, 1370 0xb601e033,
1305 0x55f9de35, 1371 0x30f00132,
1306 0xf40612b8, 1372 0xde35980c,
1307 0x0b98e41e, 1373 0x12b855f9,
1308 0xef0c98ee, 1374 0xe41ef406,
1309 0xf102cbbb, 1375 0x98f10b98,
1310 0xcf07c4b7, 1376 0xcbbbf20c,
1311 0xd0fc00bb, 1377 0xc4b7f102,
1312 0x21f5e0fc, 1378 0x00bbcf07,
1313 0x00f802f1, 1379 0xe0fcd0fc,
1314/* 0x065c: memx_info */
1315 0x03c0c7f1,
1316 0x0800b7f1,
1317 0x02f121f5, 1380 0x02f121f5,
1318/* 0x066a: memx_recv */ 1381/* 0x065e: memx_info */
1319 0xd6b000f8, 1382 0xc67000f8,
1320 0xac0bf401, 1383 0x0e0bf401,
1321 0xf400d6b0, 1384/* 0x0664: memx_info_data */
1322 0x00f8e90b, 1385 0x03ccc7f1,
1323/* 0x0678: memx_init */ 1386 0x0800b7f1,
1324/* 0x067a: perf_recv */ 1387/* 0x066f: memx_info_train */
1325 0x00f800f8, 1388 0xf10b0ef4,
1326/* 0x067c: perf_init */ 1389 0xf10bccc7,
1327/* 0x067e: i2c_drive_scl */ 1390/* 0x0677: memx_info_send */
1328 0x36b000f8, 1391 0xf50100b7,
1329 0x0e0bf400, 1392 0xf802f121,
1330 0x07e007f1, 1393/* 0x067d: memx_recv */
1331 0xbd0001d0, 1394 0x01d6b000,
1332/* 0x068f: i2c_drive_scl_lo */ 1395 0xb09b0bf4,
1333 0xf100f804, 1396 0x0bf400d6,
1334 0xd007e407, 1397/* 0x068b: memx_init */
1398 0xf800f8d8,
1399/* 0x068d: perf_recv */
1400/* 0x068f: perf_init */
1401 0xf800f800,
1402/* 0x0691: i2c_drive_scl */
1403 0x0036b000,
1404 0xf10e0bf4,
1405 0xd007e007,
1335 0x04bd0001, 1406 0x04bd0001,
1336/* 0x069a: i2c_drive_sda */ 1407/* 0x06a2: i2c_drive_scl_lo */
1337 0x36b000f8, 1408 0x07f100f8,
1338 0x0e0bf400, 1409 0x01d007e4,
1339 0x07e007f1, 1410 0xf804bd00,
1340 0xbd0002d0, 1411/* 0x06ad: i2c_drive_sda */
1341/* 0x06ab: i2c_drive_sda_lo */ 1412 0x0036b000,
1342 0xf100f804, 1413 0xf10e0bf4,
1343 0xd007e407, 1414 0xd007e007,
1344 0x04bd0002, 1415 0x04bd0002,
1345/* 0x06b6: i2c_sense_scl */ 1416/* 0x06be: i2c_drive_sda_lo */
1417 0x07f100f8,
1418 0x02d007e4,
1419 0xf804bd00,
1420/* 0x06c9: i2c_sense_scl */
1421 0x0132f400,
1422 0x07c437f1,
1423 0xfd0033cf,
1424 0x0bf40431,
1425 0x0131f406,
1426/* 0x06dc: i2c_sense_scl_done */
1427/* 0x06de: i2c_sense_sda */
1346 0x32f400f8, 1428 0x32f400f8,
1347 0xc437f101, 1429 0xc437f101,
1348 0x0033cf07, 1430 0x0033cf07,
1349 0xf40431fd, 1431 0xf40432fd,
1350 0x31f4060b, 1432 0x31f4060b,
1351/* 0x06c9: i2c_sense_scl_done */ 1433/* 0x06f1: i2c_sense_sda_done */
1352/* 0x06cb: i2c_sense_sda */ 1434/* 0x06f3: i2c_raise_scl */
1353 0xf400f801, 1435 0xf900f801,
1354 0x37f10132, 1436 0x9847f140,
1355 0x33cf07c4, 1437 0x0137f008,
1356 0x0432fd00, 1438 0x069121f5,
1357 0xf4060bf4, 1439/* 0x0700: i2c_raise_scl_wait */
1358/* 0x06de: i2c_sense_sda_done */
1359 0x00f80131,
1360/* 0x06e0: i2c_raise_scl */
1361 0x47f140f9,
1362 0x37f00898,
1363 0x7e21f501,
1364/* 0x06ed: i2c_raise_scl_wait */
1365 0xe8e7f106,
1366 0x6721f403,
1367 0x06b621f5,
1368 0xb60901f4,
1369 0x1bf40142,
1370/* 0x0701: i2c_raise_scl_done */
1371 0xf840fcef,
1372/* 0x0705: i2c_start */
1373 0xb621f500,
1374 0x0d11f406,
1375 0x06cb21f5,
1376 0xf40611f4,
1377/* 0x0716: i2c_start_rep */
1378 0x37f0300e,
1379 0x7e21f500,
1380 0x0137f006,
1381 0x069a21f5,
1382 0xb60076bb,
1383 0x50f90465,
1384 0xbb046594,
1385 0x50bd0256,
1386 0xfc0475fd,
1387 0xe021f550,
1388 0x0464b606,
1389/* 0x0743: i2c_start_send */
1390 0xf01f11f4,
1391 0x21f50037,
1392 0xe7f1069a,
1393 0x21f41388,
1394 0x0037f067,
1395 0x067e21f5,
1396 0x1388e7f1,
1397/* 0x075f: i2c_start_out */
1398 0xf86721f4,
1399/* 0x0761: i2c_stop */
1400 0x0037f000,
1401 0x067e21f5,
1402 0xf50037f0,
1403 0xf1069a21,
1404 0xf403e8e7,
1405 0x37f06721,
1406 0x7e21f501,
1407 0x88e7f106,
1408 0x6721f413,
1409 0xf50137f0,
1410 0xf1069a21,
1411 0xf41388e7,
1412 0x00f86721,
1413/* 0x0794: i2c_bitw */
1414 0x069a21f5,
1415 0x03e8e7f1, 1440 0x03e8e7f1,
1416 0xbb6721f4, 1441 0xf56721f4,
1417 0x65b60076, 1442 0xf406c921,
1418 0x9450f904, 1443 0x42b60901,
1419 0x56bb0465, 1444 0xef1bf401,
1420 0xfd50bd02, 1445/* 0x0714: i2c_raise_scl_done */
1421 0x50fc0475, 1446 0x00f840fc,
1422 0x06e021f5, 1447/* 0x0718: i2c_start */
1423 0xf40464b6, 1448 0x06c921f5,
1424 0xe7f11811, 1449 0xf50d11f4,
1425 0x21f41388, 1450 0xf406de21,
1426 0x0037f067, 1451 0x0ef40611,
1427 0x067e21f5, 1452/* 0x0729: i2c_start_rep */
1428 0x1388e7f1, 1453 0x0037f030,
1429/* 0x07d3: i2c_bitw_out */ 1454 0x069121f5,
1430 0xf86721f4, 1455 0xf50137f0,
1431/* 0x07d5: i2c_bitr */ 1456 0xbb06ad21,
1432 0x0137f000,
1433 0x069a21f5,
1434 0x03e8e7f1,
1435 0xbb6721f4,
1436 0x65b60076, 1457 0x65b60076,
1437 0x9450f904, 1458 0x9450f904,
1438 0x56bb0465, 1459 0x56bb0465,
1439 0xfd50bd02, 1460 0xfd50bd02,
1440 0x50fc0475, 1461 0x50fc0475,
1441 0x06e021f5, 1462 0x06f321f5,
1442 0xf40464b6, 1463 0xf40464b6,
1443 0x21f51b11, 1464/* 0x0756: i2c_start_send */
1444 0x37f006cb, 1465 0x37f01f11,
1445 0x7e21f500, 1466 0xad21f500,
1446 0x88e7f106, 1467 0x88e7f106,
1447 0x6721f413, 1468 0x6721f413,
1448 0xf4013cf0, 1469 0xf50037f0,
1449/* 0x081a: i2c_bitr_done */ 1470 0xf1069121,
1450 0x00f80131, 1471 0xf41388e7,
1451/* 0x081c: i2c_get_byte */ 1472/* 0x0772: i2c_start_out */
1452 0xf00057f0, 1473 0x00f86721,
1453/* 0x0822: i2c_get_byte_next */ 1474/* 0x0774: i2c_stop */
1454 0x54b60847, 1475 0xf50037f0,
1455 0x0076bb01, 1476 0xf0069121,
1456 0xf90465b6, 1477 0x21f50037,
1457 0x04659450, 1478 0xe7f106ad,
1458 0xbd0256bb, 1479 0x21f403e8,
1459 0x0475fd50, 1480 0x0137f067,
1460 0x21f550fc, 1481 0x069121f5,
1461 0x64b607d5, 1482 0x1388e7f1,
1462 0x2b11f404, 1483 0xf06721f4,
1463 0xb60553fd, 1484 0x21f50137,
1464 0x1bf40142, 1485 0xe7f106ad,
1465 0x0137f0d8, 1486 0x21f41388,
1487/* 0x07a7: i2c_bitw */
1488 0xf500f867,
1489 0xf106ad21,
1490 0xf403e8e7,
1491 0x76bb6721,
1492 0x0465b600,
1493 0x659450f9,
1494 0x0256bb04,
1495 0x75fd50bd,
1496 0xf550fc04,
1497 0xb606f321,
1498 0x11f40464,
1499 0x88e7f118,
1500 0x6721f413,
1501 0xf50037f0,
1502 0xf1069121,
1503 0xf41388e7,
1504/* 0x07e6: i2c_bitw_out */
1505 0x00f86721,
1506/* 0x07e8: i2c_bitr */
1507 0xf50137f0,
1508 0xf106ad21,
1509 0xf403e8e7,
1510 0x76bb6721,
1511 0x0465b600,
1512 0x659450f9,
1513 0x0256bb04,
1514 0x75fd50bd,
1515 0xf550fc04,
1516 0xb606f321,
1517 0x11f40464,
1518 0xde21f51b,
1519 0x0037f006,
1520 0x069121f5,
1521 0x1388e7f1,
1522 0xf06721f4,
1523 0x31f4013c,
1524/* 0x082d: i2c_bitr_done */
1525/* 0x082f: i2c_get_byte */
1526 0xf000f801,
1527 0x47f00057,
1528/* 0x0835: i2c_get_byte_next */
1529 0x0154b608,
1466 0xb60076bb, 1530 0xb60076bb,
1467 0x50f90465, 1531 0x50f90465,
1468 0xbb046594, 1532 0xbb046594,
1469 0x50bd0256, 1533 0x50bd0256,
1470 0xfc0475fd, 1534 0xfc0475fd,
1471 0x9421f550, 1535 0xe821f550,
1472 0x0464b607, 1536 0x0464b607,
1473/* 0x086c: i2c_get_byte_done */ 1537 0xfd2b11f4,
1474/* 0x086e: i2c_put_byte */ 1538 0x42b60553,
1475 0x47f000f8, 1539 0xd81bf401,
1476/* 0x0871: i2c_put_byte_next */ 1540 0xbb0137f0,
1477 0x0142b608, 1541 0x65b60076,
1478 0xbb3854ff, 1542 0x9450f904,
1543 0x56bb0465,
1544 0xfd50bd02,
1545 0x50fc0475,
1546 0x07a721f5,
1547/* 0x087f: i2c_get_byte_done */
1548 0xf80464b6,
1549/* 0x0881: i2c_put_byte */
1550 0x0847f000,
1551/* 0x0884: i2c_put_byte_next */
1552 0xff0142b6,
1553 0x76bb3854,
1554 0x0465b600,
1555 0x659450f9,
1556 0x0256bb04,
1557 0x75fd50bd,
1558 0xf550fc04,
1559 0xb607a721,
1560 0x11f40464,
1561 0x0046b034,
1562 0xbbd81bf4,
1479 0x65b60076, 1563 0x65b60076,
1480 0x9450f904, 1564 0x9450f904,
1481 0x56bb0465, 1565 0x56bb0465,
1482 0xfd50bd02, 1566 0xfd50bd02,
1483 0x50fc0475, 1567 0x50fc0475,
1484 0x079421f5, 1568 0x07e821f5,
1485 0xf40464b6, 1569 0xf40464b6,
1486 0x46b03411, 1570 0x76bb0f11,
1487 0xd81bf400, 1571 0x0136b000,
1572 0xf4061bf4,
1573/* 0x08da: i2c_put_byte_done */
1574 0x00f80132,
1575/* 0x08dc: i2c_addr */
1488 0xb60076bb, 1576 0xb60076bb,
1489 0x50f90465, 1577 0x50f90465,
1490 0xbb046594, 1578 0xbb046594,
1491 0x50bd0256, 1579 0x50bd0256,
1492 0xfc0475fd, 1580 0xfc0475fd,
1493 0xd521f550, 1581 0x1821f550,
1494 0x0464b607, 1582 0x0464b607,
1495 0xbb0f11f4, 1583 0xe72911f4,
1496 0x36b00076, 1584 0xb6012ec3,
1497 0x061bf401, 1585 0x53fd0134,
1498/* 0x08c7: i2c_put_byte_done */ 1586 0x0076bb05,
1499 0xf80132f4,
1500/* 0x08c9: i2c_addr */
1501 0x0076bb00,
1502 0xf90465b6, 1587 0xf90465b6,
1503 0x04659450, 1588 0x04659450,
1504 0xbd0256bb, 1589 0xbd0256bb,
1505 0x0475fd50, 1590 0x0475fd50,
1506 0x21f550fc, 1591 0x21f550fc,
1507 0x64b60705, 1592 0x64b60881,
1508 0x2911f404, 1593/* 0x0921: i2c_addr_done */
1509 0x012ec3e7, 1594/* 0x0923: i2c_acquire_addr */
1510 0xfd0134b6, 1595 0xc700f804,
1511 0x76bb0553, 1596 0xe4b6f8ce,
1512 0x0465b600, 1597 0x14e0b705,
1513 0x659450f9, 1598/* 0x092f: i2c_acquire */
1514 0x0256bb04, 1599 0xf500f8d0,
1515 0x75fd50bd, 1600 0xf4092321,
1516 0xf550fc04, 1601 0xd9f00421,
1517 0xb6086e21,
1518/* 0x090e: i2c_addr_done */
1519 0x00f80464,
1520/* 0x0910: i2c_acquire_addr */
1521 0xb6f8cec7,
1522 0xe0b705e4,
1523 0x00f8d014,
1524/* 0x091c: i2c_acquire */
1525 0x091021f5,
1526 0xf00421f4,
1527 0x21f403d9,
1528/* 0x092b: i2c_release */
1529 0xf500f833,
1530 0xf4091021,
1531 0xdaf00421,
1532 0x3321f403, 1602 0x3321f403,
1533/* 0x093a: i2c_recv */ 1603/* 0x093e: i2c_release */
1534 0x32f400f8, 1604 0x21f500f8,
1535 0xf8c1c701, 1605 0x21f40923,
1536 0xb00214b6, 1606 0x03daf004,
1537 0x1ff52816, 1607 0xf83321f4,
1538 0x13a0013a, 1608/* 0x094d: i2c_recv */
1539 0x32980be8, 1609 0x0132f400,
1540 0xc013a000, 1610 0xb6f8c1c7,
1541 0x0031980b, 1611 0x16b00214,
1542 0xf90231f4, 1612 0x3a1ff528,
1543 0xf9e0f9d0, 1613 0xf413a001,
1544 0x0067f1d0, 1614 0x0032980c,
1545 0x0063f100, 1615 0x0ccc13a0,
1546 0x01679210, 1616 0xf4003198,
1547 0xb60076bb, 1617 0xd0f90231,
1548 0x50f90465, 1618 0xd0f9e0f9,
1549 0xbb046594, 1619 0x000067f1,
1550 0x50bd0256, 1620 0x100063f1,
1551 0xfc0475fd, 1621 0xbb016792,
1552 0x1c21f550,
1553 0x0464b609,
1554 0xd6b0d0fc,
1555 0xb31bf500,
1556 0x0057f000,
1557 0xb60076bb,
1558 0x50f90465,
1559 0xbb046594,
1560 0x50bd0256,
1561 0xfc0475fd,
1562 0xc921f550,
1563 0x0464b608,
1564 0x00d011f5,
1565 0xbbe0c5c7,
1566 0x65b60076, 1622 0x65b60076,
1567 0x9450f904, 1623 0x9450f904,
1568 0x56bb0465, 1624 0x56bb0465,
1569 0xfd50bd02, 1625 0xfd50bd02,
1570 0x50fc0475, 1626 0x50fc0475,
1571 0x086e21f5, 1627 0x092f21f5,
1628 0xfc0464b6,
1629 0x00d6b0d0,
1630 0x00b31bf5,
1631 0xbb0057f0,
1632 0x65b60076,
1633 0x9450f904,
1634 0x56bb0465,
1635 0xfd50bd02,
1636 0x50fc0475,
1637 0x08dc21f5,
1572 0xf50464b6, 1638 0xf50464b6,
1573 0xf000ad11, 1639 0xc700d011,
1574 0x76bb0157, 1640 0x76bbe0c5,
1575 0x0465b600, 1641 0x0465b600,
1576 0x659450f9, 1642 0x659450f9,
1577 0x0256bb04, 1643 0x0256bb04,
1578 0x75fd50bd, 1644 0x75fd50bd,
1579 0xf550fc04, 1645 0xf550fc04,
1580 0xb608c921, 1646 0xb6088121,
1581 0x11f50464, 1647 0x11f50464,
1582 0x76bb008a, 1648 0x57f000ad,
1583 0x0465b600, 1649 0x0076bb01,
1584 0x659450f9, 1650 0xf90465b6,
1585 0x0256bb04, 1651 0x04659450,
1586 0x75fd50bd, 1652 0xbd0256bb,
1587 0xf550fc04, 1653 0x0475fd50,
1588 0xb6081c21, 1654 0x21f550fc,
1589 0x11f40464, 1655 0x64b608dc,
1590 0xe05bcb6a, 1656 0x8a11f504,
1591 0xb60076bb, 1657 0x0076bb00,
1592 0x50f90465, 1658 0xf90465b6,
1593 0xbb046594, 1659 0x04659450,
1594 0x50bd0256, 1660 0xbd0256bb,
1595 0xfc0475fd, 1661 0x0475fd50,
1596 0x6121f550, 1662 0x21f550fc,
1597 0x0464b607, 1663 0x64b6082f,
1598 0xbd025bb9, 1664 0x6a11f404,
1599 0x430ef474, 1665 0xbbe05bcb,
1600/* 0x0a40: i2c_recv_not_rd08 */ 1666 0x65b60076,
1601 0xf401d6b0, 1667 0x9450f904,
1602 0x57f03d1b, 1668 0x56bb0465,
1603 0xc921f500, 1669 0xfd50bd02,
1604 0x3311f408, 1670 0x50fc0475,
1605 0xf5e0c5c7, 1671 0x077421f5,
1606 0xf4086e21, 1672 0xb90464b6,
1607 0x57f02911, 1673 0x74bd025b,
1608 0xc921f500, 1674/* 0x0a53: i2c_recv_not_rd08 */
1609 0x1f11f408, 1675 0xb0430ef4,
1610 0xf5e0b5c7, 1676 0x1bf401d6,
1611 0xf4086e21, 1677 0x0057f03d,
1612 0x21f51511, 1678 0x08dc21f5,
1613 0x74bd0761, 1679 0xc73311f4,
1614 0xf408c5c7, 1680 0x21f5e0c5,
1615 0x32f4091b, 1681 0x11f40881,
1616 0x030ef402, 1682 0x0057f029,
1617/* 0x0a80: i2c_recv_not_wr08 */ 1683 0x08dc21f5,
1618/* 0x0a80: i2c_recv_done */ 1684 0xc71f11f4,
1619 0xf5f8cec7, 1685 0x21f5e0b5,
1620 0xfc092b21, 1686 0x11f40881,
1621 0xf4d0fce0, 1687 0x7421f515,
1622 0x7cb90a12, 1688 0xc774bd07,
1623 0xf121f502, 1689 0x1bf408c5,
1624/* 0x0a95: i2c_recv_exit */ 1690 0x0232f409,
1625/* 0x0a97: i2c_init */ 1691/* 0x0a93: i2c_recv_not_wr08 */
1692/* 0x0a93: i2c_recv_done */
1693 0xc7030ef4,
1694 0x21f5f8ce,
1695 0xe0fc093e,
1696 0x12f4d0fc,
1697 0x027cb90a,
1698 0x02f121f5,
1699/* 0x0aa8: i2c_recv_exit */
1700/* 0x0aaa: i2c_init */
1701 0x00f800f8,
1702/* 0x0aac: test_recv */
1703 0x05d817f1,
1704 0xb60011cf,
1705 0x07f10110,
1706 0x01d005d8,
1707 0xf104bd00,
1708 0xf1d900e7,
1709 0xf5134fe3,
1710 0xf8022321,
1711/* 0x0acd: test_init */
1712 0x00e7f100,
1713 0x2321f508,
1714/* 0x0ad7: idle_recv */
1626 0xf800f802, 1715 0xf800f802,
1627/* 0x0a99: test_recv */ 1716/* 0x0ad9: idle */
1628 0xd817f100, 1717 0x0031f400,
1629 0x0011cf05, 1718 0x05d417f1,
1630 0xf10110b6, 1719 0xb60011cf,
1631 0xd005d807, 1720 0x07f10110,
1632 0x04bd0001, 1721 0x01d005d4,
1633 0xd900e7f1, 1722/* 0x0aef: idle_loop */
1634 0x134fe3f1, 1723 0xf004bd00,
1635 0x022321f5, 1724 0x32f45817,
1636/* 0x0aba: test_init */ 1725/* 0x0af5: idle_proc */
1637 0xe7f100f8, 1726/* 0x0af5: idle_proc_exec */
1638 0x21f50800, 1727 0xb910f902,
1639 0x00f80223, 1728 0x21f5021e,
1640/* 0x0ac4: idle_recv */ 1729 0x10fc02fa,
1641/* 0x0ac6: idle */ 1730 0xf40911f4,
1642 0x31f400f8, 1731 0x0ef40231,
1643 0xd417f100, 1732/* 0x0b09: idle_proc_next */
1644 0x0011cf05, 1733 0x5810b6ef,
1645 0xf10110b6, 1734 0xf4061fb8,
1646 0xd005d407, 1735 0x02f4e61b,
1647 0x04bd0001, 1736 0x0028f4dd,
1648/* 0x0adc: idle_loop */ 1737 0x00c10ef4,
1649 0xf45817f0,
1650/* 0x0ae2: idle_proc */
1651/* 0x0ae2: idle_proc_exec */
1652 0x10f90232,
1653 0xf5021eb9,
1654 0xfc02fa21,
1655 0x0911f410,
1656 0xf40231f4,
1657/* 0x0af6: idle_proc_next */
1658 0x10b6ef0e,
1659 0x061fb858,
1660 0xf4e61bf4,
1661 0x28f4dd02,
1662 0xc10ef400,
1663 0x00000000,
1664 0x00000000,
1665 0x00000000,
1666 0x00000000,
1667 0x00000000,
1668 0x00000000, 1738 0x00000000,
1669 0x00000000, 1739 0x00000000,
1670 0x00000000, 1740 0x00000000,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h
index 522e3079f824..c8b06cb77e72 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/os.h
@@ -18,6 +18,10 @@
18#define MEMX_MSG_INFO 0 18#define MEMX_MSG_INFO 0
19#define MEMX_MSG_EXEC 1 19#define MEMX_MSG_EXEC 1
20 20
21/* MEMX: info types */
22#define MEMX_INFO_DATA 0
23#define MEMX_INFO_TRAIN 1
24
21/* MEMX: script opcode definitions */ 25/* MEMX: script opcode definitions */
22#define MEMX_ENTER 1 26#define MEMX_ENTER 1
23#define MEMX_LEAVE 2 27#define MEMX_LEAVE 2
@@ -25,6 +29,7 @@
25#define MEMX_WAIT 4 29#define MEMX_WAIT 4
26#define MEMX_DELAY 5 30#define MEMX_DELAY 5
27#define MEMX_VBLANK 6 31#define MEMX_VBLANK 6
32#define MEMX_TRAIN 7
28 33
29/* I2C_: message identifiers */ 34/* I2C_: message identifiers */
30#define I2C__MSG_RD08 0 35#define I2C__MSG_RD08 0
diff --git a/drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c b/drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c
index 65eaa2546cad..7a9299d7159f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/pwr/memx.c
@@ -47,7 +47,8 @@ nouveau_memx_init(struct nouveau_pwr *ppwr, struct nouveau_memx **pmemx)
47 u32 reply[2]; 47 u32 reply[2];
48 int ret; 48 int ret;
49 49
50 ret = ppwr->message(ppwr, reply, PROC_MEMX, MEMX_MSG_INFO, 0, 0); 50 ret = ppwr->message(ppwr, reply, PROC_MEMX, MEMX_MSG_INFO,
51 MEMX_INFO_DATA, 0);
51 if (ret) 52 if (ret)
52 return ret; 53 return ret;
53 54
@@ -106,7 +107,7 @@ nouveau_memx_wait(struct nouveau_memx *memx,
106{ 107{
107 nv_debug(memx->ppwr, "R[%06x] & 0x%08x == 0x%08x, %d us\n", 108 nv_debug(memx->ppwr, "R[%06x] & 0x%08x == 0x%08x, %d us\n",
108 addr, mask, data, nsec); 109 addr, mask, data, nsec);
109 memx_cmd(memx, MEMX_WAIT, 4, (u32[]){ addr, ~mask, data, nsec }); 110 memx_cmd(memx, MEMX_WAIT, 4, (u32[]){ addr, mask, data, nsec });
110 memx_out(memx); /* fuc can't handle multiple */ 111 memx_out(memx); /* fuc can't handle multiple */
111} 112}
112 113
@@ -152,6 +153,38 @@ nouveau_memx_wait_vblank(struct nouveau_memx *memx)
152} 153}
153 154
154void 155void
156nouveau_memx_train(struct nouveau_memx *memx)
157{
158 nv_debug(memx->ppwr, " MEM TRAIN\n");
159 memx_cmd(memx, MEMX_TRAIN, 0, NULL);
160}
161
162int
163nouveau_memx_train_result(struct nouveau_pwr *ppwr, u32 *res, int rsize)
164{
165 u32 reply[2], base, size, i;
166 int ret;
167
168 ret = ppwr->message(ppwr, reply, PROC_MEMX, MEMX_MSG_INFO,
169 MEMX_INFO_TRAIN, 0);
170 if (ret)
171 return ret;
172
173 base = reply[0];
174 size = reply[1] >> 2;
175 if (size > rsize)
176 return -ENOMEM;
177
178 /* read the packet */
179 nv_wr32(ppwr, 0x10a1c0, 0x02000000 | base);
180
181 for (i = 0; i < size; i++)
182 res[i] = nv_rd32(ppwr, 0x10a1c4);
183
184 return 0;
185}
186
187void
155nouveau_memx_block(struct nouveau_memx *memx) 188nouveau_memx_block(struct nouveau_memx *memx)
156{ 189{
157 nv_debug(memx->ppwr, " HOST BLOCKED\n"); 190 nv_debug(memx->ppwr, " HOST BLOCKED\n");
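[Editor's note] The new MEMX_TRAIN plumbing is a two-step protocol: nouveau_memx_train() queues the opcode for the PMU script, and nouveau_memx_train_result() later asks the PMU where the training packet lives (MEMX_MSG_INFO with MEMX_INFO_TRAIN returns base and byte size) and streams it out through the auto-increment window at 0x10a1c0/0x10a1c4. Note that rsize is counted in 32-bit words, not bytes. A minimal caller sketch, assuming a valid nouveau_pwr pointer and an illustrative buffer size:

/* Sketch of a caller draining the PMU memory-training results; RES_WORDS
 * is illustrative, and the buffer is zeroed because the helper does not
 * report how many words it actually filled. */
#define RES_WORDS 64

static int
example_read_train_results(struct nouveau_pwr *ppwr)
{
	u32 res[RES_WORDS] = {};
	int ret, i;

	ret = nouveau_memx_train_result(ppwr, res, RES_WORDS);
	if (ret)
		return ret; /* -ENOMEM if the packet exceeds RES_WORDS words */

	for (i = 0; i < RES_WORDS; i++)
		nv_debug(ppwr, "train[%d] = 0x%08x\n", i, res[i]);
	return 0;
}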
diff --git a/drivers/gpu/drm/nouveau/core/subdev/volt/base.c b/drivers/gpu/drm/nouveau/core/subdev/volt/base.c
index 32794a999106..26ccd8df193f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/volt/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/volt/base.c
@@ -101,6 +101,41 @@ nouveau_volt_set_id(struct nouveau_volt *volt, u8 id, int condition)
101 return ret; 101 return ret;
102} 102}
103 103
104static void nouveau_volt_parse_bios(struct nouveau_bios *bios,
105 struct nouveau_volt *volt)
106{
107 struct nvbios_volt_entry ivid;
108 struct nvbios_volt info;
109 u8 ver, hdr, cnt, len;
110 u16 data;
111 int i;
112
113 data = nvbios_volt_parse(bios, &ver, &hdr, &cnt, &len, &info);
114 if (data && info.vidmask && info.base && info.step) {
115 for (i = 0; i < info.vidmask + 1; i++) {
116 if (info.base >= info.min &&
117 info.base <= info.max) {
118 volt->vid[volt->vid_nr].uv = info.base;
119 volt->vid[volt->vid_nr].vid = i;
120 volt->vid_nr++;
121 }
122 info.base += info.step;
123 }
124 volt->vid_mask = info.vidmask;
125 } else if (data && info.vidmask) {
126 for (i = 0; i < cnt; i++) {
127 data = nvbios_volt_entry_parse(bios, i, &ver, &hdr,
128 &ivid);
129 if (data) {
130 volt->vid[volt->vid_nr].uv = ivid.voltage;
131 volt->vid[volt->vid_nr].vid = ivid.vid;
132 volt->vid_nr++;
133 }
134 }
135 volt->vid_mask = info.vidmask;
136 }
137}
138
104int 139int
105_nouveau_volt_init(struct nouveau_object *object) 140_nouveau_volt_init(struct nouveau_object *object)
106{ 141{
@@ -136,10 +171,6 @@ nouveau_volt_create_(struct nouveau_object *parent,
136{ 171{
137 struct nouveau_bios *bios = nouveau_bios(parent); 172 struct nouveau_bios *bios = nouveau_bios(parent);
138 struct nouveau_volt *volt; 173 struct nouveau_volt *volt;
139 struct nvbios_volt_entry ivid;
140 struct nvbios_volt info;
141 u8 ver, hdr, cnt, len;
142 u16 data;
143 int ret, i; 174 int ret, i;
144 175
145 ret = nouveau_subdev_create_(parent, engine, oclass, 0, "VOLT", 176 ret = nouveau_subdev_create_(parent, engine, oclass, 0, "VOLT",
@@ -152,31 +183,9 @@ nouveau_volt_create_(struct nouveau_object *parent,
152 volt->set = nouveau_volt_set; 183 volt->set = nouveau_volt_set;
153 volt->set_id = nouveau_volt_set_id; 184 volt->set_id = nouveau_volt_set_id;
154 185
155 data = nvbios_volt_parse(bios, &ver, &hdr, &cnt, &len, &info); 186 /* Assume that a non-BIOS device builds its voltage table later */
156 if (data && info.vidmask && info.base && info.step) { 187 if (bios)
157 for (i = 0; i < info.vidmask + 1; i++) { 188 nouveau_volt_parse_bios(bios, volt);
158 if (info.base >= info.min &&
159 info.base <= info.max) {
160 volt->vid[volt->vid_nr].uv = info.base;
161 volt->vid[volt->vid_nr].vid = i;
162 volt->vid_nr++;
163 }
164 info.base += info.step;
165 }
166 volt->vid_mask = info.vidmask;
167 } else
168 if (data && info.vidmask) {
169 for (i = 0; i < cnt; i++) {
170 data = nvbios_volt_entry_parse(bios, i, &ver, &hdr,
171 &ivid);
172 if (data) {
173 volt->vid[volt->vid_nr].uv = ivid.voltage;
174 volt->vid[volt->vid_nr].vid = ivid.vid;
175 volt->vid_nr++;
176 }
177 }
178 volt->vid_mask = info.vidmask;
179 }
180 189
181 if (volt->vid_nr) { 190 if (volt->vid_nr) {
182 for (i = 0; i < volt->vid_nr; i++) { 191 for (i = 0; i < volt->vid_nr; i++) {
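[Editor's note] The extracted nouveau_volt_parse_bios() handles the two VBIOS voltage-table layouts: a linear ramp described by base/step/vidmask (VID i maps to base + i * step, kept only while it falls inside [min, max]) and an explicit entry table of (voltage, vid) pairs. A standalone sketch of the ramp expansion, with made-up table values:

/* Standalone sketch of the linear-ramp case of nouveau_volt_parse_bios();
 * all table values below are illustrative, not from a real VBIOS. */
#include <stdio.h>

int main(void)
{
	unsigned base = 600000, step = 12500;	/* uV */
	unsigned min = 600000, max = 1100000;	/* uV */
	unsigned vidmask = 0x3f;		/* 6-bit VID field */
	unsigned i, uv;

	for (i = 0, uv = base; i < vidmask + 1; i++, uv += step)
		if (uv >= min && uv <= max)
			printf("vid %2u -> %u uV\n", i, uv);
	return 0;
}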
diff --git a/drivers/gpu/drm/nouveau/core/subdev/volt/gk20a.c b/drivers/gpu/drm/nouveau/core/subdev/volt/gk20a.c
new file mode 100644
index 000000000000..717368ef31ac
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/volt/gk20a.c
@@ -0,0 +1,199 @@
1/*
2 * Copyright (c) 2014, NVIDIA CORPORATION. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20 * DEALINGS IN THE SOFTWARE.
21 */
22
23#ifdef __KERNEL__
24#include <nouveau_platform.h>
25#endif
26#include <subdev/volt.h>
27
28struct cvb_coef {
29 int c0;
30 int c1;
31 int c2;
32 int c3;
33 int c4;
34 int c5;
35};
36
37struct gk20a_volt_priv {
38 struct nouveau_volt base;
39 struct regulator *vdd;
40};
41
42const struct cvb_coef gk20a_cvb_coef[] = {
43 /* MHz, c0, c1, c2, c3, c4, c5 */
44 /* 72 */ { 1209886, -36468, 515, 417, -13123, 203},
45 /* 108 */ { 1130804, -27659, 296, 298, -10834, 221},
46 /* 180 */ { 1162871, -27110, 247, 238, -10681, 268},
47 /* 252 */ { 1220458, -28654, 247, 179, -10376, 298},
48 /* 324 */ { 1280953, -30204, 247, 119, -9766, 304},
49 /* 396 */ { 1344547, -31777, 247, 119, -8545, 292},
50 /* 468 */ { 1420168, -34227, 269, 60, -7172, 256},
51 /* 540 */ { 1490757, -35955, 274, 60, -5188, 197},
52 /* 612 */ { 1599112, -42583, 398, 0, -1831, 119},
53 /* 648 */ { 1366986, -16459, -274, 0, -3204, 72},
54 /* 684 */ { 1391884, -17078, -274, -60, -1526, 30},
55 /* 708 */ { 1415522, -17497, -274, -60, -458, 0},
56 /* 756 */ { 1464061, -18331, -274, -119, 1831, -72},
57 /* 804 */ { 1524225, -20064, -254, -119, 4272, -155},
58 /* 852 */ { 1608418, -21643, -269, 0, 763, -48},
59};
60
61/**
62 * cvb_mv = ((c2 * speedo / s_scale + c1) * speedo / s_scale + c0)
63 */
64static inline int
65gk20a_volt_get_cvb_voltage(int speedo, int s_scale,
66 const struct cvb_coef *coef)
67{
68 int mv;
69
70 mv = DIV_ROUND_CLOSEST(coef->c2 * speedo, s_scale);
71 mv = DIV_ROUND_CLOSEST((mv + coef->c1) * speedo, s_scale) + coef->c0;
72 return mv;
73}
74
75/**
76 * cvb_t_mv =
77 * ((c2 * speedo / s_scale + c1) * speedo / s_scale + c0) +
78 * ((c3 * speedo / s_scale + c4 + c5 * T / t_scale) * T / t_scale)
79 */
80static inline int
81gk20a_volt_get_cvb_t_voltage(int speedo, int temp, int s_scale, int t_scale,
82 const struct cvb_coef *coef)
83{
84 int cvb_mv, mv;
85
86 cvb_mv = gk20a_volt_get_cvb_voltage(speedo, s_scale, coef);
87
88 mv = DIV_ROUND_CLOSEST(coef->c3 * speedo, s_scale) + coef->c4 +
89 DIV_ROUND_CLOSEST(coef->c5 * temp, t_scale);
90 mv = DIV_ROUND_CLOSEST(mv * temp, t_scale) + cvb_mv;
91 return mv;
92}
93
94static int
95gk20a_volt_calc_voltage(const struct cvb_coef *coef, int speedo)
96{
97 int mv;
98
99 mv = gk20a_volt_get_cvb_t_voltage(speedo, -10, 100, 10, coef);
100 mv = DIV_ROUND_UP(mv, 1000);
101
102 return mv * 1000;
103}
104
105static int
106gk20a_volt_vid_get(struct nouveau_volt *volt)
107{
108 struct gk20a_volt_priv *priv = (void *)volt;
109 int i, uv;
110
111 uv = regulator_get_voltage(priv->vdd);
112
113 for (i = 0; i < volt->vid_nr; i++)
114 if (volt->vid[i].uv >= uv)
115 return i;
116
117 return -EINVAL;
118}
119
120static int
121gk20a_volt_vid_set(struct nouveau_volt *volt, u8 vid)
122{
123 struct gk20a_volt_priv *priv = (void *)volt;
124
125 nv_debug(volt, "set voltage as %duv\n", volt->vid[vid].uv);
126 return regulator_set_voltage(priv->vdd, volt->vid[vid].uv, 1200000);
127}
128
129static int
130gk20a_volt_set_id(struct nouveau_volt *volt, u8 id, int condition)
131{
132 struct gk20a_volt_priv *priv = (void *)volt;
133 int prev_uv = regulator_get_voltage(priv->vdd);
134 int target_uv = volt->vid[id].uv;
135 int ret;
136
137 nv_debug(volt, "prev=%d, target=%d, condition=%d\n",
138 prev_uv, target_uv, condition);
139 if (!condition ||
140 (condition < 0 && target_uv < prev_uv) ||
141 (condition > 0 && target_uv > prev_uv)) {
142 ret = gk20a_volt_vid_set(volt, volt->vid[id].vid);
143 } else {
144 ret = 0;
145 }
146
147 return ret;
148}
149
150static int
151gk20a_volt_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
152 struct nouveau_oclass *oclass, void *data, u32 size,
153 struct nouveau_object **pobject)
154{
155 struct gk20a_volt_priv *priv;
156 struct nouveau_volt *volt;
157 struct nouveau_platform_device *plat;
158 int i, ret, uv;
159
160 ret = nouveau_volt_create(parent, engine, oclass, &priv);
161 *pobject = nv_object(priv);
162 if (ret)
163 return ret;
164
165 volt = &priv->base;
166
167 plat = nv_device_to_platform(nv_device(parent));
168
169 uv = regulator_get_voltage(plat->gpu->vdd);
170 nv_info(priv, "The default voltage is %duV\n", uv);
171
172 priv->vdd = plat->gpu->vdd;
173 priv->base.vid_get = gk20a_volt_vid_get;
174 priv->base.vid_set = gk20a_volt_vid_set;
175 priv->base.set_id = gk20a_volt_set_id;
176
177 volt->vid_nr = ARRAY_SIZE(gk20a_cvb_coef);
178 nv_debug(priv, "%s - vid_nr = %d\n", __func__, volt->vid_nr);
179 for (i = 0; i < volt->vid_nr; i++) {
180 volt->vid[i].vid = i;
181 volt->vid[i].uv = gk20a_volt_calc_voltage(&gk20a_cvb_coef[i],
182 plat->gpu_speedo);
183 nv_debug(priv, "%2d: vid=%d, uv=%d\n", i, volt->vid[i].vid,
184 volt->vid[i].uv);
185 }
186
187 return 0;
188}
189
190struct nouveau_oclass
191gk20a_volt_oclass = {
192 .handle = NV_SUBDEV(VOLT, 0xea),
193 .ofuncs = &(struct nouveau_ofuncs) {
194 .ctor = gk20a_volt_ctor,
195 .dtor = _nouveau_volt_dtor,
196 .init = _nouveau_volt_init,
197 .fini = _nouveau_volt_fini,
198 },
199};
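[Editor's note] gk20a_volt_calc_voltage() evaluates the CVB polynomial at a fixed temperature term (temp = -10 with t_scale = 10) and speedo scale s_scale = 100, then rounds the millivolt result up and converts to microvolts. A worked standalone sketch for the first (72 MHz) coefficient row; the speedo value is an assumption, since real values come from the Tegra fuses:

/* Worked example of the CVB evaluation; speedo = 1931 is an assumed
 * value, and the div helpers stand in for the kernel macros. */
#include <stdio.h>

static int div_round_closest(long long n, long long d)
{
	return (int)((n < 0 ? n - d / 2 : n + d / 2) / d);
}
static int div_round_up(int n, int d) { return (n + d - 1) / d; }

int main(void)
{
	/* 72 MHz row of gk20a_cvb_coef[] */
	int c0 = 1209886, c1 = -36468, c2 = 515;
	int c3 = 417, c4 = -13123, c5 = 203;
	int speedo = 1931, s_scale = 100, temp = -10, t_scale = 10;
	int cvb, mv;

	cvb = div_round_closest((long long)c2 * speedo, s_scale);
	cvb = div_round_closest((long long)(cvb + c1) * speedo, s_scale) + c0;

	mv = div_round_closest((long long)c3 * speedo, s_scale) + c4 +
	     div_round_closest((long long)c5 * temp, t_scale);
	mv = div_round_closest((long long)mv * temp, t_scale) + cvb;

	printf("%d uV\n", div_round_up(mv, 1000) * 1000);
	return 0;
}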
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index fca6a1f9c20c..38402ade6835 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -26,6 +26,7 @@
26 26
27#include <drm/drmP.h> 27#include <drm/drmP.h>
28#include <drm/drm_crtc_helper.h> 28#include <drm/drm_crtc_helper.h>
29#include <drm/drm_plane_helper.h>
29 30
30#include "nouveau_drm.h" 31#include "nouveau_drm.h"
31#include "nouveau_reg.h" 32#include "nouveau_reg.h"
@@ -613,7 +614,7 @@ nv_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
613 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 614 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
614 int ret; 615 int ret;
615 616
616 ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM); 617 ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM, false);
617 if (ret == 0) { 618 if (ret == 0) {
618 if (disp->image[nv_crtc->index]) 619 if (disp->image[nv_crtc->index])
619 nouveau_bo_unpin(disp->image[nv_crtc->index]); 620 nouveau_bo_unpin(disp->image[nv_crtc->index]);
@@ -1129,7 +1130,7 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
1129 ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM, 1130 ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
1130 0, 0x0000, NULL, NULL, &nv_crtc->cursor.nvbo); 1131 0, 0x0000, NULL, NULL, &nv_crtc->cursor.nvbo);
1131 if (!ret) { 1132 if (!ret) {
1132 ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM); 1133 ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM, false);
1133 if (!ret) { 1134 if (!ret) {
1134 ret = nouveau_bo_map(nv_crtc->cursor.nvbo); 1135 ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
1135 if (ret) 1136 if (ret)
diff --git a/drivers/gpu/drm/nouveau/dispnv04/overlay.c b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
index 1e9056a8df94..9f2498571d09 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/overlay.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/overlay.c
@@ -126,7 +126,7 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
126 return -ERANGE; 126 return -ERANGE;
127 } 127 }
128 128
129 ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM); 129 ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM, false);
130 if (ret) 130 if (ret)
131 return ret; 131 return ret;
132 132
@@ -373,7 +373,7 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
373 if (crtc_w < src_w || crtc_h < src_h) 373 if (crtc_w < src_w || crtc_h < src_h)
374 return -ERANGE; 374 return -ERANGE;
375 375
376 ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM); 376 ret = nouveau_bo_pin(nv_fb->nvbo, TTM_PL_FLAG_VRAM, false);
377 if (ret) 377 if (ret)
378 return ret; 378 return ret;
379 379
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index a24faa5e2a2a..d39a15000068 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -308,7 +308,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
308 ret = nouveau_gem_new(dev, PAGE_SIZE, 0, NOUVEAU_GEM_DOMAIN_GART, 308 ret = nouveau_gem_new(dev, PAGE_SIZE, 0, NOUVEAU_GEM_DOMAIN_GART,
309 0, 0, &chan->ntfy); 309 0, 0, &chan->ntfy);
310 if (ret == 0) 310 if (ret == 0)
311 ret = nouveau_bo_pin(chan->ntfy, TTM_PL_FLAG_TT); 311 ret = nouveau_bo_pin(chan->ntfy, TTM_PL_FLAG_TT, false);
312 if (ret) 312 if (ret)
313 goto done; 313 goto done;
314 314
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index dae2c96deef8..7df6acc8bb34 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -1258,7 +1258,7 @@ olddcb_table(struct drm_device *dev)
1258 return NULL; 1258 return NULL;
1259 } 1259 }
1260 1260
1261 if (dcb[0] >= 0x41) { 1261 if (dcb[0] >= 0x42) {
1262 NV_WARN(drm, "DCB version 0x%02x unknown\n", dcb[0]); 1262 NV_WARN(drm, "DCB version 0x%02x unknown\n", dcb[0]);
1263 return NULL; 1263 return NULL;
1264 } else 1264 } else
@@ -1481,18 +1481,22 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
1481 entry->dpconf.link_bw = 540000; 1481 entry->dpconf.link_bw = 540000;
1482 break; 1482 break;
1483 } 1483 }
1484 switch ((conf & 0x0f000000) >> 24) { 1484 entry->dpconf.link_nr = (conf & 0x0f000000) >> 24;
1485 case 0xf: 1485 if (dcb->version < 0x41) {
1486 entry->dpconf.link_nr = 4; 1486 switch (entry->dpconf.link_nr) {
1487 break; 1487 case 0xf:
1488 case 0x3: 1488 entry->dpconf.link_nr = 4;
1489 entry->dpconf.link_nr = 2; 1489 break;
1490 break; 1490 case 0x3:
1491 default: 1491 entry->dpconf.link_nr = 2;
1492 entry->dpconf.link_nr = 1; 1492 break;
1493 break; 1493 default:
1494 entry->dpconf.link_nr = 1;
1495 break;
1496 }
1494 } 1497 }
1495 link = entry->dpconf.sor.link; 1498 link = entry->dpconf.sor.link;
1499 entry->i2c_index += NV_I2C_AUX(0);
1496 break; 1500 break;
1497 case DCB_OUTPUT_TMDS: 1501 case DCB_OUTPUT_TMDS:
1498 if (dcb->version >= 0x40) { 1502 if (dcb->version >= 0x40) {
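[Editor's note] DCB 4.1 stores the DP lane count directly in the conf field, so the translation that older tables needed (a lane mask: 0xf -> 4 lanes, 0x3 -> 2, anything else -> 1) is now applied only for dcb->version < 0x41. A sketch of the decode, factored into a standalone helper that mirrors the hunk above:

/* Sketch of the DP link_nr decode from parse_dcb20_entry() above. */
static unsigned
dcb_dp_link_nr(unsigned dcb_version, unsigned conf)
{
	unsigned link_nr = (conf & 0x0f000000) >> 24;

	if (dcb_version >= 0x41)
		return link_nr;		/* DCB 4.1+: raw lane count */

	switch (link_nr) {		/* older DCBs: lane mask */
	case 0xf: return 4;
	case 0x3: return 2;
	default:  return 1;
	}
}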
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 3d474ac03f88..21ec561edc99 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -214,6 +214,9 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
214 nvbo->tile_flags = tile_flags; 214 nvbo->tile_flags = tile_flags;
215 nvbo->bo.bdev = &drm->ttm.bdev; 215 nvbo->bo.bdev = &drm->ttm.bdev;
216 216
217 if (!nv_device_is_cpu_coherent(nvkm_device(&drm->device)))
218 nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;
219
217 nvbo->page_shift = 12; 220 nvbo->page_shift = 12;
218 if (drm->client.vm) { 221 if (drm->client.vm) {
219 if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024) 222 if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
@@ -291,8 +294,9 @@ void
291nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy) 294nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
292{ 295{
293 struct ttm_placement *pl = &nvbo->placement; 296 struct ttm_placement *pl = &nvbo->placement;
294 uint32_t flags = TTM_PL_MASK_CACHING | 297 uint32_t flags = (nvbo->force_coherent ? TTM_PL_FLAG_UNCACHED :
295 (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0); 298 TTM_PL_MASK_CACHING) |
299 (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);
296 300
297 pl->placement = nvbo->placements; 301 pl->placement = nvbo->placements;
298 set_placement_list(nvbo->placements, &pl->num_placement, 302 set_placement_list(nvbo->placements, &pl->num_placement,
@@ -306,42 +310,75 @@ nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
306} 310}
307 311
308int 312int
309nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype) 313nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
310{ 314{
311 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); 315 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
312 struct ttm_buffer_object *bo = &nvbo->bo; 316 struct ttm_buffer_object *bo = &nvbo->bo;
317 bool force = false, evict = false;
313 int ret; 318 int ret;
314 319
315 ret = ttm_bo_reserve(bo, false, false, false, NULL); 320 ret = ttm_bo_reserve(bo, false, false, false, NULL);
316 if (ret) 321 if (ret)
317 goto out; 322 return ret;
318 323
319 if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) { 324 if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
320 NV_ERROR(drm, "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo, 325 memtype == TTM_PL_FLAG_VRAM && contig) {
321 1 << bo->mem.mem_type, memtype); 326 if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) {
322 ret = -EINVAL; 327 if (bo->mem.mem_type == TTM_PL_VRAM) {
323 goto out; 328 struct nouveau_mem *mem = bo->mem.mm_node;
329 if (!list_is_singular(&mem->regions))
330 evict = true;
331 }
332 nvbo->tile_flags &= ~NOUVEAU_GEM_TILE_NONCONTIG;
333 force = true;
334 }
324 } 335 }
325 336
326 if (nvbo->pin_refcnt++) 337 if (nvbo->pin_refcnt) {
338 if (!(memtype & (1 << bo->mem.mem_type)) || evict) {
339 NV_ERROR(drm, "bo %p pinned elsewhere: "
340 "0x%08x vs 0x%08x\n", bo,
341 1 << bo->mem.mem_type, memtype);
342 ret = -EBUSY;
343 }
344 nvbo->pin_refcnt++;
327 goto out; 345 goto out;
346 }
328 347
348 if (evict) {
349 nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, 0);
350 ret = nouveau_bo_validate(nvbo, false, false);
351 if (ret)
352 goto out;
353 }
354
355 nvbo->pin_refcnt++;
329 nouveau_bo_placement_set(nvbo, memtype, 0); 356 nouveau_bo_placement_set(nvbo, memtype, 0);
330 357
358 /* drop pin_refcnt temporarily, so we don't trip the assertion
359 * in nouveau_bo_move() that makes sure we're not trying to
360 * move a pinned buffer
361 */
362 nvbo->pin_refcnt--;
331 ret = nouveau_bo_validate(nvbo, false, false); 363 ret = nouveau_bo_validate(nvbo, false, false);
332 if (ret == 0) { 364 if (ret)
333 switch (bo->mem.mem_type) { 365 goto out;
334 case TTM_PL_VRAM: 366 nvbo->pin_refcnt++;
335 drm->gem.vram_available -= bo->mem.size; 367
336 break; 368 switch (bo->mem.mem_type) {
337 case TTM_PL_TT: 369 case TTM_PL_VRAM:
338 drm->gem.gart_available -= bo->mem.size; 370 drm->gem.vram_available -= bo->mem.size;
339 break; 371 break;
340 default: 372 case TTM_PL_TT:
341 break; 373 drm->gem.gart_available -= bo->mem.size;
342 } 374 break;
375 default:
376 break;
343 } 377 }
378
344out: 379out:
380 if (force && ret)
381 nvbo->tile_flags |= NOUVEAU_GEM_TILE_NONCONTIG;
345 ttm_bo_unreserve(bo); 382 ttm_bo_unreserve(bo);
346 return ret; 383 return ret;
347} 384}
@@ -392,7 +429,14 @@ nouveau_bo_map(struct nouveau_bo *nvbo)
392 if (ret) 429 if (ret)
393 return ret; 430 return ret;
394 431
395 ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap); 432 /*
433 * TTM buffers allocated using the DMA API already have a mapping, let's
434 * use it instead.
435 */
436 if (!nvbo->force_coherent)
437 ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
438 &nvbo->kmap);
439
396 ttm_bo_unreserve(&nvbo->bo); 440 ttm_bo_unreserve(&nvbo->bo);
397 return ret; 441 return ret;
398} 442}
@@ -400,10 +444,57 @@ nouveau_bo_map(struct nouveau_bo *nvbo)
400void 444void
401nouveau_bo_unmap(struct nouveau_bo *nvbo) 445nouveau_bo_unmap(struct nouveau_bo *nvbo)
402{ 446{
403 if (nvbo) 447 if (!nvbo)
448 return;
449
450 /*
451 * TTM buffers allocated using the DMA API already had a coherent
452 * mapping which we used, no need to unmap.
453 */
454 if (!nvbo->force_coherent)
404 ttm_bo_kunmap(&nvbo->kmap); 455 ttm_bo_kunmap(&nvbo->kmap);
405} 456}
406 457
458void
459nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
460{
461 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
462 struct nouveau_device *device = nvkm_device(&drm->device);
463 struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
464 int i;
465
466 if (!ttm_dma)
467 return;
468
469 /* Don't waste time looping if the object is coherent */
470 if (nvbo->force_coherent)
471 return;
472
473 for (i = 0; i < ttm_dma->ttm.num_pages; i++)
474 dma_sync_single_for_device(nv_device_base(device),
475 ttm_dma->dma_address[i], PAGE_SIZE, DMA_TO_DEVICE);
476}
477
478void
479nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
480{
481 struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
482 struct nouveau_device *device = nvkm_device(&drm->device);
483 struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
484 int i;
485
486 if (!ttm_dma)
487 return;
488
489 /* Don't waste time looping if the object is coherent */
490 if (nvbo->force_coherent)
491 return;
492
493 for (i = 0; i < ttm_dma->ttm.num_pages; i++)
494 dma_sync_single_for_cpu(nv_device_base(device),
495 ttm_dma->dma_address[i], PAGE_SIZE, DMA_FROM_DEVICE);
496}
497
407int 498int
408nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible, 499nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
409 bool no_wait_gpu) 500 bool no_wait_gpu)
@@ -415,15 +506,41 @@ nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
415 if (ret) 506 if (ret)
416 return ret; 507 return ret;
417 508
509 nouveau_bo_sync_for_device(nvbo);
510
418 return 0; 511 return 0;
419} 512}
420 513
514static inline void *
515_nouveau_bo_mem_index(struct nouveau_bo *nvbo, unsigned index, void *mem, u8 sz)
516{
517 struct ttm_dma_tt *dma_tt;
518 u8 *m = mem;
519
520 index *= sz;
521
522 if (m) {
523 /* kmap'd address, return the corresponding offset */
524 m += index;
525 } else {
526 /* DMA-API mapping, lookup the right address */
527 dma_tt = (struct ttm_dma_tt *)nvbo->bo.ttm;
528 m = dma_tt->cpu_address[index / PAGE_SIZE];
529 m += index % PAGE_SIZE;
530 }
531
532 return m;
533}
534#define nouveau_bo_mem_index(o, i, m) _nouveau_bo_mem_index(o, i, m, sizeof(*m))
535
421u16 536u16
422nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index) 537nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
423{ 538{
424 bool is_iomem; 539 bool is_iomem;
425 u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem); 540 u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
426 mem = &mem[index]; 541
542 mem = nouveau_bo_mem_index(nvbo, index, mem);
543
427 if (is_iomem) 544 if (is_iomem)
428 return ioread16_native((void __force __iomem *)mem); 545 return ioread16_native((void __force __iomem *)mem);
429 else 546 else
@@ -435,7 +552,9 @@ nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
435{ 552{
436 bool is_iomem; 553 bool is_iomem;
437 u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem); 554 u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
438 mem = &mem[index]; 555
556 mem = nouveau_bo_mem_index(nvbo, index, mem);
557
439 if (is_iomem) 558 if (is_iomem)
440 iowrite16_native(val, (void __force __iomem *)mem); 559 iowrite16_native(val, (void __force __iomem *)mem);
441 else 560 else
@@ -447,7 +566,9 @@ nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
447{ 566{
448 bool is_iomem; 567 bool is_iomem;
449 u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem); 568 u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
450 mem = &mem[index]; 569
570 mem = nouveau_bo_mem_index(nvbo, index, mem);
571
451 if (is_iomem) 572 if (is_iomem)
452 return ioread32_native((void __force __iomem *)mem); 573 return ioread32_native((void __force __iomem *)mem);
453 else 574 else
@@ -459,7 +580,9 @@ nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
459{ 580{
460 bool is_iomem; 581 bool is_iomem;
461 u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem); 582 u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
462 mem = &mem[index]; 583
584 mem = nouveau_bo_mem_index(nvbo, index, mem);
585
463 if (is_iomem) 586 if (is_iomem)
464 iowrite32_native(val, (void __force __iomem *)mem); 587 iowrite32_native(val, (void __force __iomem *)mem);
465 else 588 else
@@ -1184,6 +1307,9 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
1184 struct nouveau_drm_tile *new_tile = NULL; 1307 struct nouveau_drm_tile *new_tile = NULL;
1185 int ret = 0; 1308 int ret = 0;
1186 1309
1310 if (nvbo->pin_refcnt)
1311 NV_WARN(drm, "Moving pinned object %p!\n", nvbo);
1312
1187 if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) { 1313 if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
1188 ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile); 1314 ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
1189 if (ret) 1315 if (ret)
@@ -1376,6 +1502,14 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
1376 dev = drm->dev; 1502 dev = drm->dev;
1377 pdev = nv_device_base(device); 1503 pdev = nv_device_base(device);
1378 1504
1505 /*
1506 * Objects matching this condition have been marked as force_coherent,
1507 * so use the DMA API for them.
1508 */
1509 if (!nv_device_is_cpu_coherent(device) &&
1510 ttm->caching_state == tt_uncached)
1511 return ttm_dma_populate(ttm_dma, dev->dev);
1512
1379#if __OS_HAS_AGP 1513#if __OS_HAS_AGP
1380 if (drm->agp.stat == ENABLED) { 1514 if (drm->agp.stat == ENABLED) {
1381 return ttm_agp_tt_populate(ttm); 1515 return ttm_agp_tt_populate(ttm);
@@ -1433,6 +1567,14 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
1433 dev = drm->dev; 1567 dev = drm->dev;
1434 pdev = nv_device_base(device); 1568 pdev = nv_device_base(device);
1435 1569
1570 /*
1571 * Objects matching this condition have been marked as force_coherent,
1572 * so use the DMA API for them.
1573 */
1574 if (!nv_device_is_cpu_coherent(device) &&
1575 ttm->caching_state == tt_uncached)
1576 ttm_dma_unpopulate(ttm_dma, dev->dev);
1577
1436#if __OS_HAS_AGP 1578#if __OS_HAS_AGP
1437 if (drm->agp.stat == ENABLED) { 1579 if (drm->agp.stat == ENABLED) {
1438 ttm_agp_tt_unpopulate(ttm); 1580 ttm_agp_tt_unpopulate(ttm);
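[Editor's note] With force_coherent buffers populated through the DMA API, their pages are no longer guaranteed to sit behind one contiguous kernel mapping, which is why the rd16/wr16/rd32/wr32 accessors now go through _nouveau_bo_mem_index(): a kmap'd buffer takes a flat byte offset, while a DMA-API buffer needs a per-page lookup. A minimal standalone sketch of that dispatch, assuming a page-address array like ttm_dma_tt's cpu_address:

#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE 4096u

/* Sketch of the dispatch in _nouveau_bo_mem_index(): `kmap` is the
 * contiguous mapping when one exists, otherwise `pages[]` holds one
 * CPU address per PAGE_SIZE chunk (like ttm_dma_tt::cpu_address). */
static void *
bo_mem_index(void *kmap, void **pages, size_t index, size_t elem_size)
{
	size_t off = index * elem_size;

	if (kmap)
		return (uint8_t *)kmap + off;
	return (uint8_t *)pages[off / PAGE_SIZE] + off % PAGE_SIZE;
}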
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index 22d2c764d80b..072222efeeb7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -13,6 +13,7 @@ struct nouveau_bo {
13 u32 valid_domains; 13 u32 valid_domains;
14 struct ttm_place placements[3]; 14 struct ttm_place placements[3];
15 struct ttm_place busy_placements[3]; 15 struct ttm_place busy_placements[3];
16 bool force_coherent;
16 struct ttm_bo_kmap_obj kmap; 17 struct ttm_bo_kmap_obj kmap;
17 struct list_head head; 18 struct list_head head;
18 19
@@ -72,7 +73,7 @@ int nouveau_bo_new(struct drm_device *, int size, int align, u32 flags,
72 u32 tile_mode, u32 tile_flags, struct sg_table *sg, 73 u32 tile_mode, u32 tile_flags, struct sg_table *sg,
73 struct reservation_object *robj, 74 struct reservation_object *robj,
74 struct nouveau_bo **); 75 struct nouveau_bo **);
75int nouveau_bo_pin(struct nouveau_bo *, u32 flags); 76int nouveau_bo_pin(struct nouveau_bo *, u32 flags, bool contig);
76int nouveau_bo_unpin(struct nouveau_bo *); 77int nouveau_bo_unpin(struct nouveau_bo *);
77int nouveau_bo_map(struct nouveau_bo *); 78int nouveau_bo_map(struct nouveau_bo *);
78void nouveau_bo_unmap(struct nouveau_bo *); 79void nouveau_bo_unmap(struct nouveau_bo *);
@@ -84,6 +85,8 @@ void nouveau_bo_wr32(struct nouveau_bo *, unsigned index, u32 val);
84void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *, bool exclusive); 85void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *, bool exclusive);
85int nouveau_bo_validate(struct nouveau_bo *, bool interruptible, 86int nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
86 bool no_wait_gpu); 87 bool no_wait_gpu);
88void nouveau_bo_sync_for_device(struct nouveau_bo *nvbo);
89void nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo);
87 90
88struct nouveau_vma * 91struct nouveau_vma *
89nouveau_bo_vma_find(struct nouveau_bo *, struct nouveau_vm *); 92nouveau_bo_vma_find(struct nouveau_bo *, struct nouveau_vm *);
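[Editor's note] Every nouveau_bo_pin() caller now has to say whether it needs the buffer physically contiguous in VRAM: scanout paths (framebuffer restore, cursor restore, page flips) pass true, while pushbufs, fences, PRIME pins and the fbcon allocation pass false. A short kernel-context sketch of the new convention; the BO is assumed to be a scanout target:

/* Sketch of the updated pin convention; `nvbo` is assumed to be a
 * buffer the display engine will scan out, hence contig = true. */
static int
example_pin_scanout(struct nouveau_bo *nvbo)
{
	int ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, true);
	if (ret)
		return ret;	/* -EBUSY if already pinned incompatibly */
	/* ... program the display engine with nvbo->bo.offset ... */
	return nouveau_bo_unpin(nvbo);
}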
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index fd3dbd59d73e..aff9099aae6c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -102,14 +102,14 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
102 chan->drm = drm; 102 chan->drm = drm;
103 103
104 /* allocate memory for dma push buffer */ 104 /* allocate memory for dma push buffer */
105 target = TTM_PL_FLAG_TT; 105 target = TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED;
106 if (nouveau_vram_pushbuf) 106 if (nouveau_vram_pushbuf)
107 target = TTM_PL_FLAG_VRAM; 107 target = TTM_PL_FLAG_VRAM;
108 108
109 ret = nouveau_bo_new(drm->dev, size, 0, target, 0, 0, NULL, NULL, 109 ret = nouveau_bo_new(drm->dev, size, 0, target, 0, 0, NULL, NULL,
110 &chan->push.buffer); 110 &chan->push.buffer);
111 if (ret == 0) { 111 if (ret == 0) {
112 ret = nouveau_bo_pin(chan->push.buffer, target); 112 ret = nouveau_bo_pin(chan->push.buffer, target, false);
113 if (ret == 0) 113 if (ret == 0)
114 ret = nouveau_bo_map(chan->push.buffer); 114 ret = nouveau_bo_map(chan->push.buffer);
115 } 115 }
@@ -285,7 +285,6 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
285 struct nouveau_software_chan *swch; 285 struct nouveau_software_chan *swch;
286 struct nv_dma_v0 args = {}; 286 struct nv_dma_v0 args = {};
287 int ret, i; 287 int ret, i;
288 bool save;
289 288
290 nvif_object_map(chan->object); 289 nvif_object_map(chan->object);
291 290
@@ -387,11 +386,7 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
387 } 386 }
388 387
389 /* initialise synchronisation */ 388 /* initialise synchronisation */
390 save = cli->base.super; 389 return nouveau_fence(chan->drm)->context_new(chan);
391 cli->base.super = true; /* hack until fencenv50 fixed */
392 ret = nouveau_fence(chan->drm)->context_new(chan);
393 cli->base.super = save;
394 return ret;
395} 390}
396 391
397int 392int
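[Editor's note] Adding TTM_PL_FLAG_UNCACHED to the pushbuf placement ties into the nouveau_bo.c changes above: on a device that is not CPU-coherent the flag marks the BO force_coherent, which routes population through ttm_dma_populate() and lets nouveau_bo_map() reuse the DMA API's existing mapping. A condensed kernel-context sketch of the predicate, with names mirroring the hunks above:

/* Condensed form of the decision made in nouveau_bo_new() and
 * nouveau_ttm_tt_populate() above; kernel context is assumed. */
static bool
uses_dma_api(bool cpu_coherent_device, uint32_t ttm_flags)
{
	return !cpu_coherent_device && (ttm_flags & TTM_PL_FLAG_UNCACHED);
}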
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index a88e6927f571..5d93902a91ab 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -479,6 +479,7 @@ nouveau_display_create(struct drm_device *dev)
479 479
480 if (nouveau_modeset != 2 && drm->vbios.dcb.entries) { 480 if (nouveau_modeset != 2 && drm->vbios.dcb.entries) {
481 static const u16 oclass[] = { 481 static const u16 oclass[] = {
482 GM204_DISP,
482 GM107_DISP, 483 GM107_DISP,
483 GK110_DISP, 484 GK110_DISP,
484 GK104_DISP, 485 GK104_DISP,
@@ -568,9 +569,10 @@ nouveau_display_suspend(struct drm_device *dev, bool runtime)
568 569
569 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 570 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
570 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 571 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
571 572 if (nv_crtc->cursor.nvbo) {
572 nouveau_bo_unmap(nv_crtc->cursor.nvbo); 573 nouveau_bo_unmap(nv_crtc->cursor.nvbo);
573 nouveau_bo_unpin(nv_crtc->cursor.nvbo); 574 nouveau_bo_unpin(nv_crtc->cursor.nvbo);
575 }
574 } 576 }
575 577
576 return 0; 578 return 0;
@@ -591,15 +593,17 @@ nouveau_display_resume(struct drm_device *dev, bool runtime)
591 if (!nouveau_fb || !nouveau_fb->nvbo) 593 if (!nouveau_fb || !nouveau_fb->nvbo)
592 continue; 594 continue;
593 595
594 ret = nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM); 596 ret = nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM, true);
595 if (ret) 597 if (ret)
596 NV_ERROR(drm, "Could not pin framebuffer\n"); 598 NV_ERROR(drm, "Could not pin framebuffer\n");
597 } 599 }
598 600
599 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 601 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
600 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 602 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
603 if (!nv_crtc->cursor.nvbo)
604 continue;
601 605
602 ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM); 606 ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM, true);
603 if (!ret) 607 if (!ret)
604 ret = nouveau_bo_map(nv_crtc->cursor.nvbo); 608 ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
605 if (ret) 609 if (ret)
@@ -630,9 +634,10 @@ nouveau_display_resume(struct drm_device *dev, bool runtime)
630 634
631 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 635 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
632 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); 636 struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
633 u32 offset = nv_crtc->cursor.nvbo->bo.offset;
634 637
635 nv_crtc->cursor.set_offset(nv_crtc, offset); 638 if (!nv_crtc->cursor.nvbo)
639 continue;
640 nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.nvbo->bo.offset);
636 nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x, 641 nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
637 nv_crtc->cursor_saved_y); 642 nv_crtc->cursor_saved_y);
638 } 643 }
@@ -710,7 +715,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
710 return -ENOMEM; 715 return -ENOMEM;
711 716
712 if (new_bo != old_bo) { 717 if (new_bo != old_bo) {
713 ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM); 718 ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM, true);
714 if (ret) 719 if (ret)
715 goto fail_free; 720 goto fail_free;
716 } 721 }
@@ -871,6 +876,7 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
871 if (ret) 876 if (ret)
872 return ret; 877 return ret;
873 878
879 bo->gem.dumb = true;
874 ret = drm_gem_handle_create(file_priv, &bo->gem, &args->handle); 880 ret = drm_gem_handle_create(file_priv, &bo->gem, &args->handle);
875 drm_gem_object_unreference_unlocked(&bo->gem); 881 drm_gem_object_unreference_unlocked(&bo->gem);
876 return ret; 882 return ret;
@@ -886,6 +892,14 @@ nouveau_display_dumb_map_offset(struct drm_file *file_priv,
886 gem = drm_gem_object_lookup(dev, file_priv, handle); 892 gem = drm_gem_object_lookup(dev, file_priv, handle);
887 if (gem) { 893 if (gem) {
888 struct nouveau_bo *bo = nouveau_gem_object(gem); 894 struct nouveau_bo *bo = nouveau_gem_object(gem);
895
896 /*
897 * We don't allow dumb mmaps on objects created using another
898 * interface.
899 */
900 WARN_ONCE(!(gem->dumb || gem->import_attach),
901 "Illegal dumb map of accelerated buffer.\n");
902
889 *poffset = drm_vma_node_offset_addr(&bo->bo.vma_node); 903 *poffset = drm_vma_node_offset_addr(&bo->bo.vma_node);
890 drm_gem_object_unreference_unlocked(gem); 904 drm_gem_object_unreference_unlocked(gem);
891 return 0; 905 return 0;
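[Editor's note] Dumb buffers are now tagged at creation (bo->gem.dumb) so the two interfaces can be kept apart: the dumb map-offset path warns when the handle was neither created dumb nor imported, and (further below, in nouveau_gem.c) the pushbuf validator warns when a dumb buffer is handed to the GPU. A one-line sketch of the invariant being enforced:

/* Sketch of the dumb-map invariant from the hunk above: only buffers
 * created through the dumb interface, or imported ones, may be dumb-mapped. */
static bool
dumb_map_allowed(const struct drm_gem_object *gem)
{
	return gem->dumb || gem->import_attach;
}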
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 62b97c4eef8d..65910e3aed0c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -613,26 +613,6 @@ fail_display:
613 return ret; 613 return ret;
614} 614}
615 615
616int nouveau_pmops_suspend(struct device *dev)
617{
618 struct pci_dev *pdev = to_pci_dev(dev);
619 struct drm_device *drm_dev = pci_get_drvdata(pdev);
620 int ret;
621
622 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
623 drm_dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
624 return 0;
625
626 ret = nouveau_do_suspend(drm_dev, false);
627 if (ret)
628 return ret;
629
630 pci_save_state(pdev);
631 pci_disable_device(pdev);
632 pci_set_power_state(pdev, PCI_D3hot);
633 return 0;
634}
635
636static int 616static int
637nouveau_do_resume(struct drm_device *dev, bool runtime) 617nouveau_do_resume(struct drm_device *dev, bool runtime)
638{ 618{
@@ -667,7 +647,29 @@ nouveau_do_resume(struct drm_device *dev, bool runtime)
667 return 0; 647 return 0;
668} 648}
669 649
670int nouveau_pmops_resume(struct device *dev) 650int
651nouveau_pmops_suspend(struct device *dev)
652{
653 struct pci_dev *pdev = to_pci_dev(dev);
654 struct drm_device *drm_dev = pci_get_drvdata(pdev);
655 int ret;
656
657 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF ||
658 drm_dev->switch_power_state == DRM_SWITCH_POWER_DYNAMIC_OFF)
659 return 0;
660
661 ret = nouveau_do_suspend(drm_dev, false);
662 if (ret)
663 return ret;
664
665 pci_save_state(pdev);
666 pci_disable_device(pdev);
667 pci_set_power_state(pdev, PCI_D3hot);
668 return 0;
669}
670
671int
672nouveau_pmops_resume(struct device *dev)
671{ 673{
672 struct pci_dev *pdev = to_pci_dev(dev); 674 struct pci_dev *pdev = to_pci_dev(dev);
673 struct drm_device *drm_dev = pci_get_drvdata(pdev); 675 struct drm_device *drm_dev = pci_get_drvdata(pdev);
@@ -687,20 +689,122 @@ int nouveau_pmops_resume(struct device *dev)
687 return nouveau_do_resume(drm_dev, false); 689 return nouveau_do_resume(drm_dev, false);
688} 690}
689 691
690static int nouveau_pmops_freeze(struct device *dev) 692static int
693nouveau_pmops_freeze(struct device *dev)
691{ 694{
692 struct pci_dev *pdev = to_pci_dev(dev); 695 struct pci_dev *pdev = to_pci_dev(dev);
693 struct drm_device *drm_dev = pci_get_drvdata(pdev); 696 struct drm_device *drm_dev = pci_get_drvdata(pdev);
694 return nouveau_do_suspend(drm_dev, false); 697 return nouveau_do_suspend(drm_dev, false);
695} 698}
696 699
697static int nouveau_pmops_thaw(struct device *dev) 700static int
701nouveau_pmops_thaw(struct device *dev)
698{ 702{
699 struct pci_dev *pdev = to_pci_dev(dev); 703 struct pci_dev *pdev = to_pci_dev(dev);
700 struct drm_device *drm_dev = pci_get_drvdata(pdev); 704 struct drm_device *drm_dev = pci_get_drvdata(pdev);
701 return nouveau_do_resume(drm_dev, false); 705 return nouveau_do_resume(drm_dev, false);
702} 706}
703 707
708static int
709nouveau_pmops_runtime_suspend(struct device *dev)
710{
711 struct pci_dev *pdev = to_pci_dev(dev);
712 struct drm_device *drm_dev = pci_get_drvdata(pdev);
713 int ret;
714
715 if (nouveau_runtime_pm == 0) {
716 pm_runtime_forbid(dev);
717 return -EBUSY;
718 }
719
720 /* are we optimus enabled? */
721 if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
722 DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
723 pm_runtime_forbid(dev);
724 return -EBUSY;
725 }
726
727 nv_debug_level(SILENT);
728 drm_kms_helper_poll_disable(drm_dev);
729 vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
730 nouveau_switcheroo_optimus_dsm();
731 ret = nouveau_do_suspend(drm_dev, true);
732 pci_save_state(pdev);
733 pci_disable_device(pdev);
734 pci_ignore_hotplug(pdev);
735 pci_set_power_state(pdev, PCI_D3cold);
736 drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
737 return ret;
738}
739
740static int
741nouveau_pmops_runtime_resume(struct device *dev)
742{
743 struct pci_dev *pdev = to_pci_dev(dev);
744 struct drm_device *drm_dev = pci_get_drvdata(pdev);
745 struct nvif_device *device = &nouveau_drm(drm_dev)->device;
746 int ret;
747
748 if (nouveau_runtime_pm == 0)
749 return -EINVAL;
750
751 pci_set_power_state(pdev, PCI_D0);
752 pci_restore_state(pdev);
753 ret = pci_enable_device(pdev);
754 if (ret)
755 return ret;
756 pci_set_master(pdev);
757
758 ret = nouveau_do_resume(drm_dev, true);
759 drm_kms_helper_poll_enable(drm_dev);
760 /* do magic */
761 nvif_mask(device, 0x88488, (1 << 25), (1 << 25));
762 vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
763 drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
764 nv_debug_level(NORMAL);
765 return ret;
766}
767
768static int
769nouveau_pmops_runtime_idle(struct device *dev)
770{
771 struct pci_dev *pdev = to_pci_dev(dev);
772 struct drm_device *drm_dev = pci_get_drvdata(pdev);
773 struct nouveau_drm *drm = nouveau_drm(drm_dev);
774 struct drm_crtc *crtc;
775
776 if (nouveau_runtime_pm == 0) {
777 pm_runtime_forbid(dev);
778 return -EBUSY;
779 }
780
781 /* are we optimus enabled? */
782 if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
783 DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
784 pm_runtime_forbid(dev);
785 return -EBUSY;
786 }
787
788 /* if we have a hdmi audio device - make sure it has a driver loaded */
789 if (drm->hdmi_device) {
790 if (!drm->hdmi_device->driver) {
791 DRM_DEBUG_DRIVER("failing to power off - no HDMI audio driver loaded\n");
792 pm_runtime_mark_last_busy(dev);
793 return -EBUSY;
794 }
795 }
796
797 list_for_each_entry(crtc, &drm->dev->mode_config.crtc_list, head) {
798 if (crtc->enabled) {
799 DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
800 return -EBUSY;
801 }
802 }
803 pm_runtime_mark_last_busy(dev);
804 pm_runtime_autosuspend(dev);
805 /* we don't want the main rpm_idle to call suspend - we want to autosuspend */
806 return 1;
807}
704 808
705static int 809static int
706nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv) 810nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
@@ -907,104 +1011,6 @@ nouveau_drm_pci_table[] = {
907 {} 1011 {}
908}; 1012};
909 1013
910static int nouveau_pmops_runtime_suspend(struct device *dev)
911{
912 struct pci_dev *pdev = to_pci_dev(dev);
913 struct drm_device *drm_dev = pci_get_drvdata(pdev);
914 int ret;
915
916 if (nouveau_runtime_pm == 0) {
917 pm_runtime_forbid(dev);
918 return -EBUSY;
919 }
920
921 /* are we optimus enabled? */
922 if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
923 DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
924 pm_runtime_forbid(dev);
925 return -EBUSY;
926 }
927
928 nv_debug_level(SILENT);
929 drm_kms_helper_poll_disable(drm_dev);
930 vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
931 nouveau_switcheroo_optimus_dsm();
932 ret = nouveau_do_suspend(drm_dev, true);
933 pci_save_state(pdev);
934 pci_disable_device(pdev);
935 pci_ignore_hotplug(pdev);
936 pci_set_power_state(pdev, PCI_D3cold);
937 drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
938 return ret;
939}
940
941static int nouveau_pmops_runtime_resume(struct device *dev)
942{
943 struct pci_dev *pdev = to_pci_dev(dev);
944 struct drm_device *drm_dev = pci_get_drvdata(pdev);
945 struct nvif_device *device = &nouveau_drm(drm_dev)->device;
946 int ret;
947
948 if (nouveau_runtime_pm == 0)
949 return -EINVAL;
950
951 pci_set_power_state(pdev, PCI_D0);
952 pci_restore_state(pdev);
953 ret = pci_enable_device(pdev);
954 if (ret)
955 return ret;
956 pci_set_master(pdev);
957
958 ret = nouveau_do_resume(drm_dev, true);
959 drm_kms_helper_poll_enable(drm_dev);
960 /* do magic */
961 nvif_mask(device, 0x88488, (1 << 25), (1 << 25));
962 vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
963 drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
964 nv_debug_level(NORMAL);
965 return ret;
966}
967
968static int nouveau_pmops_runtime_idle(struct device *dev)
969{
970 struct pci_dev *pdev = to_pci_dev(dev);
971 struct drm_device *drm_dev = pci_get_drvdata(pdev);
972 struct nouveau_drm *drm = nouveau_drm(drm_dev);
973 struct drm_crtc *crtc;
974
975 if (nouveau_runtime_pm == 0) {
976 pm_runtime_forbid(dev);
977 return -EBUSY;
978 }
979
980 /* are we optimus enabled? */
981 if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
982 DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
983 pm_runtime_forbid(dev);
984 return -EBUSY;
985 }
986
987 /* if we have a hdmi audio device - make sure it has a driver loaded */
988 if (drm->hdmi_device) {
989 if (!drm->hdmi_device->driver) {
990 DRM_DEBUG_DRIVER("failing to power off - no HDMI audio driver loaded\n");
991 pm_runtime_mark_last_busy(dev);
992 return -EBUSY;
993 }
994 }
995
996 list_for_each_entry(crtc, &drm->dev->mode_config.crtc_list, head) {
997 if (crtc->enabled) {
998 DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
999 return -EBUSY;
1000 }
1001 }
1002 pm_runtime_mark_last_busy(dev);
1003 pm_runtime_autosuspend(dev);
1004 /* we don't want the main rpm_idle to call suspend - we want to autosuspend */
1005 return 1;
1006}
1007
1008static void nouveau_display_options(void) 1014static void nouveau_display_options(void)
1009{ 1015{
1010 DRM_DEBUG_DRIVER("Loading Nouveau with parameters:\n"); 1016 DRM_DEBUG_DRIVER("Loading Nouveau with parameters:\n");
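[Editor's note] The runtime-PM handlers are only relocated, not changed; the contract worth noting is that runtime_idle returns -EBUSY to veto a power-off and 1 (after queuing an autosuspend) to stop the PM core from suspending synchronously. A sketch of the dev_pm_ops table these callbacks imply; nouveau's actual table sits outside this hunk, so the wiring below is illustrative:

/* Illustrative dev_pm_ops wiring for the callbacks above; nouveau's
 * real table is defined elsewhere in this file. */
static const struct dev_pm_ops example_pm_ops = {
	.suspend         = nouveau_pmops_suspend,
	.resume          = nouveau_pmops_resume,
	.freeze          = nouveau_pmops_freeze,
	.thaw            = nouveau_pmops_thaw,
	.runtime_suspend = nouveau_pmops_runtime_suspend,
	.runtime_resume  = nouveau_pmops_runtime_resume,
	.runtime_idle    = nouveau_pmops_runtime_idle,
};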
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 593ef8a2a069..3ed12a8cfc91 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -341,7 +341,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
341 goto out; 341 goto out;
342 } 342 }
343 343
344 ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM); 344 ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, false);
345 if (ret) { 345 if (ret) {
346 NV_ERROR(drm, "failed to pin fb: %d\n", ret); 346 NV_ERROR(drm, "failed to pin fb: %d\n", ret);
347 goto out_unref; 347 goto out_unref;
@@ -498,6 +498,23 @@ nouveau_fbcon_set_suspend_work(struct work_struct *work)
498 console_unlock(); 498 console_unlock();
499} 499}
500 500
501void
502nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
503{
504 struct nouveau_drm *drm = nouveau_drm(dev);
505 if (drm->fbcon) {
506 if (state == FBINFO_STATE_RUNNING) {
507 schedule_work(&drm->fbcon->work);
508 return;
509 }
510 flush_work(&drm->fbcon->work);
511 console_lock();
512 fb_set_suspend(drm->fbcon->helper.fbdev, state);
513 nouveau_fbcon_accel_save_disable(dev);
514 console_unlock();
515 }
516}
517
501int 518int
502nouveau_fbcon_init(struct drm_device *dev) 519nouveau_fbcon_init(struct drm_device *dev)
503{ 520{
@@ -557,20 +574,3 @@ nouveau_fbcon_fini(struct drm_device *dev)
557 kfree(drm->fbcon); 574 kfree(drm->fbcon);
558 drm->fbcon = NULL; 575 drm->fbcon = NULL;
559} 576}
560
561void
562nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
563{
564 struct nouveau_drm *drm = nouveau_drm(dev);
565 if (drm->fbcon) {
566 if (state == FBINFO_STATE_RUNNING) {
567 schedule_work(&drm->fbcon->work);
568 return;
569 }
570 flush_work(&drm->fbcon->work);
571 console_lock();
572 fb_set_suspend(drm->fbcon->helper.fbdev, state);
573 nouveau_fbcon_accel_save_disable(dev);
574 console_unlock();
575 }
576}
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 36951ee4b157..28d51a22a4bf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -444,6 +444,9 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
444 list_for_each_entry(nvbo, list, entry) { 444 list_for_each_entry(nvbo, list, entry) {
445 struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index]; 445 struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
446 446
447 WARN_ONCE(nvbo->gem.dumb,
448 "GPU use of dumb buffer is illegal.\n");
449
447 ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains, 450 ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains,
448 b->write_domains, 451 b->write_domains,
449 b->valid_domains); 452 b->valid_domains);
@@ -867,6 +870,7 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
867 else 870 else
868 ret = lret; 871 ret = lret;
869 } 872 }
873 nouveau_bo_sync_for_cpu(nvbo);
870 drm_gem_object_unreference_unlocked(gem); 874 drm_gem_object_unreference_unlocked(gem);
871 875
872 return ret; 876 return ret;
@@ -876,6 +880,17 @@ int
 nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
 			   struct drm_file *file_priv)
 {
+	struct drm_nouveau_gem_cpu_fini *req = data;
+	struct drm_gem_object *gem;
+	struct nouveau_bo *nvbo;
+
+	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
+	if (!gem)
+		return -ENOENT;
+	nvbo = nouveau_gem_object(gem);
+
+	nouveau_bo_sync_for_device(nvbo);
+	drm_gem_object_unreference_unlocked(gem);
 	return 0;
 }
 
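
With nouveau_bo_sync_for_cpu()/_for_device() hooked in here, the two ioctls now bracket cache maintenance as well as fencing. A hedged userspace sketch of the pairing, using the uapi structs from drm/nouveau_drm.h (error handling trimmed; fd, handle and map are assumed to exist):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/nouveau_drm.h>

static void write_bo_from_cpu(int fd, uint32_t handle, void *map, size_t len)
{
	struct drm_nouveau_gem_cpu_prep prep = {
		.handle = handle,
		.flags = NOUVEAU_GEM_CPU_PREP_WRITE,
	};
	struct drm_nouveau_gem_cpu_fini fini = { .handle = handle };

	ioctl(fd, DRM_IOCTL_NOUVEAU_GEM_CPU_PREP, &prep);	/* wait + sync for CPU */
	memset(map, 0, len);					/* CPU access */
	ioctl(fd, DRM_IOCTL_NOUVEAU_GEM_CPU_FINI, &fini);	/* hand back to device */
}
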
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index 246a824c16ca..b307bbedd4c4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -27,6 +27,7 @@
 #include <linux/of.h>
 #include <linux/reset.h>
 #include <linux/regulator/consumer.h>
+#include <soc/tegra/fuse.h>
 #include <soc/tegra/pmc.h>
 
 #include "nouveau_drm.h"
@@ -128,6 +129,7 @@ static int nouveau_platform_probe(struct platform_device *pdev)
 	}
 
 	device->gpu = gpu;
+	device->gpu_speedo = tegra_sku_info.gpu_speedo_value;
 
 	err = drm_dev_register(drm, 0);
 	if (err < 0)
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.h b/drivers/gpu/drm/nouveau/nouveau_platform.h
index 91f66504900e..58c28b5653d5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.h
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.h
@@ -41,6 +41,8 @@ struct nouveau_platform_device {
 	struct nouveau_device device;
 
 	struct nouveau_platform_gpu *gpu;
+
+	int gpu_speedo;
 };
 
 #define nv_device_to_platform(d) \
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index 228226ab27fc..dd32ad6db53d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -93,7 +93,7 @@ int nouveau_gem_prime_pin(struct drm_gem_object *obj)
 	int ret;
 
 	/* pin buffer into GTT */
-	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_TT);
+	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_TT, false);
 	if (ret)
 		return -EINVAL;
 
diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c
index 40b461c7d5c5..57860cfa1de5 100644
--- a/drivers/gpu/drm/nouveau/nv17_fence.c
+++ b/drivers/gpu/drm/nouveau/nv17_fence.c
@@ -131,7 +131,7 @@ nv17_fence_create(struct nouveau_drm *drm)
 	ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
 			     0, 0x0000, NULL, NULL, &priv->bo);
 	if (!ret) {
-		ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
+		ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM, false);
 		if (!ret) {
 			ret = nouveau_bo_map(priv->bo);
 			if (ret)
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index eb8b36714fa1..490b90866baf 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -26,6 +26,7 @@
 
 #include <drm/drmP.h>
 #include <drm/drm_crtc_helper.h>
+#include <drm/drm_plane_helper.h>
 #include <drm/drm_dp_helper.h>
 
 #include <nvif/class.h>
@@ -65,15 +66,29 @@ static int
 nv50_chan_create(struct nvif_object *disp, const u32 *oclass, u8 head,
 		 void *data, u32 size, struct nv50_chan *chan)
 {
+	const u32 handle = (oclass[0] << 16) | head;
+	u32 sclass[8];
+	int ret, i;
+
+	ret = nvif_object_sclass(disp, sclass, ARRAY_SIZE(sclass));
+	WARN_ON(ret > ARRAY_SIZE(sclass));
+	if (ret < 0)
+		return ret;
+
 	while (oclass[0]) {
-		int ret = nvif_object_init(disp, NULL, (oclass[0] << 16) | head,
-					   oclass[0], data, size,
-					   &chan->user);
-		if (oclass++, ret == 0) {
-			nvif_object_map(&chan->user);
-			return ret;
+		for (i = 0; i < ARRAY_SIZE(sclass); i++) {
+			if (sclass[i] == oclass[0]) {
+				ret = nvif_object_init(disp, NULL, handle,
+						       oclass[0], data, size,
+						       &chan->user);
+				if (ret == 0)
+					nvif_object_map(&chan->user);
+				return ret;
+			}
 		}
+		oclass++;
 	}
+
 	return -ENOSYS;
 }
 
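
The rewritten loop only attempts nvif_object_init() for a class the display object actually advertises, instead of trying each entry and relying on init failure. The shape of that negotiation as a hedged standalone sketch (pick_class() is illustrative, not a nouveau function):

/* Walk a newest-first, zero-terminated wish list and return the first
 * class present in the "have" list reported by the object, or 0. */
static u32 pick_class(const u32 *wish, const u32 *have, int n_have)
{
	int i;

	for (; *wish; wish++)
		for (i = 0; i < n_have; i++)
			if (have[i] == *wish)
				return *wish;
	return 0;
}
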
@@ -110,6 +125,7 @@ nv50_pioc_create(struct nvif_object *disp, const u32 *oclass, u8 head,
 
 struct nv50_curs {
 	struct nv50_pioc base;
+	struct nouveau_bo *image;
 };
 
 static int
@@ -265,6 +281,7 @@ nv50_core_create(struct nvif_object *disp, u64 syncbuf, struct nv50_mast *core)
 		.pushbuf = 0xb0007d00,
 	};
 	static const u32 oclass[] = {
+		GM204_DISP_CORE_CHANNEL_DMA,
 		GM107_DISP_CORE_CHANNEL_DMA,
 		GK110_DISP_CORE_CHANNEL_DMA,
 		GK104_DISP_CORE_CHANNEL_DMA,
@@ -424,8 +441,21 @@ evo_kick(u32 *push, void *evoc)
 	mutex_unlock(&dmac->lock);
 }
 
+#if 1
 #define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m))
 #define evo_data(p,d) *((p)++) = (d)
+#else
+#define evo_mthd(p,m,s) do { \
+	const u32 _m = (m), _s = (s); \
+	printk(KERN_ERR "%04x %d %s\n", _m, _s, __func__); \
+	*((p)++) = ((_s << 18) | _m); \
+} while(0)
+#define evo_data(p,d) do { \
+	const u32 _d = (d); \
+	printk(KERN_ERR "\t%08x\n", _d); \
+	*((p)++) = _d; \
+} while(0)
+#endif
 
 static bool
 evo_sync_wait(void *data)
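
Both halves of the #if emit the same EVO push-buffer encoding — a header word with the data count in bits 18 and up and the method address in the low bits, followed by the data words — the #else variant merely logs each word. A worked example of the encoding, using values from the cursor code below:

u32 push[3], *p = push;

*(p++) = (2 << 18) | 0x0880;	/* header: count=2, method=0x0880 -> 0x00080880 */
*(p++) = 0x85000000;		/* first data word */
*(p++) = 0x12345678;		/* second data word (arbitrary example) */
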
@@ -887,23 +917,24 @@ static void
 nv50_crtc_cursor_show(struct nouveau_crtc *nv_crtc)
 {
 	struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+	struct nv50_curs *curs = nv50_curs(&nv_crtc->base);
 	u32 *push = evo_wait(mast, 16);
 	if (push) {
 		if (nv50_vers(mast) < G82_DISP_CORE_CHANNEL_DMA) {
 			evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2);
 			evo_data(push, 0x85000000);
-			evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
+			evo_data(push, curs->image->bo.offset >> 8);
 		} else
 		if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA) {
 			evo_mthd(push, 0x0880 + (nv_crtc->index * 0x400), 2);
 			evo_data(push, 0x85000000);
-			evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
+			evo_data(push, curs->image->bo.offset >> 8);
 			evo_mthd(push, 0x089c + (nv_crtc->index * 0x400), 1);
 			evo_data(push, mast->base.vram.handle);
 		} else {
 			evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2);
 			evo_data(push, 0x85000000);
-			evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
+			evo_data(push, curs->image->bo.offset >> 8);
 			evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
 			evo_data(push, mast->base.vram.handle);
 		}
@@ -940,8 +971,9 @@ static void
 nv50_crtc_cursor_show_hide(struct nouveau_crtc *nv_crtc, bool show, bool update)
 {
 	struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
+	struct nv50_curs *curs = nv50_curs(&nv_crtc->base);
 
-	if (show)
+	if (show && curs->image)
 		nv50_crtc_cursor_show(nv_crtc);
 	else
 		nv50_crtc_cursor_hide(nv_crtc);
@@ -1041,7 +1073,7 @@ nv50_crtc_commit(struct drm_crtc *crtc)
 		evo_kick(push, mast);
 	}
 
-	nv50_crtc_cursor_show_hide(nv_crtc, nv_crtc->cursor.visible, true);
+	nv50_crtc_cursor_show_hide(nv_crtc, true, true);
 	nv50_display_flip_next(crtc, crtc->primary->fb, NULL, 1);
 }
 
@@ -1060,7 +1092,7 @@ nv50_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
 	struct nv50_head *head = nv50_head(crtc);
 	int ret;
 
-	ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM);
+	ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM, true);
 	if (ret == 0) {
 		if (head->image)
 			nouveau_bo_unpin(head->image);
@@ -1241,13 +1273,13 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
 		     uint32_t handle, uint32_t width, uint32_t height)
 {
 	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+	struct nv50_curs *curs = nv50_curs(crtc);
 	struct drm_device *dev = crtc->dev;
-	struct drm_gem_object *gem;
-	struct nouveau_bo *nvbo;
-	bool visible = (handle != 0);
-	int i, ret = 0;
+	struct drm_gem_object *gem = NULL;
+	struct nouveau_bo *nvbo = NULL;
+	int ret = 0;
 
-	if (visible) {
+	if (handle) {
 		if (width != 64 || height != 64)
 			return -EINVAL;
 
@@ -1256,23 +1288,17 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
 			return -ENOENT;
 		nvbo = nouveau_gem_object(gem);
 
-		ret = nouveau_bo_map(nvbo);
-		if (ret == 0) {
-			for (i = 0; i < 64 * 64; i++) {
-				u32 v = nouveau_bo_rd32(nvbo, i);
-				nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, v);
-			}
-			nouveau_bo_unmap(nvbo);
-		}
-
-		drm_gem_object_unreference_unlocked(gem);
+		ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM, true);
 	}
 
-	if (visible != nv_crtc->cursor.visible) {
-		nv50_crtc_cursor_show_hide(nv_crtc, visible, true);
-		nv_crtc->cursor.visible = visible;
+	if (ret == 0) {
+		if (curs->image)
+			nouveau_bo_unpin(curs->image);
+		nouveau_bo_ref(nvbo, &curs->image);
 	}
+	drm_gem_object_unreference_unlocked(gem);
 
+	nv50_crtc_cursor_show_hide(nv_crtc, true, true);
 	return ret;
 }
 
@@ -1327,10 +1353,10 @@ nv50_crtc_destroy(struct drm_crtc *crtc)
 		nouveau_bo_unpin(head->image);
 	nouveau_bo_ref(NULL, &head->image);
 
-	nouveau_bo_unmap(nv_crtc->cursor.nvbo);
-	if (nv_crtc->cursor.nvbo)
-		nouveau_bo_unpin(nv_crtc->cursor.nvbo);
-	nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
+	/*XXX: ditto */
+	if (head->curs.image)
+		nouveau_bo_unpin(head->curs.image);
+	nouveau_bo_ref(NULL, &head->curs.image);
 
 	nouveau_bo_unmap(nv_crtc->lut.nvbo);
 	if (nv_crtc->lut.nvbo)
@@ -1362,16 +1388,6 @@ static const struct drm_crtc_funcs nv50_crtc_func = {
 	.page_flip = nouveau_crtc_page_flip,
 };
 
-static void
-nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
-{
-}
-
-static void
-nv50_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
-{
-}
-
 static int
 nv50_crtc_create(struct drm_device *dev, int index)
 {
@@ -1390,8 +1406,6 @@ nv50_crtc_create(struct drm_device *dev, int index)
 	head->base.set_color_vibrance = nv50_crtc_set_color_vibrance;
 	head->base.color_vibrance = 50;
 	head->base.vibrant_hue = 0;
-	head->base.cursor.set_offset = nv50_cursor_set_offset;
-	head->base.cursor.set_pos = nv50_cursor_set_pos;
 	for (i = 0; i < 256; i++) {
 		head->base.lut.r[i] = i << 8;
 		head->base.lut.g[i] = i << 8;
@@ -1406,7 +1420,7 @@ nv50_crtc_create(struct drm_device *dev, int index)
 	ret = nouveau_bo_new(dev, 8192, 0x100, TTM_PL_FLAG_VRAM,
 			     0, 0x0000, NULL, NULL, &head->base.lut.nvbo);
 	if (!ret) {
-		ret = nouveau_bo_pin(head->base.lut.nvbo, TTM_PL_FLAG_VRAM);
+		ret = nouveau_bo_pin(head->base.lut.nvbo, TTM_PL_FLAG_VRAM, true);
 		if (!ret) {
 			ret = nouveau_bo_map(head->base.lut.nvbo);
 			if (ret)
@@ -1426,22 +1440,6 @@ nv50_crtc_create(struct drm_device *dev, int index)
 	if (ret)
 		goto out;
 
-	ret = nouveau_bo_new(dev, 64 * 64 * 4, 0x100, TTM_PL_FLAG_VRAM,
-			     0, 0x0000, NULL, NULL, &head->base.cursor.nvbo);
-	if (!ret) {
-		ret = nouveau_bo_pin(head->base.cursor.nvbo, TTM_PL_FLAG_VRAM);
-		if (!ret) {
-			ret = nouveau_bo_map(head->base.cursor.nvbo);
-			if (ret)
-				nouveau_bo_unpin(head->base.lut.nvbo);
-		}
-		if (ret)
-			nouveau_bo_ref(NULL, &head->base.cursor.nvbo);
-	}
-
-	if (ret)
-		goto out;
-
 	/* allocate page flip / sync resources */
 	ret = nv50_base_create(disp->disp, index, disp->sync->bo.offset,
 			       &head->sync);
@@ -1701,7 +1699,8 @@ nv50_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
 	drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
 	memcpy(args.data, nv_connector->base.eld, sizeof(args.data));
 
-	nvif_mthd(disp->disp, 0, &args, sizeof(args.base) + args.data[2] * 4);
+	nvif_mthd(disp->disp, 0, &args,
+		  sizeof(args.base) + drm_eld_size(args.data));
 }
 
 static void
@@ -2373,11 +2372,6 @@ nv50_fb_ctor(struct drm_framebuffer *fb)
 	u8 kind = nouveau_bo_tile_layout(nvbo) >> 8;
 	u8 tile = nvbo->tile_mode;
 
-	if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) {
-		NV_ERROR(drm, "framebuffer requires contiguous bo\n");
-		return -EINVAL;
-	}
-
 	if (drm->device.info.chipset >= 0xc0)
 		tile >>= 4; /* yep.. */
 
@@ -2491,7 +2485,7 @@ nv50_display_create(struct drm_device *dev)
 	ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
 			     0, 0x0000, NULL, NULL, &disp->sync);
 	if (!ret) {
-		ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM);
+		ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM, true);
 		if (!ret) {
 			ret = nouveau_bo_map(disp->sync);
 			if (ret)
diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c
index 22d242b37962..a82d9ea7c6fd 100644
--- a/drivers/gpu/drm/nouveau/nv50_fence.c
+++ b/drivers/gpu/drm/nouveau/nv50_fence.c
@@ -102,7 +102,7 @@ nv50_fence_create(struct nouveau_drm *drm)
 	ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
 			     0, 0x0000, NULL, NULL, &priv->bo);
 	if (!ret) {
-		ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
+		ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM, false);
 		if (!ret) {
 			ret = nouveau_bo_map(priv->bo);
 			if (ret)
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index d6c6c87c3f07..cb5b88938d45 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -234,7 +234,7 @@ nv84_fence_create(struct nouveau_drm *drm)
 	ret = nouveau_bo_new(drm->dev, 16 * priv->base.contexts, 0,
 			     TTM_PL_FLAG_VRAM, 0, 0, NULL, NULL, &priv->bo);
 	if (ret == 0) {
-		ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM);
+		ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM, false);
 		if (ret == 0) {
 			ret = nouveau_bo_map(priv->bo);
 			if (ret)
@@ -246,10 +246,10 @@ nv84_fence_create(struct nouveau_drm *drm)
 
 	if (ret == 0)
 		ret = nouveau_bo_new(drm->dev, 16 * priv->base.contexts, 0,
-				     TTM_PL_FLAG_TT, 0, 0, NULL, NULL,
-				     &priv->bo_gart);
+				     TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED, 0,
+				     0, NULL, NULL, &priv->bo_gart);
 	if (ret == 0) {
-		ret = nouveau_bo_pin(priv->bo_gart, TTM_PL_FLAG_TT);
+		ret = nouveau_bo_pin(priv->bo_gart, TTM_PL_FLAG_TT, false);
 		if (ret == 0) {
 			ret = nouveau_bo_map(priv->bo_gart);
 			if (ret)
diff --git a/drivers/gpu/drm/nouveau/nvif/class.h b/drivers/gpu/drm/nouveau/nvif/class.h
index e5a27df0672b..4e308eacb27a 100644
--- a/drivers/gpu/drm/nouveau/nvif/class.h
+++ b/drivers/gpu/drm/nouveau/nvif/class.h
@@ -35,6 +35,7 @@
 #define GK104_DISP                                     0x00009170
 #define GK110_DISP                                     0x00009270
 #define GM107_DISP                                     0x00009470
+#define GM204_DISP                                     0x00009570
 
 #define NV50_DISP_CURSOR                               0x0000507a
 #define G82_DISP_CURSOR                                0x0000827a
@@ -65,6 +66,7 @@
 #define GK104_DISP_CORE_CHANNEL_DMA                    0x0000917d
 #define GK110_DISP_CORE_CHANNEL_DMA                    0x0000927d
 #define GM107_DISP_CORE_CHANNEL_DMA                    0x0000947d
+#define GM204_DISP_CORE_CHANNEL_DMA                    0x0000957d
 
 #define NV50_DISP_OVERLAY_CHANNEL_DMA                  0x0000507e
 #define G82_DISP_OVERLAY_CHANNEL_DMA                   0x0000827e
@@ -131,6 +133,7 @@ struct nv_device_v0 {
 #define NV_DEVICE_V0_DISABLE_COPY1        0x0000010000000000ULL
 #define NV_DEVICE_V0_DISABLE_VIC          0x0000020000000000ULL
 #define NV_DEVICE_V0_DISABLE_VENC         0x0000040000000000ULL
+#define NV_DEVICE_V0_DISABLE_COPY2        0x0000080000000000ULL
 	__u64 disable;	/* disable particular subsystems */
 	__u64 debug0;	/* as above, but *internal* ids, and *NOT* ABI */
 };
diff --git a/drivers/gpu/drm/nouveau/nvif/client.c b/drivers/gpu/drm/nouveau/nvif/client.c
index 3c4df1fc26dc..3f7ac5bc8e03 100644
--- a/drivers/gpu/drm/nouveau/nvif/client.c
+++ b/drivers/gpu/drm/nouveau/nvif/client.c
@@ -62,6 +62,7 @@ nvif_drivers[] = {
 #else
 	&nvif_driver_drm,
 	&nvif_driver_lib,
+	&nvif_driver_null,
 #endif
 	NULL
 };
diff --git a/drivers/gpu/drm/nouveau/nvif/driver.h b/drivers/gpu/drm/nouveau/nvif/driver.h
index ac4bdb3ea506..8bd39e69229c 100644
--- a/drivers/gpu/drm/nouveau/nvif/driver.h
+++ b/drivers/gpu/drm/nouveau/nvif/driver.h
@@ -17,5 +17,6 @@ struct nvif_driver {
 extern const struct nvif_driver nvif_driver_nvkm;
 extern const struct nvif_driver nvif_driver_drm;
 extern const struct nvif_driver nvif_driver_lib;
+extern const struct nvif_driver nvif_driver_null;
 
 #endif
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 2d28dc337cfb..b0566a1ca28f 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -20,6 +20,7 @@
 #include "omap_drv.h"
 
 #include <drm/drm_mode.h>
+#include <drm/drm_plane_helper.h>
 #include "drm_crtc.h"
 #include "drm_crtc_helper.h"
 
diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c
index e4849413ee80..aeb91ed653c9 100644
--- a/drivers/gpu/drm/omapdrm/omap_gem.c
+++ b/drivers/gpu/drm/omapdrm/omap_gem.c
@@ -612,8 +612,7 @@ int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
 {
 	union omap_gem_size gsize;
 
-	/* in case someone tries to feed us a completely bogus stride: */
-	args->pitch = align_pitch(args->pitch, args->width, args->bpp);
+	args->pitch = align_pitch(0, args->width, args->bpp);
 	args->size = PAGE_ALIGN(args->pitch * args->height);
 
 	gsize = (union omap_gem_size){
diff --git a/drivers/gpu/drm/omapdrm/omap_plane.c b/drivers/gpu/drm/omapdrm/omap_plane.c
index 891a4dc608af..ee8e2b3a117e 100644
--- a/drivers/gpu/drm/omapdrm/omap_plane.c
+++ b/drivers/gpu/drm/omapdrm/omap_plane.c
@@ -388,20 +388,15 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
 	struct drm_plane *plane = NULL;
 	struct omap_plane *omap_plane;
 	struct omap_overlay_info *info;
-	int ret;
 
 	DBG("%s: priv=%d", plane_names[id], private_plane);
 
 	omap_plane = kzalloc(sizeof(*omap_plane), GFP_KERNEL);
 	if (!omap_plane)
-		goto fail;
+		return NULL;
 
-	ret = drm_flip_work_init(&omap_plane->unpin_work, 16,
+	drm_flip_work_init(&omap_plane->unpin_work,
 			"unpin", unpin_worker);
-	if (ret) {
-		dev_err(dev->dev, "could not allocate unpin FIFO\n");
-		goto fail;
-	}
 
 	omap_plane->nformats = omap_framebuffer_get_formats(
 			omap_plane->formats, ARRAY_SIZE(omap_plane->formats),
@@ -443,10 +438,4 @@ struct drm_plane *omap_plane_init(struct drm_device *dev,
 	omap_plane->info.zorder = id;
 
 	return plane;
-
-fail:
-	if (plane)
-		omap_plane_destroy(plane);
-
-	return NULL;
 }
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index bee9f72b3a93..024e98ef8e4d 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -27,4 +27,17 @@ config DRM_PANEL_S6E8AA0
 	select DRM_MIPI_DSI
 	select VIDEOMODE_HELPERS
 
+config DRM_PANEL_SHARP_LQ101R1SX01
+	tristate "Sharp LQ101R1SX01 panel"
+	depends on OF
+	depends on DRM_MIPI_DSI
+	help
+	  Say Y here if you want to enable support for Sharp LQ101R1SX01
+	  TFT-LCD modules. The panel has a 2560x1600 resolution and uses
+	  24 bit RGB per pixel. It provides a dual MIPI DSI interface to
+	  the host and has a built-in LED backlight.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called panel-sharp-lq101r1sx01.
+
 endmenu
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index 8b929212fad7..4b2a0430804b 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -1,3 +1,4 @@
 obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
 obj-$(CONFIG_DRM_PANEL_LD9040) += panel-ld9040.o
 obj-$(CONFIG_DRM_PANEL_S6E8AA0) += panel-s6e8aa0.o
+obj-$(CONFIG_DRM_PANEL_SHARP_LQ101R1SX01) += panel-sharp-lq101r1sx01.o
diff --git a/drivers/gpu/drm/panel/panel-ld9040.c b/drivers/gpu/drm/panel/panel-ld9040.c
index 42ac67b21e9f..08cf2c588c3d 100644
--- a/drivers/gpu/drm/panel/panel-ld9040.c
+++ b/drivers/gpu/drm/panel/panel-ld9040.c
@@ -145,7 +145,7 @@ static void ld9040_dcs_write(struct ld9040 *ctx, const u8 *data, size_t len)
 	if (ctx->error < 0 || len == 0)
 		return;
 
-	dev_dbg(ctx->dev, "writing dcs seq: %*ph\n", len, data);
+	dev_dbg(ctx->dev, "writing dcs seq: %*ph\n", (int)len, data);
 	ret = ld9040_spi_write_word(ctx, *data);
 
 	while (!ret && --len) {
@@ -154,8 +154,8 @@ static void ld9040_dcs_write(struct ld9040 *ctx, const u8 *data, size_t len)
 	}
 
 	if (ret) {
-		dev_err(ctx->dev, "error %d writing dcs seq: %*ph\n", ret, len,
-			data);
+		dev_err(ctx->dev, "error %d writing dcs seq: %*ph\n", ret,
+			(int)len, data);
 		ctx->error = ret;
 	}
 
@@ -336,17 +336,12 @@ static int ld9040_probe(struct spi_device *spi)
 	if (ret < 0)
 		return ret;
 
-	ctx->reset_gpio = devm_gpiod_get(dev, "reset");
+	ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
 	if (IS_ERR(ctx->reset_gpio)) {
 		dev_err(dev, "cannot get reset-gpios %ld\n",
 			PTR_ERR(ctx->reset_gpio));
 		return PTR_ERR(ctx->reset_gpio);
 	}
-	ret = gpiod_direction_output(ctx->reset_gpio, 1);
-	if (ret < 0) {
-		dev_err(dev, "cannot configure reset-gpios %d\n", ret);
-		return ret;
-	}
 
 	spi->bits_per_word = 9;
 	ret = spi_setup(spi);
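
This is the gpiod consumer API change of that cycle: devm_gpiod_get() grew a flags argument, so requesting the line and configuring it as an output driven high collapses into one call. A sketch of the before/after, matching the hunk above:

#include <linux/gpio/consumer.h>

/* before: request, then configure direction separately
 *	desc = devm_gpiod_get(dev, "reset");
 *	ret = gpiod_direction_output(desc, 1);
 * after: one call, initial level carried by the flags argument */
static struct gpio_desc *get_reset(struct device *dev)
{
	return devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
}
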
diff --git a/drivers/gpu/drm/panel/panel-s6e8aa0.c b/drivers/gpu/drm/panel/panel-s6e8aa0.c
index b5217fe37f02..144b2733e3d7 100644
--- a/drivers/gpu/drm/panel/panel-s6e8aa0.c
+++ b/drivers/gpu/drm/panel/panel-s6e8aa0.c
@@ -141,10 +141,10 @@ static void s6e8aa0_dcs_write(struct s6e8aa0 *ctx, const void *data, size_t len)
 	if (ctx->error < 0)
 		return;
 
-	ret = mipi_dsi_dcs_write(dsi, data, len);
+	ret = mipi_dsi_dcs_write_buffer(dsi, data, len);
 	if (ret < 0) {
-		dev_err(ctx->dev, "error %zd writing dcs seq: %*ph\n", ret, len,
-			data);
+		dev_err(ctx->dev, "error %zd writing dcs seq: %*ph\n", ret,
+			(int)len, data);
 		ctx->error = ret;
 	}
 }
@@ -800,27 +800,15 @@ static void s6e8aa0_panel_init(struct s6e8aa0 *ctx)
 }
 
 static void s6e8aa0_set_maximum_return_packet_size(struct s6e8aa0 *ctx,
-						   int size)
+						   u16 size)
 {
 	struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
-	const struct mipi_dsi_host_ops *ops = dsi->host->ops;
-	u8 buf[] = {size, 0};
-	struct mipi_dsi_msg msg = {
-		.channel = dsi->channel,
-		.type = MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE,
-		.tx_len = sizeof(buf),
-		.tx_buf = buf
-	};
 	int ret;
 
 	if (ctx->error < 0)
 		return;
 
-	if (!ops || !ops->transfer)
-		ret = -EIO;
-	else
-		ret = ops->transfer(dsi->host, &msg);
-
+	ret = mipi_dsi_set_maximum_return_packet_size(dsi, size);
 	if (ret < 0) {
 		dev_err(ctx->dev,
 			"error %d setting maximum return packet size to %d\n",
@@ -1019,17 +1007,12 @@ static int s6e8aa0_probe(struct mipi_dsi_device *dsi)
 		return ret;
 	}
 
-	ctx->reset_gpio = devm_gpiod_get(dev, "reset");
+	ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
 	if (IS_ERR(ctx->reset_gpio)) {
 		dev_err(dev, "cannot get reset-gpios %ld\n",
 			PTR_ERR(ctx->reset_gpio));
 		return PTR_ERR(ctx->reset_gpio);
 	}
-	ret = gpiod_direction_output(ctx->reset_gpio, 1);
-	if (ret < 0) {
-		dev_err(dev, "cannot configure reset-gpios %d\n", ret);
-		return ret;
-	}
 
 	ctx->brightness = GAMMA_LEVEL_NUM - 1;
 
@@ -1069,7 +1052,6 @@ static struct mipi_dsi_driver s6e8aa0_driver = {
 	.remove = s6e8aa0_remove,
 	.driver = {
 		.name = "panel_s6e8aa0",
-		.owner = THIS_MODULE,
 		.of_match_table = s6e8aa0_of_match,
 	},
 };
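
The open-coded MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE message above is replaced by a core helper. A hedged sketch of a caller preparing to read a multi-byte DCS response (dsi is the panel's mipi_dsi_device; the 3-byte value is an arbitrary example):

#include <drm/drm_mipi_dsi.h>

static int prepare_long_read(struct mipi_dsi_device *dsi)
{
	/* allow the peripheral to return up to 3 bytes per read */
	return mipi_dsi_set_maximum_return_packet_size(dsi, 3);
}
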
diff --git a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
new file mode 100644
index 000000000000..9d81759d82fc
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c
@@ -0,0 +1,464 @@
+/*
+ * Copyright (C) 2014 NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/backlight.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+
+#include <video/mipi_display.h>
+
+#include <linux/host1x.h>
+
+struct sharp_panel {
+	struct drm_panel base;
+	/* the datasheet refers to them as DSI-LINK1 and DSI-LINK2 */
+	struct mipi_dsi_device *link1;
+	struct mipi_dsi_device *link2;
+
+	struct backlight_device *backlight;
+	struct regulator *supply;
+
+	bool prepared;
+	bool enabled;
+
+	const struct drm_display_mode *mode;
+};
+
+static inline struct sharp_panel *to_sharp_panel(struct drm_panel *panel)
+{
+	return container_of(panel, struct sharp_panel, base);
+}
+
+static int sharp_panel_write(struct sharp_panel *sharp, u16 offset, u8 value)
+{
+	u8 payload[3] = { offset >> 8, offset & 0xff, value };
+	struct mipi_dsi_device *dsi = sharp->link1;
+	ssize_t err;
+
+	err = mipi_dsi_generic_write(dsi, payload, sizeof(payload));
+	if (err < 0) {
+		dev_err(&dsi->dev, "failed to write %02x to %04x: %zd\n",
+			value, offset, err);
+		return err;
+	}
+
+	err = mipi_dsi_dcs_nop(dsi);
+	if (err < 0) {
+		dev_err(&dsi->dev, "failed to send DCS nop: %zd\n", err);
+		return err;
+	}
+
+	usleep_range(10, 20);
+
+	return 0;
+}
+
+static __maybe_unused int sharp_panel_read(struct sharp_panel *sharp,
+					   u16 offset, u8 *value)
+{
+	ssize_t err;
+
+	cpu_to_be16s(&offset);
+
+	err = mipi_dsi_generic_read(sharp->link1, &offset, sizeof(offset),
+				    value, sizeof(*value));
+	if (err < 0)
+		dev_err(&sharp->link1->dev, "failed to read from %04x: %zd\n",
+			offset, err);
+
+	return err;
+}
+
+static int sharp_panel_disable(struct drm_panel *panel)
+{
+	struct sharp_panel *sharp = to_sharp_panel(panel);
+
+	if (!sharp->enabled)
+		return 0;
+
+	if (sharp->backlight) {
+		sharp->backlight->props.power = FB_BLANK_POWERDOWN;
+		backlight_update_status(sharp->backlight);
+	}
+
+	sharp->enabled = false;
+
+	return 0;
+}
+
+static int sharp_panel_unprepare(struct drm_panel *panel)
+{
+	struct sharp_panel *sharp = to_sharp_panel(panel);
+	int err;
+
+	if (!sharp->prepared)
+		return 0;
+
+	err = mipi_dsi_dcs_set_display_off(sharp->link1);
+	if (err < 0)
+		dev_err(panel->dev, "failed to set display off: %d\n", err);
+
+	err = mipi_dsi_dcs_enter_sleep_mode(sharp->link1);
+	if (err < 0)
+		dev_err(panel->dev, "failed to enter sleep mode: %d\n", err);
+
+	msleep(120);
+
+	regulator_disable(sharp->supply);
+
+	sharp->prepared = false;
+
+	return 0;
+}
+
+static int sharp_setup_symmetrical_split(struct mipi_dsi_device *left,
+					 struct mipi_dsi_device *right,
+					 const struct drm_display_mode *mode)
+{
+	int err;
+
+	err = mipi_dsi_dcs_set_column_address(left, 0, mode->hdisplay / 2 - 1);
+	if (err < 0) {
+		dev_err(&left->dev, "failed to set column address: %d\n", err);
+		return err;
+	}
+
+	err = mipi_dsi_dcs_set_page_address(left, 0, mode->vdisplay - 1);
+	if (err < 0) {
+		dev_err(&left->dev, "failed to set page address: %d\n", err);
+		return err;
+	}
+
+	err = mipi_dsi_dcs_set_column_address(right, mode->hdisplay / 2,
+					      mode->hdisplay - 1);
+	if (err < 0) {
+		dev_err(&right->dev, "failed to set column address: %d\n", err);
+		return err;
+	}
+
+	err = mipi_dsi_dcs_set_page_address(right, 0, mode->vdisplay - 1);
+	if (err < 0) {
+		dev_err(&right->dev, "failed to set page address: %d\n", err);
+		return err;
+	}
+
+	return 0;
+}
+
+static int sharp_panel_prepare(struct drm_panel *panel)
+{
+	struct sharp_panel *sharp = to_sharp_panel(panel);
+	u8 format = MIPI_DCS_PIXEL_FMT_24BIT;
+	int err;
+
+	if (sharp->prepared)
+		return 0;
+
+	err = regulator_enable(sharp->supply);
+	if (err < 0)
+		return err;
+
+	usleep_range(10000, 20000);
+
+	err = mipi_dsi_dcs_soft_reset(sharp->link1);
+	if (err < 0) {
+		dev_err(panel->dev, "soft reset failed: %d\n", err);
+		goto poweroff;
+	}
+
+	msleep(120);
+
+	err = mipi_dsi_dcs_exit_sleep_mode(sharp->link1);
+	if (err < 0) {
+		dev_err(panel->dev, "failed to exit sleep mode: %d\n", err);
+		goto poweroff;
+	}
+
+	/*
+	 * The MIPI DCS specification mandates this delay only between the
+	 * exit_sleep_mode and enter_sleep_mode commands, so it isn't strictly
+	 * necessary here.
+	 */
+	/*
+	msleep(120);
+	*/
+
+	/* set left-right mode */
+	err = sharp_panel_write(sharp, 0x1000, 0x2a);
+	if (err < 0) {
+		dev_err(panel->dev, "failed to set left-right mode: %d\n", err);
+		goto poweroff;
+	}
+
+	/* enable command mode */
+	err = sharp_panel_write(sharp, 0x1001, 0x01);
+	if (err < 0) {
+		dev_err(panel->dev, "failed to enable command mode: %d\n", err);
+		goto poweroff;
+	}
+
+	err = mipi_dsi_dcs_set_pixel_format(sharp->link1, format);
+	if (err < 0) {
+		dev_err(panel->dev, "failed to set pixel format: %d\n", err);
+		goto poweroff;
+	}
+
+	/*
+	 * TODO: The device supports both left-right and even-odd split
+	 * configurations, but this driver currently supports only the left-
+	 * right split. To support a different mode a mechanism needs to be
+	 * put in place to communicate the configuration back to the DSI host
+	 * controller.
+	 */
+	err = sharp_setup_symmetrical_split(sharp->link1, sharp->link2,
+					    sharp->mode);
+	if (err < 0) {
+		dev_err(panel->dev, "failed to set up symmetrical split: %d\n",
+			err);
+		goto poweroff;
+	}
+
+	err = mipi_dsi_dcs_set_display_on(sharp->link1);
+	if (err < 0) {
+		dev_err(panel->dev, "failed to set display on: %d\n", err);
+		goto poweroff;
+	}
+
+	sharp->prepared = true;
+
+	return 0;
+
+poweroff:
+	regulator_disable(sharp->supply);
+	return err;
+}
+
+static int sharp_panel_enable(struct drm_panel *panel)
+{
+	struct sharp_panel *sharp = to_sharp_panel(panel);
+
+	if (sharp->enabled)
+		return 0;
+
+	if (sharp->backlight) {
+		sharp->backlight->props.power = FB_BLANK_UNBLANK;
+		backlight_update_status(sharp->backlight);
+	}
+
+	sharp->enabled = true;
+
+	return 0;
+}
+
+static const struct drm_display_mode default_mode = {
+	.clock = 278000,
+	.hdisplay = 2560,
+	.hsync_start = 2560 + 128,
+	.hsync_end = 2560 + 128 + 64,
+	.htotal = 2560 + 128 + 64 + 64,
+	.vdisplay = 1600,
+	.vsync_start = 1600 + 4,
+	.vsync_end = 1600 + 4 + 8,
+	.vtotal = 1600 + 4 + 8 + 32,
+	.vrefresh = 60,
+};
+
+static int sharp_panel_get_modes(struct drm_panel *panel)
+{
+	struct drm_display_mode *mode;
+
+	mode = drm_mode_duplicate(panel->drm, &default_mode);
+	if (!mode) {
+		dev_err(panel->drm->dev, "failed to add mode %ux%ux@%u\n",
+			default_mode.hdisplay, default_mode.vdisplay,
+			default_mode.vrefresh);
+		return -ENOMEM;
+	}
+
+	drm_mode_set_name(mode);
+
+	drm_mode_probed_add(panel->connector, mode);
+
+	panel->connector->display_info.width_mm = 217;
+	panel->connector->display_info.height_mm = 136;
+
+	return 1;
+}
+
+static const struct drm_panel_funcs sharp_panel_funcs = {
+	.disable = sharp_panel_disable,
+	.unprepare = sharp_panel_unprepare,
+	.prepare = sharp_panel_prepare,
+	.enable = sharp_panel_enable,
+	.get_modes = sharp_panel_get_modes,
+};
+
+static const struct of_device_id sharp_of_match[] = {
+	{ .compatible = "sharp,lq101r1sx01", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, sharp_of_match);
+
+static int sharp_panel_add(struct sharp_panel *sharp)
+{
+	struct device_node *np;
+	int err;
+
+	sharp->mode = &default_mode;
+
+	sharp->supply = devm_regulator_get(&sharp->link1->dev, "power");
+	if (IS_ERR(sharp->supply))
+		return PTR_ERR(sharp->supply);
+
+	np = of_parse_phandle(sharp->link1->dev.of_node, "backlight", 0);
+	if (np) {
+		sharp->backlight = of_find_backlight_by_node(np);
+		of_node_put(np);
+
+		if (!sharp->backlight)
+			return -EPROBE_DEFER;
+	}
+
+	drm_panel_init(&sharp->base);
+	sharp->base.funcs = &sharp_panel_funcs;
+	sharp->base.dev = &sharp->link1->dev;
+
+	err = drm_panel_add(&sharp->base);
+	if (err < 0)
+		goto put_backlight;
+
+	return 0;
+
+put_backlight:
+	if (sharp->backlight)
+		put_device(&sharp->backlight->dev);
+
+	return err;
+}
+
+static void sharp_panel_del(struct sharp_panel *sharp)
+{
+	if (sharp->base.dev)
+		drm_panel_remove(&sharp->base);
+
+	if (sharp->backlight)
+		put_device(&sharp->backlight->dev);
+
+	if (sharp->link2)
+		put_device(&sharp->link2->dev);
+}
+
+static int sharp_panel_probe(struct mipi_dsi_device *dsi)
+{
+	struct mipi_dsi_device *secondary = NULL;
+	struct sharp_panel *sharp;
+	struct device_node *np;
+	int err;
+
+	dsi->lanes = 4;
+	dsi->format = MIPI_DSI_FMT_RGB888;
+	dsi->mode_flags = MIPI_DSI_MODE_LPM;
+
+	/* Find DSI-LINK1 */
+	np = of_parse_phandle(dsi->dev.of_node, "link2", 0);
+	if (np) {
+		secondary = of_find_mipi_dsi_device_by_node(np);
+		of_node_put(np);
+
+		if (!secondary)
+			return -EPROBE_DEFER;
+	}
+
+	/* register a panel for only the DSI-LINK1 interface */
+	if (secondary) {
+		sharp = devm_kzalloc(&dsi->dev, sizeof(*sharp), GFP_KERNEL);
+		if (!sharp) {
+			put_device(&secondary->dev);
+			return -ENOMEM;
+		}
+
+		mipi_dsi_set_drvdata(dsi, sharp);
+
+		sharp->link2 = secondary;
+		sharp->link1 = dsi;
+
+		err = sharp_panel_add(sharp);
+		if (err < 0) {
+			put_device(&secondary->dev);
+			return err;
+		}
+	}
+
+	err = mipi_dsi_attach(dsi);
+	if (err < 0) {
+		if (secondary)
+			sharp_panel_del(sharp);
+
+		return err;
+	}
+
+	return 0;
+}
+
+static int sharp_panel_remove(struct mipi_dsi_device *dsi)
+{
+	struct sharp_panel *sharp = mipi_dsi_get_drvdata(dsi);
+	int err;
+
+	/* only detach from host for the DSI-LINK2 interface */
+	if (!sharp) {
+		mipi_dsi_detach(dsi);
+		return 0;
+	}
+
+	err = sharp_panel_disable(&sharp->base);
+	if (err < 0)
+		dev_err(&dsi->dev, "failed to disable panel: %d\n", err);
+
+	err = mipi_dsi_detach(dsi);
+	if (err < 0)
+		dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", err);
+
+	drm_panel_detach(&sharp->base);
+	sharp_panel_del(sharp);
+
+	return 0;
+}
+
+static void sharp_panel_shutdown(struct mipi_dsi_device *dsi)
+{
+	struct sharp_panel *sharp = mipi_dsi_get_drvdata(dsi);
+
+	/* nothing to do for DSI-LINK2 */
+	if (!sharp)
+		return;
+
+	sharp_panel_disable(&sharp->base);
+}
+
+static struct mipi_dsi_driver sharp_panel_driver = {
+	.driver = {
+		.name = "panel-sharp-lq101r1sx01",
+		.of_match_table = sharp_of_match,
+	},
+	.probe = sharp_panel_probe,
+	.remove = sharp_panel_remove,
+	.shutdown = sharp_panel_shutdown,
+};
+module_mipi_dsi_driver(sharp_panel_driver);
+
+MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
+MODULE_DESCRIPTION("Sharp LQ101R1SX01 panel driver");
+MODULE_LICENSE("GPL v2");
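
The split programmed by sharp_setup_symmetrical_split() gives each DSI link half of the 2560-pixel width over the full 1600 rows: DSI-LINK1 scans out columns 0-1279 and DSI-LINK2 columns 1280-2559. The same address arithmetic as a standalone sketch (split_columns() is illustrative, not part of the driver):

#include <linux/types.h>

struct col_window { u16 start, end; };

static void split_columns(u16 hdisplay, struct col_window *left,
			  struct col_window *right)
{
	left->start = 0;
	left->end = hdisplay / 2 - 1;	/* 2560 -> 0..1279 */
	right->start = hdisplay / 2;	/* 2560 -> 1280..  */
	right->end = hdisplay - 1;	/*         ..2559  */
}
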
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 12bc8a0ab1cf..e95385bf8356 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -247,21 +247,14 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
 	if (IS_ERR(panel->supply))
 		return PTR_ERR(panel->supply);
 
-	panel->enable_gpio = devm_gpiod_get_optional(dev, "enable");
+	panel->enable_gpio = devm_gpiod_get_optional(dev, "enable",
+						     GPIOD_OUT_LOW);
 	if (IS_ERR(panel->enable_gpio)) {
 		err = PTR_ERR(panel->enable_gpio);
 		dev_err(dev, "failed to request GPIO: %d\n", err);
 		return err;
 	}
 
-	if (panel->enable_gpio) {
-		err = gpiod_direction_output(panel->enable_gpio, 0);
-		if (err < 0) {
-			dev_err(dev, "failed to setup GPIO: %d\n", err);
-			return err;
-		}
-	}
-
 	backlight = of_parse_phandle(dev->of_node, "backlight", 0);
 	if (backlight) {
 		panel->backlight = of_find_backlight_by_node(backlight);
@@ -376,6 +369,29 @@ static const struct panel_desc auo_b101xtn01 = {
 	},
 };
 
+static const struct drm_display_mode auo_b116xw03_mode = {
+	.clock = 70589,
+	.hdisplay = 1366,
+	.hsync_start = 1366 + 40,
+	.hsync_end = 1366 + 40 + 40,
+	.htotal = 1366 + 40 + 40 + 32,
+	.vdisplay = 768,
+	.vsync_start = 768 + 10,
+	.vsync_end = 768 + 10 + 12,
+	.vtotal = 768 + 10 + 12 + 6,
+	.vrefresh = 60,
+};
+
+static const struct panel_desc auo_b116xw03 = {
+	.modes = &auo_b116xw03_mode,
+	.num_modes = 1,
+	.bpc = 6,
+	.size = {
+		.width = 256,
+		.height = 144,
+	},
+};
+
 static const struct drm_display_mode auo_b133xtn01_mode = {
 	.clock = 69500,
 	.hdisplay = 1366,
@@ -415,6 +431,7 @@ static const struct drm_display_mode auo_b133htn01_mode = {
 static const struct panel_desc auo_b133htn01 = {
 	.modes = &auo_b133htn01_mode,
 	.num_modes = 1,
+	.bpc = 6,
 	.size = {
 		.width = 293,
 		.height = 165,
@@ -536,22 +553,92 @@ static const struct drm_display_mode foxlink_fl500wvr00_a0t_mode = {
 static const struct panel_desc foxlink_fl500wvr00_a0t = {
 	.modes = &foxlink_fl500wvr00_a0t_mode,
 	.num_modes = 1,
+	.bpc = 8,
 	.size = {
 		.width = 108,
 		.height = 65,
 	},
 };
 
-static const struct drm_display_mode innolux_n116bge_mode = {
+static const struct drm_display_mode hannstar_hsd070pww1_mode = {
+	.clock = 71100,
+	.hdisplay = 1280,
+	.hsync_start = 1280 + 1,
+	.hsync_end = 1280 + 1 + 158,
+	.htotal = 1280 + 1 + 158 + 1,
+	.vdisplay = 800,
+	.vsync_start = 800 + 1,
+	.vsync_end = 800 + 1 + 21,
+	.vtotal = 800 + 1 + 21 + 1,
+	.vrefresh = 60,
+};
+
+static const struct panel_desc hannstar_hsd070pww1 = {
+	.modes = &hannstar_hsd070pww1_mode,
+	.num_modes = 1,
+	.bpc = 6,
+	.size = {
+		.width = 151,
+		.height = 94,
+	},
+};
+
+static const struct drm_display_mode hitachi_tx23d38vm0caa_mode = {
+	.clock = 33333,
+	.hdisplay = 800,
+	.hsync_start = 800 + 85,
+	.hsync_end = 800 + 85 + 86,
+	.htotal = 800 + 85 + 86 + 85,
+	.vdisplay = 480,
+	.vsync_start = 480 + 16,
+	.vsync_end = 480 + 16 + 13,
+	.vtotal = 480 + 16 + 13 + 16,
+	.vrefresh = 60,
+};
+
+static const struct panel_desc hitachi_tx23d38vm0caa = {
+	.modes = &hitachi_tx23d38vm0caa_mode,
+	.num_modes = 1,
+	.bpc = 6,
+	.size = {
+		.width = 195,
+		.height = 117,
+	},
+};
+
+static const struct drm_display_mode innolux_g121i1_l01_mode = {
 	.clock = 71000,
+	.hdisplay = 1280,
+	.hsync_start = 1280 + 64,
+	.hsync_end = 1280 + 64 + 32,
+	.htotal = 1280 + 64 + 32 + 64,
+	.vdisplay = 800,
+	.vsync_start = 800 + 9,
+	.vsync_end = 800 + 9 + 6,
+	.vtotal = 800 + 9 + 6 + 9,
+	.vrefresh = 60,
+};
+
+static const struct panel_desc innolux_g121i1_l01 = {
+	.modes = &innolux_g121i1_l01_mode,
+	.num_modes = 1,
+	.bpc = 6,
+	.size = {
+		.width = 261,
+		.height = 163,
+	},
+};
+
+static const struct drm_display_mode innolux_n116bge_mode = {
+	.clock = 76420,
 	.hdisplay = 1366,
-	.hsync_start = 1366 + 64,
-	.hsync_end = 1366 + 64 + 6,
-	.htotal = 1366 + 64 + 6 + 64,
+	.hsync_start = 1366 + 136,
+	.hsync_end = 1366 + 136 + 30,
+	.htotal = 1366 + 136 + 30 + 60,
 	.vdisplay = 768,
 	.vsync_start = 768 + 8,
-	.vsync_end = 768 + 8 + 4,
-	.vtotal = 768 + 8 + 4 + 8,
+	.vsync_end = 768 + 8 + 12,
+	.vtotal = 768 + 8 + 12 + 12,
 	.vrefresh = 60,
 	.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
 };
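
The retimed innolux_n116bge entry still comes out at 60 Hz: refresh = clock / (htotal * vtotal), with htotal = 1366 + 136 + 30 + 60 = 1592 and vtotal = 768 + 8 + 12 + 12 = 800, giving 76,420,000 / (1592 * 800) ≈ 60.0. A small helper for sanity-checking these tables (a sketch; in-tree code would use drm_mode_vrefresh()):

/* clock is in kHz, as in struct drm_display_mode */
static unsigned int mode_refresh_hz(unsigned int clock_khz,
				    unsigned int htotal, unsigned int vtotal)
{
	unsigned int total = htotal * vtotal;

	return (clock_khz * 1000 + total / 2) / total;	/* rounded */
}
/* mode_refresh_hz(76420, 1592, 800) == 60 */
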
@@ -643,6 +730,9 @@ static const struct of_device_id platform_of_match[] = {
 		.compatible = "auo,b101xtn01",
 		.data = &auo_b101xtn01,
 	}, {
+		.compatible = "auo,b116xw03",
+		.data = &auo_b116xw03,
+	}, {
 		.compatible = "auo,b133htn01",
 		.data = &auo_b133htn01,
 	}, {
@@ -667,6 +757,15 @@ static const struct of_device_id platform_of_match[] = {
 		.compatible = "foxlink,fl500wvr00-a0t",
 		.data = &foxlink_fl500wvr00_a0t,
 	}, {
+		.compatible = "hannstar,hsd070pww1",
+		.data = &hannstar_hsd070pww1,
+	}, {
+		.compatible = "hit,tx23d38vm0caa",
+		.data = &hitachi_tx23d38vm0caa
+	}, {
+		.compatible = "innolux,g121i1-l01",
+		.data = &innolux_g121i1_l01
+	}, {
 		.compatible = "innolux,n116bge",
 		.data = &innolux_n116bge,
 	}, {
@@ -740,6 +839,7 @@ static const struct panel_desc_dsi lg_ld070wx3_sl01 = {
 	.desc = {
 		.modes = &lg_ld070wx3_sl01_mode,
 		.num_modes = 1,
+		.bpc = 8,
 		.size = {
 			.width = 94,
 			.height = 151,
@@ -767,6 +867,7 @@ static const struct panel_desc_dsi lg_lh500wx1_sd03 = {
 	.desc = {
 		.modes = &lg_lh500wx1_sd03_mode,
 		.num_modes = 1,
+		.bpc = 8,
 		.size = {
 			.width = 62,
 			.height = 110,
@@ -794,6 +895,7 @@ static const struct panel_desc_dsi panasonic_vvx10f004b00 = {
 	.desc = {
 		.modes = &panasonic_vvx10f004b00_mode,
 		.num_modes = 1,
+		.bpc = 8,
 		.size = {
 			.width = 217,
 			.height = 136,
@@ -863,7 +965,6 @@ static void panel_simple_dsi_shutdown(struct mipi_dsi_device *dsi)
 static struct mipi_dsi_driver panel_simple_dsi_driver = {
 	.driver = {
 		.name = "panel-simple-dsi",
-		.owner = THIS_MODULE,
 		.of_match_table = dsi_of_match,
 	},
 	.probe = panel_simple_dsi_probe,
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 0d1396266857..4a0a8b29b0a1 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -29,6 +29,7 @@
 #include "qxl_drv.h"
 #include "qxl_object.h"
 #include "drm_crtc_helper.h"
+#include <drm/drm_plane_helper.h>
 
 static bool qxl_head_enabled(struct qxl_head *head)
 {
@@ -100,14 +101,37 @@ static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev)
 	return 0;
 }
 
+static void qxl_update_offset_props(struct qxl_device *qdev)
+{
+	struct drm_device *dev = qdev->ddev;
+	struct drm_connector *connector;
+	struct qxl_output *output;
+	struct qxl_head *head;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		output = drm_connector_to_qxl_output(connector);
+
+		head = &qdev->client_monitors_config->heads[output->index];
+
+		drm_object_property_set_value(&connector->base,
+			dev->mode_config.suggested_x_property, head->x);
+		drm_object_property_set_value(&connector->base,
+			dev->mode_config.suggested_y_property, head->y);
+	}
+}
+
 void qxl_display_read_client_monitors_config(struct qxl_device *qdev)
 {
 
+	struct drm_device *dev = qdev->ddev;
 	while (qxl_display_copy_rom_client_monitors_config(qdev)) {
 		qxl_io_log(qdev, "failed crc check for client_monitors_config,"
 			   " retrying\n");
 	}
 
+	drm_modeset_lock_all(dev);
+	qxl_update_offset_props(qdev);
+	drm_modeset_unlock_all(dev);
 	if (!drm_helper_hpd_irq_event(qdev->ddev)) {
 		/* notify that the monitor configuration changed, to
 		   adjust at the arbitrary resolution */
@@ -568,7 +592,6 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
 {
 	struct drm_device *dev = crtc->dev;
 	struct qxl_device *qdev = dev->dev_private;
-	struct qxl_mode *m = (void *)mode->private;
 	struct qxl_framebuffer *qfb;
 	struct qxl_bo *bo, *old_bo = NULL;
 	struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
@@ -586,12 +609,6 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
 	}
 	qfb = to_qxl_framebuffer(crtc->primary->fb);
 	bo = gem_to_qxl_bo(qfb->obj);
-	if (!m)
-		/* and do we care? */
-		DRM_DEBUG("%dx%d: not a native mode\n", x, y);
-	else
-		DRM_DEBUG("%dx%d: qxl id %d\n",
-			  mode->hdisplay, mode->vdisplay, m->id);
 	DRM_DEBUG("+%d+%d (%d,%d) => (%d,%d)\n",
 		  x, y,
 		  mode->hdisplay, mode->vdisplay,
@@ -951,6 +968,10 @@ static int qdev_output_init(struct drm_device *dev, int num_output)
 
 	drm_object_attach_property(&connector->base,
 				   qdev->hotplug_mode_update_property, 0);
+	drm_object_attach_property(&connector->base,
+				   dev->mode_config.suggested_x_property, 0);
+	drm_object_attach_property(&connector->base,
+				   dev->mode_config.suggested_y_property, 0);
 	drm_connector_register(connector);
 	return 0;
 }
@@ -1064,6 +1085,7 @@ int qxl_modeset_init(struct qxl_device *qdev)
 
 	qdev->ddev->mode_config.fb_base = qdev->vram_base;
 
+	drm_mode_create_suggested_offset_properties(qdev->ddev);
 	qxl_mode_create_hotplug_mode_update_property(qdev);
 
 	for (i = 0 ; i < qxl_num_crtc; ++i) {
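
With the properties attached and updated, a guest compositor can read back where the host client wants each output placed. A hedged libdrm sketch (assumes the property names created by drm_mode_create_suggested_offset_properties() are "suggested X" and "suggested Y"):

#include <stdio.h>
#include <string.h>
#include <xf86drmMode.h>

static void print_suggested_xy(int fd, uint32_t connector_id)
{
	drmModeObjectProperties *props;
	uint32_t i;

	props = drmModeObjectGetProperties(fd, connector_id,
					   DRM_MODE_OBJECT_CONNECTOR);
	for (i = 0; props && i < props->count_props; i++) {
		drmModePropertyRes *prop = drmModeGetProperty(fd, props->props[i]);

		if (prop && (!strcmp(prop->name, "suggested X") ||
			     !strcmp(prop->name, "suggested Y")))
			printf("%s = %llu\n", prop->name,
			       (unsigned long long)props->prop_values[i]);
		drmModeFreeProperty(prop);
	}
	drmModeFreeObjectProperties(props);
}
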
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index 446e71ca36cb..d9b25684ac98 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -264,7 +264,8 @@ int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
 	if (list_is_singular(&release->bos))
 		return 0;
 
-	ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos, !no_intr);
+	ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos,
+				     !no_intr, NULL);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
index 575e986f82a7..8fd2d9f58f77 100644
--- a/drivers/gpu/drm/r128/r128_state.c
+++ b/drivers/gpu/drm/r128/r128_state.c
@@ -905,7 +905,7 @@ static int r128_cce_dispatch_write_span(struct drm_device *dev,
 	if (IS_ERR(buffer))
 		return PTR_ERR(buffer);
 
-	mask_size = depth->n * sizeof(u8);
+	mask_size = depth->n;
 	if (depth->mask) {
 		mask = memdup_user(depth->mask, mask_size);
 		if (IS_ERR(mask)) {
@@ -1010,7 +1010,7 @@ static int r128_cce_dispatch_write_pixels(struct drm_device *dev,
1010 } 1010 }
1011 1011
1012 if (depth->mask) { 1012 if (depth->mask) {
1013 mask_size = depth->n * sizeof(u8); 1013 mask_size = depth->n;
1014 mask = memdup_user(depth->mask, mask_size); 1014 mask = memdup_user(depth->mask, mask_size);
1015 if (IS_ERR(mask)) { 1015 if (IS_ERR(mask)) {
1016 kfree(x); 1016 kfree(x);
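Both r128 hunks shrink mask_size from depth->n * sizeof(u8) to plain depth->n; since sizeof(u8) is 1 by definition, the byte count handed to memdup_user() is unchanged and only the redundant multiply goes away. A userspace sketch of the same copy, with memdup() standing in for memdup_user():

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

/* Userspace analog of memdup_user(): allocate and copy len bytes. */
static void *memdup(const void *src, size_t len)
{
    void *p = malloc(len);
    if (p)
        memcpy(p, src, len);
    return p;
}

int main(void)
{
    unsigned char mask_src[4] = { 0xde, 0xad, 0xbe, 0xef };
    size_t n = sizeof(mask_src);
    /* n and n * sizeof(u8) request the same number of bytes */
    unsigned char *mask = memdup(mask_src, n);

    if (!mask)
        return 1;
    printf("copied %zu bytes, first = 0x%02x\n", n, mask[0]);
    free(mask);
    return 0;
}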
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index d01b87991422..12bc21219a0e 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -80,7 +80,8 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
80 r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \ 80 r600_dpm.o rs780_dpm.o rv6xx_dpm.o rv770_dpm.o rv730_dpm.o rv740_dpm.o \
81 rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \ 81 rv770_smc.o cypress_dpm.o btc_dpm.o sumo_dpm.o sumo_smc.o trinity_dpm.o \
82 trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \ 82 trinity_smc.o ni_dpm.o si_smc.o si_dpm.o kv_smc.o kv_dpm.o ci_smc.o \
83 ci_dpm.o dce6_afmt.o radeon_vm.o radeon_ucode.o radeon_ib.o radeon_mn.o 83 ci_dpm.o dce6_afmt.o radeon_vm.o radeon_ucode.o radeon_ib.o radeon_mn.o \
84 radeon_sync.o
84 85
85# add async DMA block 86# add async DMA block
86radeon-y += \ 87radeon-y += \
@@ -104,6 +105,7 @@ radeon-y += \
104 radeon_vce.o \ 105 radeon_vce.o \
105 vce_v1_0.o \ 106 vce_v1_0.o \
106 vce_v2_0.o \ 107 vce_v2_0.o \
108 radeon_kfd.o
107 109
108radeon-$(CONFIG_COMPAT) += radeon_ioc32.o 110radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
109radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o 111radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 30d242b25078..d59ec491dbb9 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -2039,6 +2039,7 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
2039 atombios_crtc_set_base(crtc, x, y, old_fb); 2039 atombios_crtc_set_base(crtc, x, y, old_fb);
2040 atombios_overscan_setup(crtc, mode, adjusted_mode); 2040 atombios_overscan_setup(crtc, mode, adjusted_mode);
2041 atombios_scaler_setup(crtc); 2041 atombios_scaler_setup(crtc);
2042 radeon_cursor_reset(crtc);
 2042 /* update the hw version for dpm */ 2043 /* update the hw version for dpm */
2043 radeon_crtc->hw_mode = *adjusted_mode; 2044 radeon_crtc->hw_mode = *adjusted_mode;
2044 2045
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 11a55e9dad7f..f373a81ba3d5 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -46,15 +46,15 @@
46static const struct ci_pt_defaults defaults_hawaii_xt = 46static const struct ci_pt_defaults defaults_hawaii_xt =
47{ 47{
48 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000, 48 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
49 { 0x84, 0x0, 0x0, 0x7F, 0x0, 0x0, 0x5A, 0x60, 0x51, 0x8E, 0x79, 0x6B, 0x5F, 0x90, 0x79 }, 49 { 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
50 { 0x1EA, 0x1EA, 0x1EA, 0x224, 0x224, 0x224, 0x24F, 0x24F, 0x24F, 0x28E, 0x28E, 0x28E, 0x2BC, 0x2BC, 0x2BC } 50 { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
51}; 51};
52 52
53static const struct ci_pt_defaults defaults_hawaii_pro = 53static const struct ci_pt_defaults defaults_hawaii_pro =
54{ 54{
55 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062, 55 1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
56 { 0x93, 0x0, 0x0, 0x97, 0x0, 0x0, 0x6B, 0x60, 0x51, 0x95, 0x79, 0x6B, 0x5F, 0x90, 0x79 }, 56 { 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
57 { 0x1EA, 0x1EA, 0x1EA, 0x224, 0x224, 0x224, 0x24F, 0x24F, 0x24F, 0x28E, 0x28E, 0x28E, 0x2BC, 0x2BC, 0x2BC } 57 { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
58}; 58};
59 59
60static const struct ci_pt_defaults defaults_bonaire_xt = 60static const struct ci_pt_defaults defaults_bonaire_xt =
@@ -184,6 +184,9 @@ static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
184 u32 target_tdp); 184 u32 target_tdp);
185static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate); 185static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate);
186 186
187static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
188 PPSMC_Msg msg, u32 parameter);
189
187static struct ci_power_info *ci_get_pi(struct radeon_device *rdev) 190static struct ci_power_info *ci_get_pi(struct radeon_device *rdev)
188{ 191{
189 struct ci_power_info *pi = rdev->pm.dpm.priv; 192 struct ci_power_info *pi = rdev->pm.dpm.priv;
@@ -249,7 +252,10 @@ static void ci_initialize_powertune_defaults(struct radeon_device *rdev)
249 252
250 if (pi->caps_power_containment) { 253 if (pi->caps_power_containment) {
251 pi->caps_cac = true; 254 pi->caps_cac = true;
252 pi->enable_bapm_feature = true; 255 if (rdev->family == CHIP_HAWAII)
256 pi->enable_bapm_feature = false;
257 else
258 pi->enable_bapm_feature = true;
253 pi->enable_tdc_limit_feature = true; 259 pi->enable_tdc_limit_feature = true;
254 pi->enable_pkg_pwr_tracking_feature = true; 260 pi->enable_pkg_pwr_tracking_feature = true;
255 } 261 }
@@ -352,6 +358,21 @@ static int ci_populate_dw8(struct radeon_device *rdev)
352 return 0; 358 return 0;
353} 359}
354 360
361static int ci_populate_fuzzy_fan(struct radeon_device *rdev)
362{
363 struct ci_power_info *pi = ci_get_pi(rdev);
364
365 if ((rdev->pm.dpm.fan.fan_output_sensitivity & (1 << 15)) ||
366 (rdev->pm.dpm.fan.fan_output_sensitivity == 0))
367 rdev->pm.dpm.fan.fan_output_sensitivity =
368 rdev->pm.dpm.fan.default_fan_output_sensitivity;
369
370 pi->smc_powertune_table.FuzzyFan_PwmSetDelta =
371 cpu_to_be16(rdev->pm.dpm.fan.fan_output_sensitivity);
372
373 return 0;
374}
375
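ci_populate_fuzzy_fan() treats a fan_output_sensitivity of zero, or one with bit 15 set, as unusable and substitutes the board default before byte-swapping the value into the big-endian SMU table. The validation on its own, as a runnable sketch with an invented default:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the fan state kept in rdev->pm.dpm.fan. */
struct fan_params {
    uint16_t fan_output_sensitivity;
    uint16_t default_fan_output_sensitivity;
};

/* Mirrors the check above: zero or bit-15-set sensitivities fall back
 * to the default before being handed to the SMU. */
static uint16_t sanitize_sensitivity(struct fan_params *fan)
{
    if ((fan->fan_output_sensitivity & (1 << 15)) ||
        fan->fan_output_sensitivity == 0)
        fan->fan_output_sensitivity = fan->default_fan_output_sensitivity;
    return fan->fan_output_sensitivity;
}

int main(void)
{
    struct fan_params fan = { .fan_output_sensitivity = 0,
                              .default_fan_output_sensitivity = 4836 };
    printf("sensitivity = %u\n", sanitize_sensitivity(&fan));
    return 0;
}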
355static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct radeon_device *rdev) 376static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct radeon_device *rdev)
356{ 377{
357 struct ci_power_info *pi = ci_get_pi(rdev); 378 struct ci_power_info *pi = ci_get_pi(rdev);
@@ -477,6 +498,9 @@ static int ci_populate_pm_base(struct radeon_device *rdev)
477 ret = ci_populate_dw8(rdev); 498 ret = ci_populate_dw8(rdev);
478 if (ret) 499 if (ret)
479 return ret; 500 return ret;
501 ret = ci_populate_fuzzy_fan(rdev);
502 if (ret)
503 return ret;
480 ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(rdev); 504 ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(rdev);
481 if (ret) 505 if (ret)
482 return ret; 506 return ret;
@@ -690,6 +714,25 @@ static int ci_enable_smc_cac(struct radeon_device *rdev, bool enable)
690 return ret; 714 return ret;
691} 715}
692 716
717static int ci_enable_thermal_based_sclk_dpm(struct radeon_device *rdev,
718 bool enable)
719{
720 struct ci_power_info *pi = ci_get_pi(rdev);
721 PPSMC_Result smc_result = PPSMC_Result_OK;
722
723 if (pi->thermal_sclk_dpm_enabled) {
724 if (enable)
725 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_ENABLE_THERMAL_DPM);
726 else
727 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DISABLE_THERMAL_DPM);
728 }
729
730 if (smc_result == PPSMC_Result_OK)
731 return 0;
732 else
733 return -EINVAL;
734}
735
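ci_enable_thermal_based_sclk_dpm() follows the usual SMC handshake: send a message only when the feature is configured, then fold any status other than PPSMC_Result_OK into -EINVAL. A standalone model of that mapping (the types and values here are stand-ins for the driver's ppsmc.h definitions):

#include <errno.h>
#include <stdio.h>

typedef unsigned char PPSMC_Result;
#define PPSMC_Result_OK     ((PPSMC_Result)0x01)
#define PPSMC_Result_Failed ((PPSMC_Result)0xFF)

/* Anything the SMC reports other than OK becomes -EINVAL. */
static int smc_result_to_errno(PPSMC_Result r)
{
    return (r == PPSMC_Result_OK) ? 0 : -EINVAL;
}

int main(void)
{
    printf("ok=%d failed=%d\n",
           smc_result_to_errno(PPSMC_Result_OK),
           smc_result_to_errno(PPSMC_Result_Failed));
    return 0;
}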
693static int ci_power_control_set_level(struct radeon_device *rdev) 736static int ci_power_control_set_level(struct radeon_device *rdev)
694{ 737{
695 struct ci_power_info *pi = ci_get_pi(rdev); 738 struct ci_power_info *pi = ci_get_pi(rdev);
@@ -700,13 +743,11 @@ static int ci_power_control_set_level(struct radeon_device *rdev)
700 int ret = 0; 743 int ret = 0;
701 bool adjust_polarity = false; /* ??? */ 744 bool adjust_polarity = false; /* ??? */
702 745
703 if (pi->caps_power_containment && 746 if (pi->caps_power_containment) {
704 (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)) {
705 adjust_percent = adjust_polarity ? 747 adjust_percent = adjust_polarity ?
706 rdev->pm.dpm.tdp_adjustment : (-1 * rdev->pm.dpm.tdp_adjustment); 748 rdev->pm.dpm.tdp_adjustment : (-1 * rdev->pm.dpm.tdp_adjustment);
707 target_tdp = ((100 + adjust_percent) * 749 target_tdp = ((100 + adjust_percent) *
708 (s32)cac_tdp_table->configurable_tdp) / 100; 750 (s32)cac_tdp_table->configurable_tdp) / 100;
709 target_tdp *= 256;
710 751
711 ret = ci_set_overdrive_target_tdp(rdev, (u32)target_tdp); 752 ret = ci_set_overdrive_target_tdp(rdev, (u32)target_tdp);
712 } 753 }
@@ -814,7 +855,7 @@ static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
814 } 855 }
815} 856}
816 857
817static int ci_set_thermal_temperature_range(struct radeon_device *rdev, 858static int ci_thermal_set_temperature_range(struct radeon_device *rdev,
818 int min_temp, int max_temp) 859 int min_temp, int max_temp)
819{ 860{
820 int low_temp = 0 * 1000; 861 int low_temp = 0 * 1000;
@@ -850,6 +891,350 @@ static int ci_set_thermal_temperature_range(struct radeon_device *rdev,
850 return 0; 891 return 0;
851} 892}
852 893
894static int ci_thermal_enable_alert(struct radeon_device *rdev,
895 bool enable)
896{
897 u32 thermal_int = RREG32_SMC(CG_THERMAL_INT);
898 PPSMC_Result result;
899
900 if (enable) {
901 thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
902 WREG32_SMC(CG_THERMAL_INT, thermal_int);
903 rdev->irq.dpm_thermal = false;
904 result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Thermal_Cntl_Enable);
905 if (result != PPSMC_Result_OK) {
906 DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
907 return -EINVAL;
908 }
909 } else {
910 thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
911 WREG32_SMC(CG_THERMAL_INT, thermal_int);
912 rdev->irq.dpm_thermal = true;
913 result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Thermal_Cntl_Disable);
914 if (result != PPSMC_Result_OK) {
915 DRM_DEBUG_KMS("Could not disable thermal interrupts.\n");
916 return -EINVAL;
917 }
918 }
919
920 return 0;
921}
922
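ci_thermal_enable_alert() toggles the same two mask bits in CG_THERMAL_INT in opposite directions: enabling the alert clears THERM_INT_MASK_HIGH/LOW and disabling sets them, with a matching PPSMC message sent either way. The register half of that, modeled with placeholder bit positions:

#include <stdint.h>
#include <stdio.h>

/* Placeholder positions for THERM_INT_MASK_HIGH/LOW; the real values
 * live in the driver headers. */
#define THERM_INT_MASK_HIGH (1u << 24)
#define THERM_INT_MASK_LOW  (1u << 25)

static uint32_t set_thermal_alert(uint32_t thermal_int, int enable)
{
    if (enable)  /* unmask both thermal interrupts */
        thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
    else         /* mask both thermal interrupts */
        thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
    return thermal_int;
}

int main(void)
{
    uint32_t reg = 0xffffffffu;
    reg = set_thermal_alert(reg, 1);
    printf("alert on:  0x%08x\n", reg);
    reg = set_thermal_alert(reg, 0);
    printf("alert off: 0x%08x\n", reg);
    return 0;
}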
923static void ci_fan_ctrl_set_static_mode(struct radeon_device *rdev, u32 mode)
924{
925 struct ci_power_info *pi = ci_get_pi(rdev);
926 u32 tmp;
927
928 if (pi->fan_ctrl_is_in_default_mode) {
929 tmp = (RREG32_SMC(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT;
930 pi->fan_ctrl_default_mode = tmp;
931 tmp = (RREG32_SMC(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT;
932 pi->t_min = tmp;
933 pi->fan_ctrl_is_in_default_mode = false;
934 }
935
936 tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TMIN_MASK;
937 tmp |= TMIN(0);
938 WREG32_SMC(CG_FDO_CTRL2, tmp);
939
940 tmp = RREG32_SMC(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
941 tmp |= FDO_PWM_MODE(mode);
942 WREG32_SMC(CG_FDO_CTRL2, tmp);
943}
944
945static int ci_thermal_setup_fan_table(struct radeon_device *rdev)
946{
947 struct ci_power_info *pi = ci_get_pi(rdev);
948 SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
949 u32 duty100;
950 u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
951 u16 fdo_min, slope1, slope2;
952 u32 reference_clock, tmp;
953 int ret;
954 u64 tmp64;
955
956 if (!pi->fan_table_start) {
957 rdev->pm.dpm.fan.ucode_fan_control = false;
958 return 0;
959 }
960
961 duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
962
963 if (duty100 == 0) {
964 rdev->pm.dpm.fan.ucode_fan_control = false;
965 return 0;
966 }
967
968 tmp64 = (u64)rdev->pm.dpm.fan.pwm_min * duty100;
969 do_div(tmp64, 10000);
970 fdo_min = (u16)tmp64;
971
972 t_diff1 = rdev->pm.dpm.fan.t_med - rdev->pm.dpm.fan.t_min;
973 t_diff2 = rdev->pm.dpm.fan.t_high - rdev->pm.dpm.fan.t_med;
974
975 pwm_diff1 = rdev->pm.dpm.fan.pwm_med - rdev->pm.dpm.fan.pwm_min;
976 pwm_diff2 = rdev->pm.dpm.fan.pwm_high - rdev->pm.dpm.fan.pwm_med;
977
978 slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
979 slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
980
981 fan_table.TempMin = cpu_to_be16((50 + rdev->pm.dpm.fan.t_min) / 100);
982 fan_table.TempMed = cpu_to_be16((50 + rdev->pm.dpm.fan.t_med) / 100);
983 fan_table.TempMax = cpu_to_be16((50 + rdev->pm.dpm.fan.t_max) / 100);
984
985 fan_table.Slope1 = cpu_to_be16(slope1);
986 fan_table.Slope2 = cpu_to_be16(slope2);
987
988 fan_table.FdoMin = cpu_to_be16(fdo_min);
989
990 fan_table.HystDown = cpu_to_be16(rdev->pm.dpm.fan.t_hyst);
991
992 fan_table.HystUp = cpu_to_be16(1);
993
994 fan_table.HystSlope = cpu_to_be16(1);
995
996 fan_table.TempRespLim = cpu_to_be16(5);
997
998 reference_clock = radeon_get_xclk(rdev);
999
1000 fan_table.RefreshPeriod = cpu_to_be32((rdev->pm.dpm.fan.cycle_delay *
1001 reference_clock) / 1600);
1002
1003 fan_table.FdoMax = cpu_to_be16((u16)duty100);
1004
1005 tmp = (RREG32_SMC(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT;
1006 fan_table.TempSrc = (uint8_t)tmp;
1007
1008 ret = ci_copy_bytes_to_smc(rdev,
1009 pi->fan_table_start,
1010 (u8 *)(&fan_table),
1011 sizeof(fan_table),
1012 pi->sram_end);
1013
1014 if (ret) {
 1015 DRM_ERROR("Failed to load fan table to the SMC.\n");
1016 rdev->pm.dpm.fan.ucode_fan_control = false;
1017 }
1018
1019 return 0;
1020}
1021
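ci_thermal_setup_fan_table() converts the ATOM fan profile (temperatures in 0.01 degC, PWM in 0.01%) into two fixed-point PWM-vs-temperature slopes for the SMU; the +50 terms round to nearest instead of truncating. The same arithmetic as a standalone sketch, with made-up profile values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t duty100 = 255;  /* FMAX_DUTY100 field from CG_FDO_CTRL1 */
    uint32_t t_min = 4500, t_med = 6500, t_high = 8500;     /* 45/65/85 C */
    uint32_t pwm_min = 3000, pwm_med = 6000, pwm_high = 10000; /* 30/60/100% */

    uint32_t t_diff1 = t_med - t_min, t_diff2 = t_high - t_med;
    uint32_t pwm_diff1 = pwm_med - pwm_min, pwm_diff2 = pwm_high - pwm_med;

    /* +50 before /100 rounds rather than truncates */
    uint16_t slope1 = (uint16_t)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
    uint16_t slope2 = (uint16_t)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
    uint16_t temp_min = (uint16_t)((50 + t_min) / 100); /* whole degrees */

    printf("slope1=%u slope2=%u TempMin=%u\n", slope1, slope2, temp_min);
    return 0;
}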
1022static int ci_fan_ctrl_start_smc_fan_control(struct radeon_device *rdev)
1023{
1024 struct ci_power_info *pi = ci_get_pi(rdev);
1025 PPSMC_Result ret;
1026
1027 if (pi->caps_od_fuzzy_fan_control_support) {
1028 ret = ci_send_msg_to_smc_with_parameter(rdev,
1029 PPSMC_StartFanControl,
1030 FAN_CONTROL_FUZZY);
1031 if (ret != PPSMC_Result_OK)
1032 return -EINVAL;
1033 ret = ci_send_msg_to_smc_with_parameter(rdev,
1034 PPSMC_MSG_SetFanPwmMax,
1035 rdev->pm.dpm.fan.default_max_fan_pwm);
1036 if (ret != PPSMC_Result_OK)
1037 return -EINVAL;
1038 } else {
1039 ret = ci_send_msg_to_smc_with_parameter(rdev,
1040 PPSMC_StartFanControl,
1041 FAN_CONTROL_TABLE);
1042 if (ret != PPSMC_Result_OK)
1043 return -EINVAL;
1044 }
1045
1046 return 0;
1047}
1048
1049#if 0
1050static int ci_fan_ctrl_stop_smc_fan_control(struct radeon_device *rdev)
1051{
1052 PPSMC_Result ret;
1053
1054 ret = ci_send_msg_to_smc(rdev, PPSMC_StopFanControl);
1055 if (ret == PPSMC_Result_OK)
1056 return 0;
1057 else
1058 return -EINVAL;
1059}
1060
1061static int ci_fan_ctrl_get_fan_speed_percent(struct radeon_device *rdev,
1062 u32 *speed)
1063{
1064 u32 duty, duty100;
1065 u64 tmp64;
1066
1067 if (rdev->pm.no_fan)
1068 return -ENOENT;
1069
1070 duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
1071 duty = (RREG32_SMC(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT;
1072
1073 if (duty100 == 0)
1074 return -EINVAL;
1075
1076 tmp64 = (u64)duty * 100;
1077 do_div(tmp64, duty100);
1078 *speed = (u32)tmp64;
1079
1080 if (*speed > 100)
1081 *speed = 100;
1082
1083 return 0;
1084}
1085
1086static int ci_fan_ctrl_set_fan_speed_percent(struct radeon_device *rdev,
1087 u32 speed)
1088{
1089 u32 tmp;
1090 u32 duty, duty100;
1091 u64 tmp64;
1092
1093 if (rdev->pm.no_fan)
1094 return -ENOENT;
1095
1096 if (speed > 100)
1097 return -EINVAL;
1098
1099 if (rdev->pm.dpm.fan.ucode_fan_control)
1100 ci_fan_ctrl_stop_smc_fan_control(rdev);
1101
1102 duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
1103
1104 if (duty100 == 0)
1105 return -EINVAL;
1106
1107 tmp64 = (u64)speed * duty100;
1108 do_div(tmp64, 100);
1109 duty = (u32)tmp64;
1110
1111 tmp = RREG32_SMC(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK;
1112 tmp |= FDO_STATIC_DUTY(duty);
1113 WREG32_SMC(CG_FDO_CTRL0, tmp);
1114
1115 ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC);
1116
1117 return 0;
1118}
1119
1120static int ci_fan_ctrl_get_fan_speed_rpm(struct radeon_device *rdev,
1121 u32 *speed)
1122{
1123 u32 tach_period;
1124 u32 xclk = radeon_get_xclk(rdev);
1125
1126 if (rdev->pm.no_fan)
1127 return -ENOENT;
1128
1129 if (rdev->pm.fan_pulses_per_revolution == 0)
1130 return -ENOENT;
1131
1132 tach_period = (RREG32_SMC(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT;
1133 if (tach_period == 0)
1134 return -ENOENT;
1135
1136 *speed = 60 * xclk * 10000 / tach_period;
1137
1138 return 0;
1139}
1140
1141static int ci_fan_ctrl_set_fan_speed_rpm(struct radeon_device *rdev,
1142 u32 speed)
1143{
1144 u32 tach_period, tmp;
1145 u32 xclk = radeon_get_xclk(rdev);
1146
1147 if (rdev->pm.no_fan)
1148 return -ENOENT;
1149
1150 if (rdev->pm.fan_pulses_per_revolution == 0)
1151 return -ENOENT;
1152
1153 if ((speed < rdev->pm.fan_min_rpm) ||
1154 (speed > rdev->pm.fan_max_rpm))
1155 return -EINVAL;
1156
1157 if (rdev->pm.dpm.fan.ucode_fan_control)
1158 ci_fan_ctrl_stop_smc_fan_control(rdev);
1159
1160 tach_period = 60 * xclk * 10000 / (8 * speed);
1161 tmp = RREG32_SMC(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK;
1162 tmp |= TARGET_PERIOD(tach_period);
1163 WREG32_SMC(CG_TACH_CTRL, tmp);
1164
1165 ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC_RPM);
1166
1167 return 0;
1168}
1169#endif
1170
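The (currently #if 0'd) RPM helpers convert between fan RPM and the tachometer period: ci_fan_ctrl_set_fan_speed_rpm() programs tach_period = 60 * xclk * 10000 / (8 * rpm), and inverting that same formula recovers the RPM from a period. A round-trip of the math (xclk is a made-up reference clock; the 64-bit intermediate avoids overflowing the numerator, which exceeds 32 bits for typical clocks):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t xclk = 10000;  /* assumed 100 MHz reference in 10 kHz units */
    uint32_t rpm = 2200;

    /* 60 * 10000 * 10000 = 6e9, so keep the intermediate in 64 bits */
    uint64_t num = 60ull * xclk * 10000ull;
    uint32_t tach_period = (uint32_t)(num / (8u * rpm));
    uint32_t rpm_back = (uint32_t)(num / (8u * tach_period));

    printf("tach_period=%u rpm_back=%u\n", tach_period, rpm_back);
    return 0;
}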
1171static void ci_fan_ctrl_set_default_mode(struct radeon_device *rdev)
1172{
1173 struct ci_power_info *pi = ci_get_pi(rdev);
1174 u32 tmp;
1175
1176 if (!pi->fan_ctrl_is_in_default_mode) {
1177 tmp = RREG32_SMC(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
1178 tmp |= FDO_PWM_MODE(pi->fan_ctrl_default_mode);
1179 WREG32_SMC(CG_FDO_CTRL2, tmp);
1180
1181 tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TMIN_MASK;
1182 tmp |= TMIN(pi->t_min);
1183 WREG32_SMC(CG_FDO_CTRL2, tmp);
1184 pi->fan_ctrl_is_in_default_mode = true;
1185 }
1186}
1187
1188static void ci_thermal_start_smc_fan_control(struct radeon_device *rdev)
1189{
1190 if (rdev->pm.dpm.fan.ucode_fan_control) {
1191 ci_fan_ctrl_start_smc_fan_control(rdev);
1192 ci_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC);
1193 }
1194}
1195
1196static void ci_thermal_initialize(struct radeon_device *rdev)
1197{
1198 u32 tmp;
1199
1200 if (rdev->pm.fan_pulses_per_revolution) {
1201 tmp = RREG32_SMC(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK;
 1202 tmp |= EDGE_PER_REV(rdev->pm.fan_pulses_per_revolution - 1);
1203 WREG32_SMC(CG_TACH_CTRL, tmp);
1204 }
1205
1206 tmp = RREG32_SMC(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK;
1207 tmp |= TACH_PWM_RESP_RATE(0x28);
1208 WREG32_SMC(CG_FDO_CTRL2, tmp);
1209}
1210
1211static int ci_thermal_start_thermal_controller(struct radeon_device *rdev)
1212{
1213 int ret;
1214
1215 ci_thermal_initialize(rdev);
1216 ret = ci_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
1217 if (ret)
1218 return ret;
1219 ret = ci_thermal_enable_alert(rdev, true);
1220 if (ret)
1221 return ret;
1222 if (rdev->pm.dpm.fan.ucode_fan_control) {
1223 ret = ci_thermal_setup_fan_table(rdev);
1224 if (ret)
1225 return ret;
1226 ci_thermal_start_smc_fan_control(rdev);
1227 }
1228
1229 return 0;
1230}
1231
1232static void ci_thermal_stop_thermal_controller(struct radeon_device *rdev)
1233{
1234 if (!rdev->pm.no_fan)
1235 ci_fan_ctrl_set_default_mode(rdev);
1236}
1237
853#if 0 1238#if 0
854static int ci_read_smc_soft_register(struct radeon_device *rdev, 1239static int ci_read_smc_soft_register(struct radeon_device *rdev,
855 u16 reg_offset, u32 *value) 1240 u16 reg_offset, u32 *value)
@@ -1253,7 +1638,7 @@ static int ci_dpm_force_state_sclk(struct radeon_device *rdev, u32 n)
1253 1638
1254 if (!pi->sclk_dpm_key_disabled) { 1639 if (!pi->sclk_dpm_key_disabled) {
1255 PPSMC_Result smc_result = 1640 PPSMC_Result smc_result =
1256 ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, n); 1641 ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SCLKDPM_SetEnabledMask, 1 << n);
1257 if (smc_result != PPSMC_Result_OK) 1642 if (smc_result != PPSMC_Result_OK)
1258 return -EINVAL; 1643 return -EINVAL;
1259 } 1644 }
@@ -1267,7 +1652,7 @@ static int ci_dpm_force_state_mclk(struct radeon_device *rdev, u32 n)
1267 1652
1268 if (!pi->mclk_dpm_key_disabled) { 1653 if (!pi->mclk_dpm_key_disabled) {
1269 PPSMC_Result smc_result = 1654 PPSMC_Result smc_result =
1270 ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_MCLKDPM_ForceState, n); 1655 ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_MCLKDPM_SetEnabledMask, 1 << n);
1271 if (smc_result != PPSMC_Result_OK) 1656 if (smc_result != PPSMC_Result_OK)
1272 return -EINVAL; 1657 return -EINVAL;
1273 } 1658 }
@@ -2042,6 +2427,33 @@ static int ci_force_switch_to_arb_f0(struct radeon_device *rdev)
2042 return ni_copy_and_switch_arb_sets(rdev, tmp, MC_CG_ARB_FREQ_F0); 2427 return ni_copy_and_switch_arb_sets(rdev, tmp, MC_CG_ARB_FREQ_F0);
2043} 2428}
2044 2429
2430static void ci_register_patching_mc_arb(struct radeon_device *rdev,
2431 const u32 engine_clock,
2432 const u32 memory_clock,
 2433 u32 *dram_timing2)
2434{
2435 bool patch;
2436 u32 tmp, tmp2;
2437
2438 tmp = RREG32(MC_SEQ_MISC0);
2439 patch = ((tmp & 0x0000f00) == 0x300) ? true : false;
2440
2441 if (patch &&
2442 ((rdev->pdev->device == 0x67B0) ||
2443 (rdev->pdev->device == 0x67B1))) {
2444 if ((memory_clock > 100000) && (memory_clock <= 125000)) {
2445 tmp2 = (((0x31 * engine_clock) / 125000) - 1) & 0xff;
 2446 *dram_timing2 &= ~0x00ff0000;
 2447 *dram_timing2 |= tmp2 << 16;
2448 } else if ((memory_clock > 125000) && (memory_clock <= 137500)) {
2449 tmp2 = (((0x36 * engine_clock) / 137500) - 1) & 0xff;
 2450 *dram_timing2 &= ~0x00ff0000;
 2451 *dram_timing2 |= tmp2 << 16;
2452 }
2453 }
2454}
2455
2456
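ci_register_patching_mc_arb() only rewrites bits 23:16 of DRAM_TIMING2, splicing in a value derived from the engine clock while leaving the other bytes alone, and only for the two Hawaii device ids in the affected memory-clock bands. The field update in isolation, with example clocks in the driver's 10 kHz units:

#include <stdint.h>
#include <stdio.h>

static uint32_t patch_dram_timing2(uint32_t dram_timing2,
                                   uint32_t engine_clock,
                                   uint32_t memory_clock)
{
    if (memory_clock > 100000 && memory_clock <= 125000) {
        uint32_t tmp2 = (((0x31 * engine_clock) / 125000) - 1) & 0xff;
        /* replace byte 2 only, keep the rest of the register */
        dram_timing2 = (dram_timing2 & ~0x00ff0000u) | (tmp2 << 16);
    } else if (memory_clock > 125000 && memory_clock <= 137500) {
        uint32_t tmp2 = (((0x36 * engine_clock) / 137500) - 1) & 0xff;
        dram_timing2 = (dram_timing2 & ~0x00ff0000u) | (tmp2 << 16);
    }
    return dram_timing2;
}

int main(void)
{
    uint32_t before = 0x00aa5511;
    uint32_t after = patch_dram_timing2(before, 100000 /* 1 GHz sclk */,
                                        125000 /* 1.25 GHz mclk */);
    printf("0x%08x -> 0x%08x\n", before, after);
    return 0;
}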
2045static int ci_populate_memory_timing_parameters(struct radeon_device *rdev, 2457static int ci_populate_memory_timing_parameters(struct radeon_device *rdev,
2046 u32 sclk, 2458 u32 sclk,
2047 u32 mclk, 2459 u32 mclk,
@@ -2057,6 +2469,8 @@ static int ci_populate_memory_timing_parameters(struct radeon_device *rdev,
2057 dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2); 2469 dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
2058 burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK; 2470 burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK;
2059 2471
2472 ci_register_patching_mc_arb(rdev, sclk, mclk, &dram_timing2);
2473
2060 arb_regs->McArbDramTiming = cpu_to_be32(dram_timing); 2474 arb_regs->McArbDramTiming = cpu_to_be32(dram_timing);
2061 arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2); 2475 arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
2062 arb_regs->McArbBurstTime = (u8)burst_time; 2476 arb_regs->McArbBurstTime = (u8)burst_time;
@@ -2351,10 +2765,10 @@ static int ci_calculate_mclk_params(struct radeon_device *rdev,
2351 u32 tmp; 2765 u32 tmp;
2352 u32 reference_clock = rdev->clock.mpll.reference_freq; 2766 u32 reference_clock = rdev->clock.mpll.reference_freq;
2353 2767
2354 if (pi->mem_gddr5) 2768 if (mpll_param.qdr == 1)
2355 freq_nom = memory_clock * 4; 2769 freq_nom = memory_clock * 4 * (1 << mpll_param.post_div);
2356 else 2770 else
2357 freq_nom = memory_clock * 2; 2771 freq_nom = memory_clock * 2 * (1 << mpll_param.post_div);
2358 2772
2359 tmp = (freq_nom / reference_clock); 2773 tmp = (freq_nom / reference_clock);
2360 tmp = tmp * tmp; 2774 tmp = tmp * tmp;
@@ -2434,7 +2848,6 @@ static int ci_populate_single_memory_level(struct radeon_device *rdev,
2434 &memory_level->MinVddcPhases); 2848 &memory_level->MinVddcPhases);
2435 2849
2436 memory_level->EnabledForThrottle = 1; 2850 memory_level->EnabledForThrottle = 1;
2437 memory_level->EnabledForActivity = 1;
2438 memory_level->UpH = 0; 2851 memory_level->UpH = 0;
2439 memory_level->DownH = 100; 2852 memory_level->DownH = 100;
2440 memory_level->VoltageDownH = 0; 2853 memory_level->VoltageDownH = 0;
@@ -2767,7 +3180,6 @@ static int ci_populate_single_graphic_level(struct radeon_device *rdev,
2767 3180
2768 graphic_level->CcPwrDynRm = 0; 3181 graphic_level->CcPwrDynRm = 0;
2769 graphic_level->CcPwrDynRm1 = 0; 3182 graphic_level->CcPwrDynRm1 = 0;
2770 graphic_level->EnabledForActivity = 1;
2771 graphic_level->EnabledForThrottle = 1; 3183 graphic_level->EnabledForThrottle = 1;
2772 graphic_level->UpH = 0; 3184 graphic_level->UpH = 0;
2773 graphic_level->DownH = 0; 3185 graphic_level->DownH = 0;
@@ -2816,10 +3228,13 @@ static int ci_populate_all_graphic_levels(struct radeon_device *rdev)
2816 &pi->smc_state_table.GraphicsLevel[i]); 3228 &pi->smc_state_table.GraphicsLevel[i]);
2817 if (ret) 3229 if (ret)
2818 return ret; 3230 return ret;
3231 if (i > 1)
3232 pi->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
2819 if (i == (dpm_table->sclk_table.count - 1)) 3233 if (i == (dpm_table->sclk_table.count - 1))
2820 pi->smc_state_table.GraphicsLevel[i].DisplayWatermark = 3234 pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
2821 PPSMC_DISPLAY_WATERMARK_HIGH; 3235 PPSMC_DISPLAY_WATERMARK_HIGH;
2822 } 3236 }
3237 pi->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;
2823 3238
2824 pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count; 3239 pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
2825 pi->dpm_level_enable_mask.sclk_dpm_enable_mask = 3240 pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
@@ -2863,6 +3278,16 @@ static int ci_populate_all_memory_levels(struct radeon_device *rdev)
2863 return ret; 3278 return ret;
2864 } 3279 }
2865 3280
3281 pi->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;
3282
3283 if ((dpm_table->mclk_table.count >= 2) &&
3284 ((rdev->pdev->device == 0x67B0) || (rdev->pdev->device == 0x67B1))) {
3285 pi->smc_state_table.MemoryLevel[1].MinVddc =
3286 pi->smc_state_table.MemoryLevel[0].MinVddc;
3287 pi->smc_state_table.MemoryLevel[1].MinVddcPhases =
3288 pi->smc_state_table.MemoryLevel[0].MinVddcPhases;
3289 }
3290
2866 pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F); 3291 pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);
2867 3292
2868 pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count; 3293 pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
@@ -2919,9 +3344,14 @@ static int ci_setup_default_pcie_tables(struct radeon_device *rdev)
2919 &pi->dpm_table.pcie_speed_table, 3344 &pi->dpm_table.pcie_speed_table,
2920 SMU7_MAX_LEVELS_LINK); 3345 SMU7_MAX_LEVELS_LINK);
2921 3346
2922 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0, 3347 if (rdev->family == CHIP_BONAIRE)
2923 pi->pcie_gen_powersaving.min, 3348 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
2924 pi->pcie_lane_powersaving.min); 3349 pi->pcie_gen_powersaving.min,
3350 pi->pcie_lane_powersaving.max);
3351 else
3352 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
3353 pi->pcie_gen_powersaving.min,
3354 pi->pcie_lane_powersaving.min);
2925 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1, 3355 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
2926 pi->pcie_gen_performance.min, 3356 pi->pcie_gen_performance.min,
2927 pi->pcie_lane_performance.min); 3357 pi->pcie_lane_performance.min);
@@ -2988,19 +3418,21 @@ static int ci_setup_default_dpm_tables(struct radeon_device *rdev)
2988 allowed_sclk_vddc_table->entries[i].clk)) { 3418 allowed_sclk_vddc_table->entries[i].clk)) {
2989 pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value = 3419 pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
2990 allowed_sclk_vddc_table->entries[i].clk; 3420 allowed_sclk_vddc_table->entries[i].clk;
2991 pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled = true; 3421 pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled =
3422 (i == 0) ? true : false;
2992 pi->dpm_table.sclk_table.count++; 3423 pi->dpm_table.sclk_table.count++;
2993 } 3424 }
2994 } 3425 }
2995 3426
2996 pi->dpm_table.mclk_table.count = 0; 3427 pi->dpm_table.mclk_table.count = 0;
2997 for (i = 0; i < allowed_mclk_table->count; i++) { 3428 for (i = 0; i < allowed_mclk_table->count; i++) {
2998 if ((i==0) || 3429 if ((i == 0) ||
2999 (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value != 3430 (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
3000 allowed_mclk_table->entries[i].clk)) { 3431 allowed_mclk_table->entries[i].clk)) {
3001 pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value = 3432 pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
3002 allowed_mclk_table->entries[i].clk; 3433 allowed_mclk_table->entries[i].clk;
3003 pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled = true; 3434 pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled =
3435 (i == 0) ? true : false;
3004 pi->dpm_table.mclk_table.count++; 3436 pi->dpm_table.mclk_table.count++;
3005 } 3437 }
3006 } 3438 }
@@ -3166,7 +3598,7 @@ static int ci_init_smc_table(struct radeon_device *rdev)
3166 table->VddcVddciDelta = 4000; 3598 table->VddcVddciDelta = 4000;
3167 table->PhaseResponseTime = 0; 3599 table->PhaseResponseTime = 0;
3168 table->MemoryThermThrottleEnable = 1; 3600 table->MemoryThermThrottleEnable = 1;
3169 table->PCIeBootLinkLevel = 0; 3601 table->PCIeBootLinkLevel = pi->dpm_table.pcie_speed_table.count - 1;
3170 table->PCIeGenInterval = 1; 3602 table->PCIeGenInterval = 1;
3171 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) 3603 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
3172 table->SVI2Enable = 1; 3604 table->SVI2Enable = 1;
@@ -3320,6 +3752,8 @@ static int ci_upload_dpm_level_enable_mask(struct radeon_device *rdev)
3320 struct ci_power_info *pi = ci_get_pi(rdev); 3752 struct ci_power_info *pi = ci_get_pi(rdev);
3321 PPSMC_Result result; 3753 PPSMC_Result result;
3322 3754
3755 ci_apply_disp_minimum_voltage_request(rdev);
3756
3323 if (!pi->sclk_dpm_key_disabled) { 3757 if (!pi->sclk_dpm_key_disabled) {
3324 if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) { 3758 if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3325 result = ci_send_msg_to_smc_with_parameter(rdev, 3759 result = ci_send_msg_to_smc_with_parameter(rdev,
@@ -3339,7 +3773,7 @@ static int ci_upload_dpm_level_enable_mask(struct radeon_device *rdev)
3339 return -EINVAL; 3773 return -EINVAL;
3340 } 3774 }
3341 } 3775 }
3342 3776#if 0
3343 if (!pi->pcie_dpm_key_disabled) { 3777 if (!pi->pcie_dpm_key_disabled) {
3344 if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) { 3778 if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3345 result = ci_send_msg_to_smc_with_parameter(rdev, 3779 result = ci_send_msg_to_smc_with_parameter(rdev,
@@ -3349,9 +3783,7 @@ static int ci_upload_dpm_level_enable_mask(struct radeon_device *rdev)
3349 return -EINVAL; 3783 return -EINVAL;
3350 } 3784 }
3351 } 3785 }
3352 3786#endif
3353 ci_apply_disp_minimum_voltage_request(rdev);
3354
3355 return 0; 3787 return 0;
3356} 3788}
3357 3789
@@ -3377,7 +3809,7 @@ static void ci_find_dpm_states_clocks_in_dpm_table(struct radeon_device *rdev,
3377 pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; 3809 pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
3378 } else { 3810 } else {
3379 /* XXX check display min clock requirements */ 3811 /* XXX check display min clock requirements */
3380 if (0 != CISLAND_MINIMUM_ENGINE_CLOCK) 3812 if (CISLAND_MINIMUM_ENGINE_CLOCK != CISLAND_MINIMUM_ENGINE_CLOCK)
3381 pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK; 3813 pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
3382 } 3814 }
3383 3815
@@ -3707,62 +4139,61 @@ int ci_dpm_force_performance_level(struct radeon_device *rdev,
3707 enum radeon_dpm_forced_level level) 4139 enum radeon_dpm_forced_level level)
3708{ 4140{
3709 struct ci_power_info *pi = ci_get_pi(rdev); 4141 struct ci_power_info *pi = ci_get_pi(rdev);
3710 PPSMC_Result smc_result;
3711 u32 tmp, levels, i; 4142 u32 tmp, levels, i;
3712 int ret; 4143 int ret;
3713 4144
3714 if (level == RADEON_DPM_FORCED_LEVEL_HIGH) { 4145 if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
3715 if ((!pi->sclk_dpm_key_disabled) && 4146 if ((!pi->pcie_dpm_key_disabled) &&
3716 pi->dpm_level_enable_mask.sclk_dpm_enable_mask) { 4147 pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3717 levels = 0; 4148 levels = 0;
3718 tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask; 4149 tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
3719 while (tmp >>= 1) 4150 while (tmp >>= 1)
3720 levels++; 4151 levels++;
3721 if (levels) { 4152 if (levels) {
3722 ret = ci_dpm_force_state_sclk(rdev, levels); 4153 ret = ci_dpm_force_state_pcie(rdev, level);
3723 if (ret) 4154 if (ret)
3724 return ret; 4155 return ret;
3725 for (i = 0; i < rdev->usec_timeout; i++) { 4156 for (i = 0; i < rdev->usec_timeout; i++) {
3726 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & 4157 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
3727 CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT; 4158 CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
3728 if (tmp == levels) 4159 if (tmp == levels)
3729 break; 4160 break;
3730 udelay(1); 4161 udelay(1);
3731 } 4162 }
3732 } 4163 }
3733 } 4164 }
3734 if ((!pi->mclk_dpm_key_disabled) && 4165 if ((!pi->sclk_dpm_key_disabled) &&
3735 pi->dpm_level_enable_mask.mclk_dpm_enable_mask) { 4166 pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3736 levels = 0; 4167 levels = 0;
3737 tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask; 4168 tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
3738 while (tmp >>= 1) 4169 while (tmp >>= 1)
3739 levels++; 4170 levels++;
3740 if (levels) { 4171 if (levels) {
3741 ret = ci_dpm_force_state_mclk(rdev, levels); 4172 ret = ci_dpm_force_state_sclk(rdev, levels);
3742 if (ret) 4173 if (ret)
3743 return ret; 4174 return ret;
3744 for (i = 0; i < rdev->usec_timeout; i++) { 4175 for (i = 0; i < rdev->usec_timeout; i++) {
3745 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) & 4176 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
3746 CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT; 4177 CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
3747 if (tmp == levels) 4178 if (tmp == levels)
3748 break; 4179 break;
3749 udelay(1); 4180 udelay(1);
3750 } 4181 }
3751 } 4182 }
3752 } 4183 }
3753 if ((!pi->pcie_dpm_key_disabled) && 4184 if ((!pi->mclk_dpm_key_disabled) &&
3754 pi->dpm_level_enable_mask.pcie_dpm_enable_mask) { 4185 pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3755 levels = 0; 4186 levels = 0;
3756 tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask; 4187 tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
3757 while (tmp >>= 1) 4188 while (tmp >>= 1)
3758 levels++; 4189 levels++;
3759 if (levels) { 4190 if (levels) {
3760 ret = ci_dpm_force_state_pcie(rdev, level); 4191 ret = ci_dpm_force_state_mclk(rdev, levels);
3761 if (ret) 4192 if (ret)
3762 return ret; 4193 return ret;
3763 for (i = 0; i < rdev->usec_timeout; i++) { 4194 for (i = 0; i < rdev->usec_timeout; i++) {
3764 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) & 4195 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
3765 CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT; 4196 CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
3766 if (tmp == levels) 4197 if (tmp == levels)
3767 break; 4198 break;
3768 udelay(1); 4199 udelay(1);
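Each branch of the reordered forced-high path above uses the same idiom: "while (tmp >>= 1) levels++;" reduces a DPM enable mask to the index of its highest set bit, which is the level then forced through the SMC (the "if (levels)" guard skips empty or single-level masks). The idiom on a few sample masks:

#include <stdint.h>
#include <stdio.h>

/* Index of the highest set bit; 0 for masks 0 and 1, which the driver's
 * "if (levels)" guard filters out before forcing anything. */
static uint32_t highest_level(uint32_t mask)
{
    uint32_t levels = 0;
    while (mask >>= 1)
        levels++;
    return levels;
}

int main(void)
{
    printf("0x01 -> %u\n", highest_level(0x01)); /* 0: guard skips it */
    printf("0x07 -> %u\n", highest_level(0x07)); /* 2: levels 0..2 enabled */
    printf("0x1f -> %u\n", highest_level(0x1f)); /* 4 */
    return 0;
}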
@@ -3816,21 +4247,17 @@ int ci_dpm_force_performance_level(struct radeon_device *rdev,
3816 } 4247 }
3817 } 4248 }
3818 } else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) { 4249 } else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
3819 if (!pi->sclk_dpm_key_disabled) {
3820 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_NoForcedLevel);
3821 if (smc_result != PPSMC_Result_OK)
3822 return -EINVAL;
3823 }
3824 if (!pi->mclk_dpm_key_disabled) {
3825 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_NoForcedLevel);
3826 if (smc_result != PPSMC_Result_OK)
3827 return -EINVAL;
3828 }
3829 if (!pi->pcie_dpm_key_disabled) { 4250 if (!pi->pcie_dpm_key_disabled) {
3830 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_UnForceLevel); 4251 PPSMC_Result smc_result;
4252
4253 smc_result = ci_send_msg_to_smc(rdev,
4254 PPSMC_MSG_PCIeDPM_UnForceLevel);
3831 if (smc_result != PPSMC_Result_OK) 4255 if (smc_result != PPSMC_Result_OK)
3832 return -EINVAL; 4256 return -EINVAL;
3833 } 4257 }
4258 ret = ci_upload_dpm_level_enable_mask(rdev);
4259 if (ret)
4260 return ret;
3834 } 4261 }
3835 4262
3836 rdev->pm.dpm.forced_level = level; 4263 rdev->pm.dpm.forced_level = level;
@@ -4036,6 +4463,96 @@ static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
4036 return 0; 4463 return 0;
4037} 4464}
4038 4465
4466static int ci_register_patching_mc_seq(struct radeon_device *rdev,
4467 struct ci_mc_reg_table *table)
4468{
4469 u8 i, k;
4470 u32 tmp;
4471 bool patch;
4472
4473 tmp = RREG32(MC_SEQ_MISC0);
4474 patch = ((tmp & 0x0000f00) == 0x300) ? true : false;
4475
4476 if (patch &&
4477 ((rdev->pdev->device == 0x67B0) ||
4478 (rdev->pdev->device == 0x67B1))) {
4479 for (i = 0; i < table->last; i++) {
4480 if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4481 return -EINVAL;
 4482 switch (table->mc_reg_address[i].s1 >> 2) {
4483 case MC_SEQ_MISC1:
4484 for (k = 0; k < table->num_entries; k++) {
4485 if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4486 (table->mc_reg_table_entry[k].mclk_max == 137500))
4487 table->mc_reg_table_entry[k].mc_data[i] =
4488 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFF8) |
4489 0x00000007;
4490 }
4491 break;
4492 case MC_SEQ_WR_CTL_D0:
4493 for (k = 0; k < table->num_entries; k++) {
4494 if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4495 (table->mc_reg_table_entry[k].mclk_max == 137500))
4496 table->mc_reg_table_entry[k].mc_data[i] =
4497 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
4498 0x0000D0DD;
4499 }
4500 break;
4501 case MC_SEQ_WR_CTL_D1:
4502 for (k = 0; k < table->num_entries; k++) {
4503 if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4504 (table->mc_reg_table_entry[k].mclk_max == 137500))
4505 table->mc_reg_table_entry[k].mc_data[i] =
4506 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
4507 0x0000D0DD;
4508 }
4509 break;
4510 case MC_SEQ_WR_CTL_2:
4511 for (k = 0; k < table->num_entries; k++) {
4512 if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4513 (table->mc_reg_table_entry[k].mclk_max == 137500))
4514 table->mc_reg_table_entry[k].mc_data[i] = 0;
4515 }
4516 break;
4517 case MC_SEQ_CAS_TIMING:
4518 for (k = 0; k < table->num_entries; k++) {
4519 if (table->mc_reg_table_entry[k].mclk_max == 125000)
4520 table->mc_reg_table_entry[k].mc_data[i] =
4521 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
4522 0x000C0140;
4523 else if (table->mc_reg_table_entry[k].mclk_max == 137500)
4524 table->mc_reg_table_entry[k].mc_data[i] =
4525 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
4526 0x000C0150;
4527 }
4528 break;
4529 case MC_SEQ_MISC_TIMING:
4530 for (k = 0; k < table->num_entries; k++) {
4531 if (table->mc_reg_table_entry[k].mclk_max == 125000)
4532 table->mc_reg_table_entry[k].mc_data[i] =
4533 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
4534 0x00000030;
4535 else if (table->mc_reg_table_entry[k].mclk_max == 137500)
4536 table->mc_reg_table_entry[k].mc_data[i] =
4537 (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
4538 0x00000035;
4539 }
4540 break;
4541 default:
4542 break;
4543 }
4544 }
4545
4546 WREG32(MC_SEQ_IO_DEBUG_INDEX, 3);
4547 tmp = RREG32(MC_SEQ_IO_DEBUG_DATA);
4548 tmp = (tmp & 0xFFF8FFFF) | (1 << 16);
4549 WREG32(MC_SEQ_IO_DEBUG_INDEX, 3);
4550 WREG32(MC_SEQ_IO_DEBUG_DATA, tmp);
4551 }
4552
4553 return 0;
4554}
4555
4039static int ci_initialize_mc_reg_table(struct radeon_device *rdev) 4556static int ci_initialize_mc_reg_table(struct radeon_device *rdev)
4040{ 4557{
4041 struct ci_power_info *pi = ci_get_pi(rdev); 4558 struct ci_power_info *pi = ci_get_pi(rdev);
@@ -4079,6 +4596,10 @@ static int ci_initialize_mc_reg_table(struct radeon_device *rdev)
4079 4596
4080 ci_set_s0_mc_reg_index(ci_table); 4597 ci_set_s0_mc_reg_index(ci_table);
4081 4598
4599 ret = ci_register_patching_mc_seq(rdev, ci_table);
4600 if (ret)
4601 goto init_mc_done;
4602
4082 ret = ci_set_mc_special_registers(rdev, ci_table); 4603 ret = ci_set_mc_special_registers(rdev, ci_table);
4083 if (ret) 4604 if (ret)
4084 goto init_mc_done; 4605 goto init_mc_done;
@@ -4675,36 +5196,51 @@ int ci_dpm_enable(struct radeon_device *rdev)
4675 return ret; 5196 return ret;
4676 } 5197 }
4677 5198
5199 ret = ci_power_control_set_level(rdev);
5200 if (ret) {
5201 DRM_ERROR("ci_power_control_set_level failed\n");
5202 return ret;
5203 }
5204
4678 ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true); 5205 ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
4679 5206
5207 ret = ci_enable_thermal_based_sclk_dpm(rdev, true);
5208 if (ret) {
5209 DRM_ERROR("ci_enable_thermal_based_sclk_dpm failed\n");
5210 return ret;
5211 }
5212
5213 ci_thermal_start_thermal_controller(rdev);
5214
4680 ci_update_current_ps(rdev, boot_ps); 5215 ci_update_current_ps(rdev, boot_ps);
4681 5216
4682 return 0; 5217 return 0;
4683} 5218}
4684 5219
4685int ci_dpm_late_enable(struct radeon_device *rdev) 5220static int ci_set_temperature_range(struct radeon_device *rdev)
4686{ 5221{
4687 int ret; 5222 int ret;
4688 5223
4689 if (rdev->irq.installed && 5224 ret = ci_thermal_enable_alert(rdev, false);
4690 r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { 5225 if (ret)
4691#if 0 5226 return ret;
4692 PPSMC_Result result; 5227 ret = ci_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
4693#endif 5228 if (ret)
4694 ret = ci_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); 5229 return ret;
4695 if (ret) { 5230 ret = ci_thermal_enable_alert(rdev, true);
4696 DRM_ERROR("ci_set_thermal_temperature_range failed\n"); 5231 if (ret)
4697 return ret; 5232 return ret;
4698 }
4699 rdev->irq.dpm_thermal = true;
4700 radeon_irq_set(rdev);
4701#if 0
4702 result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);
4703 5233
4704 if (result != PPSMC_Result_OK) 5234 return ret;
4705 DRM_DEBUG_KMS("Could not enable thermal interrupts.\n"); 5235}
4706#endif 5236
4707 } 5237int ci_dpm_late_enable(struct radeon_device *rdev)
5238{
5239 int ret;
5240
5241 ret = ci_set_temperature_range(rdev);
5242 if (ret)
5243 return ret;
4708 5244
4709 ci_dpm_powergate_uvd(rdev, true); 5245 ci_dpm_powergate_uvd(rdev, true);
4710 5246
@@ -4721,6 +5257,8 @@ void ci_dpm_disable(struct radeon_device *rdev)
4721 if (!ci_is_smc_running(rdev)) 5257 if (!ci_is_smc_running(rdev))
4722 return; 5258 return;
4723 5259
5260 ci_thermal_stop_thermal_controller(rdev);
5261
4724 if (pi->thermal_protection) 5262 if (pi->thermal_protection)
4725 ci_enable_thermal_protection(rdev, false); 5263 ci_enable_thermal_protection(rdev, false);
4726 ci_enable_power_containment(rdev, false); 5264 ci_enable_power_containment(rdev, false);
@@ -4729,12 +5267,13 @@ void ci_dpm_disable(struct radeon_device *rdev)
4729 ci_enable_spread_spectrum(rdev, false); 5267 ci_enable_spread_spectrum(rdev, false);
4730 ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false); 5268 ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
4731 ci_stop_dpm(rdev); 5269 ci_stop_dpm(rdev);
4732 ci_enable_ds_master_switch(rdev, true); 5270 ci_enable_ds_master_switch(rdev, false);
4733 ci_enable_ulv(rdev, false); 5271 ci_enable_ulv(rdev, false);
4734 ci_clear_vc(rdev); 5272 ci_clear_vc(rdev);
4735 ci_reset_to_default(rdev); 5273 ci_reset_to_default(rdev);
4736 ci_dpm_stop_smc(rdev); 5274 ci_dpm_stop_smc(rdev);
4737 ci_force_switch_to_arb_f0(rdev); 5275 ci_force_switch_to_arb_f0(rdev);
5276 ci_enable_thermal_based_sclk_dpm(rdev, false);
4738 5277
4739 ci_update_current_ps(rdev, boot_ps); 5278 ci_update_current_ps(rdev, boot_ps);
4740} 5279}
@@ -4804,11 +5343,6 @@ int ci_dpm_set_power_state(struct radeon_device *rdev)
4804 return 0; 5343 return 0;
4805} 5344}
4806 5345
4807int ci_dpm_power_control_set_level(struct radeon_device *rdev)
4808{
4809 return ci_power_control_set_level(rdev);
4810}
4811
4812void ci_dpm_reset_asic(struct radeon_device *rdev) 5346void ci_dpm_reset_asic(struct radeon_device *rdev)
4813{ 5347{
4814 ci_set_boot_state(rdev); 5348 ci_set_boot_state(rdev);
@@ -5068,6 +5602,8 @@ void ci_dpm_fini(struct radeon_device *rdev)
5068int ci_dpm_init(struct radeon_device *rdev) 5602int ci_dpm_init(struct radeon_device *rdev)
5069{ 5603{
5070 int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); 5604 int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
5605 SMU7_Discrete_DpmTable *dpm_table;
5606 struct radeon_gpio_rec gpio;
5071 u16 data_offset, size; 5607 u16 data_offset, size;
5072 u8 frev, crev; 5608 u8 frev, crev;
5073 struct ci_power_info *pi; 5609 struct ci_power_info *pi;
@@ -5137,6 +5673,7 @@ int ci_dpm_init(struct radeon_device *rdev)
5137 pi->sclk_dpm_key_disabled = 0; 5673 pi->sclk_dpm_key_disabled = 0;
5138 pi->mclk_dpm_key_disabled = 0; 5674 pi->mclk_dpm_key_disabled = 0;
5139 pi->pcie_dpm_key_disabled = 0; 5675 pi->pcie_dpm_key_disabled = 0;
5676 pi->thermal_sclk_dpm_enabled = 0;
5140 5677
5141 /* mclk dpm is unstable on some R7 260X cards with the old mc ucode */ 5678 /* mclk dpm is unstable on some R7 260X cards with the old mc ucode */
5142 if ((rdev->pdev->device == 0x6658) && 5679 if ((rdev->pdev->device == 0x6658) &&
@@ -5201,6 +5738,55 @@ int ci_dpm_init(struct radeon_device *rdev)
5201 5738
5202 pi->uvd_enabled = false; 5739 pi->uvd_enabled = false;
5203 5740
5741 dpm_table = &pi->smc_state_table;
5742
5743 gpio = radeon_atombios_lookup_gpio(rdev, VDDC_VRHOT_GPIO_PINID);
5744 if (gpio.valid) {
5745 dpm_table->VRHotGpio = gpio.shift;
5746 rdev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
5747 } else {
5748 dpm_table->VRHotGpio = CISLANDS_UNUSED_GPIO_PIN;
5749 rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
5750 }
5751
5752 gpio = radeon_atombios_lookup_gpio(rdev, PP_AC_DC_SWITCH_GPIO_PINID);
5753 if (gpio.valid) {
5754 dpm_table->AcDcGpio = gpio.shift;
5755 rdev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_HARDWAREDC;
5756 } else {
5757 dpm_table->AcDcGpio = CISLANDS_UNUSED_GPIO_PIN;
5758 rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_HARDWAREDC;
5759 }
5760
5761 gpio = radeon_atombios_lookup_gpio(rdev, VDDC_PCC_GPIO_PINID);
5762 if (gpio.valid) {
5763 u32 tmp = RREG32_SMC(CNB_PWRMGT_CNTL);
5764
5765 switch (gpio.shift) {
5766 case 0:
5767 tmp &= ~GNB_SLOW_MODE_MASK;
5768 tmp |= GNB_SLOW_MODE(1);
5769 break;
5770 case 1:
5771 tmp &= ~GNB_SLOW_MODE_MASK;
5772 tmp |= GNB_SLOW_MODE(2);
5773 break;
5774 case 2:
5775 tmp |= GNB_SLOW;
5776 break;
5777 case 3:
5778 tmp |= FORCE_NB_PS1;
5779 break;
5780 case 4:
5781 tmp |= DPM_ENABLED;
5782 break;
5783 default:
5784 DRM_ERROR("Invalid PCC GPIO: %u!\n", gpio.shift);
5785 break;
5786 }
5787 WREG32_SMC(CNB_PWRMGT_CNTL, tmp);
5788 }
5789
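The new VDDC_PCC GPIO block in ci_dpm_init() maps the pin number reported by the VBIOS onto one of several behaviour bits in CNB_PWRMGT_CNTL. A pure-function version of the switch, with placeholder bit layouts rather than the real register definition:

#include <stdint.h>
#include <stdio.h>

/* Placeholder layout for CNB_PWRMGT_CNTL fields. */
#define GNB_SLOW_MODE_MASK (3u << 0)
#define GNB_SLOW_MODE(x)   ((uint32_t)(x) << 0)
#define GNB_SLOW           (1u << 2)
#define FORCE_NB_PS1       (1u << 3)
#define DPM_ENABLED        (1u << 4)

static uint32_t apply_pcc_gpio(uint32_t tmp, unsigned int shift)
{
    switch (shift) {
    case 0: tmp = (tmp & ~GNB_SLOW_MODE_MASK) | GNB_SLOW_MODE(1); break;
    case 1: tmp = (tmp & ~GNB_SLOW_MODE_MASK) | GNB_SLOW_MODE(2); break;
    case 2: tmp |= GNB_SLOW;     break;
    case 3: tmp |= FORCE_NB_PS1; break;
    case 4: tmp |= DPM_ENABLED;  break;
    default:
        fprintf(stderr, "Invalid PCC GPIO: %u!\n", shift);
        break;
    }
    return tmp;
}

int main(void)
{
    printf("pin 0 -> 0x%08x\n", apply_pcc_gpio(0, 0));
    printf("pin 3 -> 0x%08x\n", apply_pcc_gpio(0, 3));
    return 0;
}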
5204 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE; 5790 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5205 pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE; 5791 pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5206 pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE; 5792 pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
@@ -5262,6 +5848,8 @@ int ci_dpm_init(struct radeon_device *rdev)
5262 rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc = 5848 rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
5263 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; 5849 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
5264 5850
5851 pi->fan_ctrl_is_in_default_mode = true;
5852
5265 return 0; 5853 return 0;
5266} 5854}
5267 5855
diff --git a/drivers/gpu/drm/radeon/ci_dpm.h b/drivers/gpu/drm/radeon/ci_dpm.h
index 93bbed977ffb..84e3d3bcf9f3 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.h
+++ b/drivers/gpu/drm/radeon/ci_dpm.h
@@ -33,6 +33,8 @@
33 33
34#define CISLANDS_MAX_HARDWARE_POWERLEVELS 2 34#define CISLANDS_MAX_HARDWARE_POWERLEVELS 2
35 35
36#define CISLANDS_UNUSED_GPIO_PIN 0x7F
37
36struct ci_pl { 38struct ci_pl {
37 u32 mclk; 39 u32 mclk;
38 u32 sclk; 40 u32 sclk;
@@ -237,6 +239,7 @@ struct ci_power_info {
237 u32 sclk_dpm_key_disabled; 239 u32 sclk_dpm_key_disabled;
238 u32 mclk_dpm_key_disabled; 240 u32 mclk_dpm_key_disabled;
239 u32 pcie_dpm_key_disabled; 241 u32 pcie_dpm_key_disabled;
242 u32 thermal_sclk_dpm_enabled;
240 struct ci_pcie_perf_range pcie_gen_performance; 243 struct ci_pcie_perf_range pcie_gen_performance;
241 struct ci_pcie_perf_range pcie_lane_performance; 244 struct ci_pcie_perf_range pcie_lane_performance;
242 struct ci_pcie_perf_range pcie_gen_powersaving; 245 struct ci_pcie_perf_range pcie_gen_powersaving;
@@ -264,6 +267,7 @@ struct ci_power_info {
264 bool caps_automatic_dc_transition; 267 bool caps_automatic_dc_transition;
265 bool caps_sclk_throttle_low_notification; 268 bool caps_sclk_throttle_low_notification;
266 bool caps_dynamic_ac_timing; 269 bool caps_dynamic_ac_timing;
270 bool caps_od_fuzzy_fan_control_support;
267 /* flags */ 271 /* flags */
268 bool thermal_protection; 272 bool thermal_protection;
269 bool pcie_performance_request; 273 bool pcie_performance_request;
@@ -285,6 +289,10 @@ struct ci_power_info {
285 struct ci_ps current_ps; 289 struct ci_ps current_ps;
286 struct radeon_ps requested_rps; 290 struct radeon_ps requested_rps;
287 struct ci_ps requested_ps; 291 struct ci_ps requested_ps;
292 /* fan control */
293 bool fan_ctrl_is_in_default_mode;
294 u32 t_min;
295 u32 fan_ctrl_default_mode;
288}; 296};
289 297
290#define CISLANDS_VOLTAGE_CONTROL_NONE 0x0 298#define CISLANDS_VOLTAGE_CONTROL_NONE 0x0
diff --git a/drivers/gpu/drm/radeon/ci_smc.c b/drivers/gpu/drm/radeon/ci_smc.c
index b630edc2fd0c..e78bcad7a43e 100644
--- a/drivers/gpu/drm/radeon/ci_smc.c
+++ b/drivers/gpu/drm/radeon/ci_smc.c
@@ -129,7 +129,7 @@ void ci_reset_smc(struct radeon_device *rdev)
129 129
130int ci_program_jump_on_start(struct radeon_device *rdev) 130int ci_program_jump_on_start(struct radeon_device *rdev)
131{ 131{
132 static u8 data[] = { 0xE0, 0x00, 0x80, 0x40 }; 132 static const u8 data[] = { 0xE0, 0x00, 0x80, 0x40 };
133 133
134 return ci_copy_bytes_to_smc(rdev, 0x0, data, 4, sizeof(data)+1); 134 return ci_copy_bytes_to_smc(rdev, 0x0, data, 4, sizeof(data)+1);
135} 135}
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 89c01fa6dd8e..6dcde3798b45 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -32,6 +32,7 @@
32#include "cik_blit_shaders.h" 32#include "cik_blit_shaders.h"
33#include "radeon_ucode.h" 33#include "radeon_ucode.h"
34#include "clearstate_ci.h" 34#include "clearstate_ci.h"
35#include "radeon_kfd.h"
35 36
36MODULE_FIRMWARE("radeon/BONAIRE_pfp.bin"); 37MODULE_FIRMWARE("radeon/BONAIRE_pfp.bin");
37MODULE_FIRMWARE("radeon/BONAIRE_me.bin"); 38MODULE_FIRMWARE("radeon/BONAIRE_me.bin");
@@ -1563,6 +1564,8 @@ static const u32 godavari_golden_registers[] =
1563 1564
1564static void cik_init_golden_registers(struct radeon_device *rdev) 1565static void cik_init_golden_registers(struct radeon_device *rdev)
1565{ 1566{
1567 /* Some of the registers might be dependent on GRBM_GFX_INDEX */
1568 mutex_lock(&rdev->grbm_idx_mutex);
1566 switch (rdev->family) { 1569 switch (rdev->family) {
1567 case CHIP_BONAIRE: 1570 case CHIP_BONAIRE:
1568 radeon_program_register_sequence(rdev, 1571 radeon_program_register_sequence(rdev,
@@ -1637,6 +1640,7 @@ static void cik_init_golden_registers(struct radeon_device *rdev)
1637 default: 1640 default:
1638 break; 1641 break;
1639 } 1642 }
1643 mutex_unlock(&rdev->grbm_idx_mutex);
1640} 1644}
1641 1645
1642/** 1646/**
@@ -1806,7 +1810,7 @@ int ci_mc_load_microcode(struct radeon_device *rdev)
1806{ 1810{
1807 const __be32 *fw_data = NULL; 1811 const __be32 *fw_data = NULL;
1808 const __le32 *new_fw_data = NULL; 1812 const __le32 *new_fw_data = NULL;
1809 u32 running, blackout = 0; 1813 u32 running, blackout = 0, tmp;
1810 u32 *io_mc_regs = NULL; 1814 u32 *io_mc_regs = NULL;
1811 const __le32 *new_io_mc_regs = NULL; 1815 const __le32 *new_io_mc_regs = NULL;
1812 int i, regs_size, ucode_size; 1816 int i, regs_size, ucode_size;
@@ -1866,6 +1870,15 @@ int ci_mc_load_microcode(struct radeon_device *rdev)
1866 WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]); 1870 WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
1867 } 1871 }
1868 } 1872 }
1873
1874 tmp = RREG32(MC_SEQ_MISC0);
1875 if ((rdev->pdev->device == 0x6649) && ((tmp & 0xff00) == 0x5600)) {
1876 WREG32(MC_SEQ_IO_DEBUG_INDEX, 5);
1877 WREG32(MC_SEQ_IO_DEBUG_DATA, 0x00000023);
1878 WREG32(MC_SEQ_IO_DEBUG_INDEX, 9);
1879 WREG32(MC_SEQ_IO_DEBUG_DATA, 0x000001f0);
1880 }
1881
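The MC workaround above goes through an index/data register pair: MC_SEQ_IO_DEBUG_INDEX selects a sequencer slot and MC_SEQ_IO_DEBUG_DATA reads or writes it. The access pattern modeled standalone, with a plain array in place of the hardware bank:

#include <stdint.h>
#include <stdio.h>

static uint32_t mc_seq_bank[16]; /* stand-in for the sequencer slots */
static uint32_t mc_seq_index;

static void wreg_index(uint32_t idx) { mc_seq_index = idx & 0xf; }
static void wreg_data(uint32_t val)  { mc_seq_bank[mc_seq_index] = val; }
static uint32_t rreg_data(void)      { return mc_seq_bank[mc_seq_index]; }

int main(void)
{
    /* Same shape as the ucode workaround: select slot 5, write a value,
     * then slot 9. */
    wreg_index(5);
    wreg_data(0x00000023);
    wreg_index(9);
    wreg_data(0x000001f0);

    wreg_index(5);
    printf("slot 5 = 0x%08x\n", rreg_data());
    return 0;
}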
1869 /* load the MC ucode */ 1882 /* load the MC ucode */
1870 for (i = 0; i < ucode_size; i++) { 1883 for (i = 0; i < ucode_size; i++) {
1871 if (rdev->new_fw) 1884 if (rdev->new_fw)
@@ -3419,6 +3432,7 @@ static void cik_setup_rb(struct radeon_device *rdev,
3419 u32 disabled_rbs = 0; 3432 u32 disabled_rbs = 0;
3420 u32 enabled_rbs = 0; 3433 u32 enabled_rbs = 0;
3421 3434
3435 mutex_lock(&rdev->grbm_idx_mutex);
3422 for (i = 0; i < se_num; i++) { 3436 for (i = 0; i < se_num; i++) {
3423 for (j = 0; j < sh_per_se; j++) { 3437 for (j = 0; j < sh_per_se; j++) {
3424 cik_select_se_sh(rdev, i, j); 3438 cik_select_se_sh(rdev, i, j);
@@ -3430,6 +3444,7 @@ static void cik_setup_rb(struct radeon_device *rdev,
3430 } 3444 }
3431 } 3445 }
3432 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff); 3446 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
3447 mutex_unlock(&rdev->grbm_idx_mutex);
3433 3448
3434 mask = 1; 3449 mask = 1;
3435 for (i = 0; i < max_rb_num_per_se * se_num; i++) { 3450 for (i = 0; i < max_rb_num_per_se * se_num; i++) {
@@ -3440,6 +3455,7 @@ static void cik_setup_rb(struct radeon_device *rdev,
3440 3455
3441 rdev->config.cik.backend_enable_mask = enabled_rbs; 3456 rdev->config.cik.backend_enable_mask = enabled_rbs;
3442 3457
3458 mutex_lock(&rdev->grbm_idx_mutex);
3443 for (i = 0; i < se_num; i++) { 3459 for (i = 0; i < se_num; i++) {
3444 cik_select_se_sh(rdev, i, 0xffffffff); 3460 cik_select_se_sh(rdev, i, 0xffffffff);
3445 data = 0; 3461 data = 0;
@@ -3467,6 +3483,7 @@ static void cik_setup_rb(struct radeon_device *rdev,
3467 WREG32(PA_SC_RASTER_CONFIG, data); 3483 WREG32(PA_SC_RASTER_CONFIG, data);
3468 } 3484 }
3469 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff); 3485 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
3486 mutex_unlock(&rdev->grbm_idx_mutex);
3470} 3487}
3471 3488
3472/** 3489/**
@@ -3684,6 +3701,12 @@ static void cik_gpu_init(struct radeon_device *rdev)
3684 /* set HW defaults for 3D engine */ 3701 /* set HW defaults for 3D engine */
3685 WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60)); 3702 WREG32(CP_MEQ_THRESHOLDS, MEQ1_START(0x30) | MEQ2_START(0x60));
3686 3703
3704 mutex_lock(&rdev->grbm_idx_mutex);
3705 /*
 3706 * making sure that the following register writes will be broadcast
3707 * to all the shaders
3708 */
3709 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
3687 WREG32(SX_DEBUG_1, 0x20); 3710 WREG32(SX_DEBUG_1, 0x20);
3688 3711
3689 WREG32(TA_CNTL_AUX, 0x00010000); 3712 WREG32(TA_CNTL_AUX, 0x00010000);
@@ -3739,6 +3762,7 @@ static void cik_gpu_init(struct radeon_device *rdev)
3739 3762
3740 WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3)); 3763 WREG32(PA_CL_ENHANCE, CLIP_VTX_REORDER_ENA | NUM_CLIP_SEQ(3));
3741 WREG32(PA_SC_ENHANCE, ENABLE_PA_SC_OUT_OF_ORDER); 3764 WREG32(PA_SC_ENHANCE, ENABLE_PA_SC_OUT_OF_ORDER);
3765 mutex_unlock(&rdev->grbm_idx_mutex);
3742 3766
3743 udelay(50); 3767 udelay(50);
3744} 3768}
@@ -3970,31 +3994,27 @@ struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev,
3970 unsigned num_gpu_pages, 3994 unsigned num_gpu_pages,
3971 struct reservation_object *resv) 3995 struct reservation_object *resv)
3972{ 3996{
3973 struct radeon_semaphore *sem = NULL;
3974 struct radeon_fence *fence; 3997 struct radeon_fence *fence;
3998 struct radeon_sync sync;
3975 int ring_index = rdev->asic->copy.blit_ring_index; 3999 int ring_index = rdev->asic->copy.blit_ring_index;
3976 struct radeon_ring *ring = &rdev->ring[ring_index]; 4000 struct radeon_ring *ring = &rdev->ring[ring_index];
3977 u32 size_in_bytes, cur_size_in_bytes, control; 4001 u32 size_in_bytes, cur_size_in_bytes, control;
3978 int i, num_loops; 4002 int i, num_loops;
3979 int r = 0; 4003 int r = 0;
3980 4004
3981 r = radeon_semaphore_create(rdev, &sem); 4005 radeon_sync_create(&sync);
3982 if (r) {
3983 DRM_ERROR("radeon: moving bo (%d).\n", r);
3984 return ERR_PTR(r);
3985 }
3986 4006
3987 size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT); 4007 size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
3988 num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff); 4008 num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
3989 r = radeon_ring_lock(rdev, ring, num_loops * 7 + 18); 4009 r = radeon_ring_lock(rdev, ring, num_loops * 7 + 18);
3990 if (r) { 4010 if (r) {
3991 DRM_ERROR("radeon: moving bo (%d).\n", r); 4011 DRM_ERROR("radeon: moving bo (%d).\n", r);
3992 radeon_semaphore_free(rdev, &sem, NULL); 4012 radeon_sync_free(rdev, &sync, NULL);
3993 return ERR_PTR(r); 4013 return ERR_PTR(r);
3994 } 4014 }
3995 4015
3996 radeon_semaphore_sync_resv(rdev, sem, resv, false); 4016 radeon_sync_resv(rdev, &sync, resv, false);
3997 radeon_semaphore_sync_rings(rdev, sem, ring->idx); 4017 radeon_sync_rings(rdev, &sync, ring->idx);
3998 4018
3999 for (i = 0; i < num_loops; i++) { 4019 for (i = 0; i < num_loops; i++) {
4000 cur_size_in_bytes = size_in_bytes; 4020 cur_size_in_bytes = size_in_bytes;
@@ -4018,12 +4038,12 @@ struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev,
4018 r = radeon_fence_emit(rdev, &fence, ring->idx); 4038 r = radeon_fence_emit(rdev, &fence, ring->idx);
4019 if (r) { 4039 if (r) {
4020 radeon_ring_unlock_undo(rdev, ring); 4040 radeon_ring_unlock_undo(rdev, ring);
4021 radeon_semaphore_free(rdev, &sem, NULL); 4041 radeon_sync_free(rdev, &sync, NULL);
4022 return ERR_PTR(r); 4042 return ERR_PTR(r);
4023 } 4043 }
4024 4044
4025 radeon_ring_unlock_commit(rdev, ring, false); 4045 radeon_ring_unlock_commit(rdev, ring, false);
4026 radeon_semaphore_free(rdev, &sem, fence); 4046 radeon_sync_free(rdev, &sync, fence);
4027 4047
4028 return fence; 4048 return fence;
4029} 4049}
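
This is the first of several identical conversions from radeon_semaphore to the new radeon_sync object (cik_sdma.c and evergreen_dma.c below get the same treatment). radeon_sync_create() initializes a caller-owned structure instead of allocating, so the early allocation-failure path disappears entirely. The resulting shape, as a hedged sketch assembled only from the calls shown in these hunks (ndw stands in for the ring-space estimate):

    struct radeon_sync sync;
    struct radeon_fence *fence;
    int r;

    radeon_sync_create(&sync);                  /* plain init, cannot fail */

    r = radeon_ring_lock(rdev, ring, ndw);
    if (r) {
        radeon_sync_free(rdev, &sync, NULL);
        return ERR_PTR(r);
    }

    radeon_sync_resv(rdev, &sync, resv, false); /* gather fences from the reservation object */
    radeon_sync_rings(rdev, &sync, ring->idx);  /* emit the waits on this ring */

    /* ... emit the copy packets, then radeon_fence_emit(rdev, &fence, ring->idx) ... */

    radeon_ring_unlock_commit(rdev, ring, false);
    radeon_sync_free(rdev, &sync, fence);       /* fence keeps the sync objects alive */
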
@@ -4046,6 +4066,7 @@ struct radeon_fence *cik_copy_cpdma(struct radeon_device *rdev,
4046void cik_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) 4066void cik_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
4047{ 4067{
4048 struct radeon_ring *ring = &rdev->ring[ib->ring]; 4068 struct radeon_ring *ring = &rdev->ring[ib->ring];
4069 unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
4049 u32 header, control = INDIRECT_BUFFER_VALID; 4070 u32 header, control = INDIRECT_BUFFER_VALID;
4050 4071
4051 if (ib->is_const_ib) { 4072 if (ib->is_const_ib) {
@@ -4074,8 +4095,7 @@ void cik_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
4074 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); 4095 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
4075 } 4096 }
4076 4097
4077 control |= ib->length_dw | 4098 control |= ib->length_dw | (vm_id << 24);
4078 (ib->vm ? (ib->vm->id << 24) : 0);
4079 4099
4080 radeon_ring_write(ring, header); 4100 radeon_ring_write(ring, header);
4081 radeon_ring_write(ring, 4101 radeon_ring_write(ring,
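
An IB no longer carries a single VM id; with per-ring VMIDs the id is looked up at emit time from the ring-indexed slot and packed into the INDIRECT_BUFFER control word exactly as before. The lookup and packing, restated from this hunk:

    /* 0 selects the system VM when the IB has no VM attached. */
    unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;

    control |= ib->length_dw | (vm_id << 24);  /* dword count low, VMID in the high bits */
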
@@ -4675,12 +4695,11 @@ static int cik_mec_init(struct radeon_device *rdev)
4675 /* 4695 /*
4676 * KV: 2 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 64 Queues total 4696 * KV: 2 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 64 Queues total
4677 * CI/KB: 1 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 32 Queues total 4697 * CI/KB: 1 MEC, 4 Pipes/MEC, 8 Queues/Pipe - 32 Queues total
4698 * Nonetheless, we assign only 1 pipe because all other pipes will
4699 * be handled by KFD
4678 */ 4700 */
4679 if (rdev->family == CHIP_KAVERI) 4701 rdev->mec.num_mec = 1;
4680 rdev->mec.num_mec = 2; 4702 rdev->mec.num_pipe = 1;
4681 else
4682 rdev->mec.num_mec = 1;
4683 rdev->mec.num_pipe = 4;
4684 rdev->mec.num_queue = rdev->mec.num_mec * rdev->mec.num_pipe * 8; 4703 rdev->mec.num_queue = rdev->mec.num_mec * rdev->mec.num_pipe * 8;
4685 4704
4686 if (rdev->mec.hpd_eop_obj == NULL) { 4705 if (rdev->mec.hpd_eop_obj == NULL) {
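
radeon now claims a single MEC and a single pipe for itself and leaves every other compute pipe to amdkfd, regardless of ASIC. The queue arithmetic that falls out of the new values, using the 8-queues-per-pipe figure from the comment above:

    rdev->mec.num_mec  = 1;
    rdev->mec.num_pipe = 1;
    /* 1 MEC * 1 pipe * 8 queues/pipe = 8 queues owned by radeon.
     * On Kaveri (64 queues total) the remaining 56 belong to amdkfd. */
    rdev->mec.num_queue = rdev->mec.num_mec * rdev->mec.num_pipe * 8;
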
@@ -4822,28 +4841,24 @@ static int cik_cp_compute_resume(struct radeon_device *rdev)
4822 4841
4823 /* init the pipes */ 4842 /* init the pipes */
4824 mutex_lock(&rdev->srbm_mutex); 4843 mutex_lock(&rdev->srbm_mutex);
4825 for (i = 0; i < (rdev->mec.num_pipe * rdev->mec.num_mec); i++) {
4826 int me = (i < 4) ? 1 : 2;
4827 int pipe = (i < 4) ? i : (i - 4);
4828 4844
4829 eop_gpu_addr = rdev->mec.hpd_eop_gpu_addr + (i * MEC_HPD_SIZE * 2); 4845 eop_gpu_addr = rdev->mec.hpd_eop_gpu_addr;
4830 4846
4831 cik_srbm_select(rdev, me, pipe, 0, 0); 4847 cik_srbm_select(rdev, 0, 0, 0, 0);
4832 4848
4833 /* write the EOP addr */ 4849 /* write the EOP addr */
4834 WREG32(CP_HPD_EOP_BASE_ADDR, eop_gpu_addr >> 8); 4850 WREG32(CP_HPD_EOP_BASE_ADDR, eop_gpu_addr >> 8);
4835 WREG32(CP_HPD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr) >> 8); 4851 WREG32(CP_HPD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr) >> 8);
4836 4852
4837 /* set the VMID assigned */ 4853 /* set the VMID assigned */
4838 WREG32(CP_HPD_EOP_VMID, 0); 4854 WREG32(CP_HPD_EOP_VMID, 0);
4855
4856 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
4857 tmp = RREG32(CP_HPD_EOP_CONTROL);
4858 tmp &= ~EOP_SIZE_MASK;
4859 tmp |= order_base_2(MEC_HPD_SIZE / 8);
4860 WREG32(CP_HPD_EOP_CONTROL, tmp);
4839 4861
4840 /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
4841 tmp = RREG32(CP_HPD_EOP_CONTROL);
4842 tmp &= ~EOP_SIZE_MASK;
4843 tmp |= order_base_2(MEC_HPD_SIZE / 8);
4844 WREG32(CP_HPD_EOP_CONTROL, tmp);
4845 }
4846 cik_srbm_select(rdev, 0, 0, 0, 0);
4847 mutex_unlock(&rdev->srbm_mutex); 4862 mutex_unlock(&rdev->srbm_mutex);
4848 4863
4849 /* init the queues. Just two for now. */ 4864 /* init the queues. Just two for now. */
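
With only one MEC/pipe left to initialize, the loop over (me, pipe) pairs collapses to a single cik_srbm_select(rdev, 0, 0, 0, 0) and one EOP buffer. The EOP_SIZE field encodes the buffer as 2^(EOP_SIZE+1) dwords, and order_base_2(MEC_HPD_SIZE / 8) produces exactly that; a hedged arithmetic check, assuming a 4096-byte MEC_HPD_SIZE (the actual value is not shown in this hunk):

    /* 4096 bytes == 1024 dwords.
     * order_base_2(4096 / 8) == order_base_2(512) == 9,
     * and 2^(9+1) == 1024 dwords -- consistent with the register definition. */
    tmp = RREG32(CP_HQD_EOP_CONTROL);
    tmp &= ~EOP_SIZE_MASK;
    tmp |= order_base_2(MEC_HPD_SIZE / 8);
    WREG32(CP_HQD_EOP_CONTROL, tmp);
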
@@ -5897,8 +5912,13 @@ int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
5897 */ 5912 */
5898int cik_vm_init(struct radeon_device *rdev) 5913int cik_vm_init(struct radeon_device *rdev)
5899{ 5914{
5900 /* number of VMs */ 5915 /*
5901 rdev->vm_manager.nvm = 16; 5916 * number of VMs
5917 * VMID 0 is reserved for System
5918 * radeon graphics/compute will use VMIDs 1-7
5919 * amdkfd will use VMIDs 8-15
5920 */
5921 rdev->vm_manager.nvm = RADEON_NUM_OF_VMIDS;
5902 /* base offset of vram pages */ 5922 /* base offset of vram pages */
5903 if (rdev->flags & RADEON_IS_IGP) { 5923 if (rdev->flags & RADEON_IS_IGP) {
5904 u64 tmp = RREG32(MC_VM_FB_OFFSET); 5924 u64 tmp = RREG32(MC_VM_FB_OFFSET);
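
The bare 16 becomes a named constant and the split is spelled out: VMID 0 is the system context, radeon hands out 1-7, and amdkfd owns 8-15. RADEON_NUM_OF_VMIDS is defined as 8 in the cikd.h hunk further down, so the graphics VM manager now rotates only through its own half. A sketch of the convention as a range check (the helper name is hypothetical, not from this series):

    static bool vmid_owned_by_radeon(unsigned vm_id)  /* hypothetical helper */
    {
        /* 0 is the system VMID; 8..15 are amdkfd's. */
        return vm_id >= 1 && vm_id < RADEON_NUM_OF_VMIDS;  /* 1..7 */
    }
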
@@ -5958,26 +5978,23 @@ static void cik_vm_decode_fault(struct radeon_device *rdev,
5958 * Update the page table base and flush the VM TLB 5978 * Update the page table base and flush the VM TLB
5959 * using the CP (CIK). 5979 * using the CP (CIK).
5960 */ 5980 */
5961void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) 5981void cik_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
5982 unsigned vm_id, uint64_t pd_addr)
5962{ 5983{
5963 struct radeon_ring *ring = &rdev->ring[ridx]; 5984 int usepfp = (ring->idx == RADEON_RING_TYPE_GFX_INDEX);
5964 int usepfp = (ridx == RADEON_RING_TYPE_GFX_INDEX);
5965
5966 if (vm == NULL)
5967 return;
5968 5985
5969 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 5986 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5970 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | 5987 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
5971 WRITE_DATA_DST_SEL(0))); 5988 WRITE_DATA_DST_SEL(0)));
5972 if (vm->id < 8) { 5989 if (vm_id < 8) {
5973 radeon_ring_write(ring, 5990 radeon_ring_write(ring,
5974 (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2); 5991 (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2);
5975 } else { 5992 } else {
5976 radeon_ring_write(ring, 5993 radeon_ring_write(ring,
5977 (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2); 5994 (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2);
5978 } 5995 }
5979 radeon_ring_write(ring, 0); 5996 radeon_ring_write(ring, 0);
5980 radeon_ring_write(ring, vm->pd_gpu_addr >> 12); 5997 radeon_ring_write(ring, pd_addr >> 12);
5981 5998
5982 /* update SH_MEM_* regs */ 5999 /* update SH_MEM_* regs */
5983 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 6000 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
@@ -5985,7 +6002,7 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
5985 WRITE_DATA_DST_SEL(0))); 6002 WRITE_DATA_DST_SEL(0)));
5986 radeon_ring_write(ring, SRBM_GFX_CNTL >> 2); 6003 radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
5987 radeon_ring_write(ring, 0); 6004 radeon_ring_write(ring, 0);
5988 radeon_ring_write(ring, VMID(vm->id)); 6005 radeon_ring_write(ring, VMID(vm_id));
5989 6006
5990 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 6)); 6007 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 6));
5991 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) | 6008 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
@@ -6006,7 +6023,7 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
6006 radeon_ring_write(ring, VMID(0)); 6023 radeon_ring_write(ring, VMID(0));
6007 6024
6008 /* HDP flush */ 6025 /* HDP flush */
6009 cik_hdp_flush_cp_ring_emit(rdev, ridx); 6026 cik_hdp_flush_cp_ring_emit(rdev, ring->idx);
6010 6027
6011 /* bits 0-15 are the VM contexts0-15 */ 6028 /* bits 0-15 are the VM contexts0-15 */
6012 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 6029 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
@@ -6014,7 +6031,7 @@ void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
6014 WRITE_DATA_DST_SEL(0))); 6031 WRITE_DATA_DST_SEL(0)));
6015 radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); 6032 radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
6016 radeon_ring_write(ring, 0); 6033 radeon_ring_write(ring, 0);
6017 radeon_ring_write(ring, 1 << vm->id); 6034 radeon_ring_write(ring, 1 << vm_id);
6018 6035
6019 /* compute doesn't have PFP */ 6036 /* compute doesn't have PFP */
6020 if (usepfp) { 6037 if (usepfp) {
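
cik_vm_flush() now takes the ring, the VMID, and the page-directory address directly instead of dereferencing a radeon_vm, which drops the NULL check and lets callers flush any per-ring VMID, including ones radeon did not allocate. The page-table base registers sit in two banks, so the offset computation forks on the VMID range; restated as a hedged sketch (vm_flush_fn is a hypothetical name for the callback type):

    typedef void (*vm_flush_fn)(struct radeon_device *rdev, struct radeon_ring *ring,
                                unsigned vm_id, uint64_t pd_addr);

    /* Contexts 0-7 and 8-15 live in separate register banks: */
    u32 reg = (vm_id < 8)
        ? (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2
        : (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2;
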
@@ -6059,6 +6076,7 @@ static void cik_wait_for_rlc_serdes(struct radeon_device *rdev)
6059 u32 i, j, k; 6076 u32 i, j, k;
6060 u32 mask; 6077 u32 mask;
6061 6078
6079 mutex_lock(&rdev->grbm_idx_mutex);
6062 for (i = 0; i < rdev->config.cik.max_shader_engines; i++) { 6080 for (i = 0; i < rdev->config.cik.max_shader_engines; i++) {
6063 for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) { 6081 for (j = 0; j < rdev->config.cik.max_sh_per_se; j++) {
6064 cik_select_se_sh(rdev, i, j); 6082 cik_select_se_sh(rdev, i, j);
@@ -6070,6 +6088,7 @@ static void cik_wait_for_rlc_serdes(struct radeon_device *rdev)
6070 } 6088 }
6071 } 6089 }
6072 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff); 6090 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
6091 mutex_unlock(&rdev->grbm_idx_mutex);
6073 6092
6074 mask = SE_MASTER_BUSY_MASK | GC_MASTER_BUSY | TC0_MASTER_BUSY | TC1_MASTER_BUSY; 6093 mask = SE_MASTER_BUSY_MASK | GC_MASTER_BUSY | TC0_MASTER_BUSY | TC1_MASTER_BUSY;
6075 for (k = 0; k < rdev->usec_timeout; k++) { 6094 for (k = 0; k < rdev->usec_timeout; k++) {
@@ -6204,10 +6223,12 @@ static int cik_rlc_resume(struct radeon_device *rdev)
6204 WREG32(RLC_LB_CNTR_INIT, 0); 6223 WREG32(RLC_LB_CNTR_INIT, 0);
6205 WREG32(RLC_LB_CNTR_MAX, 0x00008000); 6224 WREG32(RLC_LB_CNTR_MAX, 0x00008000);
6206 6225
6226 mutex_lock(&rdev->grbm_idx_mutex);
6207 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff); 6227 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
6208 WREG32(RLC_LB_INIT_CU_MASK, 0xffffffff); 6228 WREG32(RLC_LB_INIT_CU_MASK, 0xffffffff);
6209 WREG32(RLC_LB_PARAMS, 0x00600408); 6229 WREG32(RLC_LB_PARAMS, 0x00600408);
6210 WREG32(RLC_LB_CNTL, 0x80000004); 6230 WREG32(RLC_LB_CNTL, 0x80000004);
6231 mutex_unlock(&rdev->grbm_idx_mutex);
6211 6232
6212 WREG32(RLC_MC_CNTL, 0); 6233 WREG32(RLC_MC_CNTL, 0);
6213 WREG32(RLC_UCODE_CNTL, 0); 6234 WREG32(RLC_UCODE_CNTL, 0);
@@ -6274,11 +6295,13 @@ static void cik_enable_cgcg(struct radeon_device *rdev, bool enable)
6274 6295
6275 tmp = cik_halt_rlc(rdev); 6296 tmp = cik_halt_rlc(rdev);
6276 6297
6298 mutex_lock(&rdev->grbm_idx_mutex);
6277 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff); 6299 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
6278 WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); 6300 WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
6279 WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); 6301 WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
6280 tmp2 = BPM_ADDR_MASK | CGCG_OVERRIDE_0 | CGLS_ENABLE; 6302 tmp2 = BPM_ADDR_MASK | CGCG_OVERRIDE_0 | CGLS_ENABLE;
6281 WREG32(RLC_SERDES_WR_CTRL, tmp2); 6303 WREG32(RLC_SERDES_WR_CTRL, tmp2);
6304 mutex_unlock(&rdev->grbm_idx_mutex);
6282 6305
6283 cik_update_rlc(rdev, tmp); 6306 cik_update_rlc(rdev, tmp);
6284 6307
@@ -6314,17 +6337,20 @@ static void cik_enable_mgcg(struct radeon_device *rdev, bool enable)
6314 } 6337 }
6315 6338
6316 orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE); 6339 orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
6340 data |= 0x00000001;
6317 data &= 0xfffffffd; 6341 data &= 0xfffffffd;
6318 if (orig != data) 6342 if (orig != data)
6319 WREG32(RLC_CGTT_MGCG_OVERRIDE, data); 6343 WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
6320 6344
6321 tmp = cik_halt_rlc(rdev); 6345 tmp = cik_halt_rlc(rdev);
6322 6346
6347 mutex_lock(&rdev->grbm_idx_mutex);
6323 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff); 6348 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
6324 WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); 6349 WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
6325 WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); 6350 WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
6326 data = BPM_ADDR_MASK | MGCG_OVERRIDE_0; 6351 data = BPM_ADDR_MASK | MGCG_OVERRIDE_0;
6327 WREG32(RLC_SERDES_WR_CTRL, data); 6352 WREG32(RLC_SERDES_WR_CTRL, data);
6353 mutex_unlock(&rdev->grbm_idx_mutex);
6328 6354
6329 cik_update_rlc(rdev, tmp); 6355 cik_update_rlc(rdev, tmp);
6330 6356
@@ -6345,7 +6371,7 @@ static void cik_enable_mgcg(struct radeon_device *rdev, bool enable)
6345 } 6371 }
6346 } else { 6372 } else {
6347 orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE); 6373 orig = data = RREG32(RLC_CGTT_MGCG_OVERRIDE);
6348 data |= 0x00000002; 6374 data |= 0x00000003;
6349 if (orig != data) 6375 if (orig != data)
6350 WREG32(RLC_CGTT_MGCG_OVERRIDE, data); 6376 WREG32(RLC_CGTT_MGCG_OVERRIDE, data);
6351 6377
@@ -6368,11 +6394,13 @@ static void cik_enable_mgcg(struct radeon_device *rdev, bool enable)
6368 6394
6369 tmp = cik_halt_rlc(rdev); 6395 tmp = cik_halt_rlc(rdev);
6370 6396
6397 mutex_lock(&rdev->grbm_idx_mutex);
6371 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff); 6398 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
6372 WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); 6399 WREG32(RLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
6373 WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); 6400 WREG32(RLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
6374 data = BPM_ADDR_MASK | MGCG_OVERRIDE_1; 6401 data = BPM_ADDR_MASK | MGCG_OVERRIDE_1;
6375 WREG32(RLC_SERDES_WR_CTRL, data); 6402 WREG32(RLC_SERDES_WR_CTRL, data);
6403 mutex_unlock(&rdev->grbm_idx_mutex);
6376 6404
6377 cik_update_rlc(rdev, tmp); 6405 cik_update_rlc(rdev, tmp);
6378 } 6406 }
@@ -6801,10 +6829,12 @@ static u32 cik_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh)
6801 u32 mask = 0, tmp, tmp1; 6829 u32 mask = 0, tmp, tmp1;
6802 int i; 6830 int i;
6803 6831
6832 mutex_lock(&rdev->grbm_idx_mutex);
6804 cik_select_se_sh(rdev, se, sh); 6833 cik_select_se_sh(rdev, se, sh);
6805 tmp = RREG32(CC_GC_SHADER_ARRAY_CONFIG); 6834 tmp = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
6806 tmp1 = RREG32(GC_USER_SHADER_ARRAY_CONFIG); 6835 tmp1 = RREG32(GC_USER_SHADER_ARRAY_CONFIG);
6807 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff); 6836 cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
6837 mutex_unlock(&rdev->grbm_idx_mutex);
6808 6838
6809 tmp &= 0xffff0000; 6839 tmp &= 0xffff0000;
6810 6840
@@ -7288,8 +7318,7 @@ static int cik_irq_init(struct radeon_device *rdev)
7288int cik_irq_set(struct radeon_device *rdev) 7318int cik_irq_set(struct radeon_device *rdev)
7289{ 7319{
7290 u32 cp_int_cntl; 7320 u32 cp_int_cntl;
7291 u32 cp_m1p0, cp_m1p1, cp_m1p2, cp_m1p3; 7321 u32 cp_m1p0;
7292 u32 cp_m2p0, cp_m2p1, cp_m2p2, cp_m2p3;
7293 u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; 7322 u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
7294 u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6; 7323 u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
7295 u32 grbm_int_cntl = 0; 7324 u32 grbm_int_cntl = 0;
@@ -7323,13 +7352,6 @@ int cik_irq_set(struct radeon_device *rdev)
7323 dma_cntl1 = RREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET) & ~TRAP_ENABLE; 7352 dma_cntl1 = RREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
7324 7353
7325 cp_m1p0 = RREG32(CP_ME1_PIPE0_INT_CNTL) & ~TIME_STAMP_INT_ENABLE; 7354 cp_m1p0 = RREG32(CP_ME1_PIPE0_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
7326 cp_m1p1 = RREG32(CP_ME1_PIPE1_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
7327 cp_m1p2 = RREG32(CP_ME1_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
7328 cp_m1p3 = RREG32(CP_ME1_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
7329 cp_m2p0 = RREG32(CP_ME2_PIPE0_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
7330 cp_m2p1 = RREG32(CP_ME2_PIPE1_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
7331 cp_m2p2 = RREG32(CP_ME2_PIPE2_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
7332 cp_m2p3 = RREG32(CP_ME2_PIPE3_INT_CNTL) & ~TIME_STAMP_INT_ENABLE;
7333 7355
7334 if (rdev->flags & RADEON_IS_IGP) 7356 if (rdev->flags & RADEON_IS_IGP)
7335 thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL) & 7357 thermal_int = RREG32_SMC(CG_THERMAL_INT_CTRL) &
@@ -7351,33 +7373,6 @@ int cik_irq_set(struct radeon_device *rdev)
7351 case 0: 7373 case 0:
7352 cp_m1p0 |= TIME_STAMP_INT_ENABLE; 7374 cp_m1p0 |= TIME_STAMP_INT_ENABLE;
7353 break; 7375 break;
7354 case 1:
7355 cp_m1p1 |= TIME_STAMP_INT_ENABLE;
7356 break;
7357 case 2:
7358 cp_m1p2 |= TIME_STAMP_INT_ENABLE;
7359 break;
7360 case 3:
7361 cp_m1p2 |= TIME_STAMP_INT_ENABLE;
7362 break;
7363 default:
7364 DRM_DEBUG("si_irq_set: sw int cp1 invalid pipe %d\n", ring->pipe);
7365 break;
7366 }
7367 } else if (ring->me == 2) {
7368 switch (ring->pipe) {
7369 case 0:
7370 cp_m2p0 |= TIME_STAMP_INT_ENABLE;
7371 break;
7372 case 1:
7373 cp_m2p1 |= TIME_STAMP_INT_ENABLE;
7374 break;
7375 case 2:
7376 cp_m2p2 |= TIME_STAMP_INT_ENABLE;
7377 break;
7378 case 3:
7379 cp_m2p2 |= TIME_STAMP_INT_ENABLE;
7380 break;
7381 default: 7376 default:
7382 DRM_DEBUG("si_irq_set: sw int cp1 invalid pipe %d\n", ring->pipe); 7377 DRM_DEBUG("si_irq_set: sw int cp1 invalid pipe %d\n", ring->pipe);
7383 break; 7378 break;
@@ -7394,33 +7389,6 @@ int cik_irq_set(struct radeon_device *rdev)
7394 case 0: 7389 case 0:
7395 cp_m1p0 |= TIME_STAMP_INT_ENABLE; 7390 cp_m1p0 |= TIME_STAMP_INT_ENABLE;
7396 break; 7391 break;
7397 case 1:
7398 cp_m1p1 |= TIME_STAMP_INT_ENABLE;
7399 break;
7400 case 2:
7401 cp_m1p2 |= TIME_STAMP_INT_ENABLE;
7402 break;
7403 case 3:
7404 cp_m1p2 |= TIME_STAMP_INT_ENABLE;
7405 break;
7406 default:
7407 DRM_DEBUG("si_irq_set: sw int cp2 invalid pipe %d\n", ring->pipe);
7408 break;
7409 }
7410 } else if (ring->me == 2) {
7411 switch (ring->pipe) {
7412 case 0:
7413 cp_m2p0 |= TIME_STAMP_INT_ENABLE;
7414 break;
7415 case 1:
7416 cp_m2p1 |= TIME_STAMP_INT_ENABLE;
7417 break;
7418 case 2:
7419 cp_m2p2 |= TIME_STAMP_INT_ENABLE;
7420 break;
7421 case 3:
7422 cp_m2p2 |= TIME_STAMP_INT_ENABLE;
7423 break;
7424 default: 7392 default:
7425 DRM_DEBUG("si_irq_set: sw int cp2 invalid pipe %d\n", ring->pipe); 7393 DRM_DEBUG("si_irq_set: sw int cp2 invalid pipe %d\n", ring->pipe);
7426 break; 7394 break;
@@ -7509,13 +7477,6 @@ int cik_irq_set(struct radeon_device *rdev)
7509 WREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET, dma_cntl1); 7477 WREG32(SDMA0_CNTL + SDMA1_REGISTER_OFFSET, dma_cntl1);
7510 7478
7511 WREG32(CP_ME1_PIPE0_INT_CNTL, cp_m1p0); 7479 WREG32(CP_ME1_PIPE0_INT_CNTL, cp_m1p0);
7512 WREG32(CP_ME1_PIPE1_INT_CNTL, cp_m1p1);
7513 WREG32(CP_ME1_PIPE2_INT_CNTL, cp_m1p2);
7514 WREG32(CP_ME1_PIPE3_INT_CNTL, cp_m1p3);
7515 WREG32(CP_ME2_PIPE0_INT_CNTL, cp_m2p0);
7516 WREG32(CP_ME2_PIPE1_INT_CNTL, cp_m2p1);
7517 WREG32(CP_ME2_PIPE2_INT_CNTL, cp_m2p2);
7518 WREG32(CP_ME2_PIPE3_INT_CNTL, cp_m2p3);
7519 7480
7520 WREG32(GRBM_INT_CNTL, grbm_int_cntl); 7481 WREG32(GRBM_INT_CNTL, grbm_int_cntl);
7521 7482
@@ -7832,6 +7793,10 @@ restart_ih:
7832 while (rptr != wptr) { 7793 while (rptr != wptr) {
7833 /* wptr/rptr are in bytes! */ 7794 /* wptr/rptr are in bytes! */
7834 ring_index = rptr / 4; 7795 ring_index = rptr / 4;
7796
7797 radeon_kfd_interrupt(rdev,
7798 (const void *) &rdev->ih.ring[ring_index]);
7799
7835 src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff; 7800 src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
7836 src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff; 7801 src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
7837 ring_id = le32_to_cpu(rdev->ih.ring[ring_index + 2]) & 0xff; 7802 ring_id = le32_to_cpu(rdev->ih.ring[ring_index + 2]) & 0xff;
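
Every IH ring entry is now offered to amdkfd before radeon decodes it, so interrupts raised by KFD-owned compute queues reach their consumer. Only the call site is visible here; it implies a prototype along these lines (an inference, not quoted from the series):

    void radeon_kfd_interrupt(struct radeon_device *rdev,
                              const void *ih_ring_entry);
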
@@ -8521,6 +8486,10 @@ static int cik_startup(struct radeon_device *rdev)
8521 if (r) 8486 if (r)
8522 return r; 8487 return r;
8523 8488
8489 r = radeon_kfd_resume(rdev);
8490 if (r)
8491 return r;
8492
8524 return 0; 8493 return 0;
8525} 8494}
8526 8495
@@ -8569,6 +8538,7 @@ int cik_resume(struct radeon_device *rdev)
8569 */ 8538 */
8570int cik_suspend(struct radeon_device *rdev) 8539int cik_suspend(struct radeon_device *rdev)
8571{ 8540{
8541 radeon_kfd_suspend(rdev);
8572 radeon_pm_suspend(rdev); 8542 radeon_pm_suspend(rdev);
8573 dce6_audio_fini(rdev); 8543 dce6_audio_fini(rdev);
8574 radeon_vm_manager_fini(rdev); 8544 radeon_vm_manager_fini(rdev);
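
The KFD hooks bracket the power transitions: radeon_kfd_resume() is the last step of cik_startup(), after the rings, IH, and VM manager are live, while radeon_kfd_suspend() runs first in cik_suspend(), before power management and the VM manager are torn down. The resulting ordering, with unrelated calls elided:

    static int cik_startup(struct radeon_device *rdev)
    {
        /* ... rings, IH, VM manager brought up first ... */
        return radeon_kfd_resume(rdev);  /* KFD last: it depends on all of the above */
    }

    int cik_suspend(struct radeon_device *rdev)
    {
        radeon_kfd_suspend(rdev);        /* KFD first: quiesce its queues before PM */
        radeon_pm_suspend(rdev);
        /* ... */
    }
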
diff --git a/drivers/gpu/drm/radeon/cik_reg.h b/drivers/gpu/drm/radeon/cik_reg.h
index ca1bb6133580..79c45e8a536b 100644
--- a/drivers/gpu/drm/radeon/cik_reg.h
+++ b/drivers/gpu/drm/radeon/cik_reg.h
@@ -147,4 +147,140 @@
147 147
148#define CIK_LB_DESKTOP_HEIGHT 0x6b0c 148#define CIK_LB_DESKTOP_HEIGHT 0x6b0c
149 149
150#define CP_HQD_IQ_RPTR 0xC970u
151#define AQL_ENABLE (1U << 0)
152
153#define IDLE (1 << 2)
154
155struct cik_mqd {
156 uint32_t header;
157 uint32_t compute_dispatch_initiator;
158 uint32_t compute_dim_x;
159 uint32_t compute_dim_y;
160 uint32_t compute_dim_z;
161 uint32_t compute_start_x;
162 uint32_t compute_start_y;
163 uint32_t compute_start_z;
164 uint32_t compute_num_thread_x;
165 uint32_t compute_num_thread_y;
166 uint32_t compute_num_thread_z;
167 uint32_t compute_pipelinestat_enable;
168 uint32_t compute_perfcount_enable;
169 uint32_t compute_pgm_lo;
170 uint32_t compute_pgm_hi;
171 uint32_t compute_tba_lo;
172 uint32_t compute_tba_hi;
173 uint32_t compute_tma_lo;
174 uint32_t compute_tma_hi;
175 uint32_t compute_pgm_rsrc1;
176 uint32_t compute_pgm_rsrc2;
177 uint32_t compute_vmid;
178 uint32_t compute_resource_limits;
179 uint32_t compute_static_thread_mgmt_se0;
180 uint32_t compute_static_thread_mgmt_se1;
181 uint32_t compute_tmpring_size;
182 uint32_t compute_static_thread_mgmt_se2;
183 uint32_t compute_static_thread_mgmt_se3;
184 uint32_t compute_restart_x;
185 uint32_t compute_restart_y;
186 uint32_t compute_restart_z;
187 uint32_t compute_thread_trace_enable;
188 uint32_t compute_misc_reserved;
189 uint32_t compute_user_data_0;
190 uint32_t compute_user_data_1;
191 uint32_t compute_user_data_2;
192 uint32_t compute_user_data_3;
193 uint32_t compute_user_data_4;
194 uint32_t compute_user_data_5;
195 uint32_t compute_user_data_6;
196 uint32_t compute_user_data_7;
197 uint32_t compute_user_data_8;
198 uint32_t compute_user_data_9;
199 uint32_t compute_user_data_10;
200 uint32_t compute_user_data_11;
201 uint32_t compute_user_data_12;
202 uint32_t compute_user_data_13;
203 uint32_t compute_user_data_14;
204 uint32_t compute_user_data_15;
205 uint32_t cp_compute_csinvoc_count_lo;
206 uint32_t cp_compute_csinvoc_count_hi;
207 uint32_t cp_mqd_base_addr_lo;
208 uint32_t cp_mqd_base_addr_hi;
209 uint32_t cp_hqd_active;
210 uint32_t cp_hqd_vmid;
211 uint32_t cp_hqd_persistent_state;
212 uint32_t cp_hqd_pipe_priority;
213 uint32_t cp_hqd_queue_priority;
214 uint32_t cp_hqd_quantum;
215 uint32_t cp_hqd_pq_base_lo;
216 uint32_t cp_hqd_pq_base_hi;
217 uint32_t cp_hqd_pq_rptr;
218 uint32_t cp_hqd_pq_rptr_report_addr_lo;
219 uint32_t cp_hqd_pq_rptr_report_addr_hi;
220 uint32_t cp_hqd_pq_wptr_poll_addr_lo;
221 uint32_t cp_hqd_pq_wptr_poll_addr_hi;
222 uint32_t cp_hqd_pq_doorbell_control;
223 uint32_t cp_hqd_pq_wptr;
224 uint32_t cp_hqd_pq_control;
225 uint32_t cp_hqd_ib_base_addr_lo;
226 uint32_t cp_hqd_ib_base_addr_hi;
227 uint32_t cp_hqd_ib_rptr;
228 uint32_t cp_hqd_ib_control;
229 uint32_t cp_hqd_iq_timer;
230 uint32_t cp_hqd_iq_rptr;
231 uint32_t cp_hqd_dequeue_request;
232 uint32_t cp_hqd_dma_offload;
233 uint32_t cp_hqd_sema_cmd;
234 uint32_t cp_hqd_msg_type;
235 uint32_t cp_hqd_atomic0_preop_lo;
236 uint32_t cp_hqd_atomic0_preop_hi;
237 uint32_t cp_hqd_atomic1_preop_lo;
238 uint32_t cp_hqd_atomic1_preop_hi;
239 uint32_t cp_hqd_hq_status0;
240 uint32_t cp_hqd_hq_control0;
241 uint32_t cp_mqd_control;
242 uint32_t cp_mqd_query_time_lo;
243 uint32_t cp_mqd_query_time_hi;
244 uint32_t cp_mqd_connect_start_time_lo;
245 uint32_t cp_mqd_connect_start_time_hi;
246 uint32_t cp_mqd_connect_end_time_lo;
247 uint32_t cp_mqd_connect_end_time_hi;
248 uint32_t cp_mqd_connect_end_wf_count;
249 uint32_t cp_mqd_connect_end_pq_rptr;
250 uint32_t cp_mqd_connect_end_pq_wptr;
251 uint32_t cp_mqd_connect_end_ib_rptr;
252 uint32_t reserved_96;
253 uint32_t reserved_97;
254 uint32_t reserved_98;
255 uint32_t reserved_99;
256 uint32_t iqtimer_pkt_header;
257 uint32_t iqtimer_pkt_dw0;
258 uint32_t iqtimer_pkt_dw1;
259 uint32_t iqtimer_pkt_dw2;
260 uint32_t iqtimer_pkt_dw3;
261 uint32_t iqtimer_pkt_dw4;
262 uint32_t iqtimer_pkt_dw5;
263 uint32_t iqtimer_pkt_dw6;
264 uint32_t reserved_108;
265 uint32_t reserved_109;
266 uint32_t reserved_110;
267 uint32_t reserved_111;
268 uint32_t queue_doorbell_id0;
269 uint32_t queue_doorbell_id1;
270 uint32_t queue_doorbell_id2;
271 uint32_t queue_doorbell_id3;
272 uint32_t queue_doorbell_id4;
273 uint32_t queue_doorbell_id5;
274 uint32_t queue_doorbell_id6;
275 uint32_t queue_doorbell_id7;
276 uint32_t queue_doorbell_id8;
277 uint32_t queue_doorbell_id9;
278 uint32_t queue_doorbell_id10;
279 uint32_t queue_doorbell_id11;
280 uint32_t queue_doorbell_id12;
281 uint32_t queue_doorbell_id13;
282 uint32_t queue_doorbell_id14;
283 uint32_t queue_doorbell_id15;
284};
285
150#endif 286#endif
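
struct cik_mqd is the in-memory image of a compute queue: the CP loads the CP_HQD_* register file for a hardware queue descriptor from it, so the field order mirrors the registers, reserved slots included. A hedged sketch of priming a few fields before handing the MQD's GPU address to the CP (mqd_cpu_ptr, mqd_gpu_addr, and wptr_addr are hypothetical names for this illustration):

    struct cik_mqd *m = mqd_cpu_ptr;     /* hypothetical CPU mapping of the MQD BO */

    memset(m, 0, sizeof(*m));
    m->cp_mqd_base_addr_lo = lower_32_bits(mqd_gpu_addr);
    m->cp_mqd_base_addr_hi = upper_32_bits(mqd_gpu_addr);
    m->cp_hqd_vmid = 0;                  /* kernel queues run in the system VMID */
    m->cp_hqd_pq_wptr_poll_addr_lo = lower_32_bits(wptr_addr);
    m->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wptr_addr);
    m->cp_hqd_active = 1;                /* mark the queue schedulable */
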
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index d748963af08b..dde5c7e29eb2 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -134,7 +134,7 @@ void cik_sdma_ring_ib_execute(struct radeon_device *rdev,
134 struct radeon_ib *ib) 134 struct radeon_ib *ib)
135{ 135{
136 struct radeon_ring *ring = &rdev->ring[ib->ring]; 136 struct radeon_ring *ring = &rdev->ring[ib->ring];
137 u32 extra_bits = (ib->vm ? ib->vm->id : 0) & 0xf; 137 u32 extra_bits = (ib->vm ? ib->vm->ids[ib->ring].id : 0) & 0xf;
138 138
139 if (rdev->wb.enabled) { 139 if (rdev->wb.enabled) {
140 u32 next_rptr = ring->wptr + 5; 140 u32 next_rptr = ring->wptr + 5;
@@ -541,31 +541,27 @@ struct radeon_fence *cik_copy_dma(struct radeon_device *rdev,
541 unsigned num_gpu_pages, 541 unsigned num_gpu_pages,
542 struct reservation_object *resv) 542 struct reservation_object *resv)
543{ 543{
544 struct radeon_semaphore *sem = NULL;
545 struct radeon_fence *fence; 544 struct radeon_fence *fence;
545 struct radeon_sync sync;
546 int ring_index = rdev->asic->copy.dma_ring_index; 546 int ring_index = rdev->asic->copy.dma_ring_index;
547 struct radeon_ring *ring = &rdev->ring[ring_index]; 547 struct radeon_ring *ring = &rdev->ring[ring_index];
548 u32 size_in_bytes, cur_size_in_bytes; 548 u32 size_in_bytes, cur_size_in_bytes;
549 int i, num_loops; 549 int i, num_loops;
550 int r = 0; 550 int r = 0;
551 551
552 r = radeon_semaphore_create(rdev, &sem); 552 radeon_sync_create(&sync);
553 if (r) {
554 DRM_ERROR("radeon: moving bo (%d).\n", r);
555 return ERR_PTR(r);
556 }
557 553
558 size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT); 554 size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
559 num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff); 555 num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
560 r = radeon_ring_lock(rdev, ring, num_loops * 7 + 14); 556 r = radeon_ring_lock(rdev, ring, num_loops * 7 + 14);
561 if (r) { 557 if (r) {
562 DRM_ERROR("radeon: moving bo (%d).\n", r); 558 DRM_ERROR("radeon: moving bo (%d).\n", r);
563 radeon_semaphore_free(rdev, &sem, NULL); 559 radeon_sync_free(rdev, &sync, NULL);
564 return ERR_PTR(r); 560 return ERR_PTR(r);
565 } 561 }
566 562
567 radeon_semaphore_sync_resv(rdev, sem, resv, false); 563 radeon_sync_resv(rdev, &sync, resv, false);
568 radeon_semaphore_sync_rings(rdev, sem, ring->idx); 564 radeon_sync_rings(rdev, &sync, ring->idx);
569 565
570 for (i = 0; i < num_loops; i++) { 566 for (i = 0; i < num_loops; i++) {
571 cur_size_in_bytes = size_in_bytes; 567 cur_size_in_bytes = size_in_bytes;
@@ -586,12 +582,12 @@ struct radeon_fence *cik_copy_dma(struct radeon_device *rdev,
586 r = radeon_fence_emit(rdev, &fence, ring->idx); 582 r = radeon_fence_emit(rdev, &fence, ring->idx);
587 if (r) { 583 if (r) {
588 radeon_ring_unlock_undo(rdev, ring); 584 radeon_ring_unlock_undo(rdev, ring);
589 radeon_semaphore_free(rdev, &sem, NULL); 585 radeon_sync_free(rdev, &sync, NULL);
590 return ERR_PTR(r); 586 return ERR_PTR(r);
591 } 587 }
592 588
593 radeon_ring_unlock_commit(rdev, ring, false); 589 radeon_ring_unlock_commit(rdev, ring, false);
594 radeon_semaphore_free(rdev, &sem, fence); 590 radeon_sync_free(rdev, &sync, fence);
595 591
596 return fence; 592 return fence;
597} 593}
@@ -904,25 +900,21 @@ void cik_sdma_vm_pad_ib(struct radeon_ib *ib)
904 * Update the page table base and flush the VM TLB 900 * Update the page table base and flush the VM TLB
905 * using sDMA (CIK). 901 * using sDMA (CIK).
906 */ 902 */
907void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) 903void cik_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
904 unsigned vm_id, uint64_t pd_addr)
908{ 905{
909 struct radeon_ring *ring = &rdev->ring[ridx];
910
911 if (vm == NULL)
912 return;
913
914 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); 906 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
915 if (vm->id < 8) { 907 if (vm_id < 8) {
916 radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2); 908 radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2);
917 } else { 909 } else {
918 radeon_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2); 910 radeon_ring_write(ring, (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2);
919 } 911 }
920 radeon_ring_write(ring, vm->pd_gpu_addr >> 12); 912 radeon_ring_write(ring, pd_addr >> 12);
921 913
922 /* update SH_MEM_* regs */ 914 /* update SH_MEM_* regs */
923 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); 915 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
924 radeon_ring_write(ring, SRBM_GFX_CNTL >> 2); 916 radeon_ring_write(ring, SRBM_GFX_CNTL >> 2);
925 radeon_ring_write(ring, VMID(vm->id)); 917 radeon_ring_write(ring, VMID(vm_id));
926 918
927 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); 919 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
928 radeon_ring_write(ring, SH_MEM_BASES >> 2); 920 radeon_ring_write(ring, SH_MEM_BASES >> 2);
@@ -945,11 +937,11 @@ void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm
945 radeon_ring_write(ring, VMID(0)); 937 radeon_ring_write(ring, VMID(0));
946 938
947 /* flush HDP */ 939 /* flush HDP */
948 cik_sdma_hdp_flush_ring_emit(rdev, ridx); 940 cik_sdma_hdp_flush_ring_emit(rdev, ring->idx);
949 941
950 /* flush TLB */ 942 /* flush TLB */
951 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); 943 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
952 radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); 944 radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
953 radeon_ring_write(ring, 1 << vm->id); 945 radeon_ring_write(ring, 1 << vm_id);
954} 946}
955 947
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index 0c6e1b55d968..ba85986febea 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -30,6 +30,8 @@
30#define CIK_RB_BITMAP_WIDTH_PER_SH 2 30#define CIK_RB_BITMAP_WIDTH_PER_SH 2
31#define HAWAII_RB_BITMAP_WIDTH_PER_SH 4 31#define HAWAII_RB_BITMAP_WIDTH_PER_SH 4
32 32
33#define RADEON_NUM_OF_VMIDS 8
34
33/* DIDT IND registers */ 35/* DIDT IND registers */
34#define DIDT_SQ_CTRL0 0x0 36#define DIDT_SQ_CTRL0 0x0
35# define DIDT_CTRL_EN (1 << 0) 37# define DIDT_CTRL_EN (1 << 0)
@@ -184,7 +186,10 @@
184#define DIG_THERM_DPM(x) ((x) << 14) 186#define DIG_THERM_DPM(x) ((x) << 14)
185#define DIG_THERM_DPM_MASK 0x003FC000 187#define DIG_THERM_DPM_MASK 0x003FC000
186#define DIG_THERM_DPM_SHIFT 14 188#define DIG_THERM_DPM_SHIFT 14
187
189#define CG_THERMAL_STATUS				0xC0300008
190#define FDO_PWM_DUTY(x) ((x) << 9)
191#define FDO_PWM_DUTY_MASK (0xff << 9)
192#define FDO_PWM_DUTY_SHIFT 9
188#define CG_THERMAL_INT 0xC030000C 193#define CG_THERMAL_INT 0xC030000C
189#define CI_DIG_THERM_INTH(x) ((x) << 8) 194#define CI_DIG_THERM_INTH(x) ((x) << 8)
190#define CI_DIG_THERM_INTH_MASK 0x0000FF00 195#define CI_DIG_THERM_INTH_MASK 0x0000FF00
@@ -194,7 +199,10 @@
194#define CI_DIG_THERM_INTL_SHIFT 16 199#define CI_DIG_THERM_INTL_SHIFT 16
195#define THERM_INT_MASK_HIGH (1 << 24) 200#define THERM_INT_MASK_HIGH (1 << 24)
196#define THERM_INT_MASK_LOW (1 << 25) 201#define THERM_INT_MASK_LOW (1 << 25)
197
202#define CG_MULT_THERMAL_CTRL				0xC0300010
203#define TEMP_SEL(x) ((x) << 20)
204#define TEMP_SEL_MASK (0xff << 20)
205#define TEMP_SEL_SHIFT 20
198#define CG_MULT_THERMAL_STATUS 0xC0300014 206#define CG_MULT_THERMAL_STATUS 0xC0300014
199#define ASIC_MAX_TEMP(x) ((x) << 0) 207#define ASIC_MAX_TEMP(x) ((x) << 0)
200#define ASIC_MAX_TEMP_MASK 0x000001ff 208#define ASIC_MAX_TEMP_MASK 0x000001ff
@@ -203,6 +211,36 @@
203#define CTF_TEMP_MASK 0x0003fe00 211#define CTF_TEMP_MASK 0x0003fe00
204#define CTF_TEMP_SHIFT 9 212#define CTF_TEMP_SHIFT 9
205 213
214#define CG_FDO_CTRL0 0xC0300064
215#define FDO_STATIC_DUTY(x) ((x) << 0)
216#define FDO_STATIC_DUTY_MASK 0x000000FF
217#define FDO_STATIC_DUTY_SHIFT 0
218#define CG_FDO_CTRL1 0xC0300068
219#define FMAX_DUTY100(x) ((x) << 0)
220#define FMAX_DUTY100_MASK 0x000000FF
221#define FMAX_DUTY100_SHIFT 0
222#define CG_FDO_CTRL2 0xC030006C
223#define TMIN(x) ((x) << 0)
224#define TMIN_MASK 0x000000FF
225#define TMIN_SHIFT 0
226#define FDO_PWM_MODE(x) ((x) << 11)
227#define FDO_PWM_MODE_MASK (7 << 11)
228#define FDO_PWM_MODE_SHIFT 11
229#define TACH_PWM_RESP_RATE(x) ((x) << 25)
230#define TACH_PWM_RESP_RATE_MASK (0x7f << 25)
231#define TACH_PWM_RESP_RATE_SHIFT 25
232#define CG_TACH_CTRL 0xC0300070
233# define EDGE_PER_REV(x) ((x) << 0)
234# define EDGE_PER_REV_MASK (0x7 << 0)
235# define EDGE_PER_REV_SHIFT 0
236# define TARGET_PERIOD(x) ((x) << 3)
237# define TARGET_PERIOD_MASK 0xfffffff8
238# define TARGET_PERIOD_SHIFT 3
239#define CG_TACH_STATUS 0xC0300074
240# define TACH_PERIOD(x) ((x) << 0)
241# define TACH_PERIOD_MASK 0xffffffff
242# define TACH_PERIOD_SHIFT 0
243
206#define CG_ECLK_CNTL 0xC05000AC 244#define CG_ECLK_CNTL 0xC05000AC
207# define ECLK_DIVIDER_MASK 0x7f 245# define ECLK_DIVIDER_MASK 0x7f
208# define ECLK_DIR_CNTL_EN (1 << 8) 246# define ECLK_DIR_CNTL_EN (1 << 8)
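
The CG_FDO_* and CG_TACH_* registers back the initial DPM fan control called out in the merge summary. As a hedged sketch of how the duty-cycle fields compose into a percentage (these live in SMC space, hence RREG32_SMC; the helper name is hypothetical):

    static u32 fan_speed_percent(struct radeon_device *rdev)  /* hypothetical helper */
    {
        u32 duty    = (RREG32_SMC(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK)
                      >> FDO_PWM_DUTY_SHIFT;
        u32 duty100 = (RREG32_SMC(CG_FDO_CTRL1) & FMAX_DUTY100_MASK)
                      >> FMAX_DUTY100_SHIFT;

        return duty100 ? duty * 100 / duty100 : 0;
    }
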
@@ -1137,6 +1175,9 @@
1137#define SH_MEM_ALIGNMENT_MODE_UNALIGNED 3 1175#define SH_MEM_ALIGNMENT_MODE_UNALIGNED 3
1138#define DEFAULT_MTYPE(x) ((x) << 4) 1176#define DEFAULT_MTYPE(x) ((x) << 4)
1139#define APE1_MTYPE(x) ((x) << 7) 1177#define APE1_MTYPE(x) ((x) << 7)
1178/* valid for both DEFAULT_MTYPE and APE1_MTYPE */
1179#define MTYPE_CACHED 0
1180#define MTYPE_NONCACHED 3
1140 1181
1141#define SX_DEBUG_1 0x9060 1182#define SX_DEBUG_1 0x9060
1142 1183
@@ -1447,6 +1488,16 @@
1447#define CP_HQD_ACTIVE 0xC91C 1488#define CP_HQD_ACTIVE 0xC91C
1448#define CP_HQD_VMID 0xC920 1489#define CP_HQD_VMID 0xC920
1449 1490
1491#define CP_HQD_PERSISTENT_STATE 0xC924u
1492#define DEFAULT_CP_HQD_PERSISTENT_STATE (0x33U << 8)
1493
1494#define CP_HQD_PIPE_PRIORITY 0xC928u
1495#define CP_HQD_QUEUE_PRIORITY 0xC92Cu
1496#define CP_HQD_QUANTUM 0xC930u
1497#define QUANTUM_EN 1U
1498#define QUANTUM_SCALE_1MS (1U << 4)
1499#define QUANTUM_DURATION(x) ((x) << 8)
1500
1450#define CP_HQD_PQ_BASE 0xC934 1501#define CP_HQD_PQ_BASE 0xC934
1451#define CP_HQD_PQ_BASE_HI 0xC938 1502#define CP_HQD_PQ_BASE_HI 0xC938
1452#define CP_HQD_PQ_RPTR 0xC93C 1503#define CP_HQD_PQ_RPTR 0xC93C
@@ -1474,12 +1525,32 @@
1474#define PRIV_STATE (1 << 30) 1525#define PRIV_STATE (1 << 30)
1475#define KMD_QUEUE (1 << 31) 1526#define KMD_QUEUE (1 << 31)
1476 1527
1477#define CP_HQD_DEQUEUE_REQUEST 0xC974 1528#define CP_HQD_IB_BASE_ADDR 0xC95Cu
1529#define CP_HQD_IB_BASE_ADDR_HI 0xC960u
1530#define CP_HQD_IB_RPTR 0xC964u
1531#define CP_HQD_IB_CONTROL 0xC968u
1532#define IB_ATC_EN (1U << 23)
1533#define DEFAULT_MIN_IB_AVAIL_SIZE (3U << 20)
1534
1535#define CP_HQD_DEQUEUE_REQUEST 0xC974
1536#define DEQUEUE_REQUEST_DRAIN 1
1537#define DEQUEUE_REQUEST_RESET 2
1478 1538
1479#define CP_MQD_CONTROL 0xC99C 1539#define CP_MQD_CONTROL 0xC99C
1480#define MQD_VMID(x) ((x) << 0) 1540#define MQD_VMID(x) ((x) << 0)
1481#define MQD_VMID_MASK (0xf << 0) 1541#define MQD_VMID_MASK (0xf << 0)
1482 1542
1543#define CP_HQD_SEMA_CMD 0xC97Cu
1544#define CP_HQD_MSG_TYPE 0xC980u
1545#define CP_HQD_ATOMIC0_PREOP_LO 0xC984u
1546#define CP_HQD_ATOMIC0_PREOP_HI 0xC988u
1547#define CP_HQD_ATOMIC1_PREOP_LO 0xC98Cu
1548#define CP_HQD_ATOMIC1_PREOP_HI 0xC990u
1549#define CP_HQD_HQ_SCHEDULER0 0xC994u
1550#define CP_HQD_HQ_SCHEDULER1 0xC998u
1551
1552#define SH_STATIC_MEM_CONFIG 0x9604u
1553
1483#define DB_RENDER_CONTROL 0x28000 1554#define DB_RENDER_CONTROL 0x28000
1484 1555
1485#define PA_SC_RASTER_CONFIG 0x28350 1556#define PA_SC_RASTER_CONFIG 0x28350
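
CP_HQD_DEQUEUE_REQUEST gains named values: DRAIN lets a queue retire outstanding work before stopping, RESET tears it down immediately. The usual destroy pattern is to write the request and poll CP_HQD_ACTIVE until the CP deactivates the queue; a hedged sketch, assuming bit 0 of CP_HQD_ACTIVE reports the active state and with a caller-chosen timeout:

    WREG32(CP_HQD_DEQUEUE_REQUEST, DEQUEUE_REQUEST_DRAIN);

    while (RREG32(CP_HQD_ACTIVE) & 1) {  /* assumption: bit 0 == queue active */
        if (!timeout--)
            return -ETIME;
        udelay(1);
    }
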
@@ -2069,4 +2140,20 @@
2069#define VCE_CMD_IB_AUTO 0x00000005 2140#define VCE_CMD_IB_AUTO 0x00000005
2070#define VCE_CMD_SEMAPHORE 0x00000006 2141#define VCE_CMD_SEMAPHORE 0x00000006
2071 2142
2143#define ATC_VMID0_PASID_MAPPING 0x339Cu
2144#define ATC_VMID_PASID_MAPPING_UPDATE_STATUS 0x3398u
2145#define ATC_VMID_PASID_MAPPING_VALID (1U << 31)
2146
2147#define ATC_VM_APERTURE0_CNTL 0x3310u
2148#define ATS_ACCESS_MODE_NEVER 0
2149#define ATS_ACCESS_MODE_ALWAYS 1
2150
2151#define ATC_VM_APERTURE0_CNTL2 0x3318u
2152#define ATC_VM_APERTURE0_HIGH_ADDR 0x3308u
2153#define ATC_VM_APERTURE0_LOW_ADDR 0x3300u
2154#define ATC_VM_APERTURE1_CNTL 0x3314u
2155#define ATC_VM_APERTURE1_CNTL2 0x331Cu
2156#define ATC_VM_APERTURE1_HIGH_ADDR 0x330Cu
2157#define ATC_VM_APERTURE1_LOW_ADDR 0x3304u
2158
2072#endif 2159#endif
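
The ATC_VMID*_PASID_MAPPING registers let amdkfd bind a process address space ID to one of its VMIDs (8-15); the VALID bit publishes the entry, and the UPDATE_STATUS register reports when the ATC has taken it. A hedged sketch of programming one slot (the 4-byte register stride and the per-VMID status bits are assumptions inferred from the names):

    /* Bind pasid to vmid; one 32-bit mapping register per VMID assumed. */
    WREG32(ATC_VMID0_PASID_MAPPING + vmid * 4,
           pasid | ATC_VMID_PASID_MAPPING_VALID);

    while (!(RREG32(ATC_VMID_PASID_MAPPING_UPDATE_STATUS) & (1U << vmid)))
        cpu_relax();  /* assumption: per-VMID done bit in the status register */
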
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 5c8b358f9fba..924b1b7ab455 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -35,7 +35,7 @@
35#define MIN(a,b) (((a)<(b))?(a):(b)) 35#define MIN(a,b) (((a)<(b))?(a):(b))
36 36
37int r600_dma_cs_next_reloc(struct radeon_cs_parser *p, 37int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
38 struct radeon_cs_reloc **cs_reloc); 38 struct radeon_bo_list **cs_reloc);
39struct evergreen_cs_track { 39struct evergreen_cs_track {
40 u32 group_size; 40 u32 group_size;
41 u32 nbanks; 41 u32 nbanks;
@@ -1094,7 +1094,7 @@ static int evergreen_cs_parse_packet0(struct radeon_cs_parser *p,
1094static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) 1094static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1095{ 1095{
1096 struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track; 1096 struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track;
1097 struct radeon_cs_reloc *reloc; 1097 struct radeon_bo_list *reloc;
1098 u32 last_reg; 1098 u32 last_reg;
1099 u32 m, i, tmp, *ib; 1099 u32 m, i, tmp, *ib;
1100 int r; 1100 int r;
@@ -1792,7 +1792,7 @@ static bool evergreen_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1792static int evergreen_packet3_check(struct radeon_cs_parser *p, 1792static int evergreen_packet3_check(struct radeon_cs_parser *p,
1793 struct radeon_cs_packet *pkt) 1793 struct radeon_cs_packet *pkt)
1794{ 1794{
1795 struct radeon_cs_reloc *reloc; 1795 struct radeon_bo_list *reloc;
1796 struct evergreen_cs_track *track; 1796 struct evergreen_cs_track *track;
1797 volatile u32 *ib; 1797 volatile u32 *ib;
1798 unsigned idx; 1798 unsigned idx;
@@ -2661,7 +2661,7 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
2661 p->track = NULL; 2661 p->track = NULL;
2662 return r; 2662 return r;
2663 } 2663 }
2664 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); 2664 } while (p->idx < p->chunk_ib->length_dw);
2665#if 0 2665#if 0
2666 for (r = 0; r < p->ib.length_dw; r++) { 2666 for (r = 0; r < p->ib.length_dw; r++) {
2667 printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]); 2667 printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
@@ -2684,8 +2684,8 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
2684 **/ 2684 **/
2685int evergreen_dma_cs_parse(struct radeon_cs_parser *p) 2685int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
2686{ 2686{
2687 struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx]; 2687 struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
2688 struct radeon_cs_reloc *src_reloc, *dst_reloc, *dst2_reloc; 2688 struct radeon_bo_list *src_reloc, *dst_reloc, *dst2_reloc;
2689 u32 header, cmd, count, sub_cmd; 2689 u32 header, cmd, count, sub_cmd;
2690 volatile u32 *ib = p->ib.ptr; 2690 volatile u32 *ib = p->ib.ptr;
2691 u32 idx; 2691 u32 idx;
@@ -3100,7 +3100,7 @@ int evergreen_dma_cs_parse(struct radeon_cs_parser *p)
3100 DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx); 3100 DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
3101 return -EINVAL; 3101 return -EINVAL;
3102 } 3102 }
3103 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); 3103 } while (p->idx < p->chunk_ib->length_dw);
3104#if 0 3104#if 0
3105 for (r = 0; r < p->ib->length_dw; r++) { 3105 for (r = 0; r < p->ib->length_dw; r++) {
3106 printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]); 3106 printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
diff --git a/drivers/gpu/drm/radeon/evergreen_dma.c b/drivers/gpu/drm/radeon/evergreen_dma.c
index 66bcfadeedd1..96535aa8659c 100644
--- a/drivers/gpu/drm/radeon/evergreen_dma.c
+++ b/drivers/gpu/drm/radeon/evergreen_dma.c
@@ -110,31 +110,27 @@ struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev,
110 unsigned num_gpu_pages, 110 unsigned num_gpu_pages,
111 struct reservation_object *resv) 111 struct reservation_object *resv)
112{ 112{
113 struct radeon_semaphore *sem = NULL;
114 struct radeon_fence *fence; 113 struct radeon_fence *fence;
114 struct radeon_sync sync;
115 int ring_index = rdev->asic->copy.dma_ring_index; 115 int ring_index = rdev->asic->copy.dma_ring_index;
116 struct radeon_ring *ring = &rdev->ring[ring_index]; 116 struct radeon_ring *ring = &rdev->ring[ring_index];
117 u32 size_in_dw, cur_size_in_dw; 117 u32 size_in_dw, cur_size_in_dw;
118 int i, num_loops; 118 int i, num_loops;
119 int r = 0; 119 int r = 0;
120 120
121 r = radeon_semaphore_create(rdev, &sem); 121 radeon_sync_create(&sync);
122 if (r) {
123 DRM_ERROR("radeon: moving bo (%d).\n", r);
124 return ERR_PTR(r);
125 }
126 122
127 size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4; 123 size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
128 num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff); 124 num_loops = DIV_ROUND_UP(size_in_dw, 0xfffff);
129 r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11); 125 r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
130 if (r) { 126 if (r) {
131 DRM_ERROR("radeon: moving bo (%d).\n", r); 127 DRM_ERROR("radeon: moving bo (%d).\n", r);
132 radeon_semaphore_free(rdev, &sem, NULL); 128 radeon_sync_free(rdev, &sync, NULL);
133 return ERR_PTR(r); 129 return ERR_PTR(r);
134 } 130 }
135 131
136 radeon_semaphore_sync_resv(rdev, sem, resv, false); 132 radeon_sync_resv(rdev, &sync, resv, false);
137 radeon_semaphore_sync_rings(rdev, sem, ring->idx); 133 radeon_sync_rings(rdev, &sync, ring->idx);
138 134
139 for (i = 0; i < num_loops; i++) { 135 for (i = 0; i < num_loops; i++) {
140 cur_size_in_dw = size_in_dw; 136 cur_size_in_dw = size_in_dw;
@@ -153,12 +149,12 @@ struct radeon_fence *evergreen_copy_dma(struct radeon_device *rdev,
153 r = radeon_fence_emit(rdev, &fence, ring->idx); 149 r = radeon_fence_emit(rdev, &fence, ring->idx);
154 if (r) { 150 if (r) {
155 radeon_ring_unlock_undo(rdev, ring); 151 radeon_ring_unlock_undo(rdev, ring);
156 radeon_semaphore_free(rdev, &sem, NULL); 152 radeon_sync_free(rdev, &sync, NULL);
157 return ERR_PTR(r); 153 return ERR_PTR(r);
158 } 154 }
159 155
160 radeon_ring_unlock_commit(rdev, ring, false); 156 radeon_ring_unlock_commit(rdev, ring, false);
161 radeon_semaphore_free(rdev, &sem, fence); 157 radeon_sync_free(rdev, &sync, fence);
162 158
163 return fence; 159 return fence;
164} 160}
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 3faee58946dd..360de9f1f491 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1373,6 +1373,7 @@ void cayman_fence_ring_emit(struct radeon_device *rdev,
1373void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) 1373void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
1374{ 1374{
1375 struct radeon_ring *ring = &rdev->ring[ib->ring]; 1375 struct radeon_ring *ring = &rdev->ring[ib->ring];
1376 unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
1376 u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA | 1377 u32 cp_coher_cntl = PACKET3_FULL_CACHE_ENA | PACKET3_TC_ACTION_ENA |
1377 PACKET3_SH_ACTION_ENA; 1378 PACKET3_SH_ACTION_ENA;
1378 1379
@@ -1395,15 +1396,14 @@ void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
1395#endif 1396#endif
1396 (ib->gpu_addr & 0xFFFFFFFC)); 1397 (ib->gpu_addr & 0xFFFFFFFC));
1397 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF); 1398 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
1398 radeon_ring_write(ring, ib->length_dw | 1399 radeon_ring_write(ring, ib->length_dw | (vm_id << 24));
1399 (ib->vm ? (ib->vm->id << 24) : 0));
1400 1400
1401 /* flush read cache over gart for this vmid */ 1401 /* flush read cache over gart for this vmid */
1402 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); 1402 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
1403 radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl); 1403 radeon_ring_write(ring, PACKET3_ENGINE_ME | cp_coher_cntl);
1404 radeon_ring_write(ring, 0xFFFFFFFF); 1404 radeon_ring_write(ring, 0xFFFFFFFF);
1405 radeon_ring_write(ring, 0); 1405 radeon_ring_write(ring, 0);
1406 radeon_ring_write(ring, ((ib->vm ? ib->vm->id : 0) << 24) | 10); /* poll interval */ 1406 radeon_ring_write(ring, (vm_id << 24) | 10); /* poll interval */
1407} 1407}
1408 1408
1409static void cayman_cp_enable(struct radeon_device *rdev, bool enable) 1409static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
@@ -2502,15 +2502,11 @@ void cayman_vm_decode_fault(struct radeon_device *rdev,
2502 * Update the page table base and flush the VM TLB 2502 * Update the page table base and flush the VM TLB
2503 * using the CP (cayman-si). 2503 * using the CP (cayman-si).
2504 */ 2504 */
2505void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) 2505void cayman_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
2506 unsigned vm_id, uint64_t pd_addr)
2506{ 2507{
2507 struct radeon_ring *ring = &rdev->ring[ridx]; 2508 radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2), 0));
2508 2509 radeon_ring_write(ring, pd_addr >> 12);
2509 if (vm == NULL)
2510 return;
2511
2512 radeon_ring_write(ring, PACKET0(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0));
2513 radeon_ring_write(ring, vm->pd_gpu_addr >> 12);
2514 2510
2515 /* flush hdp cache */ 2511 /* flush hdp cache */
2516 radeon_ring_write(ring, PACKET0(HDP_MEM_COHERENCY_FLUSH_CNTL, 0)); 2512 radeon_ring_write(ring, PACKET0(HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
@@ -2518,7 +2514,7 @@ void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
2518 2514
2519 /* bits 0-7 are the VM contexts0-7 */ 2515 /* bits 0-7 are the VM contexts0-7 */
2520 radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0)); 2516 radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0));
2521 radeon_ring_write(ring, 1 << vm->id); 2517 radeon_ring_write(ring, 1 << vm_id);
2522 2518
2523 /* sync PFP to ME, otherwise we might get invalid PFP reads */ 2519 /* sync PFP to ME, otherwise we might get invalid PFP reads */
2524 radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); 2520 radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c
index f26f0a9fb522..50f88611ff60 100644
--- a/drivers/gpu/drm/radeon/ni_dma.c
+++ b/drivers/gpu/drm/radeon/ni_dma.c
@@ -123,6 +123,7 @@ void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
123 struct radeon_ib *ib) 123 struct radeon_ib *ib)
124{ 124{
125 struct radeon_ring *ring = &rdev->ring[ib->ring]; 125 struct radeon_ring *ring = &rdev->ring[ib->ring];
126 unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
126 127
127 if (rdev->wb.enabled) { 128 if (rdev->wb.enabled) {
128 u32 next_rptr = ring->wptr + 4; 129 u32 next_rptr = ring->wptr + 4;
@@ -140,7 +141,7 @@ void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
140 */ 141 */
141 while ((ring->wptr & 7) != 5) 142 while ((ring->wptr & 7) != 5)
142 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0)); 143 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0));
143 radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, ib->vm ? ib->vm->id : 0, 0)); 144 radeon_ring_write(ring, DMA_IB_PACKET(DMA_PACKET_INDIRECT_BUFFER, vm_id, 0));
144 radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0)); 145 radeon_ring_write(ring, (ib->gpu_addr & 0xFFFFFFE0));
145 radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF)); 146 radeon_ring_write(ring, (ib->length_dw << 12) | (upper_32_bits(ib->gpu_addr) & 0xFF));
146 147
@@ -446,16 +447,12 @@ void cayman_dma_vm_pad_ib(struct radeon_ib *ib)
446 ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0); 447 ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
447} 448}
448 449
449void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) 450void cayman_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
451 unsigned vm_id, uint64_t pd_addr)
450{ 452{
451 struct radeon_ring *ring = &rdev->ring[ridx];
452
453 if (vm == NULL)
454 return;
455
456 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0)); 453 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
457 radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2)); 454 radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2));
458 radeon_ring_write(ring, vm->pd_gpu_addr >> 12); 455 radeon_ring_write(ring, pd_addr >> 12);
459 456
460 /* flush hdp cache */ 457 /* flush hdp cache */
461 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0)); 458 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
@@ -465,6 +462,6 @@ void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm
465 /* bits 0-7 are the VM contexts0-7 */ 462 /* bits 0-7 are the VM contexts0-7 */
466 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0)); 463 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
467 radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2)); 464 radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
468 radeon_ring_write(ring, 1 << vm->id); 465 radeon_ring_write(ring, 1 << vm_id);
469} 466}
470 467
diff --git a/drivers/gpu/drm/radeon/ppsmc.h b/drivers/gpu/drm/radeon/ppsmc.h
index 5670b8291285..7e5724a12f8b 100644
--- a/drivers/gpu/drm/radeon/ppsmc.h
+++ b/drivers/gpu/drm/radeon/ppsmc.h
@@ -56,6 +56,14 @@
56#define PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE 0x20 56#define PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE 0x20
57#define PPSMC_STATEFLAG_DEEPSLEEP_BYPASS 0x40 57#define PPSMC_STATEFLAG_DEEPSLEEP_BYPASS 0x40
58 58
59#define FDO_MODE_HARDWARE 0
60#define FDO_MODE_PIECE_WISE_LINEAR 1
61
62enum FAN_CONTROL {
63 FAN_CONTROL_FUZZY,
64 FAN_CONTROL_TABLE
65};
66
59#define PPSMC_Result_OK ((uint8_t)0x01) 67#define PPSMC_Result_OK ((uint8_t)0x01)
60#define PPSMC_Result_Failed ((uint8_t)0xFF) 68#define PPSMC_Result_Failed ((uint8_t)0xFF)
61 69
@@ -79,6 +87,8 @@ typedef uint8_t PPSMC_Result;
79#define PPSMC_MSG_DisableCac ((uint8_t)0x54) 87#define PPSMC_MSG_DisableCac ((uint8_t)0x54)
80#define PPSMC_TDPClampingActive ((uint8_t)0x59) 88#define PPSMC_TDPClampingActive ((uint8_t)0x59)
81#define PPSMC_TDPClampingInactive ((uint8_t)0x5A) 89#define PPSMC_TDPClampingInactive ((uint8_t)0x5A)
90#define PPSMC_StartFanControl ((uint8_t)0x5B)
91#define PPSMC_StopFanControl ((uint8_t)0x5C)
82#define PPSMC_MSG_NoDisplay ((uint8_t)0x5D) 92#define PPSMC_MSG_NoDisplay ((uint8_t)0x5D)
83#define PPSMC_MSG_HasDisplay ((uint8_t)0x5E) 93#define PPSMC_MSG_HasDisplay ((uint8_t)0x5E)
84#define PPSMC_MSG_UVDPowerOFF ((uint8_t)0x60) 94#define PPSMC_MSG_UVDPowerOFF ((uint8_t)0x60)
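
PPSMC_StartFanControl and PPSMC_StopFanControl are ordinary SMC messages, so handing fan control to the firmware is just a message post. A hedged sketch (ci_send_msg_to_smc is assumed to be the existing CI message helper; neither its name nor its signature appears in this diff):

    PPSMC_Result res = ci_send_msg_to_smc(rdev, PPSMC_StartFanControl);
    if (res != PPSMC_Result_OK)
        return -EINVAL;  /* firmware declined to take over the fan */
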
@@ -106,6 +116,7 @@ typedef uint8_t PPSMC_Result;
106#define PPSMC_MSG_SAMUDPM_SetEnabledMask ((uint16_t) 0x130) 116#define PPSMC_MSG_SAMUDPM_SetEnabledMask ((uint16_t) 0x130)
107#define PPSMC_MSG_MCLKDPM_ForceState ((uint16_t) 0x131) 117#define PPSMC_MSG_MCLKDPM_ForceState ((uint16_t) 0x131)
108#define PPSMC_MSG_MCLKDPM_NoForcedLevel ((uint16_t) 0x132) 118#define PPSMC_MSG_MCLKDPM_NoForcedLevel ((uint16_t) 0x132)
119#define PPSMC_MSG_Thermal_Cntl_Disable ((uint16_t) 0x133)
109#define PPSMC_MSG_Voltage_Cntl_Disable ((uint16_t) 0x135) 120#define PPSMC_MSG_Voltage_Cntl_Disable ((uint16_t) 0x135)
110#define PPSMC_MSG_PCIeDPM_Enable ((uint16_t) 0x136) 121#define PPSMC_MSG_PCIeDPM_Enable ((uint16_t) 0x136)
111#define PPSMC_MSG_PCIeDPM_Disable ((uint16_t) 0x13d) 122#define PPSMC_MSG_PCIeDPM_Disable ((uint16_t) 0x13d)
@@ -149,6 +160,10 @@ typedef uint8_t PPSMC_Result;
149#define PPSMC_MSG_MASTER_DeepSleep_ON ((uint16_t) 0x18F) 160#define PPSMC_MSG_MASTER_DeepSleep_ON ((uint16_t) 0x18F)
150#define PPSMC_MSG_MASTER_DeepSleep_OFF ((uint16_t) 0x190) 161#define PPSMC_MSG_MASTER_DeepSleep_OFF ((uint16_t) 0x190)
151#define PPSMC_MSG_Remove_DC_Clamp ((uint16_t) 0x191) 162#define PPSMC_MSG_Remove_DC_Clamp ((uint16_t) 0x191)
163#define PPSMC_MSG_SetFanPwmMax ((uint16_t) 0x19A)
164
165#define PPSMC_MSG_ENABLE_THERMAL_DPM ((uint16_t) 0x19C)
166#define PPSMC_MSG_DISABLE_THERMAL_DPM ((uint16_t) 0x19D)
152 167
153#define PPSMC_MSG_API_GetSclkFrequency ((uint16_t) 0x200) 168#define PPSMC_MSG_API_GetSclkFrequency ((uint16_t) 0x200)
154#define PPSMC_MSG_API_GetMclkFrequency ((uint16_t) 0x201) 169#define PPSMC_MSG_API_GetMclkFrequency ((uint16_t) 0x201)
@@ -157,10 +172,11 @@ typedef uint8_t PPSMC_Result;
157#define PPSMC_MSG_DPM_Config ((uint32_t) 0x102) 172#define PPSMC_MSG_DPM_Config ((uint32_t) 0x102)
158#define PPSMC_MSG_DPM_ForceState ((uint32_t) 0x104) 173#define PPSMC_MSG_DPM_ForceState ((uint32_t) 0x104)
159#define PPSMC_MSG_PG_SIMD_Config ((uint32_t) 0x108) 174#define PPSMC_MSG_PG_SIMD_Config ((uint32_t) 0x108)
160#define PPSMC_MSG_DPM_N_LevelsDisabled ((uint32_t) 0x112) 175#define PPSMC_MSG_Thermal_Cntl_Enable ((uint32_t) 0x10a)
161#define PPSMC_MSG_Voltage_Cntl_Enable ((uint32_t) 0x109) 176#define PPSMC_MSG_Voltage_Cntl_Enable ((uint32_t) 0x109)
162#define PPSMC_MSG_VCEPowerOFF ((uint32_t) 0x10e) 177#define PPSMC_MSG_VCEPowerOFF ((uint32_t) 0x10e)
163#define PPSMC_MSG_VCEPowerON ((uint32_t) 0x10f) 178#define PPSMC_MSG_VCEPowerON ((uint32_t) 0x10f)
179#define PPSMC_MSG_DPM_N_LevelsDisabled ((uint32_t) 0x112)
164#define PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint32_t) 0x11d) 180#define PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint32_t) 0x11d)
165#define PPSMC_MSG_DCE_AllowVoltageAdjustment ((uint32_t) 0x11e) 181#define PPSMC_MSG_DCE_AllowVoltageAdjustment ((uint32_t) 0x11e)
166#define PPSMC_MSG_EnableBAPM ((uint32_t) 0x120) 182#define PPSMC_MSG_EnableBAPM ((uint32_t) 0x120)
diff --git a/drivers/gpu/drm/radeon/pptable.h b/drivers/gpu/drm/radeon/pptable.h
index 2d532996c697..4c2eec49dadc 100644
--- a/drivers/gpu/drm/radeon/pptable.h
+++ b/drivers/gpu/drm/radeon/pptable.h
@@ -96,6 +96,14 @@ typedef struct _ATOM_PPLIB_FANTABLE2
96 USHORT usTMax; // The max temperature 96 USHORT usTMax; // The max temperature
97} ATOM_PPLIB_FANTABLE2; 97} ATOM_PPLIB_FANTABLE2;
98 98
99typedef struct _ATOM_PPLIB_FANTABLE3
100{
101 ATOM_PPLIB_FANTABLE2 basicTable2;
102 UCHAR ucFanControlMode;
103 USHORT usFanPWMMax;
104 USHORT usFanOutputSensitivity;
105} ATOM_PPLIB_FANTABLE3;
106
99typedef struct _ATOM_PPLIB_EXTENDEDHEADER 107typedef struct _ATOM_PPLIB_EXTENDEDHEADER
100{ 108{
101 USHORT usSize; 109 USHORT usSize;
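
ATOM_PPLIB_FANTABLE3 embeds FANTABLE2 as its first member, so a table parsed from the VBIOS can always be read through the older layout, with the new fields consumed only when the revision allows. A hedged sketch of that gate (ucFanTableFormat as the revision byte, and the inner basicTable member name, are assumptions from the ATOM naming convention):

    const ATOM_PPLIB_FANTABLE3 *fan3 = (const ATOM_PPLIB_FANTABLE3 *)fan_table;

    if (fan3->basicTable2.basicTable.ucFanTableFormat >= 3) {
        pwm_max = le16_to_cpu(fan3->usFanPWMMax);   /* new in FANTABLE3 */
        mode    = fan3->ucFanControlMode;
    }
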
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index b53b31a7b76f..74f06d540591 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -1254,7 +1254,7 @@ int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
1254 int r; 1254 int r;
1255 u32 tile_flags = 0; 1255 u32 tile_flags = 0;
1256 u32 tmp; 1256 u32 tmp;
1257 struct radeon_cs_reloc *reloc; 1257 struct radeon_bo_list *reloc;
1258 u32 value; 1258 u32 value;
1259 1259
1260 r = radeon_cs_packet_next_reloc(p, &reloc, 0); 1260 r = radeon_cs_packet_next_reloc(p, &reloc, 0);
@@ -1293,7 +1293,7 @@ int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
1293 int idx) 1293 int idx)
1294{ 1294{
1295 unsigned c, i; 1295 unsigned c, i;
1296 struct radeon_cs_reloc *reloc; 1296 struct radeon_bo_list *reloc;
1297 struct r100_cs_track *track; 1297 struct r100_cs_track *track;
1298 int r = 0; 1298 int r = 0;
1299 volatile uint32_t *ib; 1299 volatile uint32_t *ib;
@@ -1542,7 +1542,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1542 struct radeon_cs_packet *pkt, 1542 struct radeon_cs_packet *pkt,
1543 unsigned idx, unsigned reg) 1543 unsigned idx, unsigned reg)
1544{ 1544{
1545 struct radeon_cs_reloc *reloc; 1545 struct radeon_bo_list *reloc;
1546 struct r100_cs_track *track; 1546 struct r100_cs_track *track;
1547 volatile uint32_t *ib; 1547 volatile uint32_t *ib;
1548 uint32_t tmp; 1548 uint32_t tmp;
@@ -1901,7 +1901,7 @@ int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
1901static int r100_packet3_check(struct radeon_cs_parser *p, 1901static int r100_packet3_check(struct radeon_cs_parser *p,
1902 struct radeon_cs_packet *pkt) 1902 struct radeon_cs_packet *pkt)
1903{ 1903{
1904 struct radeon_cs_reloc *reloc; 1904 struct radeon_bo_list *reloc;
1905 struct r100_cs_track *track; 1905 struct r100_cs_track *track;
1906 unsigned idx; 1906 unsigned idx;
1907 volatile uint32_t *ib; 1907 volatile uint32_t *ib;
@@ -2061,7 +2061,7 @@ int r100_cs_parse(struct radeon_cs_parser *p)
2061 } 2061 }
2062 if (r) 2062 if (r)
2063 return r; 2063 return r;
2064 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); 2064 } while (p->idx < p->chunk_ib->length_dw);
2065 return 0; 2065 return 0;
2066} 2066}
2067 2067
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index 732d4938aab7..c70e6d5bcd19 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -146,7 +146,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
146 struct radeon_cs_packet *pkt, 146 struct radeon_cs_packet *pkt,
147 unsigned idx, unsigned reg) 147 unsigned idx, unsigned reg)
148{ 148{
149 struct radeon_cs_reloc *reloc; 149 struct radeon_bo_list *reloc;
150 struct r100_cs_track *track; 150 struct r100_cs_track *track;
151 volatile uint32_t *ib; 151 volatile uint32_t *ib;
152 uint32_t tmp; 152 uint32_t tmp;
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 1bc4704034ce..064ad5569cca 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -598,7 +598,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
598 struct radeon_cs_packet *pkt, 598 struct radeon_cs_packet *pkt,
599 unsigned idx, unsigned reg) 599 unsigned idx, unsigned reg)
600{ 600{
601 struct radeon_cs_reloc *reloc; 601 struct radeon_bo_list *reloc;
602 struct r100_cs_track *track; 602 struct r100_cs_track *track;
603 volatile uint32_t *ib; 603 volatile uint32_t *ib;
604 uint32_t tmp, tile_flags = 0; 604 uint32_t tmp, tile_flags = 0;
@@ -1142,7 +1142,7 @@ fail:
1142static int r300_packet3_check(struct radeon_cs_parser *p, 1142static int r300_packet3_check(struct radeon_cs_parser *p,
1143 struct radeon_cs_packet *pkt) 1143 struct radeon_cs_packet *pkt)
1144{ 1144{
1145 struct radeon_cs_reloc *reloc; 1145 struct radeon_bo_list *reloc;
1146 struct r100_cs_track *track; 1146 struct r100_cs_track *track;
1147 volatile uint32_t *ib; 1147 volatile uint32_t *ib;
1148 unsigned idx; 1148 unsigned idx;
@@ -1283,7 +1283,7 @@ int r300_cs_parse(struct radeon_cs_parser *p)
1283 if (r) { 1283 if (r) {
1284 return r; 1284 return r;
1285 } 1285 }
1286 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); 1286 } while (p->idx < p->chunk_ib->length_dw);
1287 return 0; 1287 return 0;
1288} 1288}
1289 1289
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 56b02927cd3d..ef5d6066fa5b 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2889,31 +2889,27 @@ struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev,
2889 unsigned num_gpu_pages, 2889 unsigned num_gpu_pages,
2890 struct reservation_object *resv) 2890 struct reservation_object *resv)
2891{ 2891{
2892 struct radeon_semaphore *sem = NULL;
2893 struct radeon_fence *fence; 2892 struct radeon_fence *fence;
2893 struct radeon_sync sync;
2894 int ring_index = rdev->asic->copy.blit_ring_index; 2894 int ring_index = rdev->asic->copy.blit_ring_index;
2895 struct radeon_ring *ring = &rdev->ring[ring_index]; 2895 struct radeon_ring *ring = &rdev->ring[ring_index];
2896 u32 size_in_bytes, cur_size_in_bytes, tmp; 2896 u32 size_in_bytes, cur_size_in_bytes, tmp;
2897 int i, num_loops; 2897 int i, num_loops;
2898 int r = 0; 2898 int r = 0;
2899 2899
2900 r = radeon_semaphore_create(rdev, &sem); 2900 radeon_sync_create(&sync);
2901 if (r) {
2902 DRM_ERROR("radeon: moving bo (%d).\n", r);
2903 return ERR_PTR(r);
2904 }
2905 2901
2906 size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT); 2902 size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
2907 num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff); 2903 num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
2908 r = radeon_ring_lock(rdev, ring, num_loops * 6 + 24); 2904 r = radeon_ring_lock(rdev, ring, num_loops * 6 + 24);
2909 if (r) { 2905 if (r) {
2910 DRM_ERROR("radeon: moving bo (%d).\n", r); 2906 DRM_ERROR("radeon: moving bo (%d).\n", r);
2911 radeon_semaphore_free(rdev, &sem, NULL); 2907 radeon_sync_free(rdev, &sync, NULL);
2912 return ERR_PTR(r); 2908 return ERR_PTR(r);
2913 } 2909 }
2914 2910
2915 radeon_semaphore_sync_resv(rdev, sem, resv, false); 2911 radeon_sync_resv(rdev, &sync, resv, false);
2916 radeon_semaphore_sync_rings(rdev, sem, ring->idx); 2912 radeon_sync_rings(rdev, &sync, ring->idx);
2917 2913
2918 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 2914 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
2919 radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); 2915 radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
@@ -2942,12 +2938,12 @@ struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev,
2942 r = radeon_fence_emit(rdev, &fence, ring->idx); 2938 r = radeon_fence_emit(rdev, &fence, ring->idx);
2943 if (r) { 2939 if (r) {
2944 radeon_ring_unlock_undo(rdev, ring); 2940 radeon_ring_unlock_undo(rdev, ring);
2945 radeon_semaphore_free(rdev, &sem, NULL); 2941 radeon_sync_free(rdev, &sync, NULL);
2946 return ERR_PTR(r); 2942 return ERR_PTR(r);
2947 } 2943 }
2948 2944
2949 radeon_ring_unlock_commit(rdev, ring, false); 2945 radeon_ring_unlock_commit(rdev, ring, false);
2950 radeon_semaphore_free(rdev, &sem, fence); 2946 radeon_sync_free(rdev, &sync, fence);
2951 2947
2952 return fence; 2948 return fence;
2953} 2949}
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index c47537a1ddba..acc1f99c84d9 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -969,7 +969,7 @@ static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
969static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) 969static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
970{ 970{
971 struct r600_cs_track *track = (struct r600_cs_track *)p->track; 971 struct r600_cs_track *track = (struct r600_cs_track *)p->track;
972 struct radeon_cs_reloc *reloc; 972 struct radeon_bo_list *reloc;
973 u32 m, i, tmp, *ib; 973 u32 m, i, tmp, *ib;
974 int r; 974 int r;
975 975
@@ -1626,7 +1626,7 @@ static bool r600_is_safe_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1626static int r600_packet3_check(struct radeon_cs_parser *p, 1626static int r600_packet3_check(struct radeon_cs_parser *p,
1627 struct radeon_cs_packet *pkt) 1627 struct radeon_cs_packet *pkt)
1628{ 1628{
1629 struct radeon_cs_reloc *reloc; 1629 struct radeon_bo_list *reloc;
1630 struct r600_cs_track *track; 1630 struct r600_cs_track *track;
1631 volatile u32 *ib; 1631 volatile u32 *ib;
1632 unsigned idx; 1632 unsigned idx;
@@ -2316,7 +2316,7 @@ int r600_cs_parse(struct radeon_cs_parser *p)
2316 p->track = NULL; 2316 p->track = NULL;
2317 return r; 2317 return r;
2318 } 2318 }
2319 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); 2319 } while (p->idx < p->chunk_ib->length_dw);
2320#if 0 2320#if 0
2321 for (r = 0; r < p->ib.length_dw; r++) { 2321 for (r = 0; r < p->ib.length_dw; r++) {
2322 printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]); 2322 printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
@@ -2351,10 +2351,10 @@ static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
2351 2351
2352static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p) 2352static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
2353{ 2353{
2354 if (p->chunk_relocs_idx == -1) { 2354 if (p->chunk_relocs == NULL) {
2355 return 0; 2355 return 0;
2356 } 2356 }
2357 p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL); 2357 p->relocs = kzalloc(sizeof(struct radeon_bo_list), GFP_KERNEL);
2358 if (p->relocs == NULL) { 2358 if (p->relocs == NULL) {
2359 return -ENOMEM; 2359 return -ENOMEM;
2360 } 2360 }
@@ -2398,7 +2398,7 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
2398 /* Copy the packet into the IB, the parser will read from the 2398 /* Copy the packet into the IB, the parser will read from the
2399 * input memory (cached) and write to the IB (which can be 2399 * input memory (cached) and write to the IB (which can be
2400 * uncached). */ 2400 * uncached). */
2401 ib_chunk = &parser.chunks[parser.chunk_ib_idx]; 2401 ib_chunk = parser.chunk_ib;
2402 parser.ib.length_dw = ib_chunk->length_dw; 2402 parser.ib.length_dw = ib_chunk->length_dw;
2403 *l = parser.ib.length_dw; 2403 *l = parser.ib.length_dw;
2404 if (copy_from_user(ib, ib_chunk->user_ptr, ib_chunk->length_dw * 4)) { 2404 if (copy_from_user(ib, ib_chunk->user_ptr, ib_chunk->length_dw * 4)) {
@@ -2435,24 +2435,24 @@ void r600_cs_legacy_init(void)
2435 * GPU offset using the provided start. 2435 * GPU offset using the provided start.
2436 **/ 2436 **/
2437int r600_dma_cs_next_reloc(struct radeon_cs_parser *p, 2437int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
2438 struct radeon_cs_reloc **cs_reloc) 2438 struct radeon_bo_list **cs_reloc)
2439{ 2439{
2440 struct radeon_cs_chunk *relocs_chunk; 2440 struct radeon_cs_chunk *relocs_chunk;
2441 unsigned idx; 2441 unsigned idx;
2442 2442
2443 *cs_reloc = NULL; 2443 *cs_reloc = NULL;
2444 if (p->chunk_relocs_idx == -1) { 2444 if (p->chunk_relocs == NULL) {
2445 DRM_ERROR("No relocation chunk !\n"); 2445 DRM_ERROR("No relocation chunk !\n");
2446 return -EINVAL; 2446 return -EINVAL;
2447 } 2447 }
2448 relocs_chunk = &p->chunks[p->chunk_relocs_idx]; 2448 relocs_chunk = p->chunk_relocs;
2449 idx = p->dma_reloc_idx; 2449 idx = p->dma_reloc_idx;
2450 if (idx >= p->nrelocs) { 2450 if (idx >= p->nrelocs) {
2451 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", 2451 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
2452 idx, p->nrelocs); 2452 idx, p->nrelocs);
2453 return -EINVAL; 2453 return -EINVAL;
2454 } 2454 }
2455 *cs_reloc = p->relocs_ptr[idx]; 2455 *cs_reloc = &p->relocs[idx];
2456 p->dma_reloc_idx++; 2456 p->dma_reloc_idx++;
2457 return 0; 2457 return 0;
2458} 2458}
@@ -2472,8 +2472,8 @@ int r600_dma_cs_next_reloc(struct radeon_cs_parser *p,
2472 **/ 2472 **/
2473int r600_dma_cs_parse(struct radeon_cs_parser *p) 2473int r600_dma_cs_parse(struct radeon_cs_parser *p)
2474{ 2474{
2475 struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx]; 2475 struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
2476 struct radeon_cs_reloc *src_reloc, *dst_reloc; 2476 struct radeon_bo_list *src_reloc, *dst_reloc;
2477 u32 header, cmd, count, tiled; 2477 u32 header, cmd, count, tiled;
2478 volatile u32 *ib = p->ib.ptr; 2478 volatile u32 *ib = p->ib.ptr;
2479 u32 idx, idx_value; 2479 u32 idx, idx_value;
@@ -2619,7 +2619,7 @@ int r600_dma_cs_parse(struct radeon_cs_parser *p)
2619 DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx); 2619 DRM_ERROR("Unknown packet type %d at %d !\n", cmd, idx);
2620 return -EINVAL; 2620 return -EINVAL;
2621 } 2621 }
2622 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); 2622 } while (p->idx < p->chunk_ib->length_dw);
2623#if 0 2623#if 0
2624 for (r = 0; r < p->ib->length_dw; r++) { 2624 for (r = 0; r < p->ib->length_dw; r++) {
2625 printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]); 2625 printk(KERN_INFO "%05d 0x%08X\n", r, p->ib.ptr[r]);
diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c
index cf0df45d455e..d2dd29ab24fa 100644
--- a/drivers/gpu/drm/radeon/r600_dma.c
+++ b/drivers/gpu/drm/radeon/r600_dma.c
@@ -441,31 +441,27 @@ struct radeon_fence *r600_copy_dma(struct radeon_device *rdev,
441 unsigned num_gpu_pages, 441 unsigned num_gpu_pages,
442 struct reservation_object *resv) 442 struct reservation_object *resv)
443{ 443{
444 struct radeon_semaphore *sem = NULL;
445 struct radeon_fence *fence; 444 struct radeon_fence *fence;
445 struct radeon_sync sync;
446 int ring_index = rdev->asic->copy.dma_ring_index; 446 int ring_index = rdev->asic->copy.dma_ring_index;
447 struct radeon_ring *ring = &rdev->ring[ring_index]; 447 struct radeon_ring *ring = &rdev->ring[ring_index];
448 u32 size_in_dw, cur_size_in_dw; 448 u32 size_in_dw, cur_size_in_dw;
449 int i, num_loops; 449 int i, num_loops;
450 int r = 0; 450 int r = 0;
451 451
452 r = radeon_semaphore_create(rdev, &sem); 452 radeon_sync_create(&sync);
453 if (r) {
454 DRM_ERROR("radeon: moving bo (%d).\n", r);
455 return ERR_PTR(r);
456 }
457 453
458 size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4; 454 size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
459 num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE); 455 num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFE);
460 r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8); 456 r = radeon_ring_lock(rdev, ring, num_loops * 4 + 8);
461 if (r) { 457 if (r) {
462 DRM_ERROR("radeon: moving bo (%d).\n", r); 458 DRM_ERROR("radeon: moving bo (%d).\n", r);
463 radeon_semaphore_free(rdev, &sem, NULL); 459 radeon_sync_free(rdev, &sync, NULL);
464 return ERR_PTR(r); 460 return ERR_PTR(r);
465 } 461 }
466 462
467 radeon_semaphore_sync_resv(rdev, sem, resv, false); 463 radeon_sync_resv(rdev, &sync, resv, false);
468 radeon_semaphore_sync_rings(rdev, sem, ring->idx); 464 radeon_sync_rings(rdev, &sync, ring->idx);
469 465
470 for (i = 0; i < num_loops; i++) { 466 for (i = 0; i < num_loops; i++) {
471 cur_size_in_dw = size_in_dw; 467 cur_size_in_dw = size_in_dw;
@@ -484,12 +480,12 @@ struct radeon_fence *r600_copy_dma(struct radeon_device *rdev,
484 r = radeon_fence_emit(rdev, &fence, ring->idx); 480 r = radeon_fence_emit(rdev, &fence, ring->idx);
485 if (r) { 481 if (r) {
486 radeon_ring_unlock_undo(rdev, ring); 482 radeon_ring_unlock_undo(rdev, ring);
487 radeon_semaphore_free(rdev, &sem, NULL); 483 radeon_sync_free(rdev, &sync, NULL);
488 return ERR_PTR(r); 484 return ERR_PTR(r);
489 } 485 }
490 486
491 radeon_ring_unlock_commit(rdev, ring, false); 487 radeon_ring_unlock_commit(rdev, ring, false);
492 radeon_semaphore_free(rdev, &sem, fence); 488 radeon_sync_free(rdev, &sync, fence);
493 489
494 return fence; 490 return fence;
495} 491}
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index b5c73df8e202..843b65f46ece 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -811,6 +811,7 @@ union power_info {
811union fan_info { 811union fan_info {
812 struct _ATOM_PPLIB_FANTABLE fan; 812 struct _ATOM_PPLIB_FANTABLE fan;
813 struct _ATOM_PPLIB_FANTABLE2 fan2; 813 struct _ATOM_PPLIB_FANTABLE2 fan2;
814 struct _ATOM_PPLIB_FANTABLE3 fan3;
814}; 815};
815 816
816static int r600_parse_clk_voltage_dep_table(struct radeon_clock_voltage_dependency_table *radeon_table, 817static int r600_parse_clk_voltage_dep_table(struct radeon_clock_voltage_dependency_table *radeon_table,
@@ -900,6 +901,14 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
900 else 901 else
901 rdev->pm.dpm.fan.t_max = 10900; 902 rdev->pm.dpm.fan.t_max = 10900;
902 rdev->pm.dpm.fan.cycle_delay = 100000; 903 rdev->pm.dpm.fan.cycle_delay = 100000;
904 if (fan_info->fan.ucFanTableFormat >= 3) {
905 rdev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
906 rdev->pm.dpm.fan.default_max_fan_pwm =
907 le16_to_cpu(fan_info->fan3.usFanPWMMax);
908 rdev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
909 rdev->pm.dpm.fan.fan_output_sensitivity =
910 le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
911 }
903 rdev->pm.dpm.fan.ucode_fan_control = true; 912 rdev->pm.dpm.fan.ucode_fan_control = true;
904 } 913 }
905 } 914 }
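
Since _ATOM_PPLIB_FANTABLE3 embeds the FANTABLE2 layout, the format byte of the base table stays at a fixed offset and selects how far the parser may read; the multi-byte fields live little-endian in the VBIOS image, hence the le16_to_cpu() calls above. A small sketch restating that dispatch, using the union fan_info added in this hunk:

static u16 example_fan_output_sensitivity(const union fan_info *info)
{
	/* FANTABLE3 embeds FANTABLE2, so ucFanTableFormat is valid
	 * at the same offset for every table revision */
	if (info->fan.ucFanTableFormat >= 3)
		return le16_to_cpu(info->fan3.usFanOutputSensitivity);
	return 4836;	/* matches the default recorded above */
}
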
diff --git a/drivers/gpu/drm/radeon/r600_dpm.h b/drivers/gpu/drm/radeon/r600_dpm.h
index 46b9d2a03018..bd499d749bc9 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.h
+++ b/drivers/gpu/drm/radeon/r600_dpm.h
@@ -96,6 +96,9 @@
96#define R600_TEMP_RANGE_MIN (90 * 1000) 96#define R600_TEMP_RANGE_MIN (90 * 1000)
97#define R600_TEMP_RANGE_MAX (120 * 1000) 97#define R600_TEMP_RANGE_MAX (120 * 1000)
98 98
99#define FDO_PWM_MODE_STATIC 1
100#define FDO_PWM_MODE_STATIC_RPM 5
101
99enum r600_power_level { 102enum r600_power_level {
100 R600_POWER_LEVEL_LOW = 0, 103 R600_POWER_LEVEL_LOW = 0,
101 R600_POWER_LEVEL_MEDIUM = 1, 104 R600_POWER_LEVEL_MEDIUM = 1,
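
FDO_PWM_MODE_STATIC and FDO_PWM_MODE_STATIC_RPM name the two fan-output modes the fan-control code can program: a fixed duty cycle versus a target RPM held by the controller. An illustrative dispatch only; how control_mode values from FANTABLE3 map onto these modes is an assumption, not something this header specifies:

static u32 example_pick_fdo_mode(struct radeon_device *rdev)
{
	/* assumed mapping for illustration: any nonzero control_mode
	 * requests RPM-based control */
	if (rdev->pm.dpm.fan.control_mode)
		return FDO_PWM_MODE_STATIC_RPM;
	return FDO_PWM_MODE_STATIC;
}
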
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index a9717b3fbf1b..54529b837afa 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -150,9 +150,6 @@ extern int radeon_backlight;
150/* number of hw syncs before falling back on blocking */ 150/* number of hw syncs before falling back on blocking */
151#define RADEON_NUM_SYNCS 4 151#define RADEON_NUM_SYNCS 4
152 152
153/* number of hw syncs before falling back on blocking */
154#define RADEON_NUM_SYNCS 4
155
156/* hardcode these limits for now */ 153/* hardcode these limits for now */
157#define RADEON_VA_IB_OFFSET (1 << 20) 154#define RADEON_VA_IB_OFFSET (1 << 20)
158#define RADEON_VA_RESERVED_SIZE (8 << 20) 155#define RADEON_VA_RESERVED_SIZE (8 << 20)
@@ -363,14 +360,15 @@ struct radeon_fence_driver {
363}; 360};
364 361
365struct radeon_fence { 362struct radeon_fence {
366 struct fence base; 363 struct fence base;
367 364
368 struct radeon_device *rdev; 365 struct radeon_device *rdev;
369 uint64_t seq; 366 uint64_t seq;
370 /* RB, DMA, etc. */ 367 /* RB, DMA, etc. */
371 unsigned ring; 368 unsigned ring;
369 bool is_vm_update;
372 370
373 wait_queue_t fence_wake; 371 wait_queue_t fence_wake;
374}; 372};
375 373
376int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring); 374int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
@@ -452,12 +450,22 @@ struct radeon_mman {
452#endif 450#endif
453}; 451};
454 452
453struct radeon_bo_list {
454 struct radeon_bo *robj;
455 struct ttm_validate_buffer tv;
456 uint64_t gpu_offset;
457 unsigned prefered_domains;
458 unsigned allowed_domains;
459 uint32_t tiling_flags;
460};
461
455/* bo virtual address in a specific vm */ 462/* bo virtual address in a specific vm */
456struct radeon_bo_va { 463struct radeon_bo_va {
457 /* protected by bo being reserved */ 464 /* protected by bo being reserved */
458 struct list_head bo_list; 465 struct list_head bo_list;
459 uint32_t flags; 466 uint32_t flags;
460 uint64_t addr; 467 uint64_t addr;
468 struct radeon_fence *last_pt_update;
461 unsigned ref_count; 469 unsigned ref_count;
462 470
463 /* protected by vm mutex */ 471 /* protected by vm mutex */
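
struct radeon_bo_list is the slimmed-down replacement for the old per-reloc wrapper: no GEM object pointer and no handle, just the BO, its TTM validation entry and the domains/offset the CS code cares about. A sketch of how CS paths walk these entries through the embedded tv.head, as radeon_cs_sync_rings() does further down:

static void example_dump_validated(struct radeon_cs_parser *p)
{
	struct radeon_bo_list *entry;

	/* every validated BO is linked through its ttm_validate_buffer */
	list_for_each_entry(entry, &p->validated, tv.head)
		DRM_INFO("bo at 0x%llx, tiling 0x%08x\n",
			 (unsigned long long)entry->gpu_offset,
			 entry->tiling_flags);
}
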
@@ -474,7 +482,7 @@ struct radeon_bo {
474 struct list_head list; 482 struct list_head list;
475 /* Protected by tbo.reserved */ 483 /* Protected by tbo.reserved */
476 u32 initial_domain; 484 u32 initial_domain;
477 struct ttm_place placements[3]; 485 struct ttm_place placements[4];
478 struct ttm_placement placement; 486 struct ttm_placement placement;
479 struct ttm_buffer_object tbo; 487 struct ttm_buffer_object tbo;
480 struct ttm_bo_kmap_obj kmap; 488 struct ttm_bo_kmap_obj kmap;
@@ -576,10 +584,9 @@ int radeon_mode_dumb_mmap(struct drm_file *filp,
576 * Semaphores. 584 * Semaphores.
577 */ 585 */
578struct radeon_semaphore { 586struct radeon_semaphore {
579 struct radeon_sa_bo *sa_bo; 587 struct radeon_sa_bo *sa_bo;
580 signed waiters; 588 signed waiters;
581 uint64_t gpu_addr; 589 uint64_t gpu_addr;
582 struct radeon_fence *sync_to[RADEON_NUM_RINGS];
583}; 590};
584 591
585int radeon_semaphore_create(struct radeon_device *rdev, 592int radeon_semaphore_create(struct radeon_device *rdev,
@@ -588,20 +595,33 @@ bool radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
588 struct radeon_semaphore *semaphore); 595 struct radeon_semaphore *semaphore);
589bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring, 596bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
590 struct radeon_semaphore *semaphore); 597 struct radeon_semaphore *semaphore);
591void radeon_semaphore_sync_fence(struct radeon_semaphore *semaphore,
592 struct radeon_fence *fence);
593int radeon_semaphore_sync_resv(struct radeon_device *rdev,
594 struct radeon_semaphore *semaphore,
595 struct reservation_object *resv,
596 bool shared);
597int radeon_semaphore_sync_rings(struct radeon_device *rdev,
598 struct radeon_semaphore *semaphore,
599 int waiting_ring);
600void radeon_semaphore_free(struct radeon_device *rdev, 598void radeon_semaphore_free(struct radeon_device *rdev,
601 struct radeon_semaphore **semaphore, 599 struct radeon_semaphore **semaphore,
602 struct radeon_fence *fence); 600 struct radeon_fence *fence);
603 601
604/* 602/*
603 * Synchronization
604 */
605struct radeon_sync {
606 struct radeon_semaphore *semaphores[RADEON_NUM_SYNCS];
607 struct radeon_fence *sync_to[RADEON_NUM_RINGS];
608 struct radeon_fence *last_vm_update;
609};
610
611void radeon_sync_create(struct radeon_sync *sync);
612void radeon_sync_fence(struct radeon_sync *sync,
613 struct radeon_fence *fence);
614int radeon_sync_resv(struct radeon_device *rdev,
615 struct radeon_sync *sync,
616 struct reservation_object *resv,
617 bool shared);
618int radeon_sync_rings(struct radeon_device *rdev,
619 struct radeon_sync *sync,
620 int waiting_ring);
621void radeon_sync_free(struct radeon_device *rdev, struct radeon_sync *sync,
622 struct radeon_fence *fence);
623
624/*
605 * GART structures, functions & helpers 625 * GART structures, functions & helpers
606 */ 626 */
607struct radeon_mc; 627struct radeon_mc;
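
The radeon_sync object replaces the heap-allocated semaphore: it lives on the caller's stack, collects fences from reservation objects, and emits the semaphore waits when the rings are synced, so the old create-failure path disappears. A minimal sketch of the calling convention, mirroring r600_copy_dma() above; radeon_sync_free() takes the fence that must signal before the semaphores may be reused, or NULL on the error paths:

static int example_sync_flow(struct radeon_device *rdev,
			     struct radeon_ring *ring,
			     struct reservation_object *resv,
			     struct radeon_fence *fence)
{
	struct radeon_sync sync;
	int r;

	radeon_sync_create(&sync);	/* just initializes the struct */

	r = radeon_sync_resv(rdev, &sync, resv, false);	/* collect fences */
	if (r)
		goto free;

	r = radeon_sync_rings(rdev, &sync, ring->idx);	/* emit waits */
	if (r)
		goto free;

	/* ... emit the actual work and obtain 'fence' here ... */

	radeon_sync_free(rdev, &sync, fence);	/* hold semaphores until fence */
	return 0;

free:
	radeon_sync_free(rdev, &sync, NULL);
	return r;
}
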
@@ -701,6 +721,10 @@ struct radeon_doorbell {
701 721
702int radeon_doorbell_get(struct radeon_device *rdev, u32 *page); 722int radeon_doorbell_get(struct radeon_device *rdev, u32 *page);
703void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell); 723void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell);
724void radeon_doorbell_get_kfd_info(struct radeon_device *rdev,
725 phys_addr_t *aperture_base,
726 size_t *aperture_size,
727 size_t *start_offset);
704 728
705/* 729/*
706 * IRQS. 730 * IRQS.
@@ -814,7 +838,7 @@ struct radeon_ib {
814 struct radeon_fence *fence; 838 struct radeon_fence *fence;
815 struct radeon_vm *vm; 839 struct radeon_vm *vm;
816 bool is_const_ib; 840 bool is_const_ib;
817 struct radeon_semaphore *semaphore; 841 struct radeon_sync sync;
818}; 842};
819 843
820struct radeon_ring { 844struct radeon_ring {
@@ -891,33 +915,40 @@ struct radeon_vm_pt {
891 uint64_t addr; 915 uint64_t addr;
892}; 916};
893 917
918struct radeon_vm_id {
919 unsigned id;
920 uint64_t pd_gpu_addr;
921 /* last flushed PD/PT update */
922 struct radeon_fence *flushed_updates;
923 /* last use of vmid */
924 struct radeon_fence *last_id_use;
925};
926
894struct radeon_vm { 927struct radeon_vm {
895 struct rb_root va; 928 struct mutex mutex;
896 unsigned id; 929
930 struct rb_root va;
931
932 /* protecting invalidated and freed */
933 spinlock_t status_lock;
897 934
898 /* BOs moved, but not yet updated in the PT */ 935 /* BOs moved, but not yet updated in the PT */
899 struct list_head invalidated; 936 struct list_head invalidated;
900 937
901 /* BOs freed, but not yet updated in the PT */ 938 /* BOs freed, but not yet updated in the PT */
902 struct list_head freed; 939 struct list_head freed;
903 940
904 /* contains the page directory */ 941 /* contains the page directory */
905 struct radeon_bo *page_directory; 942 struct radeon_bo *page_directory;
906 uint64_t pd_gpu_addr; 943 unsigned max_pde_used;
907 unsigned max_pde_used;
908 944
909 /* array of page tables, one for each page directory entry */ 945 /* array of page tables, one for each page directory entry */
910 struct radeon_vm_pt *page_tables; 946 struct radeon_vm_pt *page_tables;
911 947
912 struct radeon_bo_va *ib_bo_va; 948 struct radeon_bo_va *ib_bo_va;
913 949
914 struct mutex mutex; 950 /* for id and flush management per ring */
915 /* last fence for cs using this vm */ 951 struct radeon_vm_id ids[RADEON_NUM_RINGS];
916 struct radeon_fence *fence;
917 /* last flush or NULL if we still need to flush */
918 struct radeon_fence *last_flush;
919 /* last use of vmid */
920 struct radeon_fence *last_id_use;
921}; 952};
922 953
923struct radeon_vm_manager { 954struct radeon_vm_manager {
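
Splitting the id/flush state into struct radeon_vm_id, one per ring, lets each ring track its own VMID, last flushed page-directory update and last id use instead of the single shared set the old struct carried. A hedged sketch of the flow the prototypes further down suggest: grab an id, chain its fence into the IB's sync object, then flush against the last VM update:

static void example_vm_id_flow(struct radeon_device *rdev,
			       struct radeon_ib *ib)
{
	struct radeon_fence *depends;

	/* per the prototype below, grab_id returns a fence the caller
	 * must wait on before the id may be used on this ring */
	depends = radeon_vm_grab_id(rdev, ib->vm, ib->ring);
	radeon_sync_fence(&ib->sync, depends);

	/* assumed: the fence argument is the newest PD/PT update the
	 * submission depends on, tracked in sync.last_vm_update */
	radeon_vm_flush(rdev, ib->vm, ib->ring, ib->sync.last_vm_update);
}
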
@@ -1025,19 +1056,7 @@ void cayman_dma_fini(struct radeon_device *rdev);
1025/* 1056/*
1026 * CS. 1057 * CS.
1027 */ 1058 */
1028struct radeon_cs_reloc {
1029 struct drm_gem_object *gobj;
1030 struct radeon_bo *robj;
1031 struct ttm_validate_buffer tv;
1032 uint64_t gpu_offset;
1033 unsigned prefered_domains;
1034 unsigned allowed_domains;
1035 uint32_t tiling_flags;
1036 uint32_t handle;
1037};
1038
1039struct radeon_cs_chunk { 1059struct radeon_cs_chunk {
1040 uint32_t chunk_id;
1041 uint32_t length_dw; 1060 uint32_t length_dw;
1042 uint32_t *kdata; 1061 uint32_t *kdata;
1043 void __user *user_ptr; 1062 void __user *user_ptr;
@@ -1055,16 +1074,15 @@ struct radeon_cs_parser {
1055 unsigned idx; 1074 unsigned idx;
1056 /* relocations */ 1075 /* relocations */
1057 unsigned nrelocs; 1076 unsigned nrelocs;
1058 struct radeon_cs_reloc *relocs; 1077 struct radeon_bo_list *relocs;
1059 struct radeon_cs_reloc **relocs_ptr; 1078 struct radeon_bo_list *vm_bos;
1060 struct radeon_cs_reloc *vm_bos;
1061 struct list_head validated; 1079 struct list_head validated;
1062 unsigned dma_reloc_idx; 1080 unsigned dma_reloc_idx;
1063 /* indices of various chunks */ 1081 /* indices of various chunks */
1064 int chunk_ib_idx; 1082 struct radeon_cs_chunk *chunk_ib;
1065 int chunk_relocs_idx; 1083 struct radeon_cs_chunk *chunk_relocs;
1066 int chunk_flags_idx; 1084 struct radeon_cs_chunk *chunk_flags;
1067 int chunk_const_ib_idx; 1085 struct radeon_cs_chunk *chunk_const_ib;
1068 struct radeon_ib ib; 1086 struct radeon_ib ib;
1069 struct radeon_ib const_ib; 1087 struct radeon_ib const_ib;
1070 void *track; 1088 void *track;
@@ -1078,7 +1096,7 @@ struct radeon_cs_parser {
1078 1096
1079static inline u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx) 1097static inline u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
1080{ 1098{
1081 struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx]; 1099 struct radeon_cs_chunk *ibc = p->chunk_ib;
1082 1100
1083 if (ibc->kdata) 1101 if (ibc->kdata)
1084 return ibc->kdata[idx]; 1102 return ibc->kdata[idx];
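
With the index fields gone, every consumer dereferences the chunk pointers directly and tests them against NULL instead of -1, as radeon_get_ib_value() now shows. A small sketch of the same pattern at a call site:

static bool example_ib_in_range(struct radeon_cs_parser *p, int idx)
{
	/* chunk_ib is NULL when userspace supplied no IB chunk */
	if (p->chunk_ib == NULL)
		return false;
	return idx >= 0 && (u32)idx < p->chunk_ib->length_dw;
}
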
@@ -1490,6 +1508,10 @@ struct radeon_dpm_fan {
1490 u8 t_hyst; 1508 u8 t_hyst;
1491 u32 cycle_delay; 1509 u32 cycle_delay;
1492 u16 t_max; 1510 u16 t_max;
1511 u8 control_mode;
1512 u16 default_max_fan_pwm;
1513 u16 default_fan_output_sensitivity;
1514 u16 fan_output_sensitivity;
1493 bool ucode_fan_control; 1515 bool ucode_fan_control;
1494}; 1516};
1495 1517
@@ -1623,6 +1645,11 @@ struct radeon_pm {
1623 /* internal thermal controller on rv6xx+ */ 1645 /* internal thermal controller on rv6xx+ */
1624 enum radeon_int_thermal_type int_thermal_type; 1646 enum radeon_int_thermal_type int_thermal_type;
1625 struct device *int_hwmon_dev; 1647 struct device *int_hwmon_dev;
1648 /* fan control parameters */
1649 bool no_fan;
1650 u8 fan_pulses_per_revolution;
1651 u8 fan_min_rpm;
1652 u8 fan_max_rpm;
1626 /* dpm */ 1653 /* dpm */
1627 bool dpm_enabled; 1654 bool dpm_enabled;
1628 struct radeon_dpm dpm; 1655 struct radeon_dpm dpm;
@@ -1785,7 +1812,8 @@ struct radeon_asic_ring {
1785 void (*hdp_flush)(struct radeon_device *rdev, struct radeon_ring *ring); 1812 void (*hdp_flush)(struct radeon_device *rdev, struct radeon_ring *ring);
1786 bool (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp, 1813 bool (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
1787 struct radeon_semaphore *semaphore, bool emit_wait); 1814 struct radeon_semaphore *semaphore, bool emit_wait);
1788 void (*vm_flush)(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); 1815 void (*vm_flush)(struct radeon_device *rdev, struct radeon_ring *ring,
1816 unsigned vm_id, uint64_t pd_addr);
1789 1817
1790 /* testing functions */ 1818 /* testing functions */
1791 int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp); 1819 int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
@@ -2388,6 +2416,8 @@ struct radeon_device {
2388 struct radeon_atcs atcs; 2416 struct radeon_atcs atcs;
2389 /* srbm instance registers */ 2417 /* srbm instance registers */
2390 struct mutex srbm_mutex; 2418 struct mutex srbm_mutex;
2419 /* GRBM index mutex. Protects concurrent access to GRBM index */
2420 struct mutex grbm_idx_mutex;
2391 /* clock, powergating flags */ 2421 /* clock, powergating flags */
2392 u32 cg_flags; 2422 u32 cg_flags;
2393 u32 pg_flags; 2423 u32 pg_flags;
@@ -2400,6 +2430,10 @@ struct radeon_device {
2400 u64 vram_pin_size; 2430 u64 vram_pin_size;
2401 u64 gart_pin_size; 2431 u64 gart_pin_size;
2402 2432
2433 /* amdkfd interface */
2434 struct kfd_dev *kfd;
2435 struct radeon_sa_manager kfd_bo;
2436
2403 struct mutex mn_lock; 2437 struct mutex mn_lock;
2404 DECLARE_HASHTABLE(mn_hash, 7); 2438 DECLARE_HASHTABLE(mn_hash, 7);
2405}; 2439};
@@ -2831,7 +2865,7 @@ static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
2831#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)]->ib_execute((rdev), (ib)) 2865#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)]->ib_execute((rdev), (ib))
2832#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)]->ib_parse((rdev), (ib)) 2866#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)]->ib_parse((rdev), (ib))
2833#define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)]->is_lockup((rdev), (cp)) 2867#define radeon_ring_is_lockup(rdev, r, cp) (rdev)->asic->ring[(r)]->is_lockup((rdev), (cp))
2834#define radeon_ring_vm_flush(rdev, r, vm) (rdev)->asic->ring[(r)]->vm_flush((rdev), (r), (vm)) 2868#define radeon_ring_vm_flush(rdev, r, vm_id, pd_addr) (rdev)->asic->ring[(r)->idx]->vm_flush((rdev), (r), (vm_id), (pd_addr))
2835#define radeon_ring_get_rptr(rdev, r) (rdev)->asic->ring[(r)->idx]->get_rptr((rdev), (r)) 2869#define radeon_ring_get_rptr(rdev, r) (rdev)->asic->ring[(r)->idx]->get_rptr((rdev), (r))
2836#define radeon_ring_get_wptr(rdev, r) (rdev)->asic->ring[(r)->idx]->get_wptr((rdev), (r)) 2870#define radeon_ring_get_wptr(rdev, r) (rdev)->asic->ring[(r)->idx]->get_wptr((rdev), (r))
2837#define radeon_ring_set_wptr(rdev, r) (rdev)->asic->ring[(r)->idx]->set_wptr((rdev), (r)) 2871#define radeon_ring_set_wptr(rdev, r) (rdev)->asic->ring[(r)->idx]->set_wptr((rdev), (r))
@@ -2940,14 +2974,14 @@ int radeon_vm_manager_init(struct radeon_device *rdev);
2940void radeon_vm_manager_fini(struct radeon_device *rdev); 2974void radeon_vm_manager_fini(struct radeon_device *rdev);
2941int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm); 2975int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
2942void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm); 2976void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
2943struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev, 2977struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
2944 struct radeon_vm *vm, 2978 struct radeon_vm *vm,
2945 struct list_head *head); 2979 struct list_head *head);
2946struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev, 2980struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
2947 struct radeon_vm *vm, int ring); 2981 struct radeon_vm *vm, int ring);
2948void radeon_vm_flush(struct radeon_device *rdev, 2982void radeon_vm_flush(struct radeon_device *rdev,
2949 struct radeon_vm *vm, 2983 struct radeon_vm *vm,
2950 int ring); 2984 int ring, struct radeon_fence *fence);
2951void radeon_vm_fence(struct radeon_device *rdev, 2985void radeon_vm_fence(struct radeon_device *rdev,
2952 struct radeon_vm *vm, 2986 struct radeon_vm *vm,
2953 struct radeon_fence *fence); 2987 struct radeon_fence *fence);
@@ -3054,7 +3088,7 @@ bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p);
3054void radeon_cs_dump_packet(struct radeon_cs_parser *p, 3088void radeon_cs_dump_packet(struct radeon_cs_parser *p,
3055 struct radeon_cs_packet *pkt); 3089 struct radeon_cs_packet *pkt);
3056int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p, 3090int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
3057 struct radeon_cs_reloc **cs_reloc, 3091 struct radeon_bo_list **cs_reloc,
3058 int nomm); 3092 int nomm);
3059int r600_cs_common_vline_parse(struct radeon_cs_parser *p, 3093int r600_cs_common_vline_parse(struct radeon_cs_parser *p,
3060 uint32_t *vline_start_end, 3094 uint32_t *vline_start_end,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index d8ace5b28a5b..2a45d548d5ec 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -599,7 +599,8 @@ int cayman_asic_reset(struct radeon_device *rdev);
599void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib); 599void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
600int cayman_vm_init(struct radeon_device *rdev); 600int cayman_vm_init(struct radeon_device *rdev);
601void cayman_vm_fini(struct radeon_device *rdev); 601void cayman_vm_fini(struct radeon_device *rdev);
602void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); 602void cayman_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
603 unsigned vm_id, uint64_t pd_addr);
603uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags); 604uint32_t cayman_vm_page_flags(struct radeon_device *rdev, uint32_t flags);
604int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib); 605int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
605int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib); 606int evergreen_dma_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
@@ -624,7 +625,8 @@ void cayman_dma_vm_set_pages(struct radeon_device *rdev,
624 uint32_t incr, uint32_t flags); 625 uint32_t incr, uint32_t flags);
625void cayman_dma_vm_pad_ib(struct radeon_ib *ib); 626void cayman_dma_vm_pad_ib(struct radeon_ib *ib);
626 627
627void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); 628void cayman_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
629 unsigned vm_id, uint64_t pd_addr);
628 630
629u32 cayman_gfx_get_rptr(struct radeon_device *rdev, 631u32 cayman_gfx_get_rptr(struct radeon_device *rdev,
630 struct radeon_ring *ring); 632 struct radeon_ring *ring);
@@ -699,7 +701,8 @@ int si_irq_set(struct radeon_device *rdev);
699int si_irq_process(struct radeon_device *rdev); 701int si_irq_process(struct radeon_device *rdev);
700int si_vm_init(struct radeon_device *rdev); 702int si_vm_init(struct radeon_device *rdev);
701void si_vm_fini(struct radeon_device *rdev); 703void si_vm_fini(struct radeon_device *rdev);
702void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); 704void si_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
705 unsigned vm_id, uint64_t pd_addr);
703int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib); 706int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
704struct radeon_fence *si_copy_dma(struct radeon_device *rdev, 707struct radeon_fence *si_copy_dma(struct radeon_device *rdev,
705 uint64_t src_offset, uint64_t dst_offset, 708 uint64_t src_offset, uint64_t dst_offset,
@@ -721,7 +724,8 @@ void si_dma_vm_set_pages(struct radeon_device *rdev,
721 uint64_t addr, unsigned count, 724 uint64_t addr, unsigned count,
722 uint32_t incr, uint32_t flags); 725 uint32_t incr, uint32_t flags);
723 726
724void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); 727void si_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
728 unsigned vm_id, uint64_t pd_addr);
725u32 si_get_xclk(struct radeon_device *rdev); 729u32 si_get_xclk(struct radeon_device *rdev);
726uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev); 730uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev);
727int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk); 731int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk);
@@ -793,7 +797,8 @@ int cik_irq_set(struct radeon_device *rdev);
793int cik_irq_process(struct radeon_device *rdev); 797int cik_irq_process(struct radeon_device *rdev);
794int cik_vm_init(struct radeon_device *rdev); 798int cik_vm_init(struct radeon_device *rdev);
795void cik_vm_fini(struct radeon_device *rdev); 799void cik_vm_fini(struct radeon_device *rdev);
796void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); 800void cik_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
801 unsigned vm_id, uint64_t pd_addr);
797 802
798void cik_sdma_vm_copy_pages(struct radeon_device *rdev, 803void cik_sdma_vm_copy_pages(struct radeon_device *rdev,
799 struct radeon_ib *ib, 804 struct radeon_ib *ib,
@@ -811,7 +816,8 @@ void cik_sdma_vm_set_pages(struct radeon_device *rdev,
811 uint32_t incr, uint32_t flags); 816 uint32_t incr, uint32_t flags);
812void cik_sdma_vm_pad_ib(struct radeon_ib *ib); 817void cik_sdma_vm_pad_ib(struct radeon_ib *ib);
813 818
814void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm); 819void cik_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
820 unsigned vm_id, uint64_t pd_addr);
815int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib); 821int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
816u32 cik_gfx_get_rptr(struct radeon_device *rdev, 822u32 cik_gfx_get_rptr(struct radeon_device *rdev,
817 struct radeon_ring *ring); 823 struct radeon_ring *ring);
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index df69b92ba164..dbc94f300297 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -196,8 +196,8 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
196 } 196 }
197} 197}
198 198
199static struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev, 199struct radeon_gpio_rec radeon_atombios_lookup_gpio(struct radeon_device *rdev,
200 u8 id) 200 u8 id)
201{ 201{
202 struct atom_context *ctx = rdev->mode_info.atom_context; 202 struct atom_context *ctx = rdev->mode_info.atom_context;
203 struct radeon_gpio_rec gpio; 203 struct radeon_gpio_rec gpio;
@@ -221,6 +221,7 @@ static struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev,
221 if (id == pin->ucGPIO_ID) { 221 if (id == pin->ucGPIO_ID) {
222 gpio.id = pin->ucGPIO_ID; 222 gpio.id = pin->ucGPIO_ID;
223 gpio.reg = le16_to_cpu(pin->usGpioPin_AIndex) * 4; 223 gpio.reg = le16_to_cpu(pin->usGpioPin_AIndex) * 4;
224 gpio.shift = pin->ucGpioPinBitShift;
224 gpio.mask = (1 << pin->ucGpioPinBitShift); 225 gpio.mask = (1 << pin->ucGpioPinBitShift);
225 gpio.valid = true; 226 gpio.valid = true;
226 break; 227 break;
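
The new shift member stores the raw bit position alongside the precomputed mask, so callers can decode a pin's level without re-deriving the shift from the mask. A hedged sketch; RREG32 is the usual radeon register read, and whether a given pin is input-capable is not checked here:

static bool example_gpio_level(struct radeon_device *rdev,
			       const struct radeon_gpio_rec *gpio)
{
	if (!gpio->valid)
		return false;
	return (RREG32(gpio->reg) & gpio->mask) >> gpio->shift;
}
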
@@ -801,7 +802,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
801 hpd_record = 802 hpd_record =
802 (ATOM_HPD_INT_RECORD *) 803 (ATOM_HPD_INT_RECORD *)
803 record; 804 record;
804 gpio = radeon_lookup_gpio(rdev, 805 gpio = radeon_atombios_lookup_gpio(rdev,
805 hpd_record->ucHPDIntGPIOID); 806 hpd_record->ucHPDIntGPIOID);
806 hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio); 807 hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio);
807 hpd.plugged_state = hpd_record->ucPlugged_PinState; 808 hpd.plugged_state = hpd_record->ucPlugged_PinState;
@@ -2128,7 +2129,7 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
2128 rdev->pm.power_state[state_index].clock_info[0].voltage.type = 2129 rdev->pm.power_state[state_index].clock_info[0].voltage.type =
2129 VOLTAGE_GPIO; 2130 VOLTAGE_GPIO;
2130 rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = 2131 rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
2131 radeon_lookup_gpio(rdev, 2132 radeon_atombios_lookup_gpio(rdev,
2132 power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex); 2133 power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex);
2133 if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH) 2134 if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
2134 rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = 2135 rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
@@ -2164,7 +2165,7 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
2164 rdev->pm.power_state[state_index].clock_info[0].voltage.type = 2165 rdev->pm.power_state[state_index].clock_info[0].voltage.type =
2165 VOLTAGE_GPIO; 2166 VOLTAGE_GPIO;
2166 rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = 2167 rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
2167 radeon_lookup_gpio(rdev, 2168 radeon_atombios_lookup_gpio(rdev,
2168 power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex); 2169 power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex);
2169 if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH) 2170 if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
2170 rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = 2171 rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
@@ -2200,7 +2201,7 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
2200 rdev->pm.power_state[state_index].clock_info[0].voltage.type = 2201 rdev->pm.power_state[state_index].clock_info[0].voltage.type =
2201 VOLTAGE_GPIO; 2202 VOLTAGE_GPIO;
2202 rdev->pm.power_state[state_index].clock_info[0].voltage.gpio = 2203 rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
2203 radeon_lookup_gpio(rdev, 2204 radeon_atombios_lookup_gpio(rdev,
2204 power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex); 2205 power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex);
2205 if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH) 2206 if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
2206 rdev->pm.power_state[state_index].clock_info[0].voltage.active_high = 2207 rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
@@ -2248,6 +2249,14 @@ static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *r
2248 2249
2249 /* add the i2c bus for thermal/fan chip */ 2250 /* add the i2c bus for thermal/fan chip */
2250 if (controller->ucType > 0) { 2251 if (controller->ucType > 0) {
2252 if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
2253 rdev->pm.no_fan = true;
2254 rdev->pm.fan_pulses_per_revolution =
2255 controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
2256 if (rdev->pm.fan_pulses_per_revolution) {
2257 rdev->pm.fan_min_rpm = controller->ucFanMinRPM;
2258 rdev->pm.fan_max_rpm = controller->ucFanMaxRPM;
2259 }
2251 if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) { 2260 if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
2252 DRM_INFO("Internal thermal controller %s fan control\n", 2261 DRM_INFO("Internal thermal controller %s fan control\n",
2253 (controller->ucFanParameters & 2262 (controller->ucFanParameters &
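
fan_pulses_per_revolution, fan_min_rpm and fan_max_rpm describe the tachometer: a fan emitting N pulses per revolution at pulse frequency f spins at 60*f/N RPM, and a zero pulse count means no usable tach feedback (or, with ATOM_PP_FANPARAMETERS_NOFAN, no fan at all). Illustrative math only:

static u32 example_tach_to_rpm(u32 pulse_hz, u8 pulses_per_revolution)
{
	if (pulses_per_revolution == 0)
		return 0;	/* no tach feedback available */
	return (60 * pulse_hz) / pulses_per_revolution;
}
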
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 6f377de099f9..c830863bc98a 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -77,22 +77,18 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
77 struct drm_device *ddev = p->rdev->ddev; 77 struct drm_device *ddev = p->rdev->ddev;
78 struct radeon_cs_chunk *chunk; 78 struct radeon_cs_chunk *chunk;
79 struct radeon_cs_buckets buckets; 79 struct radeon_cs_buckets buckets;
80 unsigned i, j; 80 unsigned i;
81 bool duplicate, need_mmap_lock = false; 81 bool need_mmap_lock = false;
82 int r; 82 int r;
83 83
84 if (p->chunk_relocs_idx == -1) { 84 if (p->chunk_relocs == NULL) {
85 return 0; 85 return 0;
86 } 86 }
87 chunk = &p->chunks[p->chunk_relocs_idx]; 87 chunk = p->chunk_relocs;
88 p->dma_reloc_idx = 0; 88 p->dma_reloc_idx = 0;
89 /* FIXME: we assume that each reloc uses 4 dwords */ 89 /* FIXME: we assume that each reloc uses 4 dwords */
90 p->nrelocs = chunk->length_dw / 4; 90 p->nrelocs = chunk->length_dw / 4;
91 p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL); 91 p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_bo_list), GFP_KERNEL);
92 if (p->relocs_ptr == NULL) {
93 return -ENOMEM;
94 }
95 p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
96 if (p->relocs == NULL) { 92 if (p->relocs == NULL) {
97 return -ENOMEM; 93 return -ENOMEM;
98 } 94 }
@@ -101,31 +97,17 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
101 97
102 for (i = 0; i < p->nrelocs; i++) { 98 for (i = 0; i < p->nrelocs; i++) {
103 struct drm_radeon_cs_reloc *r; 99 struct drm_radeon_cs_reloc *r;
100 struct drm_gem_object *gobj;
104 unsigned priority; 101 unsigned priority;
105 102
106 duplicate = false;
107 r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4]; 103 r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
108 for (j = 0; j < i; j++) { 104 gobj = drm_gem_object_lookup(ddev, p->filp, r->handle);
109 if (r->handle == p->relocs[j].handle) { 105 if (gobj == NULL) {
110 p->relocs_ptr[i] = &p->relocs[j];
111 duplicate = true;
112 break;
113 }
114 }
115 if (duplicate) {
116 p->relocs[i].handle = 0;
117 continue;
118 }
119
120 p->relocs[i].gobj = drm_gem_object_lookup(ddev, p->filp,
121 r->handle);
122 if (p->relocs[i].gobj == NULL) {
123 DRM_ERROR("gem object lookup failed 0x%x\n", 106 DRM_ERROR("gem object lookup failed 0x%x\n",
124 r->handle); 107 r->handle);
125 return -ENOENT; 108 return -ENOENT;
126 } 109 }
127 p->relocs_ptr[i] = &p->relocs[i]; 110 p->relocs[i].robj = gem_to_radeon_bo(gobj);
128 p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
129 111
130 /* The userspace buffer priorities are from 0 to 15. A higher 112 /* The userspace buffer priorities are from 0 to 15. A higher
131 * number means the buffer is more important. 113 * number means the buffer is more important.
@@ -184,7 +166,6 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
184 166
185 p->relocs[i].tv.bo = &p->relocs[i].robj->tbo; 167 p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
186 p->relocs[i].tv.shared = !r->write_domain; 168 p->relocs[i].tv.shared = !r->write_domain;
187 p->relocs[i].handle = r->handle;
188 169
189 radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head, 170 radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
190 priority); 171 priority);
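
Dropping relocs_ptr and the duplicate scan simplifies ownership: every reloc slot takes exactly one GEM reference at lookup time and releases it through the BO's embedded gem_base in parser_fini(), so duplicate handles simply occupy two slots. A sketch of that pairing, using only calls shown in this hunk and in the fini path below:

static int example_take_reloc(struct radeon_cs_parser *p,
			      struct drm_device *ddev,
			      struct drm_file *filp,
			      unsigned int i, u32 handle)
{
	struct drm_gem_object *gobj = drm_gem_object_lookup(ddev, filp, handle);

	if (gobj == NULL)
		return -ENOENT;		/* stale or bogus handle */
	p->relocs[i].robj = gem_to_radeon_bo(gobj);
	return 0;
}

static void example_drop_reloc(struct radeon_cs_parser *p, unsigned int i)
{
	struct radeon_bo *bo = p->relocs[i].robj;

	if (bo)	/* slot may be empty if a lookup failed part-way */
		drm_gem_object_unreference_unlocked(&bo->gem_base);
}
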
@@ -251,15 +232,15 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
251 232
252static int radeon_cs_sync_rings(struct radeon_cs_parser *p) 233static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
253{ 234{
254 struct radeon_cs_reloc *reloc; 235 struct radeon_bo_list *reloc;
255 int r; 236 int r;
256 237
257 list_for_each_entry(reloc, &p->validated, tv.head) { 238 list_for_each_entry(reloc, &p->validated, tv.head) {
258 struct reservation_object *resv; 239 struct reservation_object *resv;
259 240
260 resv = reloc->robj->tbo.resv; 241 resv = reloc->robj->tbo.resv;
261 r = radeon_semaphore_sync_resv(p->rdev, p->ib.semaphore, resv, 242 r = radeon_sync_resv(p->rdev, &p->ib.sync, resv,
262 reloc->tv.shared); 243 reloc->tv.shared);
263 if (r) 244 if (r)
264 return r; 245 return r;
265 } 246 }
@@ -282,13 +263,11 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
282 INIT_LIST_HEAD(&p->validated); 263 INIT_LIST_HEAD(&p->validated);
283 p->idx = 0; 264 p->idx = 0;
284 p->ib.sa_bo = NULL; 265 p->ib.sa_bo = NULL;
285 p->ib.semaphore = NULL;
286 p->const_ib.sa_bo = NULL; 266 p->const_ib.sa_bo = NULL;
287 p->const_ib.semaphore = NULL; 267 p->chunk_ib = NULL;
288 p->chunk_ib_idx = -1; 268 p->chunk_relocs = NULL;
289 p->chunk_relocs_idx = -1; 269 p->chunk_flags = NULL;
290 p->chunk_flags_idx = -1; 270 p->chunk_const_ib = NULL;
291 p->chunk_const_ib_idx = -1;
292 p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL); 271 p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
293 if (p->chunks_array == NULL) { 272 if (p->chunks_array == NULL) {
294 return -ENOMEM; 273 return -ENOMEM;
@@ -315,24 +294,23 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
315 return -EFAULT; 294 return -EFAULT;
316 } 295 }
317 p->chunks[i].length_dw = user_chunk.length_dw; 296 p->chunks[i].length_dw = user_chunk.length_dw;
318 p->chunks[i].chunk_id = user_chunk.chunk_id; 297 if (user_chunk.chunk_id == RADEON_CHUNK_ID_RELOCS) {
319 if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) { 298 p->chunk_relocs = &p->chunks[i];
320 p->chunk_relocs_idx = i;
321 } 299 }
322 if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) { 300 if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
323 p->chunk_ib_idx = i; 301 p->chunk_ib = &p->chunks[i];
324 /* zero length IB isn't useful */ 302 /* zero length IB isn't useful */
325 if (p->chunks[i].length_dw == 0) 303 if (p->chunks[i].length_dw == 0)
326 return -EINVAL; 304 return -EINVAL;
327 } 305 }
328 if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB) { 306 if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB) {
329 p->chunk_const_ib_idx = i; 307 p->chunk_const_ib = &p->chunks[i];
330 /* zero length CONST IB isn't useful */ 308 /* zero length CONST IB isn't useful */
331 if (p->chunks[i].length_dw == 0) 309 if (p->chunks[i].length_dw == 0)
332 return -EINVAL; 310 return -EINVAL;
333 } 311 }
334 if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) { 312 if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
335 p->chunk_flags_idx = i; 313 p->chunk_flags = &p->chunks[i];
336 /* zero length flags aren't useful */ 314 /* zero length flags aren't useful */
337 if (p->chunks[i].length_dw == 0) 315 if (p->chunks[i].length_dw == 0)
338 return -EINVAL; 316 return -EINVAL;
@@ -341,10 +319,10 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
341 size = p->chunks[i].length_dw; 319 size = p->chunks[i].length_dw;
342 cdata = (void __user *)(unsigned long)user_chunk.chunk_data; 320 cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
343 p->chunks[i].user_ptr = cdata; 321 p->chunks[i].user_ptr = cdata;
344 if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB) 322 if (user_chunk.chunk_id == RADEON_CHUNK_ID_CONST_IB)
345 continue; 323 continue;
346 324
347 if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) { 325 if (user_chunk.chunk_id == RADEON_CHUNK_ID_IB) {
348 if (!p->rdev || !(p->rdev->flags & RADEON_IS_AGP)) 326 if (!p->rdev || !(p->rdev->flags & RADEON_IS_AGP))
349 continue; 327 continue;
350 } 328 }
@@ -357,7 +335,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
357 if (copy_from_user(p->chunks[i].kdata, cdata, size)) { 335 if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
358 return -EFAULT; 336 return -EFAULT;
359 } 337 }
360 if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) { 338 if (user_chunk.chunk_id == RADEON_CHUNK_ID_FLAGS) {
361 p->cs_flags = p->chunks[i].kdata[0]; 339 p->cs_flags = p->chunks[i].kdata[0];
362 if (p->chunks[i].length_dw > 1) 340 if (p->chunks[i].length_dw > 1)
363 ring = p->chunks[i].kdata[1]; 341 ring = p->chunks[i].kdata[1];
@@ -398,8 +376,8 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
398static int cmp_size_smaller_first(void *priv, struct list_head *a, 376static int cmp_size_smaller_first(void *priv, struct list_head *a,
399 struct list_head *b) 377 struct list_head *b)
400{ 378{
401 struct radeon_cs_reloc *la = list_entry(a, struct radeon_cs_reloc, tv.head); 379 struct radeon_bo_list *la = list_entry(a, struct radeon_bo_list, tv.head);
402 struct radeon_cs_reloc *lb = list_entry(b, struct radeon_cs_reloc, tv.head); 380 struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head);
403 381
404 /* Sort A before B if A is smaller. */ 382 /* Sort A before B if A is smaller. */
405 return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages; 383 return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
@@ -440,13 +418,15 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
440 418
441 if (parser->relocs != NULL) { 419 if (parser->relocs != NULL) {
442 for (i = 0; i < parser->nrelocs; i++) { 420 for (i = 0; i < parser->nrelocs; i++) {
443 if (parser->relocs[i].gobj) 421 struct radeon_bo *bo = parser->relocs[i].robj;
444 drm_gem_object_unreference_unlocked(parser->relocs[i].gobj); 422 if (bo == NULL)
423 continue;
424
425 drm_gem_object_unreference_unlocked(&bo->gem_base);
445 } 426 }
446 } 427 }
447 kfree(parser->track); 428 kfree(parser->track);
448 kfree(parser->relocs); 429 kfree(parser->relocs);
449 kfree(parser->relocs_ptr);
450 drm_free_large(parser->vm_bos); 430 drm_free_large(parser->vm_bos);
451 for (i = 0; i < parser->nchunks; i++) 431 for (i = 0; i < parser->nchunks; i++)
452 drm_free_large(parser->chunks[i].kdata); 432 drm_free_large(parser->chunks[i].kdata);
@@ -461,7 +441,7 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
461{ 441{
462 int r; 442 int r;
463 443
464 if (parser->chunk_ib_idx == -1) 444 if (parser->chunk_ib == NULL)
465 return 0; 445 return 0;
466 446
467 if (parser->cs_flags & RADEON_CS_USE_VM) 447 if (parser->cs_flags & RADEON_CS_USE_VM)
@@ -521,10 +501,6 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
521 for (i = 0; i < p->nrelocs; i++) { 501 for (i = 0; i < p->nrelocs; i++) {
522 struct radeon_bo *bo; 502 struct radeon_bo *bo;
523 503
524 /* ignore duplicates */
525 if (p->relocs_ptr[i] != &p->relocs[i])
526 continue;
527
528 bo = p->relocs[i].robj; 504 bo = p->relocs[i].robj;
529 bo_va = radeon_vm_bo_find(vm, bo); 505 bo_va = radeon_vm_bo_find(vm, bo);
530 if (bo_va == NULL) { 506 if (bo_va == NULL) {
@@ -535,6 +511,8 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
535 r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem); 511 r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
536 if (r) 512 if (r)
537 return r; 513 return r;
514
515 radeon_sync_fence(&p->ib.sync, bo_va->last_pt_update);
538 } 516 }
539 517
540 return radeon_vm_clear_invalids(rdev, vm); 518 return radeon_vm_clear_invalids(rdev, vm);
@@ -547,7 +525,7 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
547 struct radeon_vm *vm = &fpriv->vm; 525 struct radeon_vm *vm = &fpriv->vm;
548 int r; 526 int r;
549 527
550 if (parser->chunk_ib_idx == -1) 528 if (parser->chunk_ib == NULL)
551 return 0; 529 return 0;
552 if ((parser->cs_flags & RADEON_CS_USE_VM) == 0) 530 if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
553 return 0; 531 return 0;
@@ -579,10 +557,9 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
579 DRM_ERROR("Failed to sync rings: %i\n", r); 557 DRM_ERROR("Failed to sync rings: %i\n", r);
580 goto out; 558 goto out;
581 } 559 }
582 radeon_semaphore_sync_fence(parser->ib.semaphore, vm->fence);
583 560
584 if ((rdev->family >= CHIP_TAHITI) && 561 if ((rdev->family >= CHIP_TAHITI) &&
585 (parser->chunk_const_ib_idx != -1)) { 562 (parser->chunk_const_ib != NULL)) {
586 r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true); 563 r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true);
587 } else { 564 } else {
588 r = radeon_ib_schedule(rdev, &parser->ib, NULL, true); 565 r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
@@ -609,7 +586,7 @@ static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser
609 struct radeon_vm *vm = NULL; 586 struct radeon_vm *vm = NULL;
610 int r; 587 int r;
611 588
612 if (parser->chunk_ib_idx == -1) 589 if (parser->chunk_ib == NULL)
613 return 0; 590 return 0;
614 591
615 if (parser->cs_flags & RADEON_CS_USE_VM) { 592 if (parser->cs_flags & RADEON_CS_USE_VM) {
@@ -617,8 +594,8 @@ static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser
617 vm = &fpriv->vm; 594 vm = &fpriv->vm;
618 595
619 if ((rdev->family >= CHIP_TAHITI) && 596 if ((rdev->family >= CHIP_TAHITI) &&
620 (parser->chunk_const_ib_idx != -1)) { 597 (parser->chunk_const_ib != NULL)) {
621 ib_chunk = &parser->chunks[parser->chunk_const_ib_idx]; 598 ib_chunk = parser->chunk_const_ib;
622 if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) { 599 if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
623 DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw); 600 DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
624 return -EINVAL; 601 return -EINVAL;
@@ -637,13 +614,13 @@ static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser
637 return -EFAULT; 614 return -EFAULT;
638 } 615 }
639 616
640 ib_chunk = &parser->chunks[parser->chunk_ib_idx]; 617 ib_chunk = parser->chunk_ib;
641 if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) { 618 if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
642 DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw); 619 DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
643 return -EINVAL; 620 return -EINVAL;
644 } 621 }
645 } 622 }
646 ib_chunk = &parser->chunks[parser->chunk_ib_idx]; 623 ib_chunk = parser->chunk_ib;
647 624
648 r = radeon_ib_get(rdev, parser->ring, &parser->ib, 625 r = radeon_ib_get(rdev, parser->ring, &parser->ib,
649 vm, ib_chunk->length_dw * 4); 626 vm, ib_chunk->length_dw * 4);
@@ -735,7 +712,7 @@ int radeon_cs_packet_parse(struct radeon_cs_parser *p,
735 struct radeon_cs_packet *pkt, 712 struct radeon_cs_packet *pkt,
736 unsigned idx) 713 unsigned idx)
737{ 714{
738 struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx]; 715 struct radeon_cs_chunk *ib_chunk = p->chunk_ib;
739 struct radeon_device *rdev = p->rdev; 716 struct radeon_device *rdev = p->rdev;
740 uint32_t header; 717 uint32_t header;
741 718
@@ -829,7 +806,7 @@ void radeon_cs_dump_packet(struct radeon_cs_parser *p,
829 * GPU offset using the provided start. 806 * GPU offset using the provided start.
830 **/ 807 **/
831int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p, 808int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
832 struct radeon_cs_reloc **cs_reloc, 809 struct radeon_bo_list **cs_reloc,
833 int nomm) 810 int nomm)
834{ 811{
835 struct radeon_cs_chunk *relocs_chunk; 812 struct radeon_cs_chunk *relocs_chunk;
@@ -837,12 +814,12 @@ int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
837 unsigned idx; 814 unsigned idx;
838 int r; 815 int r;
839 816
840 if (p->chunk_relocs_idx == -1) { 817 if (p->chunk_relocs == NULL) {
841 DRM_ERROR("No relocation chunk !\n"); 818 DRM_ERROR("No relocation chunk !\n");
842 return -EINVAL; 819 return -EINVAL;
843 } 820 }
844 *cs_reloc = NULL; 821 *cs_reloc = NULL;
845 relocs_chunk = &p->chunks[p->chunk_relocs_idx]; 822 relocs_chunk = p->chunk_relocs;
846 r = radeon_cs_packet_parse(p, &p3reloc, p->idx); 823 r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
847 if (r) 824 if (r)
848 return r; 825 return r;
@@ -868,6 +845,6 @@ int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
868 (u64)relocs_chunk->kdata[idx + 3] << 32; 845 (u64)relocs_chunk->kdata[idx + 3] << 32;
869 (*cs_reloc)->gpu_offset |= relocs_chunk->kdata[idx + 0]; 846 (*cs_reloc)->gpu_offset |= relocs_chunk->kdata[idx + 0];
870 } else 847 } else
871 *cs_reloc = p->relocs_ptr[(idx / 4)]; 848 *cs_reloc = &p->relocs[(idx / 4)];
872 return 0; 849 return 0;
873} 850}
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 9630e8d95fb4..45e54060ee97 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -117,106 +117,7 @@ static void radeon_show_cursor(struct drm_crtc *crtc)
117 } 117 }
118} 118}
119 119
120static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj, 120static int radeon_cursor_move_locked(struct drm_crtc *crtc, int x, int y)
121 uint64_t gpu_addr)
122{
123 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
124 struct radeon_device *rdev = crtc->dev->dev_private;
125
126 if (ASIC_IS_DCE4(rdev)) {
127 WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
128 upper_32_bits(gpu_addr));
129 WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
130 gpu_addr & 0xffffffff);
131 } else if (ASIC_IS_AVIVO(rdev)) {
132 if (rdev->family >= CHIP_RV770) {
133 if (radeon_crtc->crtc_id)
134 WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
135 else
136 WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
137 }
138 WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
139 gpu_addr & 0xffffffff);
140 } else {
141 radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr;
142 /* offset is from DISP(2)_BASE_ADDRESS */
143 WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset);
144 }
145}
146
147int radeon_crtc_cursor_set(struct drm_crtc *crtc,
148 struct drm_file *file_priv,
149 uint32_t handle,
150 uint32_t width,
151 uint32_t height)
152{
153 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
154 struct radeon_device *rdev = crtc->dev->dev_private;
155 struct drm_gem_object *obj;
156 struct radeon_bo *robj;
157 uint64_t gpu_addr;
158 int ret;
159
160 if (!handle) {
161 /* turn off cursor */
162 radeon_hide_cursor(crtc);
163 obj = NULL;
164 goto unpin;
165 }
166
167 if ((width > radeon_crtc->max_cursor_width) ||
168 (height > radeon_crtc->max_cursor_height)) {
169 DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
170 return -EINVAL;
171 }
172
173 obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
174 if (!obj) {
175 DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, radeon_crtc->crtc_id);
176 return -ENOENT;
177 }
178
179 robj = gem_to_radeon_bo(obj);
180 ret = radeon_bo_reserve(robj, false);
181 if (unlikely(ret != 0))
182 goto fail;
183 /* Only 27 bit offset for legacy cursor */
184 ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,
185 ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
186 &gpu_addr);
187 radeon_bo_unreserve(robj);
188 if (ret)
189 goto fail;
190
191 radeon_crtc->cursor_width = width;
192 radeon_crtc->cursor_height = height;
193
194 radeon_lock_cursor(crtc, true);
195 radeon_set_cursor(crtc, obj, gpu_addr);
196 radeon_show_cursor(crtc);
197 radeon_lock_cursor(crtc, false);
198
199unpin:
200 if (radeon_crtc->cursor_bo) {
201 robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
202 ret = radeon_bo_reserve(robj, false);
203 if (likely(ret == 0)) {
204 radeon_bo_unpin(robj);
205 radeon_bo_unreserve(robj);
206 }
207 drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
208 }
209
210 radeon_crtc->cursor_bo = obj;
211 return 0;
212fail:
213 drm_gem_object_unreference_unlocked(obj);
214
215 return ret;
216}
217
218int radeon_crtc_cursor_move(struct drm_crtc *crtc,
219 int x, int y)
220{ 121{
221 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 122 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
222 struct radeon_device *rdev = crtc->dev->dev_private; 123 struct radeon_device *rdev = crtc->dev->dev_private;
@@ -281,7 +182,6 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
281 } 182 }
282 } 183 }
283 184
284 radeon_lock_cursor(crtc, true);
285 if (ASIC_IS_DCE4(rdev)) { 185 if (ASIC_IS_DCE4(rdev)) {
286 WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y); 186 WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y);
287 WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin); 187 WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
@@ -308,7 +208,173 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
308 WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset + 208 WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset +
309 (yorigin * 256))); 209 (yorigin * 256)));
310 } 210 }
211
212 radeon_crtc->cursor_x = x;
213 radeon_crtc->cursor_y = y;
214
215 return 0;
216}
217
218int radeon_crtc_cursor_move(struct drm_crtc *crtc,
219 int x, int y)
220{
221 int ret;
222
223 radeon_lock_cursor(crtc, true);
224 ret = radeon_cursor_move_locked(crtc, x, y);
311 radeon_lock_cursor(crtc, false); 225 radeon_lock_cursor(crtc, false);
312 226
227 return ret;
228}
229
230static int radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj)
231{
232 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
233 struct radeon_device *rdev = crtc->dev->dev_private;
234 struct radeon_bo *robj = gem_to_radeon_bo(obj);
235 uint64_t gpu_addr;
236 int ret;
237
238 ret = radeon_bo_reserve(robj, false);
239 if (unlikely(ret != 0))
240 goto fail;
241 /* Only 27 bit offset for legacy cursor */
242 ret = radeon_bo_pin_restricted(robj, RADEON_GEM_DOMAIN_VRAM,
243 ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27,
244 &gpu_addr);
245 radeon_bo_unreserve(robj);
246 if (ret)
247 goto fail;
248
249 if (ASIC_IS_DCE4(rdev)) {
250 WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
251 upper_32_bits(gpu_addr));
252 WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
253 gpu_addr & 0xffffffff);
254 } else if (ASIC_IS_AVIVO(rdev)) {
255 if (rdev->family >= CHIP_RV770) {
256 if (radeon_crtc->crtc_id)
257 WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
258 else
259 WREG32(R700_D1CUR_SURFACE_ADDRESS_HIGH, upper_32_bits(gpu_addr));
260 }
261 WREG32(AVIVO_D1CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
262 gpu_addr & 0xffffffff);
263 } else {
264 radeon_crtc->legacy_cursor_offset = gpu_addr - radeon_crtc->legacy_display_base_addr;
265 /* offset is from DISP(2)_BASE_ADDRESS */
266 WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, radeon_crtc->legacy_cursor_offset);
267 }
268
313 return 0; 269 return 0;
270
271fail:
272 drm_gem_object_unreference_unlocked(obj);
273
274 return ret;
275}
276
277int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
278 struct drm_file *file_priv,
279 uint32_t handle,
280 uint32_t width,
281 uint32_t height,
282 int32_t hot_x,
283 int32_t hot_y)
284{
285 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
286 struct drm_gem_object *obj;
287 int ret;
288
289 if (!handle) {
290 /* turn off cursor */
291 radeon_hide_cursor(crtc);
292 obj = NULL;
293 goto unpin;
294 }
295
296 if ((width > radeon_crtc->max_cursor_width) ||
297 (height > radeon_crtc->max_cursor_height)) {
298 DRM_ERROR("bad cursor width or height %d x %d\n", width, height);
299 return -EINVAL;
300 }
301
302 obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
303 if (!obj) {
304 DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, radeon_crtc->crtc_id);
305 return -ENOENT;
306 }
307
308 radeon_crtc->cursor_width = width;
309 radeon_crtc->cursor_height = height;
310
311 radeon_lock_cursor(crtc, true);
312
313 if (hot_x != radeon_crtc->cursor_hot_x ||
314 hot_y != radeon_crtc->cursor_hot_y) {
315 int x, y;
316
317 x = radeon_crtc->cursor_x + radeon_crtc->cursor_hot_x - hot_x;
318 y = radeon_crtc->cursor_y + radeon_crtc->cursor_hot_y - hot_y;
319
320 radeon_cursor_move_locked(crtc, x, y);
321
322 radeon_crtc->cursor_hot_x = hot_x;
323 radeon_crtc->cursor_hot_y = hot_y;
324 }
325
326 ret = radeon_set_cursor(crtc, obj);
327
328 if (ret)
329 DRM_ERROR("radeon_set_cursor returned %d, not changing cursor\n",
330 ret);
331 else
332 radeon_show_cursor(crtc);
333
334 radeon_lock_cursor(crtc, false);
335
336unpin:
337 if (radeon_crtc->cursor_bo) {
338 struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
339 ret = radeon_bo_reserve(robj, false);
340 if (likely(ret == 0)) {
341 radeon_bo_unpin(robj);
342 radeon_bo_unreserve(robj);
343 }
344 if (radeon_crtc->cursor_bo != obj)
345 drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
346 }
347
348 radeon_crtc->cursor_bo = obj;
349 return 0;
350}
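
The hotspot compensation above keeps the hotspot's on-screen position fixed when userspace hands in a cursor image whose hotspot differs from the current one. A minimal standalone sketch of that arithmetic, with hypothetical values (not part of the patch):

#include <assert.h>

int main(void)
{
	int cursor_x = 100, cursor_y = 100;	/* current cursor BO position */
	int hot_x = 0, hot_y = 0;		/* current hotspot */
	int new_hot_x = 8, new_hot_y = 8;	/* hotspot of the new image */

	/* Same adjustment as radeon_crtc_cursor_set2() above. */
	int x = cursor_x + hot_x - new_hot_x;
	int y = cursor_y + hot_y - new_hot_y;

	/* The on-screen hotspot (BO position + hotspot) is unchanged. */
	assert(x + new_hot_x == cursor_x + hot_x);
	assert(y + new_hot_y == cursor_y + hot_y);
	return 0;
}
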
351
352/**
353 * radeon_cursor_reset - Re-set the current cursor, if any.
354 *
355 * @crtc: drm crtc
356 *
357 * If the CRTC passed in currently has a cursor assigned, this function
358 * makes sure it's visible.
359 */
360void radeon_cursor_reset(struct drm_crtc *crtc)
361{
362 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
363 int ret;
364
365 if (radeon_crtc->cursor_bo) {
366 radeon_lock_cursor(crtc, true);
367
368 radeon_cursor_move_locked(crtc, radeon_crtc->cursor_x,
369 radeon_crtc->cursor_y);
370
371 ret = radeon_set_cursor(crtc, radeon_crtc->cursor_bo);
372 if (ret)
373 DRM_ERROR("radeon_set_cursor returned %d, not showing "
374 "cursor\n", ret);
375 else
376 radeon_show_cursor(crtc);
377
378 radeon_lock_cursor(crtc, false);
379 }
314} 380}
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 995a8b1770dd..0ec65168f331 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -377,6 +377,37 @@ void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
377 __clear_bit(doorbell, rdev->doorbell.used); 377 __clear_bit(doorbell, rdev->doorbell.used);
378} 378}
379 379
380/**
381 * radeon_doorbell_get_kfd_info - Report doorbell configuration required to
382 * setup KFD
383 *
384 * @rdev: radeon_device pointer
385 * @aperture_base: output returning doorbell aperture base physical address
386 * @aperture_size: output returning doorbell aperture size in bytes
387 * @start_offset: output returning # of doorbell bytes reserved for radeon.
388 *
389 * Radeon and the KFD share the doorbell aperture. Radeon sets it up,
390 * takes doorbells required for its own rings and reports the setup to KFD.
391 * Radeon reserved doorbells are at the start of the doorbell aperture.
392 */
393void radeon_doorbell_get_kfd_info(struct radeon_device *rdev,
394 phys_addr_t *aperture_base,
395 size_t *aperture_size,
396 size_t *start_offset)
397{
398 /* The first num_doorbells are used by radeon.
399 * KFD takes whatever's left in the aperture. */
400 if (rdev->doorbell.size > rdev->doorbell.num_doorbells * sizeof(u32)) {
401 *aperture_base = rdev->doorbell.base;
402 *aperture_size = rdev->doorbell.size;
403 *start_offset = rdev->doorbell.num_doorbells * sizeof(u32);
404 } else {
405 *aperture_base = 0;
406 *aperture_size = 0;
407 *start_offset = 0;
408 }
409}
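
Given the values reported above, a consumer such as the KFD can derive how many 32-bit doorbells are left for its own use. A minimal sketch under that reading; the helper name kfd_doorbell_count and the sizes in main() are hypothetical, not part of this patch:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static size_t kfd_doorbell_count(size_t aperture_size, size_t start_offset)
{
	/* Everything past the radeon-reserved prefix belongs to the KFD;
	 * each doorbell is one 32-bit word. */
	if (aperture_size <= start_offset)
		return 0;
	return (aperture_size - start_offset) / sizeof(uint32_t);
}

int main(void)
{
	/* Hypothetical setup: 8 KiB aperture, first 1 KiB reserved by radeon. */
	printf("%zu doorbells left for KFD\n",
	       kfd_doorbell_count(8192, 1024));	/* prints 1792 */
	return 0;
}
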
410
380/* 411/*
381 * radeon_wb_*() 412 * radeon_wb_*()
 382 * Writeback is the method by which the GPU updates special pages 413
@@ -1273,6 +1304,7 @@ int radeon_device_init(struct radeon_device *rdev,
1273 mutex_init(&rdev->pm.mutex); 1304 mutex_init(&rdev->pm.mutex);
1274 mutex_init(&rdev->gpu_clock_mutex); 1305 mutex_init(&rdev->gpu_clock_mutex);
1275 mutex_init(&rdev->srbm_mutex); 1306 mutex_init(&rdev->srbm_mutex);
1307 mutex_init(&rdev->grbm_idx_mutex);
1276 init_rwsem(&rdev->pm.mclk_lock); 1308 init_rwsem(&rdev->pm.mclk_lock);
1277 init_rwsem(&rdev->exclusive_lock); 1309 init_rwsem(&rdev->exclusive_lock);
1278 init_waitqueue_head(&rdev->irq.vblank_queue); 1310 init_waitqueue_head(&rdev->irq.vblank_queue);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 00ead8c2758a..102116902a07 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -32,6 +32,7 @@
32 32
33#include <linux/pm_runtime.h> 33#include <linux/pm_runtime.h>
34#include <drm/drm_crtc_helper.h> 34#include <drm/drm_crtc_helper.h>
35#include <drm/drm_plane_helper.h>
35#include <drm/drm_edid.h> 36#include <drm/drm_edid.h>
36 37
37#include <linux/gcd.h> 38#include <linux/gcd.h>
@@ -634,7 +635,7 @@ radeon_crtc_set_config(struct drm_mode_set *set)
634 return ret; 635 return ret;
635} 636}
636static const struct drm_crtc_funcs radeon_crtc_funcs = { 637static const struct drm_crtc_funcs radeon_crtc_funcs = {
637 .cursor_set = radeon_crtc_cursor_set, 638 .cursor_set2 = radeon_crtc_cursor_set2,
638 .cursor_move = radeon_crtc_cursor_move, 639 .cursor_move = radeon_crtc_cursor_move,
639 .gamma_set = radeon_crtc_gamma_set, 640 .gamma_set = radeon_crtc_gamma_set,
640 .set_config = radeon_crtc_set_config, 641 .set_config = radeon_crtc_set_config,
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index dcffa30ee2db..4f50fb0e3d93 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -41,6 +41,8 @@
41#include <drm/drm_gem.h> 41#include <drm/drm_gem.h>
42 42
43#include "drm_crtc_helper.h" 43#include "drm_crtc_helper.h"
44#include "radeon_kfd.h"
45
44/* 46/*
45 * KMS wrapper. 47 * KMS wrapper.
46 * - 2.0.0 - initial interface 48 * - 2.0.0 - initial interface
@@ -654,12 +656,15 @@ static int __init radeon_init(void)
654#endif 656#endif
655 } 657 }
656 658
659 radeon_kfd_init();
660
657 /* let modprobe override vga console setting */ 661 /* let modprobe override vga console setting */
658 return drm_pci_init(driver, pdriver); 662 return drm_pci_init(driver, pdriver);
659} 663}
660 664
661static void __exit radeon_exit(void) 665static void __exit radeon_exit(void)
662{ 666{
667 radeon_kfd_fini();
663 drm_pci_exit(driver, pdriver); 668 drm_pci_exit(driver, pdriver);
664 radeon_unregister_atpx_handler(); 669 radeon_unregister_atpx_handler();
665} 670}
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 0ea1db83d573..29b9220ec399 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -48,10 +48,40 @@ struct radeon_fbdev {
48 struct radeon_device *rdev; 48 struct radeon_device *rdev;
49}; 49};
50 50
51/**
52 * radeon_fb_helper_set_par - Hide cursor on CRTCs used by fbdev.
53 *
54 * @info: fbdev info
55 *
56 * This function hides the cursor on all CRTCs used by fbdev.
57 */
58static int radeon_fb_helper_set_par(struct fb_info *info)
59{
60 int ret;
61
62 ret = drm_fb_helper_set_par(info);
63
64 /* XXX: with universal plane support fbdev will automatically disable
65 * all non-primary planes (including the cursor)
66 */
67 if (ret == 0) {
68 struct drm_fb_helper *fb_helper = info->par;
69 int i;
70
71 for (i = 0; i < fb_helper->crtc_count; i++) {
72 struct drm_crtc *crtc = fb_helper->crtc_info[i].mode_set.crtc;
73
74 radeon_crtc_cursor_set2(crtc, NULL, 0, 0, 0, 0, 0);
75 }
76 }
77
78 return ret;
79}
80
51static struct fb_ops radeonfb_ops = { 81static struct fb_ops radeonfb_ops = {
52 .owner = THIS_MODULE, 82 .owner = THIS_MODULE,
53 .fb_check_var = drm_fb_helper_check_var, 83 .fb_check_var = drm_fb_helper_check_var,
54 .fb_set_par = drm_fb_helper_set_par, 84 .fb_set_par = radeon_fb_helper_set_par,
55 .fb_fillrect = cfb_fillrect, 85 .fb_fillrect = cfb_fillrect,
56 .fb_copyarea = cfb_copyarea, 86 .fb_copyarea = cfb_copyarea,
57 .fb_imageblit = cfb_imageblit, 87 .fb_imageblit = cfb_imageblit,
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 995167025282..d13d1b5a859f 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -140,6 +140,7 @@ int radeon_fence_emit(struct radeon_device *rdev,
140 (*fence)->rdev = rdev; 140 (*fence)->rdev = rdev;
141 (*fence)->seq = seq; 141 (*fence)->seq = seq;
142 (*fence)->ring = ring; 142 (*fence)->ring = ring;
143 (*fence)->is_vm_update = false;
143 fence_init(&(*fence)->base, &radeon_fence_ops, 144 fence_init(&(*fence)->base, &radeon_fence_ops,
144 &rdev->fence_queue.lock, rdev->fence_context + ring, seq); 145 &rdev->fence_queue.lock, rdev->fence_context + ring, seq);
145 radeon_fence_ring_emit(rdev, ring, *fence); 146 radeon_fence_ring_emit(rdev, ring, *fence);
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index c194497aa586..fe48f229043e 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -394,9 +394,10 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
394 return r; 394 return r;
395} 395}
396 396
397int radeon_mode_dumb_mmap(struct drm_file *filp, 397static int radeon_mode_mmap(struct drm_file *filp,
398 struct drm_device *dev, 398 struct drm_device *dev,
399 uint32_t handle, uint64_t *offset_p) 399 uint32_t handle, bool dumb,
400 uint64_t *offset_p)
400{ 401{
401 struct drm_gem_object *gobj; 402 struct drm_gem_object *gobj;
402 struct radeon_bo *robj; 403 struct radeon_bo *robj;
@@ -405,6 +406,14 @@ int radeon_mode_dumb_mmap(struct drm_file *filp,
405 if (gobj == NULL) { 406 if (gobj == NULL) {
406 return -ENOENT; 407 return -ENOENT;
407 } 408 }
409
410 /*
411 * We don't allow dumb mmaps on objects created using another
412 * interface.
413 */
414 WARN_ONCE(dumb && !(gobj->dumb || gobj->import_attach),
415 "Illegal dumb map of GPU buffer.\n");
416
408 robj = gem_to_radeon_bo(gobj); 417 robj = gem_to_radeon_bo(gobj);
409 if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) { 418 if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
410 drm_gem_object_unreference_unlocked(gobj); 419 drm_gem_object_unreference_unlocked(gobj);
@@ -415,12 +424,20 @@ int radeon_mode_dumb_mmap(struct drm_file *filp,
415 return 0; 424 return 0;
416} 425}
417 426
427int radeon_mode_dumb_mmap(struct drm_file *filp,
428 struct drm_device *dev,
429 uint32_t handle, uint64_t *offset_p)
430{
431 return radeon_mode_mmap(filp, dev, handle, true, offset_p);
432}
433
418int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data, 434int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
419 struct drm_file *filp) 435 struct drm_file *filp)
420{ 436{
421 struct drm_radeon_gem_mmap *args = data; 437 struct drm_radeon_gem_mmap *args = data;
422 438
423 return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr); 439 return radeon_mode_mmap(filp, dev, args->handle, false,
440 &args->addr_ptr);
424} 441}
425 442
426int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, 443int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
@@ -518,6 +535,68 @@ out:
518 return r; 535 return r;
519} 536}
520 537
538/**
539 * radeon_gem_va_update_vm -update the bo_va in its VM
540 *
541 * @rdev: radeon_device pointer
542 * @bo_va: bo_va to update
543 *
 544 * Update the bo_va directly after setting its address. Errors are not
545 * vital here, so they are not reported back to userspace.
546 */
547static void radeon_gem_va_update_vm(struct radeon_device *rdev,
548 struct radeon_bo_va *bo_va)
549{
550 struct ttm_validate_buffer tv, *entry;
551 struct radeon_bo_list *vm_bos;
552 struct ww_acquire_ctx ticket;
553 struct list_head list;
554 unsigned domain;
555 int r;
556
557 INIT_LIST_HEAD(&list);
558
559 tv.bo = &bo_va->bo->tbo;
560 tv.shared = true;
561 list_add(&tv.head, &list);
562
563 vm_bos = radeon_vm_get_bos(rdev, bo_va->vm, &list);
564 if (!vm_bos)
565 return;
566
567 r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
568 if (r)
569 goto error_free;
570
571 list_for_each_entry(entry, &list, head) {
572 domain = radeon_mem_type_to_domain(entry->bo->mem.mem_type);
573 /* if anything is swapped out don't swap it in here,
574 just abort and wait for the next CS */
575 if (domain == RADEON_GEM_DOMAIN_CPU)
576 goto error_unreserve;
577 }
578
579 mutex_lock(&bo_va->vm->mutex);
580 r = radeon_vm_clear_freed(rdev, bo_va->vm);
581 if (r)
582 goto error_unlock;
583
584 if (bo_va->it.start)
585 r = radeon_vm_bo_update(rdev, bo_va, &bo_va->bo->tbo.mem);
586
587error_unlock:
588 mutex_unlock(&bo_va->vm->mutex);
589
590error_unreserve:
591 ttm_eu_backoff_reservation(&ticket, &list);
592
593error_free:
594 drm_free_large(vm_bos);
595
596 if (r)
597 DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
598}
599
521int radeon_gem_va_ioctl(struct drm_device *dev, void *data, 600int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
522 struct drm_file *filp) 601 struct drm_file *filp)
523{ 602{
@@ -601,6 +680,7 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
601 if (bo_va->it.start) { 680 if (bo_va->it.start) {
602 args->operation = RADEON_VA_RESULT_VA_EXIST; 681 args->operation = RADEON_VA_RESULT_VA_EXIST;
603 args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE; 682 args->offset = bo_va->it.start * RADEON_GPU_PAGE_SIZE;
683 radeon_bo_unreserve(rbo);
604 goto out; 684 goto out;
605 } 685 }
606 r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags); 686 r = radeon_vm_bo_set_addr(rdev, bo_va, args->offset, args->flags);
@@ -611,12 +691,13 @@ int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
611 default: 691 default:
612 break; 692 break;
613 } 693 }
694 if (!r)
695 radeon_gem_va_update_vm(rdev, bo_va);
614 args->operation = RADEON_VA_RESULT_OK; 696 args->operation = RADEON_VA_RESULT_OK;
615 if (r) { 697 if (r) {
616 args->operation = RADEON_VA_RESULT_ERROR; 698 args->operation = RADEON_VA_RESULT_ERROR;
617 } 699 }
618out: 700out:
619 radeon_bo_unreserve(rbo);
620 drm_gem_object_unreference_unlocked(gobj); 701 drm_gem_object_unreference_unlocked(gobj);
621 return r; 702 return r;
622} 703}
@@ -682,6 +763,7 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
682 return -ENOMEM; 763 return -ENOMEM;
683 764
684 r = drm_gem_handle_create(file_priv, gobj, &handle); 765 r = drm_gem_handle_create(file_priv, gobj, &handle);
766 gobj->dumb = true;
685 /* drop reference from allocate - handle holds it now */ 767 /* drop reference from allocate - handle holds it now */
686 drm_gem_object_unreference_unlocked(gobj); 768 drm_gem_object_unreference_unlocked(gobj);
687 if (r) { 769 if (r) {
diff --git a/drivers/gpu/drm/radeon/radeon_ib.c b/drivers/gpu/drm/radeon/radeon_ib.c
index 3f39fcca4d07..c39ce1f05703 100644
--- a/drivers/gpu/drm/radeon/radeon_ib.c
+++ b/drivers/gpu/drm/radeon/radeon_ib.c
@@ -64,10 +64,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
64 return r; 64 return r;
65 } 65 }
66 66
67 r = radeon_semaphore_create(rdev, &ib->semaphore); 67 radeon_sync_create(&ib->sync);
68 if (r) {
69 return r;
70 }
71 68
72 ib->ring = ring; 69 ib->ring = ring;
73 ib->fence = NULL; 70 ib->fence = NULL;
@@ -96,7 +93,7 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
96 */ 93 */
97void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib) 94void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib *ib)
98{ 95{
99 radeon_semaphore_free(rdev, &ib->semaphore, ib->fence); 96 radeon_sync_free(rdev, &ib->sync, ib->fence);
100 radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence); 97 radeon_sa_bo_free(rdev, &ib->sa_bo, ib->fence);
101 radeon_fence_unref(&ib->fence); 98 radeon_fence_unref(&ib->fence);
102} 99}
@@ -145,11 +142,11 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
145 if (ib->vm) { 142 if (ib->vm) {
146 struct radeon_fence *vm_id_fence; 143 struct radeon_fence *vm_id_fence;
147 vm_id_fence = radeon_vm_grab_id(rdev, ib->vm, ib->ring); 144 vm_id_fence = radeon_vm_grab_id(rdev, ib->vm, ib->ring);
148 radeon_semaphore_sync_fence(ib->semaphore, vm_id_fence); 145 radeon_sync_fence(&ib->sync, vm_id_fence);
149 } 146 }
150 147
151 /* sync with other rings */ 148 /* sync with other rings */
152 r = radeon_semaphore_sync_rings(rdev, ib->semaphore, ib->ring); 149 r = radeon_sync_rings(rdev, &ib->sync, ib->ring);
153 if (r) { 150 if (r) {
154 dev_err(rdev->dev, "failed to sync rings (%d)\n", r); 151 dev_err(rdev->dev, "failed to sync rings (%d)\n", r);
155 radeon_ring_unlock_undo(rdev, ring); 152 radeon_ring_unlock_undo(rdev, ring);
@@ -157,11 +154,12 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
157 } 154 }
158 155
159 if (ib->vm) 156 if (ib->vm)
160 radeon_vm_flush(rdev, ib->vm, ib->ring); 157 radeon_vm_flush(rdev, ib->vm, ib->ring,
158 ib->sync.last_vm_update);
161 159
162 if (const_ib) { 160 if (const_ib) {
163 radeon_ring_ib_execute(rdev, const_ib->ring, const_ib); 161 radeon_ring_ib_execute(rdev, const_ib->ring, const_ib);
164 radeon_semaphore_free(rdev, &const_ib->semaphore, NULL); 162 radeon_sync_free(rdev, &const_ib->sync, NULL);
165 } 163 }
166 radeon_ring_ib_execute(rdev, ib->ring, ib); 164 radeon_ring_ib_execute(rdev, ib->ring, ib);
167 r = radeon_fence_emit(rdev, &ib->fence, ib->ring); 165 r = radeon_fence_emit(rdev, &ib->fence, ib->ring);
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c
new file mode 100644
index 000000000000..065d02068ec3
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_kfd.c
@@ -0,0 +1,563 @@
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#include <linux/module.h>
24#include <linux/fdtable.h>
25#include <linux/uaccess.h>
26#include <drm/drmP.h>
27#include "radeon.h"
28#include "cikd.h"
29#include "cik_reg.h"
30#include "radeon_kfd.h"
31
32#define CIK_PIPE_PER_MEC (4)
33
34struct kgd_mem {
35 struct radeon_sa_bo *sa_bo;
36 uint64_t gpu_addr;
37 void *ptr;
38};
39
40static int init_sa_manager(struct kgd_dev *kgd, unsigned int size);
41static void fini_sa_manager(struct kgd_dev *kgd);
42
43static int allocate_mem(struct kgd_dev *kgd, size_t size, size_t alignment,
44 enum kgd_memory_pool pool, struct kgd_mem **mem);
45
46static void free_mem(struct kgd_dev *kgd, struct kgd_mem *mem);
47
48static uint64_t get_vmem_size(struct kgd_dev *kgd);
49static uint64_t get_gpu_clock_counter(struct kgd_dev *kgd);
50
51static uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
52
53/*
54 * Register access functions
55 */
56
57static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
58 uint32_t sh_mem_config, uint32_t sh_mem_ape1_base,
59 uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases);
60
61static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
62 unsigned int vmid);
63
64static int kgd_init_memory(struct kgd_dev *kgd);
65
66static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
67 uint32_t hpd_size, uint64_t hpd_gpu_addr);
68
69static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
70 uint32_t queue_id, uint32_t __user *wptr);
71
72static bool kgd_hqd_is_occupies(struct kgd_dev *kgd, uint64_t queue_address,
73 uint32_t pipe_id, uint32_t queue_id);
74
75static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
76 unsigned int timeout, uint32_t pipe_id,
77 uint32_t queue_id);
78
79static const struct kfd2kgd_calls kfd2kgd = {
80 .init_sa_manager = init_sa_manager,
81 .fini_sa_manager = fini_sa_manager,
82 .allocate_mem = allocate_mem,
83 .free_mem = free_mem,
84 .get_vmem_size = get_vmem_size,
85 .get_gpu_clock_counter = get_gpu_clock_counter,
86 .get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
87 .program_sh_mem_settings = kgd_program_sh_mem_settings,
88 .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
89 .init_memory = kgd_init_memory,
90 .init_pipeline = kgd_init_pipeline,
91 .hqd_load = kgd_hqd_load,
92 .hqd_is_occupies = kgd_hqd_is_occupies,
93 .hqd_destroy = kgd_hqd_destroy,
94};
95
96static const struct kgd2kfd_calls *kgd2kfd;
97
98bool radeon_kfd_init(void)
99{
100 bool (*kgd2kfd_init_p)(unsigned, const struct kfd2kgd_calls*,
101 const struct kgd2kfd_calls**);
102
103 kgd2kfd_init_p = symbol_request(kgd2kfd_init);
104
105 if (kgd2kfd_init_p == NULL)
106 return false;
107
108 if (!kgd2kfd_init_p(KFD_INTERFACE_VERSION, &kfd2kgd, &kgd2kfd)) {
109 symbol_put(kgd2kfd_init);
110 kgd2kfd = NULL;
111
112 return false;
113 }
114
115 return true;
116}
117
118void radeon_kfd_fini(void)
119{
120 if (kgd2kfd) {
121 kgd2kfd->exit();
122 symbol_put(kgd2kfd_init);
123 }
124}
125
126void radeon_kfd_device_probe(struct radeon_device *rdev)
127{
128 if (kgd2kfd)
129 rdev->kfd = kgd2kfd->probe((struct kgd_dev *)rdev, rdev->pdev);
130}
131
132void radeon_kfd_device_init(struct radeon_device *rdev)
133{
134 if (rdev->kfd) {
135 struct kgd2kfd_shared_resources gpu_resources = {
136 .compute_vmid_bitmap = 0xFF00,
137
138 .first_compute_pipe = 1,
139 .compute_pipe_count = 8 - 1,
140 };
141
142 radeon_doorbell_get_kfd_info(rdev,
143 &gpu_resources.doorbell_physical_address,
144 &gpu_resources.doorbell_aperture_size,
145 &gpu_resources.doorbell_start_offset);
146
147 kgd2kfd->device_init(rdev->kfd, &gpu_resources);
148 }
149}
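
The compute_vmid_bitmap above, 0xFF00, marks VMIDs 8 through 15 as belonging to compute; these are exactly the VMIDs that kgd_init_memory() further below programs with the HSA apertures. A quick standalone cross-check of that bit pattern (illustrative only):

#include <assert.h>

int main(void)
{
	unsigned int bitmap = 0xFF00;	/* compute_vmid_bitmap */
	int i;

	for (i = 0; i < 16; i++)
		assert(((bitmap >> i) & 1) == (i >= 8));
	return 0;
}
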
150
151void radeon_kfd_device_fini(struct radeon_device *rdev)
152{
153 if (rdev->kfd) {
154 kgd2kfd->device_exit(rdev->kfd);
155 rdev->kfd = NULL;
156 }
157}
158
159void radeon_kfd_interrupt(struct radeon_device *rdev, const void *ih_ring_entry)
160{
161 if (rdev->kfd)
162 kgd2kfd->interrupt(rdev->kfd, ih_ring_entry);
163}
164
165void radeon_kfd_suspend(struct radeon_device *rdev)
166{
167 if (rdev->kfd)
168 kgd2kfd->suspend(rdev->kfd);
169}
170
171int radeon_kfd_resume(struct radeon_device *rdev)
172{
173 int r = 0;
174
175 if (rdev->kfd)
176 r = kgd2kfd->resume(rdev->kfd);
177
178 return r;
179}
180
181static u32 pool_to_domain(enum kgd_memory_pool p)
182{
183 switch (p) {
184 case KGD_POOL_FRAMEBUFFER: return RADEON_GEM_DOMAIN_VRAM;
185 default: return RADEON_GEM_DOMAIN_GTT;
186 }
187}
188
189static int init_sa_manager(struct kgd_dev *kgd, unsigned int size)
190{
191 struct radeon_device *rdev = (struct radeon_device *)kgd;
192 int r;
193
194 BUG_ON(kgd == NULL);
195
196 r = radeon_sa_bo_manager_init(rdev, &rdev->kfd_bo,
197 size,
198 RADEON_GPU_PAGE_SIZE,
199 RADEON_GEM_DOMAIN_GTT,
200 RADEON_GEM_GTT_WC);
201
202 if (r)
203 return r;
204
205 r = radeon_sa_bo_manager_start(rdev, &rdev->kfd_bo);
206 if (r)
207 radeon_sa_bo_manager_fini(rdev, &rdev->kfd_bo);
208
209 return r;
210}
211
212static void fini_sa_manager(struct kgd_dev *kgd)
213{
214 struct radeon_device *rdev = (struct radeon_device *)kgd;
215
216 BUG_ON(kgd == NULL);
217
218 radeon_sa_bo_manager_suspend(rdev, &rdev->kfd_bo);
219 radeon_sa_bo_manager_fini(rdev, &rdev->kfd_bo);
220}
221
222static int allocate_mem(struct kgd_dev *kgd, size_t size, size_t alignment,
223 enum kgd_memory_pool pool, struct kgd_mem **mem)
224{
225 struct radeon_device *rdev = (struct radeon_device *)kgd;
226 u32 domain;
227 int r;
228
229 BUG_ON(kgd == NULL);
230
231 domain = pool_to_domain(pool);
232 if (domain != RADEON_GEM_DOMAIN_GTT) {
233 dev_err(rdev->dev,
234 "Only allowed to allocate gart memory for kfd\n");
235 return -EINVAL;
236 }
237
238 *mem = kmalloc(sizeof(struct kgd_mem), GFP_KERNEL);
239 if ((*mem) == NULL)
240 return -ENOMEM;
241
242 r = radeon_sa_bo_new(rdev, &rdev->kfd_bo, &(*mem)->sa_bo, size,
243 alignment);
244 if (r) {
245 dev_err(rdev->dev, "failed to get memory for kfd (%d)\n", r);
246 return r;
247 }
248
249 (*mem)->ptr = radeon_sa_bo_cpu_addr((*mem)->sa_bo);
250 (*mem)->gpu_addr = radeon_sa_bo_gpu_addr((*mem)->sa_bo);
251
252 return 0;
253}
254
255static void free_mem(struct kgd_dev *kgd, struct kgd_mem *mem)
256{
257 struct radeon_device *rdev = (struct radeon_device *)kgd;
258
259 BUG_ON(kgd == NULL);
260
261 radeon_sa_bo_free(rdev, &mem->sa_bo, NULL);
262 kfree(mem);
263}
264
265static uint64_t get_vmem_size(struct kgd_dev *kgd)
266{
267 struct radeon_device *rdev = (struct radeon_device *)kgd;
268
269 BUG_ON(kgd == NULL);
270
271 return rdev->mc.real_vram_size;
272}
273
274static uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
275{
276 struct radeon_device *rdev = (struct radeon_device *)kgd;
277
278 return rdev->asic->get_gpu_clock_counter(rdev);
279}
280
281static uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
282{
283 struct radeon_device *rdev = (struct radeon_device *)kgd;
284
 285 /* The sclk is in quanta of 10 kHz */
286 return rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk / 100;
287}
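
Since the sclk is reported in quanta of 10 kHz, dividing by 100 converts it to MHz; for example, a reported value of 100000 is 100000 x 10 kHz = 1 GHz = 1000 MHz. A trivial standalone check of that arithmetic (hypothetical input value):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t sclk = 100000;		/* hypothetical value, 10 kHz quanta */

	assert(sclk / 100 == 1000);	/* 1 GHz expressed in MHz */
	return 0;
}
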
288
289static inline struct radeon_device *get_radeon_device(struct kgd_dev *kgd)
290{
291 return (struct radeon_device *)kgd;
292}
293
294static void write_register(struct kgd_dev *kgd, uint32_t offset, uint32_t value)
295{
296 struct radeon_device *rdev = get_radeon_device(kgd);
297
298 writel(value, (void __iomem *)(rdev->rmmio + offset));
299}
300
301static uint32_t read_register(struct kgd_dev *kgd, uint32_t offset)
302{
303 struct radeon_device *rdev = get_radeon_device(kgd);
304
305 return readl((void __iomem *)(rdev->rmmio + offset));
306}
307
308static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
309 uint32_t queue, uint32_t vmid)
310{
311 struct radeon_device *rdev = get_radeon_device(kgd);
312 uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);
313
314 mutex_lock(&rdev->srbm_mutex);
315 write_register(kgd, SRBM_GFX_CNTL, value);
316}
317
318static void unlock_srbm(struct kgd_dev *kgd)
319{
320 struct radeon_device *rdev = get_radeon_device(kgd);
321
322 write_register(kgd, SRBM_GFX_CNTL, 0);
323 mutex_unlock(&rdev->srbm_mutex);
324}
325
326static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
327 uint32_t queue_id)
328{
329 uint32_t mec = (++pipe_id / CIK_PIPE_PER_MEC) + 1;
330 uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC);
331
332 lock_srbm(kgd, mec, pipe, queue_id, 0);
333}
334
335static void release_queue(struct kgd_dev *kgd)
336{
337 unlock_srbm(kgd);
338}
339
340static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
341 uint32_t sh_mem_config,
342 uint32_t sh_mem_ape1_base,
343 uint32_t sh_mem_ape1_limit,
344 uint32_t sh_mem_bases)
345{
346 lock_srbm(kgd, 0, 0, 0, vmid);
347
348 write_register(kgd, SH_MEM_CONFIG, sh_mem_config);
349 write_register(kgd, SH_MEM_APE1_BASE, sh_mem_ape1_base);
350 write_register(kgd, SH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
351 write_register(kgd, SH_MEM_BASES, sh_mem_bases);
352
353 unlock_srbm(kgd);
354}
355
356static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
357 unsigned int vmid)
358{
359 /*
360 * We have to assume that there is no outstanding mapping.
361 * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0
362 * because a mapping is in progress or because a mapping finished and
363 * the SW cleared it.
364 * So the protocol is to always wait & clear.
365 */
366 uint32_t pasid_mapping = (pasid == 0) ? 0 :
367 (uint32_t)pasid | ATC_VMID_PASID_MAPPING_VALID;
368
369 write_register(kgd, ATC_VMID0_PASID_MAPPING + vmid*sizeof(uint32_t),
370 pasid_mapping);
371
372 while (!(read_register(kgd, ATC_VMID_PASID_MAPPING_UPDATE_STATUS) &
373 (1U << vmid)))
374 cpu_relax();
375 write_register(kgd, ATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);
376
377 return 0;
378}
379
380static int kgd_init_memory(struct kgd_dev *kgd)
381{
382 /*
383 * Configure apertures:
384 * LDS: 0x60000000'00000000 - 0x60000001'00000000 (4GB)
385 * Scratch: 0x60000001'00000000 - 0x60000002'00000000 (4GB)
386 * GPUVM: 0x60010000'00000000 - 0x60020000'00000000 (1TB)
387 */
388 int i;
389 uint32_t sh_mem_bases = PRIVATE_BASE(0x6000) | SHARED_BASE(0x6000);
390
391 for (i = 8; i < 16; i++) {
392 uint32_t sh_mem_config;
393
394 lock_srbm(kgd, 0, 0, 0, i);
395
396 sh_mem_config = ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED);
397 sh_mem_config |= DEFAULT_MTYPE(MTYPE_NONCACHED);
398
399 write_register(kgd, SH_MEM_CONFIG, sh_mem_config);
400
401 write_register(kgd, SH_MEM_BASES, sh_mem_bases);
402
403 /* Scratch aperture is not supported for now. */
404 write_register(kgd, SH_STATIC_MEM_CONFIG, 0);
405
406 /* APE1 disabled for now. */
407 write_register(kgd, SH_MEM_APE1_BASE, 1);
408 write_register(kgd, SH_MEM_APE1_LIMIT, 0);
409
410 unlock_srbm(kgd);
411 }
412
413 return 0;
414}
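
The 0x6000 passed to PRIVATE_BASE()/SHARED_BASE() appears to select the top 16 bits of the 64-bit GPUVM address, which lines up with the 0x60000000'00000000 aperture bases listed in the comment above. A standalone sanity check of that reading (an assumption about the register encoding, not something this patch states):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t base = (uint64_t)0x6000 << 48;

	assert(base == 0x6000000000000000ULL);	/* LDS aperture base */
	return 0;
}
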
415
416static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
417 uint32_t hpd_size, uint64_t hpd_gpu_addr)
418{
419 uint32_t mec = (++pipe_id / CIK_PIPE_PER_MEC) + 1;
420 uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC);
421
422 lock_srbm(kgd, mec, pipe, 0, 0);
423 write_register(kgd, CP_HPD_EOP_BASE_ADDR,
424 lower_32_bits(hpd_gpu_addr >> 8));
425 write_register(kgd, CP_HPD_EOP_BASE_ADDR_HI,
426 upper_32_bits(hpd_gpu_addr >> 8));
427 write_register(kgd, CP_HPD_EOP_VMID, 0);
428 write_register(kgd, CP_HPD_EOP_CONTROL, hpd_size);
429 unlock_srbm(kgd);
430
431 return 0;
432}
433
434static inline struct cik_mqd *get_mqd(void *mqd)
435{
436 return (struct cik_mqd *)mqd;
437}
438
439static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
440 uint32_t queue_id, uint32_t __user *wptr)
441{
442 uint32_t wptr_shadow, is_wptr_shadow_valid;
443 struct cik_mqd *m;
444
445 m = get_mqd(mqd);
446
447 is_wptr_shadow_valid = !get_user(wptr_shadow, wptr);
448
449 acquire_queue(kgd, pipe_id, queue_id);
450 write_register(kgd, CP_MQD_BASE_ADDR, m->cp_mqd_base_addr_lo);
451 write_register(kgd, CP_MQD_BASE_ADDR_HI, m->cp_mqd_base_addr_hi);
452 write_register(kgd, CP_MQD_CONTROL, m->cp_mqd_control);
453
454 write_register(kgd, CP_HQD_PQ_BASE, m->cp_hqd_pq_base_lo);
455 write_register(kgd, CP_HQD_PQ_BASE_HI, m->cp_hqd_pq_base_hi);
456 write_register(kgd, CP_HQD_PQ_CONTROL, m->cp_hqd_pq_control);
457
458 write_register(kgd, CP_HQD_IB_CONTROL, m->cp_hqd_ib_control);
459 write_register(kgd, CP_HQD_IB_BASE_ADDR, m->cp_hqd_ib_base_addr_lo);
460 write_register(kgd, CP_HQD_IB_BASE_ADDR_HI, m->cp_hqd_ib_base_addr_hi);
461
462 write_register(kgd, CP_HQD_IB_RPTR, m->cp_hqd_ib_rptr);
463
464 write_register(kgd, CP_HQD_PERSISTENT_STATE,
465 m->cp_hqd_persistent_state);
466 write_register(kgd, CP_HQD_SEMA_CMD, m->cp_hqd_sema_cmd);
467 write_register(kgd, CP_HQD_MSG_TYPE, m->cp_hqd_msg_type);
468
469 write_register(kgd, CP_HQD_ATOMIC0_PREOP_LO,
470 m->cp_hqd_atomic0_preop_lo);
471
472 write_register(kgd, CP_HQD_ATOMIC0_PREOP_HI,
473 m->cp_hqd_atomic0_preop_hi);
474
475 write_register(kgd, CP_HQD_ATOMIC1_PREOP_LO,
476 m->cp_hqd_atomic1_preop_lo);
477
478 write_register(kgd, CP_HQD_ATOMIC1_PREOP_HI,
479 m->cp_hqd_atomic1_preop_hi);
480
481 write_register(kgd, CP_HQD_PQ_RPTR_REPORT_ADDR,
482 m->cp_hqd_pq_rptr_report_addr_lo);
483
484 write_register(kgd, CP_HQD_PQ_RPTR_REPORT_ADDR_HI,
485 m->cp_hqd_pq_rptr_report_addr_hi);
486
487 write_register(kgd, CP_HQD_PQ_RPTR, m->cp_hqd_pq_rptr);
488
489 write_register(kgd, CP_HQD_PQ_WPTR_POLL_ADDR,
490 m->cp_hqd_pq_wptr_poll_addr_lo);
491
492 write_register(kgd, CP_HQD_PQ_WPTR_POLL_ADDR_HI,
493 m->cp_hqd_pq_wptr_poll_addr_hi);
494
495 write_register(kgd, CP_HQD_PQ_DOORBELL_CONTROL,
496 m->cp_hqd_pq_doorbell_control);
497
498 write_register(kgd, CP_HQD_VMID, m->cp_hqd_vmid);
499
500 write_register(kgd, CP_HQD_QUANTUM, m->cp_hqd_quantum);
501
502 write_register(kgd, CP_HQD_PIPE_PRIORITY, m->cp_hqd_pipe_priority);
503 write_register(kgd, CP_HQD_QUEUE_PRIORITY, m->cp_hqd_queue_priority);
504
505 write_register(kgd, CP_HQD_IQ_RPTR, m->cp_hqd_iq_rptr);
506
507 if (is_wptr_shadow_valid)
508 write_register(kgd, CP_HQD_PQ_WPTR, wptr_shadow);
509
510 write_register(kgd, CP_HQD_ACTIVE, m->cp_hqd_active);
511 release_queue(kgd);
512
513 return 0;
514}
515
516static bool kgd_hqd_is_occupies(struct kgd_dev *kgd, uint64_t queue_address,
517 uint32_t pipe_id, uint32_t queue_id)
518{
519 uint32_t act;
520 bool retval = false;
521 uint32_t low, high;
522
523 acquire_queue(kgd, pipe_id, queue_id);
524 act = read_register(kgd, CP_HQD_ACTIVE);
525 if (act) {
526 low = lower_32_bits(queue_address >> 8);
527 high = upper_32_bits(queue_address >> 8);
528
529 if (low == read_register(kgd, CP_HQD_PQ_BASE) &&
530 high == read_register(kgd, CP_HQD_PQ_BASE_HI))
531 retval = true;
532 }
533 release_queue(kgd);
534 return retval;
535}
536
537static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
538 unsigned int timeout, uint32_t pipe_id,
539 uint32_t queue_id)
540{
541 uint32_t temp;
542
543 acquire_queue(kgd, pipe_id, queue_id);
544 write_register(kgd, CP_HQD_PQ_DOORBELL_CONTROL, 0);
545
546 write_register(kgd, CP_HQD_DEQUEUE_REQUEST, reset_type);
547
548 while (true) {
549 temp = read_register(kgd, CP_HQD_ACTIVE);
550 if (temp & 0x1)
551 break;
552 if (timeout == 0) {
 553 pr_err("kfd: cp queue preemption time out (%dms)\n",
 554 timeout);
555 return -ETIME;
556 }
557 msleep(20);
558 timeout -= 20;
559 }
560
561 release_queue(kgd);
562 return 0;
563}
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.h b/drivers/gpu/drm/radeon/radeon_kfd.h
new file mode 100644
index 000000000000..f90e161ca507
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_kfd.h
@@ -0,0 +1,47 @@
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23/*
24 * radeon_kfd.h defines the private interface between the
25 * AMD kernel graphics drivers and the AMD KFD.
26 */
27
28#ifndef RADEON_KFD_H_INCLUDED
29#define RADEON_KFD_H_INCLUDED
30
31#include <linux/types.h>
32#include "../amd/include/kgd_kfd_interface.h"
33
34struct radeon_device;
35
36bool radeon_kfd_init(void);
37void radeon_kfd_fini(void);
38
39void radeon_kfd_suspend(struct radeon_device *rdev);
40int radeon_kfd_resume(struct radeon_device *rdev);
41void radeon_kfd_interrupt(struct radeon_device *rdev,
42 const void *ih_ring_entry);
43void radeon_kfd_device_probe(struct radeon_device *rdev);
44void radeon_kfd_device_init(struct radeon_device *rdev);
45void radeon_kfd_device_fini(struct radeon_device *rdev);
46
47#endif /* RADEON_KFD_H_INCLUDED */
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 03586763ee86..3cf9c1fa6475 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -34,6 +34,8 @@
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <linux/pm_runtime.h> 35#include <linux/pm_runtime.h>
36 36
37#include "radeon_kfd.h"
38
37#if defined(CONFIG_VGA_SWITCHEROO) 39#if defined(CONFIG_VGA_SWITCHEROO)
38bool radeon_has_atpx(void); 40bool radeon_has_atpx(void);
39#else 41#else
@@ -63,6 +65,8 @@ int radeon_driver_unload_kms(struct drm_device *dev)
63 65
64 pm_runtime_get_sync(dev->dev); 66 pm_runtime_get_sync(dev->dev);
65 67
68 radeon_kfd_device_fini(rdev);
69
66 radeon_acpi_fini(rdev); 70 radeon_acpi_fini(rdev);
67 71
68 radeon_modeset_fini(rdev); 72 radeon_modeset_fini(rdev);
@@ -142,6 +146,9 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
142 "Error during ACPI methods call\n"); 146 "Error during ACPI methods call\n");
143 } 147 }
144 148
149 radeon_kfd_device_probe(rdev);
150 radeon_kfd_device_init(rdev);
151
145 if (radeon_is_px(dev)) { 152 if (radeon_is_px(dev)) {
146 pm_runtime_use_autosuspend(dev->dev); 153 pm_runtime_use_autosuspend(dev->dev);
147 pm_runtime_set_autosuspend_delay(dev->dev, 5000); 154 pm_runtime_set_autosuspend_delay(dev->dev, 5000);
@@ -621,8 +628,6 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
621 RADEON_VA_IB_OFFSET, 628 RADEON_VA_IB_OFFSET,
622 RADEON_VM_PAGE_READABLE | 629 RADEON_VM_PAGE_READABLE |
623 RADEON_VM_PAGE_SNOOPED); 630 RADEON_VM_PAGE_SNOOPED);
624
625 radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
626 if (r) { 631 if (r) {
627 radeon_vm_fini(rdev, vm); 632 radeon_vm_fini(rdev, vm);
628 kfree(fpriv); 633 kfree(fpriv);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index cafb1ccf2ec3..678b4386540d 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -1054,6 +1054,7 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc,
1054 DRM_ERROR("Mode need scaling but only first crtc can do that.\n"); 1054 DRM_ERROR("Mode need scaling but only first crtc can do that.\n");
1055 } 1055 }
1056 } 1056 }
1057 radeon_cursor_reset(crtc);
1057 return 0; 1058 return 0;
1058} 1059}
1059 1060
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 04db2fdd8692..390db897f322 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -321,6 +321,10 @@ struct radeon_crtc {
321 uint32_t crtc_offset; 321 uint32_t crtc_offset;
322 struct drm_gem_object *cursor_bo; 322 struct drm_gem_object *cursor_bo;
323 uint64_t cursor_addr; 323 uint64_t cursor_addr;
324 int cursor_x;
325 int cursor_y;
326 int cursor_hot_x;
327 int cursor_hot_y;
324 int cursor_width; 328 int cursor_width;
325 int cursor_height; 329 int cursor_height;
326 int max_cursor_width; 330 int max_cursor_width;
@@ -462,6 +466,7 @@ struct radeon_gpio_rec {
462 u8 id; 466 u8 id;
463 u32 reg; 467 u32 reg;
464 u32 mask; 468 u32 mask;
469 u32 shift;
465}; 470};
466 471
467struct radeon_hpd { 472struct radeon_hpd {
@@ -748,6 +753,8 @@ extern bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
748extern bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev, 753extern bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
749 struct radeon_atom_ss *ss, 754 struct radeon_atom_ss *ss,
750 int id, u32 clock); 755 int id, u32 clock);
756extern struct radeon_gpio_rec radeon_atombios_lookup_gpio(struct radeon_device *rdev,
757 u8 id);
751 758
752extern void radeon_compute_pll_legacy(struct radeon_pll *pll, 759extern void radeon_compute_pll_legacy(struct radeon_pll *pll,
753 uint64_t freq, 760 uint64_t freq,
@@ -802,13 +809,16 @@ extern int radeon_crtc_set_base_atomic(struct drm_crtc *crtc,
802extern int radeon_crtc_do_set_base(struct drm_crtc *crtc, 809extern int radeon_crtc_do_set_base(struct drm_crtc *crtc,
803 struct drm_framebuffer *fb, 810 struct drm_framebuffer *fb,
804 int x, int y, int atomic); 811 int x, int y, int atomic);
805extern int radeon_crtc_cursor_set(struct drm_crtc *crtc, 812extern int radeon_crtc_cursor_set2(struct drm_crtc *crtc,
806 struct drm_file *file_priv, 813 struct drm_file *file_priv,
807 uint32_t handle, 814 uint32_t handle,
808 uint32_t width, 815 uint32_t width,
809 uint32_t height); 816 uint32_t height,
817 int32_t hot_x,
818 int32_t hot_y);
810extern int radeon_crtc_cursor_move(struct drm_crtc *crtc, 819extern int radeon_crtc_cursor_move(struct drm_crtc *crtc,
811 int x, int y); 820 int x, int y);
821extern void radeon_cursor_reset(struct drm_crtc *crtc);
812 822
813extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc, 823extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
814 unsigned int flags, 824 unsigned int flags,
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 4c0d786d5c7a..7d68223eb469 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -99,22 +99,39 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
99 99
100 rbo->placement.placement = rbo->placements; 100 rbo->placement.placement = rbo->placements;
101 rbo->placement.busy_placement = rbo->placements; 101 rbo->placement.busy_placement = rbo->placements;
102 if (domain & RADEON_GEM_DOMAIN_VRAM) 102 if (domain & RADEON_GEM_DOMAIN_VRAM) {
103 /* Try placing BOs which don't need CPU access outside of the
104 * CPU accessible part of VRAM
105 */
106 if ((rbo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
107 rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size) {
108 rbo->placements[c].fpfn =
109 rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
110 rbo->placements[c++].flags = TTM_PL_FLAG_WC |
111 TTM_PL_FLAG_UNCACHED |
112 TTM_PL_FLAG_VRAM;
113 }
114
115 rbo->placements[c].fpfn = 0;
103 rbo->placements[c++].flags = TTM_PL_FLAG_WC | 116 rbo->placements[c++].flags = TTM_PL_FLAG_WC |
104 TTM_PL_FLAG_UNCACHED | 117 TTM_PL_FLAG_UNCACHED |
105 TTM_PL_FLAG_VRAM; 118 TTM_PL_FLAG_VRAM;
119 }
106 120
107 if (domain & RADEON_GEM_DOMAIN_GTT) { 121 if (domain & RADEON_GEM_DOMAIN_GTT) {
108 if (rbo->flags & RADEON_GEM_GTT_UC) { 122 if (rbo->flags & RADEON_GEM_GTT_UC) {
123 rbo->placements[c].fpfn = 0;
109 rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED | 124 rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
110 TTM_PL_FLAG_TT; 125 TTM_PL_FLAG_TT;
111 126
112 } else if ((rbo->flags & RADEON_GEM_GTT_WC) || 127 } else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
113 (rbo->rdev->flags & RADEON_IS_AGP)) { 128 (rbo->rdev->flags & RADEON_IS_AGP)) {
129 rbo->placements[c].fpfn = 0;
114 rbo->placements[c++].flags = TTM_PL_FLAG_WC | 130 rbo->placements[c++].flags = TTM_PL_FLAG_WC |
115 TTM_PL_FLAG_UNCACHED | 131 TTM_PL_FLAG_UNCACHED |
116 TTM_PL_FLAG_TT; 132 TTM_PL_FLAG_TT;
117 } else { 133 } else {
134 rbo->placements[c].fpfn = 0;
118 rbo->placements[c++].flags = TTM_PL_FLAG_CACHED | 135 rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
119 TTM_PL_FLAG_TT; 136 TTM_PL_FLAG_TT;
120 } 137 }
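
An fpfn (first page frame number) set to visible_vram_size >> PAGE_SHIFT, as above, restricts a placement to VRAM beyond the CPU-visible window. A small standalone illustration with made-up sizes (not taken from the patch):

#include <stdio.h>

#define PAGE_SHIFT 12	/* 4 KiB pages */

int main(void)
{
	unsigned long long visible_vram_size = 256ULL << 20;	/* 256 MiB */
	unsigned int fpfn = visible_vram_size >> PAGE_SHIFT;

	/* Any page frame >= fpfn lies outside the CPU-visible aperture. */
	printf("fpfn = %u (byte offset %llu)\n",
	       fpfn, (unsigned long long)fpfn << PAGE_SHIFT);
	return 0;
}
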
@@ -122,30 +139,35 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
122 139
123 if (domain & RADEON_GEM_DOMAIN_CPU) { 140 if (domain & RADEON_GEM_DOMAIN_CPU) {
124 if (rbo->flags & RADEON_GEM_GTT_UC) { 141 if (rbo->flags & RADEON_GEM_GTT_UC) {
142 rbo->placements[c].fpfn = 0;
125 rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED | 143 rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
126 TTM_PL_FLAG_SYSTEM; 144 TTM_PL_FLAG_SYSTEM;
127 145
128 } else if ((rbo->flags & RADEON_GEM_GTT_WC) || 146 } else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
129 rbo->rdev->flags & RADEON_IS_AGP) { 147 rbo->rdev->flags & RADEON_IS_AGP) {
148 rbo->placements[c].fpfn = 0;
130 rbo->placements[c++].flags = TTM_PL_FLAG_WC | 149 rbo->placements[c++].flags = TTM_PL_FLAG_WC |
131 TTM_PL_FLAG_UNCACHED | 150 TTM_PL_FLAG_UNCACHED |
132 TTM_PL_FLAG_SYSTEM; 151 TTM_PL_FLAG_SYSTEM;
133 } else { 152 } else {
153 rbo->placements[c].fpfn = 0;
134 rbo->placements[c++].flags = TTM_PL_FLAG_CACHED | 154 rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
135 TTM_PL_FLAG_SYSTEM; 155 TTM_PL_FLAG_SYSTEM;
136 } 156 }
137 } 157 }
138 if (!c) 158 if (!c) {
159 rbo->placements[c].fpfn = 0;
139 rbo->placements[c++].flags = TTM_PL_MASK_CACHING | 160 rbo->placements[c++].flags = TTM_PL_MASK_CACHING |
140 TTM_PL_FLAG_SYSTEM; 161 TTM_PL_FLAG_SYSTEM;
162 }
141 163
142 rbo->placement.num_placement = c; 164 rbo->placement.num_placement = c;
143 rbo->placement.num_busy_placement = c; 165 rbo->placement.num_busy_placement = c;
144 166
145 for (i = 0; i < c; ++i) { 167 for (i = 0; i < c; ++i) {
146 rbo->placements[i].fpfn = 0;
147 if ((rbo->flags & RADEON_GEM_CPU_ACCESS) && 168 if ((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
148 (rbo->placements[i].flags & TTM_PL_FLAG_VRAM)) 169 (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
170 !rbo->placements[i].fpfn)
149 rbo->placements[i].lpfn = 171 rbo->placements[i].lpfn =
150 rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT; 172 rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
151 else 173 else
@@ -157,9 +179,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
157 * improve fragmentation quality. 179 * improve fragmentation quality.
158 * 512kb was measured as the most optimal number. 180 * 512kb was measured as the most optimal number.
159 */ 181 */
160 if (!((rbo->flags & RADEON_GEM_CPU_ACCESS) && 182 if (rbo->tbo.mem.size > 512 * 1024) {
161 (rbo->placements[i].flags & TTM_PL_FLAG_VRAM)) &&
162 rbo->tbo.mem.size > 512 * 1024) {
163 for (i = 0; i < c; i++) { 183 for (i = 0; i < c; i++) {
164 rbo->placements[i].flags |= TTM_PL_FLAG_TOPDOWN; 184 rbo->placements[i].flags |= TTM_PL_FLAG_TOPDOWN;
165 } 185 }
@@ -489,25 +509,29 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
489 struct ww_acquire_ctx *ticket, 509 struct ww_acquire_ctx *ticket,
490 struct list_head *head, int ring) 510 struct list_head *head, int ring)
491{ 511{
492 struct radeon_cs_reloc *lobj; 512 struct radeon_bo_list *lobj;
493 struct radeon_bo *bo; 513 struct list_head duplicates;
494 int r; 514 int r;
495 u64 bytes_moved = 0, initial_bytes_moved; 515 u64 bytes_moved = 0, initial_bytes_moved;
496 u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev); 516 u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);
497 517
498 r = ttm_eu_reserve_buffers(ticket, head, true); 518 INIT_LIST_HEAD(&duplicates);
519 r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates);
499 if (unlikely(r != 0)) { 520 if (unlikely(r != 0)) {
500 return r; 521 return r;
501 } 522 }
502 523
503 list_for_each_entry(lobj, head, tv.head) { 524 list_for_each_entry(lobj, head, tv.head) {
504 bo = lobj->robj; 525 struct radeon_bo *bo = lobj->robj;
505 if (!bo->pin_count) { 526 if (!bo->pin_count) {
506 u32 domain = lobj->prefered_domains; 527 u32 domain = lobj->prefered_domains;
507 u32 allowed = lobj->allowed_domains; 528 u32 allowed = lobj->allowed_domains;
508 u32 current_domain = 529 u32 current_domain =
509 radeon_mem_type_to_domain(bo->tbo.mem.mem_type); 530 radeon_mem_type_to_domain(bo->tbo.mem.mem_type);
510 531
532 WARN_ONCE(bo->gem_base.dumb,
533 "GPU use of dumb buffer is illegal.\n");
534
511 /* Check if this buffer will be moved and don't move it 535 /* Check if this buffer will be moved and don't move it
512 * if we have moved too many buffers for this IB already. 536 * if we have moved too many buffers for this IB already.
513 * 537 *
@@ -546,6 +570,12 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
546 lobj->gpu_offset = radeon_bo_gpu_offset(bo); 570 lobj->gpu_offset = radeon_bo_gpu_offset(bo);
547 lobj->tiling_flags = bo->tiling_flags; 571 lobj->tiling_flags = bo->tiling_flags;
548 } 572 }
573
574 list_for_each_entry(lobj, &duplicates, tv.head) {
575 lobj->gpu_offset = radeon_bo_gpu_offset(lobj->robj);
576 lobj->tiling_flags = lobj->robj->tiling_flags;
577 }
578
549 return 0; 579 return 0;
550} 580}
551 581
@@ -750,8 +780,8 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
750{ 780{
751 struct radeon_device *rdev; 781 struct radeon_device *rdev;
752 struct radeon_bo *rbo; 782 struct radeon_bo *rbo;
753 unsigned long offset, size; 783 unsigned long offset, size, lpfn;
754 int r; 784 int i, r;
755 785
756 if (!radeon_ttm_bo_is_radeon_bo(bo)) 786 if (!radeon_ttm_bo_is_radeon_bo(bo))
757 return 0; 787 return 0;
@@ -768,7 +798,13 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
768 798
769 /* hurrah the memory is not visible! */ 799 /* hurrah the memory is not visible! */
770 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM); 800 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
771 rbo->placements[0].lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT; 801 lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
802 for (i = 0; i < rbo->placement.num_placement; i++) {
803 /* Force into visible VRAM */
804 if ((rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
805 (!rbo->placements[i].lpfn || rbo->placements[i].lpfn > lpfn))
806 rbo->placements[i].lpfn = lpfn;
807 }
772 r = ttm_bo_validate(bo, &rbo->placement, false, false); 808 r = ttm_bo_validate(bo, &rbo->placement, false, false);
773 if (unlikely(r == -ENOMEM)) { 809 if (unlikely(r == -ENOMEM)) {
774 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT); 810 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
@@ -799,3 +835,22 @@ int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
799 ttm_bo_unreserve(&bo->tbo); 835 ttm_bo_unreserve(&bo->tbo);
800 return r; 836 return r;
801} 837}
838
839/**
840 * radeon_bo_fence - add fence to buffer object
841 *
842 * @bo: buffer object in question
843 * @fence: fence to add
844 * @shared: true if the fence should be added as a shared fence
845 *
846 */
847void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
848 bool shared)
849{
850 struct reservation_object *resv = bo->tbo.resv;
851
852 if (shared)
853 reservation_object_add_shared_fence(resv, &fence->base);
854 else
855 reservation_object_add_excl_fence(resv, &fence->base);
856}
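
The radeon_bo_fence() helper added above attaches an IB's fence directly to the buffer's reservation object, so a single-BO update no longer needs the ttm_eu reserve/fence round trip. A minimal sketch of the expected call pattern, assuming the caller already holds the BO reservation (the helper dereferences bo->tbo.resv without taking it); example_publish_update is a hypothetical name, not part of this patch:

/* hedged sketch: publish an IB's fence on a reserved BO */
static void example_publish_update(struct radeon_bo *bo, struct radeon_ib *ib)
{
	/* exclusive fence: the IB rewrites the buffer contents */
	radeon_bo_fence(bo, ib->fence, false);
	radeon_bo_unreserve(bo);
}

The radeon_vm.c hunks later in this diff use the same shape for page-directory writes, and pass shared = true where several rings may touch the page tables concurrently.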
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 1b8ec7917154..3b0b377f76cb 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -155,6 +155,8 @@ extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
155 struct ttm_mem_reg *new_mem); 155 struct ttm_mem_reg *new_mem);
156extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo); 156extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
157extern int radeon_bo_get_surface_reg(struct radeon_bo *bo); 157extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
158extern void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
159 bool shared);
158 160
159/* 161/*
160 * sub allocation 162 * sub allocation
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index 6deb08f045b7..e6ad54cdfa62 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -34,15 +34,14 @@
34int radeon_semaphore_create(struct radeon_device *rdev, 34int radeon_semaphore_create(struct radeon_device *rdev,
35 struct radeon_semaphore **semaphore) 35 struct radeon_semaphore **semaphore)
36{ 36{
37 uint64_t *cpu_addr; 37 int r;
38 int i, r;
39 38
40 *semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL); 39 *semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL);
41 if (*semaphore == NULL) { 40 if (*semaphore == NULL) {
42 return -ENOMEM; 41 return -ENOMEM;
43 } 42 }
44 r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &(*semaphore)->sa_bo, 43 r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo,
45 8 * RADEON_NUM_SYNCS, 8); 44 &(*semaphore)->sa_bo, 8, 8);
46 if (r) { 45 if (r) {
47 kfree(*semaphore); 46 kfree(*semaphore);
48 *semaphore = NULL; 47 *semaphore = NULL;
@@ -51,12 +50,7 @@ int radeon_semaphore_create(struct radeon_device *rdev,
51 (*semaphore)->waiters = 0; 50 (*semaphore)->waiters = 0;
52 (*semaphore)->gpu_addr = radeon_sa_bo_gpu_addr((*semaphore)->sa_bo); 51 (*semaphore)->gpu_addr = radeon_sa_bo_gpu_addr((*semaphore)->sa_bo);
53 52
54 cpu_addr = radeon_sa_bo_cpu_addr((*semaphore)->sa_bo); 53 *((uint64_t *)radeon_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0;
55 for (i = 0; i < RADEON_NUM_SYNCS; ++i)
56 cpu_addr[i] = 0;
57
58 for (i = 0; i < RADEON_NUM_RINGS; ++i)
59 (*semaphore)->sync_to[i] = NULL;
60 54
61 return 0; 55 return 0;
62} 56}
@@ -95,146 +89,6 @@ bool radeon_semaphore_emit_wait(struct radeon_device *rdev, int ridx,
95 return false; 89 return false;
96} 90}
97 91
98/**
99 * radeon_semaphore_sync_fence - use the semaphore to sync to a fence
100 *
101 * @semaphore: semaphore object to add fence to
102 * @fence: fence to sync to
103 *
104 * Sync to the fence using this semaphore object
105 */
106void radeon_semaphore_sync_fence(struct radeon_semaphore *semaphore,
107 struct radeon_fence *fence)
108{
109 struct radeon_fence *other;
110
111 if (!fence)
112 return;
113
114 other = semaphore->sync_to[fence->ring];
115 semaphore->sync_to[fence->ring] = radeon_fence_later(fence, other);
116}
117
118/**
119 * radeon_semaphore_sync_resv - use the semaphore to sync to a reservation object
120 *
121 * @sema: semaphore object to add fence from reservation object to
122 * @resv: reservation object with embedded fence
123 * @shared: true if we should only sync to the exclusive fence
124 *
125 * Sync to the fence using this semaphore object
126 */
127int radeon_semaphore_sync_resv(struct radeon_device *rdev,
128 struct radeon_semaphore *sema,
129 struct reservation_object *resv,
130 bool shared)
131{
132 struct reservation_object_list *flist;
133 struct fence *f;
134 struct radeon_fence *fence;
135 unsigned i;
136 int r = 0;
137
138 /* always sync to the exclusive fence */
139 f = reservation_object_get_excl(resv);
140 fence = f ? to_radeon_fence(f) : NULL;
141 if (fence && fence->rdev == rdev)
142 radeon_semaphore_sync_fence(sema, fence);
143 else if (f)
144 r = fence_wait(f, true);
145
146 flist = reservation_object_get_list(resv);
147 if (shared || !flist || r)
148 return r;
149
150 for (i = 0; i < flist->shared_count; ++i) {
151 f = rcu_dereference_protected(flist->shared[i],
152 reservation_object_held(resv));
153 fence = to_radeon_fence(f);
154 if (fence && fence->rdev == rdev)
155 radeon_semaphore_sync_fence(sema, fence);
156 else
157 r = fence_wait(f, true);
158
159 if (r)
160 break;
161 }
162 return r;
163}
164
165/**
166 * radeon_semaphore_sync_rings - sync ring to all registered fences
167 *
168 * @rdev: radeon_device pointer
169 * @semaphore: semaphore object to use for sync
170 * @ring: ring that needs sync
171 *
172 * Ensure that all registered fences are signaled before letting
173 * the ring continue. The caller must hold the ring lock.
174 */
175int radeon_semaphore_sync_rings(struct radeon_device *rdev,
176 struct radeon_semaphore *semaphore,
177 int ring)
178{
179 unsigned count = 0;
180 int i, r;
181
182 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
183 struct radeon_fence *fence = semaphore->sync_to[i];
184
185 /* check if we really need to sync */
186 if (!radeon_fence_need_sync(fence, ring))
187 continue;
188
189 /* prevent GPU deadlocks */
190 if (!rdev->ring[i].ready) {
191 dev_err(rdev->dev, "Syncing to a disabled ring!");
192 return -EINVAL;
193 }
194
195 if (++count > RADEON_NUM_SYNCS) {
196 /* not enough room, wait manually */
197 r = radeon_fence_wait(fence, false);
198 if (r)
199 return r;
200 continue;
201 }
202
203 /* allocate enough space for sync command */
204 r = radeon_ring_alloc(rdev, &rdev->ring[i], 16);
205 if (r) {
206 return r;
207 }
208
209 /* emit the signal semaphore */
210 if (!radeon_semaphore_emit_signal(rdev, i, semaphore)) {
211 /* signaling wasn't successful wait manually */
212 radeon_ring_undo(&rdev->ring[i]);
213 r = radeon_fence_wait(fence, false);
214 if (r)
215 return r;
216 continue;
217 }
218
219 /* we assume caller has already allocated space on waiters ring */
220 if (!radeon_semaphore_emit_wait(rdev, ring, semaphore)) {
221 /* waiting wasn't successful wait manually */
222 radeon_ring_undo(&rdev->ring[i]);
223 r = radeon_fence_wait(fence, false);
224 if (r)
225 return r;
226 continue;
227 }
228
229 radeon_ring_commit(rdev, &rdev->ring[i], false);
230 radeon_fence_note_sync(fence, ring);
231
232 semaphore->gpu_addr += 8;
233 }
234
235 return 0;
236}
237
238void radeon_semaphore_free(struct radeon_device *rdev, 92void radeon_semaphore_free(struct radeon_device *rdev,
239 struct radeon_semaphore **semaphore, 93 struct radeon_semaphore **semaphore,
240 struct radeon_fence *fence) 94 struct radeon_fence *fence)
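
The sync_to[] bookkeeping and the sync_fence/sync_resv/sync_rings helpers deleted above move into a new radeon_sync object (next file). The container itself is declared in radeon.h, which is not part of this excerpt; the layout below is an assumption inferred from how radeon_sync.c uses it:

/* assumed declaration (radeon.h), inferred from radeon_sync.c below */
struct radeon_sync {
	struct radeon_semaphore	*semaphores[RADEON_NUM_SYNCS];
	struct radeon_fence	*sync_to[RADEON_NUM_RINGS];
	struct radeon_fence	*last_vm_update;
};

Moving the semaphore array into the sync object is what lets radeon_semaphore_create() above shrink to a single 8-byte sub-allocation.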
diff --git a/drivers/gpu/drm/radeon/radeon_sync.c b/drivers/gpu/drm/radeon/radeon_sync.c
new file mode 100644
index 000000000000..02ac8a1de4ff
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_sync.c
@@ -0,0 +1,220 @@
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
20 *
21 * The above copyright notice and this permission notice (including the
22 * next paragraph) shall be included in all copies or substantial portions
23 * of the Software.
24 *
25 */
26/*
27 * Authors:
28 * Christian König <christian.koenig@amd.com>
29 */
30
31#include <drm/drmP.h>
32#include "radeon.h"
33#include "radeon_trace.h"
34
35/**
36 * radeon_sync_create - zero init sync object
37 *
38 * @sync: sync object to initialize
39 *
40 * Just clear the sync object for now.
41 */
42void radeon_sync_create(struct radeon_sync *sync)
43{
44 unsigned i;
45
46 for (i = 0; i < RADEON_NUM_SYNCS; ++i)
47 sync->semaphores[i] = NULL;
48
49 for (i = 0; i < RADEON_NUM_RINGS; ++i)
50 sync->sync_to[i] = NULL;
51
52 sync->last_vm_update = NULL;
53}
54
55/**
56 * radeon_sync_fence - use the semaphore to sync to a fence
57 *
58 * @sync: sync object to add fence to
59 * @fence: fence to sync to
60 *
61 * Sync to the fence using the semaphore objects
62 */
63void radeon_sync_fence(struct radeon_sync *sync,
64 struct radeon_fence *fence)
65{
66 struct radeon_fence *other;
67
68 if (!fence)
69 return;
70
71 other = sync->sync_to[fence->ring];
72 sync->sync_to[fence->ring] = radeon_fence_later(fence, other);
73
74 if (fence->is_vm_update) {
75 other = sync->last_vm_update;
76 sync->last_vm_update = radeon_fence_later(fence, other);
77 }
78}
79
80/**
81 * radeon_sync_resv - use the semaphores to sync to a reservation object
82 *
83 * @sync: sync object to add fences from reservation object to
84 * @resv: reservation object with embedded fence
85 * @shared: true if we should only sync to the exclusive fence
86 *
87 * Sync to the fence using the semaphore objects
88 */
89int radeon_sync_resv(struct radeon_device *rdev,
90 struct radeon_sync *sync,
91 struct reservation_object *resv,
92 bool shared)
93{
94 struct reservation_object_list *flist;
95 struct fence *f;
96 struct radeon_fence *fence;
97 unsigned i;
98 int r = 0;
99
100 /* always sync to the exclusive fence */
101 f = reservation_object_get_excl(resv);
102 fence = f ? to_radeon_fence(f) : NULL;
103 if (fence && fence->rdev == rdev)
104 radeon_sync_fence(sync, fence);
105 else if (f)
106 r = fence_wait(f, true);
107
108 flist = reservation_object_get_list(resv);
109 if (shared || !flist || r)
110 return r;
111
112 for (i = 0; i < flist->shared_count; ++i) {
113 f = rcu_dereference_protected(flist->shared[i],
114 reservation_object_held(resv));
115 fence = to_radeon_fence(f);
116 if (fence && fence->rdev == rdev)
117 radeon_sync_fence(sync, fence);
118 else
119 r = fence_wait(f, true);
120
121 if (r)
122 break;
123 }
124 return r;
125}
126
127/**
128 * radeon_sync_rings - sync ring to all registered fences
129 *
130 * @rdev: radeon_device pointer
131 * @sync: sync object to use
132 * @ring: ring that needs sync
133 *
134 * Ensure that all registered fences are signaled before letting
135 * the ring continue. The caller must hold the ring lock.
136 */
137int radeon_sync_rings(struct radeon_device *rdev,
138 struct radeon_sync *sync,
139 int ring)
140{
141 unsigned count = 0;
142 int i, r;
143
144 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
145 struct radeon_fence *fence = sync->sync_to[i];
146 struct radeon_semaphore *semaphore;
147
148 /* check if we really need to sync */
149 if (!radeon_fence_need_sync(fence, ring))
150 continue;
151
152 /* prevent GPU deadlocks */
153 if (!rdev->ring[i].ready) {
154 dev_err(rdev->dev, "Syncing to a disabled ring!\n");
155 return -EINVAL;
156 }
157
158 if (count >= RADEON_NUM_SYNCS) {
159 /* not enough room, wait manually */
160 r = radeon_fence_wait(fence, false);
161 if (r)
162 return r;
163 continue;
164 }
165 r = radeon_semaphore_create(rdev, &semaphore);
166 if (r)
167 return r;
168
169 sync->semaphores[count++] = semaphore;
170
171 /* allocate enough space for sync command */
172 r = radeon_ring_alloc(rdev, &rdev->ring[i], 16);
173 if (r)
174 return r;
175
176 /* emit the signal semaphore */
177 if (!radeon_semaphore_emit_signal(rdev, i, semaphore)) {
178 /* signaling wasn't successful, wait manually */
179 radeon_ring_undo(&rdev->ring[i]);
180 r = radeon_fence_wait(fence, false);
181 if (r)
182 return r;
183 continue;
184 }
185
186 /* we assume the caller has already allocated space on the waiter's ring */
187 if (!radeon_semaphore_emit_wait(rdev, ring, semaphore)) {
188 /* waiting wasn't successful, wait manually */
189 radeon_ring_undo(&rdev->ring[i]);
190 r = radeon_fence_wait(fence, false);
191 if (r)
192 return r;
193 continue;
194 }
195
196 radeon_ring_commit(rdev, &rdev->ring[i], false);
197 radeon_fence_note_sync(fence, ring);
198 }
199
200 return 0;
201}
202
203/**
204 * radeon_sync_free - free the sync object
205 *
206 * @rdev: radeon_device pointer
207 * @sync: sync object to use
208 * @fence: fence to use for the free
209 *
210 * Free the sync object by freeing all semaphores in it.
211 */
212void radeon_sync_free(struct radeon_device *rdev,
213 struct radeon_sync *sync,
214 struct radeon_fence *fence)
215{
216 unsigned i;
217
218 for (i = 0; i < RADEON_NUM_SYNCS; ++i)
219 radeon_semaphore_free(rdev, &sync->semaphores[i], fence);
220}
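
Taken together, the new file gives callers a stack-allocated sync object with a create/collect/emit/free life cycle. A hedged usage sketch mirroring the converted copy functions later in this diff (error handling elided; radeon_sync_resv() and radeon_sync_rings() both return int and should be checked):

	struct radeon_sync sync;

	radeon_sync_create(&sync);
	radeon_sync_resv(rdev, &sync, resv, false);	/* collect fences to wait on */
	radeon_sync_rings(rdev, &sync, ring->idx);	/* emit semaphore waits */
	/* ... emit the actual commands and their fence ... */
	radeon_sync_free(rdev, &sync, fence);	/* semaphores freed once fence signals */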
diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h
index 9db74a96ef61..ce075cb08cb2 100644
--- a/drivers/gpu/drm/radeon/radeon_trace.h
+++ b/drivers/gpu/drm/radeon/radeon_trace.h
@@ -38,7 +38,7 @@ TRACE_EVENT(radeon_cs,
38 38
39 TP_fast_assign( 39 TP_fast_assign(
40 __entry->ring = p->ring; 40 __entry->ring = p->ring;
41 __entry->dw = p->chunks[p->chunk_ib_idx].length_dw; 41 __entry->dw = p->chunk_ib->length_dw;
42 __entry->fences = radeon_fence_count_emitted( 42 __entry->fences = radeon_fence_count_emitted(
43 p->rdev, p->ring); 43 p->rdev, p->ring);
44 ), 44 ),
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 8624979afb65..d02aa1d0f588 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -196,9 +196,32 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
196 rbo = container_of(bo, struct radeon_bo, tbo); 196 rbo = container_of(bo, struct radeon_bo, tbo);
197 switch (bo->mem.mem_type) { 197 switch (bo->mem.mem_type) {
198 case TTM_PL_VRAM: 198 case TTM_PL_VRAM:
199 if (rbo->rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready == false) 199 if (rbo->rdev->ring[radeon_copy_ring_index(rbo->rdev)].ready == false)
200 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU); 200 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
201 else 201 else if (rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size &&
202 bo->mem.start < (rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT)) {
203 unsigned fpfn = rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
204 int i;
205
206 /* Try evicting to the CPU inaccessible part of VRAM
207 * first, but only set GTT as busy placement, so this
208 * BO will be evicted to GTT rather than causing other
209 * BOs to be evicted from VRAM
210 */
211 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM |
212 RADEON_GEM_DOMAIN_GTT);
213 rbo->placement.num_busy_placement = 0;
214 for (i = 0; i < rbo->placement.num_placement; i++) {
215 if (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) {
216 if (rbo->placements[i].fpfn < fpfn)
217 rbo->placements[i].fpfn = fpfn;
218 } else {
219 rbo->placement.busy_placement =
220 &rbo->placements[i];
221 rbo->placement.num_busy_placement = 1;
222 }
223 }
224 } else
202 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT); 225 radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
203 break; 226 break;
204 case TTM_PL_TT: 227 case TTM_PL_TT:
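
The radeon_evict_flags() change above tries to keep evicted VRAM buffers in VRAM by restricting them to its CPU-inaccessible part, with GTT as the only busy placement. A worked illustration with assumed numbers, not taken from this patch:

/* Assume 1GB of VRAM of which 256MB is CPU-visible, 4KB pages:
 * fpfn = visible_vram_size >> PAGE_SHIFT = 0x10000.
 * After the loop the placement list looks like:
 *
 *   placements[0]: VRAM, fpfn = 0x10000  -> only invisible VRAM qualifies
 *   placements[1]: GTT,  fpfn = 0        -> sole busy_placement entry
 *
 * so under memory pressure this BO falls back to GTT instead of forcing
 * other BOs out of VRAM.
 */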
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 11b662469253..c10b2aec6450 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -488,12 +488,12 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
488 unsigned buf_sizes[], bool *has_msg_cmd) 488 unsigned buf_sizes[], bool *has_msg_cmd)
489{ 489{
490 struct radeon_cs_chunk *relocs_chunk; 490 struct radeon_cs_chunk *relocs_chunk;
491 struct radeon_cs_reloc *reloc; 491 struct radeon_bo_list *reloc;
492 unsigned idx, cmd, offset; 492 unsigned idx, cmd, offset;
493 uint64_t start, end; 493 uint64_t start, end;
494 int r; 494 int r;
495 495
496 relocs_chunk = &p->chunks[p->chunk_relocs_idx]; 496 relocs_chunk = p->chunk_relocs;
497 offset = radeon_get_ib_value(p, data0); 497 offset = radeon_get_ib_value(p, data0);
498 idx = radeon_get_ib_value(p, data1); 498 idx = radeon_get_ib_value(p, data1);
499 if (idx >= relocs_chunk->length_dw) { 499 if (idx >= relocs_chunk->length_dw) {
@@ -502,7 +502,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
502 return -EINVAL; 502 return -EINVAL;
503 } 503 }
504 504
505 reloc = p->relocs_ptr[(idx / 4)]; 505 reloc = &p->relocs[(idx / 4)];
506 start = reloc->gpu_offset; 506 start = reloc->gpu_offset;
507 end = start + radeon_bo_size(reloc->robj); 507 end = start + radeon_bo_size(reloc->robj);
508 start += offset; 508 start += offset;
@@ -610,13 +610,13 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
610 [0x00000003] = 2048, 610 [0x00000003] = 2048,
611 }; 611 };
612 612
613 if (p->chunks[p->chunk_ib_idx].length_dw % 16) { 613 if (p->chunk_ib->length_dw % 16) {
614 DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n", 614 DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
615 p->chunks[p->chunk_ib_idx].length_dw); 615 p->chunk_ib->length_dw);
616 return -EINVAL; 616 return -EINVAL;
617 } 617 }
618 618
619 if (p->chunk_relocs_idx == -1) { 619 if (p->chunk_relocs == NULL) {
620 DRM_ERROR("No relocation chunk !\n"); 620 DRM_ERROR("No relocation chunk !\n");
621 return -EINVAL; 621 return -EINVAL;
622 } 622 }
@@ -640,7 +640,7 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
640 DRM_ERROR("Unknown packet type %d !\n", pkt.type); 640 DRM_ERROR("Unknown packet type %d !\n", pkt.type);
641 return -EINVAL; 641 return -EINVAL;
642 } 642 }
643 } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); 643 } while (p->idx < p->chunk_ib->length_dw);
644 644
645 if (!has_msg_cmd) { 645 if (!has_msg_cmd) {
646 DRM_ERROR("UVD-IBs need a msg command!\n"); 646 DRM_ERROR("UVD-IBs need a msg command!\n");
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index 9e85757d5599..976fe432f4e2 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -453,11 +453,11 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi,
453 unsigned size) 453 unsigned size)
454{ 454{
455 struct radeon_cs_chunk *relocs_chunk; 455 struct radeon_cs_chunk *relocs_chunk;
456 struct radeon_cs_reloc *reloc; 456 struct radeon_bo_list *reloc;
457 uint64_t start, end, offset; 457 uint64_t start, end, offset;
458 unsigned idx; 458 unsigned idx;
459 459
460 relocs_chunk = &p->chunks[p->chunk_relocs_idx]; 460 relocs_chunk = p->chunk_relocs;
461 offset = radeon_get_ib_value(p, lo); 461 offset = radeon_get_ib_value(p, lo);
462 idx = radeon_get_ib_value(p, hi); 462 idx = radeon_get_ib_value(p, hi);
463 463
@@ -467,7 +467,7 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi,
467 return -EINVAL; 467 return -EINVAL;
468 } 468 }
469 469
470 reloc = p->relocs_ptr[(idx / 4)]; 470 reloc = &p->relocs[(idx / 4)];
471 start = reloc->gpu_offset; 471 start = reloc->gpu_offset;
472 end = start + radeon_bo_size(reloc->robj); 472 end = start + radeon_bo_size(reloc->robj);
473 start += offset; 473 start += offset;
@@ -534,7 +534,7 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
534 uint32_t *size = &tmp; 534 uint32_t *size = &tmp;
535 int i, r; 535 int i, r;
536 536
537 while (p->idx < p->chunks[p->chunk_ib_idx].length_dw) { 537 while (p->idx < p->chunk_ib->length_dw) {
538 uint32_t len = radeon_get_ib_value(p, p->idx); 538 uint32_t len = radeon_get_ib_value(p, p->idx);
539 uint32_t cmd = radeon_get_ib_value(p, p->idx + 1); 539 uint32_t cmd = radeon_get_ib_value(p, p->idx + 1);
540 540
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index dfde266529e2..cde48c42b30a 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -125,41 +125,37 @@ void radeon_vm_manager_fini(struct radeon_device *rdev)
125 * Add the page directory to the list of BOs to 125 * Add the page directory to the list of BOs to
126 * validate for command submission (cayman+). 126 * validate for command submission (cayman+).
127 */ 127 */
128struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev, 128struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
129 struct radeon_vm *vm, 129 struct radeon_vm *vm,
130 struct list_head *head) 130 struct list_head *head)
131{ 131{
132 struct radeon_cs_reloc *list; 132 struct radeon_bo_list *list;
133 unsigned i, idx; 133 unsigned i, idx;
134 134
135 list = drm_malloc_ab(vm->max_pde_used + 2, 135 list = drm_malloc_ab(vm->max_pde_used + 2,
136 sizeof(struct radeon_cs_reloc)); 136 sizeof(struct radeon_bo_list));
137 if (!list) 137 if (!list)
138 return NULL; 138 return NULL;
139 139
140 /* add the vm page table to the list */ 140 /* add the vm page table to the list */
141 list[0].gobj = NULL;
142 list[0].robj = vm->page_directory; 141 list[0].robj = vm->page_directory;
143 list[0].prefered_domains = RADEON_GEM_DOMAIN_VRAM; 142 list[0].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
144 list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM; 143 list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
145 list[0].tv.bo = &vm->page_directory->tbo; 144 list[0].tv.bo = &vm->page_directory->tbo;
146 list[0].tv.shared = false; 145 list[0].tv.shared = true;
147 list[0].tiling_flags = 0; 146 list[0].tiling_flags = 0;
148 list[0].handle = 0;
149 list_add(&list[0].tv.head, head); 147 list_add(&list[0].tv.head, head);
150 148
151 for (i = 0, idx = 1; i <= vm->max_pde_used; i++) { 149 for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {
152 if (!vm->page_tables[i].bo) 150 if (!vm->page_tables[i].bo)
153 continue; 151 continue;
154 152
155 list[idx].gobj = NULL;
156 list[idx].robj = vm->page_tables[i].bo; 153 list[idx].robj = vm->page_tables[i].bo;
157 list[idx].prefered_domains = RADEON_GEM_DOMAIN_VRAM; 154 list[idx].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
158 list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM; 155 list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
159 list[idx].tv.bo = &list[idx].robj->tbo; 156 list[idx].tv.bo = &list[idx].robj->tbo;
160 list[idx].tv.shared = false; 157 list[idx].tv.shared = true;
161 list[idx].tiling_flags = 0; 158 list[idx].tiling_flags = 0;
162 list[idx].handle = 0;
163 list_add(&list[idx++].tv.head, head); 159 list_add(&list[idx++].tv.head, head);
164 } 160 }
165 161
@@ -182,15 +178,18 @@ struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
182 struct radeon_vm *vm, int ring) 178 struct radeon_vm *vm, int ring)
183{ 179{
184 struct radeon_fence *best[RADEON_NUM_RINGS] = {}; 180 struct radeon_fence *best[RADEON_NUM_RINGS] = {};
181 struct radeon_vm_id *vm_id = &vm->ids[ring];
182
185 unsigned choices[2] = {}; 183 unsigned choices[2] = {};
186 unsigned i; 184 unsigned i;
187 185
188 /* check if the id is still valid */ 186 /* check if the id is still valid */
189 if (vm->last_id_use && vm->last_id_use == rdev->vm_manager.active[vm->id]) 187 if (vm_id->id && vm_id->last_id_use &&
188 vm_id->last_id_use == rdev->vm_manager.active[vm_id->id])
190 return NULL; 189 return NULL;
191 190
192 /* we definitely need to flush */ 191 vm_id->pd_gpu_addr = ~0ll;
193 radeon_fence_unref(&vm->last_flush); 192 vm_id->pd_gpu_addr = ~0ll;
194 193
195 /* skip over VMID 0, since it is the system VM */ 194 /* skip over VMID 0, since it is the system VM */
196 for (i = 1; i < rdev->vm_manager.nvm; ++i) { 195 for (i = 1; i < rdev->vm_manager.nvm; ++i) {
@@ -198,8 +197,8 @@ struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
198 197
199 if (fence == NULL) { 198 if (fence == NULL) {
200 /* found a free one */ 199 /* found a free one */
201 vm->id = i; 200 vm_id->id = i;
202 trace_radeon_vm_grab_id(vm->id, ring); 201 trace_radeon_vm_grab_id(i, ring);
203 return NULL; 202 return NULL;
204 } 203 }
205 204
@@ -211,8 +210,8 @@ struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
211 210
212 for (i = 0; i < 2; ++i) { 211 for (i = 0; i < 2; ++i) {
213 if (choices[i]) { 212 if (choices[i]) {
214 vm->id = choices[i]; 213 vm_id->id = choices[i];
215 trace_radeon_vm_grab_id(vm->id, ring); 214 trace_radeon_vm_grab_id(choices[i], ring);
216 return rdev->vm_manager.active[choices[i]]; 215 return rdev->vm_manager.active[choices[i]];
217 } 216 }
218 } 217 }
@@ -228,6 +227,7 @@ struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
228 * @rdev: radeon_device pointer 227 * @rdev: radeon_device pointer
229 * @vm: vm we want to flush 228 * @vm: vm we want to flush
230 * @ring: ring to use for flush 229 * @ring: ring to use for flush
230 * @updates: last vm update that is waited for
231 * 231 *
232 * Flush the vm (cayman+). 232 * Flush the vm (cayman+).
233 * 233 *
@@ -235,15 +235,21 @@ struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
235 */ 235 */
236void radeon_vm_flush(struct radeon_device *rdev, 236void radeon_vm_flush(struct radeon_device *rdev,
237 struct radeon_vm *vm, 237 struct radeon_vm *vm,
238 int ring) 238 int ring, struct radeon_fence *updates)
239{ 239{
240 uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory); 240 uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory);
241 struct radeon_vm_id *vm_id = &vm->ids[ring];
242
243 if (pd_addr != vm_id->pd_gpu_addr || !vm_id->flushed_updates ||
244 radeon_fence_is_earlier(vm_id->flushed_updates, updates)) {
245
246 trace_radeon_vm_flush(pd_addr, ring, vm->ids[ring].id);
247 radeon_fence_unref(&vm_id->flushed_updates);
248 vm_id->flushed_updates = radeon_fence_ref(updates);
249 vm_id->pd_gpu_addr = pd_addr;
250 radeon_ring_vm_flush(rdev, &rdev->ring[ring],
251 vm_id->id, vm_id->pd_gpu_addr);
241 252
242 /* if we can't remember our last VM flush then flush now! */
243 if (!vm->last_flush || pd_addr != vm->pd_gpu_addr) {
244 trace_radeon_vm_flush(pd_addr, ring, vm->id);
245 vm->pd_gpu_addr = pd_addr;
246 radeon_ring_vm_flush(rdev, ring, vm);
247 } 253 }
248} 254}
249 255
@@ -263,18 +269,13 @@ void radeon_vm_fence(struct radeon_device *rdev,
263 struct radeon_vm *vm, 269 struct radeon_vm *vm,
264 struct radeon_fence *fence) 270 struct radeon_fence *fence)
265{ 271{
266 radeon_fence_unref(&vm->fence); 272 unsigned vm_id = vm->ids[fence->ring].id;
267 vm->fence = radeon_fence_ref(fence);
268
269 radeon_fence_unref(&rdev->vm_manager.active[vm->id]);
270 rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence);
271 273
272 radeon_fence_unref(&vm->last_id_use); 274 radeon_fence_unref(&rdev->vm_manager.active[vm_id]);
273 vm->last_id_use = radeon_fence_ref(fence); 275 rdev->vm_manager.active[vm_id] = radeon_fence_ref(fence);
274 276
275 /* we just flushed the VM, remember that */ 277 radeon_fence_unref(&vm->ids[fence->ring].last_id_use);
276 if (!vm->last_flush) 278 vm->ids[fence->ring].last_id_use = radeon_fence_ref(fence);
277 vm->last_flush = radeon_fence_ref(fence);
278} 279}
279 280
280/** 281/**
@@ -387,35 +388,25 @@ static void radeon_vm_set_pages(struct radeon_device *rdev,
387static int radeon_vm_clear_bo(struct radeon_device *rdev, 388static int radeon_vm_clear_bo(struct radeon_device *rdev,
388 struct radeon_bo *bo) 389 struct radeon_bo *bo)
389{ 390{
390 struct ttm_validate_buffer tv;
391 struct ww_acquire_ctx ticket;
392 struct list_head head;
393 struct radeon_ib ib; 391 struct radeon_ib ib;
394 unsigned entries; 392 unsigned entries;
395 uint64_t addr; 393 uint64_t addr;
396 int r; 394 int r;
397 395
398 memset(&tv, 0, sizeof(tv)); 396 r = radeon_bo_reserve(bo, false);
399 tv.bo = &bo->tbo; 397 if (r)
400 tv.shared = false;
401
402 INIT_LIST_HEAD(&head);
403 list_add(&tv.head, &head);
404
405 r = ttm_eu_reserve_buffers(&ticket, &head, true);
406 if (r)
407 return r; 398 return r;
408 399
409 r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false); 400 r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
410 if (r) 401 if (r)
411 goto error; 402 goto error_unreserve;
412 403
413 addr = radeon_bo_gpu_offset(bo); 404 addr = radeon_bo_gpu_offset(bo);
414 entries = radeon_bo_size(bo) / 8; 405 entries = radeon_bo_size(bo) / 8;
415 406
416 r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, 256); 407 r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, 256);
417 if (r) 408 if (r)
418 goto error; 409 goto error_unreserve;
419 410
420 ib.length_dw = 0; 411 ib.length_dw = 0;
421 412
@@ -425,15 +416,16 @@ static int radeon_vm_clear_bo(struct radeon_device *rdev,
425 416
426 r = radeon_ib_schedule(rdev, &ib, NULL, false); 417 r = radeon_ib_schedule(rdev, &ib, NULL, false);
427 if (r) 418 if (r)
428 goto error; 419 goto error_free;
429 420
430 ttm_eu_fence_buffer_objects(&ticket, &head, &ib.fence->base); 421 ib.fence->is_vm_update = true;
431 radeon_ib_free(rdev, &ib); 422 radeon_bo_fence(bo, ib.fence, false);
432 423
433 return 0; 424error_free:
425 radeon_ib_free(rdev, &ib);
434 426
435error: 427error_unreserve:
436 ttm_eu_backoff_reservation(&ticket, &head); 428 radeon_bo_unreserve(bo);
437 return r; 429 return r;
438} 430}
439 431
@@ -449,7 +441,7 @@ error:
449 * Validate and set the offset requested within the vm address space. 441 * Validate and set the offset requested within the vm address space.
450 * Returns 0 for success, error for failure. 442 * Returns 0 for success, error for failure.
451 * 443 *
452 * Object has to be reserved! 444 * Object has to be reserved and gets unreserved by this function!
453 */ 445 */
454int radeon_vm_bo_set_addr(struct radeon_device *rdev, 446int radeon_vm_bo_set_addr(struct radeon_device *rdev,
455 struct radeon_bo_va *bo_va, 447 struct radeon_bo_va *bo_va,
@@ -495,7 +487,9 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
495 tmp->vm = vm; 487 tmp->vm = vm;
496 tmp->addr = bo_va->addr; 488 tmp->addr = bo_va->addr;
497 tmp->bo = radeon_bo_ref(bo_va->bo); 489 tmp->bo = radeon_bo_ref(bo_va->bo);
490 spin_lock(&vm->status_lock);
498 list_add(&tmp->vm_status, &vm->freed); 491 list_add(&tmp->vm_status, &vm->freed);
492 spin_unlock(&vm->status_lock);
499 } 493 }
500 494
501 interval_tree_remove(&bo_va->it, &vm->va); 495 interval_tree_remove(&bo_va->it, &vm->va);
@@ -575,7 +569,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
575 } 569 }
576 570
577 mutex_unlock(&vm->mutex); 571 mutex_unlock(&vm->mutex);
578 return radeon_bo_reserve(bo_va->bo, false); 572 return 0;
579} 573}
580 574
581/** 575/**
@@ -699,17 +693,15 @@ int radeon_vm_update_page_directory(struct radeon_device *rdev,
699 if (ib.length_dw != 0) { 693 if (ib.length_dw != 0) {
700 radeon_asic_vm_pad_ib(rdev, &ib); 694 radeon_asic_vm_pad_ib(rdev, &ib);
701 695
702 radeon_semaphore_sync_resv(rdev, ib.semaphore, pd->tbo.resv, false); 696 radeon_sync_resv(rdev, &ib.sync, pd->tbo.resv, true);
703 radeon_semaphore_sync_fence(ib.semaphore, vm->last_id_use);
704 WARN_ON(ib.length_dw > ndw); 697 WARN_ON(ib.length_dw > ndw);
705 r = radeon_ib_schedule(rdev, &ib, NULL, false); 698 r = radeon_ib_schedule(rdev, &ib, NULL, false);
706 if (r) { 699 if (r) {
707 radeon_ib_free(rdev, &ib); 700 radeon_ib_free(rdev, &ib);
708 return r; 701 return r;
709 } 702 }
710 radeon_fence_unref(&vm->fence); 703 ib.fence->is_vm_update = true;
711 vm->fence = radeon_fence_ref(ib.fence); 704 radeon_bo_fence(pd, ib.fence, false);
712 radeon_fence_unref(&vm->last_flush);
713 } 705 }
714 radeon_ib_free(rdev, &ib); 706 radeon_ib_free(rdev, &ib);
715 707
@@ -808,11 +800,11 @@ static void radeon_vm_frag_ptes(struct radeon_device *rdev,
808 * 800 *
809 * Global and local mutex must be locked! 801 * Global and local mutex must be locked!
810 */ 802 */
811static void radeon_vm_update_ptes(struct radeon_device *rdev, 803static int radeon_vm_update_ptes(struct radeon_device *rdev,
812 struct radeon_vm *vm, 804 struct radeon_vm *vm,
813 struct radeon_ib *ib, 805 struct radeon_ib *ib,
814 uint64_t start, uint64_t end, 806 uint64_t start, uint64_t end,
815 uint64_t dst, uint32_t flags) 807 uint64_t dst, uint32_t flags)
816{ 808{
817 uint64_t mask = RADEON_VM_PTE_COUNT - 1; 809 uint64_t mask = RADEON_VM_PTE_COUNT - 1;
818 uint64_t last_pte = ~0, last_dst = ~0; 810 uint64_t last_pte = ~0, last_dst = ~0;
@@ -825,8 +817,12 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
825 struct radeon_bo *pt = vm->page_tables[pt_idx].bo; 817 struct radeon_bo *pt = vm->page_tables[pt_idx].bo;
826 unsigned nptes; 818 unsigned nptes;
827 uint64_t pte; 819 uint64_t pte;
820 int r;
828 821
829 radeon_semaphore_sync_resv(rdev, ib->semaphore, pt->tbo.resv, false); 822 radeon_sync_resv(rdev, &ib->sync, pt->tbo.resv, true);
823 r = reservation_object_reserve_shared(pt->tbo.resv);
824 if (r)
825 return r;
830 826
831 if ((addr & ~mask) == (end & ~mask)) 827 if ((addr & ~mask) == (end & ~mask))
832 nptes = end - addr; 828 nptes = end - addr;
@@ -860,6 +856,33 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
860 last_pte + 8 * count, 856 last_pte + 8 * count,
861 last_dst, flags); 857 last_dst, flags);
862 } 858 }
859
860 return 0;
861}
862
863/**
864 * radeon_vm_fence_pts - fence page tables after an update
865 *
866 * @vm: requested vm
867 * @start: start of GPU address range
868 * @end: end of GPU address range
869 * @fence: fence to use
870 *
871 * Fence the page tables in the range @start - @end (cayman+).
872 *
873 * Global and local mutex must be locked!
874 */
875static void radeon_vm_fence_pts(struct radeon_vm *vm,
876 uint64_t start, uint64_t end,
877 struct radeon_fence *fence)
878{
879 unsigned i;
880
881 start >>= radeon_vm_block_size;
882 end >>= radeon_vm_block_size;
883
884 for (i = start; i <= end; ++i)
885 radeon_bo_fence(vm->page_tables[i].bo, fence, true);
863} 886}
864 887
865/** 888/**
@@ -892,7 +915,9 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
892 return -EINVAL; 915 return -EINVAL;
893 } 916 }
894 917
918 spin_lock(&vm->status_lock);
895 list_del_init(&bo_va->vm_status); 919 list_del_init(&bo_va->vm_status);
920 spin_unlock(&vm->status_lock);
896 921
897 bo_va->flags &= ~RADEON_VM_PAGE_VALID; 922 bo_va->flags &= ~RADEON_VM_PAGE_VALID;
898 bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM; 923 bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
@@ -961,23 +986,34 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
961 return r; 986 return r;
962 ib.length_dw = 0; 987 ib.length_dw = 0;
963 988
964 radeon_vm_update_ptes(rdev, vm, &ib, bo_va->it.start, 989 if (!(bo_va->flags & RADEON_VM_PAGE_VALID)) {
965 bo_va->it.last + 1, addr, 990 unsigned i;
966 radeon_vm_page_flags(bo_va->flags)); 991
992 for (i = 0; i < RADEON_NUM_RINGS; ++i)
993 radeon_sync_fence(&ib.sync, vm->ids[i].last_id_use);
994 }
995
996 r = radeon_vm_update_ptes(rdev, vm, &ib, bo_va->it.start,
997 bo_va->it.last + 1, addr,
998 radeon_vm_page_flags(bo_va->flags));
999 if (r) {
1000 radeon_ib_free(rdev, &ib);
1001 return r;
1002 }
967 1003
968 radeon_asic_vm_pad_ib(rdev, &ib); 1004 radeon_asic_vm_pad_ib(rdev, &ib);
969 WARN_ON(ib.length_dw > ndw); 1005 WARN_ON(ib.length_dw > ndw);
970 1006
971 radeon_semaphore_sync_fence(ib.semaphore, vm->fence);
972 r = radeon_ib_schedule(rdev, &ib, NULL, false); 1007 r = radeon_ib_schedule(rdev, &ib, NULL, false);
973 if (r) { 1008 if (r) {
974 radeon_ib_free(rdev, &ib); 1009 radeon_ib_free(rdev, &ib);
975 return r; 1010 return r;
976 } 1011 }
977 radeon_fence_unref(&vm->fence); 1012 ib.fence->is_vm_update = true;
978 vm->fence = radeon_fence_ref(ib.fence); 1013 radeon_vm_fence_pts(vm, bo_va->it.start, bo_va->it.last + 1, ib.fence);
1014 radeon_fence_unref(&bo_va->last_pt_update);
1015 bo_va->last_pt_update = radeon_fence_ref(ib.fence);
979 radeon_ib_free(rdev, &ib); 1016 radeon_ib_free(rdev, &ib);
980 radeon_fence_unref(&vm->last_flush);
981 1017
982 return 0; 1018 return 0;
983} 1019}
@@ -996,16 +1032,25 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
996int radeon_vm_clear_freed(struct radeon_device *rdev, 1032int radeon_vm_clear_freed(struct radeon_device *rdev,
997 struct radeon_vm *vm) 1033 struct radeon_vm *vm)
998{ 1034{
999 struct radeon_bo_va *bo_va, *tmp; 1035 struct radeon_bo_va *bo_va;
1000 int r; 1036 int r;
1001 1037
1002 list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) { 1038 spin_lock(&vm->status_lock);
1039 while (!list_empty(&vm->freed)) {
1040 bo_va = list_first_entry(&vm->freed,
1041 struct radeon_bo_va, vm_status);
1042 spin_unlock(&vm->status_lock);
1043
1003 r = radeon_vm_bo_update(rdev, bo_va, NULL); 1044 r = radeon_vm_bo_update(rdev, bo_va, NULL);
1004 radeon_bo_unref(&bo_va->bo); 1045 radeon_bo_unref(&bo_va->bo);
1046 radeon_fence_unref(&bo_va->last_pt_update);
1005 kfree(bo_va); 1047 kfree(bo_va);
1006 if (r) 1048 if (r)
1007 return r; 1049 return r;
1050
1051 spin_lock(&vm->status_lock);
1008 } 1052 }
1053 spin_unlock(&vm->status_lock);
1009 return 0; 1054 return 0;
1010 1055
1011} 1056}
@@ -1024,14 +1069,23 @@ int radeon_vm_clear_freed(struct radeon_device *rdev,
1024int radeon_vm_clear_invalids(struct radeon_device *rdev, 1069int radeon_vm_clear_invalids(struct radeon_device *rdev,
1025 struct radeon_vm *vm) 1070 struct radeon_vm *vm)
1026{ 1071{
1027 struct radeon_bo_va *bo_va, *tmp; 1072 struct radeon_bo_va *bo_va;
1028 int r; 1073 int r;
1029 1074
1030 list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, vm_status) { 1075 spin_lock(&vm->status_lock);
1076 while (!list_empty(&vm->invalidated)) {
1077 bo_va = list_first_entry(&vm->invalidated,
1078 struct radeon_bo_va, vm_status);
1079 spin_unlock(&vm->status_lock);
1080
1031 r = radeon_vm_bo_update(rdev, bo_va, NULL); 1081 r = radeon_vm_bo_update(rdev, bo_va, NULL);
1032 if (r) 1082 if (r)
1033 return r; 1083 return r;
1084
1085 spin_lock(&vm->status_lock);
1034 } 1086 }
1087 spin_unlock(&vm->status_lock);
1088
1035 return 0; 1089 return 0;
1036} 1090}
1037 1091
@@ -1054,14 +1108,17 @@ void radeon_vm_bo_rmv(struct radeon_device *rdev,
1054 1108
1055 mutex_lock(&vm->mutex); 1109 mutex_lock(&vm->mutex);
1056 interval_tree_remove(&bo_va->it, &vm->va); 1110 interval_tree_remove(&bo_va->it, &vm->va);
1111 spin_lock(&vm->status_lock);
1057 list_del(&bo_va->vm_status); 1112 list_del(&bo_va->vm_status);
1058 1113
1059 if (bo_va->addr) { 1114 if (bo_va->addr) {
1060 bo_va->bo = radeon_bo_ref(bo_va->bo); 1115 bo_va->bo = radeon_bo_ref(bo_va->bo);
1061 list_add(&bo_va->vm_status, &vm->freed); 1116 list_add(&bo_va->vm_status, &vm->freed);
1062 } else { 1117 } else {
1118 radeon_fence_unref(&bo_va->last_pt_update);
1063 kfree(bo_va); 1119 kfree(bo_va);
1064 } 1120 }
1121 spin_unlock(&vm->status_lock);
1065 1122
1066 mutex_unlock(&vm->mutex); 1123 mutex_unlock(&vm->mutex);
1067} 1124}
@@ -1082,10 +1139,10 @@ void radeon_vm_bo_invalidate(struct radeon_device *rdev,
1082 1139
1083 list_for_each_entry(bo_va, &bo->va, bo_list) { 1140 list_for_each_entry(bo_va, &bo->va, bo_list) {
1084 if (bo_va->addr) { 1141 if (bo_va->addr) {
1085 mutex_lock(&bo_va->vm->mutex); 1142 spin_lock(&bo_va->vm->status_lock);
1086 list_del(&bo_va->vm_status); 1143 list_del(&bo_va->vm_status);
1087 list_add(&bo_va->vm_status, &bo_va->vm->invalidated); 1144 list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
1088 mutex_unlock(&bo_va->vm->mutex); 1145 spin_unlock(&bo_va->vm->status_lock);
1089 } 1146 }
1090 } 1147 }
1091} 1148}
@@ -1103,15 +1160,17 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
1103 const unsigned align = min(RADEON_VM_PTB_ALIGN_SIZE, 1160 const unsigned align = min(RADEON_VM_PTB_ALIGN_SIZE,
1104 RADEON_VM_PTE_COUNT * 8); 1161 RADEON_VM_PTE_COUNT * 8);
1105 unsigned pd_size, pd_entries, pts_size; 1162 unsigned pd_size, pd_entries, pts_size;
1106 int r; 1163 int i, r;
1107 1164
1108 vm->id = 0;
1109 vm->ib_bo_va = NULL; 1165 vm->ib_bo_va = NULL;
1110 vm->fence = NULL; 1166 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1111 vm->last_flush = NULL; 1167 vm->ids[i].id = 0;
1112 vm->last_id_use = NULL; 1168 vm->ids[i].flushed_updates = NULL;
1169 vm->ids[i].last_id_use = NULL;
1170 }
1113 mutex_init(&vm->mutex); 1171 mutex_init(&vm->mutex);
1114 vm->va = RB_ROOT; 1172 vm->va = RB_ROOT;
1173 spin_lock_init(&vm->status_lock);
1115 INIT_LIST_HEAD(&vm->invalidated); 1174 INIT_LIST_HEAD(&vm->invalidated);
1116 INIT_LIST_HEAD(&vm->freed); 1175 INIT_LIST_HEAD(&vm->freed);
1117 1176
@@ -1165,11 +1224,13 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
1165 if (!r) { 1224 if (!r) {
1166 list_del_init(&bo_va->bo_list); 1225 list_del_init(&bo_va->bo_list);
1167 radeon_bo_unreserve(bo_va->bo); 1226 radeon_bo_unreserve(bo_va->bo);
1227 radeon_fence_unref(&bo_va->last_pt_update);
1168 kfree(bo_va); 1228 kfree(bo_va);
1169 } 1229 }
1170 } 1230 }
1171 list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) { 1231 list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
1172 radeon_bo_unref(&bo_va->bo); 1232 radeon_bo_unref(&bo_va->bo);
1233 radeon_fence_unref(&bo_va->last_pt_update);
1173 kfree(bo_va); 1234 kfree(bo_va);
1174 } 1235 }
1175 1236
@@ -1179,9 +1240,10 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
1179 1240
1180 radeon_bo_unref(&vm->page_directory); 1241 radeon_bo_unref(&vm->page_directory);
1181 1242
1182 radeon_fence_unref(&vm->fence); 1243 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1183 radeon_fence_unref(&vm->last_flush); 1244 radeon_fence_unref(&vm->ids[i].flushed_updates);
1184 radeon_fence_unref(&vm->last_id_use); 1245 radeon_fence_unref(&vm->ids[i].last_id_use);
1246 }
1185 1247
1186 mutex_destroy(&vm->mutex); 1248 mutex_destroy(&vm->mutex);
1187} 1249}
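
With one VMID per ring, the flush decision is now made per ring from pd_gpu_addr and flushed_updates instead of the old VM-global last_flush fence. A hedged sketch of the submission-side ordering these hunks imply; the real call sites live in radeon_cs.c and radeon_ib_schedule(), which are not part of this excerpt, so the exact placement is an assumption:

	struct radeon_fence *f;

	f = radeon_vm_grab_id(rdev, vm, ring);	/* fence to wait on, or NULL */
	radeon_sync_fence(&ib->sync, f);
	/* flushes only if the PD moved or the updates fence is newer than
	 * the last one flushed on this ring */
	radeon_vm_flush(rdev, vm, ring, ib->sync.last_vm_update);
	/* ... execute the IB ... */
	radeon_vm_fence(rdev, vm, ib->fence);	/* records ids[ring].last_id_use */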
diff --git a/drivers/gpu/drm/radeon/rv770_dma.c b/drivers/gpu/drm/radeon/rv770_dma.c
index 7f34bad2e724..acff6e09cc40 100644
--- a/drivers/gpu/drm/radeon/rv770_dma.c
+++ b/drivers/gpu/drm/radeon/rv770_dma.c
@@ -44,31 +44,27 @@ struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev,
44 unsigned num_gpu_pages, 44 unsigned num_gpu_pages,
45 struct reservation_object *resv) 45 struct reservation_object *resv)
46{ 46{
47 struct radeon_semaphore *sem = NULL;
48 struct radeon_fence *fence; 47 struct radeon_fence *fence;
48 struct radeon_sync sync;
49 int ring_index = rdev->asic->copy.dma_ring_index; 49 int ring_index = rdev->asic->copy.dma_ring_index;
50 struct radeon_ring *ring = &rdev->ring[ring_index]; 50 struct radeon_ring *ring = &rdev->ring[ring_index];
51 u32 size_in_dw, cur_size_in_dw; 51 u32 size_in_dw, cur_size_in_dw;
52 int i, num_loops; 52 int i, num_loops;
53 int r = 0; 53 int r = 0;
54 54
55 r = radeon_semaphore_create(rdev, &sem); 55 radeon_sync_create(&sync);
56 if (r) {
57 DRM_ERROR("radeon: moving bo (%d).\n", r);
58 return ERR_PTR(r);
59 }
60 56
61 size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4; 57 size_in_dw = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT) / 4;
62 num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFF); 58 num_loops = DIV_ROUND_UP(size_in_dw, 0xFFFF);
63 r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8); 59 r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
64 if (r) { 60 if (r) {
65 DRM_ERROR("radeon: moving bo (%d).\n", r); 61 DRM_ERROR("radeon: moving bo (%d).\n", r);
66 radeon_semaphore_free(rdev, &sem, NULL); 62 radeon_sync_free(rdev, &sync, NULL);
67 return ERR_PTR(r); 63 return ERR_PTR(r);
68 } 64 }
69 65
70 radeon_semaphore_sync_resv(rdev, sem, resv, false); 66 radeon_sync_resv(rdev, &sync, resv, false);
71 radeon_semaphore_sync_rings(rdev, sem, ring->idx); 67 radeon_sync_rings(rdev, &sync, ring->idx);
72 68
73 for (i = 0; i < num_loops; i++) { 69 for (i = 0; i < num_loops; i++) {
74 cur_size_in_dw = size_in_dw; 70 cur_size_in_dw = size_in_dw;
@@ -87,12 +83,12 @@ struct radeon_fence *rv770_copy_dma(struct radeon_device *rdev,
87 r = radeon_fence_emit(rdev, &fence, ring->idx); 83 r = radeon_fence_emit(rdev, &fence, ring->idx);
88 if (r) { 84 if (r) {
89 radeon_ring_unlock_undo(rdev, ring); 85 radeon_ring_unlock_undo(rdev, ring);
90 radeon_semaphore_free(rdev, &sem, NULL); 86 radeon_sync_free(rdev, &sync, NULL);
91 return ERR_PTR(r); 87 return ERR_PTR(r);
92 } 88 }
93 89
94 radeon_ring_unlock_commit(rdev, ring, false); 90 radeon_ring_unlock_commit(rdev, ring, false);
95 radeon_semaphore_free(rdev, &sem, fence); 91 radeon_sync_free(rdev, &sync, fence);
96 92
97 return fence; 93 return fence;
98} 94}
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 7d5083dc4acb..60df444bd075 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -3365,6 +3365,7 @@ void si_fence_ring_emit(struct radeon_device *rdev,
3365void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib) 3365void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
3366{ 3366{
3367 struct radeon_ring *ring = &rdev->ring[ib->ring]; 3367 struct radeon_ring *ring = &rdev->ring[ib->ring];
3368 unsigned vm_id = ib->vm ? ib->vm->ids[ib->ring].id : 0;
3368 u32 header; 3369 u32 header;
3369 3370
3370 if (ib->is_const_ib) { 3371 if (ib->is_const_ib) {
@@ -3400,14 +3401,13 @@ void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
3400#endif 3401#endif
3401 (ib->gpu_addr & 0xFFFFFFFC)); 3402 (ib->gpu_addr & 0xFFFFFFFC));
3402 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF); 3403 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
3403 radeon_ring_write(ring, ib->length_dw | 3404 radeon_ring_write(ring, ib->length_dw | (vm_id << 24));
3404 (ib->vm ? (ib->vm->id << 24) : 0));
3405 3405
3406 if (!ib->is_const_ib) { 3406 if (!ib->is_const_ib) {
3407 /* flush read cache over gart for this vmid */ 3407 /* flush read cache over gart for this vmid */
3408 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 3408 radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
3409 radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2); 3409 radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
3410 radeon_ring_write(ring, ib->vm ? ib->vm->id : 0); 3410 radeon_ring_write(ring, vm_id);
3411 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3)); 3411 radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
3412 radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA | 3412 radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
3413 PACKET3_TC_ACTION_ENA | 3413 PACKET3_TC_ACTION_ENA |
@@ -5023,27 +5023,23 @@ static void si_vm_decode_fault(struct radeon_device *rdev,
5023 block, mc_id); 5023 block, mc_id);
5024} 5024}
5025 5025
5026void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) 5026void si_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
5027 unsigned vm_id, uint64_t pd_addr)
5027{ 5028{
5028 struct radeon_ring *ring = &rdev->ring[ridx];
5029
5030 if (vm == NULL)
5031 return;
5032
5033 /* write new base address */ 5029 /* write new base address */
5034 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 5030 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
5035 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) | 5031 radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) |
5036 WRITE_DATA_DST_SEL(0))); 5032 WRITE_DATA_DST_SEL(0)));
5037 5033
5038 if (vm->id < 8) { 5034 if (vm_id < 8) {
5039 radeon_ring_write(ring, 5035 radeon_ring_write(ring,
5040 (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2); 5036 (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2);
5041 } else { 5037 } else {
5042 radeon_ring_write(ring, 5038 radeon_ring_write(ring,
5043 (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2); 5039 (VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2);
5044 } 5040 }
5045 radeon_ring_write(ring, 0); 5041 radeon_ring_write(ring, 0);
5046 radeon_ring_write(ring, vm->pd_gpu_addr >> 12); 5042 radeon_ring_write(ring, pd_addr >> 12);
5047 5043
5048 /* flush hdp cache */ 5044 /* flush hdp cache */
5049 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); 5045 radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
@@ -5059,7 +5055,7 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
5059 WRITE_DATA_DST_SEL(0))); 5055 WRITE_DATA_DST_SEL(0)));
5060 radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); 5056 radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
5061 radeon_ring_write(ring, 0); 5057 radeon_ring_write(ring, 0);
5062 radeon_ring_write(ring, 1 << vm->id); 5058 radeon_ring_write(ring, 1 << vm_id);
5063 5059
5064 /* sync PFP to ME, otherwise we might get invalid PFP reads */ 5060 /* sync PFP to ME, otherwise we might get invalid PFP reads */
5065 radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); 5061 radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
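
si_vm_flush() now takes the ring, VMID and page-directory address directly instead of digging them out of a struct radeon_vm, and the NULL-vm early return moves to the caller. The asic-level dispatcher presumably changes to match; a sketch of the assumed shape (radeon_asic.h is not in this excerpt):

/* assumed dispatch macro after this change, matching the
 * radeon_ring_vm_flush(rdev, &rdev->ring[ring], vm_id->id, pd_gpu_addr)
 * call seen in radeon_vm.c above */
#define radeon_ring_vm_flush(rdev, r, vm_id, pd_addr) \
	(rdev)->asic->ring[(r)->idx]->vm_flush((rdev), (r), (vm_id), (pd_addr))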
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c
index b58f12b762d7..f5cc777e1c5f 100644
--- a/drivers/gpu/drm/radeon/si_dma.c
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -185,20 +185,17 @@ void si_dma_vm_set_pages(struct radeon_device *rdev,
185 } 185 }
186} 186}
187 187
188void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm) 188void si_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
189{ 189 unsigned vm_id, uint64_t pd_addr)
190 struct radeon_ring *ring = &rdev->ring[ridx];
191
192 if (vm == NULL)
193 return;
194 190
191{
195 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)); 192 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
196 if (vm->id < 8) { 193 if (vm_id < 8) {
197 radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2)) >> 2)); 194 radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2));
198 } else { 195 } else {
199 radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2)) >> 2)); 196 radeon_ring_write(ring, (0xf << 16) | ((VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm_id - 8) << 2)) >> 2));
200 } 197 }
201 radeon_ring_write(ring, vm->pd_gpu_addr >> 12); 198 radeon_ring_write(ring, pd_addr >> 12);
202 199
203 /* flush hdp cache */ 200 /* flush hdp cache */
204 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)); 201 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
@@ -208,7 +205,7 @@ void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
208 /* bits 0-7 are the VM contexts0-7 */ 205 /* bits 0-7 are the VM contexts0-7 */
209 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)); 206 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
210 radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2)); 207 radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
211 radeon_ring_write(ring, 1 << vm->id); 208 radeon_ring_write(ring, 1 << vm_id);
212} 209}
213 210
214/** 211/**
@@ -229,31 +226,27 @@ struct radeon_fence *si_copy_dma(struct radeon_device *rdev,
229 unsigned num_gpu_pages, 226 unsigned num_gpu_pages,
230 struct reservation_object *resv) 227 struct reservation_object *resv)
231{ 228{
232 struct radeon_semaphore *sem = NULL;
233 struct radeon_fence *fence; 229 struct radeon_fence *fence;
230 struct radeon_sync sync;
234 int ring_index = rdev->asic->copy.dma_ring_index; 231 int ring_index = rdev->asic->copy.dma_ring_index;
235 struct radeon_ring *ring = &rdev->ring[ring_index]; 232 struct radeon_ring *ring = &rdev->ring[ring_index];
236 u32 size_in_bytes, cur_size_in_bytes; 233 u32 size_in_bytes, cur_size_in_bytes;
237 int i, num_loops; 234 int i, num_loops;
238 int r = 0; 235 int r = 0;
239 236
240 r = radeon_semaphore_create(rdev, &sem); 237 radeon_sync_create(&sync);
241 if (r) {
242 DRM_ERROR("radeon: moving bo (%d).\n", r);
243 return ERR_PTR(r);
244 }
245 238
246 size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT); 239 size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
247 num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff); 240 num_loops = DIV_ROUND_UP(size_in_bytes, 0xfffff);
248 r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11); 241 r = radeon_ring_lock(rdev, ring, num_loops * 5 + 11);
249 if (r) { 242 if (r) {
250 DRM_ERROR("radeon: moving bo (%d).\n", r); 243 DRM_ERROR("radeon: moving bo (%d).\n", r);
251 radeon_semaphore_free(rdev, &sem, NULL); 244 radeon_sync_free(rdev, &sync, NULL);
252 return ERR_PTR(r); 245 return ERR_PTR(r);
253 } 246 }
254 247
255 radeon_semaphore_sync_resv(rdev, sem, resv, false); 248 radeon_sync_resv(rdev, &sync, resv, false);
256 radeon_semaphore_sync_rings(rdev, sem, ring->idx); 249 radeon_sync_rings(rdev, &sync, ring->idx);
257 250
258 for (i = 0; i < num_loops; i++) { 251 for (i = 0; i < num_loops; i++) {
259 cur_size_in_bytes = size_in_bytes; 252 cur_size_in_bytes = size_in_bytes;
@@ -272,12 +265,12 @@ struct radeon_fence *si_copy_dma(struct radeon_device *rdev,
272 r = radeon_fence_emit(rdev, &fence, ring->idx); 265 r = radeon_fence_emit(rdev, &fence, ring->idx);
273 if (r) { 266 if (r) {
274 radeon_ring_unlock_undo(rdev, ring); 267 radeon_ring_unlock_undo(rdev, ring);
275 radeon_semaphore_free(rdev, &sem, NULL); 268 radeon_sync_free(rdev, &sync, NULL);
276 return ERR_PTR(r); 269 return ERR_PTR(r);
277 } 270 }
278 271
279 radeon_ring_unlock_commit(rdev, ring, false); 272 radeon_ring_unlock_commit(rdev, ring, false);
280 radeon_semaphore_free(rdev, &sem, fence); 273 radeon_sync_free(rdev, &sync, fence);
281 274
282 return fence; 275 return fence;
283} 276}
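The hunk above converts si_copy_dma() from the old radeon_semaphore API to the new radeon_sync object. Two things change structurally: radeon_sync_create() initializes a caller-owned stack object and cannot fail, so the old allocation error path disappears, and the fence passed to radeon_sync_free() keeps the synchronization state alive until it signals, mirroring the old radeon_semaphore_free() call. A minimal sketch of the resulting pattern, using only the calls visible in this hunk (the function name and the ring-space count are made up; packet emission is elided):

struct radeon_fence *example_dma_copy(struct radeon_device *rdev,
				      struct radeon_ring *ring,
				      struct reservation_object *resv)
{
	struct radeon_fence *fence;
	struct radeon_sync sync;
	int r;

	radeon_sync_create(&sync);	/* stack object, cannot fail */

	r = radeon_ring_lock(rdev, ring, 64);
	if (r) {
		radeon_sync_free(rdev, &sync, NULL);
		return ERR_PTR(r);
	}

	/* collect fences from the reservation object, then emit waits */
	radeon_sync_resv(rdev, &sync, resv, false);
	radeon_sync_rings(rdev, &sync, ring->idx);

	/* ... emit DMA copy packets here ... */

	r = radeon_fence_emit(rdev, &fence, ring->idx);
	if (r) {
		radeon_ring_unlock_undo(rdev, ring);
		radeon_sync_free(rdev, &sync, NULL);
		return ERR_PTR(r);
	}

	radeon_ring_unlock_commit(rdev, ring, false);
	/* the fence defers teardown until the GPU is done with the sync */
	radeon_sync_free(rdev, &sync, fence);

	return fence;
}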
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 676e6c2ba90a..32e354b8b0ab 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -3398,6 +3398,15 @@ static int si_process_firmware_header(struct radeon_device *rdev)
3398 3398
3399 ret = si_read_smc_sram_dword(rdev, 3399 ret = si_read_smc_sram_dword(rdev,
3400 SISLANDS_SMC_FIRMWARE_HEADER_LOCATION + 3400 SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
3401 SISLANDS_SMC_FIRMWARE_HEADER_fanTable,
3402 &tmp, si_pi->sram_end);
3403 if (ret)
3404 return ret;
3405
3406 si_pi->fan_table_start = tmp;
3407
3408 ret = si_read_smc_sram_dword(rdev,
3409 SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
3401 SISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable, 3410 SISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable,
3402 &tmp, si_pi->sram_end); 3411 &tmp, si_pi->sram_end);
3403 if (ret) 3412 if (ret)
@@ -5817,8 +5826,33 @@ void si_dpm_setup_asic(struct radeon_device *rdev)
5817 si_enable_acpi_power_management(rdev); 5826 si_enable_acpi_power_management(rdev);
5818} 5827}
5819 5828
5820static int si_set_thermal_temperature_range(struct radeon_device *rdev, 5829static int si_thermal_enable_alert(struct radeon_device *rdev,
5821 int min_temp, int max_temp) 5830 bool enable)
5831{
5832 u32 thermal_int = RREG32(CG_THERMAL_INT);
5833
5834 if (enable) {
5835 PPSMC_Result result;
5836
5837 thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
5838 WREG32(CG_THERMAL_INT, thermal_int);
5839 rdev->irq.dpm_thermal = false;
5840 result = si_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);
5841 if (result != PPSMC_Result_OK) {
5842 DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
5843 return -EINVAL;
5844 }
5845 } else {
5846 thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
5847 WREG32(CG_THERMAL_INT, thermal_int);
5848 rdev->irq.dpm_thermal = true;
5849 }
5850
5851 return 0;
5852}
5853
5854static int si_thermal_set_temperature_range(struct radeon_device *rdev,
5855 int min_temp, int max_temp)
5822{ 5856{
5823 int low_temp = 0 * 1000; 5857 int low_temp = 0 * 1000;
5824 int high_temp = 255 * 1000; 5858 int high_temp = 255 * 1000;
@@ -5842,6 +5876,309 @@ static int si_set_thermal_temperature_range(struct radeon_device *rdev,
5842 return 0; 5876 return 0;
5843} 5877}
5844 5878
5879static void si_fan_ctrl_set_static_mode(struct radeon_device *rdev, u32 mode)
5880{
5881 struct si_power_info *si_pi = si_get_pi(rdev);
5882 u32 tmp;
5883
5884 if (si_pi->fan_ctrl_is_in_default_mode) {
5885 tmp = (RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT;
5886 si_pi->fan_ctrl_default_mode = tmp;
5887 tmp = (RREG32(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT;
5888 si_pi->t_min = tmp;
5889 si_pi->fan_ctrl_is_in_default_mode = false;
5890 }
5891
5892 tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;
5893 tmp |= TMIN(0);
5894 WREG32(CG_FDO_CTRL2, tmp);
5895
5896 tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
5897 tmp |= FDO_PWM_MODE(mode);
5898 WREG32(CG_FDO_CTRL2, tmp);
5899}
5900
5901static int si_thermal_setup_fan_table(struct radeon_device *rdev)
5902{
5903 struct si_power_info *si_pi = si_get_pi(rdev);
5904 PP_SIslands_FanTable fan_table = { FDO_MODE_HARDWARE };
5905 u32 duty100;
5906 u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
5907 u16 fdo_min, slope1, slope2;
5908 u32 reference_clock, tmp;
5909 int ret;
5910 u64 tmp64;
5911
5912 if (!si_pi->fan_table_start) {
5913 rdev->pm.dpm.fan.ucode_fan_control = false;
5914 return 0;
5915 }
5916
5917 duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
5918
5919 if (duty100 == 0) {
5920 rdev->pm.dpm.fan.ucode_fan_control = false;
5921 return 0;
5922 }
5923
5924 tmp64 = (u64)rdev->pm.dpm.fan.pwm_min * duty100;
5925 do_div(tmp64, 10000);
5926 fdo_min = (u16)tmp64;
5927
5928 t_diff1 = rdev->pm.dpm.fan.t_med - rdev->pm.dpm.fan.t_min;
5929 t_diff2 = rdev->pm.dpm.fan.t_high - rdev->pm.dpm.fan.t_med;
5930
5931 pwm_diff1 = rdev->pm.dpm.fan.pwm_med - rdev->pm.dpm.fan.pwm_min;
5932 pwm_diff2 = rdev->pm.dpm.fan.pwm_high - rdev->pm.dpm.fan.pwm_med;
5933
5934 slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
5935 slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
5936
5937 fan_table.slope1 = cpu_to_be16(slope1);
5938 fan_table.slope2 = cpu_to_be16(slope2);
5939
5940 fan_table.fdo_min = cpu_to_be16(fdo_min);
5941
5942 fan_table.hys_down = cpu_to_be16(rdev->pm.dpm.fan.t_hyst);
5943
5944 fan_table.hys_up = cpu_to_be16(1);
5945
5946 fan_table.hys_slope = cpu_to_be16(1);
5947
5948 fan_table.temp_resp_lim = cpu_to_be16(5);
5949
5950 reference_clock = radeon_get_xclk(rdev);
5951
5952 fan_table.refresh_period = cpu_to_be32((rdev->pm.dpm.fan.cycle_delay *
5953 reference_clock) / 1600);
5954
5955 fan_table.fdo_max = cpu_to_be16((u16)duty100);
5956
5957 tmp = (RREG32(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT;
5958 fan_table.temp_src = (uint8_t)tmp;
5959
5960 ret = si_copy_bytes_to_smc(rdev,
5961 si_pi->fan_table_start,
5962 (u8 *)(&fan_table),
5963 sizeof(fan_table),
5964 si_pi->sram_end);
5965
5966 if (ret) {
5967 DRM_ERROR("Failed to load fan table to the SMC.\n");
5968 rdev->pm.dpm.fan.ucode_fan_control = false;
5969 }
5970
5971 return 0;
5972}
5973
5974static int si_fan_ctrl_start_smc_fan_control(struct radeon_device *rdev)
5975{
5976 PPSMC_Result ret;
5977
5978 ret = si_send_msg_to_smc(rdev, PPSMC_StartFanControl);
5979 if (ret == PPSMC_Result_OK)
5980 return 0;
5981 else
5982 return -EINVAL;
5983}
5984
5985static int si_fan_ctrl_stop_smc_fan_control(struct radeon_device *rdev)
5986{
5987 PPSMC_Result ret;
5988
5989 ret = si_send_msg_to_smc(rdev, PPSMC_StopFanControl);
5990 if (ret == PPSMC_Result_OK)
5991 return 0;
5992 else
5993 return -EINVAL;
5994}
5995
5996#if 0
5997static int si_fan_ctrl_get_fan_speed_percent(struct radeon_device *rdev,
5998 u32 *speed)
5999{
6000 u32 duty, duty100;
6001 u64 tmp64;
6002
6003 if (rdev->pm.no_fan)
6004 return -ENOENT;
6005
6006 duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
6007 duty = (RREG32(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT;
6008
6009 if (duty100 == 0)
6010 return -EINVAL;
6011
6012 tmp64 = (u64)duty * 100;
6013 do_div(tmp64, duty100);
6014 *speed = (u32)tmp64;
6015
6016 if (*speed > 100)
6017 *speed = 100;
6018
6019 return 0;
6020}
6021
6022static int si_fan_ctrl_set_fan_speed_percent(struct radeon_device *rdev,
6023 u32 speed)
6024{
6025 u32 tmp;
6026 u32 duty, duty100;
6027 u64 tmp64;
6028
6029 if (rdev->pm.no_fan)
6030 return -ENOENT;
6031
6032 if (speed > 100)
6033 return -EINVAL;
6034
6035 if (rdev->pm.dpm.fan.ucode_fan_control)
6036 si_fan_ctrl_stop_smc_fan_control(rdev);
6037
6038 duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
6039
6040 if (duty100 == 0)
6041 return -EINVAL;
6042
6043 tmp64 = (u64)speed * duty100;
6044 do_div(tmp64, 100);
6045 duty = (u32)tmp64;
6046
6047 tmp = RREG32(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK;
6048 tmp |= FDO_STATIC_DUTY(duty);
6049 WREG32(CG_FDO_CTRL0, tmp);
6050
6051 si_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC);
6052
6053 return 0;
6054}
6055
6056static int si_fan_ctrl_get_fan_speed_rpm(struct radeon_device *rdev,
6057 u32 *speed)
6058{
6059 u32 tach_period;
6060 u32 xclk = radeon_get_xclk(rdev);
6061
6062 if (rdev->pm.no_fan)
6063 return -ENOENT;
6064
6065 if (rdev->pm.fan_pulses_per_revolution == 0)
6066 return -ENOENT;
6067
6068 tach_period = (RREG32(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT;
6069 if (tach_period == 0)
6070 return -ENOENT;
6071
6072 *speed = 60 * xclk * 10000 / tach_period;
6073
6074 return 0;
6075}
6076
6077static int si_fan_ctrl_set_fan_speed_rpm(struct radeon_device *rdev,
6078 u32 speed)
6079{
6080 u32 tach_period, tmp;
6081 u32 xclk = radeon_get_xclk(rdev);
6082
6083 if (rdev->pm.no_fan)
6084 return -ENOENT;
6085
6086 if (rdev->pm.fan_pulses_per_revolution == 0)
6087 return -ENOENT;
6088
6089 if ((speed < rdev->pm.fan_min_rpm) ||
6090 (speed > rdev->pm.fan_max_rpm))
6091 return -EINVAL;
6092
6093 if (rdev->pm.dpm.fan.ucode_fan_control)
6094 si_fan_ctrl_stop_smc_fan_control(rdev);
6095
6096 tach_period = 60 * xclk * 10000 / (8 * speed);
6097 tmp = RREG32(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK;
6098 tmp |= TARGET_PERIOD(tach_period);
6099 WREG32(CG_TACH_CTRL, tmp);
6100
6101 si_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC_RPM);
6102
6103 return 0;
6104}
6105#endif
6106
6107static void si_fan_ctrl_set_default_mode(struct radeon_device *rdev)
6108{
6109 struct si_power_info *si_pi = si_get_pi(rdev);
6110 u32 tmp;
6111
6112 if (!si_pi->fan_ctrl_is_in_default_mode) {
6113 tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
6114 tmp |= FDO_PWM_MODE(si_pi->fan_ctrl_default_mode);
6115 WREG32(CG_FDO_CTRL2, tmp);
6116
6117 tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;
6118 tmp |= TMIN(si_pi->t_min);
6119 WREG32(CG_FDO_CTRL2, tmp);
6120 si_pi->fan_ctrl_is_in_default_mode = true;
6121 }
6122}
6123
6124static void si_thermal_start_smc_fan_control(struct radeon_device *rdev)
6125{
6126 if (rdev->pm.dpm.fan.ucode_fan_control) {
6127 si_fan_ctrl_start_smc_fan_control(rdev);
6128 si_fan_ctrl_set_static_mode(rdev, FDO_PWM_MODE_STATIC);
6129 }
6130}
6131
6132static void si_thermal_initialize(struct radeon_device *rdev)
6133{
6134 u32 tmp;
6135
6136 if (rdev->pm.fan_pulses_per_revolution) {
6137 tmp = RREG32(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK;
6138 tmp |= EDGE_PER_REV(rdev->pm.fan_pulses_per_revolution - 1);
6139 WREG32(CG_TACH_CTRL, tmp);
6140 }
6141
6142 tmp = RREG32(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK;
6143 tmp |= TACH_PWM_RESP_RATE(0x28);
6144 WREG32(CG_FDO_CTRL2, tmp);
6145}
6146
6147static int si_thermal_start_thermal_controller(struct radeon_device *rdev)
6148{
6149 int ret;
6150
6151 si_thermal_initialize(rdev);
6152 ret = si_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
6153 if (ret)
6154 return ret;
6155 ret = si_thermal_enable_alert(rdev, true);
6156 if (ret)
6157 return ret;
6158 if (rdev->pm.dpm.fan.ucode_fan_control) {
6159 ret = si_halt_smc(rdev);
6160 if (ret)
6161 return ret;
6162 ret = si_thermal_setup_fan_table(rdev);
6163 if (ret)
6164 return ret;
6165 ret = si_resume_smc(rdev);
6166 if (ret)
6167 return ret;
6168 si_thermal_start_smc_fan_control(rdev);
6169 }
6170
6171 return 0;
6172}
6173
6174static void si_thermal_stop_thermal_controller(struct radeon_device *rdev)
6175{
6176 if (!rdev->pm.no_fan) {
6177 si_fan_ctrl_set_default_mode(rdev);
6178 si_fan_ctrl_stop_smc_fan_control(rdev);
6179 }
6180}
6181
5845int si_dpm_enable(struct radeon_device *rdev) 6182int si_dpm_enable(struct radeon_device *rdev)
5846{ 6183{
5847 struct rv7xx_power_info *pi = rv770_get_pi(rdev); 6184 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
@@ -5954,31 +6291,39 @@ int si_dpm_enable(struct radeon_device *rdev)
5954 6291
5955 si_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true); 6292 si_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
5956 6293
6294 si_thermal_start_thermal_controller(rdev);
6295
5957 ni_update_current_ps(rdev, boot_ps); 6296 ni_update_current_ps(rdev, boot_ps);
5958 6297
5959 return 0; 6298 return 0;
5960} 6299}
5961 6300
5962int si_dpm_late_enable(struct radeon_device *rdev) 6301static int si_set_temperature_range(struct radeon_device *rdev)
5963{ 6302{
5964 int ret; 6303 int ret;
5965 6304
5966 if (rdev->irq.installed && 6305 ret = si_thermal_enable_alert(rdev, false);
5967 r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) { 6306 if (ret)
5968 PPSMC_Result result; 6307 return ret;
6308 ret = si_thermal_set_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
6309 if (ret)
6310 return ret;
6311 ret = si_thermal_enable_alert(rdev, true);
6312 if (ret)
6313 return ret;
5969 6314
5970 ret = si_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); 6315 return ret;
5971 if (ret) 6316}
5972 return ret;
5973 rdev->irq.dpm_thermal = true;
5974 radeon_irq_set(rdev);
5975 result = si_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);
5976 6317
5977 if (result != PPSMC_Result_OK) 6318int si_dpm_late_enable(struct radeon_device *rdev)
5978 DRM_DEBUG_KMS("Could not enable thermal interrupts.\n"); 6319{
5979 } 6320 int ret;
5980 6321
5981 return 0; 6322 ret = si_set_temperature_range(rdev);
6323 if (ret)
6324 return ret;
6325
6326 return ret;
5982} 6327}
5983 6328
5984void si_dpm_disable(struct radeon_device *rdev) 6329void si_dpm_disable(struct radeon_device *rdev)
@@ -5988,6 +6333,7 @@ void si_dpm_disable(struct radeon_device *rdev)
5988 6333
5989 if (!si_is_smc_running(rdev)) 6334 if (!si_is_smc_running(rdev))
5990 return; 6335 return;
6336 si_thermal_stop_thermal_controller(rdev);
5991 si_disable_ulv(rdev); 6337 si_disable_ulv(rdev);
5992 si_clear_vc(rdev); 6338 si_clear_vc(rdev);
5993 if (pi->thermal_protection) 6339 if (pi->thermal_protection)
@@ -6526,6 +6872,9 @@ int si_dpm_init(struct radeon_device *rdev)
6526 rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc = 6872 rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
6527 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; 6873 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
6528 6874
6875 si_pi->fan_ctrl_is_in_default_mode = true;
6876 rdev->pm.dpm.fan.ucode_fan_control = false;
6877
6529 return 0; 6878 return 0;
6530} 6879}
6531 6880
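Since the fixed-point slope encoding in si_thermal_setup_fan_table() is easy to misread, here is a worked example with illustrative values (not taken from any real fan profile):

/*
 * slope1 = (50 + (16 * duty100 * pwm_diff1) / t_diff1) / 100
 *
 * With duty100 = 255, pwm_diff1 = 30 (percent) and t_diff1 = 25 (deg C):
 *
 *   16 * 255 * 30 / 25 = 4896
 *   (50 + 4896) / 100  = 49
 *
 * i.e. the slope is stored in units of duty100/16 per degree:
 * 49 / 16 ~= 3.06 duty counts per degree, which matches
 * (30% of 255) / 25 deg C. The "+ 50" rounds to nearest on the
 * final division by 100 (the percent scaling).
 */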
diff --git a/drivers/gpu/drm/radeon/si_dpm.h b/drivers/gpu/drm/radeon/si_dpm.h
index 8b5c06a0832d..d16bb1b5f10f 100644
--- a/drivers/gpu/drm/radeon/si_dpm.h
+++ b/drivers/gpu/drm/radeon/si_dpm.h
@@ -182,6 +182,7 @@ struct si_power_info {
182 u32 dte_table_start; 182 u32 dte_table_start;
183 u32 spll_table_start; 183 u32 spll_table_start;
184 u32 papm_cfg_table_start; 184 u32 papm_cfg_table_start;
185 u32 fan_table_start;
185 /* CAC stuff */ 186 /* CAC stuff */
186 const struct si_cac_config_reg *cac_weights; 187 const struct si_cac_config_reg *cac_weights;
187 const struct si_cac_config_reg *lcac_config; 188 const struct si_cac_config_reg *lcac_config;
@@ -197,6 +198,10 @@ struct si_power_info {
197 /* SVI2 */ 198 /* SVI2 */
198 u8 svd_gpio_id; 199 u8 svd_gpio_id;
199 u8 svc_gpio_id; 200 u8 svc_gpio_id;
201 /* fan control */
202 bool fan_ctrl_is_in_default_mode;
203 u32 t_min;
204 u32 fan_ctrl_default_mode;
200}; 205};
201 206
202#define SISLANDS_INITIAL_STATE_ARB_INDEX 0 207#define SISLANDS_INITIAL_STATE_ARB_INDEX 0
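The three new fan-control fields pair up with the save/restore helpers added in si_dpm.c above; the intended life cycle, summarized:

/*
 * fan_ctrl_is_in_default_mode == true:  CG_FDO_CTRL2 still holds the
 *     default FDO_PWM_MODE and TMIN values.
 * si_fan_ctrl_set_static_mode():  on the first call, saves both values
 *     into fan_ctrl_default_mode / t_min and clears the flag before
 *     forcing TMIN(0) and the requested PWM mode.
 * si_fan_ctrl_set_default_mode(): writes the saved values back and sets
 *     the flag again, returning fan control to the hardware defaults.
 */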
diff --git a/drivers/gpu/drm/radeon/si_smc.c b/drivers/gpu/drm/radeon/si_smc.c
index 73dbc79c959d..e5bb92f16775 100644
--- a/drivers/gpu/drm/radeon/si_smc.c
+++ b/drivers/gpu/drm/radeon/si_smc.c
@@ -135,7 +135,7 @@ void si_reset_smc(struct radeon_device *rdev)
135 135
136int si_program_jump_on_start(struct radeon_device *rdev) 136int si_program_jump_on_start(struct radeon_device *rdev)
137{ 137{
138 static u8 data[] = { 0x0E, 0x00, 0x40, 0x40 }; 138 static const u8 data[] = { 0x0E, 0x00, 0x40, 0x40 };
139 139
140 return si_copy_bytes_to_smc(rdev, 0x0, data, 4, sizeof(data)+1); 140 return si_copy_bytes_to_smc(rdev, 0x0, data, 4, sizeof(data)+1);
141} 141}
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 6635da9ec986..4069be89e585 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -180,7 +180,10 @@
180#define DIG_THERM_DPM(x) ((x) << 14) 180#define DIG_THERM_DPM(x) ((x) << 14)
181#define DIG_THERM_DPM_MASK 0x003FC000 181#define DIG_THERM_DPM_MASK 0x003FC000
182#define DIG_THERM_DPM_SHIFT 14 182#define DIG_THERM_DPM_SHIFT 14
183 183#define CG_THERMAL_STATUS 0x704
184#define FDO_PWM_DUTY(x) ((x) << 9)
185#define FDO_PWM_DUTY_MASK (0xff << 9)
186#define FDO_PWM_DUTY_SHIFT 9
184#define CG_THERMAL_INT 0x708 187#define CG_THERMAL_INT 0x708
185#define DIG_THERM_INTH(x) ((x) << 8) 188#define DIG_THERM_INTH(x) ((x) << 8)
186#define DIG_THERM_INTH_MASK 0x0000FF00 189#define DIG_THERM_INTH_MASK 0x0000FF00
@@ -191,6 +194,10 @@
191#define THERM_INT_MASK_HIGH (1 << 24) 194#define THERM_INT_MASK_HIGH (1 << 24)
192#define THERM_INT_MASK_LOW (1 << 25) 195#define THERM_INT_MASK_LOW (1 << 25)
193 196
197#define CG_MULT_THERMAL_CTRL 0x710
198#define TEMP_SEL(x) ((x) << 20)
199#define TEMP_SEL_MASK (0xff << 20)
200#define TEMP_SEL_SHIFT 20
194#define CG_MULT_THERMAL_STATUS 0x714 201#define CG_MULT_THERMAL_STATUS 0x714
195#define ASIC_MAX_TEMP(x) ((x) << 0) 202#define ASIC_MAX_TEMP(x) ((x) << 0)
196#define ASIC_MAX_TEMP_MASK 0x000001ff 203#define ASIC_MAX_TEMP_MASK 0x000001ff
@@ -199,6 +206,37 @@
199#define CTF_TEMP_MASK 0x0003fe00 206#define CTF_TEMP_MASK 0x0003fe00
200#define CTF_TEMP_SHIFT 9 207#define CTF_TEMP_SHIFT 9
201 208
209#define CG_FDO_CTRL0 0x754
210#define FDO_STATIC_DUTY(x) ((x) << 0)
211#define FDO_STATIC_DUTY_MASK 0x000000FF
212#define FDO_STATIC_DUTY_SHIFT 0
213#define CG_FDO_CTRL1 0x758
214#define FMAX_DUTY100(x) ((x) << 0)
215#define FMAX_DUTY100_MASK 0x000000FF
216#define FMAX_DUTY100_SHIFT 0
217#define CG_FDO_CTRL2 0x75C
218#define TMIN(x) ((x) << 0)
219#define TMIN_MASK 0x000000FF
220#define TMIN_SHIFT 0
221#define FDO_PWM_MODE(x) ((x) << 11)
222#define FDO_PWM_MODE_MASK (7 << 11)
223#define FDO_PWM_MODE_SHIFT 11
224#define TACH_PWM_RESP_RATE(x) ((x) << 25)
225#define TACH_PWM_RESP_RATE_MASK (0x7f << 25)
226#define TACH_PWM_RESP_RATE_SHIFT 25
227
228#define CG_TACH_CTRL 0x770
229# define EDGE_PER_REV(x) ((x) << 0)
230# define EDGE_PER_REV_MASK (0x7 << 0)
231# define EDGE_PER_REV_SHIFT 0
232# define TARGET_PERIOD(x) ((x) << 3)
233# define TARGET_PERIOD_MASK 0xfffffff8
234# define TARGET_PERIOD_SHIFT 3
235#define CG_TACH_STATUS 0x774
236# define TACH_PERIOD(x) ((x) << 0)
237# define TACH_PERIOD_MASK 0xffffffff
238# define TACH_PERIOD_SHIFT 0
239
202#define GENERAL_PWRMGT 0x780 240#define GENERAL_PWRMGT 0x780
203# define GLOBAL_PWRMGT_EN (1 << 0) 241# define GLOBAL_PWRMGT_EN (1 << 0)
204# define STATIC_PM_EN (1 << 1) 242# define STATIC_PM_EN (1 << 1)
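For the tachometer fields, a worked example of the conversion used by si_fan_ctrl_set_fan_speed_rpm() above, assuming (as elsewhere in radeon) that radeon_get_xclk() reports the reference clock in 10 kHz units:

/*
 * xclk = 10000 (100 MHz), target speed = 3000 RPM:
 *
 *   tach_period = 60 * xclk * 10000 / (8 * speed)
 *               = 6,000,000,000 / 24,000
 *               = 250,000
 *
 * which fits easily in the 29-bit TARGET_PERIOD field (bits 31:3).
 * Note that the intermediate 60 * xclk * 10000 is evaluated in u32
 * arithmetic and wraps for xclk above ~7158, so the math as written
 * only holds if the expression is widened first; this may be one
 * reason the RPM helpers are still under "#if 0".
 */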
diff --git a/drivers/gpu/drm/radeon/sislands_smc.h b/drivers/gpu/drm/radeon/sislands_smc.h
index 623a0b1e2d9d..3c779838d9ab 100644
--- a/drivers/gpu/drm/radeon/sislands_smc.h
+++ b/drivers/gpu/drm/radeon/sislands_smc.h
@@ -245,6 +245,31 @@ typedef struct SISLANDS_SMC_STATETABLE SISLANDS_SMC_STATETABLE;
245#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd 0x11c 245#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd 0x11c
246#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc 0x120 246#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc 0x120
247 247
248struct PP_SIslands_FanTable
249{
250 uint8_t fdo_mode;
251 uint8_t padding;
252 int16_t temp_min;
253 int16_t temp_med;
254 int16_t temp_max;
255 int16_t slope1;
256 int16_t slope2;
257 int16_t fdo_min;
258 int16_t hys_up;
259 int16_t hys_down;
260 int16_t hys_slope;
261 int16_t temp_resp_lim;
262 int16_t temp_curr;
263 int16_t slope_curr;
264 int16_t pwm_curr;
265 uint32_t refresh_period;
266 int16_t fdo_max;
267 uint8_t temp_src;
268 int8_t padding2;
269};
270
271typedef struct PP_SIslands_FanTable PP_SIslands_FanTable;
272
248#define SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES 16 273#define SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES 16
249#define SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES 32 274#define SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES 32
250 275
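One detail worth noting about PP_SIslands_FanTable: si_thermal_setup_fan_table() above converts every multi-byte field with cpu_to_be16()/cpu_to_be32() before si_copy_bytes_to_smc(), which indicates the SMC firmware reads the table big-endian. A compact sketch of the packing (slope1, fdo_min and refresh stand in for caller-computed values):

PP_SIslands_FanTable ft = { .fdo_mode = FDO_MODE_HARDWARE };

ft.slope1         = cpu_to_be16(slope1);   /* 16-bit fields: big-endian */
ft.fdo_min        = cpu_to_be16(fdo_min);
ft.refresh_period = cpu_to_be32(refresh);  /* the lone 32-bit field */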
diff --git a/drivers/gpu/drm/radeon/smu7_discrete.h b/drivers/gpu/drm/radeon/smu7_discrete.h
index 82f70c90a9ee..0b0b404ff091 100644
--- a/drivers/gpu/drm/radeon/smu7_discrete.h
+++ b/drivers/gpu/drm/radeon/smu7_discrete.h
@@ -431,6 +431,31 @@ struct SMU7_Discrete_MCRegisters
431 431
432typedef struct SMU7_Discrete_MCRegisters SMU7_Discrete_MCRegisters; 432typedef struct SMU7_Discrete_MCRegisters SMU7_Discrete_MCRegisters;
433 433
434struct SMU7_Discrete_FanTable
435{
436 uint16_t FdoMode;
437 int16_t TempMin;
438 int16_t TempMed;
439 int16_t TempMax;
440 int16_t Slope1;
441 int16_t Slope2;
442 int16_t FdoMin;
443 int16_t HystUp;
444 int16_t HystDown;
445 int16_t HystSlope;
446 int16_t TempRespLim;
447 int16_t TempCurr;
448 int16_t SlopeCurr;
449 int16_t PwmCurr;
450 uint32_t RefreshPeriod;
451 int16_t FdoMax;
452 uint8_t TempSrc;
453 int8_t Padding;
454};
455
456typedef struct SMU7_Discrete_FanTable SMU7_Discrete_FanTable;
457
458
434struct SMU7_Discrete_PmFuses { 459struct SMU7_Discrete_PmFuses {
435 // dw0-dw1 460 // dw0-dw1
436 uint8_t BapmVddCVidHiSidd[8]; 461 uint8_t BapmVddCVidHiSidd[8];
@@ -462,7 +487,10 @@ struct SMU7_Discrete_PmFuses {
462 uint8_t BapmVddCVidHiSidd2[8]; 487 uint8_t BapmVddCVidHiSidd2[8];
463 488
464 // dw11-dw12 489 // dw11-dw12
465 uint32_t Reserved6[2]; 490 int16_t FuzzyFan_ErrorSetDelta;
491 int16_t FuzzyFan_ErrorRateSetDelta;
492 int16_t FuzzyFan_PwmSetDelta;
493 uint16_t CalcMeasPowerBlend;
466 494
467 // dw13-dw16 495 // dw13-dw16
468 uint8_t GnbLPML[16]; 496 uint8_t GnbLPML[16];
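The SMU7_Discrete_PmFuses change above is layout-neutral: the four new 16-bit fuzzy-fan fields occupy exactly the 8 bytes (dw11-dw12) freed by dropping Reserved6[2], so every following offset in the structure is unchanged. A build-time guard for that invariant could look like this (hypothetical, not part of the patch):

BUILD_BUG_ON(3 * sizeof(int16_t) + sizeof(uint16_t) != 2 * sizeof(uint32_t));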
diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig
index c96f6089f8bf..2324a526de65 100644
--- a/drivers/gpu/drm/rcar-du/Kconfig
+++ b/drivers/gpu/drm/rcar-du/Kconfig
@@ -11,10 +11,17 @@ config DRM_RCAR_DU
11 Choose this option if you have an R-Car chipset. 11 Choose this option if you have an R-Car chipset.
12 If M is selected the module will be called rcar-du-drm. 12 If M is selected the module will be called rcar-du-drm.
13 13
14config DRM_RCAR_HDMI
15 bool "R-Car DU HDMI Encoder Support"
16 depends on DRM_RCAR_DU
17 depends on OF
18 help
19 Enable support for external HDMI encoders.
20
14config DRM_RCAR_LVDS 21config DRM_RCAR_LVDS
15 bool "R-Car DU LVDS Encoder Support" 22 bool "R-Car DU LVDS Encoder Support"
16 depends on DRM_RCAR_DU 23 depends on DRM_RCAR_DU
17 depends on ARCH_R8A7790 || ARCH_R8A7791 || COMPILE_TEST 24 depends on ARCH_R8A7790 || ARCH_R8A7791 || COMPILE_TEST
18 help 25 help
19 Enable support the R-Car Display Unit embedded LVDS encoders 26 Enable support for the R-Car Display Unit embedded LVDS encoders
20 (currently only on R8A7790). 27 (currently only on R8A7790 and R8A7791).
diff --git a/drivers/gpu/drm/rcar-du/Makefile b/drivers/gpu/drm/rcar-du/Makefile
index 12b8d4477835..05de1c4097af 100644
--- a/drivers/gpu/drm/rcar-du/Makefile
+++ b/drivers/gpu/drm/rcar-du/Makefile
@@ -7,6 +7,8 @@ rcar-du-drm-y := rcar_du_crtc.o \
7 rcar_du_plane.o \ 7 rcar_du_plane.o \
8 rcar_du_vgacon.o 8 rcar_du_vgacon.o
9 9
10rcar-du-drm-$(CONFIG_DRM_RCAR_HDMI) += rcar_du_hdmicon.o \
11 rcar_du_hdmienc.o
10rcar-du-drm-$(CONFIG_DRM_RCAR_LVDS) += rcar_du_lvdsenc.o 12rcar-du-drm-$(CONFIG_DRM_RCAR_LVDS) += rcar_du_lvdsenc.o
11 13
12obj-$(CONFIG_DRM_RCAR_DU) += rcar-du-drm.o 14obj-$(CONFIG_DRM_RCAR_DU) += rcar-du-drm.o
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 148b50589181..23cc910951f4 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -19,6 +19,7 @@
19#include <drm/drm_crtc_helper.h> 19#include <drm/drm_crtc_helper.h>
20#include <drm/drm_fb_cma_helper.h> 20#include <drm/drm_fb_cma_helper.h>
21#include <drm/drm_gem_cma_helper.h> 21#include <drm/drm_gem_cma_helper.h>
22#include <drm/drm_plane_helper.h>
22 23
23#include "rcar_du_crtc.h" 24#include "rcar_du_crtc.h"
24#include "rcar_du_drv.h" 25#include "rcar_du_drv.h"
@@ -585,7 +586,7 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index)
585 586
586 if (irq < 0) { 587 if (irq < 0) {
587 dev_err(rcdu->dev, "no IRQ for CRTC %u\n", index); 588 dev_err(rcdu->dev, "no IRQ for CRTC %u\n", index);
588 return ret; 589 return irq;
589 } 590 }
590 591
591 ret = devm_request_irq(rcdu->dev, irq, rcar_du_crtc_irq, irqflags, 592 ret = devm_request_irq(rcdu->dev, irq, rcar_du_crtc_irq, irqflags,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
index e97ae502dec5..984e6083699f 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
@@ -15,7 +15,6 @@
15#define __RCAR_DU_CRTC_H__ 15#define __RCAR_DU_CRTC_H__
16 16
17#include <linux/mutex.h> 17#include <linux/mutex.h>
18#include <linux/platform_data/rcar-du.h>
19 18
20#include <drm/drmP.h> 19#include <drm/drmP.h>
21#include <drm/drm_crtc.h> 20#include <drm/drm_crtc.h>
@@ -41,6 +40,15 @@ struct rcar_du_crtc {
41 40
42#define to_rcar_crtc(c) container_of(c, struct rcar_du_crtc, crtc) 41#define to_rcar_crtc(c) container_of(c, struct rcar_du_crtc, crtc)
43 42
43enum rcar_du_output {
44 RCAR_DU_OUTPUT_DPAD0,
45 RCAR_DU_OUTPUT_DPAD1,
46 RCAR_DU_OUTPUT_LVDS0,
47 RCAR_DU_OUTPUT_LVDS1,
48 RCAR_DU_OUTPUT_TCON,
49 RCAR_DU_OUTPUT_MAX,
50};
51
44int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index); 52int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index);
45void rcar_du_crtc_enable_vblank(struct rcar_du_crtc *rcrtc, bool enable); 53void rcar_du_crtc_enable_vblank(struct rcar_du_crtc *rcrtc, bool enable);
46void rcar_du_crtc_cancel_page_flip(struct rcar_du_crtc *rcrtc, 54void rcar_du_crtc_cancel_page_flip(struct rcar_du_crtc *rcrtc,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index e419aade2209..7bfa09cf18d5 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -146,12 +146,11 @@ static int rcar_du_load(struct drm_device *dev, unsigned long flags)
146{ 146{
147 struct platform_device *pdev = dev->platformdev; 147 struct platform_device *pdev = dev->platformdev;
148 struct device_node *np = pdev->dev.of_node; 148 struct device_node *np = pdev->dev.of_node;
149 struct rcar_du_platform_data *pdata = pdev->dev.platform_data;
150 struct rcar_du_device *rcdu; 149 struct rcar_du_device *rcdu;
151 struct resource *mem; 150 struct resource *mem;
152 int ret; 151 int ret;
153 152
154 if (pdata == NULL && np == NULL) { 153 if (np == NULL) {
155 dev_err(dev->dev, "no platform data\n"); 154 dev_err(dev->dev, "no platform data\n");
156 return -ENODEV; 155 return -ENODEV;
157 } 156 }
@@ -163,7 +162,6 @@ static int rcar_du_load(struct drm_device *dev, unsigned long flags)
163 } 162 }
164 163
165 rcdu->dev = &pdev->dev; 164 rcdu->dev = &pdev->dev;
166 rcdu->pdata = pdata;
167 rcdu->info = np ? of_match_device(rcar_du_of_table, rcdu->dev)->data 165 rcdu->info = np ? of_match_device(rcar_du_of_table, rcdu->dev)->data
168 : (void *)platform_get_device_id(pdev)->driver_data; 166 : (void *)platform_get_device_id(pdev)->driver_data;
169 rcdu->ddev = dev; 167 rcdu->ddev = dev;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
index 8e494633c3b3..0a724669f02d 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
@@ -15,7 +15,6 @@
15#define __RCAR_DU_DRV_H__ 15#define __RCAR_DU_DRV_H__
16 16
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/platform_data/rcar-du.h>
19 18
20#include "rcar_du_crtc.h" 19#include "rcar_du_crtc.h"
21#include "rcar_du_group.h" 20#include "rcar_du_group.h"
@@ -67,7 +66,6 @@ struct rcar_du_device_info {
67 66
68struct rcar_du_device { 67struct rcar_du_device {
69 struct device *dev; 68 struct device *dev;
70 const struct rcar_du_platform_data *pdata;
71 const struct rcar_du_device_info *info; 69 const struct rcar_du_device_info *info;
72 70
73 void __iomem *mmio; 71 void __iomem *mmio;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index 7c0ec95915ef..34a122a39664 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -19,6 +19,8 @@
19 19
20#include "rcar_du_drv.h" 20#include "rcar_du_drv.h"
21#include "rcar_du_encoder.h" 21#include "rcar_du_encoder.h"
22#include "rcar_du_hdmicon.h"
23#include "rcar_du_hdmienc.h"
22#include "rcar_du_kms.h" 24#include "rcar_du_kms.h"
23#include "rcar_du_lvdscon.h" 25#include "rcar_du_lvdscon.h"
24#include "rcar_du_lvdsenc.h" 26#include "rcar_du_lvdsenc.h"
@@ -33,7 +35,7 @@ rcar_du_connector_best_encoder(struct drm_connector *connector)
33{ 35{
34 struct rcar_du_connector *rcon = to_rcar_connector(connector); 36 struct rcar_du_connector *rcon = to_rcar_connector(connector);
35 37
36 return &rcon->encoder->encoder; 38 return rcar_encoder_to_drm_encoder(rcon->encoder);
37} 39}
38 40
39/* ----------------------------------------------------------------------------- 41/* -----------------------------------------------------------------------------
@@ -142,10 +144,11 @@ static const struct drm_encoder_funcs encoder_funcs = {
142int rcar_du_encoder_init(struct rcar_du_device *rcdu, 144int rcar_du_encoder_init(struct rcar_du_device *rcdu,
143 enum rcar_du_encoder_type type, 145 enum rcar_du_encoder_type type,
144 enum rcar_du_output output, 146 enum rcar_du_output output,
145 const struct rcar_du_encoder_data *data, 147 struct device_node *enc_node,
146 struct device_node *np) 148 struct device_node *con_node)
147{ 149{
148 struct rcar_du_encoder *renc; 150 struct rcar_du_encoder *renc;
151 struct drm_encoder *encoder;
149 unsigned int encoder_type; 152 unsigned int encoder_type;
150 int ret; 153 int ret;
151 154
@@ -154,6 +157,7 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
154 return -ENOMEM; 157 return -ENOMEM;
155 158
156 renc->output = output; 159 renc->output = output;
160 encoder = rcar_encoder_to_drm_encoder(renc);
157 161
158 switch (output) { 162 switch (output) {
159 case RCAR_DU_OUTPUT_LVDS0: 163 case RCAR_DU_OUTPUT_LVDS0:
@@ -175,6 +179,9 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
175 case RCAR_DU_ENCODER_LVDS: 179 case RCAR_DU_ENCODER_LVDS:
176 encoder_type = DRM_MODE_ENCODER_LVDS; 180 encoder_type = DRM_MODE_ENCODER_LVDS;
177 break; 181 break;
182 case RCAR_DU_ENCODER_HDMI:
183 encoder_type = DRM_MODE_ENCODER_TMDS;
184 break;
178 case RCAR_DU_ENCODER_NONE: 185 case RCAR_DU_ENCODER_NONE:
179 default: 186 default:
180 /* No external encoder, use the internal encoder type. */ 187 /* No external encoder, use the internal encoder type. */
@@ -182,23 +189,35 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
182 break; 189 break;
183 } 190 }
184 191
185 ret = drm_encoder_init(rcdu->ddev, &renc->encoder, &encoder_funcs, 192 if (type == RCAR_DU_ENCODER_HDMI) {
186 encoder_type); 193 if (renc->lvds) {
187 if (ret < 0) 194 dev_err(rcdu->dev,
188 return ret; 195 "Chaining LVDS and HDMI encoders not supported\n");
196 return -EINVAL;
197 }
189 198
190 drm_encoder_helper_add(&renc->encoder, &encoder_helper_funcs); 199 ret = rcar_du_hdmienc_init(rcdu, renc, enc_node);
200 if (ret < 0)
201 return ret;
202 } else {
203 ret = drm_encoder_init(rcdu->ddev, encoder, &encoder_funcs,
204 encoder_type);
205 if (ret < 0)
206 return ret;
191 207
192 switch (encoder_type) { 208 drm_encoder_helper_add(encoder, &encoder_helper_funcs);
193 case DRM_MODE_ENCODER_LVDS: {
194 const struct rcar_du_panel_data *pdata =
195 data ? &data->connector.lvds.panel : NULL;
196 return rcar_du_lvds_connector_init(rcdu, renc, pdata, np);
197 } 209 }
198 210
211 switch (encoder_type) {
212 case DRM_MODE_ENCODER_LVDS:
213 return rcar_du_lvds_connector_init(rcdu, renc, con_node);
214
199 case DRM_MODE_ENCODER_DAC: 215 case DRM_MODE_ENCODER_DAC:
200 return rcar_du_vga_connector_init(rcdu, renc); 216 return rcar_du_vga_connector_init(rcdu, renc);
201 217
218 case DRM_MODE_ENCODER_TMDS:
219 return rcar_du_hdmi_connector_init(rcdu, renc);
220
202 default: 221 default:
203 return -EINVAL; 222 return -EINVAL;
204 } 223 }
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
index bd624135ef1f..719b6f2a031c 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.h
@@ -14,21 +14,32 @@
14#ifndef __RCAR_DU_ENCODER_H__ 14#ifndef __RCAR_DU_ENCODER_H__
15#define __RCAR_DU_ENCODER_H__ 15#define __RCAR_DU_ENCODER_H__
16 16
17#include <linux/platform_data/rcar-du.h>
18
19#include <drm/drm_crtc.h> 17#include <drm/drm_crtc.h>
18#include <drm/drm_encoder_slave.h>
20 19
21struct rcar_du_device; 20struct rcar_du_device;
21struct rcar_du_hdmienc;
22struct rcar_du_lvdsenc; 22struct rcar_du_lvdsenc;
23 23
24enum rcar_du_encoder_type {
25 RCAR_DU_ENCODER_UNUSED = 0,
26 RCAR_DU_ENCODER_NONE,
27 RCAR_DU_ENCODER_VGA,
28 RCAR_DU_ENCODER_LVDS,
29 RCAR_DU_ENCODER_HDMI,
30};
31
24struct rcar_du_encoder { 32struct rcar_du_encoder {
25 struct drm_encoder encoder; 33 struct drm_encoder_slave slave;
26 enum rcar_du_output output; 34 enum rcar_du_output output;
35 struct rcar_du_hdmienc *hdmi;
27 struct rcar_du_lvdsenc *lvds; 36 struct rcar_du_lvdsenc *lvds;
28}; 37};
29 38
30#define to_rcar_encoder(e) \ 39#define to_rcar_encoder(e) \
31 container_of(e, struct rcar_du_encoder, encoder) 40 container_of(e, struct rcar_du_encoder, slave.base)
41
42#define rcar_encoder_to_drm_encoder(e) (&(e)->slave.base)
32 43
33struct rcar_du_connector { 44struct rcar_du_connector {
34 struct drm_connector connector; 45 struct drm_connector connector;
@@ -44,7 +55,7 @@ rcar_du_connector_best_encoder(struct drm_connector *connector);
44int rcar_du_encoder_init(struct rcar_du_device *rcdu, 55int rcar_du_encoder_init(struct rcar_du_device *rcdu,
45 enum rcar_du_encoder_type type, 56 enum rcar_du_encoder_type type,
46 enum rcar_du_output output, 57 enum rcar_du_output output,
47 const struct rcar_du_encoder_data *data, 58 struct device_node *enc_node,
48 struct device_node *np); 59 struct device_node *con_node);
49 60
50#endif /* __RCAR_DU_ENCODER_H__ */ 61#endif /* __RCAR_DU_ENCODER_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c
new file mode 100644
index 000000000000..4d7d4dd46d26
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c
@@ -0,0 +1,121 @@
1/*
2 * R-Car Display Unit HDMI Connector
3 *
4 * Copyright (C) 2014 Renesas Electronics Corporation
5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <drm/drmP.h>
15#include <drm/drm_crtc.h>
16#include <drm/drm_crtc_helper.h>
17#include <drm/drm_encoder_slave.h>
18
19#include "rcar_du_drv.h"
20#include "rcar_du_encoder.h"
21#include "rcar_du_hdmicon.h"
22#include "rcar_du_kms.h"
23
24#define to_slave_funcs(e) (to_rcar_encoder(e)->slave.slave_funcs)
25
26static int rcar_du_hdmi_connector_get_modes(struct drm_connector *connector)
27{
28 struct rcar_du_connector *con = to_rcar_connector(connector);
29 struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(con->encoder);
30 struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
31
32 if (sfuncs->get_modes == NULL)
33 return 0;
34
35 return sfuncs->get_modes(encoder, connector);
36}
37
38static int rcar_du_hdmi_connector_mode_valid(struct drm_connector *connector,
39 struct drm_display_mode *mode)
40{
41 struct rcar_du_connector *con = to_rcar_connector(connector);
42 struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(con->encoder);
43 struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
44
45 if (sfuncs->mode_valid == NULL)
46 return MODE_OK;
47
48 return sfuncs->mode_valid(encoder, mode);
49}
50
51static const struct drm_connector_helper_funcs connector_helper_funcs = {
52 .get_modes = rcar_du_hdmi_connector_get_modes,
53 .mode_valid = rcar_du_hdmi_connector_mode_valid,
54 .best_encoder = rcar_du_connector_best_encoder,
55};
56
57static void rcar_du_hdmi_connector_destroy(struct drm_connector *connector)
58{
59 drm_connector_unregister(connector);
60 drm_connector_cleanup(connector);
61}
62
63static enum drm_connector_status
64rcar_du_hdmi_connector_detect(struct drm_connector *connector, bool force)
65{
66 struct rcar_du_connector *con = to_rcar_connector(connector);
67 struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(con->encoder);
68 struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
69
70 if (sfuncs->detect == NULL)
71 return connector_status_unknown;
72
73 return sfuncs->detect(encoder, connector);
74}
75
76static const struct drm_connector_funcs connector_funcs = {
77 .dpms = drm_helper_connector_dpms,
78 .detect = rcar_du_hdmi_connector_detect,
79 .fill_modes = drm_helper_probe_single_connector_modes,
80 .destroy = rcar_du_hdmi_connector_destroy,
81};
82
83int rcar_du_hdmi_connector_init(struct rcar_du_device *rcdu,
84 struct rcar_du_encoder *renc)
85{
86 struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(renc);
87 struct rcar_du_connector *rcon;
88 struct drm_connector *connector;
89 int ret;
90
91 rcon = devm_kzalloc(rcdu->dev, sizeof(*rcon), GFP_KERNEL);
92 if (rcon == NULL)
93 return -ENOMEM;
94
95 connector = &rcon->connector;
96 connector->display_info.width_mm = 0;
97 connector->display_info.height_mm = 0;
98
99 ret = drm_connector_init(rcdu->ddev, connector, &connector_funcs,
100 DRM_MODE_CONNECTOR_HDMIA);
101 if (ret < 0)
102 return ret;
103
104 drm_connector_helper_add(connector, &connector_helper_funcs);
105 ret = drm_connector_register(connector);
106 if (ret < 0)
107 return ret;
108
109 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
110 drm_object_property_set_value(&connector->base,
111 rcdu->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF);
112
113 ret = drm_mode_connector_attach_encoder(connector, encoder);
114 if (ret < 0)
115 return ret;
116
117 connector->encoder = encoder;
118 rcon->encoder = renc;
119
120 return 0;
121}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.h b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.h
new file mode 100644
index 000000000000..87daa949227f
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.h
@@ -0,0 +1,31 @@
1/*
2 * R-Car Display Unit HDMI Connector
3 *
4 * Copyright (C) 2014 Renesas Electronics Corporation
5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#ifndef __RCAR_DU_HDMICON_H__
15#define __RCAR_DU_HDMICON_H__
16
17struct rcar_du_device;
18struct rcar_du_encoder;
19
20#if IS_ENABLED(CONFIG_DRM_RCAR_HDMI)
21int rcar_du_hdmi_connector_init(struct rcar_du_device *rcdu,
22 struct rcar_du_encoder *renc);
23#else
24static inline int rcar_du_hdmi_connector_init(struct rcar_du_device *rcdu,
25 struct rcar_du_encoder *renc)
26{
27 return -ENOSYS;
28}
29#endif
30
31#endif /* __RCAR_DU_HDMICON_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
new file mode 100644
index 000000000000..359bc999a9c8
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.c
@@ -0,0 +1,151 @@
1/*
2 * R-Car Display Unit HDMI Encoder
3 *
4 * Copyright (C) 2014 Renesas Electronics Corporation
5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <linux/slab.h>
15
16#include <drm/drmP.h>
17#include <drm/drm_crtc.h>
18#include <drm/drm_crtc_helper.h>
19#include <drm/drm_encoder_slave.h>
20
21#include "rcar_du_drv.h"
22#include "rcar_du_encoder.h"
23#include "rcar_du_hdmienc.h"
24
25struct rcar_du_hdmienc {
26 struct rcar_du_encoder *renc;
27 struct device *dev;
28 int dpms;
29};
30
31#define to_rcar_hdmienc(e) (to_rcar_encoder(e)->hdmi)
32#define to_slave_funcs(e) (to_rcar_encoder(e)->slave.slave_funcs)
33
34static void rcar_du_hdmienc_dpms(struct drm_encoder *encoder, int mode)
35{
36 struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
37 struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
38
39 if (hdmienc->dpms == mode)
40 return;
41
42 if (sfuncs->dpms)
43 sfuncs->dpms(encoder, mode);
44
45 hdmienc->dpms = mode;
46}
47
48static bool rcar_du_hdmienc_mode_fixup(struct drm_encoder *encoder,
49 const struct drm_display_mode *mode,
50 struct drm_display_mode *adjusted_mode)
51{
52 struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
53
54 if (sfuncs->mode_fixup == NULL)
55 return true;
56
57 return sfuncs->mode_fixup(encoder, mode, adjusted_mode);
58}
59
60static void rcar_du_hdmienc_mode_prepare(struct drm_encoder *encoder)
61{
62 rcar_du_hdmienc_dpms(encoder, DRM_MODE_DPMS_OFF);
63}
64
65static void rcar_du_hdmienc_mode_commit(struct drm_encoder *encoder)
66{
67 rcar_du_hdmienc_dpms(encoder, DRM_MODE_DPMS_ON);
68}
69
70static void rcar_du_hdmienc_mode_set(struct drm_encoder *encoder,
71 struct drm_display_mode *mode,
72 struct drm_display_mode *adjusted_mode)
73{
74 struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
75 struct drm_encoder_slave_funcs *sfuncs = to_slave_funcs(encoder);
76
77 if (sfuncs->mode_set)
78 sfuncs->mode_set(encoder, mode, adjusted_mode);
79
80 rcar_du_crtc_route_output(encoder->crtc, hdmienc->renc->output);
81}
82
83static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
84 .dpms = rcar_du_hdmienc_dpms,
85 .mode_fixup = rcar_du_hdmienc_mode_fixup,
86 .prepare = rcar_du_hdmienc_mode_prepare,
87 .commit = rcar_du_hdmienc_mode_commit,
88 .mode_set = rcar_du_hdmienc_mode_set,
89};
90
91static void rcar_du_hdmienc_cleanup(struct drm_encoder *encoder)
92{
93 struct rcar_du_hdmienc *hdmienc = to_rcar_hdmienc(encoder);
94
95 rcar_du_hdmienc_dpms(encoder, DRM_MODE_DPMS_OFF);
96
97 drm_encoder_cleanup(encoder);
98 put_device(hdmienc->dev);
99}
100
101static const struct drm_encoder_funcs encoder_funcs = {
102 .destroy = rcar_du_hdmienc_cleanup,
103};
104
105int rcar_du_hdmienc_init(struct rcar_du_device *rcdu,
106 struct rcar_du_encoder *renc, struct device_node *np)
107{
108 struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(renc);
109 struct drm_i2c_encoder_driver *driver;
110 struct i2c_client *i2c_slave;
111 struct rcar_du_hdmienc *hdmienc;
112 int ret;
113
114 hdmienc = devm_kzalloc(rcdu->dev, sizeof(*hdmienc), GFP_KERNEL);
115 if (hdmienc == NULL)
116 return -ENOMEM;
117
118 /* Locate the slave I2C device and driver. */
119 i2c_slave = of_find_i2c_device_by_node(np);
120 if (!i2c_slave || !i2c_get_clientdata(i2c_slave))
121 return -EPROBE_DEFER;
122
123 hdmienc->dev = &i2c_slave->dev;
124
125 if (hdmienc->dev->driver == NULL) {
126 ret = -EPROBE_DEFER;
127 goto error;
128 }
129
130 /* Initialize the slave encoder. */
131 driver = to_drm_i2c_encoder_driver(to_i2c_driver(hdmienc->dev->driver));
132 ret = driver->encoder_init(i2c_slave, rcdu->ddev, &renc->slave);
133 if (ret < 0)
134 goto error;
135
136 ret = drm_encoder_init(rcdu->ddev, encoder, &encoder_funcs,
137 DRM_MODE_ENCODER_TMDS);
138 if (ret < 0)
139 goto error;
140
141 drm_encoder_helper_add(encoder, &encoder_helper_funcs);
142
143 renc->hdmi = hdmienc;
144 hdmienc->renc = renc;
145
146 return 0;
147
148error:
149 put_device(hdmienc->dev);
150 return ret;
151}
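To keep the pointer gymnastics in this file and rcar_du_hdmicon.c straight, here is how the casts resolve given the rcar_du_encoder layout from rcar_du_encoder.h above (comment-only sketch):

/*
 * struct rcar_du_encoder embeds struct drm_encoder_slave as "slave",
 * and slave.base is the struct drm_encoder handed to the DRM core.
 *
 *   drm_encoder *e
 *     to_rcar_encoder(e)     container_of(e, struct rcar_du_encoder,
 *                                         slave.base)
 *     to_slave_funcs(e)      to_rcar_encoder(e)->slave.slave_funcs,
 *                            installed by driver->encoder_init() above
 *     to_rcar_hdmienc(e)     to_rcar_encoder(e)->hdmi, this file's
 *                            per-encoder bookkeeping
 */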
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.h b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.h
new file mode 100644
index 000000000000..2ff0128ac8e1
--- /dev/null
+++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmienc.h
@@ -0,0 +1,35 @@
1/*
2 * R-Car Display Unit HDMI Encoder
3 *
4 * Copyright (C) 2014 Renesas Electronics Corporation
5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#ifndef __RCAR_DU_HDMIENC_H__
15#define __RCAR_DU_HDMIENC_H__
16
17#include <linux/module.h>
18
19struct device_node;
20struct rcar_du_device;
21struct rcar_du_encoder;
22
23#if IS_ENABLED(CONFIG_DRM_RCAR_HDMI)
24int rcar_du_hdmienc_init(struct rcar_du_device *rcdu,
25 struct rcar_du_encoder *renc, struct device_node *np);
26#else
27static inline int rcar_du_hdmienc_init(struct rcar_du_device *rcdu,
28 struct rcar_du_encoder *renc,
29 struct device_node *np)
30{
31 return -ENOSYS;
32}
33#endif
34
35#endif /* __RCAR_DU_HDMIENC_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index 6c24ad7d03ef..0c5ee616b5a3 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -126,9 +126,9 @@ int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev,
126 else 126 else
127 align = 16 * args->bpp / 8; 127 align = 16 * args->bpp / 8;
128 128
129 args->pitch = roundup(max(args->pitch, min_pitch), align); 129 args->pitch = roundup(min_pitch, align);
130 130
131 return drm_gem_cma_dumb_create(file, dev, args); 131 return drm_gem_cma_dumb_create_internal(file, dev, args);
132} 132}
133 133
134static struct drm_framebuffer * 134static struct drm_framebuffer *
@@ -190,49 +190,16 @@ static const struct drm_mode_config_funcs rcar_du_mode_config_funcs = {
190 .output_poll_changed = rcar_du_output_poll_changed, 190 .output_poll_changed = rcar_du_output_poll_changed,
191}; 191};
192 192
193static int rcar_du_encoders_init_pdata(struct rcar_du_device *rcdu) 193static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
194{ 194 enum rcar_du_output output,
195 unsigned int num_encoders = 0; 195 struct of_endpoint *ep)
196 unsigned int i;
197 int ret;
198
199 for (i = 0; i < rcdu->pdata->num_encoders; ++i) {
200 const struct rcar_du_encoder_data *pdata =
201 &rcdu->pdata->encoders[i];
202 const struct rcar_du_output_routing *route =
203 &rcdu->info->routes[pdata->output];
204
205 if (pdata->type == RCAR_DU_ENCODER_UNUSED)
206 continue;
207
208 if (pdata->output >= RCAR_DU_OUTPUT_MAX ||
209 route->possible_crtcs == 0) {
210 dev_warn(rcdu->dev,
211 "encoder %u references unexisting output %u, skipping\n",
212 i, pdata->output);
213 continue;
214 }
215
216 ret = rcar_du_encoder_init(rcdu, pdata->type, pdata->output,
217 pdata, NULL);
218 if (ret < 0)
219 return ret;
220
221 num_encoders++;
222 }
223
224 return num_encoders;
225}
226
227static int rcar_du_encoders_init_dt_one(struct rcar_du_device *rcdu,
228 enum rcar_du_output output,
229 struct of_endpoint *ep)
230{ 196{
231 static const struct { 197 static const struct {
232 const char *compatible; 198 const char *compatible;
233 enum rcar_du_encoder_type type; 199 enum rcar_du_encoder_type type;
234 } encoders[] = { 200 } encoders[] = {
235 { "adi,adv7123", RCAR_DU_ENCODER_VGA }, 201 { "adi,adv7123", RCAR_DU_ENCODER_VGA },
202 { "adi,adv7511w", RCAR_DU_ENCODER_HDMI },
236 { "thine,thc63lvdm83d", RCAR_DU_ENCODER_LVDS }, 203 { "thine,thc63lvdm83d", RCAR_DU_ENCODER_LVDS },
237 }; 204 };
238 205
@@ -323,14 +290,14 @@ static int rcar_du_encoders_init_dt_one(struct rcar_du_device *rcdu,
323 connector = entity; 290 connector = entity;
324 } 291 }
325 292
326 ret = rcar_du_encoder_init(rcdu, enc_type, output, NULL, connector); 293 ret = rcar_du_encoder_init(rcdu, enc_type, output, encoder, connector);
327 of_node_put(encoder); 294 of_node_put(encoder);
328 of_node_put(connector); 295 of_node_put(connector);
329 296
330 return ret < 0 ? ret : 1; 297 return ret < 0 ? ret : 1;
331} 298}
332 299
333static int rcar_du_encoders_init_dt(struct rcar_du_device *rcdu) 300static int rcar_du_encoders_init(struct rcar_du_device *rcdu)
334{ 301{
335 struct device_node *np = rcdu->dev->of_node; 302 struct device_node *np = rcdu->dev->of_node;
336 struct device_node *prev = NULL; 303 struct device_node *prev = NULL;
@@ -377,7 +344,7 @@ static int rcar_du_encoders_init_dt(struct rcar_du_device *rcdu)
377 } 344 }
378 345
379 /* Process the output pipeline. */ 346 /* Process the output pipeline. */
380 ret = rcar_du_encoders_init_dt_one(rcdu, output, &ep); 347 ret = rcar_du_encoders_init_one(rcdu, output, &ep);
381 if (ret < 0) { 348 if (ret < 0) {
382 of_node_put(ep_node); 349 of_node_put(ep_node);
383 return ret; 350 return ret;
@@ -442,11 +409,7 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu)
442 if (ret < 0) 409 if (ret < 0)
443 return ret; 410 return ret;
444 411
445 if (rcdu->pdata) 412 ret = rcar_du_encoders_init(rcdu);
446 ret = rcar_du_encoders_init_pdata(rcdu);
447 else
448 ret = rcar_du_encoders_init_dt(rcdu);
449
450 if (ret < 0) 413 if (ret < 0)
451 return ret; 414 return ret;
452 415
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
index 115eed20db12..6d9811c052c4 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
@@ -27,7 +27,11 @@
27struct rcar_du_lvds_connector { 27struct rcar_du_lvds_connector {
28 struct rcar_du_connector connector; 28 struct rcar_du_connector connector;
29 29
30 struct rcar_du_panel_data panel; 30 struct {
31 unsigned int width_mm; /* Panel width in mm */
32 unsigned int height_mm; /* Panel height in mm */
33 struct videomode mode;
34 } panel;
31}; 35};
32 36
33#define to_rcar_lvds_connector(c) \ 37#define to_rcar_lvds_connector(c) \
@@ -78,31 +82,26 @@ static const struct drm_connector_funcs connector_funcs = {
78 82
79int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu, 83int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
80 struct rcar_du_encoder *renc, 84 struct rcar_du_encoder *renc,
81 const struct rcar_du_panel_data *panel,
82 /* TODO const */ struct device_node *np) 85 /* TODO const */ struct device_node *np)
83{ 86{
87 struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(renc);
84 struct rcar_du_lvds_connector *lvdscon; 88 struct rcar_du_lvds_connector *lvdscon;
85 struct drm_connector *connector; 89 struct drm_connector *connector;
90 struct display_timing timing;
86 int ret; 91 int ret;
87 92
88 lvdscon = devm_kzalloc(rcdu->dev, sizeof(*lvdscon), GFP_KERNEL); 93 lvdscon = devm_kzalloc(rcdu->dev, sizeof(*lvdscon), GFP_KERNEL);
89 if (lvdscon == NULL) 94 if (lvdscon == NULL)
90 return -ENOMEM; 95 return -ENOMEM;
91 96
92 if (panel) { 97 ret = of_get_display_timing(np, "panel-timing", &timing);
93 lvdscon->panel = *panel; 98 if (ret < 0)
94 } else { 99 return ret;
95 struct display_timing timing;
96
97 ret = of_get_display_timing(np, "panel-timing", &timing);
98 if (ret < 0)
99 return ret;
100 100
101 videomode_from_timing(&timing, &lvdscon->panel.mode); 101 videomode_from_timing(&timing, &lvdscon->panel.mode);
102 102
103 of_property_read_u32(np, "width-mm", &lvdscon->panel.width_mm); 103 of_property_read_u32(np, "width-mm", &lvdscon->panel.width_mm);
104 of_property_read_u32(np, "height-mm", &lvdscon->panel.height_mm); 104 of_property_read_u32(np, "height-mm", &lvdscon->panel.height_mm);
105 }
106 105
107 connector = &lvdscon->connector.connector; 106 connector = &lvdscon->connector.connector;
108 connector->display_info.width_mm = lvdscon->panel.width_mm; 107 connector->display_info.width_mm = lvdscon->panel.width_mm;
@@ -122,11 +121,11 @@ int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
122 drm_object_property_set_value(&connector->base, 121 drm_object_property_set_value(&connector->base,
123 rcdu->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF); 122 rcdu->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF);
124 123
125 ret = drm_mode_connector_attach_encoder(connector, &renc->encoder); 124 ret = drm_mode_connector_attach_encoder(connector, encoder);
126 if (ret < 0) 125 if (ret < 0)
127 return ret; 126 return ret;
128 127
129 connector->encoder = &renc->encoder; 128 connector->encoder = encoder;
130 lvdscon->connector.encoder = renc; 129 lvdscon->connector.encoder = renc;
131 130
132 return 0; 131 return 0;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h
index d11424d537f9..d4881ee0be7e 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.h
@@ -16,11 +16,9 @@
16 16
17struct rcar_du_device; 17struct rcar_du_device;
18struct rcar_du_encoder; 18struct rcar_du_encoder;
19struct rcar_du_panel_data;
20 19
21int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu, 20int rcar_du_lvds_connector_init(struct rcar_du_device *rcdu,
22 struct rcar_du_encoder *renc, 21 struct rcar_du_encoder *renc,
23 const struct rcar_du_panel_data *panel,
24 struct device_node *np); 22 struct device_node *np);
25 23
26#endif /* __RCAR_DU_LVDSCON_H__ */ 24#endif /* __RCAR_DU_LVDSCON_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h
index 3303a55cec79..f65aabda0796 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.h
@@ -16,7 +16,6 @@
16 16
17#include <linux/io.h> 17#include <linux/io.h>
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/platform_data/rcar-du.h>
20 19
21struct rcar_drm_crtc; 20struct rcar_drm_crtc;
22struct rcar_du_lvdsenc; 21struct rcar_du_lvdsenc;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
index 564a723ede03..752747a5e920 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
@@ -52,6 +52,7 @@ static const struct drm_connector_funcs connector_funcs = {
52int rcar_du_vga_connector_init(struct rcar_du_device *rcdu, 52int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
53 struct rcar_du_encoder *renc) 53 struct rcar_du_encoder *renc)
54{ 54{
55 struct drm_encoder *encoder = rcar_encoder_to_drm_encoder(renc);
55 struct rcar_du_connector *rcon; 56 struct rcar_du_connector *rcon;
56 struct drm_connector *connector; 57 struct drm_connector *connector;
57 int ret; 58 int ret;
@@ -78,11 +79,11 @@ int rcar_du_vga_connector_init(struct rcar_du_device *rcdu,
78 drm_object_property_set_value(&connector->base, 79 drm_object_property_set_value(&connector->base,
79 rcdu->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF); 80 rcdu->ddev->mode_config.dpms_property, DRM_MODE_DPMS_OFF);
80 81
81 ret = drm_mode_connector_attach_encoder(connector, &renc->encoder); 82 ret = drm_mode_connector_attach_encoder(connector, encoder);
82 if (ret < 0) 83 if (ret < 0)
83 return ret; 84 return ret;
84 85
85 connector->encoder = &renc->encoder; 86 connector->encoder = encoder;
86 rcon->encoder = renc; 87 rcon->encoder = renc;
87 88
88 return 0; 89 return 0;
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
new file mode 100644
index 000000000000..ca9f085efa92
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -0,0 +1,17 @@
1config DRM_ROCKCHIP
2 tristate "DRM Support for Rockchip"
3 depends on DRM && ROCKCHIP_IOMMU
4 select DRM_KMS_HELPER
5 select DRM_KMS_FB_HELPER
6 select DRM_PANEL
7 select FB_CFB_FILLRECT
8 select FB_CFB_COPYAREA
9 select FB_CFB_IMAGEBLIT
10 select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
11 select VIDEOMODE_HELPERS
12 help
13 Choose this option if you have a Rockchip SoC chipset.
14 This driver provides kernel mode setting and buffer
15 management to userspace. This driver does not provide
16 2D or 3D acceleration; acceleration is performed by other
17 IP found on the SoC.
diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile
new file mode 100644
index 000000000000..2cb0672f57ed
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/Makefile
@@ -0,0 +1,8 @@
1#
2# Makefile for the drm device driver. This driver provides support for the
3# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
4
5rockchipdrm-y := rockchip_drm_drv.o rockchip_drm_fb.o rockchip_drm_fbdev.o \
6 rockchip_drm_gem.o
7
8obj-$(CONFIG_DRM_ROCKCHIP) += rockchipdrm.o rockchip_drm_vop.o
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
new file mode 100644
index 000000000000..a798c7c71f91
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -0,0 +1,551 @@
1/*
2 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
3 * Author:Mark Yao <mark.yao@rock-chips.com>
4 *
5 * based on exynos_drm_drv.c
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#include <asm/dma-iommu.h>
18
19#include <drm/drmP.h>
20#include <drm/drm_crtc_helper.h>
21#include <drm/drm_fb_helper.h>
22#include <linux/dma-mapping.h>
23#include <linux/pm_runtime.h>
24#include <linux/of_graph.h>
25#include <linux/component.h>
26
27#include "rockchip_drm_drv.h"
28#include "rockchip_drm_fb.h"
29#include "rockchip_drm_fbdev.h"
30#include "rockchip_drm_gem.h"
31
32#define DRIVER_NAME "rockchip"
33#define DRIVER_DESC	"Rockchip SoC DRM"
34#define DRIVER_DATE "20140818"
35#define DRIVER_MAJOR 1
36#define DRIVER_MINOR 0
37
38/*
39 * Attach a (component) device to the shared drm dma mapping from master drm
40 * device. This is used by the VOPs to map GEM buffers to a common DMA
41 * mapping.
42 */
43int rockchip_drm_dma_attach_device(struct drm_device *drm_dev,
44 struct device *dev)
45{
46 struct dma_iommu_mapping *mapping = drm_dev->dev->archdata.mapping;
47 int ret;
48
49 ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
50 if (ret)
51 return ret;
52
53 dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
54
55 return arm_iommu_attach_device(dev, mapping);
56}
57EXPORT_SYMBOL_GPL(rockchip_drm_dma_attach_device);
58
59void rockchip_drm_dma_detach_device(struct drm_device *drm_dev,
60 struct device *dev)
61{
62 arm_iommu_detach_device(dev);
63}
64EXPORT_SYMBOL_GPL(rockchip_drm_dma_detach_device);
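/*
 * Illustrative usage (editor's sketch, not part of the patch): a component
 * driver enables its clocks first, then joins the shared mapping, and
 * detaches again before the clocks are cut, mirroring what the VOP
 * enable/disable paths later in this patch do.
 *
 *	ret = rockchip_drm_dma_attach_device(drm_dev, dev);
 *	if (ret)
 *		goto err_disable_clocks;
 *	...
 *	rockchip_drm_dma_detach_device(drm_dev, dev);
 */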
65
66int rockchip_register_crtc_funcs(struct drm_device *dev,
67 const struct rockchip_crtc_funcs *crtc_funcs,
68 int pipe)
69{
70 struct rockchip_drm_private *priv = dev->dev_private;
71
72	if (pipe >= ROCKCHIP_MAX_CRTC)
73 return -EINVAL;
74
75 priv->crtc_funcs[pipe] = crtc_funcs;
76
77 return 0;
78}
79EXPORT_SYMBOL_GPL(rockchip_register_crtc_funcs);
80
81void rockchip_unregister_crtc_funcs(struct drm_device *dev, int pipe)
82{
83 struct rockchip_drm_private *priv = dev->dev_private;
84
85	if (pipe >= ROCKCHIP_MAX_CRTC)
86 return;
87
88 priv->crtc_funcs[pipe] = NULL;
89}
90EXPORT_SYMBOL_GPL(rockchip_unregister_crtc_funcs);
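/*
 * Illustrative sketch (not part of the patch): a CRTC driver implements the
 * two vblank hooks declared in rockchip_drm_drv.h and registers them against
 * its pipe; the vop_crtc_* names below are hypothetical.
 *
 *	static const struct rockchip_crtc_funcs vop_crtc_funcs = {
 *		.enable_vblank = vop_crtc_enable_vblank,
 *		.disable_vblank = vop_crtc_disable_vblank,
 *	};
 *
 *	ret = rockchip_register_crtc_funcs(drm_dev, &vop_crtc_funcs, pipe);
 *	...
 *	rockchip_unregister_crtc_funcs(drm_dev, pipe);
 */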
91
92static struct drm_crtc *rockchip_crtc_from_pipe(struct drm_device *drm,
93 int pipe)
94{
95 struct drm_crtc *crtc;
96 int i = 0;
97
98 list_for_each_entry(crtc, &drm->mode_config.crtc_list, head)
99 if (i++ == pipe)
100 return crtc;
101
102 return NULL;
103}
104
105static int rockchip_drm_crtc_enable_vblank(struct drm_device *dev, int pipe)
106{
107 struct rockchip_drm_private *priv = dev->dev_private;
108 struct drm_crtc *crtc = rockchip_crtc_from_pipe(dev, pipe);
109
110 if (crtc && priv->crtc_funcs[pipe] &&
111 priv->crtc_funcs[pipe]->enable_vblank)
112 return priv->crtc_funcs[pipe]->enable_vblank(crtc);
113
114 return 0;
115}
116
117static void rockchip_drm_crtc_disable_vblank(struct drm_device *dev, int pipe)
118{
119 struct rockchip_drm_private *priv = dev->dev_private;
120 struct drm_crtc *crtc = rockchip_crtc_from_pipe(dev, pipe);
121
122 if (crtc && priv->crtc_funcs[pipe] &&
123	    priv->crtc_funcs[pipe]->disable_vblank)
124 priv->crtc_funcs[pipe]->disable_vblank(crtc);
125}
126
127static int rockchip_drm_load(struct drm_device *drm_dev, unsigned long flags)
128{
129 struct rockchip_drm_private *private;
130 struct dma_iommu_mapping *mapping;
131 struct device *dev = drm_dev->dev;
132 int ret;
133
134 private = devm_kzalloc(drm_dev->dev, sizeof(*private), GFP_KERNEL);
135 if (!private)
136 return -ENOMEM;
137
138 drm_dev->dev_private = private;
139
140 drm_mode_config_init(drm_dev);
141
142 rockchip_drm_mode_config_init(drm_dev);
143
144 dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
145 GFP_KERNEL);
146 if (!dev->dma_parms) {
147 ret = -ENOMEM;
148 goto err_config_cleanup;
149 }
150
151 /* TODO(djkurtz): fetch the mapping start/size from somewhere */
152 mapping = arm_iommu_create_mapping(&platform_bus_type, 0x00000000,
153 SZ_2G);
154 if (IS_ERR(mapping)) {
155 ret = PTR_ERR(mapping);
156 goto err_config_cleanup;
157 }
158
159 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
160 if (ret)
161 goto err_release_mapping;
162
163 dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
164
165 ret = arm_iommu_attach_device(dev, mapping);
166 if (ret)
167 goto err_release_mapping;
168
169 /* Try to bind all sub drivers. */
170 ret = component_bind_all(dev, drm_dev);
171 if (ret)
172 goto err_detach_device;
173
174 /* init kms poll for handling hpd */
175 drm_kms_helper_poll_init(drm_dev);
176
177	/*
178	 * Enable drm irq mode: with irq_enabled = true, the vblank
179	 * machinery can be used.
180	 */
181 drm_dev->irq_enabled = true;
182
183 ret = drm_vblank_init(drm_dev, ROCKCHIP_MAX_CRTC);
184 if (ret)
185 goto err_kms_helper_poll_fini;
186
187	/*
188	 * With vblank_disable_allowed = true, the vblank interrupt is
189	 * disabled by the drm timer once the last user gives up ownership
190	 * of the vblank event (i.e. after drm_vblank_put() is called).
191	 */
192 drm_dev->vblank_disable_allowed = true;
193
194 ret = rockchip_drm_fbdev_init(drm_dev);
195 if (ret)
196 goto err_vblank_cleanup;
197
198 return 0;
199err_vblank_cleanup:
200 drm_vblank_cleanup(drm_dev);
201err_kms_helper_poll_fini:
202 drm_kms_helper_poll_fini(drm_dev);
203 component_unbind_all(dev, drm_dev);
204err_detach_device:
205 arm_iommu_detach_device(dev);
206err_release_mapping:
207 arm_iommu_release_mapping(dev->archdata.mapping);
208err_config_cleanup:
209 drm_mode_config_cleanup(drm_dev);
210 drm_dev->dev_private = NULL;
211 return ret;
212}
213
214static int rockchip_drm_unload(struct drm_device *drm_dev)
215{
216 struct device *dev = drm_dev->dev;
217
218 rockchip_drm_fbdev_fini(drm_dev);
219 drm_vblank_cleanup(drm_dev);
220 drm_kms_helper_poll_fini(drm_dev);
221 component_unbind_all(dev, drm_dev);
222 arm_iommu_detach_device(dev);
223 arm_iommu_release_mapping(dev->archdata.mapping);
224 drm_mode_config_cleanup(drm_dev);
225 drm_dev->dev_private = NULL;
226
227 return 0;
228}
229
230static void rockchip_drm_lastclose(struct drm_device *dev)
231{
232 struct rockchip_drm_private *priv = dev->dev_private;
233
234 drm_fb_helper_restore_fbdev_mode_unlocked(&priv->fbdev_helper);
235}
236
237static const struct file_operations rockchip_drm_driver_fops = {
238 .owner = THIS_MODULE,
239 .open = drm_open,
240 .mmap = rockchip_gem_mmap,
241 .poll = drm_poll,
242 .read = drm_read,
243 .unlocked_ioctl = drm_ioctl,
244#ifdef CONFIG_COMPAT
245 .compat_ioctl = drm_compat_ioctl,
246#endif
247 .release = drm_release,
248};
249
250const struct vm_operations_struct rockchip_drm_vm_ops = {
251 .open = drm_gem_vm_open,
252 .close = drm_gem_vm_close,
253};
254
255static struct drm_driver rockchip_drm_driver = {
256 .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME,
257 .load = rockchip_drm_load,
258 .unload = rockchip_drm_unload,
259 .lastclose = rockchip_drm_lastclose,
260 .get_vblank_counter = drm_vblank_count,
261 .enable_vblank = rockchip_drm_crtc_enable_vblank,
262 .disable_vblank = rockchip_drm_crtc_disable_vblank,
263 .gem_vm_ops = &rockchip_drm_vm_ops,
264 .gem_free_object = rockchip_gem_free_object,
265 .dumb_create = rockchip_gem_dumb_create,
266 .dumb_map_offset = rockchip_gem_dumb_map_offset,
267 .dumb_destroy = drm_gem_dumb_destroy,
268 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
269 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
270 .gem_prime_import = drm_gem_prime_import,
271 .gem_prime_export = drm_gem_prime_export,
272 .gem_prime_get_sg_table = rockchip_gem_prime_get_sg_table,
273 .gem_prime_vmap = rockchip_gem_prime_vmap,
274 .gem_prime_vunmap = rockchip_gem_prime_vunmap,
275 .gem_prime_mmap = rockchip_gem_mmap_buf,
276 .fops = &rockchip_drm_driver_fops,
277 .name = DRIVER_NAME,
278 .desc = DRIVER_DESC,
279 .date = DRIVER_DATE,
280 .major = DRIVER_MAJOR,
281 .minor = DRIVER_MINOR,
282};
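/*
 * For reference (editor's sketch): the standard userspace flow served by the
 * dumb-buffer and mmap hooks above, with error handling omitted.
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = 1920, .height = 1080, .bpp = 32,
 *	};
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *
 *	struct drm_mode_map_dumb map = { .handle = create.handle };
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *
 *	void *ptr = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, map.offset);
 */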
283
284#ifdef CONFIG_PM_SLEEP
285static int rockchip_drm_sys_suspend(struct device *dev)
286{
287 struct drm_device *drm = dev_get_drvdata(dev);
288 struct drm_connector *connector;
289
290 if (!drm)
291 return 0;
292
293 drm_modeset_lock_all(drm);
294 list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
295 int old_dpms = connector->dpms;
296
297 if (connector->funcs->dpms)
298 connector->funcs->dpms(connector, DRM_MODE_DPMS_OFF);
299
300 /* Set the old mode back to the connector for resume */
301 connector->dpms = old_dpms;
302 }
303 drm_modeset_unlock_all(drm);
304
305 return 0;
306}
307
308static int rockchip_drm_sys_resume(struct device *dev)
309{
310 struct drm_device *drm = dev_get_drvdata(dev);
311 struct drm_connector *connector;
312 enum drm_connector_status status;
313 bool changed = false;
314
315 if (!drm)
316 return 0;
317
318 drm_modeset_lock_all(drm);
319 list_for_each_entry(connector, &drm->mode_config.connector_list, head) {
320 int desired_mode = connector->dpms;
321
322		/*
323		 * At suspend time each connector was turned off while its
324		 * desired mode was saved in connector->dpms, so at this
325		 * point the hardware state must be DRM_MODE_DPMS_OFF.
326		 */
327 connector->dpms = DRM_MODE_DPMS_OFF;
328
329 /*
330 * If the connector has been disconnected during suspend,
331 * disconnect it from the encoder and leave it off. We'll notify
332 * userspace at the end.
333 */
334 if (desired_mode == DRM_MODE_DPMS_ON) {
335 status = connector->funcs->detect(connector, true);
336 if (status == connector_status_disconnected) {
337 connector->encoder = NULL;
338 connector->status = status;
339 changed = true;
340 continue;
341 }
342 }
343 if (connector->funcs->dpms)
344 connector->funcs->dpms(connector, desired_mode);
345 }
346 drm_modeset_unlock_all(drm);
347
348 drm_helper_resume_force_mode(drm);
349
350 if (changed)
351 drm_kms_helper_hotplug_event(drm);
352
353 return 0;
354}
355#endif
356
357static const struct dev_pm_ops rockchip_drm_pm_ops = {
358 SET_SYSTEM_SLEEP_PM_OPS(rockchip_drm_sys_suspend,
359 rockchip_drm_sys_resume)
360};
361
362/*
363 * @node: device tree node containing the encoder's input ports
364 * @encoder: encoder whose attached CRTC selects which endpoint id to return
365 */
366int rockchip_drm_encoder_get_mux_id(struct device_node *node,
367 struct drm_encoder *encoder)
368{
369 struct device_node *ep = NULL;
370 struct drm_crtc *crtc = encoder->crtc;
371 struct of_endpoint endpoint;
372 struct device_node *port;
373 int ret;
374
375 if (!node || !crtc)
376 return -EINVAL;
377
378 do {
379 ep = of_graph_get_next_endpoint(node, ep);
380 if (!ep)
381 break;
382
383 port = of_graph_get_remote_port(ep);
384 of_node_put(port);
385 if (port == crtc->port) {
386 ret = of_graph_parse_endpoint(ep, &endpoint);
387 return ret ?: endpoint.id;
388 }
389 } while (ep);
390
391 return -EINVAL;
392}
393
394static int compare_of(struct device *dev, void *data)
395{
396 struct device_node *np = data;
397
398 return dev->of_node == np;
399}
400
401static void rockchip_add_endpoints(struct device *dev,
402 struct component_match **match,
403 struct device_node *port)
404{
405 struct device_node *ep, *remote;
406
407 for_each_child_of_node(port, ep) {
408 remote = of_graph_get_remote_port_parent(ep);
409 if (!remote || !of_device_is_available(remote)) {
410 of_node_put(remote);
411 continue;
412 } else if (!of_device_is_available(remote->parent)) {
413 dev_warn(dev, "parent device of %s is not available\n",
414 remote->full_name);
415 of_node_put(remote);
416 continue;
417 }
418
419 component_match_add(dev, match, compare_of, remote);
420 of_node_put(remote);
421 }
422}
423
424static int rockchip_drm_bind(struct device *dev)
425{
426 struct drm_device *drm;
427 int ret;
428
429 drm = drm_dev_alloc(&rockchip_drm_driver, dev);
430 if (!drm)
431 return -ENOMEM;
432
433 ret = drm_dev_set_unique(drm, "%s", dev_name(dev));
434 if (ret)
435 goto err_free;
436
437 ret = drm_dev_register(drm, 0);
438 if (ret)
439 goto err_free;
440
441 dev_set_drvdata(dev, drm);
442
443 return 0;
444
445err_free:
446 drm_dev_unref(drm);
447 return ret;
448}
449
450static void rockchip_drm_unbind(struct device *dev)
451{
452 struct drm_device *drm = dev_get_drvdata(dev);
453
454 drm_dev_unregister(drm);
455 drm_dev_unref(drm);
456 dev_set_drvdata(dev, NULL);
457}
458
459static const struct component_master_ops rockchip_drm_ops = {
460 .bind = rockchip_drm_bind,
461 .unbind = rockchip_drm_unbind,
462};
463
464static int rockchip_drm_platform_probe(struct platform_device *pdev)
465{
466 struct device *dev = &pdev->dev;
467 struct component_match *match = NULL;
468 struct device_node *np = dev->of_node;
469 struct device_node *port;
470 int i;
471
472 if (!np)
473 return -ENODEV;
474 /*
475 * Bind the crtc ports first, so that
476 * drm_of_find_possible_crtcs called from encoder .bind callbacks
477 * works as expected.
478 */
479 for (i = 0;; i++) {
480 port = of_parse_phandle(np, "ports", i);
481 if (!port)
482 break;
483
484 if (!of_device_is_available(port->parent)) {
485 of_node_put(port);
486 continue;
487 }
488
489 component_match_add(dev, &match, compare_of, port->parent);
490 of_node_put(port);
491 }
492
493 if (i == 0) {
494 dev_err(dev, "missing 'ports' property\n");
495 return -ENODEV;
496 }
497
498 if (!match) {
499 dev_err(dev, "No available vop found for display-subsystem.\n");
500 return -ENODEV;
501 }
502 /*
503 * For each bound crtc, bind the encoders attached to its
504 * remote endpoint.
505 */
506 for (i = 0;; i++) {
507 port = of_parse_phandle(np, "ports", i);
508 if (!port)
509 break;
510
511 if (!of_device_is_available(port->parent)) {
512 of_node_put(port);
513 continue;
514 }
515
516 rockchip_add_endpoints(dev, &match, port);
517 of_node_put(port);
518 }
519
520 return component_master_add_with_match(dev, &rockchip_drm_ops, match);
521}
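/*
 * For reference (editor's sketch): the probe above expects a device tree
 * node of roughly the shape below, where "ports" lists phandles to the
 * output port nodes of the available VOPs; the labels are hypothetical and
 * the binding document is authoritative.
 *
 *	display-subsystem {
 *		compatible = "rockchip,display-subsystem";
 *		ports = <&vop_big_out>, <&vop_lit_out>;
 *	};
 */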
522
523static int rockchip_drm_platform_remove(struct platform_device *pdev)
524{
525 component_master_del(&pdev->dev, &rockchip_drm_ops);
526
527 return 0;
528}
529
530static const struct of_device_id rockchip_drm_dt_ids[] = {
531 { .compatible = "rockchip,display-subsystem", },
532 { /* sentinel */ },
533};
534MODULE_DEVICE_TABLE(of, rockchip_drm_dt_ids);
535
536static struct platform_driver rockchip_drm_platform_driver = {
537 .probe = rockchip_drm_platform_probe,
538 .remove = rockchip_drm_platform_remove,
539 .driver = {
540 .owner = THIS_MODULE,
541 .name = "rockchip-drm",
542 .of_match_table = rockchip_drm_dt_ids,
543 .pm = &rockchip_drm_pm_ops,
544 },
545};
546
547module_platform_driver(rockchip_drm_platform_driver);
548
549MODULE_AUTHOR("Mark Yao <mark.yao@rock-chips.com>");
550MODULE_DESCRIPTION("Rockchip DRM Driver");
551MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
new file mode 100644
index 000000000000..dc4e5f03ac79
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -0,0 +1,68 @@
1/*
2 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
3 * Author:Mark Yao <mark.yao@rock-chips.com>
4 *
5 * based on exynos_drm_drv.h
6 *
7 * This software is licensed under the terms of the GNU General Public
8 * License version 2, as published by the Free Software Foundation, and
9 * may be copied, distributed, and modified under those terms.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 */
16
17#ifndef _ROCKCHIP_DRM_DRV_H
18#define _ROCKCHIP_DRM_DRV_H
19
20#include <drm/drm_fb_helper.h>
21#include <drm/drm_gem.h>
22
23#include <linux/module.h>
24#include <linux/component.h>
25
26#define ROCKCHIP_MAX_FB_BUFFER 3
27#define ROCKCHIP_MAX_CONNECTOR 2
28#define ROCKCHIP_MAX_CRTC 2
29
30struct drm_device;
31struct drm_connector;
32
33/*
34 * Rockchip drm private crtc funcs.
35 * @enable_vblank: enable crtc vblank irq.
36 * @disable_vblank: disable crtc vblank irq.
37 */
38struct rockchip_crtc_funcs {
39 int (*enable_vblank)(struct drm_crtc *crtc);
40 void (*disable_vblank)(struct drm_crtc *crtc);
41};
42
43/*
44 * Rockchip drm private structure.
45 * @fbdev_helper: fbdev emulation helper.
46 * @fbdev_bo: GEM object backing the fbdev framebuffer.
47 * @crtc_funcs: per-pipe CRTC vblank callbacks, used to map "pipe" to crtc.
48 */
49struct rockchip_drm_private {
50 struct drm_fb_helper fbdev_helper;
51 struct drm_gem_object *fbdev_bo;
52 const struct rockchip_crtc_funcs *crtc_funcs[ROCKCHIP_MAX_CRTC];
53};
54
55int rockchip_register_crtc_funcs(struct drm_device *dev,
56 const struct rockchip_crtc_funcs *crtc_funcs,
57 int pipe);
58void rockchip_unregister_crtc_funcs(struct drm_device *dev, int pipe);
59int rockchip_drm_encoder_get_mux_id(struct device_node *node,
60 struct drm_encoder *encoder);
61int rockchip_drm_crtc_mode_config(struct drm_crtc *crtc, int connector_type,
62 int out_mode);
63int rockchip_drm_dma_attach_device(struct drm_device *drm_dev,
64 struct device *dev);
65void rockchip_drm_dma_detach_device(struct drm_device *drm_dev,
66 struct device *dev);
67
68#endif /* _ROCKCHIP_DRM_DRV_H */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.c b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
new file mode 100644
index 000000000000..77d52893d40f
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.c
@@ -0,0 +1,201 @@
1/*
2 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
3 * Author:Mark Yao <mark.yao@rock-chips.com>
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#include <linux/kernel.h>
16#include <drm/drm.h>
17#include <drm/drmP.h>
18#include <drm/drm_fb_helper.h>
19#include <drm/drm_crtc_helper.h>
20
21#include "rockchip_drm_drv.h"
22#include "rockchip_drm_gem.h"
23
24#define to_rockchip_fb(x) container_of(x, struct rockchip_drm_fb, fb)
25
26struct rockchip_drm_fb {
27 struct drm_framebuffer fb;
28 struct drm_gem_object *obj[ROCKCHIP_MAX_FB_BUFFER];
29};
30
31struct drm_gem_object *rockchip_fb_get_gem_obj(struct drm_framebuffer *fb,
32 unsigned int plane)
33{
34 struct rockchip_drm_fb *rk_fb = to_rockchip_fb(fb);
35
36 if (plane >= ROCKCHIP_MAX_FB_BUFFER)
37 return NULL;
38
39 return rk_fb->obj[plane];
40}
41EXPORT_SYMBOL_GPL(rockchip_fb_get_gem_obj);
42
43static void rockchip_drm_fb_destroy(struct drm_framebuffer *fb)
44{
45 struct rockchip_drm_fb *rockchip_fb = to_rockchip_fb(fb);
46 struct drm_gem_object *obj;
47 int i;
48
49 for (i = 0; i < ROCKCHIP_MAX_FB_BUFFER; i++) {
50 obj = rockchip_fb->obj[i];
51 if (obj)
52 drm_gem_object_unreference_unlocked(obj);
53 }
54
55 drm_framebuffer_cleanup(fb);
56 kfree(rockchip_fb);
57}
58
59static int rockchip_drm_fb_create_handle(struct drm_framebuffer *fb,
60 struct drm_file *file_priv,
61 unsigned int *handle)
62{
63 struct rockchip_drm_fb *rockchip_fb = to_rockchip_fb(fb);
64
65 return drm_gem_handle_create(file_priv,
66 rockchip_fb->obj[0], handle);
67}
68
69static struct drm_framebuffer_funcs rockchip_drm_fb_funcs = {
70 .destroy = rockchip_drm_fb_destroy,
71 .create_handle = rockchip_drm_fb_create_handle,
72};
73
74static struct rockchip_drm_fb *
75rockchip_fb_alloc(struct drm_device *dev, struct drm_mode_fb_cmd2 *mode_cmd,
76 struct drm_gem_object **obj, unsigned int num_planes)
77{
78 struct rockchip_drm_fb *rockchip_fb;
79 int ret;
80 int i;
81
82 rockchip_fb = kzalloc(sizeof(*rockchip_fb), GFP_KERNEL);
83 if (!rockchip_fb)
84 return ERR_PTR(-ENOMEM);
85
86 drm_helper_mode_fill_fb_struct(&rockchip_fb->fb, mode_cmd);
87
88 for (i = 0; i < num_planes; i++)
89 rockchip_fb->obj[i] = obj[i];
90
91 ret = drm_framebuffer_init(dev, &rockchip_fb->fb,
92 &rockchip_drm_fb_funcs);
93 if (ret) {
94 dev_err(dev->dev, "Failed to initialize framebuffer: %d\n",
95 ret);
96 kfree(rockchip_fb);
97 return ERR_PTR(ret);
98 }
99
100 return rockchip_fb;
101}
102
103static struct drm_framebuffer *
104rockchip_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
105 struct drm_mode_fb_cmd2 *mode_cmd)
106{
107 struct rockchip_drm_fb *rockchip_fb;
108 struct drm_gem_object *objs[ROCKCHIP_MAX_FB_BUFFER];
109 struct drm_gem_object *obj;
110 unsigned int hsub;
111 unsigned int vsub;
112 int num_planes;
113 int ret;
114 int i;
115
116 hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format);
117 vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format);
118 num_planes = min(drm_format_num_planes(mode_cmd->pixel_format),
119 ROCKCHIP_MAX_FB_BUFFER);
120
121 for (i = 0; i < num_planes; i++) {
122 unsigned int width = mode_cmd->width / (i ? hsub : 1);
123 unsigned int height = mode_cmd->height / (i ? vsub : 1);
124 unsigned int min_size;
125
126 obj = drm_gem_object_lookup(dev, file_priv,
127 mode_cmd->handles[i]);
128 if (!obj) {
129 dev_err(dev->dev, "Failed to lookup GEM object\n");
130 ret = -ENXIO;
131 goto err_gem_object_unreference;
132 }
133
134 min_size = (height - 1) * mode_cmd->pitches[i] +
135 mode_cmd->offsets[i] +
136 width * drm_format_plane_cpp(mode_cmd->pixel_format, i);
137
138 if (obj->size < min_size) {
139 drm_gem_object_unreference_unlocked(obj);
140 ret = -EINVAL;
141 goto err_gem_object_unreference;
142 }
143 objs[i] = obj;
144 }
145
146 rockchip_fb = rockchip_fb_alloc(dev, mode_cmd, objs, i);
147 if (IS_ERR(rockchip_fb)) {
148 ret = PTR_ERR(rockchip_fb);
149 goto err_gem_object_unreference;
150 }
151
152 return &rockchip_fb->fb;
153
154err_gem_object_unreference:
155 for (i--; i >= 0; i--)
156 drm_gem_object_unreference_unlocked(objs[i]);
157 return ERR_PTR(ret);
158}
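/*
 * Worked size check (editor's example): for a 1920x1080 DRM_FORMAT_NV12
 * framebuffer with pitches[1] = 1920 and offsets[1] = 1920 * 1080, plane 1
 * is subsampled by hsub = vsub = 2, so width = 960, height = 540 and
 * cpp = 2, giving min_size = 539 * 1920 + 2073600 + 960 * 2 = 3110400
 * bytes, exactly the 1.5 bytes per pixel expected of NV12.
 */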
159
160static void rockchip_drm_output_poll_changed(struct drm_device *dev)
161{
162 struct rockchip_drm_private *private = dev->dev_private;
163 struct drm_fb_helper *fb_helper = &private->fbdev_helper;
164
165 drm_fb_helper_hotplug_event(fb_helper);
166}
167
168static const struct drm_mode_config_funcs rockchip_drm_mode_config_funcs = {
169 .fb_create = rockchip_user_fb_create,
170 .output_poll_changed = rockchip_drm_output_poll_changed,
171};
172
173struct drm_framebuffer *
174rockchip_drm_framebuffer_init(struct drm_device *dev,
175 struct drm_mode_fb_cmd2 *mode_cmd,
176 struct drm_gem_object *obj)
177{
178 struct rockchip_drm_fb *rockchip_fb;
179
180 rockchip_fb = rockchip_fb_alloc(dev, mode_cmd, &obj, 1);
181 if (IS_ERR(rockchip_fb))
182		return ERR_CAST(rockchip_fb);
183
184 return &rockchip_fb->fb;
185}
186
187void rockchip_drm_mode_config_init(struct drm_device *dev)
188{
189 dev->mode_config.min_width = 0;
190 dev->mode_config.min_height = 0;
191
192	/*
193	 * Set max width and height to the default values (4096x4096).
194	 * These values are used to check framebuffer size limits in
195	 * drm_mode_addfb().
196	 */
197 dev->mode_config.max_width = 4096;
198 dev->mode_config.max_height = 4096;
199
200 dev->mode_config.funcs = &rockchip_drm_mode_config_funcs;
201}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fb.h b/drivers/gpu/drm/rockchip/rockchip_drm_fb.h
new file mode 100644
index 000000000000..09574d48226f
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fb.h
@@ -0,0 +1,28 @@
1/*
2 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
3 * Author:Mark Yao <mark.yao@rock-chips.com>
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#ifndef _ROCKCHIP_DRM_FB_H
16#define _ROCKCHIP_DRM_FB_H
17
18struct drm_framebuffer *
19rockchip_drm_framebuffer_init(struct drm_device *dev,
20 struct drm_mode_fb_cmd2 *mode_cmd,
21 struct drm_gem_object *obj);
22void rockchip_drm_framebuffer_fini(struct drm_framebuffer *fb);
23
24void rockchip_drm_mode_config_init(struct drm_device *dev);
25
26struct drm_gem_object *rockchip_fb_get_gem_obj(struct drm_framebuffer *fb,
27 unsigned int plane);
28#endif /* _ROCKCHIP_DRM_FB_H */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
new file mode 100644
index 000000000000..a5d889a8716b
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
@@ -0,0 +1,210 @@
1/*
2 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
3 * Author:Mark Yao <mark.yao@rock-chips.com>
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#include <drm/drm.h>
16#include <drm/drmP.h>
17#include <drm/drm_fb_helper.h>
18#include <drm/drm_crtc_helper.h>
19
20#include "rockchip_drm_drv.h"
21#include "rockchip_drm_gem.h"
22#include "rockchip_drm_fb.h"
23
24#define PREFERRED_BPP 32
25#define to_drm_private(x) \
26 container_of(x, struct rockchip_drm_private, fbdev_helper)
27
28static int rockchip_fbdev_mmap(struct fb_info *info,
29 struct vm_area_struct *vma)
30{
31 struct drm_fb_helper *helper = info->par;
32 struct rockchip_drm_private *private = to_drm_private(helper);
33
34 return rockchip_gem_mmap_buf(private->fbdev_bo, vma);
35}
36
37static struct fb_ops rockchip_drm_fbdev_ops = {
38 .owner = THIS_MODULE,
39 .fb_mmap = rockchip_fbdev_mmap,
40 .fb_fillrect = cfb_fillrect,
41 .fb_copyarea = cfb_copyarea,
42 .fb_imageblit = cfb_imageblit,
43 .fb_check_var = drm_fb_helper_check_var,
44 .fb_set_par = drm_fb_helper_set_par,
45 .fb_blank = drm_fb_helper_blank,
46 .fb_pan_display = drm_fb_helper_pan_display,
47 .fb_setcmap = drm_fb_helper_setcmap,
48};
49
50static int rockchip_drm_fbdev_create(struct drm_fb_helper *helper,
51 struct drm_fb_helper_surface_size *sizes)
52{
53 struct rockchip_drm_private *private = to_drm_private(helper);
54 struct drm_mode_fb_cmd2 mode_cmd = { 0 };
55 struct drm_device *dev = helper->dev;
56 struct rockchip_gem_object *rk_obj;
57 struct drm_framebuffer *fb;
58 unsigned int bytes_per_pixel;
59 unsigned long offset;
60 struct fb_info *fbi;
61 size_t size;
62 int ret;
63
64 bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8);
65
66 mode_cmd.width = sizes->surface_width;
67 mode_cmd.height = sizes->surface_height;
68 mode_cmd.pitches[0] = sizes->surface_width * bytes_per_pixel;
69 mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
70 sizes->surface_depth);
71
72 size = mode_cmd.pitches[0] * mode_cmd.height;
73
74 rk_obj = rockchip_gem_create_object(dev, size);
75 if (IS_ERR(rk_obj))
76		return PTR_ERR(rk_obj);
77
78 private->fbdev_bo = &rk_obj->base;
79
80 fbi = framebuffer_alloc(0, dev->dev);
81 if (!fbi) {
82 dev_err(dev->dev, "Failed to allocate framebuffer info.\n");
83 ret = -ENOMEM;
84 goto err_rockchip_gem_free_object;
85 }
86
87 helper->fb = rockchip_drm_framebuffer_init(dev, &mode_cmd,
88 private->fbdev_bo);
89 if (IS_ERR(helper->fb)) {
90 dev_err(dev->dev, "Failed to allocate DRM framebuffer.\n");
91 ret = PTR_ERR(helper->fb);
92 goto err_framebuffer_release;
93 }
94
95 helper->fbdev = fbi;
96
97 fbi->par = helper;
98 fbi->flags = FBINFO_FLAG_DEFAULT;
99 fbi->fbops = &rockchip_drm_fbdev_ops;
100
101 ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
102 if (ret) {
103 dev_err(dev->dev, "Failed to allocate color map.\n");
104 goto err_drm_framebuffer_unref;
105 }
106
107 fb = helper->fb;
108 drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
109 drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);
110
111 offset = fbi->var.xoffset * bytes_per_pixel;
112 offset += fbi->var.yoffset * fb->pitches[0];
113
114 dev->mode_config.fb_base = 0;
115 fbi->screen_base = rk_obj->kvaddr + offset;
116 fbi->screen_size = rk_obj->base.size;
117 fbi->fix.smem_len = rk_obj->base.size;
118
119	DRM_DEBUG_KMS("FB [%dx%d]-%d kvaddr=%p offset=%lu size=%zu\n",
120 fb->width, fb->height, fb->depth, rk_obj->kvaddr,
121 offset, size);
122 return 0;
123
124err_drm_framebuffer_unref:
125 drm_framebuffer_unreference(helper->fb);
126err_framebuffer_release:
127 framebuffer_release(fbi);
128err_rockchip_gem_free_object:
129 rockchip_gem_free_object(&rk_obj->base);
130 return ret;
131}
132
133static const struct drm_fb_helper_funcs rockchip_drm_fb_helper_funcs = {
134 .fb_probe = rockchip_drm_fbdev_create,
135};
136
137int rockchip_drm_fbdev_init(struct drm_device *dev)
138{
139 struct rockchip_drm_private *private = dev->dev_private;
140 struct drm_fb_helper *helper;
141 unsigned int num_crtc;
142 int ret;
143
144 if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector)
145 return -EINVAL;
146
147 num_crtc = dev->mode_config.num_crtc;
148
149 helper = &private->fbdev_helper;
150
151 drm_fb_helper_prepare(dev, helper, &rockchip_drm_fb_helper_funcs);
152
153 ret = drm_fb_helper_init(dev, helper, num_crtc, ROCKCHIP_MAX_CONNECTOR);
154 if (ret < 0) {
155 dev_err(dev->dev, "Failed to initialize drm fb helper - %d.\n",
156 ret);
157 return ret;
158 }
159
160 ret = drm_fb_helper_single_add_all_connectors(helper);
161 if (ret < 0) {
162 dev_err(dev->dev, "Failed to add connectors - %d.\n", ret);
163 goto err_drm_fb_helper_fini;
164 }
165
166 /* disable all the possible outputs/crtcs before entering KMS mode */
167 drm_helper_disable_unused_functions(dev);
168
169 ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP);
170 if (ret < 0) {
171 dev_err(dev->dev, "Failed to set initial hw config - %d.\n",
172 ret);
173 goto err_drm_fb_helper_fini;
174 }
175
176 return 0;
177
178err_drm_fb_helper_fini:
179 drm_fb_helper_fini(helper);
180 return ret;
181}
182
183void rockchip_drm_fbdev_fini(struct drm_device *dev)
184{
185 struct rockchip_drm_private *private = dev->dev_private;
186 struct drm_fb_helper *helper;
187
188 helper = &private->fbdev_helper;
189
190 if (helper->fbdev) {
191 struct fb_info *info;
192 int ret;
193
194 info = helper->fbdev;
195 ret = unregister_framebuffer(info);
196 if (ret < 0)
197 DRM_DEBUG_KMS("failed unregister_framebuffer() - %d\n",
198 ret);
199
200 if (info->cmap.len)
201 fb_dealloc_cmap(&info->cmap);
202
203 framebuffer_release(info);
204 }
205
206 if (helper->fb)
207 drm_framebuffer_unreference(helper->fb);
208
209 drm_fb_helper_fini(helper);
210}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h
new file mode 100644
index 000000000000..50432e9b5b37
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.h
@@ -0,0 +1,21 @@
1/*
2 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
3 * Author:Mark Yao <mark.yao@rock-chips.com>
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#ifndef _ROCKCHIP_DRM_FBDEV_H
16#define _ROCKCHIP_DRM_FBDEV_H
17
18int rockchip_drm_fbdev_init(struct drm_device *dev);
19void rockchip_drm_fbdev_fini(struct drm_device *dev);
20
21#endif /* _ROCKCHIP_DRM_FBDEV_H */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
new file mode 100644
index 000000000000..bc98a227dc76
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -0,0 +1,294 @@
1/*
2 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
3 * Author:Mark Yao <mark.yao@rock-chips.com>
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#include <drm/drm.h>
16#include <drm/drmP.h>
17#include <drm/drm_gem.h>
18#include <drm/drm_vma_manager.h>
19
20#include <linux/dma-attrs.h>
21
22#include "rockchip_drm_drv.h"
23#include "rockchip_drm_gem.h"
24
25static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj)
26{
27 struct drm_gem_object *obj = &rk_obj->base;
28 struct drm_device *drm = obj->dev;
29
30 init_dma_attrs(&rk_obj->dma_attrs);
31 dma_set_attr(DMA_ATTR_WRITE_COMBINE, &rk_obj->dma_attrs);
32
33 /* TODO(djkurtz): Use DMA_ATTR_NO_KERNEL_MAPPING except for fbdev */
34 rk_obj->kvaddr = dma_alloc_attrs(drm->dev, obj->size,
35 &rk_obj->dma_addr, GFP_KERNEL,
36 &rk_obj->dma_attrs);
37	if (!rk_obj->kvaddr) {
38		int ret = -ENOMEM;
39
40		DRM_ERROR("failed to allocate %#zx byte dma buffer, %d\n",
41			  obj->size, ret);
42		return ret;
43	}
44
45 return 0;
46}
47
48static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
49{
50 struct drm_gem_object *obj = &rk_obj->base;
51 struct drm_device *drm = obj->dev;
52
53 dma_free_attrs(drm->dev, obj->size, rk_obj->kvaddr, rk_obj->dma_addr,
54 &rk_obj->dma_attrs);
55}
56
57int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
58 struct vm_area_struct *vma)
59{
60 struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
61 struct drm_device *drm = obj->dev;
62 unsigned long vm_size;
63
64 vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
65 vm_size = vma->vm_end - vma->vm_start;
66
67 if (vm_size > obj->size)
68 return -EINVAL;
69
70 return dma_mmap_attrs(drm->dev, vma, rk_obj->kvaddr, rk_obj->dma_addr,
71 obj->size, &rk_obj->dma_attrs);
72}
73
74/* drm driver mmap file operations */
75int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma)
76{
77 struct drm_file *priv = filp->private_data;
78 struct drm_device *dev = priv->minor->dev;
79 struct drm_gem_object *obj;
80 struct drm_vma_offset_node *node;
81 int ret;
82
83 if (drm_device_is_unplugged(dev))
84 return -ENODEV;
85
86 mutex_lock(&dev->struct_mutex);
87
88 node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
89 vma->vm_pgoff,
90 vma_pages(vma));
91 if (!node) {
92 mutex_unlock(&dev->struct_mutex);
93 DRM_ERROR("failed to find vma node.\n");
94 return -EINVAL;
95 } else if (!drm_vma_node_is_allowed(node, filp)) {
96 mutex_unlock(&dev->struct_mutex);
97 return -EACCES;
98 }
99
100 obj = container_of(node, struct drm_gem_object, vma_node);
101 ret = rockchip_gem_mmap_buf(obj, vma);
102
103 mutex_unlock(&dev->struct_mutex);
104
105 return ret;
106}
107
108struct rockchip_gem_object *
109 rockchip_gem_create_object(struct drm_device *drm, unsigned int size)
110{
111 struct rockchip_gem_object *rk_obj;
112 struct drm_gem_object *obj;
113 int ret;
114
115 size = round_up(size, PAGE_SIZE);
116
117 rk_obj = kzalloc(sizeof(*rk_obj), GFP_KERNEL);
118 if (!rk_obj)
119 return ERR_PTR(-ENOMEM);
120
121 obj = &rk_obj->base;
122
123 drm_gem_private_object_init(drm, obj, size);
124
125 ret = rockchip_gem_alloc_buf(rk_obj);
126 if (ret)
127 goto err_free_rk_obj;
128
129 return rk_obj;
130
131err_free_rk_obj:
132 kfree(rk_obj);
133 return ERR_PTR(ret);
134}
135
136/*
137 * rockchip_gem_free_object - (struct drm_driver)->gem_free_object callback
138 * function
139 */
140void rockchip_gem_free_object(struct drm_gem_object *obj)
141{
142 struct rockchip_gem_object *rk_obj;
143
144 drm_gem_free_mmap_offset(obj);
145
146 rk_obj = to_rockchip_obj(obj);
147
148 rockchip_gem_free_buf(rk_obj);
149
150 kfree(rk_obj);
151}
152
153/*
154 * rockchip_gem_create_with_handle - allocate an object with the given
155 * size and create a gem handle on it
156 *
157 * returns a struct rockchip_gem_object* on success or ERR_PTR values
158 * on failure.
159 */
160static struct rockchip_gem_object *
161rockchip_gem_create_with_handle(struct drm_file *file_priv,
162 struct drm_device *drm, unsigned int size,
163 unsigned int *handle)
164{
165 struct rockchip_gem_object *rk_obj;
166 struct drm_gem_object *obj;
167 int ret;
168
169 rk_obj = rockchip_gem_create_object(drm, size);
170 if (IS_ERR(rk_obj))
171 return ERR_CAST(rk_obj);
172
173 obj = &rk_obj->base;
174
175	/*
176	 * Allocate an id in the idr table where the object is registered;
177	 * the returned handle carries the id that userspace sees.
178	 */
179 ret = drm_gem_handle_create(file_priv, obj, handle);
180 if (ret)
181 goto err_handle_create;
182
183 /* drop reference from allocate - handle holds it now. */
184 drm_gem_object_unreference_unlocked(obj);
185
186 return rk_obj;
187
188err_handle_create:
189 rockchip_gem_free_object(obj);
190
191 return ERR_PTR(ret);
192}
193
194int rockchip_gem_dumb_map_offset(struct drm_file *file_priv,
195 struct drm_device *dev, uint32_t handle,
196 uint64_t *offset)
197{
198 struct drm_gem_object *obj;
199 int ret;
200
201 mutex_lock(&dev->struct_mutex);
202
203 obj = drm_gem_object_lookup(dev, file_priv, handle);
204 if (!obj) {
205 DRM_ERROR("failed to lookup gem object.\n");
206 ret = -EINVAL;
207 goto unlock;
208 }
209
210 ret = drm_gem_create_mmap_offset(obj);
211 if (ret)
212 goto out;
213
214 *offset = drm_vma_node_offset_addr(&obj->vma_node);
215 DRM_DEBUG_KMS("offset = 0x%llx\n", *offset);
216
217out:
218 drm_gem_object_unreference(obj);
219unlock:
220 mutex_unlock(&dev->struct_mutex);
221 return ret;
222}
223
224/*
225 * rockchip_gem_dumb_create - (struct drm_driver)->dumb_create callback
226 * function
227 *
228 * This aligns the pitch and size arguments to the minimum required. Wrap
229 * this in your own function if you need bigger alignment.
230 */
231int rockchip_gem_dumb_create(struct drm_file *file_priv,
232 struct drm_device *dev,
233 struct drm_mode_create_dumb *args)
234{
235 struct rockchip_gem_object *rk_obj;
236 int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
237
238 /*
239 * align to 64 bytes since Mali requires it.
240 */
241 min_pitch = ALIGN(min_pitch, 64);
242
243 if (args->pitch < min_pitch)
244 args->pitch = min_pitch;
245
246 if (args->size < args->pitch * args->height)
247 args->size = args->pitch * args->height;
248
249 rk_obj = rockchip_gem_create_with_handle(file_priv, dev, args->size,
250 &args->handle);
251
252 return PTR_ERR_OR_ZERO(rk_obj);
253}
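/*
 * Worked example (editor's note): width = 1366 at bpp = 32 gives
 * min_pitch = DIV_ROUND_UP(1366 * 32, 8) = 5464 bytes, which ALIGN()
 * rounds up to the next multiple of 64, so args->pitch becomes 5504.
 */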
254
255/*
256 * Allocate a sg_table for this GEM object.
257 * Note: Both the table's contents, and the sg_table itself must be freed by
258 * the caller.
259 * Returns a pointer to the newly allocated sg_table, or an ERR_PTR() error.
260 */
261struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj)
262{
263 struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
264 struct drm_device *drm = obj->dev;
265 struct sg_table *sgt;
266 int ret;
267
268 sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
269 if (!sgt)
270 return ERR_PTR(-ENOMEM);
271
272 ret = dma_get_sgtable_attrs(drm->dev, sgt, rk_obj->kvaddr,
273 rk_obj->dma_addr, obj->size,
274 &rk_obj->dma_attrs);
275 if (ret) {
276 DRM_ERROR("failed to allocate sgt, %d\n", ret);
277 kfree(sgt);
278 return ERR_PTR(ret);
279 }
280
281 return sgt;
282}
283
284void *rockchip_gem_prime_vmap(struct drm_gem_object *obj)
285{
286 struct rockchip_gem_object *rk_obj = to_rockchip_obj(obj);
287
288 return rk_obj->kvaddr;
289}
290
291void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
292{
293 /* Nothing to do */
294}
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.h b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h
new file mode 100644
index 000000000000..67bcebe90003
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.h
@@ -0,0 +1,54 @@
1/*
2 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
3 * Author:Mark Yao <mark.yao@rock-chips.com>
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#ifndef _ROCKCHIP_DRM_GEM_H
16#define _ROCKCHIP_DRM_GEM_H
17
18#define to_rockchip_obj(x) container_of(x, struct rockchip_gem_object, base)
19
20struct rockchip_gem_object {
21 struct drm_gem_object base;
22 unsigned int flags;
23
24 void *kvaddr;
25 dma_addr_t dma_addr;
26 struct dma_attrs dma_attrs;
27};
28
29struct sg_table *rockchip_gem_prime_get_sg_table(struct drm_gem_object *obj);
30struct drm_gem_object *
31rockchip_gem_prime_import_sg_table(struct drm_device *dev, size_t size,
32 struct sg_table *sgt);
33void *rockchip_gem_prime_vmap(struct drm_gem_object *obj);
34void rockchip_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
35
36/* drm driver mmap file operations */
37int rockchip_gem_mmap(struct file *filp, struct vm_area_struct *vma);
38
39/* mmap a gem object to userspace. */
40int rockchip_gem_mmap_buf(struct drm_gem_object *obj,
41 struct vm_area_struct *vma);
42
43struct rockchip_gem_object *
44 rockchip_gem_create_object(struct drm_device *drm, unsigned int size);
45
46void rockchip_gem_free_object(struct drm_gem_object *obj);
47
48int rockchip_gem_dumb_create(struct drm_file *file_priv,
49 struct drm_device *dev,
50 struct drm_mode_create_dumb *args);
51int rockchip_gem_dumb_map_offset(struct drm_file *file_priv,
52 struct drm_device *dev, uint32_t handle,
53 uint64_t *offset);
54#endif /* _ROCKCHIP_DRM_GEM_H */
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
new file mode 100644
index 000000000000..e7ca25b3fb38
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -0,0 +1,1455 @@
1/*
2 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
3 * Author:Mark Yao <mark.yao@rock-chips.com>
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#include <drm/drm.h>
16#include <drm/drmP.h>
17#include <drm/drm_crtc.h>
18#include <drm/drm_crtc_helper.h>
19#include <drm/drm_plane_helper.h>
20
21#include <linux/kernel.h>
22#include <linux/platform_device.h>
23#include <linux/clk.h>
24#include <linux/of.h>
25#include <linux/of_device.h>
26#include <linux/pm_runtime.h>
27#include <linux/component.h>
28
29#include <linux/reset.h>
30#include <linux/delay.h>
31
32#include "rockchip_drm_drv.h"
33#include "rockchip_drm_gem.h"
34#include "rockchip_drm_fb.h"
35#include "rockchip_drm_vop.h"
36
37#define VOP_REG(off, _mask, s) \
38 {.offset = off, \
39 .mask = _mask, \
40 .shift = s,}
41
42#define __REG_SET_RELAXED(x, off, mask, shift, v) \
43 vop_mask_write_relaxed(x, off, (mask) << shift, (v) << shift)
44#define __REG_SET_NORMAL(x, off, mask, shift, v) \
45 vop_mask_write(x, off, (mask) << shift, (v) << shift)
46
47#define REG_SET(x, base, reg, v, mode) \
48 __REG_SET_##mode(x, base + reg.offset, reg.mask, reg.shift, v)
49
50#define VOP_WIN_SET(x, win, name, v) \
51 REG_SET(x, win->base, win->phy->name, v, RELAXED)
52#define VOP_CTRL_SET(x, name, v) \
53 REG_SET(x, 0, (x)->data->ctrl->name, v, NORMAL)
54
55#define VOP_WIN_GET(x, win, name) \
56 vop_read_reg(x, win->base, &win->phy->name)
57
58#define VOP_WIN_GET_YRGBADDR(vop, win) \
59 vop_readl(vop, win->base + win->phy->yrgb_mst.offset)
60
61#define to_vop(x) container_of(x, struct vop, crtc)
62#define to_vop_win(x) container_of(x, struct vop_win, base)
63
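/*
 * How the register macros compose (editor's illustration): with the
 * win01_data template defined below, where .enable = VOP_REG(WIN0_CTRL0,
 * 0x1, 0), the call
 *
 *	VOP_WIN_SET(vop, win, enable, 1);
 *
 * expands to
 *
 *	vop_mask_write_relaxed(vop, win->base + WIN0_CTRL0,
 *			       0x1 << 0, 1 << 0);
 *
 * i.e. a masked read-modify-write of bit 0 of the window's CTRL0 register
 * that goes through the regsbak shadow copy instead of reading back the
 * hardware register.
 */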
64struct vop_win_state {
65 struct list_head head;
66 struct drm_framebuffer *fb;
67 dma_addr_t yrgb_mst;
68 struct drm_pending_vblank_event *event;
69};
70
71struct vop_win {
72 struct drm_plane base;
73 const struct vop_win_data *data;
74 struct vop *vop;
75
76 struct list_head pending;
77 struct vop_win_state *active;
78};
79
80struct vop {
81 struct drm_crtc crtc;
82 struct device *dev;
83 struct drm_device *drm_dev;
84 unsigned int dpms;
85
86 int connector_type;
87 int connector_out_mode;
88
89	/* protects vsync_work_pending and the per-window pending lists */
90 struct mutex vsync_mutex;
91 bool vsync_work_pending;
92
93 const struct vop_data *data;
94
95 uint32_t *regsbak;
96 void __iomem *regs;
97
98	/* length of the mapped vop register region */
99 uint32_t len;
100
101	/* only one process at a time may configure the registers */
102 spinlock_t reg_lock;
103 /* lock vop irq reg */
104 spinlock_t irq_lock;
105
106 unsigned int irq;
107
108	/* vop AHB clk */
109 struct clk *hclk;
110 /* vop dclk */
111 struct clk *dclk;
112	/* vop shared memory clk */
113 struct clk *aclk;
114
115 /* vop dclk reset */
116 struct reset_control *dclk_rst;
117
118 int pipe;
119
120 struct vop_win win[];
121};
122
123enum vop_data_format {
124 VOP_FMT_ARGB8888 = 0,
125 VOP_FMT_RGB888,
126 VOP_FMT_RGB565,
127 VOP_FMT_YUV420SP = 4,
128 VOP_FMT_YUV422SP,
129 VOP_FMT_YUV444SP,
130};
131
132struct vop_reg_data {
133 uint32_t offset;
134 uint32_t value;
135};
136
137struct vop_reg {
138 uint32_t offset;
139 uint32_t shift;
140 uint32_t mask;
141};
142
143struct vop_ctrl {
144 struct vop_reg standby;
145 struct vop_reg data_blank;
146 struct vop_reg gate_en;
147 struct vop_reg mmu_en;
148 struct vop_reg rgb_en;
149 struct vop_reg edp_en;
150 struct vop_reg hdmi_en;
151 struct vop_reg mipi_en;
152 struct vop_reg out_mode;
153 struct vop_reg dither_down;
154 struct vop_reg dither_up;
155 struct vop_reg pin_pol;
156
157 struct vop_reg htotal_pw;
158 struct vop_reg hact_st_end;
159 struct vop_reg vtotal_pw;
160 struct vop_reg vact_st_end;
161 struct vop_reg hpost_st_end;
162 struct vop_reg vpost_st_end;
163};
164
165struct vop_win_phy {
166 const uint32_t *data_formats;
167 uint32_t nformats;
168
169 struct vop_reg enable;
170 struct vop_reg format;
171 struct vop_reg act_info;
172 struct vop_reg dsp_info;
173 struct vop_reg dsp_st;
174 struct vop_reg yrgb_mst;
175 struct vop_reg uv_mst;
176 struct vop_reg yrgb_vir;
177 struct vop_reg uv_vir;
178
179 struct vop_reg dst_alpha_ctl;
180 struct vop_reg src_alpha_ctl;
181};
182
183struct vop_win_data {
184 uint32_t base;
185 const struct vop_win_phy *phy;
186 enum drm_plane_type type;
187};
188
189struct vop_data {
190 const struct vop_reg_data *init_table;
191 unsigned int table_size;
192 const struct vop_ctrl *ctrl;
193 const struct vop_win_data *win;
194 unsigned int win_size;
195};
196
197static const uint32_t formats_01[] = {
198 DRM_FORMAT_XRGB8888,
199 DRM_FORMAT_ARGB8888,
200 DRM_FORMAT_RGB888,
201 DRM_FORMAT_RGB565,
202 DRM_FORMAT_NV12,
203 DRM_FORMAT_NV16,
204 DRM_FORMAT_NV24,
205};
206
207static const uint32_t formats_234[] = {
208 DRM_FORMAT_XRGB8888,
209 DRM_FORMAT_ARGB8888,
210 DRM_FORMAT_RGB888,
211 DRM_FORMAT_RGB565,
212};
213
214static const struct vop_win_phy win01_data = {
215 .data_formats = formats_01,
216 .nformats = ARRAY_SIZE(formats_01),
217 .enable = VOP_REG(WIN0_CTRL0, 0x1, 0),
218 .format = VOP_REG(WIN0_CTRL0, 0x7, 1),
219 .act_info = VOP_REG(WIN0_ACT_INFO, 0x1fff1fff, 0),
220 .dsp_info = VOP_REG(WIN0_DSP_INFO, 0x0fff0fff, 0),
221 .dsp_st = VOP_REG(WIN0_DSP_ST, 0x1fff1fff, 0),
222 .yrgb_mst = VOP_REG(WIN0_YRGB_MST, 0xffffffff, 0),
223 .uv_mst = VOP_REG(WIN0_CBR_MST, 0xffffffff, 0),
224 .yrgb_vir = VOP_REG(WIN0_VIR, 0x3fff, 0),
225 .uv_vir = VOP_REG(WIN0_VIR, 0x3fff, 16),
226 .src_alpha_ctl = VOP_REG(WIN0_SRC_ALPHA_CTRL, 0xff, 0),
227 .dst_alpha_ctl = VOP_REG(WIN0_DST_ALPHA_CTRL, 0xff, 0),
228};
229
230static const struct vop_win_phy win23_data = {
231 .data_formats = formats_234,
232 .nformats = ARRAY_SIZE(formats_234),
233 .enable = VOP_REG(WIN2_CTRL0, 0x1, 0),
234 .format = VOP_REG(WIN2_CTRL0, 0x7, 1),
235 .dsp_info = VOP_REG(WIN2_DSP_INFO0, 0x0fff0fff, 0),
236 .dsp_st = VOP_REG(WIN2_DSP_ST0, 0x1fff1fff, 0),
237 .yrgb_mst = VOP_REG(WIN2_MST0, 0xffffffff, 0),
238 .yrgb_vir = VOP_REG(WIN2_VIR0_1, 0x1fff, 0),
239 .src_alpha_ctl = VOP_REG(WIN2_SRC_ALPHA_CTRL, 0xff, 0),
240 .dst_alpha_ctl = VOP_REG(WIN2_DST_ALPHA_CTRL, 0xff, 0),
241};
242
243static const struct vop_win_phy cursor_data = {
244 .data_formats = formats_234,
245 .nformats = ARRAY_SIZE(formats_234),
246 .enable = VOP_REG(HWC_CTRL0, 0x1, 0),
247 .format = VOP_REG(HWC_CTRL0, 0x7, 1),
248 .dsp_st = VOP_REG(HWC_DSP_ST, 0x1fff1fff, 0),
249 .yrgb_mst = VOP_REG(HWC_MST, 0xffffffff, 0),
250};
251
252static const struct vop_ctrl ctrl_data = {
253 .standby = VOP_REG(SYS_CTRL, 0x1, 22),
254 .gate_en = VOP_REG(SYS_CTRL, 0x1, 23),
255 .mmu_en = VOP_REG(SYS_CTRL, 0x1, 20),
256 .rgb_en = VOP_REG(SYS_CTRL, 0x1, 12),
257 .hdmi_en = VOP_REG(SYS_CTRL, 0x1, 13),
258 .edp_en = VOP_REG(SYS_CTRL, 0x1, 14),
259 .mipi_en = VOP_REG(SYS_CTRL, 0x1, 15),
260 .dither_down = VOP_REG(DSP_CTRL1, 0xf, 1),
261 .dither_up = VOP_REG(DSP_CTRL1, 0x1, 6),
262 .data_blank = VOP_REG(DSP_CTRL0, 0x1, 19),
263 .out_mode = VOP_REG(DSP_CTRL0, 0xf, 0),
264 .pin_pol = VOP_REG(DSP_CTRL0, 0xf, 4),
265 .htotal_pw = VOP_REG(DSP_HTOTAL_HS_END, 0x1fff1fff, 0),
266 .hact_st_end = VOP_REG(DSP_HACT_ST_END, 0x1fff1fff, 0),
267 .vtotal_pw = VOP_REG(DSP_VTOTAL_VS_END, 0x1fff1fff, 0),
268 .vact_st_end = VOP_REG(DSP_VACT_ST_END, 0x1fff1fff, 0),
269 .hpost_st_end = VOP_REG(POST_DSP_HACT_INFO, 0x1fff1fff, 0),
270 .vpost_st_end = VOP_REG(POST_DSP_VACT_INFO, 0x1fff1fff, 0),
271};
272
273static const struct vop_reg_data vop_init_reg_table[] = {
274 {SYS_CTRL, 0x00c00000},
275 {DSP_CTRL0, 0x00000000},
276 {WIN0_CTRL0, 0x00000080},
277 {WIN1_CTRL0, 0x00000080},
278};
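/*
 * Decoding the table above against ctrl_data (editor's note): the SYS_CTRL
 * value 0x00c00000 sets bits 22 and 23, which ctrl_data maps to the standby
 * and gate_en fields, so the VOP starts out with standby and gate_en both
 * set until vop_enable() clears the standby bit.
 */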
279
280/*
281 * Note: rk3288 has a dedicated 'cursor' window, however, that window requires
282 * special support to get alpha blending working. For now, just use overlay
283 * window 1 for the drm cursor.
284 */
285static const struct vop_win_data rk3288_vop_win_data[] = {
286 { .base = 0x00, .phy = &win01_data, .type = DRM_PLANE_TYPE_PRIMARY },
287 { .base = 0x40, .phy = &win01_data, .type = DRM_PLANE_TYPE_CURSOR },
288 { .base = 0x00, .phy = &win23_data, .type = DRM_PLANE_TYPE_OVERLAY },
289 { .base = 0x50, .phy = &win23_data, .type = DRM_PLANE_TYPE_OVERLAY },
290 { .base = 0x00, .phy = &cursor_data, .type = DRM_PLANE_TYPE_OVERLAY },
291};
292
293static const struct vop_data rk3288_vop = {
294 .init_table = vop_init_reg_table,
295 .table_size = ARRAY_SIZE(vop_init_reg_table),
296 .ctrl = &ctrl_data,
297 .win = rk3288_vop_win_data,
298 .win_size = ARRAY_SIZE(rk3288_vop_win_data),
299};
300
301static const struct of_device_id vop_driver_dt_match[] = {
302 { .compatible = "rockchip,rk3288-vop",
303 .data = &rk3288_vop },
304 {},
305};
306
307static inline void vop_writel(struct vop *vop, uint32_t offset, uint32_t v)
308{
309 writel(v, vop->regs + offset);
310 vop->regsbak[offset >> 2] = v;
311}
312
313static inline uint32_t vop_readl(struct vop *vop, uint32_t offset)
314{
315 return readl(vop->regs + offset);
316}
317
318static inline uint32_t vop_read_reg(struct vop *vop, uint32_t base,
319 const struct vop_reg *reg)
320{
321 return (vop_readl(vop, base + reg->offset) >> reg->shift) & reg->mask;
322}
323
324static inline void vop_cfg_done(struct vop *vop)
325{
326 writel(0x01, vop->regs + REG_CFG_DONE);
327}
328
329static inline void vop_mask_write(struct vop *vop, uint32_t offset,
330 uint32_t mask, uint32_t v)
331{
332 if (mask) {
333 uint32_t cached_val = vop->regsbak[offset >> 2];
334
335 cached_val = (cached_val & ~mask) | v;
336 writel(cached_val, vop->regs + offset);
337 vop->regsbak[offset >> 2] = cached_val;
338 }
339}
340
341static inline void vop_mask_write_relaxed(struct vop *vop, uint32_t offset,
342 uint32_t mask, uint32_t v)
343{
344 if (mask) {
345 uint32_t cached_val = vop->regsbak[offset >> 2];
346
347 cached_val = (cached_val & ~mask) | v;
348 writel_relaxed(cached_val, vop->regs + offset);
349 vop->regsbak[offset >> 2] = cached_val;
350 }
351}
352
353static int vop_convert_format(uint32_t format)
354{
355 switch (format) {
356 case DRM_FORMAT_XRGB8888:
357 case DRM_FORMAT_ARGB8888:
358 return VOP_FMT_ARGB8888;
359 case DRM_FORMAT_RGB888:
360 return VOP_FMT_RGB888;
361 case DRM_FORMAT_RGB565:
362 return VOP_FMT_RGB565;
363 case DRM_FORMAT_NV12:
364 return VOP_FMT_YUV420SP;
365 case DRM_FORMAT_NV16:
366 return VOP_FMT_YUV422SP;
367 case DRM_FORMAT_NV24:
368 return VOP_FMT_YUV444SP;
369 default:
370		DRM_ERROR("unsupported format[%08x]\n", format);
371 return -EINVAL;
372 }
373}
374
375static bool is_alpha_support(uint32_t format)
376{
377 switch (format) {
378 case DRM_FORMAT_ARGB8888:
379 return true;
380 default:
381 return false;
382 }
383}
384
385static void vop_enable(struct drm_crtc *crtc)
386{
387 struct vop *vop = to_vop(crtc);
388 int ret;
389
390 ret = clk_enable(vop->hclk);
391 if (ret < 0) {
392 dev_err(vop->dev, "failed to enable hclk - %d\n", ret);
393 return;
394 }
395
396 ret = clk_enable(vop->dclk);
397 if (ret < 0) {
398 dev_err(vop->dev, "failed to enable dclk - %d\n", ret);
399 goto err_disable_hclk;
400 }
401
402 ret = clk_enable(vop->aclk);
403 if (ret < 0) {
404 dev_err(vop->dev, "failed to enable aclk - %d\n", ret);
405 goto err_disable_dclk;
406 }
407
408 /*
409 * Slave iommu shares power, irq and clock with vop. It was associated
410 * automatically with this master device via common driver code.
411 * Now that we have enabled the clock we attach it to the shared drm
412 * mapping.
413 */
414 ret = rockchip_drm_dma_attach_device(vop->drm_dev, vop->dev);
415 if (ret) {
416 dev_err(vop->dev, "failed to attach dma mapping, %d\n", ret);
417 goto err_disable_aclk;
418 }
419
420 spin_lock(&vop->reg_lock);
421
422 VOP_CTRL_SET(vop, standby, 0);
423
424 spin_unlock(&vop->reg_lock);
425
426 enable_irq(vop->irq);
427
428 drm_vblank_on(vop->drm_dev, vop->pipe);
429
430 return;
431
432err_disable_aclk:
433 clk_disable(vop->aclk);
434err_disable_dclk:
435 clk_disable(vop->dclk);
436err_disable_hclk:
437 clk_disable(vop->hclk);
438}
439
440static void vop_disable(struct drm_crtc *crtc)
441{
442 struct vop *vop = to_vop(crtc);
443
444 drm_vblank_off(crtc->dev, vop->pipe);
445
446 disable_irq(vop->irq);
447
448 /*
449 * TODO: Since standby doesn't take effect until the next vblank,
450 * when we turn off dclk below, the vop is probably still active.
451 */
452 spin_lock(&vop->reg_lock);
453
454 VOP_CTRL_SET(vop, standby, 1);
455
456 spin_unlock(&vop->reg_lock);
457 /*
458	 * Disable dclk to stop frame scan, so that the iommu can be detached safely.
459 */
460 clk_disable(vop->dclk);
461
462 rockchip_drm_dma_detach_device(vop->drm_dev, vop->dev);
463
464 clk_disable(vop->aclk);
465 clk_disable(vop->hclk);
466}
467
468/*
469 * Caller must hold vsync_mutex.
470 */
471static struct drm_framebuffer *vop_win_last_pending_fb(struct vop_win *vop_win)
472{
473 struct vop_win_state *last;
474 struct vop_win_state *active = vop_win->active;
475
476 if (list_empty(&vop_win->pending))
477 return active ? active->fb : NULL;
478
479 last = list_last_entry(&vop_win->pending, struct vop_win_state, head);
480 return last ? last->fb : NULL;
481}
482
483/*
484 * Caller must hold vsync_mutex.
485 */
486static int vop_win_queue_fb(struct vop_win *vop_win,
487 struct drm_framebuffer *fb, dma_addr_t yrgb_mst,
488 struct drm_pending_vblank_event *event)
489{
490 struct vop_win_state *state;
491
492 state = kzalloc(sizeof(*state), GFP_KERNEL);
493 if (!state)
494 return -ENOMEM;
495
496 state->fb = fb;
497 state->yrgb_mst = yrgb_mst;
498 state->event = event;
499
500 list_add_tail(&state->head, &vop_win->pending);
501
502 return 0;
503}
504
505static int vop_update_plane_event(struct drm_plane *plane,
506 struct drm_crtc *crtc,
507 struct drm_framebuffer *fb, int crtc_x,
508 int crtc_y, unsigned int crtc_w,
509 unsigned int crtc_h, uint32_t src_x,
510 uint32_t src_y, uint32_t src_w,
511 uint32_t src_h,
512 struct drm_pending_vblank_event *event)
513{
514 struct vop_win *vop_win = to_vop_win(plane);
515 const struct vop_win_data *win = vop_win->data;
516 struct vop *vop = to_vop(crtc);
517 struct drm_gem_object *obj;
518 struct rockchip_gem_object *rk_obj;
519 unsigned long offset;
520 unsigned int actual_w;
521 unsigned int actual_h;
522 unsigned int dsp_stx;
523 unsigned int dsp_sty;
524 unsigned int y_vir_stride;
525 dma_addr_t yrgb_mst;
526 enum vop_data_format format;
527 uint32_t val;
528 bool is_alpha;
529 bool visible;
530 int ret;
531 struct drm_rect dest = {
532 .x1 = crtc_x,
533 .y1 = crtc_y,
534 .x2 = crtc_x + crtc_w,
535 .y2 = crtc_y + crtc_h,
536 };
537 struct drm_rect src = {
538 /* 16.16 fixed point */
539 .x1 = src_x,
540 .y1 = src_y,
541 .x2 = src_x + src_w,
542 .y2 = src_y + src_h,
543 };
544 const struct drm_rect clip = {
545 .x2 = crtc->mode.hdisplay,
546 .y2 = crtc->mode.vdisplay,
547 };
548 bool can_position = plane->type != DRM_PLANE_TYPE_PRIMARY;
549
550 ret = drm_plane_helper_check_update(plane, crtc, fb,
551 &src, &dest, &clip,
552 DRM_PLANE_HELPER_NO_SCALING,
553 DRM_PLANE_HELPER_NO_SCALING,
554 can_position, false, &visible);
555 if (ret)
556 return ret;
557
558 if (!visible)
559 return 0;
560
561 is_alpha = is_alpha_support(fb->pixel_format);
562 format = vop_convert_format(fb->pixel_format);
563 if (format < 0)
564 return format;
565
566 obj = rockchip_fb_get_gem_obj(fb, 0);
567 if (!obj) {
568 DRM_ERROR("failed to get rockchip gem object from framebuffer\n");
569 return -EINVAL;
570 }
571
572 rk_obj = to_rockchip_obj(obj);
573
574 actual_w = (src.x2 - src.x1) >> 16;
575 actual_h = (src.y2 - src.y1) >> 16;
576 crtc_x = max(0, crtc_x);
577 crtc_y = max(0, crtc_y);
578
579 dsp_stx = crtc_x + crtc->mode.htotal - crtc->mode.hsync_start;
580 dsp_sty = crtc_y + crtc->mode.vtotal - crtc->mode.vsync_start;
581
582 offset = (src.x1 >> 16) * (fb->bits_per_pixel >> 3);
583 offset += (src.y1 >> 16) * fb->pitches[0];
584 yrgb_mst = rk_obj->dma_addr + offset;
585
586 y_vir_stride = fb->pitches[0] / (fb->bits_per_pixel >> 3);
587
588 /*
589 * If this plane update changes the plane's framebuffer (or, more
590 * precisely, if this update uses a different framebuffer than the last
591 * update did), enqueue it so we can track when it completes.
592 *
593 * Only once we discover that this update has completed can we
594 * unreference any previous framebuffers.
595 */
596 mutex_lock(&vop->vsync_mutex);
597 if (fb != vop_win_last_pending_fb(vop_win)) {
598 ret = drm_vblank_get(plane->dev, vop->pipe);
599 if (ret) {
600 DRM_ERROR("failed to get vblank, %d\n", ret);
601 mutex_unlock(&vop->vsync_mutex);
602 return ret;
603 }
604
605 drm_framebuffer_reference(fb);
606
607 ret = vop_win_queue_fb(vop_win, fb, yrgb_mst, event);
608 if (ret) {
609 drm_vblank_put(plane->dev, vop->pipe);
610 mutex_unlock(&vop->vsync_mutex);
611 return ret;
612 }
613
614 vop->vsync_work_pending = true;
615 }
616 mutex_unlock(&vop->vsync_mutex);
617
618 spin_lock(&vop->reg_lock);
619
620 VOP_WIN_SET(vop, win, format, format);
621 VOP_WIN_SET(vop, win, yrgb_vir, y_vir_stride);
622 VOP_WIN_SET(vop, win, yrgb_mst, yrgb_mst);
623 val = (actual_h - 1) << 16;
624 val |= (actual_w - 1) & 0xffff;
625 VOP_WIN_SET(vop, win, act_info, val);
626 VOP_WIN_SET(vop, win, dsp_info, val);
627 val = (dsp_sty - 1) << 16;
628 val |= (dsp_stx - 1) & 0xffff;
629 VOP_WIN_SET(vop, win, dsp_st, val);
630
631 if (is_alpha) {
632 VOP_WIN_SET(vop, win, dst_alpha_ctl,
633 DST_FACTOR_M0(ALPHA_SRC_INVERSE));
634 val = SRC_ALPHA_EN(1) | SRC_COLOR_M0(ALPHA_SRC_PRE_MUL) |
635 SRC_ALPHA_M0(ALPHA_STRAIGHT) |
636 SRC_BLEND_M0(ALPHA_PER_PIX) |
637 SRC_ALPHA_CAL_M0(ALPHA_NO_SATURATION) |
638 SRC_FACTOR_M0(ALPHA_ONE);
639 VOP_WIN_SET(vop, win, src_alpha_ctl, val);
640 } else {
641 VOP_WIN_SET(vop, win, src_alpha_ctl, SRC_ALPHA_EN(0));
642 }
643
644 VOP_WIN_SET(vop, win, enable, 1);
645
646 vop_cfg_done(vop);
647 spin_unlock(&vop->reg_lock);
648
649 return 0;
650}
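The source rectangle is carried in 16.16 fixed point, so the integer pixel coordinates are recovered with a right shift before the scan-out offset is computed. A worked example of the offset arithmetic with hypothetical values (a 32bpp format and a 4096-byte pitch), compilable on its own:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t src_x1 = 16 << 16, src_y1 = 8 << 16;	/* 16.16 fixed point */
	uint32_t bpp = 32, pitch = 4096;		/* hypothetical fb layout */
	unsigned long offset;

	offset  = (src_x1 >> 16) * (bpp >> 3);	/* 16 pixels * 4 bytes  =    64 */
	offset += (src_y1 >> 16) * pitch;	/*  8 rows   * 4096     = 32768 */
	printf("offset = %lu\n", offset);	/* 32832; yrgb_mst = dma_addr + offset */
	return 0;
}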
651
652static int vop_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
653 struct drm_framebuffer *fb, int crtc_x, int crtc_y,
654 unsigned int crtc_w, unsigned int crtc_h,
655 uint32_t src_x, uint32_t src_y, uint32_t src_w,
656 uint32_t src_h)
657{
658 return vop_update_plane_event(plane, crtc, fb, crtc_x, crtc_y, crtc_w,
659 crtc_h, src_x, src_y, src_w, src_h,
660 NULL);
661}
662
663static int vop_update_primary_plane(struct drm_crtc *crtc,
664 struct drm_pending_vblank_event *event)
665{
666 unsigned int crtc_w, crtc_h;
667
668 crtc_w = crtc->primary->fb->width - crtc->x;
669 crtc_h = crtc->primary->fb->height - crtc->y;
670
671 return vop_update_plane_event(crtc->primary, crtc, crtc->primary->fb,
672 0, 0, crtc_w, crtc_h, crtc->x << 16,
673 crtc->y << 16, crtc_w << 16,
674 crtc_h << 16, event);
675}
676
677static int vop_disable_plane(struct drm_plane *plane)
678{
679 struct vop_win *vop_win = to_vop_win(plane);
680 const struct vop_win_data *win = vop_win->data;
681 struct vop *vop;
682 int ret;
683
684 if (!plane->crtc)
685 return 0;
686
687 vop = to_vop(plane->crtc);
688
689 ret = drm_vblank_get(plane->dev, vop->pipe);
690 if (ret) {
691 DRM_ERROR("failed to get vblank, %d\n", ret);
692 return ret;
693 }
694
695 mutex_lock(&vop->vsync_mutex);
696
697 ret = vop_win_queue_fb(vop_win, NULL, 0, NULL);
698 if (ret) {
699 drm_vblank_put(plane->dev, vop->pipe);
700 mutex_unlock(&vop->vsync_mutex);
701 return ret;
702 }
703
704 vop->vsync_work_pending = true;
705 mutex_unlock(&vop->vsync_mutex);
706
707 spin_lock(&vop->reg_lock);
708 VOP_WIN_SET(vop, win, enable, 0);
709 vop_cfg_done(vop);
710 spin_unlock(&vop->reg_lock);
711
712 return 0;
713}
714
715static void vop_plane_destroy(struct drm_plane *plane)
716{
717 vop_disable_plane(plane);
718 drm_plane_cleanup(plane);
719}
720
721static const struct drm_plane_funcs vop_plane_funcs = {
722 .update_plane = vop_update_plane,
723 .disable_plane = vop_disable_plane,
724 .destroy = vop_plane_destroy,
725};
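These are the legacy (pre-atomic) plane entry points, reached from userspace through the SETPLANE ioctl. For illustration only, a hypothetical client would drive vop_update_plane() roughly as below; this sketch uses libdrm and assumes fd, plane_id, crtc_id and fb_id were obtained beforehand via drmModeGetPlaneResources() and drmModeAddFB():

#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Show a 1280x720 framebuffer full-screen on an overlay plane. */
int show_overlay(int fd, uint32_t plane_id, uint32_t crtc_id, uint32_t fb_id)
{
	/* crtc_* take integer pixels; src_* take 16.16 fixed point, which is
	 * why vop_update_plane_event() shifts the source values down by 16. */
	return drmModeSetPlane(fd, plane_id, crtc_id, fb_id, 0 /* flags */,
			       0, 0, 1280, 720,			/* dest rect */
			       0, 0, 1280 << 16, 720 << 16);	/* src, 16.16 */
}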
726
727int rockchip_drm_crtc_mode_config(struct drm_crtc *crtc,
728 int connector_type,
729 int out_mode)
730{
731 struct vop *vop = to_vop(crtc);
732
733 vop->connector_type = connector_type;
734 vop->connector_out_mode = out_mode;
735
736 return 0;
737}
738
739static int vop_crtc_enable_vblank(struct drm_crtc *crtc)
740{
741 struct vop *vop = to_vop(crtc);
742 unsigned long flags;
743
744 if (vop->dpms != DRM_MODE_DPMS_ON)
745 return -EPERM;
746
747 spin_lock_irqsave(&vop->irq_lock, flags);
748
749 vop_mask_write(vop, INTR_CTRL0, FS_INTR_MASK, FS_INTR_EN(1));
750
751 spin_unlock_irqrestore(&vop->irq_lock, flags);
752
753 return 0;
754}
755
756static void vop_crtc_disable_vblank(struct drm_crtc *crtc)
757{
758 struct vop *vop = to_vop(crtc);
759 unsigned long flags;
760
761 if (vop->dpms != DRM_MODE_DPMS_ON)
762 return;
763 spin_lock_irqsave(&vop->irq_lock, flags);
764 vop_mask_write(vop, INTR_CTRL0, FS_INTR_MASK, FS_INTR_EN(0));
765 spin_unlock_irqrestore(&vop->irq_lock, flags);
766}
767
768static const struct rockchip_crtc_funcs private_crtc_funcs = {
769 .enable_vblank = vop_crtc_enable_vblank,
770 .disable_vblank = vop_crtc_disable_vblank,
771};
772
773static void vop_crtc_dpms(struct drm_crtc *crtc, int mode)
774{
775 struct vop *vop = to_vop(crtc);
776
777 DRM_DEBUG_KMS("crtc[%d] mode[%d]\n", crtc->base.id, mode);
778
779 if (vop->dpms == mode) {
780 DRM_DEBUG_KMS("desired dpms mode is the same as the previous one\n");
781 return;
782 }
783
784 switch (mode) {
785 case DRM_MODE_DPMS_ON:
786 vop_enable(crtc);
787 break;
788 case DRM_MODE_DPMS_STANDBY:
789 case DRM_MODE_DPMS_SUSPEND:
790 case DRM_MODE_DPMS_OFF:
791 vop_disable(crtc);
792 break;
793 default:
794 DRM_DEBUG_KMS("unspecified mode %d\n", mode);
795 break;
796 }
797
798 vop->dpms = mode;
799}
800
801static void vop_crtc_prepare(struct drm_crtc *crtc)
802{
803 vop_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
804}
805
806static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
807 const struct drm_display_mode *mode,
808 struct drm_display_mode *adjusted_mode)
809{
810 if (adjusted_mode->htotal == 0 || adjusted_mode->vtotal == 0)
811 return false;
812
813 return true;
814}
815
816static int vop_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
817 struct drm_framebuffer *old_fb)
818{
819 int ret;
820
821 crtc->x = x;
822 crtc->y = y;
823
824 ret = vop_update_primary_plane(crtc, NULL);
825 if (ret < 0) {
826 DRM_ERROR("failed to update plane\n");
827 return ret;
828 }
829
830 return 0;
831}
832
833static int vop_crtc_mode_set(struct drm_crtc *crtc,
834 struct drm_display_mode *mode,
835 struct drm_display_mode *adjusted_mode,
836 int x, int y, struct drm_framebuffer *fb)
837{
838 struct vop *vop = to_vop(crtc);
839 u16 hsync_len = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
840 u16 hdisplay = adjusted_mode->hdisplay;
841 u16 htotal = adjusted_mode->htotal;
842 u16 hact_st = adjusted_mode->htotal - adjusted_mode->hsync_start;
843 u16 hact_end = hact_st + hdisplay;
844 u16 vdisplay = adjusted_mode->vdisplay;
845 u16 vtotal = adjusted_mode->vtotal;
846 u16 vsync_len = adjusted_mode->vsync_end - adjusted_mode->vsync_start;
847 u16 vact_st = adjusted_mode->vtotal - adjusted_mode->vsync_start;
848 u16 vact_end = vact_st + vdisplay;
849 int ret;
850 uint32_t val;
851
852 /*
853 * Disable dclk to stop the frame scan, so that we can safely configure
854 * the mode and enable the iommu.
855 */
856 clk_disable(vop->dclk);
857
858 switch (vop->connector_type) {
859 case DRM_MODE_CONNECTOR_LVDS:
860 VOP_CTRL_SET(vop, rgb_en, 1);
861 break;
862 case DRM_MODE_CONNECTOR_eDP:
863 VOP_CTRL_SET(vop, edp_en, 1);
864 break;
865 case DRM_MODE_CONNECTOR_HDMIA:
866 VOP_CTRL_SET(vop, hdmi_en, 1);
867 break;
868 default:
869 DRM_ERROR("unsupported connector_type[%d]\n",
870 vop->connector_type);
871 return -EINVAL;
872 }
873 VOP_CTRL_SET(vop, out_mode, vop->connector_out_mode);
874
875 val = 0x8;
876 val |= (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0;
877 val |= (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) ? (1 << 1) : 0;
878 VOP_CTRL_SET(vop, pin_pol, val);
879
880 VOP_CTRL_SET(vop, htotal_pw, (htotal << 16) | hsync_len);
881 val = hact_st << 16;
882 val |= hact_end;
883 VOP_CTRL_SET(vop, hact_st_end, val);
884 VOP_CTRL_SET(vop, hpost_st_end, val);
885
886 VOP_CTRL_SET(vop, vtotal_pw, (vtotal << 16) | vsync_len);
887 val = vact_st << 16;
888 val |= vact_end;
889 VOP_CTRL_SET(vop, vact_st_end, val);
890 VOP_CTRL_SET(vop, vpost_st_end, val);
891
892 ret = vop_crtc_mode_set_base(crtc, x, y, fb);
893 if (ret)
894 return ret;
895
896 /*
897 * Reset dclk so that the whole mode configuration takes effect and the
898 * clock starts on a correct frame boundary.
899 */
900 reset_control_assert(vop->dclk_rst);
901 usleep_range(10, 20);
902 reset_control_deassert(vop->dclk_rst);
903
904 clk_set_rate(vop->dclk, adjusted_mode->clock * 1000);
905 ret = clk_enable(vop->dclk);
906 if (ret < 0) {
907 dev_err(vop->dev, "failed to enable dclk - %d\n", ret);
908 return ret;
909 }
910
911 return 0;
912}
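The active-area start values are counted from the beginning of the sync pulse, so hact_st/vact_st equal sync length plus back porch (total minus sync_start). Worked numbers for the standard CEA-861 1920x1080@60 timings, used here purely as an example:

#include <stdint.h>

/* 1920x1080@60: hdisplay 1920, hsync_start 2008, hsync_end 2052, htotal 2200;
 *               vdisplay 1080, vsync_start 1084, vsync_end 1089, vtotal 1125 */
static const uint16_t hsync_len = 2052 - 2008;	/*   44 */
static const uint16_t hact_st   = 2200 - 2008;	/*  192 = 44 sync + 148 back porch */
static const uint16_t hact_end  = 192 + 1920;	/* 2112 */
static const uint16_t vact_st   = 1125 - 1084;	/*   41 =  5 sync +  36 back porch */
static const uint16_t vact_end  = 41 + 1080;	/* 1121 */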
913
914static void vop_crtc_commit(struct drm_crtc *crtc)
915{
916}
917
918static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = {
919 .dpms = vop_crtc_dpms,
920 .prepare = vop_crtc_prepare,
921 .mode_fixup = vop_crtc_mode_fixup,
922 .mode_set = vop_crtc_mode_set,
923 .mode_set_base = vop_crtc_mode_set_base,
924 .commit = vop_crtc_commit,
925};
926
927static int vop_crtc_page_flip(struct drm_crtc *crtc,
928 struct drm_framebuffer *fb,
929 struct drm_pending_vblank_event *event,
930 uint32_t page_flip_flags)
931{
932 struct vop *vop = to_vop(crtc);
933 struct drm_framebuffer *old_fb = crtc->primary->fb;
934 int ret;
935
936 /* the crtc's dpms state must be on when a page flip is requested */
937 if (vop->dpms > DRM_MODE_DPMS_ON) {
938 DRM_DEBUG("failed page flip request at dpms[%d].\n", vop->dpms);
939 return 0;
940 }
941
942 crtc->primary->fb = fb;
943
944 ret = vop_update_primary_plane(crtc, event);
945 if (ret)
946 crtc->primary->fb = old_fb;
947
948 return ret;
949}
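The event accepted here is completed by drm_send_vblank_event() in vop_win_state_complete() once the new framebuffer has actually been latched. A hypothetical userspace flip exercising this path, sketched with libdrm (fd, crtc_id and fb_id assumed valid):

#include <stdint.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

static void flip_done(int fd, unsigned int seq, unsigned int tv_sec,
		      unsigned int tv_usec, void *data)
{
	/* Called once the flipped framebuffer is on-screen. */
}

int flip_once(int fd, uint32_t crtc_id, uint32_t fb_id)
{
	drmEventContext ev = {
		.version = DRM_EVENT_CONTEXT_VERSION,
		.page_flip_handler = flip_done,
	};
	int ret;

	ret = drmModePageFlip(fd, crtc_id, fb_id,
			      DRM_MODE_PAGE_FLIP_EVENT, NULL);
	if (ret)
		return ret;
	return drmHandleEvent(fd, &ev);	/* blocks until the event arrives */
}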
950
951static void vop_win_state_complete(struct vop_win *vop_win,
952 struct vop_win_state *state)
953{
954 struct vop *vop = vop_win->vop;
955 struct drm_crtc *crtc = &vop->crtc;
956 struct drm_device *drm = crtc->dev;
957 unsigned long flags;
958
959 if (state->event) {
960 spin_lock_irqsave(&drm->event_lock, flags);
961 drm_send_vblank_event(drm, -1, state->event);
962 spin_unlock_irqrestore(&drm->event_lock, flags);
963 }
964
965 list_del(&state->head);
966 drm_vblank_put(crtc->dev, vop->pipe);
967}
968
969static void vop_crtc_destroy(struct drm_crtc *crtc)
970{
971 drm_crtc_cleanup(crtc);
972}
973
974static const struct drm_crtc_funcs vop_crtc_funcs = {
975 .set_config = drm_crtc_helper_set_config,
976 .page_flip = vop_crtc_page_flip,
977 .destroy = vop_crtc_destroy,
978};
979
980static bool vop_win_state_is_active(struct vop_win *vop_win,
981 struct vop_win_state *state)
982{
983 bool active = false;
984
985 if (state->fb) {
986 dma_addr_t yrgb_mst;
987
988 /* check yrgb_mst to tell if pending_fb is now front */
989 yrgb_mst = VOP_WIN_GET_YRGBADDR(vop_win->vop, vop_win->data);
990
991 active = (yrgb_mst == state->yrgb_mst);
992 } else {
993 bool enabled;
994
995 /* if enable bit is clear, plane is now disabled */
996 enabled = VOP_WIN_GET(vop_win->vop, vop_win->data, enable);
997
998 active = (enabled == 0);
999 }
1000
1001 return active;
1002}
1003
1004static void vop_win_state_destroy(struct vop_win_state *state)
1005{
1006 struct drm_framebuffer *fb = state->fb;
1007
1008 if (fb)
1009 drm_framebuffer_unreference(fb);
1010
1011 kfree(state);
1012}
1013
1014static void vop_win_update_state(struct vop_win *vop_win)
1015{
1016 struct vop_win_state *state, *n, *new_active = NULL;
1017
1018 /* Check if any pending states are now active */
1019 list_for_each_entry(state, &vop_win->pending, head)
1020 if (vop_win_state_is_active(vop_win, state)) {
1021 new_active = state;
1022 break;
1023 }
1024
1025 if (!new_active)
1026 return;
1027
1028 /*
1029 * Destroy any 'skipped' pending states - states that were queued
1030 * before the newly active state.
1031 */
1032 list_for_each_entry_safe(state, n, &vop_win->pending, head) {
1033 if (state == new_active)
1034 break;
1035 vop_win_state_complete(vop_win, state);
1036 vop_win_state_destroy(state);
1037 }
1038
1039 vop_win_state_complete(vop_win, new_active);
1040
1041 if (vop_win->active)
1042 vop_win_state_destroy(vop_win->active);
1043 vop_win->active = new_active;
1044}
1045
1046static bool vop_win_has_pending_state(struct vop_win *vop_win)
1047{
1048 return !list_empty(&vop_win->pending);
1049}
1050
1051static irqreturn_t vop_isr_thread(int irq, void *data)
1052{
1053 struct vop *vop = data;
1054 const struct vop_data *vop_data = vop->data;
1055 unsigned int i;
1056
1057 mutex_lock(&vop->vsync_mutex);
1058
1059 if (!vop->vsync_work_pending)
1060 goto done;
1061
1062 vop->vsync_work_pending = false;
1063
1064 for (i = 0; i < vop_data->win_size; i++) {
1065 struct vop_win *vop_win = &vop->win[i];
1066
1067 vop_win_update_state(vop_win);
1068 if (vop_win_has_pending_state(vop_win))
1069 vop->vsync_work_pending = true;
1070 }
1071
1072done:
1073 mutex_unlock(&vop->vsync_mutex);
1074
1075 return IRQ_HANDLED;
1076}
1077
1078static irqreturn_t vop_isr(int irq, void *data)
1079{
1080 struct vop *vop = data;
1081 uint32_t intr0_reg, active_irqs;
1082 unsigned long flags;
1083
1084 /*
1085 * The INTR_CTRL0 register has interrupt status, enable and clear bits;
1086 * we must hold irq_lock to avoid a race with enable/disable_vblank().
1087 */
1088 spin_lock_irqsave(&vop->irq_lock, flags);
1089 intr0_reg = vop_readl(vop, INTR_CTRL0);
1090 active_irqs = intr0_reg & INTR_MASK;
1091 /* Clear all active interrupt sources */
1092 if (active_irqs)
1093 vop_writel(vop, INTR_CTRL0,
1094 intr0_reg | (active_irqs << INTR_CLR_SHIFT));
1095 spin_unlock_irqrestore(&vop->irq_lock, flags);
1096
1097 /* The irq line is shared with the iommu; no active source means it was not ours */
1098 if (!active_irqs)
1099 return IRQ_NONE;
1100
1101 /* Only Frame Start Interrupt is enabled; other irqs are spurious. */
1102 if (!(active_irqs & FS_INTR)) {
1103 DRM_ERROR("Unknown VOP IRQs: %#02x\n", active_irqs);
1104 return IRQ_NONE;
1105 }
1106
1107 drm_handle_vblank(vop->drm_dev, vop->pipe);
1108
1109 return (vop->vsync_work_pending) ? IRQ_WAKE_THREAD : IRQ_HANDLED;
1110}
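INTR_CTRL0 packs three four-bit fields: raw status in bits [3:0], enables in bits [7:4], and write-one-to-clear bits in bits [11:8] (see the *_MASK/*_EN/*_CLR macros in rockchip_drm_vop.h below). That layout is why the handler clears an interrupt by shifting the active sources left by INTR_CLR_SHIFT. A minimal sketch of the arithmetic for a latched frame-start interrupt:

#include <stdint.h>

#define FS_INTR_STATUS	(1 << 1)	/* status bit */
#define FS_INTR_ENABLE	(1 << 5)	/* enable bit */
#define INTR_CLR_SHIFT	8

static uint32_t clear_frame_start(void)
{
	uint32_t intr0_reg   = FS_INTR_ENABLE | FS_INTR_STATUS;	/* 0x022 */
	uint32_t active_irqs = intr0_reg & 0xf;				/* 0x002 */

	/* 0x222: the enable bit is preserved, bit 9 clears the status */
	return intr0_reg | (active_irqs << INTR_CLR_SHIFT);
}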
1111
1112static int vop_create_crtc(struct vop *vop)
1113{
1114 const struct vop_data *vop_data = vop->data;
1115 struct device *dev = vop->dev;
1116 struct drm_device *drm_dev = vop->drm_dev;
1117 struct drm_plane *primary = NULL, *cursor = NULL, *plane;
1118 struct drm_crtc *crtc = &vop->crtc;
1119 struct device_node *port;
1120 int ret;
1121 int i;
1122
1123 /*
1124 * Create the drm_planes for the primary and cursor windows first, since
1125 * we need to pass them to drm_crtc_init_with_planes(), which sets their
1126 * "possible_crtcs" to the newly initialized crtc.
1127 */
1128 for (i = 0; i < vop_data->win_size; i++) {
1129 struct vop_win *vop_win = &vop->win[i];
1130 const struct vop_win_data *win_data = vop_win->data;
1131
1132 if (win_data->type != DRM_PLANE_TYPE_PRIMARY &&
1133 win_data->type != DRM_PLANE_TYPE_CURSOR)
1134 continue;
1135
1136 ret = drm_universal_plane_init(vop->drm_dev, &vop_win->base,
1137 0, &vop_plane_funcs,
1138 win_data->phy->data_formats,
1139 win_data->phy->nformats,
1140 win_data->type);
1141 if (ret) {
1142 DRM_ERROR("failed to initialize plane\n");
1143 goto err_cleanup_planes;
1144 }
1145
1146 plane = &vop_win->base;
1147 if (plane->type == DRM_PLANE_TYPE_PRIMARY)
1148 primary = plane;
1149 else if (plane->type == DRM_PLANE_TYPE_CURSOR)
1150 cursor = plane;
1151 }
1152
1153 ret = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor,
1154 &vop_crtc_funcs);
1155 if (ret)
1156 return ret;
1157
1158 drm_crtc_helper_add(crtc, &vop_crtc_helper_funcs);
1159
1160 /*
1161 * Create drm_planes for overlay windows with possible_crtcs restricted
1162 * to the newly created crtc.
1163 */
1164 for (i = 0; i < vop_data->win_size; i++) {
1165 struct vop_win *vop_win = &vop->win[i];
1166 const struct vop_win_data *win_data = vop_win->data;
1167 unsigned long possible_crtcs = 1 << drm_crtc_index(crtc);
1168
1169 if (win_data->type != DRM_PLANE_TYPE_OVERLAY)
1170 continue;
1171
1172 ret = drm_universal_plane_init(vop->drm_dev, &vop_win->base,
1173 possible_crtcs,
1174 &vop_plane_funcs,
1175 win_data->phy->data_formats,
1176 win_data->phy->nformats,
1177 win_data->type);
1178 if (ret) {
1179 DRM_ERROR("failed to initialize overlay plane\n");
1180 goto err_cleanup_crtc;
1181 }
1182 }
1183
1184 port = of_get_child_by_name(dev->of_node, "port");
1185 if (!port) {
1186 DRM_ERROR("no port node found in %s\n",
1187 dev->of_node->full_name);
1188 ret = -ENOENT; goto err_cleanup_crtc; /* don't return stale 0 */
1189 }
1190
1191 crtc->port = port;
1192 vop->pipe = drm_crtc_index(crtc);
1193 rockchip_register_crtc_funcs(drm_dev, &private_crtc_funcs, vop->pipe);
1194
1195 return 0;
1196
1197err_cleanup_crtc:
1198 drm_crtc_cleanup(crtc);
1199err_cleanup_planes:
1200 list_for_each_entry(plane, &drm_dev->mode_config.plane_list, head)
1201 drm_plane_cleanup(plane);
1202 return ret;
1203}
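possible_crtcs is a bitmask indexed by CRTC position, so restricting the overlays to this CRTC is a single shift. The primary and cursor planes were registered above with a mask of 0 because drm_crtc_init_with_planes() fills in their possible_crtcs when binding them to the new CRTC. For illustration (hypothetical indices):

/* CRTC registered at index 2 of the DRM device: */
unsigned long this_crtc_only = 1UL << 2;		/* 0x4 */
/* A plane usable on CRTCs 0 and 2 would advertise: */
unsigned long crtc0_or_2 = (1UL << 0) | (1UL << 2);	/* 0x5 */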
1204
1205static void vop_destroy_crtc(struct vop *vop)
1206{
1207 struct drm_crtc *crtc = &vop->crtc;
1208
1209 rockchip_unregister_crtc_funcs(vop->drm_dev, vop->pipe);
1210 of_node_put(crtc->port);
1211 drm_crtc_cleanup(crtc);
1212}
1213
1214static int vop_initial(struct vop *vop)
1215{
1216 const struct vop_data *vop_data = vop->data;
1217 const struct vop_reg_data *init_table = vop_data->init_table;
1218 struct reset_control *ahb_rst;
1219 int i, ret;
1220
1221 vop->hclk = devm_clk_get(vop->dev, "hclk_vop");
1222 if (IS_ERR(vop->hclk)) {
1223 dev_err(vop->dev, "failed to get hclk source\n");
1224 return PTR_ERR(vop->hclk);
1225 }
1226 vop->aclk = devm_clk_get(vop->dev, "aclk_vop");
1227 if (IS_ERR(vop->aclk)) {
1228 dev_err(vop->dev, "failed to get aclk source\n");
1229 return PTR_ERR(vop->aclk);
1230 }
1231 vop->dclk = devm_clk_get(vop->dev, "dclk_vop");
1232 if (IS_ERR(vop->dclk)) {
1233 dev_err(vop->dev, "failed to get dclk source\n");
1234 return PTR_ERR(vop->dclk);
1235 }
1236
1237 ret = clk_prepare(vop->hclk);
1238 if (ret < 0) {
1239 dev_err(vop->dev, "failed to prepare hclk\n");
1240 return ret;
1241 }
1242
1243 ret = clk_prepare(vop->dclk);
1244 if (ret < 0) {
1245 dev_err(vop->dev, "failed to prepare dclk\n");
1246 goto err_unprepare_hclk;
1247 }
1248
1249 ret = clk_prepare(vop->aclk);
1250 if (ret < 0) {
1251 dev_err(vop->dev, "failed to prepare aclk\n");
1252 goto err_unprepare_dclk;
1253 }
1254
1255 /*
1256 * Enable hclk, so that we can configure the VOP registers.
1257 */
1258 ret = clk_enable(vop->hclk);
1259 if (ret < 0) {
1260 dev_err(vop->dev, "failed to enable hclk\n");
1261 goto err_unprepare_aclk;
1262 }
1263 /*
1264 * Do an ahb (hclk) reset to reset all VOP registers.
1265 */
1266 ahb_rst = devm_reset_control_get(vop->dev, "ahb");
1267 if (IS_ERR(ahb_rst)) {
1268 dev_err(vop->dev, "failed to get ahb reset\n");
1269 ret = PTR_ERR(ahb_rst);
1270 goto err_disable_hclk;
1271 }
1272 reset_control_assert(ahb_rst);
1273 usleep_range(10, 20);
1274 reset_control_deassert(ahb_rst);
1275
1276 memcpy(vop->regsbak, vop->regs, vop->len);
1277
1278 for (i = 0; i < vop_data->table_size; i++)
1279 vop_writel(vop, init_table[i].offset, init_table[i].value);
1280
1281 for (i = 0; i < vop_data->win_size; i++) {
1282 const struct vop_win_data *win = &vop_data->win[i];
1283
1284 VOP_WIN_SET(vop, win, enable, 0);
1285 }
1286
1287 vop_cfg_done(vop);
1288
1289 /*
1290 * Do a dclk reset so that all of the above configuration takes effect.
1291 */
1292 vop->dclk_rst = devm_reset_control_get(vop->dev, "dclk");
1293 if (IS_ERR(vop->dclk_rst)) {
1294 dev_err(vop->dev, "failed to get dclk reset\n");
1295 ret = PTR_ERR(vop->dclk_rst);
1296 goto err_unprepare_aclk;
1297 }
1298 reset_control_assert(vop->dclk_rst);
1299 usleep_range(10, 20);
1300 reset_control_deassert(vop->dclk_rst);
1301
1302 clk_disable(vop->hclk);
1303
1304 vop->dpms = DRM_MODE_DPMS_OFF;
1305
1306 return 0;
1307
1308err_disable_hclk:
1309 clk_disable(vop->hclk);
1310err_unprepare_aclk:
1311 clk_unprepare(vop->aclk);
1312err_unprepare_dclk:
1313 clk_unprepare(vop->dclk);
1314err_unprepare_hclk:
1315 clk_unprepare(vop->hclk);
1316 return ret;
1317}
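vop_initial() deliberately splits clk_prepare() (which may sleep and is done once here) from clk_enable()/clk_disable() (atomic-safe, toggled on every power transition in vop_enable()/vop_disable()). For callers that can always sleep, the clk API offers combined helpers; a minimal sketch of that alternative, with example_power_on() being a hypothetical name:

#include <linux/clk.h>
#include <linux/device.h>

/* Combined prepare+enable; only usable from contexts that may sleep. */
static int example_power_on(struct device *dev, struct clk *hclk)
{
	int ret = clk_prepare_enable(hclk);

	if (ret)
		dev_err(dev, "failed to turn hclk on: %d\n", ret);
	return ret;
}
/* ...and clk_disable_unprepare(hclk) on the way back down. */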
1318
1319/*
1320 * Initialize the vop->win array elements.
1321 */
1322static void vop_win_init(struct vop *vop)
1323{
1324 const struct vop_data *vop_data = vop->data;
1325 unsigned int i;
1326
1327 for (i = 0; i < vop_data->win_size; i++) {
1328 struct vop_win *vop_win = &vop->win[i];
1329 const struct vop_win_data *win_data = &vop_data->win[i];
1330
1331 vop_win->data = win_data;
1332 vop_win->vop = vop;
1333 INIT_LIST_HEAD(&vop_win->pending);
1334 }
1335}
1336
1337static int vop_bind(struct device *dev, struct device *master, void *data)
1338{
1339 struct platform_device *pdev = to_platform_device(dev);
1340 const struct of_device_id *of_id;
1341 const struct vop_data *vop_data;
1342 struct drm_device *drm_dev = data;
1343 struct vop *vop;
1344 struct resource *res;
1345 size_t alloc_size;
1346 int ret;
1347
1348 of_id = of_match_device(vop_driver_dt_match, dev);
1349 vop_data = of_id->data;
1350 if (!vop_data)
1351 return -ENODEV;
1352
1353 /* Allocate vop struct and its vop_win array */
1354 alloc_size = sizeof(*vop) + sizeof(*vop->win) * vop_data->win_size;
1355 vop = devm_kzalloc(dev, alloc_size, GFP_KERNEL);
1356 if (!vop)
1357 return -ENOMEM;
1358
1359 vop->dev = dev;
1360 vop->data = vop_data;
1361 vop->drm_dev = drm_dev;
1362 dev_set_drvdata(dev, vop);
1363
1364 vop_win_init(vop);
1365
1366 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1367 vop->regs = devm_ioremap_resource(dev, res); /* validates res first */
1368 if (IS_ERR(vop->regs))
1369 return PTR_ERR(vop->regs);
1370 vop->len = resource_size(res); /* safe: res was checked above */
1371
1372 vop->regsbak = devm_kzalloc(dev, vop->len, GFP_KERNEL);
1373 if (!vop->regsbak)
1374 return -ENOMEM;
1375
1376 ret = vop_initial(vop);
1377 if (ret < 0) {
1378 dev_err(&pdev->dev, "cannot initialize vop dev - err %d\n", ret);
1379 return ret;
1380 }
1381
1382 vop->irq = platform_get_irq(pdev, 0);
1383 if (vop->irq < 0) {
1384 dev_err(dev, "cannot find irq for vop\n");
1385 return vop->irq;
1386 }
1387
1388 spin_lock_init(&vop->reg_lock);
1389 spin_lock_init(&vop->irq_lock);
1390
1391 mutex_init(&vop->vsync_mutex);
1392
1393 ret = devm_request_threaded_irq(dev, vop->irq, vop_isr, vop_isr_thread,
1394 IRQF_SHARED, dev_name(dev), vop);
1395 if (ret)
1396 return ret;
1397
1398 /* The IRQ is initially disabled; it is enabled later in vop_enable() */
1399 disable_irq(vop->irq);
1400
1401 ret = vop_create_crtc(vop);
1402 if (ret)
1403 return ret;
1404
1405 pm_runtime_enable(&pdev->dev);
1406 return 0;
1407}
1408
1409static void vop_unbind(struct device *dev, struct device *master, void *data)
1410{
1411 struct vop *vop = dev_get_drvdata(dev);
1412
1413 pm_runtime_disable(dev);
1414 vop_destroy_crtc(vop);
1415}
1416
1417static const struct component_ops vop_component_ops = {
1418 .bind = vop_bind,
1419 .unbind = vop_unbind,
1420};
1421
1422static int vop_probe(struct platform_device *pdev)
1423{
1424 struct device *dev = &pdev->dev;
1425
1426 if (!dev->of_node) {
1427 dev_err(dev, "can't find vop device node\n");
1428 return -ENODEV;
1429 }
1430
1431 return component_add(dev, &vop_component_ops);
1432}
1433
1434static int vop_remove(struct platform_device *pdev)
1435{
1436 component_del(&pdev->dev, &vop_component_ops);
1437
1438 return 0;
1439}
1440
1441struct platform_driver vop_platform_driver = {
1442 .probe = vop_probe,
1443 .remove = vop_remove,
1444 .driver = {
1445 .name = "rockchip-vop",
1446 .owner = THIS_MODULE,
1447 .of_match_table = of_match_ptr(vop_driver_dt_match),
1448 },
1449};
1450
1451module_platform_driver(vop_platform_driver);
1452
1453MODULE_AUTHOR("Mark Yao <mark.yao@rock-chips.com>");
1454MODULE_DESCRIPTION("ROCKCHIP VOP Driver");
1455MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
new file mode 100644
index 000000000000..63e9b3a084c5
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -0,0 +1,201 @@
1/*
2 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
3 * Author:Mark Yao <mark.yao@rock-chips.com>
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#ifndef _ROCKCHIP_DRM_VOP_H
16#define _ROCKCHIP_DRM_VOP_H
17
18/* register definition */
19#define REG_CFG_DONE 0x0000
20#define VERSION_INFO 0x0004
21#define SYS_CTRL 0x0008
22#define SYS_CTRL1 0x000c
23#define DSP_CTRL0 0x0010
24#define DSP_CTRL1 0x0014
25#define DSP_BG 0x0018
26#define MCU_CTRL 0x001c
27#define INTR_CTRL0 0x0020
28#define INTR_CTRL1 0x0024
29#define WIN0_CTRL0 0x0030
30#define WIN0_CTRL1 0x0034
31#define WIN0_COLOR_KEY 0x0038
32#define WIN0_VIR 0x003c
33#define WIN0_YRGB_MST 0x0040
34#define WIN0_CBR_MST 0x0044
35#define WIN0_ACT_INFO 0x0048
36#define WIN0_DSP_INFO 0x004c
37#define WIN0_DSP_ST 0x0050
38#define WIN0_SCL_FACTOR_YRGB 0x0054
39#define WIN0_SCL_FACTOR_CBR 0x0058
40#define WIN0_SCL_OFFSET 0x005c
41#define WIN0_SRC_ALPHA_CTRL 0x0060
42#define WIN0_DST_ALPHA_CTRL 0x0064
43#define WIN0_FADING_CTRL 0x0068
44/* win1 register */
45#define WIN1_CTRL0 0x0070
46#define WIN1_CTRL1 0x0074
47#define WIN1_COLOR_KEY 0x0078
48#define WIN1_VIR 0x007c
49#define WIN1_YRGB_MST 0x0080
50#define WIN1_CBR_MST 0x0084
51#define WIN1_ACT_INFO 0x0088
52#define WIN1_DSP_INFO 0x008c
53#define WIN1_DSP_ST 0x0090
54#define WIN1_SCL_FACTOR_YRGB 0x0094
55#define WIN1_SCL_FACTOR_CBR 0x0098
56#define WIN1_SCL_OFFSET 0x009c
57#define WIN1_SRC_ALPHA_CTRL 0x00a0
58#define WIN1_DST_ALPHA_CTRL 0x00a4
59#define WIN1_FADING_CTRL 0x00a8
60/* win2 register */
61#define WIN2_CTRL0 0x00b0
62#define WIN2_CTRL1 0x00b4
63#define WIN2_VIR0_1 0x00b8
64#define WIN2_VIR2_3 0x00bc
65#define WIN2_MST0 0x00c0
66#define WIN2_DSP_INFO0 0x00c4
67#define WIN2_DSP_ST0 0x00c8
68#define WIN2_COLOR_KEY 0x00cc
69#define WIN2_MST1 0x00d0
70#define WIN2_DSP_INFO1 0x00d4
71#define WIN2_DSP_ST1 0x00d8
72#define WIN2_SRC_ALPHA_CTRL 0x00dc
73#define WIN2_MST2 0x00e0
74#define WIN2_DSP_INFO2 0x00e4
75#define WIN2_DSP_ST2 0x00e8
76#define WIN2_DST_ALPHA_CTRL 0x00ec
77#define WIN2_MST3 0x00f0
78#define WIN2_DSP_INFO3 0x00f4
79#define WIN2_DSP_ST3 0x00f8
80#define WIN2_FADING_CTRL 0x00fc
81/* win3 register */
82#define WIN3_CTRL0 0x0100
83#define WIN3_CTRL1 0x0104
84#define WIN3_VIR0_1 0x0108
85#define WIN3_VIR2_3 0x010c
86#define WIN3_MST0 0x0110
87#define WIN3_DSP_INFO0 0x0114
88#define WIN3_DSP_ST0 0x0118
89#define WIN3_COLOR_KEY 0x011c
90#define WIN3_MST1 0x0120
91#define WIN3_DSP_INFO1 0x0124
92#define WIN3_DSP_ST1 0x0128
93#define WIN3_SRC_ALPHA_CTRL 0x012c
94#define WIN3_MST2 0x0130
95#define WIN3_DSP_INFO2 0x0134
96#define WIN3_DSP_ST2 0x0138
97#define WIN3_DST_ALPHA_CTRL 0x013c
98#define WIN3_MST3 0x0140
99#define WIN3_DSP_INFO3 0x0144
100#define WIN3_DSP_ST3 0x0148
101#define WIN3_FADING_CTRL 0x014c
102/* hwc register */
103#define HWC_CTRL0 0x0150
104#define HWC_CTRL1 0x0154
105#define HWC_MST 0x0158
106#define HWC_DSP_ST 0x015c
107#define HWC_SRC_ALPHA_CTRL 0x0160
108#define HWC_DST_ALPHA_CTRL 0x0164
109#define HWC_FADING_CTRL 0x0168
110/* post process register */
111#define POST_DSP_HACT_INFO 0x0170
112#define POST_DSP_VACT_INFO 0x0174
113#define POST_SCL_FACTOR_YRGB 0x0178
114#define POST_SCL_CTRL 0x0180
115#define POST_DSP_VACT_INFO_F1 0x0184
116#define DSP_HTOTAL_HS_END 0x0188
117#define DSP_HACT_ST_END 0x018c
118#define DSP_VTOTAL_VS_END 0x0190
119#define DSP_VACT_ST_END 0x0194
120#define DSP_VS_ST_END_F1 0x0198
121#define DSP_VACT_ST_END_F1 0x019c
122/* register definition end */
123
124/* interrupt define */
125#define DSP_HOLD_VALID_INTR (1 << 0)
126#define FS_INTR (1 << 1)
127#define LINE_FLAG_INTR (1 << 2)
128#define BUS_ERROR_INTR (1 << 3)
129
130#define INTR_MASK (DSP_HOLD_VALID_INTR | FS_INTR | \
131 LINE_FLAG_INTR | BUS_ERROR_INTR)
132
133#define DSP_HOLD_VALID_INTR_EN(x) ((x) << 4)
134#define FS_INTR_EN(x) ((x) << 5)
135#define LINE_FLAG_INTR_EN(x) ((x) << 6)
136#define BUS_ERROR_INTR_EN(x) ((x) << 7)
137#define DSP_HOLD_VALID_INTR_MASK (1 << 4)
138#define FS_INTR_MASK (1 << 5)
139#define LINE_FLAG_INTR_MASK (1 << 6)
140#define BUS_ERROR_INTR_MASK (1 << 7)
141
142#define INTR_CLR_SHIFT 8
143#define DSP_HOLD_VALID_INTR_CLR (1 << (INTR_CLR_SHIFT + 0))
144#define FS_INTR_CLR (1 << (INTR_CLR_SHIFT + 1))
145#define LINE_FLAG_INTR_CLR (1 << (INTR_CLR_SHIFT + 2))
146#define BUS_ERROR_INTR_CLR (1 << (INTR_CLR_SHIFT + 3))
147
148#define DSP_LINE_NUM(x) (((x) & 0x1fff) << 12)
149#define DSP_LINE_NUM_MASK (0x1fff << 12)
150
151/* src alpha ctrl define */
152#define SRC_FADING_VALUE(x) (((x) & 0xff) << 24)
153#define SRC_GLOBAL_ALPHA(x) (((x) & 0xff) << 16)
154#define SRC_FACTOR_M0(x) (((x) & 0x7) << 6)
155#define SRC_ALPHA_CAL_M0(x) (((x) & 0x1) << 5)
156#define SRC_BLEND_M0(x) (((x) & 0x3) << 3)
157#define SRC_ALPHA_M0(x) (((x) & 0x1) << 2)
158#define SRC_COLOR_M0(x) (((x) & 0x1) << 1)
159#define SRC_ALPHA_EN(x) (((x) & 0x1) << 0)
160/* dst alpha ctrl define */
161#define DST_FACTOR_M0(x) (((x) & 0x7) << 6)
162
163/*
164 * display output interface supported by rockchip lcdc
165 */
166#define ROCKCHIP_OUT_MODE_P888 0
167#define ROCKCHIP_OUT_MODE_P666 1
168#define ROCKCHIP_OUT_MODE_P565 2
169/* for a special output interface */
170#define ROCKCHIP_OUT_MODE_AAAA 15
171
172enum alpha_mode {
173 ALPHA_STRAIGHT,
174 ALPHA_INVERSE,
175};
176
177enum global_blend_mode {
178 ALPHA_GLOBAL,
179 ALPHA_PER_PIX,
180 ALPHA_PER_PIX_GLOBAL,
181};
182
183enum alpha_cal_mode {
184 ALPHA_SATURATION,
185 ALPHA_NO_SATURATION,
186};
187
188enum color_mode {
189 ALPHA_SRC_PRE_MUL,
190 ALPHA_SRC_NO_PRE_MUL,
191};
192
193enum factor_mode {
194 ALPHA_ZERO,
195 ALPHA_ONE,
196 ALPHA_SRC,
197 ALPHA_SRC_INVERSE,
198 ALPHA_SRC_GLOBAL,
199};
200
201#endif /* _ROCKCHIP_DRM_VOP_H */
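Each alpha-control macro above masks and shifts a single field, so a complete register value is just the OR of the pieces. As a worked example, the per-pixel-alpha value that vop_update_plane_event() writes expands as follows, assuming the macros and enums above are in scope (field values follow from the enum declaration order):

#include <stdint.h>

static const uint32_t src_alpha_ctl =
	SRC_ALPHA_EN(1)				/* 1 << 0 = 0x01 */
	| SRC_COLOR_M0(ALPHA_SRC_PRE_MUL)	/* 0 << 1 = 0x00 */
	| SRC_ALPHA_M0(ALPHA_STRAIGHT)		/* 0 << 2 = 0x00 */
	| SRC_BLEND_M0(ALPHA_PER_PIX)		/* 1 << 3 = 0x08 */
	| SRC_ALPHA_CAL_M0(ALPHA_NO_SATURATION)	/* 1 << 5 = 0x20 */
	| SRC_FACTOR_M0(ALPHA_ONE);		/* 1 << 6 = 0x40 */
/* = 0x69, the value written to WINx_SRC_ALPHA_CTRL */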
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
index 0ddce4d046d9..859ccb658601 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_crtc.c
@@ -19,6 +19,7 @@
19#include <drm/drm_crtc_helper.h> 19#include <drm/drm_crtc_helper.h>
20#include <drm/drm_fb_cma_helper.h> 20#include <drm/drm_fb_cma_helper.h>
21#include <drm/drm_gem_cma_helper.h> 21#include <drm/drm_gem_cma_helper.h>
22#include <drm/drm_plane_helper.h>
22 23
23#include <video/sh_mobile_meram.h> 24#include <video/sh_mobile_meram.h>
24 25
diff --git a/drivers/gpu/drm/sti/Kconfig b/drivers/gpu/drm/sti/Kconfig
index ae8850f3e63b..d6d6b705b8c1 100644
--- a/drivers/gpu/drm/sti/Kconfig
+++ b/drivers/gpu/drm/sti/Kconfig
@@ -5,6 +5,7 @@ config DRM_STI
5 select DRM_KMS_HELPER 5 select DRM_KMS_HELPER
6 select DRM_GEM_CMA_HELPER 6 select DRM_GEM_CMA_HELPER
7 select DRM_KMS_CMA_HELPER 7 select DRM_KMS_CMA_HELPER
8 select FW_LOADER_USER_HELPER_FALLBACK
8 help 9 help
9 Choose this option to enable DRM on STM stiH41x chipset 10 Choose this option to enable DRM on STM stiH41x chipset
10 11
diff --git a/drivers/gpu/drm/sti/Makefile b/drivers/gpu/drm/sti/Makefile
index 04ac2ceef27f..6ba9d27c1b90 100644
--- a/drivers/gpu/drm/sti/Makefile
+++ b/drivers/gpu/drm/sti/Makefile
@@ -3,6 +3,7 @@ sticompositor-y := \
3 sti_mixer.o \ 3 sti_mixer.o \
4 sti_gdp.o \ 4 sti_gdp.o \
5 sti_vid.o \ 5 sti_vid.o \
6 sti_cursor.o \
6 sti_compositor.o \ 7 sti_compositor.o \
7 sti_drm_crtc.o \ 8 sti_drm_crtc.o \
8 sti_drm_plane.o 9 sti_drm_plane.o
@@ -18,4 +19,5 @@ obj-$(CONFIG_DRM_STI) = \
18 sti_hda.o \ 19 sti_hda.o \
19 sti_tvout.o \ 20 sti_tvout.o \
20 sticompositor.o \ 21 sticompositor.o \
21 sti_drm_drv.o \ No newline at end of file 22 sti_hqvdp.o \
23 sti_drm_drv.o
diff --git a/drivers/gpu/drm/sti/sti_compositor.c b/drivers/gpu/drm/sti/sti_compositor.c
index 9e31dfe154ed..43215d3020fb 100644
--- a/drivers/gpu/drm/sti/sti_compositor.c
+++ b/drivers/gpu/drm/sti/sti_compositor.c
@@ -24,14 +24,16 @@
24 * stiH407 compositor properties 24 * stiH407 compositor properties
25 */ 25 */
26struct sti_compositor_data stih407_compositor_data = { 26struct sti_compositor_data stih407_compositor_data = {
27 .nb_subdev = 6, 27 .nb_subdev = 8,
28 .subdev_desc = { 28 .subdev_desc = {
29 {STI_CURSOR_SUBDEV, (int)STI_CURSOR, 0x000},
29 {STI_GPD_SUBDEV, (int)STI_GDP_0, 0x100}, 30 {STI_GPD_SUBDEV, (int)STI_GDP_0, 0x100},
30 {STI_GPD_SUBDEV, (int)STI_GDP_1, 0x200}, 31 {STI_GPD_SUBDEV, (int)STI_GDP_1, 0x200},
31 {STI_GPD_SUBDEV, (int)STI_GDP_2, 0x300}, 32 {STI_GPD_SUBDEV, (int)STI_GDP_2, 0x300},
32 {STI_GPD_SUBDEV, (int)STI_GDP_3, 0x400}, 33 {STI_GPD_SUBDEV, (int)STI_GDP_3, 0x400},
33 {STI_VID_SUBDEV, (int)STI_VID_0, 0x700}, 34 {STI_VID_SUBDEV, (int)STI_VID_0, 0x700},
34 {STI_MIXER_MAIN_SUBDEV, STI_MIXER_MAIN, 0xC00} 35 {STI_MIXER_MAIN_SUBDEV, STI_MIXER_MAIN, 0xC00},
36 {STI_MIXER_AUX_SUBDEV, STI_MIXER_AUX, 0xD00},
35 }, 37 },
36}; 38};
37 39
@@ -67,11 +69,11 @@ static int sti_compositor_init_subdev(struct sti_compositor *compo,
67 break; 69 break;
68 case STI_GPD_SUBDEV: 70 case STI_GPD_SUBDEV:
69 case STI_VID_SUBDEV: 71 case STI_VID_SUBDEV:
72 case STI_CURSOR_SUBDEV:
70 compo->layer[layer_id++] = 73 compo->layer[layer_id++] =
71 sti_layer_create(compo->dev, desc[i].id, 74 sti_layer_create(compo->dev, desc[i].id,
72 compo->regs + desc[i].offset); 75 compo->regs + desc[i].offset);
73 break; 76 break;
74 /* case STI_CURSOR_SUBDEV : TODO */
75 default: 77 default:
76 DRM_ERROR("Unknow subdev compoment type\n"); 78 DRM_ERROR("Unknow subdev compoment type\n");
77 return 1; 79 return 1;
@@ -102,33 +104,35 @@ static int sti_compositor_bind(struct device *dev, struct device *master,
102 enum sti_layer_type type = desc & STI_LAYER_TYPE_MASK; 104 enum sti_layer_type type = desc & STI_LAYER_TYPE_MASK;
103 enum drm_plane_type plane_type = DRM_PLANE_TYPE_OVERLAY; 105 enum drm_plane_type plane_type = DRM_PLANE_TYPE_OVERLAY;
104 106
105 if (compo->mixer[crtc]) 107 if (crtc < compo->nb_mixers)
106 plane_type = DRM_PLANE_TYPE_PRIMARY; 108 plane_type = DRM_PLANE_TYPE_PRIMARY;
107 109
108 switch (type) { 110 switch (type) {
109 case STI_CUR: 111 case STI_CUR:
110 cursor = sti_drm_plane_init(drm_dev, 112 cursor = sti_drm_plane_init(drm_dev,
111 compo->layer[i], 113 compo->layer[i],
112 (1 << crtc) - 1, 114 1, DRM_PLANE_TYPE_CURSOR);
113 DRM_PLANE_TYPE_CURSOR);
114 break; 115 break;
115 case STI_GDP: 116 case STI_GDP:
116 case STI_VID: 117 case STI_VID:
117 primary = sti_drm_plane_init(drm_dev, 118 primary = sti_drm_plane_init(drm_dev,
118 compo->layer[i], 119 compo->layer[i],
119 (1 << crtc) - 1, plane_type); 120 (1 << compo->nb_mixers) - 1,
121 plane_type);
120 plane++; 122 plane++;
121 break; 123 break;
122 case STI_BCK: 124 case STI_BCK:
125 case STI_VDP:
123 break; 126 break;
124 } 127 }
125 128
126 /* The first planes are reserved for primary planes*/ 129 /* The first planes are reserved for primary planes*/
127 if (compo->mixer[crtc]) { 130 if (crtc < compo->nb_mixers && primary) {
128 sti_drm_crtc_init(drm_dev, compo->mixer[crtc], 131 sti_drm_crtc_init(drm_dev, compo->mixer[crtc],
129 primary, cursor); 132 primary, cursor);
130 crtc++; 133 crtc++;
131 cursor = NULL; 134 cursor = NULL;
135 primary = NULL;
132 } 136 }
133 } 137 }
134 } 138 }
diff --git a/drivers/gpu/drm/sti/sti_compositor.h b/drivers/gpu/drm/sti/sti_compositor.h
index 3ea19db72e0f..019eb44c62cc 100644
--- a/drivers/gpu/drm/sti/sti_compositor.h
+++ b/drivers/gpu/drm/sti/sti_compositor.h
@@ -64,7 +64,6 @@ struct sti_compositor_data {
64 * @layer: array of layers 64 * @layer: array of layers
65 * @nb_mixers: number of mixers for this compositor 65 * @nb_mixers: number of mixers for this compositor
66 * @nb_layers: number of layers (GDP,VID,...) for this compositor 66 * @nb_layers: number of layers (GDP,VID,...) for this compositor
67 * @enable: true if compositor is enable else false
68 * @vtg_vblank_nb: callback for VTG VSYNC notification 67 * @vtg_vblank_nb: callback for VTG VSYNC notification
69 */ 68 */
70struct sti_compositor { 69struct sti_compositor {
@@ -83,7 +82,6 @@ struct sti_compositor {
83 struct sti_layer *layer[STI_MAX_LAYER]; 82 struct sti_layer *layer[STI_MAX_LAYER];
84 int nb_mixers; 83 int nb_mixers;
85 int nb_layers; 84 int nb_layers;
86 bool enable;
87 struct notifier_block vtg_vblank_nb; 85 struct notifier_block vtg_vblank_nb;
88}; 86};
89 87
diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c
new file mode 100644
index 000000000000..010eaee60bf7
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_cursor.c
@@ -0,0 +1,242 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2014
3 * Authors: Vincent Abriou <vincent.abriou@st.com>
4 * Fabien Dessenne <fabien.dessenne@st.com>
5 * for STMicroelectronics.
6 * License terms: GNU General Public License (GPL), version 2
7 */
8#include <drm/drmP.h>
9
10#include "sti_cursor.h"
11#include "sti_layer.h"
12#include "sti_vtg.h"
13
14/* Registers */
15#define CUR_CTL 0x00
16#define CUR_VPO 0x0C
17#define CUR_PML 0x14
18#define CUR_PMP 0x18
19#define CUR_SIZE 0x1C
20#define CUR_CML 0x20
21#define CUR_AWS 0x28
22#define CUR_AWE 0x2C
23
24#define CUR_CTL_CLUT_UPDATE BIT(1)
25
26#define STI_CURS_MIN_SIZE 1
27#define STI_CURS_MAX_SIZE 128
28
29/*
30 * pixmap dma buffer structure
31 *
32 * @paddr: physical address
33 * @size: buffer size
34 * @base: virtual address
35 */
36struct dma_pixmap {
37 dma_addr_t paddr;
38 size_t size;
39 void *base;
40};
41
42/**
43 * STI Cursor structure
44 *
45 * @layer: layer structure
46 * @width: cursor width
47 * @height: cursor height
48 * @clut: color look up table
49 * @clut_paddr: color look up table physical address
50 * @pixmap: pixmap dma buffer (clut8-format cursor)
51 */
52struct sti_cursor {
53 struct sti_layer layer;
54 unsigned int width;
55 unsigned int height;
56 unsigned short *clut;
57 dma_addr_t clut_paddr;
58 struct dma_pixmap pixmap;
59};
60
61static const uint32_t cursor_supported_formats[] = {
62 DRM_FORMAT_ARGB8888,
63};
64
65#define to_sti_cursor(x) container_of(x, struct sti_cursor, layer)
66
67static const uint32_t *sti_cursor_get_formats(struct sti_layer *layer)
68{
69 return cursor_supported_formats;
70}
71
72static unsigned int sti_cursor_get_nb_formats(struct sti_layer *layer)
73{
74 return ARRAY_SIZE(cursor_supported_formats);
75}
76
77static void sti_cursor_argb8888_to_clut8(struct sti_layer *layer)
78{
79 struct sti_cursor *cursor = to_sti_cursor(layer);
80 u32 *src = layer->vaddr;
81 u8 *dst = cursor->pixmap.base;
82 unsigned int i, j;
83 u32 a, r, g, b;
84
85 for (i = 0; i < cursor->height; i++) {
86 for (j = 0; j < cursor->width; j++) {
87 /* Pick the two most-significant bits of each component */
88 a = (*src >> 30) & 3;
89 r = (*src >> 22) & 3;
90 g = (*src >> 14) & 3;
91 b = (*src >> 6) & 3;
92 *dst = a << 6 | r << 4 | g << 2 | b;
93 src++;
94 dst++;
95 }
96 }
97}
98
99static int sti_cursor_prepare_layer(struct sti_layer *layer, bool first_prepare)
100{
101 struct sti_cursor *cursor = to_sti_cursor(layer);
102 struct drm_display_mode *mode = layer->mode;
103 u32 y, x;
104 u32 val;
105
106 DRM_DEBUG_DRIVER("\n");
107
108 dev_dbg(layer->dev, "%s %s\n", __func__, sti_layer_to_str(layer));
109
110 if (layer->src_w < STI_CURS_MIN_SIZE ||
111 layer->src_h < STI_CURS_MIN_SIZE ||
112 layer->src_w > STI_CURS_MAX_SIZE ||
113 layer->src_h > STI_CURS_MAX_SIZE) {
114 DRM_ERROR("Invalid cursor size (%dx%d)\n",
115 layer->src_w, layer->src_h);
116 return -EINVAL;
117 }
118
119 /* If the cursor size has changed, re-allocate the pixmap */
120 if (!cursor->pixmap.base ||
121 (cursor->width != layer->src_w) ||
122 (cursor->height != layer->src_h)) {
123 cursor->width = layer->src_w;
124 cursor->height = layer->src_h;
125
126 if (cursor->pixmap.base)
127 dma_free_writecombine(layer->dev,
128 cursor->pixmap.size,
129 cursor->pixmap.base,
130 cursor->pixmap.paddr);
131
132 cursor->pixmap.size = cursor->width * cursor->height;
133
134 cursor->pixmap.base = dma_alloc_writecombine(layer->dev,
135 cursor->pixmap.size,
136 &cursor->pixmap.paddr,
137 GFP_KERNEL | GFP_DMA);
138 if (!cursor->pixmap.base) {
139 DRM_ERROR("Failed to allocate memory for pixmap\n");
140 return -ENOMEM;
141 }
142 }
143
144 /* Convert ARGB8888 to CLUT8 */
145 sti_cursor_argb8888_to_clut8(layer);
146
147 /* AWS and AWE depend on the mode */
148 y = sti_vtg_get_line_number(*mode, 0);
149 x = sti_vtg_get_pixel_number(*mode, 0);
150 val = y << 16 | x;
151 writel(val, layer->regs + CUR_AWS);
152 y = sti_vtg_get_line_number(*mode, mode->vdisplay - 1);
153 x = sti_vtg_get_pixel_number(*mode, mode->hdisplay - 1);
154 val = y << 16 | x;
155 writel(val, layer->regs + CUR_AWE);
156
157 if (first_prepare) {
158 /* Set and fetch CLUT */
159 writel(cursor->clut_paddr, layer->regs + CUR_CML);
160 writel(CUR_CTL_CLUT_UPDATE, layer->regs + CUR_CTL);
161 }
162
163 return 0;
164}
165
166static int sti_cursor_commit_layer(struct sti_layer *layer)
167{
168 struct sti_cursor *cursor = to_sti_cursor(layer);
169 struct drm_display_mode *mode = layer->mode;
170 u32 ydo, xdo;
171
172 dev_dbg(layer->dev, "%s %s\n", __func__, sti_layer_to_str(layer));
173
174 /* Set memory location, size, and position */
175 writel(cursor->pixmap.paddr, layer->regs + CUR_PML);
176 writel(cursor->width, layer->regs + CUR_PMP);
177 writel(cursor->height << 16 | cursor->width, layer->regs + CUR_SIZE);
178
179 ydo = sti_vtg_get_line_number(*mode, layer->dst_y);
180 xdo = sti_vtg_get_pixel_number(*mode, layer->dst_x);
181 writel((ydo << 16) | xdo, layer->regs + CUR_VPO);
182
183 return 0;
184}
185
186static int sti_cursor_disable_layer(struct sti_layer *layer)
187{
188 return 0;
189}
190
191static void sti_cursor_init(struct sti_layer *layer)
192{
193 struct sti_cursor *cursor = to_sti_cursor(layer);
194 unsigned short *base = cursor->clut;
195 unsigned int a, r, g, b;
196
197 /* Assign CLUT values, ARGB444 format */
198 for (a = 0; a < 4; a++)
199 for (r = 0; r < 4; r++)
200 for (g = 0; g < 4; g++)
201 for (b = 0; b < 4; b++)
202 *base++ = (a * 5) << 12 |
203 (r * 5) << 8 |
204 (g * 5) << 4 |
205 (b * 5);
206}
207
208static const struct sti_layer_funcs cursor_ops = {
209 .get_formats = sti_cursor_get_formats,
210 .get_nb_formats = sti_cursor_get_nb_formats,
211 .init = sti_cursor_init,
212 .prepare = sti_cursor_prepare_layer,
213 .commit = sti_cursor_commit_layer,
214 .disable = sti_cursor_disable_layer,
215};
216
217struct sti_layer *sti_cursor_create(struct device *dev)
218{
219 struct sti_cursor *cursor;
220
221 cursor = devm_kzalloc(dev, sizeof(*cursor), GFP_KERNEL);
222 if (!cursor) {
223 DRM_ERROR("Failed to allocate memory for cursor\n");
224 return NULL;
225 }
226
227 /* Allocate clut buffer */
228 cursor->clut = dma_alloc_writecombine(dev,
229 0x100 * sizeof(unsigned short),
230 &cursor->clut_paddr,
231 GFP_KERNEL | GFP_DMA);
232
233 if (!cursor->clut) {
234 DRM_ERROR("Failed to allocate memory for cursor clut\n");
235 devm_kfree(dev, cursor);
236 return NULL;
237 }
238
239 cursor->layer.ops = &cursor_ops;
240
241 return (struct sti_layer *)cursor;
242}
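The cursor path keeps only the top two bits of each ARGB8888 channel and lets the CLUT widen them back to ARGB4444; multiplying the 2-bit index by 5 spreads 0..3 evenly across the 4-bit range as 0, 5, 10, 15. A worked example with a hypothetical pixel value:

#include <stdint.h>

static void cursor_pixel_example(void)
{
	uint32_t px = 0xFF2080C0;	/* A=0xFF, R=0x20, G=0x80, B=0xC0 */

	uint8_t a = (px >> 30) & 3;	/* 3 */
	uint8_t r = (px >> 22) & 3;	/* 0 */
	uint8_t g = (px >> 14) & 3;	/* 2 */
	uint8_t b = (px >>  6) & 3;	/* 3 */

	uint8_t  clut8    = a << 6 | r << 4 | g << 2 | b;	/* 0xCB: pixmap byte */
	uint16_t argb4444 = (a * 5) << 12 | (r * 5) << 8 |
			    (g * 5) << 4  | (b * 5);		/* 0xF0AF: CLUT entry */

	(void)clut8;
	(void)argb4444;
}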
diff --git a/drivers/gpu/drm/sti/sti_cursor.h b/drivers/gpu/drm/sti/sti_cursor.h
new file mode 100644
index 000000000000..3c9827404f27
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_cursor.h
@@ -0,0 +1,12 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2013
3 * Authors: Vincent Abriou <vincent.abriou@st.com> for STMicroelectronics.
4 * License terms: GNU General Public License (GPL), version 2
5 */
6
7#ifndef _STI_CURSOR_H_
8#define _STI_CURSOR_H_
9
10struct sti_layer *sti_cursor_create(struct device *dev);
11
12#endif
diff --git a/drivers/gpu/drm/sti/sti_drm_crtc.c b/drivers/gpu/drm/sti/sti_drm_crtc.c
index d2ae0c0e13be..4c651c200f20 100644
--- a/drivers/gpu/drm/sti/sti_drm_crtc.c
+++ b/drivers/gpu/drm/sti/sti_drm_crtc.c
@@ -10,6 +10,7 @@
10 10
11#include <drm/drmP.h> 11#include <drm/drmP.h>
12#include <drm/drm_crtc_helper.h> 12#include <drm/drm_crtc_helper.h>
13#include <drm/drm_plane_helper.h>
13 14
14#include "sti_compositor.h" 15#include "sti_compositor.h"
15#include "sti_drm_drv.h" 16#include "sti_drm_drv.h"
@@ -27,7 +28,7 @@ static void sti_drm_crtc_prepare(struct drm_crtc *crtc)
27 struct device *dev = mixer->dev; 28 struct device *dev = mixer->dev;
28 struct sti_compositor *compo = dev_get_drvdata(dev); 29 struct sti_compositor *compo = dev_get_drvdata(dev);
29 30
30 compo->enable = true; 31 mixer->enabled = true;
31 32
32 /* Prepare and enable the compo IP clock */ 33 /* Prepare and enable the compo IP clock */
33 if (mixer->id == STI_MIXER_MAIN) { 34 if (mixer->id == STI_MIXER_MAIN) {
@@ -37,6 +38,8 @@ static void sti_drm_crtc_prepare(struct drm_crtc *crtc)
37 if (clk_prepare_enable(compo->clk_compo_aux)) 38 if (clk_prepare_enable(compo->clk_compo_aux))
38 DRM_INFO("Failed to prepare/enable compo_aux clk\n"); 39 DRM_INFO("Failed to prepare/enable compo_aux clk\n");
39 } 40 }
41
42 sti_mixer_clear_all_layers(mixer);
40} 43}
41 44
42static void sti_drm_crtc_commit(struct drm_crtc *crtc) 45static void sti_drm_crtc_commit(struct drm_crtc *crtc)
@@ -61,6 +64,8 @@ static void sti_drm_crtc_commit(struct drm_crtc *crtc)
61 /* Enable layer on mixer */ 64 /* Enable layer on mixer */
62 if (sti_mixer_set_layer_status(mixer, layer, true)) 65 if (sti_mixer_set_layer_status(mixer, layer, true))
63 DRM_ERROR("Can not enable layer at mixer\n"); 66 DRM_ERROR("Can not enable layer at mixer\n");
67
68 drm_crtc_vblank_on(crtc);
64} 69}
65 70
66static bool sti_drm_crtc_mode_fixup(struct drm_crtc *crtc, 71static bool sti_drm_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -143,7 +148,8 @@ sti_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
143 w = crtc->primary->fb->width - x; 148 w = crtc->primary->fb->width - x;
144 h = crtc->primary->fb->height - y; 149 h = crtc->primary->fb->height - y;
145 150
146 return sti_layer_prepare(layer, crtc->primary->fb, &crtc->mode, 151 return sti_layer_prepare(layer, crtc,
152 crtc->primary->fb, &crtc->mode,
147 mixer->id, 0, 0, w, h, x, y, w, h); 153 mixer->id, 0, 0, w, h, x, y, w, h);
148} 154}
149 155
@@ -170,7 +176,8 @@ static int sti_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
170 w = crtc->primary->fb->width - crtc->x; 176 w = crtc->primary->fb->width - crtc->x;
171 h = crtc->primary->fb->height - crtc->y; 177 h = crtc->primary->fb->height - crtc->y;
172 178
173 ret = sti_layer_prepare(layer, crtc->primary->fb, &crtc->mode, 179 ret = sti_layer_prepare(layer, crtc,
180 crtc->primary->fb, &crtc->mode,
174 mixer->id, 0, 0, w, h, 181 mixer->id, 0, 0, w, h,
175 crtc->x, crtc->y, w, h); 182 crtc->x, crtc->y, w, h);
176 if (ret) { 183 if (ret) {
@@ -195,7 +202,7 @@ static void sti_drm_crtc_disable(struct drm_crtc *crtc)
195 struct sti_compositor *compo = dev_get_drvdata(dev); 202 struct sti_compositor *compo = dev_get_drvdata(dev);
196 struct sti_layer *layer; 203 struct sti_layer *layer;
197 204
198 if (!compo->enable) 205 if (!mixer->enabled)
199 return; 206 return;
200 207
201 DRM_DEBUG_KMS("CRTC:%d (%s)\n", crtc->base.id, sti_mixer_to_str(mixer)); 208 DRM_DEBUG_KMS("CRTC:%d (%s)\n", crtc->base.id, sti_mixer_to_str(mixer));
@@ -221,7 +228,7 @@ static void sti_drm_crtc_disable(struct drm_crtc *crtc)
221 /* Then disable layer itself */ 228 /* Then disable layer itself */
222 sti_layer_disable(layer); 229 sti_layer_disable(layer);
223 230
224 drm_vblank_off(crtc->dev, mixer->id); 231 drm_crtc_vblank_off(crtc);
225 232
226 /* Disable pixel clock and compo IP clocks */ 233 /* Disable pixel clock and compo IP clocks */
227 if (mixer->id == STI_MIXER_MAIN) { 234 if (mixer->id == STI_MIXER_MAIN) {
@@ -232,7 +239,7 @@ static void sti_drm_crtc_disable(struct drm_crtc *crtc)
232 clk_disable_unprepare(compo->clk_compo_aux); 239 clk_disable_unprepare(compo->clk_compo_aux);
233 } 240 }
234 241
235 compo->enable = false; 242 mixer->enabled = false;
236} 243}
237 244
238static struct drm_crtc_helper_funcs sti_crtc_helper_funcs = { 245static struct drm_crtc_helper_funcs sti_crtc_helper_funcs = {
@@ -363,7 +370,6 @@ void sti_drm_crtc_disable_vblank(struct drm_device *dev, int crtc)
363 struct sti_drm_private *priv = dev->dev_private; 370 struct sti_drm_private *priv = dev->dev_private;
364 struct sti_compositor *compo = priv->compo; 371 struct sti_compositor *compo = priv->compo;
365 struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb; 372 struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb;
366 unsigned long flags;
367 373
368 DRM_DEBUG_DRIVER("\n"); 374 DRM_DEBUG_DRIVER("\n");
369 375
@@ -372,13 +378,10 @@ void sti_drm_crtc_disable_vblank(struct drm_device *dev, int crtc)
372 DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n"); 378 DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
373 379
374 /* free the resources of the pending requests */ 380 /* free the resources of the pending requests */
375 spin_lock_irqsave(&dev->event_lock, flags);
376 if (compo->mixer[crtc]->pending_event) { 381 if (compo->mixer[crtc]->pending_event) {
377 drm_vblank_put(dev, crtc); 382 drm_vblank_put(dev, crtc);
378 compo->mixer[crtc]->pending_event = NULL; 383 compo->mixer[crtc]->pending_event = NULL;
379 } 384 }
380 spin_unlock_irqrestore(&dev->event_lock, flags);
381
382} 385}
383EXPORT_SYMBOL(sti_drm_crtc_disable_vblank); 386EXPORT_SYMBOL(sti_drm_crtc_disable_vblank);
384 387
@@ -398,6 +401,7 @@ bool sti_drm_crtc_is_main(struct drm_crtc *crtc)
398 401
399 return false; 402 return false;
400} 403}
404EXPORT_SYMBOL(sti_drm_crtc_is_main);
401 405
402int sti_drm_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer, 406int sti_drm_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
403 struct drm_plane *primary, struct drm_plane *cursor) 407 struct drm_plane *primary, struct drm_plane *cursor)
diff --git a/drivers/gpu/drm/sti/sti_drm_drv.c b/drivers/gpu/drm/sti/sti_drm_drv.c
index 8e64220e8796..5239fa121726 100644
--- a/drivers/gpu/drm/sti/sti_drm_drv.c
+++ b/drivers/gpu/drm/sti/sti_drm_drv.c
@@ -67,8 +67,12 @@ static int sti_drm_load(struct drm_device *dev, unsigned long flags)
67 sti_drm_mode_config_init(dev); 67 sti_drm_mode_config_init(dev);
68 68
69 ret = component_bind_all(dev->dev, dev); 69 ret = component_bind_all(dev->dev, dev);
70 if (ret) 70 if (ret) {
71 drm_kms_helper_poll_fini(dev);
72 drm_mode_config_cleanup(dev);
73 kfree(private);
71 return ret; 74 return ret;
75 }
72 76
73 drm_helper_disable_unused_functions(dev); 77 drm_helper_disable_unused_functions(dev);
74 78
diff --git a/drivers/gpu/drm/sti/sti_drm_plane.c b/drivers/gpu/drm/sti/sti_drm_plane.c
index f4118d4cac22..bb6a29339e10 100644
--- a/drivers/gpu/drm/sti/sti_drm_plane.c
+++ b/drivers/gpu/drm/sti/sti_drm_plane.c
@@ -45,7 +45,8 @@ sti_drm_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
45 } 45 }
46 46
47 /* src_x are in 16.16 format. */ 47 /* src_x are in 16.16 format. */
48 res = sti_layer_prepare(layer, fb, &crtc->mode, mixer->id, 48 res = sti_layer_prepare(layer, crtc, fb,
49 &crtc->mode, mixer->id,
49 crtc_x, crtc_y, crtc_w, crtc_h, 50 crtc_x, crtc_y, crtc_w, crtc_h,
50 src_x >> 16, src_y >> 16, 51 src_x >> 16, src_y >> 16,
51 src_w >> 16, src_h >> 16); 52 src_w >> 16, src_h >> 16);
@@ -193,3 +194,4 @@ struct drm_plane *sti_drm_plane_init(struct drm_device *dev,
193 194
194 return &layer->plane; 195 return &layer->plane;
195} 196}
197EXPORT_SYMBOL(sti_drm_plane_init);
diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c
index 4e30b74559f5..32448d1d1e8f 100644
--- a/drivers/gpu/drm/sti/sti_gdp.c
+++ b/drivers/gpu/drm/sti/sti_gdp.c
@@ -73,7 +73,9 @@ struct sti_gdp_node {
73 73
74struct sti_gdp_node_list { 74struct sti_gdp_node_list {
75 struct sti_gdp_node *top_field; 75 struct sti_gdp_node *top_field;
76 dma_addr_t top_field_paddr;
76 struct sti_gdp_node *btm_field; 77 struct sti_gdp_node *btm_field;
78 dma_addr_t btm_field_paddr;
77}; 79};
78 80
79/** 81/**
@@ -81,6 +83,8 @@ struct sti_gdp_node_list {
81 * 83 *
82 * @layer: layer structure 84 * @layer: layer structure
83 * @clk_pix: pixel clock for the current gdp 85 * @clk_pix: pixel clock for the current gdp
86 * @clk_main_parent: gdp parent clock if main path used
87 * @clk_aux_parent: gdp parent clock if aux path used
84 * @vtg_field_nb: callback for VTG FIELD (top or bottom) notification 88 * @vtg_field_nb: callback for VTG FIELD (top or bottom) notification
85 * @is_curr_top: true if the current node processed is the top field 89 * @is_curr_top: true if the current node processed is the top field
86 * @node_list: array of node list 90 * @node_list: array of node list
@@ -88,6 +92,8 @@ struct sti_gdp_node_list {
88struct sti_gdp { 92struct sti_gdp {
89 struct sti_layer layer; 93 struct sti_layer layer;
90 struct clk *clk_pix; 94 struct clk *clk_pix;
95 struct clk *clk_main_parent;
96 struct clk *clk_aux_parent;
91 struct notifier_block vtg_field_nb; 97 struct notifier_block vtg_field_nb;
92 bool is_curr_top; 98 bool is_curr_top;
93 struct sti_gdp_node_list node_list[GDP_NODE_NB_BANK]; 99 struct sti_gdp_node_list node_list[GDP_NODE_NB_BANK];
@@ -168,7 +174,6 @@ static int sti_gdp_get_alpharange(int format)
168static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_layer *layer) 174static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_layer *layer)
169{ 175{
170 int hw_nvn; 176 int hw_nvn;
171 void *virt_nvn;
172 struct sti_gdp *gdp = to_sti_gdp(layer); 177 struct sti_gdp *gdp = to_sti_gdp(layer);
173 unsigned int i; 178 unsigned int i;
174 179
@@ -176,11 +181,9 @@ static struct sti_gdp_node_list *sti_gdp_get_free_nodes(struct sti_layer *layer)
176 if (!hw_nvn) 181 if (!hw_nvn)
177 goto end; 182 goto end;
178 183
179 virt_nvn = dma_to_virt(layer->dev, (dma_addr_t) hw_nvn);
180
181 for (i = 0; i < GDP_NODE_NB_BANK; i++) 184 for (i = 0; i < GDP_NODE_NB_BANK; i++)
182 if ((virt_nvn != gdp->node_list[i].btm_field) && 185 if ((hw_nvn != gdp->node_list[i].btm_field_paddr) &&
183 (virt_nvn != gdp->node_list[i].top_field)) 186 (hw_nvn != gdp->node_list[i].top_field_paddr))
184 return &gdp->node_list[i]; 187 return &gdp->node_list[i];
185 188
186 /* in hazardious cases restart with the first node */ 189 /* in hazardious cases restart with the first node */
@@ -204,7 +207,6 @@ static
204struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_layer *layer) 207struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_layer *layer)
205{ 208{
206 int hw_nvn; 209 int hw_nvn;
207 void *virt_nvn;
208 struct sti_gdp *gdp = to_sti_gdp(layer); 210 struct sti_gdp *gdp = to_sti_gdp(layer);
209 unsigned int i; 211 unsigned int i;
210 212
@@ -212,11 +214,9 @@ struct sti_gdp_node_list *sti_gdp_get_current_nodes(struct sti_layer *layer)
212 if (!hw_nvn) 214 if (!hw_nvn)
213 goto end; 215 goto end;
214 216
215 virt_nvn = dma_to_virt(layer->dev, (dma_addr_t) hw_nvn);
216
217 for (i = 0; i < GDP_NODE_NB_BANK; i++) 217 for (i = 0; i < GDP_NODE_NB_BANK; i++)
218 if ((virt_nvn == gdp->node_list[i].btm_field) || 218 if ((hw_nvn == gdp->node_list[i].btm_field_paddr) ||
219 (virt_nvn == gdp->node_list[i].top_field)) 219 (hw_nvn == gdp->node_list[i].top_field_paddr))
220 return &gdp->node_list[i]; 220 return &gdp->node_list[i];
221 221
222end: 222end:
@@ -292,8 +292,8 @@ static int sti_gdp_prepare_layer(struct sti_layer *layer, bool first_prepare)
292 292
293 /* Same content and chained together */ 293 /* Same content and chained together */
294 memcpy(btm_field, top_field, sizeof(*btm_field)); 294 memcpy(btm_field, top_field, sizeof(*btm_field));
295 top_field->gam_gdp_nvn = virt_to_dma(dev, btm_field); 295 top_field->gam_gdp_nvn = list->btm_field_paddr;
296 btm_field->gam_gdp_nvn = virt_to_dma(dev, top_field); 296 btm_field->gam_gdp_nvn = list->top_field_paddr;
297 297
298 /* Interlaced mode */ 298 /* Interlaced mode */
299 if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE) 299 if (layer->mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -311,6 +311,17 @@ static int sti_gdp_prepare_layer(struct sti_layer *layer, bool first_prepare)
311 311
312 /* Set and enable gdp clock */ 312 /* Set and enable gdp clock */
313 if (gdp->clk_pix) { 313 if (gdp->clk_pix) {
314 struct clk *clkp;
315	/* Depending on the mixer used, the gdp pixel clock
316	 * should have a different parent clock. */
317 if (layer->mixer_id == STI_MIXER_MAIN)
318 clkp = gdp->clk_main_parent;
319 else
320 clkp = gdp->clk_aux_parent;
321
322 if (clkp)
323 clk_set_parent(gdp->clk_pix, clkp);
324
314 res = clk_set_rate(gdp->clk_pix, rate); 325 res = clk_set_rate(gdp->clk_pix, rate);
315 if (res < 0) { 326 if (res < 0) {
316 DRM_ERROR("Cannot set rate (%dHz) for gdp\n", 327 DRM_ERROR("Cannot set rate (%dHz) for gdp\n",
@@ -349,8 +360,8 @@ static int sti_gdp_commit_layer(struct sti_layer *layer)
349 struct sti_gdp_node *updated_top_node = updated_list->top_field; 360 struct sti_gdp_node *updated_top_node = updated_list->top_field;
350 struct sti_gdp_node *updated_btm_node = updated_list->btm_field; 361 struct sti_gdp_node *updated_btm_node = updated_list->btm_field;
351 struct sti_gdp *gdp = to_sti_gdp(layer); 362 struct sti_gdp *gdp = to_sti_gdp(layer);
352 u32 dma_updated_top = virt_to_dma(layer->dev, updated_top_node); 363 u32 dma_updated_top = updated_list->top_field_paddr;
353 u32 dma_updated_btm = virt_to_dma(layer->dev, updated_btm_node); 364 u32 dma_updated_btm = updated_list->btm_field_paddr;
354 struct sti_gdp_node_list *curr_list = sti_gdp_get_current_nodes(layer); 365 struct sti_gdp_node_list *curr_list = sti_gdp_get_current_nodes(layer);
355 366
356 dev_dbg(layer->dev, "%s %s top/btm_node:0x%p/0x%p\n", __func__, 367 dev_dbg(layer->dev, "%s %s top/btm_node:0x%p/0x%p\n", __func__,
@@ -461,16 +472,16 @@ static void sti_gdp_init(struct sti_layer *layer)
461{ 472{
462 struct sti_gdp *gdp = to_sti_gdp(layer); 473 struct sti_gdp *gdp = to_sti_gdp(layer);
463 struct device_node *np = layer->dev->of_node; 474 struct device_node *np = layer->dev->of_node;
464 dma_addr_t dma; 475 dma_addr_t dma_addr;
465 void *base; 476 void *base;
466 unsigned int i, size; 477 unsigned int i, size;
467 478
468 /* Allocate all the nodes within a single memory page */ 479 /* Allocate all the nodes within a single memory page */
469 size = sizeof(struct sti_gdp_node) * 480 size = sizeof(struct sti_gdp_node) *
470 GDP_NODE_PER_FIELD * GDP_NODE_NB_BANK; 481 GDP_NODE_PER_FIELD * GDP_NODE_NB_BANK;
471
472 base = dma_alloc_writecombine(layer->dev, 482 base = dma_alloc_writecombine(layer->dev,
473 size, &dma, GFP_KERNEL | GFP_DMA); 483 size, &dma_addr, GFP_KERNEL | GFP_DMA);
484
474 if (!base) { 485 if (!base) {
475 DRM_ERROR("Failed to allocate memory for GDP node\n"); 486 DRM_ERROR("Failed to allocate memory for GDP node\n");
476 return; 487 return;
@@ -478,21 +489,26 @@ static void sti_gdp_init(struct sti_layer *layer)
478 memset(base, 0, size); 489 memset(base, 0, size);
479 490
480 for (i = 0; i < GDP_NODE_NB_BANK; i++) { 491 for (i = 0; i < GDP_NODE_NB_BANK; i++) {
481 if (virt_to_dma(layer->dev, base) & 0xF) { 492 if (dma_addr & 0xF) {
482 DRM_ERROR("Mem alignment failed\n"); 493 DRM_ERROR("Mem alignment failed\n");
483 return; 494 return;
484 } 495 }
485 gdp->node_list[i].top_field = base; 496 gdp->node_list[i].top_field = base;
497 gdp->node_list[i].top_field_paddr = dma_addr;
498
486 DRM_DEBUG_DRIVER("node[%d].top_field=%p\n", i, base); 499 DRM_DEBUG_DRIVER("node[%d].top_field=%p\n", i, base);
487 base += sizeof(struct sti_gdp_node); 500 base += sizeof(struct sti_gdp_node);
501 dma_addr += sizeof(struct sti_gdp_node);
488 502
489 if (virt_to_dma(layer->dev, base) & 0xF) { 503 if (dma_addr & 0xF) {
490 DRM_ERROR("Mem alignment failed\n"); 504 DRM_ERROR("Mem alignment failed\n");
491 return; 505 return;
492 } 506 }
493 gdp->node_list[i].btm_field = base; 507 gdp->node_list[i].btm_field = base;
508 gdp->node_list[i].btm_field_paddr = dma_addr;
494 DRM_DEBUG_DRIVER("node[%d].btm_field=%p\n", i, base); 509 DRM_DEBUG_DRIVER("node[%d].btm_field=%p\n", i, base);
495 base += sizeof(struct sti_gdp_node); 510 base += sizeof(struct sti_gdp_node);
511 dma_addr += sizeof(struct sti_gdp_node);
496 } 512 }
497 513
498 if (of_device_is_compatible(np, "st,stih407-compositor")) { 514 if (of_device_is_compatible(np, "st,stih407-compositor")) {
@@ -520,6 +536,14 @@ static void sti_gdp_init(struct sti_layer *layer)
520 gdp->clk_pix = devm_clk_get(layer->dev, clk_name); 536 gdp->clk_pix = devm_clk_get(layer->dev, clk_name);
521 if (IS_ERR(gdp->clk_pix)) 537 if (IS_ERR(gdp->clk_pix))
522 DRM_ERROR("Cannot get %s clock\n", clk_name); 538 DRM_ERROR("Cannot get %s clock\n", clk_name);
539
540 gdp->clk_main_parent = devm_clk_get(layer->dev, "main_parent");
541 if (IS_ERR(gdp->clk_main_parent))
542 DRM_ERROR("Cannot get main_parent clock\n");
543
544 gdp->clk_aux_parent = devm_clk_get(layer->dev, "aux_parent");
545 if (IS_ERR(gdp->clk_aux_parent))
546 DRM_ERROR("Cannot get aux_parent clock\n");
523 } 547 }
524} 548}
525 549
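The sti_gdp.c changes above stop translating between CPU and DMA addresses with dma_to_virt()/virt_to_dma() — helpers that are not implemented on every platform — and instead record each node's DMA address next to its CPU pointer when the pool is carved up. A minimal sketch of that paired-address pattern (struct node_slot and fill_node_slots() are hypothetical names; the lockstep walk is the same as in sti_gdp_init() above):

#include <linux/dma-mapping.h>

struct node_slot {
	struct sti_gdp_node *cpu;	/* CPU pointer: used to fill the node */
	dma_addr_t paddr;		/* bus address: programmed into the HW */
};

/* Carve one contiguous DMA allocation into fixed-size nodes, keeping the
 * CPU and DMA addresses in lockstep so no translation is needed later. */
static void fill_node_slots(struct node_slot *slots, unsigned int nb,
			    void *base, dma_addr_t dma_addr)
{
	unsigned int i;

	for (i = 0; i < nb; i++) {
		slots[i].cpu = base;
		slots[i].paddr = dma_addr;
		base += sizeof(struct sti_gdp_node);
		dma_addr += sizeof(struct sti_gdp_node);
	}
}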
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index b22968c08d1f..d032e024b0b8 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -130,8 +130,7 @@ static irqreturn_t hdmi_irq_thread(int irq, void *arg)
130 130
131 /* Hot plug/unplug IRQ */ 131 /* Hot plug/unplug IRQ */
132 if (hdmi->irq_status & HDMI_INT_HOT_PLUG) { 132 if (hdmi->irq_status & HDMI_INT_HOT_PLUG) {
133 /* read gpio to get the status */ 133 hdmi->hpd = readl(hdmi->regs + HDMI_STA) & HDMI_STA_HOT_PLUG;
134 hdmi->hpd = gpio_get_value(hdmi->hpd_gpio);
135 if (hdmi->drm_dev) 134 if (hdmi->drm_dev)
136 drm_helper_hpd_irq_event(hdmi->drm_dev); 135 drm_helper_hpd_irq_event(hdmi->drm_dev);
137 } 136 }
@@ -273,31 +272,32 @@ static int hdmi_avi_infoframe_config(struct sti_hdmi *hdmi)
273 hdmi_write(hdmi, val, HDMI_SW_DI_CFG); 272 hdmi_write(hdmi, val, HDMI_SW_DI_CFG);
274 273
275 /* Infoframe header */ 274 /* Infoframe header */
276 val = buffer[0x0]; 275 val = buffer[0];
277 val |= buffer[0x1] << 8; 276 val |= buffer[1] << 8;
278 val |= buffer[0x2] << 16; 277 val |= buffer[2] << 16;
279 hdmi_write(hdmi, val, HDMI_SW_DI_N_HEAD_WORD(HDMI_IFRAME_SLOT_AVI)); 278 hdmi_write(hdmi, val, HDMI_SW_DI_N_HEAD_WORD(HDMI_IFRAME_SLOT_AVI));
280 279
281 /* Infoframe packet bytes */ 280 /* Infoframe packet bytes */
282 val = frame[0x0]; 281 val = buffer[3];
283 val |= frame[0x1] << 8; 282 val |= *(frame++) << 8;
284 val |= frame[0x2] << 16; 283 val |= *(frame++) << 16;
285 val |= frame[0x3] << 24; 284 val |= *(frame++) << 24;
286 hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD0(HDMI_IFRAME_SLOT_AVI)); 285 hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD0(HDMI_IFRAME_SLOT_AVI));
287 286
288 val = frame[0x4]; 287 val = *(frame++);
289 val |= frame[0x5] << 8; 288 val |= *(frame++) << 8;
290 val |= frame[0x6] << 16; 289 val |= *(frame++) << 16;
291 val |= frame[0x7] << 24; 290 val |= *(frame++) << 24;
292 hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD1(HDMI_IFRAME_SLOT_AVI)); 291 hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD1(HDMI_IFRAME_SLOT_AVI));
293 292
294 val = frame[0x8]; 293 val = *(frame++);
295 val |= frame[0x9] << 8; 294 val |= *(frame++) << 8;
296 val |= frame[0xA] << 16; 295 val |= *(frame++) << 16;
297 val |= frame[0xB] << 24; 296 val |= *(frame++) << 24;
298 hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD2(HDMI_IFRAME_SLOT_AVI)); 297 hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD2(HDMI_IFRAME_SLOT_AVI));
299 298
300 val = frame[0xC]; 299 val = *(frame++);
300 val |= *(frame) << 8;
301 hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD3(HDMI_IFRAME_SLOT_AVI)); 301 hdmi_write(hdmi, val, HDMI_SW_DI_N_PKT_WORD3(HDMI_IFRAME_SLOT_AVI));
302 302
303 /* Enable transmission slot for AVI infoframe 303 /* Enable transmission slot for AVI infoframe
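The infoframe hunk above does two things: it indexes the packed buffer plainly instead of with hex offsets, and it fixes the payload so that buffer[3] — the checksum byte of a packed AVI infoframe — is no longer skipped. The word building itself is plain little-endian byte packing; a generic sketch (pack_bytes_to_words() is an illustrative helper, not driver code):

/* Pack 'len' bytes into little-endian 32-bit words. The caller must
 * zero-initialise 'words' and size it to DIV_ROUND_UP(len, 4) entries. */
static void pack_bytes_to_words(u32 *words, const u8 *bytes, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++)
		words[i / 4] |= (u32)bytes[i] << (8 * (i % 4));
}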
@@ -480,17 +480,15 @@ static const struct drm_bridge_funcs sti_hdmi_bridge_funcs = {
480 480
481static int sti_hdmi_connector_get_modes(struct drm_connector *connector) 481static int sti_hdmi_connector_get_modes(struct drm_connector *connector)
482{ 482{
483 struct i2c_adapter *i2c_adap; 483 struct sti_hdmi_connector *hdmi_connector
484 = to_sti_hdmi_connector(connector);
485 struct sti_hdmi *hdmi = hdmi_connector->hdmi;
484 struct edid *edid; 486 struct edid *edid;
485 int count; 487 int count;
486 488
487 DRM_DEBUG_DRIVER("\n"); 489 DRM_DEBUG_DRIVER("\n");
488 490
489 i2c_adap = i2c_get_adapter(1); 491 edid = drm_get_edid(connector, hdmi->ddc_adapt);
490 if (!i2c_adap)
491 goto fail;
492
493 edid = drm_get_edid(connector, i2c_adap);
494 if (!edid) 492 if (!edid)
495 goto fail; 493 goto fail;
496 494
@@ -603,29 +601,38 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
603 struct sti_hdmi_connector *connector; 601 struct sti_hdmi_connector *connector;
604 struct drm_connector *drm_connector; 602 struct drm_connector *drm_connector;
605 struct drm_bridge *bridge; 603 struct drm_bridge *bridge;
606 struct i2c_adapter *i2c_adap; 604 struct device_node *ddc;
607 int err; 605 int err;
608 606
609 i2c_adap = i2c_get_adapter(1); 607 ddc = of_parse_phandle(dev->of_node, "ddc", 0);
610 if (!i2c_adap) 608 if (ddc) {
611 return -EPROBE_DEFER; 609 hdmi->ddc_adapt = of_find_i2c_adapter_by_node(ddc);
610 if (!hdmi->ddc_adapt) {
611 err = -EPROBE_DEFER;
612 of_node_put(ddc);
613 return err;
614 }
615
616 of_node_put(ddc);
617 }
612 618
613 /* Set the drm device handle */ 619 /* Set the drm device handle */
614 hdmi->drm_dev = drm_dev; 620 hdmi->drm_dev = drm_dev;
615 621
616 encoder = sti_hdmi_find_encoder(drm_dev); 622 encoder = sti_hdmi_find_encoder(drm_dev);
617 if (!encoder) 623 if (!encoder)
618 return -ENOMEM; 624 goto err_adapt;
619 625
620 connector = devm_kzalloc(dev, sizeof(*connector), GFP_KERNEL); 626 connector = devm_kzalloc(dev, sizeof(*connector), GFP_KERNEL);
621 if (!connector) 627 if (!connector)
622 return -ENOMEM; 628 goto err_adapt;
629
623 630
624 connector->hdmi = hdmi; 631 connector->hdmi = hdmi;
625 632
626 bridge = devm_kzalloc(dev, sizeof(*bridge), GFP_KERNEL); 633 bridge = devm_kzalloc(dev, sizeof(*bridge), GFP_KERNEL);
627 if (!bridge) 634 if (!bridge)
628 return -ENOMEM; 635 goto err_adapt;
629 636
630 bridge->driver_private = hdmi; 637 bridge->driver_private = hdmi;
631 drm_bridge_init(drm_dev, bridge, &sti_hdmi_bridge_funcs); 638 drm_bridge_init(drm_dev, bridge, &sti_hdmi_bridge_funcs);
@@ -662,6 +669,8 @@ err_sysfs:
662err_connector: 669err_connector:
663 drm_bridge_cleanup(bridge); 670 drm_bridge_cleanup(bridge);
664 drm_connector_cleanup(drm_connector); 671 drm_connector_cleanup(drm_connector);
672err_adapt:
673	if (hdmi->ddc_adapt) put_device(&hdmi->ddc_adapt->dev);
665 return -EINVAL; 674 return -EINVAL;
666} 675}
667 676
@@ -757,13 +766,7 @@ static int sti_hdmi_probe(struct platform_device *pdev)
757 return PTR_ERR(hdmi->clk_audio); 766 return PTR_ERR(hdmi->clk_audio);
758 } 767 }
759 768
760 hdmi->hpd_gpio = of_get_named_gpio(np, "hdmi,hpd-gpio", 0); 769 hdmi->hpd = readl(hdmi->regs + HDMI_STA) & HDMI_STA_HOT_PLUG;
761 if (hdmi->hpd_gpio < 0) {
762 DRM_ERROR("Failed to get hdmi hpd-gpio\n");
763 return -EIO;
764 }
765
766 hdmi->hpd = gpio_get_value(hdmi->hpd_gpio);
767 770
768 init_waitqueue_head(&hdmi->wait_event); 771 init_waitqueue_head(&hdmi->wait_event);
769 772
@@ -788,6 +791,11 @@ static int sti_hdmi_probe(struct platform_device *pdev)
788 791
789static int sti_hdmi_remove(struct platform_device *pdev) 792static int sti_hdmi_remove(struct platform_device *pdev)
790{ 793{
794 struct sti_hdmi *hdmi = dev_get_drvdata(&pdev->dev);
795
796 if (hdmi->ddc_adapt)
797 put_device(&hdmi->ddc_adapt->dev);
798
791 component_del(&pdev->dev, &sti_hdmi_ops); 799 component_del(&pdev->dev, &sti_hdmi_ops);
792 return 0; 800 return 0;
793} 801}
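Two reference-counting details matter in the sti_hdmi.c changes above: of_find_i2c_adapter_by_node() takes a reference on the adapter's struct device, so both the bind error path and sti_hdmi_remove() must balance it with put_device(); and the phandle from of_parse_phandle() is dropped with of_node_put() as soon as the lookup is done. A condensed sketch of the pairing (the two example functions are hypothetical; the APIs they call are the ones used in the patch):

#include <linux/i2c.h>
#include <linux/of.h>

/* Look up the DDC i2c adapter referenced by the "ddc" phandle.
 * On success the adapter's device refcount is elevated. */
static struct i2c_adapter *ddc_get_example(struct device *dev)
{
	struct device_node *ddc = of_parse_phandle(dev->of_node, "ddc", 0);
	struct i2c_adapter *adapt = NULL;

	if (ddc) {
		adapt = of_find_i2c_adapter_by_node(ddc);
		of_node_put(ddc);	/* phandle ref no longer needed */
	}
	return adapt;
}

/* Drop the reference taken by the lookup above. */
static void ddc_put_example(struct i2c_adapter *adapt)
{
	if (adapt)
		put_device(&adapt->dev);
}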
diff --git a/drivers/gpu/drm/sti/sti_hdmi.h b/drivers/gpu/drm/sti/sti_hdmi.h
index 61bec6557ceb..3d22390e1f3b 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.h
+++ b/drivers/gpu/drm/sti/sti_hdmi.h
@@ -14,6 +14,9 @@
14#define HDMI_STA 0x0010 14#define HDMI_STA 0x0010
15#define HDMI_STA_DLL_LCK BIT(5) 15#define HDMI_STA_DLL_LCK BIT(5)
16 16
17#define HDMI_STA_HOT_PLUG_SHIFT 4
18#define HDMI_STA_HOT_PLUG (1 << HDMI_STA_HOT_PLUG_SHIFT)
19
17struct sti_hdmi; 20struct sti_hdmi;
18 21
19struct hdmi_phy_ops { 22struct hdmi_phy_ops {
@@ -37,7 +40,6 @@ struct hdmi_phy_ops {
37 * @irq_status: interrupt status register 40 * @irq_status: interrupt status register
38 * @phy_ops: phy start/stop operations 41 * @phy_ops: phy start/stop operations
39 * @enabled: true if hdmi is enabled else false 42 * @enabled: true if hdmi is enabled else false
40 * @hpd_gpio: hdmi hot plug detect gpio number
41 * @hpd: hot plug detect status 43 * @hpd: hot plug detect status
42 * @wait_event: wait event 44 * @wait_event: wait event
43 * @event_received: wait event status 45 * @event_received: wait event status
@@ -57,11 +59,11 @@ struct sti_hdmi {
57 u32 irq_status; 59 u32 irq_status;
58 struct hdmi_phy_ops *phy_ops; 60 struct hdmi_phy_ops *phy_ops;
59 bool enabled; 61 bool enabled;
60 int hpd_gpio;
61 bool hpd; 62 bool hpd;
62 wait_queue_head_t wait_event; 63 wait_queue_head_t wait_event;
63 bool event_received; 64 bool event_received;
64 struct reset_control *reset; 65 struct reset_control *reset;
66 struct i2c_adapter *ddc_adapt;
65}; 67};
66 68
67u32 hdmi_read(struct sti_hdmi *hdmi, int offset); 69u32 hdmi_read(struct sti_hdmi *hdmi, int offset);
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
new file mode 100644
index 000000000000..f3db05dab0ab
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -0,0 +1,1073 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2014
3 * Authors: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
4 * License terms: GNU General Public License (GPL), version 2
5 */
6
7#include <linux/clk.h>
8#include <linux/component.h>
9#include <linux/firmware.h>
10#include <linux/module.h>
11#include <linux/platform_device.h>
12#include <linux/reset.h>
13
14#include <drm/drmP.h>
15
16#include "sti_drm_plane.h"
17#include "sti_hqvdp.h"
18#include "sti_hqvdp_lut.h"
19#include "sti_layer.h"
20#include "sti_vtg.h"
21
22/* Firmware name */
23#define HQVDP_FMW_NAME "hqvdp-stih407.bin"
24
25/* Regs address */
26#define HQVDP_DMEM 0x00000000 /* 0x00000000 */
27#define HQVDP_PMEM 0x00040000 /* 0x00040000 */
28#define HQVDP_RD_PLUG 0x000E0000 /* 0x000E0000 */
29#define HQVDP_RD_PLUG_CONTROL (HQVDP_RD_PLUG + 0x1000) /* 0x000E1000 */
30#define HQVDP_RD_PLUG_PAGE_SIZE (HQVDP_RD_PLUG + 0x1004) /* 0x000E1004 */
31#define HQVDP_RD_PLUG_MIN_OPC (HQVDP_RD_PLUG + 0x1008) /* 0x000E1008 */
32#define HQVDP_RD_PLUG_MAX_OPC (HQVDP_RD_PLUG + 0x100C) /* 0x000E100C */
33#define HQVDP_RD_PLUG_MAX_CHK (HQVDP_RD_PLUG + 0x1010) /* 0x000E1010 */
34#define HQVDP_RD_PLUG_MAX_MSG (HQVDP_RD_PLUG + 0x1014) /* 0x000E1014 */
35#define HQVDP_RD_PLUG_MIN_SPACE (HQVDP_RD_PLUG + 0x1018) /* 0x000E1018 */
36#define HQVDP_WR_PLUG 0x000E2000 /* 0x000E2000 */
37#define HQVDP_WR_PLUG_CONTROL (HQVDP_WR_PLUG + 0x1000) /* 0x000E3000 */
38#define HQVDP_WR_PLUG_PAGE_SIZE (HQVDP_WR_PLUG + 0x1004) /* 0x000E3004 */
39#define HQVDP_WR_PLUG_MIN_OPC (HQVDP_WR_PLUG + 0x1008) /* 0x000E3008 */
40#define HQVDP_WR_PLUG_MAX_OPC (HQVDP_WR_PLUG + 0x100C) /* 0x000E300C */
41#define HQVDP_WR_PLUG_MAX_CHK (HQVDP_WR_PLUG + 0x1010) /* 0x000E3010 */
42#define HQVDP_WR_PLUG_MAX_MSG (HQVDP_WR_PLUG + 0x1014) /* 0x000E3014 */
43#define HQVDP_WR_PLUG_MIN_SPACE (HQVDP_WR_PLUG + 0x1018) /* 0x000E3018 */
44#define HQVDP_MBX 0x000E4000 /* 0x000E4000 */
45#define HQVDP_MBX_IRQ_TO_XP70 (HQVDP_MBX + 0x0000) /* 0x000E4000 */
46#define HQVDP_MBX_INFO_HOST (HQVDP_MBX + 0x0004) /* 0x000E4004 */
47#define HQVDP_MBX_IRQ_TO_HOST (HQVDP_MBX + 0x0008) /* 0x000E4008 */
48#define HQVDP_MBX_INFO_XP70 (HQVDP_MBX + 0x000C) /* 0x000E400C */
49#define HQVDP_MBX_SW_RESET_CTRL (HQVDP_MBX + 0x0010) /* 0x000E4010 */
50#define HQVDP_MBX_STARTUP_CTRL1 (HQVDP_MBX + 0x0014) /* 0x000E4014 */
51#define HQVDP_MBX_STARTUP_CTRL2 (HQVDP_MBX + 0x0018) /* 0x000E4018 */
52#define HQVDP_MBX_GP_STATUS (HQVDP_MBX + 0x001C) /* 0x000E401C */
53#define HQVDP_MBX_NEXT_CMD (HQVDP_MBX + 0x0020) /* 0x000E4020 */
54#define HQVDP_MBX_CURRENT_CMD (HQVDP_MBX + 0x0024) /* 0x000E4024 */
55#define HQVDP_MBX_SOFT_VSYNC (HQVDP_MBX + 0x0028) /* 0x000E4028 */
56
57/* Plugs config */
58#define PLUG_CONTROL_ENABLE 0x00000001
59#define PLUG_PAGE_SIZE_256 0x00000002
60#define PLUG_MIN_OPC_8 0x00000003
61#define PLUG_MAX_OPC_64 0x00000006
62#define PLUG_MAX_CHK_2X 0x00000001
63#define PLUG_MAX_MSG_1X 0x00000000
64#define PLUG_MIN_SPACE_1 0x00000000
65
66/* SW reset CTRL */
67#define SW_RESET_CTRL_FULL BIT(0)
68#define SW_RESET_CTRL_CORE BIT(1)
69
70/* Startup ctrl 1 */
71#define STARTUP_CTRL1_RST_DONE BIT(0)
72#define STARTUP_CTRL1_AUTH_IDLE BIT(2)
73
74/* Startup ctrl 2 */
75#define STARTUP_CTRL2_FETCH_EN BIT(1)
76
77/* Info xP70 */
78#define INFO_XP70_FW_READY BIT(15)
79#define INFO_XP70_FW_PROCESSING BIT(14)
80#define INFO_XP70_FW_INITQUEUES BIT(13)
81
82/* SOFT_VSYNC */
83#define SOFT_VSYNC_HW 0x00000000
84#define SOFT_VSYNC_SW_CMD 0x00000001
85#define SOFT_VSYNC_SW_CTRL_IRQ 0x00000003
86
87/* Reset & boot poll config */
88#define POLL_MAX_ATTEMPT 50
89#define POLL_DELAY_MS 20
90
91#define SCALE_FACTOR 8192
92#define SCALE_MAX_FOR_LEG_LUT_F 4096
93#define SCALE_MAX_FOR_LEG_LUT_E 4915
94#define SCALE_MAX_FOR_LEG_LUT_D 6654
95#define SCALE_MAX_FOR_LEG_LUT_C 8192
96
97enum sti_hvsrc_orient {
98 HVSRC_HORI,
99 HVSRC_VERT
100};
101
102/* Command structures */
103struct sti_hqvdp_top {
104 u32 config;
105 u32 mem_format;
106 u32 current_luma;
107 u32 current_enh_luma;
108 u32 current_right_luma;
109 u32 current_enh_right_luma;
110 u32 current_chroma;
111 u32 current_enh_chroma;
112 u32 current_right_chroma;
113 u32 current_enh_right_chroma;
114 u32 output_luma;
115 u32 output_chroma;
116 u32 luma_src_pitch;
117 u32 luma_enh_src_pitch;
118 u32 luma_right_src_pitch;
119 u32 luma_enh_right_src_pitch;
120 u32 chroma_src_pitch;
121 u32 chroma_enh_src_pitch;
122 u32 chroma_right_src_pitch;
123 u32 chroma_enh_right_src_pitch;
124 u32 luma_processed_pitch;
125 u32 chroma_processed_pitch;
126 u32 input_frame_size;
127 u32 input_viewport_ori;
128 u32 input_viewport_ori_right;
129 u32 input_viewport_size;
130 u32 left_view_border_width;
131 u32 right_view_border_width;
132 u32 left_view_3d_offset_width;
133 u32 right_view_3d_offset_width;
134 u32 side_stripe_color;
135 u32 crc_reset_ctrl;
136};
137
138/* Configs for interlaced: no IT, no pass-through, 3 fields */
139#define TOP_CONFIG_INTER_BTM 0x00000000
140#define TOP_CONFIG_INTER_TOP 0x00000002
141
142/* Config for progressive: no IT, no pass-through, 3 fields */
143#define TOP_CONFIG_PROGRESSIVE 0x00000001
144
145/* Default MemFormat: in=420_raster_dual out=444_raster;opaque Mem2Tv mode */
146#define TOP_MEM_FORMAT_DFLT 0x00018060
147
148/* Min/Max size */
149#define MAX_WIDTH 0x1FFF
150#define MAX_HEIGHT 0x0FFF
151#define MIN_WIDTH 0x0030
152#define MIN_HEIGHT 0x0010
153
154struct sti_hqvdp_vc1re {
155 u32 ctrl_prv_csdi;
156 u32 ctrl_cur_csdi;
157 u32 ctrl_nxt_csdi;
158 u32 ctrl_cur_fmd;
159 u32 ctrl_nxt_fmd;
160};
161
162struct sti_hqvdp_fmd {
163 u32 config;
164 u32 viewport_ori;
165 u32 viewport_size;
166 u32 next_next_luma;
167 u32 next_next_right_luma;
168 u32 next_next_next_luma;
169 u32 next_next_next_right_luma;
170 u32 threshold_scd;
171 u32 threshold_rfd;
172 u32 threshold_move;
173 u32 threshold_cfd;
174};
175
176struct sti_hqvdp_csdi {
177 u32 config;
178 u32 config2;
179 u32 dcdi_config;
180 u32 prev_luma;
181 u32 prev_enh_luma;
182 u32 prev_right_luma;
183 u32 prev_enh_right_luma;
184 u32 next_luma;
185 u32 next_enh_luma;
186 u32 next_right_luma;
187 u32 next_enh_right_luma;
188 u32 prev_chroma;
189 u32 prev_enh_chroma;
190 u32 prev_right_chroma;
191 u32 prev_enh_right_chroma;
192 u32 next_chroma;
193 u32 next_enh_chroma;
194 u32 next_right_chroma;
195 u32 next_enh_right_chroma;
196 u32 prev_motion;
197 u32 prev_right_motion;
198 u32 cur_motion;
199 u32 cur_right_motion;
200 u32 next_motion;
201 u32 next_right_motion;
202};
203
204/* Config for progressive: bypass */
205#define CSDI_CONFIG_PROG 0x00000000
206/* Config for directional deinterlacing without motion */
207#define CSDI_CONFIG_INTER_DIR 0x00000016
208/* Additional configs for fader, blender, motion,... deinterlace algorithms */
209#define CSDI_CONFIG2_DFLT 0x000001B3
210#define CSDI_DCDI_CONFIG_DFLT 0x00203803
211
212struct sti_hqvdp_hvsrc {
213 u32 hor_panoramic_ctrl;
214 u32 output_picture_size;
215 u32 init_horizontal;
216 u32 init_vertical;
217 u32 param_ctrl;
218 u32 yh_coef[NB_COEF];
219 u32 ch_coef[NB_COEF];
220 u32 yv_coef[NB_COEF];
221 u32 cv_coef[NB_COEF];
222 u32 hori_shift;
223 u32 vert_shift;
224};
225
226/* Default ParamCtrl: all controls enabled */
227#define HVSRC_PARAM_CTRL_DFLT 0xFFFFFFFF
228
229struct sti_hqvdp_iqi {
230 u32 config;
231 u32 demo_wind_size;
232 u32 pk_config;
233 u32 coeff0_coeff1;
234 u32 coeff2_coeff3;
235 u32 coeff4;
236 u32 pk_lut;
237 u32 pk_gain;
238 u32 pk_coring_level;
239 u32 cti_config;
240 u32 le_config;
241 u32 le_lut[64];
242 u32 con_bri;
243 u32 sat_gain;
244 u32 pxf_conf;
245 u32 default_color;
246};
247
248/* Default Config : IQI bypassed */
249#define IQI_CONFIG_DFLT 0x00000001
250/* Default Contrast & Brightness gain = 256 */
251#define IQI_CON_BRI_DFLT 0x00000100
252/* Default Saturation gain = 256 */
253#define IQI_SAT_GAIN_DFLT 0x00000100
254/* Default PxfConf : P2I bypassed */
255#define IQI_PXF_CONF_DFLT 0x00000001
256
257struct sti_hqvdp_top_status {
258 u32 processing_time;
259 u32 input_y_crc;
260 u32 input_uv_crc;
261};
262
263struct sti_hqvdp_fmd_status {
264 u32 fmd_repeat_move_status;
265 u32 fmd_scene_count_status;
266 u32 cfd_sum;
267 u32 field_sum;
268 u32 next_y_fmd_crc;
269 u32 next_next_y_fmd_crc;
270 u32 next_next_next_y_fmd_crc;
271};
272
273struct sti_hqvdp_csdi_status {
274 u32 prev_y_csdi_crc;
275 u32 cur_y_csdi_crc;
276 u32 next_y_csdi_crc;
277 u32 prev_uv_csdi_crc;
278 u32 cur_uv_csdi_crc;
279 u32 next_uv_csdi_crc;
280 u32 y_csdi_crc;
281 u32 uv_csdi_crc;
282 u32 uv_cup_crc;
283 u32 mot_csdi_crc;
284 u32 mot_cur_csdi_crc;
285 u32 mot_prev_csdi_crc;
286};
287
288struct sti_hqvdp_hvsrc_status {
289 u32 y_hvsrc_crc;
290 u32 u_hvsrc_crc;
291 u32 v_hvsrc_crc;
292};
293
294struct sti_hqvdp_iqi_status {
295 u32 pxf_it_status;
296 u32 y_iqi_crc;
297 u32 u_iqi_crc;
298 u32 v_iqi_crc;
299};
300
301/* Main commands. We use 2 commands: one being processed by the firmware and
302 * one ready to be fetched upon the next Vsync */
303#define NB_VDP_CMD 2
304
305struct sti_hqvdp_cmd {
306 struct sti_hqvdp_top top;
307 struct sti_hqvdp_vc1re vc1re;
308 struct sti_hqvdp_fmd fmd;
309 struct sti_hqvdp_csdi csdi;
310 struct sti_hqvdp_hvsrc hvsrc;
311 struct sti_hqvdp_iqi iqi;
312 struct sti_hqvdp_top_status top_status;
313 struct sti_hqvdp_fmd_status fmd_status;
314 struct sti_hqvdp_csdi_status csdi_status;
315 struct sti_hqvdp_hvsrc_status hvsrc_status;
316 struct sti_hqvdp_iqi_status iqi_status;
317};
318
319/*
320 * STI HQVDP structure
321 *
322 * @dev: driver device
323 * @drm_dev: the drm device
324 * @regs: registers
325 * @layer: layer structure for the hqvdp itself
326 * @vid_plane: VID plug used as link with compositor IP
327 * @clk: IP clock
328 * @clk_pix_main: pix main clock
329 * @reset: reset control
330 * @vtg_nb: notifier to handle VTG Vsync
331 * @btm_field_pending: is there any bottom field (interlaced frame) to display
332 * @curr_field_count: number of field updates
333 * @last_field_count: number of field updates since last fps measure
334 * @hqvdp_cmd: buffer of commands
335 * @hqvdp_cmd_paddr: physical address of hqvdp_cmd
336 * @vtg: vtg for main data path
337 */
338struct sti_hqvdp {
339 struct device *dev;
340 struct drm_device *drm_dev;
341 void __iomem *regs;
342 struct sti_layer layer;
343 struct drm_plane *vid_plane;
344 struct clk *clk;
345 struct clk *clk_pix_main;
346 struct reset_control *reset;
347 struct notifier_block vtg_nb;
348 bool btm_field_pending;
349 unsigned int curr_field_count;
350 unsigned int last_field_count;
351 void *hqvdp_cmd;
352 dma_addr_t hqvdp_cmd_paddr;
353 struct sti_vtg *vtg;
354};
355
356#define to_sti_hqvdp(x) container_of(x, struct sti_hqvdp, layer)
357
358static const uint32_t hqvdp_supported_formats[] = {
359 DRM_FORMAT_NV12,
360};
361
362static const uint32_t *sti_hqvdp_get_formats(struct sti_layer *layer)
363{
364 return hqvdp_supported_formats;
365}
366
367static unsigned int sti_hqvdp_get_nb_formats(struct sti_layer *layer)
368{
369 return ARRAY_SIZE(hqvdp_supported_formats);
370}
371
372/**
373 * sti_hqvdp_get_free_cmd
374 * @hqvdp: hqvdp structure
375 *
376 * Look for a hqvdp_cmd that is not being used (or about to be used) by the FW.
377 *
378 * RETURNS:
379 * the offset of the command to be used.
380 * -1 in error cases
381 */
382static int sti_hqvdp_get_free_cmd(struct sti_hqvdp *hqvdp)
383{
384 int curr_cmd, next_cmd;
385 dma_addr_t cmd = hqvdp->hqvdp_cmd_paddr;
386 int i;
387
388 curr_cmd = readl(hqvdp->regs + HQVDP_MBX_CURRENT_CMD);
389 next_cmd = readl(hqvdp->regs + HQVDP_MBX_NEXT_CMD);
390
391 for (i = 0; i < NB_VDP_CMD; i++) {
392 if ((cmd != curr_cmd) && (cmd != next_cmd))
393 return i * sizeof(struct sti_hqvdp_cmd);
394 cmd += sizeof(struct sti_hqvdp_cmd);
395 }
396
397 return -1;
398}
399
400/**
401 * sti_hqvdp_get_curr_cmd
402 * @hqvdp: hqvdp structure
403 *
404 * Look for the hqvdp_cmd that is being used by the FW.
405 *
406 * RETURNS:
407 * the offset of the command to be used.
408 * -1 in error cases
409 */
410static int sti_hqvdp_get_curr_cmd(struct sti_hqvdp *hqvdp)
411{
412 int curr_cmd;
413 dma_addr_t cmd = hqvdp->hqvdp_cmd_paddr;
414 unsigned int i;
415
416 curr_cmd = readl(hqvdp->regs + HQVDP_MBX_CURRENT_CMD);
417
418 for (i = 0; i < NB_VDP_CMD; i++) {
419 if (cmd == curr_cmd)
420 return i * sizeof(struct sti_hqvdp_cmd);
421
422 cmd += sizeof(struct sti_hqvdp_cmd);
423 }
424
425 return -1;
426}
427
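Together with the NB_VDP_CMD comment above, these two lookups implement a two-entry command ring: the firmware reports which command it is consuming via the CURRENT_CMD/NEXT_CMD mailboxes, and the driver always fills the other entry. A hypothetical caller, condensed from what sti_hqvdp_commit_layer() does further down:

/* Illustrative only: fill the free command and hand it to the firmware. */
static int post_next_cmd_example(struct sti_hqvdp *hqvdp)
{
	int off = sti_hqvdp_get_free_cmd(hqvdp);

	if (off == -1)
		return -EBUSY;

	/* ... build the struct sti_hqvdp_cmd at hqvdp->hqvdp_cmd + off ... */

	writel(hqvdp->hqvdp_cmd_paddr + off,
	       hqvdp->regs + HQVDP_MBX_NEXT_CMD);
	return 0;
}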
428/**
429 * sti_hqvdp_update_hvsrc
430 * @orient: horizontal or vertical
431 * @scale: scaling/zoom factor
432 * @hvsrc: the structure containing the LUT coef
433 *
434 * Update the Y and C Lut coef, as well as the shift param
435 *
436 * RETURNS:
437 * None.
438 */
439static void sti_hqvdp_update_hvsrc(enum sti_hvsrc_orient orient, int scale,
440 struct sti_hqvdp_hvsrc *hvsrc)
441{
442 const int *coef_c, *coef_y;
443 int shift_c, shift_y;
444
445 /* Get the appropriate coef tables */
446 if (scale < SCALE_MAX_FOR_LEG_LUT_F) {
447 coef_y = coef_lut_f_y_legacy;
448 coef_c = coef_lut_f_c_legacy;
449 shift_y = SHIFT_LUT_F_Y_LEGACY;
450 shift_c = SHIFT_LUT_F_C_LEGACY;
451 } else if (scale < SCALE_MAX_FOR_LEG_LUT_E) {
452 coef_y = coef_lut_e_y_legacy;
453 coef_c = coef_lut_e_c_legacy;
454 shift_y = SHIFT_LUT_E_Y_LEGACY;
455 shift_c = SHIFT_LUT_E_C_LEGACY;
456 } else if (scale < SCALE_MAX_FOR_LEG_LUT_D) {
457 coef_y = coef_lut_d_y_legacy;
458 coef_c = coef_lut_d_c_legacy;
459 shift_y = SHIFT_LUT_D_Y_LEGACY;
460 shift_c = SHIFT_LUT_D_C_LEGACY;
461 } else if (scale < SCALE_MAX_FOR_LEG_LUT_C) {
462 coef_y = coef_lut_c_y_legacy;
463 coef_c = coef_lut_c_c_legacy;
464 shift_y = SHIFT_LUT_C_Y_LEGACY;
465 shift_c = SHIFT_LUT_C_C_LEGACY;
466 } else if (scale == SCALE_MAX_FOR_LEG_LUT_C) {
467 coef_y = coef_c = coef_lut_b;
468 shift_y = shift_c = SHIFT_LUT_B;
469 } else {
470 coef_y = coef_c = coef_lut_a_legacy;
471 shift_y = shift_c = SHIFT_LUT_A_LEGACY;
472 }
473
474 if (orient == HVSRC_HORI) {
475 hvsrc->hori_shift = (shift_c << 16) | shift_y;
476 memcpy(hvsrc->yh_coef, coef_y, sizeof(hvsrc->yh_coef));
477 memcpy(hvsrc->ch_coef, coef_c, sizeof(hvsrc->ch_coef));
478 } else {
479 hvsrc->vert_shift = (shift_c << 16) | shift_y;
480 memcpy(hvsrc->yv_coef, coef_y, sizeof(hvsrc->yv_coef));
481 memcpy(hvsrc->cv_coef, coef_c, sizeof(hvsrc->cv_coef));
482 }
483}
484
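A few worked values make the threshold ladder above concrete (scale is computed later as SCALE_FACTOR * output / input; see sti_hqvdp_prepare_layer()):

/*
 * scale = 8192 * out / in:
 *   1:1 copy      -> scale = 8192 (== SCALE_MAX_FOR_LEG_LUT_C) -> coef_lut_b
 *   2:1 downscale -> scale = 4096 (not < 4096, but < 4915)     -> coef_lut_e_*
 *   any upscale   -> scale > 8192                              -> coef_lut_a_legacy
 * Smaller scale values (stronger downscaling) select the F..C tables.
 */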
485/**
486 * sti_hqvdp_check_hw_scaling
487 * @layer: hqvdp layer
488 *
489 * Check if the HW is able to perform the scaling request
490 * The firmware scaling limitation is "CEIL(1/Zy) <= FLOOR(LFW)" where:
491 * Zy = OutputHeight / InputHeight
492 * LFW = (Tx * IPClock) / (MaxNbCycles * Cp)
493 * Tx: Total video mode horizontal resolution
494 * IPClock: HQVDP IP clock (MHz)
495 * MaxNbCycles: max(InputWidth, OutputWidth)
496 * Cp: Video mode pixel clock (MHz)
497 *
498 * RETURNS:
499 * True if the HW can scale.
500 */
501static bool sti_hqvdp_check_hw_scaling(struct sti_layer *layer)
502{
503 struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
504 unsigned long lfw;
505 unsigned int inv_zy;
506
507 lfw = layer->mode->htotal * (clk_get_rate(hqvdp->clk) / 1000000);
508 lfw /= max(layer->src_w, layer->dst_w) * layer->mode->clock / 1000;
509
510 inv_zy = DIV_ROUND_UP(layer->src_h, layer->dst_h);
511
512	return inv_zy <= lfw;
513}
514
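A worked example of the limit, with integer arithmetic as in the code (the 400 MHz IP clock is an assumed figure, not a datasheet value):

/*
 * 1080p60 output: htotal = 2200, Cp = 148.5 MHz (mode->clock = 148500),
 * assumed HQVDP IP clock = 400 MHz, src_w = dst_w = 1920:
 *   lfw = 2200 * 400 / (1920 * (148500 / 1000)) = 880000 / 284160 = 3
 * A 2x vertical downscale, src_h = 2160 -> dst_h = 1080, needs
 *   inv_zy = DIV_ROUND_UP(2160, 1080) = 2 <= 3  => scaling accepted.
 */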
515/**
516 * sti_hqvdp_prepare_layer
517 * @layer: hqvdp layer
518 * @first_prepare: true if it is the first time this function is called
519 *
520 * Prepares a command for the firmware
521 *
522 * RETURNS:
523 * 0 on success.
524 */
525static int sti_hqvdp_prepare_layer(struct sti_layer *layer, bool first_prepare)
526{
527 struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
528 struct sti_hqvdp_cmd *cmd;
529 int scale_h, scale_v;
530 int cmd_offset;
531
532 dev_dbg(hqvdp->dev, "%s %s\n", __func__, sti_layer_to_str(layer));
533
534 /* prepare and commit VID plane */
535 hqvdp->vid_plane->funcs->update_plane(hqvdp->vid_plane,
536 layer->crtc, layer->fb,
537 layer->dst_x, layer->dst_y,
538 layer->dst_w, layer->dst_h,
539 layer->src_x, layer->src_y,
540 layer->src_w, layer->src_h);
541
542 cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
543 if (cmd_offset == -1) {
544 DRM_ERROR("No available hqvdp_cmd now\n");
545 return -EBUSY;
546 }
547 cmd = hqvdp->hqvdp_cmd + cmd_offset;
548
549 if (!sti_hqvdp_check_hw_scaling(layer)) {
550 DRM_ERROR("Scaling beyond HW capabilities\n");
551 return -EINVAL;
552 }
553
554 /* Static parameters, defaulting to progressive mode */
555 cmd->top.config = TOP_CONFIG_PROGRESSIVE;
556 cmd->top.mem_format = TOP_MEM_FORMAT_DFLT;
557 cmd->hvsrc.param_ctrl = HVSRC_PARAM_CTRL_DFLT;
558 cmd->csdi.config = CSDI_CONFIG_PROG;
559
560	/* VC1RE and FMD bypassed: keep everything set to 0.
561	 * IQI/P2I bypassed */
562 cmd->iqi.config = IQI_CONFIG_DFLT;
563 cmd->iqi.con_bri = IQI_CON_BRI_DFLT;
564 cmd->iqi.sat_gain = IQI_SAT_GAIN_DFLT;
565 cmd->iqi.pxf_conf = IQI_PXF_CONF_DFLT;
566
567 /* Buffer planes address */
568 cmd->top.current_luma = (u32) layer->paddr + layer->offsets[0];
569 cmd->top.current_chroma = (u32) layer->paddr + layer->offsets[1];
570
571 /* Pitches */
572 cmd->top.luma_processed_pitch = cmd->top.luma_src_pitch =
573 layer->pitches[0];
574 cmd->top.chroma_processed_pitch = cmd->top.chroma_src_pitch =
575 layer->pitches[1];
576
577 /* Input / output size
578 * Align to upper even value */
579 layer->dst_w = ALIGN(layer->dst_w, 2);
580 layer->dst_h = ALIGN(layer->dst_h, 2);
581
582 if ((layer->src_w > MAX_WIDTH) || (layer->src_w < MIN_WIDTH) ||
583 (layer->src_h > MAX_HEIGHT) || (layer->src_h < MIN_HEIGHT) ||
584 (layer->dst_w > MAX_WIDTH) || (layer->dst_w < MIN_WIDTH) ||
585 (layer->dst_h > MAX_HEIGHT) || (layer->dst_h < MIN_HEIGHT)) {
586 DRM_ERROR("Invalid in/out size %dx%d -> %dx%d\n",
587 layer->src_w, layer->src_h,
588 layer->dst_w, layer->dst_h);
589 return -EINVAL;
590 }
591 cmd->top.input_viewport_size = cmd->top.input_frame_size =
592 layer->src_h << 16 | layer->src_w;
593 cmd->hvsrc.output_picture_size = layer->dst_h << 16 | layer->dst_w;
594 cmd->top.input_viewport_ori = layer->src_y << 16 | layer->src_x;
595
596 /* Handle interlaced */
597 if (layer->fb->flags & DRM_MODE_FB_INTERLACED) {
598 /* Top field to display */
599 cmd->top.config = TOP_CONFIG_INTER_TOP;
600
601 /* Update pitches and vert size */
602 cmd->top.input_frame_size = (layer->src_h / 2) << 16 |
603 layer->src_w;
604 cmd->top.luma_processed_pitch *= 2;
605 cmd->top.luma_src_pitch *= 2;
606 cmd->top.chroma_processed_pitch *= 2;
607 cmd->top.chroma_src_pitch *= 2;
608
609 /* Enable directional deinterlacing processing */
610 cmd->csdi.config = CSDI_CONFIG_INTER_DIR;
611 cmd->csdi.config2 = CSDI_CONFIG2_DFLT;
612 cmd->csdi.dcdi_config = CSDI_DCDI_CONFIG_DFLT;
613 }
614
615 /* Update hvsrc lut coef */
616 scale_h = SCALE_FACTOR * layer->dst_w / layer->src_w;
617 sti_hqvdp_update_hvsrc(HVSRC_HORI, scale_h, &cmd->hvsrc);
618
619 scale_v = SCALE_FACTOR * layer->dst_h / layer->src_h;
620 sti_hqvdp_update_hvsrc(HVSRC_VERT, scale_v, &cmd->hvsrc);
621
622 if (first_prepare) {
623 /* Prevent VTG shutdown */
624 if (clk_prepare_enable(hqvdp->clk_pix_main)) {
625 DRM_ERROR("Failed to prepare/enable pix main clk\n");
626 return -ENXIO;
627 }
628
629 /* Register VTG Vsync callback to handle bottom fields */
630 if ((layer->fb->flags & DRM_MODE_FB_INTERLACED) &&
631 sti_vtg_register_client(hqvdp->vtg,
632 &hqvdp->vtg_nb, layer->mixer_id)) {
633 DRM_ERROR("Cannot register VTG notifier\n");
634 return -ENXIO;
635 }
636 }
637
638 return 0;
639}
640
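For reference, the packed geometry fields built above encode (height << 16 | width); a full-HD example:

/*
 * 1920x1080 source, no crop:
 *   input_frame_size   = (1080 << 16) | 1920 = 0x04380780
 *   input_viewport_ori = (0 << 16) | 0       = 0x00000000
 * For interlaced fbs the frame size uses src_h / 2, i.e. 540 lines here.
 */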
641static int sti_hqvdp_commit_layer(struct sti_layer *layer)
642{
643 struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
644 int cmd_offset;
645
646 dev_dbg(hqvdp->dev, "%s %s\n", __func__, sti_layer_to_str(layer));
647
648 cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
649 if (cmd_offset == -1) {
650 DRM_ERROR("No available hqvdp_cmd now\n");
651 return -EBUSY;
652 }
653
654 writel(hqvdp->hqvdp_cmd_paddr + cmd_offset,
655 hqvdp->regs + HQVDP_MBX_NEXT_CMD);
656
657 hqvdp->curr_field_count++;
658
659	/* Interlaced: get ready to display the bottom field at next Vsync */
660 if (layer->fb->flags & DRM_MODE_FB_INTERLACED)
661 hqvdp->btm_field_pending = true;
662
663 dev_dbg(hqvdp->dev, "%s Posted command:0x%x\n",
664 __func__, hqvdp->hqvdp_cmd_paddr + cmd_offset);
665
666 return 0;
667}
668
669static int sti_hqvdp_disable_layer(struct sti_layer *layer)
670{
671 struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
672 int i;
673
674 DRM_DEBUG_DRIVER("%s\n", sti_layer_to_str(layer));
675
676 /* Unregister VTG Vsync callback */
677 if ((layer->fb->flags & DRM_MODE_FB_INTERLACED) &&
678 sti_vtg_unregister_client(hqvdp->vtg, &hqvdp->vtg_nb))
679 DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
680
681 /* Set next cmd to NULL */
682 writel(0, hqvdp->regs + HQVDP_MBX_NEXT_CMD);
683
684 for (i = 0; i < POLL_MAX_ATTEMPT; i++) {
685 if (readl(hqvdp->regs + HQVDP_MBX_INFO_XP70)
686 & INFO_XP70_FW_READY)
687 break;
688 msleep(POLL_DELAY_MS);
689 }
690
691 /* VTG can stop now */
692 clk_disable_unprepare(hqvdp->clk_pix_main);
693
694 if (i == POLL_MAX_ATTEMPT) {
695 DRM_ERROR("XP70 could not revert to idle\n");
696 return -ENXIO;
697 }
698
699 /* disable VID plane */
700 hqvdp->vid_plane->funcs->disable_plane(hqvdp->vid_plane);
701
702 return 0;
703}
704
705/**
706 * sti_hqvdp_vtg_cb
707 * @nb: notifier block
708 * @evt: event message
709 * @data: private data
710 *
711 * Handle VTG Vsync event, display pending bottom field
712 *
713 * RETURNS:
714 * 0 on success.
715 */
716int sti_hqvdp_vtg_cb(struct notifier_block *nb, unsigned long evt, void *data)
717{
718 struct sti_hqvdp *hqvdp = container_of(nb, struct sti_hqvdp, vtg_nb);
719	int btm_cmd_offset, top_cmd_offset;
720 struct sti_hqvdp_cmd *btm_cmd, *top_cmd;
721
722 if ((evt != VTG_TOP_FIELD_EVENT) && (evt != VTG_BOTTOM_FIELD_EVENT)) {
723 DRM_DEBUG_DRIVER("Unknown event\n");
724 return 0;
725 }
726
727 if (hqvdp->btm_field_pending) {
728 /* Create the btm field command from the current one */
729 btm_cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
730		top_cmd_offset = sti_hqvdp_get_curr_cmd(hqvdp);
731		if ((btm_cmd_offset == -1) || (top_cmd_offset == -1)) {
732 DRM_ERROR("Cannot get cmds, skip btm field\n");
733 return -EBUSY;
734 }
735
736 btm_cmd = hqvdp->hqvdp_cmd + btm_cmd_offset;
737		top_cmd = hqvdp->hqvdp_cmd + top_cmd_offset;
738
739 memcpy(btm_cmd, top_cmd, sizeof(*btm_cmd));
740
741 btm_cmd->top.config = TOP_CONFIG_INTER_BTM;
742 btm_cmd->top.current_luma +=
743 btm_cmd->top.luma_src_pitch / 2;
744 btm_cmd->top.current_chroma +=
745 btm_cmd->top.chroma_src_pitch / 2;
746
747 /* Post the command to mailbox */
748 writel(hqvdp->hqvdp_cmd_paddr + btm_cmd_offset,
749 hqvdp->regs + HQVDP_MBX_NEXT_CMD);
750
751 hqvdp->curr_field_count++;
752 hqvdp->btm_field_pending = false;
753
754 dev_dbg(hqvdp->dev, "%s Posted command:0x%x\n",
755 __func__, hqvdp->hqvdp_cmd_paddr);
756 }
757
758 return 0;
759}
760
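Why '+= luma_src_pitch / 2' lands on the bottom field: sti_hqvdp_prepare_layer() doubled the pitches for interlaced fbs, so half the doubled pitch is exactly one framebuffer line:

/*
 * NV12 fb, luma pitch 1920 bytes, interlaced:
 *   luma_src_pitch (doubled)  = 3840
 *   bottom-field luma offset  = 3840 / 2 = 1920  -> start of fb line 1
 * The chroma plane is offset the same way with chroma_src_pitch / 2.
 */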
761static struct drm_plane *sti_hqvdp_find_vid(struct drm_device *dev, int id)
762{
763 struct drm_plane *plane;
764
765 list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
766 struct sti_layer *layer = to_sti_layer(plane);
767
768 if (layer->desc == id)
769 return plane;
770 }
771
772 return NULL;
773}
774
775static void sti_hqvdp_init(struct sti_layer *layer)
776{
777 struct sti_hqvdp *hqvdp = to_sti_hqvdp(layer);
778 int size;
779
780	/* find the plane matching vid 0 */
781 hqvdp->vid_plane = sti_hqvdp_find_vid(hqvdp->drm_dev, STI_VID_0);
782 if (!hqvdp->vid_plane) {
783 DRM_ERROR("Cannot find Main video layer\n");
784 return;
785 }
786
787 hqvdp->vtg_nb.notifier_call = sti_hqvdp_vtg_cb;
788
789 /* Allocate memory for the VDP commands */
790 size = NB_VDP_CMD * sizeof(struct sti_hqvdp_cmd);
791 hqvdp->hqvdp_cmd = dma_alloc_writecombine(hqvdp->dev, size,
792 &hqvdp->hqvdp_cmd_paddr,
793 GFP_KERNEL | GFP_DMA);
794 if (!hqvdp->hqvdp_cmd) {
795 DRM_ERROR("Failed to allocate memory for VDP cmd\n");
796 return;
797 }
798
799 memset(hqvdp->hqvdp_cmd, 0, size);
800}
801
802static const struct sti_layer_funcs hqvdp_ops = {
803 .get_formats = sti_hqvdp_get_formats,
804 .get_nb_formats = sti_hqvdp_get_nb_formats,
805	.init = sti_hqvdp_init,
806 .prepare = sti_hqvdp_prepare_layer,
807 .commit = sti_hqvdp_commit_layer,
808 .disable = sti_hqvdp_disable_layer,
809};
810
811struct sti_layer *sti_hqvdp_create(struct device *dev)
812{
813 struct sti_hqvdp *hqvdp = dev_get_drvdata(dev);
814
815 hqvdp->layer.ops = &hqvdp_ops;
816
817 return &hqvdp->layer;
818}
819EXPORT_SYMBOL(sti_hqvdp_create);
820
821static void sti_hqvdp_init_plugs(struct sti_hqvdp *hqvdp)
822{
823 /* Configure Plugs (same for RD & WR) */
824 writel(PLUG_PAGE_SIZE_256, hqvdp->regs + HQVDP_RD_PLUG_PAGE_SIZE);
825 writel(PLUG_MIN_OPC_8, hqvdp->regs + HQVDP_RD_PLUG_MIN_OPC);
826 writel(PLUG_MAX_OPC_64, hqvdp->regs + HQVDP_RD_PLUG_MAX_OPC);
827 writel(PLUG_MAX_CHK_2X, hqvdp->regs + HQVDP_RD_PLUG_MAX_CHK);
828 writel(PLUG_MAX_MSG_1X, hqvdp->regs + HQVDP_RD_PLUG_MAX_MSG);
829 writel(PLUG_MIN_SPACE_1, hqvdp->regs + HQVDP_RD_PLUG_MIN_SPACE);
830 writel(PLUG_CONTROL_ENABLE, hqvdp->regs + HQVDP_RD_PLUG_CONTROL);
831
832 writel(PLUG_PAGE_SIZE_256, hqvdp->regs + HQVDP_WR_PLUG_PAGE_SIZE);
833 writel(PLUG_MIN_OPC_8, hqvdp->regs + HQVDP_WR_PLUG_MIN_OPC);
834 writel(PLUG_MAX_OPC_64, hqvdp->regs + HQVDP_WR_PLUG_MAX_OPC);
835 writel(PLUG_MAX_CHK_2X, hqvdp->regs + HQVDP_WR_PLUG_MAX_CHK);
836 writel(PLUG_MAX_MSG_1X, hqvdp->regs + HQVDP_WR_PLUG_MAX_MSG);
837 writel(PLUG_MIN_SPACE_1, hqvdp->regs + HQVDP_WR_PLUG_MIN_SPACE);
838 writel(PLUG_CONTROL_ENABLE, hqvdp->regs + HQVDP_WR_PLUG_CONTROL);
839}
840
841/**
842 * sti_hqvdp_start_xp70
843 * @firmware: firmware found
844 * @ctxt: hqvdp structure
845 *
846 * Run the xP70 initialization sequence
847 */
848static void sti_hqvdp_start_xp70(const struct firmware *firmware, void *ctxt)
849{
850 struct sti_hqvdp *hqvdp = ctxt;
851 u32 *fw_rd_plug, *fw_wr_plug, *fw_pmem, *fw_dmem;
852 u8 *data;
853 int i;
854 struct fw_header {
855 int rd_size;
856 int wr_size;
857 int pmem_size;
858 int dmem_size;
859 } *header;
860
861 DRM_DEBUG_DRIVER("\n");
862 /* Check firmware parts */
863 if (!firmware) {
864 DRM_ERROR("Firmware not available\n");
865 return;
866 }
867
868 header = (struct fw_header *) firmware->data;
869 if (firmware->size < sizeof(*header)) {
870		DRM_ERROR("Invalid firmware size (%zu)\n", firmware->size);
871 goto out;
872 }
873 if ((sizeof(*header) + header->rd_size + header->wr_size +
874 header->pmem_size + header->dmem_size) != firmware->size) {
875		DRM_ERROR("Invalid firmware structure (%zu+%d+%d+%d+%d != %zu)\n",
876 sizeof(*header), header->rd_size, header->wr_size,
877 header->pmem_size, header->dmem_size,
878 firmware->size);
879 goto out;
880 }
881
882 data = (u8 *) firmware->data;
883 data += sizeof(*header);
884 fw_rd_plug = (void *) data;
885 data += header->rd_size;
886 fw_wr_plug = (void *) data;
887 data += header->wr_size;
888 fw_pmem = (void *) data;
889 data += header->pmem_size;
890 fw_dmem = (void *) data;
891
892 /* Enable clock */
893 if (clk_prepare_enable(hqvdp->clk))
894 DRM_ERROR("Failed to prepare/enable HQVDP clk\n");
895
896 /* Reset */
897 writel(SW_RESET_CTRL_FULL, hqvdp->regs + HQVDP_MBX_SW_RESET_CTRL);
898
899 for (i = 0; i < POLL_MAX_ATTEMPT; i++) {
900 if (readl(hqvdp->regs + HQVDP_MBX_STARTUP_CTRL1)
901 & STARTUP_CTRL1_RST_DONE)
902 break;
903 msleep(POLL_DELAY_MS);
904 }
905 if (i == POLL_MAX_ATTEMPT) {
906 DRM_ERROR("Could not reset\n");
907 goto out;
908 }
909
910 /* Init Read & Write plugs */
911 for (i = 0; i < header->rd_size / 4; i++)
912 writel(fw_rd_plug[i], hqvdp->regs + HQVDP_RD_PLUG + i * 4);
913 for (i = 0; i < header->wr_size / 4; i++)
914 writel(fw_wr_plug[i], hqvdp->regs + HQVDP_WR_PLUG + i * 4);
915
916 sti_hqvdp_init_plugs(hqvdp);
917
918 /* Authorize Idle Mode */
919 writel(STARTUP_CTRL1_AUTH_IDLE, hqvdp->regs + HQVDP_MBX_STARTUP_CTRL1);
920
921 /* Prevent VTG interruption during the boot */
922 writel(SOFT_VSYNC_SW_CTRL_IRQ, hqvdp->regs + HQVDP_MBX_SOFT_VSYNC);
923 writel(0, hqvdp->regs + HQVDP_MBX_NEXT_CMD);
924
925 /* Download PMEM & DMEM */
926 for (i = 0; i < header->pmem_size / 4; i++)
927 writel(fw_pmem[i], hqvdp->regs + HQVDP_PMEM + i * 4);
928 for (i = 0; i < header->dmem_size / 4; i++)
929 writel(fw_dmem[i], hqvdp->regs + HQVDP_DMEM + i * 4);
930
931 /* Enable fetch */
932 writel(STARTUP_CTRL2_FETCH_EN, hqvdp->regs + HQVDP_MBX_STARTUP_CTRL2);
933
934 /* Wait end of boot */
935 for (i = 0; i < POLL_MAX_ATTEMPT; i++) {
936 if (readl(hqvdp->regs + HQVDP_MBX_INFO_XP70)
937 & INFO_XP70_FW_READY)
938 break;
939 msleep(POLL_DELAY_MS);
940 }
941 if (i == POLL_MAX_ATTEMPT) {
942 DRM_ERROR("Could not boot\n");
943 goto out;
944 }
945
946 /* Launch Vsync */
947 writel(SOFT_VSYNC_HW, hqvdp->regs + HQVDP_MBX_SOFT_VSYNC);
948
949 DRM_INFO("HQVDP XP70 started\n");
950out:
951 release_firmware(firmware);
952}
953
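The parsing in sti_hqvdp_start_xp70() implies the following firmware blob layout (the diagram is reconstructed from the code above, nothing more):

/*
 *   +---------------------+  offset 0
 *   | struct fw_header    |  4 ints: rd_size, wr_size, pmem_size, dmem_size
 *   +---------------------+  sizeof(struct fw_header)
 *   | RD plug init words  |  rd_size bytes
 *   +---------------------+
 *   | WR plug init words  |  wr_size bytes
 *   +---------------------+
 *   | PMEM (xP70 program) |  pmem_size bytes
 *   +---------------------+
 *   | DMEM (xP70 data)    |  dmem_size bytes
 *   +---------------------+  total = firmware->size
 */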
954int sti_hqvdp_bind(struct device *dev, struct device *master, void *data)
955{
956 struct sti_hqvdp *hqvdp = dev_get_drvdata(dev);
957 struct drm_device *drm_dev = data;
958 struct sti_layer *layer;
959 int err;
960
961 DRM_DEBUG_DRIVER("\n");
962
963 hqvdp->drm_dev = drm_dev;
964
965 /* Request for firmware */
966 err = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
967 HQVDP_FMW_NAME, hqvdp->dev,
968 GFP_KERNEL, hqvdp, sti_hqvdp_start_xp70);
969 if (err) {
970 DRM_ERROR("Can't get HQVDP firmware\n");
971 return err;
972 }
973
974 layer = sti_layer_create(hqvdp->dev, STI_HQVDP_0, hqvdp->regs);
975 if (!layer) {
976 DRM_ERROR("Can't create HQVDP plane\n");
977 return -ENOMEM;
978 }
979
980 sti_drm_plane_init(drm_dev, layer, 1, DRM_PLANE_TYPE_OVERLAY);
981
982 return 0;
983}
984
985static void sti_hqvdp_unbind(struct device *dev,
986 struct device *master, void *data)
987{
988 /* do nothing */
989}
990
991static const struct component_ops sti_hqvdp_ops = {
992 .bind = sti_hqvdp_bind,
993 .unbind = sti_hqvdp_unbind,
994};
995
996static int sti_hqvdp_probe(struct platform_device *pdev)
997{
998 struct device *dev = &pdev->dev;
999 struct device_node *vtg_np;
1000 struct sti_hqvdp *hqvdp;
1001 struct resource *res;
1002
1003 DRM_DEBUG_DRIVER("\n");
1004
1005 hqvdp = devm_kzalloc(dev, sizeof(*hqvdp), GFP_KERNEL);
1006 if (!hqvdp) {
1007 DRM_ERROR("Failed to allocate HQVDP context\n");
1008 return -ENOMEM;
1009 }
1010
1011 hqvdp->dev = dev;
1012
1013 /* Get Memory resources */
1014 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1015 if (res == NULL) {
1016 DRM_ERROR("Get memory resource failed\n");
1017 return -ENXIO;
1018 }
1019 hqvdp->regs = devm_ioremap(dev, res->start, resource_size(res));
1020 if (hqvdp->regs == NULL) {
1021 DRM_ERROR("Register mapping failed\n");
1022 return -ENXIO;
1023 }
1024
1025 /* Get clock resources */
1026 hqvdp->clk = devm_clk_get(dev, "hqvdp");
1027 hqvdp->clk_pix_main = devm_clk_get(dev, "pix_main");
1028	if (IS_ERR(hqvdp->clk) || IS_ERR(hqvdp->clk_pix_main)) {
1029 DRM_ERROR("Cannot get clocks\n");
1030 return -ENXIO;
1031 }
1032
1033 /* Get reset resources */
1034 hqvdp->reset = devm_reset_control_get(dev, "hqvdp");
1035 if (!IS_ERR(hqvdp->reset))
1036 reset_control_deassert(hqvdp->reset);
1037
1038 vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 0);
1039 if (vtg_np)
1040 hqvdp->vtg = of_vtg_find(vtg_np);
1041
1042 platform_set_drvdata(pdev, hqvdp);
1043
1044 return component_add(&pdev->dev, &sti_hqvdp_ops);
1045}
1046
1047static int sti_hqvdp_remove(struct platform_device *pdev)
1048{
1049 component_del(&pdev->dev, &sti_hqvdp_ops);
1050 return 0;
1051}
1052
1053static const struct of_device_id hqvdp_of_match[] = {
1054 { .compatible = "st,stih407-hqvdp", },
1055 { /* end node */ }
1056};
1057MODULE_DEVICE_TABLE(of, hqvdp_of_match);
1058
1059struct platform_driver sti_hqvdp_driver = {
1060 .driver = {
1061 .name = "sti-hqvdp",
1062 .owner = THIS_MODULE,
1063 .of_match_table = hqvdp_of_match,
1064 },
1065 .probe = sti_hqvdp_probe,
1066 .remove = sti_hqvdp_remove,
1067};
1068
1069module_platform_driver(sti_hqvdp_driver);
1070
1071MODULE_AUTHOR("Benjamin Gaignard <benjamin.gaignard@st.com>");
1072MODULE_DESCRIPTION("STMicroelectronics SoC DRM driver");
1073MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.h b/drivers/gpu/drm/sti/sti_hqvdp.h
new file mode 100644
index 000000000000..cd5ecd0a6dea
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_hqvdp.h
@@ -0,0 +1,12 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2014
3 * Authors: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
4 * License terms: GNU General Public License (GPL), version 2
5 */
6
7#ifndef _STI_HQVDP_H_
8#define _STI_HQVDP_H_
9
10struct sti_layer *sti_hqvdp_create(struct device *dev);
11
12#endif
diff --git a/drivers/gpu/drm/sti/sti_hqvdp_lut.h b/drivers/gpu/drm/sti/sti_hqvdp_lut.h
new file mode 100644
index 000000000000..619af7f4384e
--- /dev/null
+++ b/drivers/gpu/drm/sti/sti_hqvdp_lut.h
@@ -0,0 +1,373 @@
1/*
2 * Copyright (C) STMicroelectronics SA 2014
3 * Authors: Fabien Dessenne <fabien.dessenne@st.com> for STMicroelectronics.
4 * License terms: GNU General Public License (GPL), version 2
5 */
6
7#ifndef _STI_HQVDP_LUT_H_
8#define _STI_HQVDP_LUT_H_
9
10#define NB_COEF 128
11
12#define SHIFT_LUT_A_LEGACY 8
13#define SHIFT_LUT_B 8
14#define SHIFT_LUT_C_Y_LEGACY 8
15#define SHIFT_LUT_C_C_LEGACY 8
16#define SHIFT_LUT_D_Y_LEGACY 8
17#define SHIFT_LUT_D_C_LEGACY 8
18#define SHIFT_LUT_E_Y_LEGACY 8
19#define SHIFT_LUT_E_C_LEGACY 8
20#define SHIFT_LUT_F_Y_LEGACY 8
21#define SHIFT_LUT_F_C_LEGACY 8
22
23static const u32 coef_lut_a_legacy[NB_COEF] = {
24 0x0000ffff, 0x00010000, 0x000100ff, 0x00000000,
25 0x00000000, 0x00050000, 0xfffc00ff, 0x00000000,
26 0x00000000, 0x00090000, 0xfff900fe, 0x00000000,
27 0x00000000, 0x0010ffff, 0xfff600fb, 0x00000000,
28 0x00000000, 0x0017fffe, 0xfff400f7, 0x00000000,
29 0x00000000, 0x001ffffd, 0xfff200f2, 0x00000000,
30 0x00000000, 0x0027fffc, 0xfff100ec, 0x00000000,
31 0x00000000, 0x0030fffb, 0xfff000e5, 0x00000000,
32 0x00000000, 0x003afffa, 0xffee00de, 0x00000000,
33 0x00000000, 0x0044fff9, 0xffed00d6, 0x00000000,
34 0x00000000, 0x004efff8, 0xffed00cd, 0x00000000,
35 0x00000000, 0x0059fff6, 0xffed00c4, 0x00000000,
36 0x00000000, 0x0064fff5, 0xffed00ba, 0x00000000,
37 0x00000000, 0x006ffff3, 0xffee00b0, 0x00000000,
38 0x00000000, 0x007afff2, 0xffee00a6, 0x00000000,
39 0x00000000, 0x0085fff1, 0xffef009b, 0x00000000,
40 0x00000000, 0x0090fff0, 0xfff00090, 0x00000000,
41 0x00000000, 0x009bffef, 0xfff10085, 0x00000000,
42 0x00000000, 0x00a6ffee, 0xfff2007a, 0x00000000,
43 0x00000000, 0x00b0ffee, 0xfff3006f, 0x00000000,
44 0x00000000, 0x00baffed, 0xfff50064, 0x00000000,
45 0x00000000, 0x00c4ffed, 0xfff60059, 0x00000000,
46 0x00000000, 0x00cdffed, 0xfff8004e, 0x00000000,
47 0x00000000, 0x00d6ffed, 0xfff90044, 0x00000000,
48 0x00000000, 0x00deffee, 0xfffa003a, 0x00000000,
49 0x00000000, 0x00e5fff0, 0xfffb0030, 0x00000000,
50 0x00000000, 0x00ecfff1, 0xfffc0027, 0x00000000,
51 0x00000000, 0x00f2fff2, 0xfffd001f, 0x00000000,
52 0x00000000, 0x00f7fff4, 0xfffe0017, 0x00000000,
53 0x00000000, 0x00fbfff6, 0xffff0010, 0x00000000,
54 0x00000000, 0x00fefff9, 0x00000009, 0x00000000,
55 0x00000000, 0x00fffffc, 0x00000005, 0x00000000
56};
57
58static const u32 coef_lut_b[NB_COEF] = {
59 0x00000000, 0x00000000, 0x00000100, 0x00000000,
60 0x00000000, 0x00000000, 0x00000100, 0x00000000,
61 0x00000000, 0x00000000, 0x00000100, 0x00000000,
62 0x00000000, 0x00000000, 0x00000100, 0x00000000,
63 0x00000000, 0x00000000, 0x00000100, 0x00000000,
64 0x00000000, 0x00000000, 0x00000100, 0x00000000,
65 0x00000000, 0x00000000, 0x00000100, 0x00000000,
66 0x00000000, 0x00000000, 0x00000100, 0x00000000,
67 0x00000000, 0x00000000, 0x00000100, 0x00000000,
68 0x00000000, 0x00000000, 0x00000100, 0x00000000,
69 0x00000000, 0x00000000, 0x00000100, 0x00000000,
70 0x00000000, 0x00000000, 0x00000100, 0x00000000,
71 0x00000000, 0x00000000, 0x00000100, 0x00000000,
72 0x00000000, 0x00000000, 0x00000100, 0x00000000,
73 0x00000000, 0x00000000, 0x00000100, 0x00000000,
74 0x00000000, 0x00000000, 0x00000100, 0x00000000,
75 0x00000000, 0x00000000, 0x00000100, 0x00000000,
76 0x00000000, 0x00000000, 0x00000100, 0x00000000,
77 0x00000000, 0x00000000, 0x00000100, 0x00000000,
78 0x00000000, 0x00000000, 0x00000100, 0x00000000,
79 0x00000000, 0x00000000, 0x00000100, 0x00000000,
80 0x00000000, 0x00000000, 0x00000100, 0x00000000,
81 0x00000000, 0x00000000, 0x00000100, 0x00000000,
82 0x00000000, 0x00000000, 0x00000100, 0x00000000,
83 0x00000000, 0x00000000, 0x00000100, 0x00000000,
84 0x00000000, 0x00000000, 0x00000100, 0x00000000,
85 0x00000000, 0x00000000, 0x00000100, 0x00000000,
86 0x00000000, 0x00000000, 0x00000100, 0x00000000,
87 0x00000000, 0x00000000, 0x00000100, 0x00000000,
88 0x00000000, 0x00000000, 0x00000100, 0x00000000,
89 0x00000000, 0x00000000, 0x00000100, 0x00000000,
90 0x00000000, 0x00000000, 0x00000100, 0x00000000
91};
92
93static const u32 coef_lut_c_y_legacy[NB_COEF] = {
94 0x00060004, 0x0038ffe1, 0x003800be, 0x0006ffe1,
95 0x00050005, 0x0042ffe1, 0x003800b3, 0x0007ffe1,
96 0x00040006, 0x0046ffe1, 0x003300b2, 0x0008ffe2,
97 0x00030007, 0x004cffe1, 0x002e00b1, 0x0008ffe2,
98 0x00020006, 0x0051ffe2, 0x002900b0, 0x0009ffe3,
99 0x00010008, 0x0056ffe2, 0x002400ae, 0x0009ffe4,
100 0xffff0008, 0x005cffe3, 0x001f00ad, 0x000affe4,
101 0xfffe0008, 0x0062ffe4, 0x001a00ab, 0x000affe5,
102 0xfffd000a, 0x0066ffe5, 0x001500a8, 0x000bffe6,
103 0xfffc0009, 0x006bffe7, 0x001100a5, 0x000bffe8,
104 0xfffa000a, 0x0070ffe8, 0x000d00a3, 0x000bffe9,
105 0xfff9000b, 0x0076ffea, 0x0008009f, 0x000bffea,
106 0xfff7000b, 0x007affec, 0x0005009b, 0x000cffec,
107 0xfff6000b, 0x007effef, 0x00010098, 0x000cffed,
108 0xfff4000b, 0x0084fff1, 0xfffd0095, 0x000cffee,
109 0xfff3000b, 0x0088fff4, 0xfffa0090, 0x000cfff0,
110 0xfff1000b, 0x008dfff7, 0xfff7008d, 0x000bfff1,
111 0xfff0000c, 0x0090fffa, 0xfff40088, 0x000bfff3,
112 0xffee000c, 0x0095fffd, 0xfff10084, 0x000bfff4,
113 0xffed000c, 0x00980001, 0xffef007e, 0x000bfff6,
114 0xffec000c, 0x009b0005, 0xffec007a, 0x000bfff7,
115 0xffea000b, 0x009f0008, 0xffea0076, 0x000bfff9,
116 0xffe9000b, 0x00a3000d, 0xffe80070, 0x000afffa,
117 0xffe8000b, 0x00a50011, 0xffe7006b, 0x0009fffc,
118 0xffe6000b, 0x00a80015, 0xffe50066, 0x000afffd,
119 0xffe5000a, 0x00ab001a, 0xffe40062, 0x0008fffe,
120 0xffe4000a, 0x00ad001f, 0xffe3005c, 0x0008ffff,
121 0xffe40009, 0x00ae0024, 0xffe20056, 0x00080001,
122 0xffe30009, 0x00b00029, 0xffe20051, 0x00060002,
123 0xffe20008, 0x00b1002e, 0xffe1004c, 0x00070003,
124 0xffe20008, 0x00b20033, 0xffe10046, 0x00060004,
125 0xffe10007, 0x00b30038, 0xffe10042, 0x00050005
126};
127
128static const u32 coef_lut_c_c_legacy[NB_COEF] = {
129 0x0001fff3, 0x003afffb, 0x003a00a1, 0x0001fffb,
130 0x0001fff5, 0x0041fffb, 0x0038009a, 0x0001fffb,
131 0x0001fff5, 0x0046fffb, 0x00340099, 0x0001fffb,
132 0x0001fff7, 0x0049fffb, 0x00300098, 0x0001fffb,
133 0x0001fff9, 0x004cfffb, 0x002d0096, 0x0001fffb,
134 0x0001fffa, 0x004ffffc, 0x00290095, 0x0001fffb,
135 0x0001fff9, 0x0054fffd, 0x00250093, 0x0001fffc,
136 0x0001fffa, 0x0058fffd, 0x00220092, 0x0000fffc,
137 0x0001fffb, 0x005bfffe, 0x001f0090, 0x0000fffc,
138 0x0001fffd, 0x005effff, 0x001c008c, 0x0000fffd,
139 0x0001fffd, 0x00620000, 0x0019008a, 0x0000fffd,
140 0x0001fffe, 0x00660001, 0x00160088, 0xfffffffd,
141 0x0000fffe, 0x006a0003, 0x00130085, 0xfffffffe,
142 0x0000fffe, 0x006e0004, 0x00100083, 0xfffffffe,
143 0x0000fffe, 0x00710006, 0x000e007f, 0xffffffff,
144 0x0000fffe, 0x00750008, 0x000c007c, 0xfffeffff,
145 0xfffffffe, 0x0079000a, 0x000a0079, 0xfffeffff,
146 0xfffffffe, 0x007c000c, 0x00080075, 0xfffe0000,
147 0xffffffff, 0x007f000e, 0x00060071, 0xfffe0000,
148 0xfffeffff, 0x00830010, 0x0004006e, 0xfffe0000,
149 0xfffeffff, 0x00850013, 0x0003006a, 0xfffe0000,
150 0xfffdffff, 0x00880016, 0x00010066, 0xfffe0001,
151 0xfffd0000, 0x008a0019, 0x00000062, 0xfffd0001,
152 0xfffd0000, 0x008c001c, 0xffff005e, 0xfffd0001,
153 0xfffc0000, 0x0090001f, 0xfffe005b, 0xfffb0001,
154 0xfffc0000, 0x00920022, 0xfffd0058, 0xfffa0001,
155 0xfffc0001, 0x00930025, 0xfffd0054, 0xfff90001,
156 0xfffb0001, 0x00950029, 0xfffc004f, 0xfffa0001,
157 0xfffb0001, 0x0096002d, 0xfffb004c, 0xfff90001,
158 0xfffb0001, 0x00980030, 0xfffb0049, 0xfff70001,
159 0xfffb0001, 0x00990034, 0xfffb0046, 0xfff50001,
160 0xfffb0001, 0x009a0038, 0xfffb0041, 0xfff50001
161};
162
163static const u32 coef_lut_d_y_legacy[NB_COEF] = {
164 0xfff80009, 0x0046ffec, 0x004600a3, 0xfff8ffec,
165 0xfff70009, 0x004effed, 0x0044009d, 0xfff9ffeb,
166 0xfff6000a, 0x0052ffee, 0x003f009d, 0xfffaffea,
167 0xfff50009, 0x0057ffef, 0x003b009d, 0xfffbffe9,
168 0xfff50008, 0x005bfff0, 0x0037009c, 0xfffcffe9,
169 0xfff40008, 0x005ffff2, 0x0033009b, 0xfffcffe9,
170 0xfff30007, 0x0064fff3, 0x002f009b, 0xfffdffe8,
171 0xfff20007, 0x0068fff5, 0x002b0099, 0xfffeffe8,
172 0xfff10008, 0x006bfff7, 0x00270097, 0xffffffe8,
173 0xfff00007, 0x006ffff9, 0x00230097, 0xffffffe8,
174 0xffef0006, 0x0073fffb, 0x00200095, 0x0000ffe8,
175 0xffee0005, 0x0077fffe, 0x001c0093, 0x0000ffe9,
176 0xffee0005, 0x007a0000, 0x00180091, 0x0001ffe9,
177 0xffed0005, 0x007d0003, 0x0015008e, 0x0002ffe9,
178 0xffec0005, 0x00800006, 0x0012008b, 0x0002ffea,
179 0xffeb0004, 0x00840008, 0x000e008a, 0x0003ffea,
180 0xffeb0003, 0x0087000b, 0x000b0087, 0x0003ffeb,
181 0xffea0003, 0x008a000e, 0x00080084, 0x0004ffeb,
182 0xffea0002, 0x008b0012, 0x00060080, 0x0005ffec,
183 0xffe90002, 0x008e0015, 0x0003007d, 0x0005ffed,
184 0xffe90001, 0x00910018, 0x0000007a, 0x0005ffee,
185 0xffe90000, 0x0093001c, 0xfffe0077, 0x0005ffee,
186 0xffe80000, 0x00950020, 0xfffb0073, 0x0006ffef,
187 0xffe8ffff, 0x00970023, 0xfff9006f, 0x0007fff0,
188 0xffe8ffff, 0x00970027, 0xfff7006b, 0x0008fff1,
189 0xffe8fffe, 0x0099002b, 0xfff50068, 0x0007fff2,
190 0xffe8fffd, 0x009b002f, 0xfff30064, 0x0007fff3,
191 0xffe9fffc, 0x009b0033, 0xfff2005f, 0x0008fff4,
192 0xffe9fffc, 0x009c0037, 0xfff0005b, 0x0008fff5,
193 0xffe9fffb, 0x009d003b, 0xffef0057, 0x0009fff5,
194 0xffeafffa, 0x009d003f, 0xffee0052, 0x000afff6,
195 0xffebfff9, 0x009d0044, 0xffed004e, 0x0009fff7
196};
197
198static const u32 coef_lut_d_c_legacy[NB_COEF] = {
199 0xfffeffff, 0x003fffff, 0x003f0089, 0xfffeffff,
200 0xfffe0000, 0x00460000, 0x0042007d, 0xfffffffe,
201 0xfffe0000, 0x00490001, 0x003f007d, 0xfffffffd,
202 0xfffd0001, 0x004b0002, 0x003c007d, 0x0000fffc,
203 0xfffd0001, 0x004e0003, 0x0039007c, 0x0000fffc,
204 0xfffc0001, 0x00510005, 0x0036007c, 0x0000fffb,
205 0xfffc0001, 0x00540006, 0x0033007b, 0x0001fffa,
206 0xfffc0003, 0x00550008, 0x00310078, 0x0001fffa,
207 0xfffb0003, 0x00580009, 0x002e0078, 0x0001fffa,
208 0xfffb0002, 0x005b000b, 0x002b0077, 0x0002fff9,
209 0xfffa0003, 0x005e000d, 0x00280075, 0x0002fff9,
210 0xfffa0002, 0x0060000f, 0x00260074, 0x0002fff9,
211 0xfffa0004, 0x00610011, 0x00230072, 0x0002fff9,
212 0xfffa0004, 0x00640013, 0x00200070, 0x0002fff9,
213 0xfff90004, 0x00660015, 0x001e006e, 0x0003fff9,
214 0xfff90004, 0x00680017, 0x001c006c, 0x0003fff9,
215 0xfff90003, 0x006b0019, 0x0019006b, 0x0003fff9,
216 0xfff90003, 0x006c001c, 0x00170068, 0x0004fff9,
217 0xfff90003, 0x006e001e, 0x00150066, 0x0004fff9,
218 0xfff90002, 0x00700020, 0x00130064, 0x0004fffa,
219 0xfff90002, 0x00720023, 0x00110061, 0x0004fffa,
220 0xfff90002, 0x00740026, 0x000f0060, 0x0002fffa,
221 0xfff90002, 0x00750028, 0x000d005e, 0x0003fffa,
222 0xfff90002, 0x0077002b, 0x000b005b, 0x0002fffb,
223 0xfffa0001, 0x0078002e, 0x00090058, 0x0003fffb,
224 0xfffa0001, 0x00780031, 0x00080055, 0x0003fffc,
225 0xfffa0001, 0x007b0033, 0x00060054, 0x0001fffc,
226 0xfffb0000, 0x007c0036, 0x00050051, 0x0001fffc,
227 0xfffc0000, 0x007c0039, 0x0003004e, 0x0001fffd,
228 0xfffc0000, 0x007d003c, 0x0002004b, 0x0001fffd,
229 0xfffdffff, 0x007d003f, 0x00010049, 0x0000fffe,
230 0xfffeffff, 0x007d0042, 0x00000046, 0x0000fffe
231};
232
233static const u32 coef_lut_e_y_legacy[NB_COEF] = {
234 0xfff10001, 0x00490004, 0x00490083, 0xfff10004,
235 0xfff10000, 0x00500006, 0x004b007b, 0xfff10002,
236 0xfff10000, 0x00530007, 0x0048007b, 0xfff10001,
237 0xfff10000, 0x00550009, 0x0046007a, 0xfff10000,
238 0xfff1fffe, 0x0058000b, 0x0043007b, 0xfff2fffe,
239 0xfff1ffff, 0x005a000d, 0x0040007a, 0xfff2fffd,
240 0xfff1fffd, 0x005d000f, 0x003e007a, 0xfff2fffc,
241 0xfff1fffd, 0x005f0011, 0x003b0079, 0xfff3fffb,
242 0xfff1fffc, 0x00610013, 0x00390079, 0xfff3fffa,
243 0xfff1fffb, 0x00640015, 0x00360079, 0xfff3fff9,
244 0xfff1fffa, 0x00660017, 0x00340078, 0xfff4fff8,
245 0xfff1fffb, 0x00680019, 0x00310077, 0xfff4fff7,
246 0xfff2fff9, 0x006a001b, 0x002f0076, 0xfff5fff6,
247 0xfff2fff9, 0x006c001e, 0x002c0075, 0xfff5fff5,
248 0xfff2fff9, 0x006d0020, 0x002a0073, 0xfff6fff5,
249 0xfff3fff7, 0x00700022, 0x00270073, 0xfff6fff4,
250 0xfff3fff7, 0x00710025, 0x00250071, 0xfff7fff3,
251 0xfff4fff6, 0x00730027, 0x00220070, 0xfff7fff3,
252 0xfff5fff6, 0x0073002a, 0x0020006d, 0xfff9fff2,
253 0xfff5fff5, 0x0075002c, 0x001e006c, 0xfff9fff2,
254 0xfff6fff5, 0x0076002f, 0x001b006a, 0xfff9fff2,
255 0xfff7fff4, 0x00770031, 0x00190068, 0xfffbfff1,
256 0xfff8fff4, 0x00780034, 0x00170066, 0xfffafff1,
257 0xfff9fff3, 0x00790036, 0x00150064, 0xfffbfff1,
258 0xfffafff3, 0x00790039, 0x00130061, 0xfffcfff1,
259 0xfffbfff3, 0x0079003b, 0x0011005f, 0xfffdfff1,
260 0xfffcfff2, 0x007a003e, 0x000f005d, 0xfffdfff1,
261 0xfffdfff2, 0x007a0040, 0x000d005a, 0xfffffff1,
262 0xfffefff2, 0x007b0043, 0x000b0058, 0xfffefff1,
263 0x0000fff1, 0x007a0046, 0x00090055, 0x0000fff1,
264 0x0001fff1, 0x007b0048, 0x00070053, 0x0000fff1,
265 0x0002fff1, 0x007b004b, 0x00060050, 0x0000fff1
266};
267
268static const u32 coef_lut_e_c_legacy[NB_COEF] = {
269 0xfffa0001, 0x003f0010, 0x003f006d, 0xfffa0010,
270 0xfffb0002, 0x00440011, 0x00440062, 0xfffa000e,
271 0xfffb0001, 0x00460013, 0x00420062, 0xfffa000d,
272 0xfffb0000, 0x00480014, 0x00410062, 0xfffa000c,
273 0xfffb0001, 0x00490015, 0x003f0061, 0xfffb000b,
274 0xfffb0000, 0x004b0017, 0x003d0061, 0xfffb000a,
275 0xfffb0000, 0x004d0018, 0x003b0062, 0xfffb0008,
276 0xfffcffff, 0x004f001a, 0x00390061, 0xfffb0007,
277 0xfffc0000, 0x004f001c, 0x00380060, 0xfffb0006,
278 0xfffcffff, 0x0052001d, 0x00360060, 0xfffb0005,
279 0xfffdfffe, 0x0053001f, 0x00340060, 0xfffb0004,
280 0xfffdfffe, 0x00540021, 0x0032005e, 0xfffc0004,
281 0xfffeffff, 0x00550022, 0x0030005d, 0xfffc0003,
282 0xfffeffff, 0x00560024, 0x002f005c, 0xfffc0002,
283 0xfffffffd, 0x00580026, 0x002d005c, 0xfffc0001,
284 0xfffffffd, 0x005a0027, 0x002b005c, 0xfffc0000,
285 0x0000fffd, 0x005a0029, 0x0029005a, 0xfffd0000,
286 0x0000fffc, 0x005c002b, 0x0027005a, 0xfffdffff,
287 0x0001fffc, 0x005c002d, 0x00260058, 0xfffdffff,
288 0x0002fffc, 0x005c002f, 0x00240056, 0xfffffffe,
289 0x0003fffc, 0x005d0030, 0x00220055, 0xfffffffe,
290 0x0004fffc, 0x005e0032, 0x00210054, 0xfffefffd,
291 0x0004fffb, 0x00600034, 0x001f0053, 0xfffefffd,
292 0x0005fffb, 0x00600036, 0x001d0052, 0xfffffffc,
293 0x0006fffb, 0x00600038, 0x001c004f, 0x0000fffc,
294 0x0007fffb, 0x00610039, 0x001a004f, 0xfffffffc,
295 0x0008fffb, 0x0062003b, 0x0018004d, 0x0000fffb,
296 0x000afffb, 0x0061003d, 0x0017004b, 0x0000fffb,
297 0x000bfffb, 0x0061003f, 0x00150049, 0x0001fffb,
298 0x000cfffa, 0x00620041, 0x00140048, 0x0000fffb,
299 0x000dfffa, 0x00620042, 0x00130046, 0x0001fffb,
300 0x000efffa, 0x00620044, 0x00110044, 0x0002fffb
301};
302
303static const u32 coef_lut_f_y_legacy[NB_COEF] = {
304 0xfff6fff0, 0x00490012, 0x0049006e, 0xfff60012,
305 0xfff7fff1, 0x004e0013, 0x00490068, 0xfff60010,
306 0xfff7fff2, 0x004f0015, 0x00470067, 0xfff6000f,
307 0xfff7fff5, 0x004f0017, 0x00450065, 0xfff6000e,
308 0xfff8fff5, 0x00500018, 0x00440065, 0xfff6000c,
309 0xfff8fff6, 0x0051001a, 0x00420064, 0xfff6000b,
310 0xfff8fff6, 0x0052001c, 0x00400064, 0xfff6000a,
311 0xfff9fff6, 0x0054001d, 0x003e0064, 0xfff60008,
312 0xfff9fff8, 0x0054001f, 0x003c0063, 0xfff60007,
313 0xfffafff8, 0x00550021, 0x003a0062, 0xfff60006,
314 0xfffbfff7, 0x00560022, 0x00390062, 0xfff60005,
315 0xfffbfff8, 0x00570024, 0x00370061, 0xfff60004,
316 0xfffcfff8, 0x00580026, 0x00350060, 0xfff60003,
317 0xfffdfff8, 0x00590028, 0x0033005f, 0xfff60002,
318 0xfffdfff7, 0x005b002a, 0x0031005f, 0xfff60001,
319 0xfffefff7, 0x005c002c, 0x002f005e, 0xfff60000,
320 0xfffffff6, 0x005e002d, 0x002d005e, 0xfff6ffff,
321 0x0000fff6, 0x005e002f, 0x002c005c, 0xfff7fffe,
322 0x0001fff6, 0x005f0031, 0x002a005b, 0xfff7fffd,
323 0x0002fff6, 0x005f0033, 0x00280059, 0xfff8fffd,
324 0x0003fff6, 0x00600035, 0x00260058, 0xfff8fffc,
325 0x0004fff6, 0x00610037, 0x00240057, 0xfff8fffb,
326 0x0005fff6, 0x00620039, 0x00220056, 0xfff7fffb,
327 0x0006fff6, 0x0062003a, 0x00210055, 0xfff8fffa,
328 0x0007fff6, 0x0063003c, 0x001f0054, 0xfff8fff9,
329 0x0008fff6, 0x0064003e, 0x001d0054, 0xfff6fff9,
330 0x000afff6, 0x00640040, 0x001c0052, 0xfff6fff8,
331 0x000bfff6, 0x00640042, 0x001a0051, 0xfff6fff8,
332 0x000cfff6, 0x00650044, 0x00180050, 0xfff5fff8,
333 0x000efff6, 0x00650045, 0x0017004f, 0xfff5fff7,
334 0x000ffff6, 0x00670047, 0x0015004f, 0xfff2fff7,
335 0x0010fff6, 0x00680049, 0x0013004e, 0xfff1fff7
336};
337
338static const u32 coef_lut_f_c_legacy[NB_COEF] = {
339 0x0000fffb, 0x003a001a, 0x003a005d, 0x0000001a,
340 0x0001fffb, 0x003f001b, 0x00400051, 0x00000019,
341 0x0001fffc, 0x0040001c, 0x003f0051, 0x00000017,
342 0x0002fffb, 0x0042001d, 0x003e0051, 0xffff0016,
343 0x0002fffb, 0x0043001e, 0x003d0051, 0xffff0015,
344 0x0003fffc, 0x00430020, 0x003b0050, 0xffff0014,
345 0x0003fffb, 0x00450021, 0x003a0051, 0xfffe0013,
346 0x0004fffc, 0x00450022, 0x00390050, 0xfffe0012,
347 0x0005fffc, 0x00460023, 0x0038004f, 0xfffe0011,
348 0x0005fffb, 0x00480025, 0x00360050, 0xfffd0010,
349 0x0006fffc, 0x00480026, 0x0035004f, 0xfffd000f,
350 0x0006fffc, 0x00490027, 0x0034004f, 0xfffd000e,
351 0x0007fffd, 0x00490028, 0x0033004e, 0xfffd000d,
352 0x0008fffc, 0x004a002a, 0x0031004d, 0xfffd000d,
353 0x0009fffd, 0x004a002b, 0x0030004d, 0xfffc000c,
354 0x0009fffc, 0x004c002c, 0x002f004d, 0xfffc000b,
355 0x000afffc, 0x004c002e, 0x002e004c, 0xfffc000a,
356 0x000bfffc, 0x004d002f, 0x002c004c, 0xfffc0009,
357 0x000cfffc, 0x004d0030, 0x002b004a, 0xfffd0009,
358 0x000dfffd, 0x004d0031, 0x002a004a, 0xfffc0008,
359 0x000dfffd, 0x004e0033, 0x00280049, 0xfffd0007,
360 0x000efffd, 0x004f0034, 0x00270049, 0xfffc0006,
361 0x000ffffd, 0x004f0035, 0x00260048, 0xfffc0006,
362 0x0010fffd, 0x00500036, 0x00250048, 0xfffb0005,
363 0x0011fffe, 0x004f0038, 0x00230046, 0xfffc0005,
364 0x0012fffe, 0x00500039, 0x00220045, 0xfffc0004,
365 0x0013fffe, 0x0051003a, 0x00210045, 0xfffb0003,
366 0x0014ffff, 0x0050003b, 0x00200043, 0xfffc0003,
367 0x0015ffff, 0x0051003d, 0x001e0043, 0xfffb0002,
368 0x0016ffff, 0x0051003e, 0x001d0042, 0xfffb0002,
369 0x00170000, 0x0051003f, 0x001c0040, 0xfffc0001,
370 0x00190000, 0x00510040, 0x001b003f, 0xfffb0001
371};
372
373#endif
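
A note on the tables above: each u32 appears to pack two signed 16-bit filter taps (low half-word first), which is why small negative coefficients show up as 0xfff... half-words. The sketch below is a minimal decode under that assumption; unpack_taps is a hypothetical helper, not part of the driver.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical helper: split one LUT word into its two signed
     * 16-bit taps, assuming the low half-word is the first tap. */
    static void unpack_taps(uint32_t word, int16_t *t0, int16_t *t1)
    {
        *t0 = (int16_t)(word & 0xffff);    /* e.g. 0xfff9 -> -7 */
        *t1 = (int16_t)(word >> 16);
    }

    int main(void)
    {
        int16_t t0, t1;

        unpack_taps(0x0001fff9, &t0, &t1); /* a word from the tables above */
        printf("%d %d\n", t0, t1);         /* prints "-7 1" */
        return 0;
    }
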
diff --git a/drivers/gpu/drm/sti/sti_layer.c b/drivers/gpu/drm/sti/sti_layer.c
index 06a587c4f1bb..899104f9d4bc 100644
--- a/drivers/gpu/drm/sti/sti_layer.c
+++ b/drivers/gpu/drm/sti/sti_layer.c
@@ -11,7 +11,9 @@
11#include <drm/drm_fb_cma_helper.h> 11#include <drm/drm_fb_cma_helper.h>
12 12
13#include "sti_compositor.h" 13#include "sti_compositor.h"
14#include "sti_cursor.h"
14#include "sti_gdp.h" 15#include "sti_gdp.h"
16#include "sti_hqvdp.h"
15#include "sti_layer.h" 17#include "sti_layer.h"
16#include "sti_vid.h" 18#include "sti_vid.h"
17 19
@@ -32,10 +34,13 @@ const char *sti_layer_to_str(struct sti_layer *layer)
32 return "VID1"; 34 return "VID1";
33 case STI_CURSOR: 35 case STI_CURSOR:
34 return "CURSOR"; 36 return "CURSOR";
37 case STI_HQVDP_0:
38 return "HQVDP0";
35 default: 39 default:
36 return "<UNKNOWN LAYER>"; 40 return "<UNKNOWN LAYER>";
37 } 41 }
38} 42}
43EXPORT_SYMBOL(sti_layer_to_str);
39 44
40struct sti_layer *sti_layer_create(struct device *dev, int desc, 45struct sti_layer *sti_layer_create(struct device *dev, int desc,
41 void __iomem *baseaddr) 46 void __iomem *baseaddr)
@@ -50,6 +55,12 @@ struct sti_layer *sti_layer_create(struct device *dev, int desc,
50 case STI_VID: 55 case STI_VID:
51 layer = sti_vid_create(dev); 56 layer = sti_vid_create(dev);
52 break; 57 break;
58 case STI_CUR:
59 layer = sti_cursor_create(dev);
60 break;
61 case STI_VDP:
62 layer = sti_hqvdp_create(dev);
63 break;
53 } 64 }
54 65
55 if (!layer) { 66 if (!layer) {
@@ -67,8 +78,11 @@ struct sti_layer *sti_layer_create(struct device *dev, int desc,
67 78
68 return layer; 79 return layer;
69} 80}
81EXPORT_SYMBOL(sti_layer_create);
70 82
71int sti_layer_prepare(struct sti_layer *layer, struct drm_framebuffer *fb, 83int sti_layer_prepare(struct sti_layer *layer,
84 struct drm_crtc *crtc,
85 struct drm_framebuffer *fb,
72 struct drm_display_mode *mode, int mixer_id, 86 struct drm_display_mode *mode, int mixer_id,
73 int dest_x, int dest_y, int dest_w, int dest_h, 87 int dest_x, int dest_y, int dest_w, int dest_h,
74 int src_x, int src_y, int src_w, int src_h) 88 int src_x, int src_y, int src_w, int src_h)
@@ -88,6 +102,7 @@ int sti_layer_prepare(struct sti_layer *layer, struct drm_framebuffer *fb,
88 return 1; 102 return 1;
89 } 103 }
90 104
105 layer->crtc = crtc;
91 layer->fb = fb; 106 layer->fb = fb;
92 layer->mode = mode; 107 layer->mode = mode;
93 layer->mixer_id = mixer_id; 108 layer->mixer_id = mixer_id;
@@ -100,6 +115,7 @@ int sti_layer_prepare(struct sti_layer *layer, struct drm_framebuffer *fb,
100 layer->src_w = src_w; 115 layer->src_w = src_w;
101 layer->src_h = src_h; 116 layer->src_h = src_h;
102 layer->format = fb->pixel_format; 117 layer->format = fb->pixel_format;
118 layer->vaddr = cma_obj->vaddr;
103 layer->paddr = cma_obj->paddr; 119 layer->paddr = cma_obj->paddr;
104 for (i = 0; i < 4; i++) { 120 for (i = 0; i < 4; i++) {
105 layer->pitches[i] = fb->pitches[i]; 121 layer->pitches[i] = fb->pitches[i];
diff --git a/drivers/gpu/drm/sti/sti_layer.h b/drivers/gpu/drm/sti/sti_layer.h
index 198c3774cc12..ceff497f557e 100644
--- a/drivers/gpu/drm/sti/sti_layer.h
+++ b/drivers/gpu/drm/sti/sti_layer.h
@@ -22,7 +22,8 @@ enum sti_layer_type {
22 STI_GDP = 1 << STI_LAYER_TYPE_SHIFT, 22 STI_GDP = 1 << STI_LAYER_TYPE_SHIFT,
23 STI_VID = 2 << STI_LAYER_TYPE_SHIFT, 23 STI_VID = 2 << STI_LAYER_TYPE_SHIFT,
24 STI_CUR = 3 << STI_LAYER_TYPE_SHIFT, 24 STI_CUR = 3 << STI_LAYER_TYPE_SHIFT,
25 STI_BCK = 4 << STI_LAYER_TYPE_SHIFT 25 STI_BCK = 4 << STI_LAYER_TYPE_SHIFT,
26 STI_VDP = 5 << STI_LAYER_TYPE_SHIFT
26}; 27};
27 28
28enum sti_layer_id_of_type { 29enum sti_layer_id_of_type {
@@ -39,6 +40,7 @@ enum sti_layer_desc {
39 STI_GDP_3 = STI_GDP | STI_ID_3, 40 STI_GDP_3 = STI_GDP | STI_ID_3,
40 STI_VID_0 = STI_VID | STI_ID_0, 41 STI_VID_0 = STI_VID | STI_ID_0,
41 STI_VID_1 = STI_VID | STI_ID_1, 42 STI_VID_1 = STI_VID | STI_ID_1,
43 STI_HQVDP_0 = STI_VDP | STI_ID_0,
42 STI_CURSOR = STI_CUR, 44 STI_CURSOR = STI_CUR,
43 STI_BACK = STI_BCK 45 STI_BACK = STI_BCK
44}; 46};
@@ -67,6 +69,7 @@ struct sti_layer_funcs {
67 * 69 *
68 * @plane: drm plane it is bound to (if any) 70 * @plane: drm plane it is bound to (if any)
69 * @fb: drm fb it is bound to 71 * @fb: drm fb it is bound to
72 * @crtc: crtc it is bound to
70 * @mode: display mode 73 * @mode: display mode
71 * @desc: layer type & id 74 * @desc: layer type & id
72 * @device: driver device 75 * @device: driver device
@@ -82,11 +85,13 @@ struct sti_layer_funcs {
82 * @format: format 85 * @format: format
83 * @pitches: pitch of 'planes' (eg: Y, U, V) 86 * @pitches: pitch of 'planes' (eg: Y, U, V)
84 * @offsets: offset of 'planes' 87 * @offsets: offset of 'planes'
88 * @vaddr: virtual address of the input buffer
85 * @paddr: physical address of the input buffer 89 * @paddr: physical address of the input buffer
86 */ 90 */
87struct sti_layer { 91struct sti_layer {
88 struct drm_plane plane; 92 struct drm_plane plane;
89 struct drm_framebuffer *fb; 93 struct drm_framebuffer *fb;
94 struct drm_crtc *crtc;
90 struct drm_display_mode *mode; 95 struct drm_display_mode *mode;
91 enum sti_layer_desc desc; 96 enum sti_layer_desc desc;
92 struct device *dev; 97 struct device *dev;
@@ -102,12 +107,15 @@ struct sti_layer {
102 uint32_t format; 107 uint32_t format;
103 unsigned int pitches[4]; 108 unsigned int pitches[4];
104 unsigned int offsets[4]; 109 unsigned int offsets[4];
110 void *vaddr;
105 dma_addr_t paddr; 111 dma_addr_t paddr;
106}; 112};
107 113
108struct sti_layer *sti_layer_create(struct device *dev, int desc, 114struct sti_layer *sti_layer_create(struct device *dev, int desc,
109 void __iomem *baseaddr); 115 void __iomem *baseaddr);
110int sti_layer_prepare(struct sti_layer *layer, struct drm_framebuffer *fb, 116int sti_layer_prepare(struct sti_layer *layer,
117 struct drm_crtc *crtc,
118 struct drm_framebuffer *fb,
111 struct drm_display_mode *mode, 119 struct drm_display_mode *mode,
112 int mixer_id, 120 int mixer_id,
113 int dest_x, int dest_y, 121 int dest_x, int dest_y,
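
The descriptor scheme visible in this hunk ORs a type code, shifted by STI_LAYER_TYPE_SHIFT, with a small instance ID, so the new STI_HQVDP_0 is simply STI_VDP | STI_ID_0. The sketch below pulls the two fields back apart; the shift width of 8 is illustrative, since the real STI_LAYER_TYPE_SHIFT value is outside this hunk.

    #include <stdio.h>

    #define TYPE_SHIFT 8                 /* illustrative, not the real value */
    #define STI_VDP    (5 << TYPE_SHIFT)
    #define STI_ID_0   0

    int main(void)
    {
        int desc = STI_VDP | STI_ID_0;   /* i.e. STI_HQVDP_0 */
        int id   = desc & ((1 << TYPE_SHIFT) - 1);
        int type = desc & ~((1 << TYPE_SHIFT) - 1);

        printf("type=%#x id=%d\n", type, id);   /* type=0x500 id=0 */
        return 0;
    }
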
diff --git a/drivers/gpu/drm/sti/sti_mixer.c b/drivers/gpu/drm/sti/sti_mixer.c
index 79f369db9fb6..13a4b84deab6 100644
--- a/drivers/gpu/drm/sti/sti_mixer.c
+++ b/drivers/gpu/drm/sti/sti_mixer.c
@@ -45,6 +45,7 @@ static const u32 mixerColorSpaceMatIdentity[] = {
45#define GAM_CTL_GDP1_MASK BIT(4) 45#define GAM_CTL_GDP1_MASK BIT(4)
46#define GAM_CTL_GDP2_MASK BIT(5) 46#define GAM_CTL_GDP2_MASK BIT(5)
47#define GAM_CTL_GDP3_MASK BIT(6) 47#define GAM_CTL_GDP3_MASK BIT(6)
48#define GAM_CTL_CURSOR_MASK BIT(9)
48 49
49const char *sti_mixer_to_str(struct sti_mixer *mixer) 50const char *sti_mixer_to_str(struct sti_mixer *mixer)
50{ 51{
@@ -122,11 +123,15 @@ int sti_mixer_set_layer_depth(struct sti_mixer *mixer, struct sti_layer *layer)
122 layer_id = GAM_DEPTH_GDP3_ID; 123 layer_id = GAM_DEPTH_GDP3_ID;
123 break; 124 break;
124 case STI_VID_0: 125 case STI_VID_0:
126 case STI_HQVDP_0:
125 layer_id = GAM_DEPTH_VID0_ID; 127 layer_id = GAM_DEPTH_VID0_ID;
126 break; 128 break;
127 case STI_VID_1: 129 case STI_VID_1:
128 layer_id = GAM_DEPTH_VID1_ID; 130 layer_id = GAM_DEPTH_VID1_ID;
129 break; 131 break;
132 case STI_CURSOR:
133 /* no need to set depth for cursor */
134 return 0;
130 default: 135 default:
131 DRM_ERROR("Unknown layer %d\n", layer->desc); 136 DRM_ERROR("Unknown layer %d\n", layer->desc);
132 return 1; 137 return 1;
@@ -185,9 +190,12 @@ static u32 sti_mixer_get_layer_mask(struct sti_layer *layer)
185 case STI_GDP_3: 190 case STI_GDP_3:
186 return GAM_CTL_GDP3_MASK; 191 return GAM_CTL_GDP3_MASK;
187 case STI_VID_0: 192 case STI_VID_0:
193 case STI_HQVDP_0:
188 return GAM_CTL_VID0_MASK; 194 return GAM_CTL_VID0_MASK;
189 case STI_VID_1: 195 case STI_VID_1:
190 return GAM_CTL_VID1_MASK; 196 return GAM_CTL_VID1_MASK;
197 case STI_CURSOR:
198 return GAM_CTL_CURSOR_MASK;
191 default: 199 default:
192 return 0; 200 return 0;
193 } 201 }
@@ -215,6 +223,15 @@ int sti_mixer_set_layer_status(struct sti_mixer *mixer,
215 return 0; 223 return 0;
216} 224}
217 225
226void sti_mixer_clear_all_layers(struct sti_mixer *mixer)
227{
228 u32 val;
229
 230 DRM_DEBUG_DRIVER("%s clear all layers\n", sti_mixer_to_str(mixer));
231 val = sti_mixer_reg_read(mixer, GAM_MIXER_CTL) & 0xFFFF0000;
232 sti_mixer_reg_write(mixer, GAM_MIXER_CTL, val);
233}
234
218void sti_mixer_set_matrix(struct sti_mixer *mixer) 235void sti_mixer_set_matrix(struct sti_mixer *mixer)
219{ 236{
220 unsigned int i; 237 unsigned int i;
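
sti_mixer_clear_all_layers keeps only the upper 16 bits of GAM_MIXER_CTL, so every per-layer enable bit lives in the low half-word (GDP1-3 at bits 4-6 and the new cursor bit at bit 9, per the GAM_CTL_*_MASK defines). The read-modify-write used to flip a single layer looks roughly like the sketch below, where reg_read/reg_write stand in for sti_mixer_reg_read/sti_mixer_reg_write.

    #include <stdint.h>

    #define GAM_CTL_CURSOR_MASK (1u << 9)   /* BIT(9), as in the hunk */

    /* Stand-ins for sti_mixer_reg_read/sti_mixer_reg_write. */
    static uint32_t mixer_ctl;
    static uint32_t reg_read(void)      { return mixer_ctl; }
    static void reg_write(uint32_t val) { mixer_ctl = val; }

    /* Enable or disable one layer without touching the others. */
    static void set_layer(uint32_t mask, int enable)
    {
        uint32_t val = reg_read();

        if (enable)
            val |= mask;
        else
            val &= ~mask;
        reg_write(val);
    }

    int main(void)
    {
        set_layer(GAM_CTL_CURSOR_MASK, 1);  /* show the cursor plane */
        set_layer(GAM_CTL_CURSOR_MASK, 0);  /* hide it again */
        return 0;
    }
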
diff --git a/drivers/gpu/drm/sti/sti_mixer.h b/drivers/gpu/drm/sti/sti_mixer.h
index 874372102e52..b97282182908 100644
--- a/drivers/gpu/drm/sti/sti_mixer.h
+++ b/drivers/gpu/drm/sti/sti_mixer.h
@@ -23,6 +23,7 @@
23 * @id: id of the mixer 23 * @id: id of the mixer
24 * @drm_crtc: crtc object link to the mixer 24 * @drm_crtc: crtc object link to the mixer
25 * @pending_event: set if a flip event is pending on crtc 25 * @pending_event: set if a flip event is pending on crtc
 26 * @enabled: true if the mixer is active
26 */ 27 */
27struct sti_mixer { 28struct sti_mixer {
28 struct device *dev; 29 struct device *dev;
@@ -30,6 +31,7 @@ struct sti_mixer {
30 int id; 31 int id;
31 struct drm_crtc drm_crtc; 32 struct drm_crtc drm_crtc;
32 struct drm_pending_vblank_event *pending_event; 33 struct drm_pending_vblank_event *pending_event;
34 bool enabled;
33}; 35};
34 36
35const char *sti_mixer_to_str(struct sti_mixer *mixer); 37const char *sti_mixer_to_str(struct sti_mixer *mixer);
@@ -39,6 +41,7 @@ struct sti_mixer *sti_mixer_create(struct device *dev, int id,
39 41
40int sti_mixer_set_layer_status(struct sti_mixer *mixer, 42int sti_mixer_set_layer_status(struct sti_mixer *mixer,
41 struct sti_layer *layer, bool status); 43 struct sti_layer *layer, bool status);
44void sti_mixer_clear_all_layers(struct sti_mixer *mixer);
42int sti_mixer_set_layer_depth(struct sti_mixer *mixer, struct sti_layer *layer); 45int sti_mixer_set_layer_depth(struct sti_mixer *mixer, struct sti_layer *layer);
43int sti_mixer_active_video_area(struct sti_mixer *mixer, 46int sti_mixer_active_video_area(struct sti_mixer *mixer,
44 struct drm_display_mode *mode); 47 struct drm_display_mode *mode);
diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c
index b8afe490356a..cb924aa2b321 100644
--- a/drivers/gpu/drm/sti/sti_tvout.c
+++ b/drivers/gpu/drm/sti/sti_tvout.c
@@ -16,6 +16,8 @@
16#include <drm/drmP.h> 16#include <drm/drmP.h>
17#include <drm/drm_crtc_helper.h> 17#include <drm/drm_crtc_helper.h>
18 18
19#include "sti_drm_crtc.h"
20
19/* glue registers */ 21/* glue registers */
20#define TVO_CSC_MAIN_M0 0x000 22#define TVO_CSC_MAIN_M0 0x000
21#define TVO_CSC_MAIN_M1 0x004 23#define TVO_CSC_MAIN_M1 0x004
@@ -96,7 +98,7 @@
96 98
97#define TVO_SYNC_HD_DCS_SHIFT 8 99#define TVO_SYNC_HD_DCS_SHIFT 8
98 100
99#define ENCODER_MAIN_CRTC_MASK BIT(0) 101#define ENCODER_CRTC_MASK (BIT(0) | BIT(1))
100 102
101/* enum listing the supported output data format */ 103/* enum listing the supported output data format */
102enum sti_tvout_video_out_type { 104enum sti_tvout_video_out_type {
@@ -149,14 +151,15 @@ static void tvout_write(struct sti_tvout *tvout, u32 val, int offset)
 149 * Set the color order of a VIP 151 * Set the color order of a VIP
150 * 152 *
151 * @tvout: tvout structure 153 * @tvout: tvout structure
154 * @reg: register to set
 152 * @cr_r: Cr/R channel reorder position 155 * @cr_r: Cr/R channel reorder position
 153 * @y_g: Y/G channel reorder position 156 * @y_g: Y/G channel reorder position
 154 * @cb_b: Cb/B channel reorder position 157 * @cb_b: Cb/B channel reorder position
155 */ 158 */
156static void tvout_vip_set_color_order(struct sti_tvout *tvout, 159static void tvout_vip_set_color_order(struct sti_tvout *tvout, int reg,
157 u32 cr_r, u32 y_g, u32 cb_b) 160 u32 cr_r, u32 y_g, u32 cb_b)
158{ 161{
159 u32 val = tvout_read(tvout, TVO_VIP_HDMI); 162 u32 val = tvout_read(tvout, reg);
160 163
161 val &= ~(TVO_VIP_REORDER_MASK << TVO_VIP_REORDER_R_SHIFT); 164 val &= ~(TVO_VIP_REORDER_MASK << TVO_VIP_REORDER_R_SHIFT);
162 val &= ~(TVO_VIP_REORDER_MASK << TVO_VIP_REORDER_G_SHIFT); 165 val &= ~(TVO_VIP_REORDER_MASK << TVO_VIP_REORDER_G_SHIFT);
@@ -165,52 +168,58 @@ static void tvout_vip_set_color_order(struct sti_tvout *tvout,
165 val |= y_g << TVO_VIP_REORDER_G_SHIFT; 168 val |= y_g << TVO_VIP_REORDER_G_SHIFT;
166 val |= cb_b << TVO_VIP_REORDER_B_SHIFT; 169 val |= cb_b << TVO_VIP_REORDER_B_SHIFT;
167 170
168 tvout_write(tvout, val, TVO_VIP_HDMI); 171 tvout_write(tvout, val, reg);
169} 172}
170 173
171/** 174/**
172 * Set the clipping mode of a VIP 175 * Set the clipping mode of a VIP
173 * 176 *
174 * @tvout: tvout structure 177 * @tvout: tvout structure
178 * @reg: register to set
175 * @range: clipping range 179 * @range: clipping range
176 */ 180 */
177static void tvout_vip_set_clip_mode(struct sti_tvout *tvout, u32 range) 181static void tvout_vip_set_clip_mode(struct sti_tvout *tvout, int reg, u32 range)
178{ 182{
179 u32 val = tvout_read(tvout, TVO_VIP_HDMI); 183 u32 val = tvout_read(tvout, reg);
180 184
181 val &= ~(TVO_VIP_CLIP_MASK << TVO_VIP_CLIP_SHIFT); 185 val &= ~(TVO_VIP_CLIP_MASK << TVO_VIP_CLIP_SHIFT);
182 val |= range << TVO_VIP_CLIP_SHIFT; 186 val |= range << TVO_VIP_CLIP_SHIFT;
183 tvout_write(tvout, val, TVO_VIP_HDMI); 187 tvout_write(tvout, val, reg);
184} 188}
185 189
186/** 190/**
 187 * Set the rounding mode of a VIP 191 * Set the rounding mode of a VIP
188 * 192 *
189 * @tvout: tvout structure 193 * @tvout: tvout structure
194 * @reg: register to set
190 * @rnd: rounded val per component 195 * @rnd: rounded val per component
191 */ 196 */
192static void tvout_vip_set_rnd(struct sti_tvout *tvout, u32 rnd) 197static void tvout_vip_set_rnd(struct sti_tvout *tvout, int reg, u32 rnd)
193{ 198{
194 u32 val = tvout_read(tvout, TVO_VIP_HDMI); 199 u32 val = tvout_read(tvout, reg);
195 200
196 val &= ~(TVO_VIP_RND_MASK << TVO_VIP_RND_SHIFT); 201 val &= ~(TVO_VIP_RND_MASK << TVO_VIP_RND_SHIFT);
197 val |= rnd << TVO_VIP_RND_SHIFT; 202 val |= rnd << TVO_VIP_RND_SHIFT;
198 tvout_write(tvout, val, TVO_VIP_HDMI); 203 tvout_write(tvout, val, reg);
199} 204}
200 205
201/** 206/**
202 * Select the VIP input 207 * Select the VIP input
203 * 208 *
204 * @tvout: tvout structure 209 * @tvout: tvout structure
210 * @reg: register to set
211 * @main_path: main or auxiliary path
 212 * @sel_input_logic_inverted: true if the input selection logic is inverted
 205 * @video_out: selected output format (RGB or YUV) 213 * @video_out: selected output format (RGB or YUV)
206 */ 214 */
207static void tvout_vip_set_sel_input(struct sti_tvout *tvout, 215static void tvout_vip_set_sel_input(struct sti_tvout *tvout,
216 int reg,
208 bool main_path, 217 bool main_path,
209 bool sel_input_logic_inverted, 218 bool sel_input_logic_inverted,
210 enum sti_tvout_video_out_type video_out) 219 enum sti_tvout_video_out_type video_out)
211{ 220{
212 u32 sel_input; 221 u32 sel_input;
213 u32 val = tvout_read(tvout, TVO_VIP_HDMI); 222 u32 val = tvout_read(tvout, reg);
214 223
215 if (main_path) 224 if (main_path)
216 sel_input = TVO_VIP_SEL_INPUT_MAIN; 225 sel_input = TVO_VIP_SEL_INPUT_MAIN;
@@ -232,22 +241,24 @@ static void tvout_vip_set_sel_input(struct sti_tvout *tvout,
232 241
233 val &= ~TVO_VIP_SEL_INPUT_MASK; 242 val &= ~TVO_VIP_SEL_INPUT_MASK;
234 val |= sel_input; 243 val |= sel_input;
235 tvout_write(tvout, val, TVO_VIP_HDMI); 244 tvout_write(tvout, val, reg);
236} 245}
237 246
238/** 247/**
 239 * Select whether the input video is signed or unsigned 248 * Select whether the input video is signed or unsigned
240 * 249 *
241 * @tvout: tvout structure 250 * @tvout: tvout structure
251 * @reg: register to set
 242 * @in_vid_fmt: input video format (signed or unsigned) 252 * @in_vid_fmt: input video format (signed or unsigned)
243 */ 253 */
244static void tvout_vip_set_in_vid_fmt(struct sti_tvout *tvout, u32 in_vid_fmt) 254static void tvout_vip_set_in_vid_fmt(struct sti_tvout *tvout,
255 int reg, u32 in_vid_fmt)
245{ 256{
246 u32 val = tvout_read(tvout, TVO_VIP_HDMI); 257 u32 val = tvout_read(tvout, reg);
247 258
248 val &= ~TVO_IN_FMT_SIGNED; 259 val &= ~TVO_IN_FMT_SIGNED;
249 val |= in_vid_fmt; 260 val |= in_vid_fmt;
250 tvout_write(tvout, val, TVO_MAIN_IN_VID_FORMAT); 261 tvout_write(tvout, val, reg);
251} 262}
252 263
253/** 264/**
@@ -261,6 +272,7 @@ static void tvout_hdmi_start(struct sti_tvout *tvout, bool main_path)
261{ 272{
262 struct device_node *node = tvout->dev->of_node; 273 struct device_node *node = tvout->dev->of_node;
263 bool sel_input_logic_inverted = false; 274 bool sel_input_logic_inverted = false;
275 u32 tvo_in_vid_format;
264 276
265 dev_dbg(tvout->dev, "%s\n", __func__); 277 dev_dbg(tvout->dev, "%s\n", __func__);
266 278
@@ -268,33 +280,36 @@ static void tvout_hdmi_start(struct sti_tvout *tvout, bool main_path)
268 DRM_DEBUG_DRIVER("main vip for hdmi\n"); 280 DRM_DEBUG_DRIVER("main vip for hdmi\n");
269 /* select the input sync for hdmi = VTG set 1 */ 281 /* select the input sync for hdmi = VTG set 1 */
270 tvout_write(tvout, TVO_SYNC_MAIN_VTG_SET_1, TVO_HDMI_SYNC_SEL); 282 tvout_write(tvout, TVO_SYNC_MAIN_VTG_SET_1, TVO_HDMI_SYNC_SEL);
283 tvo_in_vid_format = TVO_MAIN_IN_VID_FORMAT;
271 } else { 284 } else {
272 DRM_DEBUG_DRIVER("aux vip for hdmi\n"); 285 DRM_DEBUG_DRIVER("aux vip for hdmi\n");
273 /* select the input sync for hdmi = VTG set 1 */ 286 /* select the input sync for hdmi = VTG set 1 */
274 tvout_write(tvout, TVO_SYNC_AUX_VTG_SET_1, TVO_HDMI_SYNC_SEL); 287 tvout_write(tvout, TVO_SYNC_AUX_VTG_SET_1, TVO_HDMI_SYNC_SEL);
288 tvo_in_vid_format = TVO_AUX_IN_VID_FORMAT;
275 } 289 }
276 290
277 /* set color channel order */ 291 /* set color channel order */
278 tvout_vip_set_color_order(tvout, 292 tvout_vip_set_color_order(tvout, TVO_VIP_HDMI,
279 TVO_VIP_REORDER_CR_R_SEL, 293 TVO_VIP_REORDER_CR_R_SEL,
280 TVO_VIP_REORDER_Y_G_SEL, 294 TVO_VIP_REORDER_Y_G_SEL,
281 TVO_VIP_REORDER_CB_B_SEL); 295 TVO_VIP_REORDER_CB_B_SEL);
282 296
283 /* set clipping mode (Limited range RGB/Y) */ 297 /* set clipping mode (Limited range RGB/Y) */
284 tvout_vip_set_clip_mode(tvout, TVO_VIP_CLIP_LIMITED_RANGE_RGB_Y); 298 tvout_vip_set_clip_mode(tvout, TVO_VIP_HDMI,
299 TVO_VIP_CLIP_LIMITED_RANGE_RGB_Y);
285 300
286 /* set round mode (rounded to 8-bit per component) */ 301 /* set round mode (rounded to 8-bit per component) */
287 tvout_vip_set_rnd(tvout, TVO_VIP_RND_8BIT_ROUNDED); 302 tvout_vip_set_rnd(tvout, TVO_VIP_HDMI, TVO_VIP_RND_8BIT_ROUNDED);
288 303
289 if (of_device_is_compatible(node, "st,stih407-tvout")) { 304 if (of_device_is_compatible(node, "st,stih407-tvout")) {
290 /* set input video format */ 305 /* set input video format */
291 tvout_vip_set_in_vid_fmt(tvout->regs + TVO_MAIN_IN_VID_FORMAT, 306 tvout_vip_set_in_vid_fmt(tvout, tvo_in_vid_format,
292 TVO_IN_FMT_SIGNED); 307 TVO_IN_FMT_SIGNED);
293 sel_input_logic_inverted = true; 308 sel_input_logic_inverted = true;
294 } 309 }
295 310
296 /* input selection */ 311 /* input selection */
297 tvout_vip_set_sel_input(tvout, main_path, 312 tvout_vip_set_sel_input(tvout, TVO_VIP_HDMI, main_path,
298 sel_input_logic_inverted, STI_TVOUT_VIDEO_OUT_RGB); 313 sel_input_logic_inverted, STI_TVOUT_VIDEO_OUT_RGB);
299} 314}
300 315
@@ -309,48 +324,47 @@ static void tvout_hda_start(struct sti_tvout *tvout, bool main_path)
309{ 324{
310 struct device_node *node = tvout->dev->of_node; 325 struct device_node *node = tvout->dev->of_node;
311 bool sel_input_logic_inverted = false; 326 bool sel_input_logic_inverted = false;
327 u32 tvo_in_vid_format;
328 int val;
312 329
313 dev_dbg(tvout->dev, "%s\n", __func__); 330 dev_dbg(tvout->dev, "%s\n", __func__);
314 331
315 if (!main_path) { 332 if (main_path) {
316 DRM_ERROR("HD Analog on aux not implemented\n"); 333 val = TVO_SYNC_MAIN_VTG_SET_2 << TVO_SYNC_HD_DCS_SHIFT;
317 return; 334 val |= TVO_SYNC_MAIN_VTG_SET_3;
335 tvout_write(tvout, val, TVO_HD_SYNC_SEL);
336 tvo_in_vid_format = TVO_MAIN_IN_VID_FORMAT;
337 } else {
338 val = TVO_SYNC_AUX_VTG_SET_2 << TVO_SYNC_HD_DCS_SHIFT;
339 val |= TVO_SYNC_AUX_VTG_SET_3;
340 tvout_write(tvout, val, TVO_HD_SYNC_SEL);
341 tvo_in_vid_format = TVO_AUX_IN_VID_FORMAT;
318 } 342 }
319 343
320 DRM_DEBUG_DRIVER("main vip for HDF\n");
321
322 /* set color channel order */ 344 /* set color channel order */
323 tvout_vip_set_color_order(tvout->regs + TVO_VIP_HDF, 345 tvout_vip_set_color_order(tvout, TVO_VIP_HDF,
324 TVO_VIP_REORDER_CR_R_SEL, 346 TVO_VIP_REORDER_CR_R_SEL,
325 TVO_VIP_REORDER_Y_G_SEL, 347 TVO_VIP_REORDER_Y_G_SEL,
326 TVO_VIP_REORDER_CB_B_SEL); 348 TVO_VIP_REORDER_CB_B_SEL);
327 349
328 /* set clipping mode (Limited range RGB/Y) */ 350 /* set clipping mode (EAV/SAV clipping) */
329 tvout_vip_set_clip_mode(tvout->regs + TVO_VIP_HDF, 351 tvout_vip_set_clip_mode(tvout, TVO_VIP_HDF, TVO_VIP_CLIP_EAV_SAV);
330 TVO_VIP_CLIP_LIMITED_RANGE_CB_CR);
331 352
332 /* set round mode (rounded to 10-bit per component) */ 353 /* set round mode (rounded to 10-bit per component) */
333 tvout_vip_set_rnd(tvout->regs + TVO_VIP_HDF, TVO_VIP_RND_10BIT_ROUNDED); 354 tvout_vip_set_rnd(tvout, TVO_VIP_HDF, TVO_VIP_RND_10BIT_ROUNDED);
334 355
335 if (of_device_is_compatible(node, "st,stih407-tvout")) { 356 if (of_device_is_compatible(node, "st,stih407-tvout")) {
336 /* set input video format */ 357 /* set input video format */
337 tvout_vip_set_in_vid_fmt(tvout, TVO_IN_FMT_SIGNED); 358 tvout_vip_set_in_vid_fmt(tvout,
359 tvo_in_vid_format, TVO_IN_FMT_SIGNED);
338 sel_input_logic_inverted = true; 360 sel_input_logic_inverted = true;
339 } 361 }
340 362
341 /* Input selection */ 363 /* Input selection */
342 tvout_vip_set_sel_input(tvout->regs + TVO_VIP_HDF, 364 tvout_vip_set_sel_input(tvout, TVO_VIP_HDF, main_path,
343 main_path,
344 sel_input_logic_inverted, 365 sel_input_logic_inverted,
345 STI_TVOUT_VIDEO_OUT_YUV); 366 STI_TVOUT_VIDEO_OUT_YUV);
346 367
347 /* select the input sync for HD analog = VTG set 3
348 * and HD DCS = VTG set 2 */
349 tvout_write(tvout,
350 (TVO_SYNC_MAIN_VTG_SET_2 << TVO_SYNC_HD_DCS_SHIFT)
351 | TVO_SYNC_MAIN_VTG_SET_3,
352 TVO_HD_SYNC_SEL);
353
354 /* power up HD DAC */ 368 /* power up HD DAC */
355 tvout_write(tvout, 0, TVO_HD_DAC_CFG_OFF); 369 tvout_write(tvout, 0, TVO_HD_DAC_CFG_OFF);
356} 370}
@@ -392,7 +406,7 @@ static void sti_hda_encoder_commit(struct drm_encoder *encoder)
392{ 406{
393 struct sti_tvout *tvout = to_sti_tvout(encoder); 407 struct sti_tvout *tvout = to_sti_tvout(encoder);
394 408
395 tvout_hda_start(tvout, true); 409 tvout_hda_start(tvout, sti_drm_crtc_is_main(encoder->crtc));
396} 410}
397 411
398static void sti_hda_encoder_disable(struct drm_encoder *encoder) 412static void sti_hda_encoder_disable(struct drm_encoder *encoder)
@@ -429,7 +443,7 @@ static struct drm_encoder *sti_tvout_create_hda_encoder(struct drm_device *dev,
429 443
430 drm_encoder = (struct drm_encoder *) encoder; 444 drm_encoder = (struct drm_encoder *) encoder;
431 445
432 drm_encoder->possible_crtcs = ENCODER_MAIN_CRTC_MASK; 446 drm_encoder->possible_crtcs = ENCODER_CRTC_MASK;
433 drm_encoder->possible_clones = 1 << 0; 447 drm_encoder->possible_clones = 1 << 0;
434 448
435 drm_encoder_init(dev, drm_encoder, 449 drm_encoder_init(dev, drm_encoder,
@@ -444,7 +458,7 @@ static void sti_hdmi_encoder_commit(struct drm_encoder *encoder)
444{ 458{
445 struct sti_tvout *tvout = to_sti_tvout(encoder); 459 struct sti_tvout *tvout = to_sti_tvout(encoder);
446 460
447 tvout_hdmi_start(tvout, true); 461 tvout_hdmi_start(tvout, sti_drm_crtc_is_main(encoder->crtc));
448} 462}
449 463
450static void sti_hdmi_encoder_disable(struct drm_encoder *encoder) 464static void sti_hdmi_encoder_disable(struct drm_encoder *encoder)
@@ -478,7 +492,7 @@ static struct drm_encoder *sti_tvout_create_hdmi_encoder(struct drm_device *dev,
478 492
479 drm_encoder = (struct drm_encoder *) encoder; 493 drm_encoder = (struct drm_encoder *) encoder;
480 494
481 drm_encoder->possible_crtcs = ENCODER_MAIN_CRTC_MASK; 495 drm_encoder->possible_crtcs = ENCODER_CRTC_MASK;
482 drm_encoder->possible_clones = 1 << 1; 496 drm_encoder->possible_clones = 1 << 1;
483 497
484 drm_encoder_init(dev, drm_encoder, 498 drm_encoder_init(dev, drm_encoder,
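
The common thread in the tvout changes is that every VIP helper now takes the register offset as a parameter, so the HDMI path (TVO_VIP_HDMI) and the HD-analog path (TVO_VIP_HDF) share one mask/shift read-modify-write, replacing the old code that sometimes passed a mangled tvout->regs + offset pointer where a struct pointer was expected. A condensed sketch of the pattern follows; the mask and shift values are illustrative, and tvo_read/tvo_write stand in for tvout_read/tvout_write.

    #include <stdint.h>

    #define CLIP_MASK  0x7    /* illustrative field layout */
    #define CLIP_SHIFT 8

    /* Stand-ins for the MMIO accessors tvout_read/tvout_write. */
    static uint32_t regs[16];
    static uint32_t tvo_read(int reg)            { return regs[reg]; }
    static void tvo_write(uint32_t val, int reg) { regs[reg] = val; }

    /* One helper serves any VIP instance: the register is an argument
     * rather than being hardwired to the HDMI datapath. */
    static void vip_set_clip_mode(int reg, uint32_t range)
    {
        uint32_t val = tvo_read(reg);

        val &= ~(CLIP_MASK << CLIP_SHIFT);
        val |= range << CLIP_SHIFT;
        tvo_write(val, reg);
    }

    int main(void)
    {
        vip_set_clip_mode(0, 1);   /* "HDMI" instance */
        vip_set_clip_mode(1, 2);   /* "HDF" instance, same helper */
        return 0;
    }
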
diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c
index 740d6e347a62..9564f2568e2c 100644
--- a/drivers/gpu/drm/sti/sti_vtg.c
+++ b/drivers/gpu/drm/sti/sti_vtg.c
@@ -51,10 +51,19 @@
51#define VTG_TOP_V_HD_3 0x010C 51#define VTG_TOP_V_HD_3 0x010C
52#define VTG_BOT_V_HD_3 0x0110 52#define VTG_BOT_V_HD_3 0x0110
53 53
54#define VTG_H_HD_4 0x0120
55#define VTG_TOP_V_VD_4 0x0124
56#define VTG_BOT_V_VD_4 0x0128
57#define VTG_TOP_V_HD_4 0x012c
58#define VTG_BOT_V_HD_4 0x0130
59
54#define VTG_IRQ_BOTTOM BIT(0) 60#define VTG_IRQ_BOTTOM BIT(0)
55#define VTG_IRQ_TOP BIT(1) 61#define VTG_IRQ_TOP BIT(1)
56#define VTG_IRQ_MASK (VTG_IRQ_TOP | VTG_IRQ_BOTTOM) 62#define VTG_IRQ_MASK (VTG_IRQ_TOP | VTG_IRQ_BOTTOM)
57 63
 64/* Delay introduced by the HDMI in nb of pixels */
65#define HDMI_DELAY (6)
66
58/* delay introduced by the Arbitrary Waveform Generator in nb of pixels */ 67/* delay introduced by the Arbitrary Waveform Generator in nb of pixels */
59#define AWG_DELAY_HD (-9) 68#define AWG_DELAY_HD (-9)
60#define AWG_DELAY_ED (-8) 69#define AWG_DELAY_ED (-8)
@@ -133,10 +142,10 @@ static void vtg_set_mode(struct sti_vtg *vtg,
133 writel(tmp, vtg->regs + VTG_VID_TFS); 142 writel(tmp, vtg->regs + VTG_VID_TFS);
134 writel(tmp, vtg->regs + VTG_VID_BFS); 143 writel(tmp, vtg->regs + VTG_VID_BFS);
135 144
136 /* prepare VTG set 1 and 2 for HDMI and VTG set 3 for HD DAC */ 145 /* prepare VTG set 1 for HDMI */
137 tmp = (mode->hsync_end - mode->hsync_start) << 16; 146 tmp = (mode->hsync_end - mode->hsync_start + HDMI_DELAY) << 16;
147 tmp |= HDMI_DELAY;
138 writel(tmp, vtg->regs + VTG_H_HD_1); 148 writel(tmp, vtg->regs + VTG_H_HD_1);
139 writel(tmp, vtg->regs + VTG_H_HD_2);
140 149
141 tmp = (mode->vsync_end - mode->vsync_start + 1) << 16; 150 tmp = (mode->vsync_end - mode->vsync_start + 1) << 16;
142 tmp |= 1; 151 tmp |= 1;
@@ -146,6 +155,11 @@ static void vtg_set_mode(struct sti_vtg *vtg,
146 writel(0, vtg->regs + VTG_BOT_V_HD_1); 155 writel(0, vtg->regs + VTG_BOT_V_HD_1);
147 156
 148 /* prepare VTG set 2 for HD DCS */ 157 /* prepare VTG set 2 for HD DCS */
158 tmp = (mode->hsync_end - mode->hsync_start) << 16;
159 writel(tmp, vtg->regs + VTG_H_HD_2);
160
161 tmp = (mode->vsync_end - mode->vsync_start + 1) << 16;
162 tmp |= 1;
149 writel(tmp, vtg->regs + VTG_TOP_V_VD_2); 163 writel(tmp, vtg->regs + VTG_TOP_V_VD_2);
150 writel(tmp, vtg->regs + VTG_BOT_V_VD_2); 164 writel(tmp, vtg->regs + VTG_BOT_V_VD_2);
151 writel(0, vtg->regs + VTG_TOP_V_HD_2); 165 writel(0, vtg->regs + VTG_TOP_V_HD_2);
@@ -166,6 +180,17 @@ static void vtg_set_mode(struct sti_vtg *vtg,
166 writel(tmp, vtg->regs + VTG_TOP_V_HD_3); 180 writel(tmp, vtg->regs + VTG_TOP_V_HD_3);
167 writel(tmp, vtg->regs + VTG_BOT_V_HD_3); 181 writel(tmp, vtg->regs + VTG_BOT_V_HD_3);
168 182
183 /* Prepare VTG set 4 for DVO */
184 tmp = (mode->hsync_end - mode->hsync_start) << 16;
185 writel(tmp, vtg->regs + VTG_H_HD_4);
186
187 tmp = (mode->vsync_end - mode->vsync_start + 1) << 16;
188 tmp |= 1;
189 writel(tmp, vtg->regs + VTG_TOP_V_VD_4);
190 writel(tmp, vtg->regs + VTG_BOT_V_VD_4);
191 writel(0, vtg->regs + VTG_TOP_V_HD_4);
192 writel(0, vtg->regs + VTG_BOT_V_HD_4);
193
169 /* mode */ 194 /* mode */
170 writel(type, vtg->regs + VTG_MODE); 195 writel(type, vtg->regs + VTG_MODE);
171} 196}
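
The arithmetic in the new VTG set 1 programming is worth spelling out: both halves of VTG_H_HD_1 are pushed back by the six-pixel HDMI pipeline delay, with one sync edge packed in the top 16 bits and the other in the bottom 16. A minimal sketch of that packing, assuming that half-word layout:

    #include <stdint.h>
    #include <stdio.h>

    #define HDMI_DELAY 6    /* pixels, as defined in the patch */

    /* Pack the two HDMI hsync edges for VTG_H_HD_1, both offset by
     * HDMI_DELAY pixels (high half-word: sync end, low: sync start). */
    static uint32_t vtg_h_hd_1(int hsync_start, int hsync_end)
    {
        uint32_t tmp = (uint32_t)(hsync_end - hsync_start + HDMI_DELAY) << 16;

        return tmp | HDMI_DELAY;
    }

    int main(void)
    {
        /* e.g. a 44-pixel sync pulse -> 0x00320006 */
        printf("%#010x\n", vtg_h_hd_1(0, 44));
        return 0;
    }
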
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
index 354ddb29231f..74d9d621453d 100644
--- a/drivers/gpu/drm/tegra/Kconfig
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -1,6 +1,7 @@
1config DRM_TEGRA 1config DRM_TEGRA
2 tristate "NVIDIA Tegra DRM" 2 tristate "NVIDIA Tegra DRM"
3 depends on ARCH_TEGRA || (ARM && COMPILE_TEST) 3 depends on ARCH_TEGRA || (ARM && COMPILE_TEST)
4 depends on COMMON_CLK
4 depends on DRM 5 depends on DRM
5 depends on RESET_CONTROLLER 6 depends on RESET_CONTROLLER
6 select DRM_KMS_HELPER 7 select DRM_KMS_HELPER
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 054a79f143ae..3367960286a6 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -9,17 +9,23 @@
9 9
10#include <linux/clk.h> 10#include <linux/clk.h>
11#include <linux/debugfs.h> 11#include <linux/debugfs.h>
12#include <linux/iommu.h>
12#include <linux/reset.h> 13#include <linux/reset.h>
13 14
15#include <soc/tegra/pmc.h>
16
14#include "dc.h" 17#include "dc.h"
15#include "drm.h" 18#include "drm.h"
16#include "gem.h" 19#include "gem.h"
17 20
21#include <drm/drm_plane_helper.h>
22
18struct tegra_dc_soc_info { 23struct tegra_dc_soc_info {
19 bool supports_interlacing; 24 bool supports_interlacing;
20 bool supports_cursor; 25 bool supports_cursor;
21 bool supports_block_linear; 26 bool supports_block_linear;
22 unsigned int pitch_align; 27 unsigned int pitch_align;
28 bool has_powergate;
23}; 29};
24 30
25struct tegra_plane { 31struct tegra_plane {
@@ -32,6 +38,26 @@ static inline struct tegra_plane *to_tegra_plane(struct drm_plane *plane)
32 return container_of(plane, struct tegra_plane, base); 38 return container_of(plane, struct tegra_plane, base);
33} 39}
34 40
41static void tegra_dc_window_commit(struct tegra_dc *dc, unsigned int index)
42{
43 u32 value = WIN_A_ACT_REQ << index;
44
45 tegra_dc_writel(dc, value << 8, DC_CMD_STATE_CONTROL);
46 tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
47}
48
49static void tegra_dc_cursor_commit(struct tegra_dc *dc)
50{
51 tegra_dc_writel(dc, CURSOR_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
52 tegra_dc_writel(dc, CURSOR_ACT_REQ, DC_CMD_STATE_CONTROL);
53}
54
55static void tegra_dc_commit(struct tegra_dc *dc)
56{
57 tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
58 tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
59}
60
35static unsigned int tegra_dc_format(uint32_t format, uint32_t *swap) 61static unsigned int tegra_dc_format(uint32_t format, uint32_t *swap)
36{ 62{
37 /* assume no swapping of fetched data */ 63 /* assume no swapping of fetched data */
@@ -303,17 +329,260 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
303 break; 329 break;
304 } 330 }
305 331
306 tegra_dc_writel(dc, WIN_A_UPDATE << index, DC_CMD_STATE_CONTROL); 332 tegra_dc_window_commit(dc, index);
307 tegra_dc_writel(dc, WIN_A_ACT_REQ << index, DC_CMD_STATE_CONTROL); 333
334 return 0;
335}
336
337static int tegra_window_plane_disable(struct drm_plane *plane)
338{
339 struct tegra_dc *dc = to_tegra_dc(plane->crtc);
340 struct tegra_plane *p = to_tegra_plane(plane);
341 u32 value;
342
343 if (!plane->crtc)
344 return 0;
345
346 value = WINDOW_A_SELECT << p->index;
347 tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
348
349 value = tegra_dc_readl(dc, DC_WIN_WIN_OPTIONS);
350 value &= ~WIN_ENABLE;
351 tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
352
353 tegra_dc_window_commit(dc, p->index);
354
355 return 0;
356}
357
358static void tegra_plane_destroy(struct drm_plane *plane)
359{
360 struct tegra_plane *p = to_tegra_plane(plane);
361
362 drm_plane_cleanup(plane);
363 kfree(p);
364}
365
366static const u32 tegra_primary_plane_formats[] = {
367 DRM_FORMAT_XBGR8888,
368 DRM_FORMAT_XRGB8888,
369 DRM_FORMAT_RGB565,
370};
371
372static int tegra_primary_plane_update(struct drm_plane *plane,
373 struct drm_crtc *crtc,
374 struct drm_framebuffer *fb, int crtc_x,
375 int crtc_y, unsigned int crtc_w,
376 unsigned int crtc_h, uint32_t src_x,
377 uint32_t src_y, uint32_t src_w,
378 uint32_t src_h)
379{
380 struct tegra_bo *bo = tegra_fb_get_plane(fb, 0);
381 struct tegra_plane *p = to_tegra_plane(plane);
382 struct tegra_dc *dc = to_tegra_dc(crtc);
383 struct tegra_dc_window window;
384 int err;
385
386 memset(&window, 0, sizeof(window));
387 window.src.x = src_x >> 16;
388 window.src.y = src_y >> 16;
389 window.src.w = src_w >> 16;
390 window.src.h = src_h >> 16;
391 window.dst.x = crtc_x;
392 window.dst.y = crtc_y;
393 window.dst.w = crtc_w;
394 window.dst.h = crtc_h;
395 window.format = tegra_dc_format(fb->pixel_format, &window.swap);
396 window.bits_per_pixel = fb->bits_per_pixel;
397 window.bottom_up = tegra_fb_is_bottom_up(fb);
398
399 err = tegra_fb_get_tiling(fb, &window.tiling);
400 if (err < 0)
401 return err;
402
403 window.base[0] = bo->paddr + fb->offsets[0];
404 window.stride[0] = fb->pitches[0];
405
406 err = tegra_dc_setup_window(dc, p->index, &window);
407 if (err < 0)
408 return err;
409
410 return 0;
411}
412
413static void tegra_primary_plane_destroy(struct drm_plane *plane)
414{
415 tegra_window_plane_disable(plane);
416 tegra_plane_destroy(plane);
417}
418
419static const struct drm_plane_funcs tegra_primary_plane_funcs = {
420 .update_plane = tegra_primary_plane_update,
421 .disable_plane = tegra_window_plane_disable,
422 .destroy = tegra_primary_plane_destroy,
423};
424
425static struct drm_plane *tegra_dc_primary_plane_create(struct drm_device *drm,
426 struct tegra_dc *dc)
427{
428 struct tegra_plane *plane;
429 unsigned int num_formats;
430 const u32 *formats;
431 int err;
432
433 plane = kzalloc(sizeof(*plane), GFP_KERNEL);
434 if (!plane)
435 return ERR_PTR(-ENOMEM);
436
437 num_formats = ARRAY_SIZE(tegra_primary_plane_formats);
438 formats = tegra_primary_plane_formats;
439
440 err = drm_universal_plane_init(drm, &plane->base, 1 << dc->pipe,
441 &tegra_primary_plane_funcs, formats,
442 num_formats, DRM_PLANE_TYPE_PRIMARY);
443 if (err < 0) {
444 kfree(plane);
445 return ERR_PTR(err);
446 }
447
448 return &plane->base;
449}
450
451static const u32 tegra_cursor_plane_formats[] = {
452 DRM_FORMAT_RGBA8888,
453};
454
455static int tegra_cursor_plane_update(struct drm_plane *plane,
456 struct drm_crtc *crtc,
457 struct drm_framebuffer *fb, int crtc_x,
458 int crtc_y, unsigned int crtc_w,
459 unsigned int crtc_h, uint32_t src_x,
460 uint32_t src_y, uint32_t src_w,
461 uint32_t src_h)
462{
463 struct tegra_bo *bo = tegra_fb_get_plane(fb, 0);
464 struct tegra_dc *dc = to_tegra_dc(crtc);
465 u32 value = CURSOR_CLIP_DISPLAY;
466
467 /* scaling not supported for cursor */
468 if ((src_w >> 16 != crtc_w) || (src_h >> 16 != crtc_h))
469 return -EINVAL;
470
471 /* only square cursors supported */
472 if (src_w != src_h)
473 return -EINVAL;
474
475 switch (crtc_w) {
476 case 32:
477 value |= CURSOR_SIZE_32x32;
478 break;
479
480 case 64:
481 value |= CURSOR_SIZE_64x64;
482 break;
483
484 case 128:
485 value |= CURSOR_SIZE_128x128;
486 break;
487
488 case 256:
489 value |= CURSOR_SIZE_256x256;
490 break;
491
492 default:
493 return -EINVAL;
494 }
495
496 value |= (bo->paddr >> 10) & 0x3fffff;
497 tegra_dc_writel(dc, value, DC_DISP_CURSOR_START_ADDR);
498
499#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
500 value = (bo->paddr >> 32) & 0x3;
501 tegra_dc_writel(dc, value, DC_DISP_CURSOR_START_ADDR_HI);
502#endif
503
504 /* enable cursor and set blend mode */
505 value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
506 value |= CURSOR_ENABLE;
507 tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
508
509 value = tegra_dc_readl(dc, DC_DISP_BLEND_CURSOR_CONTROL);
510 value &= ~CURSOR_DST_BLEND_MASK;
511 value &= ~CURSOR_SRC_BLEND_MASK;
512 value |= CURSOR_MODE_NORMAL;
513 value |= CURSOR_DST_BLEND_NEG_K1_TIMES_SRC;
514 value |= CURSOR_SRC_BLEND_K1_TIMES_SRC;
515 value |= CURSOR_ALPHA;
516 tegra_dc_writel(dc, value, DC_DISP_BLEND_CURSOR_CONTROL);
517
518 /* position the cursor */
519 value = (crtc_y & 0x3fff) << 16 | (crtc_x & 0x3fff);
520 tegra_dc_writel(dc, value, DC_DISP_CURSOR_POSITION);
521
522 /* apply changes */
523 tegra_dc_cursor_commit(dc);
524 tegra_dc_commit(dc);
525
526 return 0;
527}
528
529static int tegra_cursor_plane_disable(struct drm_plane *plane)
530{
531 struct tegra_dc *dc = to_tegra_dc(plane->crtc);
532 u32 value;
533
534 if (!plane->crtc)
535 return 0;
536
537 value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
538 value &= ~CURSOR_ENABLE;
539 tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
540
541 tegra_dc_cursor_commit(dc);
542 tegra_dc_commit(dc);
308 543
309 return 0; 544 return 0;
310} 545}
311 546
312static int tegra_plane_update(struct drm_plane *plane, struct drm_crtc *crtc, 547static const struct drm_plane_funcs tegra_cursor_plane_funcs = {
313 struct drm_framebuffer *fb, int crtc_x, 548 .update_plane = tegra_cursor_plane_update,
314 int crtc_y, unsigned int crtc_w, 549 .disable_plane = tegra_cursor_plane_disable,
315 unsigned int crtc_h, uint32_t src_x, 550 .destroy = tegra_plane_destroy,
316 uint32_t src_y, uint32_t src_w, uint32_t src_h) 551};
552
553static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm,
554 struct tegra_dc *dc)
555{
556 struct tegra_plane *plane;
557 unsigned int num_formats;
558 const u32 *formats;
559 int err;
560
561 plane = kzalloc(sizeof(*plane), GFP_KERNEL);
562 if (!plane)
563 return ERR_PTR(-ENOMEM);
564
565 num_formats = ARRAY_SIZE(tegra_cursor_plane_formats);
566 formats = tegra_cursor_plane_formats;
567
568 err = drm_universal_plane_init(drm, &plane->base, 1 << dc->pipe,
569 &tegra_cursor_plane_funcs, formats,
570 num_formats, DRM_PLANE_TYPE_CURSOR);
571 if (err < 0) {
572 kfree(plane);
573 return ERR_PTR(err);
574 }
575
576 return &plane->base;
577}
578
579static int tegra_overlay_plane_update(struct drm_plane *plane,
580 struct drm_crtc *crtc,
581 struct drm_framebuffer *fb, int crtc_x,
582 int crtc_y, unsigned int crtc_w,
583 unsigned int crtc_h, uint32_t src_x,
584 uint32_t src_y, uint32_t src_w,
585 uint32_t src_h)
317{ 586{
318 struct tegra_plane *p = to_tegra_plane(plane); 587 struct tegra_plane *p = to_tegra_plane(plane);
319 struct tegra_dc *dc = to_tegra_dc(crtc); 588 struct tegra_dc *dc = to_tegra_dc(crtc);
@@ -359,44 +628,19 @@ static int tegra_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
359 return tegra_dc_setup_window(dc, p->index, &window); 628 return tegra_dc_setup_window(dc, p->index, &window);
360} 629}
361 630
362static int tegra_plane_disable(struct drm_plane *plane) 631static void tegra_overlay_plane_destroy(struct drm_plane *plane)
363{ 632{
364 struct tegra_dc *dc = to_tegra_dc(plane->crtc); 633 tegra_window_plane_disable(plane);
365 struct tegra_plane *p = to_tegra_plane(plane); 634 tegra_plane_destroy(plane);
366 unsigned long value;
367
368 if (!plane->crtc)
369 return 0;
370
371 value = WINDOW_A_SELECT << p->index;
372 tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
373
374 value = tegra_dc_readl(dc, DC_WIN_WIN_OPTIONS);
375 value &= ~WIN_ENABLE;
376 tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);
377
378 tegra_dc_writel(dc, WIN_A_UPDATE << p->index, DC_CMD_STATE_CONTROL);
379 tegra_dc_writel(dc, WIN_A_ACT_REQ << p->index, DC_CMD_STATE_CONTROL);
380
381 return 0;
382}
383
384static void tegra_plane_destroy(struct drm_plane *plane)
385{
386 struct tegra_plane *p = to_tegra_plane(plane);
387
388 tegra_plane_disable(plane);
389 drm_plane_cleanup(plane);
390 kfree(p);
391} 635}
392 636
393static const struct drm_plane_funcs tegra_plane_funcs = { 637static const struct drm_plane_funcs tegra_overlay_plane_funcs = {
394 .update_plane = tegra_plane_update, 638 .update_plane = tegra_overlay_plane_update,
395 .disable_plane = tegra_plane_disable, 639 .disable_plane = tegra_window_plane_disable,
396 .destroy = tegra_plane_destroy, 640 .destroy = tegra_overlay_plane_destroy,
397}; 641};
398 642
399static const uint32_t plane_formats[] = { 643static const uint32_t tegra_overlay_plane_formats[] = {
400 DRM_FORMAT_XBGR8888, 644 DRM_FORMAT_XBGR8888,
401 DRM_FORMAT_XRGB8888, 645 DRM_FORMAT_XRGB8888,
402 DRM_FORMAT_RGB565, 646 DRM_FORMAT_RGB565,
@@ -406,27 +650,44 @@ static const uint32_t plane_formats[] = {
406 DRM_FORMAT_YUV422, 650 DRM_FORMAT_YUV422,
407}; 651};
408 652
409static int tegra_dc_add_planes(struct drm_device *drm, struct tegra_dc *dc) 653static struct drm_plane *tegra_dc_overlay_plane_create(struct drm_device *drm,
654 struct tegra_dc *dc,
655 unsigned int index)
410{ 656{
411 unsigned int i; 657 struct tegra_plane *plane;
412 int err = 0; 658 unsigned int num_formats;
659 const u32 *formats;
660 int err;
413 661
414 for (i = 0; i < 2; i++) { 662 plane = kzalloc(sizeof(*plane), GFP_KERNEL);
415 struct tegra_plane *plane; 663 if (!plane)
664 return ERR_PTR(-ENOMEM);
416 665
417 plane = kzalloc(sizeof(*plane), GFP_KERNEL); 666 plane->index = index;
418 if (!plane)
419 return -ENOMEM;
420 667
421 plane->index = 1 + i; 668 num_formats = ARRAY_SIZE(tegra_overlay_plane_formats);
669 formats = tegra_overlay_plane_formats;
422 670
423 err = drm_plane_init(drm, &plane->base, 1 << dc->pipe, 671 err = drm_universal_plane_init(drm, &plane->base, 1 << dc->pipe,
424 &tegra_plane_funcs, plane_formats, 672 &tegra_overlay_plane_funcs, formats,
425 ARRAY_SIZE(plane_formats), false); 673 num_formats, DRM_PLANE_TYPE_OVERLAY);
426 if (err < 0) { 674 if (err < 0) {
427 kfree(plane); 675 kfree(plane);
428 return err; 676 return ERR_PTR(err);
429 } 677 }
678
679 return &plane->base;
680}
681
682static int tegra_dc_add_planes(struct drm_device *drm, struct tegra_dc *dc)
683{
684 struct drm_plane *plane;
685 unsigned int i;
686
687 for (i = 0; i < 2; i++) {
688 plane = tegra_dc_overlay_plane_create(drm, dc, 1 + i);
689 if (IS_ERR(plane))
690 return PTR_ERR(plane);
430 } 691 }
431 692
432 return 0; 693 return 0;
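
The three commit helpers introduced at the top of this file (tegra_dc_window_commit, tegra_dc_cursor_commit, tegra_dc_commit) all lean on the same register layout: in DC_CMD_STATE_CONTROL each *_UPDATE bit sits exactly eight positions above its *_ACT_REQ bit, so writing value << 8 latches the shadow state and writing value then arms it. A standalone sketch of the double write; dc_writel mocks tegra_dc_writel and the register offset is illustrative.

    #include <stdint.h>

    #define GENERAL_ACT_REQ (1u << 0)
    #define WIN_A_ACT_REQ   (1u << 1)
    #define STATE_CONTROL   0x41        /* illustrative offset */

    /* Mock of tegra_dc_writel: record the last value written. */
    static uint32_t last_write;
    static void dc_writel(uint32_t value, unsigned int offset)
    {
        (void)offset;
        last_write = value;
    }

    /* UPDATE bits are ACT_REQ bits shifted left by 8, so one helper
     * covers the general, window and cursor cases alike. */
    static void dc_commit(uint32_t value)
    {
        dc_writel(value << 8, STATE_CONTROL);   /* latch: *_UPDATE */
        dc_writel(value, STATE_CONTROL);        /* arm:   *_ACT_REQ */
    }

    int main(void)
    {
        dc_commit(GENERAL_ACT_REQ | WIN_A_ACT_REQ);
        return 0;
    }
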
@@ -513,10 +774,8 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
513 tegra_dc_writel(dc, h_offset, DC_WINBUF_ADDR_H_OFFSET); 774 tegra_dc_writel(dc, h_offset, DC_WINBUF_ADDR_H_OFFSET);
514 tegra_dc_writel(dc, v_offset, DC_WINBUF_ADDR_V_OFFSET); 775 tegra_dc_writel(dc, v_offset, DC_WINBUF_ADDR_V_OFFSET);
515 776
516 value = GENERAL_UPDATE | WIN_A_UPDATE;
517 tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
518
519 value = GENERAL_ACT_REQ | WIN_A_ACT_REQ; 777 value = GENERAL_ACT_REQ | WIN_A_ACT_REQ;
778 tegra_dc_writel(dc, value << 8, DC_CMD_STATE_CONTROL);
520 tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL); 779 tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
521 780
522 return 0; 781 return 0;
@@ -548,109 +807,6 @@ void tegra_dc_disable_vblank(struct tegra_dc *dc)
548 spin_unlock_irqrestore(&dc->lock, flags); 807 spin_unlock_irqrestore(&dc->lock, flags);
549} 808}
550 809
551static int tegra_dc_cursor_set2(struct drm_crtc *crtc, struct drm_file *file,
552 uint32_t handle, uint32_t width,
553 uint32_t height, int32_t hot_x, int32_t hot_y)
554{
555 unsigned long value = CURSOR_CLIP_DISPLAY;
556 struct tegra_dc *dc = to_tegra_dc(crtc);
557 struct drm_gem_object *gem;
558 struct tegra_bo *bo = NULL;
559
560 if (!dc->soc->supports_cursor)
561 return -ENXIO;
562
563 if (width != height)
564 return -EINVAL;
565
566 switch (width) {
567 case 32:
568 value |= CURSOR_SIZE_32x32;
569 break;
570
571 case 64:
572 value |= CURSOR_SIZE_64x64;
573 break;
574
575 case 128:
576 value |= CURSOR_SIZE_128x128;
577
578 case 256:
579 value |= CURSOR_SIZE_256x256;
580 break;
581
582 default:
583 return -EINVAL;
584 }
585
586 if (handle) {
587 gem = drm_gem_object_lookup(crtc->dev, file, handle);
588 if (!gem)
589 return -ENOENT;
590
591 bo = to_tegra_bo(gem);
592 }
593
594 if (bo) {
595 unsigned long addr = (bo->paddr & 0xfffffc00) >> 10;
596#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
597 unsigned long high = (bo->paddr & 0xfffffffc) >> 32;
598#endif
599
600 tegra_dc_writel(dc, value | addr, DC_DISP_CURSOR_START_ADDR);
601
602#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
603 tegra_dc_writel(dc, high, DC_DISP_CURSOR_START_ADDR_HI);
604#endif
605
606 value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
607 value |= CURSOR_ENABLE;
608 tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
609
610 value = tegra_dc_readl(dc, DC_DISP_BLEND_CURSOR_CONTROL);
611 value &= ~CURSOR_DST_BLEND_MASK;
612 value &= ~CURSOR_SRC_BLEND_MASK;
613 value |= CURSOR_MODE_NORMAL;
614 value |= CURSOR_DST_BLEND_NEG_K1_TIMES_SRC;
615 value |= CURSOR_SRC_BLEND_K1_TIMES_SRC;
616 value |= CURSOR_ALPHA;
617 tegra_dc_writel(dc, value, DC_DISP_BLEND_CURSOR_CONTROL);
618 } else {
619 value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
620 value &= ~CURSOR_ENABLE;
621 tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS);
622 }
623
624 tegra_dc_writel(dc, CURSOR_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
625 tegra_dc_writel(dc, CURSOR_ACT_REQ, DC_CMD_STATE_CONTROL);
626
627 tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
628 tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
629
630 return 0;
631}
632
633static int tegra_dc_cursor_move(struct drm_crtc *crtc, int x, int y)
634{
635 struct tegra_dc *dc = to_tegra_dc(crtc);
636 unsigned long value;
637
638 if (!dc->soc->supports_cursor)
639 return -ENXIO;
640
641 value = ((y & 0x3fff) << 16) | (x & 0x3fff);
642 tegra_dc_writel(dc, value, DC_DISP_CURSOR_POSITION);
643
644 tegra_dc_writel(dc, CURSOR_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
645 tegra_dc_writel(dc, CURSOR_ACT_REQ, DC_CMD_STATE_CONTROL);
646
647 /* XXX: only required on generations earlier than Tegra124? */
648 tegra_dc_writel(dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
649 tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
650
651 return 0;
652}
653
654static void tegra_dc_finish_page_flip(struct tegra_dc *dc) 810static void tegra_dc_finish_page_flip(struct tegra_dc *dc)
655{ 811{
656 struct drm_device *drm = dc->base.dev; 812 struct drm_device *drm = dc->base.dev;
@@ -727,8 +883,6 @@ static void tegra_dc_destroy(struct drm_crtc *crtc)
727} 883}
728 884
729static const struct drm_crtc_funcs tegra_crtc_funcs = { 885static const struct drm_crtc_funcs tegra_crtc_funcs = {
730 .cursor_set2 = tegra_dc_cursor_set2,
731 .cursor_move = tegra_dc_cursor_move,
732 .page_flip = tegra_dc_page_flip, 886 .page_flip = tegra_dc_page_flip,
733 .set_config = drm_crtc_helper_set_config, 887 .set_config = drm_crtc_helper_set_config,
734 .destroy = tegra_dc_destroy, 888 .destroy = tegra_dc_destroy,
@@ -736,12 +890,13 @@ static const struct drm_crtc_funcs tegra_crtc_funcs = {
736 890
737static void tegra_crtc_disable(struct drm_crtc *crtc) 891static void tegra_crtc_disable(struct drm_crtc *crtc)
738{ 892{
893 struct tegra_dc *dc = to_tegra_dc(crtc);
739 struct drm_device *drm = crtc->dev; 894 struct drm_device *drm = crtc->dev;
740 struct drm_plane *plane; 895 struct drm_plane *plane;
741 896
742 drm_for_each_legacy_plane(plane, &drm->mode_config.plane_list) { 897 drm_for_each_legacy_plane(plane, &drm->mode_config.plane_list) {
743 if (plane->crtc == crtc) { 898 if (plane->crtc == crtc) {
744 tegra_plane_disable(plane); 899 tegra_window_plane_disable(plane);
745 plane->crtc = NULL; 900 plane->crtc = NULL;
746 901
747 if (plane->fb) { 902 if (plane->fb) {
@@ -752,6 +907,7 @@ static void tegra_crtc_disable(struct drm_crtc *crtc)
752 } 907 }
753 908
754 drm_crtc_vblank_off(crtc); 909 drm_crtc_vblank_off(crtc);
910 tegra_dc_commit(dc);
755} 911}
756 912
757static bool tegra_crtc_mode_fixup(struct drm_crtc *crtc, 913static bool tegra_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -934,15 +1090,9 @@ static void tegra_crtc_prepare(struct drm_crtc *crtc)
934static void tegra_crtc_commit(struct drm_crtc *crtc) 1090static void tegra_crtc_commit(struct drm_crtc *crtc)
935{ 1091{
936 struct tegra_dc *dc = to_tegra_dc(crtc); 1092 struct tegra_dc *dc = to_tegra_dc(crtc);
937 unsigned long value;
938
939 value = GENERAL_UPDATE | WIN_A_UPDATE;
940 tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
941
942 value = GENERAL_ACT_REQ | WIN_A_ACT_REQ;
943 tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
944 1093
945 drm_crtc_vblank_on(crtc); 1094 drm_crtc_vblank_on(crtc);
1095 tegra_dc_commit(dc);
946} 1096}
947 1097
948static void tegra_crtc_load_lut(struct drm_crtc *crtc) 1098static void tegra_crtc_load_lut(struct drm_crtc *crtc)
@@ -996,7 +1146,7 @@ static int tegra_dc_show_regs(struct seq_file *s, void *data)
996 struct tegra_dc *dc = node->info_ent->data; 1146 struct tegra_dc *dc = node->info_ent->data;
997 1147
998#define DUMP_REG(name) \ 1148#define DUMP_REG(name) \
999 seq_printf(s, "%-40s %#05x %08lx\n", #name, name, \ 1149 seq_printf(s, "%-40s %#05x %08x\n", #name, name, \
1000 tegra_dc_readl(dc, name)) 1150 tegra_dc_readl(dc, name))
1001 1151
1002 DUMP_REG(DC_CMD_GENERAL_INCR_SYNCPT); 1152 DUMP_REG(DC_CMD_GENERAL_INCR_SYNCPT);
@@ -1284,9 +1434,40 @@ static int tegra_dc_init(struct host1x_client *client)
 	struct drm_device *drm = dev_get_drvdata(client->parent);
 	struct tegra_dc *dc = host1x_client_to_dc(client);
 	struct tegra_drm *tegra = drm->dev_private;
+	struct drm_plane *primary = NULL;
+	struct drm_plane *cursor = NULL;
 	int err;
 
-	drm_crtc_init(drm, &dc->base, &tegra_crtc_funcs);
+	if (tegra->domain) {
+		err = iommu_attach_device(tegra->domain, dc->dev);
+		if (err < 0) {
+			dev_err(dc->dev, "failed to attach to domain: %d\n",
+				err);
+			return err;
+		}
+
+		dc->domain = tegra->domain;
+	}
+
+	primary = tegra_dc_primary_plane_create(drm, dc);
+	if (IS_ERR(primary)) {
+		err = PTR_ERR(primary);
+		goto cleanup;
+	}
+
+	if (dc->soc->supports_cursor) {
+		cursor = tegra_dc_cursor_plane_create(drm, dc);
+		if (IS_ERR(cursor)) {
+			err = PTR_ERR(cursor);
+			goto cleanup;
+		}
+	}
+
+	err = drm_crtc_init_with_planes(drm, &dc->base, primary, cursor,
+					&tegra_crtc_funcs);
+	if (err < 0)
+		goto cleanup;
+
 	drm_mode_crtc_set_gamma_size(&dc->base, 256);
 	drm_crtc_helper_add(&dc->base, &tegra_crtc_helper_funcs);
 
@@ -1300,12 +1481,12 @@ static int tegra_dc_init(struct host1x_client *client)
 	err = tegra_dc_rgb_init(drm, dc);
 	if (err < 0 && err != -ENODEV) {
 		dev_err(dc->dev, "failed to initialize RGB output: %d\n", err);
-		return err;
+		goto cleanup;
 	}
 
 	err = tegra_dc_add_planes(drm, dc);
 	if (err < 0)
-		return err;
+		goto cleanup;
 
 	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
 		err = tegra_dc_debugfs_init(dc, drm->primary);
@@ -1318,10 +1499,24 @@ static int tegra_dc_init(struct host1x_client *client)
 	if (err < 0) {
 		dev_err(dc->dev, "failed to request IRQ#%u: %d\n", dc->irq,
 			err);
-		return err;
+		goto cleanup;
 	}
 
 	return 0;
+
+cleanup:
+	if (cursor)
+		drm_plane_cleanup(cursor);
+
+	if (primary)
+		drm_plane_cleanup(primary);
+
+	if (tegra->domain) {
+		iommu_detach_device(tegra->domain, dc->dev);
+		dc->domain = NULL;
+	}
+
+	return err;
 }
 
 static int tegra_dc_exit(struct host1x_client *client)
@@ -1343,6 +1538,11 @@ static int tegra_dc_exit(struct host1x_client *client)
 		return err;
 	}
 
+	if (dc->domain) {
+		iommu_detach_device(dc->domain, dc->dev);
+		dc->domain = NULL;
+	}
+
 	return 0;
 }
 
@@ -1356,6 +1556,7 @@ static const struct tegra_dc_soc_info tegra20_dc_soc_info = {
 	.supports_cursor = false,
 	.supports_block_linear = false,
 	.pitch_align = 8,
+	.has_powergate = false,
 };
 
 static const struct tegra_dc_soc_info tegra30_dc_soc_info = {
@@ -1363,6 +1564,7 @@ static const struct tegra_dc_soc_info tegra30_dc_soc_info = {
 	.supports_cursor = false,
 	.supports_block_linear = false,
 	.pitch_align = 8,
+	.has_powergate = false,
 };
 
 static const struct tegra_dc_soc_info tegra114_dc_soc_info = {
@@ -1370,6 +1572,7 @@ static const struct tegra_dc_soc_info tegra114_dc_soc_info = {
 	.supports_cursor = false,
 	.supports_block_linear = false,
 	.pitch_align = 64,
+	.has_powergate = true,
 };
 
 static const struct tegra_dc_soc_info tegra124_dc_soc_info = {
@@ -1377,6 +1580,7 @@ static const struct tegra_dc_soc_info tegra124_dc_soc_info = {
 	.supports_cursor = true,
 	.supports_block_linear = true,
 	.pitch_align = 64,
+	.has_powergate = true,
 };
 
 static const struct of_device_id tegra_dc_of_match[] = {
@@ -1384,6 +1588,9 @@ static const struct of_device_id tegra_dc_of_match[] = {
 		.compatible = "nvidia,tegra124-dc",
 		.data = &tegra124_dc_soc_info,
 	}, {
+		.compatible = "nvidia,tegra114-dc",
+		.data = &tegra114_dc_soc_info,
+	}, {
 		.compatible = "nvidia,tegra30-dc",
 		.data = &tegra30_dc_soc_info,
 	}, {
@@ -1466,9 +1673,34 @@ static int tegra_dc_probe(struct platform_device *pdev)
 		return PTR_ERR(dc->rst);
 	}
 
-	err = clk_prepare_enable(dc->clk);
-	if (err < 0)
-		return err;
+	if (dc->soc->has_powergate) {
+		if (dc->pipe == 0)
+			dc->powergate = TEGRA_POWERGATE_DIS;
+		else
+			dc->powergate = TEGRA_POWERGATE_DISB;
+
+		err = tegra_powergate_sequence_power_up(dc->powergate, dc->clk,
+							dc->rst);
+		if (err < 0) {
+			dev_err(&pdev->dev, "failed to power partition: %d\n",
+				err);
+			return err;
+		}
+	} else {
+		err = clk_prepare_enable(dc->clk);
+		if (err < 0) {
+			dev_err(&pdev->dev, "failed to enable clock: %d\n",
+				err);
+			return err;
+		}
+
+		err = reset_control_deassert(dc->rst);
+		if (err < 0) {
+			dev_err(&pdev->dev, "failed to deassert reset: %d\n",
+				err);
+			return err;
+		}
+	}
 
 	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	dc->regs = devm_ioremap_resource(&pdev->dev, regs);
@@ -1522,6 +1754,10 @@ static int tegra_dc_remove(struct platform_device *pdev)
 	}
 
 	reset_control_assert(dc->rst);
+
+	if (dc->soc->has_powergate)
+		tegra_powergate_power_off(dc->powergate);
+
 	clk_disable_unprepare(dc->clk);
 
 	return 0;
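
A note on the error handling introduced in tegra_dc_init() above: instead of returning early and leaking the IOMMU attachment or partially created planes, every failure now jumps to a single cleanup label that releases resources in reverse order, with NULL checks covering the steps that never ran. A minimal standalone sketch of that single-label idiom (the resource names are hypothetical, not part of the driver):

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for plane/IOMMU setup; failure is simulated by the caller. */
static void *acquire(const char *name, int fail)
{
	printf("acquire %s\n", name);
	return fail ? NULL : malloc(1);
}

static void release(void *res, const char *name)
{
	printf("release %s\n", name);
	free(res);
}

static int dc_init(int fail_at)
{
	void *domain, *primary = NULL, *cursor = NULL;

	domain = acquire("iommu domain", fail_at == 1);
	if (!domain)
		return -1;

	primary = acquire("primary plane", fail_at == 2);
	if (!primary)
		goto cleanup;

	cursor = acquire("cursor plane", fail_at == 3);
	if (!cursor)
		goto cleanup;

	return 0;

cleanup:
	/* One label serves every failure point: the NULL checks make it
	 * safe no matter how far initialization got. */
	if (cursor)
		release(cursor, "cursor plane");
	if (primary)
		release(primary, "primary plane");
	release(domain, "iommu domain");
	return -1;
}

int main(void)
{
	return dc_init(3) ? 1 : 0;
}
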
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 59736bb810cd..e549afeece1f 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -8,6 +8,7 @@
  */
 
 #include <linux/host1x.h>
+#include <linux/iommu.h>
 
 #include "drm.h"
 #include "gem.h"
@@ -33,6 +34,17 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
 	if (!tegra)
 		return -ENOMEM;
 
+	if (iommu_present(&platform_bus_type)) {
+		tegra->domain = iommu_domain_alloc(&platform_bus_type);
+		if (IS_ERR(tegra->domain)) {
+			err = PTR_ERR(tegra->domain);
+			goto free;
+		}
+
+		DRM_DEBUG("IOMMU context initialized\n");
+		drm_mm_init(&tegra->mm, 0, SZ_2G);
+	}
+
 	mutex_init(&tegra->clients_lock);
 	INIT_LIST_HEAD(&tegra->clients);
 	drm->dev_private = tegra;
@@ -42,13 +54,13 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
 
 	err = tegra_drm_fb_prepare(drm);
 	if (err < 0)
-		return err;
+		goto config;
 
 	drm_kms_helper_poll_init(drm);
 
 	err = host1x_device_init(device);
 	if (err < 0)
-		return err;
+		goto fbdev;
 
 	/*
 	 * We don't use the drm_irq_install() helpers provided by the DRM
@@ -59,18 +71,37 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
 
 	err = drm_vblank_init(drm, drm->mode_config.num_crtc);
 	if (err < 0)
-		return err;
+		goto device;
 
 	err = tegra_drm_fb_init(drm);
 	if (err < 0)
-		return err;
+		goto vblank;
 
 	return 0;
+
+vblank:
+	drm_vblank_cleanup(drm);
+device:
+	host1x_device_exit(device);
+fbdev:
+	drm_kms_helper_poll_fini(drm);
+	tegra_drm_fb_free(drm);
+config:
+	drm_mode_config_cleanup(drm);
+
+	if (tegra->domain) {
+		iommu_domain_free(tegra->domain);
+		drm_mm_takedown(&tegra->mm);
+	}
+free:
+	kfree(tegra);
+	return err;
 }
 
 static int tegra_drm_unload(struct drm_device *drm)
 {
 	struct host1x_device *device = to_host1x_device(drm->dev);
+	struct tegra_drm *tegra = drm->dev_private;
 	int err;
 
 	drm_kms_helper_poll_fini(drm);
@@ -82,6 +113,13 @@ static int tegra_drm_unload(struct drm_device *drm)
 	if (err < 0)
 		return err;
 
+	if (tegra->domain) {
+		iommu_domain_free(tegra->domain);
+		drm_mm_takedown(&tegra->mm);
+	}
+
+	kfree(tegra);
+
 	return 0;
 }
 
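
tegra_drm_load() above uses the other common kernel unwind shape: one label per teardown step, ordered so that a failure at step N falls through the undo actions for steps N-1 down to 1. A compilable toy version of that cascading-label structure (step names are illustrative only):

#include <stdio.h>

static int step(const char *name, int fail)
{
	printf("%s: %s\n", name, fail ? "failed" : "ok");
	return fail ? -1 : 0;
}

static int load(int fail_at)
{
	int err;

	err = step("fb_prepare", fail_at == 1);
	if (err < 0)
		goto config;

	err = step("host1x_init", fail_at == 2);
	if (err < 0)
		goto fbdev;

	err = step("vblank_init", fail_at == 3);
	if (err < 0)
		goto device;

	return 0;

	/* Each label undoes the step acquired just before the one that
	 * failed, then control falls through to the remaining undos. */
device:
	printf("undo host1x_init\n");
fbdev:
	printf("undo fb_prepare\n");
config:
	printf("undo mode_config\n");
	return err;
}

int main(void)
{
	return load(3) ? 1 : 0;
}
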
diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h
index e89c70fa82d5..3a3b2e7b5b3f 100644
--- a/drivers/gpu/drm/tegra/drm.h
+++ b/drivers/gpu/drm/tegra/drm.h
@@ -39,6 +39,9 @@ struct tegra_fbdev {
 struct tegra_drm {
 	struct drm_device *drm;
 
+	struct iommu_domain *domain;
+	struct drm_mm mm;
+
 	struct mutex clients_lock;
 	struct list_head clients;
 
@@ -101,6 +104,7 @@ struct tegra_dc {
 	spinlock_t lock;
 
 	struct drm_crtc base;
+	int powergate;
 	int pipe;
 
 	struct clk *clk;
@@ -120,6 +124,8 @@ struct tegra_dc {
 	struct drm_pending_vblank_event *event;
 
 	const struct tegra_dc_soc_info *soc;
+
+	struct iommu_domain *domain;
 };
 
 static inline struct tegra_dc *
@@ -133,16 +139,15 @@ static inline struct tegra_dc *to_tegra_dc(struct drm_crtc *crtc)
 	return crtc ? container_of(crtc, struct tegra_dc, base) : NULL;
 }
 
-static inline void tegra_dc_writel(struct tegra_dc *dc, unsigned long value,
-				   unsigned long reg)
+static inline void tegra_dc_writel(struct tegra_dc *dc, u32 value,
+				   unsigned long offset)
 {
-	writel(value, dc->regs + (reg << 2));
+	writel(value, dc->regs + (offset << 2));
 }
 
-static inline unsigned long tegra_dc_readl(struct tegra_dc *dc,
-					   unsigned long reg)
+static inline u32 tegra_dc_readl(struct tegra_dc *dc, unsigned long offset)
 {
-	return readl(dc->regs + (reg << 2));
+	return readl(dc->regs + (offset << 2));
 }
 
 struct tegra_dc_window {
@@ -287,6 +292,7 @@ bool tegra_fb_is_bottom_up(struct drm_framebuffer *framebuffer);
 int tegra_fb_get_tiling(struct drm_framebuffer *framebuffer,
 			struct tegra_bo_tiling *tiling);
 int tegra_drm_fb_prepare(struct drm_device *drm);
+void tegra_drm_fb_free(struct drm_device *drm);
 int tegra_drm_fb_init(struct drm_device *drm);
 void tegra_drm_fb_exit(struct drm_device *drm);
 #ifdef CONFIG_DRM_TEGRA_FBDEV
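
The accessor change above is worth a remark: tegra_dc_readl()/tegra_dc_writel() now take and return u32, matching the 32-bit registers, and the offset << 2 survives because the Tegra display registers are numbered in 32-bit words rather than bytes. A small userspace illustration of word-indexed access (the offset constant mirrors the driver's dc.h and is used here purely as an example):

#include <stdint.h>
#include <stdio.h>

#define DC_CMD_STATE_CONTROL 0x041	/* word offset, as in dc.h */

static uint32_t regs[0x1000];		/* fake register file */

/* Word-indexed accessors: byte address = offset << 2. */
static void dc_writel(uint32_t value, unsigned long offset)
{
	regs[offset] = value;
}

static uint32_t dc_readl(unsigned long offset)
{
	return regs[offset];
}

int main(void)
{
	dc_writel(1u << 8, DC_CMD_STATE_CONTROL);
	printf("word offset %#lx -> byte offset %#lx, value %#x\n",
	       (unsigned long)DC_CMD_STATE_CONTROL,
	       (unsigned long)DC_CMD_STATE_CONTROL << 2,
	       dc_readl(DC_CMD_STATE_CONTROL));
	return 0;
}
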
diff --git a/drivers/gpu/drm/tegra/dsi.c b/drivers/gpu/drm/tegra/dsi.c
index f7874458926a..33f67fd601c6 100644
--- a/drivers/gpu/drm/tegra/dsi.c
+++ b/drivers/gpu/drm/tegra/dsi.c
@@ -11,6 +11,7 @@
 #include <linux/host1x.h>
 #include <linux/module.h>
 #include <linux/of.h>
+#include <linux/of_platform.h>
 #include <linux/platform_device.h>
 #include <linux/reset.h>
 
@@ -26,9 +27,6 @@
 #include "dsi.h"
 #include "mipi-phy.h"
 
-#define DSI_VIDEO_FIFO_DEPTH (1920 / 4)
-#define DSI_HOST_FIFO_DEPTH 64
-
 struct tegra_dsi {
 	struct host1x_client client;
 	struct tegra_output output;
@@ -54,6 +52,13 @@ struct tegra_dsi {
 
 	struct regulator *vdd;
 	bool enabled;
+
+	unsigned int video_fifo_depth;
+	unsigned int host_fifo_depth;
+
+	/* for ganged-mode support */
+	struct tegra_dsi *master;
+	struct tegra_dsi *slave;
 };
 
 static inline struct tegra_dsi *
@@ -318,6 +323,21 @@ static const u32 pkt_seq_video_non_burst_sync_events[NUM_PKT_SEQ] = {
 	[11] = PKT_ID0(MIPI_DSI_BLANKING_PACKET) | PKT_LEN0(4),
 };
 
+static const u32 pkt_seq_command_mode[NUM_PKT_SEQ] = {
+	[ 0] = 0,
+	[ 1] = 0,
+	[ 2] = 0,
+	[ 3] = 0,
+	[ 4] = 0,
+	[ 5] = 0,
+	[ 6] = PKT_ID0(MIPI_DSI_DCS_LONG_WRITE) | PKT_LEN0(3) | PKT_LP,
+	[ 7] = 0,
+	[ 8] = 0,
+	[ 9] = 0,
+	[10] = PKT_ID0(MIPI_DSI_DCS_LONG_WRITE) | PKT_LEN0(5) | PKT_LP,
+	[11] = 0,
+};
+
 static int tegra_dsi_set_phy_timing(struct tegra_dsi *dsi)
 {
 	struct mipi_dphy_timing timing;
@@ -329,7 +349,7 @@ static int tegra_dsi_set_phy_timing(struct tegra_dsi *dsi)
 	if (rate < 0)
 		return rate;
 
-	period = DIV_ROUND_CLOSEST(1000000000UL, rate * 2);
+	period = DIV_ROUND_CLOSEST(NSEC_PER_SEC, rate * 2);
 
 	err = mipi_dphy_timing_get_default(&timing, period);
 	if (err < 0)
@@ -369,6 +389,9 @@ static int tegra_dsi_set_phy_timing(struct tegra_dsi *dsi)
 		DSI_TIMING_FIELD(timing.tago, period, 1);
 	tegra_dsi_writel(dsi, value, DSI_BTA_TIMING);
 
+	if (dsi->slave)
+		return tegra_dsi_set_phy_timing(dsi->slave);
+
 	return 0;
 }
 
@@ -426,26 +449,59 @@ static int tegra_dsi_get_format(enum mipi_dsi_pixel_format format,
 	return 0;
 }
 
-static int tegra_output_dsi_enable(struct tegra_output *output)
+static void tegra_dsi_ganged_enable(struct tegra_dsi *dsi, unsigned int start,
+				    unsigned int size)
+{
+	u32 value;
+
+	tegra_dsi_writel(dsi, start, DSI_GANGED_MODE_START);
+	tegra_dsi_writel(dsi, size << 16 | size, DSI_GANGED_MODE_SIZE);
+
+	value = DSI_GANGED_MODE_CONTROL_ENABLE;
+	tegra_dsi_writel(dsi, value, DSI_GANGED_MODE_CONTROL);
+}
+
+static void tegra_dsi_enable(struct tegra_dsi *dsi)
+{
+	u32 value;
+
+	value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL);
+	value |= DSI_POWER_CONTROL_ENABLE;
+	tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL);
+
+	if (dsi->slave)
+		tegra_dsi_enable(dsi->slave);
+}
+
+static unsigned int tegra_dsi_get_lanes(struct tegra_dsi *dsi)
+{
+	if (dsi->master)
+		return dsi->master->lanes + dsi->lanes;
+
+	if (dsi->slave)
+		return dsi->lanes + dsi->slave->lanes;
+
+	return dsi->lanes;
+}
+
+static int tegra_dsi_configure(struct tegra_dsi *dsi, unsigned int pipe,
+			       const struct drm_display_mode *mode)
 {
-	struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
-	struct drm_display_mode *mode = &dc->base.mode;
 	unsigned int hact, hsw, hbp, hfp, i, mul, div;
-	struct tegra_dsi *dsi = to_dsi(output);
 	enum tegra_dsi_format format;
-	unsigned long value;
 	const u32 *pkt_seq;
+	u32 value;
 	int err;
 
-	if (dsi->enabled)
-		return 0;
-
 	if (dsi->flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) {
 		DRM_DEBUG_KMS("Non-burst video mode with sync pulses\n");
 		pkt_seq = pkt_seq_video_non_burst_sync_pulses;
-	} else {
+	} else if (dsi->flags & MIPI_DSI_MODE_VIDEO) {
 		DRM_DEBUG_KMS("Non-burst video mode with sync events\n");
 		pkt_seq = pkt_seq_video_non_burst_sync_events;
+	} else {
+		DRM_DEBUG_KMS("Command mode\n");
+		pkt_seq = pkt_seq_command_mode;
 	}
 
 	err = tegra_dsi_get_muldiv(dsi->format, &mul, &div);
@@ -456,61 +512,136 @@ static int tegra_output_dsi_enable(struct tegra_output *output)
 	if (err < 0)
 		return err;
 
-	err = clk_enable(dsi->clk);
-	if (err < 0)
-		return err;
-
-	reset_control_deassert(dsi->rst);
-
 	value = DSI_CONTROL_CHANNEL(0) | DSI_CONTROL_FORMAT(format) |
 		DSI_CONTROL_LANES(dsi->lanes - 1) |
-		DSI_CONTROL_SOURCE(dc->pipe);
+		DSI_CONTROL_SOURCE(pipe);
 	tegra_dsi_writel(dsi, value, DSI_CONTROL);
 
-	tegra_dsi_writel(dsi, DSI_VIDEO_FIFO_DEPTH, DSI_MAX_THRESHOLD);
+	tegra_dsi_writel(dsi, dsi->video_fifo_depth, DSI_MAX_THRESHOLD);
 
-	value = DSI_HOST_CONTROL_HS | DSI_HOST_CONTROL_CS |
-		DSI_HOST_CONTROL_ECC;
+	value = DSI_HOST_CONTROL_HS;
 	tegra_dsi_writel(dsi, value, DSI_HOST_CONTROL);
 
 	value = tegra_dsi_readl(dsi, DSI_CONTROL);
+
 	if (dsi->flags & MIPI_DSI_CLOCK_NON_CONTINUOUS)
 		value |= DSI_CONTROL_HS_CLK_CTRL;
+
 	value &= ~DSI_CONTROL_TX_TRIG(3);
-	value &= ~DSI_CONTROL_DCS_ENABLE;
+
+	/* enable DCS commands for command mode */
+	if (dsi->flags & MIPI_DSI_MODE_VIDEO)
+		value &= ~DSI_CONTROL_DCS_ENABLE;
+	else
+		value |= DSI_CONTROL_DCS_ENABLE;
+
 	value |= DSI_CONTROL_VIDEO_ENABLE;
 	value &= ~DSI_CONTROL_HOST_ENABLE;
 	tegra_dsi_writel(dsi, value, DSI_CONTROL);
 
-	err = tegra_dsi_set_phy_timing(dsi);
-	if (err < 0)
-		return err;
-
 	for (i = 0; i < NUM_PKT_SEQ; i++)
 		tegra_dsi_writel(dsi, pkt_seq[i], DSI_PKT_SEQ_0_LO + i);
 
-	/* horizontal active pixels */
-	hact = mode->hdisplay * mul / div;
+	if (dsi->flags & MIPI_DSI_MODE_VIDEO) {
+		/* horizontal active pixels */
+		hact = mode->hdisplay * mul / div;
 
-	/* horizontal sync width */
-	hsw = (mode->hsync_end - mode->hsync_start) * mul / div;
-	hsw -= 10;
+		/* horizontal sync width */
+		hsw = (mode->hsync_end - mode->hsync_start) * mul / div;
+		hsw -= 10;
 
-	/* horizontal back porch */
-	hbp = (mode->htotal - mode->hsync_end) * mul / div;
-	hbp -= 14;
+		/* horizontal back porch */
+		hbp = (mode->htotal - mode->hsync_end) * mul / div;
+		hbp -= 14;
 
-	/* horizontal front porch */
-	hfp = (mode->hsync_start - mode->hdisplay) * mul / div;
-	hfp -= 8;
+		/* horizontal front porch */
+		hfp = (mode->hsync_start - mode->hdisplay) * mul / div;
+		hfp -= 8;
 
-	tegra_dsi_writel(dsi, hsw << 16 | 0, DSI_PKT_LEN_0_1);
-	tegra_dsi_writel(dsi, hact << 16 | hbp, DSI_PKT_LEN_2_3);
-	tegra_dsi_writel(dsi, hfp, DSI_PKT_LEN_4_5);
-	tegra_dsi_writel(dsi, 0x0f0f << 16, DSI_PKT_LEN_6_7);
+		tegra_dsi_writel(dsi, hsw << 16 | 0, DSI_PKT_LEN_0_1);
+		tegra_dsi_writel(dsi, hact << 16 | hbp, DSI_PKT_LEN_2_3);
+		tegra_dsi_writel(dsi, hfp, DSI_PKT_LEN_4_5);
+		tegra_dsi_writel(dsi, 0x0f0f << 16, DSI_PKT_LEN_6_7);
 
-	/* set SOL delay */
-	tegra_dsi_writel(dsi, 8 * mul / div, DSI_SOL_DELAY);
+		/* set SOL delay (for non-burst mode only) */
+		tegra_dsi_writel(dsi, 8 * mul / div, DSI_SOL_DELAY);
+
+		/* TODO: implement ganged mode */
+	} else {
+		u16 bytes;
+
+		if (dsi->master || dsi->slave) {
+			/*
+			 * For ganged mode, assume symmetric left-right mode.
+			 */
+			bytes = 1 + (mode->hdisplay / 2) * mul / div;
+		} else {
+			/* 1 byte (DCS command) + pixel data */
+			bytes = 1 + mode->hdisplay * mul / div;
+		}
+
+		tegra_dsi_writel(dsi, 0, DSI_PKT_LEN_0_1);
+		tegra_dsi_writel(dsi, bytes << 16, DSI_PKT_LEN_2_3);
+		tegra_dsi_writel(dsi, bytes << 16, DSI_PKT_LEN_4_5);
+		tegra_dsi_writel(dsi, 0, DSI_PKT_LEN_6_7);
+
+		value = MIPI_DCS_WRITE_MEMORY_START << 8 |
+			MIPI_DCS_WRITE_MEMORY_CONTINUE;
+		tegra_dsi_writel(dsi, value, DSI_DCS_CMDS);
+
+		/* set SOL delay */
+		if (dsi->master || dsi->slave) {
+			unsigned int lanes = tegra_dsi_get_lanes(dsi);
+			unsigned long delay, bclk, bclk_ganged;
+
+			/* SOL to valid, valid to FIFO and FIFO write delay */
+			delay = 4 + 4 + 2;
+			delay = DIV_ROUND_UP(delay * mul, div * lanes);
+			/* FIFO read delay */
+			delay = delay + 6;
+
+			bclk = DIV_ROUND_UP(mode->htotal * mul, div * lanes);
+			bclk_ganged = DIV_ROUND_UP(bclk * lanes / 2, lanes);
+			value = bclk - bclk_ganged + delay + 20;
+		} else {
+			/* TODO: revisit for non-ganged mode */
+			value = 8 * mul / div;
+		}
+
+		tegra_dsi_writel(dsi, value, DSI_SOL_DELAY);
+	}
+
+	if (dsi->slave) {
+		err = tegra_dsi_configure(dsi->slave, pipe, mode);
+		if (err < 0)
+			return err;
+
+		/*
+		 * TODO: Support modes other than symmetrical left-right
+		 * split.
+		 */
+		tegra_dsi_ganged_enable(dsi, 0, mode->hdisplay / 2);
+		tegra_dsi_ganged_enable(dsi->slave, mode->hdisplay / 2,
+					mode->hdisplay / 2);
+	}
+
+	return 0;
+}
+
+static int tegra_output_dsi_enable(struct tegra_output *output)
+{
+	struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
+	const struct drm_display_mode *mode = &dc->base.mode;
+	struct tegra_dsi *dsi = to_dsi(output);
+	u32 value;
+	int err;
+
+	if (dsi->enabled)
+		return 0;
+
+	err = tegra_dsi_configure(dsi, dc->pipe, mode);
+	if (err < 0)
+		return err;
 
 	/* enable display controller */
 	value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS);
@@ -531,28 +662,79 @@ static int tegra_output_dsi_enable(struct tegra_output *output)
 	tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
 
 	/* enable DSI controller */
-	value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL);
-	value |= DSI_POWER_CONTROL_ENABLE;
-	tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL);
+	tegra_dsi_enable(dsi);
 
 	dsi->enabled = true;
 
 	return 0;
 }
 
+static int tegra_dsi_wait_idle(struct tegra_dsi *dsi, unsigned long timeout)
+{
+	u32 value;
+
+	timeout = jiffies + msecs_to_jiffies(timeout);
+
+	while (time_before(jiffies, timeout)) {
+		value = tegra_dsi_readl(dsi, DSI_STATUS);
+		if (value & DSI_STATUS_IDLE)
+			return 0;
+
+		usleep_range(1000, 2000);
+	}
+
+	return -ETIMEDOUT;
+}
+
+static void tegra_dsi_video_disable(struct tegra_dsi *dsi)
+{
+	u32 value;
+
+	value = tegra_dsi_readl(dsi, DSI_CONTROL);
+	value &= ~DSI_CONTROL_VIDEO_ENABLE;
+	tegra_dsi_writel(dsi, value, DSI_CONTROL);
+
+	if (dsi->slave)
+		tegra_dsi_video_disable(dsi->slave);
+}
+
+static void tegra_dsi_ganged_disable(struct tegra_dsi *dsi)
+{
+	tegra_dsi_writel(dsi, 0, DSI_GANGED_MODE_START);
+	tegra_dsi_writel(dsi, 0, DSI_GANGED_MODE_SIZE);
+	tegra_dsi_writel(dsi, 0, DSI_GANGED_MODE_CONTROL);
+}
+
+static void tegra_dsi_disable(struct tegra_dsi *dsi)
+{
+	u32 value;
+
+	if (dsi->slave) {
+		tegra_dsi_ganged_disable(dsi->slave);
+		tegra_dsi_ganged_disable(dsi);
+	}
+
+	value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL);
+	value &= ~DSI_POWER_CONTROL_ENABLE;
+	tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL);
+
+	if (dsi->slave)
+		tegra_dsi_disable(dsi->slave);
+
+	usleep_range(5000, 10000);
+}
+
 static int tegra_output_dsi_disable(struct tegra_output *output)
 {
 	struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
 	struct tegra_dsi *dsi = to_dsi(output);
 	unsigned long value;
+	int err;
 
 	if (!dsi->enabled)
 		return 0;
 
-	/* disable DSI controller */
-	value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL);
-	value &= ~DSI_POWER_CONTROL_ENABLE;
-	tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL);
+	tegra_dsi_video_disable(dsi);
 
 	/*
 	 * The following accesses registers of the display controller, so make
@@ -576,39 +758,68 @@ static int tegra_output_dsi_disable(struct tegra_output *output)
 		tegra_dc_writel(dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
 	}
 
-	clk_disable(dsi->clk);
+	err = tegra_dsi_wait_idle(dsi, 100);
+	if (err < 0)
+		dev_dbg(dsi->dev, "failed to idle DSI: %d\n", err);
+
+	tegra_dsi_disable(dsi);
 
 	dsi->enabled = false;
 
 	return 0;
 }
 
+static void tegra_dsi_set_timeout(struct tegra_dsi *dsi, unsigned long bclk,
+				  unsigned int vrefresh)
+{
+	unsigned int timeout;
+	u32 value;
+
+	/* one frame high-speed transmission timeout */
+	timeout = (bclk / vrefresh) / 512;
+	value = DSI_TIMEOUT_LRX(0x2000) | DSI_TIMEOUT_HTX(timeout);
+	tegra_dsi_writel(dsi, value, DSI_TIMEOUT_0);
+
+	/* 2 ms peripheral timeout for panel */
+	timeout = 2 * bclk / 512 * 1000;
+	value = DSI_TIMEOUT_PR(timeout) | DSI_TIMEOUT_TA(0x2000);
+	tegra_dsi_writel(dsi, value, DSI_TIMEOUT_1);
+
+	value = DSI_TALLY_TA(0) | DSI_TALLY_LRX(0) | DSI_TALLY_HTX(0);
+	tegra_dsi_writel(dsi, value, DSI_TO_TALLY);
+
+	if (dsi->slave)
+		tegra_dsi_set_timeout(dsi->slave, bclk, vrefresh);
+}
+
 static int tegra_output_dsi_setup_clock(struct tegra_output *output,
 					struct clk *clk, unsigned long pclk,
 					unsigned int *divp)
 {
 	struct tegra_dc *dc = to_tegra_dc(output->encoder.crtc);
 	struct drm_display_mode *mode = &dc->base.mode;
-	unsigned int timeout, mul, div, vrefresh;
 	struct tegra_dsi *dsi = to_dsi(output);
-	unsigned long bclk, plld, value;
+	unsigned int mul, div, vrefresh, lanes;
+	unsigned long bclk, plld;
 	int err;
 
+	lanes = tegra_dsi_get_lanes(dsi);
+
 	err = tegra_dsi_get_muldiv(dsi->format, &mul, &div);
 	if (err < 0)
 		return err;
 
-	DRM_DEBUG_KMS("mul: %u, div: %u, lanes: %u\n", mul, div, dsi->lanes);
+	DRM_DEBUG_KMS("mul: %u, div: %u, lanes: %u\n", mul, div, lanes);
 	vrefresh = drm_mode_vrefresh(mode);
 	DRM_DEBUG_KMS("vrefresh: %u\n", vrefresh);
 
 	/* compute byte clock */
-	bclk = (pclk * mul) / (div * dsi->lanes);
+	bclk = (pclk * mul) / (div * lanes);
 
 	/*
 	 * Compute bit clock and round up to the next MHz.
 	 */
-	plld = DIV_ROUND_UP(bclk * 8, 1000000) * 1000000;
+	plld = DIV_ROUND_UP(bclk * 8, USEC_PER_SEC) * USEC_PER_SEC;
 
 	/*
 	 * We divide the frequency by two here, but we make up for that by
@@ -640,25 +851,17 @@ static int tegra_output_dsi_setup_clock(struct tegra_output *output,
 	 * not working properly otherwise. Perhaps the PLLs cannot generate
 	 * frequencies sufficiently high.
 	 */
-	*divp = ((8 * mul) / (div * dsi->lanes)) - 2;
+	*divp = ((8 * mul) / (div * lanes)) - 2;
 
 	/*
 	 * XXX: Move the below somewhere else so that we don't need to have
 	 * access to the vrefresh in this function?
 	 */
+	tegra_dsi_set_timeout(dsi, bclk, vrefresh);
 
-	/* one frame high-speed transmission timeout */
-	timeout = (bclk / vrefresh) / 512;
-	value = DSI_TIMEOUT_LRX(0x2000) | DSI_TIMEOUT_HTX(timeout);
-	tegra_dsi_writel(dsi, value, DSI_TIMEOUT_0);
-
-	/* 2 ms peripheral timeout for panel */
-	timeout = 2 * bclk / 512 * 1000;
-	value = DSI_TIMEOUT_PR(timeout) | DSI_TIMEOUT_TA(0x2000);
-	tegra_dsi_writel(dsi, value, DSI_TIMEOUT_1);
-
-	value = DSI_TALLY_TA(0) | DSI_TALLY_LRX(0) | DSI_TALLY_HTX(0);
-	tegra_dsi_writel(dsi, value, DSI_TO_TALLY);
+	err = tegra_dsi_set_phy_timing(dsi);
+	if (err < 0)
+		return err;
 
 	return 0;
 }
@@ -695,7 +898,7 @@ static int tegra_dsi_pad_enable(struct tegra_dsi *dsi)
 
 static int tegra_dsi_pad_calibrate(struct tegra_dsi *dsi)
 {
-	unsigned long value;
+	u32 value;
 
 	tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_0);
 	tegra_dsi_writel(dsi, 0, DSI_PAD_CONTROL_1);
@@ -720,14 +923,17 @@ static int tegra_dsi_init(struct host1x_client *client)
 	struct tegra_dsi *dsi = host1x_client_to_dsi(client);
 	int err;
 
-	dsi->output.type = TEGRA_OUTPUT_DSI;
-	dsi->output.dev = client->dev;
-	dsi->output.ops = &dsi_ops;
-
-	err = tegra_output_init(drm, &dsi->output);
-	if (err < 0) {
-		dev_err(client->dev, "output setup failed: %d\n", err);
-		return err;
+	/* Gangsters must not register their own outputs. */
+	if (!dsi->master) {
+		dsi->output.type = TEGRA_OUTPUT_DSI;
+		dsi->output.dev = client->dev;
+		dsi->output.ops = &dsi_ops;
+
+		err = tegra_output_init(drm, &dsi->output);
+		if (err < 0) {
+			dev_err(client->dev, "output setup failed: %d\n", err);
+			return err;
+		}
 	}
 
 	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
@@ -736,12 +942,6 @@ static int tegra_dsi_init(struct host1x_client *client)
 			dev_err(dsi->dev, "debugfs setup failed: %d\n", err);
 	}
 
-	err = tegra_dsi_pad_calibrate(dsi);
-	if (err < 0) {
-		dev_err(dsi->dev, "MIPI calibration failed: %d\n", err);
-		return err;
-	}
-
 	return 0;
 }
 
@@ -756,16 +956,20 @@ static int tegra_dsi_exit(struct host1x_client *client)
 			dev_err(dsi->dev, "debugfs cleanup failed: %d\n", err);
 	}
 
-	err = tegra_output_disable(&dsi->output);
-	if (err < 0) {
-		dev_err(client->dev, "output failed to disable: %d\n", err);
-		return err;
-	}
-
-	err = tegra_output_exit(&dsi->output);
-	if (err < 0) {
-		dev_err(client->dev, "output cleanup failed: %d\n", err);
-		return err;
+	if (!dsi->master) {
+		err = tegra_output_disable(&dsi->output);
+		if (err < 0) {
+			dev_err(client->dev, "output failed to disable: %d\n",
+				err);
+			return err;
+		}
+
+		err = tegra_output_exit(&dsi->output);
+		if (err < 0) {
+			dev_err(client->dev, "output cleanup failed: %d\n",
+				err);
+			return err;
+		}
 	}
 
 	return 0;
@@ -792,20 +996,324 @@ static int tegra_dsi_setup_clocks(struct tegra_dsi *dsi)
 	return 0;
 }
 
+static const char * const error_report[16] = {
+	"SoT Error",
+	"SoT Sync Error",
+	"EoT Sync Error",
+	"Escape Mode Entry Command Error",
+	"Low-Power Transmit Sync Error",
+	"Peripheral Timeout Error",
+	"False Control Error",
+	"Contention Detected",
+	"ECC Error, single-bit",
+	"ECC Error, multi-bit",
+	"Checksum Error",
+	"DSI Data Type Not Recognized",
+	"DSI VC ID Invalid",
+	"Invalid Transmission Length",
+	"Reserved",
+	"DSI Protocol Violation",
+};
+
+static ssize_t tegra_dsi_read_response(struct tegra_dsi *dsi,
+				       const struct mipi_dsi_msg *msg,
+				       size_t count)
+{
+	u8 *rx = msg->rx_buf;
+	unsigned int i, j, k;
+	size_t size = 0;
+	u16 errors;
+	u32 value;
+
+	/* read and parse packet header */
+	value = tegra_dsi_readl(dsi, DSI_RD_DATA);
+
+	switch (value & 0x3f) {
+	case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
+		errors = (value >> 8) & 0xffff;
+		dev_dbg(dsi->dev, "Acknowledge and error report: %04x\n",
+			errors);
+		for (i = 0; i < ARRAY_SIZE(error_report); i++)
+			if (errors & BIT(i))
+				dev_dbg(dsi->dev, "  %2u: %s\n", i,
+					error_report[i]);
+		break;
+
+	case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
+		rx[0] = (value >> 8) & 0xff;
+		size = 1;
+		break;
+
+	case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_2BYTE:
+		rx[0] = (value >> 8) & 0xff;
+		rx[1] = (value >> 16) & 0xff;
+		size = 2;
+		break;
+
+	case MIPI_DSI_RX_DCS_LONG_READ_RESPONSE:
+		size = ((value >> 8) & 0xff00) | ((value >> 8) & 0xff);
+		break;
+
+	case MIPI_DSI_RX_GENERIC_LONG_READ_RESPONSE:
+		size = ((value >> 8) & 0xff00) | ((value >> 8) & 0xff);
+		break;
+
+	default:
+		dev_err(dsi->dev, "unhandled response type: %02x\n",
+			value & 0x3f);
+		return -EPROTO;
+	}
+
+	size = min(size, msg->rx_len);
+
+	if (msg->rx_buf && size > 0) {
+		for (i = 0, j = 0; i < count - 1; i++, j += 4) {
+			u8 *rx = msg->rx_buf + j;
+
+			value = tegra_dsi_readl(dsi, DSI_RD_DATA);
+
+			for (k = 0; k < 4 && (j + k) < msg->rx_len; k++)
+				rx[j + k] = (value >> (k << 3)) & 0xff;
+		}
+	}
+
+	return size;
+}
+
+static int tegra_dsi_transmit(struct tegra_dsi *dsi, unsigned long timeout)
+{
+	tegra_dsi_writel(dsi, DSI_TRIGGER_HOST, DSI_TRIGGER);
+
+	timeout = jiffies + msecs_to_jiffies(timeout);
+
+	while (time_before(jiffies, timeout)) {
+		u32 value = tegra_dsi_readl(dsi, DSI_TRIGGER);
+		if ((value & DSI_TRIGGER_HOST) == 0)
+			return 0;
+
+		usleep_range(1000, 2000);
+	}
+
+	DRM_DEBUG_KMS("timeout waiting for transmission to complete\n");
+	return -ETIMEDOUT;
+}
+
+static int tegra_dsi_wait_for_response(struct tegra_dsi *dsi,
+				       unsigned long timeout)
+{
+	timeout = jiffies + msecs_to_jiffies(250);
+
+	while (time_before(jiffies, timeout)) {
+		u32 value = tegra_dsi_readl(dsi, DSI_STATUS);
+		u8 count = value & 0x1f;
+
+		if (count > 0)
+			return count;
+
+		usleep_range(1000, 2000);
+	}
+
+	DRM_DEBUG_KMS("peripheral returned no data\n");
+	return -ETIMEDOUT;
+}
+
+static void tegra_dsi_writesl(struct tegra_dsi *dsi, unsigned long offset,
+			      const void *buffer, size_t size)
+{
+	const u8 *buf = buffer;
+	size_t i, j;
+	u32 value;
+
+	for (j = 0; j < size; j += 4) {
+		value = 0;
+
+		for (i = 0; i < 4 && j + i < size; i++)
+			value |= buf[j + i] << (i << 3);
+
+		tegra_dsi_writel(dsi, value, DSI_WR_DATA);
+	}
+}
+
+static ssize_t tegra_dsi_host_transfer(struct mipi_dsi_host *host,
+				       const struct mipi_dsi_msg *msg)
+{
+	struct tegra_dsi *dsi = host_to_tegra(host);
+	struct mipi_dsi_packet packet;
+	const u8 *header;
+	size_t count;
+	ssize_t err;
+	u32 value;
+
+	err = mipi_dsi_create_packet(&packet, msg);
+	if (err < 0)
+		return err;
+
+	header = packet.header;
+
+	/* maximum FIFO depth is 1920 words */
+	if (packet.size > dsi->video_fifo_depth * 4)
+		return -ENOSPC;
+
+	/* reset underflow/overflow flags */
+	value = tegra_dsi_readl(dsi, DSI_STATUS);
+	if (value & (DSI_STATUS_UNDERFLOW | DSI_STATUS_OVERFLOW)) {
+		value = DSI_HOST_CONTROL_FIFO_RESET;
+		tegra_dsi_writel(dsi, value, DSI_HOST_CONTROL);
+		usleep_range(10, 20);
+	}
+
+	value = tegra_dsi_readl(dsi, DSI_POWER_CONTROL);
+	value |= DSI_POWER_CONTROL_ENABLE;
+	tegra_dsi_writel(dsi, value, DSI_POWER_CONTROL);
+
+	usleep_range(5000, 10000);
+
+	value = DSI_HOST_CONTROL_CRC_RESET | DSI_HOST_CONTROL_TX_TRIG_HOST |
+		DSI_HOST_CONTROL_CS | DSI_HOST_CONTROL_ECC;
+
+	if ((msg->flags & MIPI_DSI_MSG_USE_LPM) == 0)
+		value |= DSI_HOST_CONTROL_HS;
+
+	/*
+	 * The host FIFO has a maximum of 64 words, so larger transmissions
+	 * need to use the video FIFO.
+	 */
+	if (packet.size > dsi->host_fifo_depth * 4)
+		value |= DSI_HOST_CONTROL_FIFO_SEL;
+
+	tegra_dsi_writel(dsi, value, DSI_HOST_CONTROL);
+
+	/*
+	 * For reads and messages with explicitly requested ACK, generate a
+	 * BTA sequence after the transmission of the packet.
+	 */
+	if ((msg->flags & MIPI_DSI_MSG_REQ_ACK) ||
+	    (msg->rx_buf && msg->rx_len > 0)) {
+		value = tegra_dsi_readl(dsi, DSI_HOST_CONTROL);
+		value |= DSI_HOST_CONTROL_PKT_BTA;
+		tegra_dsi_writel(dsi, value, DSI_HOST_CONTROL);
+	}
+
+	value = DSI_CONTROL_LANES(0) | DSI_CONTROL_HOST_ENABLE;
+	tegra_dsi_writel(dsi, value, DSI_CONTROL);
+
+	/* write packet header, ECC is generated by hardware */
+	value = header[2] << 16 | header[1] << 8 | header[0];
+	tegra_dsi_writel(dsi, value, DSI_WR_DATA);
+
+	/* write payload (if any) */
+	if (packet.payload_length > 0)
+		tegra_dsi_writesl(dsi, DSI_WR_DATA, packet.payload,
+				  packet.payload_length);
+
+	err = tegra_dsi_transmit(dsi, 250);
+	if (err < 0)
+		return err;
+
+	if ((msg->flags & MIPI_DSI_MSG_REQ_ACK) ||
+	    (msg->rx_buf && msg->rx_len > 0)) {
+		err = tegra_dsi_wait_for_response(dsi, 250);
+		if (err < 0)
+			return err;
+
+		count = err;
+
+		value = tegra_dsi_readl(dsi, DSI_RD_DATA);
+		switch (value) {
+		case 0x84:
+			/*
+			dev_dbg(dsi->dev, "ACK\n");
+			*/
+			break;
+
+		case 0x87:
+			/*
+			dev_dbg(dsi->dev, "ESCAPE\n");
+			*/
+			break;
+
+		default:
+			dev_err(dsi->dev, "unknown status: %08x\n", value);
+			break;
+		}
+
+		if (count > 1) {
+			err = tegra_dsi_read_response(dsi, msg, count);
+			if (err < 0)
+				dev_err(dsi->dev,
+					"failed to parse response: %zd\n",
+					err);
+			else {
+				/*
+				 * For read commands, return the number of
+				 * bytes returned by the peripheral.
+				 */
+				count = err;
+			}
+		}
+	} else {
+		/*
+		 * For write commands, we have transmitted the 4-byte header
+		 * plus the variable-length payload.
+		 */
+		count = 4 + packet.payload_length;
+	}
+
+	return count;
+}
+
+static int tegra_dsi_ganged_setup(struct tegra_dsi *dsi)
+{
+	struct clk *parent;
+	int err;
+
+	/* make sure both DSI controllers share the same PLL */
+	parent = clk_get_parent(dsi->slave->clk);
+	if (!parent)
+		return -EINVAL;
+
+	err = clk_set_parent(parent, dsi->clk_parent);
+	if (err < 0)
+		return err;
+
+	return 0;
+}
+
 static int tegra_dsi_host_attach(struct mipi_dsi_host *host,
 				 struct mipi_dsi_device *device)
 {
 	struct tegra_dsi *dsi = host_to_tegra(host);
-	struct tegra_output *output = &dsi->output;
 
 	dsi->flags = device->mode_flags;
 	dsi->format = device->format;
 	dsi->lanes = device->lanes;
 
-	output->panel = of_drm_find_panel(device->dev.of_node);
-	if (output->panel) {
-		if (output->connector.dev)
+	if (dsi->slave) {
+		int err;
+
+		dev_dbg(dsi->dev, "attaching dual-channel device %s\n",
+			dev_name(&device->dev));
+
+		err = tegra_dsi_ganged_setup(dsi);
+		if (err < 0) {
+			dev_err(dsi->dev, "failed to set up ganged mode: %d\n",
+				err);
+			return err;
+		}
+	}
+
+	/*
+	 * Slaves don't have a panel associated with them, so they provide
+	 * merely the second channel.
+	 */
+	if (!dsi->master) {
+		struct tegra_output *output = &dsi->output;
+
+		output->panel = of_drm_find_panel(device->dev.of_node);
+		if (output->panel && output->connector.dev) {
+			drm_panel_attach(output->panel, &output->connector);
 			drm_helper_hpd_irq_event(output->connector.dev);
+		}
 	}
 
 	return 0;
@@ -818,10 +1326,10 @@ static int tegra_dsi_host_detach(struct mipi_dsi_host *host,
 	struct tegra_output *output = &dsi->output;
 
 	if (output->panel && &device->dev == output->panel->dev) {
+		output->panel = NULL;
+
 		if (output->connector.dev)
 			drm_helper_hpd_irq_event(output->connector.dev);
-
-		output->panel = NULL;
 	}
 
 	return 0;
@@ -830,8 +1338,29 @@ static int tegra_dsi_host_detach(struct mipi_dsi_host *host,
 static const struct mipi_dsi_host_ops tegra_dsi_host_ops = {
 	.attach = tegra_dsi_host_attach,
 	.detach = tegra_dsi_host_detach,
+	.transfer = tegra_dsi_host_transfer,
 };
 
+static int tegra_dsi_ganged_probe(struct tegra_dsi *dsi)
+{
+	struct device_node *np;
+
+	np = of_parse_phandle(dsi->dev->of_node, "nvidia,ganged-mode", 0);
+	if (np) {
+		struct platform_device *gangster = of_find_device_by_node(np);
+
+		dsi->slave = platform_get_drvdata(gangster);
+		of_node_put(np);
+
+		if (!dsi->slave)
+			return -EPROBE_DEFER;
+
+		dsi->slave->master = dsi;
+	}
+
+	return 0;
+}
+
 static int tegra_dsi_probe(struct platform_device *pdev)
 {
 	struct tegra_dsi *dsi;
@@ -843,11 +1372,19 @@ static int tegra_dsi_probe(struct platform_device *pdev)
 		return -ENOMEM;
 
 	dsi->output.dev = dsi->dev = &pdev->dev;
+	dsi->video_fifo_depth = 1920;
+	dsi->host_fifo_depth = 64;
+
+	err = tegra_dsi_ganged_probe(dsi);
+	if (err < 0)
+		return err;
 
 	err = tegra_output_probe(&dsi->output);
 	if (err < 0)
 		return err;
 
+	dsi->output.connector.polled = DRM_CONNECTOR_POLL_HPD;
+
 	/*
 	 * Assume these values by default. When a DSI peripheral driver
 	 * attaches to the DSI host, the parameters will be taken from
@@ -861,68 +1398,83 @@ static int tegra_dsi_probe(struct platform_device *pdev)
 	if (IS_ERR(dsi->rst))
 		return PTR_ERR(dsi->rst);
 
+	err = reset_control_deassert(dsi->rst);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to bring DSI out of reset: %d\n",
+			err);
+		return err;
+	}
+
 	dsi->clk = devm_clk_get(&pdev->dev, NULL);
 	if (IS_ERR(dsi->clk)) {
 		dev_err(&pdev->dev, "cannot get DSI clock\n");
-		return PTR_ERR(dsi->clk);
+		err = PTR_ERR(dsi->clk);
+		goto reset;
 	}
 
 	err = clk_prepare_enable(dsi->clk);
 	if (err < 0) {
 		dev_err(&pdev->dev, "cannot enable DSI clock\n");
-		return err;
+		goto reset;
 	}
 
 	dsi->clk_lp = devm_clk_get(&pdev->dev, "lp");
 	if (IS_ERR(dsi->clk_lp)) {
 		dev_err(&pdev->dev, "cannot get low-power clock\n");
-		return PTR_ERR(dsi->clk_lp);
+		err = PTR_ERR(dsi->clk_lp);
+		goto disable_clk;
 	}
 
 	err = clk_prepare_enable(dsi->clk_lp);
 	if (err < 0) {
 		dev_err(&pdev->dev, "cannot enable low-power clock\n");
-		return err;
+		goto disable_clk;
 	}
 
 	dsi->clk_parent = devm_clk_get(&pdev->dev, "parent");
 	if (IS_ERR(dsi->clk_parent)) {
 		dev_err(&pdev->dev, "cannot get parent clock\n");
-		return PTR_ERR(dsi->clk_parent);
-	}
-
-	err = clk_prepare_enable(dsi->clk_parent);
-	if (err < 0) {
-		dev_err(&pdev->dev, "cannot enable parent clock\n");
-		return err;
+		err = PTR_ERR(dsi->clk_parent);
+		goto disable_clk_lp;
 	}
 
 	dsi->vdd = devm_regulator_get(&pdev->dev, "avdd-dsi-csi");
 	if (IS_ERR(dsi->vdd)) {
 		dev_err(&pdev->dev, "cannot get VDD supply\n");
-		return PTR_ERR(dsi->vdd);
+		err = PTR_ERR(dsi->vdd);
+		goto disable_clk_lp;
 	}
 
 	err = regulator_enable(dsi->vdd);
 	if (err < 0) {
 		dev_err(&pdev->dev, "cannot enable VDD supply\n");
-		return err;
+		goto disable_clk_lp;
 	}
 
 	err = tegra_dsi_setup_clocks(dsi);
 	if (err < 0) {
 		dev_err(&pdev->dev, "cannot setup clocks\n");
-		return err;
+		goto disable_vdd;
 	}
 
 	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	dsi->regs = devm_ioremap_resource(&pdev->dev, regs);
-	if (IS_ERR(dsi->regs))
-		return PTR_ERR(dsi->regs);
+	if (IS_ERR(dsi->regs)) {
+		err = PTR_ERR(dsi->regs);
+		goto disable_vdd;
+	}
 
 	dsi->mipi = tegra_mipi_request(&pdev->dev);
-	if (IS_ERR(dsi->mipi))
-		return PTR_ERR(dsi->mipi);
+	if (IS_ERR(dsi->mipi)) {
+		err = PTR_ERR(dsi->mipi);
+		goto disable_vdd;
+	}
+
+	err = tegra_dsi_pad_calibrate(dsi);
+	if (err < 0) {
+		dev_err(dsi->dev, "MIPI calibration failed: %d\n", err);
+		goto mipi_free;
+	}
 
 	dsi->host.ops = &tegra_dsi_host_ops;
 	dsi->host.dev = &pdev->dev;
@@ -930,7 +1482,7 @@ static int tegra_dsi_probe(struct platform_device *pdev)
 	err = mipi_dsi_host_register(&dsi->host);
 	if (err < 0) {
 		dev_err(&pdev->dev, "failed to register DSI host: %d\n", err);
-		return err;
+		goto mipi_free;
 	}
 
 	INIT_LIST_HEAD(&dsi->client.list);
@@ -941,12 +1493,26 @@ static int tegra_dsi_probe(struct platform_device *pdev)
 	if (err < 0) {
 		dev_err(&pdev->dev, "failed to register host1x client: %d\n",
 			err);
-		return err;
+		goto unregister;
 	}
 
 	platform_set_drvdata(pdev, dsi);
 
 	return 0;
+
+unregister:
+	mipi_dsi_host_unregister(&dsi->host);
+mipi_free:
+	tegra_mipi_free(dsi->mipi);
+disable_vdd:
+	regulator_disable(dsi->vdd);
+disable_clk_lp:
+	clk_disable_unprepare(dsi->clk_lp);
+disable_clk:
+	clk_disable_unprepare(dsi->clk);
+reset:
+	reset_control_assert(dsi->rst);
+	return err;
 }
 
 static int tegra_dsi_remove(struct platform_device *pdev)
@@ -965,7 +1531,6 @@ static int tegra_dsi_remove(struct platform_device *pdev)
 	tegra_mipi_free(dsi->mipi);
 
 	regulator_disable(dsi->vdd);
-	clk_disable_unprepare(dsi->clk_parent);
 	clk_disable_unprepare(dsi->clk_lp);
 	clk_disable_unprepare(dsi->clk);
 	reset_control_assert(dsi->rst);
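
The ganged-mode plumbing added to dsi.c above splits every scanline symmetrically: the master transmits pixels [0, hdisplay/2) and the slave [hdisplay/2, hdisplay), which is exactly what the two tegra_dsi_ganged_enable() calls program. A quick userspace check of the split and of the command-mode packet size (1 DCS byte plus pixel data; mul/div of 3/1 corresponds to RGB888 in tegra_dsi_get_muldiv()):

#include <stdio.h>

int main(void)
{
	unsigned int hdisplay = 1920;	/* example mode width */
	unsigned int mul = 3, div = 1;	/* bytes per pixel for RGB888 */
	unsigned int half = hdisplay / 2;

	/* per-controller GANGED_MODE_START/SIZE programming */
	printf("master: start = %u, size = %u\n", 0u, half);
	printf("slave:  start = %u, size = %u\n", half, half);

	/* command-mode packet length, ganged vs. single controller */
	printf("ganged bytes = %u\n", 1 + half * mul / div);
	printf("single bytes = %u\n", 1 + hdisplay * mul / div);
	return 0;
}
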
diff --git a/drivers/gpu/drm/tegra/dsi.h b/drivers/gpu/drm/tegra/dsi.h
index 5ce610d08d77..bad1006a5150 100644
--- a/drivers/gpu/drm/tegra/dsi.h
+++ b/drivers/gpu/drm/tegra/dsi.h
@@ -21,9 +21,16 @@
 #define DSI_INT_STATUS			0x0d
 #define DSI_INT_MASK			0x0e
 #define DSI_HOST_CONTROL		0x0f
+#define DSI_HOST_CONTROL_FIFO_RESET	(1 << 21)
+#define DSI_HOST_CONTROL_CRC_RESET	(1 << 20)
+#define DSI_HOST_CONTROL_TX_TRIG_SOL	(0 << 12)
+#define DSI_HOST_CONTROL_TX_TRIG_FIFO	(1 << 12)
+#define DSI_HOST_CONTROL_TX_TRIG_HOST	(2 << 12)
 #define DSI_HOST_CONTROL_RAW		(1 << 6)
 #define DSI_HOST_CONTROL_HS		(1 << 5)
-#define DSI_HOST_CONTROL_BTA		(1 << 2)
+#define DSI_HOST_CONTROL_FIFO_SEL	(1 << 4)
+#define DSI_HOST_CONTROL_IMM_BTA	(1 << 3)
+#define DSI_HOST_CONTROL_PKT_BTA	(1 << 2)
 #define DSI_HOST_CONTROL_CS		(1 << 1)
 #define DSI_HOST_CONTROL_ECC		(1 << 0)
 #define DSI_CONTROL			0x10
@@ -39,9 +46,13 @@
 #define DSI_SOL_DELAY			0x11
 #define DSI_MAX_THRESHOLD		0x12
 #define DSI_TRIGGER			0x13
+#define DSI_TRIGGER_HOST		(1 << 1)
+#define DSI_TRIGGER_VIDEO		(1 << 0)
 #define DSI_TX_CRC			0x14
 #define DSI_STATUS			0x15
 #define DSI_STATUS_IDLE			(1 << 10)
+#define DSI_STATUS_UNDERFLOW		(1 << 9)
+#define DSI_STATUS_OVERFLOW		(1 << 8)
 #define DSI_INIT_SEQ_CONTROL		0x1a
 #define DSI_INIT_SEQ_DATA_0		0x1b
 #define DSI_INIT_SEQ_DATA_1		0x1c
@@ -104,6 +115,7 @@
 #define DSI_PAD_CONTROL_3		0x51
 #define DSI_PAD_CONTROL_4		0x52
 #define DSI_GANGED_MODE_CONTROL		0x53
+#define DSI_GANGED_MODE_CONTROL_ENABLE	(1 << 0)
 #define DSI_GANGED_MODE_START		0x54
 #define DSI_GANGED_MODE_SIZE		0x55
 #define DSI_RAW_DATA_BYTE_COUNT		0x56
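
Note that the new TX_TRIG_* definitions above are not independent flag bits but the three encodings of a two-bit trigger-source field at bits 13:12, which is why the driver masks the whole field before selecting an encoding. A compilable illustration of that read-modify-write (the mask name is hypothetical; the driver open-codes the mask value):

#include <stdint.h>
#include <stdio.h>

#define TX_TRIG_MASK	(3u << 12)	/* hypothetical helper mask */
#define TX_TRIG_SOL	(0u << 12)
#define TX_TRIG_FIFO	(1u << 12)
#define TX_TRIG_HOST	(2u << 12)

int main(void)
{
	uint32_t value = 0xffffffffu;	/* pretend register contents */

	value &= ~TX_TRIG_MASK;		/* clear both field bits */
	value |= TX_TRIG_HOST;		/* then select one encoding */

	printf("field = %u (expect 2)\n", (value >> 12) & 3u);
	return 0;
}
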
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 3513d12d5aa1..e9c715d89261 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -65,8 +65,12 @@ static void tegra_fb_destroy(struct drm_framebuffer *framebuffer)
 	for (i = 0; i < fb->num_planes; i++) {
 		struct tegra_bo *bo = fb->planes[i];
 
-		if (bo)
+		if (bo) {
+			if (bo->pages && bo->vaddr)
+				vunmap(bo->vaddr);
+
 			drm_gem_object_unreference_unlocked(&bo->gem);
+		}
 	}
 
 	drm_framebuffer_cleanup(framebuffer);
@@ -223,14 +227,16 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
 	info = framebuffer_alloc(0, drm->dev);
 	if (!info) {
 		dev_err(drm->dev, "failed to allocate framebuffer info\n");
-		tegra_bo_free_object(&bo->gem);
+		drm_gem_object_unreference_unlocked(&bo->gem);
 		return -ENOMEM;
 	}
 
 	fbdev->fb = tegra_fb_alloc(drm, &cmd, &bo, 1);
 	if (IS_ERR(fbdev->fb)) {
-		dev_err(drm->dev, "failed to allocate DRM framebuffer\n");
 		err = PTR_ERR(fbdev->fb);
+		dev_err(drm->dev, "failed to allocate DRM framebuffer: %d\n",
+			err);
+		drm_gem_object_unreference_unlocked(&bo->gem);
 		goto release;
 	}
 
@@ -254,6 +260,16 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper,
 	offset = info->var.xoffset * bytes_per_pixel +
 		 info->var.yoffset * fb->pitches[0];
 
+	if (bo->pages) {
+		bo->vaddr = vmap(bo->pages, bo->num_pages, VM_MAP,
+				 pgprot_writecombine(PAGE_KERNEL));
+		if (!bo->vaddr) {
+			dev_err(drm->dev, "failed to vmap() framebuffer\n");
+			err = -ENOMEM;
+			goto destroy;
+		}
+	}
+
 	drm->mode_config.fb_base = (resource_size_t)bo->paddr;
 	info->screen_base = (void __iomem *)bo->vaddr + offset;
 	info->screen_size = size;
@@ -289,6 +305,11 @@ static struct tegra_fbdev *tegra_fbdev_create(struct drm_device *drm)
 	return fbdev;
 }
 
+static void tegra_fbdev_free(struct tegra_fbdev *fbdev)
+{
+	kfree(fbdev);
+}
+
 static int tegra_fbdev_init(struct tegra_fbdev *fbdev,
 			    unsigned int preferred_bpp,
 			    unsigned int num_crtc,
@@ -299,19 +320,21 @@ static int tegra_fbdev_init(struct tegra_fbdev *fbdev,
299 320
300 err = drm_fb_helper_init(drm, &fbdev->base, num_crtc, max_connectors); 321 err = drm_fb_helper_init(drm, &fbdev->base, num_crtc, max_connectors);
301 if (err < 0) { 322 if (err < 0) {
302 dev_err(drm->dev, "failed to initialize DRM FB helper\n"); 323 dev_err(drm->dev, "failed to initialize DRM FB helper: %d\n",
324 err);
303 return err; 325 return err;
304 } 326 }
305 327
306 err = drm_fb_helper_single_add_all_connectors(&fbdev->base); 328 err = drm_fb_helper_single_add_all_connectors(&fbdev->base);
307 if (err < 0) { 329 if (err < 0) {
308 dev_err(drm->dev, "failed to add connectors\n"); 330 dev_err(drm->dev, "failed to add connectors: %d\n", err);
309 goto fini; 331 goto fini;
310 } 332 }
311 333
312 err = drm_fb_helper_initial_config(&fbdev->base, preferred_bpp); 334 err = drm_fb_helper_initial_config(&fbdev->base, preferred_bpp);
313 if (err < 0) { 335 if (err < 0) {
314 dev_err(drm->dev, "failed to set initial configuration\n"); 336 dev_err(drm->dev, "failed to set initial configuration: %d\n",
337 err);
315 goto fini; 338 goto fini;
316 } 339 }
317 340
@@ -322,7 +345,7 @@ fini:
322 return err; 345 return err;
323} 346}
324 347
325static void tegra_fbdev_free(struct tegra_fbdev *fbdev) 348static void tegra_fbdev_exit(struct tegra_fbdev *fbdev)
326{ 349{
327 struct fb_info *info = fbdev->base.fbdev; 350 struct fb_info *info = fbdev->base.fbdev;
328 351
@@ -341,11 +364,11 @@ static void tegra_fbdev_free(struct tegra_fbdev *fbdev)
341 364
342 if (fbdev->fb) { 365 if (fbdev->fb) {
343 drm_framebuffer_unregister_private(&fbdev->fb->base); 366 drm_framebuffer_unregister_private(&fbdev->fb->base);
344 tegra_fb_destroy(&fbdev->fb->base); 367 drm_framebuffer_remove(&fbdev->fb->base);
345 } 368 }
346 369
347 drm_fb_helper_fini(&fbdev->base); 370 drm_fb_helper_fini(&fbdev->base);
348 kfree(fbdev); 371 tegra_fbdev_free(fbdev);
349} 372}
350 373
351void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev) 374void tegra_fbdev_restore_mode(struct tegra_fbdev *fbdev)
@@ -393,6 +416,15 @@ int tegra_drm_fb_prepare(struct drm_device *drm)
393 return 0; 416 return 0;
394} 417}
395 418
419void tegra_drm_fb_free(struct drm_device *drm)
420{
421#ifdef CONFIG_DRM_TEGRA_FBDEV
422 struct tegra_drm *tegra = drm->dev_private;
423
424 tegra_fbdev_free(tegra->fbdev);
425#endif
426}
427
396int tegra_drm_fb_init(struct drm_device *drm) 428int tegra_drm_fb_init(struct drm_device *drm)
397{ 429{
398#ifdef CONFIG_DRM_TEGRA_FBDEV 430#ifdef CONFIG_DRM_TEGRA_FBDEV
@@ -413,6 +445,6 @@ void tegra_drm_fb_exit(struct drm_device *drm)
413#ifdef CONFIG_DRM_TEGRA_FBDEV 445#ifdef CONFIG_DRM_TEGRA_FBDEV
414 struct tegra_drm *tegra = drm->dev_private; 446 struct tegra_drm *tegra = drm->dev_private;
415 447
416 tegra_fbdev_free(tegra->fbdev); 448 tegra_fbdev_exit(tegra->fbdev);
417#endif 449#endif
418} 450}
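Note: with IOMMU-backed buffer objects the framebuffer no longer gets a kernel virtual address from the DMA API, so fbdev now vmap()s the backing pages itself and vunmap()s them in tegra_fb_destroy(). A reduced sketch of that pattern, with error handling and the surrounding tegra types stripped:

#include <linux/vmalloc.h>

/* map discrete backing pages into one contiguous, write-combined
 * kernel virtual range, suitable for fbdev's screen_base */
static void *map_fb_pages(struct page **pages, unsigned int num_pages)
{
	return vmap(pages, num_pages, VM_MAP,
		    pgprot_writecombine(PAGE_KERNEL));
}

/* matching teardown once the framebuffer goes away */
static void unmap_fb_pages(void *vaddr)
{
	vunmap(vaddr);
}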
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index ce023fa3e8ae..da32086cbeaf 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -14,6 +14,7 @@
14 */ 14 */
15 15
16#include <linux/dma-buf.h> 16#include <linux/dma-buf.h>
17#include <linux/iommu.h>
17#include <drm/tegra_drm.h> 18#include <drm/tegra_drm.h>
18 19
19#include "drm.h" 20#include "drm.h"
@@ -91,13 +92,90 @@ static const struct host1x_bo_ops tegra_bo_ops = {
91 .kunmap = tegra_bo_kunmap, 92 .kunmap = tegra_bo_kunmap,
92}; 93};
93 94
94static void tegra_bo_destroy(struct drm_device *drm, struct tegra_bo *bo) 95/*
96 * A generic iommu_map_sg() function is being reviewed and will hopefully be
97 * merged soon. At that point this function can be dropped in favour of the
98 * one provided by the IOMMU API.
99 */
100static ssize_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
101 struct scatterlist *sg, unsigned int nents,
102 int prot)
95{ 103{
96 dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr, bo->paddr); 104 struct scatterlist *s;
105 size_t offset = 0;
106 unsigned int i;
107 int err;
108
109 for_each_sg(sg, s, nents, i) {
110 phys_addr_t phys = page_to_phys(sg_page(s));
111 size_t length = s->offset + s->length;
112
113 err = iommu_map(domain, iova + offset, phys, length, prot);
114 if (err < 0) {
115 iommu_unmap(domain, iova, offset);
116 return err;
117 }
118
119 offset += length;
120 }
121
122 return offset;
97} 123}
98 124
99struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size, 125static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
100 unsigned long flags) 126{
127 int prot = IOMMU_READ | IOMMU_WRITE;
128 ssize_t err;
129
130 if (bo->mm)
131 return -EBUSY;
132
133 bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
134 if (!bo->mm)
135 return -ENOMEM;
136
137 err = drm_mm_insert_node_generic(&tegra->mm, bo->mm, bo->gem.size,
138 PAGE_SIZE, 0, 0, 0);
139 if (err < 0) {
140 dev_err(tegra->drm->dev, "out of I/O virtual memory: %zd\n",
141 err);
142 goto free;
143 }
144
145 bo->paddr = bo->mm->start;
146
147 err = __iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl,
148 bo->sgt->nents, prot);
149 if (err < 0) {
150 dev_err(tegra->drm->dev, "failed to map buffer: %zd\n", err);
151 goto remove;
152 }
153
154 bo->size = err;
155
156 return 0;
157
158remove:
159 drm_mm_remove_node(bo->mm);
160free:
161 kfree(bo->mm);
162 return err;
163}
164
165static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
166{
167 if (!bo->mm)
168 return 0;
169
170 iommu_unmap(tegra->domain, bo->paddr, bo->size);
171 drm_mm_remove_node(bo->mm);
172 kfree(bo->mm);
173
174 return 0;
175}
176
177static struct tegra_bo *tegra_bo_alloc_object(struct drm_device *drm,
178 size_t size)
101{ 179{
102 struct tegra_bo *bo; 180 struct tegra_bo *bo;
103 int err; 181 int err;
@@ -109,22 +187,96 @@ struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size,
109 host1x_bo_init(&bo->base, &tegra_bo_ops); 187 host1x_bo_init(&bo->base, &tegra_bo_ops);
110 size = round_up(size, PAGE_SIZE); 188 size = round_up(size, PAGE_SIZE);
111 189
112 bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr,
113 GFP_KERNEL | __GFP_NOWARN);
114 if (!bo->vaddr) {
115 dev_err(drm->dev, "failed to allocate buffer with size %u\n",
116 size);
117 err = -ENOMEM;
118 goto err_dma;
119 }
120
121 err = drm_gem_object_init(drm, &bo->gem, size); 190 err = drm_gem_object_init(drm, &bo->gem, size);
122 if (err) 191 if (err < 0)
123 goto err_init; 192 goto free;
124 193
125 err = drm_gem_create_mmap_offset(&bo->gem); 194 err = drm_gem_create_mmap_offset(&bo->gem);
126 if (err) 195 if (err < 0)
127 goto err_mmap; 196 goto release;
197
198 return bo;
199
200release:
201 drm_gem_object_release(&bo->gem);
202free:
203 kfree(bo);
204 return ERR_PTR(err);
205}
206
207static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
208{
209 if (bo->pages) {
210 drm_gem_put_pages(&bo->gem, bo->pages, true, true);
211 sg_free_table(bo->sgt);
212 kfree(bo->sgt);
213 } else if (bo->vaddr) {
214 dma_free_writecombine(drm->dev, bo->gem.size, bo->vaddr,
215 bo->paddr);
216 }
217}
218
219static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo,
220 size_t size)
221{
222 bo->pages = drm_gem_get_pages(&bo->gem);
223 if (IS_ERR(bo->pages))
224 return PTR_ERR(bo->pages);
225
226 bo->num_pages = size >> PAGE_SHIFT;
227
228 bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
229 if (IS_ERR(bo->sgt)) {
230 drm_gem_put_pages(&bo->gem, bo->pages, false, false);
231 return PTR_ERR(bo->sgt);
232 }
233
234 return 0;
235}
236
237static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo,
238 size_t size)
239{
240 struct tegra_drm *tegra = drm->dev_private;
241 int err;
242
243 if (tegra->domain) {
244 err = tegra_bo_get_pages(drm, bo, size);
245 if (err < 0)
246 return err;
247
248 err = tegra_bo_iommu_map(tegra, bo);
249 if (err < 0) {
250 tegra_bo_free(drm, bo);
251 return err;
252 }
253 } else {
254 bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr,
255 GFP_KERNEL | __GFP_NOWARN);
256 if (!bo->vaddr) {
257 dev_err(drm->dev,
258 "failed to allocate buffer of size %zu\n",
259 size);
260 return -ENOMEM;
261 }
262 }
263
264 return 0;
265}
266
267struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
268 unsigned long flags)
269{
270 struct tegra_bo *bo;
271 int err;
272
273 bo = tegra_bo_alloc_object(drm, size);
274 if (IS_ERR(bo))
275 return bo;
276
277 err = tegra_bo_alloc(drm, bo, size);
278 if (err < 0)
279 goto release;
128 280
129 if (flags & DRM_TEGRA_GEM_CREATE_TILED) 281 if (flags & DRM_TEGRA_GEM_CREATE_TILED)
130 bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED; 282 bo->tiling.mode = TEGRA_BO_TILING_MODE_TILED;
@@ -134,69 +286,52 @@ struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size,
134 286
135 return bo; 287 return bo;
136 288
137err_mmap: 289release:
138 drm_gem_object_release(&bo->gem); 290 drm_gem_object_release(&bo->gem);
139err_init:
140 tegra_bo_destroy(drm, bo);
141err_dma:
142 kfree(bo); 291 kfree(bo);
143
144 return ERR_PTR(err); 292 return ERR_PTR(err);
145} 293}
146 294
147struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file, 295struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
148 struct drm_device *drm, 296 struct drm_device *drm,
149 unsigned int size, 297 size_t size,
150 unsigned long flags, 298 unsigned long flags,
151 unsigned int *handle) 299 u32 *handle)
152{ 300{
153 struct tegra_bo *bo; 301 struct tegra_bo *bo;
154 int ret; 302 int err;
155 303
156 bo = tegra_bo_create(drm, size, flags); 304 bo = tegra_bo_create(drm, size, flags);
157 if (IS_ERR(bo)) 305 if (IS_ERR(bo))
158 return bo; 306 return bo;
159 307
160 ret = drm_gem_handle_create(file, &bo->gem, handle); 308 err = drm_gem_handle_create(file, &bo->gem, handle);
161 if (ret) 309 if (err) {
162 goto err; 310 tegra_bo_free_object(&bo->gem);
311 return ERR_PTR(err);
312 }
163 313
164 drm_gem_object_unreference_unlocked(&bo->gem); 314 drm_gem_object_unreference_unlocked(&bo->gem);
165 315
166 return bo; 316 return bo;
167
168err:
169 tegra_bo_free_object(&bo->gem);
170 return ERR_PTR(ret);
171} 317}
172 318
173static struct tegra_bo *tegra_bo_import(struct drm_device *drm, 319static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
174 struct dma_buf *buf) 320 struct dma_buf *buf)
175{ 321{
322 struct tegra_drm *tegra = drm->dev_private;
176 struct dma_buf_attachment *attach; 323 struct dma_buf_attachment *attach;
177 struct tegra_bo *bo; 324 struct tegra_bo *bo;
178 ssize_t size;
179 int err; 325 int err;
180 326
181 bo = kzalloc(sizeof(*bo), GFP_KERNEL); 327 bo = tegra_bo_alloc_object(drm, buf->size);
182 if (!bo) 328 if (IS_ERR(bo))
183 return ERR_PTR(-ENOMEM); 329 return bo;
184
185 host1x_bo_init(&bo->base, &tegra_bo_ops);
186 size = round_up(buf->size, PAGE_SIZE);
187
188 err = drm_gem_object_init(drm, &bo->gem, size);
189 if (err < 0)
190 goto free;
191
192 err = drm_gem_create_mmap_offset(&bo->gem);
193 if (err < 0)
194 goto release;
195 330
196 attach = dma_buf_attach(buf, drm->dev); 331 attach = dma_buf_attach(buf, drm->dev);
197 if (IS_ERR(attach)) { 332 if (IS_ERR(attach)) {
198 err = PTR_ERR(attach); 333 err = PTR_ERR(attach);
199 goto free_mmap; 334 goto free;
200 } 335 }
201 336
202 get_dma_buf(buf); 337 get_dma_buf(buf);
@@ -212,12 +347,19 @@ static struct tegra_bo *tegra_bo_import(struct drm_device *drm,
212 goto detach; 347 goto detach;
213 } 348 }
214 349
215 if (bo->sgt->nents > 1) { 350 if (tegra->domain) {
216 err = -EINVAL; 351 err = tegra_bo_iommu_map(tegra, bo);
217 goto detach; 352 if (err < 0)
353 goto detach;
354 } else {
355 if (bo->sgt->nents > 1) {
356 err = -EINVAL;
357 goto detach;
358 }
359
360 bo->paddr = sg_dma_address(bo->sgt->sgl);
218 } 361 }
219 362
220 bo->paddr = sg_dma_address(bo->sgt->sgl);
221 bo->gem.import_attach = attach; 363 bo->gem.import_attach = attach;
222 364
223 return bo; 365 return bo;
@@ -228,47 +370,41 @@ detach:
228 370
229 dma_buf_detach(buf, attach); 371 dma_buf_detach(buf, attach);
230 dma_buf_put(buf); 372 dma_buf_put(buf);
231free_mmap:
232 drm_gem_free_mmap_offset(&bo->gem);
233release:
234 drm_gem_object_release(&bo->gem);
235free: 373free:
374 drm_gem_object_release(&bo->gem);
236 kfree(bo); 375 kfree(bo);
237
238 return ERR_PTR(err); 376 return ERR_PTR(err);
239} 377}
240 378
241void tegra_bo_free_object(struct drm_gem_object *gem) 379void tegra_bo_free_object(struct drm_gem_object *gem)
242{ 380{
381 struct tegra_drm *tegra = gem->dev->dev_private;
243 struct tegra_bo *bo = to_tegra_bo(gem); 382 struct tegra_bo *bo = to_tegra_bo(gem);
244 383
384 if (tegra->domain)
385 tegra_bo_iommu_unmap(tegra, bo);
386
245 if (gem->import_attach) { 387 if (gem->import_attach) {
246 dma_buf_unmap_attachment(gem->import_attach, bo->sgt, 388 dma_buf_unmap_attachment(gem->import_attach, bo->sgt,
247 DMA_TO_DEVICE); 389 DMA_TO_DEVICE);
248 drm_prime_gem_destroy(gem, NULL); 390 drm_prime_gem_destroy(gem, NULL);
249 } else { 391 } else {
250 tegra_bo_destroy(gem->dev, bo); 392 tegra_bo_free(gem->dev, bo);
251 } 393 }
252 394
253 drm_gem_free_mmap_offset(gem);
254 drm_gem_object_release(gem); 395 drm_gem_object_release(gem);
255
256 kfree(bo); 396 kfree(bo);
257} 397}
258 398
259int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm, 399int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
260 struct drm_mode_create_dumb *args) 400 struct drm_mode_create_dumb *args)
261{ 401{
262 int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8); 402 unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
263 struct tegra_drm *tegra = drm->dev_private; 403 struct tegra_drm *tegra = drm->dev_private;
264 struct tegra_bo *bo; 404 struct tegra_bo *bo;
265 405
266 min_pitch = round_up(min_pitch, tegra->pitch_align); 406 args->pitch = round_up(min_pitch, tegra->pitch_align);
267 if (args->pitch < min_pitch) 407 args->size = args->pitch * args->height;
268 args->pitch = min_pitch;
269
270 if (args->size < args->pitch * args->height)
271 args->size = args->pitch * args->height;
272 408
273 bo = tegra_bo_create_with_handle(file, drm, args->size, 0, 409 bo = tegra_bo_create_with_handle(file, drm, args->size, 0,
274 &args->handle); 410 &args->handle);
@@ -279,7 +415,7 @@ int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
279} 415}
280 416
281int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm, 417int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
282 uint32_t handle, uint64_t *offset) 418 u32 handle, u64 *offset)
283{ 419{
284 struct drm_gem_object *gem; 420 struct drm_gem_object *gem;
285 struct tegra_bo *bo; 421 struct tegra_bo *bo;
@@ -304,7 +440,38 @@ int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
304 return 0; 440 return 0;
305} 441}
306 442
443static int tegra_bo_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
444{
445 struct drm_gem_object *gem = vma->vm_private_data;
446 struct tegra_bo *bo = to_tegra_bo(gem);
447 struct page *page;
448 pgoff_t offset;
449 int err;
450
451 if (!bo->pages)
452 return VM_FAULT_SIGBUS;
453
454 offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> PAGE_SHIFT;
455 page = bo->pages[offset];
456
457 err = vm_insert_page(vma, (unsigned long)vmf->virtual_address, page);
458 switch (err) {
459 case -EAGAIN:
460 case 0:
461 case -ERESTARTSYS:
462 case -EINTR:
463 case -EBUSY:
464 return VM_FAULT_NOPAGE;
465
466 case -ENOMEM:
467 return VM_FAULT_OOM;
468 }
469
470 return VM_FAULT_SIGBUS;
471}
472
307const struct vm_operations_struct tegra_bo_vm_ops = { 473const struct vm_operations_struct tegra_bo_vm_ops = {
474 .fault = tegra_bo_fault,
308 .open = drm_gem_vm_open, 475 .open = drm_gem_vm_open,
309 .close = drm_gem_vm_close, 476 .close = drm_gem_vm_close,
310}; 477};
@@ -322,12 +489,30 @@ int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma)
322 gem = vma->vm_private_data; 489 gem = vma->vm_private_data;
323 bo = to_tegra_bo(gem); 490 bo = to_tegra_bo(gem);
324 491
325 ret = remap_pfn_range(vma, vma->vm_start, bo->paddr >> PAGE_SHIFT, 492 if (!bo->pages) {
326 vma->vm_end - vma->vm_start, vma->vm_page_prot); 493 unsigned long vm_pgoff = vma->vm_pgoff;
327 if (ret) 494
328 drm_gem_vm_close(vma); 495 vma->vm_flags &= ~VM_PFNMAP;
496 vma->vm_pgoff = 0;
497
498 ret = dma_mmap_writecombine(gem->dev->dev, vma, bo->vaddr,
499 bo->paddr, gem->size);
500 if (ret) {
501 drm_gem_vm_close(vma);
502 return ret;
503 }
504
505 vma->vm_pgoff = vm_pgoff;
506 } else {
507 pgprot_t prot = vm_get_page_prot(vma->vm_flags);
508
509 vma->vm_flags |= VM_MIXEDMAP;
510 vma->vm_flags &= ~VM_PFNMAP;
329 511
330 return ret; 512 vma->vm_page_prot = pgprot_writecombine(prot);
513 }
514
515 return 0;
331} 516}
332 517
333static struct sg_table * 518static struct sg_table *
@@ -342,21 +527,44 @@ tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
342 if (!sgt) 527 if (!sgt)
343 return NULL; 528 return NULL;
344 529
345 if (sg_alloc_table(sgt, 1, GFP_KERNEL)) { 530 if (bo->pages) {
346 kfree(sgt); 531 struct scatterlist *sg;
347 return NULL; 532 unsigned int i;
348 }
349 533
350 sg_dma_address(sgt->sgl) = bo->paddr; 534 if (sg_alloc_table(sgt, bo->num_pages, GFP_KERNEL))
351 sg_dma_len(sgt->sgl) = gem->size; 535 goto free;
536
537 for_each_sg(sgt->sgl, sg, bo->num_pages, i)
538 sg_set_page(sg, bo->pages[i], PAGE_SIZE, 0);
539
540 if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
541 goto free;
542 } else {
543 if (sg_alloc_table(sgt, 1, GFP_KERNEL))
544 goto free;
545
546 sg_dma_address(sgt->sgl) = bo->paddr;
547 sg_dma_len(sgt->sgl) = gem->size;
548 }
352 549
353 return sgt; 550 return sgt;
551
552free:
553 sg_free_table(sgt);
554 kfree(sgt);
555 return NULL;
354} 556}
355 557
356static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach, 558static void tegra_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
357 struct sg_table *sgt, 559 struct sg_table *sgt,
358 enum dma_data_direction dir) 560 enum dma_data_direction dir)
359{ 561{
562 struct drm_gem_object *gem = attach->dmabuf->priv;
563 struct tegra_bo *bo = to_tegra_bo(gem);
564
565 if (bo->pages)
566 dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
567
360 sg_free_table(sgt); 568 sg_free_table(sgt);
361 kfree(sgt); 569 kfree(sgt);
362} 570}
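Note: the allocation rework gives tegra_bo_create() two backends: shmem pages mapped through the IOMMU when tegra->domain exists, and a contiguous dma_alloc_writecombine() buffer otherwise. A caller-side sketch of the resulting contract (the helper name is illustrative, not part of the driver):

static int example_bo_roundtrip(struct drm_device *drm)
{
	struct tegra_bo *bo;

	bo = tegra_bo_create(drm, SZ_64K, 0);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	/*
	 * bo->paddr is now an I/O virtual address carved out of tegra->mm
	 * (IOMMU case) or a physical address from the DMA API; bo->size
	 * records the IOMMU mapping size for the unmap on final free.
	 */
	drm_gem_object_unreference_unlocked(&bo->gem);
	return 0;
}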
diff --git a/drivers/gpu/drm/tegra/gem.h b/drivers/gpu/drm/tegra/gem.h
index 6538b56780c2..6c5f12ac0087 100644
--- a/drivers/gpu/drm/tegra/gem.h
+++ b/drivers/gpu/drm/tegra/gem.h
@@ -38,6 +38,12 @@ struct tegra_bo {
38 dma_addr_t paddr; 38 dma_addr_t paddr;
39 void *vaddr; 39 void *vaddr;
40 40
41 struct drm_mm_node *mm;
42 unsigned long num_pages;
43 struct page **pages;
44 /* size of IOMMU mapping */
45 size_t size;
46
41 struct tegra_bo_tiling tiling; 47 struct tegra_bo_tiling tiling;
42}; 48};
43 49
@@ -46,18 +52,18 @@ static inline struct tegra_bo *to_tegra_bo(struct drm_gem_object *gem)
46 return container_of(gem, struct tegra_bo, gem); 52 return container_of(gem, struct tegra_bo, gem);
47} 53}
48 54
49struct tegra_bo *tegra_bo_create(struct drm_device *drm, unsigned int size, 55struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
50 unsigned long flags); 56 unsigned long flags);
51struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file, 57struct tegra_bo *tegra_bo_create_with_handle(struct drm_file *file,
52 struct drm_device *drm, 58 struct drm_device *drm,
53 unsigned int size, 59 size_t size,
54 unsigned long flags, 60 unsigned long flags,
55 unsigned int *handle); 61 u32 *handle);
56void tegra_bo_free_object(struct drm_gem_object *gem); 62void tegra_bo_free_object(struct drm_gem_object *gem);
57int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm, 63int tegra_bo_dumb_create(struct drm_file *file, struct drm_device *drm,
58 struct drm_mode_create_dumb *args); 64 struct drm_mode_create_dumb *args);
59int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm, 65int tegra_bo_dumb_map_offset(struct drm_file *file, struct drm_device *drm,
60 uint32_t handle, uint64_t *offset); 66 u32 handle, u64 *offset);
61 67
62int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma); 68int tegra_drm_mmap(struct file *file, struct vm_area_struct *vma);
63 69
diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c
index 0c67d7eebc94..6a5c7b81fbc5 100644
--- a/drivers/gpu/drm/tegra/output.c
+++ b/drivers/gpu/drm/tegra/output.c
@@ -157,22 +157,18 @@ static bool tegra_encoder_mode_fixup(struct drm_encoder *encoder,
157 157
158static void tegra_encoder_prepare(struct drm_encoder *encoder) 158static void tegra_encoder_prepare(struct drm_encoder *encoder)
159{ 159{
160 tegra_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
160} 161}
161 162
162static void tegra_encoder_commit(struct drm_encoder *encoder) 163static void tegra_encoder_commit(struct drm_encoder *encoder)
163{ 164{
165 tegra_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
164} 166}
165 167
166static void tegra_encoder_mode_set(struct drm_encoder *encoder, 168static void tegra_encoder_mode_set(struct drm_encoder *encoder,
167 struct drm_display_mode *mode, 169 struct drm_display_mode *mode,
168 struct drm_display_mode *adjusted) 170 struct drm_display_mode *adjusted)
169{ 171{
170 struct tegra_output *output = encoder_to_output(encoder);
171 int err;
172
173 err = tegra_output_enable(output);
174 if (err < 0)
175 dev_err(encoder->dev->dev, "tegra_output_enable(): %d\n", err);
176} 172}
177 173
178static const struct drm_encoder_helper_funcs encoder_helper_funcs = { 174static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
@@ -187,7 +183,8 @@ static irqreturn_t hpd_irq(int irq, void *data)
187{ 183{
188 struct tegra_output *output = data; 184 struct tegra_output *output = data;
189 185
190 drm_helper_hpd_irq_event(output->connector.dev); 186 if (output->connector.dev)
187 drm_helper_hpd_irq_event(output->connector.dev);
191 188
192 return IRQ_HANDLED; 189 return IRQ_HANDLED;
193} 190}
@@ -259,6 +256,13 @@ int tegra_output_probe(struct tegra_output *output)
259 } 256 }
260 257
261 output->connector.polled = DRM_CONNECTOR_POLL_HPD; 258 output->connector.polled = DRM_CONNECTOR_POLL_HPD;
259
260 /*
261 * Disable the interrupt until the connector has been
262 * initialized to avoid a race in the hotplug interrupt
263 * handler.
264 */
265 disable_irq(output->hpd_irq);
262 } 266 }
263 267
264 return 0; 268 return 0;
@@ -324,10 +328,27 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output)
324 328
325 output->encoder.possible_crtcs = 0x3; 329 output->encoder.possible_crtcs = 0x3;
326 330
331 /*
332 * The connector is now registered and ready to receive hotplug events
333 * so the hotplug interrupt can be enabled.
334 */
335 if (gpio_is_valid(output->hpd_gpio))
336 enable_irq(output->hpd_irq);
337
327 return 0; 338 return 0;
328} 339}
329 340
330int tegra_output_exit(struct tegra_output *output) 341int tegra_output_exit(struct tegra_output *output)
331{ 342{
343 /*
344 * The connector is going away, so the interrupt must be disabled to
345 * prevent the hotplug interrupt handler from potentially crashing.
346 */
347 if (gpio_is_valid(output->hpd_gpio))
348 disable_irq(output->hpd_irq);
349
350 if (output->panel)
351 drm_panel_detach(output->panel);
352
332 return 0; 353 return 0;
333} 354}
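Note: the output.c hunks close a probe-time race: the HPD interrupt is requested before the connector exists, so it is now masked until tegra_output_init() has registered the connector, and masked again in tegra_output_exit() before the connector is torn down. The ordering, reduced to a sketch with illustrative flags:

	/* probe: the handler may fire as soon as this returns */
	err = request_threaded_irq(output->hpd_irq, NULL, hpd_irq,
				   irqflags, "hpd", output);
	if (err < 0)
		return err;

	/* connector not registered yet, keep the line masked */
	disable_irq(output->hpd_irq);

	/* init: drm_connector_init() done, events are safe to deliver */
	enable_irq(output->hpd_irq);

	/* exit: quiesce before the connector goes away */
	disable_irq(output->hpd_irq);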
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index d642d4a02134..c73588483be0 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -16,6 +16,7 @@
16 */ 16 */
17 17
18#include "drm_flip_work.h" 18#include "drm_flip_work.h"
19#include <drm/drm_plane_helper.h>
19 20
20#include "tilcdc_drv.h" 21#include "tilcdc_drv.h"
21#include "tilcdc_regs.h" 22#include "tilcdc_regs.h"
@@ -664,12 +665,8 @@ struct drm_crtc *tilcdc_crtc_create(struct drm_device *dev)
664 tilcdc_crtc->dpms = DRM_MODE_DPMS_OFF; 665 tilcdc_crtc->dpms = DRM_MODE_DPMS_OFF;
665 init_waitqueue_head(&tilcdc_crtc->frame_done_wq); 666 init_waitqueue_head(&tilcdc_crtc->frame_done_wq);
666 667
667 ret = drm_flip_work_init(&tilcdc_crtc->unref_work, 16, 668 drm_flip_work_init(&tilcdc_crtc->unref_work,
668 "unref", unref_worker); 669 "unref", unref_worker);
669 if (ret) {
670 dev_err(dev->dev, "could not allocate unref FIFO\n");
671 goto fail;
672 }
673 670
674 ret = drm_crtc_init(dev, crtc, &tilcdc_crtc_funcs); 671 ret = drm_crtc_init(dev, crtc, &tilcdc_crtc_funcs);
675 if (ret < 0) 672 if (ret < 0)
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index f8546824d177..095fca91525c 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -58,8 +58,7 @@ static struct drm_framebuffer *tilcdc_fb_create(struct drm_device *dev,
58static void tilcdc_fb_output_poll_changed(struct drm_device *dev) 58static void tilcdc_fb_output_poll_changed(struct drm_device *dev)
59{ 59{
60 struct tilcdc_drm_private *priv = dev->dev_private; 60 struct tilcdc_drm_private *priv = dev->dev_private;
61 if (priv->fbdev) 61 drm_fbdev_cma_hotplug_event(priv->fbdev);
62 drm_fbdev_cma_hotplug_event(priv->fbdev);
63} 62}
64 63
65static const struct drm_mode_config_funcs mode_config_funcs = { 64static const struct drm_mode_config_funcs mode_config_funcs = {
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index 964387fc5c8f..aa0bd054d3e9 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -55,6 +55,7 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
55 struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv; 55 struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
56 struct drm_mm *mm = &rman->mm; 56 struct drm_mm *mm = &rman->mm;
57 struct drm_mm_node *node = NULL; 57 struct drm_mm_node *node = NULL;
58 enum drm_mm_search_flags sflags = DRM_MM_SEARCH_BEST;
58 enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT; 59 enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;
59 unsigned long lpfn; 60 unsigned long lpfn;
60 int ret; 61 int ret;
@@ -67,15 +68,16 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
67 if (!node) 68 if (!node)
68 return -ENOMEM; 69 return -ENOMEM;
69 70
70 if (place->flags & TTM_PL_FLAG_TOPDOWN) 71 if (place->flags & TTM_PL_FLAG_TOPDOWN) {
72 sflags = DRM_MM_SEARCH_BELOW;
71 aflags = DRM_MM_CREATE_TOP; 73 aflags = DRM_MM_CREATE_TOP;
74 }
72 75
73 spin_lock(&rman->lock); 76 spin_lock(&rman->lock);
74 ret = drm_mm_insert_node_in_range_generic(mm, node, mem->num_pages, 77 ret = drm_mm_insert_node_in_range_generic(mm, node, mem->num_pages,
75 mem->page_alignment, 0, 78 mem->page_alignment, 0,
76 place->fpfn, lpfn, 79 place->fpfn, lpfn,
77 DRM_MM_SEARCH_BEST, 80 sflags, aflags);
78 aflags);
79 spin_unlock(&rman->lock); 81 spin_unlock(&rman->lock);
80 82
81 if (unlikely(ret)) { 83 if (unlikely(ret)) {
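Note: the ttm_bo_manager.c fix addresses TTM_PL_FLAG_TOPDOWN placement. DRM_MM_CREATE_TOP alone only places the node at the top of whichever hole the search picked, while the search itself still scanned with the default best-fit strategy; pairing it with DRM_MM_SEARCH_BELOW makes the search walk from the top of the range as well, so top-down allocations actually land high. In sketch form:

	enum drm_mm_search_flags sflags = DRM_MM_SEARCH_BEST;
	enum drm_mm_allocator_flags aflags = DRM_MM_CREATE_DEFAULT;

	if (place->flags & TTM_PL_FLAG_TOPDOWN) {
		/* both halves are needed: search from the top of the
		 * range AND allocate at the top of the found hole */
		sflags = DRM_MM_SEARCH_BELOW;
		aflags = DRM_MM_CREATE_TOP;
	}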
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 8ce508e76208..3820ae97a030 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -93,7 +93,8 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation);
93 */ 93 */
94 94
95int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, 95int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
96 struct list_head *list, bool intr) 96 struct list_head *list, bool intr,
97 struct list_head *dups)
97{ 98{
98 struct ttm_bo_global *glob; 99 struct ttm_bo_global *glob;
99 struct ttm_validate_buffer *entry; 100 struct ttm_validate_buffer *entry;
@@ -117,6 +118,13 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
117 __ttm_bo_unreserve(bo); 118 __ttm_bo_unreserve(bo);
118 119
119 ret = -EBUSY; 120 ret = -EBUSY;
121
122 } else if (ret == -EALREADY && dups) {
123 struct ttm_validate_buffer *safe = entry;
124 entry = list_prev_entry(entry, head);
125 list_del(&safe->head);
126 list_add(&safe->head, dups);
127 continue;
120 } 128 }
121 129
122 if (!ret) { 130 if (!ret) {
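Note: ttm_eu_reserve_buffers() grows a 'dups' list. Reserving the same BO twice in one validation list used to fail outright with -EALREADY; now the duplicate entry can be handed back to the caller instead. Caller-side sketch (list names are illustrative):

	struct list_head duplicates;
	int ret;

	INIT_LIST_HEAD(&duplicates);

	ret = ttm_eu_reserve_buffers(&ticket, &validate_list, true,
				     &duplicates);
	if (ret)
		return ret;

	/*
	 * Entries moved onto 'duplicates' are already reserved through
	 * their first occurrence on validate_list; passing NULL for dups
	 * restores the old fail-on-duplicate behaviour.
	 */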
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 09874d695188..025c429050c0 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -297,11 +297,12 @@ static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
297 * 297 *
298 * @pool: to free the pages from 298 * @pool: to free the pages from
299 * @free_all: If set to true will free all pages in pool 299 * @free_all: If set to true will free all pages in pool
300 * @gfp: GFP flags. 300 * @use_static: Safe to use static buffer
301 **/ 301 **/
302static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free, 302static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
303 gfp_t gfp) 303 bool use_static)
304{ 304{
305 static struct page *static_buf[NUM_PAGES_TO_ALLOC];
305 unsigned long irq_flags; 306 unsigned long irq_flags;
306 struct page *p; 307 struct page *p;
307 struct page **pages_to_free; 308 struct page **pages_to_free;
@@ -311,7 +312,11 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
311 if (NUM_PAGES_TO_ALLOC < nr_free) 312 if (NUM_PAGES_TO_ALLOC < nr_free)
312 npages_to_free = NUM_PAGES_TO_ALLOC; 313 npages_to_free = NUM_PAGES_TO_ALLOC;
313 314
314 pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp); 315 if (use_static)
316 pages_to_free = static_buf;
317 else
318 pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
319 GFP_KERNEL);
315 if (!pages_to_free) { 320 if (!pages_to_free) {
316 pr_err("Failed to allocate memory for pool free operation\n"); 321 pr_err("Failed to allocate memory for pool free operation\n");
317 return 0; 322 return 0;
@@ -374,7 +379,8 @@ restart:
374 if (freed_pages) 379 if (freed_pages)
375 ttm_pages_put(pages_to_free, freed_pages); 380 ttm_pages_put(pages_to_free, freed_pages);
376out: 381out:
377 kfree(pages_to_free); 382 if (pages_to_free != static_buf)
383 kfree(pages_to_free);
378 return nr_free; 384 return nr_free;
379} 385}
380 386
@@ -383,8 +389,6 @@ out:
383 * 389 *
384 * XXX: (dchinner) Deadlock warning! 390 * XXX: (dchinner) Deadlock warning!
385 * 391 *
386 * We need to pass sc->gfp_mask to ttm_page_pool_free().
387 *
388 * This code is crying out for a shrinker per pool.... 392 * This code is crying out for a shrinker per pool....
389 */ 393 */
390static unsigned long 394static unsigned long
@@ -407,8 +411,8 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
407 if (shrink_pages == 0) 411 if (shrink_pages == 0)
408 break; 412 break;
409 pool = &_manager->pools[(i + pool_offset)%NUM_POOLS]; 413 pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
410 shrink_pages = ttm_page_pool_free(pool, nr_free, 414 /* OK to use static buffer since global mutex is held. */
411 sc->gfp_mask); 415 shrink_pages = ttm_page_pool_free(pool, nr_free, true);
412 freed += nr_free - shrink_pages; 416 freed += nr_free - shrink_pages;
413 } 417 }
414 mutex_unlock(&lock); 418 mutex_unlock(&lock);
@@ -710,7 +714,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
710 } 714 }
711 spin_unlock_irqrestore(&pool->lock, irq_flags); 715 spin_unlock_irqrestore(&pool->lock, irq_flags);
712 if (npages) 716 if (npages)
713 ttm_page_pool_free(pool, npages, GFP_KERNEL); 717 ttm_page_pool_free(pool, npages, false);
714} 718}
715 719
716/* 720/*
@@ -849,9 +853,9 @@ void ttm_page_alloc_fini(void)
849 pr_info("Finalizing pool allocator\n"); 853 pr_info("Finalizing pool allocator\n");
850 ttm_pool_mm_shrink_fini(_manager); 854 ttm_pool_mm_shrink_fini(_manager);
851 855
856 /* OK to use static buffer since global mutex is no longer used. */
852 for (i = 0; i < NUM_POOLS; ++i) 857 for (i = 0; i < NUM_POOLS; ++i)
853 ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES, 858 ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES, true);
854 GFP_KERNEL);
855 859
856 kobject_put(&_manager->kobj); 860 kobject_put(&_manager->kobj);
857 _manager = NULL; 861 _manager = NULL;
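Note: the ttm_page_alloc.c rework replaces the gfp parameter with use_static. Freeing pages from the shrinker must not kmalloc(), since that can recurse back into reclaim, so those paths borrow a static scratch array instead; this is only safe because a global mutex serializes every use_static caller. The core of the pattern, condensed:

static struct page *static_buf[NUM_PAGES_TO_ALLOC];

	struct page **pages_to_free;

	if (use_static)		/* caller holds the serializing mutex */
		pages_to_free = static_buf;
	else
		pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
					GFP_KERNEL);

	/* ... collect and release the pages ... */

	if (pages_to_free != static_buf)
		kfree(pages_to_free);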
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index c96db433f8af..01e1d27eb078 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -411,11 +411,12 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
411 * 411 *
412 * @pool: to free the pages from 412 * @pool: to free the pages from
413 * @nr_free: If set to true will free all pages in pool 413 * @nr_free: If set to true will free all pages in pool
414 * @gfp: GFP flags. 414 * @use_static: Safe to use static buffer
415 **/ 415 **/
416static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free, 416static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
417 gfp_t gfp) 417 bool use_static)
418{ 418{
419 static struct page *static_buf[NUM_PAGES_TO_ALLOC];
419 unsigned long irq_flags; 420 unsigned long irq_flags;
420 struct dma_page *dma_p, *tmp; 421 struct dma_page *dma_p, *tmp;
421 struct page **pages_to_free; 422 struct page **pages_to_free;
@@ -432,7 +433,11 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
432 npages_to_free, nr_free); 433 npages_to_free, nr_free);
433 } 434 }
434#endif 435#endif
435 pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp); 436 if (use_static)
437 pages_to_free = static_buf;
438 else
439 pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
440 GFP_KERNEL);
436 441
437 if (!pages_to_free) { 442 if (!pages_to_free) {
438 pr_err("%s: Failed to allocate memory for pool free operation\n", 443 pr_err("%s: Failed to allocate memory for pool free operation\n",
@@ -502,7 +507,8 @@ restart:
502 if (freed_pages) 507 if (freed_pages)
503 ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages); 508 ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
504out: 509out:
505 kfree(pages_to_free); 510 if (pages_to_free != static_buf)
511 kfree(pages_to_free);
506 return nr_free; 512 return nr_free;
507} 513}
508 514
@@ -531,7 +537,8 @@ static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
531 if (pool->type != type) 537 if (pool->type != type)
532 continue; 538 continue;
533 /* Takes a spinlock.. */ 539 /* Takes a spinlock.. */
534 ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, GFP_KERNEL); 540 /* OK to use static buffer since global mutex is held. */
541 ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, true);
535 WARN_ON(((pool->npages_in_use + pool->npages_free) != 0)); 542 WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
536 /* This code path is called after _all_ references to the 543 /* This code path is called after _all_ references to the
537 * struct device has been dropped - so nobody should be 544 * struct device has been dropped - so nobody should be
@@ -986,7 +993,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
986 993
987 /* shrink pool if necessary (only on !is_cached pools)*/ 994 /* shrink pool if necessary (only on !is_cached pools)*/
988 if (npages) 995 if (npages)
989 ttm_dma_page_pool_free(pool, npages, GFP_KERNEL); 996 ttm_dma_page_pool_free(pool, npages, false);
990 ttm->state = tt_unpopulated; 997 ttm->state = tt_unpopulated;
991} 998}
992EXPORT_SYMBOL_GPL(ttm_dma_unpopulate); 999EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
@@ -996,8 +1003,6 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
996 * 1003 *
997 * XXX: (dchinner) Deadlock warning! 1004 * XXX: (dchinner) Deadlock warning!
998 * 1005 *
999 * We need to pass sc->gfp_mask to ttm_dma_page_pool_free().
1000 *
1001 * I'm getting sadder as I hear more pathetical whimpers about needing per-pool 1006 * I'm getting sadder as I hear more pathetical whimpers about needing per-pool
1002 * shrinkers 1007 * shrinkers
1003 */ 1008 */
@@ -1030,8 +1035,8 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1030 if (++idx < pool_offset) 1035 if (++idx < pool_offset)
1031 continue; 1036 continue;
1032 nr_free = shrink_pages; 1037 nr_free = shrink_pages;
1033 shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, 1038 /* OK to use static buffer since global mutex is held. */
1034 sc->gfp_mask); 1039 shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
1035 freed += nr_free - shrink_pages; 1040 freed += nr_free - shrink_pages;
1036 1041
1037 pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n", 1042 pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
diff --git a/drivers/gpu/drm/udl/Makefile b/drivers/gpu/drm/udl/Makefile
index 05c7481bfd40..195bcac0b6c8 100644
--- a/drivers/gpu/drm/udl/Makefile
+++ b/drivers/gpu/drm/udl/Makefile
@@ -1,6 +1,6 @@
1 1
2ccflags-y := -Iinclude/drm 2ccflags-y := -Iinclude/drm
3 3
4udl-y := udl_drv.o udl_modeset.o udl_connector.o udl_encoder.o udl_main.o udl_fb.o udl_transfer.o udl_gem.o 4udl-y := udl_drv.o udl_modeset.o udl_connector.o udl_encoder.o udl_main.o udl_fb.o udl_transfer.o udl_gem.o udl_dmabuf.o
5 5
6obj-$(CONFIG_DRM_UDL) := udl.o 6obj-$(CONFIG_DRM_UDL) := udl.o
diff --git a/drivers/gpu/drm/udl/udl_dmabuf.c b/drivers/gpu/drm/udl/udl_dmabuf.c
new file mode 100644
index 000000000000..ac8a66b4dfc2
--- /dev/null
+++ b/drivers/gpu/drm/udl/udl_dmabuf.c
@@ -0,0 +1,276 @@
1/*
2 * udl_dmabuf.c
3 *
4 * Copyright (c) 2014 The Chromium OS Authors
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program. If not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include <drm/drmP.h>
21#include "udl_drv.h"
22#include <linux/shmem_fs.h>
23#include <linux/dma-buf.h>
24
25struct udl_drm_dmabuf_attachment {
26 struct sg_table sgt;
27 enum dma_data_direction dir;
28 bool is_mapped;
29};
30
31static int udl_attach_dma_buf(struct dma_buf *dmabuf,
32 struct device *dev,
33 struct dma_buf_attachment *attach)
34{
35 struct udl_drm_dmabuf_attachment *udl_attach;
36
37 DRM_DEBUG_PRIME("[DEV:%s] size:%zd\n", dev_name(attach->dev),
38 attach->dmabuf->size);
39
40 udl_attach = kzalloc(sizeof(*udl_attach), GFP_KERNEL);
41 if (!udl_attach)
42 return -ENOMEM;
43
44 udl_attach->dir = DMA_NONE;
45 attach->priv = udl_attach;
46
47 return 0;
48}
49
50static void udl_detach_dma_buf(struct dma_buf *dmabuf,
51 struct dma_buf_attachment *attach)
52{
53 struct udl_drm_dmabuf_attachment *udl_attach = attach->priv;
54 struct sg_table *sgt;
55
56 if (!udl_attach)
57 return;
58
59 DRM_DEBUG_PRIME("[DEV:%s] size:%zd\n", dev_name(attach->dev),
60 attach->dmabuf->size);
61
62 sgt = &udl_attach->sgt;
63
64 if (udl_attach->dir != DMA_NONE)
65 dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents,
66 udl_attach->dir);
67
68 sg_free_table(sgt);
69 kfree(udl_attach);
70 attach->priv = NULL;
71}
72
73static struct sg_table *udl_map_dma_buf(struct dma_buf_attachment *attach,
74 enum dma_data_direction dir)
75{
76 struct udl_drm_dmabuf_attachment *udl_attach = attach->priv;
77 struct udl_gem_object *obj = to_udl_bo(attach->dmabuf->priv);
78 struct drm_device *dev = obj->base.dev;
79 struct scatterlist *rd, *wr;
80 struct sg_table *sgt = NULL;
81 unsigned int i;
82 int page_count;
83 int nents, ret;
84
85 DRM_DEBUG_PRIME("[DEV:%s] size:%zd dir=%d\n", dev_name(attach->dev),
86 attach->dmabuf->size, dir);
87
88 /* just return current sgt if already requested. */
89 if (udl_attach->dir == dir && udl_attach->is_mapped)
90 return &udl_attach->sgt;
91
92 if (!obj->pages) {
93 ret = udl_gem_get_pages(obj);
94 if (ret) {
95 DRM_ERROR("failed to map pages.\n");
96 return ERR_PTR(ret);
97 }
98 }
99
100 page_count = obj->base.size / PAGE_SIZE;
101 obj->sg = drm_prime_pages_to_sg(obj->pages, page_count);
102 if (IS_ERR(obj->sg)) {
103 DRM_ERROR("failed to allocate sgt.\n");
104 return ERR_CAST(obj->sg);
105 }
106
107 sgt = &udl_attach->sgt;
108
109 ret = sg_alloc_table(sgt, obj->sg->orig_nents, GFP_KERNEL);
110 if (ret) {
111 DRM_ERROR("failed to alloc sgt.\n");
112 return ERR_PTR(-ENOMEM);
113 }
114
115 mutex_lock(&dev->struct_mutex);
116
117 rd = obj->sg->sgl;
118 wr = sgt->sgl;
119 for (i = 0; i < sgt->orig_nents; ++i) {
120 sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
121 rd = sg_next(rd);
122 wr = sg_next(wr);
123 }
124
125 if (dir != DMA_NONE) {
126 nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
127 if (!nents) {
128 DRM_ERROR("failed to map sgl with iommu.\n");
129 sg_free_table(sgt);
130 sgt = ERR_PTR(-EIO);
131 goto err_unlock;
132 }
133 }
134
135 udl_attach->is_mapped = true;
136 udl_attach->dir = dir;
137 attach->priv = udl_attach;
138
139err_unlock:
140 mutex_unlock(&dev->struct_mutex);
141 return sgt;
142}
143
144static void udl_unmap_dma_buf(struct dma_buf_attachment *attach,
145 struct sg_table *sgt,
146 enum dma_data_direction dir)
147{
148 /* Nothing to do. */
149 DRM_DEBUG_PRIME("[DEV:%s] size:%zd dir:%d\n", dev_name(attach->dev),
150 attach->dmabuf->size, dir);
151}
152
153static void *udl_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num)
154{
155 /* TODO */
156
157 return NULL;
158}
159
160static void *udl_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
161 unsigned long page_num)
162{
163 /* TODO */
164
165 return NULL;
166}
167
168static void udl_dmabuf_kunmap(struct dma_buf *dma_buf,
169 unsigned long page_num, void *addr)
170{
171 /* TODO */
172}
173
174static void udl_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
175 unsigned long page_num,
176 void *addr)
177{
178 /* TODO */
179}
180
181static int udl_dmabuf_mmap(struct dma_buf *dma_buf,
182 struct vm_area_struct *vma)
183{
184 /* TODO */
185
186 return -EINVAL;
187}
188
189static struct dma_buf_ops udl_dmabuf_ops = {
190 .attach = udl_attach_dma_buf,
191 .detach = udl_detach_dma_buf,
192 .map_dma_buf = udl_map_dma_buf,
193 .unmap_dma_buf = udl_unmap_dma_buf,
194 .kmap = udl_dmabuf_kmap,
195 .kmap_atomic = udl_dmabuf_kmap_atomic,
196 .kunmap = udl_dmabuf_kunmap,
197 .kunmap_atomic = udl_dmabuf_kunmap_atomic,
198 .mmap = udl_dmabuf_mmap,
199 .release = drm_gem_dmabuf_release,
200};
201
202struct dma_buf *udl_gem_prime_export(struct drm_device *dev,
203 struct drm_gem_object *obj, int flags)
204{
205 return dma_buf_export(obj, &udl_dmabuf_ops, obj->size, flags, NULL);
206}
207
208static int udl_prime_create(struct drm_device *dev,
209 size_t size,
210 struct sg_table *sg,
211 struct udl_gem_object **obj_p)
212{
213 struct udl_gem_object *obj;
214 int npages;
215
216 npages = size / PAGE_SIZE;
217
218 *obj_p = NULL;
219 obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE);
220 if (!obj)
221 return -ENOMEM;
222
223 obj->sg = sg;
224 obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
225 if (obj->pages == NULL) {
226 DRM_ERROR("obj pages is NULL %d\n", npages);
227 return -ENOMEM;
228 }
229
230 drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);
231
232 *obj_p = obj;
233 return 0;
234}
235
236struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
237 struct dma_buf *dma_buf)
238{
239 struct dma_buf_attachment *attach;
240 struct sg_table *sg;
241 struct udl_gem_object *uobj;
242 int ret;
243
244 /* need to attach */
245 get_device(dev->dev);
246 attach = dma_buf_attach(dma_buf, dev->dev);
247 if (IS_ERR(attach)) {
248 put_device(dev->dev);
249 return ERR_CAST(attach);
250 }
251
252 get_dma_buf(dma_buf);
253
254 sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
255 if (IS_ERR(sg)) {
256 ret = PTR_ERR(sg);
257 goto fail_detach;
258 }
259
260 ret = udl_prime_create(dev, dma_buf->size, sg, &uobj);
261 if (ret)
262 goto fail_unmap;
263
264 uobj->base.import_attach = attach;
265 uobj->flags = UDL_BO_WC;
266
267 return &uobj->base;
268
269fail_unmap:
270 dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
271fail_detach:
272 dma_buf_detach(dma_buf, attach);
273 dma_buf_put(dma_buf);
274 put_device(dev->dev);
275 return ERR_PTR(ret);
276}
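Note: udl_dmabuf.c moves the PRIME import helpers out of udl_gem.c (see the removal further below) and adds the export side. One behavioural detail: the new import path tags buffers UDL_BO_WC, which the removed udl_gem.c copy never set, so imported buffers now get write-combined userspace mappings via update_vm_cache_attr(). Export-side usage sketch, assuming 'dev', 'obj' and 'flags' arrive from the PRIME handle_to_fd path:

	struct dma_buf *buf = udl_gem_prime_export(dev, obj, flags);

	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* repeat map calls with an unchanged direction return the cached
	 * sg_table, via the udl_attach->is_mapped short-circuit above */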
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 8607e9e513db..d5728ec85254 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -51,7 +51,9 @@ static struct drm_driver driver = {
51 .dumb_destroy = drm_gem_dumb_destroy, 51 .dumb_destroy = drm_gem_dumb_destroy,
52 .fops = &udl_driver_fops, 52 .fops = &udl_driver_fops,
53 53
54 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
54 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 55 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
56 .gem_prime_export = udl_gem_prime_export,
55 .gem_prime_import = udl_gem_prime_import, 57 .gem_prime_import = udl_gem_prime_import,
56 58
57 .name = DRIVER_NAME, 59 .name = DRIVER_NAME,
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index c7490a2489a7..80adbac82bde 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -25,6 +25,9 @@
25#define DRIVER_MINOR 0 25#define DRIVER_MINOR 0
26#define DRIVER_PATCHLEVEL 1 26#define DRIVER_PATCHLEVEL 1
27 27
28#define UDL_BO_CACHEABLE (1 << 0)
29#define UDL_BO_WC (1 << 1)
30
28struct udl_device; 31struct udl_device;
29 32
30struct urb_node { 33struct urb_node {
@@ -69,6 +72,7 @@ struct udl_gem_object {
69 struct page **pages; 72 struct page **pages;
70 void *vmapping; 73 void *vmapping;
71 struct sg_table *sg; 74 struct sg_table *sg;
75 unsigned int flags;
72}; 76};
73 77
74#define to_udl_bo(x) container_of(x, struct udl_gem_object, base) 78#define to_udl_bo(x) container_of(x, struct udl_gem_object, base)
@@ -120,9 +124,13 @@ int udl_gem_mmap(struct drm_file *file_priv, struct drm_device *dev,
120void udl_gem_free_object(struct drm_gem_object *gem_obj); 124void udl_gem_free_object(struct drm_gem_object *gem_obj);
121struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev, 125struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
122 size_t size); 126 size_t size);
127struct dma_buf *udl_gem_prime_export(struct drm_device *dev,
128 struct drm_gem_object *obj, int flags);
123struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev, 129struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
124 struct dma_buf *dma_buf); 130 struct dma_buf *dma_buf);
125 131
132int udl_gem_get_pages(struct udl_gem_object *obj);
133void udl_gem_put_pages(struct udl_gem_object *obj);
126int udl_gem_vmap(struct udl_gem_object *obj); 134int udl_gem_vmap(struct udl_gem_object *obj);
127void udl_gem_vunmap(struct udl_gem_object *obj); 135void udl_gem_vunmap(struct udl_gem_object *obj);
128int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma); 136int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
diff --git a/drivers/gpu/drm/udl/udl_gem.c b/drivers/gpu/drm/udl/udl_gem.c
index 8044f5fb7c49..2a0a784ab6ee 100644
--- a/drivers/gpu/drm/udl/udl_gem.c
+++ b/drivers/gpu/drm/udl/udl_gem.c
@@ -25,6 +25,7 @@ struct udl_gem_object *udl_gem_alloc_object(struct drm_device *dev,
25 return NULL; 25 return NULL;
26 } 26 }
27 27
28 obj->flags = UDL_BO_CACHEABLE;
28 return obj; 29 return obj;
29} 30}
30 31
@@ -56,6 +57,23 @@ udl_gem_create(struct drm_file *file,
56 return 0; 57 return 0;
57} 58}
58 59
60static void update_vm_cache_attr(struct udl_gem_object *obj,
61 struct vm_area_struct *vma)
62{
63 DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);
64
65 /* non-cacheable as default. */
66 if (obj->flags & UDL_BO_CACHEABLE) {
67 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
68 } else if (obj->flags & UDL_BO_WC) {
69 vma->vm_page_prot =
70 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
71 } else {
72 vma->vm_page_prot =
73 pgprot_noncached(vm_get_page_prot(vma->vm_flags));
74 }
75}
76
59int udl_dumb_create(struct drm_file *file, 77int udl_dumb_create(struct drm_file *file,
60 struct drm_device *dev, 78 struct drm_device *dev,
61 struct drm_mode_create_dumb *args) 79 struct drm_mode_create_dumb *args)
@@ -77,6 +95,8 @@ int udl_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
77 vma->vm_flags &= ~VM_PFNMAP; 95 vma->vm_flags &= ~VM_PFNMAP;
78 vma->vm_flags |= VM_MIXEDMAP; 96 vma->vm_flags |= VM_MIXEDMAP;
79 97
98 update_vm_cache_attr(to_udl_bo(vma->vm_private_data), vma);
99
80 return ret; 100 return ret;
81} 101}
82 102
@@ -107,7 +127,7 @@ int udl_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
107 } 127 }
108} 128}
109 129
110static int udl_gem_get_pages(struct udl_gem_object *obj) 130int udl_gem_get_pages(struct udl_gem_object *obj)
111{ 131{
112 struct page **pages; 132 struct page **pages;
113 133
@@ -123,7 +143,7 @@ static int udl_gem_get_pages(struct udl_gem_object *obj)
123 return 0; 143 return 0;
124} 144}
125 145
126static void udl_gem_put_pages(struct udl_gem_object *obj) 146void udl_gem_put_pages(struct udl_gem_object *obj)
127{ 147{
128 if (obj->base.import_attach) { 148 if (obj->base.import_attach) {
129 drm_free_large(obj->pages); 149 drm_free_large(obj->pages);
@@ -164,8 +184,7 @@ void udl_gem_vunmap(struct udl_gem_object *obj)
164 return; 184 return;
165 } 185 }
166 186
167 if (obj->vmapping) 187 vunmap(obj->vmapping);
168 vunmap(obj->vmapping);
169 188
170 udl_gem_put_pages(obj); 189 udl_gem_put_pages(obj);
171} 190}
@@ -220,73 +239,3 @@ unlock:
220 mutex_unlock(&dev->struct_mutex); 239 mutex_unlock(&dev->struct_mutex);
221 return ret; 240 return ret;
222} 241}
223
224static int udl_prime_create(struct drm_device *dev,
225 size_t size,
226 struct sg_table *sg,
227 struct udl_gem_object **obj_p)
228{
229 struct udl_gem_object *obj;
230 int npages;
231
232 npages = size / PAGE_SIZE;
233
234 *obj_p = NULL;
235 obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE);
236 if (!obj)
237 return -ENOMEM;
238
239 obj->sg = sg;
240 obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
241 if (obj->pages == NULL) {
242 DRM_ERROR("obj pages is NULL %d\n", npages);
243 return -ENOMEM;
244 }
245
246 drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);
247
248 *obj_p = obj;
249 return 0;
250}
251
252struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
253 struct dma_buf *dma_buf)
254{
255 struct dma_buf_attachment *attach;
256 struct sg_table *sg;
257 struct udl_gem_object *uobj;
258 int ret;
259
260 /* need to attach */
261 get_device(dev->dev);
262 attach = dma_buf_attach(dma_buf, dev->dev);
263 if (IS_ERR(attach)) {
264 put_device(dev->dev);
265 return ERR_CAST(attach);
266 }
267
268 get_dma_buf(dma_buf);
269
270 sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
271 if (IS_ERR(sg)) {
272 ret = PTR_ERR(sg);
273 goto fail_detach;
274 }
275
276 ret = udl_prime_create(dev, dma_buf->size, sg, &uobj);
277 if (ret) {
278 goto fail_unmap;
279 }
280
281 uobj->base.import_attach = attach;
282
283 return &uobj->base;
284
285fail_unmap:
286 dma_buf_unmap_attachment(attach, sg, DMA_BIDIRECTIONAL);
287fail_detach:
288 dma_buf_detach(dma_buf, attach);
289 dma_buf_put(dma_buf);
290 put_device(dev->dev);
291 return ERR_PTR(ret);
292}
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index dc145d320b25..1701f1dfb23f 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -14,6 +14,7 @@
14#include <drm/drmP.h> 14#include <drm/drmP.h>
15#include <drm/drm_crtc.h> 15#include <drm/drm_crtc.h>
16#include <drm/drm_crtc_helper.h> 16#include <drm/drm_crtc_helper.h>
17#include <drm/drm_plane_helper.h>
17#include "udl_drv.h" 18#include "udl_drv.h"
18 19
19/* 20/*
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 25f3c250fd98..7b5d22110f25 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -889,8 +889,7 @@ static int vmw_driver_unload(struct drm_device *dev)
889 889
890 if (dev_priv->ctx.res_ht_initialized) 890 if (dev_priv->ctx.res_ht_initialized)
891 drm_ht_remove(&dev_priv->ctx.res_ht); 891 drm_ht_remove(&dev_priv->ctx.res_ht);
892 if (dev_priv->ctx.cmd_bounce) 892 vfree(dev_priv->ctx.cmd_bounce);
893 vfree(dev_priv->ctx.cmd_bounce);
894 if (dev_priv->enable_fb) { 893 if (dev_priv->enable_fb) {
895 vmw_fb_close(dev_priv); 894 vmw_fb_close(dev_priv);
896 vmw_kms_restore_vga(dev_priv); 895 vmw_kms_restore_vga(dev_priv);
@@ -1063,8 +1062,12 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
1063 1062
1064 vmaster = vmw_master_check(dev, file_priv, flags); 1063 vmaster = vmw_master_check(dev, file_priv, flags);
1065 if (unlikely(IS_ERR(vmaster))) { 1064 if (unlikely(IS_ERR(vmaster))) {
1066 DRM_INFO("IOCTL ERROR %d\n", nr); 1065 ret = PTR_ERR(vmaster);
1067 return PTR_ERR(vmaster); 1066
1067 if (ret != -ERESTARTSYS)
1068 DRM_INFO("IOCTL ERROR Command %d, Error %ld.\n",
1069 nr, ret);
1070 return ret;
1068 } 1071 }
1069 1072
1070 ret = ioctl_func(filp, cmd, arg); 1073 ret = ioctl_func(filp, cmd, arg);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 596cd6dafd33..33176d05db35 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -2487,7 +2487,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
2487 if (unlikely(ret != 0)) 2487 if (unlikely(ret != 0))
2488 goto out_err_nores; 2488 goto out_err_nores;
2489 2489
2490 ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes, true); 2490 ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
2491 true, NULL);
2491 if (unlikely(ret != 0)) 2492 if (unlikely(ret != 0))
2492 goto out_err; 2493 goto out_err;
2493 2494
@@ -2677,7 +2678,8 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
2677 query_val.shared = false; 2678 query_val.shared = false;
2678 list_add_tail(&query_val.head, &validate_list); 2679 list_add_tail(&query_val.head, &validate_list);
2679 2680
2680 ret = ttm_eu_reserve_buffers(&ticket, &validate_list, false); 2681 ret = ttm_eu_reserve_buffers(&ticket, &validate_list,
2682 false, NULL);
2681 if (unlikely(ret != 0)) { 2683 if (unlikely(ret != 0)) {
2682 vmw_execbuf_unpin_panic(dev_priv); 2684 vmw_execbuf_unpin_panic(dev_priv);
2683 goto out_no_reserve; 2685 goto out_no_reserve;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 197164fd7803..b7594cb758af 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -545,35 +545,19 @@ void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
 
 static void vmw_fence_destroy(struct vmw_fence_obj *fence)
 {
-	struct vmw_fence_manager *fman = fman_from_fence(fence);
-
 	fence_free(&fence->base);
-
-	/*
-	 * Free kernel space accounting.
-	 */
-	ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
-			    fman->fence_size);
 }
 
 int vmw_fence_create(struct vmw_fence_manager *fman,
 		     uint32_t seqno,
 		     struct vmw_fence_obj **p_fence)
 {
-	struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
 	struct vmw_fence_obj *fence;
 	int ret;
 
-	ret = ttm_mem_global_alloc(mem_glob, fman->fence_size,
-				   false, false);
-	if (unlikely(ret != 0))
-		return ret;
-
 	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
-	if (unlikely(fence == NULL)) {
-		ret = -ENOMEM;
-		goto out_no_object;
-	}
+	if (unlikely(fence == NULL))
+		return -ENOMEM;
 
 	ret = vmw_fence_obj_init(fman, fence, seqno,
 				 vmw_fence_destroy);
@@ -585,8 +569,6 @@ int vmw_fence_create(struct vmw_fence_manager *fman,
 
 out_err_init:
 	kfree(fence);
-out_no_object:
-	ttm_mem_global_free(mem_glob, fman->fence_size);
 	return ret;
 }
 
@@ -1105,6 +1087,8 @@ static int vmw_event_fence_action_create(struct drm_file *file_priv,
 	if (ret != 0)
 		goto out_no_queue;
 
+	return 0;
+
 out_no_queue:
 	event->base.destroy(&event->base);
 out_no_event:
@@ -1180,17 +1164,10 @@ int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
 
 	BUG_ON(fence == NULL);
 
-	if (arg->flags & DRM_VMW_FE_FLAG_REQ_TIME)
-		ret = vmw_event_fence_action_create(file_priv, fence,
-						    arg->flags,
-						    arg->user_data,
-						    true);
-	else
-		ret = vmw_event_fence_action_create(file_priv, fence,
-						    arg->flags,
-						    arg->user_data,
-						    true);
-
+	ret = vmw_event_fence_action_create(file_priv, fence,
+					    arg->flags,
+					    arg->user_data,
+					    true);
 	if (unlikely(ret != 0)) {
 		if (ret != -ERESTARTSYS)
 			DRM_ERROR("Failed to attach event to fence.\n");
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 941a7bc0b791..3725b521d931 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -252,7 +252,7 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
 	ret = 0;
 out:
 	drm_modeset_unlock_all(dev_priv->dev);
-	drm_modeset_lock_crtc(crtc);
+	drm_modeset_lock_crtc(crtc, crtc->cursor);
 
 	return ret;
 }
@@ -281,7 +281,7 @@ int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 			  du->cursor_y + du->hotspot_y);
 
 	drm_modeset_unlock_all(dev_priv->dev);
-	drm_modeset_lock_crtc(crtc);
+	drm_modeset_lock_crtc(crtc, crtc->cursor);
 
 	return 0;
 }
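
drm_modeset_lock_crtc() now takes the plane being updated alongside the CRTC, so these legacy cursor paths re-acquire the lock with crtc->cursor. A reduced sketch of the choreography both hunks follow (assuming the same lock handling as the ioctl handlers above):

	/* legacy path: drop the global modeset locks taken by the ioctl... */
	drm_modeset_unlock_all(dev_priv->dev);

	/* ...then re-take the per-CRTC lock, now also covering the
	 * universal cursor plane */
	drm_modeset_lock_crtc(crtc, crtc->cursor);
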
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 15e185ae4c99..5c289f748ab4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -26,6 +26,7 @@
  **************************************************************************/
 
 #include "vmwgfx_kms.h"
+#include <drm/drm_plane_helper.h>
 
 
 #define vmw_crtc_to_ldu(x) \
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 026de7cea0f6..210ef15b1d09 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1222,7 +1222,7 @@ vmw_resource_check_buffer(struct vmw_resource *res,
 	val_buf->bo = ttm_bo_reference(&res->backup->base);
 	val_buf->shared = false;
 	list_add_tail(&val_buf->head, &val_list);
-	ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible);
+	ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible, NULL);
 	if (unlikely(ret != 0))
 		goto out_no_reserve;
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index b295463a60b3..7dc591d04d9a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -26,6 +26,7 @@
  **************************************************************************/
 
 #include "vmwgfx_kms.h"
+#include <drm/drm_plane_helper.h>
 
 
 #define vmw_crtc_to_sou(x) \
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 8719fb3cccc9..6a4584a43aa6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -198,7 +198,7 @@ static int vmw_gb_shader_bind(struct vmw_resource *res,
 	cmd->header.size = sizeof(cmd->body);
 	cmd->body.shid = res->id;
 	cmd->body.mobid = bo->mem.start;
-	cmd->body.offsetInBytes = 0;
+	cmd->body.offsetInBytes = res->backup_offset;
 	res->backup_dirty = false;
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
 
diff --git a/drivers/gpu/host1x/cdma.c b/drivers/gpu/host1x/cdma.c
index 3995255b16c7..5a8c8d55317a 100644
--- a/drivers/gpu/host1x/cdma.c
+++ b/drivers/gpu/host1x/cdma.c
@@ -97,7 +97,7 @@ fail:
 static void host1x_pushbuffer_push(struct push_buffer *pb, u32 op1, u32 op2)
 {
 	u32 pos = pb->pos;
-	u32 *p = (u32 *)((u32)pb->mapped + pos);
+	u32 *p = (u32 *)((void *)pb->mapped + pos);
 	WARN_ON(pos == pb->fence);
 	*(p++) = op1;
 	*(p++) = op2;
diff --git a/drivers/gpu/host1x/cdma.h b/drivers/gpu/host1x/cdma.h
index 313c4b784348..470087af8fe5 100644
--- a/drivers/gpu/host1x/cdma.h
+++ b/drivers/gpu/host1x/cdma.h
@@ -42,7 +42,7 @@ struct host1x_job;
  */
 
 struct push_buffer {
-	u32 *mapped;			/* mapped pushbuffer memory */
+	void *mapped;			/* mapped pushbuffer memory */
 	dma_addr_t phys;		/* physical address of pushbuffer */
 	u32 fence;			/* index we've written */
 	u32 pos;			/* index to write to */
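
Re-typing mapped as void * keeps the byte-offset arithmetic in cdma.c from casting the pointer through u32, which truncates addresses on 64-bit kernels. A standalone contrast (void * arithmetic in byte units is a GCC extension the kernel relies on):

	/* before: breaks on 64-bit, the pointer is squeezed into 32 bits */
	u32 *p = (u32 *)((u32)pb->mapped + pos);

	/* after: byte-granular pointer math, then reinterpret as u32 * */
	u32 *q = (u32 *)(pb->mapped + pos);
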
diff --git a/drivers/gpu/host1x/hw/cdma_hw.c b/drivers/gpu/host1x/hw/cdma_hw.c
index 6b09b71940c2..305ea8f3382d 100644
--- a/drivers/gpu/host1x/hw/cdma_hw.c
+++ b/drivers/gpu/host1x/hw/cdma_hw.c
@@ -26,11 +26,11 @@
26#include "../debug.h" 26#include "../debug.h"
27 27
28/* 28/*
29 * Put the restart at the end of pushbuffer memor 29 * Put the restart at the end of pushbuffer memory
30 */ 30 */
31static void push_buffer_init(struct push_buffer *pb) 31static void push_buffer_init(struct push_buffer *pb)
32{ 32{
33 *(pb->mapped + (pb->size_bytes >> 2)) = host1x_opcode_restart(0); 33 *(u32 *)(pb->mapped + pb->size_bytes) = host1x_opcode_restart(0);
34} 34}
35 35
36/* 36/*
@@ -51,11 +51,11 @@ static void cdma_timeout_cpu_incr(struct host1x_cdma *cdma, u32 getptr,
51 51
52 /* NOP all the PB slots */ 52 /* NOP all the PB slots */
53 while (nr_slots--) { 53 while (nr_slots--) {
54 u32 *p = (u32 *)((u32)pb->mapped + getptr); 54 u32 *p = (u32 *)(pb->mapped + getptr);
55 *(p++) = HOST1X_OPCODE_NOP; 55 *(p++) = HOST1X_OPCODE_NOP;
56 *(p++) = HOST1X_OPCODE_NOP; 56 *(p++) = HOST1X_OPCODE_NOP;
57 dev_dbg(host1x->dev, "%s: NOP at %#llx\n", __func__, 57 dev_dbg(host1x->dev, "%s: NOP at %pad+%#x\n", __func__,
58 (u64)pb->phys + getptr); 58 &pb->phys, getptr);
59 getptr = (getptr + 8) & (pb->size_bytes - 1); 59 getptr = (getptr + 8) & (pb->size_bytes - 1);
60 } 60 }
61 wmb(); 61 wmb();
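
The dev_dbg() conversion above swaps the (u64) cast plus %#llx for the %pad specifier, which takes a *pointer* to a dma_addr_t and prints it at the width configured for the build. A hedged standalone sketch (helper name is illustrative):

	#include <linux/device.h>
	#include <linux/types.h>

	static void dump_slot(struct device *dev, dma_addr_t base, u32 offset)
	{
		/* note the address-of: %pad expects &dma_addr, not the value */
		dev_dbg(dev, "slot at %pad+%#x\n", &base, offset);
	}
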
diff --git a/drivers/gpu/host1x/hw/channel_hw.c b/drivers/gpu/host1x/hw/channel_hw.c
index 4608257ab656..946c332c3906 100644
--- a/drivers/gpu/host1x/hw/channel_hw.c
+++ b/drivers/gpu/host1x/hw/channel_hw.c
@@ -32,6 +32,7 @@
 static void trace_write_gather(struct host1x_cdma *cdma, struct host1x_bo *bo,
 			       u32 offset, u32 words)
 {
+	struct device *dev = cdma_to_channel(cdma)->dev;
 	void *mem = NULL;
 
 	if (host1x_debug_trace_cmdbuf)
@@ -44,11 +45,14 @@ static void trace_write_gather(struct host1x_cdma *cdma, struct host1x_bo *bo,
 		 * of how much you can output to ftrace at once.
 		 */
 		for (i = 0; i < words; i += TRACE_MAX_LENGTH) {
-			trace_host1x_cdma_push_gather(
-				dev_name(cdma_to_channel(cdma)->dev),
-				(u32)bo, min(words - i, TRACE_MAX_LENGTH),
-				offset + i * sizeof(u32), mem);
+			u32 num_words = min(words - i, TRACE_MAX_LENGTH);
+			offset += i * sizeof(u32);
+
+			trace_host1x_cdma_push_gather(dev_name(dev), bo,
+						      num_words, offset,
+						      mem);
 		}
+
 		host1x_bo_munmap(bo, mem);
 	}
 }
diff --git a/drivers/gpu/host1x/hw/debug_hw.c b/drivers/gpu/host1x/hw/debug_hw.c
index f72c873eff81..791de9351eeb 100644
--- a/drivers/gpu/host1x/hw/debug_hw.c
+++ b/drivers/gpu/host1x/hw/debug_hw.c
@@ -163,8 +163,8 @@ static void show_channel_gathers(struct output *o, struct host1x_cdma *cdma)
 			continue;
 		}
 
-		host1x_debug_output(o, "  GATHER at %#llx+%04x, %d words\n",
-				    (u64)g->base, g->offset, g->words);
+		host1x_debug_output(o, "  GATHER at %pad+%#x, %d words\n",
+				    &g->base, g->offset, g->words);
 
 		show_gather(o, g->base + g->offset, g->words, cdma,
 			    g->base, mapped);
diff --git a/drivers/gpu/host1x/job.h b/drivers/gpu/host1x/job.h
index 33a697d6dcef..8b3c15df0660 100644
--- a/drivers/gpu/host1x/job.h
+++ b/drivers/gpu/host1x/job.h
@@ -23,7 +23,7 @@ struct host1x_job_gather {
 	u32 words;
 	dma_addr_t base;
 	struct host1x_bo *bo;
-	int offset;
+	u32 offset;
 	bool handled;
 };
 
diff --git a/drivers/gpu/host1x/mipi.c b/drivers/gpu/host1x/mipi.c
index 9882ea122024..fbc6ee6ca337 100644
--- a/drivers/gpu/host1x/mipi.c
+++ b/drivers/gpu/host1x/mipi.c
@@ -49,35 +49,47 @@
 #define MIPI_CAL_CONFIG_DSIC		0x10
 #define MIPI_CAL_CONFIG_DSID		0x11
 
+#define MIPI_CAL_CONFIG_DSIAB_CLK	0x19
+#define MIPI_CAL_CONFIG_DSICD_CLK	0x1a
+#define MIPI_CAL_CONFIG_CSIAB_CLK	0x1b
+#define MIPI_CAL_CONFIG_CSICD_CLK	0x1c
+#define MIPI_CAL_CONFIG_CSIE_CLK	0x1d
+
+/* for data and clock lanes */
 #define MIPI_CAL_CONFIG_SELECT		(1 << 21)
+
+/* for data lanes */
 #define MIPI_CAL_CONFIG_HSPDOS(x)	(((x) & 0x1f) << 16)
 #define MIPI_CAL_CONFIG_HSPUOS(x)	(((x) & 0x1f) << 8)
 #define MIPI_CAL_CONFIG_TERMOS(x)	(((x) & 0x1f) << 0)
 
+/* for clock lanes */
+#define MIPI_CAL_CONFIG_HSCLKPDOSD(x)	(((x) & 0x1f) << 8)
+#define MIPI_CAL_CONFIG_HSCLKPUOSD(x)	(((x) & 0x1f) << 0)
+
 #define MIPI_CAL_BIAS_PAD_CFG0		0x16
 #define MIPI_CAL_BIAS_PAD_PDVCLAMP	(1 << 1)
 #define MIPI_CAL_BIAS_PAD_E_VCLAMP_REF	(1 << 0)
 
 #define MIPI_CAL_BIAS_PAD_CFG1		0x17
+#define MIPI_CAL_BIAS_PAD_DRV_DN_REF(x) (((x) & 0x7) << 16)
 
 #define MIPI_CAL_BIAS_PAD_CFG2		0x18
 #define MIPI_CAL_BIAS_PAD_PDVREG	(1 << 1)
 
-static const struct module {
-	unsigned long reg;
-} modules[] = {
-	{ .reg = MIPI_CAL_CONFIG_CSIA },
-	{ .reg = MIPI_CAL_CONFIG_CSIB },
-	{ .reg = MIPI_CAL_CONFIG_CSIC },
-	{ .reg = MIPI_CAL_CONFIG_CSID },
-	{ .reg = MIPI_CAL_CONFIG_CSIE },
-	{ .reg = MIPI_CAL_CONFIG_DSIA },
-	{ .reg = MIPI_CAL_CONFIG_DSIB },
-	{ .reg = MIPI_CAL_CONFIG_DSIC },
-	{ .reg = MIPI_CAL_CONFIG_DSID },
+struct tegra_mipi_pad {
+	unsigned long data;
+	unsigned long clk;
+};
+
+struct tegra_mipi_soc {
+	bool has_clk_lane;
+	const struct tegra_mipi_pad *pads;
+	unsigned int num_pads;
 };
 
 struct tegra_mipi {
+	const struct tegra_mipi_soc *soc;
 	void __iomem *regs;
 	struct mutex lock;
 	struct clk *clk;
@@ -90,16 +102,16 @@ struct tegra_mipi_device {
 	unsigned long pads;
 };
 
-static inline unsigned long tegra_mipi_readl(struct tegra_mipi *mipi,
-					     unsigned long reg)
+static inline u32 tegra_mipi_readl(struct tegra_mipi *mipi,
+				   unsigned long offset)
 {
-	return readl(mipi->regs + (reg << 2));
+	return readl(mipi->regs + (offset << 2));
 }
 
-static inline void tegra_mipi_writel(struct tegra_mipi *mipi,
-				     unsigned long value, unsigned long reg)
+static inline void tegra_mipi_writel(struct tegra_mipi *mipi, u32 value,
+				     unsigned long offset)
 {
-	writel(value, mipi->regs + (reg << 2));
+	writel(value, mipi->regs + (offset << 2));
 }
 
 struct tegra_mipi_device *tegra_mipi_request(struct device *device)
@@ -117,36 +129,35 @@ struct tegra_mipi_device *tegra_mipi_request(struct device *device)
 
 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 	if (!dev) {
-		of_node_put(args.np);
 		err = -ENOMEM;
 		goto out;
 	}
 
 	dev->pdev = of_find_device_by_node(args.np);
 	if (!dev->pdev) {
-		of_node_put(args.np);
 		err = -ENODEV;
 		goto free;
 	}
 
-	of_node_put(args.np);
-
 	dev->mipi = platform_get_drvdata(dev->pdev);
 	if (!dev->mipi) {
 		err = -EPROBE_DEFER;
-		goto pdev_put;
+		goto put;
 	}
 
+	of_node_put(args.np);
+
 	dev->pads = args.args[0];
 	dev->device = device;
 
 	return dev;
 
-pdev_put:
+put:
 	platform_device_put(dev->pdev);
 free:
 	kfree(dev);
 out:
+	of_node_put(args.np);
 	return ERR_PTR(err);
 }
 EXPORT_SYMBOL(tegra_mipi_request);
@@ -161,7 +172,7 @@ EXPORT_SYMBOL(tegra_mipi_free);
 static int tegra_mipi_wait(struct tegra_mipi *mipi)
 {
 	unsigned long timeout = jiffies + msecs_to_jiffies(250);
-	unsigned long value;
+	u32 value;
 
 	while (time_before(jiffies, timeout)) {
 		value = tegra_mipi_readl(mipi, MIPI_CAL_STATUS);
@@ -177,8 +188,9 @@ static int tegra_mipi_wait(struct tegra_mipi *mipi)
 
 int tegra_mipi_calibrate(struct tegra_mipi_device *device)
 {
-	unsigned long value;
+	const struct tegra_mipi_soc *soc = device->mipi->soc;
 	unsigned int i;
+	u32 value;
 	int err;
 
 	err = clk_enable(device->mipi->clk);
@@ -192,23 +204,35 @@ int tegra_mipi_calibrate(struct tegra_mipi_device *device)
 	value |= MIPI_CAL_BIAS_PAD_E_VCLAMP_REF;
 	tegra_mipi_writel(device->mipi, value, MIPI_CAL_BIAS_PAD_CFG0);
 
+	tegra_mipi_writel(device->mipi, MIPI_CAL_BIAS_PAD_DRV_DN_REF(2),
+			  MIPI_CAL_BIAS_PAD_CFG1);
+
 	value = tegra_mipi_readl(device->mipi, MIPI_CAL_BIAS_PAD_CFG2);
 	value &= ~MIPI_CAL_BIAS_PAD_PDVREG;
 	tegra_mipi_writel(device->mipi, value, MIPI_CAL_BIAS_PAD_CFG2);
 
-	for (i = 0; i < ARRAY_SIZE(modules); i++) {
-		if (device->pads & BIT(i))
-			value = MIPI_CAL_CONFIG_SELECT |
-				MIPI_CAL_CONFIG_HSPDOS(0) |
-				MIPI_CAL_CONFIG_HSPUOS(4) |
-				MIPI_CAL_CONFIG_TERMOS(5);
-		else
-			value = 0;
+	for (i = 0; i < soc->num_pads; i++) {
+		u32 clk = 0, data = 0;
+
+		if (device->pads & BIT(i)) {
+			data = MIPI_CAL_CONFIG_SELECT |
+			       MIPI_CAL_CONFIG_HSPDOS(0) |
+			       MIPI_CAL_CONFIG_HSPUOS(4) |
+			       MIPI_CAL_CONFIG_TERMOS(5);
+			clk = MIPI_CAL_CONFIG_SELECT |
+			      MIPI_CAL_CONFIG_HSCLKPDOSD(0) |
+			      MIPI_CAL_CONFIG_HSCLKPUOSD(4);
+		}
 
-		tegra_mipi_writel(device->mipi, value, modules[i].reg);
+		tegra_mipi_writel(device->mipi, data, soc->pads[i].data);
+
+		if (soc->has_clk_lane)
+			tegra_mipi_writel(device->mipi, clk, soc->pads[i].clk);
 	}
 
-	tegra_mipi_writel(device->mipi, MIPI_CAL_CTRL_START, MIPI_CAL_CTRL);
+	value = tegra_mipi_readl(device->mipi, MIPI_CAL_CTRL);
+	value |= MIPI_CAL_CTRL_START;
+	tegra_mipi_writel(device->mipi, value, MIPI_CAL_CTRL);
 
 	err = tegra_mipi_wait(device->mipi);
 
@@ -219,16 +243,63 @@ int tegra_mipi_calibrate(struct tegra_mipi_device *device)
 }
 EXPORT_SYMBOL(tegra_mipi_calibrate);
 
+static const struct tegra_mipi_pad tegra114_mipi_pads[] = {
+	{ .data = MIPI_CAL_CONFIG_CSIA },
+	{ .data = MIPI_CAL_CONFIG_CSIB },
+	{ .data = MIPI_CAL_CONFIG_CSIC },
+	{ .data = MIPI_CAL_CONFIG_CSID },
+	{ .data = MIPI_CAL_CONFIG_CSIE },
+	{ .data = MIPI_CAL_CONFIG_DSIA },
+	{ .data = MIPI_CAL_CONFIG_DSIB },
+	{ .data = MIPI_CAL_CONFIG_DSIC },
+	{ .data = MIPI_CAL_CONFIG_DSID },
+};
+
+static const struct tegra_mipi_soc tegra114_mipi_soc = {
+	.has_clk_lane = false,
+	.pads = tegra114_mipi_pads,
+	.num_pads = ARRAY_SIZE(tegra114_mipi_pads),
+};
+
+static const struct tegra_mipi_pad tegra124_mipi_pads[] = {
+	{ .data = MIPI_CAL_CONFIG_CSIA, .clk = MIPI_CAL_CONFIG_CSIAB_CLK },
+	{ .data = MIPI_CAL_CONFIG_CSIB, .clk = MIPI_CAL_CONFIG_CSIAB_CLK },
+	{ .data = MIPI_CAL_CONFIG_CSIC, .clk = MIPI_CAL_CONFIG_CSICD_CLK },
+	{ .data = MIPI_CAL_CONFIG_CSID, .clk = MIPI_CAL_CONFIG_CSICD_CLK },
+	{ .data = MIPI_CAL_CONFIG_CSIE, .clk = MIPI_CAL_CONFIG_CSIE_CLK },
+	{ .data = MIPI_CAL_CONFIG_DSIA, .clk = MIPI_CAL_CONFIG_DSIAB_CLK },
+	{ .data = MIPI_CAL_CONFIG_DSIB, .clk = MIPI_CAL_CONFIG_DSIAB_CLK },
+};
+
+static const struct tegra_mipi_soc tegra124_mipi_soc = {
+	.has_clk_lane = true,
+	.pads = tegra124_mipi_pads,
+	.num_pads = ARRAY_SIZE(tegra124_mipi_pads),
+};
+
+static struct of_device_id tegra_mipi_of_match[] = {
+	{ .compatible = "nvidia,tegra114-mipi", .data = &tegra114_mipi_soc },
+	{ .compatible = "nvidia,tegra124-mipi", .data = &tegra124_mipi_soc },
+	{ },
+};
+
 static int tegra_mipi_probe(struct platform_device *pdev)
 {
+	const struct of_device_id *match;
 	struct tegra_mipi *mipi;
 	struct resource *res;
 	int err;
 
+	match = of_match_node(tegra_mipi_of_match, pdev->dev.of_node);
+	if (!match)
+		return -ENODEV;
+
 	mipi = devm_kzalloc(&pdev->dev, sizeof(*mipi), GFP_KERNEL);
 	if (!mipi)
 		return -ENOMEM;
 
+	mipi->soc = match->data;
+
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	mipi->regs = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(mipi->regs))
@@ -260,11 +331,6 @@ static int tegra_mipi_remove(struct platform_device *pdev)
 	return 0;
 }
 
-static struct of_device_id tegra_mipi_of_match[] = {
-	{ .compatible = "nvidia,tegra114-mipi", },
-	{ },
-};
-
 struct platform_driver tegra_mipi_driver = {
 	.driver = {
 		.name = "tegra-mipi",
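
The Tegra124 support above hangs per-SoC calibration data off the .data member of the OF match table and resolves it in probe via of_match_node(). The same pattern in condensed, self-contained form (all names here are illustrative):

	#include <linux/of.h>
	#include <linux/platform_device.h>

	struct soc_info {
		unsigned int num_pads;
	};

	static const struct soc_info soc_a_info = { .num_pads = 9 };

	static const struct of_device_id demo_of_match[] = {
		{ .compatible = "vendor,soc-a", .data = &soc_a_info },
		{ /* sentinel */ },
	};

	static int demo_probe(struct platform_device *pdev)
	{
		const struct of_device_id *match;
		const struct soc_info *soc;

		/* pick the per-SoC data that matches the DT node */
		match = of_match_node(demo_of_match, pdev->dev.of_node);
		if (!match)
			return -ENODEV;

		soc = match->data;	/* &soc_a_info for "vendor,soc-a" */
		return 0;
	}
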
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index bea878f8e7d3..90f70d0e1141 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -92,13 +92,6 @@ static spinlock_t state_lock;
 
 static struct workqueue_struct *iommu_wq;
 
-/*
- * Empty page table - Used between
- * mmu_notifier_invalidate_range_start and
- * mmu_notifier_invalidate_range_end
- */
-static u64 *empty_page_table;
-
 static void free_pasid_states(struct device_state *dev_state);
 
 static u16 device_id(struct pci_dev *pdev)
@@ -414,46 +407,21 @@ static void mn_invalidate_page(struct mmu_notifier *mn,
 	__mn_flush_page(mn, address);
 }
 
-static void mn_invalidate_range_start(struct mmu_notifier *mn,
-				      struct mm_struct *mm,
-				      unsigned long start, unsigned long end)
-{
-	struct pasid_state *pasid_state;
-	struct device_state *dev_state;
-	unsigned long flags;
-
-	pasid_state = mn_to_state(mn);
-	dev_state = pasid_state->device_state;
-
-	spin_lock_irqsave(&pasid_state->lock, flags);
-	if (pasid_state->mmu_notifier_count == 0) {
-		amd_iommu_domain_set_gcr3(dev_state->domain,
-					  pasid_state->pasid,
-					  __pa(empty_page_table));
-	}
-	pasid_state->mmu_notifier_count += 1;
-	spin_unlock_irqrestore(&pasid_state->lock, flags);
-}
-
-static void mn_invalidate_range_end(struct mmu_notifier *mn,
-				    struct mm_struct *mm,
-				    unsigned long start, unsigned long end)
+static void mn_invalidate_range(struct mmu_notifier *mn,
+				struct mm_struct *mm,
+				unsigned long start, unsigned long end)
 {
 	struct pasid_state *pasid_state;
 	struct device_state *dev_state;
-	unsigned long flags;
 
 	pasid_state = mn_to_state(mn);
 	dev_state = pasid_state->device_state;
 
-	spin_lock_irqsave(&pasid_state->lock, flags);
-	pasid_state->mmu_notifier_count -= 1;
-	if (pasid_state->mmu_notifier_count == 0) {
-		amd_iommu_domain_set_gcr3(dev_state->domain,
-					  pasid_state->pasid,
-					  __pa(pasid_state->mm->pgd));
-	}
-	spin_unlock_irqrestore(&pasid_state->lock, flags);
+	if ((start ^ (end - 1)) < PAGE_SIZE)
+		amd_iommu_flush_page(dev_state->domain, pasid_state->pasid,
+				     start);
+	else
+		amd_iommu_flush_tlb(dev_state->domain, pasid_state->pasid);
 }
 
 static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
@@ -478,8 +446,7 @@ static struct mmu_notifier_ops iommu_mn = {
 	.release		= mn_release,
 	.clear_flush_young	= mn_clear_flush_young,
 	.invalidate_page	= mn_invalidate_page,
-	.invalidate_range_start	= mn_invalidate_range_start,
-	.invalidate_range_end	= mn_invalidate_range_end,
+	.invalidate_range	= mn_invalidate_range,
 };
 
 static void set_pri_tag_status(struct pasid_state *pasid_state,
@@ -972,18 +939,10 @@ static int __init amd_iommu_v2_init(void)
 	if (iommu_wq == NULL)
 		goto out;
 
-	ret = -ENOMEM;
-	empty_page_table = (u64 *)get_zeroed_page(GFP_KERNEL);
-	if (empty_page_table == NULL)
-		goto out_destroy_wq;
-
 	amd_iommu_register_ppr_notifier(&ppr_nb);
 
 	return 0;
 
-out_destroy_wq:
-	destroy_workqueue(iommu_wq);
-
 out:
 	return ret;
 }
@@ -1017,8 +976,6 @@ static void __exit amd_iommu_v2_exit(void)
 	}
 
 	destroy_workqueue(iommu_wq);
-
-	free_page((unsigned long)empty_page_table);
 }
 
 module_init(amd_iommu_v2_init);
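
The single-page fast path in mn_invalidate_range() relies on a bit trick: if start and end - 1 agree in every bit above the page offset, XOR leaves only low-order bits, so the result is below PAGE_SIZE exactly when the range fits inside one page. A standalone illustration (user-space analogue with a 4 KiB page):

	#include <stdio.h>

	#define PAGE_SIZE 4096UL

	static int one_page(unsigned long start, unsigned long end)
	{
		/* high bits identical <=> same page <=> XOR < PAGE_SIZE */
		return (start ^ (end - 1)) < PAGE_SIZE;
	}

	int main(void)
	{
		printf("%d\n", one_page(0x1000, 0x2000)); /* 1: one full page */
		printf("%d\n", one_page(0x1800, 0x2800)); /* 0: crosses a page */
		return 0;
	}
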
diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
index 11a5043959dc..011a3363c265 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra.c
@@ -31,6 +31,7 @@
 static u32 (*fuse_readl)(const unsigned int offset);
 static int fuse_size;
 struct tegra_sku_info tegra_sku_info;
+EXPORT_SYMBOL(tegra_sku_info);
 
 static const char *tegra_revision_name[TEGRA_REVISION_MAX] = {
 	[TEGRA_REVISION_UNKNOWN] = "unknown",
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 4690ae9a267f..9425728b7eb5 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -86,8 +86,6 @@ source "drivers/staging/gdm72xx/Kconfig"
 
 source "drivers/staging/gdm724x/Kconfig"
 
-source "drivers/staging/imx-drm/Kconfig"
-
 source "drivers/staging/fwserial/Kconfig"
 
 source "drivers/staging/goldfish/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index c780a0e70e15..bc233dd98a95 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -36,7 +36,6 @@ obj-$(CONFIG_STAGING_BOARD) += board/
 obj-$(CONFIG_USB_WPAN_HCD)	+= ozwpan/
 obj-$(CONFIG_WIMAX_GDM72XX)	+= gdm72xx/
 obj-$(CONFIG_LTE_GDM724X)	+= gdm724x/
-obj-$(CONFIG_DRM_IMX)		+= imx-drm/
 obj-$(CONFIG_FIREWIRE_SERIAL)	+= fwserial/
 obj-$(CONFIG_GOLDFISH)		+= goldfish/
 obj-$(CONFIG_LUSTRE_FS)		+= lustre/
diff --git a/drivers/staging/imx-drm/TODO b/drivers/staging/imx-drm/TODO
deleted file mode 100644
index 29636fb13959..000000000000
--- a/drivers/staging/imx-drm/TODO
+++ /dev/null
@@ -1,17 +0,0 @@
-TODO:
-- get DRM Maintainer review for this code
-- decide where to put the base driver. It is not specific to a subsystem
-  and would be used by DRM/KMS and media/V4L2
-
-Missing features (not necessarily for moving out of staging):
-
-- Add support for IC (Image converter)
-- Add support for CSI (CMOS Sensor interface)
-- Add support for VDIC (Video Deinterlacer)
-
-Many work-in-progress patches for the above features exist. Contact
-Sascha Hauer <kernel@pengutronix.de> if you are interested in working
-on a specific feature.
-
-Please send any patches to Greg Kroah-Hartman <gregkh@linuxfoundation.org> and
-Sascha Hauer <kernel@pengutronix.de>
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 53ed87698a74..8ba35c622e22 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -125,8 +125,8 @@ struct dma_buf_attachment;
 extern __printf(2, 3)
 void drm_ut_debug_printk(const char *function_name,
 			 const char *format, ...);
-extern __printf(2, 3)
-void drm_err(const char *func, const char *format, ...);
+extern __printf(1, 2)
+void drm_err(const char *format, ...);
 
 /***********************************************************************/
 /** \name DRM template customization defaults */
@@ -155,7 +155,7 @@ void drm_err(const char *func, const char *format, ...);
  * \param arg arguments
  */
 #define DRM_ERROR(fmt, ...)				\
-	drm_err(__func__, fmt, ##__VA_ARGS__)
+	drm_err(fmt, ##__VA_ARGS__)
 
 /**
  * Rate limited error output.  Like DRM_ERROR() but won't flood the log.
@@ -170,7 +170,7 @@ void drm_err(const char *func, const char *format, ...);
 					  DEFAULT_RATELIMIT_BURST);	\
 									\
 	if (__ratelimit(&_rs))						\
-		drm_err(__func__, fmt, ##__VA_ARGS__);			\
+		drm_err(fmt, ##__VA_ARGS__);				\
 })
 
 #define DRM_INFO(fmt, ...)				\
@@ -809,7 +809,7 @@ struct drm_device {
 	struct drm_local_map *agp_buffer_map;
 	unsigned int agp_buffer_token;
 
-	struct drm_mode_config mode_config;	/**< Current mode config */
+	struct drm_mode_config mode_config;	/**< Current mode config */
 
 	/** \name GEM information */
 	/*@{ */
@@ -986,7 +986,7 @@ extern void drm_gem_dmabuf_release(struct dma_buf *dma_buf);
 
 extern int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
 					    dma_addr_t *addrs, int max_pages);
-extern struct sg_table *drm_prime_pages_to_sg(struct page **pages, int nr_pages);
+extern struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages);
 extern void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg);
 
 
@@ -1028,10 +1028,25 @@ void drm_pci_agp_destroy(struct drm_device *dev);
 
 extern int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver);
 extern void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver);
+#ifdef CONFIG_PCI
 extern int drm_get_pci_dev(struct pci_dev *pdev,
 			   const struct pci_device_id *ent,
 			   struct drm_driver *driver);
 extern int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master);
+#else
+static inline int drm_get_pci_dev(struct pci_dev *pdev,
+				  const struct pci_device_id *ent,
+				  struct drm_driver *driver)
+{
+	return -ENOSYS;
+}
+
+static inline int drm_pci_set_busid(struct drm_device *dev,
+				    struct drm_master *master)
+{
+	return -ENOSYS;
+}
+#endif
 
 #define DRM_PCIE_SPEED_25			1
 #define DRM_PCIE_SPEED_50			2
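
Because drm_err() loses its leading func parameter, the format string moves from argument 2 to argument 1 and the varargs from 3 to 2, which is exactly what the __printf() annotation change encodes. A standalone illustration of how the two indices track the argument list (userspace analogue; in the kernel __printf comes from <linux/compiler.h>):

	#include <stdarg.h>
	#include <stdio.h>

	#define __printf(a, b) __attribute__((format(printf, a, b)))

	/* format string is argument 1, variadic arguments start at 2 */
	static __printf(1, 2) void log_err(const char *fmt, ...)
	{
		va_list args;

		va_start(args, fmt);
		vfprintf(stderr, fmt, args);
		va_end(args);
	}
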
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
new file mode 100644
index 000000000000..ad2229574dd9
--- /dev/null
+++ b/include/drm/drm_atomic.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2014 Red Hat
+ * Copyright (C) 2014 Intel Corp.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robdclark@gmail.com>
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ */
+
+#ifndef DRM_ATOMIC_H_
+#define DRM_ATOMIC_H_
+
+#include <drm/drm_crtc.h>
+
+struct drm_atomic_state * __must_check
+drm_atomic_state_alloc(struct drm_device *dev);
+void drm_atomic_state_clear(struct drm_atomic_state *state);
+void drm_atomic_state_free(struct drm_atomic_state *state);
+
+struct drm_crtc_state * __must_check
+drm_atomic_get_crtc_state(struct drm_atomic_state *state,
+			  struct drm_crtc *crtc);
+struct drm_plane_state * __must_check
+drm_atomic_get_plane_state(struct drm_atomic_state *state,
+			   struct drm_plane *plane);
+struct drm_connector_state * __must_check
+drm_atomic_get_connector_state(struct drm_atomic_state *state,
+			       struct drm_connector *connector);
+
+int __must_check
+drm_atomic_set_crtc_for_plane(struct drm_atomic_state *state,
+			      struct drm_plane *plane, struct drm_crtc *crtc);
+void drm_atomic_set_fb_for_plane(struct drm_plane_state *plane_state,
+				 struct drm_framebuffer *fb);
+int __must_check
+drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
+				  struct drm_crtc *crtc);
+int __must_check
+drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
+				   struct drm_crtc *crtc);
+int
+drm_atomic_connectors_for_crtc(struct drm_atomic_state *state,
+			       struct drm_crtc *crtc);
+
+void drm_atomic_legacy_backoff(struct drm_atomic_state *state);
+
+int __must_check drm_atomic_check_only(struct drm_atomic_state *state);
+int __must_check drm_atomic_commit(struct drm_atomic_state *state);
+int __must_check drm_atomic_async_commit(struct drm_atomic_state *state);
+
+#endif /* DRM_ATOMIC_H_ */
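
The new header exposes the state-building flow: allocate a drm_atomic_state, pull per-object states into it, mutate them through the set helpers, then check or commit. A hedged sketch of a kernel-internal caller (locking via state->acquire_ctx and deadlock backoff are omitted; on successful commit the core takes ownership of the state):

	#include <drm/drm_atomic.h>

	static int move_plane_to_crtc(struct drm_device *dev,
				      struct drm_plane *plane,
				      struct drm_crtc *crtc,
				      struct drm_framebuffer *fb)
	{
		struct drm_atomic_state *state;
		struct drm_plane_state *plane_state;
		int ret;

		state = drm_atomic_state_alloc(dev);
		if (!state)
			return -ENOMEM;

		/* duplicates the plane's current state into this update */
		plane_state = drm_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state)) {
			ret = PTR_ERR(plane_state);
			goto fail;
		}

		ret = drm_atomic_set_crtc_for_plane(state, plane, crtc);
		if (ret)
			goto fail;
		drm_atomic_set_fb_for_plane(plane_state, fb);

		/* validates and applies the whole transaction in one step */
		ret = drm_atomic_commit(state);
		if (ret)
			goto fail;
		return 0;

	fail:
		drm_atomic_state_free(state);
		return ret;
	}
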
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
new file mode 100644
index 000000000000..f956b413311e
--- /dev/null
+++ b/include/drm/drm_atomic_helper.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2014 Red Hat
+ * Copyright (C) 2014 Intel Corp.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Rob Clark <robdclark@gmail.com>
+ * Daniel Vetter <daniel.vetter@ffwll.ch>
+ */
+
+#ifndef DRM_ATOMIC_HELPER_H_
+#define DRM_ATOMIC_HELPER_H_
+
+#include <drm/drm_crtc.h>
+
+int drm_atomic_helper_check(struct drm_device *dev,
+			    struct drm_atomic_state *state);
+int drm_atomic_helper_commit(struct drm_device *dev,
+			     struct drm_atomic_state *state,
+			     bool async);
+
+void drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
+					struct drm_atomic_state *old_state);
+
+void drm_atomic_helper_commit_pre_planes(struct drm_device *dev,
+					 struct drm_atomic_state *state);
+void drm_atomic_helper_commit_post_planes(struct drm_device *dev,
+					  struct drm_atomic_state *old_state);
+
+int drm_atomic_helper_prepare_planes(struct drm_device *dev,
+				     struct drm_atomic_state *state);
+void drm_atomic_helper_commit_planes(struct drm_device *dev,
+				     struct drm_atomic_state *state);
+void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
+				      struct drm_atomic_state *old_state);
+
+void drm_atomic_helper_swap_state(struct drm_device *dev,
+				  struct drm_atomic_state *state);
+
+/* implementations for legacy interfaces */
+int drm_atomic_helper_update_plane(struct drm_plane *plane,
+				   struct drm_crtc *crtc,
+				   struct drm_framebuffer *fb,
+				   int crtc_x, int crtc_y,
+				   unsigned int crtc_w, unsigned int crtc_h,
+				   uint32_t src_x, uint32_t src_y,
+				   uint32_t src_w, uint32_t src_h);
+int drm_atomic_helper_disable_plane(struct drm_plane *plane);
+int drm_atomic_helper_set_config(struct drm_mode_set *set);
+
+int drm_atomic_helper_crtc_set_property(struct drm_crtc *crtc,
+					struct drm_property *property,
+					uint64_t val);
+int drm_atomic_helper_plane_set_property(struct drm_plane *plane,
+					 struct drm_property *property,
+					 uint64_t val);
+int drm_atomic_helper_connector_set_property(struct drm_connector *connector,
+					     struct drm_property *property,
+					     uint64_t val);
+int drm_atomic_helper_page_flip(struct drm_crtc *crtc,
+				struct drm_framebuffer *fb,
+				struct drm_pending_vblank_event *event,
+				uint32_t flags);
+
+/* default implementations for state handling */
+void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc);
+struct drm_crtc_state *
+drm_atomic_helper_crtc_duplicate_state(struct drm_crtc *crtc);
+void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
+					  struct drm_crtc_state *state);
+
+void drm_atomic_helper_plane_reset(struct drm_plane *plane);
+struct drm_plane_state *
+drm_atomic_helper_plane_duplicate_state(struct drm_plane *plane);
+void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
+					   struct drm_plane_state *state);
+
+void drm_atomic_helper_connector_reset(struct drm_connector *connector);
+struct drm_connector_state *
+drm_atomic_helper_connector_duplicate_state(struct drm_connector *connector);
+void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
+					       struct drm_connector_state *state);
+
+/**
+ * drm_atomic_crtc_for_each_plane - iterate over planes currently attached to CRTC
+ * @plane: the loop cursor
+ * @crtc: the crtc whose planes are iterated
+ *
+ * This iterates over the current state, useful (for example) when applying
+ * atomic state after it has been checked and swapped. To iterate over the
+ * planes which *will* be attached (for ->atomic_check()) see
+ * drm_crtc_for_each_pending_plane()
+ */
+#define drm_atomic_crtc_for_each_plane(plane, crtc) \
+	drm_for_each_plane_mask(plane, (crtc)->dev, (crtc)->state->plane_mask)
+
+/**
+ * drm_crtc_atomic_state_for_each_plane - iterate over attached planes in new state
+ * @plane: the loop cursor
+ * @crtc_state: the incoming crtc-state
+ *
+ * Similar to drm_crtc_for_each_plane(), but iterates the planes that will be
+ * attached if the specified state is applied. Useful during (for example)
+ * ->atomic_check() operations, to validate the incoming state
+ */
+#define drm_atomic_crtc_state_for_each_plane(plane, crtc_state) \
+	drm_for_each_plane_mask(plane, (crtc_state)->state->dev, (crtc_state)->plane_mask)
+
+#endif /* DRM_ATOMIC_HELPER_H_ */
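
Most of these helpers slot directly into the funcs tables extended later in this series: drivers point the legacy entry points at the atomic helpers and the reset/duplicate/destroy hooks at the default state handlers. A hedged sketch for a plane (the struct name is illustrative):

	#include <drm/drm_atomic_helper.h>
	#include <drm/drm_crtc.h>

	static const struct drm_plane_funcs my_plane_funcs = {
		/* legacy ioctls routed through the atomic helpers */
		.update_plane		= drm_atomic_helper_update_plane,
		.disable_plane		= drm_atomic_helper_disable_plane,
		/* default software-state handling */
		.reset			= drm_atomic_helper_plane_reset,
		.atomic_duplicate_state	= drm_atomic_helper_plane_duplicate_state,
		.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
	};
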
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index c40070a92d6b..b86329813ad3 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -42,6 +42,7 @@ struct drm_object_properties;
42struct drm_file; 42struct drm_file;
43struct drm_clip_rect; 43struct drm_clip_rect;
44struct device_node; 44struct device_node;
45struct fence;
45 46
46#define DRM_MODE_OBJECT_CRTC 0xcccccccc 47#define DRM_MODE_OBJECT_CRTC 0xcccccccc
47#define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0 48#define DRM_MODE_OBJECT_CONNECTOR 0xc0c0c0c0
@@ -136,14 +137,22 @@ struct drm_display_info {
136 u8 cea_rev; 137 u8 cea_rev;
137}; 138};
138 139
140/* data corresponds to displayid vend/prod/serial */
141struct drm_tile_group {
142 struct kref refcount;
143 struct drm_device *dev;
144 int id;
145 u8 group_data[8];
146};
147
139struct drm_framebuffer_funcs { 148struct drm_framebuffer_funcs {
140 /* note: use drm_framebuffer_remove() */ 149 /* note: use drm_framebuffer_remove() */
141 void (*destroy)(struct drm_framebuffer *framebuffer); 150 void (*destroy)(struct drm_framebuffer *framebuffer);
142 int (*create_handle)(struct drm_framebuffer *fb, 151 int (*create_handle)(struct drm_framebuffer *fb,
143 struct drm_file *file_priv, 152 struct drm_file *file_priv,
144 unsigned int *handle); 153 unsigned int *handle);
145 /** 154 /*
146 * Optinal callback for the dirty fb ioctl. 155 * Optional callback for the dirty fb ioctl.
147 * 156 *
148 * Userspace can notify the driver via this callback 157 * Userspace can notify the driver via this callback
149 * that a area of the framebuffer has changed and should 158 * that a area of the framebuffer has changed and should
@@ -196,7 +205,7 @@ struct drm_framebuffer {
196struct drm_property_blob { 205struct drm_property_blob {
197 struct drm_mode_object base; 206 struct drm_mode_object base;
198 struct list_head head; 207 struct list_head head;
199 unsigned int length; 208 size_t length;
200 unsigned char data[]; 209 unsigned char data[];
201}; 210};
202 211
@@ -215,7 +224,7 @@ struct drm_property {
215 uint64_t *values; 224 uint64_t *values;
216 struct drm_device *dev; 225 struct drm_device *dev;
217 226
218 struct list_head enum_blob_list; 227 struct list_head enum_list;
219}; 228};
220 229
221struct drm_crtc; 230struct drm_crtc;
@@ -224,19 +233,65 @@ struct drm_encoder;
224struct drm_pending_vblank_event; 233struct drm_pending_vblank_event;
225struct drm_plane; 234struct drm_plane;
226struct drm_bridge; 235struct drm_bridge;
236struct drm_atomic_state;
227 237
228/** 238/**
229 * drm_crtc_funcs - control CRTCs for a given device 239 * struct drm_crtc_state - mutable CRTC state
240 * @enable: whether the CRTC should be enabled, gates all other state
241 * @mode_changed: for use by helpers and drivers when computing state updates
242 * @plane_mask: bitmask of (1 << drm_plane_index(plane)) of attached planes
243 * @last_vblank_count: for helpers and drivers to capture the vblank of the
244 * update to ensure framebuffer cleanup isn't done too early
245 * @planes_changed: for use by helpers and drivers when computing state updates
246 * @adjusted_mode: for use by helpers and drivers to compute adjusted mode timings
247 * @mode: current mode timings
248 * @event: optional pointer to a DRM event to signal upon completion of the
249 * state update
250 * @state: backpointer to global drm_atomic_state
251 */
252struct drm_crtc_state {
253 bool enable;
254
255 /* computed state bits used by helpers and drivers */
256 bool planes_changed : 1;
257 bool mode_changed : 1;
258
259 /* attached planes bitmask:
260 * WARNING: transitional helpers do not maintain plane_mask so
261 * drivers not converted over to atomic helpers should not rely
262 * on plane_mask being accurate!
263 */
264 u32 plane_mask;
265
266 /* last_vblank_count: for vblank waits before cleanup */
267 u32 last_vblank_count;
268
269 /* adjusted_mode: for use by helpers and drivers */
270 struct drm_display_mode adjusted_mode;
271
272 struct drm_display_mode mode;
273
274 struct drm_pending_vblank_event *event;
275
276 struct drm_atomic_state *state;
277};
278
279/**
280 * struct drm_crtc_funcs - control CRTCs for a given device
230 * @save: save CRTC state 281 * @save: save CRTC state
231 * @restore: restore CRTC state 282 * @restore: restore CRTC state
232 * @reset: reset CRTC after state has been invalidated (e.g. resume) 283 * @reset: reset CRTC after state has been invalidated (e.g. resume)
233 * @cursor_set: setup the cursor 284 * @cursor_set: setup the cursor
285 * @cursor_set2: setup the cursor with hotspot, superseeds @cursor_set if set
234 * @cursor_move: move the cursor 286 * @cursor_move: move the cursor
235 * @gamma_set: specify color ramp for CRTC 287 * @gamma_set: specify color ramp for CRTC
236 * @destroy: deinit and free object 288 * @destroy: deinit and free object
237 * @set_property: called when a property is changed 289 * @set_property: called when a property is changed
238 * @set_config: apply a new CRTC configuration 290 * @set_config: apply a new CRTC configuration
239 * @page_flip: initiate a page flip 291 * @page_flip: initiate a page flip
292 * @atomic_duplicate_state: duplicate the atomic state for this CRTC
293 * @atomic_destroy_state: destroy an atomic state for this CRTC
294 * @atomic_set_property: set a property on an atomic state for this CRTC
240 * 295 *
241 * The drm_crtc_funcs structure is the central CRTC management structure 296 * The drm_crtc_funcs structure is the central CRTC management structure
242 * in the DRM. Each CRTC controls one or more connectors (note that the name 297 * in the DRM. Each CRTC controls one or more connectors (note that the name
@@ -287,16 +342,28 @@ struct drm_crtc_funcs {
287 342
288 int (*set_property)(struct drm_crtc *crtc, 343 int (*set_property)(struct drm_crtc *crtc,
289 struct drm_property *property, uint64_t val); 344 struct drm_property *property, uint64_t val);
345
346 /* atomic update handling */
347 struct drm_crtc_state *(*atomic_duplicate_state)(struct drm_crtc *crtc);
348 void (*atomic_destroy_state)(struct drm_crtc *crtc,
349 struct drm_crtc_state *state);
350 int (*atomic_set_property)(struct drm_crtc *crtc,
351 struct drm_crtc_state *state,
352 struct drm_property *property,
353 uint64_t val);
290}; 354};
291 355
292/** 356/**
293 * drm_crtc - central CRTC control structure 357 * struct drm_crtc - central CRTC control structure
294 * @dev: parent DRM device 358 * @dev: parent DRM device
359 * @port: OF node used by drm_of_find_possible_crtcs()
295 * @head: list management 360 * @head: list management
296 * @mutex: per-CRTC locking 361 * @mutex: per-CRTC locking
297 * @base: base KMS object for ID tracking etc. 362 * @base: base KMS object for ID tracking etc.
298 * @primary: primary plane for this CRTC 363 * @primary: primary plane for this CRTC
299 * @cursor: cursor plane for this CRTC 364 * @cursor: cursor plane for this CRTC
365 * @cursor_x: current x position of the cursor, used for universal cursor planes
366 * @cursor_y: current y position of the cursor, used for universal cursor planes
300 * @enabled: is this CRTC enabled? 367 * @enabled: is this CRTC enabled?
301 * @mode: current mode timings 368 * @mode: current mode timings
302 * @hwmode: mode timings as programmed to hw regs 369 * @hwmode: mode timings as programmed to hw regs
@@ -309,10 +376,13 @@ struct drm_crtc_funcs {
309 * @gamma_size: size of gamma ramp 376 * @gamma_size: size of gamma ramp
310 * @gamma_store: gamma ramp values 377 * @gamma_store: gamma ramp values
311 * @framedur_ns: precise frame timing 378 * @framedur_ns: precise frame timing
312 * @framedur_ns: precise line timing 379 * @linedur_ns: precise line timing
313 * @pixeldur_ns: precise pixel timing 380 * @pixeldur_ns: precise pixel timing
314 * @helper_private: mid-layer private data 381 * @helper_private: mid-layer private data
315 * @properties: property tracking for this CRTC 382 * @properties: property tracking for this CRTC
383 * @state: current atomic state for this CRTC
384 * @acquire_ctx: per-CRTC implicit acquire context used by atomic drivers for
385 * legacy ioctls
316 * 386 *
317 * Each CRTC may have one or more connectors associated with it. This structure 387 * Each CRTC may have one or more connectors associated with it. This structure
318 * allows the CRTC to be controlled. 388 * allows the CRTC to be controlled.
@@ -322,7 +392,7 @@ struct drm_crtc {
322 struct device_node *port; 392 struct device_node *port;
323 struct list_head head; 393 struct list_head head;
324 394
325 /** 395 /*
326 * crtc mutex 396 * crtc mutex
327 * 397 *
328 * This provides a read lock for the overall crtc state (mode, dpms 398 * This provides a read lock for the overall crtc state (mode, dpms
@@ -368,6 +438,8 @@ struct drm_crtc {
368 438
369 struct drm_object_properties properties; 439 struct drm_object_properties properties;
370 440
441 struct drm_crtc_state *state;
442
371 /* 443 /*
372 * For legacy crtc ioctls so that atomic drivers can get at the locking 444 * For legacy crtc ioctls so that atomic drivers can get at the locking
373 * acquire context. 445 * acquire context.
@@ -375,9 +447,22 @@ struct drm_crtc {
375 struct drm_modeset_acquire_ctx *acquire_ctx; 447 struct drm_modeset_acquire_ctx *acquire_ctx;
376}; 448};
377 449
450/**
451 * struct drm_connector_state - mutable connector state
452 * @crtc: CRTC to connect connector to, NULL if disabled
453 * @best_encoder: can be used by helpers and drivers to select the encoder
454 * @state: backpointer to global drm_atomic_state
455 */
456struct drm_connector_state {
457 struct drm_crtc *crtc; /* do not write directly, use drm_atomic_set_crtc_for_connector() */
458
459 struct drm_encoder *best_encoder;
460
461 struct drm_atomic_state *state;
462};
378 463
379/** 464/**
380 * drm_connector_funcs - control connectors on a given device 465 * struct drm_connector_funcs - control connectors on a given device
381 * @dpms: set power state (see drm_crtc_funcs above) 466 * @dpms: set power state (see drm_crtc_funcs above)
382 * @save: save connector state 467 * @save: save connector state
383 * @restore: restore connector state 468 * @restore: restore connector state
@@ -387,6 +472,9 @@ struct drm_crtc {
387 * @set_property: property for this connector may need an update 472 * @set_property: property for this connector may need an update
388 * @destroy: make object go away 473 * @destroy: make object go away
389 * @force: notify the driver that the connector is forced on 474 * @force: notify the driver that the connector is forced on
475 * @atomic_duplicate_state: duplicate the atomic state for this connector
476 * @atomic_destroy_state: destroy an atomic state for this connector
477 * @atomic_set_property: set a property on an atomic state for this connector
390 * 478 *
391 * Each CRTC may have one or more connectors attached to it. The functions 479 * Each CRTC may have one or more connectors attached to it. The functions
392 * below allow the core DRM code to control connectors, enumerate available modes, 480 * below allow the core DRM code to control connectors, enumerate available modes,
@@ -411,10 +499,19 @@ struct drm_connector_funcs {
411 uint64_t val); 499 uint64_t val);
412 void (*destroy)(struct drm_connector *connector); 500 void (*destroy)(struct drm_connector *connector);
413 void (*force)(struct drm_connector *connector); 501 void (*force)(struct drm_connector *connector);
502
503 /* atomic update handling */
504 struct drm_connector_state *(*atomic_duplicate_state)(struct drm_connector *connector);
505 void (*atomic_destroy_state)(struct drm_connector *connector,
506 struct drm_connector_state *state);
507 int (*atomic_set_property)(struct drm_connector *connector,
508 struct drm_connector_state *state,
509 struct drm_property *property,
510 uint64_t val);
414}; 511};
415 512
416/** 513/**
417 * drm_encoder_funcs - encoder controls 514 * struct drm_encoder_funcs - encoder controls
418 * @reset: reset state (e.g. at init or resume time) 515 * @reset: reset state (e.g. at init or resume time)
419 * @destroy: cleanup and free associated data 516 * @destroy: cleanup and free associated data
420 * 517 *
@@ -428,7 +525,7 @@ struct drm_encoder_funcs {
428#define DRM_CONNECTOR_MAX_ENCODER 3 525#define DRM_CONNECTOR_MAX_ENCODER 3
429 526
430/** 527/**
431 * drm_encoder - central DRM encoder structure 528 * struct drm_encoder - central DRM encoder structure
432 * @dev: parent DRM device 529 * @dev: parent DRM device
433 * @head: list management 530 * @head: list management
434 * @base: base KMS object 531 * @base: base KMS object
@@ -472,7 +569,7 @@ struct drm_encoder {
472#define MAX_ELD_BYTES 128 569#define MAX_ELD_BYTES 128
473 570
474/** 571/**
475 * drm_connector - central DRM connector control structure 572 * struct drm_connector - central DRM connector control structure
476 * @dev: parent DRM device 573 * @dev: parent DRM device
477 * @kdev: kernel device for sysfs attributes 574 * @kdev: kernel device for sysfs attributes
478 * @attr: sysfs attributes 575 * @attr: sysfs attributes
@@ -483,6 +580,7 @@ struct drm_encoder {
483 * @connector_type_id: index into connector type enum 580 * @connector_type_id: index into connector type enum
484 * @interlace_allowed: can this connector handle interlaced modes? 581 * @interlace_allowed: can this connector handle interlaced modes?
485 * @doublescan_allowed: can this connector handle doublescan? 582 * @doublescan_allowed: can this connector handle doublescan?
583 * @stereo_allowed: can this connector handle stereo modes?
486 * @modes: modes available on this connector (from fill_modes() + user) 584 * @modes: modes available on this connector (from fill_modes() + user)
487 * @status: one of the drm_connector_status enums (connected, not, or unknown) 585 * @status: one of the drm_connector_status enums (connected, not, or unknown)
488 * @probed_modes: list of modes derived directly from the display 586 * @probed_modes: list of modes derived directly from the display
@@ -490,10 +588,13 @@ struct drm_encoder {
490 * @funcs: connector control functions 588 * @funcs: connector control functions
491 * @edid_blob_ptr: DRM property containing EDID if present 589 * @edid_blob_ptr: DRM property containing EDID if present
492 * @properties: property tracking for this connector 590 * @properties: property tracking for this connector
591 * @path_blob_ptr: DRM blob property data for the DP MST path property
493 * @polled: a %DRM_CONNECTOR_POLL_<foo> value for core driven polling 592 * @polled: a %DRM_CONNECTOR_POLL_<foo> value for core driven polling
494 * @dpms: current dpms state 593 * @dpms: current dpms state
495 * @helper_private: mid-layer private data 594 * @helper_private: mid-layer private data
595 * @cmdline_mode: mode line parsed from the kernel cmdline for this connector
496 * @force: a %DRM_FORCE_<foo> state for forced mode sets 596 * @force: a %DRM_FORCE_<foo> state for forced mode sets
597 * @override_edid: has the EDID been overwritten through debugfs for testing?
497 * @encoder_ids: valid encoders for this connector 598 * @encoder_ids: valid encoders for this connector
498 * @encoder: encoder driving this connector, if any 599 * @encoder: encoder driving this connector, if any
499 * @eld: EDID-like data, if present 600 * @eld: EDID-like data, if present
@@ -503,6 +604,18 @@ struct drm_encoder {
503 * @video_latency: video latency info from ELD, if found 604 * @video_latency: video latency info from ELD, if found
504 * @audio_latency: audio latency info from ELD, if found 605 * @audio_latency: audio latency info from ELD, if found
505 * @null_edid_counter: track sinks that give us all zeros for the EDID 606 * @null_edid_counter: track sinks that give us all zeros for the EDID
607 * @bad_edid_counter: track sinks that give us an EDID with invalid checksum
608 * @debugfs_entry: debugfs directory for this connector
609 * @state: current atomic state for this connector
610 * @has_tile: is this connector connected to a tiled monitor
611 * @tile_group: tile group for the connected monitor
612 * @tile_is_single_monitor: whether the tile is one monitor housing
613 * @num_h_tile: number of horizontal tiles in the tile group
614 * @num_v_tile: number of vertical tiles in the tile group
615 * @tile_h_loc: horizontal location of this tile
616 * @tile_v_loc: vertical location of this tile
617 * @tile_h_size: horizontal size of this tile.
618 * @tile_v_size: vertical size of this tile.
506 * 619 *
507 * Each connector may be connected to one or more CRTCs, or may be clonable by 620 * Each connector may be connected to one or more CRTCs, or may be clonable by
508 * another connector if they can share a CRTC. Each connector also has a specific 621 * another connector if they can share a CRTC. Each connector also has a specific
@@ -538,6 +651,8 @@ struct drm_connector {
538 651
539 struct drm_property_blob *path_blob_ptr; 652 struct drm_property_blob *path_blob_ptr;
540 653
654 struct drm_property_blob *tile_blob_ptr;
655
541 uint8_t polled; /* DRM_CONNECTOR_POLL_* */ 656 uint8_t polled; /* DRM_CONNECTOR_POLL_* */
542 657
543 /* requested DPMS state */ 658 /* requested DPMS state */
@@ -563,14 +678,63 @@ struct drm_connector {
563 unsigned bad_edid_counter; 678 unsigned bad_edid_counter;
564 679
565 struct dentry *debugfs_entry; 680 struct dentry *debugfs_entry;
681
682 struct drm_connector_state *state;
683
684 /* DisplayID bits */
685 bool has_tile;
686 struct drm_tile_group *tile_group;
687 bool tile_is_single_monitor;
688
689 uint8_t num_h_tile, num_v_tile;
690 uint8_t tile_h_loc, tile_v_loc;
691 uint16_t tile_h_size, tile_v_size;
692};
693
694/**
695 * struct drm_plane_state - mutable plane state
696 * @crtc: currently bound CRTC, NULL if disabled
697 * @fb: currently bound framebuffer
698 * @fence: optional fence to wait for before scanning out @fb
699 * @crtc_x: left position of visible portion of plane on crtc
700 * @crtc_y: upper position of visible portion of plane on crtc
701 * @crtc_w: width of visible portion of plane on crtc
702 * @crtc_h: height of visible portion of plane on crtc
703 * @src_x: left position of visible portion of plane within
704 * plane (in 16.16)
705 * @src_y: upper position of visible portion of plane within
706 * plane (in 16.16)
707 * @src_w: width of visible portion of plane (in 16.16)
708 * @src_h: height of visible portion of plane (in 16.16)
709 * @state: backpointer to global drm_atomic_state
710 */
711struct drm_plane_state {
712 struct drm_crtc *crtc; /* do not write directly, use drm_atomic_set_crtc_for_plane() */
713 struct drm_framebuffer *fb; /* do not write directly, use drm_atomic_set_fb_for_plane() */
714 struct fence *fence;
715
716 /* Signed dest location allows it to be partially off screen */
717 int32_t crtc_x, crtc_y;
718 uint32_t crtc_w, crtc_h;
719
720 /* Source values are 16.16 fixed point */
721 uint32_t src_x, src_y;
722 uint32_t src_h, src_w;
723
724 struct drm_atomic_state *state;
566}; 725};
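
As the kerneldoc above notes, the src_* fields are 16.16 fixed point while the crtc_* fields stay in whole pixels. A minimal illustration (the foo_ helper is hypothetical) of filling a state for an unscaled 1920x1080 scanout:

        static void foo_fill_fullscreen_state(struct drm_plane_state *state)
        {
                /* source rectangle in 16.16 fixed point */
                state->src_x = 0;
                state->src_y = 0;
                state->src_w = 1920 << 16;
                state->src_h = 1080 << 16;

                /* destination rectangle in whole CRTC pixels */
                state->crtc_x = 0;
                state->crtc_y = 0;
                state->crtc_w = 1920;
                state->crtc_h = 1080;
        }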
567 726
727
568/** 728/**
569 * drm_plane_funcs - driver plane control functions 729 * struct drm_plane_funcs - driver plane control functions
570 * @update_plane: update the plane configuration 730 * @update_plane: update the plane configuration
571 * @disable_plane: shut down the plane 731 * @disable_plane: shut down the plane
572 * @destroy: clean up plane resources 732 * @destroy: clean up plane resources
733 * @reset: reset plane after state has been invalidated (e.g. resume)
573 * @set_property: called when a property is changed 734 * @set_property: called when a property is changed
735 * @atomic_duplicate_state: duplicate the atomic state for this plane
736 * @atomic_destroy_state: destroy an atomic state for this plane
737 * @atomic_set_property: set a property on an atomic state for this plane
574 */ 738 */
575struct drm_plane_funcs { 739struct drm_plane_funcs {
576 int (*update_plane)(struct drm_plane *plane, 740 int (*update_plane)(struct drm_plane *plane,
@@ -585,6 +749,15 @@ struct drm_plane_funcs {
585 749
586 int (*set_property)(struct drm_plane *plane, 750 int (*set_property)(struct drm_plane *plane,
587 struct drm_property *property, uint64_t val); 751 struct drm_property *property, uint64_t val);
752
753 /* atomic update handling */
754 struct drm_plane_state *(*atomic_duplicate_state)(struct drm_plane *plane);
755 void (*atomic_destroy_state)(struct drm_plane *plane,
756 struct drm_plane_state *state);
757 int (*atomic_set_property)(struct drm_plane *plane,
758 struct drm_plane_state *state,
759 struct drm_property *property,
760 uint64_t val);
588}; 761};
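
A hedged sketch of how a driver with no private plane state might back the three new hooks; the foo_ names are hypothetical and the shape mirrors what the helpers introduced in this series do. Taking an fb reference across duplicate/destroy is the important detail:

        static struct drm_plane_state *
        foo_plane_duplicate_state(struct drm_plane *plane)
        {
                struct drm_plane_state *state;

                if (!plane->state)
                        return NULL;

                state = kmemdup(plane->state, sizeof(*state), GFP_KERNEL);
                if (state && state->fb)
                        drm_framebuffer_reference(state->fb);

                return state;
        }

        static void foo_plane_destroy_state(struct drm_plane *plane,
                                            struct drm_plane_state *state)
        {
                if (state->fb)
                        drm_framebuffer_unreference(state->fb);
                kfree(state);
        }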
589 762
590enum drm_plane_type { 763enum drm_plane_type {
@@ -594,7 +767,7 @@ enum drm_plane_type {
594}; 767};
595 768
596/** 769/**
597 * drm_plane - central DRM plane control structure 770 * struct drm_plane - central DRM plane control structure
598 * @dev: DRM device this plane belongs to 771 * @dev: DRM device this plane belongs to
599 * @head: for list management 772 * @head: for list management
600 * @base: base mode object 773 * @base: base mode object
@@ -603,14 +776,19 @@ enum drm_plane_type {
603 * @format_count: number of formats supported 776 * @format_count: number of formats supported
604 * @crtc: currently bound CRTC 777 * @crtc: currently bound CRTC
605 * @fb: currently bound fb 778 * @fb: currently bound fb
779 * @old_fb: Temporary tracking of the old fb while a modeset is ongoing. Used by
780 * drm_mode_set_config_internal() to implement correct refcounting.
606 * @funcs: helper functions 781 * @funcs: helper functions
607 * @properties: property tracking for this plane 782 * @properties: property tracking for this plane
608 * @type: type of plane (overlay, primary, cursor) 783 * @type: type of plane (overlay, primary, cursor)
784 * @state: current atomic state for this plane
609 */ 785 */
610struct drm_plane { 786struct drm_plane {
611 struct drm_device *dev; 787 struct drm_device *dev;
612 struct list_head head; 788 struct list_head head;
613 789
790 struct drm_modeset_lock mutex;
791
614 struct drm_mode_object base; 792 struct drm_mode_object base;
615 793
616 uint32_t possible_crtcs; 794 uint32_t possible_crtcs;
@@ -620,8 +798,6 @@ struct drm_plane {
620 struct drm_crtc *crtc; 798 struct drm_crtc *crtc;
621 struct drm_framebuffer *fb; 799 struct drm_framebuffer *fb;
622 800
623 /* Temporary tracking of the old fb while a modeset is ongoing. Used
624 * by drm_mode_set_config_internal to implement correct refcounting. */
625 struct drm_framebuffer *old_fb; 801 struct drm_framebuffer *old_fb;
626 802
627 const struct drm_plane_funcs *funcs; 803 const struct drm_plane_funcs *funcs;
@@ -629,10 +805,14 @@ struct drm_plane {
629 struct drm_object_properties properties; 805 struct drm_object_properties properties;
630 806
631 enum drm_plane_type type; 807 enum drm_plane_type type;
808
809 void *helper_private;
810
811 struct drm_plane_state *state;
632}; 812};
633 813
634/** 814/**
635 * drm_bridge_funcs - drm_bridge control functions 815 * struct drm_bridge_funcs - drm_bridge control functions
636 * @mode_fixup: Try to fixup (or reject entirely) proposed mode for this bridge 816 * @mode_fixup: Try to fixup (or reject entirely) proposed mode for this bridge
637 * @disable: Called right before encoder prepare, disables the bridge 817 * @disable: Called right before encoder prepare, disables the bridge
638 * @post_disable: Called right after encoder prepare, for lockstepped disable 818 * @post_disable: Called right after encoder prepare, for lockstepped disable
@@ -656,7 +836,7 @@ struct drm_bridge_funcs {
656}; 836};
657 837
658/** 838/**
659 * drm_bridge - central DRM bridge control structure 839 * struct drm_bridge - central DRM bridge control structure
660 * @dev: DRM device this bridge belongs to 840 * @dev: DRM device this bridge belongs to
661 * @head: list management 841 * @head: list management
662 * @base: base mode object 842 * @base: base mode object
@@ -674,8 +854,35 @@ struct drm_bridge {
674}; 854};
675 855
676/** 856/**
677 * drm_mode_set - new values for a CRTC config change 857 * struct drm_atomic_state - the global state object for atomic updates
678 * @head: list management 858 * @dev: parent DRM device
859 * @flags: state flags like async update
860 * @planes: pointer to array of plane pointers
861 * @plane_states: pointer to array of plane state pointers
862 * @crtcs: pointer to array of CRTC pointers
863 * @crtc_states: pointer to array of CRTC state pointers
864 * @num_connector: size of the @connectors and @connector_states arrays
865 * @connectors: pointer to array of connector pointers
866 * @connector_states: pointer to array of connector state pointers
867 * @acquire_ctx: acquire context for this atomic modeset state update
868 */
869struct drm_atomic_state {
870 struct drm_device *dev;
871 uint32_t flags;
872 struct drm_plane **planes;
873 struct drm_plane_state **plane_states;
874 struct drm_crtc **crtcs;
875 struct drm_crtc_state **crtc_states;
876 int num_connector;
877 struct drm_connector **connectors;
878 struct drm_connector_state **connector_states;
879
880 struct drm_modeset_acquire_ctx *acquire_ctx;
881};
882
883
884/**
885 * struct drm_mode_set - new values for a CRTC config change
679 * @fb: framebuffer to use for new config 886 * @fb: framebuffer to use for new config
680 * @crtc: CRTC whose configuration we're about to change 887 * @crtc: CRTC whose configuration we're about to change
681 * @mode: mode timings to use 888 * @mode: mode timings to use
@@ -705,6 +912,9 @@ struct drm_mode_set {
705 * struct drm_mode_config_funcs - basic driver provided mode setting functions 912 * struct drm_mode_config_funcs - basic driver provided mode setting functions
706 * @fb_create: create a new framebuffer object 913 * @fb_create: create a new framebuffer object
707 * @output_poll_changed: function to handle output configuration changes 914 * @output_poll_changed: function to handle output configuration changes
915 * @atomic_check: check whether a given atomic state update is possible
916 * @atomic_commit: commit an atomic state update previously verified with
917 * atomic_check()
708 * 918 *
709 * Some global (i.e. not per-CRTC, connector, etc) mode setting functions that 919 * Some global (i.e. not per-CRTC, connector, etc) mode setting functions that
710 * involve drivers. 920 * involve drivers.
@@ -714,13 +924,20 @@ struct drm_mode_config_funcs {
714 struct drm_file *file_priv, 924 struct drm_file *file_priv,
715 struct drm_mode_fb_cmd2 *mode_cmd); 925 struct drm_mode_fb_cmd2 *mode_cmd);
716 void (*output_poll_changed)(struct drm_device *dev); 926 void (*output_poll_changed)(struct drm_device *dev);
927
928 int (*atomic_check)(struct drm_device *dev,
929 struct drm_atomic_state *a);
930 int (*atomic_commit)(struct drm_device *dev,
931 struct drm_atomic_state *a,
932 bool async);
717}; 933};
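
Wiring the new entry points is then just two more initializers in the driver's funcs table; a sketch, with foo_fb_create, foo_output_poll_changed and foo_atomic_commit hypothetical:

        static const struct drm_mode_config_funcs foo_mode_config_funcs = {
                .fb_create = foo_fb_create,
                .output_poll_changed = foo_output_poll_changed,
                /* atomic entry points added by this series */
                .atomic_check = foo_atomic_check,
                .atomic_commit = foo_atomic_commit,
        };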
718 934
719/** 935/**
720 * drm_mode_group - group of mode setting resources for potential sub-grouping 936 * struct drm_mode_group - group of mode setting resources for potential sub-grouping
721 * @num_crtcs: CRTC count 937 * @num_crtcs: CRTC count
722 * @num_encoders: encoder count 938 * @num_encoders: encoder count
723 * @num_connectors: connector count 939 * @num_connectors: connector count
940 * @num_bridges: bridge count
724 * @id_list: list of KMS object IDs in this group 941 * @id_list: list of KMS object IDs in this group
725 * 942 *
726 * Currently this simply tracks the global mode setting state. But in the 943 * Currently this simply tracks the global mode setting state. But in the
@@ -740,10 +957,14 @@ struct drm_mode_group {
740}; 957};
741 958
742/** 959/**
743 * drm_mode_config - Mode configuration control structure 960 * struct drm_mode_config - Mode configuration control structure
744 * @mutex: mutex protecting KMS related lists and structures 961 * @mutex: mutex protecting KMS related lists and structures
962 * @connection_mutex: ww mutex protecting connector state and routing
963 * @acquire_ctx: global implicit acquire context used by atomic drivers for
964 * legacy ioctls
745 * @idr_mutex: mutex for KMS ID allocation and management 965 * @idr_mutex: mutex for KMS ID allocation and management
746 * @crtc_idr: main KMS ID tracking object 966 * @crtc_idr: main KMS ID tracking object
967 * @fb_lock: mutex to protect fb state and lists
747 * @num_fb: number of fbs available 968 * @num_fb: number of fbs available
748 * @fb_list: list of framebuffers available 969 * @fb_list: list of framebuffers available
749 * @num_connector: number of connectors on this device 970 * @num_connector: number of connectors on this device
@@ -752,17 +973,28 @@ struct drm_mode_group {
752 * @bridge_list: list of bridge objects 973 * @bridge_list: list of bridge objects
753 * @num_encoder: number of encoders on this device 974 * @num_encoder: number of encoders on this device
754 * @encoder_list: list of encoder objects 975 * @encoder_list: list of encoder objects
976 * @num_overlay_plane: number of overlay planes on this device
977 * @num_total_plane: number of universal (i.e. with primary/cursor) planes on this device
978 * @plane_list: list of plane objects
755 * @num_crtc: number of CRTCs on this device 979 * @num_crtc: number of CRTCs on this device
756 * @crtc_list: list of CRTC objects 980 * @crtc_list: list of CRTC objects
981 * @property_list: list of property objects
757 * @min_width: minimum pixel width on this device 982 * @min_width: minimum pixel width on this device
758 * @min_height: minimum pixel height on this device 983 * @min_height: minimum pixel height on this device
759 * @max_width: maximum pixel width on this device 984 * @max_width: maximum pixel width on this device
760 * @max_height: maximum pixel height on this device 985 * @max_height: maximum pixel height on this device
761 * @funcs: core driver provided mode setting functions 986 * @funcs: core driver provided mode setting functions
762 * @fb_base: base address of the framebuffer 987 * @fb_base: base address of the framebuffer
763 * @poll_enabled: track polling status for this device 988 * @poll_enabled: track polling support for this device
989 * @poll_running: track polling status for this device
764 * @output_poll_work: delayed work for polling in process context 990 * @output_poll_work: delayed work for polling in process context
991 * @property_blob_list: list of all the blob property objects
765 * @*_property: core property tracking 992 * @*_property: core property tracking
993 * @preferred_depth: preferred RGB pixel depth, used by fb helpers
994 * @prefer_shadow: hint to userspace to prefer shadow-fb rendering
995 * @async_page_flip: does this device support async flips on the primary plane?
996 * @cursor_width: hint to userspace for max cursor width
997 * @cursor_height: hint to userspace for max cursor height
766 * 998 *
767 * Core mode resource tracking structure. All CRTC, encoders, and connectors 999 * Core mode resource tracking structure. All CRTC, encoders, and connectors
768 * enumerated by the driver are added here, as are global properties. Some 1000 * enumerated by the driver are added here, as are global properties. Some
@@ -774,16 +1006,10 @@ struct drm_mode_config {
774 struct drm_modeset_acquire_ctx *acquire_ctx; /* for legacy _lock_all() / _unlock_all() */ 1006 struct drm_modeset_acquire_ctx *acquire_ctx; /* for legacy _lock_all() / _unlock_all() */
775 struct mutex idr_mutex; /* for IDR management */ 1007 struct mutex idr_mutex; /* for IDR management */
776 struct idr crtc_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */ 1008 struct idr crtc_idr; /* use this idr for all IDs, fb, crtc, connector, modes - just makes life easier */
1009 struct idr tile_idr; /* use this idr for tile group IDs */
777 /* this is limited to one for now */ 1010 /* this is limited to one for now */
778 1011
779 1012 struct mutex fb_lock; /* proctects global and per-file fb lists */
780 /**
781 * fb_lock - mutex to protect fb state
782 *
783 * Besides the global fb list his also protects the fbs list in the
784 * file_priv
785 */
786 struct mutex fb_lock;
787 int num_fb; 1013 int num_fb;
788 struct list_head fb_list; 1014 struct list_head fb_list;
789 1015
@@ -824,6 +1050,7 @@ struct drm_mode_config {
824 struct drm_property *edid_property; 1050 struct drm_property *edid_property;
825 struct drm_property *dpms_property; 1051 struct drm_property *dpms_property;
826 struct drm_property *path_property; 1052 struct drm_property *path_property;
1053 struct drm_property *tile_property;
827 struct drm_property *plane_type_property; 1054 struct drm_property *plane_type_property;
828 struct drm_property *rotation_property; 1055 struct drm_property *rotation_property;
829 1056
@@ -851,6 +1078,10 @@ struct drm_mode_config {
851 struct drm_property *aspect_ratio_property; 1078 struct drm_property *aspect_ratio_property;
852 struct drm_property *dirty_info_property; 1079 struct drm_property *dirty_info_property;
853 1080
1081 /* properties for virtual machine layout */
1082 struct drm_property *suggested_x_property;
1083 struct drm_property *suggested_y_property;
1084
854 /* dumb ioctl parameters */ 1085 /* dumb ioctl parameters */
855 uint32_t preferred_depth, prefer_shadow; 1086 uint32_t preferred_depth, prefer_shadow;
856 1087
@@ -861,6 +1092,19 @@ struct drm_mode_config {
861 uint32_t cursor_width, cursor_height; 1092 uint32_t cursor_width, cursor_height;
862}; 1093};
863 1094
1095/**
1096 * drm_for_each_plane_mask - iterate over planes specified by bitmask
1097 * @plane: the loop cursor
1098 * @dev: the DRM device
1099 * @plane_mask: bitmask of plane indices
1100 *
1101 * Iterate over all planes specified by bitmask.
1102 */
1103#define drm_for_each_plane_mask(plane, dev, plane_mask) \
1104 list_for_each_entry((plane), &(dev)->mode_config.plane_list, head) \
1105 if ((plane_mask) & (1 << drm_plane_index(plane)))
1106
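Usage sketch for the new iterator, assuming the mask was built from drm_plane_index() bits as the macro expects; foo_disable_planes is hypothetical:

        static void foo_disable_planes(struct drm_device *dev,
                                       unsigned int plane_mask)
        {
                struct drm_plane *plane;

                drm_for_each_plane_mask(plane, dev, plane_mask)
                        plane->funcs->disable_plane(plane);
        }
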
1107
864#define obj_to_crtc(x) container_of(x, struct drm_crtc, base) 1108#define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
865#define obj_to_connector(x) container_of(x, struct drm_connector, base) 1109#define obj_to_connector(x) container_of(x, struct drm_connector, base)
866#define obj_to_encoder(x) container_of(x, struct drm_encoder, base) 1110#define obj_to_encoder(x) container_of(x, struct drm_encoder, base)
@@ -880,9 +1124,6 @@ extern int drm_crtc_init_with_planes(struct drm_device *dev,
880 struct drm_plane *primary, 1124 struct drm_plane *primary,
881 struct drm_plane *cursor, 1125 struct drm_plane *cursor,
882 const struct drm_crtc_funcs *funcs); 1126 const struct drm_crtc_funcs *funcs);
883extern int drm_crtc_init(struct drm_device *dev,
884 struct drm_crtc *crtc,
885 const struct drm_crtc_funcs *funcs);
886extern void drm_crtc_cleanup(struct drm_crtc *crtc); 1127extern void drm_crtc_cleanup(struct drm_crtc *crtc);
887extern unsigned int drm_crtc_index(struct drm_crtc *crtc); 1128extern unsigned int drm_crtc_index(struct drm_crtc *crtc);
888 1129
@@ -978,9 +1219,10 @@ extern void drm_mode_config_reset(struct drm_device *dev);
978extern void drm_mode_config_cleanup(struct drm_device *dev); 1219extern void drm_mode_config_cleanup(struct drm_device *dev);
979 1220
980extern int drm_mode_connector_set_path_property(struct drm_connector *connector, 1221extern int drm_mode_connector_set_path_property(struct drm_connector *connector,
981 char *path); 1222 const char *path);
1223int drm_mode_connector_set_tile_property(struct drm_connector *connector);
982extern int drm_mode_connector_update_edid_property(struct drm_connector *connector, 1224extern int drm_mode_connector_update_edid_property(struct drm_connector *connector,
983 struct edid *edid); 1225 const struct edid *edid);
984 1226
985static inline bool drm_property_type_is(struct drm_property *property, 1227static inline bool drm_property_type_is(struct drm_property *property,
986 uint32_t type) 1228 uint32_t type)
@@ -1041,11 +1283,13 @@ extern void drm_property_destroy(struct drm_device *dev, struct drm_property *pr
1041extern int drm_property_add_enum(struct drm_property *property, int index, 1283extern int drm_property_add_enum(struct drm_property *property, int index,
1042 uint64_t value, const char *name); 1284 uint64_t value, const char *name);
1043extern int drm_mode_create_dvi_i_properties(struct drm_device *dev); 1285extern int drm_mode_create_dvi_i_properties(struct drm_device *dev);
1044extern int drm_mode_create_tv_properties(struct drm_device *dev, int num_formats, 1286extern int drm_mode_create_tv_properties(struct drm_device *dev,
1045 char *formats[]); 1287 unsigned int num_modes,
1288 char *modes[]);
1046extern int drm_mode_create_scaling_mode_property(struct drm_device *dev); 1289extern int drm_mode_create_scaling_mode_property(struct drm_device *dev);
1047extern int drm_mode_create_aspect_ratio_property(struct drm_device *dev); 1290extern int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
1048extern int drm_mode_create_dirty_info_property(struct drm_device *dev); 1291extern int drm_mode_create_dirty_info_property(struct drm_device *dev);
1292extern int drm_mode_create_suggested_offset_properties(struct drm_device *dev);
1049 1293
1050extern int drm_mode_connector_attach_encoder(struct drm_connector *connector, 1294extern int drm_mode_connector_attach_encoder(struct drm_connector *connector,
1051 struct drm_encoder *encoder); 1295 struct drm_encoder *encoder);
@@ -1113,6 +1357,13 @@ extern void drm_set_preferred_mode(struct drm_connector *connector,
1113extern int drm_edid_header_is_valid(const u8 *raw_edid); 1357extern int drm_edid_header_is_valid(const u8 *raw_edid);
1114extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid); 1358extern bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid);
1115extern bool drm_edid_is_valid(struct edid *edid); 1359extern bool drm_edid_is_valid(struct edid *edid);
1360
1361extern struct drm_tile_group *drm_mode_create_tile_group(struct drm_device *dev,
1362 char topology[8]);
1363extern struct drm_tile_group *drm_mode_get_tile_group(struct drm_device *dev,
1364 char topology[8]);
1365extern void drm_mode_put_tile_group(struct drm_device *dev,
1366 struct drm_tile_group *tg);
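
A hedged sketch of how a connector probe might use these: look up the 8-byte topology id parsed from the sink's DisplayID tiled-display block, create a group on first sight, and publish it via the new tile property. The failure convention is assumed and foo_attach_tile_group is hypothetical:

        static int foo_attach_tile_group(struct drm_connector *connector,
                                         char topology[8])
        {
                struct drm_device *dev = connector->dev;
                struct drm_tile_group *tg;

                tg = drm_mode_get_tile_group(dev, topology);
                if (!tg)
                        tg = drm_mode_create_tile_group(dev, topology);
                if (!tg)
                        return -ENOMEM;

                connector->has_tile = true;
                connector->tile_group = tg;

                return drm_mode_connector_set_tile_property(connector);
        }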
1116struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev, 1367struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
1117 int hsize, int vsize, int fresh, 1368 int hsize, int vsize, int fresh,
1118 bool rb); 1369 bool rb);
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index a3d75fefd010..7adbb65ea8ae 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -68,6 +68,7 @@ struct drm_crtc_helper_funcs {
68 int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode, 68 int (*mode_set)(struct drm_crtc *crtc, struct drm_display_mode *mode,
69 struct drm_display_mode *adjusted_mode, int x, int y, 69 struct drm_display_mode *adjusted_mode, int x, int y,
70 struct drm_framebuffer *old_fb); 70 struct drm_framebuffer *old_fb);
71 void (*mode_set_nofb)(struct drm_crtc *crtc);
71 72
72 /* Move the crtc on the current fb to the given position *optional* */ 73 /* Move the crtc on the current fb to the given position *optional* */
73 int (*mode_set_base)(struct drm_crtc *crtc, int x, int y, 74 int (*mode_set_base)(struct drm_crtc *crtc, int x, int y,
@@ -81,6 +82,12 @@ struct drm_crtc_helper_funcs {
81 82
82 /* disable crtc when not in use - more explicit than dpms off */ 83 /* disable crtc when not in use - more explicit than dpms off */
83 void (*disable)(struct drm_crtc *crtc); 84 void (*disable)(struct drm_crtc *crtc);
85
86 /* atomic helpers */
87 int (*atomic_check)(struct drm_crtc *crtc,
88 struct drm_crtc_state *state);
89 void (*atomic_begin)(struct drm_crtc *crtc);
90 void (*atomic_flush)(struct drm_crtc *crtc);
84}; 91};
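
The intent of the two bracketing hooks is that all plane updates for one CRTC land in the same frame; a sketch of the shape only (the hardware behavior in the comments is hypothetical, other callbacks omitted):

        static void foo_crtc_atomic_begin(struct drm_crtc *crtc)
        {
                /* e.g. arm a latch so register writes do not take effect yet */
        }

        static void foo_crtc_atomic_flush(struct drm_crtc *crtc)
        {
                /* e.g. release the latch; queued plane updates hit one frame */
        }

        static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
                .atomic_begin = foo_crtc_atomic_begin,
                .atomic_flush = foo_crtc_atomic_flush,
        };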
85 92
86/** 93/**
@@ -161,6 +168,12 @@ static inline void drm_connector_helper_add(struct drm_connector *connector,
161 168
162extern void drm_helper_resume_force_mode(struct drm_device *dev); 169extern void drm_helper_resume_force_mode(struct drm_device *dev);
163 170
171int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
172 struct drm_display_mode *adjusted_mode, int x, int y,
173 struct drm_framebuffer *old_fb);
174int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
175 struct drm_framebuffer *old_fb);
176
164/* drm_probe_helper.c */ 177/* drm_probe_helper.c */
165extern int drm_helper_probe_single_connector_modes(struct drm_connector 178extern int drm_helper_probe_single_connector_modes(struct drm_connector
166 *connector, uint32_t maxX, 179 *connector, uint32_t maxX,
diff --git a/include/drm/drm_displayid.h b/include/drm/drm_displayid.h
new file mode 100644
index 000000000000..623b4e98e748
--- /dev/null
+++ b/include/drm/drm_displayid.h
@@ -0,0 +1,76 @@
1/*
2 * Copyright © 2014 Red Hat Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22#ifndef DRM_DISPLAYID_H
23#define DRM_DISPLAYID_H
24
25#define DATA_BLOCK_PRODUCT_ID 0x00
26#define DATA_BLOCK_DISPLAY_PARAMETERS 0x01
27#define DATA_BLOCK_COLOR_CHARACTERISTICS 0x02
28#define DATA_BLOCK_TYPE_1_DETAILED_TIMING 0x03
29#define DATA_BLOCK_TYPE_2_DETAILED_TIMING 0x04
30#define DATA_BLOCK_TYPE_3_SHORT_TIMING 0x05
31#define DATA_BLOCK_TYPE_4_DMT_TIMING 0x06
32#define DATA_BLOCK_VESA_TIMING 0x07
33#define DATA_BLOCK_CEA_TIMING 0x08
34#define DATA_BLOCK_VIDEO_TIMING_RANGE 0x09
35#define DATA_BLOCK_PRODUCT_SERIAL_NUMBER 0x0a
36#define DATA_BLOCK_GP_ASCII_STRING 0x0b
37#define DATA_BLOCK_DISPLAY_DEVICE_DATA 0x0c
38#define DATA_BLOCK_INTERFACE_POWER_SEQUENCING 0x0d
39#define DATA_BLOCK_TRANSFER_CHARACTERISTICS 0x0e
40#define DATA_BLOCK_DISPLAY_INTERFACE 0x0f
41#define DATA_BLOCK_STEREO_DISPLAY_INTERFACE 0x10
42#define DATA_BLOCK_TILED_DISPLAY 0x12
43
44#define DATA_BLOCK_VENDOR_SPECIFIC 0x7f
45
46#define PRODUCT_TYPE_EXTENSION 0
47#define PRODUCT_TYPE_TEST 1
48#define PRODUCT_TYPE_PANEL 2
49#define PRODUCT_TYPE_MONITOR 3
50#define PRODUCT_TYPE_TV 4
51#define PRODUCT_TYPE_REPEATER 5
52#define PRODUCT_TYPE_DIRECT_DRIVE 6
53
54struct displayid_hdr {
55 u8 rev;
56 u8 bytes;
57 u8 prod_id;
58 u8 ext_count;
59} __packed;
60
61struct displayid_block {
62 u8 tag;
63 u8 rev;
64 u8 num_bytes;
65} __packed;
66
67struct displayid_tiled_block {
68 struct displayid_block base;
69 u8 tile_cap;
70 u8 topo[3];
71 u8 tile_size[4];
72 u8 tile_pixel_bezel[5];
73 u8 topology_id[8];
74} __packed;
75
76#endif
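
A hedged sketch of walking a DisplayID section with these structures, given a buffer that starts at the displayid_hdr; checksum and bounds checking are elided and foo_find_tiled_block is hypothetical:

        static const struct displayid_block *
        foo_find_tiled_block(const u8 *displayid)
        {
                const struct displayid_hdr *hdr = (const void *)displayid;
                int offset = sizeof(*hdr);

                while (offset < sizeof(*hdr) + hdr->bytes) {
                        const struct displayid_block *block =
                                (const void *)(displayid + offset);

                        if (block->tag == DATA_BLOCK_TILED_DISPLAY)
                                return block;

                        offset += sizeof(*block) + block->num_bytes;
                }

                return NULL;
        }
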
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 9305c718d789..11f8c84f98ce 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -303,7 +303,8 @@
303#define DP_TEST_CRC_B_CB 0x244 303#define DP_TEST_CRC_B_CB 0x244
304 304
305#define DP_TEST_SINK_MISC 0x246 305#define DP_TEST_SINK_MISC 0x246
306#define DP_TEST_CRC_SUPPORTED (1 << 5) 306# define DP_TEST_CRC_SUPPORTED (1 << 5)
307# define DP_TEST_COUNT_MASK 0x7
307 308
308#define DP_TEST_RESPONSE 0x260 309#define DP_TEST_RESPONSE 0x260
309# define DP_TEST_ACK (1 << 0) 310# define DP_TEST_ACK (1 << 0)
@@ -313,7 +314,7 @@
313#define DP_TEST_EDID_CHECKSUM 0x261 314#define DP_TEST_EDID_CHECKSUM 0x261
314 315
315#define DP_TEST_SINK 0x270 316#define DP_TEST_SINK 0x270
316#define DP_TEST_SINK_START (1 << 0) 317# define DP_TEST_SINK_START (1 << 0)
317 318
318#define DP_PAYLOAD_TABLE_UPDATE_STATUS 0x2c0 /* 1.2 MST */ 319#define DP_PAYLOAD_TABLE_UPDATE_STATUS 0x2c0 /* 1.2 MST */
319# define DP_PAYLOAD_TABLE_UPDATED (1 << 0) 320# define DP_PAYLOAD_TABLE_UPDATED (1 << 0)
@@ -404,26 +405,6 @@
404#define MODE_I2C_READ 4 405#define MODE_I2C_READ 4
405#define MODE_I2C_STOP 8 406#define MODE_I2C_STOP 8
406 407
407/**
408 * struct i2c_algo_dp_aux_data - driver interface structure for i2c over dp
409 * aux algorithm
410 * @running: set by the algo indicating whether an i2c is ongoing or whether
411 * the i2c bus is quiescent
412 * @address: i2c target address for the currently ongoing transfer
413 * @aux_ch: driver callback to transfer a single byte of the i2c payload
414 */
415struct i2c_algo_dp_aux_data {
416 bool running;
417 u16 address;
418 int (*aux_ch) (struct i2c_adapter *adapter,
419 int mode, uint8_t write_byte,
420 uint8_t *read_byte);
421};
422
423int
424i2c_dp_aux_add_bus(struct i2c_adapter *adapter);
425
426
427#define DP_LINK_STATUS_SIZE 6 408#define DP_LINK_STATUS_SIZE 6
428bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE], 409bool drm_dp_channel_eq_ok(const u8 link_status[DP_LINK_STATUS_SIZE],
429 int lane_count); 410 int lane_count);
@@ -550,6 +531,7 @@ struct drm_dp_aux {
550 struct mutex hw_mutex; 531 struct mutex hw_mutex;
551 ssize_t (*transfer)(struct drm_dp_aux *aux, 532 ssize_t (*transfer)(struct drm_dp_aux *aux,
552 struct drm_dp_aux_msg *msg); 533 struct drm_dp_aux_msg *msg);
534 unsigned i2c_nack_count, i2c_defer_count;
553}; 535};
554 536
555ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset, 537ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index 338fc1053835..00c1da927245 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -28,7 +28,7 @@
28struct drm_dp_mst_branch; 28struct drm_dp_mst_branch;
29 29
30/** 30/**
31 * struct drm_dp_vcpi - Virtual Channel Payload Identifer 31 * struct drm_dp_vcpi - Virtual Channel Payload Identifier
32 * @vcpi: Virtual channel ID. 32 * @vcpi: Virtual channel ID.
33 * @pbn: Payload Bandwidth Number for this channel 33 * @pbn: Payload Bandwidth Number for this channel
34 * @aligned_pbn: PBN aligned with slot size 34 * @aligned_pbn: PBN aligned with slot size
@@ -92,6 +92,8 @@ struct drm_dp_mst_port {
92 struct drm_dp_vcpi vcpi; 92 struct drm_dp_vcpi vcpi;
93 struct drm_connector *connector; 93 struct drm_connector *connector;
94 struct drm_dp_mst_topology_mgr *mgr; 94 struct drm_dp_mst_topology_mgr *mgr;
95
96 struct edid *cached_edid; /* for DP logical ports - make tiling work */
95}; 97};
96 98
97/** 99/**
@@ -371,7 +373,7 @@ struct drm_dp_sideband_msg_tx {
371struct drm_dp_mst_topology_mgr; 373struct drm_dp_mst_topology_mgr;
372struct drm_dp_mst_topology_cbs { 374struct drm_dp_mst_topology_cbs {
373 /* create a connector for a port */ 375 /* create a connector for a port */
374 struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, char *path); 376 struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path);
375 void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr, 377 void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr,
376 struct drm_connector *connector); 378 struct drm_connector *connector);
377 void (*hotplug)(struct drm_dp_mst_topology_mgr *mgr); 379 void (*hotplug)(struct drm_dp_mst_topology_mgr *mgr);
@@ -474,7 +476,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
474int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled); 476int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled);
475 477
476 478
477enum drm_connector_status drm_dp_mst_detect_port(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); 479enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
478 480
479struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); 481struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
480 482
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index b96031d947a0..87d85e81d3a7 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -27,12 +27,14 @@
27 27
28#define EDID_LENGTH 128 28#define EDID_LENGTH 128
29#define DDC_ADDR 0x50 29#define DDC_ADDR 0x50
30#define DDC_ADDR2 0x52 /* E-DDC 1.2 - where DisplayID can hide */
30 31
31#define CEA_EXT 0x02 32#define CEA_EXT 0x02
32#define VTB_EXT 0x10 33#define VTB_EXT 0x10
33#define DI_EXT 0x40 34#define DI_EXT 0x40
34#define LS_EXT 0x50 35#define LS_EXT 0x50
35#define MI_EXT 0x60 36#define MI_EXT 0x60
37#define DISPLAYID_EXT 0x70
36 38
37struct est_timings { 39struct est_timings {
38 u8 t1; 40 u8 t1;
@@ -207,6 +209,61 @@ struct detailed_timing {
207#define DRM_EDID_HDMI_DC_30 (1 << 4) 209#define DRM_EDID_HDMI_DC_30 (1 << 4)
208#define DRM_EDID_HDMI_DC_Y444 (1 << 3) 210#define DRM_EDID_HDMI_DC_Y444 (1 << 3)
209 211
212/* ELD Header Block */
213#define DRM_ELD_HEADER_BLOCK_SIZE 4
214
215#define DRM_ELD_VER 0
216# define DRM_ELD_VER_SHIFT 3
217# define DRM_ELD_VER_MASK (0x1f << 3)
218
219#define DRM_ELD_BASELINE_ELD_LEN 2 /* in dwords! */
220
221/* ELD Baseline Block for ELD_Ver == 2 */
222#define DRM_ELD_CEA_EDID_VER_MNL 4
223# define DRM_ELD_CEA_EDID_VER_SHIFT 5
224# define DRM_ELD_CEA_EDID_VER_MASK (7 << 5)
225# define DRM_ELD_CEA_EDID_VER_NONE (0 << 5)
226# define DRM_ELD_CEA_EDID_VER_CEA861 (1 << 5)
227# define DRM_ELD_CEA_EDID_VER_CEA861A (2 << 5)
228# define DRM_ELD_CEA_EDID_VER_CEA861BCD (3 << 5)
229# define DRM_ELD_MNL_SHIFT 0
230# define DRM_ELD_MNL_MASK (0x1f << 0)
231
232#define DRM_ELD_SAD_COUNT_CONN_TYPE 5
233# define DRM_ELD_SAD_COUNT_SHIFT 4
234# define DRM_ELD_SAD_COUNT_MASK (0xf << 4)
235# define DRM_ELD_CONN_TYPE_SHIFT 2
236# define DRM_ELD_CONN_TYPE_MASK (3 << 2)
237# define DRM_ELD_CONN_TYPE_HDMI (0 << 2)
238# define DRM_ELD_CONN_TYPE_DP (1 << 2)
239# define DRM_ELD_SUPPORTS_AI (1 << 1)
240# define DRM_ELD_SUPPORTS_HDCP (1 << 0)
241
242#define DRM_ELD_AUD_SYNCH_DELAY 6 /* in units of 2 ms */
243# define DRM_ELD_AUD_SYNCH_DELAY_MAX 0xfa /* 500 ms */
244
245#define DRM_ELD_SPEAKER 7
246# define DRM_ELD_SPEAKER_RLRC (1 << 6)
247# define DRM_ELD_SPEAKER_FLRC (1 << 5)
248# define DRM_ELD_SPEAKER_RC (1 << 4)
249# define DRM_ELD_SPEAKER_RLR (1 << 3)
250# define DRM_ELD_SPEAKER_FC (1 << 2)
251# define DRM_ELD_SPEAKER_LFE (1 << 1)
252# define DRM_ELD_SPEAKER_FLR (1 << 0)
253
254#define DRM_ELD_PORT_ID 8 /* offsets 8..15 inclusive */
255# define DRM_ELD_PORT_ID_LEN 8
256
257#define DRM_ELD_MANUFACTURER_NAME0 16
258#define DRM_ELD_MANUFACTURER_NAME1 17
259
260#define DRM_ELD_PRODUCT_CODE0 18
261#define DRM_ELD_PRODUCT_CODE1 19
262
263#define DRM_ELD_MONITOR_NAME_STRING 20 /* offsets 20..(20+mnl-1) inclusive */
264
265#define DRM_ELD_CEA_SAD(mnl, sad) (20 + (mnl) + 3 * (sad))
266
210struct edid { 267struct edid {
211 u8 header[8]; 268 u8 header[8];
212 /* Vendor & product info */ 269 /* Vendor & product info */
@@ -279,4 +336,56 @@ int
279drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame, 336drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame,
280 const struct drm_display_mode *mode); 337 const struct drm_display_mode *mode);
281 338
339/**
340 * drm_eld_mnl - Get ELD monitor name length in bytes.
341 * @eld: pointer to an eld memory structure with mnl set
342 */
343static inline int drm_eld_mnl(const uint8_t *eld)
344{
345 return (eld[DRM_ELD_CEA_EDID_VER_MNL] & DRM_ELD_MNL_MASK) >> DRM_ELD_MNL_SHIFT;
346}
347
348/**
349 * drm_eld_sad_count - Get ELD SAD count.
350 * @eld: pointer to an eld memory structure with sad_count set
351 */
352static inline int drm_eld_sad_count(const uint8_t *eld)
353{
354 return (eld[DRM_ELD_SAD_COUNT_CONN_TYPE] & DRM_ELD_SAD_COUNT_MASK) >>
355 DRM_ELD_SAD_COUNT_SHIFT;
356}
357
358/**
359 * drm_eld_calc_baseline_block_size - Calculate baseline block size in bytes
360 * @eld: pointer to an eld memory structure with mnl and sad_count set
361 *
362 * This is a helper for determining the payload size of the baseline block, in
363 * bytes, for e.g. setting the Baseline_ELD_Len field in the ELD header block.
364 */
365static inline int drm_eld_calc_baseline_block_size(const uint8_t *eld)
366{
367 return DRM_ELD_MONITOR_NAME_STRING - DRM_ELD_HEADER_BLOCK_SIZE +
368 drm_eld_mnl(eld) + drm_eld_sad_count(eld) * 3;
369}
370
371/**
372 * drm_eld_size - Get ELD size in bytes
373 * @eld: pointer to a complete eld memory structure
374 *
375 * The returned value does not include the vendor block, which is vendor
376 * specific and comprises the remaining bytes in the ELD memory buffer after
377 * drm_eld_size() bytes of header and baseline block.
378 *
379 * The returned value is guaranteed to be a multiple of 4.
380 */
381static inline int drm_eld_size(const uint8_t *eld)
382{
383 return DRM_ELD_HEADER_BLOCK_SIZE + eld[DRM_ELD_BASELINE_ELD_LEN] * 4;
384}
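
Usage sketch: an audio-capable encoder can now copy exactly the valid part of the connector's ELD instead of a fixed-size buffer; foo_write_eld_byte is hypothetical:

        static void foo_write_eld(struct drm_connector *connector)
        {
                const uint8_t *eld = connector->eld;
                int i;

                for (i = 0; i < drm_eld_size(eld); i++)
                        foo_write_eld_byte(i, eld[i]);
        }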
385
386struct edid *drm_do_get_edid(struct drm_connector *connector,
387 int (*get_edid_block)(void *data, u8 *buf, unsigned int block,
388 size_t len),
389 void *data);
390
282#endif /* __DRM_EDID_H__ */ 391#endif /* __DRM_EDID_H__ */
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index f4ad254e3488..b597068103aa 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -34,9 +34,14 @@ struct drm_fb_helper;
34 34
35#include <linux/kgdb.h> 35#include <linux/kgdb.h>
36 36
37struct drm_fb_offset {
38 int x, y;
39};
40
37struct drm_fb_helper_crtc { 41struct drm_fb_helper_crtc {
38 struct drm_mode_set mode_set; 42 struct drm_mode_set mode_set;
39 struct drm_display_mode *desired_mode; 43 struct drm_display_mode *desired_mode;
44 int x, y;
40}; 45};
41 46
42struct drm_fb_helper_surface_size { 47struct drm_fb_helper_surface_size {
@@ -72,6 +77,7 @@ struct drm_fb_helper_funcs {
72 bool (*initial_config)(struct drm_fb_helper *fb_helper, 77 bool (*initial_config)(struct drm_fb_helper *fb_helper,
73 struct drm_fb_helper_crtc **crtcs, 78 struct drm_fb_helper_crtc **crtcs,
74 struct drm_display_mode **modes, 79 struct drm_display_mode **modes,
80 struct drm_fb_offset *offsets,
75 bool *enabled, int width, int height); 81 bool *enabled, int width, int height);
76}; 82};
77 83
diff --git a/include/drm/drm_flip_work.h b/include/drm/drm_flip_work.h
index 9eed34dcd6af..d387cf06ae05 100644
--- a/include/drm/drm_flip_work.h
+++ b/include/drm/drm_flip_work.h
@@ -25,6 +25,7 @@
25#define DRM_FLIP_WORK_H 25#define DRM_FLIP_WORK_H
26 26
27#include <linux/kfifo.h> 27#include <linux/kfifo.h>
28#include <linux/spinlock.h>
28#include <linux/workqueue.h> 29#include <linux/workqueue.h>
29 30
30/** 31/**
@@ -32,9 +33,9 @@
32 * 33 *
33 * Util to queue up work to run from work-queue context after flip/vblank. 34 * Util to queue up work to run from work-queue context after flip/vblank.
34 * Typically this can be used to defer unref of framebuffer's, cursor 35 * Typically this can be used to defer unref of framebuffer's, cursor
35 * bo's, etc until after vblank. The APIs are all safe (and lockless) 36 * bo's, etc until after vblank. The APIs are all thread-safe.
36 * for up to one producer and once consumer at a time. The single-consumer 37 * Moreover, drm_flip_work_queue_task and drm_flip_work_queue can be called
37 * aspect is ensured by committing the queued work to a single work-queue. 38 * in atomic context.
38 */ 39 */
39 40
40struct drm_flip_work; 41struct drm_flip_work;
@@ -51,26 +52,40 @@ struct drm_flip_work;
51typedef void (*drm_flip_func_t)(struct drm_flip_work *work, void *val); 52typedef void (*drm_flip_func_t)(struct drm_flip_work *work, void *val);
52 53
53/** 54/**
55 * struct drm_flip_task - flip work task
56 * @node: list entry element
57 * @data: data to pass to work->func
58 */
59struct drm_flip_task {
60 struct list_head node;
61 void *data;
62};
63
64/**
54 * struct drm_flip_work - flip work queue 65 * struct drm_flip_work - flip work queue
55 * @name: debug name 66 * @name: debug name
56 * @pending: number of queued but not committed items
57 * @count: number of committed items
58 * @func: callback fxn called for each committed item 67 * @func: callback fxn called for each committed item
59 * @worker: worker which calls @func 68 * @worker: worker which calls @func
60 * @fifo: queue of committed items 69 * @queued: queued tasks
70 * @commited: committed tasks
71 * @lock: lock to access queued and commited lists
61 */ 72 */
62struct drm_flip_work { 73struct drm_flip_work {
63 const char *name; 74 const char *name;
64 atomic_t pending, count;
65 drm_flip_func_t func; 75 drm_flip_func_t func;
66 struct work_struct worker; 76 struct work_struct worker;
67 DECLARE_KFIFO_PTR(fifo, void *); 77 struct list_head queued;
78 struct list_head commited;
79 spinlock_t lock;
68}; 80};
69 81
82struct drm_flip_task *drm_flip_work_allocate_task(void *data, gfp_t flags);
83void drm_flip_work_queue_task(struct drm_flip_work *work,
84 struct drm_flip_task *task);
70void drm_flip_work_queue(struct drm_flip_work *work, void *val); 85void drm_flip_work_queue(struct drm_flip_work *work, void *val);
71void drm_flip_work_commit(struct drm_flip_work *work, 86void drm_flip_work_commit(struct drm_flip_work *work,
72 struct workqueue_struct *wq); 87 struct workqueue_struct *wq);
73int drm_flip_work_init(struct drm_flip_work *work, int size, 88void drm_flip_work_init(struct drm_flip_work *work,
74 const char *name, drm_flip_func_t func); 89 const char *name, drm_flip_func_t func);
75void drm_flip_work_cleanup(struct drm_flip_work *work); 90void drm_flip_work_cleanup(struct drm_flip_work *work);
76 91
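Usage sketch with the reworked API: init no longer allocates a fifo and cannot fail, queueing is legal from atomic context, and commit schedules the callbacks on the given workqueue. The foo_ wrapper structures are hypothetical:

        struct foo_crtc {
                struct drm_flip_work unref_work;
        };

        static void foo_unref_fb(struct drm_flip_work *work, void *val)
        {
                drm_framebuffer_unreference(val);
        }

        static void foo_crtc_init(struct foo_crtc *foo)
        {
                drm_flip_work_init(&foo->unref_work, "fb unref", foo_unref_fb);
        }

        static void foo_flip_done(struct foo_crtc *foo,
                                  struct drm_framebuffer *old_fb)
        {
                /* legal from atomic context with the new list-based queue */
                drm_flip_work_queue(&foo->unref_work, old_fb);
                drm_flip_work_commit(&foo->unref_work, system_wq);
        }
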
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index 1e6ae1458f7a..780511a459c0 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -119,6 +119,13 @@ struct drm_gem_object {
119 * simply leave it as NULL. 119 * simply leave it as NULL.
120 */ 120 */
121 struct dma_buf_attachment *import_attach; 121 struct dma_buf_attachment *import_attach;
122
123 /**
124 * dumb - created as dumb buffer
125 * Whether the gem object was created using the dumb buffer interface
126 * as such it may not be used for GPU rendering.
127 */
128 bool dumb;
122}; 129};
123 130
124void drm_gem_object_release(struct drm_gem_object *obj); 131void drm_gem_object_release(struct drm_gem_object *obj);
diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h
index 2ff35f3de9c5..acd6af8a8e67 100644
--- a/include/drm/drm_gem_cma_helper.h
+++ b/include/drm/drm_gem_cma_helper.h
@@ -4,6 +4,13 @@
4#include <drm/drmP.h> 4#include <drm/drmP.h>
5#include <drm/drm_gem.h> 5#include <drm/drm_gem.h>
6 6
7/**
8 * struct drm_gem_cma_object - GEM object backed by CMA memory allocations
9 * @base: base GEM object
10 * @paddr: physical address of the backing memory
11 * @sgt: scatter/gather table for imported PRIME buffers
12 * @vaddr: kernel virtual address of the backing memory
13 */
7struct drm_gem_cma_object { 14struct drm_gem_cma_object {
8 struct drm_gem_object base; 15 struct drm_gem_object base;
9 dma_addr_t paddr; 16 dma_addr_t paddr;
@@ -19,23 +26,30 @@ to_drm_gem_cma_obj(struct drm_gem_object *gem_obj)
19 return container_of(gem_obj, struct drm_gem_cma_object, base); 26 return container_of(gem_obj, struct drm_gem_cma_object, base);
20} 27}
21 28
22/* free gem object. */ 29/* free GEM object */
23void drm_gem_cma_free_object(struct drm_gem_object *gem_obj); 30void drm_gem_cma_free_object(struct drm_gem_object *gem_obj);
24 31
25/* create memory region for drm framebuffer. */ 32/* create memory region for DRM framebuffer */
33int drm_gem_cma_dumb_create_internal(struct drm_file *file_priv,
34 struct drm_device *drm,
35 struct drm_mode_create_dumb *args);
36
37/* create memory region for DRM framebuffer */
26int drm_gem_cma_dumb_create(struct drm_file *file_priv, 38int drm_gem_cma_dumb_create(struct drm_file *file_priv,
27 struct drm_device *drm, struct drm_mode_create_dumb *args); 39 struct drm_device *drm,
40 struct drm_mode_create_dumb *args);
28 41
29/* map memory region for drm framebuffer to user space. */ 42/* map memory region for DRM framebuffer to user space */
30int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv, 43int drm_gem_cma_dumb_map_offset(struct drm_file *file_priv,
31 struct drm_device *drm, uint32_t handle, uint64_t *offset); 44 struct drm_device *drm, u32 handle,
45 u64 *offset);
32 46
33/* set vm_flags and we can change the vm attribute to other one at here. */ 47/* set vm_flags; VM attributes can be adjusted here if needed */
34int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma); 48int drm_gem_cma_mmap(struct file *filp, struct vm_area_struct *vma);
35 49
36/* allocate physical memory. */ 50/* allocate physical memory */
37struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm, 51struct drm_gem_cma_object *drm_gem_cma_create(struct drm_device *drm,
38 unsigned int size); 52 size_t size);
39 53
40extern const struct vm_operations_struct drm_gem_cma_vm_ops; 54extern const struct vm_operations_struct drm_gem_cma_vm_ops;
41 55
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index 8569dc5a1026..f1d8d0dbb4f1 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -26,6 +26,7 @@ struct mipi_dsi_device;
26 * struct mipi_dsi_msg - read/write DSI buffer 26 * struct mipi_dsi_msg - read/write DSI buffer
27 * @channel: virtual channel id 27 * @channel: virtual channel id
28 * @type: payload data type 28 * @type: payload data type
29 * @flags: flags controlling this message transmission
29 * @tx_len: length of @tx_buf 30 * @tx_len: length of @tx_buf
30 * @tx_buf: data to be written 31 * @tx_buf: data to be written
31 * @rx_len: length of @rx_buf 32 * @rx_len: length of @rx_buf
@@ -43,12 +44,44 @@ struct mipi_dsi_msg {
43 void *rx_buf; 44 void *rx_buf;
44}; 45};
45 46
47bool mipi_dsi_packet_format_is_short(u8 type);
48bool mipi_dsi_packet_format_is_long(u8 type);
49
50/**
51 * struct mipi_dsi_packet - represents a MIPI DSI packet in protocol format
52 * @size: size (in bytes) of the packet
53 * @header: the four bytes that make up the header (Data ID, Word Count or
54 * Packet Data, and ECC)
55 * @payload_length: number of bytes in the payload
56 * @payload: a pointer to a buffer containing the payload, if any
57 */
58struct mipi_dsi_packet {
59 size_t size;
60 u8 header[4];
61 size_t payload_length;
62 const u8 *payload;
63};
64
65int mipi_dsi_create_packet(struct mipi_dsi_packet *packet,
66 const struct mipi_dsi_msg *msg);
67
46/** 68/**
47 * struct mipi_dsi_host_ops - DSI bus operations 69 * struct mipi_dsi_host_ops - DSI bus operations
48 * @attach: attach DSI device to DSI host 70 * @attach: attach DSI device to DSI host
49 * @detach: detach DSI device from DSI host 71 * @detach: detach DSI device from DSI host
50 * @transfer: send and/or receive DSI packet, return number of received bytes, 72 * @transfer: transmit a DSI packet
51 * or error 73 *
74 * DSI packets transmitted by .transfer() are passed in as mipi_dsi_msg
75 * structures. This structure contains information about the type of packet
76 * being transmitted as well as the transmit and receive buffers. When an
77 * error is encountered during transmission, this function will return a
78 * negative error code. On success it shall return the number of bytes
79 * transmitted for write packets or the number of bytes received for read
80 * packets.
81 *
82 * Note that typically DSI packet transmission is atomic, so the .transfer()
83 * function will seldom return anything other than the number of bytes
84 * contained in the transmit buffer on success.
52 */ 85 */
53struct mipi_dsi_host_ops { 86struct mipi_dsi_host_ops {
54 int (*attach)(struct mipi_dsi_host *host, 87 int (*attach)(struct mipi_dsi_host *host,
@@ -56,7 +89,7 @@ struct mipi_dsi_host_ops {
56 int (*detach)(struct mipi_dsi_host *host, 89 int (*detach)(struct mipi_dsi_host *host,
57 struct mipi_dsi_device *dsi); 90 struct mipi_dsi_device *dsi);
58 ssize_t (*transfer)(struct mipi_dsi_host *host, 91 ssize_t (*transfer)(struct mipi_dsi_host *host,
59 struct mipi_dsi_msg *msg); 92 const struct mipi_dsi_msg *msg);
60}; 93};
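
On the host side, the new packet helpers pair naturally with .transfer(): build the wire format with mipi_dsi_create_packet() and push it to the hardware. A sketch with foo_dsi_write_fifo hypothetical and read packets ignored:

        static ssize_t foo_dsi_transfer(struct mipi_dsi_host *host,
                                        const struct mipi_dsi_msg *msg)
        {
                struct mipi_dsi_packet packet;
                int ret;

                ret = mipi_dsi_create_packet(&packet, msg);
                if (ret < 0)
                        return ret;

                foo_dsi_write_fifo(host, packet.header, sizeof(packet.header));
                if (packet.payload_length)
                        foo_dsi_write_fifo(host, packet.payload,
                                           packet.payload_length);

                return msg->tx_len;
        }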
61 94
62/** 95/**
@@ -130,12 +163,57 @@ static inline struct mipi_dsi_device *to_mipi_dsi_device(struct device *dev)
130 return container_of(dev, struct mipi_dsi_device, dev); 163 return container_of(dev, struct mipi_dsi_device, dev);
131} 164}
132 165
166struct mipi_dsi_device *of_find_mipi_dsi_device_by_node(struct device_node *np);
133int mipi_dsi_attach(struct mipi_dsi_device *dsi); 167int mipi_dsi_attach(struct mipi_dsi_device *dsi);
134int mipi_dsi_detach(struct mipi_dsi_device *dsi); 168int mipi_dsi_detach(struct mipi_dsi_device *dsi);
135ssize_t mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, const void *data, 169int mipi_dsi_set_maximum_return_packet_size(struct mipi_dsi_device *dsi,
136 size_t len); 170 u16 value);
171
172ssize_t mipi_dsi_generic_write(struct mipi_dsi_device *dsi, const void *payload,
173 size_t size);
174ssize_t mipi_dsi_generic_read(struct mipi_dsi_device *dsi, const void *params,
175 size_t num_params, void *data, size_t size);
176
177/**
178 * enum mipi_dsi_dcs_tear_mode - Tearing Effect Output Line mode
179 * @MIPI_DSI_DCS_TEAR_MODE_VBLANK: the TE output line consists of V-Blanking
180 * information only
181 * @MIPI_DSI_DCS_TEAR_MODE_VHBLANK : the TE output line consists of both
182 * V-Blanking and H-Blanking information
183 */
184enum mipi_dsi_dcs_tear_mode {
185 MIPI_DSI_DCS_TEAR_MODE_VBLANK,
186 MIPI_DSI_DCS_TEAR_MODE_VHBLANK,
187};
188
189#define MIPI_DSI_DCS_POWER_MODE_DISPLAY (1 << 2)
190#define MIPI_DSI_DCS_POWER_MODE_NORMAL (1 << 3)
191#define MIPI_DSI_DCS_POWER_MODE_SLEEP (1 << 4)
192#define MIPI_DSI_DCS_POWER_MODE_PARTIAL (1 << 5)
193#define MIPI_DSI_DCS_POWER_MODE_IDLE (1 << 6)
194
195ssize_t mipi_dsi_dcs_write_buffer(struct mipi_dsi_device *dsi,
196 const void *data, size_t len);
197ssize_t mipi_dsi_dcs_write(struct mipi_dsi_device *dsi, u8 cmd,
198 const void *data, size_t len);
137ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, u8 cmd, void *data, 199ssize_t mipi_dsi_dcs_read(struct mipi_dsi_device *dsi, u8 cmd, void *data,
138 size_t len); 200 size_t len);
201int mipi_dsi_dcs_nop(struct mipi_dsi_device *dsi);
202int mipi_dsi_dcs_soft_reset(struct mipi_dsi_device *dsi);
203int mipi_dsi_dcs_get_power_mode(struct mipi_dsi_device *dsi, u8 *mode);
204int mipi_dsi_dcs_get_pixel_format(struct mipi_dsi_device *dsi, u8 *format);
205int mipi_dsi_dcs_enter_sleep_mode(struct mipi_dsi_device *dsi);
206int mipi_dsi_dcs_exit_sleep_mode(struct mipi_dsi_device *dsi);
207int mipi_dsi_dcs_set_display_off(struct mipi_dsi_device *dsi);
208int mipi_dsi_dcs_set_display_on(struct mipi_dsi_device *dsi);
209int mipi_dsi_dcs_set_column_address(struct mipi_dsi_device *dsi, u16 start,
210 u16 end);
211int mipi_dsi_dcs_set_page_address(struct mipi_dsi_device *dsi, u16 start,
212 u16 end);
213int mipi_dsi_dcs_set_tear_off(struct mipi_dsi_device *dsi);
214int mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi,
215 enum mipi_dsi_dcs_tear_mode mode);
216int mipi_dsi_dcs_set_pixel_format(struct mipi_dsi_device *dsi, u8 format);
139 217
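A hedged sketch of a panel prepare sequence built from the new DCS helpers; the 120 ms sleep-out delay and the 24-bit format (MIPI_DCS_PIXEL_FMT_24BIT from <video/mipi_display.h>) are illustrative, not taken from any datasheet:

        static int foo_panel_prepare(struct mipi_dsi_device *dsi)
        {
                int ret;

                ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
                if (ret < 0)
                        return ret;
                msleep(120);

                ret = mipi_dsi_dcs_set_pixel_format(dsi,
                                                    MIPI_DCS_PIXEL_FMT_24BIT);
                if (ret < 0)
                        return ret;

                ret = mipi_dsi_dcs_set_tear_on(dsi,
                                               MIPI_DSI_DCS_TEAR_MODE_VBLANK);
                if (ret < 0)
                        return ret;

                return mipi_dsi_dcs_set_display_on(dsi);
        }
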
140/** 218/**
141 * struct mipi_dsi_driver - DSI driver 219 * struct mipi_dsi_driver - DSI driver
@@ -167,9 +245,13 @@ static inline void mipi_dsi_set_drvdata(struct mipi_dsi_device *dsi, void *data)
167 dev_set_drvdata(&dsi->dev, data); 245 dev_set_drvdata(&dsi->dev, data);
168} 246}
169 247
170int mipi_dsi_driver_register(struct mipi_dsi_driver *driver); 248int mipi_dsi_driver_register_full(struct mipi_dsi_driver *driver,
249 struct module *owner);
171void mipi_dsi_driver_unregister(struct mipi_dsi_driver *driver); 250void mipi_dsi_driver_unregister(struct mipi_dsi_driver *driver);
172 251
252#define mipi_dsi_driver_register(driver) \
253 mipi_dsi_driver_register_full(driver, THIS_MODULE)
254
173#define module_mipi_dsi_driver(__mipi_dsi_driver) \ 255#define module_mipi_dsi_driver(__mipi_dsi_driver) \
174 module_driver(__mipi_dsi_driver, mipi_dsi_driver_register, \ 256 module_driver(__mipi_dsi_driver, mipi_dsi_driver_register, \
175 mipi_dsi_driver_unregister) 257 mipi_dsi_driver_unregister)
diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h
index 75a5c45e21c7..70595ff565ba 100644
--- a/include/drm/drm_modeset_lock.h
+++ b/include/drm/drm_modeset_lock.h
@@ -33,6 +33,7 @@ struct drm_modeset_lock;
33 * @ww_ctx: base acquire ctx 33 * @ww_ctx: base acquire ctx
34 * @contended: used internally for -EDEADLK handling 34 * @contended: used internally for -EDEADLK handling
35 * @locked: list of held locks 35 * @locked: list of held locks
36 * @trylock_only: trylock mode used in atomic contexts/panic notifiers
36 * 37 *
37 * Each thread competing for a set of locks must use one acquire 38 * Each thread competing for a set of locks must use one acquire
38 * ctx. And if any lock fxn returns -EDEADLK, it must backoff and 39 * ctx. And if any lock fxn returns -EDEADLK, it must backoff and
@@ -126,11 +127,13 @@ void drm_modeset_unlock(struct drm_modeset_lock *lock);
126 127
127struct drm_device; 128struct drm_device;
128struct drm_crtc; 129struct drm_crtc;
130struct drm_plane;
129 131
130void drm_modeset_lock_all(struct drm_device *dev); 132void drm_modeset_lock_all(struct drm_device *dev);
131int __drm_modeset_lock_all(struct drm_device *dev, bool trylock); 133int __drm_modeset_lock_all(struct drm_device *dev, bool trylock);
132void drm_modeset_unlock_all(struct drm_device *dev); 134void drm_modeset_unlock_all(struct drm_device *dev);
133void drm_modeset_lock_crtc(struct drm_crtc *crtc); 135void drm_modeset_lock_crtc(struct drm_crtc *crtc,
136 struct drm_plane *plane);
134void drm_modeset_unlock_crtc(struct drm_crtc *crtc); 137void drm_modeset_unlock_crtc(struct drm_crtc *crtc);
135void drm_warn_on_modeset_not_all_locked(struct drm_device *dev); 138void drm_warn_on_modeset_not_all_locked(struct drm_device *dev);
136struct drm_modeset_acquire_ctx * 139struct drm_modeset_acquire_ctx *
diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h
index 52e6870534b2..a185392cafeb 100644
--- a/include/drm/drm_plane_helper.h
+++ b/include/drm/drm_plane_helper.h
@@ -25,6 +25,7 @@
25#define DRM_PLANE_HELPER_H 25#define DRM_PLANE_HELPER_H
26 26
27#include <drm/drm_rect.h> 27#include <drm/drm_rect.h>
28#include <drm/drm_crtc.h>
28 29
29/* 30/*
30 * Drivers that don't allow primary plane scaling may pass this macro in place 31 * Drivers that don't allow primary plane scaling may pass this macro in place
@@ -42,6 +43,37 @@
42 * planes. 43 * planes.
43 */ 44 */
44 45
46extern int drm_crtc_init(struct drm_device *dev,
47 struct drm_crtc *crtc,
48 const struct drm_crtc_funcs *funcs);
49
50/**
51 * drm_plane_helper_funcs - helper operations for planes
52 * @prepare_fb: prepare a framebuffer for use by the plane
53 * @cleanup_fb: cleanup a framebuffer when it's no longer used by the plane
54 * @atomic_check: check that a given atomic state is valid and can be applied
55 * @atomic_update: apply an atomic state to the plane
56 *
57 * The helper operations are called by the mid-layer CRTC helper.
58 */
59struct drm_plane_helper_funcs {
60 int (*prepare_fb)(struct drm_plane *plane,
61 struct drm_framebuffer *fb);
62 void (*cleanup_fb)(struct drm_plane *plane,
63 struct drm_framebuffer *fb);
64
65 int (*atomic_check)(struct drm_plane *plane,
66 struct drm_plane_state *state);
67 void (*atomic_update)(struct drm_plane *plane,
68 struct drm_plane_state *old_state);
69};
70
71static inline void drm_plane_helper_add(struct drm_plane *plane,
72 const struct drm_plane_helper_funcs *funcs)
73{
74 plane->helper_private = (void *)funcs;
75}
76
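Registering the helpers is then a one-liner once the vtable exists; a sketch with hypothetical foo_ implementations:

        static const struct drm_plane_helper_funcs foo_plane_helper_funcs = {
                .prepare_fb = foo_plane_prepare_fb,
                .cleanup_fb = foo_plane_cleanup_fb,
                .atomic_check = foo_plane_atomic_check,
                .atomic_update = foo_plane_atomic_update,
        };

        static void foo_plane_init_helpers(struct drm_plane *plane)
        {
                drm_plane_helper_add(plane, &foo_plane_helper_funcs);
        }
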
45extern int drm_plane_helper_check_update(struct drm_plane *plane, 77extern int drm_plane_helper_check_update(struct drm_plane *plane,
46 struct drm_crtc *crtc, 78 struct drm_crtc *crtc,
47 struct drm_framebuffer *fb, 79 struct drm_framebuffer *fb,
@@ -68,4 +100,16 @@ extern struct drm_plane *drm_primary_helper_create_plane(struct drm_device *dev,
68 int num_formats); 100 int num_formats);
69 101
70 102
103int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
104 struct drm_framebuffer *fb,
105 int crtc_x, int crtc_y,
106 unsigned int crtc_w, unsigned int crtc_h,
107 uint32_t src_x, uint32_t src_y,
108 uint32_t src_w, uint32_t src_h);
109int drm_plane_helper_disable(struct drm_plane *plane);
110
111/* For use by drm_crtc_helper.c */
112int drm_plane_helper_commit(struct drm_plane *plane,
113 struct drm_plane_state *plane_state,
114 struct drm_framebuffer *old_fb);
71#endif 115#endif
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index a70d45647898..180ad0e6de21 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -259,4 +259,21 @@
259 INTEL_VGA_DEVICE(0x22b2, info), \ 259 INTEL_VGA_DEVICE(0x22b2, info), \
260 INTEL_VGA_DEVICE(0x22b3, info) 260 INTEL_VGA_DEVICE(0x22b3, info)
261 261
262#define INTEL_SKL_IDS(info) \
263 INTEL_VGA_DEVICE(0x1916, info), /* ULT GT2 */ \
264 INTEL_VGA_DEVICE(0x1906, info), /* ULT GT1 */ \
265 INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \
266 INTEL_VGA_DEVICE(0x1921, info), /* ULT GT2F */ \
267 INTEL_VGA_DEVICE(0x190E, info), /* ULX GT1 */ \
268 INTEL_VGA_DEVICE(0x191E, info), /* ULX GT2 */ \
269 INTEL_VGA_DEVICE(0x1912, info), /* DT GT2 */ \
270 INTEL_VGA_DEVICE(0x1902, info), /* DT GT1 */ \
271 INTEL_VGA_DEVICE(0x191B, info), /* Halo GT2 */ \
272 INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \
273 INTEL_VGA_DEVICE(0x190B, info), /* Halo GT1 */ \
274 INTEL_VGA_DEVICE(0x191A, info), /* SRV GT2 */ \
275 INTEL_VGA_DEVICE(0x192A, info), /* SRV GT3 */ \
276 INTEL_VGA_DEVICE(0x190A, info), /* SRV GT1 */ \
277 INTEL_VGA_DEVICE(0x191D, info) /* WKS GT2 */
278
262#endif /* _I915_PCIIDS_H */ 279#endif /* _I915_PCIIDS_H */
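
Like the existing INTEL_*_IDS() macros in this header, the new Skylake block expands to a comma-separated run of PCI ID entries, so it can be spliced straight into a pci_device_id table. A sketch of that usage; foo_device_info and skl_info are placeholders, not names from this diff:

#include <linux/module.h>
#include <linux/pci.h>
#include <drm/i915_pciids.h>

/* Placeholder driver data; a real consumer passes its own info struct. */
struct foo_device_info { int gen; };
static const struct foo_device_info skl_info = { .gen = 9 };

static const struct pci_device_id foo_pciidlist[] = {
	INTEL_SKL_IDS(&skl_info),	/* expands to 15 VGA device entries */
	{ 0, 0, 0 }
};
MODULE_DEVICE_TABLE(pci, foo_pciidlist);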
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
index 460441714413..b620c317c772 100644
--- a/include/drm/ttm/ttm_execbuf_util.h
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -68,6 +68,7 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
68 * non-blocking reserves should be tried. 68 * non-blocking reserves should be tried.
69 * @list: thread private list of ttm_validate_buffer structs. 69 * @list: thread private list of ttm_validate_buffer structs.
70 * @intr: should the wait be interruptible 70 * @intr: should the wait be interruptible
71 * @dups: [out] optional list of duplicates.
71 * 72 *
72 * Tries to reserve bos pointed to by the list entries for validation. 73 * Tries to reserve bos pointed to by the list entries for validation.
73 * If the function returns 0, all buffers are marked as "unfenced", 74 * If the function returns 0, all buffers are marked as "unfenced",
@@ -83,6 +84,11 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
83 * calling process receives a signal while waiting. In that case, no 84 * calling process receives a signal while waiting. In that case, no
84 * buffers on the list will be reserved upon return. 85 * buffers on the list will be reserved upon return.
85 * 86 *
 87 * If dups is non-NULL, all buffers already reserved by the current thread
 88 * (i.e. duplicates) are added to this list; otherwise -EALREADY is returned
 89 * on the first already-reserved buffer and all buffers from the list are
 90 * unreserved again.
91 *
86 * Buffers reserved by this function should be unreserved by 92 * Buffers reserved by this function should be unreserved by
87 * a call to either ttm_eu_backoff_reservation() or 93 * a call to either ttm_eu_backoff_reservation() or
88 * ttm_eu_fence_buffer_objects() when command submission is complete or 94 * ttm_eu_fence_buffer_objects() when command submission is complete or
@@ -90,7 +96,8 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
90 */ 96 */
91 97
92extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, 98extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
93 struct list_head *list, bool intr); 99 struct list_head *list, bool intr,
100 struct list_head *dups);
94 101
95/** 102/**
96 * function ttm_eu_fence_buffer_objects. 103 * function ttm_eu_fence_buffer_objects.
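
The extra dups list changes the contract for submissions that may name the same buffer object twice: rather than the whole reserve failing with -EALREADY, duplicates are parked on a caller-provided list. A hedged sketch of the calling pattern (the foo_* name and the trimmed error handling are illustrative):

#include <linux/list.h>
#include <drm/ttm/ttm_execbuf_util.h>

static int foo_reserve_and_submit(struct ww_acquire_ctx *ticket,
				  struct list_head *list)
{
	struct list_head duplicates;
	int ret;

	INIT_LIST_HEAD(&duplicates);

	/* Interruptible reserve; repeated buffers land on &duplicates
	 * instead of aborting the whole list with -EALREADY. */
	ret = ttm_eu_reserve_buffers(ticket, list, true, &duplicates);
	if (ret)
		return ret;

	/* ... validate buffers and submit commands ... */

	ttm_eu_backoff_reservation(ticket, list);
	return 0;
}

Entries on the duplicates list were already reserved through another entry on the main list, so backing off the main list releases them as well.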
diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h
index 11c0182a153b..cbb5790a35cd 100644
--- a/include/linux/hdmi.h
+++ b/include/linux/hdmi.h
@@ -1,9 +1,24 @@
1/* 1/*
2 * Copyright (C) 2012 Avionic Design GmbH 2 * Copyright (C) 2012 Avionic Design GmbH
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify 4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * it under the terms of the GNU General Public License version 2 as 5 * copy of this software and associated documentation files (the "Software"),
6 * published by the Free Software Foundation. 6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sub license,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the
12 * next paragraph) shall be included in all copies or substantial portions
13 * of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
7 */ 22 */
8 23
9#ifndef __LINUX_HDMI_H_ 24#ifndef __LINUX_HDMI_H_
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index ab8564b03468..95243d28a0ee 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -98,11 +98,11 @@ struct mmu_notifier_ops {
98 /* 98 /*
99 * invalidate_range_start() and invalidate_range_end() must be 99 * invalidate_range_start() and invalidate_range_end() must be
100 * paired and are called only when the mmap_sem and/or the 100 * paired and are called only when the mmap_sem and/or the
101 * locks protecting the reverse maps are held. The subsystem 101 * locks protecting the reverse maps are held. If the subsystem
102 * must guarantee that no additional references are taken to 102 * can't guarantee that no additional references are taken to
103 * the pages in the range established between the call to 103 * the pages in the range, it has to implement the
104 * invalidate_range_start() and the matching call to 104 * invalidate_range() notifier to remove any references taken
105 * invalidate_range_end(). 105 * after invalidate_range_start().
106 * 106 *
107 * Invalidation of multiple concurrent ranges may be 107 * Invalidation of multiple concurrent ranges may be
108 * optionally permitted by the driver. Either way the 108 * optionally permitted by the driver. Either way the
@@ -144,6 +144,29 @@ struct mmu_notifier_ops {
144 void (*invalidate_range_end)(struct mmu_notifier *mn, 144 void (*invalidate_range_end)(struct mmu_notifier *mn,
145 struct mm_struct *mm, 145 struct mm_struct *mm,
146 unsigned long start, unsigned long end); 146 unsigned long start, unsigned long end);
147
148 /*
149 * invalidate_range() is either called between
150 * invalidate_range_start() and invalidate_range_end() when the
 151 * VM has to free pages that were unmapped, but before the
 152 * pages are actually freed, or outside of _start()/_end() when
 153 * a (remote) TLB flush is necessary.
 154 *
 155 * If invalidate_range() is used to manage a non-CPU TLB with
 156 * shared page-tables, it is not necessary to implement the
 157 * invalidate_range_start()/end() notifiers, as
 158 * invalidate_range() already catches the points in time when an
159 * external TLB range needs to be flushed.
160 *
161 * The invalidate_range() function is called under the ptl
162 * spin-lock and not allowed to sleep.
163 *
164 * Note that this function might be called with just a sub-range
165 * of what was passed to invalidate_range_start()/end(), if
166 * called between those functions.
167 */
168 void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm,
169 unsigned long start, unsigned long end);
147}; 170};
148 171
149/* 172/*
@@ -190,6 +213,8 @@ extern void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
190 unsigned long start, unsigned long end); 213 unsigned long start, unsigned long end);
191extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm, 214extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
192 unsigned long start, unsigned long end); 215 unsigned long start, unsigned long end);
216extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
217 unsigned long start, unsigned long end);
193 218
194static inline void mmu_notifier_release(struct mm_struct *mm) 219static inline void mmu_notifier_release(struct mm_struct *mm)
195{ 220{
@@ -242,6 +267,13 @@ static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
242 __mmu_notifier_invalidate_range_end(mm, start, end); 267 __mmu_notifier_invalidate_range_end(mm, start, end);
243} 268}
244 269
270static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
271 unsigned long start, unsigned long end)
272{
273 if (mm_has_notifiers(mm))
274 __mmu_notifier_invalidate_range(mm, start, end);
275}
276
245static inline void mmu_notifier_mm_init(struct mm_struct *mm) 277static inline void mmu_notifier_mm_init(struct mm_struct *mm)
246{ 278{
247 mm->mmu_notifier_mm = NULL; 279 mm->mmu_notifier_mm = NULL;
@@ -279,6 +311,44 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
279 __young; \ 311 __young; \
280}) 312})
281 313
314#define ptep_clear_flush_notify(__vma, __address, __ptep) \
315({ \
316 unsigned long ___addr = __address & PAGE_MASK; \
317 struct mm_struct *___mm = (__vma)->vm_mm; \
318 pte_t ___pte; \
319 \
320 ___pte = ptep_clear_flush(__vma, __address, __ptep); \
321 mmu_notifier_invalidate_range(___mm, ___addr, \
322 ___addr + PAGE_SIZE); \
323 \
324 ___pte; \
325})
326
327#define pmdp_clear_flush_notify(__vma, __haddr, __pmd) \
328({ \
329 unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \
330 struct mm_struct *___mm = (__vma)->vm_mm; \
331 pmd_t ___pmd; \
332 \
333 ___pmd = pmdp_clear_flush(__vma, __haddr, __pmd); \
334 mmu_notifier_invalidate_range(___mm, ___haddr, \
335 ___haddr + HPAGE_PMD_SIZE); \
336 \
337 ___pmd; \
338})
339
340#define pmdp_get_and_clear_notify(__mm, __haddr, __pmd) \
341({ \
342 unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \
343 pmd_t ___pmd; \
344 \
345 ___pmd = pmdp_get_and_clear(__mm, __haddr, __pmd); \
346 mmu_notifier_invalidate_range(__mm, ___haddr, \
347 ___haddr + HPAGE_PMD_SIZE); \
348 \
349 ___pmd; \
350})
351
282/* 352/*
283 * set_pte_at_notify() sets the pte _after_ running the notifier. 353 * set_pte_at_notify() sets the pte _after_ running the notifier.
284 * This is safe to start by updating the secondary MMUs, because the primary MMU 354 * This is safe to start by updating the secondary MMUs, because the primary MMU
@@ -342,6 +412,11 @@ static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
342{ 412{
343} 413}
344 414
415static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
416 unsigned long start, unsigned long end)
417{
418}
419
345static inline void mmu_notifier_mm_init(struct mm_struct *mm) 420static inline void mmu_notifier_mm_init(struct mm_struct *mm)
346{ 421{
347} 422}
@@ -352,6 +427,9 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
352 427
353#define ptep_clear_flush_young_notify ptep_clear_flush_young 428#define ptep_clear_flush_young_notify ptep_clear_flush_young
354#define pmdp_clear_flush_young_notify pmdp_clear_flush_young 429#define pmdp_clear_flush_young_notify pmdp_clear_flush_young
430#define ptep_clear_flush_notify ptep_clear_flush
431#define pmdp_clear_flush_notify pmdp_clear_flush
432#define pmdp_get_and_clear_notify pmdp_get_and_clear
355#define set_pte_at_notify set_pte_at 433#define set_pte_at_notify set_pte_at
356 434
357#endif /* CONFIG_MMU_NOTIFIER */ 435#endif /* CONFIG_MMU_NOTIFIER */
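
Per the new kernel-doc above, a device that walks the CPU page tables directly can get away with implementing only invalidate_range(). A minimal sketch under that assumption; struct foo_device and foo_tlb_flush() are hypothetical stand-ins for real driver state and the hardware flush:

#include <linux/kernel.h>
#include <linux/mmu_notifier.h>

struct foo_device {
	struct mmu_notifier mn;
	/* ... device state ... */
};

static void foo_tlb_flush(struct foo_device *dev,
			  unsigned long start, unsigned long end)
{
	/* Hypothetical: issue the device's TLB range invalidation. */
}

static void foo_invalidate_range(struct mmu_notifier *mn,
				 struct mm_struct *mm,
				 unsigned long start, unsigned long end)
{
	/* Called under the ptl spinlock: must not sleep. */
	foo_tlb_flush(container_of(mn, struct foo_device, mn), start, end);
}

static const struct mmu_notifier_ops foo_mn_ops = {
	.invalidate_range = foo_invalidate_range,
};

static int foo_attach_mm(struct foo_device *dev, struct mm_struct *mm)
{
	dev->mn.ops = &foo_mn_ops;
	return mmu_notifier_register(&dev->mn, mm);
}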
diff --git a/include/linux/platform_data/rcar-du.h b/include/linux/platform_data/rcar-du.h
deleted file mode 100644
index a5f045e1d8fe..000000000000
--- a/include/linux/platform_data/rcar-du.h
+++ /dev/null
@@ -1,74 +0,0 @@
1/*
2 * rcar_du.h -- R-Car Display Unit DRM driver
3 *
4 * Copyright (C) 2013 Renesas Corporation
5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#ifndef __RCAR_DU_H__
15#define __RCAR_DU_H__
16
17#include <video/videomode.h>
18
19enum rcar_du_output {
20 RCAR_DU_OUTPUT_DPAD0,
21 RCAR_DU_OUTPUT_DPAD1,
22 RCAR_DU_OUTPUT_LVDS0,
23 RCAR_DU_OUTPUT_LVDS1,
24 RCAR_DU_OUTPUT_TCON,
25 RCAR_DU_OUTPUT_MAX,
26};
27
28enum rcar_du_encoder_type {
29 RCAR_DU_ENCODER_UNUSED = 0,
30 RCAR_DU_ENCODER_NONE,
31 RCAR_DU_ENCODER_VGA,
32 RCAR_DU_ENCODER_LVDS,
33};
34
35struct rcar_du_panel_data {
36 unsigned int width_mm; /* Panel width in mm */
37 unsigned int height_mm; /* Panel height in mm */
38 struct videomode mode;
39};
40
41struct rcar_du_connector_lvds_data {
42 struct rcar_du_panel_data panel;
43};
44
45struct rcar_du_connector_vga_data {
46 /* TODO: Add DDC information for EDID retrieval */
47};
48
49/*
50 * struct rcar_du_encoder_data - Encoder platform data
51 * @type: the encoder type (RCAR_DU_ENCODER_*)
52 * @output: the DU output the connector is connected to (RCAR_DU_OUTPUT_*)
53 * @connector.lvds: platform data for LVDS connectors
54 * @connector.vga: platform data for VGA connectors
55 *
56 * Encoder platform data describes an on-board encoder, its associated DU SoC
57 * output, and the connector.
58 */
59struct rcar_du_encoder_data {
60 enum rcar_du_encoder_type type;
61 enum rcar_du_output output;
62
63 union {
64 struct rcar_du_connector_lvds_data lvds;
65 struct rcar_du_connector_vga_data vga;
66 } connector;
67};
68
69struct rcar_du_platform_data {
70 struct rcar_du_encoder_data *encoders;
71 unsigned int num_encoders;
72};
73
74#endif /* __RCAR_DU_H__ */
diff --git a/include/trace/events/host1x.h b/include/trace/events/host1x.h
index 94db6a2c3540..63116362543c 100644
--- a/include/trace/events/host1x.h
+++ b/include/trace/events/host1x.h
@@ -29,6 +29,8 @@
29#include <linux/ktime.h> 29#include <linux/ktime.h>
30#include <linux/tracepoint.h> 30#include <linux/tracepoint.h>
31 31
32struct host1x_bo;
33
32DECLARE_EVENT_CLASS(host1x, 34DECLARE_EVENT_CLASS(host1x,
33 TP_PROTO(const char *name), 35 TP_PROTO(const char *name),
34 TP_ARGS(name), 36 TP_ARGS(name),
@@ -79,14 +81,14 @@ TRACE_EVENT(host1x_cdma_push,
79); 81);
80 82
81TRACE_EVENT(host1x_cdma_push_gather, 83TRACE_EVENT(host1x_cdma_push_gather,
82 TP_PROTO(const char *name, u32 mem_id, 84 TP_PROTO(const char *name, struct host1x_bo *bo,
83 u32 words, u32 offset, void *cmdbuf), 85 u32 words, u32 offset, void *cmdbuf),
84 86
85 TP_ARGS(name, mem_id, words, offset, cmdbuf), 87 TP_ARGS(name, bo, words, offset, cmdbuf),
86 88
87 TP_STRUCT__entry( 89 TP_STRUCT__entry(
88 __field(const char *, name) 90 __field(const char *, name)
89 __field(u32, mem_id) 91 __field(struct host1x_bo *, bo)
90 __field(u32, words) 92 __field(u32, words)
91 __field(u32, offset) 93 __field(u32, offset)
92 __field(bool, cmdbuf) 94 __field(bool, cmdbuf)
@@ -100,13 +102,13 @@ TRACE_EVENT(host1x_cdma_push_gather,
100 } 102 }
101 __entry->cmdbuf = cmdbuf; 103 __entry->cmdbuf = cmdbuf;
102 __entry->name = name; 104 __entry->name = name;
103 __entry->mem_id = mem_id; 105 __entry->bo = bo;
104 __entry->words = words; 106 __entry->words = words;
105 __entry->offset = offset; 107 __entry->offset = offset;
106 ), 108 ),
107 109
108 TP_printk("name=%s, mem_id=%08x, words=%u, offset=%d, contents=[%s]", 110 TP_printk("name=%s, bo=%p, words=%u, offset=%d, contents=[%s]",
109 __entry->name, __entry->mem_id, 111 __entry->name, __entry->bo,
110 __entry->words, __entry->offset, 112 __entry->words, __entry->offset,
111 __print_hex(__get_dynamic_array(cmdbuf), 113 __print_hex(__get_dynamic_array(cmdbuf),
112 __entry->cmdbuf ? __entry->words * 4 : 0)) 114 __entry->cmdbuf ? __entry->words * 4 : 0))
@@ -221,12 +223,13 @@ TRACE_EVENT(host1x_syncpt_load_min,
221); 223);
222 224
223TRACE_EVENT(host1x_syncpt_wait_check, 225TRACE_EVENT(host1x_syncpt_wait_check,
224 TP_PROTO(void *mem_id, u32 offset, u32 syncpt_id, u32 thresh, u32 min), 226 TP_PROTO(struct host1x_bo *bo, u32 offset, u32 syncpt_id, u32 thresh,
227 u32 min),
225 228
226 TP_ARGS(mem_id, offset, syncpt_id, thresh, min), 229 TP_ARGS(bo, offset, syncpt_id, thresh, min),
227 230
228 TP_STRUCT__entry( 231 TP_STRUCT__entry(
229 __field(void *, mem_id) 232 __field(struct host1x_bo *, bo)
230 __field(u32, offset) 233 __field(u32, offset)
231 __field(u32, syncpt_id) 234 __field(u32, syncpt_id)
232 __field(u32, thresh) 235 __field(u32, thresh)
@@ -234,15 +237,15 @@ TRACE_EVENT(host1x_syncpt_wait_check,
234 ), 237 ),
235 238
236 TP_fast_assign( 239 TP_fast_assign(
237 __entry->mem_id = mem_id; 240 __entry->bo = bo;
238 __entry->offset = offset; 241 __entry->offset = offset;
239 __entry->syncpt_id = syncpt_id; 242 __entry->syncpt_id = syncpt_id;
240 __entry->thresh = thresh; 243 __entry->thresh = thresh;
241 __entry->min = min; 244 __entry->min = min;
242 ), 245 ),
243 246
244 TP_printk("mem_id=%p, offset=%05x, id=%d, thresh=%d, current=%d", 247 TP_printk("bo=%p, offset=%05x, id=%d, thresh=%d, current=%d",
245 __entry->mem_id, __entry->offset, 248 __entry->bo, __entry->offset,
246 __entry->syncpt_id, __entry->thresh, 249 __entry->syncpt_id, __entry->thresh,
247 __entry->min) 250 __entry->min)
248); 251);
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index a0db2d4aa5f0..86574b0005ff 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -286,6 +286,8 @@ struct drm_mode_get_property {
286 char name[DRM_PROP_NAME_LEN]; 286 char name[DRM_PROP_NAME_LEN];
287 287
288 __u32 count_values; 288 __u32 count_values;
 289 /* This is only used to count enum values, not blobs. The _blobs suffix
 290 * remains purely for historical reasons, i.e. backwards compat. */
289 __u32 count_enum_blobs; 291 __u32 count_enum_blobs;
290}; 292};
291 293
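
The clarified comment is aimed at userspace: for enum and bitmask properties, count_enum_blobs counts enum entries, and the ioctl follows the usual two-pass sizing pattern. A sketch of that pattern, with foo_dump_property() as an illustrative helper and error checking trimmed:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

static void foo_dump_property(int fd, uint32_t prop_id)
{
	struct drm_mode_get_property prop;

	/* First pass: with NULL pointers, the kernel fills in counts. */
	memset(&prop, 0, sizeof(prop));
	prop.prop_id = prop_id;
	ioctl(fd, DRM_IOCTL_MODE_GETPROPERTY, &prop);

	if (!prop.count_values || !prop.count_enum_blobs)
		return;

	uint64_t values[prop.count_values];
	/* For enum/bitmask properties this counts enum entries, not blobs. */
	struct drm_mode_property_enum enums[prop.count_enum_blobs];

	/* Second pass: fetch the arrays sized from the counts. */
	prop.values_ptr = (uintptr_t)values;
	prop.enum_blob_ptr = (uintptr_t)enums;
	ioctl(fd, DRM_IOCTL_MODE_GETPROPERTY, &prop);
}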
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index ff57f07c3249..250262265ee3 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -340,6 +340,7 @@ typedef struct drm_i915_irq_wait {
340#define I915_PARAM_HAS_EXEC_HANDLE_LUT 26 340#define I915_PARAM_HAS_EXEC_HANDLE_LUT 26
341#define I915_PARAM_HAS_WT 27 341#define I915_PARAM_HAS_WT 27
342#define I915_PARAM_CMD_PARSER_VERSION 28 342#define I915_PARAM_CMD_PARSER_VERSION 28
343#define I915_PARAM_HAS_COHERENT_PHYS_GTT 29
343 344
344typedef struct drm_i915_getparam { 345typedef struct drm_i915_getparam {
345 int param; 346 int param;
@@ -876,6 +877,12 @@ struct drm_i915_gem_get_tiling {
876 * mmap mapping. 877 * mmap mapping.
877 */ 878 */
878 __u32 swizzle_mode; 879 __u32 swizzle_mode;
880
881 /**
882 * Returned address bit 6 swizzling required for CPU access through
883 * mmap mapping whilst bound.
884 */
885 __u32 phys_swizzle_mode;
879}; 886};
880 887
881struct drm_i915_gem_get_aperture { 888struct drm_i915_gem_get_aperture {
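
Together with the new I915_PARAM_HAS_COHERENT_PHYS_GTT parameter, the extra field lets userspace compare the GPU's swizzling with what the CPU sees through a mapping. A hedged sketch of such a check (the helper name is illustrative):

#include <string.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Returns 1 if CPU access through a mapping sees the same bit-6
 * swizzling as the GPU, 0 if manual swizzling is needed, -1 on error. */
static int foo_cpu_view_matches_gpu(int fd, uint32_t handle)
{
	struct drm_i915_gem_get_tiling tiling;

	memset(&tiling, 0, sizeof(tiling));
	tiling.handle = handle;
	if (ioctl(fd, DRM_IOCTL_I915_GEM_GET_TILING, &tiling))
		return -1;

	return tiling.swizzle_mode == tiling.phys_swizzle_mode;
}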
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
new file mode 100644
index 000000000000..7acef41fc209
--- /dev/null
+++ b/include/uapi/linux/kfd_ioctl.h
@@ -0,0 +1,154 @@
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#ifndef KFD_IOCTL_H_INCLUDED
24#define KFD_IOCTL_H_INCLUDED
25
26#include <linux/types.h>
27#include <linux/ioctl.h>
28
29#define KFD_IOCTL_MAJOR_VERSION 1
30#define KFD_IOCTL_MINOR_VERSION 0
31
32struct kfd_ioctl_get_version_args {
33 uint32_t major_version; /* from KFD */
34 uint32_t minor_version; /* from KFD */
35};
36
37/* For kfd_ioctl_create_queue_args.queue_type. */
38#define KFD_IOC_QUEUE_TYPE_COMPUTE 0
39#define KFD_IOC_QUEUE_TYPE_SDMA 1
40#define KFD_IOC_QUEUE_TYPE_COMPUTE_AQL 2
41
42#define KFD_MAX_QUEUE_PERCENTAGE 100
43#define KFD_MAX_QUEUE_PRIORITY 15
44
45struct kfd_ioctl_create_queue_args {
46 uint64_t ring_base_address; /* to KFD */
47 uint64_t write_pointer_address; /* from KFD */
48 uint64_t read_pointer_address; /* from KFD */
49 uint64_t doorbell_offset; /* from KFD */
50
51 uint32_t ring_size; /* to KFD */
52 uint32_t gpu_id; /* to KFD */
53 uint32_t queue_type; /* to KFD */
54 uint32_t queue_percentage; /* to KFD */
55 uint32_t queue_priority; /* to KFD */
56 uint32_t queue_id; /* from KFD */
57
58 uint64_t eop_buffer_address; /* to KFD */
59 uint64_t eop_buffer_size; /* to KFD */
60 uint64_t ctx_save_restore_address; /* to KFD */
61 uint64_t ctx_save_restore_size; /* to KFD */
62};
63
64struct kfd_ioctl_destroy_queue_args {
65 uint32_t queue_id; /* to KFD */
66 uint32_t pad;
67};
68
69struct kfd_ioctl_update_queue_args {
70 uint64_t ring_base_address; /* to KFD */
71
72 uint32_t queue_id; /* to KFD */
73 uint32_t ring_size; /* to KFD */
74 uint32_t queue_percentage; /* to KFD */
75 uint32_t queue_priority; /* to KFD */
76};
77
78/* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */
79#define KFD_IOC_CACHE_POLICY_COHERENT 0
80#define KFD_IOC_CACHE_POLICY_NONCOHERENT 1
81
82struct kfd_ioctl_set_memory_policy_args {
83 uint64_t alternate_aperture_base; /* to KFD */
84 uint64_t alternate_aperture_size; /* to KFD */
85
86 uint32_t gpu_id; /* to KFD */
87 uint32_t default_policy; /* to KFD */
88 uint32_t alternate_policy; /* to KFD */
89 uint32_t pad;
90};
91
92/*
93 * All counters are monotonic. They are used for profiling of compute jobs.
94 * The profiling is done by userspace.
95 *
 96 * In case of GPU reset, the counters should not be affected.
97 */
98
99struct kfd_ioctl_get_clock_counters_args {
100 uint64_t gpu_clock_counter; /* from KFD */
101 uint64_t cpu_clock_counter; /* from KFD */
102 uint64_t system_clock_counter; /* from KFD */
103 uint64_t system_clock_freq; /* from KFD */
104
105 uint32_t gpu_id; /* to KFD */
106 uint32_t pad;
107};
108
109#define NUM_OF_SUPPORTED_GPUS 7
110
111struct kfd_process_device_apertures {
112 uint64_t lds_base; /* from KFD */
113 uint64_t lds_limit; /* from KFD */
114 uint64_t scratch_base; /* from KFD */
115 uint64_t scratch_limit; /* from KFD */
116 uint64_t gpuvm_base; /* from KFD */
117 uint64_t gpuvm_limit; /* from KFD */
118 uint32_t gpu_id; /* from KFD */
119 uint32_t pad;
120};
121
122struct kfd_ioctl_get_process_apertures_args {
123 struct kfd_process_device_apertures
124 process_apertures[NUM_OF_SUPPORTED_GPUS];/* from KFD */
125
 126 /* from KFD, should be in the range [1, NUM_OF_SUPPORTED_GPUS] */
127 uint32_t num_of_nodes;
128 uint32_t pad;
129};
130
131#define KFD_IOC_MAGIC 'K'
132
133#define KFD_IOC_GET_VERSION \
134 _IOR(KFD_IOC_MAGIC, 1, struct kfd_ioctl_get_version_args)
135
136#define KFD_IOC_CREATE_QUEUE \
137 _IOWR(KFD_IOC_MAGIC, 2, struct kfd_ioctl_create_queue_args)
138
139#define KFD_IOC_DESTROY_QUEUE \
140 _IOWR(KFD_IOC_MAGIC, 3, struct kfd_ioctl_destroy_queue_args)
141
142#define KFD_IOC_SET_MEMORY_POLICY \
143 _IOW(KFD_IOC_MAGIC, 4, struct kfd_ioctl_set_memory_policy_args)
144
145#define KFD_IOC_GET_CLOCK_COUNTERS \
146 _IOWR(KFD_IOC_MAGIC, 5, struct kfd_ioctl_get_clock_counters_args)
147
148#define KFD_IOC_GET_PROCESS_APERTURES \
149 _IOR(KFD_IOC_MAGIC, 6, struct kfd_ioctl_get_process_apertures_args)
150
151#define KFD_IOC_UPDATE_QUEUE \
152 _IOW(KFD_IOC_MAGIC, 7, struct kfd_ioctl_update_queue_args)
153
154#endif
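
From userspace the new interface is reached through a character device, and the version handshake is the simplest ioctl in the set. A sketch; the /dev/kfd node name is an assumption here, since the header itself does not name the device node:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

int main(void)
{
	struct kfd_ioctl_get_version_args args = {0};
	int fd = open("/dev/kfd", O_RDWR);	/* node name assumed */

	if (fd < 0)
		return 1;
	if (ioctl(fd, KFD_IOC_GET_VERSION, &args) == 0)
		printf("KFD interface %u.%u\n",
		       args.major_version, args.minor_version);
	close(fd);
	return 0;
}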
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 995a95f61a19..cb346f26a22d 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -193,7 +193,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
193 } 193 }
194 194
195 flush_cache_page(vma, addr, pte_pfn(*ptep)); 195 flush_cache_page(vma, addr, pte_pfn(*ptep));
196 ptep_clear_flush(vma, addr, ptep); 196 ptep_clear_flush_notify(vma, addr, ptep);
197 set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot)); 197 set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));
198 198
199 page_remove_rmap(page); 199 page_remove_rmap(page);
diff --git a/kernel/time/time.c b/kernel/time/time.c
index 65015ff2f07c..6390517e77d4 100644
--- a/kernel/time/time.c
+++ b/kernel/time/time.c
@@ -741,6 +741,7 @@ u64 nsecs_to_jiffies64(u64 n)
741 return div_u64(n * 9, (9ull * NSEC_PER_SEC + HZ / 2) / HZ); 741 return div_u64(n * 9, (9ull * NSEC_PER_SEC + HZ / 2) / HZ);
742#endif 742#endif
743} 743}
744EXPORT_SYMBOL(nsecs_to_jiffies64);
744 745
745/** 746/**
746 * nsecs_to_jiffies - Convert nsecs in u64 to jiffies 747 * nsecs_to_jiffies - Convert nsecs in u64 to jiffies
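
Exporting the symbol makes the helper callable from modules, presumably for a driver elsewhere in this pull. A trivial sketch of module-side use (the foo_* name is illustrative):

#include <linux/jiffies.h>

static u64 foo_interval_to_jiffies(u64 interval_ns)
{
	/* Usable from modules now that the symbol is exported. */
	return nsecs_to_jiffies64(interval_ns);
}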
diff --git a/mm/fremap.c b/mm/fremap.c
index 11ef7ec40d13..2805d71cf476 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -37,7 +37,7 @@ static void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
37 37
38 if (pte_present(pte)) { 38 if (pte_present(pte)) {
39 flush_cache_page(vma, addr, pte_pfn(pte)); 39 flush_cache_page(vma, addr, pte_pfn(pte));
40 pte = ptep_clear_flush(vma, addr, ptep); 40 pte = ptep_clear_flush_notify(vma, addr, ptep);
41 page = vm_normal_page(vma, addr, pte); 41 page = vm_normal_page(vma, addr, pte);
42 if (page) { 42 if (page) {
43 if (pte_dirty(pte)) 43 if (pte_dirty(pte))
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 46f96c23cc27..817a875f2b8c 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1035,7 +1035,7 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
1035 goto out_free_pages; 1035 goto out_free_pages;
1036 VM_BUG_ON_PAGE(!PageHead(page), page); 1036 VM_BUG_ON_PAGE(!PageHead(page), page);
1037 1037
1038 pmdp_clear_flush(vma, haddr, pmd); 1038 pmdp_clear_flush_notify(vma, haddr, pmd);
1039 /* leave pmd empty until pte is filled */ 1039 /* leave pmd empty until pte is filled */
1040 1040
1041 pgtable = pgtable_trans_huge_withdraw(mm, pmd); 1041 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
@@ -1178,7 +1178,7 @@ alloc:
1178 pmd_t entry; 1178 pmd_t entry;
1179 entry = mk_huge_pmd(new_page, vma->vm_page_prot); 1179 entry = mk_huge_pmd(new_page, vma->vm_page_prot);
1180 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma); 1180 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1181 pmdp_clear_flush(vma, haddr, pmd); 1181 pmdp_clear_flush_notify(vma, haddr, pmd);
1182 page_add_new_anon_rmap(new_page, vma, haddr); 1182 page_add_new_anon_rmap(new_page, vma, haddr);
1183 mem_cgroup_commit_charge(new_page, memcg, false); 1183 mem_cgroup_commit_charge(new_page, memcg, false);
1184 lru_cache_add_active_or_unevictable(new_page, vma); 1184 lru_cache_add_active_or_unevictable(new_page, vma);
@@ -1512,7 +1512,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1512 pmd_t entry; 1512 pmd_t entry;
1513 ret = 1; 1513 ret = 1;
1514 if (!prot_numa) { 1514 if (!prot_numa) {
1515 entry = pmdp_get_and_clear(mm, addr, pmd); 1515 entry = pmdp_get_and_clear_notify(mm, addr, pmd);
1516 if (pmd_numa(entry)) 1516 if (pmd_numa(entry))
1517 entry = pmd_mknonnuma(entry); 1517 entry = pmd_mknonnuma(entry);
1518 entry = pmd_modify(entry, newprot); 1518 entry = pmd_modify(entry, newprot);
@@ -1644,6 +1644,7 @@ static int __split_huge_page_splitting(struct page *page,
1644 * serialize against split_huge_page*. 1644 * serialize against split_huge_page*.
1645 */ 1645 */
1646 pmdp_splitting_flush(vma, address, pmd); 1646 pmdp_splitting_flush(vma, address, pmd);
1647
1647 ret = 1; 1648 ret = 1;
1648 spin_unlock(ptl); 1649 spin_unlock(ptl);
1649 } 1650 }
@@ -2834,7 +2835,7 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
2834 pmd_t _pmd; 2835 pmd_t _pmd;
2835 int i; 2836 int i;
2836 2837
2837 pmdp_clear_flush(vma, haddr, pmd); 2838 pmdp_clear_flush_notify(vma, haddr, pmd);
2838 /* leave pmd empty until pte is filled */ 2839 /* leave pmd empty until pte is filled */
2839 2840
2840 pgtable = pgtable_trans_huge_withdraw(mm, pmd); 2841 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 47f6070d7c46..85032de5e20f 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2598,8 +2598,11 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
2598 } 2598 }
2599 set_huge_pte_at(dst, addr, dst_pte, entry); 2599 set_huge_pte_at(dst, addr, dst_pte, entry);
2600 } else { 2600 } else {
2601 if (cow) 2601 if (cow) {
2602 huge_ptep_set_wrprotect(src, addr, src_pte); 2602 huge_ptep_set_wrprotect(src, addr, src_pte);
2603 mmu_notifier_invalidate_range(src, mmun_start,
2604 mmun_end);
2605 }
2603 entry = huge_ptep_get(src_pte); 2606 entry = huge_ptep_get(src_pte);
2604 ptepage = pte_page(entry); 2607 ptepage = pte_page(entry);
2605 get_page(ptepage); 2608 get_page(ptepage);
@@ -2901,6 +2904,7 @@ retry_avoidcopy:
2901 2904
2902 /* Break COW */ 2905 /* Break COW */
2903 huge_ptep_clear_flush(vma, address, ptep); 2906 huge_ptep_clear_flush(vma, address, ptep);
2907 mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
2904 set_huge_pte_at(mm, address, ptep, 2908 set_huge_pte_at(mm, address, ptep,
2905 make_huge_pte(vma, new_page, 1)); 2909 make_huge_pte(vma, new_page, 1));
2906 page_remove_rmap(old_page); 2910 page_remove_rmap(old_page);
@@ -3376,6 +3380,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
3376 * and that page table be reused and filled with junk. 3380 * and that page table be reused and filled with junk.
3377 */ 3381 */
3378 flush_tlb_range(vma, start, end); 3382 flush_tlb_range(vma, start, end);
3383 mmu_notifier_invalidate_range(mm, start, end);
3379 i_mmap_unlock_write(vma->vm_file->f_mapping); 3384 i_mmap_unlock_write(vma->vm_file->f_mapping);
3380 mmu_notifier_invalidate_range_end(mm, start, end); 3385 mmu_notifier_invalidate_range_end(mm, start, end);
3381 3386
diff --git a/mm/ksm.c b/mm/ksm.c
index 6b2e337bc03c..d247efab5073 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -892,7 +892,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
892 * this assure us that no O_DIRECT can happen after the check 892 * this assure us that no O_DIRECT can happen after the check
893 * or in the middle of the check. 893 * or in the middle of the check.
894 */ 894 */
895 entry = ptep_clear_flush(vma, addr, ptep); 895 entry = ptep_clear_flush_notify(vma, addr, ptep);
896 /* 896 /*
897 * Check that no O_DIRECT or similar I/O is in progress on the 897 * Check that no O_DIRECT or similar I/O is in progress on the
898 * page 898 * page
@@ -960,7 +960,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
960 page_add_anon_rmap(kpage, vma, addr); 960 page_add_anon_rmap(kpage, vma, addr);
961 961
962 flush_cache_page(vma, addr, pte_pfn(*ptep)); 962 flush_cache_page(vma, addr, pte_pfn(*ptep));
963 ptep_clear_flush(vma, addr, ptep); 963 ptep_clear_flush_notify(vma, addr, ptep);
964 set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot)); 964 set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));
965 965
966 page_remove_rmap(page); 966 page_remove_rmap(page);
diff --git a/mm/memory.c b/mm/memory.c
index fbf74112de5b..c3b9097251c5 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -239,6 +239,7 @@ static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
239 return; 239 return;
240 240
241 tlb_flush(tlb); 241 tlb_flush(tlb);
242 mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
242#ifdef CONFIG_HAVE_RCU_TABLE_FREE 243#ifdef CONFIG_HAVE_RCU_TABLE_FREE
243 tlb_table_flush(tlb); 244 tlb_table_flush(tlb);
244#endif 245#endif
@@ -2220,7 +2221,7 @@ gotten:
2220 * seen in the presence of one thread doing SMC and another 2221 * seen in the presence of one thread doing SMC and another
2221 * thread doing COW. 2222 * thread doing COW.
2222 */ 2223 */
2223 ptep_clear_flush(vma, address, page_table); 2224 ptep_clear_flush_notify(vma, address, page_table);
2224 page_add_new_anon_rmap(new_page, vma, address); 2225 page_add_new_anon_rmap(new_page, vma, address);
2225 mem_cgroup_commit_charge(new_page, memcg, false); 2226 mem_cgroup_commit_charge(new_page, memcg, false);
2226 lru_cache_add_active_or_unevictable(new_page, vma); 2227 lru_cache_add_active_or_unevictable(new_page, vma);
diff --git a/mm/migrate.c b/mm/migrate.c
index 253474c22239..b1d02127e1be 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1862,7 +1862,7 @@ fail_putback:
1862 */ 1862 */
1863 flush_cache_range(vma, mmun_start, mmun_end); 1863 flush_cache_range(vma, mmun_start, mmun_end);
1864 page_add_anon_rmap(new_page, vma, mmun_start); 1864 page_add_anon_rmap(new_page, vma, mmun_start);
1865 pmdp_clear_flush(vma, mmun_start, pmd); 1865 pmdp_clear_flush_notify(vma, mmun_start, pmd);
1866 set_pmd_at(mm, mmun_start, pmd, entry); 1866 set_pmd_at(mm, mmun_start, pmd, entry);
1867 flush_tlb_range(vma, mmun_start, mmun_end); 1867 flush_tlb_range(vma, mmun_start, mmun_end);
1868 update_mmu_cache_pmd(vma, address, &entry); 1868 update_mmu_cache_pmd(vma, address, &entry);
@@ -1870,6 +1870,7 @@ fail_putback:
1870 if (page_count(page) != 2) { 1870 if (page_count(page) != 2) {
1871 set_pmd_at(mm, mmun_start, pmd, orig_entry); 1871 set_pmd_at(mm, mmun_start, pmd, orig_entry);
1872 flush_tlb_range(vma, mmun_start, mmun_end); 1872 flush_tlb_range(vma, mmun_start, mmun_end);
1873 mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
1873 update_mmu_cache_pmd(vma, address, &entry); 1874 update_mmu_cache_pmd(vma, address, &entry);
1874 page_remove_rmap(new_page); 1875 page_remove_rmap(new_page);
1875 goto fail_putback; 1876 goto fail_putback;
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 2c8da9825fe3..3b9b3d0741b2 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -193,6 +193,16 @@ void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
193 193
194 id = srcu_read_lock(&srcu); 194 id = srcu_read_lock(&srcu);
195 hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) { 195 hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
196 /*
197 * Call invalidate_range here too to avoid the need for the
198 * subsystem of having to register an invalidate_range_end
199 * call-back when there is invalidate_range already. Usually a
200 * subsystem registers either invalidate_range_start()/end() or
201 * invalidate_range(), so this will be no additional overhead
202 * (besides the pointer check).
203 */
204 if (mn->ops->invalidate_range)
205 mn->ops->invalidate_range(mn, mm, start, end);
196 if (mn->ops->invalidate_range_end) 206 if (mn->ops->invalidate_range_end)
197 mn->ops->invalidate_range_end(mn, mm, start, end); 207 mn->ops->invalidate_range_end(mn, mm, start, end);
198 } 208 }
@@ -200,6 +210,21 @@ void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
200} 210}
201EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_end); 211EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range_end);
202 212
213void __mmu_notifier_invalidate_range(struct mm_struct *mm,
214 unsigned long start, unsigned long end)
215{
216 struct mmu_notifier *mn;
217 int id;
218
219 id = srcu_read_lock(&srcu);
220 hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist) {
221 if (mn->ops->invalidate_range)
222 mn->ops->invalidate_range(mn, mm, start, end);
223 }
224 srcu_read_unlock(&srcu, id);
225}
226EXPORT_SYMBOL_GPL(__mmu_notifier_invalidate_range);
227
203static int do_mmu_notifier_register(struct mmu_notifier *mn, 228static int do_mmu_notifier_register(struct mmu_notifier *mn,
204 struct mm_struct *mm, 229 struct mm_struct *mm,
205 int take_mmap_sem) 230 int take_mmap_sem)
diff --git a/mm/rmap.c b/mm/rmap.c
index c52f43a69eea..45ba250babd8 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1380,7 +1380,7 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
1380 1380
1381 /* Nuke the page table entry. */ 1381 /* Nuke the page table entry. */
1382 flush_cache_page(vma, address, pte_pfn(*pte)); 1382 flush_cache_page(vma, address, pte_pfn(*pte));
1383 pteval = ptep_clear_flush(vma, address, pte); 1383 pteval = ptep_clear_flush_notify(vma, address, pte);
1384 1384
1385 /* If nonlinear, store the file page offset in the pte. */ 1385 /* If nonlinear, store the file page offset in the pte. */
1386 if (page->index != linear_page_index(vma, address)) { 1386 if (page->index != linear_page_index(vma, address)) {